diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index b4461d8a04edec5a31875ad2e6201d2df60e2645..3d3f7a14640a09818144dd65bb9d95707f3365f9 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -7,57 +7,57 @@ stages:
   - install
   - test
 
-env:focal:
-  image: keckj/hysop:focal
+env:groovy:
+  image: keckj/hysop:groovy
   stage: env
   script:
       - "bash ci/scripts/version.sh"
 
-config:focal:
-  image: keckj/hysop:focal
+config:groovy:
+  image: keckj/hysop:groovy
   stage: configure
   script: 
-      - "bash ci/scripts/config.sh $CI_PROJECT_DIR/build/gcc   $CI_PROJECT_DIR/install/gcc   gcc   g++     gfortran"
-      - "bash ci/scripts/config.sh $CI_PROJECT_DIR/build/clang-8 $CI_PROJECT_DIR/install/clang-8 clang-8 clang++-8 gfortran"
+      - "bash ci/scripts/config.sh $CI_PROJECT_DIR/build/gcc   $CI_PROJECT_DIR/install/gcc gcc-10 g++-10 gfortran-10"
+      - "bash ci/scripts/config.sh $CI_PROJECT_DIR/build/clang $CI_PROJECT_DIR/install/clang clang-10 clang++-10 gfortran-10"
   dependencies:
-    - env:focal
+    - env:groovy
   artifacts:
     expire_in: 1 week
     paths:
         - $CI_PROJECT_DIR/build
 
-build:focal:
-  image: keckj/hysop:focal
+build:groovy:
+  image: keckj/hysop:groovy
   stage: build
   script: 
-      - "bash ci/scripts/build.sh $CI_PROJECT_DIR/build/gcc   gcc   g++     gfortran"
-      - "bash ci/scripts/build.sh $CI_PROJECT_DIR/build/clang-8 clang-8 clang++-8 gfortran"
+      - "bash ci/scripts/build.sh $CI_PROJECT_DIR/build/gcc gcc-10 g++-10 gfortran-10"
+      - "bash ci/scripts/build.sh $CI_PROJECT_DIR/build/clang clang-10 clang++-10 gfortran-10"
   dependencies:
-    - config:focal
+    - config:groovy
   artifacts:
     expire_in: 1 week
     paths:
         - $CI_PROJECT_DIR/build
 
-install:focal:
-  image: keckj/hysop:focal
+install:groovy:
+  image: keckj/hysop:groovy
   stage: install
   script: 
       - "bash ci/scripts/install.sh $CI_PROJECT_DIR/build/gcc $CI_PROJECT_DIR/install/gcc"
   dependencies:
-    - build:focal
+    - build:groovy
   artifacts:
     expire_in: 1 week
     paths:
         - $CI_PROJECT_DIR/install
 
-test:focal:
-  image: keckj/hysop:focal
+test:groovy:
+  image: keckj/hysop:groovy
   stage: test
   script:
     - "bash ci/scripts/test.sh $CI_PROJECT_DIR/install/gcc $CI_PROJECT_DIR/hysop $CI_PROJECT_DIR/cache"
   dependencies:
-    - install:focal
+    - install:groovy
   cache:
     paths:
       - $CI_PROJECT_DIR/cache
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 4f4a57e288c841c2388112e3db56dbf7743c9415..1d4e033dc65f0f56ca8fc79c46690aedc90b606a 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -39,25 +39,22 @@ option(DOUBLEPREC "Set default HySoP floating point precision. Default = on, ie
 option(USE_MPI "Compile and link HySoP with mpi when this mode is enable. Default = on. WARNING: off mode is bugged." ON)
 option(WITH_TESTS "Enable testing. Default = OFF" ON)
 option(BUILD_SHARED_LIBS "Enable dynamic library build, default = ON." ON)
-option(USE_CXX "Expand hysop with some new functions from a generated c++ to python interface, wrapped into hysop.cpp2hysop module. Default = ON." OFF)
 option(WITH_SCALES "compile/create scales lib and link it with HySoP. Default = ON." ON)
 option(WITH_PARALLEL_COMPRESSED_HDF5 "Try to enable parallel compressed hdf5 interface. Default = ON." ON)
 option(WITH_FFTW "Link with fftw library (required for some HySoP solvers), default = ON" ON)
 option(WITH_EXTRAS "Link with some extra fortran libraries (like arnoldi solver), default = OFF" OFF)
 option(WITH_OPENCL "Use of GPU (required for some HySoP solvers), default = ON" ON)
-option(WITH_MAIN_FORTRAN "Create an executable (test purpose) from fortran sources in src/main, linked with libhysop, default = OFF" OFF)
-option(WITH_MAIN_CXX "Create an executable (test purpose) from cxx sources in src/hysop++/main, linked with libhysop, default = OFF" OFF)
 option(PROFILE "Enable profiling mode for HySoP. Can also be enabled with HYSOP_PROFILE environment variable. Default = OFF" OFF)
 option(VERBOSE "Enable verbose mode for HySoP. Can also be enabled with HYSOP_VERBOSE environment variable. Default = OFF" OFF)
 option(DEBUG "Enable debug mode for HySoP. Can also be enabled with HYSOP_DEBUG environment variable. Default = OFF" OFF)
 option(FULL_TEST "Enable all test options (pep8, mpi ...) - Default = OFF" OFF)
 option(OPTIM "To allow python -OO run, some packages must be deactivated. Set this option to 'ON' to do so. Default = OFF" OFF)
 option(WITH_MPI_TESTS "Enable mpi tests. Default = OFF." OFF)
-option(WITH_GOOGLE_TESTS "Enable google tests (c++). Default = OFF." OFF)
 option(FORTRAN_LAYOUT "Choose default data layout ('fortran', column-major or 'C' order, row-major) for arrays. Default = column-major." OFF)
 option(WITH_DOCUMENTATION "Build Documentation. Default = OFF" OFF)
 option(ENABLE_LONG_TESTS "Enable tests that may run for long time with important memory print. Default = OFF." OFF)
 option(DEV_MODE "Enable devel mode (aggressive checking of warnings ..). Default = ON." ON)
+
 # Set python install mode:
 # - user --> behave as 'python setup.py install --user'
 # - standard --> install in python site-package (ie behave as python setup.py install)
@@ -68,6 +65,7 @@ set(HYSOP_INSTALL "user" CACHE STRING "Install mode for hysop python package")
 # http://public.kitware.com/Bug/view.php?id=11964
 # See also http://www.cmake.org/cmake/help/v3.0/module/GNUInstallDirs.html?highlight=gnuinstalldirs
 include(GNUInstallDirs)
+
 # Set prefix path for libraries installation
 # --> means that any library target will be installed
 # in CMAKE_INSTALL_PREFIX/_install_lib
@@ -89,11 +87,6 @@ if(WITH_FFTW OR WITH_SCALES OR WITH_EXTRAS)
   set(USE_FORTRAN "ON")
 endif()
 
-if(NOT USE_CXX)
-    set(WITH_MAIN_CXX "OFF")
-    set(WITH_GOOGLE_TESTS "OFF")
-endif()
-
 # Force a default build type if not provided by user
 # CMAKE_BUILD_TYPE = empty, Debug, Release, RelWithDebInfo or MinSizeRel.
 if(NOT CMAKE_BUILD_TYPE)
@@ -104,11 +97,13 @@ endif()
 
 # cmake project name
 set(PROJECT_NAME hysop)
-# --- Name for the package ---
+
 # This name will be used as the Python Package name
 set(PACKAGE_NAME "hysop")
+
 # --- The name (without extension) of the lib to be created ---
 set(PROJECT_LIBRARY_NAME ${PROJECT_NAME})
+
 # ============= The project =============
 # Set project name and project languages
 # => this automatically defines:
@@ -117,10 +112,6 @@ set(PROJECT_LIBRARY_NAME ${PROJECT_NAME})
 # Note that because of OutOfSourceBuild, binary_dir and source_dir must be different.
 
 set(LANGLIST)
-if(USE_CXX)
-  set(LANGLIST ${LANGLIST} C CXX)
-endif()
-
 if(USE_FORTRAN)
   set(LANGLIST ${LANGLIST} C Fortran)
 endif()
@@ -135,59 +126,63 @@ set(HYSOP_LINK_LIBRARIES CACHE INTERNAL "List of external libraries.")
 
 # ============= Python and its packages =============
 # - Global setup (interp and lib) -
-find_package(PythonFull REQUIRED)
+find_package (Python COMPONENTS Interpreter Development)
 include(FindPythonModule)
+
 # - python packages -
 find_python_module(numpy        REQUIRED)
 find_python_module(scipy        REQUIRED)
-find_python_module(h5py         REQUIRED)
 find_python_module(sympy        REQUIRED)
+find_python_module(h5py         REQUIRED)
 find_python_module(psutil       REQUIRED)
 find_python_module(cpuinfo      REQUIRED)
 find_python_module(gmpy2        REQUIRED)
-find_python_module(subprocess32 REQUIRED)
 find_python_module(editdistance REQUIRED)
 find_python_module(portalocker  REQUIRED)
 find_python_module(tee          REQUIRED)
-find_python_module(colors       REQUIRED) # ansicolor package
-find_python_module(argparse_color_formatter REQUIRED)
+find_python_module(colors       REQUIRED) # corresponds to the ansicolor package
 find_python_module(primefac     REQUIRED)
 find_python_module(networkx     REQUIRED)
 find_python_module(pyfftw       REQUIRED)
-find_python_module(backports.weakref REQUIRED) # python-backports.weakref
-#find_python_module(backports.functools-lru-cache REQUIRED) # python-backports.functools-lru-cache
-find_python_module(matplotlib OPTIONAL)
-find_python_module(pyvis OPTIONAL)
-
+find_python_module(zarr         REQUIRED)
+find_python_module(numcodecs    REQUIRED)
+find_python_module(jsonpickle   REQUIRED)
+find_python_module(argparse_color_formatter REQUIRED)
+find_python_module(numba OPTIONAL)      # c++ jit compiler, required for CPU spectral filters
+find_python_module(hptt OPTIONAL)       # high performance cpu tensor transpose (fallback to numpy)
+find_python_module(flint OPTIONAL)      # high performance linear system solver (fallback to sympy)
+find_python_module(tbb OPTIONAL)        # additional numba threading backend (fallback to pthreads)
+find_python_module(mkl_fft OPTIONAL)    # additional fft backend, faster than FFTW, faster than clFFT on CPU OpenCL platforms.
+find_python_module(cairo OPTIONAL)      # additional plotting backend
+find_python_module(matplotlib OPTIONAL) # required for general plots
+find_python_module(pyvis OPTIONAL)      # required for graph plots
+find_python_module(memory_tempfile OPTIONAL) # enable in memory HDF5 I/O to call paraview during simulation
+
+# Some OpenCL-related Python packages fail to import on non-OpenCL machines (a cluster's frontend, for instance)
 find_package( OpenCL  )
-if(${OpenCL_LIBRARY})  # Some opencl related python package fails to import on non OpenCL machines (cluster's frontend for instance)
-  find_python_module(pyopencl     REQUIRED)
-  find_python_module(gpyfft       REQUIRED)
+if(${OpenCL_LIBRARY})
+  find_python_module(pyopencl REQUIRED)
+  find_python_module(mako     REQUIRED)
+  find_python_module(gpyfft   REQUIRED)
 else()
-  find_python_module(pyopencl     )
-  find_python_module(gpyfft       )
+  find_python_module(pyopencl OPTIONAL)
+  find_python_module(mako     OPTIONAL)
+  find_python_module(gpyfft   OPTIONAL)
 endif()
+
 # --- MPI ---
 if(USE_MPI)
   find_package(MPI REQUIRED)
   find_python_module(mpi4py REQUIRED)
 endif()
+
 # --- Wheel, required for a proper build/install process ---
 find_python_module(wheel REQUIRED)
-if(USE_CXX)
-  find_package(SWIG 3.0.2 REQUIRED)
-  # WARNING FP : for cmake < 3.0 UseSWIG.cmake
-  # does not work properly (bug for swig outdir)
-  if(CMAKE_VERSION VERSION_LESS 3.0.0)
-    set(SWIG_USE_FILE ${CMAKE_SOURCE_DIR}/cmake/UseSWIG.cmake)
-  endif()
-  include(${SWIG_USE_FILE})
-endif()
 
 # Find python build dir name --> needed for tests and doc
-if(USE_CXX OR USE_FORTRAN)
+if(USE_FORTRAN)
   execute_process(
-    COMMAND ${PYTHON_EXECUTABLE} -c "import distutils.util as ut ; import distutils.sysconfig as sy; print 'lib.'+ut.get_platform()+'-'+sy.get_python_version()"
+    COMMAND ${PYTHON_EXECUTABLE} -c "import distutils.util as ut ; import distutils.sysconfig as sy; print('lib.'+ut.get_platform()+'-'+sy.get_python_version())"
     OUTPUT_VARIABLE ${PROJECT_NAME}_PYTHON_BUILD_DIR)
   string(STRIP ${${PROJECT_NAME}_PYTHON_BUILD_DIR} ${PROJECT_NAME}_PYTHON_BUILD_DIR)
   set(HYSOP_BUILD_PYTHONPATH ${CMAKE_BINARY_DIR}/build/${${PROJECT_NAME}_PYTHON_BUILD_DIR} CACHE INTERNAL "Python package build path")
@@ -221,10 +216,6 @@ if(WITH_FFTW)
     add_definitions(${FFTW_DEFINES})
 endif()
 
-if(USE_CXX)
-    compile_with(Boost REQUIRED)
-endif()
-
 if(WITH_EXTRAS)
   # Arnoldi solver needs zgeev, which means lapack
   compile_with(LAPACK)
@@ -236,7 +227,6 @@ if(WITH_EXTRAS)
     endif()
   endforeach()
   set(EXTRASLIB ${dirlist} CACHE PATH "extras libraries dir")
-
 endif()
 
 # ========= Check parallel hdf5 availability =========
@@ -262,15 +252,15 @@ endif()
 # ========= Check which opencl devices are available on the system =========
 if(WITH_OPENCL)
   execute_process(
-    COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_SOURCE_DIR}/opencl_explore.py "EXPLORE")
+    COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_SOURCE_DIR}/cmake/opencl_explore.py "EXPLORE")
   execute_process(
-    COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_SOURCE_DIR}/opencl_explore.py
+    COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_SOURCE_DIR}/cmake/opencl_explore.py
     OUTPUT_VARIABLE OPENCL_DEFAULT_OPENCL_ID)
 else()
   execute_process(
-    COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_SOURCE_DIR}/opencl_explore.py "EXPLORE" CPU)
+    COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_SOURCE_DIR}/cmake/opencl_explore.py "EXPLORE" CPU)
   execute_process(
-    COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_SOURCE_DIR}/opencl_explore.py CPU
+    COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_SOURCE_DIR}/cmake/opencl_explore.py CPU
     OUTPUT_VARIABLE OPENCL_DEFAULT_OPENCL_ID)
 endif()
 
@@ -279,6 +269,8 @@ list(GET MY_LIST 0 OPENCL_DEFAULT_OPENCL_PLATFORM_ID)
 list(GET MY_LIST 1 OPENCL_DEFAULT_OPENCL_DEVICE_ID)
 display(OPENCL_DEFAULT_OPENCL_PLATFORM_ID)
 display(OPENCL_DEFAULT_OPENCL_DEVICE_ID)
+
+
 # =========== RPATH stuff ===========
 # Doc :
 #  - https://cmake.org/Wiki/CMake_RPATH_handling
@@ -326,7 +318,6 @@ else()
 endif()
 
 #  ====== Create (and setup) install/uninstall targets ======
-#
 # --> set installation dir
 # --> set options for python install
 # --> create install/uninstall targets
@@ -368,19 +359,17 @@ endif()
 
 
 if(USE_FORTRAN)
-
   # Path to fortran-related generated files (.pyf, precision ...)
   set(GENERATED_FORTRAN_FILES_DIR ${CMAKE_BINARY_DIR}/generated_fortran/)
 
-
   # --- copy sort_f90 file to build ---
   # Required for setup.py to handle fortran files dependencies
-  configure_file(${CMAKE_SOURCE_DIR}/sort_f90.py ${CMAKE_BINARY_DIR}/sort_f90.py)
+  configure_file(${CMAKE_SOURCE_DIR}/cmake/sort_f90.py ${CMAKE_BINARY_DIR}/sort_f90.py)
 
   # --- Generate f2py_f2cmap file ---
-  if(EXISTS ${CMAKE_SOURCE_DIR}/f2py_f2cmap.in)
+  if(EXISTS ${CMAKE_SOURCE_DIR}/cmake/f2py_f2cmap.in)
 	message(STATUS "Generate f2py map file ...")
-	configure_file(${CMAKE_SOURCE_DIR}/f2py_f2cmap.in
+	configure_file(${CMAKE_SOURCE_DIR}/cmake/f2py_f2cmap.in
       ${CMAKE_BINARY_DIR}/.f2py_f2cmap)
   endif()
 
@@ -404,9 +393,11 @@ if(USE_FORTRAN)
   # Set module files directory (i.e. where .mod will be created)
   set(CMAKE_Fortran_MODULE_DIRECTORY ${CMAKE_BINARY_DIR}/Modules)
   #  Add compilation flags:
-  #append_Fortran_FLAGS("-Wall -fPIC -ffree-line-length-none -DBLOCKING_SEND_PLUS -DBLOCKING_SEND")
   append_Fortran_FLAGS("-Wall -fPIC -ffree-line-length-none -cpp")
   append_Fortran_FLAGS("-Wno-unused-dummy-argument -Wno-integer-division -Wno-unused-value -Wno-maybe-uninitialized -Wno-unused-function")
+  if (CMAKE_Fortran_COMPILER_ID MATCHES "GNU" AND CMAKE_Fortran_COMPILER_VERSION GREATER_EQUAL 10.0)
+    append_Fortran_FLAGS("-fallow-argument-mismatch")
+  endif()
 
   if(USE_MPI)
     # -I
@@ -421,11 +412,11 @@ if(USE_FORTRAN)
   set(Fortran_FLAGS ${CMAKE_Fortran_FLAGS})
   append_flags(Fortran_FLAGS ${CMAKE_Fortran_FLAGS_${CMAKE_BUILD_TYPE}})
   # --- Generate precision.f95 and precision.pyf files ---
-  if(EXISTS ${CMAKE_SOURCE_DIR}/precision.conf.in)
+  if(EXISTS ${CMAKE_SOURCE_DIR}/cmake/precision.conf.in)
     message(STATUS "Generate precision.f95 file ...")
-    configure_file(${CMAKE_SOURCE_DIR}/precision.conf.in
+    configure_file(${CMAKE_SOURCE_DIR}/cmake/precision.conf.in
       ${GENERATED_FORTRAN_FILES_DIR}/precision.f95)
-    configure_file(${CMAKE_SOURCE_DIR}/precision.conf.in
+    configure_file(${CMAKE_SOURCE_DIR}/cmake/precision.conf.in
       ${GENERATED_FORTRAN_FILES_DIR}/precision.pyf)
   endif()
 endif()
@@ -435,42 +426,6 @@ if(USE_FORTRAN)
   set(FORTRAN_INCLUDE_DIRS ${FFTW_INCLUDE_DIRS})
 endif()
 
-if(USE_CXX)
-    #C++ variables used by setup.py.in for swig
-    if(DEV_MODE)
-        if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang")
-            set(CXX_WARNING_FLAGS "-W -Wall -Wextra -Wno-unused-variable -Wno-unused-parameter -Wno-unused-local-typedefs -Wno-missing-braces")
-        elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU")
-            set(CXX_WARNING_FLAGS "-W -Wall -Wextra -Wno-unused-variable -Wno-unused-but-set-variable -Wno-unused-parameter -Wno-unused-local-typedefs -Wno-deprecated-declarations")
-            #elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Intel")
-            #elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "MSVC")
-        else()
-            set(CXX_WARNING_FLAGS "")
-        endif()
-    endif()
-
-    list(APPEND CMAKE_CXX_FLAGS "${CXX_WARNING_FLAGS} ${FFTW_COMPILE_FLAGS} -fPIC -std=c++11")
-    if(APPLE)
-        list(APPEND CMAKE_EXE_LINKER_FLAGS "-ldl -lutil")
-    else()
-        list(APPEND CMAKE_EXE_LINKER_FLAGS "-Wl,--no-as-needed -ldl -lutil")
-    endif()
-
-    set(CXX_FLAGS ${CMAKE_CXX_FLAGS})
-    set(CXX_LINKER_FLAGS ${CMAKE_EXE_LINKER_FLAGS})
-    set(CXX_EXTRA_DEFINES ${FFTW_DEFINES} "-DHAS_EXTERN_TEMPLATES")
-
-    set(CXX_EXT_INCLUDES ${Boost_INCLUDE_DIR} ${PYTHON_INCLUDE_DIR} ${FFTW_INCLUDE_DIRS})
-    set(CXX_EXT_LIBS ${Boost_LIBRARIES} ${PYTHON_LIBRARIES} ${FFTW_LIBRARIES})
-    set(CXX_EXT_LIB_DIR ${Boost_LIBRARY_DIRS} ${PYTHON_LIBRARY_DIRS} ${FFTW_LIBRARY_DIRS})
-
-    set(CMAKE_INCLUDE_SYSTEM_FLAG_C "-isystem ")
-    set(CMAKE_INCLUDE_SYSTEM_FLAG_CXX "-isystem ")
-
-    #swig package name (lib name generated by swig)
-    set(CPP_2_HYSOP "cpp2hysop")
-endif()
-
 # Append pythonlib to hysop link floags
 list(APPEND HYSOP_LINK_LIBRARIES ${PYTHON_LIBRARIES} )
 
@@ -488,65 +443,6 @@ if(EXISTS ${CMAKE_SOURCE_DIR}/hysop/__init__.py.in)
   configure_file(hysop/__init__.py.in ${CMAKE_SOURCE_DIR}/hysop/__init__.py)
 endif()
 
-if(EXISTS ${CMAKE_SOURCE_DIR}/hysop/mpi/__init__.py.in)
-  message(STATUS "Generate mpi/__init__.py file ...")
-  file(REMOVE ${CMAKE_SOURCE_DIR}/hysop/mpi/__init__.py)
-configure_file(hysop/mpi/__init__.py.in ${CMAKE_SOURCE_DIR}/hysop/mpi/__init__.py)
-endif()
-
-# Hysop C++ library is generated in setup.py by swig
-# --- C++ main and tests  ---
-if(USE_CXX)
-
-    get_filename_component(CXX_DIR  "${CMAKE_SOURCE_DIR}/src/hysop++" ABSOLUTE)
-    get_filename_component(CXX_MAIN_DIR "${CXX_DIR}/main"             ABSOLUTE)
-    get_filename_component(CXX_TEST_DIR "${CXX_DIR}/tests"            ABSOLUTE)
-    get_filename_component(CXX_SOURCE_DIR "${CXX_DIR}/src"            ABSOLUTE)
-
-    include_directories(${CXX_SOURCE_DIR})
-    include_directories(SYSTEM ${CXX_EXT_INCLUDES})
-    link_directories(${CXX_EXT_LIB_DIRS})
-    add_definitions(${CXX_EXTRA_DEFINES})
-
-    if(WITH_MAIN_CXX OR WITH_GOOGLE_TESTS)
-        if(APPLE) #swig only generates a bundle, need to generate another static library...
-            set(HYSOP_CXX_LIBRARY_DYLIB "cpp2hysop_dylib")
-
-            file(GLOB_RECURSE source_files ${CXX_SOURCE_DIR}/*.cpp)
-            add_library(${HYSOP_CXX_LIBRARY_DYLIB} STATIC ${source_files})
-            target_link_libraries(${HYSOP_CXX_LIBRARY_DYLIB} ${EXT_LIBRARIES})
-
-            set(HYSOP_CXX_LIBRARY ${HYSOP_CXX_LIBRARY_DYLIB})
-            set(HYSOP_CXX_LIBRARY_DEP cpp2hysop_dylib)
-        else() #nothing to do on other platforms bundle <=> dynamic libraries, so just copy the swig generated one
-            set(HYSOP_CXX_LIBRARY_BUNDLE "${CMAKE_CURRENT_BINARY_DIR}/libcpp2hysop_bundle.so")
-            add_custom_target(cpp2hysop_bundle
-                DEPENDS wheel
-                COMMAND cp `find ${CMAKE_CURRENT_BINARY_DIR}/build -name _${CPP_2_HYSOP}.so` ${HYSOP_CXX_LIBRARY_BUNDLE}
-                WORKING_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}"
-                COMMENT "Copy swig c++ library to link")
-            set(HYSOP_CXX_LIBRARY ${HYSOP_CXX_LIBRARY_BUNDLE})
-            set(HYSOP_CXX_LIBRARY_DEP cpp2hysop_bundle)
-        endif()
-    endif()
-
-    if(WITH_MAIN_CXX)
-        list(APPEND cxx_executable_sources "${CXX_MAIN_DIR}/planner.cpp")
-        list(APPEND cxx_executable_sources "${CXX_MAIN_DIR}/diffSolver.cpp")
-        list(APPEND cxx_executable_sources "${CXX_MAIN_DIR}/poissonSolver.cpp")
-        foreach(cxx_main_source ${cxx_executable_sources})
-            get_filename_component(cxx_exec_name "${cxx_main_source}" NAME_WE)
-            add_executable(${cxx_exec_name} ${cxx_main_source})
-            add_dependencies(${cxx_exec_name} ${HYSOP_CXX_LIBRARY_DEP})
-            target_link_libraries(${cxx_exec_name} ${HYSOP_CXX_LIBRARY} ${CXX_EXT_LIBS})
-        endforeach()
-    endif()
-
-    if(WITH_GOOGLE_TESTS)
-        add_subdirectory(${CXX_TEST_DIR})
-    endif()
-endif()
-
 # =========== RPATH stuff ===========
 # Doc :
 #  - https://cmake.org/Wiki/CMake_RPATH_handling
@@ -621,9 +517,9 @@ endif()
 if(VERBOSE_MODE)
   message("\n====================== End of configuration process ======================")
   message("\n Summary: ")
-  message(STATUS " Python libraries : ${PYTHON_LIBRARIES}")
-  message(STATUS " Python include : ${PYTHON_INCLUDE_DIRS}")
-  message(STATUS " Python version : ${PYTHON_VERSION_STRING}")
+  message(STATUS " Python libraries : ${Python_LIBRARY_DIRS}")
+  message(STATUS " Python include : ${Python_INCLUDE_DIRS}")
+  message(STATUS " Python version : ${Python_VERSION}")
   message(STATUS " Python executable : ${PYTHON_EXECUTABLE}")
   message(STATUS " Install mode is `${HYSOP_INSTALL}` and ${PACKAGE_NAME} will be installed in : ${HYSOP_PYTHON_INSTALL_DIR}")
   message(STATUS " ${PACKAGE_NAME} will be built in ${HYSOP_BUILD_PYTHONPATH}")
@@ -632,11 +528,6 @@ if(VERBOSE_MODE)
   else()
     message(WARNING "You deactivate fortran to python interface generation. This will disable the fortran interface, including fftw and scales fonctionnalities.")
   endif()
-  if(USE_CXX)
-    message(STATUS " CXX compiler : ${CMAKE_CXX_COMPILER}")
-  else()
-    message(WARNING "You deactivate c++ to python interface generation. This will disable the Aitken-Schwarz Poisson solver.")
-  endif()
   message(STATUS " Sources are in : ${CMAKE_SOURCE_DIR}")
   message(STATUS " Build is done in : ${CMAKE_BINARY_DIR}")
   message(STATUS " Project uses MPI : ${USE_MPI}")
@@ -665,11 +556,3 @@ if(VERBOSE_MODE)
   message("Try to run python -c 'import hysop'. If it fails, add ${HYSOP_PYTHON_INSTALL_DIR} to PYTHONPATH environment variable.")
   message("Example : \n export PYTHONPATH=${HYSOP_PYTHON_INSTALL_DIR}/:\${PYTHONPATH}\n")
 endif()
-
-# Add custom target to install compiled libraries locally
-add_custom_target(update_libs
-                  COMMAND find ${CMAKE_SOURCE_DIR}/hysop/ -name '*.so' -type f -delete
-                  COMMAND find ${CMAKE_BINARY_DIR} -name '*.so' -type f -print0 | xargs -0 cp -t ${CMAKE_SOURCE_DIR}/hysop/
-                  COMMAND [ -f "$ENV{HOME}/.hysop.__init__.py" ] && rm ${CMAKE_SOURCE_DIR}/hysop/__init__.py
-                  COMMAND [ -f "$ENV{HOME}/.hysop.__init__.py" ] && cp $ENV{HOME}/.hysop.__init__.py ${CMAKE_SOURCE_DIR}/hysop/__init__.py
-                  DEPENDS wheel)
diff --git a/CTestConfig.cmake b/CTestConfig.cmake
deleted file mode 100644
index 93d5f9d9011c8ca475b4f66dd9dbfeadef271ed4..0000000000000000000000000000000000000000
--- a/CTestConfig.cmake
+++ /dev/null
@@ -1,6 +0,0 @@
-set(CTEST_PROJECT_NAME "HySoP")
-set(CTEST_NIGHTLY_START_TIME "01:00:00 UTC")
-set(CTEST_DROP_METHOD "http")
-set(CTEST_DROP_SITE "my.cdash.org")
-set(CTEST_DROP_LOCATION "/submit.php?project=HySoP")
-set(CTEST_DROP_SITE_CDASH TRUE)
diff --git a/HySoPConfig.cmake.in b/HySoPConfig.cmake.in
deleted file mode 100644
index c435c411fef14a6ed82c1aa37508308a0fc4563e..0000000000000000000000000000000000000000
--- a/HySoPConfig.cmake.in
+++ /dev/null
@@ -1,38 +0,0 @@
-# - config file for @PACKAGE_NAME@ package
-# Written by F. Pérignon, 2011 march
-#
-# This file generates @PACKAGE_NAME@Config.cmake, that may be used by another cmake project
-# to retrieve all the configuration variables from @PACKAGE_NAME@
-#
-# It defines the following variables
-#
-# @PACKAGE_NAME@_INCLUDE_DIRS - include directories for ppmcore
-# @PACKAGE_NAME@_EXTRA_INCLUDE_DIRS - path to extra headers needed for @PACKAGE_NAME@ (metis.h ...)
-# @PACKAGE_NAME@_LIBRARY_DIRS - path to @PACKAGE_NAME@ library(ies)
-# @PACKAGE_NAME@_LIBRARIES  - libraries to link against to use ppmcore
-# @PACKAGE_NAME@_USE_XXX - value of option "USE_XXX" (for example USE_MPI, USE_Metis ... = ON or OFF)
-
-# Tell the user where to find ppmcore headers
-# Tell the user project where to find our headers and libraries
-set(@PACKAGE_NAME@_INCLUDE_DIRS "${${PACKAGE_SHORT_NAME}_INCLUDE_DIRS}")
-set(@PACKAGE_NAME@_EXTRA_INCLUDE_DIRS "${${PACKAGE_NAME}_EXTRA_INCLUDE_DIRS}")
-set(@PACKAGE_NAME@_LIBRARY_DIRS "${${PACKAGE_NAME}_LIB_DIR}")
-set(@PACKAGE_NAME@_MODULE_DIR "${${PACKAGE_NAME}_INCLUDE_DIRS}/Modules")
-
-# Our library dependencies (contains definitions for IMPORTED targets)
-include("${${PACKAGE_NAME}_CMAKE_DIR}/@PACKAGE_NAME@LibraryDepends.cmake")
- 
-# These are IMPORTED targets created by FooBarLibraryDepends.cmake
-set(@PACKAGE_NAME@_LIBRARIES @PROJECT_LIBRARY_NAME@)
-
-# Set all @PACKAGE_NAME@ options
-set(@PACKAGE_NAME@_USE_MPI @USE_MPI@)
-set(@PACKAGE_NAME@_USE_PPM @WITH_PPM@)
-set(@PACKAGE_NAME@_USE_PPM_Numerics @WITH_PPM_Numerics@)
-
-
-# Set var for compilers used by @PACKAGE_NAME@
-#set(@PACKAGE_NAME@_Fortran_COMPILER @CMAKE_Fortran_COMPILER@)
-
-# Fortran flags
-#set(@PACKAGE_NAME@_Fortran_FLAGS @CMAKE_Fortran_FLAGS@)
diff --git a/HySoPConfigVersion.cmake.in b/HySoPConfigVersion.cmake.in
deleted file mode 100644
index ad9445c38a0d4d54e9620b9585305cbbd3db4b30..0000000000000000000000000000000000000000
--- a/HySoPConfigVersion.cmake.in
+++ /dev/null
@@ -1,12 +0,0 @@
-set(PACKAGE_VERSION "@HySoP_version@")
- 
-
-# Check whether the requested PACKAGE_FIND_VERSION is compatible
-if("${PACKAGE_VERSION}" VERSION_LESS "${PACKAGE_FIND_VERSION}")
-  set(PACKAGE_VERSION_COMPATIBLE FALSE)
-else()
-  set(PACKAGE_VERSION_COMPATIBLE TRUE)
-  if ("${PACKAGE_VERSION}" VERSION_EQUAL "${PACKAGE_FIND_VERSION}")
-    set(PACKAGE_VERSION_EXACT TRUE)
-  endif()
-endif()
diff --git a/README.md b/README.md
index 6a37b8f960983470a5607f6de292004764d003a5..14ed56a2c3e4eb845ae61cb3fbfcc20fd225363c 100644
--- a/README.md
+++ b/README.md
@@ -1,5 +1,6 @@
 [![Platform](https://img.shields.io/badge/platform-linux--64%20%7C%C2%A0%20osx--64-lightgrey.svg)]()
-[![Python 2.7](https://img.shields.io/badge/python-2.7-blue.svg)](https://www.python.org/downloads/release/python-270/)
+[![Python 3.8](https://img.shields.io/badge/python-3.8-blue.svg)](https://www.python.org/downloads/release/python-380/)
+[![Python 3.9](https://img.shields.io/badge/python-3.9-blue.svg)](https://www.python.org/downloads/release/python-390/)
 [![Licence](https://img.shields.io/badge/licence-APLv2-blue.svg)](https://www.apache.org/licenses/LICENSE-2.0)
 [![Pipeline Status](https://gricad-gitlab.univ-grenoble-alpes.fr/particle_methods/hysop/badges/master/pipeline.svg)](https://gricad-gitlab.univ-grenoble-alpes.fr/particle_methods/hysop/commits/master)
 [![Docker Pulls](https://img.shields.io/docker/pulls/keckj/hysop.svg)](https://hub.docker.com/r/keckj/hysop/tags)
diff --git a/ci/docker_images/ubuntu/groovy/Dockerfile b/ci/docker_images/ubuntu/groovy/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..8f4cc15136927bc9b5305cca2e72defdb94e3326
--- /dev/null
+++ b/ci/docker_images/ubuntu/groovy/Dockerfile
@@ -0,0 +1,243 @@
+# Test docker for gitlab-ci
+FROM ubuntu:groovy
+MAINTAINER Jean-Baptiste.Keck@imag.fr
+
+# parallel builds
+ARG NTHREADS
+ENV MAKEFLAGS "-j${NTHREADS}"
+
+# upgrade initial image
+ENV DEBIAN_FRONTEND noninteractive
+RUN apt-get update && apt-get full-upgrade -y
+
+# get build tools and required libraries
+RUN apt-get update && apt-get install -y --no-install-recommends expat unzip xz-utils automake libtool pkg-config cmake rsync git vim ssh curl wget ca-certificates gcc g++ gfortran lsb-core cpio libnuma1 libpciaccess0 libreadline-dev libblas-dev liblapack-dev libgcc-10-dev libgfortran-10-dev libgmp-dev libmpfr-dev libmpc-dev python3.8-dev opencl-headers swig libgmp-dev libmpfr-dev libmpc-dev libcairo-dev libcairomm-1.0-dev python3.8-tk
+
+# python packages using pip3.8
+RUN cd /tmp && \
+ curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py && \
+ python3.8 get-pip.py && \
+ pip3.8 install --upgrade pip && \
+ rm -f /tmp/get-pip.py
+RUN pip3.8 install --upgrade numpy setuptools cffi wheel pytest pybind11 cython
+
+# OpenMPI 4 + mpi4py (enable mpi1 compatibility for mpi4py)
+ENV MPI_ROOT "/usr/local"
+RUN cd /tmp && \
+ wget https://download.open-mpi.org/release/open-mpi/v4.0/openmpi-4.0.5.tar.gz && \
+ tar -xvzf openmpi-*.tar.gz && \
+ rm -f openmpi-*.tar.gz && \
+ cd openmpi-* && \
+ ./configure --enable-shared --disable-static --with-threads=posix --enable-ipv6 --prefix="${MPI_ROOT}" --with-hwloc=internal --with-libevent=internal --enable-mpi1-compatibility && \
+ make && \
+ make install && \
+ rm -rf /tmp/openmpi-*
+
+ENV MPICC "${MPI_ROOT}/bin/mpicc"
+RUN ldconfig && pip3.8 install --upgrade mpi4py
+
+# HPTT (CPU tensor permutation library)
+RUN cd /tmp && \
+ git clone https://gitlab.com/keckj/hptt.git && \
+ cd hptt && \
+ mkdir build && \
+ cd build && \
+ cmake -DCMAKE_BUILD_TYPE=Release .. && \
+ make && \
+ make install && \
+ cd ../pythonAPI && \
+ pip3.8 install --upgrade . && \
+ cd /tmp && \
+ rm -Rf /tmp/hptt
+
+# HDF5 1.12.0 + h5py 3.0.0
+RUN cd /tmp && \
+ wget https://support.hdfgroup.org/ftp/HDF5/releases/hdf5-1.12/hdf5-1.12.0/src/hdf5-1.12.0.tar.gz && \
+ tar -xvzf hdf5-*.tar.gz && \
+ rm -f hdf5-*.tar.gz && \
+ cd hdf5-* && \
+ CC="${MPICC}" ./configure --prefix="${MPI_ROOT}" --enable-parallel --enable-shared=yes --enable-static=no && \
+ make && \
+ make install && \
+ rm -rf /tmp/hdf5-*
+RUN CC="${MPICC}" HDF5_MPI="ON" HDF5_VERSION="1.12.0" HDF5_DIR="${MPI_ROOT}" pip3.8 install --upgrade --no-binary=h5py h5py
+
+# other python packages (standard primefac package does not support python3)
+RUN pip3.8 install --upgrade scipy sympy matplotlib gmpy2 psutil py-cpuinfo Mako editdistance portalocker colors.py tee pycairo argparse_color_formatter networkx pyvis zarr numcodecs jsonpickle memory-tempfile
+RUN pip3.8 install git+git://github.com/keckj/primefac-fork@master 
+
+# patchelf
+RUN cd /tmp && \
+ git clone https://github.com/NixOS/patchelf.git && \
+ cd patchelf && \
+ ./bootstrap.sh && \
+ ./configure && \
+ make && \
+ make install && \
+ cd - && \
+ rm -Rf /tmp/patchelf
+
+# Intel experimental OpenCL platform with SYCL support (2020-10)
+# /!\ Newest version 2020.11 does not work
+ENV TBBROOT="/opt/intel/oclcpuexp/x64"
+ENV LD_LIBRARY_PATH "${TBBROOT}:${LD_LIBRARY_PATH}"
+RUN mkdir -p /opt/intel/oclcpuexp && \
+ wget https://github.com/intel/llvm/releases/download/2020-06/oclcpuexp-2020.10.6.0.4_rel.tar.gz && \
+ tar -xvzf oclcpuexp-*.tar.gz && \
+ mv x64/ /opt/intel/oclcpuexp/ && \
+ mv clbltfnshared.rtl /opt/intel/oclcpuexp/ && \
+ rm -f *.rtl && \
+ rm -rf oclcpuexp-* && \
+ wget https://github.com/oneapi-src/oneTBB/releases/download/v2020.3/tbb-2020.3-lin.tgz && \
+ tar -xvzf tbb-*.tgz && \
+ mv tbb/lib/intel64/gcc4.8/* "${TBBROOT}/" && \
+ rm -f /usr/local/lib/libOpenCL.so && \
+ rm -f /usr/local/lib/libOpenCL.so && \
+ rm -f /usr/local/lib/libOpenCL.so.1 && \
+ rm -f /usr/local/lib/libOpenCL.so.2.0 && \
+ ln -s "${TBBROOT}/libOpenCL.so" /usr/local/lib/libOpenCL.so && \
+ ln -s "${TBBROOT}/libOpenCL.so.1" /usr/local/lib/libOpenCL.so.1 && \
+ ln -s "${TBBROOT}/libOpenCL.so.2.0" /usr/local/lib/libOpenCL.so.2.0 && \
+ mkdir -p /etc/OpenCL/vendors && \
+ echo "${TBBROOT}/libintelocl.so" > /etc/OpenCL/vendors/intel_expcpu.icd && \
+ rm -rf /tmp/tbb*
+
+# llvm + numba + llvmlite
+RUN apt-get update && \
+ apt-get install -y llvm-10-dev libclang-10-dev clang-10
+ENV LLVM_CONFIG=llvm-config-10
+RUN pip3.8 install --upgrade numba llvmlite
+
+# clinfo 2.2.18 (2018)
+RUN cd /tmp && \
+ wget https://github.com/Oblomov/clinfo/archive/2.2.18.04.06.tar.gz && \
+ tar -xvzf *.tar.gz && \
+ rm -f *.tar.gz && \
+ cd clinfo-* && \
+ make && \
+ mv clinfo /usr/local/bin && \
+ rm -rf /tmp/clinfo-*
+
+# clpeak 1.1.0 RC2 (2019)
+RUN cd /tmp && \
+ wget https://github.com/krrishnarraj/clpeak/archive/1.1.0.tar.gz && \
+ tar -xvzf *.tar.gz && \
+ rm -f *.tar.gz && \
+ cd clpeak-* && \
+ mkdir build && \
+ cd build/ && \
+ cmake .. && \
+ make && \
+ mv clpeak /usr/local/bin && \
+ rm -rf /tmp/clpeak-*
+
+# pyopencl
+RUN cd /tmp && \
+ git clone https://github.com/inducer/pyopencl.git && \
+ cd pyopencl && \
+ git checkout v2020.2.2 && \
+ git submodule update --init && \
+ python3.8 configure.py && \
+ make && \
+ pip3.8 install --upgrade . && \
+ cd - && \
+ rm -Rf /tmp/pyopencl
+
+# oclgrind
+RUN cd /tmp && \
+ git clone https://github.com/jrprice/Oclgrind.git && \
+ cd Oclgrind && \
+ mkdir build && \
+ cd build && \
+ cmake -DCMAKE_BUILD_TYPE=Release .. && \
+ make && \
+ make install && \
+ cd - && \
+ rm -Rf /tmp/Oclgrind
+
+# clFFT
+RUN cd /tmp && \
+ ln -s /usr/local/lib /usr/local/lib64 && \
+ git clone https://github.com/clMathLibraries/clFFT.git && \
+ cd clFFT && \
+ cd src && \
+ mkdir build && \
+ cd build && \
+ cmake -DCMAKE_BUILD_TYPE=Release .. && \
+ make && \
+ make install && \
+ cd - && \
+ rm -Rf /tmp/clFFT
+
+# gpyFFT, we need to fix a segfault on weakref.finalize(plan)
+# clFFT plans are destroyed when atexit(clFFT.teardown) is called
+RUN cd /tmp && \
+ git clone https://github.com/geggo/gpyfft.git && \
+ cd gpyfft && \
+ sed 's#finalize(self, _destroy_plan, self.plan)##' -i gpyfft/gpyfftlib.pyx && \
+ pip3.8 install . && \
+ cd - && \
+ rm -Rf /tmp/gpyfft
+
+# python flint (FLINT2 + ARB + python-flint)
+RUN cd /tmp && \
+  wget https://github.com/wbhart/flint2/archive/v2.6.3.tar.gz && \
+  tar -xvzf v*.tar.gz && \
+  rm -f v*.tar.gz && \
+  cd flint2-* && \
+  ./configure && \
+  make && \
+  make install && \
+  cd - && \
+  rm -rf flint2-*
+RUN cd /tmp && \
+  wget https://github.com/fredrik-johansson/arb/archive/2.18.1.tar.gz && \
+  tar -xvzf *.tar.gz && \
+  rm -f *.tar.gz && \
+  cd arb-* && \
+  ./configure && \
+  make && \
+  make install && \
+  cd - && \
+  rm -rf arb-*
+RUN pip3.8 install --upgrade python-flint
+
+# static fftw + pyfftw (with R2R transforms)
+# Weird pyfftw bug : not passing -O2 explicitly during build causes a segfault on import...
+# See https://bugs.gentoo.org/548776
+ENV FFTW_ROOT="/usr/local"
+RUN cd /tmp && \
+ wget http://www.fftw.org/fftw-3.3.8.tar.gz && \
+ tar -xvzf fftw-*.tar.gz && \
+ rm -f fftw-*.tar.gz && \
+ cd fftw-* && \
+ ./configure --enable-openmp --enable-threads --enable-mpi --enable-static --with-pic --prefix="${FFTW_ROOT}" --enable-single && \
+ make && make install && make clean && \
+ ./configure --enable-openmp --enable-threads --enable-mpi --enable-static --with-pic --prefix="${FFTW_ROOT}" && \
+ make && make install && make clean && \
+ ./configure --enable-openmp --enable-threads --enable-mpi --enable-static --with-pic --prefix="${FFTW_ROOT}" --enable-long-double && \
+ make && make install && make clean && \
+ rm -rf /tmp/fftw-*
+
+RUN cd /tmp && \
+ git config --global user.email "you@example.com" && \
+ git config --global user.name "Your Name" && \
+ git clone https://github.com/grlee77/pyFFTW.git && \
+ cd pyFFTW && \
+ git checkout r2r-try-three && \
+ git remote add fork https://github.com/pyFFTW/pyFFTW && \
+ git fetch fork && \
+ git merge v0.12.0 && \
+ STATIC_FFTW_DIR="${FFTW_ROOT}/lib" CFLAGS="-Wl,-Bsymbolic -fopenmp -I${FFTW_ROOT}/include -O2" python3.8 setup.py build_ext --inplace && \
+ pip3.8 install --upgrade . && \
+ rm -rf /tmp/pyFFTW
+
+# ensure all libraries are known by the runtime linker
+RUN ldconfig
+
+# clean cached packages
+RUN rm -rf /var/lib/apt/lists/*
+RUN rm -rf $HOME/.cache/pip/*
+RUN rm -rf /tmp/*
+
+CMD ["/bin/bash"]
diff --git a/ci/scripts/build_and_debug.sh b/ci/scripts/build_and_debug.sh
index de66fc1046209834cc08435120916087392c244e..f05483bc8dece7be174791192eff9084fd4cc2df 100755
--- a/ci/scripts/build_and_debug.sh
+++ b/ci/scripts/build_and_debug.sh
@@ -26,7 +26,7 @@ make install
 cd -
 rm -rf build
 
-apt-get update
-apt-get install -y gdb python-dbg
+#apt-get update
+#apt-get install -y gdb python3.8-dbg
 
 bash
diff --git a/ci/scripts/build_and_test.sh b/ci/scripts/build_and_test.sh
index 9465a2dd6a3e2544369ed5ca0fb1273be2b9f19e..9fcbd15052434f65fb0450ab17514f1453c367dc 100755
--- a/ci/scripts/build_and_test.sh
+++ b/ci/scripts/build_and_test.sh
@@ -30,4 +30,4 @@ time ${SCRIPT_DIR}/test.sh "${HYSOP_INSTALL_DIR}" "${HYSOP_DIR}/hysop"
 # clean everything because image may be commited to retain hysop cache
 cd
 rm -rf /tmp/hysop
-pip2.7 uninstall hysop
+pip3.8 uninstall hysop
diff --git a/ci/scripts/config.sh b/ci/scripts/config.sh
index ac2158623814d2e73893c48d6e827c8dd2c9c12e..9654bf957ac2853b60ee801f415d0b2355dc3a3a 100755
--- a/ci/scripts/config.sh
+++ b/ci/scripts/config.sh
@@ -22,7 +22,7 @@ INSTALL_DIR="$2"
 
 mkdir -p "${BUILD_DIR}"
 cd "${BUILD_DIR}"
-CC="$3" CXX="$4" FC="$5" cmake -DCMAKE_BUILD_TYPE=Release -DVERBOSE=OFF -DWITH_SCALES=ON -DPYTHON_EXECUTABLE="$(which python2.7)" -DHYSOP_INSTALL="${INSTALL_DIR}" -DFIND_FFTW_STATIC_ONLY=ON -DFIND_FFTW_VERBOSE=ON "${ROOT_DIR}"
+CC="$3" CXX="$4" FC="$5" cmake -DCMAKE_BUILD_TYPE=Release -DVERBOSE=OFF -DWITH_SCALES=ON -DPYTHON_EXECUTABLE="$(which python3.8)" -DHYSOP_INSTALL="${INSTALL_DIR}" -DFIND_FFTW_STATIC_ONLY=ON -DFIND_FFTW_VERBOSE=ON "${ROOT_DIR}"
 
 if [ ! -f Makefile ]; then
     echo "The makefile has not been generated."
diff --git a/ci/scripts/install.sh b/ci/scripts/install.sh
index 82f3f64561b88e76e38b02cac4a2b8b6e66aaa44..6fe6b4b9b226dbe1fd9ff53a7f27006e7b7e6d9d 100755
--- a/ci/scripts/install.sh
+++ b/ci/scripts/install.sh
@@ -1,7 +1,7 @@
 #!/bin/bash
 set -feu -o pipefail
 
-PYTHON_EXECUTABLE=${PYTHON_EXECUTABLE:-"$(which python2.7)"}
+PYTHON_EXECUTABLE=${PYTHON_EXECUTABLE:-"$(which python3.8)"}
 
 if [ $# -ne 2 ]; then
     echo "Usage ./install build_folder install_folder"
@@ -24,12 +24,12 @@ INSTALL_FOLDER="$2"
 cd "${BUILD_FOLDER}"
 make install
 
-if [ ! -d "${INSTALL_FOLDER}/lib/python2.7/site-packages/hysop" ]; then
-    echo "${INSTALL_FOLDER}/lib/python2.7/site-packages/hysop was not created."
+if [ ! -d "${INSTALL_FOLDER}/lib/python3.8/site-packages/hysop" ]; then
+    echo "${INSTALL_FOLDER}/lib/python3.8/site-packages/hysop was not created."
     exit 1
 fi
 
-export PYTHONPATH="${INSTALL_FOLDER}/lib/python2.7/site-packages"
-"${PYTHON_EXECUTABLE}" -c 'import hysop; print hysop'
+export PYTHONPATH="${INSTALL_FOLDER}/lib/python3.8/site-packages"
+"${PYTHON_EXECUTABLE}" -c 'import hysop; print(hysop)'
 
 exit 0
diff --git a/ci/scripts/test.sh b/ci/scripts/test.sh
index 4ff2091fa0d9aa32ff8b070ba812c6cd56117177..4a9f3812043b94294920b359c413b40ad71b8453 100755
--- a/ci/scripts/test.sh
+++ b/ci/scripts/test.sh
@@ -1,7 +1,7 @@
 #!/bin/bash
 set -feu -o pipefail
 
-PYTHON_EXECUTABLE=${PYTHON_EXECUTABLE:-"$(which python2.7)"}
+PYTHON_EXECUTABLE=${PYTHON_EXECUTABLE:-"$(which python3.8)"}
 
 if [ $# -lt 2 ]; then
     echo "Usage ./test install_folder hysop_folder [cache_dir] [backup_cache_dir]"
@@ -66,41 +66,49 @@ if [ "${HAS_CACHE_DIR}" = true ]; then
     mkdir -p "${CACHE_DIR}"
 fi
 
-export PYTHONPATH="${INSTALL_DIR}/lib/python2.7/site-packages:${INSTALL_DIR}"
+# Environment variables
+export PYTHONPATH="${INSTALL_DIR}/lib/python3.8/site-packages:${INSTALL_DIR}"
+export PYTHONHASHSEED=42  # get consistent hashes across MPI processes
+export PYOPENCL_COMPILER_OUTPUT=0
 export MPLBACKEND='cairo'
+
+# OpenMPI specific variables
+export OMPI_ALLOW_RUN_AS_ROOT=1
+export OMPI_ALLOW_RUN_AS_ROOT_CONFIRM=1
+export OMPI_MCA_rmaps_base_oversubscribe=1
+export OMPI_MCA_btl_vader_single_copy_mechanism=none  # see https://github.com/open-mpi/ompi/issues/4948
+
+# HySoP specific variables
 export HYSOP_VERBOSE=0
 export HYSOP_DEBUG=0
 export HYSOP_PROFILE=0
 export HYSOP_KERNEL_DEBUG=0
+export HYSOP_TRACE_WARNINGS=1
 
-# OpenMPI specific variables
-export OMPI_ALLOW_RUN_AS_ROOT=1
-export OMPI_ALLOW_RUN_AS_ROOT_CONFIRM=1
-export OMPI_MCA_rmaps_base_oversubscribe=1
 
 echo "Trying to load hysop module:"
-${PYTHON_EXECUTABLE} -c 'import hysop; print hysop'
+${PYTHON_EXECUTABLE} -c 'import hysop; print(hysop)'
 echo "module import successful !"
 echo
 
 echo "Default testing OpenCL platform is:"
-${PYTHON_EXECUTABLE} -c 'import hysop; from hysop.testsenv import iter_clenv; print next(iter(iter_clenv()));'
+${PYTHON_EXECUTABLE} -c 'import hysop; from hysop.testsenv import iter_clenv; print(next(iter(iter_clenv())));'
 
 RUN_TESTS=${RUN_TESTS:-true}
 RUN_EXAMPLES=${RUN_EXAMPLES:-true}
 RUN_LONG_TESTS=${RUN_LONG_TESTS:-false}
 
 COMMON_TEST_OPTIONS=''
-TEST_DIR="$HYSOP_DIR"
+TEST_DIR="${HYSOP_DIR}"
 COMMON_EXAMPLE_OPTIONS='-VNC -d16 -cp float -maxit 2 --autotuner-max-candidates 1 --save-checkpoint --checkpoint-dump-freq 0 --checkpoint-dump-period 0 --checkpoint-dump-last --checkpoint-dump-times'
-EXAMPLE_DIR="$HYSOP_DIR/../hysop_examples/examples"
+EXAMPLE_DIR="${HYSOP_DIR}/../hysop_examples/examples"
 
 hysop_test() {
      test=$1
      echo 
      echo "TESTING $1"
      echo "========$(printf '=%.0s' `seq ${#1}`)"
-     ${PYTHON_EXECUTABLE} "${TEST_DIR}/${1}" ${@:2} ${COMMON_TEST_OPTIONS} 
+     ${PYTHON_EXECUTABLE} -Wd "${TEST_DIR}/${1}" ${@:2} ${COMMON_TEST_OPTIONS} 
      echo
 }
 example_test() {
@@ -108,7 +116,7 @@ example_test() {
      echo 
      echo "EXAMPLE $1"
      echo "========$(printf '=%.0s' `seq ${#1}`)"
-     ${PYTHON_EXECUTABLE} "${EXAMPLE_DIR}/${1}" ${@:2} ${COMMON_EXAMPLE_OPTIONS}
+     ${PYTHON_EXECUTABLE} -Wd "${EXAMPLE_DIR}/${1}" ${@:2} ${COMMON_EXAMPLE_OPTIONS}
      echo
 }
 
@@ -124,6 +132,8 @@ if [ "$RUN_TESTS" = true ]; then
     hysop_test "operator/tests/test_penalization.py"
     hysop_test "operator/tests/test_velocity_correction.py"
     hysop_test "operator/tests/test_restriction_filter.py"
+    hysop_test "operator/tests/test_scales_advection.py"
+    hysop_test "operator/tests/test_bilevel_advection.py"
     hysop_test "operator/tests/test_directional_advection.py"
     hysop_test "operator/tests/test_directional_diffusion.py"
     hysop_test "operator/tests/test_directional_stretching.py"
@@ -161,7 +171,7 @@ if [ "${RUN_EXAMPLES}" = true ]; then
 fi
 
 if [ "${HAS_CACHE_DIR}" = true ]; then
-    rsync -rtvu "${HYSOP_CACHE_DIR}/" "${CACHE_DIR}/"
+    rsync -rtu "${HYSOP_CACHE_DIR}/" "${CACHE_DIR}/"
     find "${CACHE_DIR}" -name '*.lock' -delete
 fi
 
diff --git a/ci/utils/build_docker_image.sh b/ci/utils/build_docker_image.sh
index 5722a9b78d340a131a25a01feb7abec641d1c632..68d7a3dff8d887ef6d2cda0d268faf1e1f8d5bdf 100755
--- a/ci/utils/build_docker_image.sh
+++ b/ci/utils/build_docker_image.sh
@@ -2,6 +2,6 @@
 set -feu -o pipefail
 SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
 NTHREADS="$(nproc)"
-UBUNTU_RELEASE=${1:-focal}
+UBUNTU_RELEASE=${1:-groovy}
 
 docker build --rm=true --build-arg "NTHREADS=$NTHREADS" -t "keckj/hysop:${UBUNTU_RELEASE}" -f "${SCRIPT_DIR}/../docker_images/ubuntu/${UBUNTU_RELEASE}/Dockerfile" "${SCRIPT_DIR}/../.."
diff --git a/ci/utils/pull_docker_image.sh b/ci/utils/pull_docker_image.sh
index e73e452de24bad3f050af715fb8c875c74324d53..3545b0bad044031421b6c2a8850ee2b0f7efd327 100755
--- a/ci/utils/pull_docker_image.sh
+++ b/ci/utils/pull_docker_image.sh
@@ -1,5 +1,5 @@
 #!/usr/bin/env bash
 set -euf -o pipefail
-UBUNTU_RELEASE=${1:-focal}
+UBUNTU_RELEASE=${1:-groovy}
 docker logout
 docker pull "keckj/hysop:${UBUNTU_RELEASE}"
diff --git a/ci/utils/push_docker_image.sh b/ci/utils/push_docker_image.sh
index b36ade258042775cae7990aadb763c5b7a1747d1..1c697b5c77575d53baadb7e14819ff4e3db9a30d 100755
--- a/ci/utils/push_docker_image.sh
+++ b/ci/utils/push_docker_image.sh
@@ -1,6 +1,6 @@
 #!/usr/bin/env bash
 set -euf -o pipefail
-UBUNTU_RELEASE=${1:-focal}
+UBUNTU_RELEASE=${1:-groovy}
 docker login
 docker push "keckj/hysop:${UBUNTU_RELEASE}"
 docker logout
diff --git a/ci/utils/run_ci.sh b/ci/utils/run_ci.sh
index 0706ebb1251c5fd7e95d4d84bc7039de875bc957..a1a6191cd1d402d82bbb1dd529763371f869380f 100755
--- a/ci/utils/run_ci.sh
+++ b/ci/utils/run_ci.sh
@@ -1,7 +1,7 @@
 #!/usr/bin/env bash
 set -feu -o pipefail
 SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
-UBUNTU_RELEASE=${1:-focal}
+UBUNTU_RELEASE=${1:-groovy}
 DOCKER_IMG="keckj/hysop:${UBUNTU_RELEASE}"
 CONTAINER_ID='hysop_build_and_test'
 
diff --git a/ci/utils/run_debug.sh b/ci/utils/run_debug.sh
index 39b1b64a914b06922a4175735c46a8413953f9e9..8c168dd4beaadbc5ba4026d8d09d91d46a00b154 100755
--- a/ci/utils/run_debug.sh
+++ b/ci/utils/run_debug.sh
@@ -1,7 +1,7 @@
 #!/usr/bin/env bash
 set -feu -o pipefail
 SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
-UBUNTU_RELEASE=${1:-focal}
+UBUNTU_RELEASE=${1:-groovy}
 DOCKER_IMG="keckj/hysop:${UBUNTU_RELEASE}"
 CONTAINER_ID='hysop_build_and_debug'
 
diff --git a/ci/utils/run_docker_image.sh b/ci/utils/run_docker_image.sh
index 05cd63e1ae16fdcfaff076dcd3604cdac73a8381..4ca3ab273d323f34eb54b2209ab68cf9c5342d4a 100755
--- a/ci/utils/run_docker_image.sh
+++ b/ci/utils/run_docker_image.sh
@@ -1,5 +1,5 @@
 #!/usr/bin/env bash
 set -feu -o pipefail
 SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
-UBUNTU_RELEASE=${1:-focal}
+UBUNTU_RELEASE=${1:-groovy}
 docker run -it -v "${SCRIPT_DIR}/../..:/hysop:ro" "keckj/hysop:${UBUNTU_RELEASE}"
diff --git a/cmake/FindPythonFull.cmake b/cmake/FindPythonFull.cmake
deleted file mode 100644
index 857bde5c98a46db43bd3d7ff3314bf0bd7f3ee9d..0000000000000000000000000000000000000000
--- a/cmake/FindPythonFull.cmake
+++ /dev/null
@@ -1,109 +0,0 @@
-#.rst:
-# FindPythonFull
-# --------------
-#
-# Find python interpreter and all required libraries and headers.
-#
-# The default cmake find_package(PythonLibs) process does not work for us.
-#
-# Usage:
-# find_package(PythonFull)
-#
-# This call will set the following variables :
-# ::
-#
-#   PYTHON_FOUND           - True if Python executable, libraries and header were found
-#   PYTHON_EXECUTABLE          - python interpreter (full path)
-#   PYTHON_VERSION_STRING      - Python version found e.g. 2.5.2
-#   PYTHON_LIBRARIES           - full path to the python library
-#   PYTHON_INCLUDE_DIRS        - full path to Python.h
-#   PYTHONLIBS_VERSION_STRING  - version of the Python libs found
-#
-# By default, we search for the current active python version first.
-# If you need another version, use -DPYTHON_EXECUTABLE=full-path-to-python-exe
-# during cmake call.
-#
-
-set(PYTHON_FOUND FALSE)
-
-# Does nothing if vars are already in cache
-if(EXISTS "${PYTHON_INCLUDE_DIRS}" AND EXISTS "${PYTHON_LIBRARY}" AND EXISTS "${PYTHON_SITE_PACKAGES_DIR}")
-  set(PYTHON_FOUND TRUE)
-else()
-  set(PYTHON_FOUND FALSE)
-  # --- Find python interpreter
-  set(Python_ADDITIONAL_VERSIONS 2.7)
-  find_package(PythonInterp)
-
-  # --- Use distutils to explore python configuration corresponding to
-  # the python executable found.
-  find_file(_findpython explore_python_config.py PATHS ${CMAKE_MODULE_PATH})
-
-  execute_process(
-    COMMAND ${PYTHON_EXECUTABLE} ${_findpython}
-    OUTPUT_VARIABLE python_config
-    )
-
-  # --- Post-process distutils results
-  if(python_config)
-    string(REGEX REPLACE ".*exec_prefix:([^\n]+).*$" "\\1" PYTHON_PREFIX ${python_config})
-    string(REGEX REPLACE ".*\nversion:([^\n]+).*$" "\\1" PYTHON_VERSION ${python_config})
-    string(REGEX REPLACE ".*\npy_inc_dir:([^\n]+).*$" "\\1" PYTHON_INCLUDE_DIRS ${python_config})
-    string(REGEX REPLACE ".*\nsite_packages_dir:([^\n]+).*$" "\\1" PYTHON_SITE_PACKAGES_DIR ${python_config})
-    string(REGEX REPLACE "([0-9]+).([0-9]+)" "\\1\\2" PYTHON_VERSION_NO_DOTS ${PYTHON_VERSION})
-    if(WIN32)
-      string(REPLACE "\\" "/" PYTHON_SITE_PACKAGES_DIR ${PYTHON_SITE_PACKAGES_DIR})
-      string(REPLACE "\\" "/" PYTHON_PREFIX ${PYTHON_PREFIX})
-    endif(WIN32)
-
-    # --- Search python library corresponding to python exec.
-    find_library(PYTHON_LIBRARY
-      NAMES
-      python${PYTHON_VERSION_NO_DOTS} python${PYTHON_VERSION}
-      NO_DEFAULT_PATH
-      HINTS ${PYTHON_PREFIX} ${PYTHON_PREFIX}/lib/python${PYTHON_VERSION}/config ${PYTHON_PREFIX}/lib/python${PYTHON_VERSION}/config-${CMAKE_LIBRARY_ARCHITECTURE}
-      PATH_SUFFIXES lib libs
-      )
-
-    set(PYTHON_LIBRARIES ${PYTHON_LIBRARY} CACHE FILEPATH "Python libraries" FORCE)
-
-    set(PYTHON_INCLUDE_DIRS ${PYTHON_INCLUDE_DIRS} CACHE FILEPATH "Path to Python.h" FORCE)
-
-    # --- Extract python library version for further checks.
-    if(PYTHON_INCLUDE_DIRS AND EXISTS "${PYTHON_INCLUDE_DIRS}/patchlevel.h")
-      file(STRINGS "${PYTHON_INCLUDE_DIRS}/patchlevel.h" python_version_str
-        REGEX "^#define[ \t]+PY_VERSION[ \t]+\"[^\"]+\"")
-      string(REGEX REPLACE "^#define[ \t]+PY_VERSION[ \t]+\"([^\"]+)\".*" "\\1"
-        PYTHONLIBS_VERSION_STRING "${python_version_str}")
-      unset(python_version_str)
-    endif()
-    
-  endif()
-
-  unset(PYTHON_FOUND)
-  include(FindPackageHandleStandardArgs)
-  find_package_handle_standard_args(Python
-    REQUIRED_VARS PYTHON_LIBRARIES PYTHON_INCLUDE_DIRS PYTHON_EXECUTABLE
-    VERSION_VAR PYTHONLIBS_VERSION_STRING)
-  if(PYTHON_FOUND)
-    set(PYTHONFULL_FOUND TRUE)
-    if(NOT PythonFull_FIND_QUIETLY)
-      message("-- Found Python executable: ${PYTHON_EXECUTABLE}")
-      message("-- Found Python library: ${PYTHON_LIBRARIES}")
-      message("-- Python version is : ${PYTHON_VERSION_STRING}")
-      message("-- Python include dir is : ${PYTHON_INCLUDE_DIRS}")
-      message("-- Python Site package dir is : ${PYTHON_SITE_PACKAGES_DIR}\n")
-    endif()
-  else()
-    if(PythonFull_FIND_REQUIRED)
-      message(FATAL_ERROR "Could not find Python")
-    endif()
-  endif()
-  
-endif()
-
-if(NOT PYTHONLIBS_VERSION_STRING VERSION_EQUAL PYTHON_VERSION_STRING)
-  display(PYTHONLIBS_VERSION_STRING)
-  display(PYTHON_VERSION_STRING)
-  message(FATAL_ERROR "Python library and executable versions do not match. Please check your python installation.")
-endif()
diff --git a/cmake/FindPythonModule.cmake b/cmake/FindPythonModule.cmake
index 82218f509286489f208111e6e66120972e746904..106d82d2c6aebe8d64fb27c6e162deb521418c4b 100644
--- a/cmake/FindPythonModule.cmake
+++ b/cmake/FindPythonModule.cmake
@@ -11,7 +11,7 @@ function(find_python_module module)
 	# A module's location is usually a directory, but for binary modules
 	# it's a .so file.
 	execute_process(COMMAND ${PYTHON_EXECUTABLE} -c
-	  "import re, ${module}; print(re.compile('/__init__.py.*').sub('',${module}.__file__))"
+	  "import re, ${module}; print(re.compile(r'/__init__.py.*').sub('',${module}.__file__))"
 	  RESULT_VARIABLE _${module}_status
 	  OUTPUT_VARIABLE _${module}_location
 	  ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE)
diff --git a/cmake/FindSphinxModule.cmake b/cmake/FindSphinxModule.cmake
index 7bbdb18a977e9b50f193913b3cac14846ca99701..13c5b1b0deb315d1595ca429f613b8584208d9dd 100644
--- a/cmake/FindSphinxModule.cmake
+++ b/cmake/FindSphinxModule.cmake
@@ -20,7 +20,7 @@ function(find_sphinx_module parent module)
 	  set(${module}_FIND_REQUIRED TRUE)
 	endif()
 	execute_process(COMMAND ${PYTHON_EXECUTABLE} -c
-	  "import re; from ${parent} import ${module}; print re.compile('/__init__.py.*').sub('',${module}.__file__)"
+        "import re; from ${parent} import ${module}; print(re.compile(r'/__init__.py.*').sub('',${module}.__file__))"
 	  RESULT_VARIABLE _${module}_status
 	  OUTPUT_VARIABLE _${module}_location
 	  ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE)
diff --git a/cmake/HysopVersion.cmake b/cmake/HysopVersion.cmake
index c62cd8e1c9f0a6e61e5ea0c46b438c4f4171433e..0d36a01d7786de1ca262a6f1f1c47be805930c44 100644
--- a/cmake/HysopVersion.cmake
+++ b/cmake/HysopVersion.cmake
@@ -2,4 +2,4 @@
 set(MAJOR_VERSION 2)
 set(MINOR_VERSION 0)
 set(PATCH_VERSION 0)
-set(HYSOP_VERSION "${MAJOR_VERSION}.${MINOR_VERSION}.${PATCH_VERSION}-rc1")
+set(HYSOP_VERSION "${MAJOR_VERSION}.${MINOR_VERSION}.${PATCH_VERSION}rc1")
diff --git a/cmake/MyTools.cmake b/cmake/MyTools.cmake
index ac90f59e11e95bfb470c63bcd7d0e444c9ad11fb..fb9d20ed1b1c832f233ce7ab0b0694cd52d437b5 100644
--- a/cmake/MyTools.cmake
+++ b/cmake/MyTools.cmake
@@ -22,14 +22,6 @@ macro(append_flags)
 endmacro(append_flags)
 
 # The use of ADD_DEFINITION results in a warning with Fortran compiler
-macro(APPEND_C_FLAGS)
-  append_flags(CMAKE_C_FLAGS ${ARGV})
-endmacro(APPEND_C_FLAGS)
-
-macro(APPEND_CXX_FLAGS)
-  append_flags(CMAKE_CXX_FLAGS ${ARGV})
-endmacro(APPEND_CXX_FLAGS)
-
 macro(APPEND_Fortran_FLAGS)
   append_flags(CMAKE_Fortran_FLAGS ${ARGV})
 endmacro(APPEND_Fortran_FLAGS)
diff --git a/cmake/PythonInstallSetup.cmake b/cmake/PythonInstallSetup.cmake
index c7b1ec86ce6b1e521c00b0ee254189ecdcc3f5ba..65ae09ee576a908e6557acc294fe5c7ba286c046 100755
--- a/cmake/PythonInstallSetup.cmake
+++ b/cmake/PythonInstallSetup.cmake
@@ -37,19 +37,19 @@ function(set_python_install_path)
     # and on which python is used (virtualenv or not)
     # First, we need to check if '--user' option works in the current environment.
     execute_process(COMMAND ${PYTHON_EXECUTABLE} -c
-      "import site; print site.ENABLE_USER_SITE" OUTPUT_VARIABLE ENABLE_USER)
+    "import site; print(site.ENABLE_USER_SITE)" OUTPUT_VARIABLE ENABLE_USER)
     string(STRIP ${ENABLE_USER} ENABLE_USER)
     
     if(ENABLE_USER) # --user works ...
       # Find install path for --user (site.USER_SITE)
       execute_process(COMMAND ${PYTHON_EXECUTABLE} -c
-	"import site; print site.USER_BASE" OUTPUT_VARIABLE USER_BASE)
+          "import site; print(site.USER_BASE)" OUTPUT_VARIABLE USER_BASE)
       string(STRIP ${USER_BASE} USER_BASE)
       list(APPEND python_install_options --user)#prefix=${USER_BASE})
       # Get python user site and install path = USER_SITE + project_name
       set(PYTHON_COMMAND_GET_INSTALL_DIR
-       "import site, os, sys ; print os.path.join(site.USER_BASE, os.path.join(\"lib\", os.path.join(\"python\" + str(sys.version_info.major) + '.' + str(sys.version_info.minor),
- \"site-packages\")))")
+          "import site, os, sys ; print(os.path.join(site.USER_BASE, os.path.join(\"lib\", os.path.join(\"python\" + str(sys.version_info.major) + '.' + str(sys.version_info.minor),
+ \"site-packages\"))))")
     execute_process(
       COMMAND ${PYTHON_EXECUTABLE} -c "${PYTHON_COMMAND_GET_INSTALL_DIR}"
       OUTPUT_VARIABLE PY_INSTALL_DIR)
@@ -80,7 +80,7 @@ function(set_python_install_path)
     # list depends on the OS, the python version ...
     configure_file(cmake/fake/setup.py tmp/setup.py)
     configure_file(cmake/fake/__init__.py tmp/fake/__init__.py)
-    configure_file(cmake/find_python_install.py tmp/find_python_install.py)
+    configure_file(cmake/find_python_install.py.in tmp/find_python_install.py)
     execute_process(
       COMMAND ${PYTHON_EXECUTABLE} find_python_install.py
       WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/tmp/
diff --git a/config.hpp.cmake b/cmake/config.hpp.cmake
similarity index 100%
rename from config.hpp.cmake
rename to cmake/config.hpp.cmake
diff --git a/cmake/explore_python_config.py b/cmake/explore_python_config.py
index 4d0c7b4ac9a9b9601c414cfa51f4948a19ad9f69..2a0d8d19bbf33132a47923eb3c5e450cce5afdc7 100644
--- a/cmake/explore_python_config.py
+++ b/cmake/explore_python_config.py
@@ -31,10 +31,10 @@
 # For details see the accompanying COPYING-CMAKE-SCRIPTS file.
 
 import sys
-import distutils.sysconfig
+import distutils.sysconfig as cfg
 
 print("exec_prefix:%s" % sys.exec_prefix)
 print("version:%s" % sys.version[:3])
-print("py_inc_dir:%s" % distutils.sysconfig.get_python_inc())
-print("site_packages_dir:%s" % distutils.sysconfig.get_python_lib(plat_specific=1))
-
+print("py_inc_dir:%s" % cfg.get_python_inc())
+print("py_lib_dir:%s" % cfg.get_config_var('LIBDIR'))
+print("site_packages_dir:%s" % cfg.get_python_lib(plat_specific=1))
diff --git a/f2py_f2cmap.in b/cmake/f2py_f2cmap.in
similarity index 100%
rename from f2py_f2cmap.in
rename to cmake/f2py_f2cmap.in
diff --git a/cmake/find_python_install.py b/cmake/find_python_install.py.in
similarity index 87%
rename from cmake/find_python_install.py
rename to cmake/find_python_install.py.in
index 4e0a2146a77dbb36cb00e3c5df745b871f73449c..ae92f3440215072cfecdea8a81e72f7cc55f4ada 100644
--- a/cmake/find_python_install.py
+++ b/cmake/find_python_install.py.in
@@ -5,7 +5,7 @@ import locale
 
 encoding = locale.getdefaultlocale()[1]
 
-output = subprocess.Popen(["python", "setup.py", "--dry-run", "install"],
+output = subprocess.Popen(["@PYTHON_EXECUTABLE@", "setup.py", "--dry-run", "install"],
                           stdout=subprocess.PIPE).communicate()[0]
 
 if encoding:
diff --git a/opencl_explore.py b/cmake/opencl_explore.py
similarity index 98%
rename from opencl_explore.py
rename to cmake/opencl_explore.py
index 57618f37c96ab0143473ded0208e5e4143e22784..85060df8f6fe752bb349ba95cf6036c22967f5c4 100644
--- a/opencl_explore.py
+++ b/cmake/opencl_explore.py
@@ -11,7 +11,7 @@ def size_human(s):
     c = ['b', 'Kb', 'Mb', 'Gb']
     i = 0
     while s >= 1024:
-        s /= 1024
+        s //= 1024
         i += 1
     return str(int(s)) + c[i]
 
@@ -117,7 +117,7 @@ def explore(device_type=cl.device_type.GPU):
                 out += str(d_data[dev][i])
                 out += ' ' * (d_str_max[dev] - len(str(d_data[dev][i]))) + ' |'
         out += "\n"
-        print (out)
+        print(out)
     except cl.LogicError:
         pass
 
@@ -134,4 +134,4 @@ if __name__ == "__main__":
             p_id, d_id = get_defaults(device_type=cl.device_type.CPU)
         else:
             p_id, d_id = get_defaults()
-        print (str(p_id) + ' ' + str(d_id))
+        print(str(p_id) + ' ' + str(d_id))
diff --git a/precision.conf.in b/cmake/precision.conf.in
similarity index 100%
rename from precision.conf.in
rename to cmake/precision.conf.in
diff --git a/sort_f90.py b/cmake/sort_f90.py
similarity index 92%
rename from sort_f90.py
rename to cmake/sort_f90.py
index 86257d4cfdbc396d6cc93c1fe1b6aefcfc9fad7a..a5f8cd45acfdd6d26431efed6c8d4236ba7c4115 100644
--- a/sort_f90.py
+++ b/cmake/sort_f90.py
@@ -37,7 +37,7 @@ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 """
 
-import re
+import re, functools
 
 __all__ = ['FortranFileSorter', 'sort']
 
@@ -92,7 +92,7 @@ Here is a breakdown of the regex:
    (modulename_regex) : match the module name that is being USE'd, see above.
 """
 use_regex = \
-    "(?i)(?:^|;)\s*USE(?:\s+|(?:(?:\s*,\s*(?:NON_)?INTRINSIC)?\s*::))\s*(%s)" %\
+    r"(?i)(?:^|;)\s*USE(?:\s+|(?:(?:\s*,\s*(?:NON_)?INTRINSIC)?\s*::))\s*(%s)" %\
     modulename
 
 
@@ -100,27 +100,27 @@ use_regex = \
 Regular expression for a MODULE statement
 -----------------------------------------
 
-This regex finds module definitions by matching the following: 
+This regex finds module definitions by matching the following:
 
-MODULE module_name 
+MODULE module_name
 
-but *not* the following: 
- 
-MODULE PROCEDURE procedure_name 
- 
-Here is a breakdown of the regex: 
+but *not* the following:
+
+MODULE PROCEDURE procedure_name
+
+Here is a breakdown of the regex:
    (?i)               : regex is case insensitive
-   ^\s*               : any amount of white space 
-   MODULE             : match the string MODULE, case insensitive 
-   \s+                : match one or more white space characters 
-   (?!PROCEDURE)      : but *don't* match if the next word matches 
-                        PROCEDURE (negative lookahead assertion), 
-                        case insensitive 
-   ([a-z_]\w*)        : match one or more alphanumeric characters 
-                        that make up the defined module name and 
-                        save it in a group 
+   ^\s*               : any amount of white space
+   MODULE             : match the string MODULE, case insensitive
+   \s+                : match one or more white space characters
+   (?!PROCEDURE)      : but *don't* match if the next word matches
+                        PROCEDURE (negative lookahead assertion),
+                        case insensitive
+   ([a-z_]\w*)        : match one or more alphanumeric characters
+                        that make up the defined module name and
+                        save it in a group
 """
-def_regex = """(?i)^\s*MODULE\s+(?!PROCEDURE)(%s)""" % modulename
+def_regex = r"""(?i)^\s*MODULE\s+(?!PROCEDURE)(%s)""" % modulename
 
 
 """
@@ -179,21 +179,21 @@ Here is a breakdown of the regex:
                         (as allowed by the F2003 standard)
 """
 include_regex = \
-    """(?i)(?:^|['">]\s*;)\s*INCLUDE\s+(?:\w+_)?[<"'](.+?)(?=["'>])"""
+    r"""(?i)(?:^|['">]\s*;)\s*INCLUDE\s+(?:\w+_)?[<"'](.+?)(?=["'>])"""
 
 
 """
 Regular expression for a comment
 --------------------------------
 
-One limitation of the original Scons scanner is that it cannot properly USE 
+One limitation of the original Scons scanner is that it cannot properly USE
 statements if they are commented out. In either of the following cases:
 
    !  USE mod_a ; USE mod_b         [entire line is commented out]
    USE mod_a ! ; USE mod_b       [in-line comment of second USE statement]
 
-the second module name (mod_b) will be picked up as a dependency even though 
-it should be ignored. The proposed solution is to first parse the file to 
+the second module name (mod_b) will be picked up as a dependency even though
+it should be ignored. The proposed solution is to first parse the file to
 remove all the comments.
 
 (^.*)!?             : match everything on a line before an optional comment
@@ -204,8 +204,8 @@ comment_regex = r'(^.*)!?.*$'
 
 class FortranFileSorter:
     """Given a list of fortran 90/95 files, return a file list sorted by module
-    dependency. If a file depends on a parent through a USE MODULE statement, 
-    this parent file will occur earlier in the list. 
+    dependency. If a file depends on a parent through a USE MODULE statement,
+    this parent file will occur earlier in the list.
 
     Parameters
     ----------
@@ -244,17 +244,17 @@ class FortranFileSorter:
         return set(self.include_regex.findall(code))
 
     def externals(self):
-        """Return the modules that are used but not defined in the list of 
+        """Return the modules that are used but not defined in the list of
         files."""
         if not hasattr(self, 'mod_defined'):
             self.scan()
-        all_used = reduce(set.union, self.mod_used.values())
-        all_defined = reduce(set.union, self.mod_defined.values())
+        all_used = functools.reduce(set.union, self.mod_used.values())
+        all_defined = functools.reduce(set.union, self.mod_defined.values())
         return all_used.difference(all_defined)
 
     def scan(self):
         """For each file, identify the set of modules that are
-         defined, used but not defined in the same file, and the set of 
+         defined, used but not defined in the same file, and the set of
          included files.
         """
         self.mod_defined = {}
@@ -273,7 +273,7 @@ class FortranFileSorter:
 
     # TODO : Deal with include (do we have too?)
     def sort(self):
-        """Sort the files in order of dependency. 
+        """Sort the files in order of dependency.
         """
         ordered_list = []
 
@@ -302,8 +302,8 @@ class FortranFileSorter:
 
 def sort(files):
     """Given a list of fortran 90/95 files, return a file list sorted by module
-    dependency. If a file depends on a parent through a USE MODULE statement, 
-    this parent file will occur earlier in the list. 
+    dependency. If a file depends on a parent through a USE MODULE statement,
+    this parent file will occur earlier in the list.
 
     Parameters
     ----------
@@ -317,5 +317,5 @@ def sort(files):
 
 import sys
 if __name__ == "__main__":
-    print sys.argv[1:]
+    print(sys.argv[1:])
     sort(sys.argv[1:])
diff --git a/docs/config/mainpage.doxygen b/docs/config/mainpage.doxygen
index e1f084f09e143121e91bbee813b7da227c3a4692..419d0ad3c934137e43d905b0024da12969164477 100644
--- a/docs/config/mainpage.doxygen
+++ b/docs/config/mainpage.doxygen
@@ -132,14 +132,14 @@ If you are using a Python distribution from the <a href="http://brew.sh/">Homebr
 \code
 cmake -DPREFIX=$(brew --prefix) $SOURCEDIR
 \endcode
-and it will install package to '$(brew --prefix)/lib/pythonX.Y/site-packages', for example '/usr/local/lib/python2.7/site-packages'
+and it will install package to '$(brew --prefix)/lib/pythonX.Y/site-packages', for example '/usr/local/lib/python3.8/site-packages'
 
 A posible issue is that Python libraries and headers are not correctly discovered by CMake. We invite the user to carefully check that Python stuff in the cmake summary are correct and set the two variables : PYTHON_LIBRARY and PYTHON_INCLUDE_DIR.
 
 For example:
 \code
-cmake -DPYTHON_LIBRARY=$(brew --prefix)/Cellar/python/2.7.5/Frameworks/Python.framework/Versions/2.7/Python \
-      -DPYTHON_INCLUDE_DIR=$(brew --prefix)/Cellar/python/2.7.5/Frameworks/Python.framework/Headers \
+cmake -DPYTHON_LIBRARY=$(brew --prefix)/Cellar/python/3.8.5/Frameworks/Python.framework/Versions/3.8/Python \
+      -DPYTHON_INCLUDE_DIR=$(brew --prefix)/Cellar/python/3.8.5/Frameworks/Python.framework/Headers \
       $SOURCEDIR
 \endcode
 
diff --git a/docs/sphinx/devel/memo_sphinx.rst b/docs/sphinx/devel/memo_sphinx.rst
index 0e0edf97f31e183bd42a1f56a34df16d54bd642d..c93a3c0da3e9cdc73d144490b504ff8240af66b6 100644
--- a/docs/sphinx/devel/memo_sphinx.rst
+++ b/docs/sphinx/devel/memo_sphinx.rst
@@ -7,7 +7,7 @@ Source code for this page::
 
   To print source Code::
 
-    for i in xrange(3):
+    for i in range(3):
         do un_truc()
 
 
@@ -50,7 +50,7 @@ Which results in:
   
 To print source Code::
 
-  for i in xrange(3):
+  for i in range(3):
       do un_truc()
 
 
diff --git a/docs/sphinx/users_guide/fields.rst b/docs/sphinx/users_guide/fields.rst
index f89a5f30d60d4bdc9436383d1ea200b9fd0f49d0..edceb24d73b8d90aaf5c04a0052b4e0f8ed2f528 100644
--- a/docs/sphinx/users_guide/fields.rst
+++ b/docs/sphinx/users_guide/fields.rst
@@ -69,7 +69,7 @@ For example::
   # another data distribution
   vd2 = vec.discretize(topo_g)
   
-  for i in xrange(vec.nb_components):
+  for i in range(vec.nb_components):
       print vd.data[i].shape
       print vd2.data[i].shape
 
diff --git a/examples/flow_around_sphere/flow_around_sphere.py b/examples/flow_around_sphere/flow_around_sphere.py
deleted file mode 100644
index 005fc88435082cbdafd36b1f9d4cc67c7cfc58e7..0000000000000000000000000000000000000000
--- a/examples/flow_around_sphere/flow_around_sphere.py
+++ /dev/null
@@ -1,286 +0,0 @@
-import os
-import numpy as np
-
-from hysop import Box, Simulation, Problem, MPIParams, Field
-from hysop.defaults import VelocityField, VorticityField, \
-                           EnstrophyParameter, TimeParameters
-from hysop.parameters.tensor_parameter import TensorParameter
-from hysop.constants import Implementation, AdvectionCriteria, HYSOP_REAL, \
-    StretchingFormulation, StretchingCriteria
-from hysop.operators import Advection, StaticDirectionalStretching, Diffusion, \
-                            PoissonCurl, AdaptiveTimeStep,                  \
-                            Enstrophy, MinMaxFieldStatistics, StrangSplitting,    \
-                            ParameterPlotter, PenalizeVorticity, FlowRateCorrection, \
-                            VorticityAbsorption, CustomOperator
-from hysop.numerics.odesolvers.runge_kutta import RK2
-from hysop.methods import SpaceDiscretization, Remesh, TimeIntegrator, \
-                          ComputeGranularity, Interpolation, StrangOrder
-from hysop.topology.cartesian_topology import CartesianTopology
-from hysop.tools.parameters import CartesianDiscretization
-
-
-pi  = np.pi
-cos = np.cos
-sin = np.sin
-
-
-# Define the domain
-dim = 3
-npts = (32,32,64)
-box = Box(dim=dim,
-          origin=[-2.56, -2.56, -2.56],
-          length=[5.12, 5.12, 10.24])
-cfl = 0.5
-lcfl = 0.125
-uinf = 1.0
-viscosity = 1. / 250.
-outfreq = 10
-dt0 = 0.0125
-
-# Get default MPI Parameters from domain (even for serial jobs)
-mpi_params = MPIParams(comm=box.task_comm,
-                       task_id=box.current_task())
-
-# Setup usual implementation specific variables
-impl = None
-extra_op_kwds = {'mpi_params': mpi_params}
-
-
-# ====== Sphere inside the domain ======
-RADIUS = 0.5
-pos = [0., 0., 0.]
-def computeSphere(data, coords):
-    (x, y, z) = coords[0]
-    dx = x[0,0,1] - x[0,0,0]
-    dy = y[0,1,0] - y[0,0,0]
-    dz = z[1,0,0] - z[0,0,0]
-    data[0][...] = 0.
-    chi = lambda x,y,z: np.sqrt((x-pos[0])*(x-pos[0])+(y-pos[1])*(y-pos[1])+(z-pos[2])*(z-pos[2]))<=RADIUS
-    data[0][chi(x,y,z)] = 1.
-
-    # Smooth the sphere surface with a Volume-of-fluid fraction
-    vof = 5  # number of points in the subgrid
-    front_z = np.where(np.abs(data[0][0:-1,:,:]-data[0][1:,:,:])>0.1)
-    front_z = (np.concatenate((front_z[0],front_z[0]+1)),
-               np.concatenate((front_z[1],front_z[1])),
-               np.concatenate((front_z[2],front_z[2])))
-    front_y = np.where(np.abs(data[0][:,0:-1,:]-data[0][:,1:,:])>0.1)
-    front_y = (np.concatenate((front_y[0],front_y[0])),
-               np.concatenate((front_y[1],front_y[1]+1)),
-               np.concatenate((front_y[2],front_y[2])))
-    front = (np.concatenate((front_z[0],front_y[0])),
-             np.concatenate((front_z[1],front_y[1])),
-             np.concatenate((front_z[2],front_y[2])))
-    front_x = np.where(np.abs(data[0][:,:,0:-1]-data[0][:,:,1:])>0.1)
-    front_x = (np.concatenate((front_x[0],front_x[0])),
-               np.concatenate((front_x[1],front_x[1])),
-               np.concatenate((front_x[2],front_x[2]+1)))
-    front = (np.concatenate((front[0],front_x[0])),
-             np.concatenate((front[1],front_x[1])),
-             np.concatenate((front[2],front_x[2])))
-
-    for k,j,i in zip(*front):
-        sx = np.linspace(x[0,0,i]-dx/2,x[0,0,i]+dx/2,vof)[np.newaxis,np.newaxis,:]
-        sy = np.linspace(y[0,j,0]-dy/2,y[0,j,0]+dy/2,vof)[np.newaxis,:,np.newaxis]
-        sz = np.linspace(z[k,0,0]-dz/2,z[k,0,0]+dz/2,vof)[:,np.newaxis,np.newaxis]
-        data[0][k,j,i] = 1.*(np.sum(chi(sx, sy, sz))/(1.0*vof**3))
-
-    return data
-
-
-# ======= Function to compute initial velocity  =======
-def computeVel(data, coords):
-    data[0][...] = uinf
-    data[1][...] = 0.
-    data[2][...] = 0.
-    return data
-
-
-# ======= Function to compute initial vorticity =======
-def computeVort(data, coords):
-    data[0][...] = 0.
-    data[1][...] = 0.
-    data[2][...] = 0.
-    return data
-
-
-
-method = {}
-
-# Define parameters and field (time, timestep, velocity, vorticity, enstrophy)
-t, dt = TimeParameters(dtype=HYSOP_REAL)
-velo = VelocityField(domain=box, dtype=HYSOP_REAL)
-vorti = VorticityField(velocity=velo, dtype=HYSOP_REAL)
-sphere = Field(domain=box, name="Sphere", is_vector=False, dtype=HYSOP_REAL)
-wdotw = Field(domain=box, dtype=HYSOP_REAL, is_vector=False, name="WdotW")
-enstrophy = EnstrophyParameter(dtype=HYSOP_REAL)
-flowrate = TensorParameter(name="flowrate", dtype=HYSOP_REAL, shape=(3, ),
-                           initial_value=[0., 0., uinf * box.length[1] * box.length[0]])
-
-
-# Topologies
-topo_nogh = CartesianTopology(domain=box,
-                              discretization=CartesianDiscretization(npts,
-                                  default_boundaries=True),
-                              mpi_params=mpi_params,
-                              cutdirs=[False, False, True])
-topo_gh = CartesianTopology(domain=box,
-                            discretization=CartesianDiscretization(npts,
-                                ghosts=(4, 4, 4), default_boundaries=True),
-                            mpi_params=mpi_params,
-                            cutdirs=[False, False, True])
-
-
-### Build the directional operators
-#> Directional advection
-advec = Advection(
-    implementation=Implementation.FORTRAN,
-    name='advec',
-    velocity=velo,
-    advected_fields=(vorti,),
-    variables={velo: topo_nogh, vorti: topo_nogh},
-    dt=dt, **extra_op_kwds)
-#> Directional stretching + diffusion
-stretch = StaticDirectionalStretching(
-    implementation=Implementation.PYTHON,
-    name='stretch',
-    formulation=StretchingFormulation.CONSERVATIVE,
-    velocity=velo,
-    vorticity=vorti,
-    variables={velo: topo_gh, vorti: topo_gh},
-    dt=dt, **extra_op_kwds)
-#> Directional splitting operator subgraph
-splitting = StrangSplitting(splitting_dim=dim,
-                            order=StrangOrder.STRANG_FIRST_ORDER)
-splitting.push_operators(stretch)
-#> Penalization
-penal = PenalizeVorticity(
-    implementation=Implementation.PYTHON,
-    name='penalization',
-    velocity=velo, vorticity=vorti,
-    variables={velo: topo_gh, vorti: topo_gh, sphere: topo_gh},
-    obstacles=[sphere, ], coeff=1e8,
-    dt=dt, **extra_op_kwds)
-#> Diffusion operator
-diffuse = Diffusion(
-    implementation=Implementation.FORTRAN,
-    name='diffuse',
-    nu=viscosity,
-    Fin=vorti,
-    variables={vorti: topo_nogh},
-    dt=dt, **extra_op_kwds)
-#> Vorticity absorption
-absorption = VorticityAbsorption(
-    implementation=Implementation.PYTHON,
-    velocity=velo, vorticity=vorti,
-    start_coord=6.68,
-    flowrate=flowrate,
-    name="absorption",
-    variables={velo: topo_nogh, vorti: topo_nogh},
-    dt=dt, **extra_op_kwds)
-#> Poisson operator to recover the velocity from the vorticity
-poisson = PoissonCurl(
-    implementation=Implementation.FORTRAN,
-    name='poisson',
-    velocity=velo,
-    vorticity=vorti,
-    variables={velo: topo_nogh, vorti: topo_nogh},
-    projection=None,
-    **extra_op_kwds)
-#> Flowrate correction operator to adjust velocity with prescribed flowrate
-def computeFlowrate(t, flowrate):
-    fr = np.zeros(3)
-    fr[0] = uinf * box.length[1] * box.length[0]
-    Tstart=3.0
-    if t() >= Tstart and t() <= Tstart + 1.0:
-        fr[1] = sin(pi * (t() - Tstart)) * \
-                      box.length[1] * box.length[0]
-    flowrate.value = fr
-computeFlowrate = CustomOperator(func=computeFlowrate,
-                                 invars=(t, ),
-                                 outvars=(flowrate, ))
-correctFlowrate = FlowRateCorrection(
-    implementation=Implementation.PYTHON,
-    name="flowrate_correction",
-    velocity=velo, vorticity=vorti,
-    flowrate=flowrate,
-    dt=dt,
-    variables={velo: topo_nogh, vorti: topo_nogh},
-    **extra_op_kwds)
-
-#> outputs
-#penal.dump_inputs(fields=(sphere, ), frequency=outfrea)
-correctFlowrate.dump_inputs(fields=(vorti, ), frequency=outfreq)
-correctFlowrate.dump_outputs(fields=(velo,),  frequency=outfreq)
-
-#> Operator to compute the infinite norm of the velocity
-min_max_U = MinMaxFieldStatistics(name='min_max_U', field=velo,
-        Finf=True, implementation=Implementation.PYTHON, variables={velo:npts},
-        **extra_op_kwds)
-#> Operator to compute the infinite norm of the vorticity
-min_max_W = MinMaxFieldStatistics(name='min_max_W', field=vorti,
-        Finf=True, implementation=Implementation.PYTHON, variables={vorti:npts},
-        **extra_op_kwds)
-#> Operator to compute the enstrophy
-enstrophy_op = Enstrophy(
-    name='enstrophy',
-    vorticity=vorti, enstrophy=enstrophy, WdotW=wdotw,
-    variables={vorti:topo_nogh, wdotw: topo_nogh},
-    implementation=Implementation.PYTHON, **extra_op_kwds)
-
-### Adaptive timestep operator
-#TODO:move advection to GRAD_U
-#TODO:add stretching criteria, based on a gradient
-adapt_dt = AdaptiveTimeStep(dt, equivalent_CFL=True,
-                            start_time=10*dt0)  # start adapting timestep at t=10*dt0
-dt_cfl = adapt_dt.push_cfl_criteria(cfl=cfl,
-                                    Finf=min_max_U.Finf,
-                                    equivalent_CFL=True)
-dt_advec = adapt_dt.push_advection_criteria(lcfl=lcfl,
-                                            Finf=min_max_W.Finf,
-                                            criteria=AdvectionCriteria.W_INF)
-                                            #criteria=AdvectionCriteria.GRAD_U)
-# dt_advec = adapt_dt.push_stretching_criteria(lcfl=lcfl,
-#                                              gradFinf=grad_W.Finf,
-#                                              criteria=StretchingCriteria.GRAD_U)
-
-## Create the problem we want to solve and insert our
-# directional splitting subgraph and the standard operators.
-# The method dictionnary passed to this graph will be dispatched
-# accross all operators contained in the graph.
-method.update({SpaceDiscretization:   4,
-               TimeIntegrator:        RK2,
-               Remesh:                Remesh.L4_2,
-               Interpolation:         Interpolation.LINEAR})
-problem = Problem(method=method)
-problem.insert(
-    computeFlowrate,
-    penal,
-    splitting,
-    diffuse,
-    advec,
-    absorption,
-    poisson,
-    correctFlowrate,
-    enstrophy_op, min_max_U, min_max_W,
-    adapt_dt
-)
-problem.build()
-
-# # Create a simulation
-# # (do not forget to specify the t and dt parameters here)
-simu = Simulation(start=0., end=10.0,
-                  dt0=dt0, t=t, dt=dt)
-simu.write_parameters(t, dt_cfl, dt_advec, dt, enstrophy, flowrate,
-                      min_max_U.Finf, min_max_W.Finf, adapt_dt.equivalent_CFL,
-                      filename='parameters.txt', precision=8)
-
-problem.initialize_field(vorti, formula=computeVort)
-problem.initialize_field(velo, formula=computeVel)
-problem.initialize_field(sphere, formula=computeSphere)
-
-# Finally solve the problem
-problem.solve(simu)
-
-# Finalize
-problem.finalize()
diff --git a/hysop/__init__.py.in b/hysop/__init__.py.in
index 701856c217556b09b0468e89f46aa8d91b5651e2..73a195c6f53ff5c372bdbca45f90181eff039988 100644
--- a/hysop/__init__.py.in
+++ b/hysop/__init__.py.in
@@ -4,7 +4,6 @@ on hybrid architectures (MPI-GPU)
 """
 import psutil, signal, traceback, threading, sys, os, warnings
 from functools import wraps
-from hysop.deps import __builtin__, print_function
 
 # Register debug signals (SIGUSR1(10)=print the main stack, SIGUSR2(12)=print the stack of all threads)
 def dumpstack(signal, frame):
@@ -45,16 +44,16 @@ package_name = "@PACKAGE_NAME@"
 version      = "@HYSOP_VERSION@"
 
 # Compilation flags
-__MPI_ENABLED__    = "@USE_MPI@" is "ON"
-__GPU_ENABLED__    = "@WITH_GPU@" is "ON"
-__FFTW_ENABLED__   = "@WITH_FFTW@" is "ON"
-__SCALES_ENABLED__ = "@WITH_SCALES@" is "ON"
+__MPI_ENABLED__    = "@USE_MPI@" == "ON"
+__GPU_ENABLED__    = "@WITH_GPU@" == "ON"
+__FFTW_ENABLED__   = "@WITH_FFTW@" == "ON"
+__SCALES_ENABLED__ = "@WITH_SCALES@" == "ON"
 __OPTIMIZE__       = not __debug__
-__H5PY_PARALLEL_COMPRESSION_ENABLED__ = ("@H5PY_PARALLEL_COMPRESSION_ENABLED@" is "ON")
+__H5PY_PARALLEL_COMPRESSION_ENABLED__ = ("@H5PY_PARALLEL_COMPRESSION_ENABLED@" == "ON")
 
-__VERBOSE__        = get_env('VERBOSE', ("@VERBOSE@" is "ON"))
-__DEBUG__          = get_env('DEBUG',   ("@DEBUG@" is "ON"))
-__PROFILE__        = get_env('PROFILE', ("@PROFILE@" is "ON"))
+__VERBOSE__        = get_env('VERBOSE', ("@VERBOSE@" == "ON"))
+__DEBUG__          = get_env('DEBUG',   ("@DEBUG@" == "ON"))
+__PROFILE__        = get_env('PROFILE', ("@PROFILE@" == "ON"))
 
 __TRACE_CALLS__     = get_env('TRACE_CALLS',   False)
 __TRACE_WARNINGS__  = get_env('TRACE_WARNINGS', False)
@@ -66,7 +65,7 @@ __KERNEL_DEBUG__    = get_env('KERNEL_DEBUG', False)
 __BACKTRACE_BIG_MEMALLOCS__ = get_env('BACKTRACE_BIG_MEMALLOCS', False)
 
 __TEST_ALL_OPENCL_PLATFORMS__ = get_env('TEST_ALL_OPENCL_PLATFORMS', False)
-__ENABLE_LONG_TESTS__ = get_env('ENABLE_LONG_TESTS', ("@ENABLE_LONG_TESTS@" is "ON"))
+__ENABLE_LONG_TESTS__ = get_env('ENABLE_LONG_TESTS', ("@ENABLE_LONG_TESTS@" == "ON"))
 
 # Threads
 __ENABLE_THREADING__ = get_env('ENABLE_THREADING', True)
@@ -87,6 +86,16 @@ __FFTW_PLANNER_TIMELIMIT__ = int(get_env('FFTW_PLANNER_TIMELIMIT', -1))
 __DEFAULT_PLATFORM_ID__ = int(get_env('DEFAULT_PLATFORM_ID', @OPENCL_DEFAULT_OPENCL_PLATFORM_ID@))
 __DEFAULT_DEVICE_ID__   = int(get_env('DEFAULT_DEVICE_ID', @OPENCL_DEFAULT_OPENCL_DEVICE_ID@))
 
+
+if __TRACE_WARNINGS__:
+    warnings.simplefilter('always')
+    try: 
+        import pyopencl
+        warnings.filterwarnings(action='module', category=pyopencl.CompilerWarning)
+    except ImportError:
+        pass
+
+
 if __MPI_ENABLED__:
     from hysop.core.mpi import MPI, main_rank, main_size, \
                                host_rank, interhost_size, \
@@ -97,9 +106,6 @@ else:
     host_rank, shm_rank = 0, 0
 
 # define printing functions
-def print(*args, **kargs):
-    """Wrap print function (because of python 3)"""
-    __builtin__.print(*args, **kargs)
 def vprint(*args, **kargs):
     """prints only if __VERBOSE__ has been set"""
     if __VERBOSE__:
@@ -130,11 +136,20 @@ def reset():
 # override warning printer
 if '__formatwarning' not in locals():
     __formatwarning = warnings.formatwarning
-def __new_formatwarning(message, category, filename, lineno, line=None):
-    if __TRACE_WARNINGS__:
+def __new_formatwarning(warning, category, filename, lineno, line=None):
+    # filter out useless intel opencl build "warnings" stacktraces
+    message = str(warning)
+    warning_msg = __formatwarning(warning, category, filename, lineno, line=line)
+    if not warning_msg:
+        pass
+    elif ('Compilation started' in message):
+        pass
+    elif ('Non-empty compiler output encountered.' in message):
+        pass
+    elif __TRACE_WARNINGS__:
         print('::WARNING STACK TRACE::')
         traceback.print_stack()
-    return __formatwarning(message, category, filename, lineno, line='')
+    return warning_msg
 warnings.formatwarning = __new_formatwarning
 
 if __TRACE_CALLS__:
diff --git a/hysop/backend/device/autotunable_kernel.py b/hysop/backend/device/autotunable_kernel.py
index b70bcf0dd35dcdaba49a087a8c3b22b110d0f874..28018548670a5207770074e5225d340b6e803cb5 100644
--- a/hysop/backend/device/autotunable_kernel.py
+++ b/hysop/backend/device/autotunable_kernel.py
@@ -1,6 +1,6 @@
+import functools, itertools as it
 
 from abc import ABCMeta, abstractmethod
-from hysop.deps import it, functools
 from hysop.tools.types import check_instance, first_not_None
 from hysop.tools.numpywrappers import npw
 from hysop.tools.misc import next_pow2, upper_pow2
@@ -8,13 +8,12 @@ from hysop.backend.device.kernel_autotuner_config import KernelAutotunerConfig
 from hysop.backend.device.codegen.structs.mesh_info import MeshInfoStruct
 from hysop.fields.cartesian_discrete_field import CartesianDiscreteScalarFieldView
 
-class AutotunableKernel(object):
-    __metaclass__ = ABCMeta
-    
-    def __init__(self, autotuner_config, build_opts, 
+class AutotunableKernel(object, metaclass=ABCMeta):
+
+    def __init__(self, autotuner_config, build_opts,
                 dump_src=None, symbolic_mode=None, **kwds):
         super(AutotunableKernel, self).__init__(**kwds)
-        self._check_build_configuration(autotuner_config, build_opts)            
+        self._check_build_configuration(autotuner_config, build_opts)
 
         self.autotuner_config = autotuner_config
         self.build_opts = build_opts
@@ -84,7 +83,7 @@ class AutotunableKernel(object):
         def _hash_karg(k,v):
             s = ''
             if (k == 'mesh_info_vars'):
-                # for mesh infos we just hash the code generated constants that 
+                # for mesh infos we just hash the code generated constants that
                 # may alter the code branching.
                 if HASH_DEBUG:
                     s += '\n<HASHING MESHINFO'
@@ -99,7 +98,7 @@ class AutotunableKernel(object):
                     s += '\n>HASHED MESHINFO: hash={}'.format(h)
                 return h, s
             elif (k == 'expr_info'):
-                # for expr infos we just hash the continous and discrete expressions  
+                # for expr infos we just hash the continous and discrete expressions
                 # and some additional variables
                 if HASH_DEBUG:
                     s += '\n>HASHING EXPR_INFO:'
@@ -108,7 +107,7 @@ class AutotunableKernel(object):
                 extras = (v.name, v.direction, v.has_direction, v.dt_coeff, v.kind)
                 for k in sorted(v.min_ghosts_per_components.keys(), key=lambda x: x.name):
                     extras += (k.name, _hash_arg(v.min_ghosts_per_components[k]))
-                for mem_obj_key in ('input_arrays',   'output_arrays', 
+                for mem_obj_key in ('input_arrays',   'output_arrays',
                                     'input_buffers',  'output_buffers',
                                     'input_params',   'output_params'):
                     mem_objects = getattr(v, mem_obj_key)
@@ -132,7 +131,7 @@ class AutotunableKernel(object):
             else:
                 msg='Unknown custom hash key \'{}\'.'.format(k)
                 raise KeyError(msg)
-        
+
         def hash_all(*args, **kwds):
             h, s = None, None
             if args:
@@ -145,8 +144,8 @@ class AutotunableKernel(object):
                     if HASH_DEBUG:
                         s += ss
                         s += '\nHASHED ARGUMENT {}: {}'.format(i, h)
-            if kwds: 
-                items = sorted(kwds.items(), key=lambda x: x[0])
+            if kwds:
+                items = tuple(sorted(kwds.items(), key=lambda x: x[0]))
                 if (h is None):
                     h, s  = _hash_karg(*items[0])
                 else:
@@ -165,10 +164,10 @@ class AutotunableKernel(object):
 
         h, s = hash_all(*args, **kwds)
         return h, s
-        
-    
+
+
     @abstractmethod
-    def autotune(self, name, kernel_args, 
+    def autotune(self, name, kernel_args,
             force_verbose=False, force_debug=False,
             **extra_kwds):
         """Autotune this kernel with given name and extra_kwds."""
@@ -183,11 +182,11 @@ class AutotunableKernel(object):
     def max_device_work_group_size(self):
         """Return the maximum number of work items allowed by the device."""
         pass
-    
+
     @abstractmethod
     def max_device_work_item_sizes(self):
         """
-        Maximum number of work-items that can be specified in each dimension 
+        Maximum number of work-items that can be specified in each dimension
         of the work-group.
         """
         pass
@@ -203,33 +202,33 @@ class AutotunableKernel(object):
         tuple of types which will be checked against.
         """
         pass
-    
+
     @abstractmethod
     def format_best_candidate(self, extra_kwds, extra_parameters, work_load,
-            global_work_size, local_work_size, kernel, kernel_statistics, 
+            global_work_size, local_work_size, kernel, kernel_statistics,
             src_hash, hash_logs):
         """
         Post treatment callback for autotuner results.
         Transform autotuner results in user friendly kernel wrappers.
         """
         pass
-    
-    def compute_parameters(self, extra_kwds): 
+
+    def compute_parameters(self, extra_kwds):
         """Register extra parameters to optimize."""
         return AutotunerParameterConfiguration()
-    
-    def compute_work_bounds(self, 
+
+    def compute_work_bounds(self,
             max_kernel_work_group_size,
             preferred_work_group_size_multiple,
             extra_parameters, extra_kwds,
-            work_size=None, work_dim=None, 
+            work_size=None, work_dim=None,
             min_work_load=None, max_work_load=None):
         """
         Configure work_bounds (work_dim, work_size, max_work_load).
         Return a WorkBoundsConfiguration object.
         """
-        check_instance(max_kernel_work_group_size, (int,long))
-        check_instance(preferred_work_group_size_multiple, (int, long))
+        check_instance(max_kernel_work_group_size, int)
+        check_instance(preferred_work_group_size_multiple, int)
         check_instance(extra_parameters, dict, keys=str)
         check_instance(extra_kwds, dict, keys=str)
         assert (max_kernel_work_group_size>0), max_kernel_work_group_size
@@ -238,9 +237,9 @@ class AutotunableKernel(object):
         msg='FATAL ERROR: Could not extract {} from keyword arguments, '
         msg+= 'extra_parameters and extra_kwds.'
         msg+='\nFix {}::compute_work_bounds().'.format(type(self))
-        
-        work_dim = first_not_None(work_dim, 
-                    extra_parameters.get('work_dim', None), 
+
+        work_dim = first_not_None(work_dim,
+                    extra_parameters.get('work_dim', None),
                     extra_kwds.get('work_dim', None))
         max_work_dim = self.max_device_work_dim()
         if (work_dim is None):
@@ -251,20 +250,20 @@ class AutotunableKernel(object):
             msg=msg.format(work_dim, max_work_dim)
             raise ValueError(msg)
 
-        work_size = first_not_None(work_size, 
-                    extra_parameters.get('work_size', None), 
+        work_size = first_not_None(work_size,
+                    extra_parameters.get('work_size', None),
                     extra_kwds.get('work_size', None))
         if (work_size is None):
             msg=msg.format('work_size')
             raise RuntimeError(msg)
-        
+
         min_work_load = first_not_None(min_work_load,
-                    extra_parameters.get('min_work_load', None), 
+                    extra_parameters.get('min_work_load', None),
                     extra_kwds.get('min_work_load', None),
                     (1,)*work_dim)
 
         max_work_load = first_not_None(max_work_load,
-                    extra_parameters.get('max_work_load', None), 
+                    extra_parameters.get('max_work_load', None),
                     extra_kwds.get('max_work_load', None),
                     min_work_load)
         assert (min_work_load is not None)
@@ -273,14 +272,14 @@ class AutotunableKernel(object):
         max_device_work_dim        = self.max_device_work_dim()
         max_device_work_group_size = self.max_device_work_group_size()
         max_device_work_item_sizes = self.max_device_work_item_sizes()
-        
+
         max_work_group_size = min(max_device_work_group_size, max_kernel_work_group_size)
 
         work_bounds = AutotunerWorkBoundsConfiguration(
-                work_dim=work_dim, work_size=work_size, 
+                work_dim=work_dim, work_size=work_size,
                 min_work_load=min_work_load, max_work_load=max_work_load,
                 max_device_work_dim=max_device_work_dim,
-                max_device_work_group_size=max_work_group_size, 
+                max_device_work_group_size=max_work_group_size,
                 max_device_work_item_sizes=max_device_work_item_sizes,
                 preferred_work_group_size_multiple=preferred_work_group_size_multiple)
         return work_bounds
@@ -296,27 +295,27 @@ class AutotunableKernel(object):
         check_instance(work_load, npw.ndarray, dtype=npw.int32, size=work_bounds.work_dim)
         check_instance(extra_parameters, dict, keys=str)
         check_instance(extra_kwds, dict, keys=str)
-        
-        global_work_size = (work_bounds.work_size + work_load - 1) / work_load
+
+        global_work_size = (work_bounds.work_size + work_load - 1) // work_load
 
         (min_wg_size, max_wg_size) = self.compute_min_max_wg_size(
             work_bounds=work_bounds, work_load=work_load, global_work_size=global_work_size,
             extra_parameters=extra_parameters, extra_kwds=extra_kwds)
-        
+
         work = AutotunerWorkConfiguration(work_bounds=work_bounds, work_load=work_load,
             min_wg_size=min_wg_size, max_wg_size=max_wg_size)
         return work
-    
+
     def compute_min_max_wg_size(self, work_bounds, work_load, global_work_size,
             extra_parameters, extra_kwds):
         """Default min and max workgroup size."""
-        min_wg_size = npw.ones(shape=work_bounds.work_dim, dtype=npw.int32) 
+        min_wg_size = npw.ones(shape=work_bounds.work_dim, dtype=npw.int32)
         max_wg_size = global_work_size.copy()
         return (min_wg_size, max_wg_size)
-    
+
     def hash_extra_kwds(self, extra_kwds):
         """Hash extra_kwds dictionnary for caching purposes."""
-        for (k,v) in extra_kwds.iteritems():
+        for (k,v) in extra_kwds.items():
             try:
                 h = hash(v)
                 if (h == hash(id(v))):
@@ -325,26 +324,26 @@ class AutotunableKernel(object):
                     hashable='hashable'
             except:
                 hashable='hash failed'
-            print k, type(v), hashable
+            print(k, type(v), hashable)
         raise NotImplementedError('{}.hash_extra_kwds()'.format(type(self).__name__))
-    
+
     def hash_extra_parameters(self, extra_parameters):
         """Hash extra_parameters dictionnary for caching purposes."""
-        for k,v in extra_parameters.iteritems():
+        for k,v in extra_parameters.items():
             if hash(v)==hash(id(v)):
                 msg='Parameter {} of type {} is not safe to hash.'
                 msg+='\nImplement a {}.__hash__() to that it depends only on its values '
                 msg+='and not its instance id.'
                 msg=msg.format(t, type(v), str(type(v)))
                 raise RuntimeError(msg)
-        items = sorted(extra_parameters.items(), key=lambda x: x[0])
+        items = tuple(sorted(extra_parameters.items(), key=lambda x: x[0]))
         return hash(frozenset(items))
-    
+
     @abstractmethod
     def compute_global_work_size(self, work, local_work_size,
             extra_parameters, extra_kwds):
         """
-        Compute aligned global_work_size from unaligned global_work_size 
+        Compute aligned global_work_size from unaligned global_work_size
         and local_work_size.
         """
         pass
@@ -366,11 +365,11 @@ class AutotunableKernel(object):
     @classmethod
     def check_cartesian_field(cls, field, dtype=None, size=None,
             resolution=None, compute_resolution=None, nb_components=None,
-            ghosts=None, min_ghosts=None, max_ghosts=None, 
+            ghosts=None, min_ghosts=None, max_ghosts=None,
             domain=None, topology=None):
 
         check_instance(field, CartesianDiscreteScalarFieldView)
-        
+
         if (domain is not None) and (field.domain.domain is not domain):
             msg='Domain mismatch for dfield {}.'
             msg=msg.format(field.name)
@@ -411,7 +410,7 @@ class AutotunableKernel(object):
             msg='max ghosts mismatch for dfield {}, expected {} got {}.'
             msg=msg.format(field.name, max_ghosts, field.ghosts)
             raise RuntimeError(msg)
-    
+
     @classmethod
     def check_cartesian_fields(cls, *fields, **kwds):
         """
@@ -421,7 +420,7 @@ class AutotunableKernel(object):
         as boolean keyword arguments.
         """
         check_instance(fields, tuple, values=CartesianDiscreteScalarFieldView, minsize=1)
-        
+
         check_resolution = kwds.pop('check_res', False)
         check_compute_resolution = kwds.pop('check_cres', False)
         check_size = kwds.pop('check_size', True)
@@ -430,11 +429,11 @@ class AutotunableKernel(object):
         assert not kwds, 'Unused keyword arguments {}.'.format(kwds.keys())
 
         domain        = fields[0].domain
-        resolution    = fields[0].compute_resolution 
+        resolution    = fields[0].compute_resolution
         dtype         = fields[0].dtype
         size          = fields[0].npoints
         nb_components = fields[0].nb_components
-       
+
         for field in fields:
             if (field.domain.domain is not domain.domain):
                 msg='Domain mismatch between dfield {} and dfield {}.'
@@ -460,17 +459,17 @@ class AutotunableKernel(object):
                 msg='nb_components mismatch between dfield {} and dfield {}.'
                 msg=msg.format(fields[0].name, field.name)
                 raise RuntimeError(msg)
-    
+
     def mesh_info(self, name, mesh):
         """Create a MeshInfoStruct from a CartesianMesh."""
-        return MeshInfoStruct.create_from_mesh(name=name, mesh=mesh, 
+        return MeshInfoStruct.create_from_mesh(name=name, mesh=mesh,
                 typegen=self.typegen)[1]
 
     def input_mesh_info(self, field):
         """Create a MeshInfoStruct for an input DisreteCartesianField."""
         name='{}_in_field_mesh_info'.format(field.name)
         return self.mesh_info(name=name, mesh=field.mesh.mesh)
-    
+
     def output_mesh_info(self, field):
         """Create a MeshInfoStruct for an output DisreteCartesianField."""
         name='{}_out_field_mesh_info'.format(field.name)
@@ -499,8 +498,8 @@ class AutotunerParameterConfiguration(object):
             msg='Parameter {} has already been registered.'
             msg=msg.format(param_name)
             raise RuntimeError(msg)
-        
-        candidate_values = tuple(candidate_values) 
+
+        candidate_values = tuple(candidate_values)
         if len(candidate_values)==0:
             msg='candidates_values is empty.'
             raise ValueError(msg)
@@ -519,8 +518,8 @@ class AutotunerParameterConfiguration(object):
 
 class AutotunerWorkBoundsConfiguration(object):
     """Helper class for kernel autotuning to handle work bounds."""
-    def __init__(self, work_dim, work_size, 
-            min_work_load, max_work_load, 
+    def __init__(self, work_dim, work_size,
+            min_work_load, max_work_load,
             max_device_work_dim, max_device_work_group_size, max_device_work_item_sizes,
             preferred_work_group_size_multiple,
             **kwds):
@@ -536,19 +535,19 @@ class AutotunerWorkBoundsConfiguration(object):
         work_size = npw.asarray(work_size, dtype=npw.int32)
         min_work_load = npw.asarray(min_work_load, dtype=npw.int32)
         max_work_load = npw.asarray(max_work_load, dtype=npw.int32)
-        
+
         check_instance(work_size,     npw.ndarray, dtype=npw.int32, size=work_dim)
         check_instance(min_work_load, npw.ndarray, dtype=npw.int32, size=work_dim)
         check_instance(max_work_load, npw.ndarray, dtype=npw.int32, size=work_dim)
         assert (work_size > 0).all()
         assert (min_work_load>0).all()
         assert (max_work_load>=min_work_load).all()
-        
+
         self._work_dim = work_dim
         self._work_size = work_size
         self._min_work_load = min_work_load
         self._max_work_load = max_work_load
-        
+
         self._max_device_work_dim        = int(max_device_work_dim)
         self._max_device_work_group_size = int(max_device_work_group_size)
         self._max_device_work_item_sizes = npw.asarray(max_device_work_item_sizes[:work_dim], dtype=npw.int32)
@@ -599,8 +598,8 @@ class AutotunerWorkBoundsConfiguration(object):
             res.append(maxw)
             res = tuple(res)
             return res
-        
-        work_loads = tuple(_compute_pows(min_w, max_w) for (min_w, max_w) in 
+
+        work_loads = tuple(_compute_pows(min_w, max_w) for (min_w, max_w) in
                 zip(min_work_load.tolist(), max_work_load.tolist()))
 
         work_loads = it.product(*work_loads)
@@ -626,14 +625,14 @@ class AutotunerWorkConfiguration(object):
 
         self._work_bounds = work_bounds
         self._work_load = work_load
-        self._global_work_size = (work_bounds.work_size + work_load - 1) / work_load
+        self._global_work_size = (work_bounds.work_size + work_load - 1) // work_load
 
         self._filters = {}
         self._filter_names = ()
-        
+
         self._min_wg_size = min_wg_size
         self._max_wg_size = max_wg_size
-        
+
         self._local_work_size_generator = self._default_work_size_generator
         self._generate_unfiltered_candidates()
         self._load_default_filters(work_bounds, ordered_workload)
@@ -650,27 +649,27 @@ class AutotunerWorkConfiguration(object):
         return self._filter_names
     def _get_work_dim(self):
         return self._work_bounds.work_dim
-    
+
     work_bounds = property(_get_work_bounds)
     work_load = property(_get_work_load)
     work_dim = property(_get_work_dim)
     global_work_size = property(_get_global_work_size)
     filters = property(_get_filters)
     filter_names = property(_get_filter_names)
-    
+
     def _generate_unfiltered_candidates(self):
         candidates = self._local_work_size_generator()
         check_instance(candidates, tuple, values=npw.ndarray, minsize=1)
         self._unfiltered_candidates = candidates
-    
+
     def _default_work_size_generator(self):
         """Default local_work_size generator."""
         pows = []
         size = 1
-        
+
         min_wi_size = self._min_wg_size
         max_wi_size = self._max_wg_size
-        
+
         def _compute_pows(min_wi, max_wi):
             res = []
             wi = min_wi
@@ -681,9 +680,9 @@ class AutotunerWorkConfiguration(object):
             res = tuple(res)
             return res
 
-        work_items = tuple(_compute_pows(min_wi, max_wi)[::-1] for (min_wi, max_wi) in 
+        work_items = tuple(_compute_pows(min_wi, max_wi)[::-1] for (min_wi, max_wi) in
                 zip(min_wi_size.tolist(), max_wi_size.tolist()))
-        
+
         wi_candidates = it.product(*work_items)
         return tuple(npw.asarray(wi, dtype=npw.int32) for wi in wi_candidates)
 
@@ -704,16 +703,16 @@ class AutotunerWorkConfiguration(object):
         if self.__debug_filters:
             msg='  *Initial workitems candidates:\n {}\n'.format(
                     tuple(tuple(x) for x in candidates))
-            print msg
+            print(msg)
 
         for fname in self.filter_names:
             fn = self._filters[fname]
-            candidates = it.ifilter(fn, candidates)
+            candidates = tuple(filter(fn, candidates))
             if self.__debug_filters:
                 candidates, _ = it.tee(candidates)
-                msg=' *Filter {}:\n {}\n'.format(fname, 
+                msg=' *Filter {}:\n {}\n'.format(fname,
                         tuple(tuple(x) for x in _))
-                print msg
+                print(msg)
         return candidates
 
     def push_filter(self, filter_name, filter_fn, **filter_kwds):
@@ -732,11 +731,11 @@ class AutotunerWorkConfiguration(object):
 
     def _load_default_filters(self, work_bounds, ordered_workload):
         """Load default local_work_size filters (mostly device limitations.)"""
-        self.push_filter('max_device_work_item_sizes (default filter, max_work_item_sizes={})'.format(work_bounds.max_device_work_item_sizes), 
-                self.max_wi_sizes_filter, 
+        self.push_filter('max_device_work_item_sizes (default filter, max_work_item_sizes={})'.format(work_bounds.max_device_work_item_sizes),
+                self.max_wi_sizes_filter,
                 max_work_item_sizes=work_bounds.max_device_work_item_sizes)
         self.push_filter('max_device_work_group_size (default filter, max_device_work_group_size={})'.format(work_bounds.max_device_work_group_size),
-                self.max_wg_size_filter, 
+                self.max_wg_size_filter,
                 max_work_group_size=work_bounds.max_device_work_group_size)
         if ordered_workload:
             self.push_filter('ordered_workload (default)', self.ordered_workload_filter)
@@ -745,12 +744,12 @@ class AutotunerWorkConfiguration(object):
     def max_wi_sizes_filter(local_work_size, max_work_item_sizes):
         """Filter out work items by size given a maximum size."""
         return (local_work_size <= max_work_item_sizes).all()
-    
+
     @staticmethod
     def min_wi_sizes_filter(local_work_size, min_work_item_sizes):
         """Filter out work items by size given a minimum size."""
         return (local_work_size >= min_work_item_sizes).all()
-    
+
     @staticmethod
     def max_wg_size_filter(local_work_size, max_work_group_size):
         """Filter out work items by workgroup size given a maximum workgroup size."""
@@ -765,7 +764,7 @@ class AutotunerWorkConfiguration(object):
                 return False
             oldval = val
         return True
-    
+
     @abstractmethod
     def make_parameter(self, param):
         pass
diff --git a/hysop/backend/device/codegen/base/codegen.py b/hysop/backend/device/codegen/base/codegen.py
index 9110b9ee3b98f5eb357ecea056467f6c935fd936..d3c227832fcd83db9ce394a3f839d35180c6dbcd 100644
--- a/hysop/backend/device/codegen/base/codegen.py
+++ b/hysop/backend/device/codegen/base/codegen.py
@@ -1,8 +1,8 @@
+import itertools as it, sys, os, string, tempfile, operator
 from contextlib import contextmanager
 from subprocess import call
 
 from hysop.tools.types import check_instance
-from hysop.deps import it, sys, os, string, tempfile, operator
 from hysop.backend.device.opencl import cl
 from hysop.backend.device.codegen.base.utils import WriteOnceDict, VarDict
 from hysop.backend.device.codegen.base.variables import CodegenVariable
@@ -108,7 +108,7 @@ class CodeGenerator(object):
         return self
 
     def update_requirements(self, reqs):
-        for reqname,req in reqs.iteritems():
+        for reqname,req in reqs.items():
             self.reqs[reqname] = req
         return self
 
@@ -318,7 +318,7 @@ class CodeGenerator(object):
 
             kargs = kargs[:-1].replace('[i]','[{i}]')
             lines = []
-            for i in xrange(len(self._varnames)):
+            for i in range(len(self._varnames)):
                 l = eval('line.format('+kargs.format(i=i)+')')
                 lines.append(l)
             return lines
@@ -366,7 +366,7 @@ class CodeGenerator(object):
 
             maxlen  = lambda i: max([len(line[i]) for line in self._lines if len(line)>1])
             line_str = ''
-            for i in xrange(self._parts_count):
+            for i in range(self._parts_count):
                 ml = maxlen(i)
                 if ml==0:
                     line_str+='{}'
@@ -557,7 +557,7 @@ class CodeGenerator(object):
         if (unroll is not None) and (unroll is not False):
             if unroll is True:
                 header_prefix = '#pragma unroll\n{}'.format(header_prefix)
-            elif isinstance(unroll, (int,long)):
+            elif isinstance(unroll, int):
                 header_prefix = '#pragma unroll {}\n{}'.format(unroll, header_prefix)
         with self._block_(header_prefix=header_prefix,compact=compact) as b:
             yield b
@@ -650,7 +650,7 @@ class CodeGenerator(object):
         else:
             blocks[dname] = [poverride if poverride else prio,self.code,self_com]
 
-        for blk_name, blk_val in self.blocks.iteritems():
+        for blk_name, blk_val in self.blocks.items():
             priority,self_code,self_com = blk_val
             poverride = self.block_priorities_override[blk_name] if blk_name in self.block_priorities_override else None
             if blk_name in blocks.keys():
@@ -667,7 +667,7 @@ class CodeGenerator(object):
         self.generate(blocks,genset)
         if blocks:
             entire_code = ''
-            blocks = sorted(blocks.items(), key=operator.itemgetter(1))
+            blocks = tuple(sorted(blocks.items(), key=operator.itemgetter(1)))
             for bn,(priority,code,comment) in blocks:
                 if not code:
                     continue
@@ -751,4 +751,4 @@ if __name__ == '__main__':
     cg.to_file('.','test.c')
 
     #output the generated code to stdout
-    #print cg
+    #print(cg)
diff --git a/hysop/backend/device/codegen/base/enum_codegen.py b/hysop/backend/device/codegen/base/enum_codegen.py
index 8701ee23396b504004fc36c79509a53b80163de4..cedab79ca66bb8521ed1a9cfeecd4110729fd0fe 100644
--- a/hysop/backend/device/codegen/base/enum_codegen.py
+++ b/hysop/backend/device/codegen/base/enum_codegen.py
@@ -10,7 +10,7 @@ class EnumCodeGenerator(CodeGenerator):
 
     def __init__(self, enum, comments=None, ext='.c',
             initial_indent_level=0,escape_seqs=None,keywords=None):
-        
+
         super(EnumCodeGenerator,self).__init__(name=enum.name,typegen=TypeGen('float'),
                 ext=ext,initial_indent_level=initial_indent_level,
                 escape_seqs=escape_seqs,keywords=keywords)
@@ -19,16 +19,16 @@ class EnumCodeGenerator(CodeGenerator):
             raise ValueError('Input enum should be generated with hysop.tools.enum.EnumFactory.')
 
         self.enum = enum
-        
+
         self.dtype       = self.enum.dtype
         self.fields      = self.enum.fields
         self.rfields     = self.enum.rfields
         self.__getitem__ = self.enum.__getitem__
-        for k,v in self.fields().iteritems():
+        for k,v in self.fields().items():
             setattr(self,k,v)
-        
+
         self.ctype = 'enum ' + self.enum.name
-        
+
         register_ctype_dtype(self.ctype,self.dtype)
         self.gencode(comments)
 
@@ -65,9 +65,9 @@ if __name__ == '__main__':
 
     from hysop.tools.enum import EnumFactory
 
-    days = ['MONDAY','TUESDAY']     
+    days = ['MONDAY','TUESDAY']
     enum = EnumFactory.create('Days',days)
     scg  = EnumCodeGenerator(enum, comments='Days enumeration')
     scg.edit()
-    
+
 
diff --git a/hysop/backend/device/codegen/base/function_codegen.py b/hysop/backend/device/codegen/base/function_codegen.py
index 4f280358c5bb891c15844d04c1fefb4d06d14e91..9a0e1301c7a082c1cd0fedea8498dd5a1956dca1 100644
--- a/hysop/backend/device/codegen/base/function_codegen.py
+++ b/hysop/backend/device/codegen/base/function_codegen.py
@@ -10,19 +10,19 @@ from hysop.backend.device.opencl.opencl_types     import TypeGen, OpenClTypeGen
 from hysop.backend.device.codegen.base.variables import CodegenVariable, CodegenVectorClBuiltin, CodegenVectorClBuiltinFunc
 
 class FunctionBase(object):
-    
+
     def __init__(self, fname, output,
             fargs, known_args, typegen,
             symbolic_mode=None,
             inline=False, **kargs):
-        
+
         super(FunctionBase,self).__init__(typegen=typegen,**kargs)
         known_args = ArgDict() if (known_args is None) else known_args
         output = 'void' if (output is None) else output
         check_instance(fargs,ArgDict)
-        
+
         fargs.release()
-        for (varname, varval) in known_args.iteritems():
+        for (varname, varval) in known_args.items():
             if varname in fargs.keys():
                 if isinstance(varval, CodegenVariable):
                     if not varval.known():
@@ -31,20 +31,20 @@ class FunctionBase(object):
                         varval.const = True
                         if symbolic_mode:
                             varval.force_symbolic(symbolic_mode)
-                        fargs[varname]=varval 
+                        fargs[varname]=varval
                 else:
                     try:
                         fargs[varname].set_value(varval)
                     except ValueError:
                         var = fargs[varname]
-                        print
-                        print 'FATAL ERROR: Failed to set value of known variable {}.'.format(varname)
-                        print ' *variable type was {}'.format(type(var).__name__)
+                        print()
+                        print('FATAL ERROR: Failed to set value of known variable {}.'.format(varname))
+                        print(' *variable type was {}'.format(type(var).__name__))
                         if hasattr(var, 'dim'):
-                            print ' *variable dim was {}'.format(var.dim)
-                        print ' *value was of type {}'.format(type(varval))
-                        print ' *value was {}'.format(varval)
-                        print
+                            print(' *variable dim was {}'.format(var.dim))
+                        print(' *value was of type {}'.format(type(varval)))
+                        print(' *value was {}'.format(varval))
+                        print()
                         raise
                     fargs[varname].const = True
                     if symbolic_mode:
@@ -64,7 +64,7 @@ class FunctionBase(object):
     def _return(self, what):
         code = 'return {};'.format(what)
         self.append(code)
-        
+
 
     def match_args(self,*vars,**kvars):
         args = self.args
@@ -120,7 +120,7 @@ class FunctionCodeGenerator(FunctionBase, CodeGenerator):
                 fname=fname,output=output,inline=inline,
                 ext=ext,fargs=args, known_args=known_args)
         self.inject_vars(args)
-    
+
     @contextmanager
     def _function_(self):
         name=self.fname
@@ -149,7 +149,7 @@ class OpenClFunctionCodeGenerator(FunctionBase, OpenClCodeGenerator):
                 fname=fname,output=output,inline=inline,
                 ext=ext,fargs=args, known_args=known_args)
         self.inject_vars(args)
-    
+
     @contextmanager
     def _function_(self):
         name=self.fname
diff --git a/hysop/backend/device/codegen/base/kernel_codegen.py b/hysop/backend/device/codegen/base/kernel_codegen.py
index 1c44d051014aa03f982308db197cef3dd70a3cf5..45d8b29c7ae0113773f410521a50d7afcb3321a3 100644
--- a/hysop/backend/device/codegen/base/kernel_codegen.py
+++ b/hysop/backend/device/codegen/base/kernel_codegen.py
@@ -1,5 +1,5 @@
 from contextlib import contextmanager
-from hysop.deps import np
+import numpy as np
 
 from hysop.tools.types import check_instance
 from hysop.constants import Backend
@@ -11,21 +11,21 @@ from hysop.backend.device.codegen.base.variables import CodegenVariable, Codegen
                                                  CodegenVectorClBuiltinFunc
 
 class KernelBase(FunctionBase):
-    def __init__(self, kname, vec_type_hint=None, 
+    def __init__(self, kname, vec_type_hint=None,
             kernel_args=None, known_args=None,
             **kargs):
-        
-        super(KernelBase,self).__init__(fname=kname, 
-                output='void', inline=False, 
+
+        super(KernelBase,self).__init__(fname=kname,
+                output='void', inline=False,
                 fargs=kernel_args, known_args=known_args,
                 **kargs)
 
 class KernelCodeGenerator(KernelBase, OpenClCodeGenerator):
-    
+
     def __init__(self,name,typegen,work_dim,symbolic_mode=True,
             kernel_args=None, known_vars=None,
             vec_type_hint=None, **kwds):
-            
+
         kernel_args = ArgDict()       if (kernel_args is None) else kernel_args
         known_vars  = WriteOnceDict() if (known_vars is None)  else known_vars
 
@@ -45,7 +45,7 @@ class KernelCodeGenerator(KernelBase, OpenClCodeGenerator):
 
         if 'work_dim' not in known_vars.keys():
             known_vars['work_dim'] = work_dim
-        
+
         known_args = {}
         for ka in kernel_args.keys():
             if ka in known_vars.keys():
@@ -56,20 +56,20 @@ class KernelCodeGenerator(KernelBase, OpenClCodeGenerator):
                 symbolic_mode=symbolic_mode,
                 kernel_args=kernel_args, known_args=known_args,
                 known_vars=known_vars, **kwds)
-        
+
         self.inject_vars(kernel_args)
         self.symbolic_mode=symbolic_mode
         self.gen_kernel_variables()
         self.gen_kernel_attributes()
-    
-    
+
+
     #return global_work_size from effective work_size and given local_work_size
     # /!\ it should be garanted that global_work_size is a multiple of local_work_size
     def get_global_work_size(self, work_size, local_work_size):
         work_size       = np.asarray(work_size)
         local_work_size = np.asarray(local_work_size)
-        return ((work_size+local_work_size-1)/local_work_size) * local_work_size
-    
+        return ((work_size+local_work_size-1)//local_work_size) * local_work_size
+
     def min_ghosts(self):
         ghosts = (0,)*self.work_dim
         return np.asarray(ghosts)
@@ -77,7 +77,7 @@ class KernelCodeGenerator(KernelBase, OpenClCodeGenerator):
     #return a tuple of required (static,dynamic) cache bytes per workgroup
     def required_workgroup_cache_size(self, local_work_size):
         return (0,0)
-    
+
     def gen_kernel_variables(self):
         tg = self.typegen
         work_dim = self.work_dim
@@ -87,21 +87,21 @@ class KernelCodeGenerator(KernelBase, OpenClCodeGenerator):
         kvars['work_dim']  = CodegenVariable('work_dim','uint', tg, symbolic_mode=sm)
         kvars['global_index'] = CodegenVariable('GID', 'int', tg)
         kvars['local_index']  = CodegenVariable('LID', 'int', tg)
-        kvars['global_size']  = CodegenVectorClBuiltinFunc('global_size', 'G',       
+        kvars['global_size']  = CodegenVectorClBuiltinFunc('global_size', 'G',
 								'int',work_dim,tg,symbolic_mode=sm)
-        kvars['local_size']   = CodegenVectorClBuiltinFunc('local_size',  'L',       
+        kvars['local_size']   = CodegenVectorClBuiltinFunc('local_size',  'L',
 								'int',work_dim,tg,symbolic_mode=sm)
-        kvars['global_id']    = CodegenVectorClBuiltinFunc('global_id',   'gid',     
+        kvars['global_id']    = CodegenVectorClBuiltinFunc('global_id',   'gid',
 								'int',work_dim,tg)
-        kvars['local_id']     = CodegenVectorClBuiltinFunc('local_id',    'lid',     
+        kvars['local_id']     = CodegenVectorClBuiltinFunc('local_id',    'lid',
 								'int',work_dim,tg)
-        kvars['num_groups']   = CodegenVectorClBuiltinFunc('num_groups',  'ngroups', 
-								'int',work_dim,tg,symbolic_mode=sm) 
+        kvars['num_groups']   = CodegenVectorClBuiltinFunc('num_groups',  'ngroups',
+								'int',work_dim,tg,symbolic_mode=sm)
         kvars['group_id']     = CodegenVectorClBuiltinFunc('group_id',    'group_id',
-								'int',work_dim,tg)         
+								'int',work_dim,tg)
 
         self.update_vars(kvars)
-    
+
     def gen_kernel_attributes(self):
         vec_type_hint = self.vec_type_hint
         local_work_size = self.vars['local_size'].value
@@ -114,7 +114,7 @@ class KernelCodeGenerator(KernelBase, OpenClCodeGenerator):
         #if (vec_type_hint is not None):
             #kernel_attributes['vec_type_hint'] = 'vec_type_hint({})'.format(vec_type_hint)
         self.kernel_attributes = kernel_attributes
-    
+
     def check_workitem_bounds(self,varname,compact=True):
         gid = self.vars['global_id']
         if isinstance(varname,str):
@@ -123,11 +123,11 @@ class KernelCodeGenerator(KernelBase, OpenClCodeGenerator):
             N = varname
         else:
             raise TypeError('varname')
-        conditions = ['({}>={})'.format(gid[i],N[i]) for i in xrange(self.work_dim)]
+        conditions = ['({}>={})'.format(gid[i],N[i]) for i in range(self.work_dim)]
         cond = ' || '.join(conditions)
         with self._if_(cond,compact=compact):
             self.append('return;')
-    
+
     @contextmanager
     def _kernel_(self):
         name=self.fname
@@ -139,7 +139,7 @@ class KernelCodeGenerator(KernelBase, OpenClCodeGenerator):
                 carg.ptr=False
                 carg.storage='__constant'
                 carg.declare(self)
-        
+
         with super(KernelCodeGenerator,self)._kernel_(name=name,args=fargs, args_impl=fargs_impl,
                 attributes=self.kernel_attributes) as k:
             yield k
diff --git a/hysop/backend/device/codegen/base/opencl_codegen.py b/hysop/backend/device/codegen/base/opencl_codegen.py
index b984212441f0ddd9e13a6a7ed9ade79cbf7d1da2..6c94ad397f941beacbadb3a6372f5fe7ae2d220c 100644
--- a/hysop/backend/device/codegen/base/opencl_codegen.py
+++ b/hysop/backend/device/codegen/base/opencl_codegen.py
@@ -14,7 +14,7 @@ class OpenClCodeGenerator(CodeGenerator):
         'local'      : '__local',
         'constant'   : '__constant',
         'private'    : '__private',
-        
+
         'read_only'  : '__read_only',
         'write_only' : '__write_only',
         'read_write' : '__read_write',
@@ -27,35 +27,35 @@ class OpenClCodeGenerator(CodeGenerator):
         '\n': '\n',
         ' ': ' '
     }
-        
+
     _global, _local = '__global', '__local'
-    
+
     def __init__(self,name,typegen,ext='.cl',
             known_vars=None, declare_cl_exts=True,
             **kargs):
 
         check_instance(typegen,OpenClTypeGen)
-        super(OpenClCodeGenerator,self).__init__(name=name,typegen=typegen, ext=ext, 
-                known_vars=known_vars, keywords=self.default_keywords, 
+        super(OpenClCodeGenerator,self).__init__(name=name,typegen=typegen, ext=ext,
+                known_vars=known_vars, keywords=self.default_keywords,
                 escape_seqs=self.default_escape_seqs, **kargs)
 
         self.device   = typegen.device
         self.context  = typegen.context
         self.platform = typegen.platform
-        
+
         if declare_cl_exts:
             for cl_ext in typegen.cl_requirements():
                 if (cl_ext is not None):
                     self.declare_cl_extension(cl_ext)
-    
+
 
     def test_compile(self, contexts=None):
-        print 'Test build on device {}: '.format(self.device.name),
+        print('Test build on device {}: '.format(self.device.name), end='')
         src=self.__str__()
         prg = cl.Program(self.context,src)
         prg.build()
-        print 'OK'
-    
+        print('OK')
+
 
     @staticmethod
     def _mem_flags(_local, _global):
@@ -77,7 +77,7 @@ class OpenClCodeGenerator(CodeGenerator):
     def mem_fence(self,read=False,write=False, _local=False, _global=False):
         if (not read and not write) or (not _local and not _global):
             raise ValueError('Bad memfence configuration!')
-        
+
         #opencl 1.0 has only the barrier function
         self.append('#if __OPENCL_VERSION__ < 110')
         self.indent()
@@ -96,16 +96,16 @@ class OpenClCodeGenerator(CodeGenerator):
         self.append(code)
         self.dedent()
         self.append('#endif')
-         
+
 
     def declare_cl_extension(self,extname):
         from hysop.backend.device.codegen.base.cl_extensions import ClExtCodeGen
         self.require(extname, ClExtCodeGen(extname))
         return self
-    
+
     @contextmanager
     def _kernel_(self,name,args=None,args_impl=None,arg_spaces=True,attributes=None):
-        
+
         def filter_args(_args):
             if not args:
                 newargs = ['']
@@ -118,7 +118,7 @@ class OpenClCodeGenerator(CodeGenerator):
                     if newl and i != len(_args)-1:
                         newargs.append(self.newl())
             return newargs
-        
+
         args = filter_args(args)
         if not args_impl:
             args_impl = args
@@ -131,11 +131,11 @@ class OpenClCodeGenerator(CodeGenerator):
             comma = ','
 
         kernel_kwd = self.keywords['kernel']
-        
+
         prefix = '{kernel} void {name}('.format(kernel=kernel_kwd,name=name)
         indent_proto = len(prefix)*' '
         if attributes:
-            attr = ['{} __attribute__(({}))'.format(len(kernel_kwd)*' ',at) 
+            attr = ['{} __attribute__(({}))'.format(len(kernel_kwd)*' ',at)
                     for at in attributes.values()]
             attr[0] = attr[0][len(kernel_kwd)+1:]
             attr = '\n'.join(attr)
@@ -146,22 +146,22 @@ class OpenClCodeGenerator(CodeGenerator):
             proto_prefix = prefix
         suffix = '{args})'
         indent = len(self.current_indent() + prefix)*' '
-       
-        pargs = [arg+comma if arg!=self.newl() else arg+indent_proto 
+
+        pargs = [arg+comma if arg!=self.newl() else arg+indent_proto
                 for arg in args[:-1]]+[args[-1]]
         prototype = proto_prefix + suffix.format(args=''.join(pargs)) + ';'
 
-        dargs = [arg+comma if arg!=self.newl() else arg+indent 
+        dargs = [arg+comma if arg!=self.newl() else arg+indent
                 for arg in args_impl[:-1]]+[args_impl[-1]]
         definition = prefix + suffix.format(args=''.join(dargs)) + ' '
-        
+
         self.declare_prototype(prototype, 'kernel')
         with self._codeblock_('kernel_declarations'):
             with self._block_(header_prefix=definition) as b:
                 yield b
-    
-    def vstore(self, n, ptr, offset, data, 
-            offset_is_ftype=True, align=False, 
+
+    def vstore(self, n, ptr, offset, data,
+            offset_is_ftype=True, align=False,
             jmp=False, suppress_semicolon=False):
         assert n*ptr.dim in [1,2,4,8,16]
         if ptr.dim>1:
@@ -196,14 +196,14 @@ class OpenClCodeGenerator(CodeGenerator):
         if suppress_semicolon:
             code = code.replace(';','')
         return code
-    
-    def vstore_if(self, cond, scalar_cond, 
-            n, ptr, offset, data, 
+
+    def vstore_if(self, cond, scalar_cond,
+            n, ptr, offset, data,
             offset_is_ftype=True, align=False,
             jmp=False, suppress_semicolon=False,
             use_short_circuit=False, else_cond=None):
         with self._if_(cond):
-            for i in xrange(n):
+            for i in range(n):
                 assert callable(scalar_cond), type(scalar_cond)
                 icond = scalar_cond(i)
                 idata = data[i]
@@ -219,12 +219,12 @@ class OpenClCodeGenerator(CodeGenerator):
                 self.append(code)
         _else = self._else_() if (else_cond is None) else self._elif_(else_cond)
         with _else:
-            code = self.vstore(n=n, ptr=ptr, offset=offset, data=data, 
+            code = self.vstore(n=n, ptr=ptr, offset=offset, data=data,
                     offset_is_ftype=offset_is_ftype, align=align,
                     jmp=jmp, suppress_semicolon=suppress_semicolon)
             self.append(code)
 
-    def vload(self, n, ptr, offset, 
+    def vload(self, n, ptr, offset,
             offset_is_ftype=True, align=False, jmp=False):
         assert n*ptr.dim in [1,2,4,8,16]
         if ptr.dim>1:
@@ -256,13 +256,13 @@ class OpenClCodeGenerator(CodeGenerator):
         if not jmp:
             code = code.replace('\n','')
         return code
-    
-    def vload_if(self, cond, scalar_cond, 
+
+    def vload_if(self, cond, scalar_cond,
             n, ptr, offset, dst, default_value,
-            offset_is_ftype=True, align=False, jmp=False, 
+            offset_is_ftype=True, align=False, jmp=False,
             use_short_circuit=False, else_cond=None):
         with self._if_(cond):
-            for i in xrange(n):
+            for i in range(n):
                 assert callable(scalar_cond), type(scalar_cond)
                 icond = scalar_cond(i)
                 odata = dst[i]
@@ -279,27 +279,27 @@ class OpenClCodeGenerator(CodeGenerator):
                 self.append(code)
         _else = self._else_() if (else_cond is None) else self._elif_(else_cond)
         with _else:
-            code = self.vload(n=n, ptr=ptr, offset=offset, 
+            code = self.vload(n=n, ptr=ptr, offset=offset,
                     offset_is_ftype=offset_is_ftype, align=align,
                     jmp=jmp)
             code = '{} = {};'.format(dst, code)
             self.append(code)
-    
+
     def multi_vload_if(self, cond, scalar_cond, n, offset,
             srcs, dsts, default_values,
-            offset_is_ftype=True, jmp=False, 
+            offset_is_ftype=True, jmp=False,
             use_short_circuit=False, else_cond=None,
             extra_offsets=None):
         assert len(srcs)==len(dsts)==len(default_values)>=1
         extra_offsets = first_not_None(extra_offsets, (0,)*len(srcs))
         with self._if_(cond):
             with self._align_() as al:
-                for i in xrange(n):
+                for i in range(n):
                     assert callable(scalar_cond), type(scalar_cond)
                     icond = scalar_cond(i)
                     for (ptr, dst, dval, eo) in zip(srcs, dsts, default_values, extra_offsets):
                         if (n*ptr.dim != dst.dim):
-                            msg='Pointer datatype is not consistent with destination variable.'  
+                            msg='Pointer datatype is not consistent with destination variable.'
                             raise RuntimeError(msg)
                         ii = '{}'.format(i)
                         if eo:
@@ -323,22 +323,22 @@ class OpenClCodeGenerator(CodeGenerator):
                     eoffset = '{}'.format(offset)
                     if eo:
                         eoffset += '+{}'.format(eo)
-                    code = self.vload(n=n, ptr=ptr, offset=eoffset, 
+                    code = self.vload(n=n, ptr=ptr, offset=eoffset,
                             offset_is_ftype=offset_is_ftype, align=True,
                             jmp=jmp)
                     code = '{} $= {};'.format(dst, code)
                     al.append(code)
-    
+
     def multi_vstore_if(self, cond, scalar_cond, n, offset,
-            srcs, dsts, 
-            offset_is_ftype=True, jmp=False, 
+            srcs, dsts,
+            offset_is_ftype=True, jmp=False,
             use_short_circuit=False, else_cond=None,
             extra_offsets=None):
         assert len(srcs)==len(dsts)>=1
         extra_offsets = first_not_None(extra_offsets, (0,)*len(srcs))
         with self._if_(cond):
             with self._align_() as al:
-                for i in xrange(n):
+                for i in range(n):
                     assert callable(scalar_cond), type(scalar_cond)
                     icond = scalar_cond(i)
                     for (src, ptr, eo) in zip(srcs, dsts, extra_offsets):
@@ -377,11 +377,11 @@ class OpenClCodeGenerator(CodeGenerator):
         return 'wait_group_events({}, {});'.format(num_events, event_list)
 
     #looping facilities
-    @contextmanager 
+    @contextmanager
     def _ordered_wi_execution_(self, barrier=True):
         cond0 = 'int wi{i}=0; wi{i}<get_local_size({i}); wi{i}++'
-        cond1 = ' && '.join('(wi{i}==get_local_id({i}))'.format(i=i) for i in xrange(3))
-        cond2 = ' && '.join('(get_group_id({i})==0)'.format(i=i) for i in xrange(3))
+        cond1 = ' && '.join('(wi{i}==get_local_id({i}))'.format(i=i) for i in range(3))
+        cond2 = ' && '.join('(get_group_id({i})==0)'.format(i=i) for i in range(3))
         with self._if_(cond2):
             with self._for_(cond0.format(i=2)):
                 with self._for_(cond0.format(i=1)):
@@ -390,24 +390,24 @@ class OpenClCodeGenerator(CodeGenerator):
                             self.barrier(_local=True)
                         with self._if_(cond1):
                             yield
-    @contextmanager 
+    @contextmanager
     def _first_wg_execution_(self):
-        cond = ' && '.join('(0==get_group_id({i}))'.format(i=i) for i in xrange(3))
+        cond = ' && '.join('(0==get_group_id({i}))'.format(i=i) for i in range(3))
         with self._if_(cond):
             yield
-    
-    @contextmanager 
+
+    @contextmanager
     def _first_wi_execution_(self):
-        cond1 = ' && '.join('(0==get_local_id({i}))'.format(i=i) for i in xrange(3))
-        cond2 = ' && '.join('(0==get_group_id({i}))'.format(i=i) for i in xrange(3))
+        cond1 = ' && '.join('(0==get_local_id({i}))'.format(i=i) for i in range(3))
+        cond2 = ' && '.join('(0==get_group_id({i}))'.format(i=i) for i in range(3))
         cond = '{} && {}'.format(cond1, cond2)
         with self._if_(cond):
             yield
     @staticmethod
-    def _printv(ncomponents): 
+    def _printv(ncomponents):
         assert ncomponents in [1,2,4,8,16]
         if ncomponents==1:
-            return '' 
+            return ''
         else:
             return 'v{}'.format(ncomponents)
 
diff --git a/hysop/backend/device/codegen/base/statistics.py b/hysop/backend/device/codegen/base/statistics.py
index 716015254a7a80ac0ea95c364b2bb0b52df7dc66..bdb3deccde38a1507d4b7bd4d67b6b8198340545 100644
--- a/hysop/backend/device/codegen/base/statistics.py
+++ b/hysop/backend/device/codegen/base/statistics.py
@@ -32,12 +32,12 @@ def _fill_dtype_ops():
         for itype,size in zip(int_base_types,ibytes):
             for vsize in vsizes:
                 typename = itype + ('' if vsize==1 else str(vsize))
-                dtype_ops[typename] = (vsize*float(size)/4, 'IOPS')
+                dtype_ops[typename] = (vsize*float(size)//4, 'IOPS')
     fbytes = [2,4,8]
     for ftype,size in zip(float_base_types,fbytes):
         for vsize in vsizes:
             typename = ftype + ('' if vsize==1 else str(vsize))
-            dtype_ops[typename] = (vsize*float(size)/4, 'FLOPS')
+            dtype_ops[typename] = (vsize*float(size)//4, 'FLOPS')
 
 _fill_dtype_ops()
 
@@ -67,7 +67,7 @@ class WorkStatistics(object):
         return float(self.global_mem_byte_writes)/self.global_mem_transactions()
     def global_mem_read_ratio(self):
         return float(self.global_mem_byte_reads)/self.global_mem_transactions()
-    
+
     def local_mem_transactions(self):
         return self.local_mem_byte_writes + self.local_mem_byte_reads
     def local_mem_rw_ratio(self):
@@ -82,7 +82,7 @@ class WorkStatistics(object):
         return (self.local_mem_transactions() > 0)
     def has_global_mem_transactions(self):
         return (self.global_mem_transactions() > 0)
-    
+
     def __add__(self, rhs):
         check_instance(rhs,WorkStatistics)
         stats = copy.deepcopy(self)
@@ -91,7 +91,7 @@ class WorkStatistics(object):
         stats.local_mem_byte_reads   += rhs.local_mem_byte_reads
         stats.local_mem_byte_writes  += rhs.local_mem_byte_writes
 
-        for (k,v) in rhs.ops_per_type.iteritems():
+        for (k,v) in rhs.ops_per_type.items():
             if k not in stats.ops_per_type:
                 stats.ops_per_type[k]  = v
             else:
@@ -114,18 +114,18 @@ class WorkStatistics(object):
         return self.__mul__(lhs)
 
     def __str__(self):
-        op_count = [''] + ['{}: {}'.format(k,v) for (k,v) in self.ops_per_type.iteritems() ]
+        op_count = [''] + ['{}: {}'.format(k,v) for (k,v) in self.ops_per_type.items() ]
         op_count = '\n    '.join(op_count)
 
         ss = ':: Work Statistics ::'
-        
+
         if self.has_global_mem_transactions():
             ss += '\n  Global memory:  load={} store={} total={} rw_ratio={}'.format(
             bytes2str(self.global_mem_byte_reads),
             bytes2str(self.global_mem_byte_writes),
             bytes2str(self.global_mem_transactions()),
             round(self.global_mem_rw_ratio(),2))
-        
+
         if self.has_local_mem_transactions():
             ss += '\n  Local  memory:  load={} store={} total={} rw_ratio={}'.format(
             bytes2str(self.local_mem_byte_reads),
@@ -136,7 +136,7 @@ class WorkStatistics(object):
         ss += '\n  Operations count: {}'.format(op_count)
 
         return ss
-            
+
 class TimedWorkStatistics(WorkStatistics):
     def __init__(self, workstat, duration):
         super(TimedWorkStatistics,self).__init__(workstat)
@@ -150,29 +150,29 @@ class TimedWorkStatistics(WorkStatistics):
         return self._ops_per_category
 
     def global_mem_throughput(self):
-        return self.global_mem_transactions()/self.duration
+        return self.global_mem_transactions() / self.duration
     def local_mem_throughput(self):
-        return self.local_mem_transactions()/self.duration
+        return self.local_mem_transactions() / self.duration
     def total_mem_throughput(self):
-        return self.total_mem_transactions()/self.duration
+        return self.total_mem_transactions() / self.duration
 
     def _init(self):
         for dtype in self.ops_per_type:
             if dtype not in dtype_ops.keys():
                 msg = 'unknown type {}, valed types are:\n\t{}.'.format(dtype, dtype_ops.keys())
                 raise ValueError(msg)
-        
+
         ops_count = {}
-        for (dtype, N) in self.ops_per_type.iteritems():
+        for (dtype, N) in self.ops_per_type.items():
             (multiplier,op_category) = dtype_ops[dtype]
             if op_category not in ops_count:
                 ops_count[op_category] = 0.0
             ops_count[op_category] += multiplier*N
 
         ops_per_second = {}
-        for (op_category, op_count) in ops_count.iteritems():
-            ops_per_second[op_category] = op_count/self.duration
-        
+        for (op_category, op_count) in ops_count.items():
+            ops_per_second[op_category] = op_count / self.duration
+
         self._ops_per_category = ops_count
         self._ops_per_second   = ops_per_second
 
diff --git a/hysop/backend/device/codegen/base/struct_codegen.py b/hysop/backend/device/codegen/base/struct_codegen.py
index f858f28f44ec88832b67c6605688be344852be06..fb4b913a04bac5b55414e85880a3b6e7458d167e 100644
--- a/hysop/backend/device/codegen/base/struct_codegen.py
+++ b/hysop/backend/device/codegen/base/struct_codegen.py
@@ -15,17 +15,17 @@ class StructCodeGenerator(OpenClCodeGenerator):
             typedef=None,comments=None,
             ctype_overrides=None,
             custom_types={}):
-        
+
         super(StructCodeGenerator,self).__init__(name=name,typegen=typegen)
 
         self.typedef = typedef
         self.dtype = np.dtype(dtype)
         self.ctype = self.typedef if self.typedef else 'struct {}'.format(self.name)
-        
+
         cl.tools.get_or_register_dtype(self.ctype, self.dtype)
         register_ctype_dtype(self.ctype, self.dtype)
 
-        for _ctype,_dtype in custom_types.iteritems():
+        for _ctype,_dtype in custom_types.items():
             cl.tools.get_or_register_dtype(_ctype,dtype=_dtype)
 
         self.gencode(comments, ctype_overrides)
@@ -35,14 +35,14 @@ class StructCodeGenerator(OpenClCodeGenerator):
 
     def c_decl(self):
         assert (self.context is not None)
-        dtype,cdecl = cl.tools.match_dtype_to_c_struct( \
-                self.device,self.ctype.replace('struct',''),self.dtype,context=self.context)
+        (dtype, cdecl) = cl.tools.match_dtype_to_c_struct( \
+                self.device, self.ctype.replace('struct',''), self.dtype, context=self.context)
         return cdecl
 
     def gencode(self, comments, ctype_overrides):
-        struct_vars = re.compile('\s+((?:struct\s+)?\w+)\s+((?:\s*\**(?:\w+)(?:\[\d+\])*[,;])+)')
+        struct_vars = re.compile(r'\s+((?:struct\s+)?\w+)\s+((?:\s*\**(?:\w+)(?:\[\d+\])*[,;])+)')
         lines = self.c_decl().split('\n')
-        
+
         with self._struct_(name=self.name,typedef=self.typedef):
             with self._var_block_() as vb:
                 i=0
@@ -75,5 +75,5 @@ if __name__ == '__main__':
 
     scg = StructCodeGenerator('TestStruct',dtype,typedef=None,typegen=tg)
     scg.edit()
-    
+
 
diff --git a/hysop/backend/device/codegen/base/test.py b/hysop/backend/device/codegen/base/test.py
index 31f66b47751dcc51d34bc81ea8b269f1b89f382b..ced3eaae605fc0bf6bc3299c388a408c4d58c1db 100644
--- a/hysop/backend/device/codegen/base/test.py
+++ b/hysop/backend/device/codegen/base/test.py
@@ -20,7 +20,7 @@ def _test_mesh_info(name, typegen, dim, ghosts, resolution, **kargs):
     vsize = upper_pow2_or_3(dim)
     ghosts = [ghosts]*dim if np.isscalar(ghosts) else ghosts
     ghosts = np.asarray(ghosts)[:dim]
-    
+
     resolution = [resolution]*dim if np.isscalar(resolution) else resolution
     resolution = np.asarray(resolution)[:dim]
 
@@ -31,7 +31,7 @@ def _test_mesh_info(name, typegen, dim, ghosts, resolution, **kargs):
     compute_resolution = resolution-2*ghosts
 
     xmin = np.zeros((dim,))
-    xmax  = np.ones((dim,)) 
+    xmax  = np.ones((dim,))
     size = xmax - xmin
     dx = size / (compute_resolution - 1)
 
@@ -41,26 +41,26 @@ def _test_mesh_info(name, typegen, dim, ghosts, resolution, **kargs):
     mbs = MeshBaseStruct(typegen, vsize)
     mis = MeshInfoStruct(typegen, vsize, mbs_typedef=mbs.typedef)
 
-    # create a local numpy and a codegen MeshInfoStruct variable 
+    # create a local numpy and a codegen MeshInfoStruct variable
     global_mesh = mbs.create(name='global',
-            resolution=resolution, compute_resolution=compute_resolution, 
-            xmin=xmin, xmax=xmax, 
+            resolution=resolution, compute_resolution=compute_resolution,
+            xmin=xmin, xmax=xmax,
             boundaries=boundaries,
             size=size,
             **kargs)
-    
+
     xmin -= ghosts*dx
     xmax += ghosts*dx
 
     local_mesh = mbs.create(name='local',
-            resolution=resolution, compute_resolution=compute_resolution, 
-            xmin=xmin, xmax=xmax, 
+            resolution=resolution, compute_resolution=compute_resolution,
+            xmin=xmin, xmax=xmax,
             boundaries=boundaries,
             size=size,
             **kargs)
 
     (np_mis, cg_mis) = mis.create(name=name,
-            dim=dim, 
+            dim=dim,
             start=start, stop=stop,
             ghosts=ghosts,
             dx=dx,
@@ -69,7 +69,7 @@ def _test_mesh_info(name, typegen, dim, ghosts, resolution, **kargs):
 
     return (np_mis, cg_mis)
 
-def make_slice_views(compute_grid_size, 
+def make_slice_views(compute_grid_size,
         lghosts=None, rghosts=None, step=None):
     compute_grid_size = np.asarray(compute_grid_size)
     dim = compute_grid_size.size
@@ -79,13 +79,13 @@ def make_slice_views(compute_grid_size,
     elif np.isscalar(lghosts):
         lghosts = (lghosts,)*dim
     lghosts = np.asarray(lghosts)
-    
+
     if (rghosts is None):
         rghosts = (0,)*dim
     elif np.isscalar(rghosts):
         rghosts = (rghosts,)*dim
     rghosts = np.asarray(rghosts)
-    
+
     if (step is None):
         step = (1,)*dim
     elif np.isscalar(step):
@@ -99,5 +99,5 @@ def make_slice_views(compute_grid_size,
 
     return view[::-1], grid_size, grid_shape, lghosts, rghosts, step
 
-    
+
 
diff --git a/hysop/backend/device/codegen/base/union_codegen.py b/hysop/backend/device/codegen/base/union_codegen.py
index c574b2c5eac4126b2ec3ee9c511ee02b5e40ccb1..7f50762a3c903cbfcbec36acd7754823765ddb1a 100644
--- a/hysop/backend/device/codegen/base/union_codegen.py
+++ b/hysop/backend/device/codegen/base/union_codegen.py
@@ -15,9 +15,9 @@ class UnionCodeGenerator(OpenClCodeGenerator):
             typedef=None,comments=None,
             ctype_overrides=None,
             custom_types={}):
-        
+
         super(UnionCodeGenerator,self).__init__(name=name,typegen=typegen)
-        
+
         self.typedef = typedef
         self.dtype = np.dtype(dtype)
         self.ctype = self.typedef if (self.typedef is not None) else 'union {}'.format(self.name)
@@ -26,20 +26,20 @@ class UnionCodeGenerator(OpenClCodeGenerator):
         register_ctype_dtype(self.ctype,self.dtype)
 
         self.gencode(comments, ctype_overrides)
-    
+
     def c_decl(self):
         assert (self.context is not None)
         dtype,cdecl = cl.tools.match_dtype_to_c_struct( \
                 self.device,self.ctype.replace('union',''),self.dtype,context=self.context)
         return cdecl
-    
+
     def fields(self):
         return self.dtype.fields
 
     def gencode(self, comments, ctype_overrides):
-        union_vars = re.compile('\s+((?:struct\s+)?\w+)\s+((?:\s*\**(?:\w+)(?:\[\d+\])*[,;])+)')
+        union_vars = re.compile(r'\s+((?:struct\s+)?\w+)\s+((?:\s*\**(?:\w+)(?:\[\d+\])*[,;])+)')
         lines = self.c_decl().split('\n')
-        
+
         with self._union_(name=self.name,typedef=self.typedef):
             with self._var_block_() as vb:
                 i=0
@@ -73,5 +73,5 @@ if __name__ == '__main__':
             typegen=_test_typegen())
     scg.edit()
     scg.test_compile()
-    
+
 
diff --git a/hysop/backend/device/codegen/base/utils.py b/hysop/backend/device/codegen/base/utils.py
index 7ef7fda6e714af8fb8e19689c51974ed735d9648..d530944259d304daf05d98c7e6504717f82dca2c 100644
--- a/hysop/backend/device/codegen/base/utils.py
+++ b/hysop/backend/device/codegen/base/utils.py
@@ -1,4 +1,4 @@
-import hashlib 
+import hashlib
 
 class WriteOnceDict(dict):
     def __init__(self,**kargs):
@@ -65,7 +65,7 @@ class ArgDict(WriteOnceDict):
         return iter([(argname, self.__getitem__(argname)) for argname in self.arg_order])
 
     def update(self, other):
-        for (key,val) in other.iteritems():
+        for (key,val) in other.items():
             self[key] = val
         return self
 
@@ -88,7 +88,7 @@ class ArgDict(WriteOnceDict):
                 assert var.known()
                 assert var.symbolic_mode == False
                 assert var.is_symbolic() == False
-        
+
         if len(function_impl_args) and len(function_proto_args[-1]):
             if function_proto_args[-1][-1]=='\n':
                 function_proto_args[-1] = function_proto_args[-1][:-1]
@@ -112,7 +112,7 @@ class ArgDict(WriteOnceDict):
             return '_'+self.hash(suffix)
         else:
             return ''
-    
+
     #handle type function overloading
     def codegen_name_suffix(self,return_type, known_args):
         suffix = '({})_'.format(return_type)
@@ -130,15 +130,15 @@ class ArgDict(WriteOnceDict):
             return '_'+self.hash(suffix)
         else:
             return ''
-    
+
     # robust with up to 256 functions with the same basename
     # max_fun = sqrt(16**nb) = 2**(2*nb)
     def hash(self,string):
-        return hashlib.sha1(string).hexdigest()[:4]
+        return hashlib.sha1(string.encode('utf-8')).hexdigest()[:4]
 
 
 class SortedDict(dict):
-    
+
     @classmethod
     def _key(cls, k):
         if hasattr(k, 'name'):
diff --git a/hysop/backend/device/codegen/base/variables.py b/hysop/backend/device/codegen/base/variables.py
index 2e84e7fae725311e1239985278d29f78ee2b4671..19abfce870cc12b5836b07bc9002d491b2ac46e4 100644
--- a/hysop/backend/device/codegen/base/variables.py
+++ b/hysop/backend/device/codegen/base/variables.py
@@ -1,12 +1,13 @@
-    
-from hysop.deps import np, re, copy
+import re, copy
+import numpy as np
+
 from hysop.tools.types import to_list, check_instance, first_not_None
 import hysop.backend.device.opencl.opencl_types
 
 from hysop.backend.device.codegen.base.utils import VarDict
 from hysop.backend.device.opencl.opencl_types import TypeGen, OpenClTypeGen
 
-# opencl extras        
+# opencl extras
 from hysop.backend.device.opencl.opencl_types import cl_type_to_dtype
 
 _ctype_to_dtype = {
@@ -30,7 +31,7 @@ _ctype_to_dtype = {
         'ushort':                 np.uint16,
         'uint':                   np.uint32,
         'ulong':                  np.uint64,
-        
+
         'unsigned char':          np.uint8,
         'unsigned short':         np.uint16,
         'unsigned int':           np.uint32,
@@ -39,7 +40,7 @@ _ctype_to_dtype = {
 
         'size_t':                 np.uint64, # warning: host and/or device can be 32bits, do not use in kernel arguments
         'ptrdiff_t':              np.int64,  # warning: host and/or device can be 32bits, do not use in kernel arguments
-        
+
         'half':                   np.float16,
         'float' :                 np.float32,
         'double':                 np.float64,
@@ -95,12 +96,12 @@ def register_ctype_dtype(ctype,dtype):
 class CodegenVariable(object):
 
     def __init__(self, name, ctype, typegen,
-            storage=None, const=False, volatile=False, 
+            storage=None, const=False, volatile=False,
             add_impl_const=False, nl=None, align=None,
             ptr=False, ptr_restrict=None, ptr_volatile=None, ptr_const=None,
             value=None, svalue=None, init=None,
             symbolic_mode=False, struct_var=None):
-        
+
         check_instance(typegen, TypeGen)
 
         check_instance(name, str)
@@ -109,13 +110,13 @@ class CodegenVariable(object):
         assert len(name)>0
         assert len(ctype)>0
         assert (storage is None) or len(ctype)>0
-        
+
         check_instance(const, bool)
         check_instance(volatile, bool)
         check_instance(add_impl_const, bool)
         check_instance(nl, bool, allow_none=True)
         check_instance(align, bool, allow_none=True)
-        
+
         self.name     = name
         self.ctype    = ctype
         self.typegen  = typegen
@@ -129,17 +130,17 @@ class CodegenVariable(object):
         self.nl = nl if (nl is not None) else (storage is not None)
         self.struct_var = struct_var
         self.symbolic_mode = symbolic_mode
-        
-        # pointer 
+
+        # pointer
         if isinstance(ptr,bool):
             is_ptr = ptr
             ptr_level = int(ptr)
         else:
-            check_instance(ptr, (int, long))
+            check_instance(ptr, int)
             is_ptr = True
             ptr_level = ptr
         del ptr
-        
+
         if is_ptr:
             ptr_restrict = [] if (ptr_restrict is None) else to_list(ptr_restrict)
             ptr_const    = [] if (ptr_const is None)    else to_list(ptr_const)
@@ -147,7 +148,7 @@ class CodegenVariable(object):
 
             _len = max(len(ptr_restrict), len(ptr_const), len(ptr_volatile))
             assert (_len <= ptr_level)
-            
+
             ptr_restrict = np.asarray(ptr_restrict + [False]*(ptr_level-len(ptr_restrict)))
             ptr_volatile = np.asarray(ptr_volatile + [False]*(ptr_level-len(ptr_volatile)))
             ptr_const    = np.asarray(ptr_const    + [False]*(ptr_level-len(ptr_const)))
@@ -161,7 +162,7 @@ class CodegenVariable(object):
         self.ptr_restrict = ptr_restrict
         self.ptr_const = ptr_const
         self.ptr_volatile = ptr_volatile
-        
+
         # value and svalue
         if value is None:
             value = None
@@ -185,11 +186,11 @@ class CodegenVariable(object):
                 value = value.copy()
         else:
             pass
-        
+
         self.value   = value
         self.svalue  = svalue
         self.init    = init
-        
+
         # check
         if add_impl_const:
             if (not is_ptr):
@@ -202,22 +203,22 @@ class CodegenVariable(object):
                     msg='Variable {} has ptr_const[-1]=True and add_impl_const has been specified!'
                     msg=msg.format(name)
                     raise ValueError(msg)
-    
+
     def nv_replace(self, old, new):
         name = self.name.replace(old, new)
         return self.newvar(name)
 
     def copy(self):
         return self.newvar(name=self.name)
-    
+
     @property
     def dim(self):
         return self.typegen.components(self.ctype)
-    
+
     @property
     def basetype(self):
         return self.typegen.basetype(self.ctype)
-    
+
     def newvar(self, name, nl=False, typegen=None,
             storage=None, value=None, svalue=None, init=None,
             ctype=None, const=None, volatile=None, add_impl_const=None,
@@ -237,13 +238,13 @@ class CodegenVariable(object):
             ptr_const    = self.ptr_const    if (ptr_const is None)    else ptr_const
             ptr_volatile = self.ptr_volatile if (ptr_volatile is None) else ptr_volatile
             ptr_restrict = self.ptr_restrict if (ptr_restrict is None) else ptr_restrict
-        
+
         return cls(name=name, nl=nl,
                 value=value, svalue=svalue, init=init,
-                ctype=ctype, storage=storage, 
-                const=const, volatile=volatile, 
+                ctype=ctype, storage=storage,
+                const=const, volatile=volatile,
                 add_impl_const=add_impl_const,
-                ptr=ptr, ptr_restrict=ptr_restrict, 
+                ptr=ptr, ptr_restrict=ptr_restrict,
                 ptr_volatile=ptr_volatile, ptr_const=ptr_const,
                 typegen=typegen, **kwds)
 
@@ -256,13 +257,13 @@ class CodegenVariable(object):
             return self.newvar(name=varname, value=value, init=None, **kwds)
 
     def pointer_alias(self, name, ctype, **kargs):
-        handle = self.newvar(name=name, ctype=ctype, 
+        handle = self.newvar(name=name, ctype=ctype,
                     init='({})({})'.format(
                         self.full_ctype(cast=True,ctype=ctype), self),
                     **kargs)
         return handle
 
-    def pointer(self, name, ptr_level, 
+    def pointer(self, name, ptr_level,
             ptr_const=None, ptr_volatile=None, ptr_restrict=None,
             add_impl_const=False, with_init=True, **kargs):
         ptr_const    = [False]*ptr_level if (ptr_const is None)    else to_list(ptr_const)
@@ -282,12 +283,12 @@ class CodegenVariable(object):
         else:
             init = None
         return self.newvar(name=name, ptr=ptr_level,
-                ptr_const=ptr_const, ptr_volatile=ptr_volatile, 
+                ptr_const=ptr_const, ptr_volatile=ptr_volatile,
                 ptr_restrict=ptr_restrict, init=init,
                 add_impl_const=add_impl_const, **kargs)
 
 
-    def base_ctype(self, storage=None, ctype=None, 
+    def base_ctype(self, storage=None, ctype=None,
             const=None, volatile=None,
             impl=True, align=None,
             add_impl_const=None):
@@ -295,7 +296,7 @@ class CodegenVariable(object):
         storage  = self.storage  if (storage is None)  else storage
         ctype    = self.ctype    if (ctype is None)    else ctype
         volatile = self.volatile if (volatile is None) else volatile
-        
+
         if (const is None):
             const = self.const
             if impl and (not self.is_ptr) and (not const):
@@ -309,8 +310,8 @@ class CodegenVariable(object):
         if not align:
             base_ctype = base_ctype.replace('$','')
         return base_ctype.strip()
-        
-    
+
+
     def ptr_ctype(self, impl=True, add_impl_const=None, cast=False):
         if self.is_ptr:
             ptrs=[]
@@ -350,7 +351,7 @@ class CodegenVariable(object):
             full_ctype = full_ctype.replace('$ ', '$')
 
         return full_ctype.strip()
-    
+
     def argument(self,impl,
             nl=None, name=None,
             **kargs):
@@ -367,7 +368,7 @@ class CodegenVariable(object):
         return (self.struct_var is not None)
     def known(self):
         return (self.value is not None)
-    
+
     def force_symbolic(self, force=True):
         if force is None:
             return
@@ -375,7 +376,7 @@ class CodegenVariable(object):
     def set_value(self, val):
         assert not self.known(), 'Value was already set in variable {}!'.format(self.name)
         self.value = val
-    
+
     def sval(self, symbolic=None):
         symbolic = self.is_symbolic() if (symbolic is None) else symbolic
         if symbolic:
@@ -413,10 +414,10 @@ class CodegenVariable(object):
         if (not multidecl) and len(ctype)==0:
             msg= 'Failed to get full ctype in {}.'.format(self.__class__)
             raise RuntimeError(msg)
-        
+
         # static array ctype needs to be split
         name = self.decl_name()
-        
+
         if init is False:
             msg='Const variable should be initialized at declaration.'
             assert (not self.const) or (not self.add_impl_const), msg
@@ -427,12 +428,12 @@ class CodegenVariable(object):
             code = '{}${}'.format(ctype, name)
         else:
             code = '{} ${}'.format(ctype, name)
-        
+
         if (init is not None):
             if compact:
-                code = '{}={}'.format(code,init)
+                code = '{}={}'.format(code, init)
             else:
-                code = '{} $= {}'.format(code,init)
+                code = '{} $= {}'.format(code, init)
         elif self.known():
             self.force_symbolic(False)
             sval = self.sval()
@@ -440,19 +441,19 @@ class CodegenVariable(object):
                 code = '{}={}'.format(code,sval)
             else:
                 code = '{} $= {}'.format(code,sval)
-        
+
         if not multidecl:
             code+=';'
-        
-        self.force_symbolic() 
+
+        self.force_symbolic()
         self.declared = True
-        
+
         if not align:
             code = code.replace('$','')
         if codegen is not None:
             codegen.append(code)
         return code.strip()
-    
+
     def affect(self, codegen=None, align=None, init=None,
             compact=False, i=None):
         align = first_not_None(align, self.align, False)
@@ -473,11 +474,11 @@ class CodegenVariable(object):
         if codegen is not None:
             codegen.append(code)
         return code
-    
+
     @property
     def dtype(self):
         return ctype_to_dtype(self.ctype)
-    
+
     def __getitem__(self,ss):
         if self.is_ptr:
             return '{}[{}]'.format(self.name,ss)
@@ -491,7 +492,7 @@ class CodegenVariable(object):
             return '{}({})'.format(self.name,self.ctype)
         else:
             return '{}({},{})'.format(self.name,self.ctype,self.value)
-    
+
     def __call__(self):
         return self.sval()
     def __str__(self):
@@ -501,13 +502,13 @@ class CodegenArray(CodegenVariable):
     @staticmethod
     def _initialize_rec(name, typegen,
                     storage, ctype, const, volatile,
-                    shape, sshape, value, svalue, 
+                    shape, sshape, value, svalue,
                     ptr_level, ptr_restrict, ptr_const, ptr_volatile,
                     symbolic_mode):
-       
+
         if (value is None):
             return value, svalue
-            
+
         s0 = shape[0]
 
         if ptr_level==1:
@@ -518,15 +519,15 @@ class CodegenArray(CodegenVariable):
         else:
             _shape  = shape[1:]
             _sshape = sshape[1:]
-            
+
             _ptr_level      = ptr_level - 1
             _ptr_const    = ptr_const[:-1]
             _ptr_restrict = ptr_restrict[:-1]
             _ptr_volatile = ptr_volatile[:-1]
-            
+
             _value  = [None]*s0
             _svalue = [None]*s0
-            for d in xrange(s0):
+            for d in range(s0):
                 _name   = '{}_{}'.format(name, d)
                 dvalue  = value[d]
                 dsvalue = svalue[d]
@@ -551,10 +552,10 @@ class CodegenArray(CodegenVariable):
                 _svalue=None
             else:
                 _svalue = '{\n\t'+',\n\t'.join(_svalue)+'\n}'
-        
+
         return _value, _svalue
 
-    def __init__(self, name, ctype, typegen, 
+    def __init__(self, name, ctype, typegen,
             storage=None, volatile=False, const=False, add_impl_const = False,
             dim=1, ptr_const=None, ptr_volatile=None, ptr_restrict=None,
             shape=None, sshape=None,
@@ -564,13 +565,13 @@ class CodegenArray(CodegenVariable):
 
         ptr_level = dim
         del dim
-        
+
         if _direct_init:
             _value, _svalue = value, svalue
         else:
-            ptr_const    = [] if (ptr_const is None)    else to_list(ptr_const) 
-            ptr_volatile = [] if (ptr_volatile is None) else to_list(ptr_volatile) 
-            ptr_restrict = [] if (ptr_restrict is None) else to_list(ptr_restrict) 
+            ptr_const    = [] if (ptr_const is None)    else to_list(ptr_const)
+            ptr_volatile = [] if (ptr_volatile is None) else to_list(ptr_volatile)
+            ptr_restrict = [] if (ptr_restrict is None) else to_list(ptr_restrict)
             ptr_const    += [False]*(ptr_level-len(ptr_const))
             ptr_volatile += [False]*(ptr_level-len(ptr_volatile))
             ptr_restrict += [False]*(ptr_level-len(ptr_restrict))
@@ -583,7 +584,7 @@ class CodegenArray(CodegenVariable):
                     shape = value.shape
                 else:
                     assert value.shape == shape, 'shape mismatch.'
-               
+
             if (svalue is not None):
                 svalue = np.asarray(svalue)
                 if value is None:
@@ -598,14 +599,14 @@ class CodegenArray(CodegenVariable):
                 sview = svalue.flat
                 for i,v in enumerate(value.flat):
                     sview[i] = typegen.dump(v)
-            
+
             if (shape is not None):
                 if len(shape)!=ptr_level:
                     raise ValueError('shape dim mismatch!')
             else:
                 shape = (None,)*ptr_level
             shape = np.asarray(shape)
-            
+
             if (sshape is None) and (shape[0] != None):
                 sshape = [str(s) for s in shape]
             elif (sshape is not None) and len(sshape)!=dim:
@@ -613,22 +614,22 @@ class CodegenArray(CodegenVariable):
             else:
                 sshape = (None,)*ptr_level
             sshape = np.asarray(sshape)
-       
-            _value, _svalue = CodegenArray._initialize_rec(name, typegen, 
+
+            _value, _svalue = CodegenArray._initialize_rec(name, typegen,
                     storage, ctype, const, volatile,
-                    shape, sshape, value, svalue, 
+                    shape, sshape, value, svalue,
                     ptr_level, ptr_restrict, ptr_const, ptr_volatile,
                     symbolic_mode)
-        
+
         super(CodegenArray, self).__init__(name=name,
-                storage=storage, ctype=ctype, typegen=typegen, 
+                storage=storage, ctype=ctype, typegen=typegen,
                 value=_value, svalue=_svalue,
                 const=const, add_impl_const=add_impl_const, volatile=volatile,
                 ptr=ptr_level, ptr_restrict=ptr_restrict, ptr_const=ptr_const, ptr_volatile=ptr_volatile,
                 symbolic_mode=symbolic_mode, struct_var=struct_var)
         self.shape = shape
         self.sshape = sshape
-        
+
     def decl_name(self):
         if self.shape is not None:
             static_array = ['[{}]'.format(val) for val in self.shape]
@@ -645,11 +646,11 @@ class CodegenArray(CodegenVariable):
             return self.sshape.size
         msg='unknown array dim.'
         raise RuntimeError(msg)
-    
+
     def ptr_ctype(self, impl=True, add_impl_const=None, cast=False):
         if self.is_ptr:
             add_impl_const = self.add_impl_const if (add_impl_const is None) else add_impl_const
-            
+
             dim = self.array_dim()
             ptr_const    = self.ptr_const[dim:]
             ptr_volatile = self.ptr_volatile[dim:]
@@ -668,20 +669,20 @@ class CodegenArray(CodegenVariable):
         else:
             ptr_ctype=''
         return ptr_ctype
-    
-        
-        
+
+
+
 class CodegenVector(CodegenVariable):
     def __init__(self, name, ctype, dim, typegen,
             value=None,svalue=None,
-            storage=None, const=False, volatile=False, 
+            storage=None, const=False, volatile=False,
             ptr=False, ptr_const=None, ptr_volatile=None, ptr_restrict=None,
             add_impl_const=False, nl=None,
             symbolic_mode=False, struct_var=None,
             init=None):
         super(CodegenVector,self).__init__(name=name,ctype=ctype,value=value,typegen=typegen,
                 const=const, volatile=volatile, add_impl_const=add_impl_const,
-                storage=storage, nl=nl, 
+                storage=storage, nl=nl,
                 ptr=ptr, ptr_const=ptr_const, ptr_volatile=ptr_volatile, ptr_restrict=ptr_restrict,
                 symbolic_mode=symbolic_mode,struct_var=struct_var,init=init)
 
@@ -689,7 +690,7 @@ class CodegenVector(CodegenVariable):
         self._dim = dim
 
         if (value is not None):
-            self.svalue = svalue if svalue else [typegen.dump(v) for v in value] 
+            self.svalue = svalue if svalue else [typegen.dump(v) for v in value]
         else:
             assert(not svalue)
             self.svalue = None
@@ -714,7 +715,7 @@ class CodegenVector(CodegenVariable):
 
     def __getitem__(self,i):
         return self.sval(i)
-    
+
     def __repr__(self):
         if self.is_symbolic():
             return '{}({})'.format(self.name,self.ctype)
@@ -731,7 +732,7 @@ class CodegenVectorClBuiltin(CodegenVector):
             storage=None, nl=None,
             init=None,
             symbolic_mode=False, struct_var=None,**kwds):
-        
+
         factor = typegen.components(btype)
         btype  = typegen.basetype(btype)
         dim *= factor
@@ -748,34 +749,34 @@ class CodegenVectorClBuiltin(CodegenVector):
             # scalar type
             ctype = btype
             access_mode=None
-        
+
         svalue = None
         if (value is not None):
             dtype = ctype_to_dtype(btype)
             value = np.asarray(value,dtype)
             assert value.size == dim
             svalue = [typegen.dump(np.asarray([f],dtype=dtype)[0]) for f in value]
-        
+
         super(CodegenVectorClBuiltin,self).__init__(name=name,ctype=ctype,dim=dim,typegen=typegen,
                 value=value,svalue=svalue, const=const, add_impl_const=add_impl_const,
                 storage=storage, nl=nl, symbolic_mode=symbolic_mode, struct_var=struct_var,
                 init=init)
         self.btype  = btype
         self.access_mode = access_mode
-    
+
     def newvar(self, name, btype=None, dim=None, **kwds):
         btype = first_not_None(btype, self.btype)
         dim = first_not_None(dim, self.dim)
         return super(CodegenVectorClBuiltin, self).newvar(name=name, btype=btype, dim=dim,
                 **kwds)
 
-    def view(self,name,components,const=False):
-        if isinstance(components,slice):
+    def view(self, name, components, const=False):
+        if isinstance(components, slice):
             start, stop, step = components.indices(self.dim)
-            it = range(start,stop,step)
+            it = tuple(range(start, stop, step))
             dim = len(it)
         elif components is Ellipsis:
-            it = range(self.dim)
+            it = tuple(range(self.dim))
             dim = self.dim
         else:
             raise ValueError('Unknown components type {}.'.format(components.__class__.__name__))
@@ -784,13 +785,13 @@ class CodegenVectorClBuiltin(CodegenVector):
             raise ValueError('Dimension of view is greater than original vector!')
 
         return CodegenVectorClBuiltin(name=name,
-                btype=self.btype, 
+                btype=self.btype,
                 dim=dim,
                 typegen=self.typegen,
                 access_mode=self.access_mode,
                 const=self.const or const,
                 add_impl_const=self.add_impl_const,
-                storage=self.storage, 
+                storage=self.storage,
                 symbolic_mode=self.symbolic_mode,
                 value = self.value[components] if (self.value is not None) else None,
                 init=self[components])
@@ -802,7 +803,7 @@ class CodegenVectorClBuiltin(CodegenVector):
         value = np.asarray(value,dtype).copy()
         if (value.size != self.dim):
             raise ValueError('value dimension mismatch!')
-        
+
         if (self.value != value).any():
             if self.known():
                 msg='Value was already set in variable {}!'.format(self.name)
@@ -825,11 +826,13 @@ class CodegenVectorClBuiltin(CodegenVector):
                 return self.svalue[i]
             else:
                 return self[:]
-    
+
     def __getitem__(self, key):
         dim = self.dim
+        if isinstance(key, range):
+            key = tuple(key)
         if isinstance(key,slice) :
-            ids = range(*key.indices(dim))
+            ids = tuple(range(*key.indices(dim)))
             if self.declared and key.indices(dim)==(0,dim,1):
                 return self.name
             else:
@@ -851,26 +854,26 @@ class CodegenVectorClBuiltin(CodegenVector):
                 value = [self.svalue[i] for i in key]
                 return '({})({})'.format(ctype, ','.join(value))
             return access
-        elif isinstance(key, (int,long)) :
+        elif isinstance(key, (int, np.integer)) :
             if key<0:
                 key += dim
             if key<0 or key>=dim:
-                raise IndexError, "The index {} is out of range.".format(key)
+                raise IndexError("The index {} is out of range.".format(key))
             return self.sval(key)
         else:
             msg='Invalid key type {}!'.format(type(key))
             raise TypeError(msg)
-    
+
     def declare(self, codegen=None, init=None, **kargs):
-        init = init or self.init
-        if isinstance(init,int):
-            init = ','.join([self.typegen.dump(init) for _ in xrange(self.dim)])
+        init = first_not_None(init, self.init)
+        if isinstance(init, int):
+            init = ','.join([self.typegen.dump(init) for _ in range(self.dim)])
             init = '({})({})'.format(self.ctype,init)
         elif init.__class__ in [list,tuple,np.ndarray]:
-            init = ','.join([self.typegen.dump(init[i]) for i in xrange(self.dim)])
+            init = ','.join([self.typegen.dump(init[i]) for i in range(self.dim)])
             init = '({})({})'.format(self.ctype,init)
         return super(CodegenVectorClBuiltin,self).declare(init=init, codegen=codegen, **kargs)
-    
+
 
 class CodegenVectorClBuiltinFunc(CodegenVectorClBuiltin):
     def __init__(self,fname,name,btype,dim,typegen,
@@ -883,7 +886,7 @@ class CodegenVectorClBuiltinFunc(CodegenVectorClBuiltin):
 
     def fval(self,i=None):
         if i is None:
-            value = [self.fval(i) for i in xrange(self.dim)]
+            value = [self.fval(i) for i in range(self.dim)]
             return '({})({})'.format(self.ctype, ','.join(value))
         else:
             assert i<self.dim
@@ -905,22 +908,22 @@ class CodegenStruct(CodegenVariable):
                     struct_var=None,
                     value=None,
                     var_overrides=None):
-        
+
         super(CodegenStruct,self).__init__(
                 name=name,
-                ctype=struct.ctype, 
+                ctype=struct.ctype,
                 typegen=struct.typegen,
                 storage=storage, const=const, volatile=volatile,
-                ptr=ptr, ptr_const=ptr_const, 
+                ptr=ptr, ptr_const=ptr_const,
                 ptr_volatile=ptr_volatile, ptr_restrict=ptr_restrict,
-                add_impl_const=add_impl_const, nl=nl, 
+                add_impl_const=add_impl_const, nl=nl,
                 value=value,
-                symbolic_mode=symbolic_mode, 
+                symbolic_mode=symbolic_mode,
                 struct_var=struct_var)
-       
+
         self.genvars(struct, var_overrides)
-    
-    
+
+
     def newvar(self, *args, **kwds):
         kwds.setdefault('cls', CodegenVariable)
         return super(CodegenStruct, self).newvar(*args, **kwds)
@@ -929,7 +932,7 @@ class CodegenStruct(CodegenVariable):
         if force is None:
             return
         super(CodegenStruct,self).force_symbolic(force)
-        for k,var in self.vars.iteritems():
+        for k,var in self.vars.items():
             var.force_symbolic(force)
 
     def __getitem__(self,key):
@@ -951,9 +954,9 @@ class CodegenStruct(CodegenVariable):
             var_overrides = {}
 
         self.vars = VarDict()
-        
-        struct_vars = re.compile('\s+((?:struct\s+)?\w+)\s+((?:\s*\**(?:\w+)(?:\[\d+\])*[,;])+)')
-        var_decl    = re.compile('(\**)(\w+)((?:\[\d+\])*)')
+
+        struct_vars = re.compile(r'\s+((?:struct\s+)?\w+)\s+((?:\s*\**(?:\w+)(?:\[\d+\])*[,;])+)')
+        var_decl    = re.compile(r'(\**)(\w+)((?:\[\d+\])*)')
         lines = struct.c_decl().split('\n')
 
         svalue = []
@@ -967,7 +970,7 @@ class CodegenStruct(CodegenVariable):
                     ptrs      = match.group(1)
                     fieldname = match.group(2)
                     array     = match.group(3)
-                    
+
                     nptrs     = len(ptrs) if ptrs else 0
                     narray    = [int(x) for x in array[:-1].replace('[','').split(']')] \
                                     if array else None
@@ -1012,31 +1015,29 @@ class CodegenStruct(CodegenVariable):
                                 value=field_value,
                                 typegen=struct.typegen,
                                 struct_var=self)
-                    
+
                     var.force_symbolic(False)
-                    
+
                     decl = var()
                     decl = decl.replace('\n', '\n\t')
                     sval = '.{} $= {}'.format(fieldname,decl)
                     svalue.append(sval)
-                    
+
                     var.force_symbolic(self.symbolic_mode)
                     self.vars[fieldname] = var
 
         if self.known():
             self.svalue = '{\n\t' + ',\n\t'.join(svalue) + '\n}'
-    
+
         def __getitem__(self,key):
             return self.vars[key]
-    
+
 
 if __name__ == '__main__':
     from hysop.backend.device.codegen.base.test import _test_typegen as test_typegen
     tg = test_typegen('float')
 
-    print':: ARRAYS ::'
-    #var = CodegenArray(name='A0',ctype='float',shape=(3,),value=[1,2,3],typegen=tg)
-    #print var.declare()
+    print(':: ARRAYS ::')
     var = CodegenArray(name='A0',ctype='float',dim=3,
             value=[[
                     [1,2,3],
@@ -1047,72 +1048,72 @@ if __name__ == '__main__':
                     [13,14,15],
                     [16,17,18]
                 ]],typegen=tg)
-    print var.declare()
-    
+    print(var.declare())
+
     # runtime known variable
-    print ':: SYMBOLIC VECTOR ::'
+    print(':: SYMBOLIC VECTOR ::')
     var = CodegenVectorClBuiltin('gid','int',3, tg)
-    print var()
-    for i in xrange(var.dim):
-        print var[i]
-    print var[:]
-    print var.declare()
-    print
-    
+    print(var())
+    for i in range(var.dim):
+        print(var[i])
+    print(var[:])
+    print(var.declare())
+    print()
+
     # change access mode
-    print ':: ACCESS MODES ::'
+    print(':: ACCESS MODES ::')
     var = CodegenVectorClBuiltin('ids','int',4, tg, access_mode='pos')
-    print var[::2]
+    print(var[::2])
     var = CodegenVectorClBuiltin('ids','int',16, tg, access_mode='hex')
-    print var[::2]
+    print(var[::2])
     var = CodegenVectorClBuiltin('ids','int',16, tg,  access_mode='HEX')
-    print var[::2]
-    print
-    
+    print(var[::2])
+    print()
+
     # compilation time known variable
-    print ':: KNOWN VECTOR ::'
+    print(':: KNOWN VECTOR ::')
     var = CodegenVectorClBuiltin('lid','int',3, tg, value=(256,512,1024))
-    print var()
-    for i in xrange(var.dim):
-        print var[i]
-    print var[1:3]
-    print var.declare(const=True)
-    print
-   
+    print(var())
+    for i in range(var.dim):
+        print(var[i])
+    print(var[1:3])
+    print(var.declare(const=True))
+    print()
+
     #force the use of symbolic value
-    print ':: FORCE SYMBOLIC ACCESS ::'
+    print(':: FORCE SYMBOLIC ACCESS ::')
     var = CodegenVectorClBuiltin('id','int',4, tg, value=(1,2,3,4))
-    print var.declare()
+    print(var.declare())
     var.force_symbolic()
-    print var[:]
+    print(var[:])
     var.force_symbolic(False)
-    print var[:]
-    print
-    
+    print(var[:])
+    print()
+
     # default decimal float dumper
-    print ':: DEFAULT FLOAT DUMPER ::'
+    print(':: DEFAULT FLOAT DUMPER ::')
     var = CodegenVectorClBuiltin('size','float',4, tg, value=(1.0,2.0,4.0,8.0))
-    print var()
-    for i in xrange(var.dim):
-        print var[i]
-    print var[1:3]
-    print var.declare()
-    print
-    
+    print(var())
+    for i in range(var.dim):
+        print(var[i])
+    print(var[1:3])
+    print(var.declare())
+    print()
+
     # hexadecimal deciml float dumper
-    print ':: HEXADECIMAL FLOAT DUMPER ::'
+    print(':: HEXADECIMAL FLOAT DUMPER ::')
     var = CodegenVectorClBuiltin('size_hex','float',4, value=(1.0,2.0,4.0,8.0),
                                  typegen=test_typegen('float',float_dump_mode='hex'))
-    print var()
-    for i in xrange(var.dim):
-        print var[i]
-    print var[1:3]
-    print var.declare()
-    print
+    print(var())
+    for i in range(var.dim):
+        print(var[i])
+    print(var[1:3])
+    print(var.declare())
+    print()
 
     # bultin opencl functions
     for fname,name in [('global_size','gsize'), ('local_size','lsize'), ('global_id','gid'), ('local_id','lid')]:
         var = CodegenVectorClBuiltinFunc(fname,name,'int',3, tg)
-        print var.declare()
+        print(var.declare())
 
 
diff --git a/hysop/backend/device/codegen/functions/advection_rhs.py b/hysop/backend/device/codegen/functions/advection_rhs.py
index 08ce04c49eb2556daae7b9839202977cabd3f8ca..cfc40dd2655db032458d5c96231156ad5c611b32 100644
--- a/hysop/backend/device/codegen/functions/advection_rhs.py
+++ b/hysop/backend/device/codegen/functions/advection_rhs.py
@@ -17,25 +17,25 @@ from hysop.numerics.stencil.stencil import Stencil
 # with finite difference centred stencil of given order
 class DirectionalAdvectionRhsFunction(OpenClFunctionCodeGenerator):
 
-    def __init__(self, typegen, ftype, work_dim, nparticles, is_cached, boundary, 
+    def __init__(self, typegen, ftype, work_dim, nparticles, is_cached, boundary,
             relative_velocity,
-            ptr_restrict=True, 
-            itype='int', 
+            ptr_restrict=True,
+            itype='int',
             known_args=None,
             field_infos=None):
-        
+
         assert work_dim>=1 and work_dim<=3
         check_instance(boundary,BoundaryCondition)
         assert nparticles  in [1,2,4,8,16]
         assert isinstance(relative_velocity, (float,str))
-        
+
         is_periodic = (boundary==BoundaryCondition.PERIODIC)
 
         if is_cached:
             storage='__local'
         else:
             storage='__global'
-        
+
         vtype = typegen.vtype(ftype,nparticles)
 
         (args,basename) = self.build_prototype(typegen,work_dim,itype,ftype,vtype,
@@ -43,16 +43,16 @@ class DirectionalAdvectionRhsFunction(OpenClFunctionCodeGenerator):
 
         reqs = self.build_requirements(typegen,work_dim,itype,ftype,vtype,
                 nparticles,ptr_restrict,storage,is_cached, is_periodic)
-        
+
         super(DirectionalAdvectionRhsFunction,self).__init__(basename=basename,
                 output=vtype,typegen=typegen,inline=True,
                 args=args, known_args=known_args)
 
         self.update_requirements(reqs)
-         
+
         self.work_dim = work_dim
 
-        self.itype = itype 
+        self.itype = itype
         self.ftype = ftype
         self.vtype = vtype
 
@@ -62,34 +62,34 @@ class DirectionalAdvectionRhsFunction(OpenClFunctionCodeGenerator):
         self.is_cached  = is_cached
         self.is_periodic = is_periodic
         self.relative_velocity = relative_velocity
-        
+
         self.gencode()
 
     def build_prototype(self, typegen, work_dim, itype, ftype, vtype,
             nparticles, ptr_restrict, storage, is_cached, is_periodic, field_infos):
 
         args = ArgDict()
-        
-        args['line_velocity'] = CodegenVariable('line_velocity', ftype, typegen, const=True, 
+
+        args['line_velocity'] = CodegenVariable('line_velocity', ftype, typegen, const=True,
                 add_impl_const=True, storage=storage, ptr=True, ptr_restrict=ptr_restrict,
                 nl=True)
-        args['X'] = CodegenVectorClBuiltin('X', ftype, nparticles, typegen, add_impl_const=True, 
+        args['X'] = CodegenVectorClBuiltin('X', ftype, nparticles, typegen, add_impl_const=True,
                                             nl=True)
-        
+
         if is_periodic and (not is_cached):
             args['line_width']   = CodegenVariable('line_width', itype, typegen,
                     add_impl_const=True)
-        args['line_offset']   = CodegenVariable('line_offset',  itype, typegen, 
+        args['line_offset']   = CodegenVariable('line_offset',  itype, typegen,
                     add_impl_const=True, nl=True)
 
         # args['rk_step'] = CodegenVariable('rk_step', itype, typegen)
         args['inv_dx'] = CodegenVariable('inv_dx', ftype, typegen, add_impl_const=True, nl=True)
         # args['active'] = CodegenVariable('active','bool',typegen, add_impl_const=True)
-        
+
         if (field_infos is not None):
-            args['field_infos'] = field_infos.pointer(name='field_infos', ptr_level=1, 
+            args['field_infos'] = field_infos.pointer(name='field_infos', ptr_level=1,
                     const=True, ptr_const=True, nl=True)
-        
+
         cached   = 'cached_' if is_cached else ''
         basename = 'advection_rhs_{}{}{}p'.format(cached, ftype[0], nparticles)
 
@@ -106,26 +106,26 @@ class DirectionalAdvectionRhsFunction(OpenClFunctionCodeGenerator):
         work_dim = s.work_dim
 
         nparticles = s.nparticles
-        
+
         is_cached   = s.is_cached
         is_periodic = s.is_periodic
 
         itype = s.itype
         ftype = s.ftype
         vtype = s.vtype
-        
+
         position = s.args['X']
         velocity = s.args['line_velocity']
-        
+
         line_offset = s.args['line_offset']
         if not is_cached and is_periodic:
             line_width = s.args['line_width']
 
         inv_dx       = s.args['inv_dx']
         # rk_step      = s.args['rk_step']
-        
+
         dX_dt  = CodegenVectorClBuiltin('dX_dt', ftype, nparticles, tg)
-        
+
         pos   = CodegenVectorClBuiltin('pos',   ftype, nparticles, tg)
         lidx  = CodegenVectorClBuiltin('lidx',  itype, nparticles, tg)
         idx   = CodegenVectorClBuiltin('idx',   itype, nparticles, tg)
@@ -135,7 +135,7 @@ class DirectionalAdvectionRhsFunction(OpenClFunctionCodeGenerator):
         Vr    = CodegenVectorClBuiltin('Vr',    ftype, nparticles, tg)
 
         relative_velocity = self.relative_velocity
-        if isinstance(relative_velocity, float): 
+        if isinstance(relative_velocity, float):
             Vrel = CodegenVectorClBuiltin('Vrel',  ftype, nparticles, tg,
                     const=True, value=(relative_velocity,)*nparticles)
         elif isinstance(relative_velocity, str):
@@ -148,7 +148,7 @@ class DirectionalAdvectionRhsFunction(OpenClFunctionCodeGenerator):
 
         part_ftype = Vl.ctype
         part_itype = lidx.ctype
-        
+
         with s._function_():
             s.jumpline()
 
@@ -161,14 +161,14 @@ class DirectionalAdvectionRhsFunction(OpenClFunctionCodeGenerator):
             Vrel.declare(s)
             idx.declare(s)
             s.jumpline()
-            
+
             # /!\
-            # DEPENDING ON USED PRECISION, ASSUMING alpha = 0.0 is not TRUE 
+            # DEPENDING ON USED PRECISION, ASSUMING alpha = 0.0 is not TRUE
             # for the first step, so we interpolate even on the first step
 
             # with s._if_('{} == 0'.format(rk_step())):
                 # s.append('{} = {}-{};'.format(idx(), lidx(), line_offset()))
-                # reads = [velocity[idx[i] ] for i in xrange(nparticles)]
+                # reads = [velocity[idx[i] ] for i in range(nparticles)]
                 # init = '({})({})'.format(part_ftype,', '.join(reads))
                 # s.append('printf("X=%f => alpha=%f, idx=%i, V=%f\\n", X, alpha, idx, {});'.format(velocity[idx[0]]))
 
@@ -179,7 +179,7 @@ class DirectionalAdvectionRhsFunction(OpenClFunctionCodeGenerator):
                 s.append('{lidx} = ({lidx}+{size})%{size};'.format(lidx=lidx(),
                     size=line_width()))
             s.append('{} = {}-{};'.format(idx(), lidx(), line_offset()))
-            reads = [velocity[idx[i] ] for i in xrange(nparticles)]
+            reads = [velocity[idx[i] ] for i in range(nparticles)]
             init = '({})({})'.format(part_ftype,', '.join(reads))
             Vl.declare(s,init=init)
             if is_periodic and not is_cached:
@@ -191,11 +191,11 @@ class DirectionalAdvectionRhsFunction(OpenClFunctionCodeGenerator):
             s.append('{} = mix({},{},{}) - {};'.format(dX_dt(), Vl(), Vr(), alpha(), Vrel))
 
             s.append('return {};'.format(dX_dt()))
-    
+
 if __name__ == '__main__':
 
     from hysop.backend.device.codegen.base.test import _test_typegen
-    
+
     tg = _test_typegen('float')
     asf = DirectionalAdvectionRhsFunction(tg, 'float', 3, 4, False,
             BoundaryCondition.PERIODIC, 0.66)
diff --git a/hysop/backend/device/codegen/functions/apply_stencil.py b/hysop/backend/device/codegen/functions/apply_stencil.py
index 88b9dfd59c012dd6979dcfe859ee58972c7b306e..614d1363f35d5fba552a04d4928d148ece324cca 100644
--- a/hysop/backend/device/codegen/functions/apply_stencil.py
+++ b/hysop/backend/device/codegen/functions/apply_stencil.py
@@ -1,5 +1,5 @@
+import sympy as sm
 
-from hysop.deps import sm
 from hysop.tools.types import check_instance, first_not_None
 from hysop.backend.device.codegen.base.opencl_codegen   import OpenClCodeGenerator
 from hysop.backend.device.codegen.base.function_codegen import OpenClFunctionCodeGenerator
@@ -11,7 +11,7 @@ from hysop.backend.device.codegen.base.statistics import WorkStatistics
 from hysop.numerics.stencil.stencil import Stencil
 
 class ApplyStencilFunction(OpenClFunctionCodeGenerator):
-    
+
     def __init__(self,typegen,stencil,ftype,
             symbol2vars=None,
             components=1, vectorize=True,
@@ -23,24 +23,24 @@ class ApplyStencilFunction(OpenClFunctionCodeGenerator):
             vector_suffixes=None,
             data_storage='__local', ptr_restrict=True,
             multipliers={},
-            itype='int', 
+            itype='int',
             known_args=None):
-         
+
         check_instance(stencil, Stencil)
         check_instance(symbol2vars, dict, keys=sm.Symbol, values=CodegenVariable, allow_none=True)
         symbol2vars = first_not_None(symbol2vars, {})
         assert set(symbol2vars.keys())==stencil.variables()
 
-        extra_inputs  = set(extra_inputs + symbol2vars.values())
+        extra_inputs  = set(extra_inputs).union(symbol2vars.values())
         scalar_inputs = set(scalar_inputs)
         vector_inputs = set(vector_inputs)
 
         dim = stencil.dim
-        
+
         vtype = typegen.vtype(ftype,components)
 
         if (vector_suffixes is None):
-            vector_suffixes = range(components)
+            vector_suffixes = tuple(range(components))
 
         has_custom_id = (custom_id is not None)
 
@@ -48,32 +48,32 @@ class ApplyStencilFunction(OpenClFunctionCodeGenerator):
         for iname in vector_inputs:
             if vectorize:
                 name = iname
-                args[name] = CodegenVariable(name, vtype, typegen, const=True, 
+                args[name] = CodegenVariable(name, vtype, typegen, const=True,
                     add_impl_const=True, storage=data_storage, ptr=True, ptr_restrict=ptr_restrict)
             else:
-                for i in xrange(components):
+                for i in range(components):
                     name = '{}{}'.format(iname,vector_suffixes[i])
-                    args[name] = CodegenVariable(name, ftype, typegen, 
-                            const=True, add_impl_const=True, storage=data_storage, 
+                    args[name] = CodegenVariable(name, ftype, typegen,
+                            const=True, add_impl_const=True, storage=data_storage,
                             ptr=True, ptr_restrict=ptr_restrict)
         for iname in scalar_inputs:
             name = iname
-            args[name] = CodegenVariable(name, ftype, typegen, const=True, 
+            args[name] = CodegenVariable(name, ftype, typegen, const=True,
                     add_impl_const=True, storage=data_storage, ptr=True, ptr_restrict=ptr_restrict)
         for arg in extra_inputs:
             args[arg.name] = arg
-        
+
         if not has_custom_id:
             args['offset']  = CodegenVariable('offset', itype, typegen, add_impl_const=True)
-            args['stride']  = CodegenVectorClBuiltin('stride', itype, dim, typegen, 
+            args['stride']  = CodegenVectorClBuiltin('stride', itype, dim, typegen,
                     add_impl_const=True,nl=True)
-        for varname,vartype in multipliers.iteritems():
+        for varname,vartype in multipliers.items():
             if vartype=='ftype':
                 vartype=ftype
             elif vartype=='vtype':
                 vartype=vtype
             args[varname] = CodegenVariable(varname, vartype, typegen, add_impl_const=True)
-        
+
         _type = '{}{}'.format(ftype[0],components)
         if vectorize:
             _type+='v'
@@ -108,7 +108,7 @@ class ApplyStencilFunction(OpenClFunctionCodeGenerator):
             return False
         else:
             raise NotImplemented()
-    
+
     def per_work_statistics(self):
         tg         = self.typegen
         itype      = self.itype
@@ -145,22 +145,22 @@ class ApplyStencilFunction(OpenClFunctionCodeGenerator):
 
         components = s.components
         vectorized = s.vectorized
-        
+
         has_custom_id = s.has_custom_id
-        
+
         vector_suffixes = s.vector_suffixes
 
         ftype = s.ftype
         vtype = s.vtype
         stencil = s.stencil
-        
+
         res = CodegenVectorClBuiltin('res',ftype,components,tg)
-        
+
         with s._function_():
             s.append(res.declare(init=0))
             with s._block_():
                 with s._align_() as al:
-                    for i in xrange(components):
+                    for i in range(components):
                         if i>0:
                             al.jumpline()
                         operands = {}
@@ -168,7 +168,7 @@ class ApplyStencilFunction(OpenClFunctionCodeGenerator):
                             vector_varnames = s.vector_inputs
                             _res = res()
                         else:
-                            vector_varnames = ['{}{}'.format(varname,vector_suffixes[i]) 
+                            vector_varnames = ['{}{}'.format(varname,vector_suffixes[i])
                                                     for varname in s.vector_inputs]
                             _res = res[i]
                         for j,vn in enumerate(vector_varnames):
@@ -182,7 +182,7 @@ class ApplyStencilFunction(OpenClFunctionCodeGenerator):
                                 offset = s.vars['offset']
                                 stride = s.vars['stride']
                                 strided=''
-                                for i in xrange(dim): 
+                                for i in range(dim):
                                     if stride.known() and stride.value[0] == 1:
                                         strided+='{}'.format(tg.dump(off[i]))
                                     else:
@@ -195,7 +195,7 @@ class ApplyStencilFunction(OpenClFunctionCodeGenerator):
                             al.append(code)
                         if vectorized:
                             break
-    
+
             mul = ''
             for mult in s.multipliers:
                 mul+='{}*'.format(s.vars[mult]())
@@ -205,19 +205,19 @@ class ApplyStencilFunction(OpenClFunctionCodeGenerator):
             ret = 'return {}{};'.format(mul,res())
             s.append(ret)
 
-           
+
 if __name__ == '__main__':
 
     from hysop.backend.device.codegen.base.test import _test_typegen
-    
+
     stencil = Stencil([2.0,1.0,0.0,-1.0,-2.0], origin=2, order=2)
 
     tg = _test_typegen('double', float_dump_mode='hex')
-    asf = ApplyStencilFunction(tg,stencil,ftype=tg.fbtype, 
-            components=3, vectorize=False, data_storage='__local', 
+    asf = ApplyStencilFunction(tg,stencil,ftype=tg.fbtype,
+            components=3, vectorize=False, data_storage='__local',
             scalar_inputs = ['S'], vector_inputs=['A','B'], vector_suffixes=['x','y','z'],
             op='{sinput0}[{id}] * ({vinput0}[{id}] + {vinput1}[{id}])',
             multipliers={'a':'int','b':'float','c':'ftype','d':'vtype'})
-    print asf.per_work_statistics()
+    print(asf.per_work_statistics())
     asf.edit()
 
diff --git a/hysop/backend/device/codegen/functions/cache_load.py b/hysop/backend/device/codegen/functions/cache_load.py
index 58d9bb1651526cecbb1584f7e2ac3ca3fc7eb10c..f54332125ef534efe23458e1085b9601afecfecf 100644
--- a/hysop/backend/device/codegen/functions/cache_load.py
+++ b/hysop/backend/device/codegen/functions/cache_load.py
@@ -3,6 +3,7 @@ import contextlib
 from contextlib import contextmanager
 
 from hysop.tools.types import check_instance
+from hysop.tools.contexts import nested
 from hysop.backend.device.codegen.base.opencl_codegen import OpenClCodeGenerator
 from hysop.backend.device.codegen.base.function_codegen import OpenClFunctionCodeGenerator
 from hysop.backend.device.codegen.base.variables  import CodegenVariable, CodegenVectorClBuiltin
@@ -14,7 +15,7 @@ from hysop.backend.device.codegen.functions.compute_index import ComputeIndexFun
 from hysop.constants import BoundaryCondition
 
 class CacheLoadFunction(OpenClFunctionCodeGenerator):
-    
+
     arguments = {
             'src':'source data',
             'dst':'destination buffer',
@@ -24,19 +25,19 @@ class CacheLoadFunction(OpenClFunctionCodeGenerator):
             'data_size':'size of source data'
             }
 
-    def __init__(self, typegen, ftype, work_dim, boundary, 
+    def __init__(self, typegen, ftype, work_dim, boundary,
             components=1, src_vectorize=True, dst_vectorize=True,
-            itype='int', 
+            itype='int',
             with_gid_ghost_offset=True,
             with_gid_bound_check=True,
             known_args=None,force_symbolic=False):
 
         assert work_dim>0
-        
+
         check_instance(boundary,BoundaryCondition)
         if boundary not in [BoundaryCondition.NONE, BoundaryCondition.PERIODIC]:
             raise NotImplemented('Boundary \'{}\' not implemented yet!'.format(str(boundary).lower()))
-        
+
         tg = typegen
         fs = force_symbolic
         vtype = tg.vtype(ftype,components)
@@ -44,23 +45,23 @@ class CacheLoadFunction(OpenClFunctionCodeGenerator):
         if boundary != BoundaryCondition.NONE:
             name+='_{}'.format(str(boundary).lower())
         output = 'void'
-        
+
         args = ArgDict()
         if src_vectorize:
-            args['src'] = CodegenVariable('src', vtype, typegen=tg, ptr=True, const=True,  
+            args['src'] = CodegenVariable('src', vtype, typegen=tg, ptr=True, const=True,
                     storage='__global', nl=True, ptr_restrict=True)
         else:
-            for i in xrange(components):
+            for i in range(components):
                 src = 'src{}'.format(i)
-                args[src] = CodegenVariable(src, ftype, typegen=tg, ptr=True, const=True, 
+                args[src] = CodegenVariable(src, ftype, typegen=tg, ptr=True, const=True,
                         storage='__global', nl=True, ptr_restrict=True)
         if dst_vectorize:
-            args['dst'] = CodegenVariable('dst', vtype, typegen=tg, ptr=True, const=False, 
+            args['dst'] = CodegenVariable('dst', vtype, typegen=tg, ptr=True, const=False,
                     storage='__local' , nl=True, ptr_restrict=True)
         else:
-            for i in xrange(components):
+            for i in range(components):
                 dst = 'dst{}'.format(i)
-                args[dst] = CodegenVariable(dst, ftype, typegen=tg, ptr=True, const=False, 
+                args[dst] = CodegenVariable(dst, ftype, typegen=tg, ptr=True, const=False,
                         storage='__local' , nl=True, ptr_restrict=True)
 
         args['global_id']  = CodegenVectorClBuiltin('gid',itype,work_dim,typegen,
@@ -112,7 +113,7 @@ class CacheLoadFunction(OpenClFunctionCodeGenerator):
 
         with_gid_ghost_offset = self.with_gid_ghost_offset
         with_gid_bound_check  = self.with_gid_bound_check
-        
+
         global_id  = s.args['global_id'];
         local_id   = s.args['local_id'];
 
@@ -122,19 +123,19 @@ class CacheLoadFunction(OpenClFunctionCodeGenerator):
         lghosts    = s.args['lghosts'];
         rghosts    = s.args['rghosts'];
         multiplier = s.args['multiplier'];
-        
+
         cache_size = CodegenVectorClBuiltin('S'  ,itype,work_dim,tg,const=True)
-        local_pos  = CodegenVectorClBuiltin('lidx',itype,work_dim,tg) 
-        global_pos = CodegenVectorClBuiltin('gidx',itype,work_dim,tg) 
+        local_pos  = CodegenVectorClBuiltin('lidx',itype,work_dim,tg)
+        global_pos = CodegenVectorClBuiltin('gidx',itype,work_dim,tg)
 
         LID = CodegenVariable('LID',itype, tg)
         GID = CodegenVariable('GID',itype, tg)
 
         tmp = CodegenVectorClBuiltin('tmp', ftype, components, tg)
-                
+
         compute_index = ComputeIndexFunction(tg,work_dim,wrap=False)
         s.require('compute_index',compute_index)
-                
+
         with s._function_():
             s.jumpline()
             s.append(cache_size.declare(init='{}+{}*{}+{}'.format(lghosts(),multiplier(),
@@ -143,14 +144,14 @@ class CacheLoadFunction(OpenClFunctionCodeGenerator):
             s.append(global_pos.declare())
             s.append(local_pos.declare())
             s.jumpline()
-            
+
             if boundary == BoundaryCondition.PERIODIC:
                 if with_gid_ghost_offset:
                     s.append('{} += {}-{};'.format(global_id(),src_size(),lghosts()))
                 else:
                     s.append('{} += {};'.format(global_id(),src_size()))
                 s.append('{} %= {};'.format(global_id(), src_size()))
-            
+
             @contextmanager
             def _cache_iterate_(i):
                 try:
@@ -175,43 +176,43 @@ class CacheLoadFunction(OpenClFunctionCodeGenerator):
                         yield ctx
                 except:
                     raise
-            
+
             s.barrier(_local=True)
             with s._block_():
-                nested_loops = [_cache_iterate_(i) for i in xrange(work_dim-1,-1,-1)]
-                with contextlib.nested(*nested_loops):
+                nested_loops = [_cache_iterate_(i) for i in range(work_dim-1, -1, -1)]
+                with nested(*nested_loops):
                     with s._block_():
                         s.append(LID.declare(const=True,init=compute_index(idx=local_pos,
                             size=cache_size[:work_dim])))
                         s.append(GID.declare(const=True,init=compute_index(idx=global_pos,
                             size=src_size[:work_dim])))
-                        
+
                         if src_vectorize:
                             load=s.vars['src'][GID()]
                         else:
                             load = ','.join([s.vars['src{}'.format(i)][GID()] \
-                                    for i in xrange(components)])
+                                    for i in range(components)])
                             load = '({})({})'.format(vtype,load)
                         tmp.declare(s,init=load,const=True)
                         if dst_vectorize:
                             store='{} = {};'.format(s.vars['dst'][LID()], tmp())
                             s.append(store)
                         else:
-                            for i in xrange(components):
+                            for i in range(components):
                                 store='{} = {};'.format(s.vars['dst{}'.format(i)][LID()], tmp[i])
                                 s.append(store)
 
-                        
+
             s.barrier(_local=True)
-        
-        
-    def per_work_statistics(self): 
+
+
+    def per_work_statistics(self):
         typegen = self.typegen
         itype = self.itype
         ftype = self.ftype
         work_dim   = self.work_dim
         components = self.components
-    
+
         size  = typegen.FLT_BYTES[ftype]
         reads  = components*size
         writes = reads
@@ -231,7 +232,7 @@ class CacheLoadFunction(OpenClFunctionCodeGenerator):
 
 
 
-           
+
 if __name__ == '__main__':
     from hysop.backend.device.codegen.base.test import _test_typegen
 
@@ -243,9 +244,9 @@ if __name__ == '__main__':
     cache_load1 = CacheLoadFunction(typegen=tg,ftype=tg.fbtype,work_dim=3, boundary=BoundaryCondition.PERIODIC,
             components=4, src_vectorize=False, dst_vectorize=True)
 
-    print cache_load0.per_work_statistics()
-    print
-    print cache_load1.per_work_statistics()
+    print(cache_load0.per_work_statistics())
+    print()
+    print(cache_load1.per_work_statistics())
 
     cg.require('cl0',cache_load0)
     #cg.require('cl1',cache_load1)
diff --git a/hysop/backend/device/codegen/functions/complex.py b/hysop/backend/device/codegen/functions/complex.py
index ef71208dc46d2e94a2b0d6fd6c5acfb3616ec9e3..1225b48e3698c94801c0d4ec183c6c946f3d3466 100644
--- a/hysop/backend/device/codegen/functions/complex.py
+++ b/hysop/backend/device/codegen/functions/complex.py
@@ -1,5 +1,7 @@
 from abc import ABCMeta, abstractmethod
-from hysop.deps import sm, np
+import sympy as sm
+import numpy as np
+
 from hysop.tools.types import check_instance
 from hysop.backend.device.codegen.base.opencl_codegen   import OpenClCodeGenerator
 from hysop.backend.device.codegen.base.function_codegen import OpenClFunctionCodeGenerator
@@ -8,18 +10,16 @@ from hysop.backend.device.codegen.base.utils            import WriteOnceDict, Ar
 from hysop.backend.device.codegen.base.statistics       import WorkStatistics
 from hysop.backend.device.opencl.opencl_types           import OpenClTypeGen, basetype
 
-class OpenClComplexOperator(OpenClFunctionCodeGenerator):
+class OpenClComplexOperator(OpenClFunctionCodeGenerator, metaclass=ABCMeta):
 
-    __metaclass__ = ABCMeta
-    
     def __init__(self, typegen, ftype, vectorization, output=None, known_args=None):
 
         assert vectorization in (1,2,4,8)
-        
+
         ftype = basetype(ftype)
         vtype = typegen.vtype(ftype, 2*vectorization)
         rtype = typegen.vtype(ftype, 1*vectorization)
-        
+
         args   = self.generate_arguments(ftype, vtype, rtype, vectorization, typegen)
         reqs   = self.generate_requirements(ftype, vtype, rtype, vectorization, typegen, args)
         output = self.determine_output_ctype(ftype, vtype, rtype, vectorization, typegen, args, reqs)
@@ -29,14 +29,14 @@ class OpenClComplexOperator(OpenClFunctionCodeGenerator):
                 args=args, known_args=known_args)
 
         self.update_requirements(reqs)
-        
+
         self.ftype = ftype
         self.vtype = vtype
         self.rtype = rtype
         self.vectorization = vectorization
-        
+
         self.gencode()
-        
+
     def generate_arguments(self, ftype, vtype, rtype, vectorization, typegen):
         return ArgDict()
 
@@ -50,7 +50,7 @@ class OpenClComplexOperator(OpenClFunctionCodeGenerator):
     def gencode(self):
         pass
 
-        
+
 class OpenClComplexUnaryOperator(OpenClComplexOperator):
 
     def generate_arguments(self, ftype, vtype, rtype, vectorization, typegen):
@@ -58,7 +58,7 @@ class OpenClComplexUnaryOperator(OpenClComplexOperator):
                 vtype=vtype, rtype=rtype, vectorization=vectorization, typegen=typegen)
         args['a'] = CodegenVectorClBuiltin('a', ftype, 2*vectorization, typegen, add_impl_const=True)
         return args
-    
+
     def get_attrs(self):
         return (self, self.typegen, self.vectorization, self.ftype,
                 self.vtype, self.rtype, self.args['a'])
@@ -86,7 +86,7 @@ class OpenClComplexMul(OpenClComplexBinaryOperator):
         with s._function_():
             with s._align_() as al:
                 init = ''
-                for i in xrange(vec):
+                for i in range(vec):
                     real = '{}*{}-{}*{}'.format(lhs[2*i],   rhs[2*i],
                                                 lhs[2*i+1], rhs[2*i+1])
                     imag = '{}*{}+{}*{}'.format(lhs[2*i],   rhs[2*i+1],
@@ -101,13 +101,13 @@ class OpenClComplexModulus2(OpenClComplexUnaryOperator):
 
     def determine_output_ctype(self, ftype, vtype, rtype, vectorization, typegen, args, reqs):
         return rtype
-    
+
     def gencode(self):
         (s, tg, vec, ftype, vtype, rtype, a) = self.get_attrs()
         with s._function_():
             with s._align_() as al:
                 init = ''
-                for i in xrange(vec):
+                for i in range(vec):
                     real = '{}*{}+{}*{}'.format(a[2*i],   a[2*i],
                                                 a[2*i+1], a[2*i+1])
                     init += ',\n${}'.format(real)
diff --git a/hysop/backend/device/codegen/functions/compute_index.py b/hysop/backend/device/codegen/functions/compute_index.py
index 2208ac3219d8126f4cb8f5fcbfa14e20165ce5c7..f349df121e38abfa1f7112550736db21b61ee05c 100644
--- a/hysop/backend/device/codegen/functions/compute_index.py
+++ b/hysop/backend/device/codegen/functions/compute_index.py
@@ -8,7 +8,7 @@ from hysop.backend.device.codegen.base.utils      import ArgDict
 from hysop.backend.device.codegen.base.statistics import WorkStatistics
 
 class ComputeIndexFunction(OpenClFunctionCodeGenerator):
-    
+
     def __init__(self,typegen,dim,wrap=None,itype='int'):
         assert dim>0
         args = ArgDict()
@@ -36,7 +36,7 @@ class ComputeIndexFunction(OpenClFunctionCodeGenerator):
             with s._if_(wrap()):
                 self.append('{idx} = ({idx}+{size}) % {size};'.format(idx=idx(),size=size))
             ss = '{}'.format(idx[dim-1])
-            for i in xrange(dim-2,-1,-1):
+            for i in range(dim-2, -1, -1):
                 ss = '({}*{}+{})'.format(ss,size[i],idx[i])
             ret = 'return {};'.format(ss)
             self.append(ret)
@@ -45,7 +45,7 @@ class ComputeIndexFunction(OpenClFunctionCodeGenerator):
         dim   = self.dim
         wrap  = self.wrap
         itype = self.itype
-        
+
         ops = {}
         ops[itype] = int(wrap)*2*dim + 2*(dim-1)*dim
 
@@ -54,12 +54,12 @@ class ComputeIndexFunction(OpenClFunctionCodeGenerator):
 
         return stats
 
-           
+
 if __name__ == '__main__':
     from hysop.backend.device.codegen.base.test import test_typegen
     tg = test_typegen('float')
     cg = OpenClCodeGenerator('main',tg)
-    
+
     ci0 = ComputeIndexFunction(tg,3,wrap=None)
     ci1 = ComputeIndexFunction(tg,3,wrap=True)
     ci2 = ComputeIndexFunction(tg,3,wrap=False)
diff --git a/hysop/backend/device/codegen/functions/directional_remesh.py b/hysop/backend/device/codegen/functions/directional_remesh.py
index 68defa89f3af24678e34b3f65f4853c580d8cf5f..4e249a725182e07aa27d219683309eb626c8dd9b 100644
--- a/hysop/backend/device/codegen/functions/directional_remesh.py
+++ b/hysop/backend/device/codegen/functions/directional_remesh.py
@@ -1,4 +1,7 @@
-from hysop.deps import sm, np, contextlib
+import contextlib
+import numpy as np
+import sympy as sm
+
 from hysop.tools.types import check_instance, first_not_None
 from hysop.backend.device.opencl.opencl_types import OpenClTypeGen
 
@@ -30,7 +33,7 @@ class DirectionalRemeshFunction(OpenClFunctionCodeGenerator):
 
         check_instance(sboundary, tuple, values=BoundaryCondition)
         check_instance(remesh_kernel, (RemeshKernel, Kernel))
-        assert remesh_kernel.n % 2 == 0 or remesh_kernel.n == 1
+        assert (remesh_kernel.n % 2 == 0) or (remesh_kernel.n == 1)
         assert remesh_kernel.n > 0
 
         use_short_circuit = first_not_None(use_short_circuit, typegen.use_short_circuit_ops)
@@ -102,7 +105,7 @@ class DirectionalRemeshFunction(OpenClFunctionCodeGenerator):
                                            add_impl_const=True, nl=True)
 
         scalars = []
-        for i in xrange(nscalars):
+        for i in range(nscalars):
             Si = CodegenVectorClBuiltin(name='s{}'.format(i), btype=ftype, dim=nparticles, typegen=typegen,
                                         add_impl_const=True, nl=(i == nscalars-1))
             scalars.append(Si)
@@ -125,7 +128,7 @@ class DirectionalRemeshFunction(OpenClFunctionCodeGenerator):
                                          add_impl_const=True, nl=True)
 
         cached_scalars = []
-        for i in xrange(nscalars):
+        for i in range(nscalars):
             Si = CodegenVariable(name='S{}'.format(i), ctype=ftype, typegen=typegen,
                                  ptr_restrict=True, ptr=True, storage=self._local,
                                  ptr_const=False, add_impl_const=True, nl=True)
@@ -217,12 +220,12 @@ class DirectionalRemeshFunction(OpenClFunctionCodeGenerator):
 
         lb = '[' if (nparticles > 1) else ''
         rb = ']' if (nparticles > 1) else ''
-        vnf = '{}{}{}'.format(lb, ', '.join('%2.2f' for _ in xrange(nparticles)), rb)
-        vni = '{}{}{}'.format(lb, ', '.join('%i' for _ in xrange(nparticles)), rb)
+        vnf = '{}{}{}'.format(lb, ', '.join('%2.2f' for _ in range(nparticles)), rb)
+        vni = '{}{}{}'.format(lb, ', '.join('%i' for _ in range(nparticles)), rb)
 
         def expand_printf_vector(x): return str(x) if (nparticles == 1) else ','.join(
             '({}).s{}'.format(x, '0123456789abcdef'[i])
-            if isinstance(x, str) else x[i] for i in xrange(nparticles))
+            if isinstance(x, str) else x[i] for i in range(nparticles))
         epv = expand_printf_vector
 
         @contextlib.contextmanager
@@ -236,7 +239,7 @@ class DirectionalRemeshFunction(OpenClFunctionCodeGenerator):
         dtype = tg.np_dtype(ftype)
 
         P = CodegenVariable(name='P', ctype=itype, typegen=tg,
-                            const=True, value=1+(self.kernel.n/2))
+                            const=True, value=1+(self.kernel.n//2))
         eps = CodegenVariable(name='eps', ctype=ftype, typegen=tg,
                               const=True, value=np.finfo(dtype).eps)
 
@@ -320,7 +323,7 @@ class DirectionalRemeshFunction(OpenClFunctionCodeGenerator):
                             s.comment(comment)
                         criterias = []
                         with s._block_():
-                            for ipart in xrange(nparticles):
+                            for ipart in range(nparticles):
                                 cache_idx = '{}+{}'.format(cache_ghosts, ind[ipart])
                                 val = '{}*{}'.format(w[ipart], scalar[ipart])
                                 if (remesh_criteria_eps is not None):
diff --git a/hysop/backend/device/codegen/functions/gradient.py b/hysop/backend/device/codegen/functions/gradient.py
index 876e5814de01ba90868faad128bb47bab14f6357..85a1d88d9580f5c338483f853c4de8a6b4166433 100644
--- a/hysop/backend/device/codegen/functions/gradient.py
+++ b/hysop/backend/device/codegen/functions/gradient.py
@@ -14,10 +14,10 @@ from hysop.backend.device.codegen.functions.cache_load    import CacheLoadFuncti
 from hysop.backend.device.codegen.functions.apply_stencil import ApplyStencilFunction
 
 class GradientFunction(OpenClFunctionCodeGenerator):
-    
+
     def __init__(self,typegen,dim,order,
             itype='int',cached=True, known_args=None):
-        
+
         assert dim>0
         fbtype = typegen.fbtype
         output = typegen.vtype(fbtype,dim)
@@ -38,7 +38,7 @@ class GradientFunction(OpenClFunctionCodeGenerator):
             basename += '_cached'
         super(GradientFunction,self).__init__(basename=basename,ext='.cl', output=output,
                 args=args,typegen=typegen,inline=True,known_args=known_args)
-       
+
         self.dim = dim
         self.cached = cached
 
@@ -47,17 +47,17 @@ class GradientFunction(OpenClFunctionCodeGenerator):
         self.gencode()
 
     def gen_stencil(self, order):
-        
+
         tg  = self.typegen
         dim = self.dim
         order = [order]*dim if np.isscalar(order) else order
-        
+
         S = []
         L = []
         R = []
         if np.isscalar(order):
             order = (order,)*dim
-        for i in xrange(dim):
+        for i in range(dim):
             o = order[i]
             assert o%2==0
             h = o//2
@@ -65,23 +65,23 @@ class GradientFunction(OpenClFunctionCodeGenerator):
             S.append(Si)
             L.append(h)
             R.append(h)
-            
+
         self.order = order
 
         lghosts = CodegenVectorClBuiltin('lghosts', 'int', dim, tg, const=True)
         rghosts = CodegenVectorClBuiltin('rghosts', 'int', dim, tg, const=True)
         self.known_vars.update(lghosts=L, rghosts=R)
         self.update_vars(lghosts=lghosts, rghosts=rghosts)
-        
+
         compute_index = ComputeIndexFunction(tg, dim, wrap=False, itype='int')
         self.require('compute_index', compute_index)
 
         if self.cached:
             cache_load = CacheLoadFunction(dtype=tg.fbtype,dim=dim,typegen=tg,boundary='periodic')
             self.require('cache_load', cache_load)
-        
+
         self.stencil_name = []
-        for i in xrange(dim):
+        for i in range(dim):
             Si = S[i]
             stencil = Stencil(Si.stencil, origin=Si.Lx, order=Si.order)
 
@@ -90,8 +90,8 @@ class GradientFunction(OpenClFunctionCodeGenerator):
             self.require(apply_stencil.name, apply_stencil)
             self.stencil_name.append(apply_stencil.name)
 
-        
-        
+
+
 
     def gencode(self):
         s = self
@@ -130,8 +130,8 @@ class GradientFunction(OpenClFunctionCodeGenerator):
             s.append(S.declare(init='{}+{}+{}'.format(lghosts(),local_size(),rghosts())))
             s.jumpline()
             if cached:
-                call = cache_load(src=field, dst=cache_buffer, 
-                        global_id=global_id, local_id=local_id, 
+                call = cache_load(src=field, dst=cache_buffer,
+                        global_id=global_id, local_id=local_id,
                         local_size=local_size, src_size=field_size,
                         lghosts=lghosts, rghosts=rghosts)
                 s.comment('Load field in local memory cache')
@@ -144,7 +144,7 @@ class GradientFunction(OpenClFunctionCodeGenerator):
                     idx='{}+{}'.format(local_id(),lghosts()),
                     size=S())))
                 s.append(stride.declare(init='1'))
-                for i in xrange(dim):
+                for i in range(dim):
                     F = self.reqs[self.stencil_name[i]]
                     call = F(data=cache_buffer, offset=offset, stride=stride)
                     code = '{} = {}*{};'.format(grad[i], call, inv_dx[i])
@@ -152,9 +152,9 @@ class GradientFunction(OpenClFunctionCodeGenerator):
                     if i<dim-1:
                         s.append('stride *= {};'.format(S[i]))
             s.append('return {};'.format(grad()))
-        
 
-           
+
+
 if __name__ == '__main__':
     from hysop.backend.device.codegen.base.test import test_typegen
     tg = test_typegen('float', float_dump_mode='dec')
diff --git a/hysop/backend/device/codegen/functions/polynomial.py b/hysop/backend/device/codegen/functions/polynomial.py
index 78b1a689dfde82d4662f01d1723f37e99a781c32..0501e67e34fb33300826178f53e1913053d9505c 100644
--- a/hysop/backend/device/codegen/functions/polynomial.py
+++ b/hysop/backend/device/codegen/functions/polynomial.py
@@ -1,5 +1,6 @@
+import numpy as np
+import sympy as sm
 
-from hysop.deps import sm, np
 from hysop.tools.types import check_instance
 from hysop.backend.device.codegen.base.opencl_codegen   import OpenClCodeGenerator
 from hysop.backend.device.codegen.base.function_codegen import OpenClFunctionCodeGenerator
@@ -12,26 +13,26 @@ class PolynomialFunction(OpenClFunctionCodeGenerator):
 
     def __init__(self, typegen, ftype, dim, coeffs, name, var='x',
             use_fma=True, known_args=None):
-        
+
         vtype = typegen.vtype(ftype,dim)
 
         args = self.build_prototype(typegen, ftype, dim, var)
 
         reqs = self.build_requirements(typegen, ftype, dim)
-        
+
         super(PolynomialFunction,self).__init__(basename=name,
                 output=vtype, typegen=typegen, inline=True,
                 args=args, known_args=known_args)
 
         self.update_requirements(reqs)
-        
+
         self.dim   = dim
         self.ftype = ftype
         self.vtype = vtype
         self.coeffs = np.asarray(coeffs)
         self.use_fma = use_fma
         self.var = var
-        
+
         self.gencode()
 
     def build_prototype(self, typegen, ftype, dim, var):
@@ -42,12 +43,12 @@ class PolynomialFunction(OpenClFunctionCodeGenerator):
     def build_requirements(self, typegen, ftype, dim):
         reqs = WriteOnceDict()
         return reqs
-    
+
     def gencode(self):
         s = self
         dim = s.dim
         tg = self.typegen
-        
+
         ftype = s.ftype
         vtype = s.vtype
 
@@ -57,14 +58,14 @@ class PolynomialFunction(OpenClFunctionCodeGenerator):
         assert coeffs.size >= 1
 
         x = s.args[s.var]
-        
-        C = [ tg.dump(float(Ci)) if Ci != 0 else None 
+
+        C = [ tg.dump(float(Ci)) if Ci != 0 else None
                 for Ci in coeffs ]
-        
+
         with s._function_():
             mul = '{}*{}'
             fma = 'fma({},{},{})' if use_fma else '({}*{}+{})'
-            
+
             i=0
             while (C[i] is None):
                 i+=1
@@ -76,14 +77,13 @@ class PolynomialFunction(OpenClFunctionCodeGenerator):
                     P = fma.format(x,P,Ci)
             P = 'return {};'.format(P)
             s.append(P)
-    
+
     def per_work_statistics(self):
         from hysop.backend.device.codegen.base.statistics import WorkStatistics
         dim     = self.dim
         ftype   = self.ftype
         ncoeffs = np.sum(self.coeffs!=0)
-        print self.coeffs
-        
+
         stats = WorkStatistics()
         stats.ops_per_type[ftype] = 2*(ncoeffs-1)*dim
 
@@ -92,12 +92,12 @@ class PolynomialFunction(OpenClFunctionCodeGenerator):
 if __name__ == '__main__':
 
     from hysop.backend.device.codegen.base.test import _test_typegen
-    
+
     tg = _test_typegen('float')
-    pf = PolynomialFunction(tg,'float',4, 
+    pf = PolynomialFunction(tg,'float',4,
             [0,1,2,3,4,0,0,0,5,6,7,8,9,0], 'test_poly', 'x', True)
     pf.edit()
 
-    print pf.per_work_statistics()
+    print(pf.per_work_statistics())
 
     pf.test_compile()
diff --git a/hysop/backend/device/codegen/functions/runge_kutta.py b/hysop/backend/device/codegen/functions/runge_kutta.py
index f65a631bddf87555cec1a5e9b34f5ed79694b5c3..325be7b1f990bbaccfec48a87c8abf7c7b9aa081 100644
--- a/hysop/backend/device/codegen/functions/runge_kutta.py
+++ b/hysop/backend/device/codegen/functions/runge_kutta.py
@@ -19,15 +19,15 @@ class RungeKuttaFunction(OpenClFunctionCodeGenerator):
             'y':'y',
             'step': 'step'
     }
-    
-    def __init__(self,typegen,ftype,method,rhs, 
+
+    def __init__(self,typegen,ftype,method,rhs,
             used_vars=_default_used_vars,
             known_args=None):
 
         check_instance(method,ExplicitRungeKutta)
         check_instance(rhs,OpenClFunctionCodeGenerator)
         method.dump = typegen.dump
-        
+
         #find out rhs function arguments
         rhs_args_name = set(rhs.args.keys())
         rhs_args_name = rhs_args_name.difference(rhs.known_args.keys())
@@ -41,7 +41,7 @@ class RungeKuttaFunction(OpenClFunctionCodeGenerator):
         has_step =  (step  in rhs_args_name)
         has_dt   =  (dt    in rhs_args_name)
         has_var  =  (y     in rhs_args_name)
-        
+
         # runge kutta function args
         # it is ok for the rhs not to depend on 't' or 'dt' but not on the scheme variable 'y'
         args = ArgDict()
@@ -65,21 +65,21 @@ class RungeKuttaFunction(OpenClFunctionCodeGenerator):
                 args[arg] = rhs.args[arg]
                 rhs_args[arg] = args[arg]
 
-        rhs_name = hashlib.md5(rhs.fname).hexdigest()[0:8]
+        rhs_name = hashlib.md5(rhs.fname.encode('utf-8')).hexdigest()[0:8]
         ctype    = args[y].ctype
         basename = 'apply_{}_{}_{}'.format(method.name(),ctype,rhs_name)
 
         ctype = args[y].ctype
-        
+
         super(RungeKuttaFunction,self).__init__(basename=basename,
                 output=ctype,typegen=typegen,inline=True,
                 args=args, known_args=known_args)
-        
+
         self.update_requirements({'rhs':rhs})
-        
+
         self.ftype  = ftype
         self.method = method
-        
+
         self.has_time = has_time
         self.has_step = has_step
         self.rhs_args = rhs_args
@@ -99,7 +99,7 @@ class RungeKuttaFunction(OpenClFunctionCodeGenerator):
         has_step = s.has_step
         rhs_args = s.rhs_args
         rhs   = s.reqs['rhs']
-        
+
         dt = s.vars[used_vars['dt']]
         y  = s.vars[used_vars['y']]
         if has_time:
@@ -111,22 +111,22 @@ class RungeKuttaFunction(OpenClFunctionCodeGenerator):
         Ti = CodegenVariable('ti',ftype,tg)
         Ki = copy.copy(y)
         Yi = copy.copy(y)
-        
+
         K = CodegenArray('K',Yi.ctype,tg,shape=(method.order,))
-        
+
         dy = copy.copy(y)
         dy.name = 'k'
 
-        
+
         with s._function_():
-            
+
             s.jumpline()
             s.comment('Estimated slopes')
             K.declare(s)
             if has_step:
                 step.declare(s, init=0, const=False)
             __sum = ''
-            for i in xrange(method.stages):
+            for i in range(method.stages):
                 s.jumpline()
                 alpha = method.alpha[i]
                 beta  = method.beta[i]
@@ -143,7 +143,7 @@ class RungeKuttaFunction(OpenClFunctionCodeGenerator):
                             continue
                         else:
                             _sum += '+{}*{}'.format(tg.dump(float(g)),K[j])
-                    
+
                 with s._block_():
                     Yi.name='{}{}'.format(y,i)
                     rhs_args[used_vars['y']] = Yi
@@ -157,7 +157,7 @@ class RungeKuttaFunction(OpenClFunctionCodeGenerator):
                         s.append(Yi.declare(init='{} + {}*{}'.format(y,Ki,dt)))
                     else:
                         s.append(Yi.declare(init=y))
-                    
+
                     code='{} = {};'.format(K[i], rhs(**rhs_args))
                     s.append(code)
                     if beta!=0:
@@ -165,16 +165,16 @@ class RungeKuttaFunction(OpenClFunctionCodeGenerator):
 
                     if has_step:
                         s.append('{} += 1;'.format(step))
-            
+
             s.jumpline()
             s.append(dy.declare(const=True, init=__sum))
             s.append('return {} + {}*{};'.format(y,dy,dt))
-    
+
     def per_work_statistics(self):
         ftype    = self.ftype
         method   = self.method
         stages   = method.stages
-        rhs      = self.rhs 
+        rhs      = self.rhs
         has_time = int(self.has_time)
 
         ops = {}
@@ -185,18 +185,18 @@ class RungeKuttaFunction(OpenClFunctionCodeGenerator):
 
         stats = WorkStatistics()
         stats.ops_per_type = ops
-        
+
         return stats + stages*rhs.per_work_statistics()
 
-           
+
 if __name__ == '__main__':
 
     from hysop.backend.device.codegen.base.test import test_typegen
-    
+
     method = ExplicitRungeKutta('RK4_38')
 
     tg = test_typegen('float')
     rkf = RungeKuttaFunction(tg,ftype=tg.fbtype,method=method,rhs='function')
     rkf.edit()
-    print 
+    print
 
diff --git a/hysop/backend/device/codegen/functions/stretching_rhs.py b/hysop/backend/device/codegen/functions/stretching_rhs.py
index 5cda6360b57475de224a40f03b96e93645925e42..4ddd12faf6e5e4e91082640904962657f6e465ad 100644
--- a/hysop/backend/device/codegen/functions/stretching_rhs.py
+++ b/hysop/backend/device/codegen/functions/stretching_rhs.py
@@ -1,5 +1,5 @@
+import sympy as sm
 
-from hysop.deps import sm
 from hysop.tools.types import check_instance
 from hysop.backend.device.codegen.base.opencl_codegen   import OpenClCodeGenerator
 from hysop.backend.device.codegen.base.function_codegen import OpenClFunctionCodeGenerator
@@ -25,19 +25,19 @@ class DirectionalStretchingRhsFunction(OpenClFunctionCodeGenerator):
         'components': 'xyz',
         'directions': 'XYZ'
     }
-    
+
     def __init__(self, typegen, dim, ftype, order, direction, formulation, cached, boundary,
             ptr_restrict=True, vectorize_u=False,
-            itype='int', 
+            itype='int',
             used_variables = _default_used_variables,
             known_args=None):
-        
+
         assert dim==3
         assert direction<dim
         assert order>1 and order%2==0
         check_instance(formulation,StretchingFormulation)
         check_instance(boundary,tuple, values=BoundaryCondition)
-        
+
         is_conservative = (formulation==StretchingFormulation.CONSERVATIVE)
         is_periodic     = (boundary[0]==BoundaryCondition.PERIODIC) and \
                             (boundary[1]==BoundaryCondition.PERIODIC)
@@ -46,7 +46,7 @@ class DirectionalStretchingRhsFunction(OpenClFunctionCodeGenerator):
             storage='__local'
         else:
             storage='__global'
-        
+
         vtype = typegen.vtype(ftype,dim)
 
         (args,basename) = self.build_prototype(typegen,dim,itype,ftype,vtype,order,
@@ -56,18 +56,18 @@ class DirectionalStretchingRhsFunction(OpenClFunctionCodeGenerator):
         reqs = self.build_requirements(typegen,dim,itype,ftype,vtype,order,direction,
                 boundary,cached,ptr_restrict,storage,vectorize_u,used_variables,
                 is_conservative,is_periodic,args)
-        
+
         super(DirectionalStretchingRhsFunction,self).__init__(basename=basename,
                 output=vtype,typegen=typegen,inline=True,
                 args=args, known_args=known_args)
 
         self.update_requirements(reqs)
-        
+
         self.dim   = dim
-        self.itype = itype 
+        self.itype = itype
         self.ftype = ftype
         self.vtype = vtype
-        
+
         self.order           = order
         self.direction       = direction
         self.used_variables  = used_variables
@@ -78,7 +78,7 @@ class DirectionalStretchingRhsFunction(OpenClFunctionCodeGenerator):
         self.formulation     = formulation
         self.cached          = cached
         self.boundary        = boundary
-        
+
         self.gencode()
 
     def build_prototype(self,typegen,dim,itype,ftype,vtype,order,direction,cached,
@@ -88,21 +88,21 @@ class DirectionalStretchingRhsFunction(OpenClFunctionCodeGenerator):
         W = used_variables['W']
         xyz = used_variables['components']
         XYZ = used_variables['directions']
-        
+
         args = ArgDict()
         if vectorize_u:
             args[U] = CodegenVariable(U, vtype, typegen, const=True, add_impl_const=True, storage=storage, ptr=True, ptr_restrict=ptr_restrict,nl=True)
         else:
-            for i in xrange(dim):
+            for i in range(dim):
                 Uxyz= '{}{}'.format(U,xyz[i])
                 args[Uxyz] = CodegenVariable(Uxyz, ftype, typegen, const=True, add_impl_const=True, storage=storage, ptr=True, ptr_restrict=ptr_restrict, nl=True)
         if is_conservative:
                 Wd= '{}{}'.format(W,xyz[direction])
                 args[Wd] = CodegenVariable(Wd, ftype, typegen, const=False, add_impl_const=True, storage=storage, ptr=True, ptr_restrict=ptr_restrict, nl=True)
-                
+
         args[W]        = CodegenVectorClBuiltin(W, ftype, dim, typegen, add_impl_const=True)
         args['inv_dx'] = CodegenVariable('inv_dx', ftype, typegen, add_impl_const=True, nl=True)
-        
+
         if is_conservative:
             args['rk_step'] = CodegenVariable('rk_step', itype, typegen, add_impl_const=True)
         if is_periodic and (not cached):
@@ -114,7 +114,7 @@ class DirectionalStretchingRhsFunction(OpenClFunctionCodeGenerator):
         args['lidx']   = CodegenVariable('lidx', itype, typegen, add_impl_const=True)
         args['Lx']     = CodegenVariable('Lx',   itype, typegen, add_impl_const=True, nl=True)
         args['active'] = CodegenVariable('active','bool',typegen)
-        
+
         basename = 'stretching_rhs_{}_{}{}{}_fdc{}'.format(str(formulation).lower(),
                 ftype[0],dim,('v' if vectorize_u else ''),order)
         basename+='_'+XYZ[direction]
@@ -123,12 +123,12 @@ class DirectionalStretchingRhsFunction(OpenClFunctionCodeGenerator):
 
     def build_stencil(self,order):
         assert order%2==0
-        h = order/2
+        h = order//2
         sg = StencilGenerator()
         sg.configure(dim=1,derivative=1,order=order)
         stencil = sg.generate_exact_stencil(origin=h)
         return stencil
-        
+
     def build_requirements(self, typegen,dim,itype,ftype,vtype,order,direction,boundary,cached,
             ptr_restrict,storage,vectorize_u,used_variables,is_conservative,is_periodic,args):
 
@@ -160,21 +160,21 @@ class DirectionalStretchingRhsFunction(OpenClFunctionCodeGenerator):
 
         inv_dx_s   = sm.Symbol('inv_dx')
         inv_dx_var = CodegenVariable('inv_dx', ftype, typegen, add_impl_const=True, nl=True)
-        
+
         stencil = self.build_stencil(order)
-        stencil.replace_symbols({stencil.dx:1/inv_dx_s})
+        stencil.replace_symbols({stencil.dx:1.0/inv_dx_s})
         symbol2vars = {inv_dx_s:inv_dx_var}
 
         apply_stencil = ApplyStencilFunction(typegen=typegen,
                 stencil=stencil,
                 symbol2vars=symbol2vars,
-                ftype=ftype, itype=itype, 
-                data_storage=storage, 
+                ftype=ftype, itype=itype,
+                data_storage=storage,
                 vectorize=vectorize_u,
                 components=dim,
                 extra_inputs  = extra_inputs,
                 scalar_inputs = scalar_inputs,
-                vector_inputs = vector_inputs, 
+                vector_inputs = vector_inputs,
                 vector_suffixes = xyz,
                 op=op,
                 custom_id=custom_id,
@@ -187,7 +187,7 @@ class DirectionalStretchingRhsFunction(OpenClFunctionCodeGenerator):
         s = self
         dim = s.dim
         tg = self.typegen
-        
+
         cached = s.cached
         direction = s.direction
         formulation = s.formulation
@@ -200,7 +200,7 @@ class DirectionalStretchingRhsFunction(OpenClFunctionCodeGenerator):
         is_periodic = s.is_periodic
 
         apply_stencil = s.reqs['apply_stencil']
-        
+
         used_variables=s.used_variables
         U = used_variables['U']
         W = used_variables['W']
@@ -217,7 +217,7 @@ class DirectionalStretchingRhsFunction(OpenClFunctionCodeGenerator):
         if vectorize_u:
             fargs[U] = s.args[U]
         else:
-            for i in xrange(dim):
+            for i in range(dim):
                 Ui = U+xyz[i]
                 fargs[Ui] = s.args[Ui]
         if is_conservative:
@@ -234,14 +234,14 @@ class DirectionalStretchingRhsFunction(OpenClFunctionCodeGenerator):
 
         offset = s.args['offset']
         active = s.args['active']
-        
+
         dw_dt  = CodegenVectorClBuiltin('dW_dt', ftype, dim, tg)
-        ghosts = CodegenVariable('ghosts', itype, tg, const=True, value=order/2, symbolic_mode=True)
-        
+        ghosts = CodegenVariable('ghosts', itype, tg, const=True, value=order//2, symbolic_mode=True)
+
         with s._function_():
             s.jumpline()
-            
-            ghosts.declare(s) 
+
+            ghosts.declare(s)
 
             if is_conservative:
                 s.jumpline()
@@ -254,14 +254,14 @@ class DirectionalStretchingRhsFunction(OpenClFunctionCodeGenerator):
                 s.jumpline(2)
 
             dw_dt.declare(s,init=0)
-        
+
             if is_conservative:
                 cond = '({active}) && ({lid}>={step}*{ghosts}) && ({lid}<{L}-{step}*{ghosts})'.format(
                         active=active(), lid=lidx(), L=Lx(), ghosts=ghosts(), step='({}+1)'.format(rk_step()))
             else:
                 cond = '({active}) && ({lid}>={ghosts}) && ({lid}<{L}-{ghosts})'.format(
                         active=active(), lid=lidx(), L=Lx(), ghosts=ghosts())
-            
+
             with s._if_(cond):
 
                 if is_conservative:
@@ -270,7 +270,7 @@ class DirectionalStretchingRhsFunction(OpenClFunctionCodeGenerator):
                     du_dx.declare(s,init=call)
 
                     s.jumpline()
-                
+
                     s.comment('directional contribution due to div(U:W)')
                     code = '{} += {};'.format(dw_dt(),du_dx())
                     s.append(code)
@@ -289,7 +289,7 @@ class DirectionalStretchingRhsFunction(OpenClFunctionCodeGenerator):
                         s.comment('directional contribution due to grad(U)^T.W')
                         code = '{} += dot({},{});'.format(dw_dt[direction], du_dx(), W())
                         s.append(code)
-                    
+
                     elif formulation==StretchingFormulation.MIXED_GRAD_UW:
                         s.comment('directional contribution due to grad(U).W')
                         code = '{}   += 0.5*{}*{};'.format(dw_dt(), du_dx(), W[direction])
@@ -301,14 +301,14 @@ class DirectionalStretchingRhsFunction(OpenClFunctionCodeGenerator):
                         raise ValueError()
 
             s.append('return {};'.format(dw_dt()))
-    
+
     def per_work_statistics(self):
         dim     = self.dim
         ftype   = self.ftype
         storage = self.storage
 
         stats = self.reqs['apply_stencil'].per_work_statistics()
-        
+
         if 'alpha' in self.known_args:
             alpha = self.known_args['alpha']
         else:
@@ -322,7 +322,7 @@ if __name__ == '__main__':
 
     from hysop.backend.device.codegen.base.test import test_typegen
     formulation = StretchingFormulation.GRAD_UW
-    
+
     tg = test_typegen('float')
     asf = DirectionalStretchingRhsFunction(tg,3,tg.fbtype,4,1,vectorize_u=False,formulation=formulation,cached=False)
     asf.edit()
diff --git a/hysop/backend/device/codegen/functions/vload.py b/hysop/backend/device/codegen/functions/vload.py
index a0a9a29a9d0a0de6d5abf19393f5c3cea9f82795..3ebb87f9a48eb9b6d783ba96e3b41911dbdb5a53 100644
--- a/hysop/backend/device/codegen/functions/vload.py
+++ b/hysop/backend/device/codegen/functions/vload.py
@@ -7,8 +7,8 @@ from hysop.backend.device.opencl.opencl_types     import TypeGen
 from hysop.backend.device.codegen.base.utils     import ArgDict
 
 class Vload(OpenClFunctionCodeGenerator):
-    
-    def __init__(self, typegen, ptype, vectorization, 
+
+    def __init__(self, typegen, ptype, vectorization,
             default_val='0', itype='int', restrict=True, storage=None,
             known_args=None, use_short_circuit=None):
         check_instance(ptype, str)
@@ -19,11 +19,11 @@ class Vload(OpenClFunctionCodeGenerator):
         fname = 'vload_{}{}'.format(ptype, vectorization)
         ctype = typegen.vtype(ptype, vectorization)
         check_instance(use_short_circuit, bool, allow_none=True)
-        
+
         use_short_circuit = first_not_None(use_short_circuit, typegen.use_short_circuit_ops)
 
         args = ArgDict()
-        args['data']   = CodegenVariable('data', ptype, ptr=True, const=True, add_impl_const=True, 
+        args['data']   = CodegenVariable('data', ptype, ptr=True, const=True, add_impl_const=True,
                 storage=storage, ptr_restrict=restrict, typegen=typegen)
         args['offset'] = CodegenVariable('offset', itype, add_impl_const=True, typegen=typegen)
         args['size']   = CodegenVariable('size', itype, add_impl_const=True, typegen=typegen)
@@ -49,7 +49,7 @@ class Vload(OpenClFunctionCodeGenerator):
         use_short_circuit = s.use_short_circuit
 
         ret = CodegenVectorClBuiltin('res', data.ctype, vectorization, typegen)
-        
+
         with s._function_():
             vcond='({i}>=0) && ({i}<({}-{}))'.format(size, vectorization-1, i=offset)
             scond='(({i}>=0) $&& ({i}<{}))'
@@ -59,7 +59,7 @@ class Vload(OpenClFunctionCodeGenerator):
                 ret.affect(s, init=load)
             with s._else_():
                 with s._align_() as al:
-                    for j in xrange(vectorization):
+                    for j in range(vectorization):
                        offsetj='{}+{}'.format(offset,j)
                        scondj = scond.format(size, i=offsetj)
                        loadj = data[offsetj]
@@ -71,7 +71,7 @@ class Vload(OpenClFunctionCodeGenerator):
                        al.append(code)
             s._return(ret)
 
-           
+
 if __name__ == '__main__':
     from hysop.backend.device.codegen.base.test import _test_typegen
     tg = _test_typegen()
diff --git a/hysop/backend/device/codegen/functions/vstore.py b/hysop/backend/device/codegen/functions/vstore.py
index e1bf2d11f413ac5dbd9d4d52ba2a019df5752beb..ee191e3a875b87f30375d37511f83a236fed1537 100644
--- a/hysop/backend/device/codegen/functions/vstore.py
+++ b/hysop/backend/device/codegen/functions/vstore.py
@@ -7,8 +7,8 @@ from hysop.backend.device.opencl.opencl_types     import TypeGen
 from hysop.backend.device.codegen.base.utils     import ArgDict
 
 class Vstore(OpenClFunctionCodeGenerator):
-    
-    def __init__(self, typegen, ptype, vectorization, 
+
+    def __init__(self, typegen, ptype, vectorization,
             itype='int', restrict=True, storage=None,
             known_args=None, use_short_circuit=None):
         check_instance(ptype, str)
@@ -16,16 +16,16 @@ class Vstore(OpenClFunctionCodeGenerator):
         check_instance(restrict, bool)
         check_instance(use_short_circuit, bool, allow_none=True)
         assert vectorization in typegen.vsizes
-        
+
         use_short_circuit = first_not_None(use_short_circuit, typegen.use_short_circuit_ops)
 
         fname = 'vstore_{}{}'.format(ptype, vectorization)
         vtype = typegen.vtype(ptype, vectorization)
 
         args = ArgDict()
-        args['value']   = CodegenVectorClBuiltin('value', ptype, vectorization, typegen, 
+        args['value']   = CodegenVectorClBuiltin('value', ptype, vectorization, typegen,
                 add_impl_const=True)
-        args['data']   = CodegenVariable('data', ptype, ptr=True, const=False, add_impl_const=True, 
+        args['data']   = CodegenVariable('data', ptype, ptr=True, const=False, add_impl_const=True,
                 storage=storage, ptr_restrict=restrict, typegen=typegen)
         args['offset'] = CodegenVariable('offset', itype, add_impl_const=True, typegen=typegen)
         args['size']   = CodegenVariable('size', itype, add_impl_const=True, typegen=typegen)
@@ -58,7 +58,7 @@ class Vstore(OpenClFunctionCodeGenerator):
                 s.append(store)
             with s._else_():
                 with s._align_() as al:
-                    for j in xrange(vectorization):
+                    for j in range(vectorization):
                        offsetj='{}+{}'.format(offset,j)
                        scondj = scond.format(size, i=offsetj)
                        storej = data[offsetj]
@@ -69,7 +69,7 @@ class Vstore(OpenClFunctionCodeGenerator):
                            code='if {} ${{ {} $= {}; }}'.format(scondj, storej, valj)
                        al.append(code)
 
-           
+
 if __name__ == '__main__':
     from hysop.backend.device.codegen.base.test import _test_typegen
     tg = _test_typegen()
diff --git a/hysop/backend/device/codegen/kernels/bandwidth.py b/hysop/backend/device/codegen/kernels/bandwidth.py
index afbcd00ead28987401620ae21e3de4c7d5e90a61..89efd2cdb4288972344447eb5f937affd310b843 100644
--- a/hysop/backend/device/codegen/kernels/bandwidth.py
+++ b/hysop/backend/device/codegen/kernels/bandwidth.py
@@ -28,27 +28,27 @@ class BandwidthKernel(KernelCodeGenerator):
         vtype = vtype.replace(' ','_')
         name = 'bandwidth_{}_{}_{}'.format(vtype,nreads,nwrites)
         return name
-    
+
     def __init__(self, typegen, vtype, nreads, nwrites, known_vars=None):
 
         assert nreads>0 and nwrites>0
-        
+
         kernel_args = self.gen_kernel_arguments(typegen, vtype)
-        
+
         name = BandwidthKernel.codegen_name(vtype, nreads, nwrites)
 
         super(BandwidthKernel,self).__init__(
                 name=name,
                 typegen=typegen,
-                work_dim=1, 
+                work_dim=1,
                 kernel_args=kernel_args,
                 known_vars=known_vars)
-        
+
         self.vtype   = vtype
         self.nreads  = nreads
         self.nwrites = nwrites
         self.gencode()
-    
+
 
     def gen_kernel_arguments(self, typegen, vtype):
         kargs = ArgDict()
@@ -88,10 +88,10 @@ class BandwidthKernel(KernelCodeGenerator):
 
 if __name__ == '__main__':
     from hysop.backend.device.codegen.base.test import _test_typegen
-    
+
     tg = _test_typegen('float')
     vtype = 'float4'
-    
-    ck = BandwidthKernel(tg, vtype, nreads=10, nwrites=1, 
+
+    ck = BandwidthKernel(tg, vtype, nreads=10, nwrites=1,
             known_vars={'global_size':1024, 'local_size': 512})
     ck.edit()
diff --git a/hysop/backend/device/codegen/kernels/copy_kernel.py b/hysop/backend/device/codegen/kernels/copy_kernel.py
index 819e4576e752b736a45769177125c44a7a25fbdb..f37d8a14353913369c0cfc1202213832b675ddee 100644
--- a/hysop/backend/device/codegen/kernels/copy_kernel.py
+++ b/hysop/backend/device/codegen/kernels/copy_kernel.py
@@ -30,12 +30,12 @@ class CopyKernel(KernelCodeGenerator):
         if vectorized>1:
             name += '_v{}'.format(vectorized)
         return name
-    
+
     def __init__(self, typegen, vtype, vectorized=1,
             src_mem='global', dst_mem='global', restrict=True,
             known_vars = None,
             force_symbolic = False):
-        
+
         if vectorized==1:
             pass
         elif vectorized not in typegen.vsizes:
@@ -44,18 +44,18 @@ class CopyKernel(KernelCodeGenerator):
             raise ValueError('Cannot vectorize vector types.')
 
         kernel_args = self.gen_kernel_arguments(typegen, vtype, src_mem, dst_mem, restrict)
-        
+
         name = CopyKernel.codegen_name(src_mem, dst_mem, vtype, restrict, vectorized)
 
         super(CopyKernel,self).__init__(
                 name=name,
                 typegen=typegen,
-                work_dim=1, 
+                work_dim=1,
                 kernel_args=kernel_args,
                 known_vars=known_vars,
                 vec_type_hint=vtype,
                 symbolic_mode=force_symbolic)
-        
+
         self.vtype = vtype
         self.src_mem = src_mem
         self.dst_mem = dst_mem
@@ -63,17 +63,17 @@ class CopyKernel(KernelCodeGenerator):
         self.vectorized = vectorized
 
         self.gencode()
-    
+
 
     def gen_kernel_arguments(self, typegen, vtype, src_mem, dst_mem, restrict):
-        
+
         ftype = typegen.basetype(vtype)
         components  = typegen.components(vtype)
         is_base_type = (components==1)
 
         _src_mem  = OpenClCodeGenerator.default_keywords[src_mem]
         _dst_mem  = OpenClCodeGenerator.default_keywords[dst_mem]
-        
+
         kargs = ArgDict()
 
         kargs['dst'] = CodegenVariable(ctype=vtype,name='dst',ptr=True,typegen=typegen,
@@ -94,11 +94,11 @@ class CopyKernel(KernelCodeGenerator):
         dst   = s.vars['dst']
         src   = s.vars['src']
         count = s.vars['count']
-            
+
 
         with s._kernel_():
             global_size.declare(s,const=True)
-            
+
             if global_size.known():
                 s.pragma('unroll')
             with s._for_('int {gid}={fval}; {gid}<{N}; {gid}+={gsize}'.format(gid=global_id(),
@@ -109,7 +109,7 @@ class CopyKernel(KernelCodeGenerator):
                     vstore = 'vstore{N}({vload}, {offset}, {dst});'.format(vload=vload,N=N,offset=global_id(), dst=dst())
                     s.append(vstore)
                 else:
-                    s.append("{} = {};".format(dst[global_id()],src[global_id()])) 
+                    s.append("{} = {};".format(dst[global_id()],src[global_id()]))
 
     @staticmethod
     def autotune(cl_env, typegen, src, dst, vtype, count, restrict,
@@ -119,12 +119,12 @@ class CopyKernel(KernelCodeGenerator):
                 raise ValueError('cl_env is not an OpenClEnvironment.')
             if not isinstance(typegen, OpenClTypeGen):
                 raise ValueError('typegen is not an OpenClTypeGen.')
-            
+
             device   = cl_env.device
             context  = cl_env.ctx
             platform = cl_env.platform
             queue    = cl_env.queue
-            
+
             if vtype not in typegen.builtin_types:
                 raise ValueError('{} is not an opencl bultin type.'.format(vtype))
             if count < 0:
@@ -145,7 +145,7 @@ class CopyKernel(KernelCodeGenerator):
             work_size=count
             min_local_size = max(1,clCharacterize.get_simd_group_size(device,1))
             max_workitem_workload = 256
-            
+
             symbolic_mode = False #__KERNEL_DEBUG__
             dump_src      = __KERNEL_DEBUG__
 
@@ -153,24 +153,24 @@ class CopyKernel(KernelCodeGenerator):
             def kernel_generator(work_size, global_size, local_size, kernel_args, vectorized,
                     force_verbose=False, force_debug=False,
                     **kargs):
-                    
+
                     ## Compile time known variables
                     known_vars = dict(
                             global_size=global_size[0],
                             local_size=local_size[0],
                             count = work_size[0]
                         )
-                        
+
                     ## CodeGenerator
                     codegen = CopyKernel(typegen=typegen, vtype=vtype, vectorized=vectorized,
                             src_mem='global', dst_mem='global', restrict=restrict,
                             force_symbolic=symbolic_mode,
                             known_vars=known_vars)
-                    
+
                     ## generate source code and build kernel
                     src       = codegen.__str__()
                     src_hash  = hashlib.sha512(src).hexdigest()
-                    prg       = cl_env.build_raw_src(src, build_opts, 
+                    prg       = cl_env.build_raw_src(src, build_opts,
                                    kernel_name=codegen.name,
                                    force_verbose=force_verbose, force_debug=force_debug)
                     kernel    = prg.all_kernels()[0]
@@ -182,11 +182,11 @@ class CopyKernel(KernelCodeGenerator):
 
                 if work_size%vectorized!=0:
                     raise ValueError('Invalid vector size.')
-                    
+
                 codegen_name =CopyKernel.codegen_name('global','global',vtype,restrict,vectorized)
                 autotuner = KernelAutotuner(name=codegen_name,
                         work_dim=1,
-                        build_opts=build_opts, 
+                        build_opts=build_opts,
                         autotuner_config=autotuner_config)
                 autotuner.add_filter('1d_shape_min', autotuner.min_workitems_per_direction)
                 autotuner.enable_variable_workitem_workload(max_workitem_workload=max_workitem_workload)
@@ -196,24 +196,24 @@ class CopyKernel(KernelCodeGenerator):
                         'dst':slice(1,2,1),
                         'src':slice(2,3,1)
                     }
-                
+
                 (gwi, lwi, stats, wl) = autotuner.bench(typegen=typegen,
-                        work_size=work_size/vectorized, kernel_args=kernel_args, 
+                        work_size=work_size//vectorized, kernel_args=kernel_args,
                         kernel_generator=kernel_generator,
                         dump_src=dump_src,
                         min_local_size=min_local_size,
                         vectorized=vectorized)
 
                 (kernel, kernel_args, cached_bytes, src_hash) = kernel_generator(
-                        work_size=[work_size/vectorized], global_size=gwi,local_size=lwi,
+                        work_size=[work_size//vectorized], global_size=gwi,local_size=lwi,
                         kernel_args=kernel_args, vectorized=vectorized,
                         force_verbose=None,force_debug=None)
-                
+
                 kernel_launcher = OpenClKernelLauncher(kernel, queue, list(gwi), list(lwi))
                 return (stats, kernel_launcher, kernel_args, kernel_args_mapping, cached_bytes)
-            
+
             candidates = [i for i in typegen.vsizes if work_size%i==0]
-            
+
             best = None
             for vectorized in candidates:
                 res = _autotune(vectorized);
@@ -224,10 +224,10 @@ class CopyKernel(KernelCodeGenerator):
 
 if __name__ == '__main__':
     from hysop.backend.device.codegen.base.test import  test_typegen
-    
+
     tg = test_typegen('float','dec')
     vtype = 'float'
-    
+
     ck = CopyKernel(tg, vtype, vectorized=16,
         force_symbolic=True,
         known_vars=dict(
diff --git a/hysop/backend/device/codegen/kernels/custom_symbolic.py b/hysop/backend/device/codegen/kernels/custom_symbolic.py
index 54954ac0e26005e2000179202604bbf798764925..a7d6aa0b8f47175a721e1a17a315b061e2eb3bf4 100644
--- a/hysop/backend/device/codegen/kernels/custom_symbolic.py
+++ b/hysop/backend/device/codegen/kernels/custom_symbolic.py
@@ -1,11 +1,12 @@
-  
-import contextlib
+
+import contextlib, math, operator, hashlib
 from contextlib import contextmanager
+import sympy as sm
 
 from abc import ABCMeta, abstractmethod
 from hysop import __VERBOSE__, __KERNEL_DEBUG__
-from hysop.deps import sm, math, operator, hashlib
 from hysop.core.arrays.all import OpenClArray
+from hysop.tools.contexts import nested
 from hysop.constants import DirectionLabels, BoundaryCondition, Backend, Precision, \
                             SymbolicExpressionKind
 
@@ -39,7 +40,7 @@ from hysop.backend.device.codegen.base.variables      import CodegenVariable, \
 from hysop.backend.device.codegen.structs.mesh_info   import MeshBaseStruct, MeshInfoStruct
 from hysop.backend.device.codegen.symbolic.functions.custom_symbolic_function import \
                                                                         CustomSymbolicFunction
-        
+
 from hysop.operator.base.custom_symbolic_operator import ValidExpressions
 from hysop.symbolic.field import SymbolicDiscreteField
 from hysop.symbolic.relational import Assignment
@@ -52,18 +53,18 @@ class SymbolicCodegenContext(object):
     """Store all information required to generate custom code."""
 
     def __init__(self, typegen, expr_info,
-                    ftype, itype, 
+                    ftype, itype,
                     vectorization, granularity, kernel_dim,
                     use_short_circuit, work_dim, known_vars,
                     tuning_mode, debug_mode, symbolic_mode):
 
         vftype = typegen.vtype(ftype, vectorization)
         vitype = typegen.vtype(itype, vectorization)
-        
+
         vgranularity_dim = upper_pow2_or_3(granularity)
         gftype = typegen.vtype(ftype, vgranularity_dim)
         gitype = typegen.vtype(itype, vgranularity_dim)
-        
+
         vkernel_dim = upper_pow2_or_3(kernel_dim)
         kftype = typegen.vtype(ftype, vkernel_dim)
         kitype = typegen.vtype(itype, vkernel_dim)
@@ -72,7 +73,7 @@ class SymbolicCodegenContext(object):
         varray_dim = upper_pow2_or_3(array_dim)
         aftype = typegen.vtype(ftype, varray_dim)
         aitype = typegen.vtype(itype, varray_dim)
-        
+
         self.expr_info   = expr_info
         self.typegen     = typegen
 
@@ -100,11 +101,11 @@ class SymbolicCodegenContext(object):
         self.debug_mode = debug_mode
         self.symbolic_mode = symbolic_mode
         self.known_vars = known_vars
-        
+
         self.array_sizes = SortedDict()
         self.array_ghosts = SortedDict()
         self.array_contiguous_ghosts = SortedDict()
-        
+
         self.buffer_args = SortedDict()
 
         self.compute_work_per_step()
@@ -129,7 +130,7 @@ class SymbolicCodegenContext(object):
         self.extra_vwork_per_step = extra_vwork_per_step
         self.max_extra_vwork = max_extra_vwork
 
-    def array_size(self, varname, index=None): 
+    def array_size(self, varname, index=None):
         assert (varname in self.array_sizes), self.array_sizes.keys()
         sizes = self.array_sizes[varname]
         if (index is not None):
@@ -169,46 +170,46 @@ class SymbolicCodegenContext(object):
         tg = self.typegen
 
         args = ArgDict()
-        args['offset']         = CodegenVariable('offset', self.itype, tg, const=True) 
+        args['offset']         = CodegenVariable('offset', self.itype, tg, const=True)
         args['local_offset']   = CodegenVariable('local_offset', self.itype, tg, const=True)
         args['line_offset']    = CodegenVariable('line_offset',  self.itype, tg, const=True)
         args['full_offset']    = CodegenVariable('full_offset',  self.itype, tg, const=True)
-        args['last_offset']    = CodegenVariable('last_offset',  self.itype, tg, const=True, 
-                                                    nl=True) 
+        args['last_offset']    = CodegenVariable('last_offset',  self.itype, tg, const=True,
+                                                    nl=True)
 
         args['is_first']        = CodegenVariable('is_first', 'bool', tg, const=True)
         args['is_last']         = CodegenVariable('is_last', 'bool', tg, const=True)
         args['is_active']       = CodegenVariable('is_active', 'bool', tg, const=True)
-        args['is_first_active'] = CodegenVariable('is_first_active', 'bool', tg, const=True, 
+        args['is_first_active'] = CodegenVariable('is_first_active', 'bool', tg, const=True,
                                                     nl=True)
-        args['is_last_active']  = CodegenVariable('is_last_active', 'bool', tg, const=True, 
+        args['is_last_active']  = CodegenVariable('is_last_active', 'bool', tg, const=True,
                                                     nl=True)
-        args['is_active_boundary'] = CodegenVariable('is_active_boundary', 'bool', tg, 
+        args['is_active_boundary'] = CodegenVariable('is_active_boundary', 'bool', tg,
                                                     const=True, nl=True)
-        
+
         args['lid'] = CodegenVariable('lid', self.itype, tg, const=True)
         args['local_work'] = CodegenVariable('lwork', self.itype, tg, const=True)
-        args['current_local_work'] = CodegenVariable('clwork', self.itype, tg, 
+        args['current_local_work'] = CodegenVariable('clwork', self.itype, tg,
                                                                     const=True, nl=True)
-        args['compute_grid_size'] = CodegenVectorClBuiltin('compute_grid_size', self.itype, 
-                self.varray_dim, typegen=tg, 
+        args['compute_grid_size'] = CodegenVectorClBuiltin('compute_grid_size', self.itype,
+                self.varray_dim, typegen=tg,
                 value=self.expr_info.compute_resolution[::-1], const=True, nl=True)
 
         args['dx'] = CodegenVariable('dx', tg.fbtype, tg, const=True)
-        
+
         self.space_symbols = SortedDict()
         for (i,xi) in enumerate(symbolic_space_symbols[:self.varray_dim]):
             if (i==0):
-                args[xi.varname] = CodegenVectorClBuiltin(xi.varname, self.ftype, 
+                args[xi.varname] = CodegenVectorClBuiltin(xi.varname, self.ftype,
                     self.vectorization, typegen=tg)
             else:
                 args[xi.varname] = CodegenVariable(xi.varname, self.ftype, tg)
             self.space_symbols[xi] = args[xi.varname]
-        
+
         self.local_indices_symbols = SortedDict()
         for (i,Li) in enumerate(symbolic_local_indices[:self.varray_dim]):
             if (i==0):
-                args[Li.varname] = CodegenVectorClBuiltin(Li.varname, self.itype, 
+                args[Li.varname] = CodegenVectorClBuiltin(Li.varname, self.itype,
                     self.vectorization, typegen=tg)
             else:
                 args[Li.varname] = CodegenVariable(Li.varname, self.itype, tg)
@@ -219,9 +220,9 @@ class SymbolicCodegenContext(object):
         else:
             local_size = CodegenVariable('L', self.itype, tg, const=True)
             args['L'] = local_size
-        self.local_size = local_size 
+        self.local_size = local_size
 
-        for argname, arg in args.iteritems():
+        for argname, arg in args.items():
             setattr(self, argname, arg)
 
         return args
@@ -235,13 +236,13 @@ class SymbolicCodegenContext(object):
         read_counter  = expr_info.discretization_info.read_counter
 
         args = ArgDict()
-        
+
         array_ghosts = SortedDict()
         array_contiguous_ghosts = SortedDict()
         array_sizes = SortedDict()
 
-        dfields = set(f.dfield for f in (expr_info.input_dfields.values() + 
-                                         expr_info.output_dfields.values()))
+        dfields = set(f.dfield for f in set(expr_info.input_dfields.values()).union(
+                                            expr_info.output_dfields.values()))
 
         for dfield in dfields:
             field = dfield._field
@@ -252,9 +253,9 @@ class SymbolicCodegenContext(object):
             name = '{}_{{}}'.format(name)
             reads  = read_counter.get(dfield, None)
             writes = write_counter.get(dfield, None)
-            local_size_per_index = array_sizes.setdefault(dfield, 
+            local_size_per_index = array_sizes.setdefault(dfield,
                                         npw.int_zeros(shape=(dfield.nb_components, 3)))
-            for index in xrange(dfield.nb_components):
+            for index in range(dfield.nb_components):
                 is_read    = (reads  is not None) and (reads[index]>0)
                 is_written = (writes is not None) and (writes[index]>0)
                 if not (is_read or is_written):
@@ -264,8 +265,8 @@ class SymbolicCodegenContext(object):
                 cname = name.format(index)
                 array_ghosts[cname] = ghosts
                 if has_ghosts:
-                    args[cname] = CodegenVariable(name=cname, typegen=typegen, 
-                            ctype=ctype, ptr=True, ptr_restrict=True, const=True, 
+                    args[cname] = CodegenVariable(name=cname, typegen=typegen,
+                            ctype=ctype, ptr=True, ptr_restrict=True, const=True,
                             storage='__local',
                             add_impl_const=True, nl=True)
                     if self.local_size_known:
@@ -274,17 +275,17 @@ class SymbolicCodegenContext(object):
                     else:
                         local_size_per_index[index] = (0, self.vectorization, 2*ghosts)
                 else:
-                    args[cname] = CodegenVectorClBuiltin(cname, ctype, self.vectorization, 
+                    args[cname] = CodegenVectorClBuiltin(cname, ctype, self.vectorization,
                                     typegen=typegen, const=True, nl=True)
             array_contiguous_ghosts[dfield] = min_ghosts[field].copy()
-        
+
         array_ghosts.update(array_contiguous_ghosts)
         self.array_sizes.update(array_sizes)
         self.array_ghosts.update(array_ghosts)
         self.array_contiguous_ghosts.update(array_contiguous_ghosts)
         return args
-    
-    
+
+
     def generate_array_args(self):
         typegen = self.typegen
         expr_info = self.expr_info
@@ -294,13 +295,12 @@ class SymbolicCodegenContext(object):
         read_counter  = expr_info.discretization_info.read_counter
 
         args = ArgDict()
-        
+
         array_ghosts = SortedDict()
         array_contiguous_ghosts = SortedDict()
         array_sizes = SortedDict()
 
-        arrays = set(f for f in (expr_info.input_arrays.values() + 
-                                            expr_info.output_arrays.values()))
+        arrays = set(expr_info.input_arrays.values()).union(expr_info.output_arrays.values())
 
         for a in arrays:
             ctype = a.ctype
@@ -317,9 +317,9 @@ class SymbolicCodegenContext(object):
 
             local_size = 0
             array_ghosts[name] = ghosts
-            
+
             if has_ghosts:
-                args[name] = CodegenVariable(name=name, typegen=typegen, 
+                args[name] = CodegenVariable(name=name, typegen=typegen,
                         ctype=ctype, ptr=True, ptr_restrict=True, const=True, storage='__local',
                         add_impl_const=True, nl=True)
                 if self.local_size_known:
@@ -327,66 +327,64 @@ class SymbolicCodegenContext(object):
                 else:
                     local_size = (0, self.vectorization, 2*ghosts)
             else:
-                args[name] = CodegenVectorClBuiltin(name, ctype, self.vectorization, 
+                args[name] = CodegenVectorClBuiltin(name, ctype, self.vectorization,
                         typegen=typegen, const=True, nl=True)
                 local_size = 0
             array_sizes[a] = local_size
             array_contiguous_ghosts[a] = min_ghosts[a].copy()
-        
+
         array_ghosts.update(array_contiguous_ghosts)
         self.array_sizes.update(array_sizes)
         self.array_ghosts.update(array_ghosts)
         self.array_contiguous_ghosts.update(array_contiguous_ghosts)
         return args
 
-    def generate_param_args(self):        
+    def generate_param_args(self):
         typegen = self.typegen
         expr_info = self.expr_info
-        
+
         args = ArgDict()
-        
+
         # READ ONLY PARAMETERS
         # (ndim<=1) and (1<=size<=16) => simple vector constant
         # (ndim>1) or (size>16)       => ptr (const) __constant memory space
-        for (pname, param) in expr_info.input_params.iteritems():
+        for (pname, param) in expr_info.input_params.items():
             assert (pname not in expr_info.output_params)
             shape = param.shape
             ctype = param.ctype
             if (len(shape)==0) or ((len(shape)==1) and (shape[0]<=16)):
                 vsize = upper_pow2_or_3(shape[0]) if (len(shape)==1) else 1
-                arg = CodegenVectorClBuiltin(pname, ctype, vsize, typegen=typegen, 
+                arg = CodegenVectorClBuiltin(pname, ctype, vsize, typegen=typegen,
                         const=True, nl=True)
             else:
                 storage = '__constant'
-                arg= CodegenVariable(name=pname, typegen=typegen, 
+                arg= CodegenVariable(name=pname, typegen=typegen,
                         ctype=ctype, ptr=True, ptr_restrict=True, const=True, storage=storage,
                         add_impl_const=True, nl=True)
             args[pname] = arg
 
         # OUTPUT PARAMETERS
         # not supported yet (should be non const __global ptrs).
-        for pname, param in expr_info.output_params.iteritems():
+        for pname, param in expr_info.output_params.items():
             raise NotImplementedError('Output parameters are not supported.')
-        
+
         return args
-        
-    def generate_scalar_args(self):        
+
+    def generate_scalar_args(self):
         typegen = self.typegen
         expr_info = self.expr_info
-        
+
         args = ArgDict()
-        for (sname, scalar) in expr_info.scalars.iteritems():
+        for (sname, scalar) in expr_info.scalars.items():
             ctype = scalar.ctype
             vsize = self.vectorization
-            scalar = CodegenVectorClBuiltin(sname, ctype, vsize, typegen=typegen, 
+            scalar = CodegenVectorClBuiltin(sname, ctype, vsize, typegen=typegen,
                     const=True, nl=True)
             args[sname] = scalar
         return args
 
 
-class CustomSymbolicKernelGenerator(KernelCodeGenerator):
-
-    __metaclass__ = ABCMeta
+class CustomSymbolicKernelGenerator(KernelCodeGenerator, metaclass=ABCMeta):
 
     @classmethod
     def create(cls, expr_info, **kwds):
@@ -403,40 +401,40 @@ class CustomSymbolicKernelGenerator(KernelCodeGenerator):
             msg='Expression kind {} is not supported yet.'
             msg=msg.format(expr_info.kind)
             raise RuntimeError(msg)
-    
+
     @classmethod
     @not_implemented
     def custom_name(cls):
         pass
-    
+
     @abstractmethod
     def generate_expr_code(self):
         pass
 
     @classmethod
-    def codegen_name(cls, work_dim, array_dim, kernel_dim, granularity, ftype, 
+    def codegen_name(cls, work_dim, array_dim, kernel_dim, granularity, ftype,
             vectorization, name, direction):
         return '{}__{}d_kdim{}_wdim{}_gr{}__{}_v{}'.format(
-                name, 
+                name,
                 array_dim, kernel_dim, work_dim, granularity,
                 ftype, vectorization)
 
-    def __init__(self, typegen, expr_info, ftype, 
+    def __init__(self, typegen, expr_info, ftype,
                        kernel_dim, work_dim, granularity,
-                       vectorization, 
+                       vectorization,
                        itype='int',
                        use_short_circuit = None,
                        symbolic_mode = False,
-                       debug_mode    = False, 
+                       debug_mode    = False,
                        tuning_mode   = False,
                        known_vars    = None):
-        
+
         assert vectorization in [1,2,4,8,16]
         use_short_circuit = first_not_None(use_short_circuit, typegen.use_short_circuit_ops)
         known_vars = first_not_None(known_vars, {})
-        
+
         csc = SymbolicCodegenContext(typegen, expr_info,
-                            ftype, itype, 
+                            ftype, itype,
                             vectorization, granularity, kernel_dim,
                             use_short_circuit, work_dim, known_vars,
                             tuning_mode, debug_mode, symbolic_mode)
@@ -447,22 +445,22 @@ class CustomSymbolicKernelGenerator(KernelCodeGenerator):
         kernel_reqs = self.build_requirements(csc)
 
         kernel_args = self.gen_kernel_arguments(csc, kernel_reqs)
-        
+
         expr_reqs = self.build_expr_requirements(csc, kernel_reqs, kernel_args, known_vars)
 
         kernel_reqs.update(expr_reqs)
-        
+
         super(CustomSymbolicKernelGenerator,self).__init__(
                 name=name,
                 typegen=typegen,
-                work_dim=work_dim, 
+                work_dim=work_dim,
                 kernel_args=kernel_args,
                 known_vars=known_vars,
                 vec_type_hint=ftype,
                 symbolic_mode=symbolic_mode)
-        
+
         self.update_requirements(kernel_reqs)
-        
+
         self.csc = csc
 
         self.gencode()
@@ -472,7 +470,7 @@ class CustomSymbolicKernelGenerator(KernelCodeGenerator):
         typegen = csc.typegen
 
         reqs = WriteOnceDict()
-        
+
         # discrete cartesian fields mesh info
         mesh_base_struct = MeshBaseStruct(typegen=typegen, vsize=csc.varray_dim)
         reqs['MeshBaseStruct'] = mesh_base_struct
@@ -481,13 +479,13 @@ class CustomSymbolicKernelGenerator(KernelCodeGenerator):
         reqs['MeshInfoStruct'] = mesh_info_struct
 
         return reqs
-   
+
 
     @abstractmethod
     def build_expr_requirements(self, csc, kernel_reqs, kernel_args):
         """Generate requirements and generate new expressions."""
         return WriteOnceDict()
-    
+
     def required_workgroup_cache_size(self, local_work_size):
         """
         Return a tuple of required (static,dynamic,total) cache bytes per workgroup
@@ -509,29 +507,29 @@ class CustomSymbolicKernelGenerator(KernelCodeGenerator):
 
 
 
-    
+
     def gen_kernel_arguments(self, csc, kernel_reqs):
 
         expr_info = csc.expr_info
         typegen = csc.typegen
-        
+
         kargs = ArgDict()
-        
+
         # declare all array like arguments
         mesh_infos = SortedDict()
         param_args = SortedDict()
-        
+
         array_args = SortedDict()
         array_strides = SortedDict()
-        
+
         # read-only input fields
         ei = expr_info
         di = expr_info.discretization_info
-        for (obj, counts) in di.read_counter.iteritems():
+        for (obj, counts) in di.read_counter.items():
             assert (counts is not None)
             if npw.array_equal(counts,0):
                 continue
-            
+
             if isinstance(obj, di.IndexedCounterTypes):
                 assert isinstance(obj, DiscreteScalarFieldView)
                 dfield = obj
@@ -553,7 +551,7 @@ class CustomSymbolicKernelGenerator(KernelCodeGenerator):
                     volatile = (vname in ei.is_volatile)
                     (arg, stride) = OpenClArrayBackend.build_codegen_arguments(kargs, name=vname,
                                 known_vars=csc.known_vars, symbolic_mode=csc.symbolic_mode,
-                                storage=self._global, ctype=dfield.ctype, 
+                                storage=self._global, ctype=dfield.ctype,
                                 typegen=typegen, mesh_dim=csc.varray_dim,
                                 ptr_restrict=True, const=True, volatile=volatile)
                     assert i not in args
@@ -569,7 +567,7 @@ class CustomSymbolicKernelGenerator(KernelCodeGenerator):
                 volatile = (vname in ei.is_volatile)
                 (arg, stride) = OpenClArrayBackend.build_codegen_arguments(kargs, name=vname,
                             known_vars=csc.known_vars, symbolic_mode=csc.symbolic_mode,
-                            storage=self._global, ctype=obj.ctype, 
+                            storage=self._global, ctype=obj.ctype,
                             typegen=typegen, mesh_dim=csc.varray_dim,
                             ptr_restrict=True, const=True, volatile=volatile)
                 if isinstance(obj, (OpenClSymbolicBuffer,OpenClSymbolicNdBuffer)):
@@ -580,9 +578,9 @@ class CustomSymbolicKernelGenerator(KernelCodeGenerator):
             else:
                 msg='Unsupported type {}.'.format(type(obj))
                 raise TypeError(msg)
-        
+
         # output fields
-        for (obj, counts) in di.write_counter.iteritems():
+        for (obj, counts) in di.write_counter.items():
             assert (counts is not None)
             if npw.array_equal(counts,0):
                 continue
@@ -602,10 +600,10 @@ class CustomSymbolicKernelGenerator(KernelCodeGenerator):
                         continue
                     vname = dfield.var_name + '_' + str(i)
                     volatile = (vname in ei.is_volatile)
-                    arg, arg_strides = OpenClArrayBackend.build_codegen_arguments(kargs, 
+                    arg, arg_strides = OpenClArrayBackend.build_codegen_arguments(kargs,
                                 name=vname,
                                 known_vars=csc.known_vars, symbolic_mode=csc.symbolic_mode,
-                                storage=self._global, ctype=dfield.ctype, 
+                                storage=self._global, ctype=dfield.ctype,
                                 typegen=typegen, mesh_dim=csc.varray_dim,
                                 ptr_restrict=True, const=False, volatile=volatile)
                     assert i not in args
@@ -619,7 +617,7 @@ class CustomSymbolicKernelGenerator(KernelCodeGenerator):
                 volatile = (vname in ei.is_volatile)
                 (arg, stride) = OpenClArrayBackend.build_codegen_arguments(kargs, name=vname,
                             known_vars=csc.known_vars, symbolic_mode=csc.symbolic_mode,
-                            storage=self._global, ctype=obj.ctype, 
+                            storage=self._global, ctype=obj.ctype,
                             typegen=typegen, mesh_dim=csc.varray_dim,
                             ptr_restrict=True, const=False, volatile=volatile)
                 if isinstance(obj, (OpenClSymbolicBuffer,OpenClSymbolicNdBuffer)):
@@ -630,9 +628,9 @@ class CustomSymbolicKernelGenerator(KernelCodeGenerator):
             else:
                 msg='Unsupported type {}.'.format(type(obj))
                 raise TypeError(msg)
-        
+
         # parameters
-        for argname, arg in csc.param_args.iteritems():
+        for argname, arg in csc.param_args.items():
             param_args[argname] = arg
             kargs[argname] = arg
 
@@ -643,11 +641,11 @@ class CustomSymbolicKernelGenerator(KernelCodeGenerator):
             kargs['gidx'] = gidx
         else:
             gidx = None
-        
+
         # cache
         if not csc.local_size_known:
-            lmem = CodegenVariable(storage=self._local, ctype='uchar', 
-                     add_impl_const=True, name='buffer', ptr=True, ptr_restrict=True, 
+            lmem = CodegenVariable(storage=self._local, ctype='uchar',
+                     add_impl_const=True, name='buffer', ptr=True, ptr_restrict=True,
                      typegen=typegen, nl=False)
             kargs['buffer'] = lmem
             msg='Cannot handle dynamic local memory yet, '
@@ -657,7 +655,7 @@ class CustomSymbolicKernelGenerator(KernelCodeGenerator):
             lmem = None
         self.field_mesh_infos = mesh_infos
         self.array_args    = array_args
-        self.array_strides = array_strides 
+        self.array_strides = array_strides
         self.param_args = param_args
         self.gidx = gidx
         self.lmem = lmem
@@ -674,14 +672,14 @@ class CustomSymbolicKernelGenerator(KernelCodeGenerator):
         local_size    = self.vars['local_size']
 
         loop_id = CodegenVectorClBuiltin('vid', itype, varray_dim, typegen=tg)
-        
-        vectorization_var = CodegenVariable('n', itype, tg, 
+
+        vectorization_var = CodegenVariable('n', itype, tg,
                 const=True, value=vectorization)
         local_work = csc.local_work
-        
-        max_extra_vwork_var = CodegenVariable('extra_vwork', csc.itype, typegen=tg, const=True, 
+
+        max_extra_vwork_var = CodegenVariable('extra_vwork', csc.itype, typegen=tg, const=True,
                                                     value=csc.max_extra_vwork)
-        local_work.init='{}*({}-2*{})'.format(vectorization_var, local_size[0], 
+        local_work.init='{}*({}-2*{})'.format(vectorization_var, local_size[0],
                                                                     max_extra_vwork_var)
 
         vzero = CodegenVectorClBuiltin('vzero', itype, vectorization, typegen=tg, const=True,
@@ -701,7 +699,7 @@ class CustomSymbolicKernelGenerator(KernelCodeGenerator):
         self.azero = azero
         self.max_extra_vwork_var = max_extra_vwork_var
 
-        return (compute_grid_size, loop_id, vectorization_var, max_extra_vwork_var, 
+        return (compute_grid_size, loop_id, vectorization_var, max_extra_vwork_var,
                     local_work, vzero, voffset, azero)
 
     def _generate_mesh_variables(self):
@@ -711,7 +709,7 @@ class CustomSymbolicKernelGenerator(KernelCodeGenerator):
             xmin, dx, inv_dx = None, None, None
         else:
             declare_mesh_properties = True
-            mesh_info_0 = field_mesh_infos.values()[0]
+            mesh_info_0 = next(iter(field_mesh_infos.values()))
             dx     = mesh_info_0['dx'].alias('dx', const=True)
             inv_dx = mesh_info_0['inv_dx'].alias('inv_dx', const=True)
             xmin   = mesh_info_0['local_mesh']['xmin'].alias('xmin', const=True)
@@ -720,12 +718,12 @@ class CustomSymbolicKernelGenerator(KernelCodeGenerator):
         self.dx = dx
         self.inv_dx = inv_dx
         self.xmin = xmin
-        
+
         return declare_mesh_properties, xmin, dx, inv_dx
 
     def _generate_array_variables(self):
         array_args = self.array_args
-        
+
         field_mesh_infos = self.field_mesh_infos
 
         tg  = self.typegen
@@ -738,13 +736,13 @@ class CustomSymbolicKernelGenerator(KernelCodeGenerator):
         vzero = self.vzero
         voffset = self.voffset
         azero = self.azero
-        
+
         array_gids = SortedDict()
         array_vids = SortedDict()
         array_grid_ghosts = SortedDict()
         array_grid_sizes = SortedDict()
         array_line_data = SortedDict()
-        
+
         array_local_data = SortedDict()
         array_local_rdata = SortedDict()
         array_private_data = SortedDict()
@@ -756,7 +754,7 @@ class CustomSymbolicKernelGenerator(KernelCodeGenerator):
         has_private_loads, has_private_stores = False, False
         has_local_loads,   has_local_stores   = False, False
 
-        for (array, array_data) in array_args.iteritems():
+        for (array, array_data) in array_args.items():
             if isinstance(array, OpenClSymbolicArray):
                 name = array.varname
             elif isinstance(array, DiscreteScalarFieldView):
@@ -779,7 +777,7 @@ class CustomSymbolicKernelGenerator(KernelCodeGenerator):
                 # array is a numpy like array (without ghosts)
                 grid_size   = compute_grid_size.alias(grid_size_varname)
                 grid_ghosts = azero.alias(ghosts_varname)
-            
+
             indexed_line_data = array_line_data.setdefault(array, {})
             indexed_gid = array_gids.setdefault(array, {})
 
@@ -795,9 +793,9 @@ class CustomSymbolicKernelGenerator(KernelCodeGenerator):
                 is_ro = (is_read and not is_written)
                 is_wo = (is_written and not is_read)
                 is_rw = (is_read and is_written)
-                gindex = CodegenVariable('{}_gid'.format(name), 'ptrdiff_t', tg, 
+                gindex = CodegenVariable('{}_gid'.format(name), 'ptrdiff_t', tg,
                         const=True)
-                line_data = array_data[0].newvar('line_{}'.format(name), 
+                line_data = array_data[0].newvar('line_{}'.format(name),
                         init='{} $+ {}'.format(array_data[0], gindex))
                 valname = name.lower()
                 if valname==name:
@@ -810,14 +808,14 @@ class CustomSymbolicKernelGenerator(KernelCodeGenerator):
                 elif csc.local_size_known:
                     L = self.known_vars['local_size']
                     S = csc.vectorization*L[0]+2*ghosts
-                    var = CodegenArray(valname, dim=1, 
+                    var = CodegenArray(valname, dim=1,
                                 ctype=array.ctype, typegen=tg,
                                 shape=(S,), storage=self._local)
                     local_size_per_index = (S,0,0)
                     itemsize = array.dtype.itemsize
                     local_mem_size[0] += S*itemsize
                     if is_rw:
-                        rvar = CodegenArray(valname+'_r', dim=1, 
+                        rvar = CodegenArray(valname+'_r', dim=1,
                                     ctype=array.ctype, typegen=tg,
                                     shape=(2*ghosts,), storage=self._local)
                         local_mem_size[0] += 2*ghosts*itemsize
@@ -826,8 +824,8 @@ class CustomSymbolicKernelGenerator(KernelCodeGenerator):
                 else:
                     init = '{} + {}*{} + {}'.format(self.lmem,
                             local_mem_size[1], self.vars['local_size'][0], local_mem_size[2])
-                    var = CodegenVariable(name=valname, typegen=typegen, 
-                            ctype=array.ctype, ptr=True, ptr_restrict=True, const=False, 
+                    var = CodegenVariable(name=valname, typegen=typegen,
+                            ctype=array.ctype, ptr=True, ptr_restrict=True, const=False,
                             storage=storage, add_impl_const=True, nl=True, init=init)
                     local_size_per_index = (0,csc.vectorization,2*ghosts)
                     itemsize = array.dtype.itemsize
@@ -836,8 +834,8 @@ class CustomSymbolicKernelGenerator(KernelCodeGenerator):
                     if is_rw:
                         init = '{} + {}*{} + {}'.format(self.lmem,
                                 local_mem_size[1], self.vars['local_size'][0], local_mem_size[2])
-                        rvar = CodegenVariable(name=valname+'_r', typegen=typegen, 
-                                ctype=array.ctype, ptr=True, ptr_restrict=True, const=False, 
+                        rvar = CodegenVariable(name=valname+'_r', typegen=typegen,
+                                ctype=array.ctype, ptr=True, ptr_restrict=True, const=False,
                                 storage=storage, add_impl_const=True, nl=True, init=init)
                         local_mem_size[2] += 2*ghosts*itemsize
                     else:
@@ -859,41 +857,41 @@ class CustomSymbolicKernelGenerator(KernelCodeGenerator):
                     has_local_stores |= is_written
             elif isinstance(array, CartesianDiscreteField):
                 array_ghosts = self.csc.array_contiguous_ghosts[array]
-                local_size_per_index = local_size_per_field.setdefault(array, 
+                local_size_per_index = local_size_per_field.setdefault(array,
                                             npw.int_zeros(shape=(array.nb_components, 3)))
-                for (i, data) in array_data.iteritems():
+                for (i, data) in array_data.items():
                     is_read    = (read_counts is not None)  and (read_counts[i]>0)
                     is_written = (write_counts is not None) and (write_counts[i]>0)
                     is_ro = (is_read and not is_written)
                     is_wo = (is_written and not is_read)
                     is_rw = (is_read and is_written)
 
-                    gindex = CodegenVariable('{}_{}_gid'.format(name, i), 'ptrdiff_t', tg, 
+                    gindex = CodegenVariable('{}_{}_gid'.format(name, i), 'ptrdiff_t', tg,
                             const=True)
-                    line_data = data.newvar('line_{}_{}'.format(name, i), 
+                    line_data = data.newvar('line_{}_{}'.format(name, i),
                             init='{} $+ {}'.format(data, gindex))
-                    
+
                     valname = name.lower()
                     if valname==name:
                         valname = '_{}'.format(valname)
                     valname+='_{}'.format(i)
-                    
+
                     ghosts = array_ghosts[i]
                     if (ghosts==0):
-                        var = CodegenVectorClBuiltin(valname, array.ctype, vectorization, 
+                        var = CodegenVectorClBuiltin(valname, array.ctype, vectorization,
                                 typegen=tg, storage='__private')
                         local_size_per_index[i] = (0,0,0)
                     elif csc.local_size_known:
                         L = self.known_vars['local_size']
                         S = csc.vectorization*L[0]+2*ghosts
-                        var = CodegenArray(valname, dim=1, 
+                        var = CodegenArray(valname, dim=1,
                                     ctype=array.ctype, typegen=tg,
                                     shape=(S,), storage=self._local)
                         local_size_per_index[i] = (S,0,0)
                         itemsize = array.dtype.itemsize
                         local_mem_size[0] += S*itemsize
                         if is_rw:
-                            rvar = CodegenArray(valname+'_r', dim=1, 
+                            rvar = CodegenArray(valname+'_r', dim=1,
                                         ctype=array.ctype, typegen=tg,
                                         shape=(2*ghosts,), storage=self._local)
                             local_mem_size[0] += 2*ghosts*itemsize
@@ -902,8 +900,8 @@ class CustomSymbolicKernelGenerator(KernelCodeGenerator):
                     else:
                         init = '{} + {}*{} + {}'.format(self.lmem,
                                 local_mem_size[1], self.vars['local_size'][0], local_mem_size[2])
-                        var = CodegenVariable(name=valname, typegen=typegen, 
-                                ctype=array.ctype, ptr=True, ptr_restrict=True, 
+                        var = CodegenVariable(name=valname, typegen=typegen,
+                                ctype=array.ctype, ptr=True, ptr_restrict=True,
                                 const=False, storage=storage,
                                 add_impl_const=True, nl=True, init=init)
                         local_size_per_index[i] = (0,csc.vectorization,2*ghosts)
@@ -912,10 +910,10 @@ class CustomSymbolicKernelGenerator(KernelCodeGenerator):
                         local_mem_size[2] += 2*ghosts*itemsize
                         if is_rw:
                             init = '{} + {}*{} + {}'.format(self.lmem,
-                                    local_mem_size[1], self.vars['local_size'][0], 
+                                    local_mem_size[1], self.vars['local_size'][0],
                                     local_mem_size[2])
-                            rvar = CodegenVariable(name=valname+'_r', typegen=typegen, 
-                                    ctype=array.ctype, ptr=True, ptr_restrict=True, 
+                            rvar = CodegenVariable(name=valname+'_r', typegen=typegen,
+                                    ctype=array.ctype, ptr=True, ptr_restrict=True,
                                     const=False, storage=storage,
                                     add_impl_const=True, nl=True, init=init)
                             local_mem_size[2] += 2*ghosts*itemsize
@@ -924,7 +922,7 @@ class CustomSymbolicKernelGenerator(KernelCodeGenerator):
                         msg='Cannot handle offset to different types yet '
                         msg+='(need to consider alignment).'
                         raise NotImplementedError(msg)
-                        
+
                     indexed_gid[i] = gindex
                     indexed_line_data[i] = line_data
 
@@ -949,7 +947,7 @@ class CustomSymbolicKernelGenerator(KernelCodeGenerator):
                 array_local_rdata.pop(array)
             if not indexed_private_data:
                 array_private_data.pop(array)
-        
+
             array_vids[array]       = vindex
             array_grid_sizes[array] = grid_size
             array_grid_ghosts[array] = grid_ghosts
@@ -967,14 +965,14 @@ class CustomSymbolicKernelGenerator(KernelCodeGenerator):
 
         self.local_size_per_field = local_size_per_field
         self.local_mem_size = local_mem_size
-        
+
         self.has_private_loads = has_private_loads
         self.has_private_stores = has_private_stores
         self.has_local_loads = has_local_loads
         self.has_local_stores = has_local_stores
 
         return (array_gids, array_vids, array_values,
-                array_grid_sizes, array_grid_ghosts, 
+                array_grid_sizes, array_grid_ghosts,
                 array_local_data, array_local_rdata, array_private_data)
 
     def _generate_inner_loop_variables(self):
@@ -988,7 +986,7 @@ class CustomSymbolicKernelGenerator(KernelCodeGenerator):
         local_work = self.local_work
         current_local_work = csc.current_local_work
         self.current_local_work = current_local_work
-        
+
         local_offset = csc.local_offset
         line_offset = csc.line_offset
         full_offset = csc.full_offset
@@ -1008,15 +1006,15 @@ class CustomSymbolicKernelGenerator(KernelCodeGenerator):
 
         self.k = k
         self.kmax = kmax
-        
-        
-        return (local_offset, line_offset, last_offset, full_offset, 
+
+
+        return (local_offset, line_offset, last_offset, full_offset,
                     k, kmax, current_local_work,
-                    is_first, is_last, is_active, is_first_active, is_last_active, 
+                    is_first, is_last, is_active, is_first_active, is_last_active,
                     is_active_boundary)
 
     def _generate_loop_context(self):
-        csc = self.csc 
+        csc = self.csc
         itype = csc.itype
         array_dim = csc.array_dim
         compute_grid_size = csc.compute_grid_size
@@ -1038,57 +1036,57 @@ class CustomSymbolicKernelGenerator(KernelCodeGenerator):
 
         (local_offset, line_offset, last_offset, full_offset,
                     k, kmax, current_local_work,
-                    is_first, is_last, is_active, is_first_active, is_last_active, 
+                    is_first, is_last, is_active, is_first_active, is_last_active,
                     is_active_boundary) = self._generate_inner_loop_variables()
 
         if self.work_dim==1:
             kmax.declare(self)
-            last_offset.declare(self, 
-                    init='{} - {}*({}-1)*{}'.format(compute_grid_size[0], vectorization_var, 
+            last_offset.declare(self,
+                    init='{} - {}*({}-1)*{}'.format(compute_grid_size[0], vectorization_var,
                         kmax, local_size[0]))
-        
+
         if (granularity>0):
             self.jumpline()
-        self.decl_vars(*tuple([loop_id]+array_vids.values()))
-        
+        self.decl_vars(*tuple([loop_id]+list(array_vids.values())))
+
         if self.field_mesh_infos:
             x0 = csc.space_symbols[symbolic_space_symbols[0]]
             self.decl_vars(x0)
             if (csc.array_dim>1):
-                self.decl_vars(*tuple(csc.space_symbols[symbolic_space_symbols[i]] 
-                            for i in xrange(1,csc.array_dim)))
+                self.decl_vars(*tuple(csc.space_symbols[symbolic_space_symbols[i]]
+                            for i in range(1,csc.array_dim)))
 
         i0 = csc.local_indices_symbols[symbolic_local_indices[0]]
         self.decl_vars(i0)
         if (csc.array_dim>1):
-            self.decl_vars(*tuple(csc.local_indices_symbols[symbolic_local_indices[i]] 
-                        for i in xrange(1,csc.array_dim)))
+            self.decl_vars(*tuple(csc.local_indices_symbols[symbolic_local_indices[i]]
+                        for i in range(1,csc.array_dim)))
 
         if (granularity>0):
-            code = '{} = {};'.format(loop_id[kdim:kdim+granularity], gidx[:granularity]) 
+            code = '{} = {};'.format(loop_id[kdim:kdim+granularity], gidx[:granularity])
             self.append(code)
             with self._align_() as al:
-                for (array, array_vid) in array_vids.iteritems():
+                for (array, array_vid) in array_vids.items():
                     ghosts = array_ghosts[array]
-                    code = '{} $= {} $+ {};'.format(array_vid[kdim:kdim+granularity], 
-                                                  loop_id[kdim:kdim+granularity], 
+                    code = '{} $= {} $+ {};'.format(array_vid[kdim:kdim+granularity],
+                                                  loop_id[kdim:kdim+granularity],
                                                   ghosts[kdim:kdim+granularity])
                     al.append(code)
 
-            for i in xrange(kdim, kdim+granularity):
+            for i in range(kdim, kdim+granularity):
                 idx_i = csc.local_indices_symbols[symbolic_local_indices[i]]
                 code = idx_i.affect(self, init=loop_id[i])
-            
+
             if self.field_mesh_infos:
-                fmi = self.field_mesh_infos[array_vids.keys()[0]]
-                for i in xrange(kdim, kdim+granularity):
+                fmi = self.field_mesh_infos[next(iter(array_vids))]
+                for i in range(kdim, kdim+granularity):
                     xi = csc.space_symbols[symbolic_space_symbols[i]]
                     code = '{xi} = {x0} + {vid}*{dx};'.format(
-                            xi=xi, vid=array_vids.values()[0][i], 
-                            voffset=self.voffset, 
+                            xi=xi, vid=next(iter(array_vids.values()))[i],
+                            voffset=self.voffset,
                             x0=fmi['local_mesh']['xmin'][i], dx=fmi['dx'][i])
                     self.append(code)
-            
+
         self.jumpline()
 
 
@@ -1111,74 +1109,74 @@ class CustomSymbolicKernelGenerator(KernelCodeGenerator):
                     unroll = (not csc.tuning_mode)
                 with self._for_('{decl}{j}={j0}; {j}<{N}; {j}+={gsize}'.format(
                     decl=decl, j=j, j0=j0, gsize=gsize, N=N), unroll=unroll) as ctx:
-                    
+
                     if i>0:
                         with self._align_() as al:
                             al.jumpline()
                             for vid, ghosts in zip(array_vids.values(), array_ghosts.values()):
-                                al.append('{} $= {} $+ {};'.format(vid[i], 
+                                al.append('{} $= {} $+ {};'.format(vid[i],
                                     loop_id[i], ghosts[i]))
                             al.jumpline()
-                        
+
                         idx_i = csc.local_indices_symbols[symbolic_local_indices[i]]
                         code = idx_i.affect(self, init=loop_id[i])
 
                         if self.field_mesh_infos:
-                            fmi = self.field_mesh_infos[array_vids.keys()[0]]
+                            fmi = self.field_mesh_infos[next(iter(array_vids))]
                             xi = csc.space_symbols[symbolic_space_symbols[i]]
                             code = '{xi} = {x0} + {vid}*{dx};'.format(
-                                    xi=xi, vid=array_vids.values()[0][i], 
-                                    voffset=self.voffset, 
+                                    xi=xi, vid=next(iter(array_vids.values()))[i],
+                                    voffset=self.voffset,
                                     x0=fmi['local_mesh']['xmin'][i], dx=fmi['dx'][i])
                             self.append(code)
 
                     if i==1:
                         kmax.declare(self)
-                        last_offset.declare(self, 
-                                init='{} - {}*({}-1)*{}'.format(compute_grid_size[0], 
+                        last_offset.declare(self,
+                                init='{} - {}*({}-1)*{}'.format(compute_grid_size[0],
                                     vectorization_var, kmax, local_size[0]))
                     elif i==0:
                         with self._align_() as al:
                             line_offset.declare(al, align=True, const=True, init='{}*{}'.format(
                                 k, local_work))
-                            local_offset.declare(al, align=True, const=True, 
+                            local_offset.declare(al, align=True, const=True,
                                                     init='{}*({}-{})'.format(
                                 vectorization_var, local_id[0], self.max_extra_vwork_var))
                             full_offset.declare(al, align=True, const=True, init='{}+{}'.format(
                                 line_offset, local_offset))
-                        
+
                         self.jumpline()
                         with self._align_() as al:
                             al.append('{} $= {};'.format(loop_id[0], line_offset))
                             for vid, ghosts in zip(array_vids.values(), array_ghosts.values()):
                                 al.append('{} $= {} + {};'.format(vid[0], loop_id[0], ghosts[0]))
                             idx_i = csc.local_indices_symbols[symbolic_local_indices[0]]
-                            idx_i.affect(al, init='{}+{}'.format(full_offset, self.voffset), 
+                            idx_i.affect(al, init='{}+{}'.format(full_offset, self.voffset),
                                             align=True)
-                            
+
                         if self.field_mesh_infos:
-                            fmi = self.field_mesh_infos[array_vids.keys()[0]]
+                            fmi = self.field_mesh_infos[next(iter(array_vids))]
                             xi = csc.space_symbols[symbolic_space_symbols[i]]
                             code = '{xi} = {x0} + convert_{vftype}({vid}+{voffset}+{lo})*{dx};'
                             code=code.format(
-                                    xi=xi, vid=array_vids.values()[0][i], lo=local_offset,
+                                    xi=xi, vid=next(iter(array_vids.values()))[i], lo=local_offset,
                                     voffset=self.voffset, vftype=csc.vftype,
                                     x0=fmi['local_mesh']['xmin'][i], dx=fmi['dx'][i])
                             self.append(code)
-                        
+
                         with self._align_() as al:
                             is_first.declare(al, align=True, init='({}==0)'.format(k))
                             is_last.declare(al, align=True, init='({}=={}-1)'.format(k,kmax))
 
                             init='({fo} >= -{n}*{evwork}) && ({fo} < {S}+{n}*{evwork})'
-                            init=init.format(fo=full_offset, n=self.vectorization_var, 
+                            init=init.format(fo=full_offset, n=self.vectorization_var,
                                         S=compute_grid_size[0], evwork=self.max_extra_vwork_var)
                             is_active.declare(al, init=init, align=True)
-                            
+
                             init='{} && ({} < 0)'.format(is_active, full_offset,
                                 vectorization_var, compute_grid_size[0])
                             is_first_active.declare(al, init=init, align=True)
-                            
+
                             init='{} && ({}+{} > {})'.format(is_active, full_offset,
                                                     vectorization_var, compute_grid_size[0])
                             is_last_active.declare(al, init=init, align=True)
@@ -1187,21 +1185,21 @@ class CustomSymbolicKernelGenerator(KernelCodeGenerator):
                             is_active_boundary.declare(al, init=init, align=True)
 
                             current_local_work.declare(al, align=True,
-                                    init='({} ? {} : {})'.format(is_last, 
+                                    init='({} ? {} : {})'.format(is_last,
                                         '{} - {}*{}'.format(compute_grid_size[0], k, local_work),
                                         local_work))
                             self.jumpline()
-                
+
                         if self.array_vids:
                             self.comment('Compute global offsets and line pointers')
                             with self._align_() as al:
-                                for array, vid in array_vids.iteritems():
+                                for array, vid in array_vids.items():
                                     gids    = array_gids[array]
                                     strides = array_strides[array]
-                                    for (key, gid) in gids.iteritems():
+                                    for (key, gid) in gids.items():
                                         stride = strides[key]
-                                        idot = ' $+ '.join('{}*{}'.format(vid[i], stride[i]) 
-                                                for i in xrange(array_dim-1, -1, -1))
+                                        idot = ' $+ '.join('{}*{}'.format(vid[i], stride[i])
+                                                for i in range(array_dim-1, -1, -1))
                                         gid.declare(al, init=idot, align=True)
                             self.jumpline()
                         self.decl_aligned_vars(*tuple(aij for ai in self.array_line_data.values()
@@ -1210,14 +1208,14 @@ class CustomSymbolicKernelGenerator(KernelCodeGenerator):
             except:
                 raise
 
-        nested_loops = [work_iterate(i) for i in xrange(kdim-1,-1,-1)]
+        nested_loops = [work_iterate(i) for i in range(kdim-1, -1, -1)]
         return nested_loops
-    
+
     def gencode(self):
         s  = self
         csc = s.csc
         tg  = s.typegen
-        
+
         expr_info = csc.expr_info
         ftype     = csc.ftype
 
@@ -1228,15 +1226,15 @@ class CustomSymbolicKernelGenerator(KernelCodeGenerator):
         global_size = s.vars['global_size']
         local_size  = s.vars['local_size']
         num_groups  = s.vars['num_groups']
-        
-        field_mesh_infos = {k:s.args[v] for (k,v) in self.field_mesh_infos.iteritems()}
+
+        field_mesh_infos = {k:s.args[v] for (k,v) in self.field_mesh_infos.items()}
         self.field_mesh_infos = field_mesh_infos
 
-        (compute_grid_size, loop_id, vectorization_var, max_extra_vwork_var, local_work, 
+        (compute_grid_size, loop_id, vectorization_var, max_extra_vwork_var, local_work,
                 vzero, voffset, azero) = s._generate_common_variables()
 
         (array_gids, array_vids, array_values,
-                array_grid_sizes, array_grid_ghosts, 
+                array_grid_sizes, array_grid_ghosts,
                     array_local_data, array_local_rdata, array_private_data) = \
                             s._generate_array_variables()
 
@@ -1248,7 +1246,7 @@ class CustomSymbolicKernelGenerator(KernelCodeGenerator):
             s.decl_aligned_vars(global_id, local_id, group_id, const=True)
             s.decl_aligned_vars(global_size, local_size, num_groups, const=True)
             s.comment('Common variables')
-            s.decl_aligned_vars(compute_grid_size, azero, vzero, voffset, 
+            s.decl_aligned_vars(compute_grid_size, azero, vzero, voffset,
                     vectorization_var, max_extra_vwork_var, local_work)
             s.comment('Array specific variables')
             s.decl_aligned_vars(*tuple(array_grid_sizes.values()), const=True)
@@ -1257,12 +1255,14 @@ class CustomSymbolicKernelGenerator(KernelCodeGenerator):
             s.decl_aligned_vars(*(aij for ai in s.array_args.values() for aij in ai.values()))
             s.decl_aligned_vars(*csc.buffer_args.values())
             s.comment('Local and private memory arrays')
-            s.decl_aligned_vars(*(aij for ai in array_local_data.values() + 
-                                    array_local_rdata.values()+array_private_data.values()
+            s.decl_aligned_vars(*(aij for ai in
+                                    tuple(array_local_data.values())  +
+                                    tuple(array_local_rdata.values()) +
+                                    tuple(array_private_data.values())
                                     for aij in filter(lambda x:x, ai.values())))
             s.comment('Iterating over array lines')
             nested_loops = self._generate_loop_context()
-            with contextlib.nested(*nested_loops):
+            with nested(*nested_loops):
                 s.load_data(event, local_id)
                 s.jumpline()
 
@@ -1274,7 +1274,7 @@ class CustomSymbolicKernelGenerator(KernelCodeGenerator):
         # s.test_compile()
         # import sys
         # sys.exit(1)
-    
+
     def compute(self):
         s = self
         s.comment('Compute expressions')
@@ -1282,7 +1282,7 @@ class CustomSymbolicKernelGenerator(KernelCodeGenerator):
         for fcall in self.fcalls:
             fcall.fn_kwds['offset'] = self.csc.local_offset
             if 'dx' in fcall.fn_kwds:
-                mesh_info_0 = self.field_mesh_infos.values()[0]
+                mesh_info_0 = next(iter(self.field_mesh_infos.values()))
                 dx = mesh_info_0['dx'][0]
                 fcall.fn_kwds['dx'] = dx
 
@@ -1294,10 +1294,10 @@ class CustomSymbolicKernelGenerator(KernelCodeGenerator):
         csc = s.csc
         di  = csc.expr_info.discretization_info
 
-        has_local_loads   = self.has_local_loads 
+        has_local_loads   = self.has_local_loads
         has_private_loads = self.has_private_loads
         has_local_right_cache = False
-                
+
         lid = local_id[0]
 
         if not (has_local_loads or has_private_loads):
@@ -1323,7 +1323,7 @@ class CustomSymbolicKernelGenerator(KernelCodeGenerator):
                         _gdata   += (global_data[i],)
                         _ghosts  += (min_ghosts[i],)
                         has_local_right_cache |= (local_rdata[i] is not None)
-                        
+
                 event.declare(s)
                 s.comment('Copy previously loaded data from right to left.')
                 with s._if_('!{}'.format(csc.is_first)):
@@ -1349,7 +1349,7 @@ class CustomSymbolicKernelGenerator(KernelCodeGenerator):
                             src = '{}$-{}'.format(gdata , ghosts)
                             dst = ldata
                             num_elements = '{}$+2*{}'.format(s.current_local_work, ghosts)
-                            code = s.async_work_group_copy(dst, src, num_elements, event, 
+                            code = s.async_work_group_copy(dst, src, num_elements, event,
                                                             align=True)
                             al.append(code)
                 with s._else_():
@@ -1358,10 +1358,10 @@ class CustomSymbolicKernelGenerator(KernelCodeGenerator):
                                 src = '{}$+{}'.format(gdata , ghosts)
                                 dst = '{}$+2*{}'.format(ldata, ghosts)
                                 num_elements = '{}'.format(s.current_local_work)
-                                code = s.async_work_group_copy(dst, src, num_elements, event, 
+                                code = s.async_work_group_copy(dst, src, num_elements, event,
                                                                 align=True)
                                 al.append(code)
-            
+
             if has_private_loads:
                 s.comment('Load private data from global memory')
                 ptrs, dsts, default_vals = (), (), ()
@@ -1379,16 +1379,16 @@ class CustomSymbolicKernelGenerator(KernelCodeGenerator):
                         dsts += (private_data[i],)
                         ptrs += (global_data[i],)
                         default_vals += (dval,)
-                
+
                 cond='({fo}+{i} >= 0) && ({fo}+{i} < {})'
                 fcond = lambda i: cond.format(csc.compute_grid_size[0], fo=csc.full_offset, i=i)
-                s.multi_vload_if(csc.is_active_boundary, 
+                s.multi_vload_if(csc.is_active_boundary,
                         fcond,
                         csc.vectorization, csc.local_offset,
-                        ptrs, dsts, default_vals, 
+                        ptrs, dsts, default_vals,
                         use_short_circuit=csc.use_short_circuit,
                         else_cond=csc.is_active)
-        
+
             if has_local_loads:
                 s.comment('Wait for local memory transactions to finish')
                 code = s.wait_group_events(1, '&{}'.format(event))
@@ -1420,7 +1420,7 @@ class CustomSymbolicKernelGenerator(KernelCodeGenerator):
 
         has_local_stores   = self.has_local_stores
         has_private_stores = self.has_private_stores
-                
+
         lid = local_id[0]
 
         if not (has_local_stores or has_private_stores):
@@ -1445,7 +1445,7 @@ class CustomSymbolicKernelGenerator(KernelCodeGenerator):
                         _lrdata  += (local_rdata[i],)
                         _gdata   += (global_data[i],)
                         _ghosts  += (min_ghosts[i],)
-                        
+
                 event.declare(s)
                 s.comment('Load local memory to global memory.')
                 with s._align_() as al:
@@ -1455,7 +1455,7 @@ class CustomSymbolicKernelGenerator(KernelCodeGenerator):
                         num_elements = '{}'.format(s.current_local_work)
                         code = s.async_work_group_copy(dst, src, num_elements, event, align=True)
                         al.append(code)
-            
+
             if has_private_stores:
                 s.comment('Load private data to global memory')
                 ptrs, srcs = (), ()
@@ -1470,22 +1470,22 @@ class CustomSymbolicKernelGenerator(KernelCodeGenerator):
                             continue
                         srcs += (private_data[i],)
                         ptrs += (global_data[i],)
-                
+
                 cond='({fo}+{i} >= 0) && ({fo}+{i} < {})'
                 fcond = lambda i: cond.format(csc.compute_grid_size[0], fo=csc.full_offset, i=i)
-                s.multi_vstore_if(csc.is_active_boundary, 
+                s.multi_vstore_if(csc.is_active_boundary,
                         fcond,
                         csc.vectorization, csc.local_offset,
                         srcs, ptrs,
                         use_short_circuit=csc.use_short_circuit,
                         else_cond=csc.is_active)
-        
+
             if has_local_stores:
                 s.comment('Wait for local memory transactions to finish')
                 code = s.wait_group_events(1, '&{}'.format(event))
                 s.append(code)
                 s.barrier(_local=True)
-       
+
     def fmt_counter(self, count):
         if isinstance(count, int):
             return {0: count}
diff --git a/hysop/backend/device/codegen/kernels/directional_advection.py b/hysop/backend/device/codegen/kernels/directional_advection.py
index 3c27a5bffd338b30e0262c1a278b4f513b0558ef..218d4b0085a2ecc0f4452ad431c3937d31e0c2dd 100644
--- a/hysop/backend/device/codegen/kernels/directional_advection.py
+++ b/hysop/backend/device/codegen/kernels/directional_advection.py
@@ -1,7 +1,7 @@
-import contextlib
+import contextlib, math, operator, hashlib
 from contextlib import contextmanager
+import numpy as np
 
-from hysop.deps import math, operator, hashlib
 from hysop.constants import DirectionLabels
 
 from hysop import __VERBOSE__, __KERNEL_DEBUG__
@@ -9,8 +9,8 @@ from hysop.backend.device.opencl import cl
 
 from hysop.tools.misc import Utils, upper_pow2_or_3
 from hysop.tools.types import check_instance, first_not_None
+from hysop.tools.contexts import nested
 
-from hysop.deps import np
 from hysop.constants import BoundaryCondition, Backend
 from hysop.core.arrays.all import OpenClArray
 
@@ -79,7 +79,7 @@ class DirectionalAdvectionKernelGenerator(KernelCodeGenerator):
             msg='Bilevel support with multiple particles at a time has not been implemented yet.'
             raise NotImplementedError(msg)
 
-        known_vars = known_vars or dict()
+        known_vars = first_not_None(known_vars, {})
 
         use_short_circuit = first_not_None(use_short_circuit, typegen.use_short_circuit_ops)
 
@@ -465,9 +465,9 @@ class DirectionalAdvectionKernelGenerator(KernelCodeGenerator):
                                 line_offset, velocity_ghosts, advec_ghosts))
 
                         line_velocity_offset = ' + '.join('{}*{}'.format(velocity_gid[j], velocity_strides[j])
-                                for j in xrange(work_dim-1, -1, -1))
+                                for j in range(work_dim-1, -1, -1))
                         line_position_offset = ' + '.join('{}*{}'.format(position_gid[j], position_strides[j])
-                                for j in xrange(work_dim-1, -1, -1))
+                                for j in range(work_dim-1, -1, -1))
 
                         with s._align_() as al:
                             line_position.declare(al, init='{} + {}'.format(position, line_position_offset), align=True)
@@ -544,7 +544,7 @@ class DirectionalAdvectionKernelGenerator(KernelCodeGenerator):
                     yield ctx
             except:
                 raise
-        nested_loops = [_work_iterate_(i) for i in xrange(work_dim-1,-1,-1)]
+        nested_loops = [_work_iterate_(i) for i in range(work_dim-1, -1, -1)]
 
         with s._kernel_():
             field_infos.declare(s)
@@ -574,7 +574,7 @@ class DirectionalAdvectionKernelGenerator(KernelCodeGenerator):
 
             with s._align_() as al:
                 npart.declare(al, const=True, align=True)
-                poffset.declare(al, init=range(nparticles), const=True, align=True)
+                poffset.declare(al, init=tuple(range(nparticles)), const=True, align=True)
             s.jumpline()
 
             if is_cached:
@@ -596,13 +596,13 @@ class DirectionalAdvectionKernelGenerator(KernelCodeGenerator):
             if has_bilevel:
                 s.decl_aligned_vars(velocity_gid_pos, velocity_h)
 
-            with contextlib.nested(*nested_loops):
+            with nested(*nested_loops):
                 s.jumpline()
 
                 if is_cached and not has_bilevel:
                     if tuning_mode:
                         loop = 'int {i}={Lx}; {i}<{N}; {i}+={gsize}'.format(
-                                i='idx', N=V_cache_width, 
+                                i='idx', N=V_cache_width,
                                 Lx=local_id[0], gsize=local_size[0])
                         with s._for_(loop):
                             code='{dst}[{i}] = 0.5;'.format(i='idx', dst=Vc)
@@ -622,7 +622,7 @@ class DirectionalAdvectionKernelGenerator(KernelCodeGenerator):
                     s_mesh_size = position_mesh_info['global_mesh']['compute_resolution'].value[0]
                     v_mesh_size = velocity_mesh_info['global_mesh']['compute_resolution'].value[0]
                     if (s_mesh_size%v_mesh_size==0):
-                        mesh_ratio = s_mesh_size / v_mesh_size
+                        mesh_ratio = s_mesh_size // v_mesh_size
                     with s._if_('k%{}==0'.format(mesh_ratio)):
                         s.append('{} = convert_int_rtn(convert_{}({}+1)*{}*{});'.format(line_offset_for_v, ftype, line_offset, dx[0], v_inv_dx[0]))
                         with s._for_('int {i}={fval}; {i}<{N} && ({i}+{o})<{Nv}; {i}+={gsize}'.format(
diff --git a/hysop/backend/device/codegen/kernels/directional_remesh.py b/hysop/backend/device/codegen/kernels/directional_remesh.py
index 9c1a437ac945caafa4301a7d9550efc2e6b9069f..0027087b81e1f051672e49501145fdeb9ed4321d 100644
--- a/hysop/backend/device/codegen/kernels/directional_remesh.py
+++ b/hysop/backend/device/codegen/kernels/directional_remesh.py
@@ -1,12 +1,13 @@
-import contextlib
+import contextlib, math, operator, hashlib
 from contextlib import contextmanager
+import numpy as np
 
 from hysop import __VERBOSE__, __KERNEL_DEBUG__
-from hysop.deps import np, math, operator, hashlib
 
 from hysop.tools.misc import Utils, upper_pow2_or_3
 from hysop.tools.types import check_instance, first_not_None
 from hysop.tools.numpywrappers import npw
+from hysop.tools.contexts import nested
 from hysop.constants import DirectionLabels, BoundaryCondition, Backend, Precision
 
 from hysop.core.arrays.all import OpenClArray
@@ -57,7 +58,7 @@ class DirectionalRemeshKernelGenerator(KernelCodeGenerator):
         assert remesh_kernel.n >= 1, 'Bad remeshing kernel.'
         if remesh_kernel.n > 1:
             assert remesh_kernel.n % 2 == 0, 'Odd remeshing kernel moments.'
-        min_ghosts = int(1+npw.floor(scalar_cfl)+remesh_kernel.n/2)
+        min_ghosts = int(1+npw.floor(scalar_cfl)+remesh_kernel.n//2)
         return min_ghosts
 
     @classmethod
@@ -73,7 +74,7 @@ class DirectionalRemeshKernelGenerator(KernelCodeGenerator):
         work_load[0] = nparticles
 
         global_size = work_size.copy()
-        global_size = ((global_size+work_load-1)/work_load)
+        global_size = ((global_size+work_load-1)//work_load)
         return global_size
 
     def get_global_size(self, work_size, local_work_size, work_load=None):
@@ -90,7 +91,7 @@ class DirectionalRemeshKernelGenerator(KernelCodeGenerator):
 
         nparticles = self.nparticles
 
-        for i in xrange(1, work_dim):
+        for i in range(1, work_dim):
             assert (local_work_size[i] == 1), 'local_work_size error!'
 
         if 'local_size' in self.known_vars:
@@ -98,7 +99,7 @@ class DirectionalRemeshKernelGenerator(KernelCodeGenerator):
                 'local_work_size mismatch!'
 
         max_global_size = self.get_max_global_size(work_size, work_load, nparticles)
-        global_size = ((max_global_size+local_work_size-1)/local_work_size) * local_work_size
+        global_size = ((max_global_size+local_work_size-1)//local_work_size) * local_work_size
         global_size[0] = local_work_size[0]
 
         return global_size
@@ -153,7 +154,7 @@ class DirectionalRemeshKernelGenerator(KernelCodeGenerator):
         if tuning_mode:
             unroll_loops = False
 
-        group_scalars = group_scalars or tuple(1 for _ in xrange(nscalars))
+        group_scalars = first_not_None(group_scalars, tuple(1 for _ in range(nscalars)))
         check_instance(group_scalars, tuple, values=int)
         assert sum(group_scalars) == nscalars
         nfields = len(group_scalars)
@@ -167,7 +168,7 @@ class DirectionalRemeshKernelGenerator(KernelCodeGenerator):
             msg = 'Local periodic boundary have been deprecated, use BoundaryCondition.NONE instead.'
             raise RuntimeError(msg)
 
-        known_vars = known_vars or dict()
+        known_vars = first_not_None(known_vars, {})
         local_size_known = ('local_size' in known_vars)
 
         itype = 'int'
@@ -260,10 +261,10 @@ class DirectionalRemeshKernelGenerator(KernelCodeGenerator):
 
         scalars_data_in = []
         scalars_strides_in = []
-        for i in xrange(nfields):
+        for i in range(nfields):
             args_in = []
             strides_in = []
-            for j in xrange(group_scalars[i]):
+            for j in range(group_scalars[i]):
                 if is_inplace:
                     arg, strides = OpenClArrayBackend.build_codegen_arguments(kargs, name='S{}_{}_inout'.format(i, j),
                                                                               known_vars=known_vars, symbolic_mode=symbolic_mode,
@@ -287,10 +288,10 @@ class DirectionalRemeshKernelGenerator(KernelCodeGenerator):
         else:
             scalars_data_out = []
             scalars_strides_out = []
-            for i in xrange(nfields):
+            for i in range(nfields):
                 args_out = []
                 strides_out = []
-                for j in xrange(group_scalars[i]):
+                for j in range(group_scalars[i]):
                     arg, strides = OpenClArrayBackend.build_codegen_arguments(kargs, name='S{}_{}_out'.format(i, j),
                                                                               known_vars=known_vars, symbolic_mode=symbolic_mode,
                                                                               storage=self._global, ctype=ftype, typegen=typegen, mesh_dim=mesh_dim,
@@ -317,16 +318,16 @@ class DirectionalRemeshKernelGenerator(KernelCodeGenerator):
             const=True, name='position_mesh_info')
 
         if is_inplace:
-            for i in xrange(nfields):
+            for i in range(nfields):
                 kargs['S{}_inout_mesh_info'.format(i)] = \
                     kernel_reqs['MeshInfoStruct'].build_codegen_variable(
                     const=True, name='S{}_inout_mesh_info'.format(i), nl=True)
         else:
-            for i in xrange(nfields):
+            for i in range(nfields):
                 kargs['S{}_in_mesh_info'.format(i)] = \
                     kernel_reqs['MeshInfoStruct'].build_codegen_variable(
                     const=True, name='S{}_in_mesh_info'.format(i), nl=True)
-            for i in xrange(nfields):
+            for i in range(nfields):
                 kargs['S{}_out_mesh_info'.format(i)] = \
                     kernel_reqs['MeshInfoStruct'].build_codegen_variable(
                     const=True, name='S{}_out_mesh_info'.format(i), nl=True)
@@ -382,23 +383,23 @@ class DirectionalRemeshKernelGenerator(KernelCodeGenerator):
 
         lb = '[' if (nparticles > 1) else ''
         rb = ']' if (nparticles > 1) else ''
-        vnf = '{}{}{}'.format(lb, ', '.join('%2.2f' for _ in xrange(nparticles)), rb)
-        vni = '{}{}{}'.format(lb, ', '.join('%i' for _ in xrange(nparticles)), rb)
+        vnf = '{}{}{}'.format(lb, ', '.join('%2.2f' for _ in range(nparticles)), rb)
+        vni = '{}{}{}'.format(lb, ', '.join('%i' for _ in range(nparticles)), rb)
 
         def expand_printf_vector(x): return str(x) if (nparticles == 1) else ','.join(
             '({}).s{}'.format(x, '0123456789abcdef'[i])
-            if isinstance(x, str) else x[i] for i in xrange(nparticles))
+            if isinstance(x, str) else x[i] for i in range(nparticles))
         epv = expand_printf_vector
 
         position_mesh_info = s.vars['position_mesh_info']
 
         if is_inplace:
             scalars_mesh_info_in = [
-                s.vars['S{}_inout_mesh_info'.format(i)] for i in xrange(nfields)]
+                s.vars['S{}_inout_mesh_info'.format(i)] for i in range(nfields)]
             scalars_mesh_info_out = scalars_mesh_info_in
         else:
-            scalars_mesh_info_in = [s.vars['S{}_in_mesh_info'.format(i)] for i in xrange(nfields)]
-            scalars_mesh_info_out = [s.vars['S{}_out_mesh_info'.format(i)] for i in xrange(nfields)]
+            scalars_mesh_info_in = [s.vars['S{}_in_mesh_info'.format(i)] for i in range(nfields)]
+            scalars_mesh_info_out = [s.vars['S{}_out_mesh_info'.format(i)] for i in range(nfields)]
 
         position_base = s.vars['position_base']
         position_offset = s.vars['position_offset']
@@ -432,7 +433,7 @@ class DirectionalRemeshKernelGenerator(KernelCodeGenerator):
                 for (i, smi) in enumerate(scalars_mesh_info_out))
 
             scalars_in_global_id = tuple(CodegenVectorClBuiltin('S{}_inout_gid'.format(i),
-                                                                itype, work_dim, typegen=tg) for i in xrange(nfields))
+                                                                itype, work_dim, typegen=tg) for i in range(nfields))
 
             scalars_data_out_grid_size = scalars_in_grid_size
             scalars_data_out_grid_ghosts = scalars_in_grid_ghosts
@@ -455,9 +456,9 @@ class DirectionalRemeshKernelGenerator(KernelCodeGenerator):
                                                                     slice(0, work_dim), const=True) for (i, smi) in enumerate(scalars_mesh_info_out))
 
             scalars_in_global_id = tuple(CodegenVectorClBuiltin('S{}_in_gid'.format(i),
-                                                                itype, work_dim, typegen=tg) for i in xrange(nfields))
+                                                                itype, work_dim, typegen=tg) for i in range(nfields))
             scalars_data_out_global_id = tuple(CodegenVectorClBuiltin('S{}_out_gid'.format(i),
-                                                                      itype, work_dim, typegen=tg) for i in xrange(nfields))
+                                                                      itype, work_dim, typegen=tg) for i in range(nfields))
 
             grid_ghosts = (position_grid_ghosts,) + scalars_in_grid_ghosts + \
                 scalars_data_out_grid_ghosts
@@ -490,7 +491,7 @@ class DirectionalRemeshKernelGenerator(KernelCodeGenerator):
             _id = ''
             if work_dim > 1:
                 _id += '$ + ({} $* {})'.format(global_id[work_dim-1], grid_strides[work_dim-1])
-                for i in xrange(work_dim-2, 0, -1):
+                for i in range(work_dim-2, 0, -1):
                     _id += ' $+ ({} $* {})'.format(global_id[i], grid_strides[i])
             _id += ' $+ (({} $- {}))'.format(global_id[0], particle_offset)
             return '{}{}'.format(base_ptr, _id)
@@ -527,10 +528,10 @@ class DirectionalRemeshKernelGenerator(KernelCodeGenerator):
         boundary_scalars = []
         if local_size_known:
             L = s.known_vars['local_size']
-            for i in xrange(nfields):
+            for i in range(nfields):
                 Si = []
                 BSi = []
-                for j in xrange(group_scalars[i]):
+                for j in range(group_scalars[i]):
                     Sij = CodegenArray(name='S{}_{}'.format(i, j),
                                        dim=1, ctype=ftype, typegen=tg,
                                        shape=(nparticles*L[0]+2*min_ghosts,), storage=self._local)
@@ -540,10 +541,10 @@ class DirectionalRemeshKernelGenerator(KernelCodeGenerator):
         else:
             buf = self.vars['buffer']
             k = 0
-            for i in xrange(nfields):
+            for i in range(nfields):
                 Si = []
                 BSi = []
-                for j in xrange(group_scalars[i]):
+                for j in range(group_scalars[i]):
                     Sij = CodegenVariable(name='S{}_{}'.format(i, j), ctype=ftype, typegen=tg,
                                           ptr_restrict=True, ptr=True, storage=self._local,
                                           ptr_const=True,
@@ -558,9 +559,9 @@ class DirectionalRemeshKernelGenerator(KernelCodeGenerator):
         pos = CodegenVectorClBuiltin('p', ftype, nparticles, tg)
         tuning_pos = CodegenVectorClBuiltin('tp', ftype, nparticles, tg)
         scalars = []
-        for i in xrange(nfields):
+        for i in range(nfields):
             si = []
-            for j in xrange(group_scalars[i]):
+            for j in range(group_scalars[i]):
                 sij = CodegenVectorClBuiltin('s{}_{}'.format(i, j), ftype, nparticles, tg)
                 si.append(sij)
             scalars.append(tuple(si))
@@ -638,7 +639,7 @@ class DirectionalRemeshKernelGenerator(KernelCodeGenerator):
             cond = last_active if (cond is None) else cond
             with s._if_(cond):
                 with s._align_() as al:
-                    for j in xrange(nparticles):
+                    for j in range(nparticles):
                         cond = '({}+{}$ < {})'.format(particle_offset, j, compute_grid_size[0])
                         yield al, cond, j
 
@@ -692,11 +693,11 @@ class DirectionalRemeshKernelGenerator(KernelCodeGenerator):
                             align=True, codegen=al)
             s.jumpline()
 
-            nested_loops = [_work_iterate_(i) for i in xrange(dim-1, -1, -1)]
+            nested_loops = [_work_iterate_(i) for i in range(dim-1, -1, -1)]
             if work_dim == 1:
                 kmax.declare(s)
                 last_particle.declare(s)
-            with contextlib.nested(*nested_loops):
+            with nested(*nested_loops):
 
                 s.comment('Compute global offsets and line pointers')
                 with s._align_() as al:
@@ -784,7 +785,7 @@ class DirectionalRemeshKernelGenerator(KernelCodeGenerator):
                                 sij.affect(al, align=True,
                                            init=self.vload(nparticles, line_sij, _id, align=True))
                 with s._else_():
-                    for k in xrange(nparticles):
+                    for k in range(nparticles):
                         code = '{} = NAN;'.format(pos[k])
                         s.append(code)
                         for si, line_si in zip(scalars, line_scalars_in):
@@ -854,7 +855,7 @@ class DirectionalRemeshKernelGenerator(KernelCodeGenerator):
                         s.append('{};'.format(call))
                     else:
                         # without atomics we can only remesh on particle at a time
-                        for p in xrange(nparticles):
+                        for p in range(nparticles):
                             if debug_mode:
                                 with s._first_wi_execution_():
                                     s.append(
@@ -989,8 +990,8 @@ if __name__ == '__main__':
                                            )
                                            )
 
-    print 'scalars_out_min_gosts = {}'.format(dak.scalars_out_cache_ghosts(scalar_cfl, kernel))
-    print 'required cache: {}'.format(dak.required_workgroup_cache_size(local_size))
+    print('scalars_out_min_gosts = {}'.format(dak.scalars_out_cache_ghosts(scalar_cfl, kernel)))
+    print('required cache: {}'.format(dak.required_workgroup_cache_size(local_size)))
 
     dak.edit()
     dak.test_compile()
diff --git a/hysop/backend/device/codegen/kernels/directional_stretching.py b/hysop/backend/device/codegen/kernels/directional_stretching.py
index 5a53ddab1501e492a96d3e9a446aeaa4b15aeaa5..6c64a88172774934da1c4f77bda716d3896ff7c6 100644
--- a/hysop/backend/device/codegen/kernels/directional_stretching.py
+++ b/hysop/backend/device/codegen/kernels/directional_stretching.py
@@ -1,10 +1,11 @@
-
-from hysop.deps import np, operator, hashlib, contextlib
+import operator, hashlib, contextlib
+import numpy as np
 
 from hysop import __VERBOSE__, __KERNEL_DEBUG__
 
 from hysop.tools.misc import Utils, upper_pow2_or_3
 from hysop.tools.types import check_instance
+from hysop.tools.contexts import nested
 
 from hysop.constants import DirectionLabels, BoundaryCondition, Backend, SpaceDiscretization
 from hysop.methods import StretchingFormulation
@@ -46,19 +47,19 @@ class DirectionalStretchingKernel(KernelCodeGenerator):
         sformulation = str(formulation).lower()
         return 'directional_{}{}stretching_{}_{}{}'.format(cache,inplace,sformulation,
                 ftype[0],DirectionLabels[direction])
-   
+
     def __init__(self, typegen, dim, ftype, order, direction,
                        is_cached, is_inplace,
                        boundary, formulation, time_integrator,
                        symbolic_mode = False,
                        known_vars = None):
-        
+
         check_instance(formulation, StretchingFormulation)
         check_instance(boundary[0], BoundaryCondition)
         check_instance(boundary[1], BoundaryCondition)
         check_instance(time_integrator,ExplicitRungeKutta)
         check_instance(is_inplace, bool)
-        
+
         assert dim==3
         assert direction<dim
         assert order>1 and order%2==0
@@ -67,39 +68,39 @@ class DirectionalStretchingKernel(KernelCodeGenerator):
 
         if known_vars is None:
             known_vars = {}
-        
+
         local_size_known = ('local_size' in known_vars)
         is_conservative  = (formulation==StretchingFormulation.CONSERVATIVE)
         is_periodic      = (boundary[0]==BoundaryCondition.PERIODIC) \
                             and (boundary[1]==BoundaryCondition.PERIODIC)
-        
+
         if is_cached:
             storage = OpenClCodeGenerator.default_keywords['local']
         else:
             storage = OpenClCodeGenerator.default_keywords['global']
-       
+
         if is_inplace and is_conservative and not is_cached:
             raise ValueError('Inplace conservetive stretching requires caching.')
 
         name = DirectionalStretchingKernel.codegen_name(ftype,is_cached,is_inplace,
                 direction,formulation)
 
-        kernel_reqs = self.build_requirements(typegen, dim, ftype, order, is_cached, 
+        kernel_reqs = self.build_requirements(typegen, dim, ftype, order, is_cached,
                 time_integrator, direction, boundary, symbolic_mode, formulation, storage,
                 is_periodic, is_inplace)
 
         kernel_args = self.gen_kernel_arguments(typegen, dim, ftype, kernel_reqs, is_cached,
                 is_inplace, local_size_known)
-        
+
         super(DirectionalStretchingKernel,self).__init__(
                 name=name,
                 typegen=typegen,
-                work_dim=dim, 
+                work_dim=dim,
                 kernel_args=kernel_args,
                 known_vars=known_vars,
                 vec_type_hint=ftype,
                 symbolic_mode=symbolic_mode)
-        
+
         self.update_requirements(kernel_reqs)
 
         self.order            = order
@@ -118,7 +119,7 @@ class DirectionalStretchingKernel(KernelCodeGenerator):
         self.is_inplace       = is_inplace
 
         self.gencode()
-    
+
     #return minimal number of ghosts required on the grid
     # for input velocity and vorticity.
     @staticmethod
@@ -130,7 +131,7 @@ class DirectionalStretchingKernel(KernelCodeGenerator):
             pass
         elif lboundary in [BoundaryCondition.NONE, BoundaryCondition.PERIODIC]:
             assert order%2==0
-            stencil_ghost = order/2
+            stencil_ghost = order//2
             if formulation == StretchingFormulation.CONSERVATIVE:
                 u_ghosts[direction] = time_integrator.stages * stencil_ghost
             else:
@@ -144,11 +145,11 @@ class DirectionalStretchingKernel(KernelCodeGenerator):
             # (a*grad(u) + (1-a)*grad(u)^T) w
             w_ghosts = [0]*3
         return np.asarray(u_ghosts), np.asarray(w_ghosts)
-    
+
     @staticmethod
     def min_wg_size(formulation, order, time_integrator):
         ghosts = [1]*3
-        stencil_ghost = order/2
+        stencil_ghost = order//2
         if formulation == StretchingFormulation.CONSERVATIVE:
             ghosts[0] = time_integrator.stages * stencil_ghost
         else:
@@ -163,9 +164,9 @@ class DirectionalStretchingKernel(KernelCodeGenerator):
         work_size       = np.asarray(work_size)
         work_load       = np.asarray(work_load)
         assert work_load[0] == 1
-        
+
         global_size = work_size.copy()
-        global_size = ((global_size+work_load-1)/work_load)
+        global_size = ((global_size+work_load-1)//work_load)
         return global_size
 
     #return global_work_size from effective work_size and given local_work_size
@@ -175,7 +176,7 @@ class DirectionalStretchingKernel(KernelCodeGenerator):
         work_load       = np.asarray(work_load)
         local_work_size = np.asarray(local_work_size)
         v_min_ghosts, w_min_ghosts = self._min_ghosts()
-        
+
         assert (local_work_size[1] == 1) and (local_work_size[2]==1)
         assert (local_work_size > 2*w_min_ghosts).all()
 
@@ -192,7 +193,7 @@ class DirectionalStretchingKernel(KernelCodeGenerator):
 
         max_global_size = self.get_max_global_size(work_size, work_load)
         max_global_size[0] = local_work_size[0]
-        global_size = ((max_global_size+local_work_size-1)/local_work_size) * local_work_size
+        global_size = ((max_global_size+local_work_size-1)//local_work_size) * local_work_size
 
         return global_size
 
@@ -206,11 +207,11 @@ class DirectionalStretchingKernel(KernelCodeGenerator):
         is_periodic     = self.is_periodic
         is_conservative = self.is_conservative
         flt_bytes       = self.typegen.FLT_BYTES[ftype]
-        
+
         local_work_size = np.asarray(local_work_size)
-       
+
         sc,dc = 0,0
-        if is_cached: 
+        if is_cached:
             count = dim*local_work_size[0]
             if is_conservative:
                 count += local_work_size[0]
@@ -224,30 +225,30 @@ class DirectionalStretchingKernel(KernelCodeGenerator):
         sc += 2*dim*(2*cache_ghosts)
         if self.is_periodic:
             sc += 2*dim*(2*cache_ghosts)
-        
+
         sc *= flt_bytes
         dc *= flt_bytes
         tc = sc+dc
-        
+
         return (sc,dc,tc)
-    
+
     def _cache_ghosts(self):
-        stencil_ghost = self.order/2
+        stencil_ghost = self.order//2
         if self.is_conservative:
             return self.time_integrator.stages * stencil_ghost
         else:
             return stencil_ghost
     def _min_ghosts(self):
-        return self.min_ghosts(self.boundary, self.formulation, 
+        return self.min_ghosts(self.boundary, self.formulation,
                 self.order, self.time_integrator, self.direction)
 
 
     def build_requirements(self,typegen,work_dim,ftype,order,is_cached,time_integrator,direction,
             boundary,force_symbolic,formulation,storage,is_periodic,is_inplace):
-        tg=typegen 
+        tg=typegen
         reqs = WriteOnceDict()
-        
-        compute_id = ComputeIndexFunction(typegen=typegen, dim=work_dim, itype='int', 
+
+        compute_id = ComputeIndexFunction(typegen=typegen, dim=work_dim, itype='int',
                 wrap=is_periodic)
         reqs['compute_id'] = compute_id
 
@@ -257,29 +258,29 @@ class DirectionalStretchingKernel(KernelCodeGenerator):
 
         mesh_info_struct = MeshInfoStruct(typegen=typegen, vsize=vsize)
         reqs['MeshInfoStruct'] = mesh_info_struct
-         
-        stretching_rhs = DirectionalStretchingRhsFunction(typegen=typegen, dim=work_dim, 
+
+        stretching_rhs = DirectionalStretchingRhsFunction(typegen=typegen, dim=work_dim,
                 ftype=ftype, cached=is_cached,
                 order=order, direction=direction, boundary=boundary,
                 formulation=formulation,
                 ptr_restrict=True, vectorize_u=False,
                 itype='int')
-   
+
         used_vars = RungeKuttaFunction._default_used_vars.copy()
         used_vars['y']='W'
         used_vars['step']='rk_step'
-        runge_kutta = RungeKuttaFunction(typegen=tg, ftype=ftype, 
+        runge_kutta = RungeKuttaFunction(typegen=tg, ftype=ftype,
                 method=time_integrator,
-                rhs=stretching_rhs, 
+                rhs=stretching_rhs,
                 used_vars=used_vars,
                 known_args=None)
         reqs['runge_kutta'] = runge_kutta
-            
+
         return reqs
-    
+
     def gen_kernel_arguments(self, typegen, work_dim, ftype, requirements,is_cached,is_inplace,
             local_size_known):
-        
+
         xyz = 'xyz'
         svelocity  = 'U'
         svorticity = 'W'
@@ -290,17 +291,17 @@ class DirectionalStretchingKernel(KernelCodeGenerator):
 
         _global = OpenClCodeGenerator.default_keywords['global']
         _local  = OpenClCodeGenerator.default_keywords['local']
-        for i in xrange(work_dim):
+        for i in range(work_dim):
             name = svelocity+xyz[i]
             kargs[name] = CodegenVariable(storage=_global,name=name,typegen=typegen,
                 ctype=ftype,ptr=True,ptr_restrict=True,const=True,add_impl_const=True)
 
-        for i in xrange(work_dim):
+        for i in range(work_dim):
             name = svorticity+xyz[i]+'_in'
             kargs[name] = CodegenVariable(storage=_global,name=name,typegen=typegen,
               ctype=ftype,ptr=True,ptr_restrict=(not is_inplace),const=True,add_impl_const=True)
-        
-        for i in xrange(work_dim):
+
+        for i in range(work_dim):
             name = svorticity+xyz[i]+'_out'
             kargs[name] = CodegenVariable(storage=_global,name=name,typegen=typegen,
               ctype=ftype,ptr=True,ptr_restrict=(not is_inplace),const=False,add_impl_const=True)
@@ -313,16 +314,16 @@ class DirectionalStretchingKernel(KernelCodeGenerator):
                         name='vorticity_mesh_info')
 
         if is_cached and not local_size_known:
-             kargs['buffer'] = CodegenVariable(storage=_local,ctype=ftype, 
-                     add_impl_const=True, name='buffer', ptr=True, ptr_restrict=True, 
+             kargs['buffer'] = CodegenVariable(storage=_local,ctype=ftype,
+                     add_impl_const=True, name='buffer', ptr=True, ptr_restrict=True,
                      typegen=typegen, nl=False)
 
         self.svorticity = svorticity
         self.svelocity  = svelocity
         self.xyz = xyz
-        
+
         return kargs
-    
+
     def gencode(self):
         s = self
         tg = s.typegen
@@ -344,7 +345,7 @@ class DirectionalStretchingKernel(KernelCodeGenerator):
 
         xyz = s.xyz
         vtype = tg.vtype(ftype,work_dim)
-       
+
         global_id     = s.vars['global_id']
         local_id      = s.vars['local_id']
         group_id      = s.vars['group_id']
@@ -358,7 +359,7 @@ class DirectionalStretchingKernel(KernelCodeGenerator):
         dt            = s.vars['dt']
         velocity_mesh_info  = s.vars['velocity_mesh_info']
         vorticity_mesh_info = s.vars['vorticity_mesh_info']
-        
+
         grid_size           = vorticity_mesh_info['local_mesh']['resolution'].view(
                 'grid_size', slice(None,dim))
         compute_grid_size   = vorticity_mesh_info['local_mesh']['compute_resolution'].view(
@@ -367,12 +368,12 @@ class DirectionalStretchingKernel(KernelCodeGenerator):
                 'compute_grid_ghosts', slice(0,dim), const=True)
         inv_dx              = vorticity_mesh_info['inv_dx'].view(
                 'inv_dx', slice(0,1), const=True)
-        s.update_vars(grid_size=grid_size, inv_dx=inv_dx, 
+        s.update_vars(grid_size=grid_size, inv_dx=inv_dx,
                 compute_grid_ghosts=compute_grid_ghosts, compute_grid_size=compute_grid_size)
 
         compute_index = self.reqs['compute_id']
         runge_kutta   = self.reqs['runge_kutta']
-        
+
         W          = CodegenVectorClBuiltin('W',ftype,dim,tg)
         U          = CodegenVectorClBuiltin('U',ftype,dim,tg)
 
@@ -382,10 +383,10 @@ class DirectionalStretchingKernel(KernelCodeGenerator):
         cache_ghosts = CodegenVariable('cache_ghosts','int',tg,
                 const=True,value=self._cache_ghosts())
         local_work = CodegenVariable('lwork','int',tg,const=True)
-        
+
         cached_vars = ArgDict()
         if is_cached:
-            for i in xrange(work_dim):
+            for i in range(work_dim):
                 Vi = self.svelocity+self.xyz[i]
                 if local_size_known:
                     Vic = CodegenArray(name=Vi+'c',dim=1,ctype=ftype,typegen=tg,
@@ -395,8 +396,8 @@ class DirectionalStretchingKernel(KernelCodeGenerator):
                     init = '{} + {}*{}'.format(buf(), i, local_size[0])
                     Vic = CodegenVariable(storage=storage,name=Vi+'c',ctype=ftype,typegen=tg,
                                 const=True, ptr_restrict=True,ptr=True,init=init)
-                cached_vars[Vi] = Vic 
-            
+                cached_vars[Vi] = Vic
+
             if is_conservative:
                 Wi = self.svorticity+self.xyz[direction]
                 if local_size_known:
@@ -407,7 +408,7 @@ class DirectionalStretchingKernel(KernelCodeGenerator):
                     init = '{} + {}*{}'.format(buf(), work_dim, local_size[0])
                     Wic = CodegenVariable(storage=storage,name=Wi+'c',ctype=ftype,typegen=tg,
                                     const=True, ptr_restrict=True,ptr=True,init=init)
-                cached_vars[Wi] = Wic 
+                cached_vars[Wi] = Wic
 
         _U   = self.svelocity
         size = cache_ghosts.value
@@ -423,7 +424,7 @@ class DirectionalStretchingKernel(KernelCodeGenerator):
             Wl  = CodegenArray(storage='__local',name=_W+'l',dim=1,ctype=vtype,typegen=tg,
                                 shape=(2*size,))
 
-            
+
         @contextlib.contextmanager
         def _work_iterate_(i):
             try:
@@ -442,12 +443,12 @@ class DirectionalStretchingKernel(KernelCodeGenerator):
                 s.append('#pragma unroll 4')
                 with s._for_('int {i}={fval}; {i}<{N}; {i}+={gsize}'.format(
                     i='kji'[i], fval=fval, gsize=gsize,N=N)) as ctx:
-                    
+
                     if i==0:
                         s.append('{} = {}+{};'.format(global_id[i], 'kji'[i], local_id[0]))
                     else:
                         s.append('{} = {}+{};'.format(global_id[i], 'kji'[i], ghosts))
-                    
+
                     if i==0:
                         active.declare(s, init='({} < {}+2*{})'.format(
                             global_id[0], compute_grid_size[0], cache_ghosts()))
@@ -458,13 +459,13 @@ class DirectionalStretchingKernel(KernelCodeGenerator):
                     yield ctx
             except:
                 raise
-        nested_loops = [_work_iterate_(i) for i in xrange(dim-1,-1,-1)]
-        
+        nested_loops = [_work_iterate_(i) for i in range(dim-1,-1,-1)]
+
         @contextlib.contextmanager
         def if_thread_active():
             with s._if_('{}'.format(active())):
                 yield
-        
+
         with s._kernel_():
             s.jumpline()
             with s._align_() as al:
@@ -472,26 +473,26 @@ class DirectionalStretchingKernel(KernelCodeGenerator):
                 global_size.declare(al,align=True,const=True)
                 local_size.declare(al,align=True,const=True)
             s.jumpline()
-            
+
             with s._align_() as al:
                 compute_grid_size.declare(al,const=True,align=True)
                 compute_grid_ghosts.declare(al,align=True)
                 grid_size.declare(al,align=True,const=True)
                 inv_dx.declare(al,align=True)
             s.jumpline()
-            
+
             with s._align_() as al:
                 cache_ghosts.declare(al,align=True)
                 local_work.declare(al,align=True,
                         init='{} - 2*{}'.format(local_size[0],cache_ghosts()))
             s.jumpline()
-            
+
             if is_cached:
                 with s._align_() as al:
-                    for varname,var in cached_vars.iteritems():
+                    for varname,var in cached_vars.items():
                         var.declare(al,align=True)
                 s.jumpline()
-        
+
             Ur.declare(s)
             Wr.declare(s)
             s.jumpline()
@@ -505,14 +506,14 @@ class DirectionalStretchingKernel(KernelCodeGenerator):
 
             global_id.declare(s,init=False)
             s.jumpline()
-                
-            with contextlib.nested(*nested_loops):
+
+            with nested(*nested_loops):
                 s.jumpline()
                 init = compute_index(idx=global_id, size=grid_size)
                 global_index.declare(s, init=init, const=True)
 
                 winit, uinit = '',''
-                for i in xrange(work_dim):
+                for i in range(work_dim):
                     Wi_in  = self.svorticity+self.xyz[i]+'_in'
                     Wi_out = self.svorticity+self.xyz[i]+'_out'
                     Ui = self.svelocity+self.xyz[i]
@@ -520,7 +521,7 @@ class DirectionalStretchingKernel(KernelCodeGenerator):
                     winit += self.args[Wi_in][global_index()] + ','
                 uinit='({}{})({})'.format(ftype, work_dim, uinit[:-1])
                 winit='({}{})({})'.format(ftype, work_dim, winit[:-1])
-                       
+
 
                 s.jumpline()
                 s.append('{} {},{};'.format(U.ctype,U(),W()))
@@ -552,7 +553,7 @@ class DirectionalStretchingKernel(KernelCodeGenerator):
                         with s._else_():
                             s.append('{} = {};'.format(U(), uinit))
                             s.append('{} = {};'.format(W(), winit))
-               
+
 
                 s.barrier(_local=True)
                 s.jumpline()
@@ -560,24 +561,24 @@ class DirectionalStretchingKernel(KernelCodeGenerator):
 
                 with if_thread_active():
                     if self.is_cached:
-                        for i in xrange(work_dim):
+                        for i in range(work_dim):
                             Ui = self.svelocity+self.xyz[i]
                             Uic = cached_vars[Ui]
                             code = '{} = {};'.format(Uic[local_id[0]],U[i])
                             s.append(code)
                         s.jumpline()
 
-                    
+
                     with s._if_('{} >= {}-2*{}'.format(local_id[0],local_size[0],
                         cache_ghosts())):
                         _id = '{}-{}+2*{}'.format(local_id[0],local_size[0],cache_ghosts())
                         s.append('{} = {};'.format(Ur[_id], U()))
                         s.append('{} = {};'.format(Wr[_id], W()))
-                
-                
+
+
                 s.barrier(_local=True)
                 s.jumpline()
-           
+
 
                 rk_args={'dt': dt,
                          'inv_dx': inv_dx,
@@ -585,7 +586,7 @@ class DirectionalStretchingKernel(KernelCodeGenerator):
                          'active': active,
                          'Lx'   : local_size[0],
                          'lidx' : local_id[0]}
-                
+
                 if is_periodic and (not is_cached):
                     base = CodegenVariable('base','int',typegen=tg,const=True)
                     base.declare(s,init='({}/{}) * {}'.format(global_index(),grid_size[0],
@@ -598,7 +599,7 @@ class DirectionalStretchingKernel(KernelCodeGenerator):
                 else:
                     rk_args['offset'] = local_id[0] if is_cached else global_index
 
-                for i in xrange(work_dim):
+                for i in range(work_dim):
                     Ui_name  = self.svelocity+xyz[i]
                     if is_cached:
                         Ui = cached_vars[Ui_name]
@@ -613,13 +614,13 @@ class DirectionalStretchingKernel(KernelCodeGenerator):
                     else:
                         Wd  = s.vars[Wd_name]
                     rk_args[Wd_name] = Wd
-                
+
                 call = runge_kutta(**rk_args)
                 code = '{} = {};'.format(W(), call)
                 s.append(code)
                 s.jumpline()
-                    
-                with if_thread_active():    
+
+                with if_thread_active():
                     if is_periodic:
                         cond = '({lid}>={ghosts}) && ({lid}<{L}-{ghosts}) && ({gidx}<{size})'
                         cond = cond.format(
@@ -629,12 +630,12 @@ class DirectionalStretchingKernel(KernelCodeGenerator):
                         cond = '({lid}>={ghosts}) && ({lid}<{L}-{ghosts})'.format(
                                 lid=local_id[0], ghosts=cache_ghosts(), L=local_size[0])
                     with s._if_(cond):
-                        for i in xrange(work_dim):
+                        for i in range(work_dim):
                             Wi_out = self.svorticity+self.xyz[i]+'_out'
                             Wi_out = s.vars[Wi_out]
                             code='{} = {};'.format(Wi_out[global_index()], W[i])
                             s.append(code)
-    
+
 
     def per_work_statistics(self):
         tg     = self.typegen
@@ -653,21 +654,21 @@ class DirectionalStretchingKernel(KernelCodeGenerator):
         stats.global_mem_byte_writes += dim*size
 
         return stats
-    
+
     if False:
         @staticmethod
         def autotune(cl_env, typegen, build_options,autotuner_config,
                     direction, time_integrator, formulation, discretization,
                     velocity, vorticity_in, vorticity_out,
                     velocity_mesh_info, vorticity_mesh_info):
-                
+
                 dir = direction
-                
+
                 if not isinstance(cl_env,OpenClEnvironment):
                     raise ValueError('cl_env is not an OpenClEnvironment.')
                 if not isinstance(typegen,OpenClTypeGen):
                     raise ValueError('typegen is not an OpenClTypeGen.')
-                
+
                 precision = typegen.dtype
                 ftype     = typegen.fbtype
 
@@ -686,7 +687,7 @@ class DirectionalStretchingKernel(KernelCodeGenerator):
                     raise ValueError('queue cannot be None.')
                 if typegen.platform!=platform or typegen.device!=device:
                     raise ValueError('platform or device mismatch.')
-                
+
                 if not isinstance(velocity,DiscreteScalarFieldView) or \
                         velocity.backend.kind != Backend.OPENCL:
                     raise ValueError('velocity is not a DiscreteScalarFieldView of kind OpenCL.')
@@ -706,7 +707,7 @@ class DirectionalStretchingKernel(KernelCodeGenerator):
                    (vorticity_in.nb_components  != dim) or \
                    (vorticity_out.nb_components != dim):
                     raise ValueError('Vector components mismatch with dim {}.'.format(dim))
-                
+
                 if not isinstance(time_integrator, ExplicitRungeKutta):
                     msg = 'Given time integrator is not an instance of ExplicitRungeKutta, '
                     msg+='got a {}.'.format(time_integrator.__class__)
@@ -716,7 +717,7 @@ class DirectionalStretchingKernel(KernelCodeGenerator):
                     msg = 'Unknown stretching formulation of type \'{}\', valid ones are {}.'
                     msg=msg.format(formulation.__class__, formulation.svalues())
                     raise TypeError(msg)
-                
+
                 if not isinstance(discretization, SpaceDiscretization):
                     msg='Discretization parameter is not an instance of SpaceDiscretization, '
                     msg+='but a {}.'.format(discretization.__class__)
@@ -749,18 +750,18 @@ class DirectionalStretchingKernel(KernelCodeGenerator):
                 v_rboundary  = velocity_mesh_info['local_mesh']['rboundary'].value[:dim]
                 v_ghosts     = velocity_mesh_info['ghosts'].value[:dim]
                 v_dx         = velocity_mesh_info['dx'].value[:dim]
-                
+
                 w_resolution = vorticity_mesh_info['local_mesh']['compute_resolution'].value[:dim]
                 w_lboundary  = vorticity_mesh_info['local_mesh']['lboundary'].value[:dim]
                 w_rboundary  = vorticity_mesh_info['local_mesh']['rboundary'].value[:dim]
                 w_ghosts     = vorticity_mesh_info['ghosts'].value[:dim]
                 w_dx         = vorticity_mesh_info['dx'].value[:dim]
-                
+
                 is_multi_scale = (v_resolution != w_resolution).any()
                 is_inplace = (vorticity_in.data[0].data == vorticity_out.data[0].data) \
                           or (vorticity_in.data[1].data == vorticity_out.data[1].data) \
                           or (vorticity_in.data[2].data == vorticity_out.data[2].data)
-                
+
 
                 w_boundary = (w_lboundary[dir], w_rboundary[dir])
                 v_boundary = (v_lboundary[dir], v_rboundary[dir])
@@ -772,7 +773,7 @@ class DirectionalStretchingKernel(KernelCodeGenerator):
 
                 (min_v_ghosts, min_w_ghosts) =  DirectionalStretchingKernel.min_ghosts(
                         boundary, formulation, order, time_integrator, direction)
-                
+
                 assert (min_v_ghosts>=0).all()
                 assert (min_w_ghosts>=0).all()
 
@@ -782,7 +783,7 @@ class DirectionalStretchingKernel(KernelCodeGenerator):
                     msg+='are present in the grid.'
                     msg=msg.format(min_v_ghosts, v_ghosts)
                     raise RuntimeError(msg)
-                
+
                 if (w_ghosts < min_w_ghosts).any():
                     msg= 'Given boundary condition implies minimum ghosts numbers to be at least {} '
                     msg+='in current direction for position but only {} ghosts '
@@ -795,10 +796,10 @@ class DirectionalStretchingKernel(KernelCodeGenerator):
                     msg+='got {} and {} and multiscale has not been implemented yet.'
                     msg=msg.format(v_resolution,w_resolution)
                     raise RuntimeError(msg)
-                
-                min_wg_size = DirectionalStretchingKernel.min_wg_size(formulation, order, 
+
+                min_wg_size = DirectionalStretchingKernel.min_wg_size(formulation, order,
                         time_integrator)
-                
+
                 # work size is the resolution without ghosts
                 compute_resolution = w_resolution
                 work_size = np.ones(3,dtype=np.int32)
@@ -809,13 +810,13 @@ class DirectionalStretchingKernel(KernelCodeGenerator):
                 dump_src      = __KERNEL_DEBUG__
                 symbolic_mode = False #__KERNEL_DEBUG__
 
-                min_local_size  = np.maximum( min_wg_size, 
+                min_local_size  = np.maximum( min_wg_size,
                                               [clCharacterize.get_simd_group_size(device,1),1,1])
 
                 caching_options = [True]
                 if formulation != StretchingFormulation.CONSERVATIVE:
                     caching_options.append(False)
-                
+
                 autotuner_flag = autotuner_config.autotuner_flag
                 if (autotuner_flag == AutotunerFlags.ESTIMATE):
                     max_workitem_workload = (1,1,1)
@@ -828,9 +829,9 @@ class DirectionalStretchingKernel(KernelCodeGenerator):
 
                 ## kernel generator
                 def kernel_generator(work_size, work_load, local_work_size,
-                        kernel_args, 
+                        kernel_args,
                         extra_parameters,
-                        force_verbose  = False, 
+                        force_verbose  = False,
                         force_debug    = False,
                         return_codegen = False,
                         **kargs):
@@ -843,7 +844,7 @@ class DirectionalStretchingKernel(KernelCodeGenerator):
                                 vorticity_mesh_info=vorticity_mesh_info,
                                 local_size=local_work_size[:dim]
                             )
-                            
+
                         ## CodeGenerator
                         cached=True
                         codegen = DirectionalStretchingKernel(typegen=typegen,
@@ -852,23 +853,23 @@ class DirectionalStretchingKernel(KernelCodeGenerator):
                             is_inplace=is_inplace,
                             symbolic_mode=symbolic_mode, ftype=ftype,
                             known_vars=known_vars, **extra_parameters)
-                        
-                        global_size = codegen.get_global_size(work_size=work_size, 
+
+                        global_size = codegen.get_global_size(work_size=work_size,
                                 work_load=work_load, local_work_size=local_work_size)
-                        
+
                         usable_cache_bytes_per_wg = clCharacterize.usable_local_mem_size(device)
                         if codegen.required_workgroup_cache_size(local_work_size[:dim])[2] > \
                                 usable_cache_bytes_per_wg:
                             raise KernelGenerationError('Insufficient device cache.')
-                        
+
                         ## generate source code and build kernel
                         src        = codegen.__str__()
                         src_hash   = hashlib.sha512(src).hexdigest()
-                        prg        = cl_env.build_raw_src(src, build_options, 
+                        prg        = cl_env.build_raw_src(src, build_options,
                                         kernel_name=codegen.name,
                                         force_verbose=force_verbose, force_debug=force_debug)
                         kernel     = prg.all_kernels()[0]
-                        
+
                         if return_codegen:
                             return (codegen, kernel, kernel_args, src_hash, global_size)
                         else:
@@ -877,14 +878,14 @@ class DirectionalStretchingKernel(KernelCodeGenerator):
                 ## Kernel Autotuner
                 name = DirectionalStretchingKernel.codegen_name(ftype, False, is_inplace,
                         direction, formulation)
-                
+
                 autotuner = KernelAutotuner(name=name, work_dim=dim, local_work_dim=1,
                         build_opts=build_options, autotuner_config=autotuner_config)
                 autotuner.add_filter('1d_shape_min', autotuner.min_workitems_per_direction)
                 autotuner.register_extra_parameter('is_cached', caching_options)
                 autotuner.enable_variable_workitem_workload(
                         max_workitem_workload=max_workitem_workload)
-                
+
                 dt=1.0
                 kernel_args = [precision(dt)]
                 kernel_args += velocity.buffers + vorticity_in.buffers+ vorticity_out.buffers
@@ -892,39 +893,39 @@ class DirectionalStretchingKernel(KernelCodeGenerator):
                 kernel_args_mapping = {
                         'dt': (0,precision),
                         'velocity':      (slice(1+0*dim, 1+1*dim, 1), cl.MemoryObjectHolder),
-                        'vorticity_in':  (slice(1+1*dim, 1+2*dim, 1), cl.MemoryObjectHolder), 
-                        'vorticity_out': (slice(1+2*dim, 1+3*dim, 1), cl.MemoryObjectHolder), 
+                        'vorticity_in':  (slice(1+1*dim, 1+2*dim, 1), cl.MemoryObjectHolder),
+                        'vorticity_out': (slice(1+2*dim, 1+3*dim, 1), cl.MemoryObjectHolder),
                     }
-                
+
                 (gwi, lwi, stats, work_load, extra_params) = autotuner.bench(typegen=typegen,
-                        work_size=work_size, kernel_args=kernel_args, 
+                        work_size=work_size, kernel_args=kernel_args,
                         kernel_generator=kernel_generator,
-                        dump_src=dump_src, 
+                        dump_src=dump_src,
                         min_local_size=min_local_size,
                         get_max_global_size=DirectionalStretchingKernel.get_max_global_size)
 
                 (codegen, kernel, kernel_args, src_hash, global_size) = kernel_generator(
-                        work_size=work_size, work_load=work_load, 
-                        local_work_size=lwi, kernel_args=kernel_args, 
+                        work_size=work_size, work_load=work_load,
+                        local_work_size=lwi, kernel_args=kernel_args,
                         extra_parameters=extra_params,
                         force_verbose=False,force_debug=False,
                         return_codegen=True)
-                
+
                 kernel_launcher = None#OpenClKernelLauncher(kernel, queue, list(gwi), list(lwi))
-                
+
                 total_work = work_size[0]*work_size[1]*work_size[2]
                 per_work_statistics = codegen.per_work_statistics()
 
                 cache_info = codegen.required_workgroup_cache_size(lwi)
 
-                return (kernel_launcher, kernel_args, kernel_args_mapping, 
+                return (kernel_launcher, kernel_args, kernel_args_mapping,
                         total_work, per_work_statistics, cache_info)
-                
+
 
 if __name__ == '__main__':
     from hysop.backend.device.opencl import cl
     from hysop.backend.device.codegen.base.test import _test_mesh_info, _test_typegen
-    
+
     dim=3
     ghosts=(0,0,0)
     v_resolution=(256,128,64)
@@ -934,9 +935,9 @@ if __name__ == '__main__':
     tg = _test_typegen('float')
     (_,w_mesh_info) = _test_mesh_info('vorticity_mesh_info',tg,dim,ghosts,w_resolution)
     (_,v_mesh_info) = _test_mesh_info('velocity_mesh_info',tg,dim,ghosts,v_resolution)
-    
+
     dsk = DirectionalStretchingKernel(typegen=tg, ftype=tg.fbtype,
-        order=4, dim=dim, direction=0, 
+        order=4, dim=dim, direction=0,
         formulation=StretchingFormulation.CONSERVATIVE,
         time_integrator=ExplicitRungeKutta('RK4'),
         is_cached=True,
diff --git a/hysop/backend/device/codegen/kernels/empty.py b/hysop/backend/device/codegen/kernels/empty.py
index d9aa561028c8f74999947134255cf32b2942a954..bdd008accde3489d8249391a71a06e43fd0e1e93 100644
--- a/hysop/backend/device/codegen/kernels/empty.py
+++ b/hysop/backend/device/codegen/kernels/empty.py
@@ -5,15 +5,15 @@ from hysop.backend.device.opencl.opencl_types          import OpenClTypeGen
 from hysop.backend.device.codegen.base.utils          import ArgDict
 
 class EmptyKernel(KernelCodeGenerator):
-    
+
     def __init__(self, typegen, known_vars=None, symbolic_mode=True):
-            
+
         kargs = ArgDict()
         kargs['grid_size'] = CodegenVectorClBuiltin('N','int',3,typegen,symbolic_mode=symbolic_mode)
 
         super(EmptyKernel,self).__init__(name='empty_kernel', typegen=typegen,
                 work_dim=3, kernel_args=kargs, known_vars=known_vars)
-        
+
         self.gencode()
 
     def gencode(self):
@@ -25,26 +25,26 @@ class EmptyKernel(KernelCodeGenerator):
             s.check_workitem_bounds('grid_size')
 
 if __name__ == '__main__':
-        
+
     from hysop.backend.device.codegen.base.test import _test_typegen
     typegen = _test_typegen('float')
 
     ek = EmptyKernel(typegen)
-    print ek
-    print
+    print(ek)
+    print()
 
     known_vars = dict(global_size=(1024,1024,1024),
                       local_size=(8,8,8),
                       grid_size=(1023,1023,1023))
     ek = EmptyKernel(typegen, known_vars, symbolic_mode=True)
-    print ek
-    print
+    print(ek)
+    print()
 
     known_vars = dict(global_size=(1024,1024,1024),
                       local_size=(8,8,8),
                       grid_size=(1023,1023,1023))
     ek = EmptyKernel(typegen, known_vars, symbolic_mode=False)
-    print ek
-    print
+    print(ek)
+    print()
 
     ek.edit()
diff --git a/hysop/backend/device/codegen/kernels/stretching.py b/hysop/backend/device/codegen/kernels/stretching.py
index 2bcec67ecdcba4e546354077ed2ce41f42c36d5b..901c03d136165f89d3d42bf077cae44979933dd6 100644
--- a/hysop/backend/device/codegen/kernels/stretching.py
+++ b/hysop/backend/device/codegen/kernels/stretching.py
@@ -18,34 +18,34 @@ class CachedStretchingKernel(KernelCodeGenerator):
     @staticmethod
     def codegen_name(ftype,work_dim):
         return 'cached_stretching_{}_{}d'.format(ftype,work_dim)
-    
-    def __init__(self, typegen, dim, 
-                       device,context, 
+
+    def __init__(self, typegen, dim,
+                       device,context,
                        order=2,
                        ftype=None,
                        known_vars = None,
                        symbolic_mode=True):
-        
+
         cached = True
         ftype = ftype if ftype is not None else typegen.fbtype
-        
+
         work_dim=3
         kernel_reqs = self.build_requirements(typegen=typegen,device=device,context=context,
                 work_dim=work_dim, order=order, cached=cached)
         kernel_args = self.gen_kernel_arguments(typegen, work_dim, ftype, kernel_reqs)
-        
+
         name = CachedStretchingKernel.codegen_name(ftype, dim)
         super(CachedStretchingKernel,self).__init__(
                 name=name,
                 typegen=typegen,
-                work_dim=work_dim, 
+                work_dim=work_dim,
                 kernel_args=kernel_args,
                 known_vars=known_vars,
                 device=device, context=context,
                 vec_type_hint=ftype)
 
         self.update_requirements(kernel_reqs)
-        
+
         self.order = order
         self.ftype = ftype
         self.dim = dim
@@ -56,10 +56,10 @@ class CachedStretchingKernel(KernelCodeGenerator):
         order = np.asarray([self.order]*self.work_dim) if np.isscalar(self.order) else self.order
         return reduce(operator.mul, local_size+order, 1)*self.typegen.FLT_BYTES[self.ftype]
 
-    
+
     def build_requirements(self,typegen,device,context,work_dim,order,cached):
         reqs = WriteOnceDict()
-        
+
         compute_id = ComputeIndexFunction(typegen=typegen, dim=work_dim, itype='int', wrap=False)
         reqs['compute_id'] = compute_id
 
@@ -80,7 +80,7 @@ class CachedStretchingKernel(KernelCodeGenerator):
         fbtype   = typegen.fbtype
         _global = OpenClCodeGenerator.default_keywords['global']
         _local  = OpenClCodeGenerator.default_keywords['local']
-        
+
         xyz = ['x','y','z']
         svelocity  = 'V'
         svorticity = 'W'
@@ -88,11 +88,11 @@ class CachedStretchingKernel(KernelCodeGenerator):
         kargs = ArgDict()
         kargs['dt'] = CodegenVariable(ctype=fbtype,name='dt',typegen=typegen,add_impl_const=True,nl=True)
 
-        for i in xrange(work_dim):
+        for i in range(work_dim):
             name = svorticity+xyz[i]
             kargs[name] = CodegenVariable(storage=_global,name=name,typegen=typegen,
                 ctype=ftype,ptr=True)
-        for i in xrange(work_dim):
+        for i in range(work_dim):
             name = svelocity+xyz[i]
             kargs[name] = CodegenVariable(storage=_global,name=name,ctype=ftype,typegen=typegen,ptr=True,const=True)
 
@@ -102,7 +102,7 @@ class CachedStretchingKernel(KernelCodeGenerator):
         self.svorticity = svorticity
         self.svelocity  = svelocity
         self.xyz = xyz
-        
+
         return kargs
 
 
@@ -112,7 +112,7 @@ class CachedStretchingKernel(KernelCodeGenerator):
         fbtype   = tg.fbtype
         work_dim = self.work_dim
         dim      = self.dim
-       
+
         global_id     = s.vars['global_id']
         local_id      = s.vars['local_id']
         global_index  = s.vars['global_index']
@@ -144,16 +144,16 @@ class CachedStretchingKernel(KernelCodeGenerator):
             s.check_workitem_bounds(grid_size, compact=False)
             s.jumpline()
             s.append(inv_dx.declare(init=mesh_info['inv_dx'][:3]))
-            with s._block_(): 
+            with s._block_():
                 with s._align_() as al:
                     al.append(global_index.declare(const=True, init=compute_index(idx=global_id, size=grid_size),align=True))
                     winit = ''
-                    for i in xrange(work_dim):
+                    for i in range(work_dim):
                         Wi = self.svorticity+self.xyz[i]
                         winit += self.vars[Wi][global_index()] + ','
                     winit='({}{})({})'.format(fbtype, work_dim, winit[:-1])
                     al.append(W.declare(init=winit,align=True))
-                for i in xrange(work_dim):
+                for i in range(work_dim):
                     Wi = self.svorticity+self.xyz[i]
                     Wi = self.vars[Wi]
                     Vi = self.svelocity+self.xyz[i]
@@ -168,7 +168,7 @@ if __name__ == '__main__':
 
         import pyopencl as cl
         from hysop.backend.device.codegen.base.test import _test_typegen
-        
+
         devices  = []
         contexts = {}
         for plat in cl.get_platforms():
@@ -178,9 +178,9 @@ if __name__ == '__main__':
             contexts[dev] = ctx
 
         tg = _test_typegen('float', 'dec')
-        for dev,ctx in contexts.iteritems():
+        for dev,ctx in contexts.items():
             ek = CachedStretchingKernel(typegen=tg, context=ctx, device=dev,
-                    order=16, dim=1 ,ftype=tg.fbtype, 
+                    order=16, dim=1 ,ftype=tg.fbtype,
                     known_vars=dict(local_size=(1024,1,1)))
             ek.edit()
             ek.test_compile()
diff --git a/hysop/backend/device/codegen/kernels/tests/test_codegen_directional_advection.py b/hysop/backend/device/codegen/kernels/tests/test_codegen_directional_advection.py
index 55ff2a53e6491f469afd5e6851fa586c2ad5788c..a4bd40c1605a6a3c9dafc20733cffbdc471dd40b 100644
--- a/hysop/backend/device/codegen/kernels/tests/test_codegen_directional_advection.py
+++ b/hysop/backend/device/codegen/kernels/tests/test_codegen_directional_advection.py
@@ -15,7 +15,7 @@ from hysop.backend.device.codegen.kernels.directional_advection import \
 from hysop.numerics.odesolvers.runge_kutta import ExplicitRungeKutta
 
 class TestDirectionalAdvection(object):
-    
+
     @classmethod
     def setup_class(cls,
             enable_extra_tests=__ENABLE_LONG_TESTS__,
@@ -27,16 +27,16 @@ class TestDirectionalAdvection(object):
 
         queue = cl.CommandQueue(typegen.context)
         ctx   = typegen.context
-        
+
         max_dim = 3
 
         compute_grid_size  = npw.asintarray([13,7,23]) # X Y Z
         compute_grid_shape = compute_grid_size[::-1]
-        
+
         dt     = dtype(0.1)
         dx     = dtype( 1.0 / (compute_grid_size[0]-1) )
         inv_dx = dtype(1.0/dx)
-        
+
         umax = dtype(+1.0)
         umin = dtype(-1.0)
         uinf = dtype(1.0)
@@ -55,12 +55,12 @@ class TestDirectionalAdvection(object):
         P_grid_size    = compute_grid_size + 2*P_ghosts
         P_grid_shape   = P_grid_size[::-1]
         P_grid_bytes   = prod(P_grid_size) * dt.itemsize
-        
+
         mesh_infos = {}
-        for dim in xrange(1, max_dim+1):
-            (A, V_mesh_info) = _test_mesh_info('V_mesh_info', typegen, 
+        for dim in range(1, max_dim+1):
+            (A, V_mesh_info) = _test_mesh_info('V_mesh_info', typegen,
                     dim, V_ghosts[:dim], V_grid_size[:dim])
-            (B, P_mesh_info) = _test_mesh_info('P_mesh_info', typegen, 
+            (B, P_mesh_info) = _test_mesh_info('P_mesh_info', typegen,
                     dim, P_ghosts[:dim], P_grid_size[:dim])
             if dim==1:
                 assert A['dx'][0] == dx
@@ -70,8 +70,8 @@ class TestDirectionalAdvection(object):
                 assert B['dx'][0][0] == dx
             mesh_infos[dim] = { 'P_mesh_info': P_mesh_info,
                                 'V_mesh_info': V_mesh_info }
-        
-        print '''\
+
+        print('''\
         Compute Grid:
             base size:  {}
         Velocity:
@@ -88,8 +88,8 @@ class TestDirectionalAdvection(object):
             bytes:      {}
         '''.format(compute_grid_size,
                 V_min_ghosts, V_extra_ghosts, V_ghosts, V_grid_size, bytes2str(V_grid_bytes),
-                P_min_ghosts, P_extra_ghosts, P_ghosts, P_grid_size, bytes2str(P_grid_bytes))
-        
+                P_min_ghosts, P_extra_ghosts, P_ghosts, P_grid_size, bytes2str(P_grid_bytes)))
+
         mf = cl.mem_flags
 
         host_buffers_init = {
@@ -100,19 +100,19 @@ class TestDirectionalAdvection(object):
         }
 
         device_buffers = {
-            'V': cl.Buffer(ctx, flags=mf.READ_ONLY  | mf.COPY_HOST_PTR, 
+            'V': cl.Buffer(ctx, flags=mf.READ_ONLY  | mf.COPY_HOST_PTR,
                         hostbuf=host_buffers_init['V']),
-            'P': cl.Buffer(ctx, flags=mf.WRITE_ONLY  | mf.COPY_HOST_PTR, 
+            'P': cl.Buffer(ctx, flags=mf.WRITE_ONLY  | mf.COPY_HOST_PTR,
                         hostbuf=host_buffers_init['P']),
-            'dbg0': cl.Buffer(ctx, flags=mf.READ_WRITE | mf.COPY_HOST_PTR, 
+            'dbg0': cl.Buffer(ctx, flags=mf.READ_WRITE | mf.COPY_HOST_PTR,
                         hostbuf=host_buffers_init['dbg0']),
-            'dbg1': cl.Buffer(ctx, flags=mf.READ_WRITE | mf.COPY_HOST_PTR, 
+            'dbg1': cl.Buffer(ctx, flags=mf.READ_WRITE | mf.COPY_HOST_PTR,
                         hostbuf=host_buffers_init['dbg1']),
         }
 
         host_buffers_reference = copy.deepcopy(host_buffers_init)
         host_buffers_gpu       = copy.deepcopy(host_buffers_init)
-        
+
         Lx = min(typegen.device.max_work_item_sizes[0], typegen.device.max_work_group_size)
         Lx = min(Lx, compute_grid_size[0])
 
@@ -131,41 +131,41 @@ class TestDirectionalAdvection(object):
         cls.V_ghosts       = V_ghosts
         cls.V_grid_size    = V_grid_size
         cls.V_grid_shape   = V_grid_shape
-        
+
         cls.P_min_ghosts   = P_min_ghosts
         cls.P_extra_ghosts = P_extra_ghosts
         cls.P_ghosts       = P_ghosts
         cls.P_grid_size    = P_grid_size
         cls.P_grid_shape   = P_grid_shape
-        
+
         cls.mesh_infos = mesh_infos
 
         cls.host_buffers_init      = host_buffers_init
         cls.host_buffers_reference = host_buffers_reference
         cls.host_buffers_gpu       = host_buffers_gpu
         cls.device_buffers         = device_buffers
-        
+
         cls.local_work_size = local_work_size
         cls.work_load       = work_load
 
         cls.dx, cls.inv_dx, cls.dt = dx, inv_dx, dt
         cls.umin, cls.umax, cls.uinf = umin, umax, uinf
-        
+
         cls.enable_extra_tests = enable_extra_tests
         cls.enable_error_plots = enable_error_plots
         cls.enable_debug_logs = enable_debug_logs
-        
+
     def P_view(self, dim):
         compute_grid_size = self.compute_grid_size
-        P_min_ghosts, P_extra_ghosts, P_ghosts = (self.P_min_ghosts, 
-                                                  self.P_extra_ghosts, 
+        P_min_ghosts, P_extra_ghosts, P_ghosts = (self.P_min_ghosts,
+                                                  self.P_extra_ghosts,
                                                   self.P_ghosts)
         field_view = [] # view of the computational grid with ghosts in the full grid
         cg_view = []    # view of the computational grid without ghosts in the field_view subgrid
-        for i in xrange(self.max_dim):
+        for i in range(self.max_dim):
             j = self.max_dim - i - 1
             if j < dim:
-                field_sl = slice(P_extra_ghosts[j] - P_min_ghosts[j], 
+                field_sl = slice(P_extra_ghosts[j] - P_min_ghosts[j],
                                  compute_grid_size[j]+P_min_ghosts[j]+P_extra_ghosts[j])
                 cg_sl = slice(P_min_ghosts[j], compute_grid_size[j]+P_min_ghosts[j])
                 field_view.append(field_sl)
@@ -173,19 +173,19 @@ class TestDirectionalAdvection(object):
             else:
                 field_view.append(P_ghosts[j])
         return tuple(field_view), tuple(cg_view)
-    
+
     def V_view(self, dim):
         compute_grid_size = self.compute_grid_size
-        V_min_ghosts, V_extra_ghosts, V_ghosts = (self.V_min_ghosts, 
-                                                  self.V_extra_ghosts, 
+        V_min_ghosts, V_extra_ghosts, V_ghosts = (self.V_min_ghosts,
+                                                  self.V_extra_ghosts,
                                                   self.V_ghosts)
         field_view = [] # view of the computational grid with ghosts in the full grid
-        cg_view = []    # view of the computational grid without ghosts in the field_view 
+        cg_view = []    # view of the computational grid without ghosts in the field_view
                         # subgrid
-        for i in xrange(self.max_dim):
+        for i in range(self.max_dim):
             j = self.max_dim - i - 1
             if j < dim:
-                field_sl = slice(V_ghosts[j] - V_min_ghosts[j], 
+                field_sl = slice(V_ghosts[j] - V_min_ghosts[j],
                                  compute_grid_size[j]+V_min_ghosts[j]+V_ghosts[j])
                 cg_sl = slice(V_min_ghosts[j], compute_grid_size[j]+V_min_ghosts[j])
                 field_view.append(field_sl)
@@ -197,7 +197,7 @@ class TestDirectionalAdvection(object):
     @classmethod
     def teardown_class(cls):
         pass
-    
+
     def setup_method(self, method):
         pass
 
@@ -214,13 +214,13 @@ class TestDirectionalAdvection(object):
         compute_grid_size = self.compute_grid_size
         compute_grid_shape = self.compute_grid_shape
         enable_debug_logs = self.enable_debug_logs
-        
+
         P_field_view, P_view_cg = self.P_view(dim=3)
         V_field_view, V_view_cg = self.V_view(dim=3)
-        
+
         host_init_buffers      = self.host_buffers_init
         host_buffers_reference = self.host_buffers_reference
-    
+
         velocity = host_init_buffers['V'][V_field_view]
         position = host_init_buffers['P'][P_field_view]
 
@@ -228,12 +228,12 @@ class TestDirectionalAdvection(object):
         pos[...] = npw.arange(compute_grid_size[0])[None,None,:]
         pos *= dx
         if enable_debug_logs:
-            print 'TIME STEP (DT):'
-            print dt
-            print 'INITIAL REFERENCE COMPUTE POSITION LINE:'
-            print pos[0,0]
-            print 'COMPUTE REFERENCE GRID VELOCITY:'
-            print velocity[V_view_cg][0,0]
+            print('TIME STEP (DT):')
+            print(dt)
+            print('INITIAL REFERENCE COMPUTE POSITION LINE:')
+            print(pos[0,0])
+            print('COMPUTE REFERENCE GRID VELOCITY:')
+            print(velocity[V_view_cg][0,0])
 
         is_periodic = False
 
@@ -251,12 +251,12 @@ class TestDirectionalAdvection(object):
 
             Vl = npw.empty_like(X)
             Vr = npw.empty_like(X)
-            for i in xrange(*V_view_cg[0].indices(velocity.shape[0])):
-                for j in xrange(*V_view_cg[1].indices(velocity.shape[1])):
+            for i in range(*V_view_cg[0].indices(velocity.shape[0])):
+                for j in range(*V_view_cg[1].indices(velocity.shape[1])):
                     Vl[i,j,:] = velocity[i,j,lidx[i,j,:]]
                     Vr[i,j,:] = velocity[i,j,ridx[i,j,:]]
             return Vl + alpha*(Vr-Vl)
-        
+
         if rk_scheme.name() == 'Euler':
             pos += velocity[V_view_cg]*dt
         elif rk_scheme.name() == 'RK2':
@@ -280,13 +280,13 @@ class TestDirectionalAdvection(object):
         else:
             msg = 'Unknown Runge-Kutta scheme {}.'.format(rk_scheme)
             raise ValueError(msg)
-            
+
         if enable_debug_logs:
-            print 'FINAL REFERENCE COMPUTE POSITION LINE'
-            print pos[0,0]
-        
+            print('FINAL REFERENCE COMPUTE POSITION LINE')
+            print(pos[0,0])
+
         host_buffers_reference['P'][P_field_view] = pos
-    
+
     def _do_compute_gpu_and_check(self, rk_scheme, boundary, cached, nparticles, work_dim):
 
         msg =  '\nTesting {}directional {}d advection with {} scheme and {} boundaries, '
@@ -296,7 +296,7 @@ class TestDirectionalAdvection(object):
                     rk_scheme.name(),
                     str(boundary).lower(),
                     nparticles)
-        print msg
+        print(msg)
         grid_size       = self.compute_grid_size[:work_dim]
         grid_shape      = self.compute_grid_shape[:work_dim]
 
@@ -305,10 +305,10 @@ class TestDirectionalAdvection(object):
         local_work_size = self.local_work_size[:work_dim]
         lwsl = work_load * local_work_size
         global_work_size = ((work_size+lwsl-1)//lwsl)*lwsl
-        
+
         typegen       = self.typegen
         ndim          = upper_pow2_or_3(work_dim)
-        strides_dtype = typegen.uintn(ndim) 
+        strides_dtype = typegen.uintn(ndim)
         offset_dtype  = npw.uint64
         def compute_offset(base, slc, work_dim):
             view = base[slc]
@@ -324,10 +324,10 @@ class TestDirectionalAdvection(object):
             view = base[slc]
             strides = view.strides
             return strides
-            
+
         def make_offset(offset, dtype):
             """Build an offset in number of elements instead of bytes."""
-            msg='Unaligned offset {} for dtype {} (itemsize={}).'.format(offset, 
+            msg='Unaligned offset {} for dtype {} (itemsize={}).'.format(offset,
                     dtype, dtype.itemsize)
             assert (offset % dtype.itemsize) == 0
             return offset_dtype(offset // dtype.itemsize)
@@ -336,10 +336,10 @@ class TestDirectionalAdvection(object):
                     dtype.__class__.__name__, dtype.itemsize)
             assert (npw.mod(bstrides, dtype.itemsize) == 0).all(), msg
             data = typegen.make_uintn(
-                vals=tuple(x//dtype.itemsize for x in bstrides[::-1]), 
+                vals=tuple(x//dtype.itemsize for x in bstrides[::-1]),
                 n=ndim, dval=0)
             return to_list(data)[:ndim]
-        
+
         host_init_buffers      = self.host_buffers_init
         host_buffers_reference = self.host_buffers_reference
         host_buffers_gpu       = self.host_buffers_gpu
@@ -354,20 +354,20 @@ class TestDirectionalAdvection(object):
         V_strides = compute_strides(V_host, V_field_view, work_dim)
         P_offset  = compute_offset(P_host, P_field_view, work_dim)
         V_offset  = compute_offset(V_host, V_field_view, work_dim)
-        
+
         known_vars = { 'global_size': global_work_size, 'local_size': local_work_size }
         known_vars.update(self.mesh_infos[work_dim])
-        known_vars['V_strides'] = make_strides(V_strides, V_host.dtype)  
-        known_vars['V_offset'] = make_offset(V_offset, V_host.dtype)  
-        known_vars['P_strides'] = make_strides(P_strides, P_host.dtype)  
+        known_vars['V_strides'] = make_strides(V_strides, V_host.dtype)
+        known_vars['V_offset'] = make_offset(V_offset, V_host.dtype)
+        known_vars['P_strides'] = make_strides(P_strides, P_host.dtype)
         known_vars['P_offset'] = make_offset(P_offset, P_host.dtype)
 
         min_ghosts = self.V_min_ghosts[0]
 
         dak = DirectionalAdvectionKernelGenerator(
-            typegen=self.typegen, 
+            typegen=self.typegen,
             ftype=self.typegen.fbtype,
-            work_dim=work_dim, 
+            work_dim=work_dim,
             rk_scheme=rk_scheme,
             is_cached=cached,
             vboundary=(boundary, boundary),
@@ -377,10 +377,10 @@ class TestDirectionalAdvection(object):
             debug_mode=False,
             known_vars=known_vars)
         #dak.edit()
-        
+
         (static_shared_bytes, dynamic_shared_bytes, total_bytes) = \
                 dak.required_workgroup_cache_size(local_work_size)
-        
+
         queue = self.queue
         kernel_args = [self.dt]
         kernel_args.append(device_buffers['V'])
@@ -388,54 +388,54 @@ class TestDirectionalAdvection(object):
         if (dynamic_shared_bytes != 0):
             shared_buffer = cl.LocalMemory(dynamic_shared_bytes)
             kernel_args.append(shared_buffer)
-    
-        print '\tGenerating and compiling Kernel...'
+
+        print('\tGenerating and compiling Kernel...')
         source = dak.__str__()
         prg = cl.Program(self.typegen.context, source)
         prg.build(devices=[self.typegen.device])
         kernel = prg.all_kernels()[0]
         kernel.set_args(*kernel_args)
-        
-        print '\tCPU => GPU'
+
+        print('\tCPU => GPU')
         variables = ('P','V',)
         views = (P_field_view, V_field_view,)
         for buf in variables:
             src = host_init_buffers[buf]
             dst = device_buffers[buf]
             cl.enqueue_copy(queue,dst,src)
-        
-        print '\tKernel execution <<<{},{}>>>'.format(global_work_size,local_work_size)
-        evt = cl.enqueue_nd_range_kernel(queue, kernel, 
+
+        print('\tKernel execution <<<{},{}>>>'.format(global_work_size,local_work_size))
+        evt = cl.enqueue_nd_range_kernel(queue, kernel,
                 global_work_size.tolist(), local_work_size.tolist())
         evt.wait()
-        
-        print '\tGPU => CPU'
+
+        print('\tGPU => CPU')
         for buf in variables:
             src = device_buffers[buf]
             dst = host_buffers_gpu[buf]
             cl.enqueue_copy(queue,dst,src)
 
-        print '\tSynchronize queue'
+        print('\tSynchronize queue')
         queue.flush()
         queue.finish()
 
-        buffers = [(varname,host_buffers_reference[varname],host_buffers_gpu[varname], view) 
+        buffers = [(varname,host_buffers_reference[varname],host_buffers_gpu[varname], view)
                         for varname,view in zip(variables,views)]
         self._cmp_buffers(buffers,dak,work_dim)
-    
+
     def _cmp_buffers(self,buffers,dak,work_dim):
         good = True
         err_buffers = []
 
         for (name,host,dev,view) in buffers:
             (l1,l2,linf) = self._distances(host,dev,view)
-            print '\t{} -> l1={}  l2={}  linf={}'.format(name,l1,l2,linf)
+            print('\t{} -> l1={}  l2={}  linf={}'.format(name,l1,l2,linf))
             if (linf>100*npw.finfo(host.dtype).eps):
                 err_buffers.append(name)
                 good = False
         if not good:
             msg = '\n[FAIL] Buffer comparisson failed for buffers {}.\n'.format(err_buffers)
-            print msg
+            print(msg)
             dak.edit()
             if self.enable_error_plots:
                 from matplotlib import pyplot as plt
@@ -446,7 +446,7 @@ class TestDirectionalAdvection(object):
 
                         d = (dev-host)*(dev-host)
                         d -= npw.mean(d)
-                    
+
                         if work_dim==3:
                             fig,axes = plt.subplots(2,2)
                             axes[0][0].imshow(npw.sum(d,axis=0),interpolation='nearest')
@@ -474,11 +474,11 @@ class TestDirectionalAdvection(object):
                         dev  = dev[view]
 
                         d = npw.sqrt((dev-host)*(dev-host))
-                        print '== {} HOST =='.format(name)
-                        print host
-                        print '== {} DEVICE =='.format(name)
-                        print dev
-                    
+                        print('== {} HOST =='.format(name))
+                        print(host)
+                        print('== {} DEVICE =='.format(name))
+                        print(dev)
+
             raise RuntimeError(msg)
 
     def _distances(self,lhs,rhs,view):
@@ -489,8 +489,8 @@ class TestDirectionalAdvection(object):
         l2   = npw.sqrt(npw.sum(d*d))/d.size
         linf = npw.max(da)
         return (l1,l2,linf)
-    
-    
+
+
     def _check_kernels(self, rk_scheme):
         check_instance(rk_scheme, ExplicitRungeKutta)
 
@@ -503,9 +503,9 @@ class TestDirectionalAdvection(object):
         else:
             nparticles=(1,2)
             work_dims=(1,2,3)
-        
-        with printoptions(threshold=10000, linewidth=240, 
-                          nanstr='nan', infstr='inf', 
+
+        with printoptions(threshold=10000, linewidth=240,
+                          nanstr='nan', infstr='inf',
                           formatter={'float': lambda x: '{:>6.2f}'.format(x)}):
             for boundary in boundaries:
                 self._do_compute_cpu(boundary=boundary, rk_scheme=rk_scheme)
@@ -513,18 +513,18 @@ class TestDirectionalAdvection(object):
                     for cache in cached:
                         for nparticle in nparticles:
                             self._do_compute_gpu_and_check(boundary=boundary,
-                                    rk_scheme=rk_scheme, work_dim=work_dim, 
+                                    rk_scheme=rk_scheme, work_dim=work_dim,
                                     nparticles=nparticle, cached=cache)
-   
+
 
     def test_advection_Euler(self):
         rk_scheme=ExplicitRungeKutta('Euler')
         self._check_kernels(rk_scheme=rk_scheme)
-    
+
     def test_advection_RK2(self):
         rk_scheme=ExplicitRungeKutta('RK2')
         self._check_kernels(rk_scheme=rk_scheme)
-    
+
     def test_advection_RK4(self):
         rk_scheme=ExplicitRungeKutta('RK4')
         self._check_kernels(rk_scheme=rk_scheme)
@@ -533,7 +533,7 @@ if __name__ == '__main__':
     TestDirectionalAdvection.setup_class(enable_extra_tests=False, enable_error_plots=False,
             enable_debug_logs=False)
     test = TestDirectionalAdvection()
-    
+
     test.test_advection_Euler()
     test.test_advection_RK2()
     test.test_advection_RK4()
diff --git a/hysop/backend/device/codegen/kernels/tests/test_codegen_directional_remesh.py b/hysop/backend/device/codegen/kernels/tests/test_codegen_directional_remesh.py
index be718dc17eb3ae69b16c99b4fcd9914ded08ceeb..069ea02aa7344ad75706cc2647582ca86a1b8f00 100644
--- a/hysop/backend/device/codegen/kernels/tests/test_codegen_directional_remesh.py
+++ b/hysop/backend/device/codegen/kernels/tests/test_codegen_directional_remesh.py
@@ -1,6 +1,6 @@
+import copy, math
 
 from hysop import __ENABLE_LONG_TESTS__
-from hysop.deps import copy, math
 from hysop.testsenv import __HAS_OPENCL_BACKEND__, iter_clenv, opencl_failed, TestCartesianField
 from hysop.constants import BoundaryCondition, Precision
 
@@ -16,8 +16,8 @@ from hysop.numerics.odesolvers.runge_kutta import ExplicitRungeKutta
 from hysop.numerics.remesh.remesh import RemeshKernel
 from hysop.backend.device.codegen.kernels.directional_remesh \
         import DirectionalRemeshKernelGenerator
-        
-        
+
+
 class TestDirectionalRemesh(object):
 
     DEBUG = False
@@ -28,31 +28,31 @@ class TestDirectionalRemesh(object):
             enable_error_plots=False,
             enable_debug_logs=False,
             grid_size=None):
-        
+
         # grid configuration
         default_grid_size = [9,7,3]
         grid_size = first_not_None(grid_size, default_grid_size)
         compute_grid_size  = npw.asintarray(grid_size) # X Y Z
         compute_grid_shape = compute_grid_size[::-1]
         assert compute_grid_size.size == 3
-        
+
         # velocity and scalar bounds
         umax = +1.0
         umin = -1.0
         uinf = 1.0
-        
+
         S0_min=-1.0
         S0_max=+1.0
         S0_inf=1.0
-        
+
         S1_min=-10.0
         S1_max=+10.0
         S1_inf=10.0
 
         # time and space discretizations
         inv_dx = compute_grid_size[0]-1
-        dx     =  1.0 / (compute_grid_size[0]-1) 
-        
+        dx     =  1.0 / (compute_grid_size[0]-1)
+
         # field configs
         extra_ghosts = {
             'V':      [1,2,3],
@@ -79,20 +79,20 @@ class TestDirectionalRemesh(object):
             'S0_out': mf.READ_WRITE,
             'S1_out': mf.READ_WRITE
         }
-       
-
-        print '\n::Setup TestDirectionalRemesh ::'
-        print '    enable_error_plots: {}'.format(enable_error_plots)
-        print '    enable_extra_tests: {}'.format(enable_extra_tests)
-        print '    enable_debug_logs:  {}'.format(enable_debug_logs)
-        print '  Compute Grid:'
-        print '    compute_grid_size:  {}'.format(compute_grid_size)
-        print '    compute_grid_shape: {}'.format(compute_grid_shape)
-        print '    dx:                 {}'.format(dx)
-        print '    umin/umax:          {} / {}'.format(umin, umax)
-        print '    S0_min/S0_max:      {} / {}'.format(S0_min, S0_max)
-        print '    S1_min/S1_max:      {} / {}'.format(S1_min, S1_max)
-        
+
+
+        print('\n::Setup TestDirectionalRemesh ::')
+        print('    enable_error_plots: {}'.format(enable_error_plots))
+        print('    enable_extra_tests: {}'.format(enable_extra_tests))
+        print('    enable_debug_logs:  {}'.format(enable_debug_logs))
+        print('  Compute Grid:')
+        print('    compute_grid_size:  {}'.format(compute_grid_size))
+        print('    compute_grid_shape: {}'.format(compute_grid_shape))
+        print('    dx:                 {}'.format(dx))
+        print('    umin/umax:          {} / {}'.format(umin, umax))
+        print('    S0_min/S0_max:      {} / {}'.format(S0_min, S0_max))
+        print('    S1_min/S1_max:      {} / {}'.format(S1_min, S1_max))
+
         cls.enable_extra_tests = enable_extra_tests
         cls.enable_error_plots = enable_error_plots
         cls.enable_debug_logs  = enable_debug_logs
@@ -109,17 +109,17 @@ class TestDirectionalRemesh(object):
     def _initialize(self, cl_env, precision, cfl, kernel_config, null_velocity,
             dump_mode='dec'):
 
-        print '''
-Allocating fields: 
-    platform:   {} 
+        print('''
+Allocating fields:
+    platform:   {}
     device:     {}
-    precision:  {} 
-    cfl:        {} 
+    precision:  {}
+    cfl:        {}
     kernel_cfg: {}
 '''.format(cl_env.platform.name.strip(),
-           cl_env.device.name.strip(), 
-           precision, cfl, kernel_config)
-        
+           cl_env.device.name.strip(),
+           precision, cfl, kernel_config))
+
         ctx      = cl_env.context
         device   = cl_env.device
         platform = cl_env.platform
@@ -133,9 +133,9 @@ Allocating fields:
             typegen   = _test_typegen('double', dump_mode)
         else:
             msg='Unsupported precision {}.'.format(precision)
-        
+
         eps = dtype(npw.finfo(dtype).eps)
-        
+
         # min and max ghosts
         dt     = dtype((cfl * self.dx) / self.uinf)
         kernel_moments = kernel_config[0]
@@ -144,11 +144,11 @@ Allocating fields:
         assert (kernel_moments % 2 == 0)
         assert (MAX_ADVEC > 0)
         assert (MAX_REMESH > 0)
-        
+
         min_velocity_ghosts   = MAX_ADVEC
         min_scalar_ghosts_out = MAX_ADVEC + MAX_REMESH
-        
-        # fields 
+
+        # fields
         min_ghosts = {
                 'V': min_velocity_ghosts,
                 'P': 0,
@@ -157,7 +157,7 @@ Allocating fields:
                 'S0_out': min_scalar_ghosts_out,
                 'S1_out': min_scalar_ghosts_out,
         }
-        
+
         all_fields = {}
         keys = ['no_ghosts', 'with_ghosts']
         for i,k in enumerate(keys):
@@ -170,10 +170,10 @@ Allocating fields:
                     val = 0.0
                 field = TestCartesianField(name=vname, dim=3, dtype=dtype,
                             nb_components=1, compute_grid_size=self.compute_grid_size.tolist(),
-                            min_ghosts=mg, extra_ghosts=eg, 
+                            min_ghosts=mg, extra_ghosts=eg,
                             init=val, ghosts_init=gval, extra_ghosts_init=egval)
                 fields[vname] = field
-        
+
         ug = self.extra_ghosts['V'][0]
         umin, umax = self.umin, self.umax
         if not null_velocity:
@@ -187,42 +187,42 @@ Allocating fields:
         dx = dtype(self.dx)
         for i,k in enumerate(keys):
             mesh_infos = all_mesh_infos.setdefault(k, {})
-            for dim in xrange(1, 4):
+            for dim in range(1, 4):
                 ref_values, _ = _test_mesh_info('ref_mesh_info', typegen,
                         dim, [0,]*dim, self.compute_grid_size[:dim])
                 if (dim==1):
                     assert ref_values['dx'][0] == dx
                 else:
                     assert ref_values['dx'][0][0] == dx
-                
+
                 dim_mesh_infos = mesh_infos.setdefault(dim, {})
-                for vname, field in all_fields[k].iteritems():
-                    (values, mesh_info) = _test_mesh_info('{}_mesh_info'.format(vname), 
+                for vname, field in all_fields[k].items():
+                    (values, mesh_info) = _test_mesh_info('{}_mesh_info'.format(vname),
                             typegen, dim, field.ghosts[:dim], field.grid_size[:dim])
                     if (dim==1):
                         assert values['dx'][0] == dx
                     else:
                         assert values['dx'][0][0] == dx
                     dim_mesh_infos[vname] = mesh_info
-        
+
         intent = self.intent
         mf = cl.mem_flags
         clbuf = lambda vname, data: tuple(cl.Buffer(ctx, flags=intent[vname] | mf.COPY_HOST_PTR,
-                                        hostbuf=data.data[i]) for i in xrange(data.nb_components))
-        
+                                        hostbuf=data.data[i]) for i in range(data.nb_components))
+
         host_buffers_init = dict(
-                no_ghosts   = {k:v.data for (k,v) in all_fields['no_ghosts'].iteritems()   },
-                with_ghosts = {k:v.data for (k,v) in all_fields['with_ghosts'].iteritems() })
+                no_ghosts   = {k:v.data for (k,v) in all_fields['no_ghosts'].items()   },
+                with_ghosts = {k:v.data for (k,v) in all_fields['with_ghosts'].items() })
         device_buffers = dict(
-                no_ghosts   = {k:clbuf(k,v) for (k,v) in all_fields['no_ghosts'].iteritems()   },
-                with_ghosts = {k:clbuf(k,v) for (k,v) in all_fields['with_ghosts'].iteritems() })
+                no_ghosts   = {k:clbuf(k,v) for (k,v) in all_fields['no_ghosts'].items()   },
+                with_ghosts = {k:clbuf(k,v) for (k,v) in all_fields['with_ghosts'].items() })
         host_buffers_reference = copy.deepcopy(host_buffers_init)
         host_buffers_gpu       = copy.deepcopy(host_buffers_init)
-        
-        print '\n'+'\n\n'.join(str(x) for x in all_fields['with_ghosts'].values())
-        
+
+        print('\n'+'\n\n'.join(str(x) for x in all_fields['with_ghosts'].values()))
+
         Lx = min(typegen.device.max_work_item_sizes[0], typegen.device.max_work_group_size)
-        Lx = min(Lx, self.compute_grid_size[0]/2)
+        Lx = min(Lx, self.compute_grid_size[0]//2)
 
         local_work_size = npw.asarray([Lx,1,1])
         work_load = npw.asarray([1,1,3])
@@ -248,7 +248,7 @@ Allocating fields:
         self.host_buffers_reference = host_buffers_reference
         self.host_buffers_gpu       = host_buffers_gpu
         self.device_buffers         = device_buffers
-        
+
         self.local_work_size = local_work_size
         self.work_load       = work_load
         self.min_scalar_ghosts_out = min_scalar_ghosts_out
@@ -256,7 +256,7 @@ Allocating fields:
 
     def _do_advec_on_cpu(self, rk_scheme, boundary):
 
-        print '\n_do_advec_on_cpu(rk_scheme={}, boundary={})'.format(rk_scheme.name(), boundary)
+        print('\n_do_advec_on_cpu(rk_scheme={}, boundary={})'.format(rk_scheme.name(), boundary))
         dt = self.dt
         dx = self.dtype(self.dx)
         inv_dx = self.dtype(self.inv_dx)
@@ -264,16 +264,16 @@ Allocating fields:
         compute_grid_size  = self.compute_grid_size
         compute_grid_shape = self.compute_grid_shape
         enable_debug_logs  = self.enable_debug_logs
-        
+
         for key in self.all_fields.keys():
             P = self.all_fields[key]['P']
             V = self.all_fields[key]['V']
             P_field_view, P_view_cg = P.view(dim=3)
             V_field_view, V_view_cg = V.view(dim=3)
-        
+
             host_init_buffers      = self.host_buffers_init[key]
             host_buffers_reference = self.host_buffers_reference[key]
-            
+
             velocity = host_init_buffers['V'][0][V_field_view]
             position = host_init_buffers['P'][0][P_field_view]
 
@@ -281,12 +281,12 @@ Allocating fields:
             pos[...] = npw.arange(compute_grid_size[0])[None,None,:]
             pos *= dx
             if enable_debug_logs:
-                print 'TIME STEP (DT):'
-                print dt
-                print 'INITIAL REFERENCE COMPUTE POSITION LINE:'
-                print pos[0,0]
-                print 'COMPUTE REFERENCE GRID VELOCITY:'
-                print velocity[V_view_cg][0,0]
+                print('TIME STEP (DT):')
+                print(dt)
+                print('INITIAL REFERENCE COMPUTE POSITION LINE:')
+                print(pos[0,0])
+                print('COMPUTE REFERENCE GRID VELOCITY:')
+                print(velocity[V_view_cg][0,0])
 
             def interp_velocity(X):
                 Gx = compute_grid_size[0]
@@ -298,12 +298,12 @@ Allocating fields:
 
                 Vl = npw.empty_like(X)
                 Vr = npw.empty_like(X)
-                for i in xrange(*V_view_cg[0].indices(velocity.shape[0])):
-                    for j in xrange(*V_view_cg[1].indices(velocity.shape[1])):
+                for i in range(*V_view_cg[0].indices(velocity.shape[0])):
+                    for j in range(*V_view_cg[1].indices(velocity.shape[1])):
                         Vl[i,j,:] = velocity[i,j,lidx[i,j,:]]
                         Vr[i,j,:] = velocity[i,j,ridx[i,j,:]]
                 return Vl + alpha*(Vr-Vl)
-        
+
             if rk_scheme.name() == 'Euler':
                 pos += velocity[V_view_cg]*dt
             elif rk_scheme.name() == 'RK2':
@@ -327,19 +327,19 @@ Allocating fields:
             else:
                 msg = 'Unknown Runge-Kutta scheme {}.'.format(rk_scheme)
                 raise ValueError(msg)
-            
+
             if enable_debug_logs:
-                print 'FINAL REFERENCE COMPUTE POSITION LINE'
-                print pos[0,0]
-            
+                print('FINAL REFERENCE COMPUTE POSITION LINE')
+                print(pos[0,0])
+
             host_buffers_reference['P'][0][P_field_view] = pos
-    
+
     def _do_remesh_on_cpu(self, rk_scheme, boundary, kernel, remesh_criteria_eps):
-        print
-        print '_do_remesh_on_cpu(rk_scheme={}, boundary={}, criteria_eps={},\n\t\t   kernel={})'.format(
-                rk_scheme, boundary, remesh_criteria_eps, kernel)
-        
-        P = 1 + kernel.n/2
+        print()
+        print('_do_remesh_on_cpu(rk_scheme={}, boundary={}, criteria_eps={},\n\t\t   kernel={})'.format(
+                rk_scheme, boundary, remesh_criteria_eps, kernel))
+
+        P = 1 + kernel.n//2
         assert (boundary == BoundaryCondition.NONE)
         assert (kernel.n == self.kernel_moments)
         assert (remesh_criteria_eps is None) or (remesh_criteria_eps>0)
@@ -352,21 +352,21 @@ Allocating fields:
 
         compute_grid_size  = self.compute_grid_size
         compute_grid_shape = self.compute_grid_shape
-        
+
         theorical_min_ind_advec = -self.MAX_ADVEC
         theorical_max_ind_advec = self.compute_grid_size[0] + self.MAX_ADVEC
 
         theorical_min_ind = theorical_min_ind_advec - self.MAX_REMESH
-        theorical_max_ind = theorical_max_ind_advec + self.MAX_REMESH - 1 
-        MIN_GHOSTS = self.MAX_ADVEC + self.MAX_REMESH 
+        theorical_max_ind = theorical_max_ind_advec + self.MAX_REMESH - 1
+        MIN_GHOSTS = self.MAX_ADVEC + self.MAX_REMESH
         self.MIN_GHOSTS = MIN_GHOSTS
-        
-        for target, fields in self.all_fields.iteritems():
-            print '>Target {}'.format(target)
-            host_init_buffers      = self.host_buffers_init[target] 
+
+        for target, fields in self.all_fields.items():
+            print('>Target {}'.format(target))
+            host_init_buffers      = self.host_buffers_init[target]
             host_buffers_reference = self.host_buffers_reference[target]
             device_buffers         = self.device_buffers[target]
-        
+
             assert (fields['S0_out'].min_ghosts[0] == MIN_GHOSTS)
             assert (fields['S1_out'].min_ghosts[0] == MIN_GHOSTS)
             assert (fields['S0_out'].ghosts[0]     >= MIN_GHOSTS)
@@ -377,13 +377,13 @@ Allocating fields:
             S1_in_view,  S1_in_view_cg  = fields['S1_in'].view(dim=3)
             S0_out_view, S0_out_view_cg = fields['S0_out'].view(dim=3)
             S1_out_view, S1_out_view_cg = fields['S1_out'].view(dim=3)
-            
+
             pos     = host_buffers_reference['P'][0][P_view]
             S0_in   = host_init_buffers['S0_in'][0][S0_in_view]
             S1_in   = host_init_buffers['S1_in'][0][S1_in_view]
             S0_out  = host_buffers_reference['S0_out'][0][S0_out_view]
             S1_out  = host_buffers_reference['S1_out'][0][S1_out_view]
-                            
+
             S0_ghosts_in  = fields['S0_in'].min_ghosts
             S0_ghosts_out = fields['S0_out'].min_ghosts
             S1_ghosts_in  = fields['S1_in'].min_ghosts
@@ -393,48 +393,48 @@ Allocating fields:
             ind = npw.floor(pos*inv_dx).astype(npw.int32)
             y = (pos - ind*dx)*inv_dx
             y = (1-y)
-            
+
             min_ind, max_ind = npw.min(ind), npw.max(ind)
             ymin, ymax = npw.min(y), npw.max(y)
-           
+
             if self.DEBUG:
-                print 'imin={}, imax={}'.format(min_ind, max_ind)
-                print 'ymin={}, ymax={}'.format(ymin, ymax)
+                print('imin={}, imax={}'.format(min_ind, max_ind))
+                print('ymin={}, ymax={}'.format(ymin, ymax))
             # assert (y>0.0).all() and (y<1.0).all()
 
             if (min_ind < theorical_min_ind):
-                msg0='min_ind={} <= floor(umin*dt*inv_dx)={}'.format(min_ind, 
+                msg0='min_ind={} <= floor(umin*dt*inv_dx)={}'.format(min_ind,
                         theorical_min_ind_advec)
                 raise RuntimeError(msg0)
 
             if (max_ind > theorical_max_ind):
-                msg1='max_ind={} <= floor(umax*dt*inv_dx)={}'.format(max_ind, 
+                msg1='max_ind={} <= floor(umax*dt*inv_dx)={}'.format(max_ind,
                         theorical_max_ind_advec)
                 raise RuntimeError(msg1)
 
             Gx = compute_grid_size[0]
             ind -= P
             assert (MIN_GHOSTS+ind+1>=0).all()
-            
+
             S0_out[...] = 0.0
             S1_out[...] = 0.0
             S0_not_remeshed = 0.0
             S1_not_remeshed = 0.0
-            for q in xrange(-P, +P):
-                yi = y+q 
+            for q in range(-P, +P):
+                yi = y+q
                 wi = kernel.gamma(yi)
                 ind += 1
                 if self.DEBUG:
-                    print 'ind', ind[0,0,:]
-                    print 'yi', yi[0,0,:]
-                    print
+                    print('ind', ind[0,0,:])
+                    print('yi', yi[0,0,:])
+                    print()
 
                 _min_ind, _max_ind = npw.min(ind), npw.max(ind)
                 assert (_min_ind >= theorical_min_ind).all(), 'min_ind = {} < {}'.format(
                         _min_ind, theorical_min_ind)
                 assert (_max_ind <= theorical_max_ind).all(), 'max_ind = {} > {}'.format(
                         _max_ind, theorical_max_ind)
-               
+
                 for (i,j,k) in npw.ndindex(*self.compute_grid_shape):
                     in_index  = (S0_ghosts_in[2] + i,
                                  S0_ghosts_in[1] + j,
@@ -447,14 +447,14 @@ Allocating fields:
                     val =  W * sin
                     if (remesh_criteria_eps is None) or \
                             (abs(sin)>remesh_criteria_eps*eps):
-                        S0_out[out_index] += val 
+                        S0_out[out_index] += val
                     else:
                         S0_not_remeshed   += val
 
                     if self.DEBUG and (i==0) and (j==0):
                         msg='wrote {:0.6f} to index {} with y={}, s0={} and W={}'
                         msg=msg.format( val, out_index[2], y[i,j,k], sin, W )
-                        print msg
+                        print(msg)
 
                     in_index  = (S1_ghosts_in[2] + i,
                                  S1_ghosts_in[1] + j,
@@ -469,14 +469,14 @@ Allocating fields:
                         S1_out[out_index] += val
                     else:
                         S1_not_remeshed   += val
-            
-            
+
+
             # check if scalar was conserved
             I0 = npw.sum(S0_in[S0_in_view_cg])
             I1 = npw.sum(S0_out)
             J0 = npw.sum(S1_in[S1_in_view_cg])
             J1 = npw.sum(S1_out)
-                
+
             err0 = I1-I0+S0_not_remeshed
             err1 = J1-J0+S1_not_remeshed
 
@@ -485,23 +485,23 @@ Allocating fields:
                         I0, I1+S0_not_remeshed, I1+S0_not_remeshed-I0, abs(100*S1_not_remeshed/J0))
                 raise ValueError(msg)
             else:
-                print '  #S0 remesh error: {} ({} eps, not remeshed {:3.02}%)'.format(
-                        err0, int(err0/eps), abs(100*S0_not_remeshed/I0))
-            
+                print('  #S0 remesh error: {} ({} eps, not remeshed {:3.02}%)'.format(
+                        err0, int(err0/eps), abs(100*S0_not_remeshed/I0)))
+
             if not npw.isclose(J0, J1+S1_not_remeshed, atol=2e-5):
                 msg = '  #S1 failed: J0:{} !=  J1:{}, error={}, not remesh {:3.02}%'.format(
                         J0,J1+S1_not_remeshed, J1+S1_not_remeshed-J0, abs(100*S1_not_remeshed/J0))
                 raise ValueError(msg)
             else:
-                print '  #S1 remesh error: {} ({} eps, not remeshed {:3.02}%)'.format(
-                        err1, int(err1/eps), abs(100*S1_not_remeshed/J0))
+                print('  #S1 remesh error: {} ({} eps, not remeshed {:3.02}%)'.format(
+                        err1, int(err1/eps), abs(100*S1_not_remeshed/J0)))
 
             host_buffers_reference['S0_out'][0][S0_out_view] = S0_out
             host_buffers_reference['S1_out'][0][S1_out_view] = S1_out
-    
-    def _do_remesh_on_gpu_and_check(self, cfl, cl_env, 
+
+    def _do_remesh_on_gpu_and_check(self, cfl, cl_env,
                                      boundary, kernel, work_dim,
-                                     is_inplace, use_atomics, 
+                                     is_inplace, use_atomics,
                                      use_short_circuit, symbolic_mode,
                                      remesh_criteria_eps,
                                      nparticles, nscalars,
@@ -510,49 +510,49 @@ Allocating fields:
         msg = '''
 _do_remesh_on_gpu_and_check()
  -- testing directional {}d remesh with {}
- --     boundaries={}, nparticles={}, nscalars={} 
+ --     boundaries={}, nparticles={}, nscalars={}
  --     inplace={}, atomic={}, criteria={}'
  --     use_short_circuit={}, symbolic_mode={}'
  --     {}
-'''.format(work_dim, kernel, 
-            str(boundary).lower(), nparticles, nscalars, 
+'''.format(work_dim, kernel,
+            str(boundary).lower(), nparticles, nscalars,
                     is_inplace, use_atomics, remesh_criteria_eps,
                     use_short_circuit, symbolic_mode,
                     'NULL VELOCITY' if null_velocity else 'RANDOM_VELOCITY')
-        print msg
-        
+        print(msg)
+
         work_size       = self.compute_grid_size[:work_dim]
         work_load       = self.work_load[:work_dim]
         local_work_size = self.local_work_size.copy()
         queue           = self.queue
-        
+
         local_work_size = local_work_size[:work_dim]
-        max_global_size = DirectionalRemeshKernelGenerator.get_max_global_size(work_size, 
+        max_global_size = DirectionalRemeshKernelGenerator.get_max_global_size(work_size,
                 work_load, nparticles)
         local_work_size = npw.minimum(local_work_size, max_global_size)
         local_work_size[0] = max(local_work_size[0], 2*self.min_scalar_ghosts_out)
-            
-        print '  work_size={}, work_load={}, nparticles={}'.format(work_size, work_load, nparticles)
-        print '  max_global_size={}'.format(max_global_size)
-        print
+
+        print('  work_size={}, work_load={}, nparticles={}'.format(work_size, work_load, nparticles))
+        print('  max_global_size={}'.format(max_global_size))
+        print()
 
         eps = self.eps
         itemsize = self.dt.itemsize
-        
+
         sboundary  = (boundary, boundary,)
         typegen    = self.typegen
         ftype      = typegen.fbtype
         scalar_cfl = cfl
-        
+
         if (boundary != BoundaryCondition.NONE):
             raise RuntimeError('Unknown boundaty {}.'.format(boundary))
-        
-        for target, fields in self.all_fields.iteritems():
-            print '>Target {}'.format(target)
+
+        for target, fields in self.all_fields.items():
+            print('>Target {}'.format(target))
             mesh_infos             = self.all_mesh_infos[target][work_dim]
             device_buffers         = self.device_buffers[target]
             host_buffers_gpu       = self.host_buffers_gpu[target]
-            host_init_buffers      = self.host_buffers_init[target] 
+            host_init_buffers      = self.host_buffers_init[target]
             host_buffers_reference = self.host_buffers_reference[target]
 
             field_views = {}
@@ -560,11 +560,11 @@ _do_remesh_on_gpu_and_check()
             field_mesh_infos = {}
             field_offsets = {}
             field_strides = {}
-            for fname, field in fields.iteritems():
+            for fname, field in fields.items():
                 grid_size = field.grid_size
                 ghosts = field.ghosts
                 views = field.view(dim=work_dim)
-                
+
                 strides = field.sdata.strides[-work_dim:]
                 assert (npw.mod(strides, itemsize) == 0).all()
                 strides = tuple(x//itemsize for x in strides[::-1])
@@ -573,13 +573,13 @@ _do_remesh_on_gpu_and_check()
                     offset = 0
                 elif work_dim == 2:
                     offset = ghosts[2]*grid_size[1]*grid_size[0]
-                elif work_dim == 1: 
+                elif work_dim == 1:
                     offset = (ghosts[2]*grid_size[1] + ghosts[1])*grid_size[0]
                 else:
                     msg='Invalid work dimesion {}.'.format(work_dim)
                     raise ValueError(msg)
                 offset = npw.uint64(offset)
-                
+
                 vnames = {
                     'P': 'position',
                     'S0_in':  'S0_0_in',
@@ -603,13 +603,13 @@ _do_remesh_on_gpu_and_check()
             known_vars.update(field_mesh_infos)
             known_vars.update(field_offsets)
             known_vars.update(field_strides)
-            
-            drk = DirectionalRemeshKernelGenerator(typegen=typegen, 
+
+            drk = DirectionalRemeshKernelGenerator(typegen=typegen,
                 work_dim=work_dim, ftype=ftype,
                 nparticles=nparticles, nscalars=nscalars,
                 sboundary=sboundary, is_inplace=is_inplace,
                 remesh_kernel=kernel,
-                scalar_cfl=scalar_cfl, 
+                scalar_cfl=scalar_cfl,
                 use_atomics=use_atomics,
                 remesh_criteria_eps=remesh_criteria_eps,
                 symbolic_mode=symbolic_mode,
@@ -618,23 +618,23 @@ _do_remesh_on_gpu_and_check()
                 debug_mode=False,
                 tuning_mode=False,
                 known_vars=known_vars)
-            
-            global_work_size = drk.get_global_size(work_size, local_work_size, 
+
+            global_work_size = drk.get_global_size(work_size, local_work_size,
                                                     work_load=work_load)
-            print '  |- Generating and compiling Kernel...'
-            print '  |- global_work_size={}, local_work_size={}'.format(global_work_size, local_work_size)
+            print('  |- Generating and compiling Kernel...')
+            print('  |- global_work_size={}, local_work_size={}'.format(global_work_size, local_work_size))
             (static_shared_bytes, dynamic_shared_bytes, total_sharedbytes) = \
                     drk.required_workgroup_cache_size(local_work_size)
             assert dynamic_shared_bytes==0
-            
+
             in_variables  = ['P']
             out_variables = []
             if is_inplace:
-                in_variables += ['S{}_out'.format(i) for i in xrange(nscalars)]
+                in_variables += ['S{}_out'.format(i) for i in range(nscalars)]
             else:
-                in_variables += ['S{}_in'.format(i) for i in xrange(nscalars)]
-            out_variables += ['S{}_out'.format(i) for i in xrange(nscalars)]
-            
+                in_variables += ['S{}_in'.format(i) for i in range(nscalars)]
+            out_variables += ['S{}_out'.format(i) for i in range(nscalars)]
+
             kernel_args = []
             for varname in in_variables:
                 kernel_args.append(device_buffers[varname][0])
@@ -646,16 +646,16 @@ _do_remesh_on_gpu_and_check()
             prg = cl.Program(self.ctx, source)
             prg.build(devices=[self.device])
             cl_kernel = prg.all_kernels()[0]
-             
+
             try:
                 cl_kernel.set_args(*kernel_args)
             except:
                 msg='call to set_args failed, args were: {}'.format(kernel_args)
                 raise RuntimeError(msg)
-            
-            print '  |- CPU => GPU:  ',
+
+            print('  |- CPU => GPU:  ', sep=' ')
             for buf in in_variables + out_variables:
-                print '{}, '.format(buf),
+                print('{}, '.format(buf), sep=' ')
                 if buf == 'P':
                     src = host_buffers_reference[buf][0]
                 elif is_inplace and buf in ['S0_out', 'S1_out']:
@@ -667,21 +667,21 @@ _do_remesh_on_gpu_and_check()
                     src = host_init_buffers[buf][0]
                 dst = device_buffers[buf][0]
                 cl.enqueue_copy(queue,dst,src)
-            print '\n  |- Kernel execution <<<{},{}>>>'.format(global_work_size,local_work_size)
-            evt = cl.enqueue_nd_range_kernel(queue, cl_kernel, 
+            print('\n  |- Kernel execution <<<{},{}>>>'.format(global_work_size,local_work_size))
+            evt = cl.enqueue_nd_range_kernel(queue, cl_kernel,
                     global_work_size.tolist(), local_work_size.tolist())
             evt.wait()
-            
-            print '  |- GPU => CPU:  ',
+
+            print('  |- GPU => CPU:  ', sep=' ')
             for buf in in_variables+out_variables:
-                print '{}, '.format(buf),
+                print('{}, '.format(buf), sep=' ')
                 src = device_buffers[buf][0]
                 dst = host_buffers_gpu[buf][0]
                 cl.enqueue_copy(queue,dst,src)
-            print
-            print '  |- Synchronizing queue'
+            print()
+            print('  |- Synchronizing queue')
             queue.finish()
-            
+
             if (self.DEBUG):
                 S0_in_grid_size,   S0_out_grid_size   = fields['S0_in'].grid_size, fields['S0_out'].grid_size
                 S1_in_grid_size,   S1_out_grid_size   = fields['S1_in'].grid_size, fields['S1_out'].grid_size
@@ -700,68 +700,68 @@ _do_remesh_on_gpu_and_check()
                 P_view, V_view = field_views['P'], field_views['V']
                 P_strides, P_offset = field_strides['position_strides'], field_offsets['position_offset']
                 P_grid_size, P_grid_ghosts = fields['P'].grid_size, fields['P'].ghosts
-                print
-                print 'WORKDIM = {}'.format(work_dim)
-                print 'ADVEC   = {}'.format(self.MAX_ADVEC)
-                print 'REMESH  = {}'.format(self.MAX_REMESH)
-                print 'GHOSTS  = {}'.format(self.MIN_GHOSTS)
-                print
-                print 'COMPUTE_GRID_SIZE = {}'.format(self.compute_grid_size)
-                print 'PRECISION         = {}'.format(self.precision)
-                print 'ITEMSIZE          = {}'.format(self.dt.itemsize)
-                print
-                print '::P::\n  *grid_size={}\n  *ghosts={},\n  *strides={},\n  *offset={}\n  *view={}'.format(
-                        P_grid_size, P_grid_ghosts, P_strides, P_offset, P_view)
-                print
+                print()
+                print('WORKDIM = {}'.format(work_dim))
+                print('ADVEC   = {}'.format(self.MAX_ADVEC))
+                print('REMESH  = {}'.format(self.MAX_REMESH))
+                print('GHOSTS  = {}'.format(self.MIN_GHOSTS))
+                print()
+                print('COMPUTE_GRID_SIZE = {}'.format(self.compute_grid_size))
+                print('PRECISION         = {}'.format(self.precision))
+                print('ITEMSIZE          = {}'.format(self.dt.itemsize))
+                print()
+                print('::P::\n  *grid_size={}\n  *ghosts={},\n  *strides={},\n  *offset={}\n  *view={}'.format(
+                        P_grid_size, P_grid_ghosts, P_strides, P_offset, P_view))
+                print()
                 if not is_inplace:
-                    print '::S0_in::\n  *grid_size={}\n  *ghosts={},\n  *strides={},\n  *offset={}\n  *view={}'.format(
-                            S0_in_grid_size, S0_in_grid_ghosts, S0_in_strides, S0_in_offset, S0_in_view)
-                    print
+                    print('::S0_in::\n  *grid_size={}\n  *ghosts={},\n  *strides={},\n  *offset={}\n  *view={}'.format(
+                            S0_in_grid_size, S0_in_grid_ghosts, S0_in_strides, S0_in_offset, S0_in_view))
+                    print()
                     if nscalars > 1:
-                        print '::S1_in::\n  *grid_size={}\n  *ghosts={},\n  *strides={},\n  *offset={}\n  *view={}'.format(
-                                S1_in_grid_size, S1_in_grid_ghosts, S1_in_strides, S1_in_offset, S1_in_view)
-                        print
-                print '::S0_out::\n  *grid_size={}\n  *ghosts={},\n  *strides={},\n  *offset={}\n  *view={}'.format(
-                        S0_out_grid_size, S0_out_grid_ghosts, S0_out_strides, S0_out_offset, S0_out_view)
-                print
+                        print('::S1_in::\n  *grid_size={}\n  *ghosts={},\n  *strides={},\n  *offset={}\n  *view={}'.format(
+                                S1_in_grid_size, S1_in_grid_ghosts, S1_in_strides, S1_in_offset, S1_in_view))
+                        print()
+                print('::S0_out::\n  *grid_size={}\n  *ghosts={},\n  *strides={},\n  *offset={}\n  *view={}'.format(
+                        S0_out_grid_size, S0_out_grid_ghosts, S0_out_strides, S0_out_offset, S0_out_view))
+                print()
                 if nscalars > 1:
-                    print '::S1_out::\n  *grid_size={}\n  *ghosts={},\n  *strides={},\n  *offset={}\n  *view={}'.format(
-                            S1_out_grid_size, S1_out_grid_ghosts, S1_out_strides, S1_out_offset, S1_out_view)
-                    print
+                    print('::S1_out::\n  *grid_size={}\n  *ghosts={},\n  *strides={},\n  *offset={}\n  *view={}'.format(
+                            S1_out_grid_size, S1_out_grid_ghosts, S1_out_strides, S1_out_offset, S1_out_view))
+                    print()
 
                 if work_dim < 3:
-                    print '::velocity CPU::\n{}'.format(host_buffers_reference['V'][0][V_view])
-                    print '::velocity GPU::\n{}'.format(host_buffers_gpu['V'][0][V_view])
-                    print
-
-                    print '::pos CPU::\n{}'.format(host_buffers_reference['P'][0][P_view])
-                    print '::pos GPU::\n{}'.format(host_buffers_gpu['P'][0][P_view])
-                    print
-                    
-                    print '::S0_in CPU::\n{}'.format(host_init_buffers['S0_in'][0][S0_in_view])
+                    print('::velocity CPU::\n{}'.format(host_buffers_reference['V'][0][V_view]))
+                    print('::velocity GPU::\n{}'.format(host_buffers_gpu['V'][0][V_view]))
+                    print()
+
+                    print('::pos CPU::\n{}'.format(host_buffers_reference['P'][0][P_view]))
+                    print('::pos GPU::\n{}'.format(host_buffers_gpu['P'][0][P_view]))
+                    print()
+
+                    print('::S0_in CPU::\n{}'.format(host_init_buffers['S0_in'][0][S0_in_view]))
                     if not is_inplace:
-                        print '::S0_in GPU::\n{}'.format(host_buffers_gpu['S0_in'][0][S0_in_view])
-                        print
+                        print('::S0_in GPU::\n{}'.format(host_buffers_gpu['S0_in'][0][S0_in_view]))
+                        print()
                     else:
-                        print
-                    
+                        print()
+
                     if nscalars > 1:
-                        print '::S1_in CPU::\n{}'.format(host_init_buffers['S1_in'][0][S1_in_view])
+                        print('::S1_in CPU::\n{}'.format(host_init_buffers['S1_in'][0][S1_in_view]))
                         if not is_inplace:
-                            print '::S1_in GPU::\n{}'.format(host_buffers_gpu['S1_in'][0][S1_in_view])
-                            print
+                            print('::S1_in GPU::\n{}'.format(host_buffers_gpu['S1_in'][0][S1_in_view]))
+                            print()
                         else:
-                            print
+                            print()
 
-                    print '::S0_out CPU::\n{}'.format(host_buffers_reference['S0_out'][0][S0_out_view])
-                    print '::S0_out GPU::\n{}'.format(host_buffers_gpu['S0_out'][0][S0_out_view])
-                    print
+                    print('::S0_out CPU::\n{}'.format(host_buffers_reference['S0_out'][0][S0_out_view]))
+                    print('::S0_out GPU::\n{}'.format(host_buffers_gpu['S0_out'][0][S0_out_view]))
+                    print()
 
                     if nscalars > 1:
-                        print '::S1_out CPU::\n{}'.format(host_buffers_reference['S1_out'][0][S1_out_view])
-                        print '::S1_out GPU::\n{}'.format(host_buffers_gpu['S1_out'][0][S1_out_view])
-                        print
-            
+                        print('::S1_out CPU::\n{}'.format(host_buffers_reference['S1_out'][0][S1_out_view]))
+                        print('::S1_out GPU::\n{}'.format(host_buffers_gpu['S1_out'][0][S1_out_view]))
+                        print()
+
             variables = out_variables
             if self.DEBUG:
                 variables += in_variables
@@ -769,12 +769,12 @@ _do_remesh_on_gpu_and_check()
                     variables.remove('S0_in')
                     if (nscalars > 1):
                         variables.remove('S1_in')
-            
+
             def max_tol(vname):
                 _max_tol = 100*eps
-                if vname.find('S0')==0: 
+                if vname.find('S0')==0:
                     return self.S0_inf*_max_tol
-                elif vname.find('S1')==0: 
+                elif vname.find('S1')==0:
                     return self.S1_inf*_max_tol
                 else:
                     return _max_tol
@@ -787,29 +787,29 @@ _do_remesh_on_gpu_and_check()
                         for varname in variables]
 
             self._cmp_buffers(buffers,drk,work_dim)
-    
+
     def _cmp_buffers(self,buffers,drk,work_dim):
         good = True
         err_buffers = []
         eps = self.eps
-        
-        print '  |- Checking outputs (eps={}):'.format(eps)
+
+        print('  |- Checking outputs (eps={}):'.format(eps))
         for (name,host,dev,view,max_tol) in buffers:
             if not npw.isfinite(host[view]).all():
                 msg='FATAL ERROR: Host input for field {} is not finite.'.format(name)
                 raise ValueError(msg)
             if npw.isfinite(dev[view]).all():
                 (l1,l2,linf) = self._distances(host,dev,view)
-                print '  |  *{} -> l1={}  l2={}  linf={}'.format(name,l1,l2,linf)
+                print('  |  *{} -> l1={}  l2={}  linf={}'.format(name,l1,l2,linf))
                 if (linf>max_tol):
                     err_buffers.append(name)
                     good = False
             else:
-                print '  |  *{} -> output is not finite'.format(name)
+                print('  |  *{} -> output is not finite'.format(name))
                 good = False
         if not good:
             msg = '\n[FAIL] Buffer comparisson failed for buffers {}.\n'.format(err_buffers)
-            print msg
+            print(msg)
             if self.enable_error_plots:
                 from matplotlib import pyplot as plt
                 for (name,host,dev,view,max_tol) in buffers:
@@ -819,7 +819,7 @@ _do_remesh_on_gpu_and_check()
 
                         d = (dev-host)*(dev-host)
                         d -= npw.mean(d)
-                    
+
                         if work_dim==3:
                             fig,axes = plt.subplots(2,2)
                             axes[0][0].imshow(npw.sum(d,axis=0),interpolation='nearest')
@@ -848,27 +848,27 @@ _do_remesh_on_gpu_and_check()
                         dist = npw.abs(dev-host)
                         dist[~npw.isfinite(dev)] = npw.inf
 
-                        print '{} HOST'.format(name)
-                        print host
-                        print '{} DEVICE'.format(name)
-                        print dev
-                        print '{} ABS(DEVICE - HOST)'.format(name)
-                        npw.fancy_print(dist, replace_values={(lambda a: a<max_tol): '.'})
-                        
+                        print('{} HOST'.format(name))
+                        print(host)
+                        print('{} DEVICE'.format(name))
+                        print(dev)
+                        print('{} ABS(DEVICE - HOST)'.format(name))
+                        npw.fancy_print((dist, replace_values={(lambda a: a<max_tol): '.'}))
+
                         I0, I1 = npw.sum(host), npw.sum(dev)
-                        print 'I0={}, I1={}'.format(I0, I1)
+                        print('I0={}, I1={}'.format(I0, I1))
                         if (abs(I1-I0) < 10*self.eps):
-                            print '[FATAL ERROR] scalar {} was conserved but output mismatched.'.format(name)
+                            print('[FATAL ERROR] scalar {} was conserved but output mismatched.'.format(name))
                         else:
-                            print '[FATAL ERROR] scalar {} was not conserved and output mismatched'.format(name)
-                        print
-                        print
+                            print('[FATAL ERROR] scalar {} was not conserved and output mismatched'.format(name))
+                        print()
+                        print()
             if self.DEBUG:
                 drk.edit()
-                    
-            print '  |>TEST KO'
+
+            print('  |>TEST KO')
             raise RuntimeError(msg)
-        print '  |>TEST OK'
+        print('  |>TEST OK')
 
     def _distances(self,lhs,rhs, view):
         d    = rhs[view]-lhs[view]
@@ -878,14 +878,14 @@ _do_remesh_on_gpu_and_check()
         l2   = npw.sqrt(npw.sum(d*d)/d.size)
         linf = npw.max(da)
         return (l1,l2,linf)
-     
-    
+
+
     def _check_kernels(self, rk_scheme, cfl):
         assert cfl>0
         check_instance(rk_scheme, ExplicitRungeKutta)
-        
+
         boundaries=(BoundaryCondition.NONE,)
-           
+
         split_polys=(False, True)
         use_atomics=(False, True)
         is_inplaces=(False, True)
@@ -907,22 +907,22 @@ _do_remesh_on_gpu_and_check()
             kernels=[(2,2)]
             work_dims=(2,)
             nparticles=(4,)
-        
+
         cl_envs = tuple(cl_env for cl_env in iter_clenv())
-        print 
-        print 'ENABLE_LONG_TESTS = {}'.format(__ENABLE_LONG_TESTS__)
-        print
+        print()
+        print('ENABLE_LONG_TESTS = {}'.format(__ENABLE_LONG_TESTS__))
+        print()
         if not __ENABLE_LONG_TESTS__:
-            print 'Long tests disabled, only the first OpenCl platform will be tested.'
-        print
-        print 'Found {} OpenCl environments:'.format(len(cl_envs))
+            print('Long tests disabled, only the first OpenCl platform will be tested.')
+        print()
+        print('Found {} OpenCl environments:'.format(len(cl_envs)))
         for cl_env in cl_envs:
-            print cl_env
-        
+            print(cl_env)
+
         ntests = 0
         for cl_env in cl_envs:
-            print 'SWITCHING CL_ENV TO: platform {}, device {}'.format(cl_env.platform.name.strip(), 
-                                                                       cl_env.device.name.strip())
+            print('SWITCHING CL_ENV TO: platform {}, device {}'.format(cl_env.platform.name.strip(),
+                                                                       cl_env.device.name.strip()))
             for precision in precisions:
                 for kernel_config in kernels:
                     for null_velocity in [True, False]:
@@ -931,11 +931,11 @@ _do_remesh_on_gpu_and_check()
                         for boundary in boundaries:
                             self._do_advec_on_cpu(boundary=boundary, rk_scheme=rk_scheme)
                             for split_poly in split_polys:
-                                kernel = RemeshKernel(*kernel_config, split_polys=split_poly, 
+                                kernel = RemeshKernel(*kernel_config, split_polys=split_poly,
                                         verbose=False)
                                 for remesh_criteria_eps in remesh_criterias:
-                                    self._do_remesh_on_cpu(boundary=boundary, rk_scheme=rk_scheme, 
-                                                            kernel=kernel, 
+                                    self._do_remesh_on_cpu(boundary=boundary, rk_scheme=rk_scheme,
+                                                            kernel=kernel,
                                                             remesh_criteria_eps=remesh_criteria_eps)
                                     for work_dim in work_dims:
                                         for is_inplace in is_inplaces:
@@ -950,25 +950,25 @@ _do_remesh_on_gpu_and_check()
                                                                 self._do_remesh_on_gpu_and_check(
                                                                        cfl=cfl,
                                                                        cl_env=cl_env,
-                                                                       boundary=boundary, 
-                                                                       kernel=kernel, 
+                                                                       boundary=boundary,
+                                                                       kernel=kernel,
                                                                        work_dim=work_dim,
-                                                                       is_inplace=is_inplace, 
-                                                                       use_atomics=use_atomic, 
+                                                                       is_inplace=is_inplace,
+                                                                       use_atomics=use_atomic,
                                                                        use_short_circuit=use_short_circuit,
                                                                        symbolic_mode=symbolic_mode,
                                                                        remesh_criteria_eps=remesh_criteria_eps,
                                                                        nparticles=nparticle, nscalars=nscalar,
                                                                        null_velocity=null_velocity)
                                                                 ntests += 2 # with and without extra ghosts
-        print
-        print 'DirectionalRemesh: All {} tests passed.'.format(ntests)
+        print()
+        print('DirectionalRemesh: All {} tests passed.'.format(ntests))
 
     @opencl_failed
     def test_remesh_from_Euler_advection_low_cfl(self, cfl=0.5788):
         rk_scheme=ExplicitRungeKutta('Euler')
         self._check_kernels(rk_scheme=rk_scheme, cfl=cfl)
-    
+
     @opencl_failed
     def test_remesh_from_Euler_advection_high_cfl(self, cfl=1.78):
         rk_scheme=ExplicitRungeKutta('Euler')
@@ -979,12 +979,12 @@ if __name__ == '__main__':
         msg='OpenCL is not present (pyopencl has not been found).'
         raise RuntimeError(msg)
 
-    TestDirectionalRemesh.setup_class(enable_extra_tests=False, 
+    TestDirectionalRemesh.setup_class(enable_extra_tests=False,
                                       enable_error_plots=False,
                                       enable_debug_logs=False)
     test = TestDirectionalRemesh()
 
-    with printoptions(linewidth=200, 
+    with printoptions(linewidth=200,
             formatter={'float':lambda x: '{:0.2f}'.format(x)}):
         test.test_remesh_from_Euler_advection_low_cfl()
         test.test_remesh_from_Euler_advection_high_cfl()
diff --git a/hysop/backend/device/codegen/kernels/tests/test_codegen_directional_stretching.py b/hysop/backend/device/codegen/kernels/tests/test_codegen_directional_stretching.py
index ef41f9b03e95b007d079ba523586fddea00cebc2..adbb6a16989f9b1706f5cf694fc544e6b2524f03 100644
--- a/hysop/backend/device/codegen/kernels/tests/test_codegen_directional_stretching.py
+++ b/hysop/backend/device/codegen/kernels/tests/test_codegen_directional_stretching.py
@@ -14,7 +14,7 @@ class TestDirectionalStretching(object):
 
     @classmethod
     def setup_class(cls,
-            do_extra_tests=__ENABLE_LONG_TESTS__, 
+            do_extra_tests=__ENABLE_LONG_TESTS__,
             enable_error_plots=False):
         typegen = _test_typegen('double','dec')
         dtype = np.float64
@@ -28,7 +28,7 @@ class TestDirectionalStretching(object):
 
         (A,grid_mesh_info)         = _test_mesh_info('grid_mesh_info', typegen,3,0,grid_size)
         (B,compute_grid_mesh_info) = _test_mesh_info('compute_grid_mesh_info', typegen,3,compute_grid_ghosts,compute_grid_size)
-        
+
         grid_shape         = grid_size[::-1]
         compute_grid_shape = compute_grid_size[::-1]
 
@@ -65,39 +65,39 @@ class TestDirectionalStretching(object):
 
         device_buffers = {
                 'no_ghosts': {
-                    'ux': cl.Buffer(ctx, flags=mf.READ_ONLY  | mf.COPY_HOST_PTR, 
+                    'ux': cl.Buffer(ctx, flags=mf.READ_ONLY  | mf.COPY_HOST_PTR,
 								hostbuf=host_buffers_init['no_ghosts']['ux']),
-                    'uy': cl.Buffer(ctx, flags=mf.READ_ONLY  | mf.COPY_HOST_PTR, 
+                    'uy': cl.Buffer(ctx, flags=mf.READ_ONLY  | mf.COPY_HOST_PTR,
 								hostbuf=host_buffers_init['no_ghosts']['uy']),
-                    'uz': cl.Buffer(ctx, flags=mf.READ_ONLY  | mf.COPY_HOST_PTR, 
+                    'uz': cl.Buffer(ctx, flags=mf.READ_ONLY  | mf.COPY_HOST_PTR,
 								hostbuf=host_buffers_init['no_ghosts']['uz']),
-                    'wx': cl.Buffer(ctx, flags=mf.READ_WRITE | mf.COPY_HOST_PTR, 
+                    'wx': cl.Buffer(ctx, flags=mf.READ_WRITE | mf.COPY_HOST_PTR,
 								hostbuf=host_buffers_init['no_ghosts']['wx']),
-                    'wy': cl.Buffer(ctx, flags=mf.READ_WRITE | mf.COPY_HOST_PTR, 
+                    'wy': cl.Buffer(ctx, flags=mf.READ_WRITE | mf.COPY_HOST_PTR,
 								hostbuf=host_buffers_init['no_ghosts']['wy']),
-                    'wz': cl.Buffer(ctx, flags=mf.READ_WRITE | mf.COPY_HOST_PTR, 
+                    'wz': cl.Buffer(ctx, flags=mf.READ_WRITE | mf.COPY_HOST_PTR,
 								hostbuf=host_buffers_init['no_ghosts']['wz']),
-                    'dbg0': cl.Buffer(ctx, flags=mf.READ_WRITE | mf.COPY_HOST_PTR, 
+                    'dbg0': cl.Buffer(ctx, flags=mf.READ_WRITE | mf.COPY_HOST_PTR,
 								hostbuf=host_buffers_init['no_ghosts']['dbg0']),
-                    'dbg1': cl.Buffer(ctx, flags=mf.READ_WRITE | mf.COPY_HOST_PTR, 
+                    'dbg1': cl.Buffer(ctx, flags=mf.READ_WRITE | mf.COPY_HOST_PTR,
 								hostbuf=host_buffers_init['no_ghosts']['dbg1']),
                 },
                 'with_ghosts': {
-                    'ux': cl.Buffer(ctx, flags=mf.READ_ONLY  | mf.COPY_HOST_PTR, 
+                    'ux': cl.Buffer(ctx, flags=mf.READ_ONLY  | mf.COPY_HOST_PTR,
 								hostbuf=host_buffers_init['with_ghosts']['ux']),
-                    'uy': cl.Buffer(ctx, flags=mf.READ_ONLY  | mf.COPY_HOST_PTR, 
+                    'uy': cl.Buffer(ctx, flags=mf.READ_ONLY  | mf.COPY_HOST_PTR,
 								hostbuf=host_buffers_init['with_ghosts']['uy']),
-                    'uz': cl.Buffer(ctx, flags=mf.READ_ONLY  | mf.COPY_HOST_PTR, 
+                    'uz': cl.Buffer(ctx, flags=mf.READ_ONLY  | mf.COPY_HOST_PTR,
 								hostbuf=host_buffers_init['with_ghosts']['uz']),
-                    'wx': cl.Buffer(ctx, flags=mf.READ_WRITE | mf.COPY_HOST_PTR, 
+                    'wx': cl.Buffer(ctx, flags=mf.READ_WRITE | mf.COPY_HOST_PTR,
 								hostbuf=host_buffers_init['with_ghosts']['wx']),
-                    'wy': cl.Buffer(ctx, flags=mf.READ_WRITE | mf.COPY_HOST_PTR, 
+                    'wy': cl.Buffer(ctx, flags=mf.READ_WRITE | mf.COPY_HOST_PTR,
 								hostbuf=host_buffers_init['with_ghosts']['wy']),
-                    'wz': cl.Buffer(ctx, flags=mf.READ_WRITE | mf.COPY_HOST_PTR, 
+                    'wz': cl.Buffer(ctx, flags=mf.READ_WRITE | mf.COPY_HOST_PTR,
 								hostbuf=host_buffers_init['with_ghosts']['wz']),
-                    'dbg0': cl.Buffer(ctx, flags=mf.READ_WRITE | mf.COPY_HOST_PTR, 
+                    'dbg0': cl.Buffer(ctx, flags=mf.READ_WRITE | mf.COPY_HOST_PTR,
 								hostbuf=host_buffers_init['with_ghosts']['dbg0']),
-                    'dbg1': cl.Buffer(ctx, flags=mf.READ_WRITE | mf.COPY_HOST_PTR, 
+                    'dbg1': cl.Buffer(ctx, flags=mf.READ_WRITE | mf.COPY_HOST_PTR,
 								hostbuf=host_buffers_init['with_ghosts']['dbg1']),
                 }
         }
@@ -124,7 +124,7 @@ class TestDirectionalStretching(object):
         cls.host_buffers_reference = host_buffers_reference
         cls.host_buffers_gpu       = host_buffers_gpu
         cls.device_buffers         = device_buffers
-        
+
         Lx = min(typegen.device.max_work_item_sizes[0], typegen.device.max_work_group_size)
         Lx = min(Lx, grid_size[0])
 
@@ -132,7 +132,7 @@ class TestDirectionalStretching(object):
         cls.work_load       = np.asarray([1,2,3])
         cls.inv_dx = inv_dx
         cls.dt = dtype(0.5)
-        
+
         cls.do_extra_tests = do_extra_tests
         cls.enable_error_plots = enable_error_plots
 
@@ -140,27 +140,27 @@ class TestDirectionalStretching(object):
     @classmethod
     def teardown_class(cls):
         pass
-    
+
     def setup_method(self, method):
         pass
 
     def teardown_method(self, method):
         pass
 
-    def _do_compute_cpu(self, formulation, rk_scheme, order, direction, 
+    def _do_compute_cpu(self, formulation, rk_scheme, order, direction,
             boundary):
 
         dt = self.dt
         ghosts = self.compute_grid_ghosts
         grid_size = self.grid_size
-        
+
         if boundary   == BoundaryCondition.PERIODIC:
             target = 'no_ghosts'
         elif boundary == BoundaryCondition.NONE:
             target = 'with_ghosts'
         else:
             raise ValueError()
-        
+
         if order==2:
             stencil = [-1./2, 0, +1./2]
         elif order==4:
@@ -172,11 +172,11 @@ class TestDirectionalStretching(object):
 
         def deriv(field):
             res = np.zeros_like(field)
-            for i in xrange(-order/2,order/2+1,1):
-                res += stencil[i+order/2]*np.roll(field,-i,axis=2)
+            for i in range(-order//2, order//2+1, 1):
+                res += stencil[i+order//2]*np.roll(field,-i,axis=2)
             return res*self.inv_dx[0]
 
-        host_init_buffers      = self.host_buffers_init[target] 
+        host_init_buffers      = self.host_buffers_init[target]
         host_buffers_reference = self.host_buffers_reference[target]
 
         vorticity = ['wx','wy','wz']
@@ -184,23 +184,23 @@ class TestDirectionalStretching(object):
         U = [host_init_buffers[name].copy() for name in velocity]
         W = [host_init_buffers[name].copy() for name in vorticity]
         dUdx  = [deriv(ui) for ui in U]
-        
+
         if rk_scheme.name() == 'Euler':
             Wc = [ w.copy() for w in W ]
             if formulation==StretchingFormulation.GRAD_UW:
-                for i in xrange(3):
+                for i in range(3):
                     W[i] += dt*dUdx[i]*Wc[direction]
             elif formulation==StretchingFormulation.GRAD_UW_T:
-                for i in xrange(3):
+                for i in range(3):
                     W[direction] += dt*dUdx[i]*Wc[i]
             elif formulation==StretchingFormulation.MIXED_GRAD_UW:
-                for i in xrange(3):
+                for i in range(3):
                     W[i]         += 0.5*dt*dUdx[i]*Wc[direction]
-                    W[direction] += 0.5*dt*dUdx[i]*Wc[i] 
+                    W[direction] += 0.5*dt*dUdx[i]*Wc[i]
             elif formulation==StretchingFormulation.CONSERVATIVE:
                 W0 = Wc
                 K0 = [deriv(ui*W0[direction]) for ui in U]
-                W1 = [W0[i] + dt*K0[i]  for i in xrange(3)]
+                W1 = [W0[i] + dt*K0[i]  for i in range(3)]
                 W  = W1
             else:
                 msg = 'Unknown stretching formulation scheme {}.'.format(formulation)
@@ -209,34 +209,34 @@ class TestDirectionalStretching(object):
             Wc = [ w.copy() for w in W ]
             if formulation==StretchingFormulation.GRAD_UW:
                 W0 = Wc
-                K0 = [dUdx[i]*W0[direction] for i in xrange(3)]
-                W1 = [W0[i] + 0.5*dt*K0[i]  for i in xrange(3)]
-                K1 = [dUdx[i]*W1[direction] for i in xrange(3)]
-                W2 = [W0[i] + 1.0*dt*K1[i]  for i in xrange(3)]
+                K0 = [dUdx[i]*W0[direction] for i in range(3)]
+                W1 = [W0[i] + 0.5*dt*K0[i]  for i in range(3)]
+                K1 = [dUdx[i]*W1[direction] for i in range(3)]
+                W2 = [W0[i] + 1.0*dt*K1[i]  for i in range(3)]
                 W  = W2
             elif formulation==StretchingFormulation.GRAD_UW_T:
                 W0 = Wc
-                K0 = sum([dUdx[i]*W0[i] for i in xrange(3)])
+                K0 = sum([dUdx[i]*W0[i] for i in range(3)])
                 W1 = W0[direction] + 0.5*dt*K0
-                K1 = sum([dUdx[i]*W0[i] for i in xrange(3) if i!=direction]) + W1*dUdx[direction]
+                K1 = sum([dUdx[i]*W0[i] for i in range(3) if i!=direction]) + W1*dUdx[direction]
                 W2 = W0
                 W2[direction] += dt*K1
                 W  = W2
             elif formulation==StretchingFormulation.MIXED_GRAD_UW:
                 W0 = Wc
-                K0             =     [0.5*dUdx[i]*W0[direction] for i in xrange(3)]
-                K0[direction] += sum([0.5*dUdx[i]*W0[i]         for i in xrange(3)])
-                W1 = [W0[i] + 0.5*dt*K0[i]  for i in xrange(3)]
-                K1             =     [0.5*dUdx[i]*W1[direction] for i in xrange(3)]
-                K1[direction] += sum([0.5*dUdx[i]*W1[i]         for i in xrange(3)])
-                W2 = [W0[i] + 1.0*dt*K1[i]  for i in xrange(3)]
+                K0             =     [0.5*dUdx[i]*W0[direction] for i in range(3)]
+                K0[direction] += sum([0.5*dUdx[i]*W0[i]         for i in range(3)])
+                W1 = [W0[i] + 0.5*dt*K0[i]  for i in range(3)]
+                K1             =     [0.5*dUdx[i]*W1[direction] for i in range(3)]
+                K1[direction] += sum([0.5*dUdx[i]*W1[i]         for i in range(3)])
+                W2 = [W0[i] + 1.0*dt*K1[i]  for i in range(3)]
                 W  = W2
             elif formulation==StretchingFormulation.CONSERVATIVE:
                 W0 = Wc
                 K0 = [deriv(ui*W0[direction]) for ui in U]
-                W1 = [W0[i] + 0.5*dt*K0[i]  for i in xrange(3)]
+                W1 = [W0[i] + 0.5*dt*K0[i]  for i in range(3)]
                 K1 = [deriv(ui*W1[direction]) for ui in U]
-                W2 = [W0[i] + 1.0*dt*K1[i]  for i in xrange(3)]
+                W2 = [W0[i] + 1.0*dt*K1[i]  for i in range(3)]
                 W  = W2
             else:
                 msg = 'Unknown stretching formulation scheme {}.'.format(formulation)
@@ -245,56 +245,56 @@ class TestDirectionalStretching(object):
             Wc = [ w.copy() for w in W ]
             if formulation==StretchingFormulation.GRAD_UW:
                 W0 = Wc
-                K0 = [dUdx[i]*W0[direction] for i in xrange(3)]
-                W1 = [W0[i] + 0.5*dt*K0[i]  for i in xrange(3)]
-                K1 = [dUdx[i]*W1[direction] for i in xrange(3)]
-                W2 = [W0[i] + 0.5*dt*K1[i]  for i in xrange(3)]
-                K2 = [dUdx[i]*W2[direction] for i in xrange(3)]
-                W3 = [W0[i] + 1.0*dt*K2[i]  for i in xrange(3)]
-                K3 = [dUdx[i]*W3[direction] for i in xrange(3)]
-                K =  [1./6*K0[i] + 1./3*K1[i] + 1./3*K2[i] + 1./6*K3[i] for i in xrange(3)]
-                W4 = [W0[i] + dt*K[i]  for i in xrange(3)]
+                K0 = [dUdx[i]*W0[direction] for i in range(3)]
+                W1 = [W0[i] + 0.5*dt*K0[i]  for i in range(3)]
+                K1 = [dUdx[i]*W1[direction] for i in range(3)]
+                W2 = [W0[i] + 0.5*dt*K1[i]  for i in range(3)]
+                K2 = [dUdx[i]*W2[direction] for i in range(3)]
+                W3 = [W0[i] + 1.0*dt*K2[i]  for i in range(3)]
+                K3 = [dUdx[i]*W3[direction] for i in range(3)]
+                K =  [1./6*K0[i] + 1./3*K1[i] + 1./3*K2[i] + 1./6*K3[i] for i in range(3)]
+                W4 = [W0[i] + dt*K[i]  for i in range(3)]
                 W  = W4
             elif formulation==StretchingFormulation.GRAD_UW_T:
                 W0 = Wc
-                K0 = sum([dUdx[i]*W0[i] for i in xrange(3)])
+                K0 = sum([dUdx[i]*W0[i] for i in range(3)])
                 W1 = W0[direction] + 0.5*dt*K0
-                K1 = sum([dUdx[i]*W0[i] for i in xrange(3) if i!=direction]) + W1*dUdx[direction]
+                K1 = sum([dUdx[i]*W0[i] for i in range(3) if i!=direction]) + W1*dUdx[direction]
                 W2 = W0[direction] + 0.5*dt*K1
-                K2 = sum([dUdx[i]*W0[i] for i in xrange(3) if i!=direction]) + W2*dUdx[direction]
+                K2 = sum([dUdx[i]*W0[i] for i in range(3) if i!=direction]) + W2*dUdx[direction]
                 W3 = W0[direction] + 1.0*dt*K2
-                K3 = sum([dUdx[i]*W0[i] for i in xrange(3) if i!=direction]) + W3*dUdx[direction]
+                K3 = sum([dUdx[i]*W0[i] for i in range(3) if i!=direction]) + W3*dUdx[direction]
                 K =  1./6*K0 + 1./3*K1 + 1./3*K2 + 1./6*K3
                 W4 = W0
                 W4[direction] += dt*K
                 W  = W4
             elif formulation==StretchingFormulation.MIXED_GRAD_UW:
                 W0 = Wc
-                K0             =     [0.5*dUdx[i]*W0[direction] for i in xrange(3)]
-                K0[direction] += sum([0.5*dUdx[i]*W0[i]         for i in xrange(3)])
-                W1 = [W0[i] + 0.5*dt*K0[i]  for i in xrange(3)]
-                K1             =     [0.5*dUdx[i]*W1[direction] for i in xrange(3)]
-                K1[direction] += sum([0.5*dUdx[i]*W1[i]         for i in xrange(3)])
-                W2 = [W0[i] + 0.5*dt*K1[i]  for i in xrange(3)]
-                K2             =     [0.5*dUdx[i]*W2[direction] for i in xrange(3)]
-                K2[direction] += sum([0.5*dUdx[i]*W2[i]         for i in xrange(3)])
-                W3 = [W0[i] + 1.0*dt*K2[i]  for i in xrange(3)]
-                K3             =     [0.5*dUdx[i]*W3[direction] for i in xrange(3)]
-                K3[direction] += sum([0.5*dUdx[i]*W3[i]         for i in xrange(3)])
-                K =  [1./6*K0[i] + 1./3*K1[i] + 1./3*K2[i] + 1./6*K3[i] for i in xrange(3)]
-                W4 = [W0[i] + dt*K[i]  for i in xrange(3)]
+                K0             =     [0.5*dUdx[i]*W0[direction] for i in range(3)]
+                K0[direction] += sum([0.5*dUdx[i]*W0[i]         for i in range(3)])
+                W1 = [W0[i] + 0.5*dt*K0[i]  for i in range(3)]
+                K1             =     [0.5*dUdx[i]*W1[direction] for i in range(3)]
+                K1[direction] += sum([0.5*dUdx[i]*W1[i]         for i in range(3)])
+                W2 = [W0[i] + 0.5*dt*K1[i]  for i in range(3)]
+                K2             =     [0.5*dUdx[i]*W2[direction] for i in range(3)]
+                K2[direction] += sum([0.5*dUdx[i]*W2[i]         for i in range(3)])
+                W3 = [W0[i] + 1.0*dt*K2[i]  for i in range(3)]
+                K3             =     [0.5*dUdx[i]*W3[direction] for i in range(3)]
+                K3[direction] += sum([0.5*dUdx[i]*W3[i]         for i in range(3)])
+                K =  [1./6*K0[i] + 1./3*K1[i] + 1./3*K2[i] + 1./6*K3[i] for i in range(3)]
+                W4 = [W0[i] + dt*K[i]  for i in range(3)]
                 W  = W4
             elif formulation==StretchingFormulation.CONSERVATIVE:
                 W0 = Wc
                 K0 = [deriv(ui*W0[direction]) for ui in U]
-                W1 = [W0[i] + 0.5*dt*K0[i]  for i in xrange(3)]
+                W1 = [W0[i] + 0.5*dt*K0[i]  for i in range(3)]
                 K1 = [deriv(ui*W1[direction]) for ui in U]
-                W2 = [W0[i] + 0.5*dt*K1[i]  for i in xrange(3)]
+                W2 = [W0[i] + 0.5*dt*K1[i]  for i in range(3)]
                 K2 = [deriv(ui*W2[direction]) for ui in U]
-                W3 = [W0[i] + 1.0*dt*K2[i]  for i in xrange(3)]
+                W3 = [W0[i] + 1.0*dt*K2[i]  for i in range(3)]
                 K3 = [deriv(ui*W3[direction]) for ui in U]
-                K =  [1./6*K0[i] + 1./3*K1[i] + 1./3*K2[i] + 1./6*K3[i] for i in xrange(3)]
-                W4 = [W0[i] + dt*K[i]  for i in xrange(3)]
+                K =  [1./6*K0[i] + 1./3*K1[i] + 1./3*K2[i] + 1./6*K3[i] for i in range(3)]
+                W4 = [W0[i] + dt*K[i]  for i in range(3)]
                 W  = W4
             else:
                 msg = 'Unknown stretching formulation scheme {}.'.format(formulation)
@@ -302,21 +302,21 @@ class TestDirectionalStretching(object):
         else:
             msg = 'Unknown Runge-Kutta scheme {}.'.format(rk_scheme)
             raise ValueError(msg)
-        
+
         for i,name in enumerate(vorticity):
             host_buffers_reference[name] = W[i]
-    
-    def _do_compute_gpu_and_check(self, formulation, rk_scheme, order, direction, 
+
+    def _do_compute_gpu_and_check(self, formulation, rk_scheme, order, direction,
             boundary, cached):
 
         msg = '\nTesting {}{} with order {} and scheme {} in direction {} with {} boundaries.'\
             .format('cached ' if cached else '',
                     str(formulation).lower(),
-                    order, 
+                    order,
                     rk_scheme.name(),
-                    direction, 
+                    direction,
                     str(boundary).lower())
-        print msg
+        print(msg)
 
         dt = self.dt
         work_size       = self.grid_size
@@ -348,18 +348,18 @@ class TestDirectionalStretching(object):
             'local_size': local_work_size,
             'mesh_info': mesh_info
         }
-        
-        host_init_buffers      = self.host_buffers_init[target] 
+
+        host_init_buffers      = self.host_buffers_init[target]
         host_buffers_reference = self.host_buffers_reference[target]
         host_buffers_gpu       = self.host_buffers_gpu[target]
         device_buffers         = self.device_buffers[target]
 
         dsk = DirectionalStretchingKernel(
-            typegen=self.typegen, 
-            dim=3, 
+            typegen=self.typegen,
+            dim=3,
             ftype=self.typegen.fbtype,
-            order=order, 
-            direction=direction, 
+            order=order,
+            direction=direction,
             is_cached=cached,
             is_inplace=True,
             boundary=(boundary, boundary),
@@ -371,7 +371,7 @@ class TestDirectionalStretching(object):
         global_work_size = dsk.get_global_size(work_size,local_work_size,work_load)
         (static_shared_bytes, dynamic_shared_bytes, total_bytes) = \
                 dsk.required_workgroup_cache_size(local_work_size)
-        
+
         vorticity = ['wx','wy','wz']
         velocity  = ['ux','uy','uz']
         debug = ['dbg0', 'dbg1']
@@ -380,53 +380,53 @@ class TestDirectionalStretching(object):
         if (dynamic_shared_bytes != 0):
             shared_buffer = cl.LocalMemory(dynamic_shared_bytes)
             kernel_args.append(shared_buffer)
-    
-        print '\tGenerating and compiling Kernel...'
+
+        print('\tGenerating and compiling Kernel...')
         dsk.edit()
         source = dsk.__str__()
         prg = cl.Program(self.typegen.context, source)
         prg.build(devices=[self.typegen.device])
         kernel = prg.all_kernels()[0]
         kernel.set_args(*kernel_args)
-        
-        print '\tCPU => GPU'
+
+        print('\tCPU => GPU')
         for buf in velocity+vorticity:
             src = host_init_buffers[buf]
             dst = device_buffers[buf]
             cl.enqueue_copy(queue,dst,src)
-        
-        print '\tKernel execution <<<{},{}>>>'.format(global_work_size,local_work_size)
-        evt = cl.enqueue_nd_range_kernel(queue, kernel, 
+
+        print('\tKernel execution <<<{},{}>>>'.format(global_work_size,local_work_size))
+        evt = cl.enqueue_nd_range_kernel(queue, kernel,
                 global_work_size.tolist(), local_work_size.tolist())
         evt.wait()
-        
-        print '\tGPU => CPU'
+
+        print('\tGPU => CPU')
         for buf in vorticity:
             src = device_buffers[buf]
             dst = host_buffers_gpu[buf]
             cl.enqueue_copy(queue,dst,src)
 
-        print '\tSynchronize queue'
+        print('\tSynchronize queue')
         queue.flush()
         queue.finish()
-        
-        buffers = [(varname,host_buffers_reference[varname],host_buffers_gpu[varname]) 
+
+        buffers = [(varname,host_buffers_reference[varname],host_buffers_gpu[varname])
                         for varname in vorticity]
         self._cmp_buffers(buffers,view,dsk)
-    
+
     def _cmp_buffers(self,buffers,view,dsk):
         good = True
         err_buffers = []
 
         for (name,host,dev) in buffers:
             (l1,l2,linf) = self._distances(host,dev,view)
-            print '\t{} -> l1={}  l2={}  linf={}'.format(name,l1,l2,linf)
+            print('\t{} -> l1={}  l2={}  linf={}'.format(name,l1,l2,linf))
             if l2>1e-8:
                 err_buffers.append(name)
                 good = False
         if not good:
             msg = '\n[FAIL] Buffer comparisson failed for buffers {}.\n'.format(err_buffers)
-            print msg
+            print(msg)
             dsk.edit()
             if self.enable_error_plots:
                 from matplotlib import pyplot as plt
@@ -439,7 +439,7 @@ class TestDirectionalStretching(object):
 
                         d = (dev-host)*(dev-host)
                         d -= np.mean(d)
-                        
+
                         plt.title(name)
 
                         axes[0][0].imshow(np.sum(d,axis=0),interpolation='nearest')
@@ -461,35 +461,35 @@ class TestDirectionalStretching(object):
         l2   = np.sqrt(np.sum(d*d))/d.size
         linf = np.max(da)
         return (l1,l2,linf)
-    
-    
+
+
     def _check_kernels(self, formulation, rk_scheme):
         check_instance(formulation,StretchingFormulation)
         check_instance(rk_scheme,ExplicitRungeKutta)
 
         cached=[False,True]
         boundaries=[BoundaryCondition.NONE, BoundaryCondition.PERIODIC]
-        
+
         if self.do_extra_tests:
             directions=[0,1,2]
             orders=[2,4,6]
         else:
             directions=[1]
             orders=[4]
-        
+
         for cache in cached:
-            if (formulation==StretchingFormulation.CONSERVATIVE) and not cache: 
+            if (formulation==StretchingFormulation.CONSERVATIVE) and not cache:
                 continue
             for boundary in boundaries:
                 for direction in directions:
                     for order in orders:
                         self._do_compute_cpu(order=order, direction=direction, boundary=boundary,
                                 formulation=formulation, rk_scheme=rk_scheme)
-                        
-                        self._do_compute_gpu_and_check(order=order, direction=direction, 
-                                boundary=boundary, formulation=formulation, 
+
+                        self._do_compute_gpu_and_check(order=order, direction=direction,
+                                boundary=boundary, formulation=formulation,
                                 rk_scheme=rk_scheme, cached=cache)
-   
+
 
 
 
@@ -497,61 +497,61 @@ class TestDirectionalStretching(object):
         formulation=StretchingFormulation.GRAD_UW
         rk_scheme=ExplicitRungeKutta('Euler')
         self._check_kernels(formulation=formulation, rk_scheme=rk_scheme)
-    
+
     def test_stretching_gradUW_T_Euler(self):
         formulation=StretchingFormulation.GRAD_UW_T
         rk_scheme=ExplicitRungeKutta('Euler')
         self._check_kernels(formulation=formulation, rk_scheme=rk_scheme)
-    
+
     def test_stretching_mixed_gradUW_Euler(self):
         formulation=StretchingFormulation.MIXED_GRAD_UW
         rk_scheme=ExplicitRungeKutta('Euler')
         self._check_kernels(formulation=formulation, rk_scheme=rk_scheme)
-    
+
     def test_stretching_conservative_Euler(self):
         formulation=StretchingFormulation.CONSERVATIVE
         rk_scheme=ExplicitRungeKutta('Euler')
         self._check_kernels(formulation=formulation, rk_scheme=rk_scheme)
-   
+
 
 
     def test_stretching_gradUW_RK2(self):
         formulation=StretchingFormulation.GRAD_UW
         rk_scheme=ExplicitRungeKutta('RK2')
         self._check_kernels(formulation=formulation, rk_scheme=rk_scheme)
-    
+
     def test_stretching_gradUW_T_RK2(self):
         formulation=StretchingFormulation.GRAD_UW_T
         rk_scheme=ExplicitRungeKutta('RK2')
         self._check_kernels(formulation=formulation, rk_scheme=rk_scheme)
-    
+
     def test_stretching_mixed_gradUW_RK2(self):
         formulation=StretchingFormulation.MIXED_GRAD_UW
         rk_scheme=ExplicitRungeKutta('RK2')
         self._check_kernels(formulation=formulation, rk_scheme=rk_scheme)
-    
+
     def test_stretching_conservative_RK2(self):
         formulation=StretchingFormulation.CONSERVATIVE
         rk_scheme=ExplicitRungeKutta('RK2')
         self._check_kernels(formulation=formulation, rk_scheme=rk_scheme)
-    
-   
+
+
 
     def test_stretching_gradUW_RK4(self):
         formulation=StretchingFormulation.GRAD_UW
         rk_scheme=ExplicitRungeKutta('RK4')
         self._check_kernels(formulation=formulation, rk_scheme=rk_scheme)
-    
+
     def test_stretching_gradUW_T_RK4(self):
         formulation=StretchingFormulation.GRAD_UW_T
         rk_scheme=ExplicitRungeKutta('RK4')
         self._check_kernels(formulation=formulation, rk_scheme=rk_scheme)
-    
+
     def test_stretching_mixed_gradUW_RK4(self):
         formulation=StretchingFormulation.MIXED_GRAD_UW
         rk_scheme=ExplicitRungeKutta('RK4')
         self._check_kernels(formulation=formulation, rk_scheme=rk_scheme)
-    
+
     def test_stretching_conservative_RK4(self):
         formulation=StretchingFormulation.CONSERVATIVE
         rk_scheme=ExplicitRungeKutta('RK4')
@@ -561,17 +561,17 @@ class TestDirectionalStretching(object):
 if __name__ == '__main__':
     TestDirectionalStretching.setup_class(do_extra_tests=False, enable_error_plots=False)
     test = TestDirectionalStretching()
-    
+
     test.test_stretching_gradUW_Euler()
     test.test_stretching_gradUW_T_Euler()
     test.test_stretching_mixed_gradUW_Euler()
     test.test_stretching_conservative_Euler()
-    
+
     test.test_stretching_gradUW_RK2()
     test.test_stretching_gradUW_T_RK2()
     test.test_stretching_mixed_gradUW_RK2()
     test.test_stretching_conservative_RK2()
-    
+
     test.test_stretching_gradUW_RK4()
     test.test_stretching_gradUW_T_RK4()
     test.test_stretching_mixed_gradUW_RK4()
diff --git a/hysop/backend/device/codegen/kernels/tests/test_codegen_transpose.py b/hysop/backend/device/codegen/kernels/tests/test_codegen_transpose.py
index f5281606e9460729c2baac4d056d1b41501b4c4c..152c89cad4140299bcc8da042337656504a95801 100644
--- a/hysop/backend/device/codegen/kernels/tests/test_codegen_transpose.py
+++ b/hysop/backend/device/codegen/kernels/tests/test_codegen_transpose.py
@@ -1,8 +1,8 @@
-
 import copy, math, sys, os, tempfile
+import itertools as it
+import numpy as np
 
 from hysop import __ENABLE_LONG_TESTS__
-from hysop.deps import np, it
 from hysop.tools.misc import upper_pow2_or_3, prod
 from hysop.tools.types import check_instance
 from hysop.tools.numerics import is_integer
@@ -21,11 +21,11 @@ class TestTranspose(object):
             enable_error_plots=False,
             enable_interactive_debug=False,
             enable_debug_mode=False):
-        
+
         typegen = _test_typegen('float','dec')
         queue = cl.CommandQueue(typegen.context)
         ctx   = typegen.context
-        
+
         cls.enable_extra_tests = enable_extra_tests
         cls.enable_error_plots = enable_error_plots
         cls.enable_interactive_debug = enable_interactive_debug
@@ -37,14 +37,14 @@ class TestTranspose(object):
             cls.size_min = 1
             cls.size_max = 32
             # sizeof(double) * (16^4) = 2^(4*4+3) = 2^(20)/2 = 512Mo buffers in worst case
-        
+
         cls.queue   = queue
         cls.ctx     = ctx
         cls.typegen = typegen
         cls.device = typegen.device
 
         cls.enable_debug_mode = enable_debug_mode
-        
+
 
     def _alloc_cpu_gpu(self, dtype, dim, is_inplace):
 
@@ -55,45 +55,45 @@ class TestTranspose(object):
         grid_size = grid_size.astype(np.int32)
         if is_inplace:
             grid_size[1:] = grid_size[0]
-        
+
         igrid_ghosts = 1 + np.rint(dim*np.random.rand(dim)).astype(np.int32)
         ogrid_ghosts = 1 + np.rint(dim*np.random.rand(dim)).astype(np.int32)
 
         igrid_size = grid_size + 2*igrid_ghosts
         ogrid_size = grid_size + 2*ogrid_ghosts
-        
+
         grid_shape  = grid_size[::-1]
         igrid_shape = igrid_size[::-1]
         ogrid_shape = ogrid_size[::-1]
-        
+
         grid_bytes  = grid_size.size  * np.dtype(dtype).itemsize
-        igrid_bytes = igrid_size.size * np.dtype(dtype).itemsize 
-        ogrid_bytes = ogrid_size.size * np.dtype(dtype).itemsize 
-        
-        grid_view  = [ slice(0,               grid_size[i] + 0              ) 
-                for i in xrange(dim-1,-1,-1) ]
-        igrid_view = [ slice(igrid_ghosts[i], grid_size[i] + igrid_ghosts[i]) 
-                for i in xrange(dim-1,-1,-1) ]
-        ogrid_view = [ slice(ogrid_ghosts[i], grid_size[i] + ogrid_ghosts[i]) 
-                for i in xrange(dim-1,-1,-1) ]
-        
-        print
-        print '::Alloc:: dtype={}  dim={}'.format(dtype, dim)
-        print '  *INPUT:  base={}  ghosts={}  size={}'.format(grid_size, igrid_ghosts, igrid_size)
-        print '  *OUTPUT: base={}  ghosts={}  size={}'.format(grid_size, ogrid_ghosts, ogrid_size)
+        igrid_bytes = igrid_size.size * np.dtype(dtype).itemsize
+        ogrid_bytes = ogrid_size.size * np.dtype(dtype).itemsize
+
+        grid_view  = [ slice(0,               grid_size[i] + 0              )
+                for i in range(dim-1,-1,-1) ]
+        igrid_view = [ slice(igrid_ghosts[i], grid_size[i] + igrid_ghosts[i])
+                for i in range(dim-1,-1,-1) ]
+        ogrid_view = [ slice(ogrid_ghosts[i], grid_size[i] + ogrid_ghosts[i])
+                for i in range(dim-1,-1,-1) ]
+
+        print()
+        print('::Alloc:: dtype={}  dim={}'.format(dtype, dim))
+        print('  *INPUT:  base={}  ghosts={}  size={}'.format(grid_size, igrid_ghosts, igrid_size))
+        print('  *OUTPUT: base={}  ghosts={}  size={}'.format(grid_size, ogrid_ghosts, ogrid_size))
 
         mf = cl.mem_flags
-                   
+
         def random(*shape):
             array = np.random.rand(*shape)
             if is_integer(dtype):
                 info = np.iinfo(dtype)
-                min_ = max(-99, info.min)  
+                min_ = max(-99, info.min)
                 max_ = min(+99, info.max)
                 return np.rint(min_+array*(max_-min_)).astype(dtype)
             else:
                 return array.astype(dtype)
-            
+
         host_buffers_init = {
                 'no_ghosts': {
                     'Tin':         random(*grid_shape),
@@ -104,34 +104,34 @@ class TestTranspose(object):
                     'Tout': -1   * np.ones(shape=ogrid_shape, dtype=dtype),
                 }
         }
-                    
+
         device_buffers = {
                 'no_ghosts': {
-                    'Tin':  cl.Buffer(ctx, flags=mf.READ_ONLY  | mf.COPY_HOST_PTR, 
+                    'Tin':  cl.Buffer(ctx, flags=mf.READ_ONLY  | mf.COPY_HOST_PTR,
 								hostbuf=host_buffers_init['no_ghosts']['Tin']),
-                    'Tout': cl.Buffer(ctx, flags=mf.WRITE_ONLY  | mf.COPY_HOST_PTR, 
+                    'Tout': cl.Buffer(ctx, flags=mf.WRITE_ONLY  | mf.COPY_HOST_PTR,
 								hostbuf=host_buffers_init['no_ghosts']['Tout']),
                 },
                 'with_ghosts': {
-                    'Tin':  cl.Buffer(ctx, flags=mf.READ_ONLY  | mf.COPY_HOST_PTR, 
+                    'Tin':  cl.Buffer(ctx, flags=mf.READ_ONLY  | mf.COPY_HOST_PTR,
 								hostbuf=host_buffers_init['with_ghosts']['Tin']),
-                    'Tout': cl.Buffer(ctx, flags=mf.WRITE_ONLY  | mf.COPY_HOST_PTR, 
+                    'Tout': cl.Buffer(ctx, flags=mf.WRITE_ONLY  | mf.COPY_HOST_PTR,
 								hostbuf=host_buffers_init['with_ghosts']['Tout']),
                 }
         }
 
-        for i in xrange(TransposeKernelGenerator.n_dbg_arrays):
+        for i in range(TransposeKernelGenerator.n_dbg_arrays):
             name = 'dbg{}'.format(i)
             for target in ['no_ghosts', 'with_ghosts']:
                 arr = (-i) * np.ones(shape=grid_shape, dtype=np.int32)
                 host_buffers_init[target][name] = arr
-                device_buffers[target][name] = cl.Buffer(ctx, 
+                device_buffers[target][name] = cl.Buffer(ctx,
                         flags=mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=arr)
 
 
         host_buffers_reference = copy.deepcopy(host_buffers_init)
         host_buffers_gpu       = copy.deepcopy(host_buffers_init)
-        
+
         self.dtype = dtype
         self.ctype = dtype_to_ctype(dtype)
         self.dim   = dim
@@ -142,28 +142,28 @@ class TestTranspose(object):
         self.device_buffers         = device_buffers
 
         self.grid_size  = grid_size
-        self.igrid_size = igrid_size 
+        self.igrid_size = igrid_size
         self.ogrid_size = ogrid_size
-        
+
         self.grid_shape  = grid_shape
-        self.igrid_shape = igrid_shape 
+        self.igrid_shape = igrid_shape
         self.ogrid_shape = ogrid_shape
 
         self.grid_bytes  = grid_bytes
-        self.igrid_bytes = igrid_bytes 
+        self.igrid_bytes = igrid_bytes
         self.ogrid_bytes = ogrid_bytes
 
         self.grid_view  = grid_view
-        self.igrid_view = igrid_view 
+        self.igrid_view = igrid_view
         self.ogrid_view = ogrid_view
-        
-        self.igrid_ghosts = igrid_ghosts 
+
+        self.igrid_ghosts = igrid_ghosts
         self.ogrid_ghosts = ogrid_ghosts
 
     @classmethod
     def teardown_class(cls):
         pass
-    
+
     def setup_method(self, method):
         pass
 
@@ -171,57 +171,57 @@ class TestTranspose(object):
         pass
 
     def _do_transpose_cpu(self, is_inplace, axes):
-        print '  *Transposing on CPU.'
+        print('  *Transposing on CPU.')
         out_target =  'Tout'
 
         for target in ['no_ghosts']: #, 'with_ghosts']:
-            host_init_buffers      = self.host_buffers_init[target] 
+            host_init_buffers      = self.host_buffers_init[target]
             host_buffers_reference = self.host_buffers_reference[target]
-            
+
             if target == 'no_ghosts':
                 in_view  = self.grid_view
                 out_view = self.grid_view
             else:
-                in_view  = self.igrid_view 
+                in_view  = self.igrid_view
                 out_view = self.igrid_view if is_inplace else self.ogrid_view
             Tin  = host_init_buffers['Tin']
             Tout = host_buffers_reference[out_target]
-            
+
             out_view = [ out_view[i] for i in axes ]
             Tout = Tout.reshape(tuple(Tout.shape[i] for i in axes))
             Tout[out_view] = np.transpose(Tin[in_view].copy(), axes=axes)
-    
-    def _do_compute_gpu_and_check(self, dim, axes, is_inplace, vectorization, 
+
+    def _do_compute_gpu_and_check(self, dim, axes, is_inplace, vectorization,
             tile_size, tile_padding):
 
         msg  = '\nTesting {}d {} transposition with {} axes, ({})({}) '
         msg += 'vectorization and tiles of size {}.'
-        msg = msg.format(dim, 
+        msg = msg.format(dim,
                 'inplace' if is_inplace else 'out of place',
                 axes, self.ctype, vectorization, tile_size)
-        print msg
+        print(msg)
 
-        device = self.device 
+        device = self.device
         enable_debug_mode = self.enable_debug_mode
 
         for target in ['no_ghosts']:
-            host_init_buffers      = self.host_buffers_init[target] 
+            host_init_buffers      = self.host_buffers_init[target]
             host_buffers_reference = self.host_buffers_reference[target]
             host_buffers_gpu       = self.host_buffers_gpu[target]
             device_buffers         = self.device_buffers[target]
-            
+
             if target == 'no_ghosts':
                 in_view  = self.grid_view
                 out_view = self.grid_view
             else:
-                in_view  = self.igrid_view 
+                in_view  = self.igrid_view
                 out_view = self.igrid_view if is_inplace else self.ogrid_view
             out_view = [ out_view[i] for i in axes ]
 
             shape_dim = upper_pow2_or_3(dim)
             shape = tuple( gs for gs in self.grid_size )
             shape += (shape_dim - dim)*(0,)
-            
+
             known_vars = { 'shape': shape }
 
             dak = TransposeKernelGenerator(
@@ -229,13 +229,13 @@ class TestTranspose(object):
                 tile_padding=tile_padding,
                 axes=axes,
                 ctype=self.ctype,
-                typegen=self.typegen, 
+                typegen=self.typegen,
                 vectorization=vectorization,
                 is_inplace=is_inplace,
                 symbolic_mode=False,
                 debug_mode=enable_debug_mode,
                 known_vars=known_vars)
-            
+
             usable_cache_bytes_per_wg = clCharacterize.usable_local_mem_size(device)
             (static_shared_bytes, dynamic_shared_bytes, total_bytes) = \
                     dak.required_workgroup_cache_size()
@@ -243,67 +243,67 @@ class TestTranspose(object):
 
             work_dim = dak.work_dim
 
-            local_work_size = np.minimum([tile_size]*work_dim, 
+            local_work_size = np.minimum([tile_size]*work_dim,
                     device.max_work_item_sizes[:work_dim])
             for i in dak.workload_indexes:
                 local_work_size[i] = 1
             max_work = device.max_work_group_size
             while (prod(local_work_size) > max_work):
-                for i in xrange(work_dim-1,-1,-1):
+                for i in range(work_dim-1, -1, -1):
                     if local_work_size[i] > 1:
                         break
-                local_work_size[i] /= 2
+                local_work_size[i] //= 2
 
             work_load = [1]*work_dim
-            work_size = dak.shape_to_worksize(self.grid_size, tile_size, 
+            work_size = dak.shape_to_worksize(self.grid_size, tile_size,
                     vectorization, axes, local_work_size)
 
             max_global_size  = dak.get_max_global_size(work_size, work_load)
             local_work_size  = np.minimum(max_global_size, local_work_size)
             global_work_size = dak.get_global_size(work_size, local_work_size, work_load)
-            
-            tile_grid_size = np.asarray([ self.grid_size[i] 
+
+            tile_grid_size = np.asarray([ self.grid_size[i]
                 for (i,j) in enumerate(axes) if (i!=j) or (i==0) ])
 
-            print 'AXES={}   TILE=({})'.format(axes,
-                    ','.join([ str(j) if (i!=j) else ('X' if i>0 else '*') 
-                        for (i,j) in enumerate(axes) ]))
-            print '  *shape={}  in_tile_shape=[{}]  out_tile_shape=[{}]'.format(self.grid_size, 
-                    ','.join([str(self.grid_size[i]) 
+            print('AXES={}   TILE=({})'.format(axes,
+                    ','.join([ str(j) if (i!=j) else ('X' if i>0 else '*')
+                        for (i,j) in enumerate(axes) ])))
+            print('  *shape={}  in_tile_shape=[{}]  out_tile_shape=[{}]'.format(self.grid_size,
+                    ','.join([str(self.grid_size[i])
                         for (i,j) in enumerate(axes) if (i!=j) or (i==0)]),
-                    ','.join([str(self.grid_size[j]) 
-                        for (i,j) in enumerate(axes) if (i!=j) or (i==0)]))
-
-            print '  *tile={} vec={} worksize={} L={} WL={} G={} num_groups={} num_blocks={}'\
-                    .format(tile_size, vectorization, work_size,  
-                            local_work_size, work_load, global_work_size, 
-                            (global_work_size+local_work_size-1)/local_work_size, 
-                            (tile_grid_size+tile_size-1)/tile_size)
-            
+                    ','.join([str(self.grid_size[j])
+                        for (i,j) in enumerate(axes) if (i!=j) or (i==0)])))
+
+            print('  *tile={} vec={} worksize={} L={} WL={} G={} num_groups={} num_blocks={}'\
+                    .format(tile_size, vectorization, work_size,
+                            local_work_size, work_load, global_work_size,
+                            (global_work_size+local_work_size-1)//local_work_size,
+                            (tile_grid_size+tile_size-1)//tile_size))
+
             if is_inplace:
                 variables = ['Tin']
-            else: 
+            else:
                 variables = ['Tin','Tout']
 
             debug = []
             if enable_debug_mode:
-                debug += [ 'dbg{}'.format(i) for i in xrange(dak.n_dbg_arrays) ]
+                debug += [ 'dbg{}'.format(i) for i in range(dak.n_dbg_arrays) ]
             kernel_args = []
             for varname in variables + debug:
                 kernel_args.append(device_buffers[varname])
             assert (dynamic_shared_bytes == 0)
-        
-            print '  *Generating and compiling Kernel...'
+
+            print('  *Generating and compiling Kernel...')
             source = dak.__str__()
-            
+
             # loop for interactive debugging
             good = False
             i=0
             e = None
             while(not good):
                 if self.enable_interactive_debug:
-                    print '== {}th iteration =='.format(i)
-                    print
+                    print('== {}th iteration =='.format(i))
+                    print()
                 try:
                     prg = cl.Program(self.ctx, source)
                     prg.build(devices=[self.device])
@@ -311,20 +311,20 @@ class TestTranspose(object):
                     kernel.set_args(*kernel_args)
 
                     queue = self.queue
-                    
-                    print '   ::CPU => GPU'
+
+                    print('   ::CPU => GPU')
                     for buf in variables+debug:
                         src = host_init_buffers[buf]
                         dst = device_buffers[buf]
                         cl.enqueue_copy(queue,dst,src)
-                    
-                    print '   ::Kernel execution <<<{},{}>>>'.format(global_work_size,
-                                                                     local_work_size)
-                    evt = cl.enqueue_nd_range_kernel(queue, kernel, 
+
+                    print('   ::Kernel execution <<<{},{}>>>'.format(global_work_size,
+                                                                     local_work_size))
+                    evt = cl.enqueue_nd_range_kernel(queue, kernel,
                             global_work_size.tolist(), local_work_size.tolist())
                     evt.wait()
-                    
-                    print '   ::GPU => CPU'
+
+                    print('   ::GPU => CPU')
                     for buf in variables+debug:
                         src = device_buffers[buf]
                         if is_inplace and buf=='Tin':
@@ -333,7 +333,7 @@ class TestTranspose(object):
                             dst = host_buffers_gpu[buf]
                         cl.enqueue_copy(queue,dst,src)
 
-                    print '   ::Synchronize queue'
+                    print('   ::Synchronize queue')
                     queue.flush()
                     queue.finish()
 
@@ -346,19 +346,19 @@ class TestTranspose(object):
                     Tout_cpu = Tout_cpu.reshape(nshape)
 
                     if enable_debug_mode:
-                        for i in xrange(dak.n_dbg_arrays):
-                            print '::DBG{}::'.format(i)
-                            print host_buffers_gpu['dbg{}'.format(i)]
-                            print
-                        print '::IN::'
-                        print Tin_cpu[in_view]
-                        print
-                        print '::OUT CPU::'
-                        print Tout_cpu[out_view]
-                        print
-                        print '::OUT GPU::'
-                        print Tout[out_view]
-                    
+                        for i in range(dak.n_dbg_arrays):
+                            print('::DBG{}::'.format(i))
+                            print(host_buffers_gpu['dbg{}'.format(i)])
+                            print()
+                        print('::IN::')
+                        print(Tin_cpu[in_view])
+                        print()
+                        print('::OUT CPU::')
+                        print(Tout_cpu[out_view])
+                        print()
+                        print('::OUT GPU::')
+                        print(Tout[out_view])
+
                     if is_inplace:
                         buffers = (('Tout', Tout_cpu, Tout, out_view),)
                     else:
@@ -367,7 +367,7 @@ class TestTranspose(object):
                     good, err_buffers = self._cmp_buffers(buffers,dak,dim)
                 except cl_api.RuntimeError as error:
                     e = error
-                    print 'ERROR: ',e
+                    print('ERROR: ',e)
                     good = False
 
                 if not self.enable_interactive_debug:
@@ -376,7 +376,7 @@ class TestTranspose(object):
                     filepath = '{}/debug_dump_{}.cl'.format(tempfile.gettempdir(), i)
                     dak.edit(filepath=filepath, modified=True)
                     source = dak.modified_code
-                    print 'Source dumped to {}.'.format(filepath)
+                    print('Source dumped to {}.'.format(filepath))
                     if source[:4]=='exit':
                         break
                 os.system('clear')
@@ -386,19 +386,19 @@ class TestTranspose(object):
             if self.enable_debug_mode and not good:
                 dak.edit()
             self._check_errors(buffers, dak, dim, good, err_buffers)
-    
+
     def _cmp_buffers(self,buffers,dak,dim):
         good = True
         err_buffers = []
-        
-        print '  *Comparing outputs'
+
+        print('  *Comparing outputs')
         for (name,host,dev,view) in buffers:
             if host.shape != dev.shape:
                 msg='Incompatible buffer shapes between cpu {} and gpu {} outputs.'
                 msg=msg.format(host.shape, dev.shape)
                 raise RuntimeError(msg)
             (l1,l2,linf) = self._distances(host,dev,view)
-            print '   ::{:6} -> l1={}  l2={}  linf={}'.format(name,l1,l2,linf)
+            print('   ::{:6} -> l1={}  l2={}  linf={}'.format(name,l1,l2,linf))
             if linf>1e-12:
                 err_buffers.append(name)
                 good = False
@@ -407,7 +407,7 @@ class TestTranspose(object):
     def _check_errors(self, buffers, dak, dim, good, err_buffers):
         if not good:
             msg = '\n[FAIL] Buffer comparisson failed for buffers {}.\n'.format(err_buffers)
-            print msg
+            print(msg)
             if self.enable_error_plots:
                 from matplotlib import pyplot as plt
                 for (name,host,dev,view) in buffers:
@@ -415,11 +415,11 @@ class TestTranspose(object):
 
                         host = host[view]
                         dev  = dev[view]
-                        
+
                         E = (dev - host).astype(np.float64)
                         d = E*E
                         d -= np.mean(d)
-                        
+
                         if dim==4:
                             fig,axes = plt.subplots(4,3)
                             axes[0][0].imshow(np.sum(d,axis=(0,1)), interpolation='nearest')
@@ -443,7 +443,7 @@ class TestTranspose(object):
                             axes[0][0].imshow(np.sum(d,axis=0), interpolation='nearest')
                             axes[0][1].imshow(np.sum(d,axis=1), interpolation='nearest')
                             axes[1][0].imshow(np.sum(d,axis=2), interpolation='nearest')
-                            axes[1][1].imshow(np.sum(d,axis=(0,1))[np.newaxis,:], 
+                            axes[1][1].imshow(np.sum(d,axis=(0,1))[np.newaxis,:],
                                                                 interpolation='nearest')
                             plt.title(name)
                             fig.show()
@@ -471,13 +471,13 @@ class TestTranspose(object):
         l2   = np.sqrt(np.sum(d*d))/d.size
         linf = np.max(da)
         return (l1,l2,linf)
-   
+
     ### TESTS ###
     ## OUT OF PLACE TRANSPOSE
     # 2d tests
     def test_transpose_YX_out_of_place(self):
         self._test_transpose(dim=2, axes=(1,0), is_inplace=False)
-    
+
     # 3d tests
     def test_transpose_XZY_out_of_place(self):
         self._test_transpose(dim=3, axes=(1,0,2), is_inplace=False)
@@ -489,7 +489,7 @@ class TestTranspose(object):
         self._test_transpose(dim=3, axes=(1,2,0), is_inplace=False)
     def test_transpose_ZYX_out_of_place(self):
         self._test_transpose(dim=3, axes=(2,1,0), is_inplace=False)
-    
+
     # 4d tests
     def test_transpose_AXZY_out_of_place(self):
         self._test_transpose(dim=4, axes=(1,0,2,3), is_inplace=False)
@@ -501,7 +501,7 @@ class TestTranspose(object):
         self._test_transpose(dim=4, axes=(1,2,0,3), is_inplace=False)
     def test_transpose_AZYX_out_of_place(self):
         self._test_transpose(dim=4, axes=(2,1,0,3), is_inplace=False)
-    
+
     def test_transpose_XAYZ_out_of_place(self):
         self._test_transpose(dim=4, axes=(0,1,3,2), is_inplace=False)
     def test_transpose_XAZY_out_of_place(self):
@@ -514,7 +514,7 @@ class TestTranspose(object):
         self._test_transpose(dim=4, axes=(1,2,3,0), is_inplace=False)
     def test_transpose_ZAYX_out_of_place(self):
         self._test_transpose(dim=4, axes=(2,1,3,0), is_inplace=False)
-    
+
     def test_transpose_XYAZ_out_of_place(self):
         self._test_transpose(dim=4, axes=(0,3,1,2), is_inplace=False)
     def test_transpose_XZAY_out_of_place(self):
@@ -527,7 +527,7 @@ class TestTranspose(object):
         self._test_transpose(dim=4, axes=(1,3,2,0), is_inplace=False)
     def test_transpose_ZYAX_out_of_place(self):
         self._test_transpose(dim=4, axes=(2,3,1,0), is_inplace=False)
-    
+
     def test_transpose_XYZA_out_of_place(self):
         self._test_transpose(dim=4, axes=(3,0,1,2), is_inplace=False)
     def test_transpose_XZYA_out_of_place(self):
@@ -540,9 +540,9 @@ class TestTranspose(object):
         self._test_transpose(dim=4, axes=(3,1,2,0), is_inplace=False)
     def test_transpose_ZYXA_out_of_place(self):
         self._test_transpose(dim=4, axes=(3,2,1,0), is_inplace=False)
-   
+
     def test_transpose_5d_out_of_place(self):
-        for axes in it.permutations(xrange(5)):
+        for axes in it.permutations(range(5)):
             if tuple(axes) == tuple(range(5)):
                 continue
             self._test_transpose(dim=5, axes=axes, is_inplace=False)
@@ -551,7 +551,7 @@ class TestTranspose(object):
     # 2d tests
     def test_transpose_YX_inplace(self):
         self._test_transpose(dim=2, axes=(1,0), is_inplace=True)
-    
+
     # 3d tests
     def test_transpose_XZY_inplace(self):
         self._test_transpose(dim=3, axes=(1,0,2), is_inplace=True)
@@ -563,7 +563,7 @@ class TestTranspose(object):
         self._test_transpose(dim=3, axes=(1,2,0), is_inplace=True)
     def test_transpose_ZYX_inplace(self):
         self._test_transpose(dim=3, axes=(2,1,0), is_inplace=True)
-    
+
     # 4d tests
     def test_transpose_AXZY_inplace(self):
         self._test_transpose(dim=4, axes=(1,0,2,3), is_inplace=True)
@@ -575,7 +575,7 @@ class TestTranspose(object):
         self._test_transpose(dim=4, axes=(1,2,0,3), is_inplace=True)
     def test_transpose_AZYX_inplace(self):
         self._test_transpose(dim=4, axes=(2,1,0,3), is_inplace=True)
-    
+
     def test_transpose_XAYZ_inplace(self):
         self._test_transpose(dim=4, axes=(0,1,3,2), is_inplace=True)
     def test_transpose_XAZY_inplace(self):
@@ -588,7 +588,7 @@ class TestTranspose(object):
         self._test_transpose(dim=4, axes=(1,2,3,0), is_inplace=True)
     def test_transpose_ZAYX_inplace(self):
         self._test_transpose(dim=4, axes=(2,1,3,0), is_inplace=True)
-    
+
     def test_transpose_XYAZ_inplace(self):
         self._test_transpose(dim=4, axes=(0,3,1,2), is_inplace=True)
     def test_transpose_XZAY_inplace(self):
@@ -601,7 +601,7 @@ class TestTranspose(object):
         self._test_transpose(dim=4, axes=(1,3,2,0), is_inplace=True)
     def test_transpose_ZYAX_inplace(self):
         self._test_transpose(dim=4, axes=(2,3,1,0), is_inplace=True)
-    
+
     def test_transpose_XYZA_inplace(self):
         self._test_transpose(dim=4, axes=(3,0,1,2), is_inplace=True)
     def test_transpose_XZYA_inplace(self):
@@ -614,15 +614,15 @@ class TestTranspose(object):
         self._test_transpose(dim=4, axes=(3,1,2,0), is_inplace=True)
     def test_transpose_ZYXA_inplace(self):
         self._test_transpose(dim=4, axes=(3,2,1,0), is_inplace=True)
-       
+
     # 5d tests
     def test_transpose_5d_inplace(self):
-        for axes in it.permutations(xrange(5)):
+        for axes in it.permutations(range(5)):
             if tuple(axes) == tuple(range(5)):
                 continue
             self._test_transpose(dim=5, axes=axes, is_inplace=True)
 
-    
+
     def _test_transpose(self, dim, axes, is_inplace):
         check_instance(axes, tuple, values=int)
         assert dim > 1
@@ -636,30 +636,30 @@ class TestTranspose(object):
             dtypes = [np.float32]
             tile_sizes=[4]
             tile_paddings = [0]
-        
+
         for dtype in dtypes:
             self._alloc_cpu_gpu(dtype=dtype, dim=dim, is_inplace=is_inplace)
             self._do_transpose_cpu(axes=axes, is_inplace=is_inplace)
             for tile_size in tile_sizes:
                 for tile_padding in tile_paddings:
                     for vectorization in vectorizations:
-                        self._do_compute_gpu_and_check(dim=dim, axes=axes, 
+                        self._do_compute_gpu_and_check(dim=dim, axes=axes,
                                 tile_size=tile_size, tile_padding=tile_padding,
                                 vectorization=vectorization, is_inplace=is_inplace)
-            
-    
+
+
 if __name__ == '__main__':
-    TestTranspose.setup_class(enable_extra_tests=False, 
+    TestTranspose.setup_class(enable_extra_tests=False,
                               enable_error_plots=True,
                               enable_debug_mode=False,
                               enable_interactive_debug=False)
-    
+
     test = TestTranspose()
 
     enable_out_of_place = False
     enable_inplace      = True
 
-    with printoptions(linewidth=200, 
+    with printoptions(linewidth=200,
             formatter={'float':lambda x: '{:0.2f}'.format(x)}):
 
         if enable_out_of_place:
@@ -682,32 +682,32 @@ if __name__ == '__main__':
             test.test_transpose_AYZX_out_of_place()
             test.test_transpose_AZXY_out_of_place()
             test.test_transpose_AZYX_out_of_place()
-        
+
             test.test_transpose_XAYZ_out_of_place()
             test.test_transpose_XAZY_out_of_place()
             test.test_transpose_YAXZ_out_of_place()
             test.test_transpose_YAZX_out_of_place()
             test.test_transpose_ZAXY_out_of_place()
             test.test_transpose_ZAYX_out_of_place()
-        
+
             test.test_transpose_XYAZ_out_of_place()
             test.test_transpose_XZAY_out_of_place()
             test.test_transpose_YXAZ_out_of_place()
             test.test_transpose_YZAX_out_of_place()
             test.test_transpose_ZXAY_out_of_place()
             test.test_transpose_ZYAX_out_of_place()
-        
+
             test.test_transpose_XYZA_out_of_place()
             test.test_transpose_XZYA_out_of_place()
             test.test_transpose_YXZA_out_of_place()
             test.test_transpose_YZXA_out_of_place()
             test.test_transpose_ZXYA_out_of_place()
             test.test_transpose_ZYXA_out_of_place()
-            
+
             ## 5d transpose
             if test.enable_extra_tests:
                 test.test_transpose_5d_out_of_place()
-        
+
         if enable_inplace:
             # only C(dim,k) permutations (ie transpositions) are currently
             # supported per dimension for inplace kernels.
diff --git a/hysop/backend/device/codegen/kernels/transpose.py b/hysop/backend/device/codegen/kernels/transpose.py
index 58257139d07103e50b451e5bd7a1074e9110d71a..62b6b527779799cbb4cfae73bce1fc97548d9843 100644
--- a/hysop/backend/device/codegen/kernels/transpose.py
+++ b/hysop/backend/device/codegen/kernels/transpose.py
@@ -1,5 +1,8 @@
-from contextlib import contextmanager, nested
-from hysop.deps import np, operator
+from contextlib import contextmanager
+import operator
+import numpy as np
+
+from hysop.tools.contexts import nested
 from hysop.tools.misc import upper_pow2_or_3, prod
 from hysop.tools.decorators import static_vars
 from hysop.tools.numpywrappers import npw
@@ -20,17 +23,17 @@ class TransposeKernelGenerator(KernelCodeGenerator):
     n_dbg_arrays = 2
 
     @staticmethod
-    def codegen_name(is_inplace, axes, ctype, 
-            tile_size, tile_padding, vectorization, 
+    def codegen_name(is_inplace, axes, ctype,
+            tile_size, tile_padding, vectorization,
             use_diagonal_coordinates):
         pdim = len(axes)
         axes = [ str(j) if i!=j else 'X' for i,j in enumerate(axes) ]
         return 'transpose{}_{}_{}_{}d__N{}__T{}__P{}__{}'.format(
                 '_dc' if use_diagonal_coordinates else '_nc',
                 'inplace' if is_inplace else 'out_of_place',
-                ctype.replace(' ','_'), pdim, vectorization, tile_size, tile_padding, 
+                ctype.replace(' ','_'), pdim, vectorization, tile_size, tile_padding,
                 '_'.join(axes))
-    
+
     @classmethod
     def characterize_permutation(cls, shape, axes, max_device_workdim):
         pdim = len(axes)
@@ -43,7 +46,7 @@ class TransposeKernelGenerator(KernelCodeGenerator):
             tile_indexes = (pdim-1,)
         tile_indexes = list( pdim-1-idx for idx in tile_indexes)
         wdim = min(len(axes), max_device_workdim)
-        
+
         extra_work_indexes = []
 
         work_shape = np.empty(shape=(wdim,), dtype=np.int32)
@@ -59,7 +62,7 @@ class TransposeKernelGenerator(KernelCodeGenerator):
                 continue
             j+=1
         return (contiguous_permutation, wdim, work_shape, tile_indexes)
-    
+
     @classmethod
     def max_local_worksize(cls, shape, work_dim, tile_size, vectorization, axes):
         pdim = len(axes)
@@ -69,16 +72,16 @@ class TransposeKernelGenerator(KernelCodeGenerator):
         else:
             tile_indexes = (pdim-1,)
         tile_indexes = tuple( pdim-1-idx for idx in tile_indexes )
-        
+
         assert work_dim <= pdim, 'workdim to big.'
         assert work_dim >= (1 + int(contiguous_permutation)), 'workdim to small.'
-        
+
         wdim = work_dim
         max_local_worksize = np.empty(shape=(wdim,), dtype=np.int32)
         j=0
         for i,Si in enumerate(shape):
             if i==0:
-                max_local_worksize[j] = (tile_size+vectorization-1) / vectorization
+                max_local_worksize[j] = (tile_size+vectorization-1) // vectorization
             elif i in tile_indexes:
                 max_local_worksize[j] = tile_size
             elif i < (wdim - int(contiguous_permutation and tile_indexes[1]>wdim-1)):
@@ -90,8 +93,8 @@ class TransposeKernelGenerator(KernelCodeGenerator):
         return max_local_worksize
 
     @classmethod
-    def compute_global_size(cls, shape, tile_size, 
-            vectorization, axes, 
+    def compute_global_size(cls, shape, tile_size,
+            vectorization, axes,
             local_work_size, work_load):
 
         pdim = len(axes)
@@ -101,7 +104,7 @@ class TransposeKernelGenerator(KernelCodeGenerator):
         else:
             tile_indexes = (pdim-1,)
         tile_indexes = tuple( pdim-1-idx for idx in tile_indexes )
-        
+
         wdim = len(local_work_size)
         assert wdim <= pdim, 'workdim to big.'
         assert wdim >= (1 + int(contiguous_permutation)), 'workdim to small.'
@@ -113,20 +116,20 @@ class TransposeKernelGenerator(KernelCodeGenerator):
         for i,Si in enumerate(shape):
             if i==0:
                 wl = work_load[j]
-                ngroups[j]  = (Si+vts*wl-1)/(vts*wl)
+                ngroups[j]  = (Si+vts*wl-1)//(vts*wl)
             elif i in tile_indexes:
                 wl = work_load[j]
-                ngroups[j] = ((Si+ts*wl-1)/(ts*wl))
+                ngroups[j] = ((Si+ts*wl-1)//(ts*wl))
             elif i < (wdim - int(contiguous_permutation and tile_indexes[1]>wdim-1)):
                 wl = work_load[j]
-                ngroups[j] = (Si+wl-1)/wl
+                ngroups[j] = (Si+wl-1)//wl
             else:
                 continue
             j+=1
         assert j==wdim, '{} != {}'.format(j, wdim)
         global_size = ngroups * local_work_size
         return global_size
-    
+
     def required_workgroup_cache_size(self):
         """
         Return a tuple of required (static,dynamic,total) cache bytes per workgroup
@@ -136,7 +139,7 @@ class TransposeKernelGenerator(KernelCodeGenerator):
 
         tile_shape = self.tile_shape
         tile_bytes = prod(tile_shape) * nbytes
-        
+
         if self.contiguous_permutation:
             count = tile_bytes
             if self.is_inplace:
@@ -147,9 +150,9 @@ class TransposeKernelGenerator(KernelCodeGenerator):
         sc = count
         dc = 0
         tc = sc+dc
-        
+
         return (sc,dc,tc)
-   
+
     def __init__(self, typegen, ctype, vectorization,
             axes, tile_size, tile_padding, symbolic_mode,
             use_diagonal_coordinates = True,
@@ -166,7 +169,7 @@ class TransposeKernelGenerator(KernelCodeGenerator):
         assert Pdim in [1,2,3,4,8,16]
         assert vectorization in [1,2,4,8,16]
         assert tile_padding >= 0
-        
+
         # check permutation axes
         msg='Invalid permutation {} for dimension {}.'
         msg=msg.format(axes, pdim)
@@ -175,8 +178,8 @@ class TransposeKernelGenerator(KernelCodeGenerator):
         _axes = set(axes.tolist())
         if len(_axes) != pdim:
             raise ValueError(msg)
-        
-        _permutation = (axes != range(pdim))
+
+        _permutation = (axes != set(range(pdim)))
         _naxes = sum(_permutation)
         if (_naxes == 0):
             msg='There is nothing to transpose with given axes {}.'
@@ -185,7 +188,7 @@ class TransposeKernelGenerator(KernelCodeGenerator):
         assert _naxes >= 2, msg
 
         # We need the first axe for contiguous reads and writes even if it's not part
-        # of the permutation scheme. If the contiguous axe is permutated, we need 
+        # of the permutation scheme. If the contiguous axe is permutated, we need
         # the second axe that will become the contiguous one as well.
         # Those contiguous axes are stored in tile_indexes.
         contiguous_permutation = (axes[-1] != (pdim-1))
@@ -197,31 +200,31 @@ class TransposeKernelGenerator(KernelCodeGenerator):
         tile_indexes = tuple( pdim-1-idx       for idx in tile_indexes )
 
         permutation_axes = tuple( pdim-1-i for i,idx in enumerate(axes) if i!= idx )
-        
+
         tdim = len(tile_indexes)
         Tdim = upper_pow2_or_3(tdim)
         tile_shape = [tile_size,]*tdim
         tile_shape[-1] += tile_padding
         tile_shape = tuple(tile_shape)
 
-        is_tile_index = tuple( (i in tile_indexes) for i in xrange(pdim) ) 
+        is_tile_index = tuple( (i in tile_indexes) for i in range(pdim) )
         tile_index_to_id = dict( (j,i) for (i,j) in enumerate(tile_indexes) )
-        
+
         device = typegen.device
         if (device.max_work_item_dimensions < tdim):
             msg='OpenCL device {} does not support {} working dimensions required '
             msg+='to transpose whith axes {}.'
             msg=msg.format(device.name, tdim, axes)
         work_dim = min(pdim, device.max_work_item_dimensions)
-        
-        workload_indexes = tuple ( i for i in xrange(pdim)
-                if (not is_tile_index[i]) and 
-                (i < (work_dim- int(contiguous_permutation and (tile_indexes[1]>work_dim-1))))) 
-        is_workload_index = tuple( (i in workload_indexes) for i in xrange(pdim) ) 
+
+        workload_indexes = tuple ( i for i in range(pdim)
+                if (not is_tile_index[i]) and
+                (i < (work_dim- int(contiguous_permutation and (tile_indexes[1]>work_dim-1)))))
+        is_workload_index = tuple( (i in workload_indexes) for i in range(pdim) )
         wl_index_to_id = dict( (j,i) for (i,j) in enumerate(workload_indexes) )
         wldim = len(workload_indexes)
         WLdim = upper_pow2_or_3(wldim)
-        
+
         name = TransposeKernelGenerator.codegen_name(is_inplace, axes, ctype,
                 tile_size, tile_padding, vectorization, use_diagonal_coordinates)
 
@@ -231,7 +234,7 @@ class TransposeKernelGenerator(KernelCodeGenerator):
         super(self.__class__,self).__init__(
                 name=name,
                 typegen=typegen,
-                work_dim=work_dim, 
+                work_dim=work_dim,
                 known_vars = known_vars,
                 kernel_args = kernel_args,
                 symbolic_mode=symbolic_mode,
@@ -240,23 +243,23 @@ class TransposeKernelGenerator(KernelCodeGenerator):
         dtype = ctype_to_dtype(ctype)
 
         if debug_mode:
-            print 'Transpose codegen configuration:'
-            print ' *dimension:         {}'.format(pdim)
-            print ' *axes:              {}'.format(axes)
-            print ' *tile_dimension:    {}'.format(tdim)
-            print ' *tile_shape:        {}'.format(tile_shape)
-            print ' *tile_indexes:      {}'.format(tile_indexes)
-            print ' *tile_axes:         {}'.format(tile_axes)
-            print ' *tile_index_to_id:  {}'.format(tile_index_to_id)
-            print ' *workload_indexes:  {}'.format(workload_indexes)
-            print ' *wl_index_to_id:    {}'.format(wl_index_to_id)
-            print ' *is_tile_index:     {}'.format(is_tile_index)
-            print ' *is_workload_index: {}'.format(is_workload_index)
-            print ' *work_dim:          {} (tile[{}] + device_workload[{}])'.format(work_dim, 
-                    tdim, work_dim-tdim)
-            print ' *ctype:             {}'.format(ctype)
-            print ' *dtype:             {}'.format(dtype)
-        
+            print('Transpose codegen configuration:')
+            print(' *dimension:         {}'.format(pdim))
+            print(' *axes:              {}'.format(axes))
+            print(' *tile_dimension:    {}'.format(tdim))
+            print(' *tile_shape:        {}'.format(tile_shape))
+            print(' *tile_indexes:      {}'.format(tile_indexes))
+            print(' *tile_axes:         {}'.format(tile_axes))
+            print(' *tile_index_to_id:  {}'.format(tile_index_to_id))
+            print(' *workload_indexes:  {}'.format(workload_indexes))
+            print(' *wl_index_to_id:    {}'.format(wl_index_to_id))
+            print(' *is_tile_index:     {}'.format(is_tile_index))
+            print(' *is_workload_index: {}'.format(is_workload_index))
+            print(' *work_dim:          {} (tile[{}] + device_workload[{}])'.format(work_dim,
+                    tdim, work_dim-tdim))
+            print(' *ctype:             {}'.format(ctype))
+            print(' *dtype:             {}'.format(dtype))
+
         self.ctype             = ctype
         self.dtype             = dtype
         self.axes              = axes
@@ -293,13 +296,13 @@ class TransposeKernelGenerator(KernelCodeGenerator):
         reqs = WriteOnceDict()
         return reqs
 
-    
+
     def gen_kernel_arguments(self, typegen, ctype, Pdim, debug_mode, is_inplace,
             known_vars, symbolic_mode):
         _global = OpenClCodeGenerator.default_keywords['global']
         tg = typegen
         mesh_dim = Pdim
-        
+
         kargs  = ArgDict()
         if is_inplace:
             data, strides = OpenClArrayBackend.build_codegen_arguments(kargs, name='inout',
@@ -324,9 +327,9 @@ class TransposeKernelGenerator(KernelCodeGenerator):
 
         if debug_mode:
             n_dbg_arrays = self.n_dbg_arrays
-            for i in xrange(n_dbg_arrays):
+            for i in range(n_dbg_arrays):
                 kargs['dbg{}'.format(i)]   = CodegenVariable(ctype='int', name='dbg{}'.format(i),
-                        typegen=tg, storage=_global, ptr=True, 
+                        typegen=tg, storage=_global, ptr=True,
                         ptr_const=True, ptr_restrict=True, nl=True)
         self.debug_mode = debug_mode
 
@@ -346,7 +349,7 @@ class TransposeKernelGenerator(KernelCodeGenerator):
         tg               = s.typegen
         work_dim         = s.work_dim
         symbolic_mode    = s.symbolic_mode
-        
+
         ctype            = s.ctype
         dtype            = s.dtype
 
@@ -373,7 +376,7 @@ class TransposeKernelGenerator(KernelCodeGenerator):
         is_workload_index = s.is_workload_index
         is_inplace        = s.is_inplace
         vectorization     = s.vectorization
-        
+
         contiguous_permutation   = s.contiguous_permutation
         use_diagonal_coordinates = s.use_diagonal_coordinates
 
@@ -386,7 +389,7 @@ class TransposeKernelGenerator(KernelCodeGenerator):
         group_size  = s.vars['num_groups']
         global_size = s.vars['global_size']
         global_id   = s.vars['global_id']
-        
+
         S = s.vars['shape']
         if is_inplace:
             _in          = self.inout_data
@@ -400,7 +403,7 @@ class TransposeKernelGenerator(KernelCodeGenerator):
             _out_strides = self.out_strides
 
         if debug_mode:
-            dbg = [ s.vars['dbg{}'.format(i)] for i in xrange(n_dbg_arrays) ]
+            dbg = [ s.vars['dbg{}'.format(i)] for i in range(n_dbg_arrays) ]
 
         if is_inplace and S.known():
             msg='Permutated shape axis should form an hypercube for inplace transpositions.'
@@ -408,21 +411,21 @@ class TransposeKernelGenerator(KernelCodeGenerator):
             Sp0 = S.value[permutation_axes[0]]
             assert (Sp==Sp0).all(), msg
 
-        tile_size = CodegenVariable(typegen=tg, name='tile_size', ctype='int', 
+        tile_size = CodegenVariable(typegen=tg, name='tile_size', ctype='int',
                 const=True, value=self.tile_size, symbolic_mode=symbolic_mode)
-        tile_padding = CodegenVariable(typegen=tg, name='tile_padding', ctype='int', 
+        tile_padding = CodegenVariable(typegen=tg, name='tile_padding', ctype='int',
                 const=True, value=self.tile_padding, symbolic_mode=symbolic_mode)
         tile_sshape = [tile_size.value]*tdim
         tile_sshape[-1] = '{}+{}'.format(tile_size.value, tile_padding.value)
-        
+
         tile_indexes_extended = tile_indexes + (0,)*(Tdim-tdim)
 
         ntiles = '(({}+{}-1)/{})'.format(S, tile_size, tile_size)
-        ntiles = CodegenVectorClBuiltin('ntiles', 'int', Pdim, tg, const=True, 
+        ntiles = CodegenVectorClBuiltin('ntiles', 'int', Pdim, tg, const=True,
                 init = ntiles)
-        
+
         nwork = '(({}+{}-1)/{})'.format(S[:work_dim], local_size, local_size)
-        nwork = CodegenVectorClBuiltin('nwork', 'int', work_dim, tg, const=True, 
+        nwork = CodegenVectorClBuiltin('nwork', 'int', work_dim, tg, const=True,
                 init = nwork)
 
         idx  = CodegenVectorClBuiltin('idx',  'int', Pdim,  tg)
@@ -430,13 +433,13 @@ class TransposeKernelGenerator(KernelCodeGenerator):
         tidx = CodegenVectorClBuiltin('tidx', 'int', Tdim,  tg)
         lidx = CodegenVectorClBuiltin('lidx', 'int', Tdim,  tg)
         kidx = CodegenVectorClBuiltin('kidx', 'int', WLdim, tg)
-        
+
         tmp = CodegenVectorClBuiltin('tmp', ctype, vectorization, tg)
 
         tile_offset_in, tile_offset_out = '', ''
         local_offset_in, local_offset_out = '', ''
         ki=kj=tdim-1
-        for k in xrange(pdim):
+        for k in range(pdim):
             i = pdim-1-k
             j = pdim-1-axes[k]
             if i==pdim-1:
@@ -452,7 +455,7 @@ class TransposeKernelGenerator(KernelCodeGenerator):
                 else:
                     local_offset_in += ' $+ {}*{}'.format(_in_strides[i], lidx[ki])
                 ki-=1
-            
+
             if j in tile_indexes:
                 if kj==tdim-1:
                     local_offset_out = '{}*{}'.format(_out_strides[i], lidx[kj])
@@ -462,25 +465,25 @@ class TransposeKernelGenerator(KernelCodeGenerator):
 
         assert ki==-1
         assert kj==-1
-    
+
         tile_id = ''
         block_id = ''
         loc_id = ''
-        for i in xrange(tdim-1,-1,-1):
+        for i in range(tdim-1,-1,-1):
             if i == tdim-1:
                 tile_id  = '{}'.format(tidx[i])
                 block_id = '{}'.format(bidx[i])
             else:
                 tile_id  = '({}*{}+{})'.format(tile_id,  ntiles[i], tidx[i])
                 block_id = '({}*{}+{})'.format(block_id, ntiles[i], bidx[i])
-        
-        for i in xrange(work_dim-1,-1,-1):
+
+        for i in range(work_dim-1,-1,-1):
             if i == work_dim-1:
                 loc_id = '{}'.format(local_id[i])
             else:
                 loc_id = '({}*{}+{})'.format(loc_id, local_size[i], local_id[i])
 
-        tile_offset_in  = CodegenVariable('tile_offset_in',  'ulong', tg, 
+        tile_offset_in  = CodegenVariable('tile_offset_in',  'ulong', tg,
                 init=tile_offset_in, const=True)
         tile_offset_out = CodegenVariable('tile_offset_out', 'ulong', tg,
                 init=tile_offset_out, const=True)
@@ -488,7 +491,7 @@ class TransposeKernelGenerator(KernelCodeGenerator):
                 init=local_offset_in, const=True)
         local_offset_out = CodegenVariable('local_offset_out', 'ulong', tg,
                 init=local_offset_out, const=True)
-       
+
         TID = CodegenVariable('TID', 'int', tg, const=True,
                 init=tile_id)
         BID = CodegenVariable('BID', 'int', tg, const=True,
@@ -498,8 +501,8 @@ class TransposeKernelGenerator(KernelCodeGenerator):
 
         active = CodegenVariable('active', 'bool', tg)
 
-        active_cond = ' && '.join(['({}<{})'.format(idx[i], S[i]) for i in xrange(pdim)
-            if (i < (work_dim- int(contiguous_permutation and (tile_indexes[1]>work_dim-1)))) 
+        active_cond = ' && '.join(['({}<{})'.format(idx[i], S[i]) for i in range(pdim)
+            if (i < (work_dim- int(contiguous_permutation and (tile_indexes[1]>work_dim-1))))
                and (i not in tile_indexes)])
 
         @contextmanager
@@ -508,13 +511,13 @@ class TransposeKernelGenerator(KernelCodeGenerator):
                 if is_tile_index[i]:
                     tid = tile_index_to_id[i]
                     imin = min(i, work_dim-1)
-                    loop = '{i}={ig}; {i}<{N}; {i}+={ng}'.format(i=bidx[tid], 
-                            ig=group_id[imin], N=ntiles[i], 
+                    loop = '{i}={ig}; {i}<{N}; {i}+={ng}'.format(i=bidx[tid],
+                            ig=group_id[imin], N=ntiles[i],
                             ng=group_size[imin])
                     unroll = True
                 elif is_workload_index[i]:
                     wid = wl_index_to_id[i]
-                    loop = '{i}={ig}; {i}<{N}; {i}+={ng}'.format(i=kidx[wid], 
+                    loop = '{i}={ig}; {i}<{N}; {i}+={ng}'.format(i=kidx[wid],
                             ig=group_id[i], ng=group_size[i],
                             N=nwork[i])
                     unroll = True
@@ -529,20 +532,20 @@ class TransposeKernelGenerator(KernelCodeGenerator):
         @contextmanager
         def _tile_iterate(i, tile_idx):
             try:
-                loop = '{var}={lid}; ({var}<{N}) && ({glob}+{var} < {S}); {var}+={L}'.format(i=i, 
-                        var=lidx[i], 
+                loop = '{var}={lid}; ({var}<{N}) && ({glob}+{var} < {S}); {var}+={L}'.format(i=i,
+                        var=lidx[i],
                         glob=idx[tile_idx],
-                        lid=local_id[i], 
-                        L=local_size[i], 
-                        N=tile_size, 
+                        lid=local_id[i],
+                        L=local_size[i],
+                        N=tile_size,
                         S=S[tile_idx])
                 unroll = True
                 with s._for_(loop, unroll=unroll) as ctx:
                     yield ctx
             except:
                 raise
-        
-        block_loops     = [ _block_iterate_(i) for i   in xrange(pdim) ][::-1]
+
+        block_loops     = [ _block_iterate_(i) for i   in range(pdim) ][::-1]
         if is_inplace and contiguous_permutation:
             tile0 = CodegenArray(typegen=tg, name='tile0', ctype=ctype, storage='__local',
                     dim=tdim, shape=tile_sshape)
@@ -555,14 +558,14 @@ class TransposeKernelGenerator(KernelCodeGenerator):
             tile_loops_in1  = [ _tile_iterate(i,j) for i,j in enumerate(tile_indexes[::-1]) ][::-1]
             tile_loops_out1 = [ _tile_iterate(i,j) for i,j in enumerate(tile_indexes) ][::-1]
 
-            tile_in0  = tile0() + ''.join(['[{}]'.format(lidx[i]) 
-                for i in xrange(tdim)][::-1])
-            tile_out0 = tile0() + ''.join(['[{}]'.format(lidx[axes.tolist().index(axes[i])]) 
-                for i in xrange(tdim)])
-            tile_in1 = tile1() + ''.join(['[{}]'.format(lidx[axes.tolist().index(axes[i])]) 
-                for i in xrange(tdim)])
-            tile_out1  = tile1() + ''.join(['[{}]'.format(lidx[i]) 
-                for i in xrange(tdim)][::-1])
+            tile_in0  = tile0() + ''.join(['[{}]'.format(lidx[i])
+                for i in range(tdim)][::-1])
+            tile_out0 = tile0() + ''.join(['[{}]'.format(lidx[axes.tolist().index(axes[i])])
+                for i in range(tdim)])
+            tile_in1 = tile1() + ''.join(['[{}]'.format(lidx[axes.tolist().index(axes[i])])
+                for i in range(tdim)])
+            tile_out1  = tile1() + ''.join(['[{}]'.format(lidx[i])
+                for i in range(tdim)][::-1])
         else:
             tile = CodegenArray(typegen=tg, name='tile', ctype=ctype, storage='__local',
                     dim=tdim, shape=tile_sshape)
@@ -571,12 +574,12 @@ class TransposeKernelGenerator(KernelCodeGenerator):
             tile_loops_in0  = [ _tile_iterate(i,j) for i,j in enumerate(tile_indexes) ][::-1]
             tile_loops_out0 = [ _tile_iterate(i,j) for i,j in enumerate(tile_indexes[::-1]) ][::-1]
 
-            tile_in0  = tile() + ''.join(['[{}]'.format(lidx[i]) 
-                for i in xrange(tdim)][::-1])
-            tile_out0 = tile() + ''.join(['[{}]'.format(lidx[axes.tolist().index(axes[i])]) 
-                for i in xrange(tdim)])
-                
-        
+            tile_in0  = tile() + ''.join(['[{}]'.format(lidx[i])
+                for i in range(tdim)][::-1])
+            tile_out0 = tile() + ''.join(['[{}]'.format(lidx[axes.tolist().index(axes[i])])
+                for i in range(tdim)])
+
+
         #include complex definitions if required
         with s._codeblock_('pragma_extensions'):
             if (ctype == 'cdouble_t'):
@@ -590,7 +593,7 @@ class TransposeKernelGenerator(KernelCodeGenerator):
                 tile_padding.declare(al,align=True)
             s.jumpline()
             s.decl_aligned_vars(global_id, local_id, group_id,
-                                global_size, local_size, group_size, 
+                                global_size, local_size, group_size,
                                 const=True)
             ptrs = (_in,)
             if not is_inplace:
@@ -602,7 +605,7 @@ class TransposeKernelGenerator(KernelCodeGenerator):
             s.jumpline()
             if contiguous_permutation:
                 s.decl_vars(*tiles)
-            
+
             s.jumpline()
 
             comment = \
@@ -640,36 +643,36 @@ to prevent memory camping that may occur during global input read or output writ
                         BID.declare(s)
                     TID.declare(s)
                 s.jumpline()
-                
+
                 if wldim:
                     s.comment('Adjust global offset index using the workload index')
-                    code = '{} = {}*{}+{};'.format(idx[workload_indexes], 
+                    code = '{} = {}*{}+{};'.format(idx[workload_indexes],
                             kidx[:wldim],
                             local_size[workload_indexes],
                             local_id[workload_indexes])
                     s.append(code)
                     s.jumpline()
-              
+
                 s.comment('Adjust global offset index using the tile index')
                 code = '{} = {}*{};'.format(idx[tile_indexes], tile_size, tidx[:tdim])
                 s.append(code)
                 s.jumpline()
-                
+
                 s.comment('Determine if this index is active')
                 if is_inplace:
                     assert len(permutation_axes)==2
-                    acond = '({}<={})'.format(idx[permutation_axes[-2]], 
+                    acond = '({}<={})'.format(idx[permutation_axes[-2]],
                             idx[permutation_axes[-1]])
                     if active_cond:
                         active_cond += ' && {}'.format(acond)
-                    else: 
+                    else:
                         active_cond = acond
                 active.affect(s, init=active_cond or 'true')
                 s.jumpline()
 
                 s.comment('idx[0:{}] now identifies a unique input and output tile, workgroup can iterate over it.'.format(pdim))
                 s.decl_aligned_vars(tile_offset_in, tile_offset_out)
-                
+
                 s.decl_aligned_vars(lidx)
                 if contiguous_permutation:
                     with s._if_(active):
@@ -685,11 +688,11 @@ to prevent memory camping that may occur during global input read or output writ
 
                     s.barrier(_local=True)
                     s.jumpline()
-                    
+
                     with s._if_(active):
                         with nested(*tile_loops_out0):
                             local_offset_out.declare(s)
-                            s.append('{} = {};'.format(_out['{}+{}'.format(tile_offset_out, 
+                            s.append('{} = {};'.format(_out['{}+{}'.format(tile_offset_out,
                                 local_offset_out)], tile_out0))
                         if is_inplace:
                             with nested(*tile_loops_out1):
@@ -701,13 +704,13 @@ to prevent memory camping that may occur during global input read or output writ
                     with s._if_(active):
                         with tile_loops_in0[0]:
                             if is_inplace:
-                                    
+
                                 offset_in='{}$+{}'.format(tile_offset_in, lidx[0])
                                 offset_out='{}$+{}'.format(tile_offset_out, lidx[0])
 
                                 tmp_load = self.vload(vectorization, _inout, offset_out)
                                 tmp.affect(s, init=tmp_load)
-                                
+
                                 swap = self.vload(vectorization,  _inout, offset_in, align=True)
                                 swap = self.vstore(vectorization, _inout, offset_out, '$'+swap, jmp=True, align=True)
                                 s.align(swap)
@@ -724,8 +727,8 @@ to prevent memory camping that may occur during global input read or output writ
 if __name__ == '__main__':
     from hysop.backend.device.codegen.base.test import _test_typegen
     tg = _test_typegen('float')
-    ek = TransposeKernelGenerator(typegen=tg, 
-            ctype='short', 
+    ek = TransposeKernelGenerator(typegen=tg,
+            ctype='short',
             vectorization=4,
             axes=(2,1,0,4,3),
             tile_size=8, tile_padding=1,
diff --git a/hysop/backend/device/codegen/structs/indices.py b/hysop/backend/device/codegen/structs/indices.py
index dfe81ca3cd108b75189f6306b226ba23c41a02e9..c1c3a54c821c8871b7b90633a98a663188f69a33 100644
--- a/hysop/backend/device/codegen/structs/indices.py
+++ b/hysop/backend/device/codegen/structs/indices.py
@@ -1,5 +1,5 @@
+import numpy as np
 
-from hysop.deps import np
 from hysop.tools.types import check_instance, first_not_None
 from hysop.tools.misc import upper_pow2_or_3
 from hysop.backend.device.codegen.base.enum_codegen   import EnumCodeGenerator
@@ -8,7 +8,7 @@ from hysop.backend.device.opencl.opencl_types  import OpenClTypeGen
 from hysop.constants import DirectionLabels
 
 class SpaceIndices(StructCodeGenerator):
-    def __init__(self, name, typegen, var_prefix, stype, workdim, 
+    def __init__(self, name, typegen, var_prefix, stype, workdim,
             vsize=1, typedef=True, comments=None):
         assert vsize in [1,2,3,4,8,16]
         stype = stype.replace('fbtype', typegen.fbtype)
@@ -19,35 +19,35 @@ class SpaceIndices(StructCodeGenerator):
             else:
                 typedef = None
         dtype = self.build_dtype(typegen, var_prefix, stype, vsize, workdim)
-        super(SpaceIndices, self).__init__(name=name, 
-                dtype=dtype, 
+        super(SpaceIndices, self).__init__(name=name,
+                dtype=dtype,
                 typegen=typegen,
-                typedef=typedef, 
+                typedef=typedef,
                 comments=comments)
         self.workdim = workdim
         self.vsize = vsize
         self.stype = stype
-    
+
     @staticmethod
     def build_dtype(typegen, var_prefix, stype, vsize, workdim):
         tg     = typegen
         stype1 = tg.dtype_from_str(stype)
         stypen = tg.dtype_from_str('{}{}'.format(stype, vsize))
-        
+
         dtype = []
-        
+
         fn = '{}{}'.format(var_prefix, DirectionLabels[0])
         field = (fn, stypen)
         dtype.append(field)
-        
-        for i in xrange(workdim-2, -1, -1):
+
+        for i in range(workdim-2, -1, -1):
             fn = '{}{}'.format(var_prefix, DirectionLabels[workdim-1-i])
             field = (fn, stype1)
             dtype.append(field)
 
 
         return np.dtype(dtype)
-        
+
     def create(self, *args, **kwds):
         raise NotImplementedError()
 
@@ -63,7 +63,7 @@ class GlobalCoordinates(SpaceIndices):
     def __init__(self, typegen, workdim, **kwds):
         super(GlobalCoordinates, self).__init__(
                 typegen=typegen, workdim=workdim,
-                name='GlobalCoordinates', var_prefix='', stype='fbtype', 
+                name='GlobalCoordinates', var_prefix='', stype='fbtype',
                 **kwds)
 
 class GlobalFieldInfo(StructCodeGenerator):
@@ -71,7 +71,7 @@ class GlobalFieldInfo(StructCodeGenerator):
         gi = GlobalIndices(typegen=typegen, workdim=workdim, vsize=vsize)
         gc = GlobalCoordinates(typegen=typegen, workdim=workdim, vsize=vsize)
         dtype = self.build_dtype(gi, gc)
-        name='GlobalFieldInfo{}D_{}{}_{}{}'.format(workdim, 
+        name='GlobalFieldInfo{}D_{}{}_{}{}'.format(workdim,
                 gi.stype.capitalize(), vsize,
                 gc.stype.capitalize(), vsize)
         if isinstance(typedef, bool):
@@ -80,7 +80,7 @@ class GlobalFieldInfo(StructCodeGenerator):
             else:
                 typedef = None
         super(GlobalFieldInfo, self).__init__(name=name,
-                dtype=dtype, 
+                dtype=dtype,
                 typedef=typedef,
                 typegen=typegen,
                 **kwds)
@@ -88,7 +88,7 @@ class GlobalFieldInfo(StructCodeGenerator):
         self.require(gc.name, gc)
         self.workdim = workdim
         self.vsize = vsize
-    
+
     @staticmethod
     def build_dtype(gi, gc):
         dtype = [('idx', gi.dtype), ('pos', gc.dtype)]
@@ -110,7 +110,7 @@ class GlobalFieldInfos(StructCodeGenerator):
             else:
                 typedef = None
         super(GlobalFieldInfos, self).__init__(name=name,
-                dtype=dtype, 
+                dtype=dtype,
                 typedef=typedef,
                 typegen=typegen,
                 **kwds)
@@ -125,7 +125,7 @@ if __name__ == '__main__':
     from hysop.backend.device.codegen.base.test import _test_typegen
 
     tg = _test_typegen('double', float_dump_mode='dec')
-    
+
     idx = GlobalIndices(typegen=tg, workdim=3, vsize=4)
     pos = GlobalCoordinates(typegen=tg, workdim=3, vsize=4)
     gfi = GlobalFieldInfo(typegen=tg, workdim=3, vsize=2)
@@ -136,6 +136,6 @@ if __name__ == '__main__':
     cg.require('pos', pos)
     cg.require('gfi', gfi)
     cg.require('gfis', gfis)
-    
+
     cg.edit()
     cg.test_compile()
diff --git a/hysop/backend/device/codegen/structs/mesh_info.py b/hysop/backend/device/codegen/structs/mesh_info.py
index 87cb9e82117a2743cc5e71673aab5d251d098703..eab6cfd53550f1e92a4199a3165ddfadfa8197d1 100644
--- a/hysop/backend/device/codegen/structs/mesh_info.py
+++ b/hysop/backend/device/codegen/structs/mesh_info.py
@@ -1,5 +1,5 @@
+import numpy as np
 
-from hysop.deps import np
 from hysop.tools.types import check_instance, first_not_None
 from hysop.tools.misc import upper_pow2_or_3
 from hysop.backend.device.codegen.base.enum_codegen   import EnumCodeGenerator
@@ -20,20 +20,20 @@ class MeshBaseStruct(StructCodeGenerator):
                 typedef = name+'_s'
             else:
                 typedef = None
-        super(MeshBaseStruct,self).__init__(name=name, 
-                dtype=dtype, 
+        super(MeshBaseStruct,self).__init__(name=name,
+                dtype=dtype,
                 typegen=typegen,
-                typedef=typedef, 
+                typedef=typedef,
                 comments=comments)
         self.reqs['boundary_enum'] = BoundaryConditionEnum
         self.vsize = vsize
-    
+
     @staticmethod
     def build_dtype(typegen, vsize):
         tg = typegen
         intn   = tg.dtype_from_str('int{}'.format(vsize))
         floatn = tg.dtype_from_str('fbtype{}'.format(vsize))
-        
+
         dtype = []
         for fn in ['resolution','compute_resolution']:
             field = (fn, intn)
@@ -44,7 +44,7 @@ class MeshBaseStruct(StructCodeGenerator):
 
         dtype.append(('lboundary', intn))
         dtype.append(('rboundary', intn))
-        
+
         comments = [
                 "Resolution of the mesh -including- ghosts",
                 "Resolution of the mesh -excluding- ghosts",
@@ -56,27 +56,27 @@ class MeshBaseStruct(StructCodeGenerator):
         ]
 
         return np.dtype(dtype), comments
-        
-    
-    def create(self, name, 
+
+
+    def create(self, name,
             resolution, compute_resolution, boundaries,
             xmin, xmax, size,
             **kargs):
 
         vsize = self.vsize
         dtype = self.dtype
-         
+
         def extend(var, d=0):
             return np.asarray(tuple(var)+(d,)*(vsize-len(var)))
 
         tg = self.typegen
-        
+
         lboundary, rboundary = boundaries
         lboundary = extend(lboundary, BoundaryCondition.NONE)
         rboundary = extend(rboundary, BoundaryCondition.NONE)
         _lboundary = [ bd() for bd in lboundary ]
         _rboundary = [ bd() for bd in rboundary ]
-        
+
         mesh_base_vals = {
                 'resolution':         tg.make_intn(resolution,vsize),
                 'compute_resolution': tg.make_intn(compute_resolution,vsize),
@@ -86,7 +86,7 @@ class MeshBaseStruct(StructCodeGenerator):
                 'xmax': tg.make_floatn(xmax,vsize),
                 'size': tg.make_floatn(size,vsize)
             }
-        
+
         var = np.empty(shape=(1,), dtype=dtype)
         for k in mesh_base_vals:
             var[k] = mesh_base_vals[k]
@@ -111,7 +111,7 @@ class MeshBaseStruct(StructCodeGenerator):
 
 class MeshInfoStruct(StructCodeGenerator):
     def __init__(self, typegen, vsize,
-            typedef=True, 
+            typedef=True,
             mbs_typedef=None):
         assert vsize in [1,2,3,4,8,16]
         name  = '{}MeshInfo{}D'.format(typegen.fbtype[0], vsize)
@@ -124,10 +124,10 @@ class MeshInfoStruct(StructCodeGenerator):
                 typedef = None
         dtype,comments,ctype_overrides, reqs = MeshInfoStruct.build_dtype(typegen, vsize, mbs_typedef=mbs_typedef)
         super(MeshInfoStruct,self).__init__(name=name, dtype=dtype, typegen=typegen,
-                typedef=typedef, 
+                typedef=typedef,
                 comments=comments,
                 ctype_overrides=ctype_overrides)
-        
+
         for req in reqs:
             self.require(req.name, req)
         self.mesh_base = reqs[0]
@@ -142,8 +142,8 @@ class MeshInfoStruct(StructCodeGenerator):
 
         i=0
         dtypes = []
-        def _append(dtype): 
-            dtypes.append(dtype) 
+        def _append(dtype):
+            dtypes.append(dtype)
 
         _append(('dim',np.int32) )
         i+=1
@@ -161,21 +161,21 @@ class MeshInfoStruct(StructCodeGenerator):
 
         comments = [
                 "Dimension of the mesh",
-                "Index of the first local compute point in the global grid",   
-                "Index of the last  local compute point in the global grid",   
+                "Index of the first local compute point in the global grid",
+                "Index of the last  local compute point in the global grid",
                 "Number of ghosts in each direction",
                 "Space discretization",
                 "1/dx",
                 "Local mesh",
                 "Global mesh",
                 ]
-        
+
         return dtypes, comments, None, [mesh_base]
 
     def create(self, name, dim,
             start, stop, ghosts,
             dx, local_mesh, global_mesh, **kargs):
-        
+
         vsize = self.vsize
         if dim>vsize:
             msg='Dim should be less or equal to {}, got dim={}.'.format(vsize, dim)
@@ -185,8 +185,8 @@ class MeshInfoStruct(StructCodeGenerator):
         vsize = self.vsize
 
         dtype = self.dtype
-        
-        dx = np.asarray(dx) 
+
+        dx = np.asarray(dx)
         mesh_info_vals = {
                 'dim'        : tg.make_intn(dim,1),
                 'start'      : tg.make_intn(start, vsize),
@@ -197,7 +197,7 @@ class MeshInfoStruct(StructCodeGenerator):
                 'local_mesh' : local_mesh[0],
                 'global_mesh': global_mesh[0]
             }
-        
+
         def extend(var,d=0):
             if isinstance(var, np.ndarray):
                 _var = np.full(shape=(vsize,), fill_value=d, dtype=var.dtype)
@@ -228,19 +228,19 @@ class MeshInfoStruct(StructCodeGenerator):
         for k in mesh_info_vals:
             var[k] = mesh_info_vals[k]
         return (var, cg_var)
-    
+
     @staticmethod
     def create_from_mesh(name, typegen, mesh, **kargs):
          from hysop.mesh.cartesian_mesh import CartesianMeshView
          check_instance(name, str)
-         check_instance(mesh, CartesianMeshView) 
+         check_instance(mesh, CartesianMeshView)
          check_instance(typegen, OpenClTypeGen)
-            
+
          tg = typegen
-         
+
          dim   = mesh.dim
          vsize = upper_pow2_or_3(dim)
-        
+
          btd = '{}MeshBase{}D_s'.format(tg.fbtype[0], vsize)
          itd = '{}MeshInfo{}D_s'.format(tg.fbtype[0], vsize)
          mesh_base = MeshBaseStruct(tg, vsize, typedef=btd)
@@ -257,7 +257,7 @@ class MeshInfoStruct(StructCodeGenerator):
          gxmax       = mesh.global_end[::-1]
          gsize       = mesh.global_length[::-1]
          gboundaries = mesh.global_boundaries[::-1]
-            
+
          lresolution         = mesh.local_resolution[::-1]
          lcompute_resolution = mesh.compute_resolution[::-1]
          lxmin = mesh.local_origin[::-1]
@@ -278,7 +278,7 @@ class MeshInfoStruct(StructCodeGenerator):
                  dx, lmesh, gmesh, **kargs)
 
          return (var, cg_var)
-   
+
     def build_codegen_variable(self,name,var_overrides=None,**kargs):
         tg = self.typegen
         if var_overrides is None:
@@ -297,17 +297,17 @@ if __name__ == '__main__':
     from hysop.backend.device.codegen.base.test import _test_typegen
 
     tg = _test_typegen('double', float_dump_mode='dec')
-    
+
     vsize = 4
     mbs = MeshBaseStruct(tg, typedef='{}MeshBase{}D_s'.format(tg.fbtype[0], vsize), vsize=vsize)
     mis = MeshInfoStruct(tg, typedef='{}MeshInfo{}D_s'.format(tg.fbtype[0], vsize), mbs_typedef=mbs.typedef, vsize=vsize)
 
     cg = OpenClCodeGenerator('test_generator',tg)
-    
+
     # declare mesh MeshInfoStruct and its dependancies (MeshBaseStruct,MeshDirectionEnum,TranspositionStateEnum)
     cg.require('mis',mis)
-    
-    # create a local numpy and a codegen MeshInfoStruct variable 
+
+    # create a local numpy and a codegen MeshInfoStruct variable
     local_mesh = mbs.create('local',
             (10,10,10,), (8,8,8,), ((BoundaryCondition.PERIODIC,)*3,)*2,
             (0,0,0,), (1,1,1,),
@@ -317,11 +317,11 @@ if __name__ == '__main__':
             (100,100,100,), (80,80,80,), ((BoundaryCondition.PERIODIC,)*3,)*2,
             (0,0,0,), (10,10,10,),
             (10,10,10,))
-    
-    (np_mis, cg_mis) = mis.create('mesh_info', 3, 
+
+    (np_mis, cg_mis) = mis.create('mesh_info', 3,
             (0,0,0,), (1024,1024,1024,), (1,1,1,),
             (0.1,0.2,0.3),
-            local_mesh, global_mesh, 
+            local_mesh, global_mesh,
             storage='__constant')
 
     # declare and intialize the nested struct in the __constant address space
diff --git a/hysop/backend/device/codegen/symbolic/cast.py b/hysop/backend/device/codegen/symbolic/cast.py
index 486de4bd69a2c1df7338d8cd4530dfcb03a13c9b..ff9f19f472c2de262eac0b30789345f3e5df8161 100644
--- a/hysop/backend/device/codegen/symbolic/cast.py
+++ b/hysop/backend/device/codegen/symbolic/cast.py
@@ -13,13 +13,13 @@ class OpenClCastUtils(object):
     # scalar ranks defined as Usual Arithmetic Conversions
     # http://www.informit.com/articles/article.aspx?p=1732873&seqNum=6
     # and section 6.3.1.8 of the C99 specification.
-    __type_ranks = [npw.float64, npw.float32, npw.float16, 
+    __type_ranks = [npw.float64, npw.float32, npw.float16,
              npw.uint64, npw.int64,
              npw.uint32, npw.int32,
              npw.uint16, npw.int16,
              npw.uint8,  npw.int8,
              npw.bool_]
-    type_ranks = dict(zip(__type_ranks, xrange(len(__type_ranks)-1, -1, -1)))
+    type_ranks = dict(zip(__type_ranks, range(len(__type_ranks)-1, -1, -1)))
 
     ## OPENCL IMPLICIT CONVERSION RULES
     # scalar op scalar => scalar promotion to higher rank type, nothing to do
@@ -51,7 +51,7 @@ class OpenClCastUtils(object):
         if not vector_types:
             assert (max_scalar_type is not None)
             return exprs, max_scalar_type, max_scalar_type
-        
+
         vcomponents = npw.asarray(vector_components)
         vcomponents = vcomponents[vcomponents>1]
         if (not broadcast_args) and (vcomponents.size>0):
@@ -66,7 +66,7 @@ class OpenClCastUtils(object):
         max_vector_components = max(vector_components)
         vector_btype = vector_types[vector_ranks.index(max_vector_rank)]
         vector_vtype = '{}{}'.format(vector_btype, max_vector_components)
-        
+
         promoted_exprs = ()
         for expr in exprs:
             # cast all vectors to max rank and broadcast if required
@@ -89,7 +89,7 @@ class OpenClCastUtils(object):
             promoted_exprs += (expr,)
 
         return promoted_exprs, vector_vtype, vector_btype
-    
+
     @classmethod
     def promote_expressions_to_float(cls, exprs):
         dtypes = tuple( ctype_to_dtype(e.btype) for e in exprs )
@@ -101,7 +101,7 @@ class OpenClCastUtils(object):
             float_dtype = npw.float64
         fbtype = dtype_to_ctype(float_dtype)
 
-        promoted = tuple(cls.promote_basetype_to(e, fbtype) 
+        promoted = tuple(cls.promote_basetype_to(e, fbtype)
                             for e in exprs)
         return promoted, fbtype
 
@@ -121,7 +121,7 @@ class OpenClCastUtils(object):
                 raise RuntimeError(msg)
             return OpenClCast(target_type, expr)
         return OpenClConvert(target_type, expr)
-    
+
     @classmethod
     def promote_basetype_to(cls, expr, btype):
         ebase, ecomponents = expr.btype, expr.components
@@ -129,9 +129,9 @@ class OpenClCastUtils(object):
             return expr
         target_type = vtype(btype, ecomponents)
         return OpenClConvert(target_type, expr)
-    
+
     @classmethod
-    def promote_expressions_to_required_signature(cls, exprs, signature, ret, 
+    def promote_expressions_to_required_signature(cls, exprs, signature, ret,
                 expand=None):
         exprs = to_tuple(exprs)
         signature = to_tuple(signature)
@@ -144,7 +144,7 @@ class OpenClCastUtils(object):
 
         dtypes     = tuple( e.dtype for e in exprs )
         components = tuple( e.components for e in exprs )
-        
+
         common_dtype = find_common_dtype(*dtypes)
         n = max(components)
 
@@ -164,7 +164,7 @@ class OpenClCastUtils(object):
             btype = dtype_to_ctype(dtype)
             ctype = e.vtype(btype, ecomponents)
             vtype = e.vtype(btype, n)
-            
+
             if (ecomponents>1):
                 assert (n % ecomponents == 0)
                 broadcast_factor = (n // ecomponents)
@@ -208,7 +208,7 @@ class OpenClCastUtils(object):
 
         return exprs, ctype
 
-    
+
     @classmethod
     def promote_expressions_to_float(cls, exprs):
         dtypes = tuple( ctype_to_dtype(e.btype) for e in exprs )
@@ -220,6 +220,6 @@ class OpenClCastUtils(object):
             float_dtype = npw.float64
         fbtype = dtype_to_ctype(float_dtype)
 
-        promoted = tuple(cls.promote_basetype_to(e, fbtype) 
+        promoted = tuple(cls.promote_basetype_to(e, fbtype)
                             for e in exprs)
         return promoted, fbtype
diff --git a/hysop/backend/device/codegen/symbolic/expr.py b/hysop/backend/device/codegen/symbolic/expr.py
index c8c86e978e51f00a7557e8823b50a3151e761f00..d6dca3eb0b79eaffca0f1467c92046e931809f85 100644
--- a/hysop/backend/device/codegen/symbolic/expr.py
+++ b/hysop/backend/device/codegen/symbolic/expr.py
@@ -1,17 +1,23 @@
+from hysop.backend.device.opencl.opencl_types import basetype as cl_basetype, \
+    components as cl_components, \
+    vtype as cl_vtype
+from hysop.backend.device.codegen.base.variables import ctype_to_dtype
 import sympy as sm
 from hysop.symbolic import Symbol, Expr
 from hysop.symbolic.array import OpenClSymbolicBuffer, OpenClSymbolicNdBuffer
 from hysop.tools.types import check_instance, first_not_None, to_tuple, to_list
 from hysop.tools.numerics import is_fp, is_signed, is_unsigned, is_integer, is_complex
-from sympy.printing.ccode import C99CodePrinter
 
-from hysop.backend.device.codegen.base.variables import ctype_to_dtype
-from hysop.backend.device.opencl.opencl_types import basetype as cl_basetype, \
-                                                     components as cl_components, \
-                                                     vtype as cl_vtype
+from packaging import version
+if version.parse(sm.__version__) > version.parse("1.7"):
+    from sympy.printing.c import C99CodePrinter
+else:
+    from sympy.printing.ccode import C99CodePrinter
+
 
 InstructionTermination = ''
 
+
 class TypedI(object):
     def __new__(cls, *args, **kwds):
         positive = kwds.pop('positive', None)
@@ -22,15 +28,15 @@ class TypedI(object):
     @classmethod
     def vtype(cls, btype, n):
         return cl_vtype(btype, n)
-    
+
     @property
     def btype(self):
         return cl_basetype(self.ctype)
-    
+
     @property
     def basetype(self):
         return self.btype
-    
+
     @property
     def components(self):
         return cl_components(self.ctype)
@@ -42,33 +48,35 @@ class TypedI(object):
     @property
     def is_signed(self):
         return is_signed(self.dtype)
-    
+
     @property
     def is_unsigned(self):
         return is_unsigned(self.dtype)
-    
+
     @property
     def is_integer(self):
         return is_integer(self.dtype)
-    
+
     @property
     def is_fp(self):
         return is_fp(self.dtype)
-    
+
     @property
     def is_complex(self):
         raise NotImplementedError()
-    
+
     @property
     def is_positive(self):
         return first_not_None(self._positive, self.is_unsigned)
 
+
 class TypedSymbol(TypedI, Symbol):
     def __new__(cls, ctype, **kwds):
         obj = super(TypedSymbol, cls).__new__(cls, **kwds)
         obj.ctype = ctype
         return obj
 
+
 class TypedExpr(TypedI, Expr):
     def __new__(cls, ctype, *args):
         try:
@@ -79,13 +87,16 @@ class TypedExpr(TypedI, Expr):
         obj.ctype = ctype
         return obj
 
+
 class TypedExprWrapper(TypedExpr):
     def __new__(cls, ctype, expr):
-            obj = super(TypedExprWrapper, cls).__new__(cls, ctype, expr)
-            obj.expr = expr
-            return obj
+        obj = super(TypedExprWrapper, cls).__new__(cls, ctype, expr)
+        obj.expr = expr
+        return obj
+
     def _ccode(self, printer):
-       return printer._print(self.expr)
+        return printer._print(self.expr)
+
 
 class OpenClConvert(TypedExpr):
     def __new__(cls, ctype, expr):
@@ -98,6 +109,7 @@ class OpenClConvert(TypedExpr):
         cast = 'convert_{}({})'.format(self.ctype, val)
         return cast
 
+
 class OpenClCast(TypedExpr):
     def __new__(cls, ctype, expr):
         obj = super(OpenClCast, cls).__new__(cls, ctype, expr)
@@ -107,7 +119,8 @@ class OpenClCast(TypedExpr):
     def _ccode(self, printer):
         expr = printer._print(self.expr)
         cast = '({})({})'.format(self.ctype, expr)
-        return cast 
+        return cast
+
 
 class OpenClBool(TypedExpr):
     """
@@ -117,59 +130,69 @@ class OpenClBool(TypedExpr):
     """
     def __new__(cls, expr):
         assert expr.ctype in ('short', 'int', 'long'), ctype
-        ctype='char' # force lowest integer rank (to force promotion later)
+        ctype = 'char'  # force lowest integer rank (to force promotion later)
         obj = super(OpenClBool, cls).__new__(cls, ctype, expr)
         obj.expr = expr
         return obj
+
     def _ccode(self, printer):
         # negate scalar boolean to set all bits to 1 (unsigned -1 sets all bits)
         #                                            (unsigned 0 has not bit set)
         expr = printer._print(self.expr)
-        
-        # pre-promote result to maximal rank just in case opencl 
+
+        # pre-promote result to maximal rank just in case opencl
         # implementation or runtime fails to yield good type or if
         # further promotion is needed after.
         s = '(-({}))'.format(expr)
-        
+
         # this breaks conditionals if further promotion is needed
         #s = '(u{})({})'.format(self.ctype, s)
 
         return s
 
+
 class Return(Expr):
     def __new__(cls, expr):
         obj = super(Return, cls).__new__(cls, expr)
         obj.expr = expr
         return obj
+
     def _ccode(self, printer):
         expr = printer._print(self.expr)
         code = 'return {};'.format(expr)
         ret = printer.codegen.append(code)
         return InstructionTermination
 
+
 class NumericalConstant(TypedExpr):
     def __new__(cls, ctype, value):
         obj = super(NumericalConstant, cls).__new__(cls, ctype, value)
-        obj.value=value
+        obj.value = value
         return obj
 
     def _ccode(self, printer):
         return printer.typegen.dump(self.value)
-    
+
     @classmethod
     def build(cls, val, typegen):
         ctype = typegen.dumped_type(val)
         return cls(ctype, val)
 
+
 class IntegerConstant(NumericalConstant):
     pass
+
+
 class FloatingPointConstant(NumericalConstant):
     pass
+
+
 class ComplexFloatingPointConstant(NumericalConstant):
     def _ccode(self, printer):
-        return '(({})({}, {}))'.format(self.ctype, 
-                printer.typegen.dump(self.value.real),
-                printer.typegen.dump(self.value.imag))
+        return '(({})({}, {}))'.format(self.ctype,
+                                       printer.typegen.dump(self.value.real),
+                                       printer.typegen.dump(self.value.imag))
+
 
 class OpenClVariable(TypedExpr):
     def __new__(cls, ctype, var, *args):
@@ -180,8 +203,9 @@ class OpenClVariable(TypedExpr):
     def _ccode(self, printer):
         return self.var()
 
+
 class OpenClIndexedVariable(OpenClVariable):
-    def __new__(cls, ctype, var, index): 
+    def __new__(cls, ctype, var, index):
         try:
             dim = index.var.dim
             components = cl_components(ctype)
@@ -192,7 +216,7 @@ class OpenClIndexedVariable(OpenClVariable):
         obj.index = index
         obj.dim = dim
         return obj
-    
+
     def _ccode(self, printer):
         if not isinstance(self.var, (OpenClSymbolicBuffer, OpenClSymbolicNdBuffer)):
             try:
@@ -201,13 +225,14 @@ class OpenClIndexedVariable(OpenClVariable):
                 pass
 
         var = printer._print(self.var)
-        if (self.dim>1):
-            vals = ', '.join('{}[{}]'.format(var, self.index.var[i]) for i in xrange(self.dim))
+        if (self.dim > 1):
+            vals = ', '.join('{}[{}]'.format(var, self.index.var[i]) for i in range(self.dim))
             return '({})({})'.format(self.ctype, vals)
         else:
             index = printer._print(self.index)
             return '{}[{}]'.format(var, index)
 
+
 class OpenClAssignment(TypedExpr):
     def __new__(cls, ctype, var, op, rhs):
         obj = super(OpenClAssignment, cls).__new__(cls, ctype, var, op, rhs)
@@ -215,6 +240,7 @@ class OpenClAssignment(TypedExpr):
         obj.op = op
         obj.rhs = rhs
         return obj
+
     def _ccode(self, printer):
         var = printer._print(self.var)
         rhs = printer._print(self.rhs)
@@ -222,19 +248,21 @@ class OpenClAssignment(TypedExpr):
         printer.codegen.append(code)
         return InstructionTermination
 
+
 class FunctionCall(TypedExpr):
     def __new__(cls, ctype, fn, fn_kwds):
         obj = super(FunctionCall, cls).__new__(cls, ctype, fn, fn_kwds)
         obj.fn = fn
         obj.fn_kwds = fn_kwds
         return obj
-    
+
     def _ccode(self, printer):
         return self.fn(**self.fn_kwds)
-    
+
     def _sympystr(self, printer):
         return 'FunctionCall({})'.format(self.fn.name)
 
+
 class VStore(Expr):
     def __new__(cls, ptr, offset, data, n=1, **opts):
         obj = super(VStore, cls).__new__(cls, ptr, offset, data, n)
@@ -244,13 +272,14 @@ class VStore(Expr):
         obj.n = n
         obj.opts = opts
         return obj
-    
+
     def _ccode(self, printer):
-        code = printer.codegen.vstore(n=self.n, ptr=self.ptr, 
-                offset=self.offset, data=self.data, **self.opts)
+        code = printer.codegen.vstore(n=self.n, ptr=self.ptr,
+                                      offset=self.offset, data=self.data, **self.opts)
         printer.codegen.append(code)
         return InstructionTermination
 
+
 class VStoreIf(VStore):
     def __new__(cls, cond, scalar_cond, ptr, offset, data, n, **opts):
         obj = super(VStoreIf, cls).__new__(cls, ptr, offset, data, n)
@@ -260,10 +289,10 @@ class VStoreIf(VStore):
         return obj
 
     def _ccode(self, printer):
-        printer.codegen.vstore_if(cond=self.cond, 
-                scalar_cond=self.scalar_cond,
-                n=self.n, ptr=self.ptr, 
-                offset=self.offset, data=self.data, **self.opts)
+        printer.codegen.vstore_if(cond=self.cond,
+                                  scalar_cond=self.scalar_cond,
+                                  n=self.n, ptr=self.ptr,
+                                  offset=self.offset, data=self.data, **self.opts)
         return InstructionTermination
 
 
@@ -276,16 +305,17 @@ class VLoad(TypedExpr):
         obj.n = n
         obj.opts = opts
         return obj
-        
+
     def _ccode(self, printer):
-        vload = printer.codegen.vload(n=self.n, ptr=self.ptr, 
-                offset=self.offset, **self.opts)
+        vload = printer.codegen.vload(n=self.n, ptr=self.ptr,
+                                      offset=self.offset, **self.opts)
         if self.dst:
             self.dst.affect(printer.codegen, vload)
             return InstructionTermination
         else:
             return vload
 
+
 class VLoadIf(VLoad):
     def __new__(cls, cond, scalar_cond, ptr, offset, dst, n, default_value, **opts):
         obj = super(VLoadIf, cls).__new__(cls, ptr, offset, dst, n)
@@ -296,21 +326,22 @@ class VLoadIf(VLoad):
         return obj
 
     def _ccode(self, printer):
-        printer.codegen.vload_if(cond=self.cond, 
-                scalar_cond=self.scalar_cond,
-                n=self.n, ptr=self.ptr, 
-                offset=self.offset, dst=self.dst, 
-                default_value=self.default_value, **self.opts)
+        printer.codegen.vload_if(cond=self.cond,
+                                 scalar_cond=self.scalar_cond,
+                                 n=self.n, ptr=self.ptr,
+                                 offset=self.offset, dst=self.dst,
+                                 default_value=self.default_value, **self.opts)
         return InstructionTermination
 
+
 class IfElse(Expr):
     def __new__(cls, conditions, all_exprs, else_exprs=None):
-        conditions  = to_tuple(conditions)
-        all_exprs   = to_list(all_exprs)
-        else_exprs  = to_list(else_exprs) if (else_exprs is not None) else None
-        assert len(all_exprs)>=1
+        conditions = to_tuple(conditions)
+        all_exprs = to_list(all_exprs)
+        else_exprs = to_list(else_exprs) if (else_exprs is not None) else None
+        assert len(all_exprs) >= 1
         if not isinstance(all_exprs[0], list):
-            assert len(conditions)==1
+            assert len(conditions) == 1
             all_exprs = [all_exprs]
         assert len(conditions) == len(all_exprs) >= 1
         obj = super(IfElse, cls).__new__(cls, conditions, all_exprs, else_exprs)
@@ -331,6 +362,7 @@ class IfElse(Expr):
                     printer._print(e)
         return InstructionTermination
 
+
 class UpdateVars(Expr):
     def __new__(cls, srcs, dsts, ghosts):
         obj = super(UpdateVars, cls).__new__(cls, srcs, dsts, ghosts)
@@ -341,18 +373,18 @@ class UpdateVars(Expr):
         return obj
 
     def init(self, srcs, dsts, ghosts):
-        assert len(srcs)==len(dsts)
-        private_stores  = ()
+        assert len(srcs) == len(dsts)
+        private_stores = ()
         local_stores = ()
         for (src, dst, ghost) in zip(srcs, dsts, ghosts):
             assert not src.is_ptr
             if dst.is_ptr:
-                assert dst.storage=='__local'
+                assert dst.storage == '__local'
                 local_stores += ((src, dst, ghost),)
             else:
                 private_stores += ((src, dst),)
         self.private_stores = private_stores
-        self.local_stores   = local_stores
+        self.local_stores = local_stores
 
     def _ccode(self, printer):
         codegen = printer.codegen
@@ -368,18 +400,18 @@ class UpdateVars(Expr):
                 for (src, dst) in self.private_stores:
                     dst.affect(al, init=src, align=True)
         if self.local_stores:
-            srcs    = map(lambda x: x[0], self.local_stores)
-            ptrs    = map(lambda x: x[1], self.local_stores)
-            offsets = map(lambda x: x[2], self.local_stores)
-            codegen.multi_vstore_if(csc.is_last_active, 
-                    lambda i: '{}+{} < {}'.format(csc.full_offset, i, csc.compute_grid_size[0]),
-                    csc.vectorization, csc.local_offset,
-                    srcs, ptrs, 
-                    extra_offsets=offsets,
-                    use_short_circuit=csc.use_short_circuit,
-                    else_cond=csc.is_active)
+            srcs = tuple(map(lambda x: x[0], self.local_stores))
+            ptrs = tuple(map(lambda x: x[1], self.local_stores))
+            offsets = tuple(map(lambda x: x[2], self.local_stores))
+            codegen.multi_vstore_if(csc.is_last_active,
+                                    lambda i: '{}+{} < {}'.format(csc.full_offset, i, csc.compute_grid_size[0]),
+                                    csc.vectorization, csc.local_offset,
+                                    srcs, ptrs,
+                                    extra_offsets=offsets,
+                                    use_short_circuit=csc.use_short_circuit,
+                                    else_cond=csc.is_active)
             codegen.barrier(_local=True)
-            
+
         return InstructionTermination
 
 
@@ -389,10 +421,11 @@ class BuiltinFunctionCall(TypedExpr):
         obj.fname = fname
         obj.fargs = fargs
         return obj
-    
+
     def _ccode(self, printer):
         return '{}({})'.format(self.fname, ', '.join(printer._print(arg) for arg in self.fargs))
 
+
 class BuiltinFunction(object):
     def __new__(cls, fname):
         obj = super(BuiltinFunction, cls).__new__(cls)
@@ -423,11 +456,10 @@ class OpenClPrinter(C99CodePrinter):
 
     def doprint(self, expr, terminate=True):
         res = super(OpenClPrinter, self).doprint(expr)
-        if terminate and (res != InstructionTermination): 
-            msg='OpenClPrinter failed to generate code for the following expression:\n'
-            msg+='  {}\n'.format(expr)
-            msg+='Returned value was:\n  {}\n'.format(res)
+        if terminate and (res != InstructionTermination):
+            msg = 'OpenClPrinter failed to generate code for the following expression:\n'
+            msg += '  {}\n'.format(expr)
+            msg += 'Returned value was:\n  {}\n'.format(res)
             raise RuntimeError(msg)
         if not terminate:
             return res
-
diff --git a/hysop/backend/device/codegen/symbolic/functions/apply_stencil.py b/hysop/backend/device/codegen/symbolic/functions/apply_stencil.py
index 6291d7fcb41b66f4f25272da00f40e6b4a621305..2ae48f967a11c2be79014f87e7a669f5a09858df 100644
--- a/hysop/backend/device/codegen/symbolic/functions/apply_stencil.py
+++ b/hysop/backend/device/codegen/symbolic/functions/apply_stencil.py
@@ -1,5 +1,5 @@
+import sympy as sm
 
-from hysop.deps import sm
 from hysop.tools.numpywrappers import npw
 from hysop.tools.types import check_instance, first_not_None
 
@@ -18,9 +18,9 @@ from hysop.backend.device.codegen.symbolic.relational import OpenClMul, OpenClAd
 class CustomApplyStencilFunction(CustomSymbolicFunction):
     def __init__(self, csc, name, expr, target_ctype=None, **kwds):
         check_instance(expr, ApplyStencil)
-        super(CustomApplyStencilFunction, self).__init__(csc=csc, name=name, 
+        super(CustomApplyStencilFunction, self).__init__(csc=csc, name=name,
                 expr=expr, target_ctype=target_ctype, **kwds)
-    
+
     def parse_expr(self, csc, name, expr, args, reqs):
         if isinstance(expr, ApplyStencil):
             stencil = expr.stencil
@@ -29,7 +29,7 @@ class CustomApplyStencilFunction(CustomSymbolicFunction):
             def fn_call(i, fn_kwds):
                 _fn_kwds = fn_kwds.copy()
                 _fn_kwds['offset'] = '{}+{}'.format(fn_kwds['offset'], csc.typegen.dump(int(i)))
-                return FunctionCall(fn.ctype, fn, _fn_kwds) 
+                return FunctionCall(fn.ctype, fn, _fn_kwds)
             factor = self.parse_expr(csc, name, stencil.factor, args, reqs)
             pexprs = ()
             for (i, coeff) in stencil.iteritems(include_factor=False):
diff --git a/hysop/backend/device/codegen/symbolic/functions/custom_symbolic_function.py b/hysop/backend/device/codegen/symbolic/functions/custom_symbolic_function.py
index f5db424fbce9e08c3f1283e6eb07798e11b5fb7d..77bc17ca3ff26f45ed288202befa3d4291bce175 100644
--- a/hysop/backend/device/codegen/symbolic/functions/custom_symbolic_function.py
+++ b/hysop/backend/device/codegen/symbolic/functions/custom_symbolic_function.py
@@ -1,5 +1,5 @@
+import sympy as sm
 
-from hysop.deps import sm
 from hysop.tools.numpywrappers import npw
 from hysop.tools.types import check_instance, first_not_None
 from hysop.tools.numerics import is_fp, is_signed, is_unsigned, get_dtype, is_complex
@@ -42,7 +42,7 @@ class CustomSymbolicFunction(OpenClFunctionCodeGenerator):
     @classmethod
     def array_name(cls, array):
         return cls.varname(array.varname)
-    
+
     @classmethod
     def default_out_of_bounds_value(cls, dtype):
         if is_complex(dtype):
@@ -50,7 +50,7 @@ class CustomSymbolicFunction(OpenClFunctionCodeGenerator):
                 return '(float2)(NAN, NAN)'
             elif (dtype is npw.complex128):
                 return '(double2)(NAN, NAN)'
-            else: 
+            else:
                 msg='Unsupported complex dtype {}.'.format(dtype)
                 raise NotImplementedError(msg)
         elif is_fp(dtype):
@@ -58,7 +58,7 @@ class CustomSymbolicFunction(OpenClFunctionCodeGenerator):
             dval = 'NAN'
         elif is_unsigned(dtype):
             # uint max
-            dval = '0x'+ ''.join(('ff',)*npw.dtype(dtype).itemsize) 
+            dval = '0x'+ ''.join(('ff',)*npw.dtype(dtype).itemsize)
         elif is_signed(dtype):
             # int max
             dval = '0x7f' + ''.join(('ff',)*(npw.dtype(dtype).itemsize-1))
@@ -66,7 +66,7 @@ class CustomSymbolicFunction(OpenClFunctionCodeGenerator):
             msg='Dtype {} is not signed, unsigned or floating point...'.format(dtype)
             raise TypeError(msg)
         return dval
-    
+
     def __init__(self, csc, name, expr, target_ctype=None, inline=True, known_args=None):
         from hysop.backend.device.codegen.kernels.custom_symbolic import SymbolicCodegenContext
         check_instance(csc, SymbolicCodegenContext)
@@ -79,29 +79,29 @@ class CustomSymbolicFunction(OpenClFunctionCodeGenerator):
         self.stencils = {}
         self.__fn_counter = 0
         self.known_args = known_args
-        
+
         pexpr = self.parse_expr(csc, name, expr, args, reqs)
-            
+
         if (target_ctype is not None) and (pexpr.ctype != target_ctype):
             pexpr = OpenClCastUtils.promote_to(pexpr, target_ctype)
 
         self.is_fcall = isinstance(pexpr, FunctionCall)
         self.pexpr = pexpr
 
-        super(CustomSymbolicFunction,self).__init__(basename=name, output=pexpr.ctype, args=args, 
+        super(CustomSymbolicFunction,self).__init__(basename=name, output=pexpr.ctype, args=args,
                 typegen=csc.typegen, inline=inline, known_args=known_args)
-        
+
 
         self.update_requirements(reqs)
-        
+
         self.gencode(csc, pexpr)
         self.ctype = pexpr.ctype
-    
+
     @classmethod
     def fmt_args(cls, args):
         new_args = ArgDict()
         argsmap = {}
-        for i, (argname, arg) in enumerate(args.iteritems()):
+        for i, (argname, arg) in enumerate(args.items()):
             new_argname = 'a{}'.format(i)
             new_arg = arg.newvar(name=new_argname)
             new_args[new_argname] = new_arg
@@ -110,16 +110,16 @@ class CustomSymbolicFunction(OpenClFunctionCodeGenerator):
 
     def fmt_kwds(self, kwds):
         new_kwds = {}
-        for vname, val in kwds.iteritems():
+        for vname, val in kwds.items():
             new_kwds[self.argsmap[vname]] = val
         return new_kwds
-    
+
     def get_stencil_fn(self, csc, expr):
         from hysop.backend.device.codegen.symbolic.functions.apply_stencil import CustomApplyStencilFunction
         stencil = expr.stencil
         dummy_stencil_fn = CustomApplyStencilFunction(csc=csc, name='dummy', expr=expr, known_args=self.known_args)
         _hash = hash(str(dummy_stencil_fn))
-        if _hash in self.stencils: 
+        if _hash in self.stencils:
             stencil_fn = self.stencils[_hash]
         else:
             stencil_name = 'stencil_{}'.format(len(self.stencils))
@@ -134,9 +134,9 @@ class CustomSymbolicFunction(OpenClFunctionCodeGenerator):
         fn_kwds = fn.args.copy()
         top_fn = fn
         if kwds:
-            s_fn_kwds = {k: '({})'.format(v) for (k,v) in fn_kwds.iteritems()}
+            s_fn_kwds = {k: '({})'.format(v) for (k,v) in fn_kwds.items()}
             fn_kwds = {}
-            for (argname, argval) in fcall.fn_kwds.iteritems():
+            for (argname, argval) in fcall.fn_kwds.items():
                 argval = str(argval)
                 for (an,av) in sorted(s_fn_kwds.items(), key=lambda x: len(x[0])):
                     argval.replace(an, av)
@@ -145,9 +145,9 @@ class CustomSymbolicFunction(OpenClFunctionCodeGenerator):
             fcall   = fn.pexpr
             ctype   = fn.ctype
             fn      = fcall.fn
-            s_fn_kwds = {k: '({})'.format(v) for (k,v) in fn_kwds.iteritems()}
+            s_fn_kwds = {k: '({})'.format(v) for (k,v) in fn_kwds.items()}
             fn_kwds = {}
-            for (argname, argval) in fcall.fn_kwds.iteritems():
+            for (argname, argval) in fcall.fn_kwds.items():
                 argval = str(argval)
                 for (an,av) in sorted(s_fn_kwds.items(), key=lambda x: len(x[0])):
                     argval.replace(an, av)
@@ -158,10 +158,10 @@ class CustomSymbolicFunction(OpenClFunctionCodeGenerator):
         else:
             self.check_and_set(reqs, fn.name, fn)
             self.__fn_counter += 1
-        for argname, arg in top_fn.args.iteritems():
+        for argname, arg in top_fn.args.items():
             self.check_and_set(args, argname, arg)
         return fn, fn_kwds
-            
+
     @classmethod
     def check_and_set(cls, dic, key, value):
         if (key in dic):
@@ -178,7 +178,7 @@ class CustomSymbolicFunction(OpenClFunctionCodeGenerator):
 
     def parse_expr(self, csc, name, expr, args, reqs):
         pexpr = None
-        if isinstance(expr, (int,long,sm.Integer)):
+        if isinstance(expr, (int,sm.Integer)):
             pexpr = IntegerConstant('int', expr)
         elif isinstance(expr, (float, sm.Rational, sm.Float)):
             pexpr = FloatingPointConstant(csc.typegen.fbtype, expr)
@@ -198,7 +198,7 @@ class CustomSymbolicFunction(OpenClFunctionCodeGenerator):
             ctype = expr.parameter.ctype
             param = expr.parameter
             index = expr.idx
-            
+
             pname = param.name
             var = csc.param_args[pname]
 
@@ -224,13 +224,13 @@ class CustomSymbolicFunction(OpenClFunctionCodeGenerator):
                 assert csc.local_size_known
                 size   = csc.array_size(field, index)
                 ghosts = csc.array_ghost(field, index)
-                vload = Vload(csc.typegen, var.ctype, csc.vectorization, default_val=dval, 
+                vload = Vload(csc.typegen, var.ctype, csc.vectorization, default_val=dval,
                         itype=csc.itype, restrict=True, storage=var.storage, known_args=dict(size=size))
                 if (vload.name in reqs):
                     vload = reqs[vload.name]
                 else:
                     reqs[vload.name] = vload
-                for argname, arg in vload.args.iteritems():
+                for argname, arg in vload.args.items():
                     if (argname == 'data'):
                         argname=var.name
                         arg=var
@@ -274,7 +274,7 @@ class CustomSymbolicFunction(OpenClFunctionCodeGenerator):
             ctype        = stencil_fn.ctype
             pexpr = FunctionCall(ctype, stencil_fn, stencil_kwds)
             self.check_and_set(reqs, stencil_fn.name, stencil_fn)
-            for argname, argval in stencil_fn.args.iteritems():
+            for argname, argval in stencil_fn.args.items():
                 self.check_and_set(args, argname, argval)
         elif isinstance(expr, WaveNumberIndex):
             expr = expr.real_index
@@ -284,7 +284,7 @@ class CustomSymbolicFunction(OpenClFunctionCodeGenerator):
             if (sname=='dx'):
                 args[csc.dx.name] = csc.dx
                 pexpr = OpenClVariable(csc.dx.ctype, csc.dx)
-            elif (expr in csc.space_symbols): 
+            elif (expr in csc.space_symbols):
                 xi = csc.space_symbols[expr]
                 self.check_and_set(args, xi.name, xi)
                 pexpr = OpenClVariable(xi.ctype, xi)
@@ -307,13 +307,13 @@ class CustomSymbolicFunction(OpenClFunctionCodeGenerator):
             msg='Unknown expression type {}.\n  __mro__ = {}, expr={}\n'
             msg=msg.format(type(expr), type(expr).__mro__, expr)
             raise NotImplementedError(msg)
-        
+
         if not isinstance(pexpr, (TypedI, CustomSymbolicFunction)):
             msg='Failed to parse the following expression:\n{}\n'.format(expr)
             msg+='Expanded expression is:\n{}\n'.format(pexpr)
             raise RuntimeError(msg)
         return pexpr
-    
+
     def gencode(self, csc, pexpr):
         with self._function_():
             printer = OpenClPrinter(csc.typegen, self)
diff --git a/hysop/backend/device/codegen/symbolic/kernels/custom_symbolic_time_integrate.py b/hysop/backend/device/codegen/symbolic/kernels/custom_symbolic_time_integrate.py
index f1b445aec4e26b0e37aad81994ead058eeb76499..7f17339ace670b7af78d9b013bdc8d8beda7e460 100644
--- a/hysop/backend/device/codegen/symbolic/kernels/custom_symbolic_time_integrate.py
+++ b/hysop/backend/device/codegen/symbolic/kernels/custom_symbolic_time_integrate.py
@@ -23,23 +23,23 @@ class CustomSymbolicTimeIntegrateKernelGenerator(CustomSymbolicKernelGenerator):
         vectorization = csc.vectorization
         tg = csc.typegen
         itype = csc.itype
-        
+
         time_integrator = expr_info.time_integrator
-        
+
         K_args = ArgDict()
         Uo = ArgDict()
         Uk = ArgDict()
-        
+
         vloads = {}
         vstores = {}
         _ghosts = {}
         fcalls = ()
-        
+
         shared  = set()
         private = set()
         vnames = tuple()
         for i,expr in enumerate(expr_info.dexprs):
-            if isinstance(expr, TimeIntegrate): 
+            if isinstance(expr, TimeIntegrate):
                 time_integrator, lhs, rhs = expr.args
                 if isinstance(lhs, SymbolicDiscreteField):
                     vname = CustomSymbolicFunction.field_name(lhs.field, lhs.index)
@@ -57,18 +57,18 @@ class CustomSymbolicTimeIntegrateKernelGenerator(CustomSymbolicKernelGenerator):
                         is_local = False
 
                     K_args[vname] = CodegenArray('K_{}'.format(vname), vtype, tg, shape=(time_integrator.stages,))
-                    Uo[vname] = CodegenVectorClBuiltin(vname+'__0', ctype, vectorization, typegen=tg, 
+                    Uo[vname] = CodegenVectorClBuiltin(vname+'__0', ctype, vectorization, typegen=tg,
                             const=True, nl=True)
-                    Uk[vname] = CodegenVectorClBuiltin(vname+'__k', ctype, vectorization, typegen=tg, 
+                    Uk[vname] = CodegenVectorClBuiltin(vname+'__k', ctype, vectorization, typegen=tg,
                             const=True, nl=True)
                     _ghosts[vname] = ghosts
-                    
+
                     dval = CustomSymbolicFunction.default_out_of_bounds_value(ctype_to_dtype(ctype))
                     if is_local:
-                        size = csc.array_size(lhs.field, lhs.index)
-                        assert (size > 2*ghosts)
-                        vload = Vload(tg, ctype, vectorization, default_val=dval, 
-                                itype=itype, restrict=True, storage=var.storage, 
+                        size = int(csc.array_size(lhs.field, lhs.index))
+                        assert (size > 2*ghosts), (size, 2*ghosts)
+                        vload = Vload(tg, ctype, vectorization, default_val=dval,
+                                itype=itype, restrict=True, storage=var.storage,
                                 known_args=dict(size=size))
                         if (vload.name not in reqs):
                             reqs[vload.name] = vload
@@ -76,7 +76,7 @@ class CustomSymbolicTimeIntegrateKernelGenerator(CustomSymbolicKernelGenerator):
                         else:
                             vloads[vname] = reqs[vload.name]
                         vstore = Vstore(tg, ctype, vectorization,
-                                itype=itype, restrict=True, storage=var.storage, 
+                                itype=itype, restrict=True, storage=var.storage,
                                 known_args=dict(size=size))
                         if (vstore.name not in reqs):
                             reqs[vstore.name] = vstore
@@ -117,14 +117,14 @@ class CustomSymbolicTimeIntegrateKernelGenerator(CustomSymbolicKernelGenerator):
 
 
     def generate_expr_code(self):
-        s    = self 
+        s    = self
         tg   = s.typegen
         csc  = s.csc
         info = csc.expr_info
         time_integrator = self.time_integrator
-        
+
         args = csc.args
-        
+
         K    = s.K_args
         Uo   = s.Uo
         Uk   = s.Uk
@@ -147,17 +147,17 @@ class CustomSymbolicTimeIntegrateKernelGenerator(CustomSymbolicKernelGenerator):
                 arg = args[vname]
                 G = ghosts[vname]
                 if (vname in shared):
-                    load = vloads[vname](data=arg, 
+                    load = vloads[vname](data=arg,
                             offset='{}+{}'.format(csc.local_offset,G))
                     uo.declare(al, align=True, init=load)
                 else:
                     uo.declare(al, align=True, init=arg)
-        
+
         s.jumpline()
         s.comment('Storage for computed slopes')
         s.decl_aligned_vars(*tuple(K.values()))
 
-        for i in xrange(1, time_integrator.stages+1):
+        for i in range(1, time_integrator.stages+1):
             alpha = time_integrator.alpha[i-1]
             is_last = (i == time_integrator.stages)
             if is_last:
@@ -182,9 +182,9 @@ class CustomSymbolicTimeIntegrateKernelGenerator(CustomSymbolicKernelGenerator):
                         k   = K[vname]
                         uk = Uk[vname].nv_replace('k', str(i))
                         uo = Uo[vname]
-                        init = ' $+ '.join('{}$*{}'.format(tg.dump(float(gamma[j])),k[j]) for j in xrange(i) if (gamma[j]!=0))
-                        init = '{} $+ {}*{}*({})'.format(uo, 
-                                tg.dump(float(self.csc.expr_info.dt_coeff)), 
+                        init = ' $+ '.join('{}$*{}'.format(tg.dump(float(gamma[j])),k[j]) for j in range(i) if (gamma[j]!=0))
+                        init = '{} $+ {}*{}*({})'.format(uo,
+                                tg.dump(float(self.csc.expr_info.dt_coeff)),
                                 self.args[self.csc.expr_info.dt.name], init)
                         uk.declare(al, init=init, align=True)
                 s.jumpline()
@@ -192,7 +192,7 @@ class CustomSymbolicTimeIntegrateKernelGenerator(CustomSymbolicKernelGenerator):
                     arg = args[vname]
                     value = Uk[vname].nv_replace('k', str(i))
                     G = ghosts[vname]
-                    code = vstores[vname](value=value, data=arg, 
+                    code = vstores[vname](value=value, data=arg,
                             offset='{}+{}'.format(csc.local_offset,G)) + ';'
                     s.append(code)
                 if private:
diff --git a/hysop/backend/device/codegen/symbolic/map.py b/hysop/backend/device/codegen/symbolic/map.py
index ab087c5372bde5e8c509d2a6b5d54c63736a6486..111ec83921d67d70461e72f2230048497271a533 100644
--- a/hysop/backend/device/codegen/symbolic/map.py
+++ b/hysop/backend/device/codegen/symbolic/map.py
@@ -1,4 +1,5 @@
-from hysop.deps import sm
+import sympy as sm
+
 from hysop.tools.types import check_instance
 from hysop.backend.device.codegen.symbolic.expr import TypedI
 from hysop.backend.device.codegen.symbolic.cast import OpenClCastUtils
@@ -33,7 +34,7 @@ def map_expression(csc, expr, args, reqs):
         msg = msg.format(expr, expr.func.__mro__,
             '\n   '.join(str(pa) for pa in args),
             '\n   '.join(str(pa) for pa in promoted_args))
-        print msg
+        print(msg)
         raise
     return new_expr
 
@@ -80,7 +81,7 @@ def _map_ctypes(expr, args):
         msg=msg.format(expr)
         msg+='\n Expression __mro__ is :\n  *{}'.format('\n  *'.join(str(x) for x in type(expr).__mro__))
         msg+='\n'
-        print msg
+        print(msg)
         msg='Cannot determine ctype for expression {}.'.format(expr)
         raise RuntimeError(msg)
 
@@ -123,7 +124,7 @@ def _map_func(csc, expr, promoted_args, ctype, reqs):
     elif expr.func in _func_mappings:
         return _func_mappings[expr.func]
     return expr.func
-        
+
 def _map_complex_func(csc, expr, promoted_args, ctype, reqs):
     assert isinstance(expr, ComplexExpr)
     functions = {
diff --git a/hysop/backend/device/codegen/symbolic/misc.py b/hysop/backend/device/codegen/symbolic/misc.py
index b36cb3f4281ab188f93a4b3ff6f17be3491c071c..60ca262bff737587c09194c4fca400bd258842a2 100644
--- a/hysop/backend/device/codegen/symbolic/misc.py
+++ b/hysop/backend/device/codegen/symbolic/misc.py
@@ -13,9 +13,9 @@ class OpenClBroadCast(TypedI, BroadCast):
         val = printer._print(expr)
         if (self.factor>1):
             if (self.expr.components>1):
-                indices = tuple(i for j in xrange(self.factor) for i in xrange(expr.components))
+                indices = tuple(i for j in range(self.factor) for i in range(expr.components))
                 mode=('hex' if (expr.components>4) else 'pos')
-                bc = '({}).{}{}'.format(val, 
+                bc = '({}).{}{}'.format(val,
                         printer.typegen.vtype_access(indices[0], expr.components, mode),
                         ''.join(printer.typegen.vtype_component_adressing(i, mode) for i in indices[1:]))
             else:
@@ -36,9 +36,9 @@ class OpenClExpand(TypedI, Expand):
         val = printer._print(expr)
         if (self.factor>1):
             if (self.expr.components>1):
-                indices = tuple(i for i in xrange(expr.components) for j in xrange(self.factor))
+                indices = tuple(i for i in range(expr.components) for j in range(self.factor))
                 mode=('hex' if (expr.components>4) else 'pos')
-                bc = '({}).{}{}'.format(val, 
+                bc = '({}).{}{}'.format(val,
                         printer.typegen.vtype_access(indices[0], expr.components, mode),
                         ''.join(printer.typegen.vtype_component_adressing(i, mode) for i in indices[1:]))
             else:
@@ -55,6 +55,6 @@ class OpenClSelect(TypedI, Select):
         return obj
 
     def _ccode(self, printer):
-        return 'select({}, {}, {})'.format(printer._print(self.a), 
+        return 'select({}, {}, {})'.format(printer._print(self.a),
                                            printer._print(self.b),
                                            printer._print(self.c))
diff --git a/hysop/backend/device/codegen/symbolic/relational.py b/hysop/backend/device/codegen/symbolic/relational.py
index 0f85cd26daa719e4e484badae5a6c4d88515a8c3..3f712255d44e60d7d3235813d0ee90319e55b286 100644
--- a/hysop/backend/device/codegen/symbolic/relational.py
+++ b/hysop/backend/device/codegen/symbolic/relational.py
@@ -8,7 +8,7 @@ from hysop.symbolic.relational import LogicalAND, LogicalOR, LogicalXOR, \
                                       Mul, Pow, Add
 def basetype(fulltype):
     import string
-    return fulltype.translate(None, string.digits)
+    return fulltype.translate(str.maketrans('', '', string.digits))
 
 class RelationalTypedExpr(TypedI):
     def __new__(cls, ctype, *args, **kwds):
diff --git a/hysop/backend/device/codegen/unions/float_int.py b/hysop/backend/device/codegen/unions/float_int.py
index 643d85f6e84465e469480c668bd608ecc9a7c67d..dca99bf53b8fc83760aede98d825dd9f83c43c2b 100644
--- a/hysop/backend/device/codegen/unions/float_int.py
+++ b/hysop/backend/device/codegen/unions/float_int.py
@@ -1,6 +1,5 @@
+import numpy as np
 
-
-from hysop.deps import np
 from hysop.tools.types import check_instance
 from hysop.backend.device.codegen.base.union_codegen import UnionCodeGenerator
 from hysop.backend.device.opencl.opencl_types  import OpenClTypeGen
@@ -10,18 +9,18 @@ class FloatIntegerUnion(UnionCodeGenerator):
 
         name,dtype,comments = self.build_dtype(typegen, ftype)
 
-        super(FloatIntegerUnion,self).__init__(name=name, 
-                dtype=dtype, 
+        super(FloatIntegerUnion,self).__init__(name=name,
+                dtype=dtype,
                 typegen=typegen,
-                typedef=typedef, 
+                typedef=typedef,
                 comments=comments)
-    
+
     @staticmethod
     def build_dtype(typegen, ftype):
         tg = typegen
 
         name = 'float_int'
-        
+
         dtype = []
         if ftype == 'half':
             name+='16'
@@ -42,7 +41,7 @@ class FloatIntegerUnion(UnionCodeGenerator):
             msg='Unknown ftype \'{}\', only half, float and double are supported.'
             msg=msg.format(ftype)
             raise ValueError(msg)
-        
+
         comments = [
                 'Access value as a signed integer',
                 'Access value as a unsigned integer',
@@ -57,12 +56,12 @@ if __name__ == '__main__':
     from hysop.backend.device.codegen.base.test import _test_typegen
 
     tg = _test_typegen()
-    
+
     u1 = FloatIntegerUnion('double', tg)
     u2 = FloatIntegerUnion('float', tg, 'custom32')
 
     cg = OpenClCodeGenerator('test_generator',tg)
-    cg.declare_cl_extension('cl_khr_fp64') 
+    cg.declare_cl_extension('cl_khr_fp64')
     cg.require('u1',u1)
     cg.require('u2',u2)
 
diff --git a/hysop/backend/device/device_platform.py b/hysop/backend/device/device_platform.py
index 096eec3849a77312ff17b68ff3134b36c969742a..63df8bdde35f4b5b43af1ddf31cc535a704fd871 100644
--- a/hysop/backend/device/device_platform.py
+++ b/hysop/backend/device/device_platform.py
@@ -1,9 +1,7 @@
 
 from abc import ABCMeta, abstractmethod
 
-class Platform(object):
-
-    __metaclass__ = ABCMeta
+class Platform(object, metaclass=ABCMeta):
 
     def __init__(self, hardware_topo, platform_handle, platform_id, **kwds):
         super(Platform, self).__init__(**kwds)
@@ -12,11 +10,11 @@ class Platform(object):
         self._discover_devices(hardware_topo, platform_handle)
         # we do not keep a reference to platform_handle as we
         # need to pickle this object
-    
+
     @property
     def platform_id(self):
         return self._platform_id
-    
+
     @property
     def logical_devices(self):
         return self._logical_devices
@@ -24,7 +22,7 @@ class Platform(object):
     @property
     def physical_devices(self):
         return [dev.physical_device() for dev in self.logical_devices()]
-    
+
     @abstractmethod
     def _discover_devices(self, hardware_topo, platform_handle):
         pass
diff --git a/hysop/backend/device/kernel_autotuner.py b/hysop/backend/device/kernel_autotuner.py
index 3a1cdb53b88e14027e1ce3756f6fee1e408203f8..d0d1cf99d76982d05ff28364f05f0f9359e51b00 100644
--- a/hysop/backend/device/kernel_autotuner.py
+++ b/hysop/backend/device/kernel_autotuner.py
@@ -1,7 +1,12 @@
-import copy
+import copy, os, hashlib, gzip, sys, warnings
+import itertools as it
+try:
+   import cPickle as pickle
+except:
+   import pickle
+
 from abc import ABCMeta, abstractmethod
 from hysop import __KERNEL_DEBUG__
-from hysop.deps import pickle, os, it, hashlib, gzip, sys, warnings
 from hysop.tools.units import time2str
 from hysop.tools.contexts import Timer
 from hysop.tools.types import check_instance
@@ -17,18 +22,17 @@ from hysop.backend.device.codegen import CodeGeneratorWarning
 
 class KernelGenerationError(RuntimeError):
     pass
-    
-class KernelAutotuner(object):
-    __metaclass__ = ABCMeta
+
+class KernelAutotuner(object, metaclass=ABCMeta):
 
     FULL_RESULTS_KEY = '__FULL_RESULTS__'
     DUMP_LAST_TUNED_KERNEL    = False
     STORE_FULL_KERNEL_SOURCES = False
-    
-    @staticmethod 
+
+    @staticmethod
     def _hash_func():
         return hashlib.new('sha256')
-    
+
     def use_tmp_cache(self):
         self._cache_dir = IO.get_tmp_dir('kernel_autotuner')
     def use_system_cache(self):
@@ -40,11 +44,11 @@ class KernelAutotuner(object):
     def cache_file(self):
         cache_file = '{}/{}.pklz'.format(self.cache_dir(), self.name.replace(' ','_'))
         return cache_file
-    
+
     def _reload_cache(self, extra_kwds_hash):
         cache_file = self.cache_file()
         if self.verbose:
-            print self.indent(1)+'>Loading cached results from \'{}\'.'.format(cache_file)
+            print(self.indent(1)+'>Loading cached results from \'{}\'.'.format(cache_file))
         self.all_results = load_cache(cache_file)
         config_key =  self.autotuner_config_key()
         config_key += (extra_kwds_hash,)
@@ -55,13 +59,13 @@ class KernelAutotuner(object):
     def _dump_cache(self, silent=False):
         cache_file = self.cache_file()
         if (not silent) and (self.verbose>1):
-            print self.indent(1)+'>Caching results to \'{}\'.'.format(cache_file)
+            print(self.indent(1)+'>Caching results to \'{}\'.'.format(cache_file))
         update_cache(cache_file, self.config_key, self.results)
-    
+
     def __init__(self, name, tunable_kernel, **kwds):
         """
         Initialize a KernelAutotuner.
-        
+
         Parameters
         ----------
         name: str
@@ -81,7 +85,7 @@ class KernelAutotuner(object):
 
         self.indent = lambda i: '  '*i
         self.verbose = self.autotuner_config.verbose
-            
+
         self.result_keys = (
                 'extra_parameters',    #00
                 'work_size',           #01
@@ -99,12 +103,12 @@ class KernelAutotuner(object):
             )
         for (i, pname) in enumerate(self.result_keys):
             setattr(self, '{}_idx'.format(pname), i)
-        
+
         self._cache_dir = None
-            
+
     def autotune(self, extra_kwds,
-            first_working=False, 
-            force_verbose=False, 
+            first_working=False,
+            force_verbose=False,
             force_debug=False):
         """
         Autotune the target tunable_kernels.
@@ -121,21 +125,21 @@ class KernelAutotuner(object):
 
         extra_kwds_hash, extra_kwds_hash_logs = tkernel.hash_extra_kwds(extra_kwds)
         hasher = self._hash_func()
-        hasher.update(str(extra_kwds_hash))
+        hasher.update(str(extra_kwds_hash).encode('utf-8'))
         extra_kwds_hash = hasher.hexdigest()
         check_instance(extra_kwds_hash, str)
         check_instance(extra_kwds_hash_logs, str)
         file_basename = '{}_{}'.format(self.name, extra_kwds_hash[:4])
-        
+
         self._print_header(extra_kwds)
         if autotuner_config.override_cache:
             if self.verbose:
-                print self.indent(1)+'>Using temporary cache folder, benching all new kernels.'
+                print(self.indent(1)+'>Using temporary cache folder, benching all new kernels.')
             self.use_tmp_cache()
         else:
             self.use_system_cache()
         results = self._reload_cache(extra_kwds_hash)
-        
+
         if first_working:
             best_candidate = None
         else:
@@ -144,38 +148,38 @@ class KernelAutotuner(object):
 
         if (best_candidate is None):
             best_candidate = self._autotune_kernels(tkernel, results, extra_kwds,
-                    force_verbose, force_debug, first_working, 
+                    force_verbose, force_debug, first_working,
                     extra_kwds_hash, extra_kwds_hash_logs, file_basename)
             from_cache = False
         else:
             from_cache = True
-        
+
         assert len(self.result_keys) == len(best_candidate)
         return dict(zip(self.result_keys, best_candidate)), file_basename, from_cache
 
-    
+
     def _load_results_from_cache(self, tkernel, results, extra_kwds,
             force_verbose, force_debug, extra_kwds_hash, extra_kwds_hash_logs, file_basename):
         if (self.FULL_RESULTS_KEY not in results):
             if self.verbose:
-                print ('  >No best candidate was cached for this configuration, '
+                print('  >No best candidate was cached for this configuration, '
                        'benching all kernels.')
             return None
         if self.verbose:
-            print '  >Retrieving best candidate from cache.'
+            print('  >Retrieving best candidate from cache.')
 
         # deep copy best_candidate so that program and kernels
         # do not spill into the cache (results dictionnary is mutable)
-        # and is used for all cache updates. Pickle cannot pickle 
+        # and is used for all cache updates. Pickle cannot pickle
         # pyopencl kernel and program objects.
         best_candidate = copy.deepcopy(results[self.FULL_RESULTS_KEY])
-        
-        (extra_parameters, 
-          work_size, work_load, global_work_size, local_work_size, 
-          prg, kernel, statistics, cached_kernel_src, 
-          cached_kernel_name, cached_src_hash, 
+
+        (extra_parameters,
+          work_size, work_load, global_work_size, local_work_size,
+          prg, kernel, statistics, cached_kernel_src,
+          cached_kernel_name, cached_src_hash,
           cached_kernel_hash, cached_kernel_hash_logs) = best_candidate
-        
+
         if (cached_kernel_hash != extra_kwds_hash):
             msg='\nCached kernel extra_kwds hash did not match the benched one:\n {}\n {}\n'
             msg+='\nThis might be due to an upgrade of the generated code or '
@@ -188,16 +192,16 @@ class KernelAutotuner(object):
         assert kernel is None
         global_work_size = npw.asintegerarray(global_work_size)
         local_work_size  = npw.asintegerarray(local_work_size)
-        
+
         kernel_name, kernel_src = tkernel.generate_kernel_src(
                         global_work_size=global_work_size,
-                        local_work_size=local_work_size, 
-                        extra_parameters=extra_parameters, 
-                        extra_kwds=extra_kwds, 
+                        local_work_size=local_work_size,
+                        extra_parameters=extra_parameters,
+                        extra_kwds=extra_kwds,
                         tuning_mode=False, dry_run=False)
-        
+
         hasher = self._hash_func()
-        hasher.update(kernel_src)
+        hasher.update(kernel_src.encode('utf-8'))
         src_hash = hasher.hexdigest()
 
         if (kernel_name != cached_kernel_name):
@@ -234,29 +238,29 @@ class KernelAutotuner(object):
                     msg+='\nCurrently tuned kernel sources dumped to \'{}\'.'.format(tuned_src)
             warnings.warn(msg, CodeGeneratorWarning)
             return None
-        
+
         try:
             (prg, kernel) = self.build_from_source(kernel_name=kernel_name,
-                                     kernel_src=kernel_src, 
+                                     kernel_src=kernel_src,
                                      build_options=self.build_opts,
                                      force_verbose=force_verbose,
                                      force_debug=force_debug)
         except Exception as e:
             msg = 'Could not use cached kernel because there was a problem during build:'
             msg +='\n  {}'.format(e)
-            print msg
+            print(msg)
             return None
-        
+
         try:
-            self.check_kernel(tkernel=tkernel, kernel=kernel, 
-                    global_work_size=global_work_size, 
+            self.check_kernel(tkernel=tkernel, kernel=kernel,
+                    global_work_size=global_work_size,
                     local_work_size=local_work_size)
         except Exception as e:
             msg = 'Could not use cached kernel because the following error occured during checkup:'
             msg +='\n  {}'.format(e)
-            print msg
+            print(msg)
             return None
-            
+
         best_candidate[self.program_idx]    = prg
         best_candidate[self.kernel_idx]     = kernel
         best_candidate[self.kernel_src_idx] = kernel_src
@@ -264,10 +268,10 @@ class KernelAutotuner(object):
         return tuple(best_candidate)
 
 
-    def _autotune_kernels(self, tkernel, results, extra_kwds, 
-            force_verbose, force_debug, first_working, 
+    def _autotune_kernels(self, tkernel, results, extra_kwds,
+            force_verbose, force_debug, first_working,
             extra_kwds_hash, extra_kwds_hash_logs, file_basename):
-        autotuner_config = self.autotuner_config 
+        autotuner_config = self.autotuner_config
         if first_working:
             nruns = 1
         else:
@@ -279,12 +283,12 @@ class KernelAutotuner(object):
         step_count = 0
 
         self._print_step(step_count, 'all', nruns)
-            
+
         ks = AutotunedKernelStatistics(tkernel, extra_kwds)
         ks.max_candidates = max_candidates
         ks.nruns = nruns
         ks.file_basename = file_basename
-        
+
         with Timer() as timer:
             params = tkernel.compute_parameters(extra_kwds=extra_kwds)
             total_count, pruned_count, kept_count, failed_count = 0,0,0,0
@@ -293,11 +297,11 @@ class KernelAutotuner(object):
                 extra_param_hash = tkernel.hash_extra_parameters(extra_parameters)
                 try:
                     (max_kernel_work_group_size, preferred_work_group_size_multiple) = \
-                        self.collect_kernel_infos(tkernel=tkernel, 
-                            extra_parameters=extra_parameters, 
+                        self.collect_kernel_infos(tkernel=tkernel,
+                            extra_parameters=extra_parameters,
                             extra_kwds=extra_kwds)
                     pks = ks.push_parameters(extra_param_hash,
-                            extra_parameters=extra_parameters, 
+                            extra_parameters=extra_parameters,
                             max_kernel_work_group_size=max_kernel_work_group_size,
                             preferred_work_group_size_multiple=preferred_work_group_size_multiple)
                 except Exception as e:
@@ -307,15 +311,16 @@ class KernelAutotuner(object):
                     pks = ks.push_parameters(extra_param_hash, extra_parameters=extra_parameters)
                     continue
 
+
                 work_bounds = tkernel.compute_work_bounds(max_kernel_work_group_size=max_kernel_work_group_size,
                                                           preferred_work_group_size_multiple=preferred_work_group_size_multiple,
                                                           extra_parameters=extra_parameters,
                                                           extra_kwds=extra_kwds)
                 work_size = work_bounds.work_size
-                
+
                 self._print_parameters(extra_parameters, work_bounds)
 
-                args_mapping = tkernel.compute_args_mapping(extra_kwds=extra_kwds, 
+                args_mapping = tkernel.compute_args_mapping(extra_kwds=extra_kwds,
                         extra_parameters=extra_parameters)
 
                 isolation_params = extra_kwds.get('isolation_params', None)
@@ -325,32 +330,32 @@ class KernelAutotuner(object):
                 kernel_args = extra_kwds['kernel_args']
                 check_instance(kernel_args, dict, keys=str)
                 args_list = self._compute_args_list(args_mapping=args_mapping, **kernel_args)
-                
+
                 for work_load in work_bounds.iter_work_loads():
-                    work = tkernel.compute_work_candidates(work_bounds=work_bounds, 
-                            work_load=work_load, extra_parameters=extra_parameters, 
+                    work = tkernel.compute_work_candidates(work_bounds=work_bounds,
+                            work_load=work_load, extra_parameters=extra_parameters,
                             extra_kwds=extra_kwds)
                     self._print_workload(work_load, work)
                     for local_work_size in work.iter_local_work_size():
                         global_work_size = tkernel.compute_global_work_size(
-                                               local_work_size=local_work_size, work=work, 
+                                               local_work_size=local_work_size, work=work,
                                                extra_parameters=extra_parameters,
                                                extra_kwds=extra_kwds)
 
-                        run_key = (extra_param_hash, tuple(work_load), 
+                        run_key = (extra_param_hash, tuple(work_load),
                                     tuple(global_work_size), tuple(local_work_size))
-                        
+
                         pruned = None
                         try:
                             kernel_name, kernel_src = tkernel.generate_kernel_src(
                                             global_work_size=global_work_size,
-                                            local_work_size=local_work_size, 
-                                            extra_parameters=extra_parameters, 
-                                            extra_kwds=extra_kwds, 
+                                            local_work_size=local_work_size,
+                                            extra_parameters=extra_parameters,
+                                            extra_kwds=extra_kwds,
                                             tuning_mode=True, dry_run=False)
-                                
+
                             hasher = self._hash_func()
-                            hasher.update(kernel_src)
+                            hasher.update(kernel_src.encode('utf-8'))
                             src_hash = hasher.hexdigest()
 
                             if (run_key in results):
@@ -361,7 +366,7 @@ class KernelAutotuner(object):
                                     msg+='\nThis might be due to an upgrade of the generated '
                                     msg+='code or a faulty implementation of '
                                     msg+='{}.hash_extra_kwds().'
-                                    msg=msg.format(src_hash, cache_src_hash, 
+                                    msg=msg.format(src_hash, cache_src_hash,
                                             type(tkernel).__name__)
                                     warnings.warn(msg, CodeGeneratorWarning)
                                     old_stats = None
@@ -374,18 +379,18 @@ class KernelAutotuner(object):
                             (prg, kernel, statistics, pruned) = self.bench_one_from_source(
                                                      tkernel=tkernel,
                                                      kernel_name=kernel_name,
-                                                     kernel_src=kernel_src, 
+                                                     kernel_src=kernel_src,
                                                      args_list=args_list,
                                                      args_mapping=args_mapping,
                                                      isolation_params=isolation_params,
-                                                     target_nruns=nruns, 
+                                                     target_nruns=nruns,
                                                      old_stats=old_stats,
                                                      best_stats=best_stats,
                                                      global_work_size=global_work_size,
                                                      local_work_size=local_work_size,
                                                      force_verbose=force_verbose,
                                                      force_debug=force_debug)
-                            
+
                             check_instance(statistics, KernelStatistics)
                             assert (statistics.nruns >= 1)
 
@@ -393,28 +398,28 @@ class KernelAutotuner(object):
                                 pruned_count += 1
                             else:
                                 kept_count += 1
-                            
+
                             if (best_stats is None) or (statistics.mean < best_stats.mean):
                                 local_best = True
                                 best_stats = statistics
                             else:
                                 local_best = False
-                            
-                            candidate =  (extra_parameters, 
+
+                            candidate =  (extra_parameters,
                                     tuple(work_size),
-                                    tuple(work_load), 
-                                    tuple(global_work_size), 
-                                    tuple(local_work_size), 
-                                    prg, kernel, statistics, 
-                                    kernel_src, kernel_name, 
+                                    tuple(work_load),
+                                    tuple(global_work_size),
+                                    tuple(local_work_size),
+                                    prg, kernel, statistics,
+                                    kernel_src, kernel_name,
                                     src_hash, extra_kwds_hash, extra_kwds_hash_logs)
-                            
+
                             results[run_key] = (src_hash, statistics)
                             bench_results[run_key] = candidate
                             pks.push_run_statistics(run_key,
                                     work_size=work_size, work_load=work_load,
-                                    local_work_size=local_work_size, global_work_size=global_work_size, 
-                                    statistics=statistics, pruned=pruned, 
+                                    local_work_size=local_work_size, global_work_size=global_work_size,
+                                    statistics=statistics, pruned=pruned,
                                     local_best=local_best, error=None)
                         except KernelGenerationError as e:
                             if __KERNEL_DEBUG__:
@@ -423,9 +428,9 @@ class KernelAutotuner(object):
                             statistics = None
                             from_cache=False
                             pks.push_run_statistics(run_key,
-                                    work_size=work_size, work_load=work_load, 
-                                    local_work_size=local_work_size, global_work_size=global_work_size, 
-                                    statistics=None, pruned=None, 
+                                    work_size=work_size, work_load=work_load,
+                                    local_work_size=local_work_size, global_work_size=global_work_size,
+                                    statistics=None, pruned=None,
                                     local_best=None, error=e)
                         total_count += 1
                         abort = (max_candidates is not None) and \
@@ -433,7 +438,7 @@ class KernelAutotuner(object):
                         abort |= (first_working and kept_count==1)
                         self._print_full_candidate(local_work_size, global_work_size, statistics, pruned,
                                                     from_cache)
-                        self._print_candidate((statistics is None), from_cache, 
+                        self._print_candidate((statistics is None), from_cache,
                                 total_count, abort)
                         if abort:
                             break
@@ -449,7 +454,7 @@ class KernelAutotuner(object):
                     msg='>Achieved maximum number of configured candidates: {}'
                     msg=msg.format(max_candidates)
                 if self.verbose>1:
-                    print msg
+                    print(msg)
 
             assert total_count == (kept_count+pruned_count+failed_count)
             if (kept_count == 0):
@@ -457,33 +462,33 @@ class KernelAutotuner(object):
                 msg += '(kept_count={}, pruned_count={}, failed_count={}), aborting.'
                 msg = msg.format(total_count, kept_count, pruned_count, failed_count)
                 raise RuntimeError(msg)
-            
+
             keep_only = max(previous_pow2(kept_count),1)
-            self._print_first_step_results(total_count, kept_count, pruned_count, 
+            self._print_first_step_results(total_count, kept_count, pruned_count,
                     failed_count, keep_only)
-            candidates = sorted(bench_results.items(), key=lambda x: x[1][self.kernel_statistics_idx])
+            candidates = tuple(sorted(bench_results.items(), key=lambda x: x[1][self.kernel_statistics_idx]))
             candidates = candidates[:keep_only]
             while(len(candidates)>1):
                 step_count += 1
                 nruns *= 2
-                
+
                 self._print_step(step_count, '{} BEST'.format(len(candidates)), nruns)
                 for (run_key, run_params) in candidates:
-                    (extra_params, work_size, work_load, global_work_size, local_work_size, 
+                    (extra_params, work_size, work_load, global_work_size, local_work_size,
                             _, kernel, old_stats, _, _, _, _, _) = run_params
                     self.bench_one_from_binary(kernel=kernel,
-                                             target_nruns=nruns, 
+                                             target_nruns=nruns,
                                              old_stats=old_stats,
                                              best_stats=best_stats,
                                              global_work_size=global_work_size,
                                              local_work_size=local_work_size)
-                candidates = sorted(candidates, key=lambda x: x[1][self.kernel_statistics_idx])
+                candidates = tuple(sorted(candidates, key=lambda x: x[1][self.kernel_statistics_idx]))
                 self._print_step_results(candidates, self.kernel_statistics_idx)
                 candidates = candidates[:max(previous_pow2(len(candidates)),1)]
                 ks.push_step(step_count, candidates)
             best_candidate = candidates[0][1]
         self._print_footer(ellapsed=timer.interval, best_candidate=best_candidate)
-        
+
         if autotuner_config.filter_statistics(file_basename):
             ks.exec_time = timer.interval
             ks.best_candidate = best_candidate
@@ -493,10 +498,10 @@ class KernelAutotuner(object):
             ks.failed_count = failed_count
             ks.total_count = total_count
             ks.extra_kwds_hash = best_candidate[self.extra_kwds_hash_idx]
-            if autotuner_config.plot_statistics and not first_working: 
+            if autotuner_config.plot_statistics and not first_working:
                 ks.plot()
-        
-        # Regenerate final kernel 
+
+        # Regenerate final kernel
         best_candidate = list(best_candidate)
         self._build_final_kernel(tkernel, best_candidate, extra_kwds)
         returned_best_candidate = tuple(best_candidate)
@@ -511,36 +516,36 @@ class KernelAutotuner(object):
         self._dump_cache()
 
         return returned_best_candidate
-    
+
     def _build_final_kernel(self, tkernel, best_candidate,
             extra_kwds):
-        (extra_parameters, work_size, work_load, global_work_size, local_work_size, 
+        (extra_parameters, work_size, work_load, global_work_size, local_work_size,
             _, _, _, _, _, _, _, _) = best_candidate
-        
+
         global_work_size = npw.asintegerarray(global_work_size)
         local_work_size  = npw.asintegerarray(local_work_size)
 
         kernel_name, kernel_src = tkernel.generate_kernel_src(
                         global_work_size=global_work_size,
-                        local_work_size=local_work_size, 
-                        extra_parameters=extra_parameters, 
-                        extra_kwds=extra_kwds, 
+                        local_work_size=local_work_size,
+                        extra_parameters=extra_parameters,
+                        extra_kwds=extra_kwds,
                         tuning_mode=False, dry_run=False)
-        
+
         hasher = self._hash_func()
-        hasher.update(kernel_src)
+        hasher.update(kernel_src.encode('utf-8'))
         src_hash = hasher.hexdigest()
-        
+
         (prg, kernel) = self.build_from_source(kernel_name=kernel_name,
-                                 kernel_src=kernel_src, 
+                                 kernel_src=kernel_src,
                                  build_options=self.build_opts,
                                  force_verbose=None,
                                  force_debug=None)
 
-        self.check_kernel(tkernel=tkernel, kernel=kernel, 
-                global_work_size=global_work_size, 
+        self.check_kernel(tkernel=tkernel, kernel=kernel,
+                global_work_size=global_work_size,
                 local_work_size=local_work_size)
-        
+
         best_candidate[self.program_idx]    = prg
         best_candidate[self.kernel_idx]     = kernel
         best_candidate[self.kernel_src_idx] = kernel_src
@@ -566,13 +571,13 @@ class KernelAutotuner(object):
 
         args_list = [None,]*len(args_mapping)
         arg_indices = set(ka[0] for ka in args_mapping.values())
-        if arg_indices != set(xrange(len(arg_indices))):
+        if arg_indices != set(range(len(arg_indices))):
             msg='Illformed argument position mapping:\n'
             msg+='\n'.join('  >argument {}: {}'.format(argpos, argname) for (argname, argpos)
                     in zip(args_mapping.keys(), arg_indices))
             msg+='\nExpected contiguous integer argument positions.'
             raise ValueError(msg)
-        for (arg_name, arg_value) in kernel_args.iteritems():
+        for (arg_name, arg_value) in kernel_args.items():
             if (arg_name not in args_mapping):
                 msg='Unknown argument {}, valid ones are {}.'
                 msg=msg.format(arg_name, ', '.join(args_mapping.keys()))
@@ -600,24 +605,24 @@ class KernelAutotuner(object):
             args_list[arg_index] = arg_value
         args_list = tuple(args_list)
         return tuple(args_list)
-    
+
     @abstractmethod
     def autotuner_config_key(self):
         """Caching key for autotuner configurations."""
         pass
-    
-    def bench_one_from_source(self, tkernel, kernel_name, kernel_src, 
+
+    def bench_one_from_source(self, tkernel, kernel_name, kernel_src,
             args_list, args_mapping, isolation_params,
             global_work_size, local_work_size,
-            target_nruns, old_stats, best_stats, 
+            target_nruns, old_stats, best_stats,
             force_verbose, force_debug):
         """
         Compile and bench one kernel by executing it nruns times.
         Return the compiled kernel, KernelStatistics and whether it was
         pruned or not.
         """
-        (prg, kernel) = self.build_from_source(kernel_name=kernel_name, 
-                kernel_src=kernel_src, build_options=self.build_opts, 
+        (prg, kernel) = self.build_from_source(kernel_name=kernel_name,
+                kernel_src=kernel_src, build_options=self.build_opts,
                 force_verbose=force_verbose, force_debug=force_debug)
 
         self.check_kernel_args(kernel, args_list)
@@ -628,29 +633,29 @@ class KernelAutotuner(object):
              except:
                  msg='Failed to set opencl kernel argument {} which is of type {}.\n'
                  msg=msg.format(i, type(arg))
-                 print msg
+                 print(msg)
                  raise
-        
+
         if self.DUMP_LAST_TUNED_KERNEL:
             name = 'currently_tuned'
             kernel_src_file = tkernel.generate_source_file(name, kernel_src, force=True)
             kernel_sim_file = tkernel.generate_oclgrind_isolation_file(kernel=kernel,
-                    kernel_name=name, kernel_source=kernel_src_file, 
+                    kernel_name=name, kernel_source=kernel_src_file,
                     global_work_size=global_work_size, local_work_size=local_work_size,
-                    args_list=args_list, args_mapping=args_mapping, 
+                    args_list=args_list, args_mapping=args_mapping,
                     isolation_params=isolation_params, force=True)
-            print 'Current tuned kernel has been dumped:'.format(name)
-            print '  {}'.format(kernel_sim_file)
+            print('Current tuned kernel {} has been dumped:'.format(name))
+            print('  {}'.format(kernel_sim_file))
 
-        bench_results = self.bench_one_from_binary(kernel=kernel, 
-                            target_nruns=target_nruns, old_stats=old_stats, 
+        bench_results = self.bench_one_from_binary(kernel=kernel,
+                            target_nruns=target_nruns, old_stats=old_stats,
                             best_stats=best_stats,
                             global_work_size=global_work_size, local_work_size=local_work_size)
 
         return (prg,kernel)+bench_results
-    
+
     @abstractmethod
-    def build_from_source(self, kernel_name, kernel_src, 
+    def build_from_source(self, kernel_name, kernel_src,
         build_options, force_verbose, force_debug):
         """
         Compile one kernel from source.
@@ -670,7 +675,7 @@ class KernelAutotuner(object):
         """
         pass
 
-    
+
     @abstractmethod
     def collect_kernel_infos(self, tkernel, extra_parameters, extra_kwds):
         """
@@ -686,21 +691,21 @@ class KernelAutotuner(object):
     @abstractmethod
     def check_kernel_args(self, kernel, args_list):
         pass
-    
+
     def _print_separator(self):
-        print '_'*80
+        print('_'*80)
 
     def _print_header(self, extra_kwds):
         verbose = self.verbose
         if verbose:
             self._print_separator()
-            print '\n|| KERNEL {} AUTOTUNING'.format(self.name.upper())
-            print '\n  *config: {} (nruns={}, prune={}, max_candidates={})'.format(
-                    self.autotuner_config.autotuner_flag, 
+            print('\n|| KERNEL {} AUTOTUNING'.format(self.name.upper()))
+            print('\n  *config: {} (nruns={}, prune={}, max_candidates={})'.format(
+                    self.autotuner_config.autotuner_flag,
                     self.autotuner_config.nruns,
                     self.autotuner_config.prune_threshold,
-                    extra_kwds.get('max_candidates', self.autotuner_config.max_candidates))
-            print '  *build_opts: {}'.format(self.tunable_kernel.build_opts or 'None')
+                    extra_kwds.get('max_candidates', self.autotuner_config.max_candidates)))
+            print('  *build_opts: {}'.format(self.tunable_kernel.build_opts or 'None'))
         return verbose
 
     def _print_parameters(self, extra_parameters, work_bounds):
@@ -710,40 +715,40 @@ class KernelAutotuner(object):
             msg=msg.format(extra_parameters)
             msg0='\n'+self.indent(1)
             msg0+='   work_size={}, min_work_load={}, max_work_load={}'
-            msg+=msg0.format(work_bounds.work_size, work_bounds.min_work_load, 
+            msg+=msg0.format(work_bounds.work_size, work_bounds.min_work_load,
                             work_bounds.max_work_load)
-            print msg
-    
+            print(msg)
+
     def _print_workload(self, work_load, work):
         if self.verbose>2:
             msg= '\n'+self.indent(2)+'::Current workload {}, global_work_size set to {}::'
             msg=msg.format(work_load, work.global_work_size)
-            print msg
+            print(msg)
             if self.verbose<4:
                 self._print_separator()
 
-    def _print_first_step_results(self, total_count, kept_count, pruned_count, 
+    def _print_first_step_results(self, total_count, kept_count, pruned_count,
             failed_count, keep_only):
         verbose = self.verbose
         if verbose>1:
             if verbose>=4:
                 self._print_separator()
             else:
-                print '\n'
-            print self.indent(1)+' All candidate kernels have been run:'
+                print('\n')
+            print(self.indent(1)+' All candidate kernels have been run:')
             msg=self.indent(2)+'Collected {} bench results (kept={}, pruned={}, failed={}).'
             msg=msg.format(total_count, kept_count, pruned_count, failed_count)
-            print msg
+            print(msg)
             msg=self.indent(2)+'Building binary tree optimizer out of {} best candidates.'
             msg=msg.format(keep_only)
-            print msg
+            print(msg)
 
     def _print_step(self, step, candidates, nruns):
         if self.verbose>1:
             msg='\n   AUTOTUNING STEP {} :: running {} candidates over {} runs'
             msg=msg.format(step, candidates, nruns)
             self._print_separator()
-            print msg.upper()
+            print(msg.upper())
 
     def _print_candidate(self, failed, from_cache, total_count, abort):
         if self.verbose==2:
@@ -762,8 +767,8 @@ class KernelAutotuner(object):
             elif (total_count % 5 == 0):
                 sys.stdout.write(' ')
             sys.stdout.flush()
-    
-    def _print_full_candidate(self, local_work_size, global_work_size, 
+
+    def _print_full_candidate(self, local_work_size, global_work_size,
             statistics, is_pruned, from_cache):
         if self.verbose>3:
             failed = (statistics is None)
@@ -778,35 +783,35 @@ class KernelAutotuner(object):
             else:
                 indicator='|'
             config = self.indent(3)+'{} L={:^10}, G={:^10}: {}'.format(indicator, local_work_size, global_work_size, msg)
-            print config
+            print(config)
 
 
     def _print_step_results(self, sorted_candidates, kernel_statistics_idx):
         if self.verbose==2:
             best  = sorted_candidates[0][1]
             worst = sorted_candidates[-1][1]
-            print self.indent(2)+'worst candidate: {}'.format(worst[kernel_statistics_idx])
-            print self.indent(2)+'best  candidate: {}'.format(best[kernel_statistics_idx])
-            
+            print(self.indent(2)+'worst candidate: {}'.format(worst[kernel_statistics_idx]))
+            print(self.indent(2)+'best  candidate: {}'.format(best[kernel_statistics_idx]))
+
     def _print_footer(self, ellapsed, best_candidate):
         if self.verbose:
-            (best_extra_params, best_work_size, best_work_load, best_global_size, best_local_size, 
+            (best_extra_params, best_work_size, best_work_load, best_global_size, best_local_size,
                     _, _, best_stats, _, _, _, _, _) = best_candidate
             if self.verbose>1:
                 if ellapsed is not None:
                     self._print_separator()
                     msg='\n   Autotuning successfully run in {}.'
                     msg=msg.format(time2str(ellapsed))
-                    print msg
+                    print(msg)
                 self._print_separator()
             id1 = self.indent(1)
-            print '\n|> BEST OVERALL RESULT for kernel {}:'.format(self.name)
-            print id1+' => Extra params:'
-            for ep,val in best_extra_params.iteritems():
-                print self.indent(2)+'*{}: {}'.format(ep, val) 
+            print('\n|> BEST OVERALL RESULT for kernel {}:'.format(self.name))
+            print(id1+' => Extra params:')
+            for ep,val in best_extra_params.items():
+                print(self.indent(2)+'*{}: {}'.format(ep, val))
             msg=id1+' => WL={} G={} L={}'
             msg=msg.format(best_work_load, best_global_size, best_local_size)
-            print msg
-            print id1+' => Execution statistics: {}'.format(best_stats)
+            print(msg)
+            print(id1+' => Execution statistics: {}'.format(best_stats))
             self._print_separator()
-            print
+            print()
diff --git a/hysop/backend/device/kernel_autotuner_config.py b/hysop/backend/device/kernel_autotuner_config.py
index ee4483e49fc367fe0adc538371c6efdff2a211a0..b8ed3d302944b7072493ecf13160c523a157d41b 100644
--- a/hysop/backend/device/kernel_autotuner_config.py
+++ b/hysop/backend/device/kernel_autotuner_config.py
@@ -4,9 +4,7 @@ from hysop.constants import AutotunerFlags, \
                             __DEBUG__, __VERBOSE__, __KERNEL_DEBUG__, \
                             DEFAULT_AUTOTUNER_FLAG, DEFAULT_AUTOTUNER_PRUNE_THRESHOLD
 
-class KernelAutotunerConfig(object):
-
-    __metaclass__ = ABCMeta
+class KernelAutotunerConfig(object, metaclass=ABCMeta):
 
     _default_initial_runs = {
         AutotunerFlags.ESTIMATE:   1,
diff --git a/hysop/backend/device/kernel_autotuner_statistics.py b/hysop/backend/device/kernel_autotuner_statistics.py
index 1f086e2b00f60bc0b2c3838c5997c6a572477fc8..63cfb7bcc474046d33832d6cd3ca09105fe3666e 100644
--- a/hysop/backend/device/kernel_autotuner_statistics.py
+++ b/hysop/backend/device/kernel_autotuner_statistics.py
@@ -7,9 +7,9 @@ class AutotunedKernelStatistics(dict):
     class AutotunedParameterStatistics(dict):
         class AutotunedRunStatistics(object):
             def __init__(self,
-                        work_size, work_load, 
-                        local_work_size, global_work_size, 
-                        statistics, pruned, 
+                        work_size, work_load,
+                        local_work_size, global_work_size,
+                        statistics, pruned,
                         local_best, error):
                 self.work_size = work_size
                 self.work_load = work_load
@@ -22,7 +22,7 @@ class AutotunedKernelStatistics(dict):
             def good(self):
                 return (self.error is None)
         def __init__(self, extra_parameters,
-                       max_kernel_work_group_size=None, 
+                       max_kernel_work_group_size=None,
                        preferred_work_group_size_multiple=None):
             self.extra_parameters = extra_parameters
             self.max_kernel_work_group_size = max_kernel_work_group_size
@@ -91,10 +91,10 @@ class AutotunedKernelStatistics(dict):
 
     def collect_exec_times(self):
         run_times = ()
-        for (extra_param_hash, parameter_statistics) in self.iteritems():
+        for (extra_param_hash, parameter_statistics) in self.items():
             if not parameter_statistics.good():
                 continue
-            for (run_key, run_statistics) in parameter_statistics.iteritems():
+            for (run_key, run_statistics) in parameter_statistics.items():
                 if not run_statistics.good():
                     continue
                 run_time = run_statistics.statistics.mean
diff --git a/hysop/backend/device/kernel_config.py b/hysop/backend/device/kernel_config.py
index 334ab9e34cbbad72498c1a64e739fda4b42f81ba..6db66240464c2c952b580e87fe46ddae7904743a 100644
--- a/hysop/backend/device/kernel_config.py
+++ b/hysop/backend/device/kernel_config.py
@@ -5,9 +5,7 @@ from hysop.constants import Precision
 from hysop.backend.device.kernel_autotuner_config import KernelAutotunerConfig
 from hysop.tools.types import check_instance, first_not_None
 
-class KernelConfig(object):
-
-    __metaclass__ = ABCMeta
+class KernelConfig(object, metaclass=ABCMeta):
 
     def __init__(self, autotuner_config=None,
                 user_build_options=None,
@@ -15,7 +13,7 @@ class KernelConfig(object):
                 precision=None,
                 float_dump_mode=None,
                 use_short_circuit_ops=None,
-                unroll_loops=None): 
+                unroll_loops=None):
 
         autotuner_config = first_not_None(autotuner_config, self.default_autotuner_config())
         user_build_options = first_not_None(user_build_options, [])
@@ -23,7 +21,7 @@ class KernelConfig(object):
         precision = first_not_None(precision, Precision.SAME)
         use_short_circuit_ops = first_not_None(use_short_circuit_ops, False)
         unroll_loops = first_not_None(unroll_loops, False)
-        
+
         if (float_dump_mode is None):
             if __KERNEL_DEBUG__:
                 float_dump_mode = 'dec'
diff --git a/hysop/backend/device/kernel_statistics.py b/hysop/backend/device/kernel_statistics.py
index ed7484a5daf4094fa4b89fffeeb623c1defa329b..4052fcde25e5938c379bfe4ddd853ea7b4261f2d 100644
--- a/hysop/backend/device/kernel_statistics.py
+++ b/hysop/backend/device/kernel_statistics.py
@@ -7,37 +7,38 @@ class KernelStatistics(object):
     Execution statistics extracted from kernel events.
     """
 
-    def __init__(self, min_, max_, total, nruns, data=None, **kwds): 
+    def __init__(self, min_, max_, total, nruns, data=None, **kwds):
         """
         Initialize KernelStatistics from nruns.
         Statistics should be given in nanoseconds.
         """
         super(KernelStatistics, self).__init__(**kwds)
-        check_instance(min_, (int,long), allow_none=True)
-        check_instance(max_, (int,long), allow_none=True)
-        check_instance(total, (int,long), allow_none=True)
-        check_instance(nruns, (int,long))
+        check_instance(min_, int, allow_none=True)
+        check_instance(max_, int, allow_none=True)
+        check_instance(total, int, allow_none=True)
+        check_instance(nruns, int)
         check_instance(data, (list,tuple), allow_none=True)
         self._min   = min_
         self._max   = max_
         self._total = total
         self._nruns = nruns
         self._data  = None if (data is None) else tuple(data)
-        
+
     def _get_min(self):
-		return self._min
+        return self._min
     def _get_max(self):
-		return self._max
+        return self._max
     def _get_mean(self):
         assert (self._nruns>0)
         return self._total / float(self._nruns)
+
     def _get_total(self):
-		return self._total
+        return self._total
     def _get_nruns(self):
-		return self._nruns
+        return self._nruns
     def _get_data(self):
-		return self._data
-    
+        return self._data
+
     min   = property(_get_min)
     max   = property(_get_max)
     mean  = property(_get_mean)
@@ -77,7 +78,7 @@ class KernelStatistics(object):
         return self.cmp(self, other) >= 0
     def __ne__(self, other):
         return self.cmp(self, other) != 0
-    
+
     def __iadd__(self, other):
         if (other.nruns == 0):
             return
@@ -94,7 +95,7 @@ class KernelStatistics(object):
             self._total += other.total
             self._data  += other.data
         return self
-    
+
     def __str__(self):
         mini  = float(self.min)   * 1e-9 #ns
         maxi  = float(self.max)   * 1e-9 #ns
diff --git a/hysop/backend/device/logical_device.py b/hysop/backend/device/logical_device.py
index d9e74c926d3b17934518065e1063164ecec49768..44682869e544b2956bc2fea40e91470877ce1fc2 100644
--- a/hysop/backend/device/logical_device.py
+++ b/hysop/backend/device/logical_device.py
@@ -10,11 +10,9 @@ class UnknownDeviceAttribute(object):
         return 'unknown'
 
 
-class LogicalDevice(object):
+class LogicalDevice(object, metaclass=ABCMeta):
 
-    __metaclass__ = ABCMeta
-    
-    def __init__(self, platform, platform_handle, device_id, device_handle, 
+    def __init__(self, platform, platform_handle, device_id, device_handle,
             hardware_topo, **kargs):
         super(LogicalDevice,self).__init__(**kargs)
         self._platform = platform
@@ -22,11 +20,11 @@ class LogicalDevice(object):
         physical_devices = self._match_physical_devices(hardware_topo=hardware_topo)
         physical_devices = to_tuple(physical_devices)
         self._physical_devices = physical_devices
-        
+
         vendor = hardware_topo.pciids.find_vendor(self.vendor_id())
-        self._vendor_handle = vendor 
+        self._vendor_handle = vendor
         self._device_handle = None
-        
+
         # identifying device without device_id is not easy for CPUs
         # so we do not look for a device handle
         if (physical_devices is not None):
@@ -50,15 +48,15 @@ class LogicalDevice(object):
             return ' [0x{:04x}]'.format(did)
         else:
             return ''
-    
+
     @property
     def device_id(self):
         return self._device_id
-        
+
     @property
     def platform(self):
         return self._platform
-    
+
     @property
     def physical_devices(self):
         return self._physical_devices
@@ -66,7 +64,7 @@ class LogicalDevice(object):
     @abstractmethod
     def _match_physical_devices(self, hardware_topo):
         pass
-    
+
     @abstractmethod
     def _determine_performance_and_affinity(self, hardware_topo):
         pass
@@ -153,14 +151,14 @@ class LogicalDevice(object):
     @abstractmethod
     def max_global_alloc_size(self):
         pass
-    
+
     @abstractmethod
     def local_mem_size(self):
         pass
     @abstractmethod
     def local_mem_type(self):
         pass
-    
+
 
 #DEVICE SPLITTING
     @abstractmethod
@@ -194,7 +192,7 @@ class LogicalDevice(object):
     @abstractmethod
     def fp64_config(self):
         pass
-    
+
 #IMAGES
     def has_image_support(self):
         pass
@@ -206,14 +204,14 @@ class LogicalDevice(object):
         pass
     def max_samplers(self):
         pass
-    
+
     def has_1d_image_support(self):
         pass
     def has_2d_image_support(self):
         pass
     def has_3d_image_support(self):
         pass
-    
+
     def has_1d_image_write_support(self):
         pass
     def has_2d_image_write_support(self):
@@ -225,7 +223,7 @@ class LogicalDevice(object):
         pass
     def has_2d_array_image_support(self):
         pass
-    
+
     def max_1d_image_size(self):
         pass
     def max_1d_image_array_size(self):
@@ -238,7 +236,7 @@ class LogicalDevice(object):
 
     def max_3d_image_size(self):
         pass
-   
+
 
     def has_2d_image_from_buffer_support(self):
         pass
@@ -254,7 +252,7 @@ class LogicalDevice(object):
     def image_max_array_size(self):
         pass
 
-    
+
 #ATOMICS
     @abstractmethod
     def has_global_int32_atomics(self):
@@ -268,7 +266,7 @@ class LogicalDevice(object):
     @abstractmethod
     def has_global_float64_atomics(self):
         pass
-    
+
     @abstractmethod
     def has_local_int32_atomics(self):
         pass
@@ -281,7 +279,7 @@ class LogicalDevice(object):
     @abstractmethod
     def has_local_float64_atomics(self):
         pass
-    
+
     @abstractmethod
     def has_mixed_int32_atomics(self):
         pass
@@ -294,7 +292,7 @@ class LogicalDevice(object):
     @abstractmethod
     def has_mixed_float64_atomics(self):
         pass
-    
+
     @abstractmethod
     def has_int32_hardware_atomic_counters(self):
         pass
diff --git a/hysop/backend/device/opencl/__init__.py b/hysop/backend/device/opencl/__init__.py
index e63a4b7acfd8a3acad7e1ebcb18c0b0f20eefdcd..9956307692102465ae1d96b8d9d5584df70bf169 100644
--- a/hysop/backend/device/opencl/__init__.py
+++ b/hysop/backend/device/opencl/__init__.py
@@ -41,6 +41,9 @@ cl = pyopencl
 clTools = pyopencl.tools
 """PyOpencl tools"""
 
+clTypes = pyopencl.cltypes
+"""PyOpencl types"""
+
 clArray = pyopencl.array
 """PyOpenCL arrays"""
 
diff --git a/hysop/backend/device/opencl/autotunable_kernels/custom_symbolic.py b/hysop/backend/device/opencl/autotunable_kernels/custom_symbolic.py
index 9831ca86cb40d7ae8726f049af77d9fd1a1b1965..dcb5daa29967a6b987e10f0ba165fed0b7f6aea3 100644
--- a/hysop/backend/device/opencl/autotunable_kernels/custom_symbolic.py
+++ b/hysop/backend/device/opencl/autotunable_kernels/custom_symbolic.py
@@ -1,5 +1,5 @@
+import warnings
 
-from hysop.deps import warnings
 from hysop.tools.numpywrappers import npw
 from hysop.tools.types import check_instance
 from hysop.tools.numerics import is_complex
@@ -17,30 +17,30 @@ from hysop.backend.device.opencl import cl, clTools
 from hysop.backend.device.opencl.opencl_array import OpenClArray
 from hysop.backend.device.opencl.opencl_autotunable_kernel import OpenClAutotunableKernel
 from hysop.backend.device.kernel_autotuner import KernelGenerationError
-        
+
 class OpenClAutotunableCustomSymbolicKernel(OpenClAutotunableKernel):
     """Autotunable interface for directional remeshing kernel code generators."""
 
     @classmethod
     def sort_key_by_name(cls, iterator):
         """Utility to sort a dictionary by key name."""
-        # We sort objects because this has an influence on generated 
+        # We sort objects because this has an influence on generated
         # argument order (and for autotuner hashing)
         return sorted(iterator, key=lambda x:x[0].name)
 
-    def autotune(self, expr_info, hardcode_arrays=True, 
+    def autotune(self, expr_info, hardcode_arrays=True,
                         has_complex=False, disable_vectorization=False, debug=False,
                         **kwds):
         """Autotune this kernel with specified configuration.
-        
+
         hardcode_arrays means that array offset and strides can be hardcoded
         into the kernels as constants.
         """
-        check_instance(expr_info.input_dfields.values(),  list, values=CartesianDiscreteScalarFieldView)
-        check_instance(expr_info.output_dfields.values(), list, values=CartesianDiscreteScalarFieldView)
+        check_instance(tuple(expr_info.input_dfields.values()),  tuple, values=CartesianDiscreteScalarFieldView)
+        check_instance(tuple(expr_info.output_dfields.values()), tuple, values=CartesianDiscreteScalarFieldView)
 
         granularity = expr_info.compute_granularity
-           
+
         dim       = expr_info.dim
         direction = expr_info.direction
 
@@ -53,7 +53,7 @@ class OpenClAutotunableCustomSymbolicKernel(OpenClAutotunableKernel):
         if not issubclass(precision, npw.floating):
             msg='Precision is not a npw.floating subtype, got {}.'.format(precision)
             raise TypeError(msg)
-        
+
         cshape     = expr_info.compute_resolution
         array_dim  = len(cshape)
         iter_shape = cshape[:granularity]
@@ -61,11 +61,11 @@ class OpenClAutotunableCustomSymbolicKernel(OpenClAutotunableKernel):
         kernel_dim = work_size.size
 
         work_dim  = min(kernel_dim, self.max_device_work_dim())
-        work_size = work_size[:work_dim] 
-        
+        work_size = work_size[:work_dim]
+
         DEBUG=False
         if DEBUG:
-            print \
+            print(
             '''
                 dim:    {}
                 dir:    {}
@@ -78,30 +78,30 @@ class OpenClAutotunableCustomSymbolicKernel(OpenClAutotunableKernel):
                 work_size:  {}
                 kernel_dim: {}
             '''.format(dim, direction, precision, ftype, cshape,
-                      array_dim, iter_shape, work_size, work_dim)
-        
+                      array_dim, iter_shape, work_size, work_dim))
+
         min_ghosts = npw.dim_zeros(dim)
         for mg in expr_info.min_ghosts.values():
             min_ghosts = npw.maximum(min_ghosts, mg)
         assert (min_ghosts>=0).all(), min_ghosts
         min_wg_size  = 2*min_ghosts[dim-1-direction]+1
         assert (min_wg_size >= 1), min_wg_size
-        
+
         name = expr_info.name
         for dfields in (expr_info.input_dfields, expr_info.output_dfields):
-            for (field, dfield) in dfields.iteritems():
+            for (field, dfield) in dfields.items():
                 if (dfield.compute_resolution != cshape).any():
                     msg='Resolution mismatch between discrete fields, '
                     msg+='got {} but cshape={}, cannot generate kernel.'
                     msg=msg.format(dfield.compute_resolution, cshape)
                     raise ValueError(msg)
-        for (field, dfield) in expr_info.input_dfields.iteritems():
+        for (field, dfield) in expr_info.input_dfields.items():
             if (dfield.dfield.ghosts < expr_info.min_ghosts[field]).any():
                 msg='Min ghosts condition not met for discrete field {}:\n'
                 msg+=' expected {} but got only {}.'
                 msg=msg.format(dfield.name, expr_info.min_ghosts[field], dfield.ghosts)
                 raise ValueError(msg)
-        
+
         for mem_objects in (expr_info.input_arrays.values(), expr_info.output_arrays.values(),
                        expr_info.input_buffers.values(), expr_info.output_buffers.values()):
             for mem_obj in mem_objects:
@@ -110,7 +110,7 @@ class OpenClAutotunableCustomSymbolicKernel(OpenClAutotunableKernel):
                     msg+='prior to code generation.'
                     msg=msg.format(type(mem_obj).__name__, mem_obj.name)
                     raise RuntimeError(msg)
-            
+
         for arrays in (expr_info.input_arrays.values(), expr_info.output_arrays.values()):
             for array in arrays:
                 if not npw.array_equal(array.shape, cshape):
@@ -118,13 +118,13 @@ class OpenClAutotunableCustomSymbolicKernel(OpenClAutotunableKernel):
                     msg+='got {} but cshape={}, cannot generate kernel.'
                     msg=msg.format(dfield.compute_resolution, cshape)
                     raise ValueError(msg)
-       
+
         make_offset, offset_dtype = self.make_array_offset()
-        make_strides, strides_dtype = self.make_array_strides(array_dim, 
+        make_strides, strides_dtype = self.make_array_strides(array_dim,
                 hardcode_arrays=hardcode_arrays)
         parameter_dtypes, parameter_make = {}, {}
         make_gidx, gidx_dtype, = self.make_array_granularity_index(granularity)
-        
+
         kernel_args = {}
         known_args = {}
         isolation_params = {}
@@ -135,7 +135,7 @@ class OpenClAutotunableCustomSymbolicKernel(OpenClAutotunableKernel):
         # read-only input fields (opencl buffers can be marked read-only in discrete field views)
         # read-only input arrays and input buffers
         di = expr_info.discretization_info
-        for (obj, counts) in self.sort_key_by_name(di.read_counter.iteritems()):
+        for (obj, counts) in self.sort_key_by_name(di.read_counter.items()):
             if isinstance(obj, di.IndexedCounterTypes):
                 assert isinstance(obj, DiscreteScalarFieldView)
                 dfield = expr_info.input_dfields[obj._field]
@@ -150,7 +150,7 @@ class OpenClAutotunableCustomSymbolicKernel(OpenClAutotunableKernel):
                     kernel_args[vname+'_base']    = dfield.data[i].base_data
                     target_stride_args[vname+'_strides'] = make_strides(dfield.data[i].strides, dfield.dtype)
                     target_offset_args[vname+'_offset']  = make_offset(dfield.data[i].offset, dfield.dtype)
-                    isolation_params[vname+'_base'] = dict(count=dfield.npoints, 
+                    isolation_params[vname+'_base'] = dict(count=dfield.npoints,
                             dtype=dfield.dtype, fill=i)
             elif isinstance(obj, di.SimpleCounterTypes):
                 assert isinstance(obj, (OpenClSymbolicArray, OpenClSymbolicBuffer, OpenClSymbolicNdBuffer)), type(obj)
@@ -169,7 +169,7 @@ class OpenClAutotunableCustomSymbolicKernel(OpenClAutotunableKernel):
             has_complex |= is_complex(obj.dtype)
 
         # output fields, output arrays (output buffers are not supported yet)
-        for (obj, counts) in self.sort_key_by_name(di.write_counter.iteritems()):
+        for (obj, counts) in self.sort_key_by_name(di.write_counter.items()):
             if isinstance(obj, di.IndexedCounterTypes):
                 assert isinstance(obj, DiscreteScalarFieldView)
                 dfield = expr_info.output_dfields[obj._field]
@@ -182,7 +182,7 @@ class OpenClAutotunableCustomSymbolicKernel(OpenClAutotunableKernel):
                     kernel_args[vname+'_base']    = dfield.data[i].base_data
                     target_stride_args[vname+'_strides'] = make_strides(dfield.data[i].strides, dfield.dtype)
                     target_offset_args[vname+'_offset']  = make_offset(dfield.data[i].offset, dfield.dtype)
-                    isolation_params[vname+'_base'] = dict(count=dfield.npoints, 
+                    isolation_params[vname+'_base'] = dict(count=dfield.npoints,
                                                            dtype=dfield.dtype, fill=i)
             elif isinstance(obj, di.SimpleCounterTypes):
                 assert isinstance(obj, (OpenClSymbolicArray, OpenClSymbolicBuffer, OpenClSymbolicNdBuffer)), type(obj)
@@ -200,7 +200,7 @@ class OpenClAutotunableCustomSymbolicKernel(OpenClAutotunableKernel):
 
 
         # read-only input parameters
-        for (pname, param) in expr_info.input_params.iteritems():
+        for (pname, param) in expr_info.input_params.items():
             if (pname in expr_info.output_params):
                 continue
             has_complex |= is_complex(param.dtype)
@@ -215,7 +215,7 @@ class OpenClAutotunableCustomSymbolicKernel(OpenClAutotunableKernel):
                 assert make_param().dtype == param_dtype
 
         # output parameters
-        for (pname, param) in expr_info.output_params.iteritems():
+        for (pname, param) in expr_info.output_params.items():
             has_complex |= is_complex(param.dtype)
             if param.const:
                 msg='A constant parameter cannot be set as output parameter.'
@@ -225,18 +225,18 @@ class OpenClAutotunableCustomSymbolicKernel(OpenClAutotunableKernel):
         # granularity argument
         if (granularity>0):
             kernel_args['gidx'] = make_gidx([0,]*granularity)
-        
+
         return super(OpenClAutotunableCustomSymbolicKernel, self).autotune(name=name,
                 expr_info=expr_info, kernel_dim=kernel_dim, compute_shape=cshape,
                 precision=precision, ftype=ftype, mesh_info_vars=mesh_info_vars,
-                work_dim=work_dim, work_size=work_size, min_wg_size=min_wg_size, 
-                known_args=known_args, kernel_args=kernel_args, 
-                hardcode_arrays=hardcode_arrays, 
-                granularity=granularity, iter_shape=iter_shape, 
+                work_dim=work_dim, work_size=work_size, min_wg_size=min_wg_size,
+                known_args=known_args, kernel_args=kernel_args,
+                hardcode_arrays=hardcode_arrays,
+                granularity=granularity, iter_shape=iter_shape,
                 gidx_dtype=gidx_dtype, make_gidx=make_gidx,
                 offset_dtype=offset_dtype, strides_dtype=strides_dtype,
                 parameter_dtypes=parameter_dtypes, parameter_make=parameter_make,
-                isolation_params=isolation_params, has_complex=has_complex, 
+                isolation_params=isolation_params, has_complex=has_complex,
                 disable_vectorization=disable_vectorization, debug=debug, **kwds)
 
 
@@ -249,7 +249,7 @@ class OpenClAutotunableCustomSymbolicKernel(OpenClAutotunableKernel):
         arg_position being an int and arg_type(s) a type or
         tuple of types which will be checked against.
         """
-        
+
         strides_dtype    = extra_kwds['strides_dtype']
         offset_dtype     = extra_kwds['offset_dtype']
         gidx_dtype       = extra_kwds['gidx_dtype']
@@ -257,13 +257,13 @@ class OpenClAutotunableCustomSymbolicKernel(OpenClAutotunableKernel):
         granularity      = extra_kwds['granularity']
         expr_info        = extra_kwds['expr_info']
         parameter_dtypes = extra_kwds['parameter_dtypes']
-        
+
         args_mapping = {}
         arg_index = 0
-        
+
         # read-only input fields, input arrays, input buffers
         di = expr_info.discretization_info
-        for (obj, counts) in self.sort_key_by_name(di.read_counter.iteritems()):
+        for (obj, counts) in self.sort_key_by_name(di.read_counter.items()):
             if isinstance(obj, di.IndexedCounterTypes):
                 assert isinstance(obj, DiscreteScalarFieldView)
                 dfield = obj
@@ -297,9 +297,9 @@ class OpenClAutotunableCustomSymbolicKernel(OpenClAutotunableKernel):
             else:
                 msg='Unsupported type {}.'.format(type(obj))
                 raise TypeError(msg)
-        
+
         # output fields, arrays
-        for (obj, counts) in self.sort_key_by_name(di.write_counter.iteritems()):
+        for (obj, counts) in self.sort_key_by_name(di.write_counter.items()):
             if isinstance(obj, di.IndexedCounterTypes):
                 assert isinstance(obj, DiscreteScalarFieldView)
                 dfield = obj
@@ -330,16 +330,16 @@ class OpenClAutotunableCustomSymbolicKernel(OpenClAutotunableKernel):
             else:
                 msg='Unsupported type {}.'.format(type(obj))
                 raise TypeError(msg)
-        
+
         # read-only input parameters
-        for (pname, param) in expr_info.input_params.iteritems():
+        for (pname, param) in expr_info.input_params.items():
             if (pname in expr_info.output_params) or param.const:
                 continue
             args_mapping[pname] = (arg_index, parameter_dtypes[pname])
             arg_index += 1
-        
+
         # output parameters
-        for (pname, param) in expr_info.output_params.iteritems():
+        for (pname, param) in expr_info.output_params.items():
             assert not param.const
             args_mapping[pname] = (arg_index, cl.MemoryObjectHolder)
             arg_index += 1
@@ -352,11 +352,11 @@ class OpenClAutotunableCustomSymbolicKernel(OpenClAutotunableKernel):
         return args_mapping
 
 
-    def compute_parameters(self, extra_kwds): 
+    def compute_parameters(self, extra_kwds):
         """Register extra parameters to optimize."""
         check_instance(extra_kwds, dict, keys=str)
         cshape = extra_kwds['compute_shape']
-        
+
         if extra_kwds['disable_vectorization']:
             vectorization_options = [1,]
         else:
@@ -364,7 +364,7 @@ class OpenClAutotunableCustomSymbolicKernel(OpenClAutotunableKernel):
                 vectorization_options = [1, 2, 4, 8]
             else:
                 vectorization_options = [1, 2, 4, 8, 16]
-                    
+
         autotuner_flag = self.autotuner_config.autotuner_flag
         if (autotuner_flag == AutotunerFlags.ESTIMATE):
             max_workitem_workload = [1,1,1]
@@ -378,18 +378,18 @@ class OpenClAutotunableCustomSymbolicKernel(OpenClAutotunableKernel):
         work_dim = extra_kwds['work_dim']
         max_workitem_workload = npw.asarray(max_workitem_workload[:work_dim])
         extra_kwds['max_work_load'] = max_workitem_workload
-        
+
         params = super(OpenClAutotunableCustomSymbolicKernel, self).compute_parameters(
                         extra_kwds=extra_kwds)
         params.register_extra_parameter('vectorization', vectorization_options)
         return params
-               
+
     def compute_min_max_wg_size(self, work_bounds, work_load, global_work_size,
             extra_parameters, extra_kwds):
         """Default min and max workgroup size."""
         _min_wg_size = extra_kwds['min_wg_size']
         vectorization = extra_parameters['vectorization']
-        min_wg_size = npw.ones(shape=work_bounds.work_dim, dtype=npw.int32) 
+        min_wg_size = npw.ones(shape=work_bounds.work_dim, dtype=npw.int32)
         max_wg_size = npw.ones(shape=work_bounds.work_dim, dtype=npw.int32)
         min_wg_size[0] = _min_wg_size
         max_wg_size[0] = max((global_work_size[0]+vectorization-1)//vectorization, _min_wg_size)
@@ -398,7 +398,7 @@ class OpenClAutotunableCustomSymbolicKernel(OpenClAutotunableKernel):
     def compute_global_work_size(self, work, local_work_size,
             extra_parameters, extra_kwds):
         # contiguous axe is iterated fully by one work_group
-        global_work_size = ((work.global_work_size+local_work_size-1)/local_work_size)*local_work_size
+        global_work_size = ((work.global_work_size+local_work_size-1)//local_work_size)*local_work_size
         global_work_size[0] = local_work_size[0]
         return global_work_size
 
@@ -415,7 +415,7 @@ class OpenClAutotunableCustomSymbolicKernel(OpenClAutotunableKernel):
         mesh_info_vars = extra_kwds['mesh_info_vars']
         granularity    = extra_kwds['granularity']
         debug          = extra_kwds['debug']
-        
+
         if dry_run:
             assert local_work_size is None
             assert global_work_size is None
@@ -427,32 +427,32 @@ class OpenClAutotunableCustomSymbolicKernel(OpenClAutotunableKernel):
 
         ## Get compile time OpenCL known variables
         known_vars = super(OpenClAutotunableCustomSymbolicKernel, self).generate_kernel_src(
-                global_work_size=global_work_size, 
-                local_work_size=local_work_size, 
-                extra_parameters=extra_parameters, 
-                extra_kwds=extra_kwds, 
+                global_work_size=global_work_size,
+                local_work_size=local_work_size,
+                extra_parameters=extra_parameters,
+                extra_kwds=extra_kwds,
                 tuning_mode=tuning_mode,
                 dry_run=dry_run)
         known_vars.update(mesh_info_vars)
         known_vars.update(known_args)
-                        
+
         ## Generate OpenCL source code
         codegen = CustomSymbolicKernelGenerator.create(expr_info,
                            typegen=self.typegen, ftype=ftype,
                            kernel_dim=kernel_dim, work_dim=work_dim, granularity=granularity,
-                           vectorization=vectorization, 
+                           vectorization=vectorization,
                            symbolic_mode=self.symbolic_mode,
                            tuning_mode=tuning_mode,
-                           debug_mode=False, 
+                           debug_mode=False,
                            known_vars=known_vars)
-        
+
         if debug:
-            print 'User asked to debug kernel {}, entering in edit mode and terminating program.'.format(codegen.name)
+            print('User asked to debug kernel {}, entering in edit mode and terminating program.'.format(codegen.name))
             codegen.edit()
             codegen.test_compile()
             import sys
             sys.exit(1)
-        
+
         kernel_name = codegen.name
         kernel_src  = str(codegen)
 
@@ -465,18 +465,18 @@ class OpenClAutotunableCustomSymbolicKernel(OpenClAutotunableKernel):
             self.check_cache(required_cache[2])
 
         return (kernel_name, kernel_src)
-    
+
     def hash_extra_kwds(self, extra_kwds):
         """Hash extra_kwds dictionnary for caching purposes."""
         kwds = ('kernel_dim', 'work_dim', 'ftype', 'granularity', 'known_args', 'compute_shape')
-        return self.custom_hash(*tuple(extra_kwds[kwd] for kwd in kwds), 
+        return self.custom_hash(*tuple(extra_kwds[kwd] for kwd in kwds),
                 mesh_info_vars=extra_kwds['mesh_info_vars'],
                 expr_info=extra_kwds['expr_info'])
 
     def format_best_candidate(self, **kwds):
         from hysop.backend.device.opencl.opencl_kernel import OpenClKernelParameterYielder
 
-        (kernel, args_dict) = super(OpenClAutotunableCustomSymbolicKernel, 
+        (kernel, args_dict) = super(OpenClAutotunableCustomSymbolicKernel,
                 self).format_best_candidate(**kwds)
 
         extra_kwds = kwds['extra_kwds']
@@ -484,8 +484,8 @@ class OpenClAutotunableCustomSymbolicKernel(OpenClAutotunableKernel):
         make_gidx = extra_kwds['make_gidx']
         granularity = extra_kwds['granularity']
         parameter_make = extra_kwds['parameter_make']
-        
-        if (granularity>0): 
+
+        if (granularity>0):
             def granularity_iterator():
                 for idx in npw.ndindex(*iter_shape):
                     yield make_gidx(idx[::-1])
@@ -494,10 +494,10 @@ class OpenClAutotunableCustomSymbolicKernel(OpenClAutotunableKernel):
         # pop non constant input parameters
         for pname in parameter_make.keys():
             args_dict.pop(pname)
-        
+
         # return a method to update non constant input parameters
         def update_input_parameters():
-            return { pname:pmake() for (pname, pmake) in parameter_make.iteritems() }
+            return { pname:pmake() for (pname, pmake) in parameter_make.items() }
 
         return (kernel, args_dict, update_input_parameters)
 
diff --git a/hysop/backend/device/opencl/autotunable_kernels/remesh_dir.py b/hysop/backend/device/opencl/autotunable_kernels/remesh_dir.py
index 4f92191c477f63c9b5f83ac01799983ec7cfb07a..fa6230c9d981ff343c3ea1f07dc596b0df9f0be2 100644
--- a/hysop/backend/device/opencl/autotunable_kernels/remesh_dir.py
+++ b/hysop/backend/device/opencl/autotunable_kernels/remesh_dir.py
@@ -1,4 +1,5 @@
-from hysop.deps import warnings
+import warnings
+
 from hysop.tools.numpywrappers import npw
 from hysop.tools.types import check_instance
 from hysop.tools.misc import upper_pow2_or_3
@@ -145,7 +146,7 @@ class OpenClAutotunableDirectionalRemeshKernel(OpenClAutotunableKernel):
             for (i, dsinout) in enumerate(scalars_in):
                 mi = 'S{}_inout_mesh_info'.format(i)
                 mesh_info_vars[mi] = self.mesh_info(mi, dsinout.mesh)
-                for j in xrange(dsinout.nb_components):
+                for j in range(dsinout.nb_components):
                     prefix = 'S{}_{}_inout'.format(i, j)
                     kernel_args[prefix+'_base'] = dsinout.data[j].base_data
                     target_args[prefix+'_strides'] = make_strides(dsinout.data[j].strides,
@@ -160,7 +161,7 @@ class OpenClAutotunableDirectionalRemeshKernel(OpenClAutotunableKernel):
             for (i, dsin) in enumerate(scalars_in):
                 mi = 'S{}_in_mesh_info'.format(i)
                 mesh_info_vars[mi] = self.mesh_info(mi, dsin.mesh)
-                for j in xrange(dsin.nb_components):
+                for j in range(dsin.nb_components):
                     prefix = 'S{}_{}_in'.format(i, j)
                     kernel_args[prefix+'_base'] = dsin.data[j].base_data
                     target_args[prefix+'_strides'] = make_strides(dsin.data[j].strides, dsin.dtype)
@@ -172,7 +173,7 @@ class OpenClAutotunableDirectionalRemeshKernel(OpenClAutotunableKernel):
             for (i, dsout) in enumerate(scalars_out):
                 mi = 'S{}_out_mesh_info'.format(i)
                 mesh_info_vars[mi] = self.mesh_info(mi, dsout.mesh)
-                for j in xrange(dsout.nb_components):
+                for j in range(dsout.nb_components):
                     prefix = 'S{}_{}_out'.format(i, j)
                     kernel_args[prefix+'_base'] = dsout.data[j].base_data
                     target_args[prefix +
@@ -228,7 +229,7 @@ class OpenClAutotunableDirectionalRemeshKernel(OpenClAutotunableKernel):
 
         if is_inplace:
             for (i, dsinout) in enumerate(scalars_in):
-                for j in xrange(dsinout.nb_components):
+                for j in range(dsinout.nb_components):
                     prefix = 'S{}_{}_inout'.format(i, j)
                     args_mapping[prefix+'_base'] = (arg_index, cl.MemoryObjectHolder)
                     arg_index += 1
@@ -238,7 +239,7 @@ class OpenClAutotunableDirectionalRemeshKernel(OpenClAutotunableKernel):
                         arg_index += 2
         else:
             for (i, dsin) in enumerate(scalars_in):
-                for j in xrange(dsin.nb_components):
+                for j in range(dsin.nb_components):
                     prefix = 'S{}_{}_in'.format(i, j)
                     args_mapping[prefix+'_base'] = (arg_index, cl.MemoryObjectHolder)
                     arg_index += 1
@@ -247,7 +248,7 @@ class OpenClAutotunableDirectionalRemeshKernel(OpenClAutotunableKernel):
                         args_mapping[prefix+'_offset'] = (arg_index+1, offset_dtype)
                         arg_index += 2
             for (i, dsout) in enumerate(scalars_out):
-                for j in xrange(dsout.nb_components):
+                for j in range(dsout.nb_components):
                     prefix = 'S{}_{}_out'.format(i, j)
                     args_mapping[prefix+'_base'] = (arg_index, cl.MemoryObjectHolder)
                     arg_index += 1
diff --git a/hysop/backend/device/opencl/autotunable_kernels/transpose.py b/hysop/backend/device/opencl/autotunable_kernels/transpose.py
index 1dbd3fdb78a802c946817ba22653ef3b891f0d63..c64306d84644df968ec110615c080cbc62b68765 100644
--- a/hysop/backend/device/opencl/autotunable_kernels/transpose.py
+++ b/hysop/backend/device/opencl/autotunable_kernels/transpose.py
@@ -20,9 +20,9 @@ class OpenClAutotunableTransposeKernel(OpenClAutotunableKernel):
         nbytes = dtype.itemsize
         factor = 2.0 if is_inplace else 1.0
         max_cache_elems = int(self.usable_cache_bytes_per_wg / (factor*nbytes))
-        
+
         if len(tile_indexes)==2:
-            x = int(npw.sqrt(max_cache_elems)) 
+            x = int(npw.sqrt(max_cache_elems))
             #while x*(x+1) > max_cache_elems:
                 #x-=1
             # tile offsetting will just trigger the usual cache exception
@@ -30,15 +30,15 @@ class OpenClAutotunableTransposeKernel(OpenClAutotunableKernel):
         else:
             # no cache is used
             max_ts_cache = npw.inf
-        
+
         tile_shape = shape[tile_indexes]
         max_ts_shape = max(tile_shape)
-        
+
         max_tile_size = min(max_ts_cache, max_ts_shape)
         return max_tile_size
 
-    def autotune(self, is_inplace, 
-            input_buffer, output_buffer, 
+    def autotune(self, is_inplace,
+            input_buffer, output_buffer,
             axes, hardcode_arrays,
             name=None, **kwds):
         """Autotune this kernel with specified axes, inputs and outputs."""
@@ -51,19 +51,19 @@ class OpenClAutotunableTransposeKernel(OpenClAutotunableKernel):
         assert input_buffer.ndim  == output_buffer.ndim
         assert input_buffer.size  == output_buffer.size
         assert input_buffer.dtype == output_buffer.dtype
-        
+
         dim   = input_buffer.ndim
         size  = input_buffer.size
         shape = npw.asintarray(input_buffer.shape[::-1])
         dtype = input_buffer.dtype
         ctype = clTools.dtype_to_ctype(dtype)
-        
+
         # check if the permutation is valid
         assert dim>=2
         assert len(axes)==dim
         assert set(axes)==set(range(dim))
         assert axes != tuple(range(dim))
-        
+
         # check if is_inplace is allowed
         if is_inplace:
             can_compute_inplace  = (dim == 2)
@@ -72,12 +72,12 @@ class OpenClAutotunableTransposeKernel(OpenClAutotunableKernel):
             if not can_compute_inplace:
                 raise ValueError(msg)
             assert (input_buffer.data == output_buffer.data)
-        
+
         # get vector size for strides
         make_offset, offset_dtype = self.make_array_offset()
-        make_strides, strides_dtype = self.make_array_strides(dim, 
+        make_strides, strides_dtype = self.make_array_strides(dim,
                 hardcode_arrays=hardcode_arrays)
-        
+
         kernel_args = {}
         known_args = {}
         isolation_params = {}
@@ -87,62 +87,62 @@ class OpenClAutotunableTransposeKernel(OpenClAutotunableKernel):
             kernel_args['inout_base']    = output_buffer.base_data
             target_args['inout_strides'] = make_strides(output_buffer.strides, output_buffer.dtype)
             target_args['inout_offset']  = make_offset(output_buffer.offset, output_buffer.dtype)
-            isolation_params['inout_base'] = dict(count=output_buffer.size, 
+            isolation_params['inout_base'] = dict(count=output_buffer.size,
                     dtype=output_buffer.dtype, range=slice(output_buffer.size))
         else:
             kernel_args['in_base']     = input_buffer.base_data
             target_args['in_strides']  = make_strides(input_buffer.strides, input_buffer.dtype)
             target_args['in_offset']   = make_offset(input_buffer.offset, input_buffer.dtype)
-            isolation_params['in_base'] = dict(count=input_buffer.size, dtype=input_buffer.dtype, 
+            isolation_params['in_base'] = dict(count=input_buffer.size, dtype=input_buffer.dtype,
                                                     range=slice(input_buffer.size))
 
             kernel_args['out_base']    = output_buffer.base_data
             target_args['out_strides'] = make_strides(output_buffer.strides, output_buffer.dtype)
             target_args['out_offset']  = make_offset(output_buffer.offset, output_buffer.dtype)
-            isolation_params['out_base'] = dict(count=output_buffer.size, 
+            isolation_params['out_base'] = dict(count=output_buffer.size,
                                                     dtype=output_buffer.dtype, fill=0)
-        
+
         if (name is None):
             name = 'transpose_{}_[{}]_{}'.format(ctype,
                     ','.join(str(a) for a in axes),
                     'inplace' if is_inplace else 'out_of_place')
-        
-        # if last axe (contiguous axe) is permuted, 
+
+        # if last axe (contiguous axe) is permuted,
         # we need to use 2D tiles else we only need 1D tiles.
         (last_axe_permuted, work_dim, work_shape, tile_indices) = \
-            TransposeKernelGenerator.characterize_permutation(shape, axes, 
+            TransposeKernelGenerator.characterize_permutation(shape, axes,
                     self.max_device_work_dim())
-        
+
         # keyword arguments will be agregated into extra_kwds dictionnary
-        return super(OpenClAutotunableTransposeKernel, self).autotune(name=name, 
-                kernel_args=kernel_args, 
+        return super(OpenClAutotunableTransposeKernel, self).autotune(name=name,
+                kernel_args=kernel_args,
                 known_args=known_args, hardcode_arrays=hardcode_arrays,
                 offset_dtype=offset_dtype, strides_dtype=strides_dtype,
-                axes=axes, 
-                dtype=dtype, 
+                axes=axes,
+                dtype=dtype,
                 ctype=ctype,
                 shape=shape,
-                tile_indices=tile_indices, 
+                tile_indices=tile_indices,
                 work_dim=work_dim,
                 work_size=work_shape,
                 is_inplace=is_inplace,
                 isolation_params=isolation_params,
                 last_axe_permuted=last_axe_permuted, **kwds)
 
-    def compute_parameters(self, extra_kwds): 
+    def compute_parameters(self, extra_kwds):
         """Register extra parameters to optimize."""
         check_instance(extra_kwds, dict, keys=str)
         params = super(OpenClAutotunableTransposeKernel, self).compute_parameters(
                 extra_kwds=extra_kwds)
 
-        ## Register extra parameters   
+        ## Register extra parameters
         # compute max tile fize from device cache
         tile_indices = extra_kwds['tile_indices']
         dtype = extra_kwds['dtype']
         shape = extra_kwds['shape']
         is_inplace = extra_kwds['is_inplace']
         last_axe_permuted = extra_kwds['last_axe_permuted']
-        
+
         flag = self.autotuner_config.autotuner_flag
         vectorization = (1,)
         use_diagonal_coordinates = (False,)
@@ -154,12 +154,12 @@ class OpenClAutotunableTransposeKernel(OpenClAutotunableKernel):
         imax = int(math.log(max_tile_size, 2))
         jmax = int(math.log(max_tile_size, 3)) if flag in (AutotunerFlags.EXHAUSTIVE,) else 0
         tile_sizes = tuple( int((2**i)*(3**j))
-                for (i,j) in it.product(range(0,imax+1), range(0,jmax+1)))
+                for (i,j) in it.product(range(0, imax+1), range(0, jmax+1)))
         tile_sizes = (max_tile_size,) + tuple(sorted(tile_sizes, reverse=True))
         tile_sizes = tuple(filter(lambda x: (x>=max_tile_size//8) and (x<=max_tile_size), tile_sizes))
 
-        
-        params.register_extra_parameter('vectorization', vectorization) 
+
+        params.register_extra_parameter('vectorization', vectorization)
         params.register_extra_parameter('use_diagonal_coordinates', use_diagonal_coordinates)
         params.register_extra_parameter('tile_padding', tile_padding)
         params.register_extra_parameter('tile_size', tile_sizes)
@@ -168,21 +168,21 @@ class OpenClAutotunableTransposeKernel(OpenClAutotunableKernel):
 
     def compute_work_candidates(self, work_bounds, work_load, extra_parameters, extra_kwds):
         """
-        Configure work (global_size, local_size candidates) given a 
+        Configure work (global_size, local_size candidates) given a
         OpenClWorkBoundsConfiguration object and a work_load.
 
         Return a WorkConfiguration object.
-        
+
         Notes
         -----
         global_work_size can be ignored if it depends on local_work_size and will be set
         in self.compute_global_work_size().
         """
         work = super(OpenClAutotunableTransposeKernel, self).compute_work_candidates(
-                work_bounds=work_bounds, work_load=work_load, 
-                extra_parameters=extra_parameters, 
+                work_bounds=work_bounds, work_load=work_load,
+                extra_parameters=extra_parameters,
                 extra_kwds=extra_kwds)
-        
+
         axes          = extra_kwds['axes']
         work_dim      = extra_kwds['work_dim']
         shape         = extra_kwds['shape']
@@ -192,7 +192,7 @@ class OpenClAutotunableTransposeKernel(OpenClAutotunableKernel):
         max_tile_work_size = TransposeKernelGenerator.max_local_worksize(axes=axes, shape=shape,
                 work_dim=work_dim, tile_size=tile_size, vectorization=vectorization)
 
-        work.push_filter('max_tile_worksize', work.max_wi_sizes_filter, 
+        work.push_filter('max_tile_worksize', work.max_wi_sizes_filter,
                 max_work_item_sizes=max_tile_work_size)
 
         return work
@@ -209,13 +209,13 @@ class OpenClAutotunableTransposeKernel(OpenClAutotunableKernel):
         return gs
 
     def generate_kernel_src(self, global_work_size, local_work_size,
-        extra_parameters, extra_kwds, tuning_mode, dry_run, 
+        extra_parameters, extra_kwds, tuning_mode, dry_run,
         force_verbose=False, force_debug=False,
         return_codegen = False):
         """
         Generate kernel name and source code.
         """
-        
+
         ## Extract usefull variables
         axes       = extra_kwds['axes']
         ctype      = extra_kwds['ctype']
@@ -224,26 +224,26 @@ class OpenClAutotunableTransposeKernel(OpenClAutotunableKernel):
 
         ## Get compile time OpenCL known variables
         known_vars = super(OpenClAutotunableTransposeKernel, self).generate_kernel_src(
-                global_work_size=global_work_size, 
-                local_work_size=local_work_size, 
-                extra_parameters=extra_parameters, 
+                global_work_size=global_work_size,
+                local_work_size=local_work_size,
+                extra_parameters=extra_parameters,
                 extra_kwds=extra_kwds, tuning_mode=tuning_mode, dry_run=dry_run)
         known_vars.update(known_args)
         known_vars['shape'] = self.to_vecn(extra_kwds['shape'], 0)
-            
+
         ## Generate OpenCL source code
-        codegen = TransposeKernelGenerator(axes=axes, 
+        codegen = TransposeKernelGenerator(axes=axes,
             typegen=self.typegen, ctype=ctype, is_inplace=is_inplace,
             symbolic_mode=self.symbolic_mode, known_vars=known_vars, debug_mode=force_debug,
             tuning_mode=tuning_mode,
             **extra_parameters)
-        
+
         kernel_name = codegen.name
         kernel_src  = str(codegen)
 
         ## Check if cache would fit
         self.check_cache(codegen.required_workgroup_cache_size()[2])
-        
+
         return (kernel_name, kernel_src)
 
     def compute_args_mapping(self, extra_kwds, extra_parameters):
@@ -252,25 +252,25 @@ class OpenClAutotunableTransposeKernel(OpenClAutotunableKernel):
         strides_dtype   = extra_kwds['strides_dtype']
         hardcode_arrays = extra_kwds['hardcode_arrays']
         if extra_kwds['is_inplace']:
-            args_mapping['inout_base'] = (0, cl.MemoryObjectHolder) 
+            args_mapping['inout_base'] = (0, cl.MemoryObjectHolder)
             if not hardcode_arrays:
-                args_mapping['inout_strides'] = (1, strides_dtype) 
+                args_mapping['inout_strides'] = (1, strides_dtype)
                 args_mapping['inout_offset']  = (2, offset_dtype)
         else:
-            args_mapping['in_base'] = (0, cl.MemoryObjectHolder) 
+            args_mapping['in_base'] = (0, cl.MemoryObjectHolder)
             if not hardcode_arrays:
-                args_mapping['in_strides'] = (1, strides_dtype) 
+                args_mapping['in_strides'] = (1, strides_dtype)
                 args_mapping['in_offset']  = (2, offset_dtype)
-            args_mapping['out_base'] = (1 + 2*(not hardcode_arrays), cl.MemoryObjectHolder) 
+            args_mapping['out_base'] = (1 + 2*(not hardcode_arrays), cl.MemoryObjectHolder)
             if not hardcode_arrays:
-                args_mapping['out_strides'] = (4, strides_dtype) 
+                args_mapping['out_strides'] = (4, strides_dtype)
                 args_mapping['out_offset']  = (5, offset_dtype)
         return args_mapping
 
     def hash_extra_kwds(self, extra_kwds):
         """Hash extra_kwds dictionnary for caching purposes."""
-        return self.custom_hash(extra_kwds['ctype'], 
-                     extra_kwds['axes'], 
+        return self.custom_hash(extra_kwds['ctype'],
+                     extra_kwds['axes'],
                      extra_kwds['shape'],
                      extra_kwds['is_inplace'],
                      extra_kwds['known_args'])
diff --git a/hysop/backend/device/opencl/cl_src/advection/basic_rk2.cl b/hysop/backend/device/opencl/cl_src/advection/basic_rk2.cl
deleted file mode 100644
index 559638ff61919ef65d8a5e6c6013320f56976495..0000000000000000000000000000000000000000
--- a/hysop/backend/device/opencl/cl_src/advection/basic_rk2.cl
+++ /dev/null
@@ -1,72 +0,0 @@
-/**
- * @file advection/basic.cl
- * Advection function, vectorized version, no use of builtins functions.
- */
-
-float__N__ advection(uint i, float dt, __local float* velocity_cache, __constant struct AdvectionMeshInfo* mesh);
-
-
-/**
- * Compute the position of a particle with a RK2 integration scheme. Velocity is linearly interpolated from the global field.
- * Use of builtin OpenCL functions fma and mix. Computations through OpenCL vector types.
- *
- * @param i Particle index.
- * @param dt Time step.
- * @param dx Space step.
- * @param invdx 1/dx.
- * @param velocity_cache Local velocity cache.
- * @return Particle position.
- *
- * @remark <code>NB_I</code>, <code>NB_II</code>, <code>NB_III</code> : points number in directions from 1st varying index to last.
- * @remark <code>__N__</code> is expanded at compilation time by vector width.
- * @remark <code>__NN__</code> is expanded at compilation time by a sequence of integer for each vector component.
- * @see hysop.gpu.tools.parse_file
- */
-float__N__ advection(uint i, float dt, __local float* velocity_cache, __constant struct AdvectionMeshInfo* mesh)
-{
-  float__N__ v,        		/* Velocity at point */
-    vp,				/* Velocity at right point */
-    p,				/* Intermediary position */
-    c,				/* initial coordinate */
-    hdt = (float__N__)(0.5*dt);	/* half time step */
-  int__N__ i_ind,		/* Interpolation left point */
-    i_ind_p;			/* Interpolation right point */
-
-  c = (float__N__)((i+__NN__)*mesh->dx.x,
-		   );
-  c = c + mesh->min_position;
-
-#if !(ADVEC_IS_MULTISCALE)
-  // single-scale:
-  v = (float__N__)(velocity_cache[noBC_id(i+__NN__)],
-		   );
-#else
-  // multi-scale : interpolate v from velocity buffer (of length V_NB_I)
-  p = c * mesh->v_invdx;
-  i_ind = convert_int__N___rtn(p);
-  p = p - convert_float__N__(i_ind);
-  i_ind = i_ind + V_GHOSTS_NB;
-  i_ind_p = i_ind + 1;
-  v = (float__N__)(velocity_cache[noBC_id(i_ind.s__NN__)],
-		   );
-  vp = (float__N__)(velocity_cache[noBC_id(i_ind_p.s__NN__)],
-		    );
-  v = (p*(vp-v) + v);
-#endif
-
-  p = (c + hdt * v) * mesh->v_invdx;
-
-  i_ind = convert_int__N___rtn(p);
-  p = p - convert_float__N__(i_ind);
-
-  i_ind = ((i_ind + V_GHOSTS_NB + V_NB_I) % V_NB_I);
-  i_ind_p = ((i_ind + 1) % V_NB_I);
-
-  v = (float__N__)(velocity_cache[noBC_id(i_ind.s__NN__)],
-		   );
-  vp = (float__N__)(velocity_cache[noBC_id(i_ind_p.s__NN__)],
-		    );
-  v = (p*(vp-v) + v);
-
-  return c + dt * v;
-}
diff --git a/hysop/backend/device/opencl/cl_src/advection/basic_rk2_noVec.cl b/hysop/backend/device/opencl/cl_src/advection/basic_rk2_noVec.cl
deleted file mode 100644
index a635dd9d4d064de1157d853e2758b0f18d1eeb7f..0000000000000000000000000000000000000000
--- a/hysop/backend/device/opencl/cl_src/advection/basic_rk2_noVec.cl
+++ /dev/null
@@ -1,63 +0,0 @@
-/**
- * @file advection/basic_noVec.cl
- * Advection function, basic version
- */
-
-float advection(uint i, float dt, __local float* velocity_cache, __constant struct AdvectionMeshInfo* mesh);
-
-
-/**
- * Compute the position of a particle with a RK2 integration scheme. Velocity is linearly interpolated from the global field.
- * Use of builtin OpenCL functions fma and mix.
- *
- * @param i Particle index.
- * @param dt Time step.
- * @param dx Space step.
- * @param invdx 1/dx.
- * @param gvelo Global velocity field.
- * @return Particle position
- *
- * @remark NB_I, NB_II, NB_III : points number in directions from 1st varying index to last.
- */
-float advection(uint i, float dt, __local float* velocity_cache, __constant struct AdvectionMeshInfo* mesh)
-{
-  float v, 			/* Velocity at point */
-    vp,				/* Velocity at right point */
-    p,				/* Normalized intermediary position */
-    c = i * mesh->dx.x + mesh->min_position,  /* initial coordinate */
-    hdt = 0.5 * dt;		/* half time step */
-  int i_ind,			/* Interpolation left point */
-    i_ind_p;			/* Interpolation right point */
-
-#if !(ADVEC_IS_MULTISCALE)
-  // single-scale:
-  v = velocity_cache[noBC_id(i)]; 	/* k = k1 */
-#else
-  // multi-scale : interpolate v from velocity buffer (of length V_NB_I)
-  p = c * mesh->v_invdx;
-  i_ind = convert_int_rtn(p);
-  p = p - convert_float(i_ind);
-  i_ind = i_ind + V_GHOSTS_NB;
-  i_ind_p = i_ind + 1;
-  v = mix(velocity_cache[noBC_id(i_ind)],
-	  velocity_cache[noBC_id(i_ind_p)],p);
-#endif
-
-  p = (c + hdt*v) * mesh->v_invdx;
-
-  i_ind = convert_int_rtn(p);
-  p = p - convert_float(i_ind);
-
-  i_ind = ((i_ind + V_GHOSTS_NB + V_NB_I) % V_NB_I);
-  i_ind_p = ((i_ind + 1) % V_NB_I);
-
-  v = velocity_cache[noBC_id(i_ind)];
-  vp = velocity_cache[noBC_id(i_ind_p)];
-  v = (p*(vp-v) + v);
-
-  return c + dt * v;
-}
-/* Operations number :  */
-/*   - 2 positions = 2 * 2 */
-/*   - 1 iterpolation = 9 */
-/* Total = 13 */
diff --git a/hysop/backend/device/opencl/cl_src/advection/basic_rk4.cl b/hysop/backend/device/opencl/cl_src/advection/basic_rk4.cl
deleted file mode 100644
index 853873ba47e171acfffef036b37c7eafd72388ac..0000000000000000000000000000000000000000
--- a/hysop/backend/device/opencl/cl_src/advection/basic_rk4.cl
+++ /dev/null
@@ -1,109 +0,0 @@
-/**
- * @file basic_rk4.cl
- * Advection function (RK4 scheme), vectorized version, no use of builtins functions.
- */
-
-float__N__ advection(uint i, float dt, __local float* velocity_cache, __constant struct AdvectionMeshInfo* mesh);
-
-
-/**
- * Compute the position of a particle with a RK2 integration scheme. Velocity is linearly interpolated from the global field.
- * Use of builtin OpenCL functions fma and mix. Computations through OpenCL vector types.
- *
- * @param i Particle index.
- * @param dt Time step.
- * @param dx Space step.
- * @param invdx 1/dx.
- * @param velocity_cache Local velocity cache.
- * @return Particle position.
- *
- * @remark <code>NB_I</code>, <code>NB_II</code>, <code>NB_III</code> : points number in directions from 1st varying index to last.
- * @remark <code>__N__</code> is expanded at compilation time by vector width.
- * @remark <code>__NN__</code> is expanded at compilation time by a sequence of integer for each vector component.
- * @see hysop.gpu.tools.parse_file
- */
-float__N__ advection(uint i, float dt, __local float* velocity_cache, __constant struct AdvectionMeshInfo* mesh)
-{
-  float__N__ v,        		/* Velocity at point */
-    vp,				/* Velocity at right point */
-    p,				/* Intermediary position */
-    k,				/* rk averaged velocity */
-    kn,				/* rk intermediate velocity */
-    c,				/* initial coordinate */
-    hdt = (float__N__)(0.5*dt);	/* half time step */
-  int__N__ i_ind,		/* Interpolation left point */
-    i_ind_p;			/* Interpolation right point */
-
-  c = (float__N__)((i+__NN__)*mesh->dx.x,
-		   );
-  c = c + mesh->min_position;
-
-  //k1 = f(t,y)
-  //k2 = f(t + dt/2, y + dt/2 * k1)
-  //k3 = f(t + dt/2, y + dt/2 * k2)
-  //k4 = f(t + dt, y + dt * k3)
-  //result = y + dt/6( k1 + 2 * k2 + 2 * k3 + k4)
-
-#if !(ADVEC_IS_MULTISCALE)
-  // single-scale:
-  k = (float__N__)(velocity_cache[noBC_id(i+__NN__)],
-		   );
-#else
-  // multi-scale : interpolate v from velocity buffer (of length V_NB_I)
-  p = c * mesh->v_invdx;
-  i_ind = convert_int__N___rtn(p);
-  p = p - convert_float__N__(i_ind);
-  i_ind = i_ind + V_GHOSTS_NB;
-  i_ind_p = i_ind + 1;
-  v = (float__N__)(velocity_cache[noBC_id(i_ind.s__NN__)],
-		   );
-  vp = (float__N__)(velocity_cache[noBC_id(i_ind_p.s__NN__)],
-		    );
-  k = p*(vp-v) + v;
-#endif
-
-  p = (c + hdt * k) * mesh->v_invdx;
-  i_ind = convert_int__N___rtn(p);
-  p = p - convert_float__N__(i_ind);
-
-  i_ind = ((i_ind + V_GHOSTS_NB + V_NB_I) % V_NB_I);
-  i_ind_p = ((i_ind + 1) % V_NB_I);
-  v = (float__N__)(velocity_cache[noBC_id(i_ind.s__NN__)],
-		   );
-  vp = (float__N__)(velocity_cache[noBC_id(i_ind_p.s__NN__)],
-		    );
-  kn = p*(vp-v) + v;
-
-  k += 2.0 * kn;
-
-  p = (c + hdt * kn) * mesh->v_invdx;
-  i_ind = convert_int__N___rtn(p);
-  p = p - convert_float__N__(i_ind);
-
-  i_ind = ((i_ind + V_GHOSTS_NB + V_NB_I) % V_NB_I);
-  i_ind_p = ((i_ind + 1) % V_NB_I);
-  v = (float__N__)(velocity_cache[noBC_id(i_ind.s__NN__)],
-		   );
-  vp = (float__N__)(velocity_cache[noBC_id(i_ind_p.s__NN__)],
-		    );
-  kn = p*(vp-v) + v;
-
-  k += 2.0 * kn;
-
-  p = (c + dt * kn) * mesh->v_invdx;
-  i_ind = convert_int__N___rtn(p);
-  p = p - convert_float__N__(i_ind);
-
-  i_ind = ((i_ind + V_GHOSTS_NB + V_NB_I) % V_NB_I);
-  i_ind_p = ((i_ind + 1) % V_NB_I);
-  v = (float__N__)(velocity_cache[noBC_id(i_ind.s__NN__)],
-		   );
-  vp = (float__N__)(velocity_cache[noBC_id(i_ind_p.s__NN__)],
-		    );
-  kn = p*(vp-v) + v;
-
-  k += kn;
-
-
-  return c + (float__N__)(dt *0.16666666666666666) * k;
-}
diff --git a/hysop/backend/device/opencl/cl_src/advection/basic_rk4_noVec.cl b/hysop/backend/device/opencl/cl_src/advection/basic_rk4_noVec.cl
deleted file mode 100644
index 7b7d4b2752c7611d3c58c0eddfd08ca78fdc58a6..0000000000000000000000000000000000000000
--- a/hysop/backend/device/opencl/cl_src/advection/basic_rk4_noVec.cl
+++ /dev/null
@@ -1,93 +0,0 @@
-/**
- * @file basic_rk4_noVec.cl
- * Advection function (RK4 scheme), basic version
- */
-
-float advection(uint i, float dt, __local float* velocity_cache, __constant struct AdvectionMeshInfo* mesh);
-
-
-/**
- * Compute the position of a particle with a RK2 integration scheme. Velocity is linearly interpolated from the global field.
- * Use of builtin OpenCL functions fma and mix.
- *
- * @param i Particle index.
- * @param dt Time step.
- * @param dx Space step.
- * @param invdx 1/dx.
- * @param velocity_cache Local velocity field.
- * @return Particle position
- *
- * @remark NB_I, NB_II, NB_III : points number in directions from 1st varying index to last.
- */
-float advection(uint i, float dt, __local float* velocity_cache, __constant struct AdvectionMeshInfo* mesh)
-{
-  float v, 			/* Velocity at point */
-    vp,				/* Velocity at right point */
-    p,				/* Intermediary position */
-    k,				/* rk averaged velocity */
-    kn,				/* rk intermediate velocity */
-    c = i * mesh->dx.x + mesh->min_position,  /* initial coordinate */
-    hdt = 0.5 * dt;		/* half time step */
-  int i_ind,			/* Interpolation left point */
-    i_ind_p;			/* Interpolation right point */
-
-  //k1 = f(t,y)
-  //k2 = f(t + dt/2, y + dt/2 * k1)
-  //k3 = f(t + dt/2, y + dt/2 * k2)
-  //k4 = f(t + dt, y + dt * k3)
-  //result = y + dt/6( k1 + 2 * k2 + 2 * k3 + k4)
-
-#if !(ADVEC_IS_MULTISCALE)
-  // single-scale:
-  k = velocity_cache[noBC_id(i)]; 	/* k = k1 */
-#else
-  // multi-scale : interpolate v from velocity buffer (of length V_NB_I)
-  p = c * mesh->v_invdx;
-  i_ind = convert_int_rtn(p);
-  p = p - convert_float(i_ind);
-  i_ind = i_ind + V_GHOSTS_NB;
-  i_ind_p = i_ind + 1;
-  k = mix(velocity_cache[noBC_id(i_ind)],
-	  velocity_cache[noBC_id(i_ind_p)],p);
-#endif
-
-  p = (c + hdt * k) * mesh->v_invdx;
-  i_ind = convert_int_rtn(p);
-  p = p - convert_float(i_ind);
-  i_ind = ((i_ind + V_GHOSTS_NB + V_NB_I) % V_NB_I);
-  i_ind_p = ((i_ind + 1) % V_NB_I);
-  v = velocity_cache[noBC_id(i_ind)];
-  vp = velocity_cache[noBC_id(i_ind_p)];
-  kn = p*(vp-v) + v;		/* kn = k2 */
-
-  k += 2.0 * kn;		/* k = k1 + 2*k2 */
-
-  p = (c + hdt * kn) * mesh->v_invdx;
-  i_ind = convert_int_rtn(p);
-  p = p - convert_float(i_ind);
-  i_ind = ((i_ind + V_GHOSTS_NB + V_NB_I) % V_NB_I);
-  i_ind_p = ((i_ind + 1) % V_NB_I);
-  v = velocity_cache[noBC_id(i_ind)];
-  vp = velocity_cache[noBC_id(i_ind_p)];
-  kn = p*(vp-v) + v;		/* kn = k3 */
-
-  k += 2.0 * kn;		/* k = k1 + 2*k2 + 2*k3 */
-
-  p = (c + dt * kn) * mesh->v_invdx;
-  i_ind = convert_int_rtn(p);
-  p = p - convert_float(i_ind);
-  i_ind = ((i_ind + V_GHOSTS_NB + V_NB_I) % V_NB_I);
-  i_ind_p = ((i_ind + 1) % V_NB_I);
-  v = velocity_cache[noBC_id(i_ind)];
-  vp = velocity_cache[noBC_id(i_ind_p)];
-  kn = p*(vp-v) + v;		/* kn = k4 */
-
-  k += kn;			/* k = k1 + 2*k2 + 2*k3 + k4 */
-
-  return c + dt * k*0.16666666666666666;
-}
-/* Operations number :  */
-/*   - 4 positions = 4 * 2 + 3 */
-/*   - 3 iterpolation = 3 * 9 */
-/*   - velocity weights = 5*/
-/* Total = 41 */
diff --git a/hysop/backend/device/opencl/cl_src/advection/builtin_euler_noVec.cl b/hysop/backend/device/opencl/cl_src/advection/builtin_euler_noVec.cl
deleted file mode 100644
index aecb5ff383230c6c454733a3ebb564acba7c7e7f..0000000000000000000000000000000000000000
--- a/hysop/backend/device/opencl/cl_src/advection/builtin_euler_noVec.cl
+++ /dev/null
@@ -1,51 +0,0 @@
-/**
- * @file builtin_noVec.cl
- * Advection function, basic version
- */
-
-float advection(uint i, float dt, __local float* velocity_cache, __constant struct AdvectionMeshInfo* mesh);
-
-
-/**
- * Compute the position of a particle with a RK2 integration scheme. Velocity is linearly interpolated from the global field.
- * Use of builtin OpenCL functions fma and mix.
- *
- * @param i Particle index.
- * @param dt Time step.
- * @param dx Space step.
- * @param invdx 1/dx.
- * @param velocity_cache Local velocity field.
- * @return Particle position
- *
- * @remark NB_I, NB_II, NB_III : points number in directions from 1st varying index to last.
- */
-float advection(uint i, float dt, __local float* velocity_cache, __constant struct AdvectionMeshInfo* mesh)
-{
-  float v, 			/* Velocity at point */
-    c = fma(i, mesh->dx.x, mesh->min_position);	/* initial coordinate */
-
-#if !(ADVEC_IS_MULTISCALE)
-  // single-scale:
-  v = velocity_cache[noBC_id(i)];
-#else
-  float p;
-  int i_ind,			/* Interpolation left point */
-    i_ind_p;			/* Interpolation right point */
-  // multi-scale : interpolate v from velocity buffer (of length V_NB_I)
-  p = c * mesh->v_invdx;
-  i_ind = convert_int_rtn(p);
-  p = p - convert_float(i_ind);
-  i_ind = i_ind + V_GHOSTS_NB;
-  i_ind_p = i_ind + 1;
-  v = mix(velocity_cache[noBC_id(i_ind)],
-	  velocity_cache[noBC_id(i_ind_p)],p);
-#endif
-
-  return fma(dt, v, c);
-}
-/* Operations number :  */
-/*   - 3 positions = 3 * fma */
-/*   - 1 iterpolation = 2 + 1 * mix */
-/*   - dt/2 = 1 */
-/* 1mix <=> 3flop : mix(x,y,a) = x+(y-x)*a */
-/* Total = 3 fma + 1 mix + 3 = 12flop */
diff --git a/hysop/backend/device/opencl/cl_src/advection/builtin_rk2.cl b/hysop/backend/device/opencl/cl_src/advection/builtin_rk2.cl
deleted file mode 100644
index c45d5cee4b78861d35ecb891efc51a3116bbc1af..0000000000000000000000000000000000000000
--- a/hysop/backend/device/opencl/cl_src/advection/builtin_rk2.cl
+++ /dev/null
@@ -1,68 +0,0 @@
-/**
- * @file builtin.cl
- * Advection function, vectorized version.
- */
-
-float__N__ advection(uint i, float dt, __local float* velocity_cache, __constant struct AdvectionMeshInfo* mesh);
-
-
-/**
- * Compute the position of a particle with a RK2 integration scheme. Velocity is linearly interpolated from the global field.
- * Use of builtin OpenCL functions fma and mix. Computations through OpenCL vector types.
- *
- * @param i Particle index.
- * @param dt Time step.
- * @param dx Space step.
- * @param invdx 1/dx.
- * @param velocity_cache Local velocity cache.
- * @return Particle position.
- *
- * @remark <code>NB_I</code>, <code>NB_II</code>, <code>NB_III</code> : points number in directions from 1st varying index to last.
- * @remark <code>__N__</code> is expanded at compilation time by vector width.
- * @remark <code>__NN__</code> is expanded at compilation time by a sequence of integer for each vector component.
- * @see hysop.gpu.tools.parse_file
- */
-float__N__ advection(uint i, float dt, __local float* velocity_cache, __constant struct AdvectionMeshInfo* mesh)
-{
-  float__N__ v,        		/* Velocity at point */
-    vp,				/* Velocity at right point */
-    p,				/* Intermediary position */
-    c,				/* initial coordinate */
-    hdt = (float__N__)(0.5*dt);	/* half time step */
-  int__N__ i_ind,		/* Interpolation left point */
-    i_ind_p;			/* Interpolation right point */
-
-  c = (float__N__)((i+__NN__)*mesh->dx.x,
-		       );
-  c = c + mesh->min_position;
-
-#if !(ADVEC_IS_MULTISCALE)
-  // single-scale:
-  v = (float__N__)(velocity_cache[noBC_id(i+__NN__)],
-		   );
-  p = fma(hdt, v, c) * mesh->v_invdx;
-#else
-  // multi-scale : interpolate v from velocity buffer (of length V_NB_I)
-  p = c * mesh->v_invdx;
-  i_ind = convert_int__N___rtn(p);
-  p = p - convert_float__N__(i_ind);
-  i_ind = i_ind + V_GHOSTS_NB;
-  i_ind_p = i_ind + 1;
-  v = (float__N__)(velocity_cache[noBC_id(i_ind.s__NN__)],
-		   );
-  vp = (float__N__)(velocity_cache[noBC_id(i_ind_p.s__NN__)],
-		    );
-  p = fma(hdt, mix(v,vp,p), c) * v_invdx;
-#endif
-
-  i_ind = convert_int__N___rtn(p);
-  p = p - convert_float__N__(i_ind);
-
-  i_ind = ((i_ind + V_GHOSTS_NB + V_NB_I) % V_NB_I);
-  i_ind_p = ((i_ind + 1) % V_NB_I);
-  v = (float__N__)(velocity_cache[noBC_id(i_ind.s__NN__)],
-		   );
-  vp = (float__N__)(velocity_cache[noBC_id(i_ind_p.s__NN__)],
-		    );
-  return fma(mix(v,vp,p),dt,c);
-}
diff --git a/hysop/backend/device/opencl/cl_src/advection/builtin_rk2_noVec.cl b/hysop/backend/device/opencl/cl_src/advection/builtin_rk2_noVec.cl
deleted file mode 100644
index 415eec1d1b257836c576b6928015c5dabb2c41a2..0000000000000000000000000000000000000000
--- a/hysop/backend/device/opencl/cl_src/advection/builtin_rk2_noVec.cl
+++ /dev/null
@@ -1,62 +0,0 @@
-/**
- * @file builtin_noVec.cl
- * Advection function, basic version
- */
-
-float advection(uint i, float dt, __local float* velocity_cache, __constant struct AdvectionMeshInfo* mesh);
-
-
-/**
- * Compute the position of a particle with a RK2 integration scheme. Velocity is linearly interpolated from the global field.
- * Use of builtin OpenCL functions fma and mix.
- *
- * @param i Particle index.
- * @param dt Time step.
- * @param dx Space step.
- * @param invdx 1/dx.
- * @param velocity_cache Local velocity field.
- * @return Particle position
- *
- * @remark NB_I, NB_II, NB_III : points number in directions from 1st varying index to last.
- */
-float advection(uint i, float dt, __local float* velocity_cache, __constant struct AdvectionMeshInfo* mesh)
-{
-  float v, 			/* Velocity at point */
-    p,				/* Intermediary position */
-    c = i*mesh->dx.x, //fma(i, mesh->dx.x, mesh->min_position),	/* initial coordinate */
-    hdt = 0.5 * dt;		/* half time step */
-  int i_ind,			/* Interpolation left point */
-    i_ind_p;			/* Interpolation right point */
-
-#if !(ADVEC_IS_MULTISCALE)
-  // single-scale:
-  v = velocity_cache[noBC_id(i)];
-#else
-  // multi-scale : interpolate v from velocity buffer (of length V_NB_I)
-  p = c * mesh->v_invdx;
-  i_ind = convert_int_rtn(p);
-  p = p - convert_float(i_ind);
-  i_ind = i_ind + V_GHOSTS_NB;
-  i_ind_p = i_ind + 1;
-  v = mix(velocity_cache[noBC_id(i_ind)],
-	  velocity_cache[noBC_id(i_ind_p)],p);
-#endif
-
-  p = fma(hdt, v, c) * mesh->v_invdx;
-
-  i_ind = convert_int_rtn(p);
-  p = p - convert_float(i_ind);
-
-  i_ind = ((i_ind + V_GHOSTS_NB + V_NB_I) % V_NB_I);
-  i_ind_p = ((i_ind + 1) % V_NB_I);
-  v = mix(velocity_cache[noBC_id(i_ind)],
-	  velocity_cache[noBC_id(i_ind_p)],p);
-
-  return fma(dt, v, c) + mesh->min_position;
-}
-/* Operations number :  */
-/*   - 3 positions = 3 * fma */
-/*   - 1 iterpolation = 2 + 1 * mix */
-/*   - dt/2 = 1 */
-/* 1mix <=> 3flop : mix(x,y,a) = x+(y-x)*a */
-/* Total = 3 fma + 1 mix + 3 = 12flop */
diff --git a/hysop/backend/device/opencl/cl_src/advection/builtin_rk4.cl b/hysop/backend/device/opencl/cl_src/advection/builtin_rk4.cl
deleted file mode 100644
index 2dcc7dc1e77817fa753a0cc48a15929f03e360f9..0000000000000000000000000000000000000000
--- a/hysop/backend/device/opencl/cl_src/advection/builtin_rk4.cl
+++ /dev/null
@@ -1,103 +0,0 @@
-/**
- * @file builtin_rk4.cl
- * Advection function, vectorized version.
- */
-
-float__N__ advection(uint i, float dt, __local float* velocity_cache, __constant struct AdvectionMeshInfo* mesh);
-
-
-/**
- * Compute the position of a particle with a RK2 integration scheme. Velocity is linearly interpolated from the global field.
- * Use of builtin OpenCL functions fma and mix. Computations through OpenCL vector types.
- *
- * @param i Particle index.
- * @param dt Time step.
- * @param dx Space step.
- * @param invdx 1/dx.
- * @param velocity_cache Local velocity cache.
- * @return Particle position.
- *
- * @remark <code>NB_I</code>, <code>NB_II</code>, <code>NB_III</code> : points number in directions from 1st varying index to last.
- * @remark <code>__N__</code> is expanded at compilation time by vector width.
- * @remark <code>__NN__</code> is expanded at compilation time by a sequence of integer for each vector component.
- * @see hysop.gpu.tools.parse_file
- */
-float__N__ advection(uint i, float dt, __local float* velocity_cache, __constant struct AdvectionMeshInfo* mesh)
-{
-  float__N__ v,        		/* Velocity at point */
-    vp,				/* Velocity at right point */
-    p,				/* Intermediary position */
-    k,				/* rk averaged velocity */
-    kn,				/* rk intermediate velocity */
-    c,				/* initial coordinate */
-    hdt = (float__N__)(0.5*dt);	/* half time step */
-  int__N__ i_ind,		/* Interpolation left point */
-    i_ind_p;			/* Interpolation right point */
-
-  c = (float__N__)((i+__NN__)*mesh->dx.x,
-		       );
-  c = c + mesh->min_position;
-
-#if !(ADVEC_IS_MULTISCALE)
-  // single-scale:
-  v = (float__N__)(velocity_cache[noBC_id(i+__NN__)],
-		   );
-  p = fma(hdt, v, c) * mesh->v_invdx;
-#else
-  // multi-scale : interpolate v from velocity buffer (of length V_NB_I)
-  p = c * mesh->v_invdx;
-  i_ind = convert_int__N___rtn(p);
-  p = p - convert_float__N__(i_ind);
-  i_ind = i_ind + V_GHOSTS_NB;
-  i_ind_p = i_ind + 1;
-  v = (float__N__)(velocity_cache[noBC_id(i_ind.s__NN__)],
-		   );
-  vp = (float__N__)(velocity_cache[noBC_id(i_ind_p.s__NN__)],
-		    );
-  p = fma(hdt, mix(v,vp,p), c) * mesh->v_invdx;
-#endif
-
-  i_ind = convert_int__N___rtn(p);
-  p = p - convert_float__N__(i_ind);
-
-  i_ind = ((i_ind + V_GHOSTS_NB + V_NB_I) % V_NB_I);
-  i_ind_p = ((i_ind + 1) % V_NB_I);
-  v = (float__N__)(velocity_cache[noBC_id(i_ind.s__NN__)],
-		   );
-  vp = (float__N__)(velocity_cache[noBC_id(i_ind_p.s__NN__)],
-		    );
-  kn = mix(v,vp,p);
-
-  k += 2.0 * kn;
-
-  p = fma(hdt, kn, c) * mesh->v_invdx;
-  i_ind = convert_int__N___rtn(p);
-  p = p - convert_float__N__(i_ind);
-
-  i_ind = ((i_ind + V_GHOSTS_NB + V_NB_I) % V_NB_I);
-  i_ind_p = ((i_ind + 1) % V_NB_I);
-  v = (float__N__)(velocity_cache[noBC_id(i_ind.s__NN__)],
-		   );
-  vp = (float__N__)(velocity_cache[noBC_id(i_ind_p.s__NN__)],
-		    );
-  kn = mix(v,vp,p);
-
-  k += 2.0 * kn;
-
-  p = fma((float__N__)(dt), kn, c) * mesh->v_invdx;
-  i_ind = convert_int__N___rtn(p);
-  p = p - convert_float__N__(i_ind);
-
-  i_ind = ((i_ind + V_GHOSTS_NB + V_NB_I) % V_NB_I);
-  i_ind_p = ((i_ind + 1) % V_NB_I);
-  v = (float__N__)(velocity_cache[noBC_id(i_ind.s__NN__)],
-		   );
-  vp = (float__N__)(velocity_cache[noBC_id(i_ind_p.s__NN__)],
-		    );
-  kn = mix(v,vp,p);
-
-  k += kn;
-
-
-  return fma(k,(float__N__)(dt*0.16666666666666666),c);
-}
diff --git a/hysop/backend/device/opencl/cl_src/advection/builtin_rk4_noVec.cl b/hysop/backend/device/opencl/cl_src/advection/builtin_rk4_noVec.cl
deleted file mode 100644
index 170fa90c81026c26b8e61396a0e14c8b471d4d91..0000000000000000000000000000000000000000
--- a/hysop/backend/device/opencl/cl_src/advection/builtin_rk4_noVec.cl
+++ /dev/null
@@ -1,90 +0,0 @@
-/**
- * @file builtin_rk4_noVec.cl
- * Advection function, basic version
- */
-
-float advection(uint i, float dt, __local float* velocity_cache, __constant struct AdvectionMeshInfo* mesh);
-
-
-/**
- * Compute the position of a particle with a RK2 integration scheme. Velocity is linearly interpolated from the global field.
- * Use of builtin OpenCL functions fma and mix.
- *
- * @param i Particle index.
- * @param dt Time step.
- * @param dx Space step.
- * @param invdx 1/dx.
- * @param velocity_cache Local velocity field.
- * @return Particle position
- *
- * @remark NB_I, NB_II, NB_III : points number in directions from 1st varying index to last.
- */
-float advection(uint i, float dt, __local float* velocity_cache, __constant struct AdvectionMeshInfo* mesh)
-{
-  float p,		       /* Intermediary position */
-    k,			       /* rk averaged velocity */
-    kn,			       /* rk intermediate velocity */
-    c = fma(i, mesh->dx.x, mesh->min_position), /* initial coordinate */
-    hdt = 0.5 * dt;	       /* half time step */
-  int i_ind,		       /* Interpolation left point */
-    i_ind_p;		       /* Interpolation right point */
-
-  //k1 = f(t,y)
-  //k2 = f(t + dt/2, y + dt/2 * k1)
-  //k3 = f(t + dt/2, y + dt/2 * k2)
-  //k4 = f(t + dt, y + dt * k3)
-  //result = y + dt/6( k1 + 2 * k2 + 2 * k3 + k4)
-
-#if !(ADVEC_IS_MULTISCALE)
-  // single-scale:
-  k = velocity_cache[noBC_id(i)]; 	/* k = k1 */
-#else
-  // multi-scale : interpolate v from velocity buffer (of length V_NB_I)
-  p = c * mesh->v_invdx;
-  i_ind = convert_int_rtn(p);
-  p = p - convert_float(i_ind);
-  i_ind = i_ind + V_GHOSTS_NB;
-  i_ind_p = i_ind + 1;
-  k = mix(velocity_cache[noBC_id(i_ind)],
-	  velocity_cache[noBC_id(i_ind_p)],p);
-#endif
-
-  p = fma(hdt, k, c) * mesh->v_invdx;
-  i_ind = convert_int_rtn(p);
-  p = p - convert_float(i_ind);
-  i_ind = ((i_ind + V_GHOSTS_NB + V_NB_I) % V_NB_I);
-  i_ind_p = ((i_ind + 1) % V_NB_I);
-  kn = mix(velocity_cache[noBC_id(i_ind)],
-	   velocity_cache[noBC_id(i_ind_p)],p);		/* kn = k2 */
-
-  k += 2.0 * kn;		/* k = k1 + 2*k2 */
-
-  p = fma(hdt, kn, c) * mesh->v_invdx;
-  i_ind = convert_int_rtn(p);
-  p = p - convert_float(i_ind);
-  i_ind = ((i_ind + V_GHOSTS_NB + V_NB_I) % V_NB_I);
-  i_ind_p = ((i_ind + 1) % V_NB_I);
-  kn = mix(velocity_cache[noBC_id(i_ind)],
-	   velocity_cache[noBC_id(i_ind_p)],p);		/* kn = k3 */
-
-  k += 2.0 * kn;		/* k = k1 + 2*k2 + 2*k3 */
-
-  p = fma(dt, kn, c) * mesh->v_invdx;
-  i_ind = convert_int_rtn(p);
-  p = p - convert_float(i_ind);
-  i_ind = ((i_ind + V_GHOSTS_NB + V_NB_I) % V_NB_I);
-  i_ind_p = ((i_ind + 1) % V_NB_I);
-  kn = mix(velocity_cache[noBC_id(i_ind)],
-	   velocity_cache[noBC_id(i_ind_p)],p);		/* kn = k4 */
-
-  k += kn;			/* k = k1 + 2*k2 + 2*k3 + k4 */
-
-  return fma(k, dt*0.16666666666666666, c);
-}
-
-/* Operations number :  */
-/*   - 5 positions = 5 * fma*/
-/*   - 3 iterpolation = 3 * (1 * mix + 2) */
-/*   - velocity weights = 7 */
-/*   - dt/2, dt/6 = 2 */
-/* Total = 5 fma + 3 mix + 13 = 32flop */
diff --git a/hysop/backend/device/opencl/cl_src/advection/comm_basic_rk2_noVec.cl b/hysop/backend/device/opencl/cl_src/advection/comm_basic_rk2_noVec.cl
deleted file mode 100644
index 037d19052fe5b1627b36080d852a9100cbd024bb..0000000000000000000000000000000000000000
--- a/hysop/backend/device/opencl/cl_src/advection/comm_basic_rk2_noVec.cl
+++ /dev/null
@@ -1,74 +0,0 @@
-/**
- * @file advection/comm_basic_noVec.cl
- * Advection function, basic version, mpi communications on the host side
- */
-
-float advection(uint i, float dt, __local float* velocity_cache, __constant struct AdvectionMeshInfo* mesh);
-
-
-/**
- * Compute the position of a particle with a RK2 integration scheme. Velocity is linearly interpolated from the global field.
- * Use of builtin OpenCL functions fma and mix.
- *
- * @param i Particle index.
- * @param dt Time step.
- * @param dx Space step.
- * @param invdx 1/dx.
- * @param velocity_cache Local velocity field.
- * @return Particle position
- *
- * @remark NB_I, NB_II, NB_III : points number in directions from 1st varying index to last.
- * @remark T_NB_I: global points number in the 1st direction (mpi cutted direction)
- * @remark START_INDEX Global staring index for computational points
- * @remark STOP_INDEX Global stop index for computational points
- */
-float advection(uint i, float dt, __local float* velocity_cache, __constant struct AdvectionMeshInfo* mesh)
-{
-  float v, 			/* Velocity at point */
-    vp,				/* Velocity at right point */
-    p,				/* Normalized intermediary position */
-    c = i * mesh->dx.x + mesh->min_position, /* initial coordinate */
-    hdt = 0.5 * dt;		/* half time step */
-  int i_ind,			/* Interpolation left point */
-    i_ind_p;			/* Interpolation right point */
-
-#if (V_NB_I-2*V_GHOSTS_NB) == NB_I
-  // single-scale:
-  v = velocity_cache[noBC_id(i + V_GHOSTS_NB)]; 	/* k = k1 */
-#else
-  // multi-scale : interpolate v from velocity buffer (of length V_NB_I)
-  p = c * mesh->v_invdx;
-  i_ind = convert_int_rtn(p);
-  p = p - convert_float(i_ind);
-  i_ind = i_ind - (V_START_INDEX-V_GHOSTS_NB);
-  i_ind_p = i_ind + 1;
-  v = mix(velocity_cache[noBC_id(i_ind)],
-	  velocity_cache[noBC_id(i_ind_p)],p);
-#endif
-  p = (c + hdt*v) * mesh->v_invdx;
-
-  i_ind = convert_int_rtn(p);
-  if( i_ind>=(V_START_INDEX-MS_INTERPOL_SHIFT) && i_ind < (V_STOP_INDEX-V_GHOSTS_NB))
-    {
-      p = p - convert_float(i_ind);
-
-      i_ind = i_ind - (V_START_INDEX-V_GHOSTS_NB);
-      i_ind_p = i_ind + 1;
-
-      v = velocity_cache[noBC_id(i_ind)];
-      vp = velocity_cache[noBC_id(i_ind_p)];
-      v = (p*(vp-v) + v);
-
-      p = c + dt * v;
-    }
-  else
-    {
-      p = (1000*T_NB_I)*1.0 + p;
-    }
-
-  return p;
-}
-/* Operations number :  */
-/*   - 2 positions = 2 * 2 */
-/*   - 1 iterpolation = 9 */
-/* Total = 13 */
diff --git a/hysop/backend/device/opencl/cl_src/advection/comm_builtin_rk2_noVec.cl b/hysop/backend/device/opencl/cl_src/advection/comm_builtin_rk2_noVec.cl
deleted file mode 100644
index a9a717f8088de07317e5eac2c46d787442880f0f..0000000000000000000000000000000000000000
--- a/hysop/backend/device/opencl/cl_src/advection/comm_builtin_rk2_noVec.cl
+++ /dev/null
@@ -1,72 +0,0 @@
-/**
- * @file comm_builtin_noVec.cl
- * Advection function, basic version, mpi communications on the host side
- */
-
-float advection(uint i, float dt, __local float* velocity_cache, __constant struct AdvectionMeshInfo* mesh);
-
-
-/**
- * Compute the position of a particle with a RK2 integration scheme. Velocity is linearly interpolated from the global field.
- * Use of builtin OpenCL functions fma and mix.
- *
- * @param i Particle index (without velocity ghosts considering).
- * @param dt Time step.
- * @param dx Space step.
- * @param invdx 1/dx.
- * @param velocity_cache Local velocity field.
- * @return Particle position
- *
- * @remark NB_I, NB_II, NB_III : points number in directions from 1st varying index to last.
- * @remark T_NB_I: global points number in the 1st direction (mpi cutted direction)
- * @remark START_INDEX Global staring index for computational points
- * @remark STOP_INDEX Global stop index for computational points
- */
-float advection(uint i, float dt, __local float* velocity_cache, __constant struct AdvectionMeshInfo* mesh)
-{
-  float v, 			/* Velocity at point */
-    p,				/* Intermediary position */
-    c = i * dx + min_position,  /* initial coordinate */
-    hdt = 0.5 * dt;		/* half time step */
-  int i_ind,			/* Interpolation left point */
-    i_ind_p;			/* Interpolation right point */
-
-#if (V_NB_I-2*V_GHOSTS_NB) == NB_I
-  // single scale:
-  v = velocity_cache[noBC_id(i + V_GHOSTS_NB)];
-#else
-  // multi-scale : interpolate v from velocity buffer (of length V_NB_I)
-  p = c * mesh->v_invdx;
-  i_ind = convert_int_rtn(p);
-  p = p - convert_float(i_ind);
-  i_ind = i_ind - (V_START_INDEX-V_GHOSTS_NB);
-  i_ind_p = i_ind + 1;
-  v = mix(velocity_cache[noBC_id(i_ind)],
-  	  velocity_cache[noBC_id(i_ind_p)],p);
-#endif
-
-  p = fma(hdt, v, c) * mesh->v_invdx;
-  i_ind = convert_int_rtn(p);
-  if( i_ind>=(V_START_INDEX-MS_INTERPOL_SHIFT) && i_ind < (V_STOP_INDEX-V_GHOSTS_NB))
-    {
-      p = p - convert_float(i_ind);
-
-      i_ind = i_ind - (V_START_INDEX-V_GHOSTS_NB);
-      i_ind_p = i_ind + 1;
-
-      v = mix(velocity_cache[noBC_id(i_ind)],
-      	      velocity_cache[noBC_id(i_ind_p)],p);
-
-      p = fma(dt, v, c);
-    }
-  else
-    {
-      p = (1000*T_NB_I)*1.0 + p;
-    }
-
-  return p;
-}
-/* Operations number :  */
-/*   - 2 positions = 2 * fma */
-/*   - 1 iterpolation = 6 + 1 * mix */
-/* Total = 2 fma + 1 mix + 6 */
diff --git a/hysop/backend/device/opencl/cl_src/advection/velocity_cache.cl b/hysop/backend/device/opencl/cl_src/advection/velocity_cache.cl
deleted file mode 100644
index 6fd4ae6e6046ca8d3db8c2472c18b0dcd7483ad9..0000000000000000000000000000000000000000
--- a/hysop/backend/device/opencl/cl_src/advection/velocity_cache.cl
+++ /dev/null
@@ -1,217 +0,0 @@
-void fill_velocity_cache(__global const float* gvelo,
-			 uint gidX, uint gidY, uint gidZ,
-			 __local float* gvelo_loc,
-#if ADVEC_IS_MULTISCALE
-			 float inv_v_dx_y, float inv_v_dx_z,
-#endif
-			 __constant struct AdvectionMeshInfo* mesh);
-
-void fill_velocity_cache(__global const float* gvelo,
-			 uint gidX, uint gidY, uint gidZ,
-			 __local float* velocity_cache,
-#if ADVEC_IS_MULTISCALE
-			 float inv_v_dx_y, float inv_v_dx_z,
-#endif
-			 __constant struct AdvectionMeshInfo* mesh)
-{
-  uint i;
-  float__N__ v;
-#if !(ADVEC_IS_MULTISCALE)
-  // Single scale : Velocity and scalar grids are identical : cache is just read from global
-  uint line_index = gidY*V_NB_I + gidZ*V_NB_I*V_NB_II; /* Current 1D problem index */
-  for(i=gidX*__N__; i<V_NB_I; i+=(WI_NB*__N__))
-    {
-      /* Read velocity */
-      v = vload__N__((i+line_index)/__N__, gvelo);
-      /* Fill the cache */
-      velocity_cache[noBC_id(i+__NN__)] = v.s__NN__;
-    }
-#else
-  // Multi-scale: Velocity cache is interpolated from global
-
-#if NB_III == 1
-  // 2D case
-
-
-  float line_posY, hY;
-  int indY;
-#if MS_FORMULA == MS_LINEAR
-  int2 v_line_index;
-  float2 wY;
-#elif MS_FORMULA == MS_L2_1
-  int4 v_line_index;
-  float4 wY;
-#elif MS_FORMULA == MS_L4_2 ||  MS_FORMULA == MS_L4_4
-  // Only the 6 first elements will be used
-  int8 v_line_index;
-  float8 wY;
-#endif
-
-  line_posY = (gidY * mesh->dx.y) * inv_v_dx_y;// mesh->v_dx.y;
-  indY = convert_int_rtn(line_posY);
-  hY = line_posY - convert_float(indY);
-
-#if MS_FORMULA == MS_LINEAR
-  wY.s1 = hY;
-  wY.s0 = 1.0 - wY.s1;
-#else
-  wY.s0 = MS_INTERPOL(alpha)(hY);
-  wY.s1 = MS_INTERPOL(beta)(hY);
-  wY.s2 = MS_INTERPOL(gamma)(hY);
-#if MS_INTERPOL_SHIFT > 1
-  wY.s3 = MS_INTERPOL(delta)(hY);
-  wY.s4 = MS_INTERPOL(eta)(hY);
-  wY.s5 = 1.0 - wY.s0 - wY.s1 - wY.s2 - wY.s3 - wY.s4;
-#else
-  wY.s3 = 1.0 - wY.s0 - wY.s1 - wY.s2;
-#endif
-#endif
-
-  indY = indY + V_GHOSTS_NB - MS_INTERPOL_SHIFT;
-
-  v_line_index.s0 = indY * V_NB_I;
-  v_line_index.s1 = (indY + 1) * V_NB_I;
-#if MS_INTERPOL_SHIFT > 0
-  v_line_index.s2 = (indY + 2) * V_NB_I;
-  v_line_index.s3 = (indY + 3) * V_NB_I;
-#elif MS_INTERPOL_SHIFT > 1
-  v_line_index.s4 = (indY + 4) * V_NB_I;
-  v_line_index.s5 = (indY + 5) * V_NB_I;
-#endif
-
-  for(i=gidX*__N__; i<V_NB_I; i+=(WI_NB*__N__))
-    {
-    gvelo_loc[noBC_id(i)] = wY.s0 * gvelo[i + v_line_index.s0];
-    gvelo_loc[noBC_id(i)] += wY.s1 * gvelo[i + v_line_index.s1];
-#if MS_INTERPOL_SHIFT > 0
-    gvelo_loc[noBC_id(i)] += wY.s2 * gvelo[i + v_line_index.s2];
-    gvelo_loc[noBC_id(i)] += wY.s3 * gvelo[i + v_line_index.s3];
-#elif MS_INTERPOL_SHIFT > 1
-    gvelo_loc[noBC_id(i)] += wY.s4 * gvelo[i + v_line_index.s4];
-    gvelo_loc[noBC_id(i)] += wY.s5 * gvelo[i + v_line_index.s5];
-#endif
-    }
-
-#else
-  // 3D case
-
-
-  float line_posY, hY;
-  float line_posZ, hZ;
-  int indY, indZ;
-#if MS_FORMULA == MS_LINEAR
-  int2 v_line_indexY, v_line_indexZ;
-  float2 wY, wZ;
-#elif MS_FORMULA == MS_L2_1
-  int4 v_line_indexY, v_line_indexZ;
-  float4 wY, wZ;
-#elif MS_FORMULA == MS_L4_2 || MS_FORMULA == MS_L4_4
-  int8 v_line_indexY, v_line_indexZ;
-  float8 wY, wZ;
-#endif
-
-  line_posY = (gidY * mesh->dx.y) * inv_v_dx_y;// mesh->v_dx.y;
-  line_posZ = (gidZ * mesh->dx.z) * inv_v_dx_z;// mesh->v_dx.z;
-  indY = convert_int_rtn(line_posY);
-  indZ = convert_int_rtn(line_posZ);
-  hY = line_posY - convert_float(indY);
-  hZ = line_posZ - convert_float(indZ);
-
-#if MS_FORMULA == MS_LINEAR
-  wY.s1 = hY;
-  wY.s0 = 1.0 - wY.s1;
-  wZ.s1 = hZ;
-  wZ.s0 = 1.0 - wZ.s1;
-#else
-  wY.s0 = MS_INTERPOL(alpha)(hY);
-  wY.s1 = MS_INTERPOL(beta)(hY);
-  wY.s2 = MS_INTERPOL(gamma)(hY);
-  wZ.s0 = MS_INTERPOL(alpha)(hZ);
-  wZ.s1 = MS_INTERPOL(beta)(hZ);
-  wZ.s2 = MS_INTERPOL(gamma)(hZ);
-#if MS_INTERPOL_SHIFT > 1
-  wY.s3 = MS_INTERPOL(delta)(hY);
-  wY.s4 = MS_INTERPOL(eta)(hY);
-  wY.s5 = 1.0 - wY.s0 - wY.s1 - wY.s2 - wY.s3 - wY.s4;
-  wZ.s3 = MS_INTERPOL(delta)(hZ);
-  wZ.s4 = MS_INTERPOL(eta)(hZ);
-  wZ.s5 = 1.0 - wZ.s0 - wZ.s1 - wZ.s2 - wZ.s3 - wZ.s4;
-#else
-  wY.s3 = 1.0 - wY.s0 - wY.s1 - wY.s2;
-  wZ.s3 = 1.0 - wZ.s0 - wZ.s1 - wZ.s2;
-#endif
-#endif
-
- indY = indY + V_GHOSTS_NB - MS_INTERPOL_SHIFT;
- indZ = indZ + V_GHOSTS_NB - MS_INTERPOL_SHIFT;
-
-  v_line_indexY.s0 = indY * V_NB_I;
-  v_line_indexY.s1 = (indY + 1) * V_NB_I;
-  v_line_indexZ.s0 = indZ * V_NB_I * V_NB_II;
-  v_line_indexZ.s1 = (indZ + 1) * V_NB_I * V_NB_II;
-#if MS_INTERPOL_SHIFT > 0
-  v_line_indexY.s2 = (indY + 2) * V_NB_I;
-  v_line_indexY.s3 = (indY + 3) * V_NB_I;
-  v_line_indexZ.s2 = (indZ + 2) * V_NB_I * V_NB_II;
-  v_line_indexZ.s3 = (indZ + 3) * V_NB_I * V_NB_II;
-#elif MS_INTERPOL_SHIFT > 1
-  v_line_indexY.s4 = (indY + 4) * V_NB_I;
-  v_line_indexY.s5 = (indY + 5) * V_NB_I;
-  v_line_indexZ.s4 = (indZ + 4) * V_NB_I * V_NB_II;
-  v_line_indexZ.s5 = (indZ + 5) * V_NB_I * V_NB_II;
-#endif
-
-
-  for(i=gidX*__N__; i<V_NB_I; i+=(WI_NB*__N__))
-    {
-    gvelo_loc[noBC_id(i)] = wY.s0 * wZ.s0 * gvelo[i + v_line_indexY.s0 + v_line_indexZ.s0];
-    gvelo_loc[noBC_id(i)] += wY.s0 * wZ.s1 * gvelo[i + v_line_indexY.s0 + v_line_indexZ.s1];
-    gvelo_loc[noBC_id(i)] += wY.s1 * wZ.s0 * gvelo[i + v_line_indexY.s1 + v_line_indexZ.s0];
-    gvelo_loc[noBC_id(i)] += wY.s1 * wZ.s1 * gvelo[i + v_line_indexY.s1 + v_line_indexZ.s1];
-#if MS_INTERPOL_SHIFT > 0
-    gvelo_loc[noBC_id(i)] += wY.s0 * wZ.s2 * gvelo[i + v_line_indexY.s0 + v_line_indexZ.s2];
-    gvelo_loc[noBC_id(i)] += wY.s0 * wZ.s3 * gvelo[i + v_line_indexY.s0 + v_line_indexZ.s3];
-
-    gvelo_loc[noBC_id(i)] += wY.s1 * wZ.s2 * gvelo[i + v_line_indexY.s1 + v_line_indexZ.s2];
-    gvelo_loc[noBC_id(i)] += wY.s1 * wZ.s3 * gvelo[i + v_line_indexY.s1 + v_line_indexZ.s3];
-
-    gvelo_loc[noBC_id(i)] += wY.s2 * wZ.s0 * gvelo[i + v_line_indexY.s2 + v_line_indexZ.s0];
-    gvelo_loc[noBC_id(i)] += wY.s2 * wZ.s1 * gvelo[i + v_line_indexY.s2 + v_line_indexZ.s1];
-    gvelo_loc[noBC_id(i)] += wY.s2 * wZ.s2 * gvelo[i + v_line_indexY.s2 + v_line_indexZ.s2];
-    gvelo_loc[noBC_id(i)] += wY.s2 * wZ.s3 * gvelo[i + v_line_indexY.s2 + v_line_indexZ.s3];
-
-    gvelo_loc[noBC_id(i)] += wY.s3 * wZ.s0 * gvelo[i + v_line_indexY.s3 + v_line_indexZ.s0];
-    gvelo_loc[noBC_id(i)] += wY.s3 * wZ.s1 * gvelo[i + v_line_indexY.s3 + v_line_indexZ.s1];
-    gvelo_loc[noBC_id(i)] += wY.s3 * wZ.s2 * gvelo[i + v_line_indexY.s3 + v_line_indexZ.s2];
-    gvelo_loc[noBC_id(i)] += wY.s3 * wZ.s3 * gvelo[i + v_line_indexY.s3 + v_line_indexZ.s3];
-#elif MS_INTERPOL_SHIFT > 1
-    gvelo_loc[noBC_id(i)] += wY.s0 * wZ.s4 * gvelo[i + v_line_indexY.s0 + v_line_indexZ.s4];
-    gvelo_loc[noBC_id(i)] += wY.s0 * wZ.s5 * gvelo[i + v_line_indexY.s0 + v_line_indexZ.s5];
-
-    gvelo_loc[noBC_id(i)] += wY.s1 * wZ.s4 * gvelo[i + v_line_indexY.s1 + v_line_indexZ.s4];
-    gvelo_loc[noBC_id(i)] += wY.s1 * wZ.s5 * gvelo[i + v_line_indexY.s1 + v_line_indexZ.s5];
-
-    gvelo_loc[noBC_id(i)] += wY.s2 * wZ.s4 * gvelo[i + v_line_indexY.s2 + v_line_indexZ.s4];
-    gvelo_loc[noBC_id(i)] += wY.s2 * wZ.s5 * gvelo[i + v_line_indexY.s2 + v_line_indexZ.s5];
-
-    gvelo_loc[noBC_id(i)] += wY.s3 * wZ.s4 * gvelo[i + v_line_indexY.s3 + v_line_indexZ.s4];
-    gvelo_loc[noBC_id(i)] += wY.s3 * wZ.s5 * gvelo[i + v_line_indexY.s3 + v_line_indexZ.s5];
-
-    gvelo_loc[noBC_id(i)] += wY.s4 * wZ.s0 * gvelo[i + v_line_indexY.s4 + v_line_indexZ.s0];
-    gvelo_loc[noBC_id(i)] += wY.s4 * wZ.s1 * gvelo[i + v_line_indexY.s4 + v_line_indexZ.s1];
-    gvelo_loc[noBC_id(i)] += wY.s4 * wZ.s2 * gvelo[i + v_line_indexY.s4 + v_line_indexZ.s2];
-    gvelo_loc[noBC_id(i)] += wY.s4 * wZ.s3 * gvelo[i + v_line_indexY.s4 + v_line_indexZ.s3];
-    gvelo_loc[noBC_id(i)] += wY.s4 * wZ.s4 * gvelo[i + v_line_indexY.s4 + v_line_indexZ.s4];
-    gvelo_loc[noBC_id(i)] += wY.s4 * wZ.s5 * gvelo[i + v_line_indexY.s4 + v_line_indexZ.s5];
-
-    gvelo_loc[noBC_id(i)] += wY.s5 * wZ.s0 * gvelo[i + v_line_indexY.s5 + v_line_indexZ.s0];
-    gvelo_loc[noBC_id(i)] += wY.s5 * wZ.s1 * gvelo[i + v_line_indexY.s5 + v_line_indexZ.s1];
-    gvelo_loc[noBC_id(i)] += wY.s5 * wZ.s2 * gvelo[i + v_line_indexY.s5 + v_line_indexZ.s2];
-    gvelo_loc[noBC_id(i)] += wY.s5 * wZ.s3 * gvelo[i + v_line_indexY.s5 + v_line_indexZ.s3];
-    gvelo_loc[noBC_id(i)] += wY.s5 * wZ.s4 * gvelo[i + v_line_indexY.s5 + v_line_indexZ.s4];
-    gvelo_loc[noBC_id(i)] += wY.s5 * wZ.s5 * gvelo[i + v_line_indexY.s5 + v_line_indexZ.s5];
-#endif
-    }
-#endif
-#endif
-}
diff --git a/hysop/backend/device/opencl/cl_src/advection/velocity_cache_noVec.cl b/hysop/backend/device/opencl/cl_src/advection/velocity_cache_noVec.cl
deleted file mode 100644
index 1677b2546e42b36ebc28dacf7288989461c9c559..0000000000000000000000000000000000000000
--- a/hysop/backend/device/opencl/cl_src/advection/velocity_cache_noVec.cl
+++ /dev/null
@@ -1,231 +0,0 @@
-void fill_velocity_cache(__global const float* gvelo,
-			 uint gidX, uint gidY, uint gidZ,
-			 __local float* gvelo_loc,
-#if ADVEC_IS_MULTISCALE
-			 float inv_v_dx_y, float inv_v_dx_z,
-#endif
-			 __constant struct AdvectionMeshInfo* mesh);
-
-void fill_velocity_cache(__global const float* gvelo,
-			 uint gidX, uint gidY, uint gidZ,
-			 __local float* gvelo_loc,
-#if ADVEC_IS_MULTISCALE
-			 float inv_v_dx_y, float inv_v_dx_z,
-#endif
-			 __constant struct AdvectionMeshInfo* mesh)
-{
-  uint i;
-
-  // ********************************
-  // **    Single Scale
-  // ********************************
-#if !(ADVEC_IS_MULTISCALE)
-  // Single scale : Velocity and scalar grids are identical : cache is just read from global
-  uint line_index = gidY*V_NB_I + gidZ*V_NB_I*V_NB_II; /* Current 1D problem index */
-  for(i=gidX; i<V_NB_I; i+=(WI_NB))
-    {
-      /* Read velocity */
-      /* Fill velocity cache */
-      gvelo_loc[noBC_id(i)] = gvelo[i+line_index];
-    }
-
-  // ********************************
-  // **    Multi-Scale
-  // ********************************
-  // Velocity cache is interpolated from global memory
-#else
-
-
-#if NB_III == 1
-  //  Multi-Scale (2D)
-
-  float line_posY, hY;
-  int indY;
-#if MS_FORMULA == LINEAR
-  int2 v_line_index;
-  float2 wY;
-#elif MS_FORMULA == L2_1
-  int4 v_line_index;
-  float4 wY;
-#elif MS_FORMULA == L4_2 ||  MS_FORMULA == L4_4
-  // Only the 6 first elements will be used
-  int8 v_line_index;
-  float8 wY;
-#endif
-
-  line_posY = (gidY * mesh->dx.y) * inv_v_dx_y; // mesh->v_dx.y;
-  indY = convert_int_rtn(line_posY);
-  hY = line_posY - convert_float(indY);
-
-
-#if MS_FORMULA == LINEAR
-  wY.s1 = hY;
-  wY.s0 = 1.0 - wY.s1;
-#else
-  wY.s0 = MS_INTERPOL(alpha)(hY);
-  wY.s1 = MS_INTERPOL(beta)(hY);
-  wY.s2 = MS_INTERPOL(gamma)(hY);
-#if MS_INTERPOL_SHIFT > 1
-  wY.s3 = MS_INTERPOL(delta)(hY);
-  wY.s4 = MS_INTERPOL(eta)(hY);
-  wY.s5 = 1.0 - wY.s0 - wY.s1 - wY.s2 - wY.s3 - wY.s4;
-#else
-  wY.s3 = 1.0 - wY.s0 - wY.s1 - wY.s2;
-#endif
-#endif
-
-  indY = indY + V_GHOSTS_NB - MS_INTERPOL_SHIFT;
-
-  v_line_index.s0 = indY * V_NB_I;
-  v_line_index.s1 = (indY + 1) * V_NB_I;
-#if MS_INTERPOL_SHIFT > 0
-  v_line_index.s2 = (indY + 2) * V_NB_I;
-  v_line_index.s3 = (indY + 3) * V_NB_I;
-#elif MS_INTERPOL_SHIFT > 1
-  v_line_index.s4 = (indY + 4) * V_NB_I;
-  v_line_index.s5 = (indY + 5) * V_NB_I;
-#endif
-
-
-  for(i=gidX; i<V_NB_I; i+=(WI_NB)){
-    gvelo_loc[noBC_id(i)] = wY.s0 * gvelo[i + v_line_index.s0];
-    gvelo_loc[noBC_id(i)] += wY.s1 * gvelo[i + v_line_index.s1];
-#if MS_INTERPOL_SHIFT > 0
-    gvelo_loc[noBC_id(i)] += wY.s2 * gvelo[i + v_line_index.s2];
-    gvelo_loc[noBC_id(i)] += wY.s3 * gvelo[i + v_line_index.s3];
-#elif MS_INTERPOL_SHIFT > 1
-    gvelo_loc[noBC_id(i)] += wY.s4 * gvelo[i + v_line_index.s4];
-    gvelo_loc[noBC_id(i)] += wY.s5 * gvelo[i + v_line_index.s5];
-#endif
-  }
-			 /* nombre d'opérations 2D Linéaire:
-			    - calcul des poids de ligne : 4flop (par wi)
-			    - calcul de la vitesse : 3flop par point de grille de vitesse
-			 */
-
-
-#else
-  //  Multi-Scale (3D)
-
-  float line_posY, hY;
-  float line_posZ, hZ;
-  int indY, indZ;
-#if MS_FORMULA == LINEAR
-  int2 v_line_indexY, v_line_indexZ;
-  float2 wY, wZ;
-#elif MS_FORMULA == L2_1
-  int4 v_line_indexY, v_line_indexZ;
-  float4 wY, wZ;
-#elif MS_FORMULA == L4_2 || MS_FORMULA == L4_4
-  int8 v_line_indexY, v_line_indexZ;
-  float8 wY, wZ;
-#endif
-
-  line_posY = (gidY * mesh->dx.y) * inv_v_dx_y; // mesh->v_dx.y;
-  line_posZ = (gidZ * mesh->dx.z) * inv_v_dx_z;// mesh->v_dx.z;
-  indY = convert_int_rtn(line_posY);
-  indZ = convert_int_rtn(line_posZ);
-  hY = line_posY - convert_float(indY);
-  hZ = line_posZ - convert_float(indZ);
-
-#if MS_FORMULA == LINEAR
-  wY.s1 = hY;
-  wY.s0 = 1.0 - wY.s1;
-  wZ.s1 = hZ;
-  wZ.s0 = 1.0 - wZ.s1;
-#else
-  wY.s0 = MS_INTERPOL(alpha)(hY);
-  wY.s1 = MS_INTERPOL(beta)(hY);
-  wY.s2 = MS_INTERPOL(gamma)(hY);
-  wZ.s0 = MS_INTERPOL(alpha)(hZ);
-  wZ.s1 = MS_INTERPOL(beta)(hZ);
-  wZ.s2 = MS_INTERPOL(gamma)(hZ);
-#if MS_INTERPOL_SHIFT > 1
-  wY.s3 = MS_INTERPOL(delta)(hY);
-  wY.s4 = MS_INTERPOL(eta)(hY);
-  wY.s5 = 1.0 - wY.s0 - wY.s1 - wY.s2 - wY.s3 - wY.s4;
-  wZ.s3 = MS_INTERPOL(delta)(hZ);
-  wZ.s4 = MS_INTERPOL(eta)(hZ);
-  wZ.s5 = 1.0 - wZ.s0 - wZ.s1 - wZ.s2 - wZ.s3 - wZ.s4;
-#else
-  wY.s3 = 1.0 - wY.s0 - wY.s1 - wY.s2;
-  wZ.s3 = 1.0 - wZ.s0 - wZ.s1 - wZ.s2;
-#endif
-#endif
-
-  indY = indY + V_GHOSTS_NB - MS_INTERPOL_SHIFT;
-  indZ = indZ + V_GHOSTS_NB - MS_INTERPOL_SHIFT;
-
-  v_line_indexY.s0 = indY * V_NB_I;
-  v_line_indexY.s1 = (indY + 1) * V_NB_I;
-  v_line_indexZ.s0 = indZ * V_NB_I * V_NB_II;
-  v_line_indexZ.s1 = (indZ + 1) * V_NB_I * V_NB_II;
-#if MS_INTERPOL_SHIFT > 0
-  v_line_indexY.s2 = (indY + 2) * V_NB_I;
-  v_line_indexY.s3 = (indY + 3) * V_NB_I;
-  v_line_indexZ.s2 = (indZ + 2) * V_NB_I * V_NB_II;
-  v_line_indexZ.s3 = (indZ + 3) * V_NB_I * V_NB_II;
-#elif MS_INTERPOL_SHIFT > 1
-  v_line_indexY.s4 = (indY + 4) * V_NB_I;
-  v_line_indexY.s5 = (indY + 5) * V_NB_I;
-  v_line_indexZ.s4 = (indZ + 4) * V_NB_I * V_NB_II;
-  v_line_indexZ.s5 = (indZ + 5) * V_NB_I * V_NB_II;
-#endif
-
-  for(i=gidX; i<V_NB_I; i+=(WI_NB)){
-    gvelo_loc[noBC_id(i)] = wY.s0 * wZ.s0 * gvelo[i + v_line_indexY.s0 + v_line_indexZ.s0];
-    gvelo_loc[noBC_id(i)] += wY.s0 * wZ.s1 * gvelo[i + v_line_indexY.s0 + v_line_indexZ.s1];
-    gvelo_loc[noBC_id(i)] += wY.s1 * wZ.s0 * gvelo[i + v_line_indexY.s1 + v_line_indexZ.s0];
-    gvelo_loc[noBC_id(i)] += wY.s1 * wZ.s1 * gvelo[i + v_line_indexY.s1 + v_line_indexZ.s1];
-#if MS_INTERPOL_SHIFT > 0
-    gvelo_loc[noBC_id(i)] += wY.s0 * wZ.s2 * gvelo[i + v_line_indexY.s0 + v_line_indexZ.s2];
-    gvelo_loc[noBC_id(i)] += wY.s0 * wZ.s3 * gvelo[i + v_line_indexY.s0 + v_line_indexZ.s3];
-
-    gvelo_loc[noBC_id(i)] += wY.s1 * wZ.s2 * gvelo[i + v_line_indexY.s1 + v_line_indexZ.s2];
-    gvelo_loc[noBC_id(i)] += wY.s1 * wZ.s3 * gvelo[i + v_line_indexY.s1 + v_line_indexZ.s3];
-
-    gvelo_loc[noBC_id(i)] += wY.s2 * wZ.s0 * gvelo[i + v_line_indexY.s2 + v_line_indexZ.s0];
-    gvelo_loc[noBC_id(i)] += wY.s2 * wZ.s1 * gvelo[i + v_line_indexY.s2 + v_line_indexZ.s1];
-    gvelo_loc[noBC_id(i)] += wY.s2 * wZ.s2 * gvelo[i + v_line_indexY.s2 + v_line_indexZ.s2];
-    gvelo_loc[noBC_id(i)] += wY.s2 * wZ.s3 * gvelo[i + v_line_indexY.s2 + v_line_indexZ.s3];
-
-    gvelo_loc[noBC_id(i)] += wY.s3 * wZ.s0 * gvelo[i + v_line_indexY.s3 + v_line_indexZ.s0];
-    gvelo_loc[noBC_id(i)] += wY.s3 * wZ.s1 * gvelo[i + v_line_indexY.s3 + v_line_indexZ.s1];
-    gvelo_loc[noBC_id(i)] += wY.s3 * wZ.s2 * gvelo[i + v_line_indexY.s3 + v_line_indexZ.s2];
-    gvelo_loc[noBC_id(i)] += wY.s3 * wZ.s3 * gvelo[i + v_line_indexY.s3 + v_line_indexZ.s3];
-#elif MS_INTERPOL_SHIFT > 1
-    gvelo_loc[noBC_id(i)] += wY.s0 * wZ.s4 * gvelo[i + v_line_indexY.s0 + v_line_indexZ.s4];
-    gvelo_loc[noBC_id(i)] += wY.s0 * wZ.s5 * gvelo[i + v_line_indexY.s0 + v_line_indexZ.s5];
-
-    gvelo_loc[noBC_id(i)] += wY.s1 * wZ.s4 * gvelo[i + v_line_indexY.s1 + v_line_indexZ.s4];
-    gvelo_loc[noBC_id(i)] += wY.s1 * wZ.s5 * gvelo[i + v_line_indexY.s1 + v_line_indexZ.s5];
-
-    gvelo_loc[noBC_id(i)] += wY.s2 * wZ.s4 * gvelo[i + v_line_indexY.s2 + v_line_indexZ.s4];
-    gvelo_loc[noBC_id(i)] += wY.s2 * wZ.s5 * gvelo[i + v_line_indexY.s2 + v_line_indexZ.s5];
-
-    gvelo_loc[noBC_id(i)] += wY.s3 * wZ.s4 * gvelo[i + v_line_indexY.s3 + v_line_indexZ.s4];
-    gvelo_loc[noBC_id(i)] += wY.s3 * wZ.s5 * gvelo[i + v_line_indexY.s3 + v_line_indexZ.s5];
-
-    gvelo_loc[noBC_id(i)] += wY.s4 * wZ.s0 * gvelo[i + v_line_indexY.s4 + v_line_indexZ.s0];
-    gvelo_loc[noBC_id(i)] += wY.s4 * wZ.s1 * gvelo[i + v_line_indexY.s4 + v_line_indexZ.s1];
-    gvelo_loc[noBC_id(i)] += wY.s4 * wZ.s2 * gvelo[i + v_line_indexY.s4 + v_line_indexZ.s2];
-    gvelo_loc[noBC_id(i)] += wY.s4 * wZ.s3 * gvelo[i + v_line_indexY.s4 + v_line_indexZ.s3];
-    gvelo_loc[noBC_id(i)] += wY.s4 * wZ.s4 * gvelo[i + v_line_indexY.s4 + v_line_indexZ.s4];
-    gvelo_loc[noBC_id(i)] += wY.s4 * wZ.s5 * gvelo[i + v_line_indexY.s4 + v_line_indexZ.s5];
-
-    gvelo_loc[noBC_id(i)] += wY.s5 * wZ.s0 * gvelo[i + v_line_indexY.s5 + v_line_indexZ.s0];
-    gvelo_loc[noBC_id(i)] += wY.s5 * wZ.s1 * gvelo[i + v_line_indexY.s5 + v_line_indexZ.s1];
-    gvelo_loc[noBC_id(i)] += wY.s5 * wZ.s2 * gvelo[i + v_line_indexY.s5 + v_line_indexZ.s2];
-    gvelo_loc[noBC_id(i)] += wY.s5 * wZ.s3 * gvelo[i + v_line_indexY.s5 + v_line_indexZ.s3];
-    gvelo_loc[noBC_id(i)] += wY.s5 * wZ.s4 * gvelo[i + v_line_indexY.s5 + v_line_indexZ.s4];
-    gvelo_loc[noBC_id(i)] += wY.s5 * wZ.s5 * gvelo[i + v_line_indexY.s5 + v_line_indexZ.s5];
-#endif
-  }
-			 /* nombre d'opérations 3D Linéaire:
-			    - calcul des poids de ligne : 8flop (par wi)
-			    - calcul de la vitesse : 11flop par point de grille de vitesse
-			 */
-
-#endif
-#endif
-}
diff --git a/hysop/backend/device/opencl/cl_src/common.cl b/hysop/backend/device/opencl/cl_src/common.cl
deleted file mode 100644
index 4f67d2aa000011e2dc5fe8b875e5c5521107a283..0000000000000000000000000000000000000000
--- a/hysop/backend/device/opencl/cl_src/common.cl
+++ /dev/null
@@ -1,166 +0,0 @@
-/**
- * @file common.cl
- * Common parameters for advection and remeshing kernels.
- */
-
-inline uint noBC_id(int id);
-
-#ifdef WITH_NOBC
-/**
- * Mapping to local memory arrays to avoir banck conflics.
- * 1D buffer is taken as 2D one with wor-items vs. particles.
- *
- * @param id 1D index
- *
- * @return 2D index
- */
-inline uint noBC_id(int id){
-  return (id%PART_NB_PER_WI)*WI_NB+(id/PART_NB_PER_WI);
-}
-#else
-/**
- * Leave mapping unchanged, 1D.
- *
- * @param id 1D index
- *
- * @return 1D index
- */
-inline uint noBC_id(int id){
-  return id;
-}
-#endif
-
-/**
- * Constants for remeshing formulas:
- *   - L2_1 1
- *   - L2_2 2
- *   - L2_3 3
- *   - L2_4 4
- *   - L4_2 5
- *   - L4_3 6
- *   - L4_4 7
- *   - L6_3 8
- *   - L6_4 9
- *   - L6_5 10
- *   - L6_6 11
- *   - L8_4 12
- *   - M8PRIME 13
- */
-#define L2_1 1
-#define L2_2 2
-#define L2_3 3
-#define L2_4 4
-#define L4_2 5
-#define L4_3 6
-#define L4_4 7
-#define L6_3 8
-#define L6_4 9
-#define L6_5 10
-#define L6_6 11
-#define L8_4 12
-#define M8PRIME 13
-#define LINEAR 14
-
-/**
- * Remeshing configuration
- */
-#if FORMULA == L2_1
-#define REMESH_SHIFT 1
-#define REMESH(greek) greek##_l2_1
-#elif FORMULA == L2_2
-#define REMESH_SHIFT 1
-#define REMESH(greek) greek##_l2_2
-#elif FORMULA == L2_3
-#define REMESH_SHIFT 1
-#define REMESH(greek) greek##_l2_3
-#elif FORMULA == L2_4
-#define REMESH_SHIFT 1
-#define REMESH(greek) greek##_l2_4
-
-#elif FORMULA == L4_2
-#define REMESH_SHIFT 2
-#define REMESH(greek) greek##_l4_2
-#elif FORMULA == L4_3
-#define REMESH_SHIFT 2
-#define REMESH(greek) greek##_l4_3
-#elif FORMULA == L4_4
-#define REMESH_SHIFT 2
-#define REMESH(greek) greek##_l4_4
-
-#elif FORMULA == M8PRIME
-#define REMESH_SHIFT 3
-#define REMESH(greek) greek##_M8p
-#elif FORMULA == L6_3
-#define REMESH_SHIFT 3
-#define REMESH(greek) greek##_l6_3
-#elif FORMULA == L6_4
-#define REMESH_SHIFT 3
-#define REMESH(greek) greek##_l6_4
-#elif FORMULA == L6_5
-#define REMESH_SHIFT 3
-#define REMESH(greek) greek##_l6_5
-#elif FORMULA == L6_6
-#define REMESH_SHIFT 3
-#define REMESH(greek) greek##_l6_6
-
-#elif FORMULA == L8_4
-#define REMESH_SHIFT 4
-#define REMESH(greek) greek##_l8_4
-#endif
-
-
-/**
- * Multi-scale configuration
- */
-
-#ifndef ADVEC_IS_MULTISCALE
-#define ADVEC_IS_MULTISCALE (V_NB_I-2*V_GHOSTS_NB) != NB_I
-#endif
-
-#if MS_FORMULA == LINEAR
-#define MS_INTERPOL_SHIFT 0
-// MS_INTERPOL not used
-#elif MS_FORMULA == L2_1
-#define MS_INTERPOL_SHIFT 1
-#define MS_INTERPOL(greek) greek##_l2_1
-#elif MS_FORMULA == L4_2
-#define MS_INTERPOL_SHIFT 2
-#define MS_INTERPOL(greek) greek##_l4_2
-#elif MS_FORMULA == L4_4
-#define MS_INTERPOL_SHIFT 2
-#define MS_INTERPOL(greek) greek##_l4_4
-#else
-//Default case for single-scale (only used in comm advection)
-#define MS_INTERPOL_SHIFT 0
-#endif
-
-/*
-a minmax element is a 12 int defined as follows:
-*/
-#define L_MIN_X 0
-#define L_MAX_X 1
-#define L_MIN_Y 2
-#define L_MAX_Y 3
-#define L_MIN_Z 4
-#define L_MAX_Z 5
-#define R_MIN_X 6
-#define R_MAX_X 7
-#define R_MIN_Y 8
-#define R_MAX_Y 9
-#define R_MIN_Z 10
-#define R_MAX_Z 11
-
-/* Structure to store __constants advection parameters */
-typedef struct AdvectionMeshInfo
-{
-  float4 dx;                   /* Mesh step (advected grid) */
-  float4 v_dx;                 /* Mesh step (velocity) */
-  float min_position;           /* Domain minimum coordinate in current direction */
-  float invdx;                 /* Store 1./dx.x */
-  float v_invdx;               /* Store 1./v_dx.x */
-  float x;                     /* Padding */
-} AdvectionMeshInfo;
-
-/* Finite differences constants */
-#define FD_C_2 88
-#define FD_C_4 99
diff --git a/hysop/backend/device/opencl/cl_src/kernels/advection.cl b/hysop/backend/device/opencl/cl_src/kernels/advection.cl
deleted file mode 100644
index 2e9e341e3df1cbc898a89b882e7ee83ddf4c2a77..0000000000000000000000000000000000000000
--- a/hysop/backend/device/opencl/cl_src/kernels/advection.cl
+++ /dev/null
@@ -1,77 +0,0 @@
-/**
- * @file advection.cl
- * Advection kernel, vectorized version.
- */
-
-/**
- * Computes particles positions from the velocity field.
- * A work-group is handling a 1D problem. Thus, gidY and gidZ are constants among work-items of a work-group. Computations of 1D problems are placed in loops over gidY and gidZ to adjust local workload and handle the work-item maximum size.
- * Each work-item computes <code>NB_I/WI_NB</code> particles positions in each 1D problem.
- * Particle are computed through OpenCL vector types of length 2, 4 or 8.
- * Velocity data are copied to a local buffer as a cache.
- *
- * @param gvelo Velocity.
- * @param ppos Particle position.
- * @param dt Time step.
- * @param mesh Mesh description.
- * @param inv_v_dx_y velocity grid 1/dy
- * @param inv_v_dx_z velocity grid 1/dz
- *
- * @remark <code>NB_I</code>, <code>NB_II</code>, <code>NB_III</code> : points number in directions from 1st varying index to last.
- * @remark <code>NB_X</code>, <code>NB_Y</code>, <code>NB_Z</code> : points number in physical space directions.
- * @remark <code>WI_NB</code> corresponds to the work-item number.
- * @remark <code>ADVEC_IS_MULTISCALE</code> is a flag for multiscale.
- * @remark <code>V_NB_I</code>, <code>V_NB_II</code>, <code>V_NB_III</code> : points number for velocity grid in directions from 1st varying index to last.
- * @remark <code>__N__</code> is expanded at compilation time by vector width.
- * @remark <code>__NN__</code> is expanded at compilation time by a sequence of integer for each vector component.
- * @see hysop.gpu.tools.parse_file
- */
-__kernel void advection_kernel(__global const float* gvelo,
-			       __global float* ppos,
-			       float dt,
-#if ADVEC_IS_MULTISCALE
-			       float inv_v_dx_y, float inv_v_dx_z,
-#endif
-			       __constant struct AdvectionMeshInfo* mesh)
-{
-  uint gidX = get_global_id(0);	/* OpenCL work-itme global index (X) */
-  uint gidY; /* OpenCL work-itme global index (Y) */
-  uint gidZ; /* OpenCL work-itme global index (Z) */
-  uint i;			/* Particle index in 1D problem */
-  float__N__ p;				/* Particle position */
-  uint line_index; /* Current 1D problem index */
-
-  __local float velocity_cache[V_NB_I]; /* Velocity cache */
-
-  for(gidZ=get_global_id(2);
-#ifdef NB_Z
-      gidZ<NB_III;
-#else
-      gidZ<=get_global_id(2); // Single element loop
-#endif
-      gidZ+=get_global_size(2)) {
-    for(gidY=get_global_id(1); gidY<NB_II; gidY+=get_global_size(1)) {
-
-      // 1D problem computations
-      line_index = gidY*NB_I+ gidZ*NB_I*NB_II;
-
-#if ADVEC_IS_MULTISCALE
-      fill_velocity_cache(gvelo, gidX, gidY, gidZ, velocity_cache, inv_v_dx_y, inv_v_dx_z, mesh);
-#else
-      fill_velocity_cache(gvelo, gidX, gidY, gidZ, velocity_cache, mesh);
-#endif
-
-      /* Synchronize work-group */
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      for(i=gidX*__N__; i<NB_I; i+=WI_NB*__N__) {
-	/* Compute position */
-	p = advection(i, dt, velocity_cache, mesh);
-	/* Store result */
-	vstore__N__(p, (i+line_index)/__N__, ppos);
-      }
-
-      barrier(CLK_LOCAL_MEM_FENCE);
-    }
-  }
-}
diff --git a/hysop/backend/device/opencl/cl_src/kernels/advection_and_remeshing.cl b/hysop/backend/device/opencl/cl_src/kernels/advection_and_remeshing.cl
deleted file mode 100644
index c9fb52ebdcd758d33088e60acefcce35d640ecf7..0000000000000000000000000000000000000000
--- a/hysop/backend/device/opencl/cl_src/kernels/advection_and_remeshing.cl
+++ /dev/null
@@ -1,101 +0,0 @@
-/**
- * @file advection_and_remeshing.cl
- * Advection and remeshing kernel, vectorized version.
- */
-
-/**
- * Performs advection and then remeshing of the particles scalar.
- * A work-group is handling a 1D problem. Thus, gidY and gidZ are constants among work-items of a work-group. Computations of 1D problems are placed in loops over gidY and gidZ to adjust local workload and handle the work-item maximum size.
- * Each work-item computes NB_I/WI_NB particles positions. To avoid concurrent witings, in case of strong velocity gradients, work-items computes contiguous particles.
- * Particle are computed through OpenCL vector types of lenght 2, 4 or 8.
- * Scalar results are stored in a local buffer as a cache and then copied to global memory buffer.
- *
- * @param gvelo Velocity field
- * @param pscal Particle scalar
- * @param gscal Grid scalar
- * @param dt Time step
- * @param mesh Mesh description.
- * @param inv_v_dx_y velocity grid 1/dy
- * @param inv_v_dx_z velocity grid 1/dz
- *
- * @remark <code>NB_I</code>, <code>NB_II</code>, <code>NB_III</code> : points number in directions from 1st varying index to last.
- * @remark <code>NB_X</code>, <code>NB_Y</code>, <code>NB_Z</code> : points number in physical space directions.
- * @remark <code>WI_NB</code> corresponds to the work-item number.
- * @remark <code>ADVEC_IS_MULTISCALE</code> is a flag for multiscale.
- * @remark <code>V_NB_I</code>, <code>V_NB_II</code>, <code>V_NB_III</code> : points number for velocity grid in directions from 1st varying index to last.
- * @remark <code>__N__</code> is expanded at compilation time by vector width.
- * @remark <code>__NN__</code> is expanded at compilation time by a sequence of integer for each vector component.
- * @remark <code>__RCOMP_I</code> flag is for instruction expansion for the different remeshed components.
- * @remark <code>__RCOMP_P</code> flag is for function parameter expansion for the different remeshed components.
- * @remark <code>__ID__</code> is replaced by the remeshed component id in an expansion.
- * @see hysop.gpu.tools.parse_file
- */
-__kernel void advection_and_remeshing(__global const float* gvelo,
-				      __RCOMP_P__global const float* pscal__ID__,
-				      __RCOMP_P__global float* gscal__ID__,
-				      float dt,
-#if ADVEC_IS_MULTISCALE
-				      float inv_v_dx_y, float inv_v_dx_z,
-#endif
-				      __constant struct AdvectionMeshInfo* mesh)
-{
-  uint gidX = get_global_id(0);	/* OpenCL work-itme global index (X) */
-  uint gidY; /* OpenCL work-itme global index (Y) */
-  uint gidZ; /* OpenCL work-itme global index (Z) */
-  uint i;			/* Particle index in 1D problem */
-  float__N__ p;			/* Particle position */
-  __RCOMP_I float__N__ s__ID__; /* Particle scalar */
-  uint line_index; /* Current 1D problem index */
-
-  __RCOMP_I__local float gscal_loc__ID__[NB_I]; /* Local buffer for result */
-  __local float velocity_cache[V_NB_I]; /* Velocity cache */
-
-  for(gidZ=get_global_id(2);
-#ifdef NB_Z
-      gidZ<NB_III;
-#else
-      gidZ<=get_global_id(2); // Single element loop
-#endif
-      gidZ+=get_global_size(2)) {
-    for(gidY=get_global_id(1); gidY<NB_II; gidY+=get_global_size(1)) {
-
-      // 1D problem computations
-      line_index = gidY*NB_I+ gidZ*NB_I*NB_II;
-
-#if ADVEC_IS_MULTISCALE
-      fill_velocity_cache(gvelo, gidX, gidY, gidZ, velocity_cache, inv_v_dx_y, inv_v_dx_z, mesh);
-#else
-      fill_velocity_cache(gvelo, gidX, gidY, gidZ, velocity_cache, mesh);
-#endif
-
-      for(i=gidX*__N__; i<NB_I; i+=(WI_NB*__N__)) {
-	/* Initialize result buffer */
-	__RCOMP_Igscal_loc__ID__[noBC_id(i+__NN__)] = 0.0;
-      }
-
-      /* Synchronize work-group */
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      for(i=gidX*PART_NB_PER_WI; i<(gidX + 1)*PART_NB_PER_WI; i+=__N__) {
-	/* Read Particle scalar */
-	__RCOMP_Is__ID__ = vload__N__((i + line_index)/__N__, pscal__ID__);
-	/* Compute particle position */
-	p = advection(i, dt, velocity_cache, mesh);
-	/* Remesh particle */
-	remesh(i, __RCOMP_Ps__ID__, p, __RCOMP_Pgscal_loc__ID__, mesh);
-      }
-
-      /* Synchronize work-group */
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      for(i=gidX*__N__; i<NB_I; i+=(WI_NB*__N__)) {
-	/* Store result */
-	__RCOMP_Ivstore__N__((float__N__)(gscal_loc__ID__[noBC_id(i+__NN__)],
-					  ), (i + line_index)/__N__, gscal__ID__);
-      }
-
-      /* Synchronize work-group */
-      barrier(CLK_LOCAL_MEM_FENCE);
-    }
-  }
-}
diff --git a/hysop/backend/device/opencl/cl_src/kernels/advection_and_remeshing_noVec.cl b/hysop/backend/device/opencl/cl_src/kernels/advection_and_remeshing_noVec.cl
deleted file mode 100644
index 5759dc6f56f201d248c1ec794598533d648aae38..0000000000000000000000000000000000000000
--- a/hysop/backend/device/opencl/cl_src/kernels/advection_and_remeshing_noVec.cl
+++ /dev/null
@@ -1,94 +0,0 @@
-/**
- * @file advection_and_remeshing.cl
- * Advection and remeshing kernel.
- */
-
-/**
- * Performs advection and then remeshing of the particles' scalar.
- * A work-group is handling a 1D problem. Thus, gidY and gidZ are constants among work-items of a work-group.
- * Each work-item computes NB_I/WI_NB particles positions. To avoid concurrent witings, in case of strong velocity gradients, work-items computes contiguous particles.
- * Particle are computed through OpenCL vector types of lenght 2, 4 or 8.
- * Scalar results are stored in a local buffer as a cache and then copied to global memory buffer.
- *
- * @param gvelo Velocity field
- * @param pscal Particle scalar
- * @param gscal Grid scalar
- * @param dt Time step
- * @param min_position Domain lower coordinate
- * @param dx Space step
- *
- * @remark NB_I, NB_II, NB_III : points number in directions from 1st varying index to last.
- * @remark WI_NB corresponds to the work-item number.
- * @remark \__N__ is expanded at compilation time by vector width.
- * @remark \__NN__ is expanded at compilation time by a sequence of integer for each vector component.
- * @remark <code>__RCOMP_I</code> flag is for instruction expansion for the different remeshed components.
- * @remark <code>__RCOMP_P</code> flag is for function parameter expansion for the different remeshed components.
- * @remark <code>__ID__</code> is replaced by the remeshed component id in an expansion.
- * @see hysop.gpu.tools.parse_file
- */
-__kernel void advection_and_remeshing(__global const float* gvelo,
-				      __RCOMP_P__global const float* pscal__ID__,
-				      __RCOMP_P__global float* gscal__ID__,
-				      float dt,
-#if ADVEC_IS_MULTISCALE
-				      float inv_v_dx_y, float inv_v_dx_z,
-#endif
-				      __constant struct AdvectionMeshInfo* mesh)
-{
-  uint gidX = get_global_id(0);	/* OpenCL work-itme global index (X) */
-  uint gidY; /* OpenCL work-itme global index (Y) */
-  uint gidZ; /* OpenCL work-itme global index (Z) */
-  uint i;			/* Particle index in 1D problem */
-  float p;			/* Particle position */
-  __RCOMP_I float s__ID__;	/* Particle scalar */
-  uint line_index; /* Current 1D problem index */
-
-  __RCOMP_I__local float gscal_loc__ID__[NB_I]; /* Local buffer for result */
-  __local float velocity_cache[V_NB_I]; /* Velocity cache */
-
-#ifdef NB_Z
-  for(gidZ=get_global_id(2); gidZ<NB_III; gidZ+=get_global_size(2)) {
-#else
-  gidZ=get_global_id(2); {
-#endif
-  for(gidY=get_global_id(1); gidY<NB_II; gidY+=get_global_size(1)) {
-  line_index = gidY*NB_I+ gidZ*NB_I*NB_II;
-
-#if ADVEC_IS_MULTISCALE
-  fill_velocity_cache(gvelo, gidX, gidY, gidZ, velocity_cache, inv_v_dx_y, inv_v_dx_z, mesh);
-#else
-  fill_velocity_cache(gvelo, gidX, gidY, gidZ, velocity_cache, mesh);
-#endif
-
-  for(i=gidX; i<NB_I; i+=(WI_NB))
-    {
-      /* Initialize result buffer */
-      __RCOMP_Igscal_loc__ID__[noBC_id(i)] = 0.0;
-    }
-
-  /* Synchronize work-group */
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  for(i=gidX*PART_NB_PER_WI; i<(gidX + 1)*PART_NB_PER_WI; i+=1)
-    {
-      /* Read Particle scalar */
-      __RCOMP_Is__ID__ = pscal__ID__[i + line_index];
-      /* Compute particle position */
-      p = advection(i, dt, velocity_cache, mesh);
-      /* Remesh particle */
-      remesh(i, __RCOMP_Ps__ID__, p, __RCOMP_Pgscal_loc__ID__, mesh);
-    }
-
-  /* Synchronize work-group */
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  for(i=gidX; i<NB_I; i+=(WI_NB))
-    {
-      /* Store result */
-      __RCOMP_Igscal__ID__[i + line_index] = gscal_loc__ID__[noBC_id(i)];
-    }
-
-  barrier(CLK_LOCAL_MEM_FENCE);
-}
-}
-}
diff --git a/hysop/backend/device/opencl/cl_src/kernels/advection_euler_and_remeshing_noVec.cl b/hysop/backend/device/opencl/cl_src/kernels/advection_euler_and_remeshing_noVec.cl
deleted file mode 100644
index 99565be69e6702c197cb77d021df46636c59620f..0000000000000000000000000000000000000000
--- a/hysop/backend/device/opencl/cl_src/kernels/advection_euler_and_remeshing_noVec.cl
+++ /dev/null
@@ -1,85 +0,0 @@
-/**
- * @file advection_and_remeshing.cl
- * Euler advection and remeshing kernel.
- */
-
-/**
- * Performs advection and then remeshing of the particles scalar.
- * A work-group is handling a 1D problem. Thus, gidY and gidZ are constants among work-items of a work-group. Computations of 1D problems are placed in loops over gidY and gidZ to adjust local workload and handle the work-item maximum size.
- * Each work-item computes NB_I/WI_NB particles positions. To avoid concurrent witings, in case of strong velocity gradients, work-items computes contiguous particles.
- * Particle are computed through OpenCL vector types of lenght 2, 4 or 8.
- * Scalar results are stored in a local buffer as a cache and then copied to global memory buffer.
- *
- * @param gvelo Velocity field
- * @param pscal Particle scalar
- * @param gscal Grid scalar
- * @param dt Time step
- * @param min_position Domain lower coordinate
- * @param dx Space step
- *
- * @remark NB_I, NB_II, NB_III : points number in directions from 1st varying index to last.
- * @remark WI_NB corresponds to the work-item number.
- * @remark \__N__ is expanded at compilation time by vector width.
- * @remark \__NN__ is expanded at compilation time by a sequence of integer for each vector component.
- * @remark <code>__RCOMP_I</code> flag is for instruction expansion for the different remeshed components.
- * @remark <code>__RCOMP_P</code> flag is for function parameter expansion for the different remeshed components.
- * @remark <code>__ID__</code> is replaced by the remeshed component id in an expansion.
- * @see hysop.gpu.tools.parse_file
- */
-__kernel void advection_and_remeshing(__global const float* gvelo,
-				      __RCOMP_P__global const float* pscal__ID__,
-				      __RCOMP_P__global float* gscal__ID__,
-				      float dt,
-				      __constant struct AdvectionMeshInfo* mesh)
-{
-  uint gidX = get_global_id(0);	/* OpenCL work-itme global index (X) */
-  uint gidY; /* OpenCL work-itme global index (Y) */
-  uint gidZ; /* OpenCL work-itme global index (Z) */
-  uint i;			/* Particle index in 1D problem */
-  float p,c;			/* Particle position */
-  __RCOMP_I float s__ID__;	/* Particle scalar */
-  uint line_index; /* Current 1D problem index */
-
-  __RCOMP_I__local float gscal_loc__ID__[NB_I]; /* Local buffer for result */
-
-  for(gidZ=get_global_id(2);
-#ifdef NB_Z
-      gidZ<NB_III;
-#else
-      gidZ<=get_global_id(2); // Single element loop
-#endif
-      gidZ+=get_global_size(2)) {
-    for(gidY=get_global_id(1); gidY<NB_II; gidY+=get_global_size(1)) {
-
-      // 1D computations
-      line_index = gidY*NB_I+ gidZ*NB_I*NB_II;
-
-      for(i=gidX; i<NB_I; i+=(WI_NB)) {
-	/* Initialize result buffer */
-	__RCOMP_Igscal_loc__ID__[noBC_id(i)] = 0.0;
-      }
-
-      /* Synchronize work-group */
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      for(i=gidX*PART_NB_PER_WI; i<(gidX + 1)*PART_NB_PER_WI; i+=1) {
-	/* Read Particle scalar */
-	__RCOMP_Is__ID__ = pscal__ID__[i + line_index];
-	/* Compute particle position */
-	c = fma(i, mesh->dx.x, mesh->min_position);
-	p = fma(dt, gvelo[i+line_index], c);
-	/* Remesh particle */
-	remesh(i, __RCOMP_Ps__ID__, p, __RCOMP_Pgscal_loc__ID__, mesh);
-      }
-
-      /* Synchronize work-group */
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      for(i=gidX; i<NB_I; i+=(WI_NB)) {
-	/* Store result */
-	__RCOMP_Igscal__ID__[i + line_index] = gscal_loc__ID__[noBC_id(i)];
-      }
-      barrier(CLK_LOCAL_MEM_FENCE);
-    }
-  }
-}
diff --git a/hysop/backend/device/opencl/cl_src/kernels/advection_euler_noVec.cl b/hysop/backend/device/opencl/cl_src/kernels/advection_euler_noVec.cl
deleted file mode 100644
index df90575a224d57ebd3485bd9a8e3f9128249c0be..0000000000000000000000000000000000000000
--- a/hysop/backend/device/opencl/cl_src/kernels/advection_euler_noVec.cl
+++ /dev/null
@@ -1,51 +0,0 @@
-/**
- * @file advection_euler_noVec.cl
- * Advection kernel, basic version for Euler integrator for simple scale problems (no need velocity cache).
- */
-
-/**
- * Computes particles positions from the velocity field.
- * A work-group is handling a 1D problem. Thus, gidY and gidZ are constants among work-items of a work-group.
- * Each work-item computes NB_I/WI_NB particles positions.
- *
- * @param gvelo Velocity.
- * @param ppos Particle position.
- * @param dt Time step.
- * @param min_position Domain lower coordinate.
- * @param dx Space step.
- *
- * @remark NB_I, NB_II, NB_III : points number in directions from 1st varying index to last.
- * @remark WI_NB corresponds to the work-item number.
- */
-__kernel void advection_kernel(__global const float* gvelo,
-			       __global float* ppos,
-			       float dt,
-			       __constant struct AdvectionMeshInfo* mesh)
-{
-  uint gidX = get_global_id(0);	/* OpenCL work-itme global index (X) */
-  uint gidY; /* OpenCL work-itme global index (Y) */
-  uint gidZ; /* OpenCL work-itme global index (Z) */
-  uint i;			/* Particle index in 1D problem */
-  uint line_index; /* Current 1D problem index */
-  float c;
-
-  for(gidZ=get_global_id(2);
-#ifdef NB_Z
-      gidZ<NB_III;
-#else
-      gidZ<=get_global_id(2); // Single element loop
-#endif
-      gidZ+=get_global_size(2)) {
-    for(gidY=get_global_id(1); gidY<NB_II; gidY+=get_global_size(1)) {
-
-      //1D computations
-      line_index = gidY*NB_I+ gidZ*NB_I*NB_II;
-
-      for(i=gidX; i<NB_I; i+=WI_NB) {
-	c = fma(i, mesh->dx.x, mesh->min_position);
-	ppos[i+line_index] =  fma(dt, gvelo[i+line_index], c);
-      }
-      barrier(CLK_LOCAL_MEM_FENCE);
-    }
-  }
-}
diff --git a/hysop/backend/device/opencl/cl_src/kernels/advection_noVec.cl b/hysop/backend/device/opencl/cl_src/kernels/advection_noVec.cl
deleted file mode 100644
index 78ca64d6108809df5d4db707017f94a7d96d5b72..0000000000000000000000000000000000000000
--- a/hysop/backend/device/opencl/cl_src/kernels/advection_noVec.cl
+++ /dev/null
@@ -1,63 +0,0 @@
-/**
- * @file advection_noVec.cl
- * Advection kernel, basic version.
- */
-
-/**
- * Computes particles positions from the velocity field.
- * A work-group is handling a 1D problem. Thus, gidY and gidZ are constants among work-items of a work-group.
- * Each work-item computes NB_I/WI_NB particles positions.
- *
- * @param gvelo Velocity.
- * @param ppos Particle position.
- * @param dt Time step.
- * @param min_position Domain lower coordinate.
- * @param dx Space step.
- *
- * @remark NB_I, NB_II, NB_III : points number in directions from 1st varying index to last.
- * @remark WI_NB corresponds to the work-item number.
- */
-__kernel void advection_kernel(__global const float* gvelo,
-			       __global float* ppos,
-			       float dt,
-#if ADVEC_IS_MULTISCALE
-			       float inv_v_dx_y, float inv_v_dx_z,
-#endif
-			       __constant struct AdvectionMeshInfo* mesh)
-{
-  uint gidX = get_global_id(0);	/* OpenCL work-itme global index (X) */
-  uint gidY; /* OpenCL work-itme global index (Y) */
-  uint gidZ; /* OpenCL work-itme global index (Z) */
-  uint i;			/* Particle index in 1D problem */
-  uint line_index; /* Current 1D problem index */
-
-  __local float velocity_cache[V_NB_I]; /* Velocity cache */
-
-  for(gidZ=get_global_id(2);
-#ifdef NB_Z
-      gidZ<NB_III;
-#else
-      gidZ<=get_global_id(2);  // Single element loop
-#endif
-      gidZ+=get_global_size(2)) {
-    for(gidY=get_global_id(1); gidY<NB_II; gidY+=get_global_size(1)) {
-
-      // 1D computation
-      line_index = gidY*NB_I+ gidZ*NB_I*NB_II;
-
-#if ADVEC_IS_MULTISCALE
-      fill_velocity_cache(gvelo, gidX, gidY, gidZ, velocity_cache, inv_v_dx_y, inv_v_dx_z, mesh);
-#else
-      fill_velocity_cache(gvelo, gidX, gidY, gidZ, velocity_cache, mesh);
-#endif
-
-      /* Synchronize work-group */
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      for(i=gidX; i<NB_I; i+=WI_NB) {
-	ppos[i+line_index] = advection(i, dt, velocity_cache, mesh);
-      }
-      barrier(CLK_LOCAL_MEM_FENCE);
-    }
-  }
-}
diff --git a/hysop/backend/device/opencl/cl_src/kernels/comm_MS_advection_noVec.cl b/hysop/backend/device/opencl/cl_src/kernels/comm_MS_advection_noVec.cl
deleted file mode 100644
index 2f7193be4698c21f37cb2e49ebd7b9ae18c7364f..0000000000000000000000000000000000000000
--- a/hysop/backend/device/opencl/cl_src/kernels/comm_MS_advection_noVec.cl
+++ /dev/null
@@ -1,87 +0,0 @@
-
-
-
-
-__kernel void buff_advec(__global const float* gvelo,
-			 __global float* ppos,
-			 __global float* buffer_l,
-			 __global float* buffer_r,
-			 float dt,
-			 float inv_v_dx_y, float inv_v_dx_z,
-			 __constant struct AdvectionMeshInfo* mesh)
-{
-  int gidX = get_global_id(0);	/* OpenCL work-itme global index (X) */
-  int gidY = get_global_id(1); /* OpenCL work-itme global index (Y) */
-  int gidZ = get_global_id(2); /* OpenCL work-itme global index (Z) */
-  int i;			/* Particle index in 1D problem */
-  int line_index = gidY*NB_I+gidZ*NB_I*NB_II; /* Current 1D problem index */
-  float p,v,c;
-  float2 hY, hZ;
-  int i_ind, i_indY, i_indZ;
-
-
-  __local float velocity_cache[V_NB_I];
-  __local float buff_l_loc[V_BUFF_WIDTH];
-  __local float buff_r_loc[V_BUFF_WIDTH];
-  __local float* loc_ptr;
-
-
-  hY.s0 = (gidY * mesh->dx.y) * inv_v_dx_y;
-  hZ.s0 = (gidZ * mesh->dx.z) * inv_v_dx_z;
-  i_indY = convert_int_rtn(hY.s0);
-  i_indZ = convert_int_rtn(hZ.s0);
-  hY.s0 = hY.s0 - convert_float(i_indY);
-  hZ.s0 = hZ.s0 - convert_float(i_indZ);
-  hY.s1 = (1.0-hY.s0);
-  hZ.s1 = (1.0-hZ.s0);
-
-  i_indY = i_indY + V_GHOSTS_NB;
-  i_indZ = i_indZ + V_GHOSTS_NB;
-
-  for(i=gidX; i<V_NB_I; i+=(WI_NB)){
-    velocity_cache[noBC_id(i)] = hY.s1*hZ.s1 * gvelo[i + i_indY * V_NB_I + i_indZ * V_NB_I * V_NB_II];
-    velocity_cache[noBC_id(i)] += hY.s1*hZ.s0 * gvelo[i + i_indY * V_NB_I + (i_indZ + 1) * V_NB_I * V_NB_II];
-    velocity_cache[noBC_id(i)] += hY.s0*hZ.s1 * gvelo[i + (i_indY + 1) * V_NB_I + i_indZ * V_NB_I * V_NB_II];
-    velocity_cache[noBC_id(i)] += hY.s0*hZ.s0 * gvelo[i + (i_indY + 1) * V_NB_I + (i_indZ + 1) * V_NB_I * V_NB_II];
-  }
-
-  for(i=gidX; i<V_BUFF_WIDTH; i+=(WI_NB)){
-    buff_l_loc[i] = hY.s1*hZ.s1*buffer_l[i + V_BUFF_WIDTH*(i_indY + i_indZ*V_NB_II)];
-    buff_l_loc[i] += hY.s1*hZ.s0*buffer_l[i + V_BUFF_WIDTH*(i_indY + (i_indZ+1)*V_NB_II)];
-    buff_l_loc[i] += hY.s0*hZ.s1*buffer_l[i + V_BUFF_WIDTH*(i_indY+1 + i_indZ*V_NB_II)];
-    buff_l_loc[i] += hY.s0*hZ.s0*buffer_l[i + V_BUFF_WIDTH*(i_indY+1 + (i_indZ+1)*V_NB_II)];
-  }
-
-  for(i=gidX; i<V_BUFF_WIDTH; i+=(WI_NB)){
-    buff_r_loc[i] = hY.s1*hZ.s1*buffer_r[i + V_BUFF_WIDTH*(i_indY + i_indZ*V_NB_II)];
-    buff_r_loc[i] += hY.s1*hZ.s0*buffer_r[i + V_BUFF_WIDTH*(i_indY + (i_indZ+1)*V_NB_II)];
-    buff_r_loc[i] += hY.s0*hZ.s1*buffer_r[i + V_BUFF_WIDTH*(i_indY+1 + i_indZ*V_NB_II)];
-    buff_r_loc[i] += hY.s0*hZ.s0*buffer_r[i + V_BUFF_WIDTH*(i_indY+1 + (i_indZ+1)*V_NB_II)];
-  }
-
-  /* Synchronize work-group */
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  for(i=gidX; i<NB_I; i+=WI_NB)
-    {
-      c = i * mesh->dx.x + mesh->min_position;
-      // multi-scale : interpolate v from velocity buffer (of length V_NB_I)
-      p = c * mesh->v_invdx;
-      i_ind = convert_int_rtn(p);
-      p = p - convert_float(i_ind);
-      i_ind = i_ind - (V_START_INDEX-V_GHOSTS_NB) - MS_INTERPOL_SHIFT;
-      v = mix(velocity_cache[noBC_id(i_ind)],
-	      velocity_cache[noBC_id(i_ind+1)],p);
-      p = (c + 0.5*dt*v) * mesh->v_invdx;
-
-      i_ind = convert_int_rtn(p) - MS_INTERPOL_SHIFT;
-      p = p - convert_float(i_ind);
-      loc_ptr = (i_ind>=(V_START_INDEX-V_GHOSTS_NB) && i_ind <= (V_STOP_INDEX+V_GHOSTS_NB)) ? velocity_cache+noBC_id(i_ind - (V_START_INDEX-V_GHOSTS_NB)) : (i_ind<(V_START_INDEX-V_GHOSTS_NB)) ? buff_l_loc+i_ind-(V_START_INDEX-V_GHOSTS_NB-1-V_BUFF_WIDTH+1)  : buff_r_loc+i_ind-(V_STOP_INDEX+V_GHOSTS_NB+1) ;
-      v = (1.0-p)*(*loc_ptr);
-      i_ind = i_ind + 1;
-      loc_ptr = (i_ind>=(V_START_INDEX-V_GHOSTS_NB) && i_ind <= (V_STOP_INDEX+V_GHOSTS_NB)) ? velocity_cache+noBC_id(i_ind - (V_START_INDEX-V_GHOSTS_NB)) : (i_ind<(V_START_INDEX-V_GHOSTS_NB)) ? buff_l_loc+i_ind-(V_START_INDEX-V_GHOSTS_NB-1-V_BUFF_WIDTH+1)  : buff_r_loc+i_ind-(V_STOP_INDEX+V_GHOSTS_NB+1) ;
-      v += p*(*loc_ptr);
-      ppos[i+line_index] = c + dt * v;
-    }
-
-}
diff --git a/hysop/backend/device/opencl/cl_src/kernels/comm_advection_MS_and_remeshing_noVec.cl b/hysop/backend/device/opencl/cl_src/kernels/comm_advection_MS_and_remeshing_noVec.cl
deleted file mode 100644
index 8e9ea780a2aafb23ff5067028ee7dfb3d7966667..0000000000000000000000000000000000000000
--- a/hysop/backend/device/opencl/cl_src/kernels/comm_advection_MS_and_remeshing_noVec.cl
+++ /dev/null
@@ -1,547 +0,0 @@
-
-
-
-
-__kernel void buff_advec_and_remesh_l(__global const float* gvelo,
-				      __global float* v_l_buff,
-				      __global const float* pscal,
-				      __global float* s_l_buff,
-				      int used_width,
-				      float dt,
-				      float inv_v_dx_y, float inv_v_dx_z,
-				      __constant struct AdvectionMeshInfo* mesh)
-{
-  int gidY = get_global_id(0); /* OpenCL work-itme global index (Y) */
-  int gidZ = get_global_id(1); /* OpenCL work-itme global index (Z) */
-  int i;			/* Particle index in 1D problem */
-  int line_index = gidY*NB_I+gidZ*NB_I*NB_II; /* Current 1D problem index */
-  float p,v,c,s,y,w;
-  float2 hY, hZ;
-  int i_ind, i_indY, i_indZ;
-  int ind, index;
-
-
-  float velocity_cache[V_NB_I];
-  float v_l_buff_loc[V_BUFF_WIDTH];
-  float s_l_buff_loc[BUFF_WIDTH];
-  float* loc_ptr;
-
-  // Initialize buffers
-  for (i=0; i<used_width; i++)
-    s_l_buff_loc[i] = 0.0;
-
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  hY.s0 = (gidY * mesh->dx.y) * inv_v_dx_y;
-  hZ.s0 = (gidZ * mesh->dx.z) * inv_v_dx_z;
-  i_indY = convert_int_rtn(hY.s0);
-  i_indZ = convert_int_rtn(hZ.s0);
-  hY.s0 = hY.s0 - convert_float(i_indY);
-  hZ.s0 = hZ.s0 - convert_float(i_indZ);
-  hY.s1 = (1.0-hY.s0);
-  hZ.s1 = (1.0-hZ.s0);
-
-  i_indY = i_indY + V_GHOSTS_NB;
-  i_indZ = i_indZ + V_GHOSTS_NB;
-
-  for (i=0; i<V_NB_I; i++){
-    velocity_cache[noBC_id(i)] = hY.s1*hZ.s1 * gvelo[i + i_indY * V_NB_I + i_indZ * V_NB_I * V_NB_II];
-    velocity_cache[noBC_id(i)] += hY.s1*hZ.s0 * gvelo[i + i_indY * V_NB_I + (i_indZ + 1) * V_NB_I * V_NB_II];
-    velocity_cache[noBC_id(i)] += hY.s0*hZ.s1 * gvelo[i + (i_indY + 1) * V_NB_I + i_indZ * V_NB_I * V_NB_II];
-    velocity_cache[noBC_id(i)] += hY.s0*hZ.s0 * gvelo[i + (i_indY + 1) * V_NB_I + (i_indZ + 1) * V_NB_I * V_NB_II];
-  }
-
-  for (i=0; i<V_BUFF_WIDTH; i++){
-    v_l_buff_loc[i] = hY.s1*hZ.s1*v_l_buff[i + V_BUFF_WIDTH*(i_indY + i_indZ*V_NB_II)];
-    v_l_buff_loc[i] += hY.s1*hZ.s0*v_l_buff[i + V_BUFF_WIDTH*(i_indY + (i_indZ+1)*V_NB_II)];
-    v_l_buff_loc[i] += hY.s0*hZ.s1*v_l_buff[i + V_BUFF_WIDTH*(i_indY+1 + i_indZ*V_NB_II)];
-    v_l_buff_loc[i] += hY.s0*hZ.s0*v_l_buff[i + V_BUFF_WIDTH*(i_indY+1 + (i_indZ+1)*V_NB_II)];
-  }
-
-  /* Synchronize work-group */
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  for(i=0; i<2*BUFF_WIDTH; i++)
-    {
-      c = i * mesh->dx.x + mesh->min_position;
-      // multi-scale : interpolate v from velocity buffer (of length V_NB_I)
-      p = c * mesh->v_invdx;
-      i_ind = convert_int_rtn(p);
-      p = p - convert_float(i_ind);
-      i_ind = i_ind - (V_START_INDEX-V_GHOSTS_NB) - MS_INTERPOL_SHIFT;
-      v = mix(velocity_cache[noBC_id(i_ind)],
-	      velocity_cache[noBC_id(i_ind+1)],p);
-      p = (c + 0.5*dt*v) * mesh->v_invdx;
-
-      i_ind = convert_int_rtn(p) - MS_INTERPOL_SHIFT;
-      p = p - convert_float(i_ind);
-      loc_ptr = (i_ind>=(V_START_INDEX-V_GHOSTS_NB)) ? velocity_cache+noBC_id(i_ind - (V_START_INDEX-V_GHOSTS_NB)) : v_l_buff_loc+i_ind-(V_START_INDEX-V_GHOSTS_NB-1-V_BUFF_WIDTH+1);
-      v = (1.0-p)*(*loc_ptr);
-      i_ind = i_ind + 1;
-      loc_ptr = (i_ind>=(V_START_INDEX-V_GHOSTS_NB)) ? velocity_cache+noBC_id(i_ind - (V_START_INDEX-V_GHOSTS_NB)) : v_l_buff_loc+i_ind-(V_START_INDEX-V_GHOSTS_NB-1-V_BUFF_WIDTH+1);
-      v += p*(*loc_ptr);
-      p = c + dt * v;
-
-
-      /* Read particle scalar */
-      s = pscal[i + line_index];
-
-
-
-
-      ind = convert_int_rtn(p * mesh->invdx);
-      y = (p - convert_float(ind) * mesh->dx.x) * mesh->invdx;
-
-      index = ind - REMESH_SHIFT;
-
-      w = REMESH(alpha)(y);
-      if (index<START_INDEX) {loc_ptr = s_l_buff_loc+index-(START_INDEX-1-used_width+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(beta)(y);
-      if (index<START_INDEX) {loc_ptr = s_l_buff_loc+index-(START_INDEX-1-used_width+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(gamma)(y);
-      if (index<START_INDEX) {loc_ptr = s_l_buff_loc+index-(START_INDEX-1-used_width+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(delta)(y);
-      if (index<START_INDEX) {loc_ptr = s_l_buff_loc+index-(START_INDEX-1-used_width+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-#if REMESH_SHIFT > 1
-      index = index + 1;
-      w = REMESH(eta)(y);
-      if (index<START_INDEX) {loc_ptr = s_l_buff_loc+index-(START_INDEX-1-used_width+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(zeta)(y);
-      if (index<START_INDEX) {loc_ptr = s_l_buff_loc+index-(START_INDEX-1-used_width+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-#endif
-
-#if REMESH_SHIFT > 2
-      index = index + 1;
-      w = REMESH(theta)(y);
-      if (index<START_INDEX) {loc_ptr = s_l_buff_loc+index-(START_INDEX-1-used_width+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(iota)(y);
-      if (index<START_INDEX) {loc_ptr = s_l_buff_loc+index-(START_INDEX-1-used_width+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-#endif
-
-#if REMESH_SHIFT > 3
-      index = index + 1;
-      w = REMESH(kappa)(y);
-      if (index<START_INDEX) {loc_ptr = s_l_buff_loc+index-(START_INDEX-1-used_width+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(mu)(y);
-      if (index<START_INDEX) {loc_ptr = s_l_buff_loc+index-(START_INDEX-1-used_width+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-#endif
-    }
-
-  /* Synchronize work-group */
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  // Store buffers
-  for (i=0; i<used_width; i++)
-    s_l_buff[i + gidY*used_width + gidZ*used_width*NB_II] = s_l_buff_loc[i];
-
-}
-
-__kernel void buff_advec_and_remesh_r(__global const float* gvelo,
-				      __global float* v_r_buff,
-				      __global const float* pscal,
-				      __global float* s_r_buff,
-				      int used_width,
-				      float dt,
-				      float inv_v_dx_y, float inv_v_dx_z,
-				      __constant struct AdvectionMeshInfo* mesh)
-{
-  int gidY = get_global_id(0); /* OpenCL work-itme global index (Y) */
-  int gidZ = get_global_id(1); /* OpenCL work-itme global index (Z) */
-  int i;			/* Particle index in 1D problem */
-  int line_index = gidY*NB_I+gidZ*NB_I*NB_II; /* Current 1D problem index */
-  float p,v,c,s,y,w;
-  float2 hY, hZ;
-  int i_ind, i_indY, i_indZ;
-  int ind, index;
-
-
-  float velocity_cache[V_NB_I];
-  float v_r_buff_loc[V_BUFF_WIDTH];
-  float s_r_buff_loc[BUFF_WIDTH];
-  float* loc_ptr;
-
-  // Initialize buffers
-  for(i=0; i<used_width; i++)
-    s_r_buff_loc[i] = 0.0;
-
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  hY.s0 = (gidY * mesh->dx.y) * inv_v_dx_y;
-  hZ.s0 = (gidZ * mesh->dx.z) * inv_v_dx_z;
-  i_indY = convert_int_rtn(hY.s0);
-  i_indZ = convert_int_rtn(hZ.s0);
-  hY.s0 = hY.s0 - convert_float(i_indY);
-  hZ.s0 = hZ.s0 - convert_float(i_indZ);
-  hY.s1 = (1.0-hY.s0);
-  hZ.s1 = (1.0-hZ.s0);
-
-  i_indY = i_indY + V_GHOSTS_NB;
-  i_indZ = i_indZ + V_GHOSTS_NB;
-
-  for(i=0;i<V_NB_I; i++){
-    velocity_cache[noBC_id(i)] = hY.s1*hZ.s1 * gvelo[i + i_indY * V_NB_I + i_indZ * V_NB_I * V_NB_II];
-    velocity_cache[noBC_id(i)] += hY.s1*hZ.s0 * gvelo[i + i_indY * V_NB_I + (i_indZ + 1) * V_NB_I * V_NB_II];
-    velocity_cache[noBC_id(i)] += hY.s0*hZ.s1 * gvelo[i + (i_indY + 1) * V_NB_I + i_indZ * V_NB_I * V_NB_II];
-    velocity_cache[noBC_id(i)] += hY.s0*hZ.s0 * gvelo[i + (i_indY + 1) * V_NB_I + (i_indZ + 1) * V_NB_I * V_NB_II];
-  }
-
-  for(i=0;i<V_BUFF_WIDTH; i++){
-    v_r_buff_loc[i] = hY.s1*hZ.s1*v_r_buff[i + V_BUFF_WIDTH*(i_indY + i_indZ*V_NB_II)];
-    v_r_buff_loc[i] += hY.s1*hZ.s0*v_r_buff[i + V_BUFF_WIDTH*(i_indY + (i_indZ+1)*V_NB_II)];
-    v_r_buff_loc[i] += hY.s0*hZ.s1*v_r_buff[i + V_BUFF_WIDTH*(i_indY+1 + i_indZ*V_NB_II)];
-    v_r_buff_loc[i] += hY.s0*hZ.s0*v_r_buff[i + V_BUFF_WIDTH*(i_indY+1 + (i_indZ+1)*V_NB_II)];
-  }
-
-  /* Synchronize work-group */
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  for(i=NB_I-2*BUFF_WIDTH; i<NB_I; i++)
-    {
-      c = i * mesh->dx.x + mesh->min_position;
-      // multi-scale : interpolate v from velocity buffer (of length V_NB_I)
-      p = c * mesh->v_invdx;
-      i_ind = convert_int_rtn(p);
-      p = p - convert_float(i_ind);
-      i_ind = i_ind - (V_START_INDEX-V_GHOSTS_NB) - MS_INTERPOL_SHIFT;
-      v = mix(velocity_cache[noBC_id(i_ind)],
-	      velocity_cache[noBC_id(i_ind+1)],p);
-      p = (c + 0.5*dt*v) * mesh->v_invdx;
-
-      i_ind = convert_int_rtn(p) - MS_INTERPOL_SHIFT;
-      p = p - convert_float(i_ind);
-      loc_ptr = (i_ind <= (V_STOP_INDEX+V_GHOSTS_NB)) ? velocity_cache+noBC_id(i_ind - (V_START_INDEX-V_GHOSTS_NB)) : v_r_buff_loc+i_ind-(V_STOP_INDEX+V_GHOSTS_NB+1) ;
-      v = (1.0-p)*(*loc_ptr);
-      i_ind = i_ind + 1;
-      loc_ptr = (i_ind <= (V_STOP_INDEX+V_GHOSTS_NB)) ? velocity_cache+noBC_id(i_ind - (V_START_INDEX-V_GHOSTS_NB)) : v_r_buff_loc+i_ind-(V_STOP_INDEX+V_GHOSTS_NB+1) ;
-      v += p*(*loc_ptr);
-      p = c + dt * v;
-
-
-      /* Read particle scalar */
-      s = pscal[i + line_index];
-
-
-
-
-      ind = convert_int_rtn(p * mesh->invdx);
-      y = (p - convert_float(ind) * mesh->dx.x) * mesh->invdx;
-
-      index = ind - REMESH_SHIFT;
-
-      w = REMESH(alpha)(y);
-      if (index > STOP_INDEX){ loc_ptr = s_r_buff_loc + index-(STOP_INDEX+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(beta)(y);
-      if (index > STOP_INDEX){ loc_ptr = s_r_buff_loc + index-(STOP_INDEX+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(gamma)(y);
-      if (index > STOP_INDEX){ loc_ptr = s_r_buff_loc + index-(STOP_INDEX+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(delta)(y);
-      if (index > STOP_INDEX){ loc_ptr = s_r_buff_loc + index-(STOP_INDEX+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-#if REMESH_SHIFT > 1
-      index = index + 1;
-      w = REMESH(eta)(y);
-      if (index > STOP_INDEX){ loc_ptr = s_r_buff_loc + index-(STOP_INDEX+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(zeta)(y);
-      if (index > STOP_INDEX){ loc_ptr = s_r_buff_loc + index-(STOP_INDEX+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-#endif
-
-#if REMESH_SHIFT > 2
-      index = index + 1;
-      w = REMESH(theta)(y);
-      if (index > STOP_INDEX){ loc_ptr = s_r_buff_loc + index-(STOP_INDEX+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(iota)(y);
-      if (index > STOP_INDEX){ loc_ptr = s_r_buff_loc + index-(STOP_INDEX+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-#endif
-
-#if REMESH_SHIFT > 3
-      index = index + 1;
-      w = REMESH(kappa)(y);
-      if (index > STOP_INDEX){ loc_ptr = s_r_buff_loc + index-(STOP_INDEX+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(mu)(y);
-      if (index > STOP_INDEX){ loc_ptr = s_r_buff_loc + index-(STOP_INDEX+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-#endif
-    }
-
-  /* Synchronize work-group */
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  // Store buffers
-  for(i=0;i<used_width;i++)
-    s_r_buff[i + gidY*used_width + gidZ*used_width*NB_II] = s_r_buff_loc[i];
-
-}
-
-__kernel void buff_advec_and_remesh(__global const float* gvelo,
-				      __global float* v_l_buff,
-				      __global float* v_r_buff,
-				      __global const float* pscal,
-				      __global float* gscal,
-				      float dt,
-				      float inv_v_dx_y, float inv_v_dx_z,
-				      __constant struct AdvectionMeshInfo* mesh)
-{
-  int gidX = get_global_id(0);	/* OpenCL work-itme global index (X) */
-  int gidY = get_global_id(1); /* OpenCL work-itme global index (Y) */
-  int gidZ = get_global_id(2); /* OpenCL work-itme global index (Z) */
-  int i;			/* Particle index in 1D problem */
-  int line_index = gidY*NB_I+gidZ*NB_I*NB_II; /* Current 1D problem index */
-  float p,v,c,s,y,w;
-  float2 hY, hZ;
-  int i_ind, i_indY, i_indZ;
-  int ind, index;
-
-
-  __local float velocity_cache[V_NB_I];
-  __local float v_l_buff_loc[V_BUFF_WIDTH];
-  __local float v_r_buff_loc[V_BUFF_WIDTH];
-  __local float gscal_loc[NB_I];
-  __local float* loc_ptr;
-
-  for(i=gidX; i<NB_I; i+=WI_NB)
-    {
-      /* Initialize result buffer */
-      gscal_loc[i] = 0.0;
-    }
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-
-
-  hY.s0 = (gidY * mesh->dx.y) * inv_v_dx_y;
-  hZ.s0 = (gidZ * mesh->dx.z) * inv_v_dx_z;
-  i_indY = convert_int_rtn(hY.s0);
-  i_indZ = convert_int_rtn(hZ.s0);
-  hY.s0 = hY.s0 - convert_float(i_indY);
-  hZ.s0 = hZ.s0 - convert_float(i_indZ);
-  hY.s1 = (1.0-hY.s0);
-  hZ.s1 = (1.0-hZ.s0);
-
-  i_indY = i_indY + V_GHOSTS_NB;
-  i_indZ = i_indZ + V_GHOSTS_NB;
-
-  for(i=gidX; i<V_NB_I; i+=(WI_NB)){
-    velocity_cache[noBC_id(i)] = hY.s1*hZ.s1 * gvelo[i + i_indY * V_NB_I + i_indZ * V_NB_I * V_NB_II];
-    velocity_cache[noBC_id(i)] += hY.s1*hZ.s0 * gvelo[i + i_indY * V_NB_I + (i_indZ + 1) * V_NB_I * V_NB_II];
-    velocity_cache[noBC_id(i)] += hY.s0*hZ.s1 * gvelo[i + (i_indY + 1) * V_NB_I + i_indZ * V_NB_I * V_NB_II];
-    velocity_cache[noBC_id(i)] += hY.s0*hZ.s0 * gvelo[i + (i_indY + 1) * V_NB_I + (i_indZ + 1) * V_NB_I * V_NB_II];
-  }
-
-  for(i=gidX; i<V_BUFF_WIDTH; i+=(WI_NB)){
-    v_l_buff_loc[i] = hY.s1*hZ.s1*v_l_buff[i + V_BUFF_WIDTH*(i_indY + i_indZ*V_NB_II)];
-    v_l_buff_loc[i] += hY.s1*hZ.s0*v_l_buff[i + V_BUFF_WIDTH*(i_indY + (i_indZ+1)*V_NB_II)];
-    v_l_buff_loc[i] += hY.s0*hZ.s1*v_l_buff[i + V_BUFF_WIDTH*(i_indY+1 + i_indZ*V_NB_II)];
-    v_l_buff_loc[i] += hY.s0*hZ.s0*v_l_buff[i + V_BUFF_WIDTH*(i_indY+1 + (i_indZ+1)*V_NB_II)];
-  }
-
-  for(i=gidX; i<V_BUFF_WIDTH; i+=(WI_NB)){
-    v_r_buff_loc[i] = hY.s1*hZ.s1*v_r_buff[i + V_BUFF_WIDTH*(i_indY + i_indZ*V_NB_II)];
-    v_r_buff_loc[i] += hY.s1*hZ.s0*v_r_buff[i + V_BUFF_WIDTH*(i_indY + (i_indZ+1)*V_NB_II)];
-    v_r_buff_loc[i] += hY.s0*hZ.s1*v_r_buff[i + V_BUFF_WIDTH*(i_indY+1 + i_indZ*V_NB_II)];
-    v_r_buff_loc[i] += hY.s0*hZ.s0*v_r_buff[i + V_BUFF_WIDTH*(i_indY+1 + (i_indZ+1)*V_NB_II)];
-  }
-
-  /* Synchronize work-group */
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  for(i=gidX*PART_NB_PER_WI; i<(gidX + 1)*PART_NB_PER_WI; i+=1)
-    {
-      c = i * mesh->dx.x + mesh->min_position;
-      // multi-scale : interpolate v from velocity buffer (of length V_NB_I)
-      p = c * mesh->v_invdx;
-      i_ind = convert_int_rtn(p);
-      p = p - convert_float(i_ind);
-      i_ind = i_ind - (V_START_INDEX-V_GHOSTS_NB) - MS_INTERPOL_SHIFT;
-      v = mix(velocity_cache[noBC_id(i_ind)],
-	      velocity_cache[noBC_id(i_ind+1)],p);
-      p = (c + 0.5*dt*v) * mesh->v_invdx;
-
-      i_ind = convert_int_rtn(p) - MS_INTERPOL_SHIFT;
-      p = p - convert_float(i_ind);
-      loc_ptr = (i_ind>=(V_START_INDEX-V_GHOSTS_NB) && i_ind <= (V_STOP_INDEX+V_GHOSTS_NB)) ? velocity_cache+noBC_id(i_ind - (V_START_INDEX-V_GHOSTS_NB)) : (i_ind<(V_START_INDEX-V_GHOSTS_NB)) ? v_l_buff_loc+i_ind-(V_START_INDEX-V_GHOSTS_NB-1-V_BUFF_WIDTH+1)  : v_r_buff_loc+i_ind-(V_STOP_INDEX+V_GHOSTS_NB+1) ;
-      v = (1.0-p)*(*loc_ptr);
-      i_ind = i_ind + 1;
-      loc_ptr = (i_ind>=(V_START_INDEX-V_GHOSTS_NB) && i_ind <= (V_STOP_INDEX+V_GHOSTS_NB)) ? velocity_cache+noBC_id(i_ind - (V_START_INDEX-V_GHOSTS_NB)) : (i_ind<(V_START_INDEX-V_GHOSTS_NB)) ? v_l_buff_loc+i_ind-(V_START_INDEX-V_GHOSTS_NB-1-V_BUFF_WIDTH+1)  : v_r_buff_loc+i_ind-(V_STOP_INDEX+V_GHOSTS_NB+1) ;
-      v += p*(*loc_ptr);
-      p = c + dt * v;
-
-
-      /* Read particle scalar */
-      s = pscal[i + line_index];
-
-
-
-
-      ind = convert_int_rtn(p * mesh->invdx);
-      y = (p - convert_float(ind) * mesh->dx.x) * mesh->invdx;
-
-      index = ind - REMESH_SHIFT;
-
-      w = REMESH(alpha)(y);
-      if (index>=START_INDEX && index <= STOP_INDEX){ loc_ptr = gscal_loc +index-START_INDEX;
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(beta)(y);
-      if (index>=START_INDEX && index <= STOP_INDEX){ loc_ptr = gscal_loc +index-START_INDEX;
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(gamma)(y);
-      if (index>=START_INDEX && index <= STOP_INDEX){ loc_ptr = gscal_loc +index-START_INDEX;
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(delta)(y);
-      if (index>=START_INDEX && index <= STOP_INDEX){ loc_ptr = gscal_loc +index-START_INDEX;
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-#if REMESH_SHIFT > 1
-      index = index + 1;
-      w = REMESH(eta)(y);
-      if (index>=START_INDEX && index <= STOP_INDEX){ loc_ptr = gscal_loc +index-START_INDEX;
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(zeta)(y);
-      if (index>=START_INDEX && index <= STOP_INDEX){ loc_ptr = gscal_loc +index-START_INDEX;
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-#endif
-
-#if REMESH_SHIFT > 2
-      index = index + 1;
-      w = REMESH(theta)(y);
-      if (index>=START_INDEX && index <= STOP_INDEX){ loc_ptr = gscal_loc +index-START_INDEX;
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(iota)(y);
-      if (index>=START_INDEX && index <= STOP_INDEX){ loc_ptr = gscal_loc +index-START_INDEX;
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-#endif
-
-#if REMESH_SHIFT > 3
-      index = index + 1;
-      w = REMESH(kappa)(y);
-      if (index>=START_INDEX && index <= STOP_INDEX){ loc_ptr = gscal_loc +index-START_INDEX;
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(mu)(y);
-      if (index>=START_INDEX && index <= STOP_INDEX){ loc_ptr = gscal_loc +index-START_INDEX;
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-#endif
-    }
-
-  /* Synchronize work-group */
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  for(i=gidX; i<NB_I; i+=WI_NB)
-    {
-      /* Store result */
-      gscal[i + line_index] = gscal_loc[i];
-    }
-}
diff --git a/hysop/backend/device/opencl/cl_src/kernels/comm_advection_and_remeshing_noVec.cl b/hysop/backend/device/opencl/cl_src/kernels/comm_advection_and_remeshing_noVec.cl
deleted file mode 100644
index 1648c70e4d99e9145364ff331bfff2f13695f92c..0000000000000000000000000000000000000000
--- a/hysop/backend/device/opencl/cl_src/kernels/comm_advection_and_remeshing_noVec.cl
+++ /dev/null
@@ -1,468 +0,0 @@
-
-
-
-__kernel void buff_advec_and_remesh_l(__global const float* gvelo,
-				      __global float* v_buffer_l,
-				      __global const float* pscal,
-				      __global float* s_buffer_l,
-				      int used_width,
-				      float dt, __constant struct AdvectionMeshInfo* mesh)
-{
-  int gidY = get_global_id(0); /* OpenCL work-itme global index (Y) */
-  int gidZ = get_global_id(1); /* OpenCL work-itme global index (Z) */
-  int i;			/* Particle index in 1D problem */
-  int line_index ; /* Current 1D problem index */
-
-  float v,vp,p,c,s,y,w, hdt = 0.5 * dt;
-  int i_ind, i_ind_p, ind, index;
-
-  float velocity_cache[V_NB_I];
-  float v_buff_l_loc[V_BUFF_WIDTH];
-  float s_buff_l_loc[BUFF_WIDTH];
-  float* loc_ptr;
-
-  // Initialize buffers
-  for (i=0;i<used_width;i++)
-    s_buff_l_loc[i] = 0.0;
-
-  for(i=0; i<V_BUFF_WIDTH; i++)
-    v_buff_l_loc[i] = v_buffer_l[i + V_BUFF_WIDTH*(gidY + gidZ*V_NB_II)];
-
-  line_index = gidY*V_NB_I + gidZ*V_NB_I*V_NB_II;
-  /* Read velocity */
-  /* Fill velocity cache */
-  for(i=0;i<V_NB_I;i++)
-      velocity_cache[i] = gvelo[i+line_index];
-
-  /* Synchronize work-group */
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  line_index = gidY*NB_I+gidZ*NB_I*NB_II;
-  for(i=0; i<2*BUFF_WIDTH; i++)
-    {
-      /* Read particle scalar */
-      s = pscal[i + line_index];
-
-      c = i * mesh->dx.x + mesh->min_position;
-      v = velocity_cache[i + V_GHOSTS_NB];
-      p = (c + hdt*v) * mesh->v_invdx;
-
-      i_ind = convert_int_rtn(p);
-      p = p - convert_float(i_ind);
-      i_ind_p = i_ind + 1;
-      loc_ptr = (i_ind>=(V_START_INDEX-V_GHOSTS_NB)) ? velocity_cache + i_ind - (V_START_INDEX-V_GHOSTS_NB) : v_buff_l_loc+i_ind-(V_START_INDEX-V_GHOSTS_NB-1-V_BUFF_WIDTH+1);
-      v = *loc_ptr;
-
-      loc_ptr = (i_ind_p>=(V_START_INDEX-V_GHOSTS_NB)) ? velocity_cache+i_ind_p - (V_START_INDEX-V_GHOSTS_NB) : v_buff_l_loc+i_ind_p-(V_START_INDEX-V_GHOSTS_NB-1-V_BUFF_WIDTH+1);
-      vp = *loc_ptr;
-
-      v = (p*(vp-v) + v);
-      p = c + dt * v;
-
-
-
-      ind = convert_int_rtn(p * mesh->invdx);
-      y = (p - convert_float(ind) * mesh->dx.x) * mesh->invdx;
-
-      index = ind - REMESH_SHIFT;
-
-      w = REMESH(alpha)(y);
-      if (index<START_INDEX){ loc_ptr = s_buff_l_loc+index-(START_INDEX-1-used_width+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(beta)(y);
-      if (index<START_INDEX){ loc_ptr = s_buff_l_loc+index-(START_INDEX-1-used_width+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(gamma)(y);
-      if (index<START_INDEX){ loc_ptr = s_buff_l_loc+index-(START_INDEX-1-used_width+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(delta)(y);
-      if (index<START_INDEX){ loc_ptr = s_buff_l_loc+index-(START_INDEX-1-used_width+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-#if REMESH_SHIFT > 1
-      index = index + 1;
-      w = REMESH(eta)(y);
-      if (index<START_INDEX){ loc_ptr = s_buff_l_loc+index-(START_INDEX-1-used_width+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(zeta)(y);
-      if (index<START_INDEX){ loc_ptr = s_buff_l_loc+index-(START_INDEX-1-used_width+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-#endif
-
-#if REMESH_SHIFT > 2
-      index = index + 1;
-      w = REMESH(theta)(y);
-      if (index<START_INDEX){ loc_ptr = s_buff_l_loc+index-(START_INDEX-1-used_width+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(iota)(y);
-      if (index<START_INDEX){ loc_ptr = s_buff_l_loc+index-(START_INDEX-1-used_width+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-#endif
-
-#if REMESH_SHIFT > 3
-      index = index + 1;
-      w = REMESH(kappa)(y);
-      if (index<START_INDEX){ loc_ptr = s_buff_l_loc+index-(START_INDEX-1-used_width+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(mu)(y);
-      if (index<START_INDEX){ loc_ptr = s_buff_l_loc+index-(START_INDEX-1-used_width+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-#endif
-
-    }
-
-  /* Synchronize work-group */
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  // Store buffers
-  for(i=0;i<used_width;i++)
-    s_buffer_l[i + gidY*used_width + gidZ*used_width*NB_II] = s_buff_l_loc[i];
-}
-
-
-
-
-
-
-
-
-__kernel void buff_advec_and_remesh_r(__global const float* gvelo,
-				      __global float* v_buffer_r,
-				      __global const float* pscal,
-				      __global float* s_buffer_r,
-				      int used_width,
-				      float dt, __constant struct AdvectionMeshInfo* mesh)
-{
-  int gidY = get_global_id(0); /* OpenCL work-itme global index (Y) */
-  int gidZ = get_global_id(1); /* OpenCL work-itme global index (Z) */
-  int i;			/* Particle index in 1D problem */
-  int line_index ; /* Current 1D problem index */
-
-  float v,vp,p,c,s,y,w, hdt = 0.5 * dt;
-  int i_ind, i_ind_p, ind, index;
-
-  float velocity_cache[V_NB_I];
-  float v_buff_r_loc[V_BUFF_WIDTH];
-  float s_buff_r_loc[BUFF_WIDTH];
-  float* loc_ptr;
-
-  // Initialize buffers
-  for(i=0;i<used_width;i++)
-    s_buff_r_loc[i] = 0.0;
-
-  for(i=0;i<V_BUFF_WIDTH;i++)
-    v_buff_r_loc[i] = v_buffer_r[i + V_BUFF_WIDTH*(gidY + gidZ*V_NB_II)];
-
-  line_index = gidY*V_NB_I + gidZ*V_NB_I*V_NB_II;
-  /* Read velocity */
-  /* Fill velocity cache */
-  for(i=0;i<V_NB_I; i++)
-      velocity_cache[i] = gvelo[i+line_index];
-
-  /* Synchronize work-group */
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  line_index = gidY*NB_I+gidZ*NB_I*NB_II;
-  for(i=NB_I-2*BUFF_WIDTH; i<NB_I; i++)
-    {
-      /* Read particle scalar */
-      s = pscal[i + line_index];
-
-      c = i * mesh->dx.x + mesh->min_position;
-      v = velocity_cache[i + V_GHOSTS_NB];
-      p = (c + hdt*v) * mesh->v_invdx;
-
-      i_ind = convert_int_rtn(p);
-      p = p - convert_float(i_ind);
-      i_ind_p = i_ind + 1;
-      loc_ptr = (i_ind <= (V_STOP_INDEX+V_GHOSTS_NB)) ? velocity_cache + i_ind - (V_START_INDEX-V_GHOSTS_NB) : v_buff_r_loc+i_ind-(V_STOP_INDEX+V_GHOSTS_NB+1) ;
-      v = *loc_ptr;
-
-      loc_ptr = (i_ind_p <= (V_STOP_INDEX+V_GHOSTS_NB)) ? velocity_cache+i_ind_p - (V_START_INDEX-V_GHOSTS_NB) : v_buff_r_loc+i_ind_p-(V_STOP_INDEX+V_GHOSTS_NB+1) ;
-      vp = *loc_ptr;
-
-      v = (p*(vp-v) + v);
-      p = c + dt * v;
-
-
-
-      ind = convert_int_rtn(p * mesh->invdx);
-      y = (p - convert_float(ind) * mesh->dx.x) * mesh->invdx;
-
-      index = ind - REMESH_SHIFT;
-
-      w = REMESH(alpha)(y);
-      if (index > STOP_INDEX){ loc_ptr = s_buff_r_loc + index-(STOP_INDEX+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(beta)(y);
-      if (index > STOP_INDEX){ loc_ptr = s_buff_r_loc + index-(STOP_INDEX+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(gamma)(y);
-      if (index > STOP_INDEX){ loc_ptr = s_buff_r_loc + index-(STOP_INDEX+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(delta)(y);
-      if (index > STOP_INDEX){ loc_ptr = s_buff_r_loc + index-(STOP_INDEX+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-#if REMESH_SHIFT > 1
-      index = index + 1;
-      w = REMESH(eta)(y);
-      if (index > STOP_INDEX){ loc_ptr = s_buff_r_loc + index-(STOP_INDEX+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(zeta)(y);
-      if (index > STOP_INDEX){ loc_ptr = s_buff_r_loc + index-(STOP_INDEX+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-#endif
-
-#if REMESH_SHIFT > 2
-      index = index + 1;
-      w = REMESH(theta)(y);
-      if (index > STOP_INDEX){ loc_ptr = s_buff_r_loc + index-(STOP_INDEX+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(iota)(y);
-      if (index > STOP_INDEX){ loc_ptr = s_buff_r_loc + index-(STOP_INDEX+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-#endif
-
-#if REMESH_SHIFT > 3
-      index = index + 1;
-      w = REMESH(kappa)(y);
-      if (index > STOP_INDEX){ loc_ptr = s_buff_r_loc + index-(STOP_INDEX+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(mu)(y);
-      if (index > STOP_INDEX){ loc_ptr = s_buff_r_loc + index-(STOP_INDEX+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-#endif
-
-    }
-
-  /* Synchronize work-group */
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  for(i=0;i<used_width;i++)
-    s_buffer_r[i + gidY*used_width + gidZ*used_width*NB_II] = s_buff_r_loc[i];
-
-}
-
-
-__kernel void buff_advec_and_remesh(__global const float* gvelo,
-				    __global float* v_buffer_l,
-				    __global float* v_buffer_r,
-				    __global const float* pscal,
-				    __global float* gscal,
-				    float dt, __constant struct AdvectionMeshInfo* mesh)
-{
-  int gidX = get_global_id(0);	/* OpenCL work-itme global index (X) */
-  int gidY = get_global_id(1); /* OpenCL work-itme global index (Y) */
-  int gidZ = get_global_id(2); /* OpenCL work-itme global index (Z) */
-  int i;			/* Particle index in 1D problem */
-  int line_index ; /* Current 1D problem index */
-
-  float v,vp,p,c,s,y,w, hdt = 0.5 * dt;
-  int i_ind, i_ind_p, ind, index;
-
-  __local float velocity_cache[V_NB_I];
-  __local float v_buff_l_loc[V_BUFF_WIDTH];
-  __local float v_buff_r_loc[V_BUFF_WIDTH];
-  __local float gscal_loc[NB_I];
-  __local float* loc_ptr;
-
-
-  for(i=gidX; i<NB_I; i+=WI_NB)
-    /* Initialize result buffer */
-    gscal_loc[i] = 0.0;
-
-  for(i=gidX; i<V_BUFF_WIDTH; i+=(WI_NB))
-    v_buff_l_loc[i] = v_buffer_l[i + V_BUFF_WIDTH*(gidY + gidZ*V_NB_II)];
-
-  for(i=gidX; i<V_BUFF_WIDTH; i+=(WI_NB))
-    v_buff_r_loc[i] = v_buffer_r[i + V_BUFF_WIDTH*(gidY + gidZ*V_NB_II)];
-
-  line_index = gidY*V_NB_I + gidZ*V_NB_I*V_NB_II;
-  /* Read velocity */
-  /* Fill velocity cache */
-  for(i=gidX; i<V_NB_I; i+=(WI_NB))
-      velocity_cache[i] = gvelo[i+line_index];
-
-  /* Synchronize work-group */
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  line_index = gidY*NB_I+gidZ*NB_I*NB_II;
-  for(i=gidX*PART_NB_PER_WI; i<(gidX + 1)*PART_NB_PER_WI; i+=1)
-    {
-      /* Read particle scalar */
-      s = pscal[i + line_index];
-
-      c = i * mesh->dx.x + mesh->min_position;
-      v = velocity_cache[i + V_GHOSTS_NB];
-      p = (c + hdt*v) * mesh->v_invdx;
-
-      i_ind = convert_int_rtn(p);
-      p = p - convert_float(i_ind);
-      i_ind_p = i_ind + 1;
-      loc_ptr = (i_ind>=(V_START_INDEX-V_GHOSTS_NB) && i_ind <= (V_STOP_INDEX+V_GHOSTS_NB)) ? velocity_cache + i_ind - (V_START_INDEX-V_GHOSTS_NB) : (i_ind<(V_START_INDEX-V_GHOSTS_NB)) ? v_buff_l_loc+i_ind-(V_START_INDEX-V_GHOSTS_NB-1-V_BUFF_WIDTH+1) : v_buff_r_loc+i_ind-(V_STOP_INDEX+V_GHOSTS_NB+1) ;
-      v = *loc_ptr;
-
-      loc_ptr = (i_ind_p>=(V_START_INDEX-V_GHOSTS_NB) && i_ind_p <= (V_STOP_INDEX+V_GHOSTS_NB)) ? velocity_cache+i_ind_p - (V_START_INDEX-V_GHOSTS_NB) : (i_ind_p<(V_START_INDEX-V_GHOSTS_NB)) ? v_buff_l_loc+i_ind_p-(V_START_INDEX-V_GHOSTS_NB-1-V_BUFF_WIDTH+1) : v_buff_r_loc+i_ind_p-(V_STOP_INDEX+V_GHOSTS_NB+1) ;
-      vp = *loc_ptr;
-
-      v = (p*(vp-v) + v);
-      p = c + dt * v;
-
-
-
-      ind = convert_int_rtn(p * mesh->invdx);
-      y = (p - convert_float(ind) * mesh->dx.x) * mesh->invdx;
-
-      index = ind - REMESH_SHIFT;
-
-      w = REMESH(alpha)(y);
-      if (index>=START_INDEX && index <= STOP_INDEX){ loc_ptr = gscal_loc +index-START_INDEX;
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(beta)(y);
-      if (index>=START_INDEX && index <= STOP_INDEX){ loc_ptr = gscal_loc +index-START_INDEX;
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(gamma)(y);
-      if (index>=START_INDEX && index <= STOP_INDEX){ loc_ptr = gscal_loc +index-START_INDEX;
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(delta)(y);
-      if (index>=START_INDEX && index <= STOP_INDEX){ loc_ptr = gscal_loc +index-START_INDEX;
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-#if REMESH_SHIFT > 1
-      index = index + 1;
-      w = REMESH(eta)(y);
-      if (index>=START_INDEX && index <= STOP_INDEX){ loc_ptr = gscal_loc +index-START_INDEX;
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(zeta)(y);
-      if (index>=START_INDEX && index <= STOP_INDEX){ loc_ptr = gscal_loc +index-START_INDEX;
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-#endif
-
-#if REMESH_SHIFT > 2
-      index = index + 1;
-      w = REMESH(theta)(y);
-      if (index>=START_INDEX && index <= STOP_INDEX){ loc_ptr = gscal_loc +index-START_INDEX;
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(iota)(y);
-      if (index>=START_INDEX && index <= STOP_INDEX){ loc_ptr = gscal_loc +index-START_INDEX;
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-#endif
-
-#if REMESH_SHIFT > 3
-      index = index + 1;
-      w = REMESH(kappa)(y);
-      if (index>=START_INDEX && index <= STOP_INDEX){ loc_ptr = gscal_loc +index-START_INDEX;
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(mu)(y);
-      if (index>=START_INDEX && index <= STOP_INDEX){ loc_ptr = gscal_loc +index-START_INDEX;
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-#endif
-
-    }
-
-  /* Synchronize work-group */
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  for(i=gidX; i<NB_I; i+=WI_NB)
-    {
-      /* Store result */
-      gscal[i + line_index] = gscal_loc[i];
-    }
-}
diff --git a/hysop/backend/device/opencl/cl_src/kernels/comm_advection_noVec.cl b/hysop/backend/device/opencl/cl_src/kernels/comm_advection_noVec.cl
deleted file mode 100644
index d10675fede5770453fa488f5472fc9b90b06f372..0000000000000000000000000000000000000000
--- a/hysop/backend/device/opencl/cl_src/kernels/comm_advection_noVec.cl
+++ /dev/null
@@ -1,64 +0,0 @@
-
-
-
-__kernel void buff_advec(__global const float* gvelo,
-			 __global float* ppos,
-			 __global float* buffer_l,
-			 __global float* buffer_r,
-			 float dt, __constant struct AdvectionMeshInfo* mesh)
-{
-  int gidX = get_global_id(0);	/* OpenCL work-itme global index (X) */
-  int gidY = get_global_id(1); /* OpenCL work-itme global index (Y) */
-  int gidZ = get_global_id(2); /* OpenCL work-itme global index (Z) */
-  int i;			/* Particle index in 1D problem */
-  int line_index ; /* Current 1D problem index */
-
-  float v,vp,p,c, hdt = 0.5 * dt;
-  int i_ind, i_ind_p;
-
-  __local float velocity_cache[V_NB_I];
-  __local float buff_l_loc[V_BUFF_WIDTH];
-  __local float buff_r_loc[V_BUFF_WIDTH];
-  __local float* loc_ptr;
-
-    for(i=gidX; i<V_BUFF_WIDTH; i+=(WI_NB)){
-      buff_l_loc[i] = buffer_l[i + V_BUFF_WIDTH*(gidY + gidZ*V_NB_II)];
-    }
-
-    for(i=gidX; i<V_BUFF_WIDTH; i+=(WI_NB)){
-      buff_r_loc[i] = buffer_r[i + V_BUFF_WIDTH*(gidY + gidZ*V_NB_II)];
-    }
-
-  line_index = gidY*V_NB_I + gidZ*V_NB_I*V_NB_II;
-  for(i=gidX; i<V_NB_I; i+=(WI_NB))
-    {
-      /* Read velocity */
-      /* Fill velocity cache */
-      velocity_cache[i] = gvelo[i+line_index];
-    }
-
-  /* Synchronize work-group */
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  line_index = gidY*NB_I+gidZ*NB_I*NB_II;
-  for(i=gidX; i<NB_I; i+=WI_NB)
-    {
-      c = i * mesh->dx.x + mesh->min_position;
-      v = velocity_cache[i + V_GHOSTS_NB];
-      p = (c + hdt*v) * mesh->v_invdx;
-
-      i_ind = convert_int_rtn(p);
-      p = p - convert_float(i_ind);
-      i_ind_p = i_ind + 1;
-      loc_ptr = (i_ind>=(V_START_INDEX-V_GHOSTS_NB) && i_ind <= (V_STOP_INDEX+V_GHOSTS_NB)) ? velocity_cache + i_ind - (V_START_INDEX-V_GHOSTS_NB) : (i_ind<(V_START_INDEX-V_GHOSTS_NB)) ? buff_l_loc+i_ind-(V_START_INDEX-V_GHOSTS_NB-1-V_BUFF_WIDTH+1) : buff_r_loc+i_ind-(V_STOP_INDEX+V_GHOSTS_NB+1) ;
-      v = *loc_ptr;
-
-      loc_ptr = (i_ind_p>=(V_START_INDEX-V_GHOSTS_NB) && i_ind_p <= (V_STOP_INDEX+V_GHOSTS_NB)) ? velocity_cache+i_ind_p - (V_START_INDEX-V_GHOSTS_NB) : (i_ind_p<(V_START_INDEX-V_GHOSTS_NB)) ? buff_l_loc+i_ind_p-(V_START_INDEX-V_GHOSTS_NB-1-V_BUFF_WIDTH+1) : buff_r_loc+i_ind_p-(V_STOP_INDEX+V_GHOSTS_NB+1) ;
-      vp = *loc_ptr;
-
-      v = (p*(vp-v) + v);
-      p = c + dt * v;
-      ppos[i+line_index] = p;
-    }
-
-}
diff --git a/hysop/backend/device/opencl/cl_src/kernels/comm_remeshing_noVec.cl b/hysop/backend/device/opencl/cl_src/kernels/comm_remeshing_noVec.cl
deleted file mode 100644
index 89a3dac244ff1cf5a6f72da66ccf9cc8514087ae..0000000000000000000000000000000000000000
--- a/hysop/backend/device/opencl/cl_src/kernels/comm_remeshing_noVec.cl
+++ /dev/null
@@ -1,409 +0,0 @@
-/**
- * @file comm_remeshing_noVec.cl
- * Remeshing kernel.
- */
-/**
- * Performs remeshing of the particles' scalar.
- * A work-group is handling a 1D problem. Thus, gidY and gidZ are constants among work-items of a work-group.
- * Each work-item computes <code>NB_I/WI_NB</code> particles positions. To avoid concurrent witings, in case of strong velocity gradients, work-items computes contiguous particles.
- * Particle are computed through OpenCL vector types of lenght 2, 4 or 8.
- * Scalar results are stored in a local buffer as a cache and then copied to global memory buffer.
- *
- * @param ppos Particle position
- * @param pscal Particle scalar
- * @param gscal Grid scalar
- * @param buffer_l Buffer for storing out of domain contributions (to left)
- * @param buffer_r Buffer for storing out of domain contributions (to right)
- * @param min_position Domain lower coordinate
- * @param dx Space step
- * @param l_nb buffer_l sizes
- * @param r_nb buffer_r sizes
- *
- * @remark <code>NB_I</code>, <code>NB_II</code>, <code>NB_III</code> : points number in directions from 1st varying index to last.
- * @remark <code>WI_NB</code> corresponds to the work-item number.
- * @remark <code>__N__</code> is expanded at compilation time by vector width.
- * @remark <code>__NN__</code> is expanded at compilation time by a sequence of integer for each vector component.
- * @remark <code>__RCOMP_I</code> flag is for instruction expansion for the different remeshed components.
- * @remark <code>__RCOMP_P</code> flag is for function parameter expansion for the different remeshed components.
- * @remark <code>__ID__</code> is replaced by the remeshed component id in an expansion.
- * @see hysop.gpu.tools.parse_file
- */
-__kernel void buff_remesh_l(__global const float* ppos,
-			    __global const float* pscal,
-			    __global float* buffer_l,
-			    int used_width,
-			    __constant struct AdvectionMeshInfo* mesh
-			    )
-{
-  int gidY = get_global_id(0); /* OpenCL work-itme global index (Y) */
-  int gidZ = get_global_id(1); /* OpenCL work-itme global index (Z) */
-  int i;			/* Particle index in 1D problem */
-  float p;			/* Particle position */
-  float s;      /* Particle scalar */
-float y;			/* Normalized distance to nearest left grid point */
-  int ind;			/* Integer coordinate */
-  int index;		/* Remeshing index */
-  float w;
-
-  uint line_index = gidY*NB_I+ gidZ*NB_I*NB_II; /* Current 1D problem index */
-
-  float l_buff_loc[BUFF_WIDTH];
-  float* loc_ptr;
-
-  // Initialize buffers
-  for(i=0; i<used_width; i++)
-    l_buff_loc[i] = 0.0;
-
-  /* Synchronize work-group */
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  //for(i=lid*PART_NB_PER_WI; i<(lid + 1)*PART_NB_PER_WI; i+=1)
-  for(i=0; i<2*BUFF_WIDTH; i++)
-    {
-      /* Read particle position */
-      p = ppos[i + line_index];
-      /* Read particle scalar */
-      s = pscal[i + line_index];
-      /* Remesh particle */
-
-      ind = convert_int_rtn(p * mesh->invdx);
-      y = (p - convert_float(ind) * mesh->dx.x) * mesh->invdx;
-
-      index = ind - REMESH_SHIFT;
-
-      w = REMESH(alpha)(y);
-      if(index<START_INDEX){ loc_ptr = l_buff_loc+index-(START_INDEX-1-used_width+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(beta)(y);
-      if(index<START_INDEX){ loc_ptr = l_buff_loc+index-(START_INDEX-1-used_width+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(gamma)(y);
-      if(index<START_INDEX){ loc_ptr = l_buff_loc+index-(START_INDEX-1-used_width+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(delta)(y);
-      if(index<START_INDEX){ loc_ptr = l_buff_loc+index-(START_INDEX-1-used_width+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-#if REMESH_SHIFT > 1
-      index = index + 1;
-      w = REMESH(eta)(y);
-      if(index<START_INDEX){ loc_ptr = l_buff_loc+index-(START_INDEX-1-used_width+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(zeta)(y);
-      if(index<START_INDEX){ loc_ptr = l_buff_loc+index-(START_INDEX-1-used_width+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-#endif
-
-#if REMESH_SHIFT > 2
-      index = index + 1;
-      w = REMESH(theta)(y);
-      if(index<START_INDEX){ loc_ptr = l_buff_loc+index-(START_INDEX-1-used_width+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(iota)(y);
-      if(index<START_INDEX){ loc_ptr = l_buff_loc+index-(START_INDEX-1-used_width+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-#endif
-
-#if REMESH_SHIFT > 3
-      index = index + 1;
-      w = REMESH(kappa)(y);
-      if(index<START_INDEX){ loc_ptr = l_buff_loc+index-(START_INDEX-1-used_width+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(mu)(y);
-      if(index<START_INDEX){ loc_ptr = l_buff_loc+index-(START_INDEX-1-used_width+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-#endif
-    }
-
-  /* Synchronize work-group */
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  // Store buffers
-  for(i=0; i<used_width; i++)
-    buffer_l[i + gidY*used_width + gidZ*used_width*NB_II] = l_buff_loc[i];
-}
-
-__kernel void buff_remesh_r(__global const float* ppos,
-			    __global const float* pscal,
-			    __global float* buffer_r,
-			    int used_width,
-			    __constant struct AdvectionMeshInfo* mesh
-			    )
-{
-  int gidY = get_global_id(0); /* OpenCL work-itme global index (Y) */
-  int gidZ = get_global_id(1); /* OpenCL work-itme global index (Z) */
-  int i;			/* Particle index in 1D problem */
-  float p;			/* Particle position */
-  float s;      /* Particle scalar */
-  float y;			/* Normalized distance to nearest left grid point */
-  int ind;			/* Integer coordinate */
-  int index;		/* Remeshing index */
-  float w;
-
-  uint line_index = gidY*NB_I+ gidZ*NB_I*NB_II; /* Current 1D problem index */
-
-  float r_buff_loc[BUFF_WIDTH];
-  float* loc_ptr;
-
-  // Initialize buffers
-  for(i=0; i<used_width; i++)
-    r_buff_loc[i] = 0.0;
-
-  /* Synchronize work-group */
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  for(i=NB_I-2*BUFF_WIDTH; i<NB_I; i++)
-    {
-      /* Read particle position */
-      p = ppos[i + line_index];
-      /* Read particle scalar */
-      s = pscal[i + line_index];
-      /* Remesh particle */
-
-      ind = convert_int_rtn(p * mesh->invdx);
-      y = (p - convert_float(ind) * mesh->dx.x) * mesh->invdx;
-
-      index = ind - REMESH_SHIFT;
-
-      w = REMESH(alpha)(y);
-      if(index > STOP_INDEX){ loc_ptr = loc_ptr = r_buff_loc + index-(STOP_INDEX+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(beta)(y);
-      if(index > STOP_INDEX){ loc_ptr = loc_ptr = r_buff_loc + index-(STOP_INDEX+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(gamma)(y);
-      if(index > STOP_INDEX){ loc_ptr = loc_ptr = r_buff_loc + index-(STOP_INDEX+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(delta)(y);
-      if(index > STOP_INDEX){ loc_ptr = loc_ptr = r_buff_loc + index-(STOP_INDEX+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-#if REMESH_SHIFT > 1
-      index = index + 1;
-      w = REMESH(eta)(y);
-      if(index > STOP_INDEX){ loc_ptr = loc_ptr = r_buff_loc + index-(STOP_INDEX+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(zeta)(y);
-      if(index > STOP_INDEX){ loc_ptr = loc_ptr = r_buff_loc + index-(STOP_INDEX+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-#endif
-
-#if REMESH_SHIFT > 2
-      index = index + 1;
-      w = REMESH(theta)(y);
-      if(index > STOP_INDEX){ loc_ptr = loc_ptr = r_buff_loc + index-(STOP_INDEX+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(iota)(y);
-      if(index > STOP_INDEX){ loc_ptr = loc_ptr = r_buff_loc + index-(STOP_INDEX+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-#endif
-
-#if REMESH_SHIFT > 3
-      index = index + 1;
-      w = REMESH(kappa)(y);
-      if(index > STOP_INDEX){ loc_ptr = loc_ptr = r_buff_loc + index-(STOP_INDEX+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(mu)(y);
-      if(index > STOP_INDEX){ loc_ptr = loc_ptr = r_buff_loc + index-(STOP_INDEX+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-#endif
-    }
-
-  /* Synchronize work-group */
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  // Store buffers
-  for(i=0; i<used_width; i++)
-    buffer_r[i + gidY*used_width + gidZ*used_width*NB_II] = r_buff_loc[i];
-
-}
-
-__kernel void remesh(__global const float* ppos,
-			  __global const float* pscal,
-			  __global float* gscal,
-			  __constant struct AdvectionMeshInfo* mesh
-			  )
-{
-  int lid = get_local_id(0);	/* OpenCL work-itme global index (X) */
-  int gidY = get_global_id(1); /* OpenCL work-itme global index (Y) */
-  int gidZ = get_global_id(2); /* OpenCL work-itme global index (Z) */
-  int i;			/* Particle index in 1D problem */
-  float p;			/* Particle position */
-  float s;      /* Particle scalar */
-float y;			/* Normalized distance to nearest left grid point */
-  int ind;			/* Integer coordinate */
-  int index;		/* Remeshing index */
-  float w;
-
-  uint line_index = gidY*NB_I+ gidZ*NB_I*NB_II; /* Current 1D problem index */
-
-  __local float gscal_loc[NB_I];
-  __local float* loc_ptr;
-
-  /* Initialize result buffer */
-  for(i=lid; i<NB_I; i+=WI_NB)
-      gscal_loc[i] = 0.0;
-
-  /* Synchronize work-group */
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  for(i=lid*PART_NB_PER_WI; i<(lid + 1)*PART_NB_PER_WI; i+=1)
-    {
-      /* Read particle position */
-      p = ppos[i + line_index];
-      /* Read particle scalar */
-      s = pscal[i + line_index];
-      /* Remesh particle */
-
-      ind = convert_int_rtn(p * mesh->invdx);
-      y = (p - convert_float(ind) * mesh->dx.x) * mesh->invdx;
-
-      index = ind - REMESH_SHIFT;
-
-      w = REMESH(alpha)(y);
-      if (index>=START_INDEX && index <= STOP_INDEX) {loc_ptr = gscal_loc +index-START_INDEX;
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(beta)(y);
-      if (index>=START_INDEX && index <= STOP_INDEX) {loc_ptr = gscal_loc +index-START_INDEX;
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(gamma)(y);
-      if (index>=START_INDEX && index <= STOP_INDEX) {loc_ptr = gscal_loc +index-START_INDEX;
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(delta)(y);
-      if (index>=START_INDEX && index <= STOP_INDEX) {loc_ptr = gscal_loc +index-START_INDEX;
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-#if REMESH_SHIFT > 1
-      index = index + 1;
-      w = REMESH(eta)(y);
-      if (index>=START_INDEX && index <= STOP_INDEX) {loc_ptr = gscal_loc +index-START_INDEX;
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(zeta)(y);
-      if (index>=START_INDEX && index <= STOP_INDEX) {loc_ptr = gscal_loc +index-START_INDEX;
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-#endif
-
-#if REMESH_SHIFT > 2
-      index = index + 1;
-      w = REMESH(theta)(y);
-      if (index>=START_INDEX && index <= STOP_INDEX) {loc_ptr = gscal_loc +index-START_INDEX;
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(iota)(y);
-      if (index>=START_INDEX && index <= STOP_INDEX) {loc_ptr = gscal_loc +index-START_INDEX;
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-#endif
-
-#if REMESH_SHIFT > 3
-      index = index + 1;
-      w = REMESH(kappa)(y);
-      if (index>=START_INDEX && index <= STOP_INDEX) {loc_ptr = gscal_loc +index-START_INDEX;
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(mu)(y);
-      if (index>=START_INDEX && index <= STOP_INDEX) {loc_ptr = gscal_loc +index-START_INDEX;
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-#endif
-    }
-
-  /* Synchronize work-group */
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  /* Store result */
-  for(i=lid; i<NB_I; i+=WI_NB)
-      gscal[i + line_index] = gscal_loc[i];
-}
diff --git a/hysop/backend/device/opencl/cl_src/kernels/copy.cl b/hysop/backend/device/opencl/cl_src/kernels/copy.cl
deleted file mode 100644
index 41faadc113a169365148846fad602bd8efa64961..0000000000000000000000000000000000000000
--- a/hysop/backend/device/opencl/cl_src/kernels/copy.cl
+++ /dev/null
@@ -1,28 +0,0 @@
-/**
- * @file copy.cl
- * Copy kernel, vectorized version.
- */
-
-/**
- * Performs a copy from in to out. Data are read by blocs of <code>__N__</code> contiguously.
- *
- * @param in Input data.
- * @param out Output data
- *
- * @remark <code>NB_I</code>, <code>NB_II</code>, <code>NB_III</code> : points number in directions from 1st varying index to last.
- */
-__kernel void copy(__global const float* in,
-		   __global float* out)
-{
-  uint xIndex = (get_group_id(0) * TILE_DIM_COPY + get_local_id(0)*__N__);
-  uint yIndex = get_group_id(1) * TILE_DIM_COPY + get_local_id(1);
-  uint zIndex = get_global_id(2);
-  uint index = xIndex + yIndex * NB_I + zIndex*NB_I*NB_II;
-  float x__NN__;
-
-  for(uint i=0; i<TILE_DIM_COPY; i+=BLOCK_ROWS_COPY)
-    {
-      x__NN__ = in[index + __NN__ + i*NB_I];
-      out[index + __NN__ + i*NB_I] = x__NN__;
-    }
-}
diff --git a/hysop/backend/device/opencl/cl_src/kernels/copy_locMem.cl b/hysop/backend/device/opencl/cl_src/kernels/copy_locMem.cl
deleted file mode 100644
index ea51b77ab13c8100dd27011bba351c76129ab43a..0000000000000000000000000000000000000000
--- a/hysop/backend/device/opencl/cl_src/kernels/copy_locMem.cl
+++ /dev/null
@@ -1,33 +0,0 @@
-/**
- * @file copy_locMem.cl
- * Copy kernel, use local memory.
- */
-
-/**
- * Performs a copy from in to out. Data are moved to local memory buffer.
- *
- * @param in Input data.
- * @param out Output data
- *
- * @remark NB_I, NB_II, NB_III : points number in directions from 1st varying index to last.
- */
-__kernel void copy(__global const float* in,
-		   __global float* out)
-{
-  uint xIndex = get_group_id(0) * TILE_DIM_COPY + get_local_id(0);
-  uint yIndex = get_group_id(1) * TILE_DIM_COPY + get_local_id(1);
-  uint zIndex = get_global_id(2);
-  uint index = xIndex + yIndex * NB_I + zIndex*NB_I*NB_II;
-
-  __local float tile[TILE_DIM_COPY][TILE_DIM_COPY];
-
-  for(uint i=0; i<TILE_DIM_COPY; i+=BLOCK_ROWS_COPY)
-    {
-      tile[get_local_id(1)+i][get_local_id(0)] = in[index + i*NB_I];
-    }
-  barrier(CLK_LOCAL_MEM_FENCE);
-  for(uint i=0; i<TILE_DIM_COPY; i+=BLOCK_ROWS_COPY)
-    {
-      out[index + i*NB_I] = tile[get_local_id(1)+i][get_local_id(0)];
-    }
-}
diff --git a/hysop/backend/device/opencl/cl_src/kernels/copy_noVec.cl b/hysop/backend/device/opencl/cl_src/kernels/copy_noVec.cl
deleted file mode 100644
index fb4cea67ba2882f759e857234236ec9b4b5c4049..0000000000000000000000000000000000000000
--- a/hysop/backend/device/opencl/cl_src/kernels/copy_noVec.cl
+++ /dev/null
@@ -1,25 +0,0 @@
-/**
- * @file copy.cl
- * Copy kernel, basic version.
- */
-
-/**
- * Performs a copy from in to out.
- *
- * @param in Input data.
- * @param out Output data
- * @remark NB_I, NB_II, NB_III : points number in directions from 1st varying index to last.
- */
-__kernel void copy(__global const float* in,
-		   __global float* out)
-{
-  uint xIndex = get_group_id(0) * TILE_DIM_COPY + get_local_id(0);
-  uint yIndex = get_group_id(1) * TILE_DIM_COPY + get_local_id(1);
-  uint zIndex = get_global_id(2);
-  uint index = xIndex + yIndex * NB_I + zIndex*NB_I*NB_II;
-
-  for(uint i=0; i<TILE_DIM_COPY; i+=BLOCK_ROWS_COPY)
-    {
-        out[index + i*NB_I] = in[index + i*NB_I];
-    }
-}
diff --git a/hysop/backend/device/opencl/cl_src/kernels/diffusion.cl b/hysop/backend/device/opencl/cl_src/kernels/diffusion.cl
deleted file mode 100644
index edbfa572a5e09fb97d792c3860d77ba5b6e93215..0000000000000000000000000000000000000000
--- a/hysop/backend/device/opencl/cl_src/kernels/diffusion.cl
+++ /dev/null
@@ -1,183 +0,0 @@
-/**
- * @file diffusion.cl
- * Diffusion kernel.
- */
-
-/**
- * Computes diffusion operator with finite differences.
- * Stencil computation is performed within a 2D index space of size <code>TILE_SIZE</code> by a work-group. The 3rd direction is traversed in a loop for data reuse.
- *
- * @param scal_in Input scalar field
- * @param ghostsX Ghosts array if X is a communication direction
- * @param ghostsY Ghosts array if Y is a communication direction
- * @param ghostsZ Ghosts array if Z is a communication direction
- * @param scal_out Output scalar field
- * @param nudt Diffusion coefficient
- * @param dx Mesh space step
- *
- * @remark <code>NB_X</code>, <code>NB_Y</code>, <code>NB_Z</code> : points number in physical space directions.
- * @remark <code>NB_PART</code> Particles number per work-item in computing direction
- * @remark <code>CUT_DIT_X</code>, <code>CUT_DIT_Y</code> and <code>CUT_DIT_Z</code> : flags for communication direction
- * @remark <code>NB_GROUPS_I</code> and <code>NB_GROUPS_II</code> : tiles number in X and Y directions.
- * @remark <code>L_WIDTH</code> : work-item number in tile.
- */
-
-__kernel void diffusion(__global const float* scal_in,
-#if CUT_DIR_X == 1
-			__global const float* ghostsX,
-#endif
-#if CUT_DIR_Y == 1
-			__global const float* ghostsY,
-#endif
-#if CUT_DIR_Z == 1
-			__global const float* ghostsZ,
-#endif
-			__global float* scal_out,
-			float nudt,
-			float4 dx)
-{
-  int t_gidX, t_gidY;
-  int lidX, lidY;
-  int gidX, gidY, gidZ;
-  float cx, cy, cz;
-  float scal_z_m[NB_PART];
-  float scal_z[NB_PART];
-  float scal_z_p[NB_PART];
-  float s;
-  uint i;
-
-  __local float tile_XY[TILE_SIZE+2][TILE_SIZE+2];
-
-  for (t_gidX=get_group_id(0); t_gidX<NB_GROUPS_I; t_gidX+=get_num_groups(0)) {
-    for (t_gidY=get_group_id(1); t_gidY<NB_GROUPS_II; t_gidY+=get_num_groups(1)) {
-
-      // Tile computation
-      lidX = get_local_id(0);
-      lidY = get_local_id(1);
-      gidX = t_gidX*TILE_SIZE + lidX; /* OpenCL work-item global index (X) */
-      gidY = t_gidY*TILE_SIZE + lidY; /* OpenCL work-item global index (Y) */
-      cx = nudt/(dx.x*dx.x);
-      cy = nudt/(dx.y*dx.y);
-      cz = nudt/(dx.z*dx.z);
-
-      for(i=0;i<NB_PART;i++) {
-#if CUT_DIR_Z == 1
-	scal_z_m[i] = ghostsZ[gidX + (gidY+i*L_WIDTH)*NB_X + NB_X*NB_Y];
-#else
-	scal_z_m[i] = scal_in[gidX + (gidY+i*L_WIDTH)*NB_X + (NB_Z-1)*NB_X*NB_Y];
-#endif
-	scal_z[i] = scal_in[gidX + (gidY+i*L_WIDTH)*NB_X];
-      }
-
-      lidX += 1;
-      lidY += 1;
-
-      // loop over Z indices but last.
-      for (gidZ=0; gidZ<(NB_Z-1); gidZ++) {
-	for(i=0;i<NB_PART;i++) {
-	  // fill the tile
-	  tile_XY[lidX][lidY+i*L_WIDTH] = scal_in[gidX + (gidY+i*L_WIDTH)*NB_X + gidZ*NB_X*NB_Y];
-
-	  /* // fill tile edges */
-#if CUT_DIR_X == 1
-	  tile_XY[0][lidY+i*L_WIDTH] = (t_gidX*TILE_SIZE>=1) ? scal_in[t_gidX*TILE_SIZE-1 + (gidY+i*L_WIDTH)*NB_X + gidZ*NB_X*NB_Y] : ghostsX[1 + (gidY+i*L_WIDTH)*2 + gidZ*2*NB_Y];
-	  tile_XY[TILE_SIZE+1][lidY+i*L_WIDTH] = ((t_gidX+1)*TILE_SIZE<NB_X) ? scal_in[(t_gidX+1)*TILE_SIZE + (gidY+i*L_WIDTH)*NB_X + gidZ*NB_X*NB_Y]: ghostsX[(gidY+i*L_WIDTH)*2 + gidZ*2*NB_Y];
-#else
-	  tile_XY[0][lidY+i*L_WIDTH] = scal_in[((t_gidX*TILE_SIZE-1+NB_X)%NB_X) + (gidY+i*L_WIDTH)*NB_X + gidZ*NB_X*NB_Y];
-	  tile_XY[TILE_SIZE+1][lidY+i*L_WIDTH] = scal_in[(((t_gidX+1)*TILE_SIZE+NB_X)%NB_X) + (gidY+i*L_WIDTH)*NB_X + gidZ*NB_X*NB_Y];
-#endif
-	}
-#if CUT_DIR_Y == 1
-	tile_XY[lidX][0] = (t_gidY*TILE_SIZE>=1)? scal_in[gidX + (t_gidY*TILE_SIZE-1)*NB_X + gidZ*NB_X*NB_Y] : ghostsY[gidX + NB_X + gidZ*NB_X*2];
-	tile_XY[lidX][TILE_SIZE+1] = ((t_gidY+1)*TILE_SIZE<NB_Y) ? scal_in[gidX + (t_gidY+1)*TILE_SIZE*NB_X + gidZ*NB_X*NB_Y] : ghostsY[gidX + gidZ*NB_X*2];
-#else
-	tile_XY[lidX][0] = scal_in[gidX + ((t_gidY*TILE_SIZE-1+NB_Y)%NB_Y)*NB_X + gidZ*NB_X*NB_Y];
-	tile_XY[lidX][TILE_SIZE+1] = scal_in[gidX + (((t_gidY+1)*TILE_SIZE+NB_Y)%NB_Y)*NB_X + gidZ*NB_X*NB_Y];
-#endif
-
-	/* Synchronize work-group */
-	barrier(CLK_LOCAL_MEM_FENCE);
-
-	for(i=0;i<NB_PART;i++) {
-	  /* get scalar value in Z direction */
-	  scal_z_p[i] = scal_in[gidX + (gidY+i*L_WIDTH)*NB_X + (gidZ+1)*NB_X*NB_Y];
-
-	  // Compute stencil
-	  // central point
-	  s = scal_z[i] * (1.0 - 2.0 * (cx + cy + cz));
-
-	  s += cz*(scal_z_m[i] + scal_z_p[i]);
-
-	  s += cy * tile_XY[lidX][lidY+i*L_WIDTH-1];
-	  s += cy * tile_XY[lidX][lidY+i*L_WIDTH+1];
-	  s += cx * tile_XY[lidX-1][lidY+i*L_WIDTH];
-	  s += cx * tile_XY[lidX+1][lidY+i*L_WIDTH];
-
-	  // write result
-	  scal_out[gidX + (gidY+i*L_WIDTH)*NB_X + gidZ*NB_X*NB_Y] = s;
-	}
-
-	/* Synchronize work-group */
-	barrier(CLK_LOCAL_MEM_FENCE);
-
-	for(i=0;i<NB_PART;i++) {
-	  // Shift Z values
-	  scal_z_m[i] = scal_z[i];
-	  scal_z[i] = scal_z_p[i];
-	}
-      }
-
-      // Compute last point (from ghosts)
-      gidZ = NB_Z - 1;
-
-      for(i=0;i<NB_PART;i++) {
-	// fill the tile
-	tile_XY[lidX][lidY+i*L_WIDTH] = scal_in[gidX + (gidY+i*L_WIDTH)*NB_X + gidZ*NB_X*NB_Y];
-
-	/* // fill tile edges */
-#if CUT_DIR_X == 1
-	tile_XY[0][lidY+i*L_WIDTH] = (t_gidX*TILE_SIZE>=1) ? scal_in[t_gidX*TILE_SIZE-1 + (gidY+i*L_WIDTH)*NB_X + gidZ*NB_X*NB_Y] : ghostsX[1 + (gidY+i*L_WIDTH)*2 + gidZ*2*NB_Y];
-	tile_XY[TILE_SIZE+1][lidY+i*L_WIDTH] = ((t_gidX+1)*TILE_SIZE<NB_X) ? scal_in[(t_gidX+1)*TILE_SIZE + (gidY+i*L_WIDTH)*NB_X + gidZ*NB_X*NB_Y]: ghostsX[(gidY+i*L_WIDTH)*2 + gidZ*2*NB_Y];
-#else
-	tile_XY[0][lidY+i*L_WIDTH] = scal_in[((t_gidX*TILE_SIZE-1+NB_X)%NB_X) + (gidY+i*L_WIDTH)*NB_X + gidZ*NB_X*NB_Y];
-	tile_XY[TILE_SIZE+1][lidY+i*L_WIDTH] = scal_in[(((t_gidX+1)*TILE_SIZE+NB_X)%NB_X) + (gidY+i*L_WIDTH)*NB_X + gidZ*NB_X*NB_Y];
-#endif
-      }
-#if CUT_DIR_Y == 1
-      tile_XY[lidX][0] = (t_gidY*TILE_SIZE>=1)? scal_in[gidX + (t_gidY*TILE_SIZE-1)*NB_X + gidZ*NB_X*NB_Y] : ghostsY[gidX + NB_X + gidZ*NB_X*2];
-      tile_XY[lidX][TILE_SIZE+1] = ((t_gidY+1)*TILE_SIZE<NB_Y) ? scal_in[gidX + (t_gidY+1)*TILE_SIZE*NB_X + gidZ*NB_X*NB_Y] : ghostsY[gidX + gidZ*NB_X*2];
-#else
-      tile_XY[lidX][0] = scal_in[gidX + ((t_gidY*TILE_SIZE-1+NB_Y)%NB_Y)*NB_X + gidZ*NB_X*NB_Y];
-      tile_XY[lidX][TILE_SIZE+1] = scal_in[gidX + (((t_gidY+1)*TILE_SIZE+NB_Y)%NB_Y)*NB_X + gidZ*NB_X*NB_Y];
-#endif
-
-      /* Synchronize work-group */
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      for(i=0;i<NB_PART;i++) {
-	/* // get scalar value in Z direction */
-#if CUT_DIR_Z == 1
-	scal_z_p[i] = ghostsZ[gidX + (gidY+i*L_WIDTH)*NB_X];
-#else
-	scal_z_p[i] = scal_in[gidX + (gidY+i*L_WIDTH)*NB_X];
-#endif
-
-	// Compute stencil
-	/* // central point */
-	s = scal_z[i] * (1.0 - 2.0 * (cx + cy + cz));
-
-	s += cz*(scal_z_m[i] + scal_z_p[i]);
-
-	s += cy * tile_XY[lidX][lidY+i*L_WIDTH-1];
-	s += cy * tile_XY[lidX][lidY+i*L_WIDTH+1];
-	s += cx * tile_XY[lidX-1][lidY+i*L_WIDTH];
-	s += cx * tile_XY[lidX+1][lidY+i*L_WIDTH];
-
-	// write result
-	scal_out[gidX + (gidY+i*L_WIDTH)*NB_X + gidZ*NB_X*NB_Y] = s;
-      }
-      /* Synchronize work-group */
-      barrier(CLK_LOCAL_MEM_FENCE);
-    }
-  }
-}
diff --git a/hysop/backend/device/opencl/cl_src/kernels/fine_to_coarse_filter.cl b/hysop/backend/device/opencl/cl_src/kernels/fine_to_coarse_filter.cl
deleted file mode 100644
index 2851c60383e2d634a729ab5cbeb0360e5c5da5a9..0000000000000000000000000000000000000000
--- a/hysop/backend/device/opencl/cl_src/kernels/fine_to_coarse_filter.cl
+++ /dev/null
@@ -1,178 +0,0 @@
-__kernel void initialize_output(__global float* scal_out) {
-  scal_out[get_global_id(0) + get_global_id(1)*NB_OUT_X + get_global_id(2)*NB_OUT_X*NB_OUT_Y] = 0.0;
-}
-
-__kernel void coarse_to_fine_filter(__global const float* scal_in,
-				    __global float* scal_out,
-				    float scale_factor,
-				    float4 dx_in, float4 dx_out, float4 origin,
-				    int offset_y, int offset_z) {
-  // Work-group is computed from coarse grid (without ghosts)
-  // globalsize(1) = (NB_OUT_Y - 2*GHOSTS_OUT_Y) / PTS_PER_CELL_Y
-  // globalsize(2) = (NB_OUT_Z - 2*GHOSTS_OUT_Z) / PTS_PER_CELL_X
-  // Resolutions are linked by: (NB_OUT - 2*GHOSTS_OUT) * PTS_PER_CELL = NB_IN
-  // A work-group is in charge of a subdomain corresponding to:
-  //   - [NB_OUT_X, L_STENCIL, L_STENCIL] for the coarse grid
-  //   - [NB_IN_X, PTS_PER_CELL_Y, PTS_PER_CELL] for the fine grid
-  // Data in the fine grid are read only once for the whole computation.
-  // Because of the stencil, these data are spread over multiple coarse grid cells -> we need a global memory synchronization.
-  // The global synchronization is obtained by several kernel launch with an offset
-  unsigned int lid = get_local_id(0);
-  unsigned int gid_y = get_global_id(1);
-  unsigned int gid_z = get_global_id(2);
-  unsigned int iy_c = gid_y*L_STENCIL+offset_y;
-  unsigned int iz_c = gid_z*L_STENCIL+offset_z;
-  unsigned int iy_f = iy_c*PTS_PER_CELL_Y;
-  unsigned int iz_f = iz_c*PTS_PER_CELL_Z;
-  unsigned int i, j, k, b_id, pt_x, pt_y, pt_z;
-  float4 coord_in;
-  float4 coord_out;
-  float4 d;
-#if FORMULA==L2_1
-  float4 wx, wy, wz;
-#endif
-  __local float line[WG*PTS_PER_CELL_X];
-  __local float result[NB_OUT_X][L_STENCIL][L_STENCIL];
-  __private float p_res[L_STENCIL][L_STENCIL][L_STENCIL];
-
-  // Fill local arrays
-  // Output data
-  for (k=0;k<L_STENCIL;k++)
-    for (j=0;j<L_STENCIL;j++)
-      for (i=lid;i<NB_OUT_X;i+=WG)
-  	result[i][j][k] = scal_out[i + (GHOSTS_OUT_Y+iy_c-SHIFT_STENCIL+j)*NB_OUT_X +
-				   (GHOSTS_OUT_Z+iz_c-SHIFT_STENCIL+k)*NB_OUT_X*NB_OUT_Y];
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  for (b_id=0;b_id<NB_IN_X/(WG*PTS_PER_CELL_X);b_id++)
-    {
-      // Compute a bloc of: (b_id is the bloc number in X direction)
-      //   - [WG*PTS_PER_CELL_X, PTS_PER_CELL_Y, PTS_PER_CELL_Z] points in fine grid
-      //   - [WG, L_STENCIL, L_STENCIL] points in coarse grid
-      // Each work-item is computing a coarse cell (looping in 3D over PTS_PER_CELL thanks to pt_x, pt_y and pt_z indices)
-      // global fine grid data are cached line by line in the X direction
-      coord_out = ((float4)(b_id*WG+lid, iy_c, iz_c, 0.0)) * dx_out;
-      // Initialize the register corresponding to the current cell
-      for (pt_z=0;pt_z<L_STENCIL;pt_z++)
-	for (pt_y=0;pt_y<L_STENCIL;pt_y++)
-	  for (pt_x=0;pt_x<L_STENCIL;pt_x++)
-	    p_res[pt_x][pt_y][pt_z] = 0.0;
-
-      // Loop over PTS_PER_CELL_Z: fine grid points in the curent cell
-      for (pt_z=0;pt_z<PTS_PER_CELL_Z;pt_z++)
-	{
-	  // Loop over PTS_PER_CELL_Y: fine grid points in the curent cell
-	  for (pt_y=0;pt_y<PTS_PER_CELL_Y;pt_y++)
-	    {
-	      // Input cache
-	      for (i=lid;i<WG*PTS_PER_CELL_X;i+=WG)
-		line[i] = scal_in[b_id*(WG*PTS_PER_CELL_X) + i + (iy_f+pt_y)*NB_IN_X + (iz_f+pt_z)*NB_IN_X*NB_IN_Y];
-	      barrier(CLK_LOCAL_MEM_FENCE);
-
-	      // Loop over PTS_PER_CELL_X: fine grid points in the curent cell
-	      for (pt_x=0;pt_x<PTS_PER_CELL_X;pt_x++)
-		{
-		  coord_in = ((float4)(b_id*(WG*PTS_PER_CELL_X) + lid*PTS_PER_CELL_X + pt_x, iy_f+pt_y, iz_f+pt_z, 0.0)) * dx_in;
-		  d = (coord_in  - coord_out) / dx_out;
-		  #if FORMULA==LINEAR
-		  p_res[0][0][0] += scale_factor * (1.0 - d.x) * (1.0 - d.y) * (1.0 - d.z) * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[0][0][1] += scale_factor * (1.0 - d.x) * (1.0 - d.y) * (d.z) * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[0][1][0] += scale_factor * (1.0 - d.x) * (d.y) * (1.0 - d.z) * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[0][1][1] += scale_factor * (1.0 - d.x) * (d.y) * (d.z) * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[1][0][0] += scale_factor * (d.x) * (1.0 - d.y) * (1.0 - d.z) * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[1][0][1] += scale_factor * (d.x) * (1.0 - d.y) * (d.z) * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[1][1][0] += scale_factor * (d.x) * (d.y) * (1.0 - d.z) * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[1][1][1] += scale_factor * (d.x) * (d.y) * (d.z) * line[lid*PTS_PER_CELL_X+pt_x];
-		  #elif FORMULA==L2_1
-		  wx = (float4)(alpha_l2_1(d.x), beta_l2_1(d.x), gamma_l2_1(d.x), delta_l2_1(d.x));
-		  wy = (float4)(alpha_l2_1(d.y), beta_l2_1(d.y), gamma_l2_1(d.y), delta_l2_1(d.y));
-		  wz = (float4)(alpha_l2_1(d.z), beta_l2_1(d.z), gamma_l2_1(d.z), delta_l2_1(d.z));
-		  p_res[0][0][0] += scale_factor * wx.x * wy.x * wz.x * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[0][0][1] += scale_factor * wx.x * wy.x * wz.y * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[0][0][2] += scale_factor * wx.x * wy.x * wz.z * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[0][0][3] += scale_factor * wx.x * wy.x * wz.w * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[0][1][0] += scale_factor * wx.x * wy.y * wz.x * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[0][1][1] += scale_factor * wx.x * wy.y * wz.y * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[0][1][2] += scale_factor * wx.x * wy.y * wz.z * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[0][1][3] += scale_factor * wx.x * wy.y * wz.w * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[0][2][0] += scale_factor * wx.x * wy.z * wz.x * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[0][2][1] += scale_factor * wx.x * wy.z * wz.y * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[0][2][2] += scale_factor * wx.x * wy.z * wz.z * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[0][2][3] += scale_factor * wx.x * wy.z * wz.w * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[0][3][0] += scale_factor * wx.x * wy.w * wz.x * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[0][3][1] += scale_factor * wx.x * wy.w * wz.y * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[0][3][2] += scale_factor * wx.x * wy.w * wz.z * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[0][3][3] += scale_factor * wx.x * wy.w * wz.w * line[lid*PTS_PER_CELL_X+pt_x];
-
-		  p_res[1][0][0] += scale_factor * wx.y * wy.x * wz.x * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[1][0][1] += scale_factor * wx.y * wy.x * wz.y * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[1][0][2] += scale_factor * wx.y * wy.x * wz.z * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[1][0][3] += scale_factor * wx.y * wy.x * wz.w * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[1][1][0] += scale_factor * wx.y * wy.y * wz.x * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[1][1][1] += scale_factor * wx.y * wy.y * wz.y * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[1][1][2] += scale_factor * wx.y * wy.y * wz.z * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[1][1][3] += scale_factor * wx.y * wy.y * wz.w * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[1][2][0] += scale_factor * wx.y * wy.z * wz.x * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[1][2][1] += scale_factor * wx.y * wy.z * wz.y * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[1][2][2] += scale_factor * wx.y * wy.z * wz.z * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[1][2][3] += scale_factor * wx.y * wy.z * wz.w * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[1][3][0] += scale_factor * wx.y * wy.w * wz.x * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[1][3][1] += scale_factor * wx.y * wy.w * wz.y * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[1][3][2] += scale_factor * wx.y * wy.w * wz.z * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[1][3][3] += scale_factor * wx.y * wy.w * wz.w * line[lid*PTS_PER_CELL_X+pt_x];
-
-		  p_res[2][0][0] += scale_factor * wx.z * wy.x * wz.x * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[2][0][1] += scale_factor * wx.z * wy.x * wz.y * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[2][0][2] += scale_factor * wx.z * wy.x * wz.z * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[2][0][3] += scale_factor * wx.z * wy.x * wz.w * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[2][1][0] += scale_factor * wx.z * wy.y * wz.x * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[2][1][1] += scale_factor * wx.z * wy.y * wz.y * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[2][1][2] += scale_factor * wx.z * wy.y * wz.z * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[2][1][3] += scale_factor * wx.z * wy.y * wz.w * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[2][2][0] += scale_factor * wx.z * wy.z * wz.x * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[2][2][1] += scale_factor * wx.z * wy.z * wz.y * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[2][2][2] += scale_factor * wx.z * wy.z * wz.z * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[2][2][3] += scale_factor * wx.z * wy.z * wz.w * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[2][3][0] += scale_factor * wx.z * wy.w * wz.x * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[2][3][1] += scale_factor * wx.z * wy.w * wz.y * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[2][3][2] += scale_factor * wx.z * wy.w * wz.z * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[2][3][3] += scale_factor * wx.z * wy.w * wz.w * line[lid*PTS_PER_CELL_X+pt_x];
-
-		  p_res[3][0][0] += scale_factor * wx.w * wy.x * wz.x * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[3][0][1] += scale_factor * wx.w * wy.x * wz.y * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[3][0][2] += scale_factor * wx.w * wy.x * wz.z * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[3][0][3] += scale_factor * wx.w * wy.x * wz.w * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[3][1][0] += scale_factor * wx.w * wy.y * wz.x * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[3][1][1] += scale_factor * wx.w * wy.y * wz.y * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[3][1][2] += scale_factor * wx.w * wy.y * wz.z * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[3][1][3] += scale_factor * wx.w * wy.y * wz.w * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[3][2][0] += scale_factor * wx.w * wy.z * wz.x * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[3][2][1] += scale_factor * wx.w * wy.z * wz.y * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[3][2][2] += scale_factor * wx.w * wy.z * wz.z * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[3][2][3] += scale_factor * wx.w * wy.z * wz.w * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[3][3][0] += scale_factor * wx.w * wy.w * wz.x * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[3][3][1] += scale_factor * wx.w * wy.w * wz.y * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[3][3][2] += scale_factor * wx.w * wy.w * wz.z * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[3][3][3] += scale_factor * wx.w * wy.w * wz.w * line[lid*PTS_PER_CELL_X+pt_x];
-
-		  #endif
-		}
-	    }
-	}
-      // Store the registers results in local memory
-      for (pt_z=0;pt_z<L_STENCIL;pt_z++)
-	for (pt_y=0;pt_y<L_STENCIL;pt_y++)
-	  for (pt_x=0;pt_x<L_STENCIL;pt_x++) {
-	    result[GHOSTS_OUT_X+b_id*WG+lid-SHIFT_STENCIL+pt_x][pt_y][pt_z] += p_res[pt_x][pt_y][pt_z];
-	    barrier(CLK_LOCAL_MEM_FENCE);
-	  }
-    }
-
-  // Write result in output array
-  for (k=0;k<L_STENCIL;k++)
-    for (j=0;j<L_STENCIL;j++)
-      for (i=lid;i<NB_OUT_X;i+=WG)
-	scal_out[i + (GHOSTS_OUT_Y+iy_c-SHIFT_STENCIL+j)*NB_OUT_X +
-		 (GHOSTS_OUT_Z+iz_c-SHIFT_STENCIL+k)*NB_OUT_X*NB_OUT_Y] = result[i][j][k];
-
-}
diff --git a/hysop/backend/device/opencl/cl_src/kernels/multiphase_baroclinic_rhs.cl b/hysop/backend/device/opencl/cl_src/kernels/multiphase_baroclinic_rhs.cl
deleted file mode 100644
index d9938aa9994f0c5e1c37b8d56fe82b45db3405e0..0000000000000000000000000000000000000000
--- a/hysop/backend/device/opencl/cl_src/kernels/multiphase_baroclinic_rhs.cl
+++ /dev/null
@@ -1,321 +0,0 @@
-/** Computes the right hand side of the baroclinic term.
- * The pressure gradient is given in input at a coarse scale.
- * The density is given at a fine scale.
- * Result is computed ans returned at fine scale.
- */
-#define C_TILE_IDX(x,y) x+GHOSTS_C_X+(y+GHOSTS_C_Y)*C_TILE_WIDTH
-#if FD_ORDER == FD_C_2
-#define GRAD_GH 1
-#endif
-#if FD_ORDER == FD_C_4
-#define GRAD_GH 2
-#endif
-
-float compute_density(float x);
-float compute_density(float x){
-  return __USER_DENSITY_FUNCTION_FROM_GIVEN_INPUT__;
-}
-
-float interpolate(__local float* loc_gradp_zm, __local float* loc_gradp_zp,
-		  float *h, int lidx, int lidy,
-		  int cellx, int celly, int cellz);
-
-float interpolate(__local float* loc_zm,
-		  __local float* loc_zp,
-		  float *h,
-		  int lidx,
-		  int lidy,
-		  int cellx,
-		  int celly,
-		  int cellz) {
-  float res = 0.0;
-  res += (1.0 - h[cellz]) * (1.0 - h[cellx]) * (1.0 - h[celly]) * loc_zm[C_TILE_IDX(lidx,lidy)];
-  res += (1.0 - h[cellz]) * (h[cellx]) * (1.0 - h[celly]) * loc_zm[C_TILE_IDX(lidx+1,lidy)];
-  res += (1.0 - h[cellz]) * (1.0 - h[cellx]) * (h[celly]) * loc_zm[C_TILE_IDX(lidx,lidy+1)];
-  res += (1.0 - h[cellz]) * (h[cellx]) * (h[celly]) * loc_zm[C_TILE_IDX(lidx+1,lidy+1)];
-  res += (h[cellz]) * (1.0 - h[cellx]) * (1.0 - h[celly]) * loc_zp[C_TILE_IDX(lidx,lidy)];
-  res += (h[cellz]) * (h[cellx]) * (1.0 - h[celly]) * loc_zp[C_TILE_IDX(lidx+1,lidy)];
-  res += (h[cellz]) * (1.0 - h[cellx]) * (h[celly]) * loc_zp[C_TILE_IDX(lidx,lidy+1)];
-  res += (h[cellz]) * (h[cellx]) * (h[celly]) * loc_zp[C_TILE_IDX(lidx+1,lidy+1)];
-  return res;
-}
-
-void fill_loc_rho_cache(__local float *loc_rho,
-			__global const float* rho,
-#if CUT_DIR_Y == 1
-			__global const float* rho_ghostsY,
-#endif
-			int lidx,
-			int lidy,
-			int gidx,
-			int gidy,
-			int idz);
-void fill_loc_rho_cache(__local float *loc_rho,
-			__global const float* rho,
-#if CUT_DIR_Y == 1
-			__global const float* rho_ghostsY,
-#endif
-			int lidx,
-			int lidy,
-			int gidx,
-			int gidy,
-			int idz) {
-  int celly, cellx;
-  if (gidx > 0 && gidx < ((int)get_num_groups(0))-1 && gidy > 0 && gidy < ((int)get_num_groups(1))-1) {
-    for (celly=lidy; celly<F_TILE_SIZE+2*GRAD_GH; celly+=get_local_size(1)) {
-      for (cellx=lidx; cellx<F_TILE_SIZE+2*GRAD_GH; cellx+=get_local_size(0)) {
-	loc_rho[cellx + (celly)*(F_TILE_SIZE+2*GRAD_GH)] =
-	  compute_density(rho[cellx-GRAD_GH + gidx*F_TILE_SIZE +
-			      (celly-GRAD_GH + gidy*F_TILE_SIZE)*NB_F_X +
-			      idz*NB_F_X*NB_F_Y]);
-      }
-    }
-  } else {
-    for (celly=lidy; celly<F_TILE_SIZE+2*GRAD_GH; celly+=get_local_size(1)) {
-      for (cellx=lidx; cellx<F_TILE_SIZE+2*GRAD_GH; cellx+=get_local_size(0)) {
-#if CUT_DIR_Y == 1
-	if (celly-GRAD_GH + gidy*F_TILE_SIZE >= NB_F_Y)
-	  loc_rho[cellx + (celly)*(F_TILE_SIZE+2*GRAD_GH)] =
-	    compute_density(rho_ghostsY[(cellx-GRAD_GH + gidx*F_TILE_SIZE+NB_F_X)%NB_F_X +
-					(celly-GRAD_GH + gidy*F_TILE_SIZE - NB_F_Y)*NB_F_X +
-					idz*NB_F_X*2*GRAD_GH]);
-	else if (celly-GRAD_GH + gidy*F_TILE_SIZE < 0)
-	  loc_rho[cellx + (celly)*(F_TILE_SIZE+2*GRAD_GH)] =
-	    compute_density(rho_ghostsY[(cellx-GRAD_GH + gidx*F_TILE_SIZE+NB_F_X)%NB_F_X +
-					(2*GRAD_GH + (celly-GRAD_GH + gidy*F_TILE_SIZE))*NB_F_X +
-					idz*NB_F_X*2*GRAD_GH]);
-	else
-	  loc_rho[cellx + (celly)*(F_TILE_SIZE+2*GRAD_GH)] =
-	    compute_density(rho[(cellx-GRAD_GH + gidx*F_TILE_SIZE+NB_F_X)%NB_F_X +
-				(celly-GRAD_GH + gidy*F_TILE_SIZE)*NB_F_X +
-				idz*NB_F_X*NB_F_Y]);
-#else
-	loc_rho[cellx + (celly)*(F_TILE_SIZE+2*GRAD_GH)] =
-	  compute_density(rho[(cellx-GRAD_GH + gidx*F_TILE_SIZE+NB_F_X)%NB_F_X +
-			      ((celly-GRAD_GH + gidy*F_TILE_SIZE+NB_F_Y)%NB_F_Y)*NB_F_X +
-			      idz*NB_F_X*NB_F_Y]);
-#endif
-      }
-    }
-  }
-}
-
-__kernel void baroclinic_rhs(__global float* rhs_x,
-			     __global float* rhs_y,
-			     __global float* rhs_z,
-			     __global const float* rho,
-#if CUT_DIR_Y == 1
-			     __global const float* rho_ghostsY,
-#endif
-#if CUT_DIR_Z == 1
-			     __global const float* rho_ghostsZ,
-#endif
-			     __global const float* gradp,
-			     float4 dx_coarse,
-			     float4 dx_fine)
-{
-  /* Space index refers to the coarse grid comute points */
-  int lidx = get_local_id(0);
-  int lidy = get_local_id(1);
-  int gidx = get_group_id(0);
-  int gidy = get_group_id(1);
-  int rhs_idx = lidx*N_PER_CELL + gidx*F_TILE_SIZE + (lidy*N_PER_CELL + gidy*F_TILE_SIZE)*NB_F_X;
-  float h[N_PER_CELL];
-  int i, cellx, celly, cellz;
-  int idz, c_idz;
-  float p_gradp, gradrho_x, gradrho_y, gradrho_z;
-  float rho_zm[N_PER_CELL][N_PER_CELL];
-#if FD_ORDER == FD_C_4
-  float rho_zmm[N_PER_CELL][N_PER_CELL];
-  float rho_zp[N_PER_CELL][N_PER_CELL];
-  float rho_zpp;
-#endif
-
-  __local float loc_rho[(F_TILE_SIZE+2*GRAD_GH)*(F_TILE_SIZE+2*GRAD_GH)];
-  __local float loc_gradp_zm[C_TILE_WIDTH*C_TILE_HEIGHT];
-  __local float loc_gradp_zp[C_TILE_WIDTH*C_TILE_HEIGHT];
-
-
-  // Compute distances from fine grid points to coarse left point cell.
-  for (i=0; i<N_PER_CELL; i++)
-    h[i] = i * 1.0 / (1.0 * N_PER_CELL);
-
-  idz = 0; 			/* Fine grid Z indice */
-  c_idz=GHOSTS_C_Z;
-  // Fill gradp z cache for first iteration
-  for (celly=lidy; celly<C_TILE_HEIGHT; celly+=get_local_size(1)) {
-    for (cellx=lidx; cellx<C_TILE_WIDTH; cellx+=get_local_size(0)) {
-      loc_gradp_zm[cellx + celly*(C_TILE_WIDTH)] =
-	gradp[cellx + gidx*C_TILE_SIZE + (celly + gidy*C_TILE_SIZE)*NB_C_X + c_idz*NB_C_X*NB_C_Y];
-    }
-  }
-
-  for (celly=0; celly<N_PER_CELL; celly++) {
-    for (cellx=0; cellx<N_PER_CELL; cellx++) {
-#if FD_ORDER == FD_C_4
-#if CUT_DIR_Z == 1
-      rho_zm[cellx][celly] = compute_density(rho_ghostsZ[rhs_idx + cellx + celly*NB_F_X + 3*NB_F_X*NB_F_Y]);
-      rho_zmm[cellx][celly] = compute_density(rho_ghostsZ[rhs_idx + cellx + celly*NB_F_X + 2*NB_F_X*NB_F_Y]);
-      rho_zp[cellx][celly] = compute_density(rho[rhs_idx + cellx + celly*NB_F_X + NB_F_X*NB_F_Y]);
-#else
-      rho_zm[cellx][celly] = compute_density(rho[rhs_idx + cellx + celly*NB_F_X + (NB_F_Z-1)*NB_F_X*NB_F_Y]);
-      rho_zmm[cellx][celly] = compute_density(rho[rhs_idx + cellx + celly*NB_F_X + (NB_F_Z-2)*NB_F_X*NB_F_Y]);
-      rho_zp[cellx][celly] = compute_density(rho[rhs_idx + cellx + celly*NB_F_X + NB_F_X*NB_F_Y]);
-#endif
-#else
-#if CUT_DIR_Z == 1
-      rho_zm[cellx][celly] = compute_density(rho_ghostsZ[rhs_idx + cellx + celly*NB_F_X + NB_F_X*NB_F_Y]);
-#else
-      rho_zm[cellx][celly] = compute_density(rho[rhs_idx + cellx + celly*NB_F_X + (NB_F_Z-1)*NB_F_X*NB_F_Y]));
-#endif
-#endif
-    }
-  }
-
-  for (c_idz=GHOSTS_C_Z; c_idz<NB_C_Z-GHOSTS_C_Z; c_idz++) {
-
-    if((c_idz-GHOSTS_C_Z)%2 == 0)
-      for (celly=lidy; celly<C_TILE_HEIGHT; celly+=get_local_size(1)) {
-	for (cellx=lidx; cellx<C_TILE_WIDTH; cellx+=get_local_size(0)) {
-	  loc_gradp_zp[cellx + celly*(C_TILE_WIDTH)] =
-	    gradp[cellx + gidx*C_TILE_SIZE + (celly + gidy*C_TILE_SIZE)*NB_C_X + (c_idz+1)*NB_C_X*NB_C_Y];
-	}
-      }
-    else
-      for (celly=lidy; celly<C_TILE_HEIGHT; celly+=get_local_size(1)) {
-	for (cellx=lidx; cellx<C_TILE_WIDTH; cellx+=get_local_size(0)) {
-	  loc_gradp_zm[cellx + celly*(C_TILE_WIDTH)] =
-	    gradp[cellx + gidx*C_TILE_SIZE + (celly + gidy*C_TILE_SIZE)*NB_C_X + (c_idz+1)*NB_C_X*NB_C_Y];
-	}
-      }
-    barrier(CLK_LOCAL_MEM_FENCE);
-
-    for (cellz=0; cellz<N_PER_CELL; cellz++) {
-      //fill rho cache
-#if CUT_DIR_Y == 1
-      fill_loc_rho_cache(loc_rho, rho, rho_ghostsY, lidx, lidy, gidx, gidy, idz);
-#else
-      fill_loc_rho_cache(loc_rho, rho, lidx, lidy, gidx, gidy, idz);
-#endif
-
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      for (celly=0; celly<N_PER_CELL; celly++) {
-	for (cellx=0; cellx<N_PER_CELL; cellx++) {
-	  if((c_idz-GHOSTS_C_Z)%2 == 0)
-	    p_gradp = interpolate(loc_gradp_zm, loc_gradp_zp, h, lidx, lidy, cellx, celly, cellz);
-	  else
-	    p_gradp = interpolate(loc_gradp_zp, loc_gradp_zm, h, lidx, lidy, cellx, celly, cellz);
-
-	  ///// TEMP WRITE GRADP TO RHS
-	  //rhs_x[rhs_idx + cellx + celly*NB_F_X + idz*NB_F_X*NB_F_Y] = p_gradp;
-	  ///// END TEMP WRITE GRADP TO RHS
-
-#if FD_ORDER == FD_C_2
-	  gradrho_x = loc_rho[GRAD_GH+lidx*N_PER_CELL+cellx+1 +
-			      (GRAD_GH+lidy*N_PER_CELL+celly)*(F_TILE_SIZE+2*GRAD_GH)];
-	  gradrho_x -= loc_rho[GRAD_GH+lidx*N_PER_CELL+cellx-1 +
-			       (GRAD_GH+lidy*N_PER_CELL+celly)*(F_TILE_SIZE+2*GRAD_GH)];
-	  gradrho_x /= (2.0*dx_fine.x);
-
-	  gradrho_y = loc_rho[GRAD_GH+lidx*N_PER_CELL+cellx +
-			      (GRAD_GH+lidy*N_PER_CELL+celly+1)*(F_TILE_SIZE+2*GRAD_GH)];
-	  gradrho_y -= loc_rho[GRAD_GH+lidx*N_PER_CELL+cellx +
-			       (GRAD_GH+lidy*N_PER_CELL+celly-1)*(F_TILE_SIZE+2*GRAD_GH)];
-	  gradrho_y /= (2.0*dx_fine.y);
-
-#if CUT_DIR_Z == 1
-	  if (idz==NB_F_Z-1)
-	    gradrho_z = compute_density(rho_ghostsZ[rhs_idx + cellx + celly*NB_F_X]);
-	  else
-	    gradrho_z = compute_density(rho[rhs_idx + cellx + celly*NB_F_X + ((idz+1)%NB_F_Z)*NB_F_X*NB_F_Y]);
-#else
- 	  gradrho_z = compute_density(rho[rhs_idx + cellx + celly*NB_F_X + ((idz+1)%NB_F_Z)*NB_F_X*NB_F_Y]);
-#endif
-	  gradrho_z -= rho_zm[cellx][celly];
-	  gradrho_z /= (2.0*dx_fine.z);
-
-#endif
-#if FD_ORDER == FD_C_4
-	  gradrho_x = loc_rho[GRAD_GH+lidx*N_PER_CELL+cellx+1 +
-			      (GRAD_GH+lidy*N_PER_CELL+celly)*(F_TILE_SIZE+2*GRAD_GH)];
-	  gradrho_x -= loc_rho[GRAD_GH+lidx*N_PER_CELL+cellx-1 +
-			      (GRAD_GH+lidy*N_PER_CELL+celly)*(F_TILE_SIZE+2*GRAD_GH)];
-	  gradrho_x *= 8.0;
-	  gradrho_x += loc_rho[GRAD_GH+lidx*N_PER_CELL+cellx-2 +
-			      (GRAD_GH+lidy*N_PER_CELL+celly)*(F_TILE_SIZE+2*GRAD_GH)];
-	  gradrho_x -= loc_rho[GRAD_GH+lidx*N_PER_CELL+cellx+2 +
-			      (GRAD_GH+lidy*N_PER_CELL+celly)*(F_TILE_SIZE+2*GRAD_GH)];
-	  gradrho_x /= (12.0*dx_fine.x);
-
-	  gradrho_y = loc_rho[GRAD_GH+lidx*N_PER_CELL+cellx +
-			      (GRAD_GH+lidy*N_PER_CELL+celly+1)*(F_TILE_SIZE+2*GRAD_GH)];
-	  gradrho_y -= loc_rho[GRAD_GH+lidx*N_PER_CELL+cellx +
-			      (GRAD_GH+lidy*N_PER_CELL+celly-1)*(F_TILE_SIZE+2*GRAD_GH)];
-	  gradrho_y *= 8.0;
-	  gradrho_y += loc_rho[GRAD_GH+lidx*N_PER_CELL+cellx +
-			      (GRAD_GH+lidy*N_PER_CELL+celly-2)*(F_TILE_SIZE+2*GRAD_GH)];
-	  gradrho_y -= loc_rho[GRAD_GH+lidx*N_PER_CELL+cellx +
-			      (GRAD_GH+lidy*N_PER_CELL+celly+2)*(F_TILE_SIZE+2*GRAD_GH)];
-	  gradrho_y /= (12.0*dx_fine.y);
-
-#if CUT_DIR_Z == 1
-	  if (idz==NB_F_Z-1)
-	    rho_zpp = compute_density(rho_ghostsZ[rhs_idx + cellx + celly*NB_F_X + NB_F_X*NB_F_Y]);
-	  else if (idz==NB_F_Z-2)
-	    rho_zpp = compute_density(rho_ghostsZ[rhs_idx + cellx + celly*NB_F_X]);
-	  else
-	    rho_zpp = compute_density(rho[rhs_idx + cellx + celly*NB_F_X + ((idz+2)%NB_F_Z)*NB_F_X*NB_F_Y]);
-#else
-	  rho_zpp = compute_density(rho[rhs_idx + cellx + celly*NB_F_X + ((idz+2)%NB_F_Z)*NB_F_X*NB_F_Y]);
-#endif
-	  gradrho_z = rho_zp[cellx][celly];
-	  gradrho_z -= rho_zm[cellx][celly];
-	  gradrho_z *= 8.0;
-	  gradrho_z += rho_zmm[cellx][celly];
-	  gradrho_z -= rho_zpp;
-	  gradrho_z /= (12.0*dx_fine.z);
-#endif
-
-	  ///// TEMP WRITE GRADrho_X TO RHS
-	  //rhs_x[rhs_idx + cellx + celly*NB_F_X + idz*NB_F_X*NB_F_Y] = gradrho_x;
-	  ///// END TEMP WRITE GRADrho TO RHS
-	  ///// TEMP WRITE GRADrho_Y TO RHS
-	  //rhs_x[rhs_idx + cellx + celly*NB_F_X + idz*NB_F_X*NB_F_Y] = gradrho_y;
-	  ///// END TEMP WRITE GRADrho TO RHS
-	  ///// TEMP WRITE GRADrho_Z TO RHS
-	  //rhs_x[rhs_idx + cellx + celly*NB_F_X + idz*NB_F_X*NB_F_Y] = gradrho_z;
-	  ///// END TEMP WRITE GRADrho TO RHS
-
-	  // Using gradp X component as gradp and assuming this kernel run first to initialise output
-#if GRADP_COMP == 0
-	  rhs_x[rhs_idx + cellx + celly*NB_F_X + idz*NB_F_X*NB_F_Y] = 0.0;
-	  rhs_y[rhs_idx + cellx + celly*NB_F_X + idz*NB_F_X*NB_F_Y] = -gradrho_z*p_gradp;
-	  rhs_z[rhs_idx + cellx + celly*NB_F_X + idz*NB_F_X*NB_F_Y] = gradrho_y*p_gradp;
-#endif
-	  // Using gradp Y component as gradp
-#if GRADP_COMP == 1
-	  rhs_x[rhs_idx + cellx + celly*NB_F_X + idz*NB_F_X*NB_F_Y] += gradrho_z*p_gradp;
-	  rhs_z[rhs_idx + cellx + celly*NB_F_X + idz*NB_F_X*NB_F_Y] -= gradrho_x*p_gradp;
-#endif
-	  // Using gradp Z component as gradp
-#if GRADP_COMP == 2
-	  rhs_x[rhs_idx + cellx + celly*NB_F_X + idz*NB_F_X*NB_F_Y] -= gradrho_y*p_gradp;
-	  rhs_y[rhs_idx + cellx + celly*NB_F_X + idz*NB_F_X*NB_F_Y] += gradrho_x*p_gradp;
-#endif
-
-
-	  // For next iteration we swap values in cache.
-#if FD_ORDER == FD_C_4
-	  rho_zp[cellx][celly] = rho_zpp;
-	  rho_zmm[cellx][celly] = rho_zm[cellx][celly];
-#endif
-	  rho_zm[cellx][celly] = loc_rho[GRAD_GH+lidx*N_PER_CELL+cellx +
-					 (GRAD_GH+lidy*N_PER_CELL+celly)*(F_TILE_SIZE+2*GRAD_GH)];
-	}
-      }
-      idz++;
-    }
-    barrier(CLK_LOCAL_MEM_FENCE);
-  }
-}
diff --git a/hysop/backend/device/opencl/cl_src/kernels/remeshing.cl b/hysop/backend/device/opencl/cl_src/kernels/remeshing.cl
deleted file mode 100644
index 809c5ad32567ca2b91cc51773fffb480c05de721..0000000000000000000000000000000000000000
--- a/hysop/backend/device/opencl/cl_src/kernels/remeshing.cl
+++ /dev/null
@@ -1,83 +0,0 @@
-/**
- * @file remeshing.cl
- * Remeshing kernel.
- */
-
-/**
- * Performs remeshing of the particles' scalar.
- * A work-group is handling a 1D problem. Thus, gidY and gidZ are constants among work-items of a work-group.
- * Each work-item computes <code>NB_I/WI_NB</code> particles positions. To avoid concurrent witings, in case of strong velocity gradients, work-items computes contiguous particles.
- * Particle are computed through OpenCL vector types of lenght 2, 4 or 8.
- * Scalar results are stored in a local buffer as a cache and then copied to global memory buffer.
- *
- * @param ppos Particle position
- * @param pscal Particle scalar
- * @param gscal Grid scalar
- * @param min_position Domain lower coordinate
- * @param dx Space step
- *
- * @remark <code>NB_I</code>, <code>NB_II</code>, <code>NB_III</code> : points number in directions from 1st varying index to last.
- * @remark <code>WI_NB</code> corresponds to the work-item number.
- * @remark <code>__N__</code> is expanded at compilation time by vector width.
- * @remark <code>__NN__</code> is expanded at compilation time by a sequence of integer for each vector component.
- * @remark <code>__RCOMP_I</code> flag is for instruction expansion for the different remeshed components.
- * @remark <code>__RCOMP_P</code> flag is for function parameter expansion for the different remeshed components.
- * @remark <code>__ID__</code> is replaced by the remeshed component id in an expansion.
- * @see hysop.gpu.tools.parse_file
- */
-__kernel void remeshing_kernel(__global const float* ppos,
-			       __RCOMP_P__global const float* pscal__ID__,
-			       __RCOMP_P__global float* gscal__ID__,
-			       __constant struct AdvectionMeshInfo* mesh)
-{
-  uint gidX = get_global_id(0);	/* OpenCL work-itme global index (X) */
-  uint gidY; /* OpenCL work-itme global index (Y) */
-  uint gidZ; /* OpenCL work-itme global index (Z) */
-  //  float invdx = 1.0/dx;         /* Space step inverse */
-  uint i;			/* Particle index in 1D problem */
-  float__N__ p;			/* Particle position */
-  __RCOMP_I float__N__ s__ID__; /* Particle scalar */
-  uint line_index; /* Current 1D problem index */
-
-  __RCOMP_I__local float gscal_loc__ID__[NB_I]; /* Local buffer for result */
-
-#ifdef NB_Z
-  for(gidZ=get_global_id(2); gidZ<NB_III; gidZ+=get_global_size(2)) {
-#else
-  gidZ=get_global_id(2); {
-#endif
-  for(gidY=get_global_id(1); gidY<NB_II; gidY+=get_global_size(1)) {
-  line_index = gidY*NB_I+ gidZ*NB_I*NB_II;
-
-  for(i=gidX*__N__; i<NB_I; i+=(WI_NB*__N__))
-    {
-      /* Initialize result buffer */
-      __RCOMP_Igscal_loc__ID__[i+__NN__] = 0.0;
-    }
-
-  /* Synchronize work-group */
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  for(i=gidX*PART_NB_PER_WI; i<(gidX + 1)*PART_NB_PER_WI; i+=__N__)
-    {
-      /* Read particle position */
-      p = vload__N__((i + line_index)/__N__, ppos);
-      /* Read particle scalar */
-      __RCOMP_Is__ID__ = vload__N__((i + line_index)/__N__, pscal__ID__);
-      /* Remesh particle */
-      remesh(i, __RCOMP_Ps__ID__, p, __RCOMP_Pgscal_loc__ID__, mesh);
-    }
-
-  /* Synchronize work-group */
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  for(i=gidX*__N__; i<NB_I; i+=(WI_NB*__N__))
-    {
-      /* Store result */
-      __RCOMP_Ivstore__N__((float__N__)(gscal_loc__ID__[noBC_id(i+__NN__)],
-			       ),(i + line_index)/__N__, gscal__ID__);
-  }
-  barrier(CLK_LOCAL_MEM_FENCE);
-}
-}
-}
diff --git a/hysop/backend/device/opencl/cl_src/kernels/remeshing_noVec.cl b/hysop/backend/device/opencl/cl_src/kernels/remeshing_noVec.cl
deleted file mode 100644
index 15db5730cedc2cf8b347c831ed6dfe3b79a48985..0000000000000000000000000000000000000000
--- a/hysop/backend/device/opencl/cl_src/kernels/remeshing_noVec.cl
+++ /dev/null
@@ -1,81 +0,0 @@
-/**
- * @file remeshing.cl
- * Remeshing kernel.
- */
-
-/**
- * Performs remeshing of the particles' scalar.
- * A work-group is handling a 1D problem. Thus, gidY and gidZ are constants among work-items of a work-group.
- * Each work-item computes <code>NB_I/WI_NB</code> particles positions. To avoid concurrent witings, in case of strong velocity gradients, work-items computes contiguous particles.
- * Particle are computed through OpenCL vector types of lenght 2, 4 or 8.
- * Scalar results are stored in a local buffer as a cache and then copied to global memory buffer.
- *
- * @param ppos Particle position
- * @param pscal Particle scalar
- * @param gscal Grid scalar
- * @param min_position Domain lower coordinate
- * @param dx Space step
- *
- * @remark <code>NB_I</code>, <code>NB_II</code>, <code>NB_III</code> : points number in directions from 1st varying index to last.
- * @remark <code>WI_NB</code> corresponds to the work-item number.
- * @remark <code>__N__</code> is expanded at compilation time by vector width.
- * @remark <code>__NN__</code> is expanded at compilation time by a sequence of integer for each vector component.
- * @remark <code>__RCOMP_I</code> flag is for instruction expansion for the different remeshed components.
- * @remark <code>__RCOMP_P</code> flag is for function parameter expansion for the different remeshed components.
- * @remark <code>__ID__</code> is replaced by the remeshed component id in an expansion.
- * @see hysop.gpu.tools.parse_file
- */
-__kernel void remeshing_kernel(__global const float* ppos,
-			       __RCOMP_P__global const float* pscal__ID__,
-			       __RCOMP_P__global float* gscal__ID__,
-			       __constant struct AdvectionMeshInfo* mesh)
-{
-  uint gidX = get_global_id(0);	/* OpenCL work-itme global index (X) */
-  uint gidY; /* OpenCL work-itme global index (Y) */
-  uint gidZ; /* OpenCL work-itme global index (Z) */
-  uint i;			/* Particle index in 1D problem */
-  float p;			/* Particle position */
-  __RCOMP_I float s__ID__;      /* Particle scalar */
-  uint line_index; /* Current 1D problem index */
-
-  __RCOMP_I__local float gscal_loc__ID__[NB_I]; /* Local buffer for result */
-
-#ifdef NB_Z
-  for(gidZ=get_global_id(2); gidZ<NB_III; gidZ+=get_global_size(2)) {
-#else
-  gidZ=get_global_id(2); {
-#endif
-  for(gidY=get_global_id(1); gidY<NB_II; gidY+=get_global_size(1)) {
-  line_index = gidY*NB_I+ gidZ*NB_I*NB_II;
-
-  for(i=gidX; i<NB_I; i+=WI_NB)
-    {
-      /* Initialize result buffer */
-      __RCOMP_Igscal_loc__ID__[i] = 0.0;
-    }
-
-  /* Synchronize work-group */
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  for(i=gidX*PART_NB_PER_WI; i<(gidX + 1)*PART_NB_PER_WI; i+=1)
-    {
-      /* Read particle position */
-      p = ppos[i + line_index];
-      /* Read particle scalar */
-      __RCOMP_Is__ID__ = pscal__ID__[i + line_index];
-      /* Remesh particle */
-      remesh(i, __RCOMP_Ps__ID__, p, __RCOMP_Pgscal_loc__ID__, mesh);
-    }
-
-  /* Synchronize work-group */
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  for(i=gidX; i<NB_I; i+=WI_NB)
-    {
-      /* Store result */
-      __RCOMP_Igscal__ID__[i + line_index] = gscal_loc__ID__[noBC_id(i)];
-  }
-  barrier(CLK_LOCAL_MEM_FENCE);
-}
-}
-}
diff --git a/hysop/backend/device/opencl/cl_src/kernels/rendering.cl b/hysop/backend/device/opencl/cl_src/kernels/rendering.cl
deleted file mode 100644
index 567c44d9abcb4a9a2f7a52ef9d1cda57effc7f81..0000000000000000000000000000000000000000
--- a/hysop/backend/device/opencl/cl_src/kernels/rendering.cl
+++ /dev/null
@@ -1,57 +0,0 @@
-/**
- * @file rendering.cl
- * Rendering kernels.
- */
-
-/**
- * Colorize regarding scalar values.
- *
- * @param scalar Scalar values used
- * @param color Color data array that contains RGBA values for each grid point
- */
-__kernel void colorize(__global const float* scalar,
-		       __global float* color
-)
-{
-  __private uint ind;
-  __private float c;
-  __private int ix, iy;
-  ix = get_global_id(0);
-  iy = get_global_id(1);
-  ind = ix + iy*NB_X;
-
-  //plain colors
-  /* c = (scalar[ind] > 0.5f ? 1.0: 0.0); */
-  /* color[4*ind + 0] = c; //Red */
-  /* color[4*ind + 1] = 0.0; //Green */
-  /* color[4*ind + 2] = 0.0; //Blue */
-  /* color[4*ind + 3] = 1.0; //Alpha */
-
-  //shaded colors
-  c = scalar[ind];
-  color[4*ind + 0] = 2.0*c; //Red
-  color[4*ind + 1] = 2.0*c-0.5; //Green
-  color[4*ind + 2] = 2.0*c-1.0; //Blue
-  color[4*ind + 3] = 1.0; //Alpha
-}
-
-
-/**
- * Compute grid point coordinates from OpenCL index space.
- *
- * @param pos Coordinates ax XY values for each grid point.
- * @param minPos Domain origin.
- * @param size Mesh size.
- */
-__kernel void initPointCoordinates(__global float* pos, float4 minPos, float4 size)
-{
-  __private uint ind;
-  __private int ix, iy;
-  ix = get_global_id(0);
-  iy = get_global_id(1);
-  ind = ix + iy*NB_X;
-
-  pos[2*ind + 0] = ix*size.x;
-  pos[2*ind + 1] = iy*size.y;
-}
-
diff --git a/hysop/backend/device/opencl/cl_src/kernels/transpose_xy.cl b/hysop/backend/device/opencl/cl_src/kernels/transpose_xy.cl
deleted file mode 100644
index a8701738f17f67659f27c616cf6ad75c1f29bcec..0000000000000000000000000000000000000000
--- a/hysop/backend/device/opencl/cl_src/kernels/transpose_xy.cl
+++ /dev/null
@@ -1,99 +0,0 @@
-/**
- * @file transpose_xy.cl
- * Transposition in XY plane, coalesced, diagonal coordinates, vectorized version.
- */
-
-/**
- * Performs a transposition in xy plane.
- * Optimizations used are:
- *   - Coalesced reads and writes by means of local memory buffer (tile),
- *   - Local memory padding to avoid banck conflicts (optional),
- *   - Work groups are mapped to diagonal coordinates in global memory,
- *   - Reads and writes are performed by OpenCL vector types.
- *
- * A work group handle transposition for a tile. Transposition is done when reading data in tile.
- * Work-group layout: \code
- * ________________________
- * |0,0 | 1,0 | ...
- * |N,0 | 0,1 | 1,2 | ...
- * | .     .  | 0,2 | ...
- * | .     .
- * | .     .
- * |
- * \endcode
- *
- * @param in Input data
- * @param out Output data
- *
- * @remark <code>NB_I</code>, <code>NB_II</code>, <code>NB_III</code> : points number in directions from 1st varying index to last. Output layout is <code>NB_I</code>, <code>NB_II</code>, <code>NB_III</code>.
- * @remark <code>PADDING_XY</code> : local memory padding width.
- * @remark <code>__N__</code> is expanded at compilation time by vector width.
- * @remark <code>__NN__</code> is expanded at compilation time by a sequence of integer for each vector component.
- * @see hysop.gpu.tools.parse_file
- */
-__kernel void transpose_xy(__global const float* in,
-        __global float* out)
-{
-    float__N__ temp;			/* Temporary variable */
-    uint group_id_x;			/* Work-group coordinate in global space index X */
-    uint group_id_y;			/* Work-group coordinate in global space index Y */
-    uint lid_x = get_local_id(0);
-    uint lid_y = get_local_id(1);
-
-    uint xIndex, yIndex, zIndex;
-    uint index_in, index_out;
-    uint gidI, gidII, i;
-
-    __local float tile[TILE_DIM_XY][TILE_DIM_XY+PADDING_XY]; /* Tile with padding */
-
-#ifdef NB_III
-    for(zIndex=get_global_id(2); zIndex<NB_III; zIndex+=get_global_size(2))
-#else
-        zIndex=get_global_id(2);
-#endif
-    {
-        for(gidI=get_group_id(0); gidI<NB_GROUPS_I; gidI+=get_num_groups(0)) {
-            for(gidII=get_group_id(1); gidII<NB_GROUPS_II; gidII+=get_num_groups(1)) {
-
-                /* Use of diagonal coordinates */
-#if NB_II == NB_I
-                group_id_x = (gidI + gidII) % NB_GROUPS_I;
-                group_id_y = gidI;
-#else
-                uint bid = gidI + gidII * NB_GROUPS_I;
-                group_id_y = bid%NB_GROUPS_II;
-                group_id_x = ((bid/NB_GROUPS_II) + group_id_y)%NB_GROUPS_I;
-#endif
-
-                /* Global input index for work-item */
-                xIndex = group_id_x * TILE_DIM_XY + lid_x*__N__;
-                yIndex = group_id_y * TILE_DIM_XY + lid_y;
-                //zIndex = get_global_id(2);
-                index_in = xIndex + yIndex * NB_II + zIndex * NB_II * NB_I;
-
-                /* Global output index */
-                xIndex = group_id_y * TILE_DIM_XY + lid_x*__N__;
-                yIndex = group_id_x * TILE_DIM_XY + lid_y;
-                index_out = xIndex + yIndex * NB_I + zIndex * NB_I * NB_II;
-
-
-                for(i=0; i<TILE_DIM_XY; i+=BLOCK_ROWS_XY) {
-                    /* Fill the tile */
-                    temp = vload__N__((index_in + i * NB_II)/__N__, in);
-                    tile[lid_y + i][lid_x*__N__+__NN__] = temp.s__NN__;
-                }
-
-                /* Synchronize work-group */
-                barrier(CLK_LOCAL_MEM_FENCE);
-
-                for(i=0; i<TILE_DIM_XY; i+=BLOCK_ROWS_XY) {
-                    /* Write transposed data */
-                    temp = (float__N__)(tile[lid_x*__N__+__NN__][lid_y + i],
-                            );
-                    vstore__N__(temp, (index_out + i*NB_I)/__N__, out);
-                }
-                barrier(CLK_LOCAL_MEM_FENCE);
-            }
-        }
-    }
-}
diff --git a/hysop/backend/device/opencl/cl_src/kernels/transpose_xy_noVec.cl b/hysop/backend/device/opencl/cl_src/kernels/transpose_xy_noVec.cl
deleted file mode 100644
index 083d86eb2153e7224eab88c8c1f07ac021aa4477..0000000000000000000000000000000000000000
--- a/hysop/backend/device/opencl/cl_src/kernels/transpose_xy_noVec.cl
+++ /dev/null
@@ -1,95 +0,0 @@
-/**
- * @file transpose_xy.cl
- * Transposition in XY plane, coalesced, diagonal coordinates, vectorized version.
- */
-
-/**
- * Performs a transposition in xy plane.
- * Optimizations used are:
- *   - Coalesced reads and writes by means of local memory buffer (tile),
- *   - Local memory padding to avoir banck conflicts (optional),
- *   - Work groups are mapped to diagonal coordinates in global memory,
- *   - Reads and writes are performed by OpenCL vector types.
- *
- * A work group handle transposition for a tile. Transposition is done when reading data in tile.
- * Work-group layout: \code
- * ________________________
- * |0,0 | 1,0 | ...
- * |N,0 | 0,1 | 1,2 | ...
- * | .     .  | 0,2 | ...
- * | .     .
- * | .     .
- * |
- * \endcode
- *
- * @param in Input data
- * @param out Output data
- *
- * @remark <code>NB_I</code>, <code>NB_II</code>, <code>NB_III</code> : points number in directions from 1st varying index to last. Output layout is <code>NB_I</code>, <code>NB_II</code>, <code>NB_III</code>.
- * @remark <code>PADDING_XY</code> : local memory padding width.
- * @remark <code>__N__</code> is expanded at compilation time by vector width.
- * @remark <code>__NN__</code> is expanded at compilation time by a sequence of integer for each vector component.
- * @see hysop.gpu.tools.parse_file
- */
-__kernel void transpose_xy(__global const float* in,
-			   __global float* out)
-{
-  uint group_id_x;			/* Work-group coordinate in global space index X */
-  uint group_id_y;			/* Work-group coordinate in global space index Y */
-  uint lid_x = get_local_id(0);
-  uint lid_y = get_local_id(1);
-
-  uint xIndex, yIndex, zIndex;
-  uint index_in, index_out;
-  uint gidI, gidII, i;
-
-  __local float tile[TILE_DIM_XY][TILE_DIM_XY+PADDING_XY]; /* Tile with padding */
-
-#ifdef NB_Z
-  for(zIndex=get_global_id(2); zIndex<NB_III; zIndex+=get_global_size(2))
-#else
-  zIndex=get_global_id(2);
-#endif
-  {
-    for(gidI=get_group_id(0); gidI<NB_GROUPS_I; gidI+=get_num_groups(0)) {
-      for(gidII=get_group_id(1); gidII<NB_GROUPS_II; gidII+=get_num_groups(1)) {
-
-	/* Use of diagonal coordinates */
-#if NB_II == NB_I
-	group_id_x = (gidI + gidII) % NB_GROUPS_I;
-	group_id_y = gidI;
-#else
-	uint bid = gidI + gidII * NB_GROUPS_I;
-	group_id_y = bid%NB_GROUPS_II;
-	group_id_x = ((bid/NB_GROUPS_II) + group_id_y)%NB_GROUPS_I;
-#endif
-
-	/* Global input index for work-item */
-	xIndex = group_id_x * TILE_DIM_XY + lid_x;
-	yIndex = group_id_y * TILE_DIM_XY + lid_y;
-	index_in = xIndex + yIndex * NB_II + zIndex * NB_II * NB_I;
-
-	/* Global output index */
-	xIndex = group_id_y * TILE_DIM_XY + lid_x;
-	yIndex = group_id_x * TILE_DIM_XY + lid_y;
-	index_out = xIndex + yIndex * NB_I + zIndex * NB_I * NB_II;
-
-	for(i=0; i<TILE_DIM_XY; i+=BLOCK_ROWS_XY) {
-	  /* Fill the tile */
-	  tile[lid_y + i][lid_x] = in[index_in + i * NB_II];
-	}
-
-	/* Synchronize work-group */
-	barrier(CLK_LOCAL_MEM_FENCE);
-
-	for(i=0; i<TILE_DIM_XY; i+=BLOCK_ROWS_XY) {
-	  /* Write transposed data */
-	  out[index_out + i*NB_I] = tile[lid_x][lid_y + i];
-	}
-
-	/* Synchronize work-group */
-	barrier(CLK_LOCAL_MEM_FENCE);
-      }
-    }
-  }
-}
diff --git a/hysop/backend/device/opencl/cl_src/kernels/transpose_xz.cl b/hysop/backend/device/opencl/cl_src/kernels/transpose_xz.cl
deleted file mode 100644
index b2197fbb12f4643f1922ab8674a22af9a0583174..0000000000000000000000000000000000000000
--- a/hysop/backend/device/opencl/cl_src/kernels/transpose_xz.cl
+++ /dev/null
@@ -1,90 +0,0 @@
-/**
- * @file transpose_xz.cl
- * Transposition in XZ plane, coalesced, diagonal coordinates, 3D tiles.
- */
-
-/**
- * Perfoms a transposition in XZ plane. As data have to be contiguously read an write in global memory, we use a 3D tile.
- * Optimizations used are:
- *   - Coalesced reads and writes by means of local memory buffer (tile),
- *   - Local memory padding to avoir banck conflicts (optional),
- *   - Work groups are mapped to diagonal coordinates in global memory,
- *   - Reads and writes are performed by OpenCL vector types.
- *
- *
- * @param in Input data
- * @param out Output data
- *
- * @remark <code>NB_I</code>, <code>NB_II</code>, <code>NB_III</code> : points number in directions from 1st varying index to last. Output layout is <code>NB_I</code>, <code>NB_II</code>, <code>NB_III</code>.
- * @remark <code>PADDING_XZ</code> : local memory padding width.
- * @remark <code>__N__</code> is expanded at compilation time by vector width.
- * @remark <code>__NN__</code> is expanded at compilation time by a sequence of integer for each vector component.
- * @see hysop.gpu.tools.parse_file
- * @see transpose_xy.cl
- */
-__kernel void transpose_xz(__global const float* in,
-			   __global float* out)
-{
-  float__N__ temp;			/* Temporary variable */
-  uint group_id_x;			/* Work-group coordinate in global space index X */
-  uint group_id_z;			/* Work-group coordinate in global space index Y */
-  uint lid_x = get_local_id(0);
-  uint lid_y = get_local_id(1);
-  uint lid_z = get_local_id(2);
-
-  uint xIndex, yIndex, zIndex;
-  uint index_in, index_out, i, j;
-  uint gidI, gidII, gidIII;
-
-  __local float tile[TILE_DIM_XZ][TILE_DIM_XZ][TILE_DIM_XZ+PADDING_XZ]; /* Tile with padding */
-
-
-  for(gidI=get_group_id(0); gidI<NB_GROUPS_I; gidI+=get_num_groups(0)) {
-    for(gidII=get_group_id(1); gidII<NB_GROUPS_II; gidII+=get_num_groups(1)) {
-      for(gidIII=get_group_id(2); gidIII<NB_GROUPS_III; gidIII+=get_num_groups(2)) {
-
-	/* Use of diagonal coordinates */
-#if NB_III == NB_I
-	group_id_x = (gidI + gidIII) % NB_GROUPS_I;
-	group_id_z = gidI;
-#else
-	uint bid = gidI + gidIII * NB_GROUPS_I;
-	group_id_z = bid%NB_GROUPS_III;
-	group_id_x = ((bid/NB_GROUPS_III) + group_id_z)%NB_GROUPS_I;
-#endif
-
-	/* Global input index for work-item */
-	xIndex = group_id_x * TILE_DIM_XZ + lid_x*__N__;
-	yIndex = gidII * TILE_DIM_XZ + lid_y;
-	zIndex = group_id_z * TILE_DIM_XZ + lid_z;
-	index_in = xIndex + yIndex * NB_III + zIndex * NB_III * NB_II;
-
-	/* Global output index */
-	xIndex = group_id_z * TILE_DIM_XZ + lid_x*__N__;
-	zIndex = group_id_x * TILE_DIM_XZ + lid_z;
-	index_out = xIndex + yIndex * NB_I + zIndex * NB_I * NB_II;
-
-
-	for(j=0; j<TILE_DIM_XZ; j+=BLOCK_DEPH_XZ) {
-	  for(i=0; i<TILE_DIM_XZ; i+=BLOCK_ROWS_XZ) {
-	    /* Fill the tile */
-	    temp = vload__N__((index_in + i*NB_III + j*NB_III*NB_II)/__N__, in);
-	    tile[lid_z + j][lid_y + i][lid_x*__N__+__NN__] = temp.s__NN__;
-	  }
-
-	}
-	/* Synchronize work-group */
-	barrier(CLK_LOCAL_MEM_FENCE);
-
-	for(j=0; j<TILE_DIM_XZ; j+=BLOCK_DEPH_XZ) {
-	  for(i=0; i<TILE_DIM_XZ; i+=BLOCK_ROWS_XZ) {
-	    /* Write transposed data */
-	    temp = (float__N__)(tile[lid_x*__N__+__NN__][lid_y+i][lid_z + j],
-				);
-	    vstore__N__(temp, (index_out + i*NB_I + j*NB_I*NB_II)/__N__, out);
-	  }
-	}
-      }
-    }
-  }
-}
diff --git a/hysop/backend/device/opencl/cl_src/kernels/transpose_xz_noVec.cl b/hysop/backend/device/opencl/cl_src/kernels/transpose_xz_noVec.cl
deleted file mode 100644
index 475bc9aaebe9d5c4a2ebf2c335e08c2a6413733e..0000000000000000000000000000000000000000
--- a/hysop/backend/device/opencl/cl_src/kernels/transpose_xz_noVec.cl
+++ /dev/null
@@ -1,84 +0,0 @@
-/**
- * @file transpose_xz.cl
- * Transposition in XZ plane, coalesced, diagonal coordinates, 3D tiles.
- */
-
-/**
- * Perfoms a transposition in XZ plane. As data have to be contiguously read an write in global memory, we use a 3D tile.
- * Optimizations used are:
- *   - Coalesced reads and writes by means of local memory buffer (tile),
- *   - Local memory padding to avoir banck conflicts (optional),
- *   - Work groups are mapped to diagonal coordinates in global memory,
- *   - Reads and writes are performed by OpenCL vector types.
- *
- *
- * @param in Input data
- * @param out Output data
- *
- * @remark <code>NB_I</code>, <code>NB_II</code>, <code>NB_III</code> : points number in directions from 1st varying index to last. Output layout is <code>NB_I</code>, <code>NB_II</code>, <code>NB_III</code>.
- * @remark <code>PADDING_XZ</code> : local memory padding width.
- * @remark <code>__N__</code> is expanded at compilation time by vector width.
- * @remark <code>__NN__</code> is expanded at compilation time by a sequence of integer for each vector component.
- * @see hysop.gpu.tools.parse_file
- * @see transpose_xy.cl
- */
-__kernel void transpose_xz(__global const float* in,
-			   __global float* out)
-{
-  uint group_id_x;			/* Work-group coordinate in global space index X */
-  uint group_id_z;			/* Work-group coordinate in global space index Y */
-  uint lid_x = get_local_id(0);
-  uint lid_y = get_local_id(1);
-  uint lid_z = get_local_id(2);
-
-  uint xIndex, yIndex, zIndex;
-  uint index_in, index_out, i, j;
-  uint gidI, gidII, gidIII;
-
-  __local float tile[TILE_DIM_XZ][TILE_DIM_XZ][TILE_DIM_XZ+PADDING_XZ]; /* Tile with padding */
-
-  for(gidI=get_group_id(0); gidI<NB_GROUPS_I; gidI+=get_num_groups(0)) {
-    for(gidII=get_group_id(1); gidII<NB_GROUPS_II; gidII+=get_num_groups(1)) {
-      for(gidIII=get_group_id(2); gidIII<NB_GROUPS_III; gidIII+=get_num_groups(2)) {
-
-	/* Use of diagonal coordinates */
-#if NB_III == NB_I
-	group_id_x = (gidI + gidIII) % NB_GROUPS_I;
-	group_id_z = gidI;
-#else
-	uint bid = gidI + gidIII * NB_GROUPS_I;
-	group_id_z = bid%NB_GROUPS_III;
-	group_id_x = ((bid/NB_GROUPS_III) + group_id_z)%NB_GROUPS_I;
-#endif
-
-	/* Global input index for work-item */
-	xIndex = group_id_x * TILE_DIM_XZ + lid_x;
-	yIndex = gidII * TILE_DIM_XZ + lid_y;
-	zIndex = group_id_z * TILE_DIM_XZ + lid_z;
-	index_in = xIndex + yIndex * NB_III + zIndex * NB_III * NB_II;
-
-	/* Global output index */
-	xIndex = group_id_z * TILE_DIM_XZ + lid_x;
-	zIndex = group_id_x * TILE_DIM_XZ + lid_z;
-	index_out = xIndex + yIndex * NB_I + zIndex * NB_I * NB_II;
-
-	for(j=0; j<TILE_DIM_XZ; j+=BLOCK_DEPH_XZ) {
-	  for(i=0; i<TILE_DIM_XZ; i+=BLOCK_ROWS_XZ) {
-	    /* Fill the tile */
-	    tile[lid_z + j][lid_y + i][lid_x] = in[index_in + i*NB_III + j*NB_III*NB_II];
-	  }
-	}
-	/* Synchronize work-group */
-	barrier(CLK_LOCAL_MEM_FENCE);
-
-	for(j=0; j<TILE_DIM_XZ; j+=BLOCK_DEPH_XZ) {
-	  for(i=0; i<TILE_DIM_XZ; i+=BLOCK_ROWS_XZ) {
-	    /* Write transposed data */
-	    out[index_out + i*NB_I + j*NB_I*NB_II] = tile[lid_x][lid_y+i][lid_z + j];
-	  }
-	}
-	barrier(CLK_LOCAL_MEM_FENCE);
-      }
-    }
-  }
-}
diff --git a/hysop/backend/device/opencl/cl_src/kernels/transpose_xz_slice.cl b/hysop/backend/device/opencl/cl_src/kernels/transpose_xz_slice.cl
deleted file mode 100644
index ec394f6cbd3690fe71ea7c3d97749077869c4f34..0000000000000000000000000000000000000000
--- a/hysop/backend/device/opencl/cl_src/kernels/transpose_xz_slice.cl
+++ /dev/null
@@ -1,82 +0,0 @@
-/**
- * @file transpose_xz.cl
- * Transposition in XZ plane, coalesced, diagonal coordinates, 3D tiles.
- */
-
-/**
- * Perfoms a transposition in XZ plane. As data have to be contiguously read an write in global memory, we use a 3D tile.
- * Optimizations used are:
- *   - Coalesced reads and writes by means of local memory buffer (tile),
- *   - Local memory padding to avoir banck conflicts (optional),
- *   - Work groups are mapped to diagonal coordinates in global memory,
- *   - Reads and writes are performed by OpenCL vector types.
- *
- *
- * @param in Input data
- * @param out Output data
- *
- * @remark <code>NB_I</code>, <code>NB_II</code>, <code>NB_III</code> : points number in directions from 1st varying index to last. Output layout is <code>NB_I</code>, <code>NB_II</code>, <code>NB_III</code>.
- * @remark <code>PADDING_XZ</code> : local memory padding width.
- * @remark <code>__N__</code> is expanded at compilation time by vector width.
- * @remark <code>__NN__</code> is expanded at compilation time by a sequence of integer for each vector component.
- * @see hysop.gpu.tools.parse_file
- * @see transpose_xy.cl
- */
-__kernel void transpose_xz(__global const float* in,
-			   __global float* out)
-{
-  float__N__ temp;			/* Temporary variable */
-  uint group_id_x;			/* Work-group coordinate in global space index X */
-  uint group_id_z;			/* Work-group coordinate in global space index Y */
-  uint lid_x = get_local_id(0);
-  uint lid_z = get_local_id(2);
-
-  uint xIndex, yIndex, zIndex;
-  uint index_in, index_out;
-  uint gidI, gidIII, j;
-
-  __local float tile[TILE_DIM_XZ][TILE_DIM_XZ+PADDING_XZ]; /* Tile with padding */
-
-  for(yIndex=get_global_id(1); yIndex<NB_II; yIndex+=get_global_size(1)) {
-    for(gidI=get_group_id(0); gidI<NB_GROUPS_I; gidI+=get_num_groups(0)) {
-      for(gidIII=get_group_id(2); gidIII<NB_GROUPS_III; gidIII+=get_num_groups(2)) {
-
-	/* Use of diagonal coordinates */
-#if NB_III == NB_I
-	group_id_x = (gidI + gidIII) % NB_GROUPS_I;
-	group_id_z = gidI;
-#else
-	uint bid = gidI + gidIII * NB_GROUPS_I;
-	group_id_z = bid%NB_GROUPS_III;
-	group_id_x = ((bid/NB_GROUPS_III) + group_id_z)%NB_GROUPS_I;
-#endif
-
-	/* Global input index for work-item */
-	xIndex = group_id_x * TILE_DIM_XZ + lid_x*__N__;
-	zIndex = group_id_z * TILE_DIM_XZ + lid_z;
-	index_in = xIndex + yIndex * NB_III + zIndex * NB_III * NB_II;
-
-	/* Global output index */
-	xIndex = group_id_z * TILE_DIM_XZ + lid_x*__N__;
-	zIndex = group_id_x * TILE_DIM_XZ + lid_z;
-	index_out = xIndex + yIndex * NB_I + zIndex * NB_I * NB_II;
-
-	for(j=0; j<TILE_DIM_XZ; j+=BLOCK_DEPH_XZ) {
-	  /* Fill the tile */
-	  temp = vload__N__((index_in + j*NB_III*NB_II)/__N__, in);
-	  tile[lid_z + j][lid_x*__N__+__NN__] = temp.s__NN__;
-
-	}
-	/* Synchronize work-group */
-	barrier(CLK_LOCAL_MEM_FENCE);
-
-	for(j=0; j<TILE_DIM_XZ; j+=BLOCK_DEPH_XZ) {
-	  /* Write transposed data */
-	  temp = (float__N__)(tile[lid_x*__N__+__NN__][lid_z + j],
-			      );
-	  vstore__N__(temp, (index_out + j*NB_I*NB_II)/__N__, out);
-	}
-      }
-    }
-  }
-}
diff --git a/hysop/backend/device/opencl/cl_src/kernels/transpose_xz_slice_noVec.cl b/hysop/backend/device/opencl/cl_src/kernels/transpose_xz_slice_noVec.cl
deleted file mode 100644
index d97cb925e5d5b0defdb8025e8f11565f8d34048c..0000000000000000000000000000000000000000
--- a/hysop/backend/device/opencl/cl_src/kernels/transpose_xz_slice_noVec.cl
+++ /dev/null
@@ -1,79 +0,0 @@
-/**
- * @file transpose_xz.cl
- * Transposition in XZ plane, coalesced, diagonal coordinates, 3D tiles.
- */
-
-/**
- * Perfoms a transposition in XZ plane. As data have to be contiguously read an write in global memory, we use a 3D tile.
- * Optimizations used are:
- *   - Coalesced reads and writes by means of local memory buffer (tile),
- *   - Local memory padding to avoir banck conflicts (optional),
- *   - Work groups are mapped to diagonal coordinates in global memory,
- *   - Reads and writes are performed by OpenCL vector types.
- *
- *
- * @param in Input data
- * @param out Output data
- *
- * @remark <code>NB_I</code>, <code>NB_II</code>, <code>NB_III</code> : points number in directions from 1st varying index to last. Output layout is <code>NB_I</code>, <code>NB_II</code>, <code>NB_III</code>.
- * @remark <code>PADDING_XZ</code> : local memory padding width.
- * @remark <code>__N__</code> is expanded at compilation time by vector width.
- * @remark <code>__NN__</code> is expanded at compilation time by a sequence of integer for each vector component.
- * @see hysop.gpu.tools.parse_file
- * @see transpose_xy.cl
- */
-__kernel void transpose_xz(__global const float* in,
-			   __global float* out)
-{
-  uint group_id_x;			/* Work-group coordinate in global space index X */
-  uint group_id_z;			/* Work-group coordinate in global space index Y */
-  uint lid_x = get_local_id(0);
-  uint lid_z = get_local_id(2);
-
-  /* Global input index for work-item */
-  uint xIndex, yIndex, zIndex;
-  uint index_in, index_out;
-  uint gidI, gidIII, j;
-
-  __local float tile[TILE_DIM_XZ][TILE_DIM_XZ+PADDING_XZ]; /* Tile with padding */
-
-  for(yIndex=get_global_id(1); yIndex<NB_II; yIndex+=get_global_size(1)) {
-    for(gidI=get_group_id(0); gidI<NB_GROUPS_I; gidI+=get_num_groups(0)) {
-      for(gidIII=get_group_id(2); gidIII<NB_GROUPS_III; gidIII+=get_num_groups(2)) {
-
-	/* Use of diagonal coordinates */
-#if NB_III == NB_I
-	group_id_x = (gidI + gidIII) % NB_GROUPS_I;
-	group_id_z = gidI;
-#else
-	uint bid = gidI + gidIII * NB_GROUPS_I;
-	group_id_z = bid%NB_GROUPS_III;
-	group_id_x = ((bid/NB_GROUPS_III) + group_id_z)%NB_GROUPS_I;
-#endif
-
-	xIndex = group_id_x * TILE_DIM_XZ + lid_x;
-	zIndex = group_id_z * TILE_DIM_XZ + lid_z;
-	index_in = xIndex + yIndex * NB_III + zIndex * NB_III * NB_II;
-
-	/* Global output index */
-	xIndex = group_id_z * TILE_DIM_XZ + lid_x;
-	zIndex = group_id_x * TILE_DIM_XZ + lid_z;
-	index_out = xIndex + yIndex * NB_I + zIndex * NB_I * NB_II;
-
-	for(j=0; j<TILE_DIM_XZ; j+=BLOCK_DEPH_XZ) {
-	  /* Fill the tile */
-	  tile[lid_z + j][lid_x] = in[index_in + j*NB_III*NB_II];
-	}
-	/* Synchronize work-group */
-	barrier(CLK_LOCAL_MEM_FENCE);
-
-	for(j=0; j<TILE_DIM_XZ; j+=BLOCK_DEPH_XZ) {
-	  /* Write transposed data */
-	  out[index_out + j*NB_I*NB_II] = tile[lid_x][lid_z + j];
-	  tile[lid_x][lid_z + j] = 0.0;
-	}
-	barrier(CLK_LOCAL_MEM_FENCE);
-      }
-    }
-  }
-}
diff --git a/hysop/backend/device/opencl/cl_src/remeshing/basic.cl b/hysop/backend/device/opencl/cl_src/remeshing/basic.cl
deleted file mode 100644
index e2fd02f4d55a0b76891dc21d5ae4be546bb00cbf..0000000000000000000000000000000000000000
--- a/hysop/backend/device/opencl/cl_src/remeshing/basic.cl
+++ /dev/null
@@ -1,106 +0,0 @@
-/**
- * @file remeshing/basic.cl
- * Remeshing function, vectorized version.
- */
-
-void remesh(uint i, __RCOMP_P float__N__ s__ID__, float__N__ p, __RCOMP_P__local float* gscal_loc__ID__, __constant struct AdvectionMeshInfo* mesh);
-
-
-/**
- * Remesh particles in local buffer.
- *
- * Remeshing formula is given a compiling time.
- * Use of builtin OpenCL functions fma and mix. Computations through OpenCL vector types.
- *
- * @param i Particle index
- * @param dx Space step
- * @param invdx 1/dx
- * @param s Particle scalar
- * @param p Particle position
- * @param gscal_loc Local buffer for result
- *
- * @remark <code>NB_I</code>, <code>NB_II</code>, <code>NB_III</code> : points number in directions from 1st varying index to last.
- * @remark <code>FORMULA</code> : remeshing formula flag {<code>M4PRIME</code>, <code>M6PRIME</code>, <code>M8PRIME</code>, <code>L6STAR</code>}
- * @remark <code>__N__</code> is expanded at compilation time by vector width.
- * @remark <code>__NN__</code> is expanded at compilation time by a sequence of integer for each vector component.
- * @remark <code>__RCOMP_I</code> flag is for instruction expansion for the different remeshed components.
- * @remark <code>__RCOMP_P</code> flag is for function parameter expansion for the different remeshed components.
- * @remark <code>__ID__</code> is replaced by the remeshed component id in an expansion.
- * @remark <code>REMESH</code> is a function-like macro expanding to the proper remeshing formula (i.e.: <code>REMESH(alpha)</code> -> <code>alpha_l2_1</code>)
- * @see hysop.gpu.tools.parse_file
- * @see hysop.gpu.cl_src.common
- */
-void remesh(uint i,
-	    __RCOMP_P float__N__ s__ID__,
-	    float__N__ p,
-	    __RCOMP_P__local float* gscal_loc__ID__,
-	    __constant struct AdvectionMeshInfo* mesh){
-  float__N__ y;			/* Normalized distance to nearest left grid point */
-  int__N__ ind;			/* Integer coordinate */
-  uint__N__ index;		/* Remeshing index */
-  float w__NN__;
-
-  p = p - mesh->min_position;
-
-  ind = convert_int__N___rtn(p * mesh->invdx);
-  y = (p - convert_float__N__(ind) * mesh->dx.x) * mesh->invdx;
-
-  index = convert_uint__N__((ind - REMESH_SHIFT + NB_I) % NB_I);
-
-  w__NN__ = REMESH(alpha)(y.s__NN__);
-  __RCOMP_Igscal_loc__ID__[noBC_id(index.s__NN__)] += (w__NN__ * s__ID__.s__NN__);
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  index = (index + 1) % NB_I;
-  w__NN__ = REMESH(beta)(y.s__NN__);
-  __RCOMP_Igscal_loc__ID__[noBC_id(index.s__NN__)] += (w__NN__ * s__ID__.s__NN__);
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  index = (index + 1) % NB_I;
-  w__NN__ = REMESH(gamma)(y.s__NN__);
-  __RCOMP_Igscal_loc__ID__[noBC_id(index.s__NN__)] += (w__NN__ * s__ID__.s__NN__);
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  index = (index + 1) % NB_I;
-  w__NN__ = REMESH(delta)(y.s__NN__);
-  __RCOMP_Igscal_loc__ID__[noBC_id(index.s__NN__)] += (w__NN__ * s__ID__.s__NN__);
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-#if REMESH_SHIFT > 1
-  index = (index + 1) % NB_I;
-  w__NN__ = REMESH(eta)(y.s__NN__);
-  __RCOMP_Igscal_loc__ID__[noBC_id(index.s__NN__)] += (w__NN__ * s__ID__.s__NN__);
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  index = (index + 1) % NB_I;
-  w__NN__ = REMESH(zeta)(y.s__NN__);
-  __RCOMP_Igscal_loc__ID__[noBC_id(index.s__NN__)] += (w__NN__ * s__ID__.s__NN__);
-  barrier(CLK_LOCAL_MEM_FENCE);
-#endif
-
-#if REMESH_SHIFT > 2
-  index = (index + 1) % NB_I;
-  w__NN__ = REMESH(theta)(y.s__NN__);
-  __RCOMP_Igscal_loc__ID__[noBC_id(index.s__NN__)] += (w__NN__ * s__ID__.s__NN__);
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  index = (index + 1) % NB_I;
-  w__NN__ = REMESH(iota)(y.s__NN__);
-  __RCOMP_Igscal_loc__ID__[noBC_id(index.s__NN__)] += (w__NN__ * s__ID__.s__NN__);
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-#endif
-
-#if REMESH_SHIFT > 3
-  index = (index + 1) % NB_I;
-  w__NN__ = REMESH(kappa)(y.s__NN__);
-  __RCOMP_Igscal_loc__ID__[noBC_id(index.s__NN__)] += (w__NN__ * s__ID__.s__NN__);
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  index = (index + 1) % NB_I;
-  w__NN__ = REMESH(mu)(y.s__NN__);
-  __RCOMP_Igscal_loc__ID__[noBC_id(index.s__NN__)] += (w__NN__ * s__ID__.s__NN__);
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-#endif
-}
diff --git a/hysop/backend/device/opencl/cl_src/remeshing/basic_noVec.cl b/hysop/backend/device/opencl/cl_src/remeshing/basic_noVec.cl
deleted file mode 100644
index a2b75e98926a6a246f1e54af288fd4ab48a7143e..0000000000000000000000000000000000000000
--- a/hysop/backend/device/opencl/cl_src/remeshing/basic_noVec.cl
+++ /dev/null
@@ -1,112 +0,0 @@
-/**
- * @file remeshing/basic_noVec.cl
- * Remeshing function, vectorized version.
- */
-
-void remesh(uint i, __RCOMP_P float s__ID__, float p, __RCOMP_P__local float* gscal_loc__ID__, __constant struct AdvectionMeshInfo* mesh);
-
-
-/**
- * Remesh particles in local buffer.
- *
- * Remeshing formula is given a compiling time.
- * Use of builtin OpenCL functions fma and mix. Computations through OpenCL vector types.
- *
- * @param i Particle index
- * @param dx Space step
- * @param invdx 1/dx
- * @param s Particle scalar
- * @param p Particle position
- * @param gscal_loc Local buffer for result
- *
- * @remark <code>NB_I</code>, <code>NB_II</code>, <code>NB_III</code> : points number in directions from 1st varying index to last.
- * @remark <code>__N__</code> is expanded at compilation time by vector width.
- * @remark <code>__NN__</code> is expanded at compilation time by a sequence of integer for each vector component.
- * @remark <code>FORMULA</code> : remeshing formula flag {<code>M4PRIME</code>, <code>M6PRIME</code>, <code>M8PRIME</code>, <code>L6STAR</code>}
- * @remark <code>__RCOMP_I</code> flag is for instruction expansion for the different remeshed components.
- * @remark <code>__RCOMP_P</code> flag is for function parameter expansion for the different remeshed components.
- * @remark <code>__ID__</code> is replaced by the remeshed component id in an expansion.
- * @remark <code>REMESH</code> is a function-like macro expanding to the proper remeshing formula (i.e.: <code>REMESH(alpha)</code> -> <code>alpha_l2_1</code>)
- * @see hysop.gpu.tools.parse_file
- * @see hysop.gpu.cl_src.common
- */
-void remesh(uint i,
-	    __RCOMP_P float s__ID__,
-	    float p,
-	    __RCOMP_P__local float* gscal_loc__ID__,
-	    __constant struct AdvectionMeshInfo* mesh){
-  float y;			/* Normalized distance to nearest left grid point */
-  int ind;			/* Integer coordinate */
-  uint index;		/* Remeshing index */
-  float w;
-
-  p = p - mesh->min_position;
-
-  ind = convert_int_rtn(p * mesh->invdx);
-  y = (p - convert_float(ind) * mesh->dx.x) * mesh->invdx;
-
-  index = convert_uint((ind - REMESH_SHIFT + NB_I) % NB_I);
-
-  w = REMESH(alpha)(y);
-  __RCOMP_Igscal_loc__ID__[noBC_id(index)] += (w * s__ID__);
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  index = (index + 1) % NB_I;
-  w = REMESH(beta)(y);
-  __RCOMP_Igscal_loc__ID__[noBC_id(index)] += (w * s__ID__);
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  index = (index + 1) % NB_I;
-  w = REMESH(gamma)(y);
-  __RCOMP_Igscal_loc__ID__[noBC_id(index)] += (w * s__ID__);
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  index = (index + 1) % NB_I;
-  w = REMESH(delta)(y);
-  __RCOMP_Igscal_loc__ID__[noBC_id(index)] += (w * s__ID__);
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-#if REMESH_SHIFT > 1
-  index = (index + 1) % NB_I;
-  w = REMESH(eta)(y);
-  __RCOMP_Igscal_loc__ID__[noBC_id(index)] += (w * s__ID__);
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  index = (index + 1) % NB_I;
-  w = REMESH(zeta)(y);
-  __RCOMP_Igscal_loc__ID__[noBC_id(index)] += (w * s__ID__);
-  barrier(CLK_LOCAL_MEM_FENCE);
-#endif
-
-#if REMESH_SHIFT > 2
-  index = (index + 1) % NB_I;
-  w = REMESH(theta)(y);
-  __RCOMP_Igscal_loc__ID__[noBC_id(index)] += (w * s__ID__);
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  index = (index + 1) % NB_I;
-  w = REMESH(iota)(y);
-  __RCOMP_Igscal_loc__ID__[noBC_id(index)] += (w * s__ID__);
-  barrier(CLK_LOCAL_MEM_FENCE);
-#endif
-
-#if REMESH_SHIFT > 3
-  index = (index + 1) % NB_I;
-  w = REMESH(kappa)(y);
-  __RCOMP_Igscal_loc__ID__[noBC_id(index)] += (w * s__ID__);
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  index = (index + 1) % NB_I;
-  w = REMESH(mu)(y);
-  __RCOMP_Igscal_loc__ID__[noBC_id(index)] += (w * s__ID__);
-  barrier(CLK_LOCAL_MEM_FENCE);
-#endif
-}
-
-
-/* Flop number
-   - distance to grid point : 5flop
-   - contributions : 2*Stencil*Nbcomponents
-   - poids (horner) : (d*fma+1)*Stencil (d=degré, +1 for the coefficient)
-
-*/
diff --git a/hysop/backend/device/opencl/cl_src/remeshing/comm_basic_noVec.cl b/hysop/backend/device/opencl/cl_src/remeshing/comm_basic_noVec.cl
deleted file mode 100644
index e1e886cfcb88dbaad5a237ad9c210a7d8f945568..0000000000000000000000000000000000000000
--- a/hysop/backend/device/opencl/cl_src/remeshing/comm_basic_noVec.cl
+++ /dev/null
@@ -1,124 +0,0 @@
-/**
- * @file remeshing/comm_basic_noVec.cl
- * Remeshing function, vectorized version.
- */
-
-void remesh(uint i, __RCOMP_P float s__ID__, float p, __RCOMP_P__local float* gscal_loc__ID__, __constant struct AdvectionMeshInfo* mesh);
-
-
-/**
- * Remesh particles in local buffer.
- *
- * Remeshing formula is given a compiling time.
- * Use of builtin OpenCL functions fma and mix. Computations through OpenCL vector types.
- *
- * @param i Particle index
- * @param dx Space step
- * @param invdx 1/dx
- * @param s Particle scalar
- * @param p Particle position
- * @param gscal_loc Local buffer for result
- *
- * @remark <code>NB_I</code>, <code>NB_II</code>, <code>NB_III</code> : points number in directions from 1st varying index to last.
- * @remark T_NB_I: global points number in the 1st direction (mpi cutted direction)
- * @remark START_INDEX Global staring index for computational points
- * @remark <code>__N__</code> is expanded at compilation time by vector width.
- * @remark <code>__NN__</code> is expanded at compilation time by a sequence of integer for each vector component.
- * @remark <code>FORMULA</code> : remeshing formula flag {<code>M4PRIME</code>, <code>M6PRIME</code>, <code>M8PRIME</code>, <code>L6STAR</code>}
- * @remark <code>__RCOMP_I</code> flag is for instruction expansion for the different remeshed components.
- * @remark <code>__RCOMP_P</code> flag is for function parameter expansion for the different remeshed components.
- * @remark <code>__ID__</code> is replaced by the remeshed component id in an expansion.
- * @remark <code>REMESH</code> is a function-like macro expanding to the proper remeshing formula (i.e.: <code>REMESH(alpha)</code> -> <code>alpha_l2_1</code>)
- * @see hysop.gpu.tools.parse_file
- * @see hysop.gpu.cl_src.common
- */
-void remesh(uint i,
-	    __RCOMP_P float s__ID__,
-	    float p,
-	    __RCOMP_P__local float* gscal_loc__ID__,
-	    __constant struct AdvectionMeshInfo* mesh){
-  float y;			/* Normalized distance to nearest left grid point */
-  int ind;			/* Integer coordinate */
-  int index;		/* Remeshing index */
-  float w;
-
-  ind = convert_int_rtn(p * mesh->invdx);
-  y = (p - convert_float(ind) * mesh->dx.x) * mesh->invdx;
-
-  index = ((ind - REMESH_SHIFT + T_NB_I) % T_NB_I) - START_INDEX;
-
-  if (index>=0 && index < NB_I){
-    w = REMESH(alpha)(y);
-    __RCOMP_Igscal_loc__ID__[noBC_id(index)] += (w * s__ID__);
-  }
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  index = index + 1;
-  if (index>=0 && index < NB_I){
-    w = REMESH(beta)(y);
-    __RCOMP_Igscal_loc__ID__[noBC_id(index)] += (w * s__ID__);
-  }
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  index = index + 1;
-  if (index>=0 && index < NB_I){
-    w = REMESH(gamma)(y);
-    __RCOMP_Igscal_loc__ID__[noBC_id(index)] += (w * s__ID__);
-  }
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  index = index + 1;
-  if (index>=0 && index < NB_I){
-    w = REMESH(delta)(y);
-    __RCOMP_Igscal_loc__ID__[noBC_id(index)] += (w * s__ID__);
-  }
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-#if REMESH_SHIFT > 1
-  index = index + 1;
-  if (index>=0 && index < NB_I){
-    w = REMESH(eta)(y);
-    __RCOMP_Igscal_loc__ID__[noBC_id(index)] += (w * s__ID__);
-  }
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  index = index + 1;
-  if (index>=0 && index < NB_I){
-    w = REMESH(zeta)(y);
-    __RCOMP_Igscal_loc__ID__[noBC_id(index)] += (w * s__ID__);
-  }
-  barrier(CLK_LOCAL_MEM_FENCE);
-#endif
-
-#if REMESH_SHIFT > 2
-  index = index + 1;
-  if (index>=0 && index < NB_I){
-    w = REMESH(theta)(y);
-    __RCOMP_Igscal_loc__ID__[noBC_id(index)] += (w * s__ID__);
-  }
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  index = index + 1;
-  if (index>=0 && index < NB_I){
-    w = REMESH(iota)(y);
-    __RCOMP_Igscal_loc__ID__[noBC_id(index)] += (w * s__ID__);
-  }
-  barrier(CLK_LOCAL_MEM_FENCE);
-#endif
-
-#if REMESH_SHIFT > 3
-  index = index + 1;
-  if (index>=0 && index < NB_I){
-    w = REMESH(kappa)(y);
-    __RCOMP_Igscal_loc__ID__[noBC_id(index)] += (w * s__ID__);
-  }
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  index = index + 1;
-  if (index>=0 && index < NB_I){
-    w = REMESH(mu)(y);
-    __RCOMP_Igscal_loc__ID__[noBC_id(index)] += (w * s__ID__);
-  }
-  barrier(CLK_LOCAL_MEM_FENCE);
-#endif
-}
diff --git a/hysop/backend/device/opencl/cl_src/remeshing/private.cl b/hysop/backend/device/opencl/cl_src/remeshing/private.cl
deleted file mode 100644
index 18943652bf3885a13dc77e77babc7280c28c319c..0000000000000000000000000000000000000000
--- a/hysop/backend/device/opencl/cl_src/remeshing/private.cl
+++ /dev/null
@@ -1,116 +0,0 @@
-/**
- * @file private.cl
- * Remeshing function, vectorized, private variable.
- */
-
-void remesh(uint i, __RCOMP_P float__N__ s__ID__, float__N__ p, __RCOMP_P__local float* gscal_loc__ID__, __constant struct AdvectionMeshInfo* mesh);
-
-
-/**
- * Remesh particles in local buffer.
- *
- * Remeshing formula is given a compiling time.
- * Use of builtin OpenCL functions fma and mix. Computations through OpenCL vector types.
- * Use of a private temporary variable for remeshing weights.
- *
- * @param i Particle index
- * @param dx Space step
- * @param invdx 1/dx
- * @param s Particle scalar
- * @param p Particle position
- * @param gscal_loc Local buffer for result
- *
- * @remark <code>NB_I</code>, <code>NB_II</code>, <code>NB_III</code> : points number in directions from 1st varying index to last.
- * @remark <code>__N__</code> is expanded at compilation time by vector width.
- * @remark <code>__NN__</code> is expanded at compilation time by a sequence of integer for each vector component.
- * @remark <code>FORMULA</code> : remeshing formula flag {<code>M4PRIME</code>, <code>M6PRIME</code>, <code>M8PRIME</code>, <code>L6STAR</code>}
- * @remark <code>__RCOMP_I</code> flag is for instruction expansion for the different remeshed components.
- * @remark <code>__RCOMP_P</code> flag is for function parameter expansion for the different remeshed components.
- * @remark <code>__ID__</code> is replaced by the remeshed component id in an expansion.
- * @remark <code>REMESH</code> is a function-like macro expanding to the proper remeshing formula (i.e.: <code>REMESH(alpha)</code> -> <code>alpha_l2_1</code>)
- * @see hysop.gpu.tools.parse_file
- * @see hysop.gpu.cl_src.common
- */
-void remesh(uint i,
-	    __RCOMP_P float__N__ s__ID__,
-	    float__N__ p,
-	    __RCOMP_P__local float* gscal_loc__ID__,
-	    __constant struct AdvectionMeshInfo* mesh){
-  float__N__ y,			   /* Normalized distance to nearest left grid point */
-     w;
-  __RCOMP_I float__N__ temp__ID__; /* Temporary remeshing weights */
-  int__N__ ind;		   	   /* Integer coordinate */
-  uint__N__ index;		   /* Remeshing index */
-
-  p = p - mesh->min_position;
-
-  ind = convert_int__N___rtn(p * mesh->invdx);
-  y = (p - convert_float__N__(ind) * mesh->dx.x) * mesh->invdx;
-
-  index = convert_uint__N__((ind - REMESH_SHIFT + NB_I) % NB_I);
-
-  w = REMESH(alpha)(y);
-  __RCOMP_Itemp__ID__ = w * s__ID__;
-  __RCOMP_Igscal_loc__ID__[noBC_id(index.s__NN__)] += temp__ID__.s__NN__;
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  index = (index + 1) % NB_I;
-  w = REMESH(beta)(y);
-  __RCOMP_Itemp__ID__ = w * s__ID__;
-  __RCOMP_Igscal_loc__ID__[noBC_id(index.s__NN__)] += temp__ID__.s__NN__;
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  index = (index + 1) % NB_I;
-  w = REMESH(gamma)(y);
-  __RCOMP_Itemp__ID__ = w * s__ID__;
-  __RCOMP_Igscal_loc__ID__[noBC_id(index.s__NN__)] += temp__ID__.s__NN__;
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  index = (index + 1) % NB_I;
-  w = REMESH(delta)(y);
-  __RCOMP_Itemp__ID__ = w * s__ID__;
-  __RCOMP_Igscal_loc__ID__[noBC_id(index.s__NN__)] += temp__ID__.s__NN__;
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-#if REMESH_SHIFT > 1
-  index = (index + 1) % NB_I;
-  w = REMESH(eta)(y);
-  __RCOMP_Itemp__ID__ = w * s__ID__;
-  __RCOMP_Igscal_loc__ID__[noBC_id(index.s__NN__)] += temp__ID__.s__NN__;
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  index = (index + 1) % NB_I;
-  w = REMESH(zeta)(y);
-  __RCOMP_Itemp__ID__ = w * s__ID__;
-  __RCOMP_Igscal_loc__ID__[noBC_id(index.s__NN__)] += temp__ID__.s__NN__;
-  barrier(CLK_LOCAL_MEM_FENCE);
-#endif
-
-#if REMESH_SHIFT > 2
-  index = (index + 1) % NB_I;
-  w = REMESH(theta)(y);
-  __RCOMP_Itemp__ID__ = w * s__ID__;
-  __RCOMP_Igscal_loc__ID__[noBC_id(index.s__NN__)] += temp__ID__.s__NN__;
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  index = (index + 1) % NB_I;
-  w = REMESH(iota)(y);
-  __RCOMP_Itemp__ID__ = w * s__ID__;
-  __RCOMP_Igscal_loc__ID__[noBC_id(index.s__NN__)] += temp__ID__.s__NN__;
-  barrier(CLK_LOCAL_MEM_FENCE);
-#endif
-
-#if REMESH_SHIFT > 3
-  index = (index + 1) % NB_I;
-  w = REMESH(kappa)(y);
-  __RCOMP_Itemp__ID__ = w * s__ID__;
-  __RCOMP_Igscal_loc__ID__[noBC_id(index.s__NN__)] += temp__ID__.s__NN__;
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  index = (index + 1) % NB_I;
-  w = REMESH(mu)(y);
-  __RCOMP_Itemp__ID__ = w * s__ID__;
-  __RCOMP_Igscal_loc__ID__[noBC_id(index.s__NN__)] += temp__ID__.s__NN__;
-  barrier(CLK_LOCAL_MEM_FENCE);
-#endif
-}
diff --git a/hysop/backend/device/opencl/cl_src/remeshing/private_noVec.cl b/hysop/backend/device/opencl/cl_src/remeshing/private_noVec.cl
deleted file mode 100644
index 7bafbe37de8e4a92fa839336045d4566374f167c..0000000000000000000000000000000000000000
--- a/hysop/backend/device/opencl/cl_src/remeshing/private_noVec.cl
+++ /dev/null
@@ -1,116 +0,0 @@
-/**
- * @file private.cl
- * Remeshing function, vectorized, private variable.
- */
-
-void remesh(uint i, __RCOMP_P float s__ID__, float p, __RCOMP_P__local float* gscal_loc__ID__, __constant struct AdvectionMeshInfo* mesh);
-
-
-/**
- * Remesh particles in local buffer.
- *
- * Remeshing formula is given a compiling time.
- * Use of builtin OpenCL functions fma and mix. Computations through OpenCL vector types.
- * Use of a private temporary variable for remeshing weights.
- *
- * @param i Particle index
- * @param dx Space step
- * @param invdx 1/dx
- * @param s Particle scalar
- * @param p Particle position
- * @param gscal_loc Local buffer for result
- *
- * @remark <code>NB_I</code>, <code>NB_II</code>, <code>NB_III</code> : points number in directions from 1st varying index to last.
- * @remark <code>__N__</code> is expanded at compilation time by vector width.
- * @remark <code>__NN__</code> is expanded at compilation time by a sequence of integer for each vector component.
- * @remark <code>FORMULA</code> : remeshing formula flag {<code>M4PRIME</code>, <code>M6PRIME</code>, <code>M8PRIME</code>, <code>L6STAR</code>}
- * @remark <code>__RCOMP_I</code> flag is for instruction expansion for the different remeshed components.
- * @remark <code>__RCOMP_P</code> flag is for function parameter expansion for the different remeshed components.
- * @remark <code>__ID__</code> is replaced by the remeshed component id in an expansion.
- * @remark <code>REMESH</code> is a function-like macro expanding to the proper remeshing formula (i.e.: <code>REMESH(alpha)</code> -> <code>alpha_l2_1</code>)
- * @see hysop.gpu.tools.parse_file
- * @see hysop.gpu.cl_src.common
- */
-void remesh(uint i,
-	    __RCOMP_P float s__ID__,
-	    float p,
-	    __RCOMP_P__local float* gscal_loc__ID__,
-	    __constant struct AdvectionMeshInfo* mesh){
-  float y,			/* Normalized distance to nearest left grid point */
-    w;			/* Temporary remeshing weights */
-  __RCOMP_I float temp__ID__;
-  int ind;			/* Integer coordinate */
-  uint index;		/* Remeshing index */
-
-  p = p - mesh->min_position;
-
-  ind = convert_int_rtn(p * mesh->invdx);
-  y = (p - convert_float(ind) * mesh->dx.x) * mesh->invdx;
-
-  index = convert_uint((ind - REMESH_SHIFT + NB_I) % NB_I);
-
-  w = REMESH(alpha)(y);
-  __RCOMP_Itemp__ID__ = w * s__ID__;
-  __RCOMP_Igscal_loc__ID__[noBC_id(index)] += temp__ID__;
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  index = (index + 1) % NB_I;
-  w = REMESH(beta)(y);
-  __RCOMP_Itemp__ID__ = w * s__ID__;
-  __RCOMP_Igscal_loc__ID__[noBC_id(index)] += temp__ID__;
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  index = (index + 1) % NB_I;
-  w = REMESH(gamma)(y);
-  __RCOMP_Itemp__ID__ = w * s__ID__;
-  __RCOMP_Igscal_loc__ID__[noBC_id(index)] += temp__ID__;
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  index = (index + 1) % NB_I;
-  w = REMESH(delta)(y);
-  __RCOMP_Itemp__ID__ = w * s__ID__;
-  __RCOMP_Igscal_loc__ID__[noBC_id(index)] += temp__ID__;
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-#if REMESH_SHIFT > 1
-  index = (index + 1) % NB_I;
-  w = REMESH(eta)(y);
-  __RCOMP_Itemp__ID__ = w * s__ID__;
-  __RCOMP_Igscal_loc__ID__[noBC_id(index)] += temp__ID__;
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  index = (index + 1) % NB_I;
-  w = REMESH(zeta)(y);
-  __RCOMP_Itemp__ID__ = w * s__ID__;
-  __RCOMP_Igscal_loc__ID__[noBC_id(index)] += temp__ID__;
-  barrier(CLK_LOCAL_MEM_FENCE);
-#endif
-
-#if REMESH_SHIFT > 2
-  index = (index + 1) % NB_I;
-  w = REMESH(theta)(y);
-  __RCOMP_Itemp__ID__ = w * s__ID__;
-  __RCOMP_Igscal_loc__ID__[noBC_id(index)] += temp__ID__;
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  index = (index + 1) % NB_I;
-  w = REMESH(iota)(y);
-  __RCOMP_Itemp__ID__ = w * s__ID__;
-  __RCOMP_Igscal_loc__ID__[noBC_id(index)] += temp__ID__;
-  barrier(CLK_LOCAL_MEM_FENCE);
-#endif
-
-#if REMESH_SHIFT > 3
-  index = (index + 1) % NB_I;
-  w = REMESH(kappa)(y);
-  __RCOMP_Itemp__ID__ = w * s__ID__;
-  __RCOMP_Igscal_loc__ID__[noBC_id(index)] += temp__ID__;
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  index = (index + 1) % NB_I;
-  w = REMESH(mu)(y);
-  __RCOMP_Itemp__ID__ = w * s__ID__;
-  __RCOMP_Igscal_loc__ID__[noBC_id(index)] += temp__ID__;
-  barrier(CLK_LOCAL_MEM_FENCE);
-  #endif
-}
diff --git a/hysop/backend/device/opencl/cl_src/remeshing/weights.cl b/hysop/backend/device/opencl/cl_src/remeshing/weights.cl
deleted file mode 100644
index d101fffed4fac3f0a97327d1c1186f3d0682d486..0000000000000000000000000000000000000000
--- a/hysop/backend/device/opencl/cl_src/remeshing/weights.cl
+++ /dev/null
@@ -1,202 +0,0 @@
-/**
- * @file weights.cl
- * Remeshing formulas, vectorized version.
- * Polynomials under Horner form.
- */
-
-inline float__N__ alpha_l2_1(float__N__ y){
-  return ((y * (y * (-y + 2.0) - 1.0)) * 0.5);}
-inline float__N__ beta_l2_1(float__N__ y){
-  return ((y * y * (3.0 * y - 5.0) + 2.0) * 0.5);}
-inline float__N__ gamma_l2_1(float__N__ y){
-  return ((y * (y * (-3.0 * y + 4.0) + 1.0)) * 0.5);}
-inline float__N__ delta_l2_1(float__N__ y){
-  return ((y * y * (y - 1.0)) * 0.5);}
-
-
-inline float__N__ alpha_l2_2(float__N__ y){
-  return ((y * (y * (y * (y * (2.0 * y - 5.0) + 3.0) + 1.0) - 1.0)) * 0.5);}
-inline float__N__ beta_l2_2(float__N__ y){
-  return ((y * y * (y * (y * (-6.0 * y + 15.0) - 9.0) - 2.0) + 2.0) * 0.5);}
-inline float__N__ gamma_l2_2(float__N__ y){
-  return ((y * (y * (y * (y * (6.0 * y - 15.0) + 9.0) + 1.0) + 1.0)) * 0.5);}
-inline float__N__ delta_l2_2(float__N__ y){
-  return ((y * y * y * (y * (-2.0 * y + 5.0) - 3.0)) * 0.5);}
-
-
-inline float__N__ alpha_l2_3(float__N__ y){
-  return ((y * (y * (y * y * (y * (y * (-6.0 * y + 21.0) - 25.0) + 10.0) + 1.0) - 1.0)) * 0.5);}
-inline float__N__ beta_l2_3(float__N__ y){
-  return ((y * y * (y * y * (y * (y * (18.0 * y - 63.0) + 75.0) - 30.0) - 2.0) + 2.0) * 0.5);}
-inline float__N__ gamma_l2_3(float__N__ y){
-  return ((y * (y * (y * y * (y * (y * (-18.0 * y + 63.0) - 75.0) + 30.0) + 1.0) + 1.0)) * 0.5);}
-inline float__N__ delta_l2_3(float__N__ y){
-  return ((y * y * y * y * (y * (y * (6.0 * y - 21.0) + 25.0) - 10.0)) * 0.5);}
-
-
-inline float__N__ alpha_l2_4(float__N__ y){
-  return ((y * (y * (y * y * y * (y * (y * (y * (20.0 * y - 90.0) + 154.0) - 119.0) + 35.0) + 1.0) - 1.0)) * 0.5);}
-inline float__N__ beta_l2_4(float__N__ y){
-  return ((y * y * (y * y * y * (y * (y * (y * (-60.0 * y + 270.0) - 462.0) + 357.0) - 105.0) - 2.0) + 2.0) * 0.5);}
-inline float__N__ gamma_l2_4(float__N__ y){
-  return ((y * (y * (y * y * y * (y * (y * (y * (60.0 * y - 270.0) + 462.0) - 357.0) + 105.0) + 1.0) + 1.0)) * 0.5);}
-inline float__N__ delta_l2_4(float__N__ y){
-  return ((y * y * y * y * y * (y * (y * (y * (-20.0 * y + 90.0) - 154.0) + 119.0) - 35.0)) * 0.5);}
-
-
-inline float__N__ alpha_l4_2(float__N__ y){
-  return ((y * (y * (y * (y * (-5.0 * y + 13.0) - 9.0) - 1.0) + 2.0)) * 0.041666666666666664);}
-inline float__N__ beta_l4_2(float__N__ y){
-  return ((y * (y * (y * (y * (25.0 * y - 64.0) + 39.0) + 16.0) - 16.0)) * 0.041666666666666664);}
-inline float__N__ gamma_l4_2(float__N__ y){
-  return ((y * y * (y * (y * (-50.0 * y + 126.0) - 70.0) - 30.0) + 24.0) * 0.041666666666666664);}
-inline float__N__ delta_l4_2(float__N__ y){
-  return ((y * (y * (y * (y * (50.0 * y - 124.0) + 66.0) + 16.0) + 16.0)) * 0.041666666666666664);}
-inline float__N__ eta_l4_2(float__N__ y){
-  return ((y * (y * (y * (y * (-25.0 * y + 61.0) - 33.0) - 1.0) - 2.0)) * 0.041666666666666664);}
-inline float__N__ zeta_l4_2(float__N__ y){
-  return ((y * y * y * (y * (5.0 * y - 12.0) + 7.0)) * 0.041666666666666664);}
-
-
-inline float__N__ alpha_l4_3(float__N__ y){
-  return ((y * (y * (y * (y * (y * (y * (14.0 * y - 49.0) + 58.0) - 22.0) - 2.0) - 1.0) + 2.0)) * 0.041666666666666664);}
-inline float__N__ beta_l4_3(float__N__ y){
-  return ((y * (y * (y * (y * (y * (y * (-70.0 * y + 245.0) - 290.0) + 111.0) + 4.0) + 16.0) - 16.0)) * 0.041666666666666664);}
-inline float__N__ gamma_l4_3(float__N__ y){
-  return ((y * y * (y * y * (y * (y * (140.0 * y - 490.0) + 580.0) - 224.0) - 30.0) + 24.0) * 0.041666666666666664);}
-inline float__N__ delta_l4_3(float__N__ y){
-  return ((y * (y * (y * (y * (y * (y * (-140.0 * y + 490.0) - 580.0) + 226.0) - 4.0) + 16.0) + 16.0)) * 0.041666666666666664);}
-inline float__N__ eta_l4_3(float__N__ y){
-  return ((y * (y * (y * (y * (y * (y * (70.0 * y - 245.0) + 290.0) - 114.0) + 2.0) - 1.0) - 2.0)) * 0.041666666666666664);}
-inline float__N__ zeta_l4_3(float__N__ y){
-  return ((y * y * y * y * (y * (y * (-14.0 * y + 49.0) - 58.0) + 23.0)) * 0.041666666666666664);}
-
-
-inline float__N__ alpha_l4_4(float__N__ y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (-46.0 * y + 207.0) - 354.0) + 273.0) - 80.0) + 1.0) - 2.0) - 1.0) + 2.0)) * 0.041666666666666664);}
-inline float__N__ beta_l4_4(float__N__ y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (230.0 * y - 1035.0) + 1770.0) - 1365.0) + 400.0) - 4.0) + 4.0) + 16.0) - 16.0)) * 0.041666666666666664);}
-inline float__N__ gamma_l4_4(float__N__ y){
-  return ((y * y * (y * y * (y * (y * (y * (y * (-460.0 * y + 2070.0) - 3540.0) + 2730.0) - 800.0) + 6.0) - 30.0) + 24.0) * 0.041666666666666664);}
-inline float__N__ delta_l4_4(float__N__ y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (460.0 * y - 2070.0) + 3540.0) - 2730.0) + 800.0) - 4.0) - 4.0) + 16.0) + 16.0)) * 0.041666666666666664);}
-inline float__N__ eta_l4_4(float__N__ y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (-230.0 * y + 1035.0) - 1770.0) + 1365.0) - 400.0) + 1.0) + 2.0) - 1.0) - 2.0)) * 0.041666666666666664);}
-inline float__N__ zeta_l4_4(float__N__ y){
-  return ((y * y * y * y * y * (y * (y * (y * (46.0 * y - 207.0) + 354.0) - 273.0) + 80.0)) * 0.041666666666666664);}
-
-
-inline float__N__ alpha_M8p(float__N__ y){
-  return ((y*(y*(y*(y*(y*(y*(-10.0*y + 21.0) + 28.0) - 105.0) + 70.0) + 35.0) - 56.0) + 17.0) * 0.00029761904761904765);}
-inline float__N__ beta_M8p(float__N__ y){
-  return ((y*(y*(y*(y*(y*(y*(70.0*y - 175.0) - 140.0) + 770.0) - 560.0) - 350.0) + 504.0) - 102.0) * 0.00029761904761904765);}
-inline float__N__ gamma_M8p(float__N__ y){
-  return ((y*(y*(y*(y*(y*(y*(-210.0*y + 609.0) + 224.0) - 2135.0) + 910.0) + 2765.0) - 2520.0) + 255.0) * 0.00029761904761904765);}
-inline float__N__ delta_M8p(float__N__ y){
-  return ((y*y* (y*y* (y*y* (70.0*y - 231.0) + 588.0) - 980.0) + 604.0) * 0.001488095238095238);}
-inline float__N__ eta_M8p(float__N__ y){
-  return ((y*(y*(y*(y*(y*(y*(-70.0*y+ 259.0) - 84.0) - 427.0) - 182.0)+ 553.0) + 504.0)+ 51.0) * 0.001488095238095238);}
-inline float__N__ zeta_M8p(float__N__ y){
-  return ((y*(y*(y*(y*(y*(y*(210.0*y- 861.0) + 532.0) + 770.0) + 560.0) - 350.0) - 504.0) - 102.0) * 0.00029761904761904765);}
-inline float__N__ theta_M8p(float__N__ y){
-  return ((y* (y* (y* (y* (y* (y* (-70.0* y+ 315.0) -280.0) -105.0) -70.0) +35.0)+ 56.0) +17.0) * 0.00029761904761904765);}
-inline float__N__ iota_M8p(float__N__ y){
-  return ((y * y * y * y * y * (y * (10.0 * y - 49.0) + 56.0)) * 0.00029761904761904765);}
-
-
-inline float__N__ alpha_l6_3(float__N__ y){
-  return ((y * (y * (y * (y * (y * (y * (-89.0 * y + 312.0) - 370.0) + 140.0) + 15.0) + 4.0) - 12.0)) * 0.001388888888888889);}
-inline float__N__ beta_l6_3(float__N__ y){
-  return ((y * (y * (y * (y * (y * (y * (623.0 * y - 2183.0) + 2581.0) - 955.0) - 120.0) - 54.0) + 108.0)) * 0.001388888888888889);}
-inline float__N__ gamma_l6_3(float__N__ y){
-  return ((y * (y * (y * (y * (y * (y * (-1869.0 * y + 6546.0) - 7722.0) + 2850.0) + 195.0) + 540.0) - 540.0)) * 0.001388888888888889);}
-inline float__N__ delta_l6_3(float__N__ y){
-  return ((y * y * (y * y * (y * (y * (3115.0 * y - 10905.0) + 12845.0) - 4795.0) - 980.0) + 720.0) * 0.001388888888888889);}
-inline float__N__ eta_l6_3(float__N__ y){
-  return ((y * (y * (y * (y * (y * (y * (-3115.0 * y + 10900.0) - 12830.0) + 4880.0) - 195.0) + 540.0) + 540.0)) * 0.001388888888888889);}
-inline float__N__ zeta_l6_3(float__N__ y){
-  return ((y * (y * (y * (y * (y * (y * (1869.0 * y - 6537.0) + 7695.0) - 2985.0) + 120.0) - 54.0) - 108.0)) * 0.001388888888888889);}
-inline float__N__ theta_l6_3(float__N__ y){
-  return ((y * (y * (y * (y * (y * (y * (-623.0 * y + 2178.0) - 2566.0) + 1010.0) - 15.0) + 4.0) + 12.0)) * 0.001388888888888889);}
-inline float__N__ iota_l6_3(float__N__ y){
-  return ((y * y * y * y * (y * (y * (89.0 * y - 311.0) + 367.0) - 145.0)) * 0.001388888888888889);}
-
-
-inline float__N__ alpha_l6_4(float__N__ y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (290.0 * y - 1305.0) + 2231.0) - 1718.0) + 500.0) - 5.0) + 15.0) + 4.0) - 12.0)) * 0.001388888888888889);}
-inline float__N__ beta_l6_4(float__N__ y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (-2030.0 * y + 9135.0) - 15617.0) + 12027.0) - 3509.0) + 60.0) - 120.0) - 54.0) + 108.0)) * 0.001388888888888889);}
-inline float__N__ gamma_l6_4(float__N__ y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (6090.0 * y - 27405.0) + 46851.0) - 36084.0) + 10548.0) - 195.0) + 195.0) + 540.0) - 540.0)) * 0.001388888888888889);}
-inline float__N__ delta_l6_4(float__N__ y){
-  return ((y * y * (y * y * (y * (y * (y * (y * (-10150.0 * y + 45675.0) - 78085.0) + 60145.0) - 17605.0) + 280.0) - 980.0) + 720.0) * 0.001388888888888889);}
-inline float__N__ eta_l6_4(float__N__ y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (10150.0 * y - 45675.0) + 78085.0) - 60150.0) + 17620.0) - 195.0) - 195.0) + 540.0) + 540.0)) * 0.001388888888888889);}
-inline float__N__ zeta_l6_4(float__N__ y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (-6090.0 * y + 27405.0) - 46851.0) + 36093.0) - 10575.0) + 60.0) + 120.0) - 54.0) - 108.0)) * 0.001388888888888889);}
-inline float__N__ theta_l6_4(float__N__ y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (2030.0 * y - 9135.0) + 15617.0) - 12032.0) + 3524.0) - 5.0) - 15.0) + 4.0) + 12.0)) * 0.001388888888888889);}
-inline float__N__ iota_l6_4(float__N__ y){
-  return ((y * y * y * y * y * (y * (y * (y * (-290.0 * y + 1305.0) - 2231.0) + 1719.0) - 503.0)) * 0.001388888888888889);}
-
-
-inline float__N__ alpha_l6_5(float__N__ y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (-1006.0 * y + 5533.0) - 12285.0) + 13785.0) - 7829.0) + 1803.0) - 3.0) - 5.0) + 15.0) + 4.0) - 12.0)) * 0.001388888888888889);}
-inline float__N__ beta_l6_5(float__N__ y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (7042.0 * y - 38731.0) + 85995.0) - 96495.0) + 54803.0) - 12620.0) + 12.0) + 60.0) - 120.0) - 54.0) + 108.0)) * 0.001388888888888889);}
-inline float__N__ gamma_l6_5(float__N__ y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (-21126.0 * y + 116193.0) - 257985.0) + 289485.0) - 164409.0) + 37857.0) - 15.0) - 195.0) + 195.0) + 540.0) - 540.0)) * 0.001388888888888889);}
-inline float__N__ delta_l6_5(float__N__ y){
-  return ((y * y * (y * y * (y * y * (y * (y * (y * (y * (35210.0 * y - 193655.0) + 429975.0) - 482475.0) + 274015.0) - 63090.0) + 280.0) - 980.0) + 720.0) * 0.001388888888888889);}
-inline float__N__ eta_l6_5(float__N__ y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (-35210.0 * y + 193655.0) - 429975.0) + 482475.0) - 274015.0) + 63085.0) + 15.0) - 195.0) - 195.0) + 540.0) + 540.0)) * 0.001388888888888889);}
-inline float__N__ zeta_l6_5(float__N__ y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (21126.0 * y - 116193.0) + 257985.0) - 289485.0) + 164409.0) - 37848.0) - 12.0) + 60.0) + 120.0) - 54.0) - 108.0)) * 0.001388888888888889);}
-inline float__N__ theta_l6_5(float__N__ y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (-7042.0 * y + 38731.0) - 85995.0) + 96495.0) - 54803.0) + 12615.0) + 3.0) - 5.0) - 15.0) + 4.0) + 12.0)) * 0.001388888888888889);}
-inline float__N__ iota_l6_5(float__N__ y){
-  return ((y * y * y * y * y * y * (y * (y * (y * (y * (1006.0 * y - 5533.0) + 12285.0) - 13785.0) + 7829.0) - 1802.0)) * 0.001388888888888889);}
-
-
-inline float__N__ alpha_l6_6(float__N__ y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (3604.0 * y - 23426.0) + 63866.0) - 93577.0) + 77815.0) - 34869.0) + 6587.0) + 1.0) - 3.0) - 5.0) + 15.0) + 4.0) - 12.0)) * 0.001388888888888889);}
-inline float__N__ beta_l6_6(float__N__ y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (-25228.0 * y + 163982.0) - 447062.0) + 655039.0) - 544705.0) + 244083.0) - 46109.0) - 6.0) + 12.0) + 60.0) - 120.0) - 54.0) + 108.0)) * 0.001388888888888889);}
-inline float__N__ gamma_l6_6(float__N__ y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (75684.0 * y - 491946.0) + 1341186.0) - 1965117.0) + 1634115.0) - 732249.0) + 138327.0) + 15.0) - 15.0) - 195.0) + 195.0) + 540.0) - 540.0)) * 0.001388888888888889);}
-inline float__N__ delta_l6_6(float__N__ y){
-  return ((y * y * (y * y * (y * y * (y * (y * (y * (y * (y * (y * (-126140.0 * y + 819910.0) - 2235310.0) + 3275195.0) - 2723525.0) + 1220415.0) - 230545.0) - 20.0) + 280.0) - 980.0) + 720.0) * 0.001388888888888889);}
-inline float__N__ eta_l6_6(float__N__ y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (126140.0 * y - 819910.0) + 2235310.0) - 3275195.0) + 2723525.0) - 1220415.0) + 230545.0) + 15.0) + 15.0) - 195.0) - 195.0) + 540.0) + 540.0)) * 0.001388888888888889);}
-inline float__N__ zeta_l6_6(float__N__ y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (-75684.0 * y + 491946.0) - 1341186.0) + 1965117.0) - 1634115.0) + 732249.0) - 138327.0) - 6.0) - 12.0) + 60.0) + 120.0) - 54.0) - 108.0)) * 0.001388888888888889);}
-inline float__N__ theta_l6_6(float__N__ y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (25228.0 * y - 163982.0) + 447062.0) - 655039.0) + 544705.0) - 244083.0) + 46109.0) + 1.0) + 3.0) - 5.0) - 15.0) + 4.0) + 12.0)) * 0.001388888888888889);}
-inline float__N__ iota_l6_6(float__N__ y){
-  return ((y * y * y * y * y * y * y * (y * (y * (y * (y * (y * (-3604.0 * y + 23426.0) - 63866.0) + 93577.0) - 77815.0) + 34869.0) - 6587.0)) * 0.001388888888888889);}
-
-
-inline float__N__ alpha_l8_4(float__N__ y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (-3569.0 * y + 16061.0) - 27454.0) + 21126.0) - 6125.0) + 49.0) - 196.0) - 36.0) + 144.0)) * 2.48015873015873e-05);}
-inline float__N__ beta_l8_4(float__N__ y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (32121.0 * y - 144548.0) + 247074.0) - 190092.0) + 55125.0) - 672.0) + 2016.0) + 512.0) - 1536.0)) * 2.48015873015873e-05);}
-inline float__N__ gamma_l8_4(float__N__ y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (-128484.0 * y + 578188.0) - 988256.0) + 760312.0) - 221060.0) + 4732.0) - 9464.0) - 4032.0) + 8064.0)) * 2.48015873015873e-05);}
-inline float__N__ delta_l8_4(float__N__ y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (299796.0 * y - 1349096.0) + 2305856.0) - 1774136.0) + 517580.0) - 13664.0) + 13664.0) + 32256.0) - 32256.0)) * 2.48015873015873e-05);}
-inline float__N__ eta_l8_4(float__N__ y){
-  return ((y * y * (y * y * (y * (y * (y * (y * (-449694.0 * y + 2023630.0) - 3458700.0) + 2661540.0) - 778806.0) + 19110.0) - 57400.0) + 40320.0) * 2.48015873015873e-05);}
-inline float__N__ zeta_l8_4(float__N__ y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (449694.0 * y - 2023616.0) + 3458644.0) - 2662016.0) + 780430.0) - 13664.0) - 13664.0) + 32256.0) + 32256.0)) * 2.48015873015873e-05);}
-inline float__N__ theta_l8_4(float__N__ y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (-299796.0 * y + 1349068.0) - 2305744.0) + 1775032.0) - 520660.0) + 4732.0) + 9464.0) - 4032.0) - 8064.0)) * 2.48015873015873e-05);}
-inline float__N__ iota_l8_4(float__N__ y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (128484.0 * y - 578168.0) + 988176.0) - 760872.0) + 223020.0) - 672.0) - 2016.0) + 512.0) + 1536.0)) * 2.48015873015873e-05);}
-inline float__N__ kappa_l8_4(float__N__ y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (-32121.0 * y + 144541.0) - 247046.0) + 190246.0) - 55685.0) + 49.0) + 196.0) - 36.0) - 144.0)) * 2.48015873015873e-05);}
-inline float__N__ mu_l8_4(float__N__ y){
-  return ((y * y * y * y * y * (y * (y * (y * (3569.0 * y - 16060.0) + 27450.0) - 21140.0) + 6181.0)) * 2.48015873015873e-05);}
-
-
-
-#endif
diff --git a/hysop/backend/device/opencl/cl_src/remeshing/weights_builtin.cl b/hysop/backend/device/opencl/cl_src/remeshing/weights_builtin.cl
deleted file mode 100644
index cd1827937456989bf6eb51b36bf5025c5d4bbd55..0000000000000000000000000000000000000000
--- a/hysop/backend/device/opencl/cl_src/remeshing/weights_builtin.cl
+++ /dev/null
@@ -1,199 +0,0 @@
-/**
- * @file weights_builtin.cl
- * Remeshing formulas, vectorized version, use of builtin OpenCL fma.
- * Polynomials under Horner form.
- */
-
-inline float__N__ alpha_l2_1(float__N__ y){
-  return (y*fma(y,fma(y,-1.0, 2.0), - 1.0) * 0.5);}
-inline float__N__ beta_l2_1(float__N__ y){
-  return (fma(y*y, fma(y, 3.0, -5.0), 2.0) * 0.5);}
-inline float__N__ gamma_l2_1(float__N__   y){
-  return ((y * fma(y , fma(-3.0, y, 4.0), 1.0)) * 0.5);}
-inline float__N__ delta_l2_1(float__N__ y){
-  return ((y * y * fma(1.0, y, - 1.0)) * 0.5);}
-
-
-inline float__N__ alpha_l2_2(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, 2.0, -5.0), 3.0), 1.0), -1.0)) * 0.5);}
-inline float__N__ beta_l2_2(float__N__ y){
-  return (fma(y * y, fma(y, fma(y, fma(y, -6.0, 15.0), -9.0), -2.0), 2.0) * 0.5);}
-inline float__N__ gamma_l2_2(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, 6.0, -15.0), 9.0), 1.0), 1.0)) * 0.5);}
-inline float__N__ delta_l2_2(float__N__ y){
-  return ((y * y * y * fma(y, fma(y, -2.0, 5.0), -3.0)) * 0.5);}
-
-
-inline float__N__ alpha_l2_3(float__N__ y){
-  return ((y * fma(y, fma(y * y, fma(y, fma(y, fma(y, -6.0, 21.0), -25.0), 10.0), 1.0), -1.0)) * 0.5);}
-inline float__N__ beta_l2_3(float__N__ y){
-  return (fma(y * y, fma(y * y, fma(y, fma(y, fma(y, 18.0, -63.0), 75.0), -30.0), -2.0), 2.0) * 0.5);}
-inline float__N__ gamma_l2_3(float__N__ y){
-  return ((y * fma(y, fma(y * y, fma(y, fma(y, fma(y, -18.0, 63.0), -75.0), 30.0), 1.0), 1.0)) * 0.5);}
-inline float__N__ delta_l2_3(float__N__ y){
-  return ((y * y * y * y * fma(y, fma(y, fma(y, 6.0, -21.0), 25.0), -10.0)) * 0.5);}
-
-
-inline float__N__ alpha_l2_4(float__N__ y){
-  return ((y * fma(y, fma(y * y * y, fma(y, fma(y, fma(y, fma(y, 20.0, -90.0), 154.0), -119.0), 35.0), 1.0), -1.0)) * 0.5);}
-inline float__N__ beta_l2_4(float__N__ y){
-  return (fma(y * y, fma(y * y * y, fma(y, fma(y, fma(y, fma(y, -60.0, 270.0), -462.0), 357.0), -105.0), -2.0), 2.0) * 0.5);}
-inline float__N__ gamma_l2_4(float__N__ y){
-  return ((y * fma(y, fma(y * y * y, fma(y, fma(y, fma(y, fma(y, 60.0, -270.0), 462.0), -357.0), 105.0), 1.0), 1.0)) * 0.5);}
-inline float__N__ delta_l2_4(float__N__ y){
-  return ((y * y * y * y * y * fma(y, fma(y, fma(y, fma(y, -20.0, 90.0), -154.0), 119.0), -35.0)) * 0.5);}
-
-
-inline float__N__ alpha_l4_2(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, -5.0, 13.0), -9.0), -1.0), 2.0)) * 0.041666666666666664);}
-inline float__N__ beta_l4_2(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, 25.0, -64.0), 39.0), 16.0), -16.0)) * 0.041666666666666664);}
-inline float__N__ gamma_l4_2(float__N__ y){
-  return (fma(y * y, fma(y, fma(y, fma(y, -50.0, 126.0), -70.0), -30.0), 24.0) * 0.041666666666666664);}
-inline float__N__ delta_l4_2(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, 50.0, -124.0), 66.0), 16.0), 16.0)) * 0.041666666666666664);}
-inline float__N__ eta_l4_2(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, -25.0, 61.0), -33.0), -1.0), -2.0)) * 0.041666666666666664);}
-inline float__N__ zeta_l4_2(float__N__ y){
-  return ((y * y * y * fma(y, fma(y, 5.0, -12.0), 7.0)) * 0.041666666666666664);}
-
-
-inline float__N__ alpha_l4_3(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, 14.0, -49.0), 58.0), -22.0), -2.0), -1.0), 2.0)) * 0.041666666666666664);}
-inline float__N__ beta_l4_3(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -70.0, 245.0), -290.0), 111.0), 4.0), 16.0), -16.0)) * 0.041666666666666664);}
-inline float__N__ gamma_l4_3(float__N__ y){
-  return (fma(y * y, fma(y * y, fma(y, fma(y, fma(y, 140.0, -490.0), 580.0), -224.0), -30.0), 24.0) * 0.041666666666666664);}
-inline float__N__ delta_l4_3(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -140.0, 490.0), -580.0), 226.0), -4.0), 16.0), 16.0)) * 0.041666666666666664);}
-inline float__N__ eta_l4_3(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, 70.0, -245.0), 290.0), -114.0), 2.0), -1.0), -2.0)) * 0.041666666666666664);}
-inline float__N__ zeta_l4_3(float__N__ y){
-  return ((y * y * y * y * fma(y, fma(y, fma(y, -14.0, 49.0), -58.0), 23.0)) * 0.041666666666666664);}
-
-
-inline float__N__ alpha_l4_4(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -46.0, 207.0), -354.0), 273.0), -80.0), 1.0), -2.0), -1.0), 2.0)) * 0.041666666666666664);}
-inline float__N__ beta_l4_4(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, 230.0, -1035.0), 1770.0), -1365.0), 400.0), -4.0), 4.0), 16.0), -16.0)) * 0.041666666666666664);}
-inline float__N__ gamma_l4_4(float__N__ y){
-  return (fma(y * y, fma(y * y, fma(y, fma(y, fma(y, fma(y, fma(y, -460.0, 2070.0), -3540.0), 2730.0), -800.0), 6.0), -30.0), 24.0) * 0.041666666666666664);}
-inline float__N__ delta_l4_4(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, 460.0, -2070.0), 3540.0), -2730.0), 800.0), -4.0), -4.0), 16.0), 16.0)) * 0.041666666666666664);}
-inline float__N__ eta_l4_4(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -230.0, 1035.0), -1770.0), 1365.0), -400.0), 1.0), 2.0), -1.0), -2.0)) * 0.041666666666666664);}
-inline float__N__ zeta_l4_4(float__N__ y){
-  return ((y * y * y * y * y * fma(y, fma(y, fma(y, fma(y, 46.0, -207.0), 354.0), -273.0), 80.0)) * 0.041666666666666664);}
-
-
-inline float__N__ alpha_M8p(float__N__ y){
-  return (fma(y,fma(y,fma(y,fma(y,fma(y,fma(y,fma(-10.0,y, + 21.0), + 28.0), - 105.0), + 70.0), + 35.0), - 56.0), + 17.0) * 0.00029761904761904765);}
-inline float__N__ beta_M8p(float__N__ y){
-  return (fma(y,fma(y,fma(y,fma(y,fma(y,fma(y,fma(70.0,y, - 175.0), - 140.0), + 770.0), - 560.0), - 350.0), + 504.0), - 102.0) * 0.00029761904761904765);}
-inline float__N__ gamma_M8p(float__N__ y){
-  return (fma(y,fma(y,fma(y,fma(y,fma(y,fma(y,fma(-210.0,y, + 609.0), + 224.0), - 2135.0), + 910.0), + 2765.0), - 2520.0), + 255.0) * 0.00029761904761904765);}
-inline float__N__ delta_M8p(float__N__ y){
-  return (fma(y*y, fma(y*y, fma(y*y, fma(70.0,y, - 231.0), + 588.0), - 980.0), + 604.0) * 0.001488095238095238);}
-inline float__N__ eta_M8p(float__N__ y){
-  return (fma(y,fma(y,fma(y,fma(y,fma(y,fma(y,fma(-70.0,y, 259.0), - 84.0), - 427.0), - 182.0), + 553.0), + 504.0), + 51.0) * 0.001488095238095238);}
-inline float__N__ zeta_M8p(float__N__ y){
-  return (fma(y,fma(y,fma(y,fma(y,fma(y,fma(y,fma(210.0,y,- 861.0), + 532.0), + 770.0), + 560.0), - 350.0), - 504.0), - 102.0) * 0.00029761904761904765);}
-inline float__N__ theta_M8p(float__N__ y){
-  return (fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(-70.0, y, 315.0), -280.0), -105.0), -70.0), 35.0), 56.0), 17.0) * 0.00029761904761904765);}
-inline float__N__ iota_M8p(float__N__ y){
-  return ((y * y * y * y * y * fma(y , fma(10.0 , y ,- 49.0) , 56.0)) * 0.00029761904761904765);}
-
-
-inline float__N__ alpha_l6_3(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -89.0, 312.0), -370.0), 140.0), 15.0), 4.0), -12.0)) * 0.001388888888888889);}
-inline float__N__ beta_l6_3(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, 623.0, -2183.0), 2581.0), -955.0), -120.0), -54.0), 108.0)) * 0.001388888888888889);}
-inline float__N__ gamma_l6_3(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -1869.0, 6546.0), -7722.0), 2850.0), 195.0), 540.0), -540.0)) * 0.001388888888888889);}
-inline float__N__ delta_l6_3(float__N__ y){
-  return (fma(y * y, fma(y * y, fma(y, fma(y, fma(y, 3115.0, -10905.0), 12845.0), -4795.0), -980.0), 720.0) * 0.001388888888888889);}
-inline float__N__ eta_l6_3(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -3115.0, 10900.0), -12830.0), 4880.0), -195.0), 540.0), 540.0)) * 0.001388888888888889);}
-inline float__N__ zeta_l6_3(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, 1869.0, -6537.0), 7695.0), -2985.0), 120.0), -54.0), -108.0)) * 0.001388888888888889);}
-inline float__N__ theta_l6_3(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -623.0, 2178.0), -2566.0), 1010.0), -15.0), 4.0), 12.0)) * 0.001388888888888889);}
-inline float__N__ iota_l6_3(float__N__ y){
-  return ((y * y * y * y * fma(y, fma(y, fma(y, 89.0, -311.0), 367.0), -145.0)) * 0.001388888888888889);}
-
-
-inline float__N__ alpha_l6_4(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, 290.0, -1305.0), 2231.0), -1718.0), 500.0), -5.0), 15.0), 4.0), -12.0)) * 0.001388888888888889);}
-inline float__N__ beta_l6_4(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -2030.0, 9135.0), -15617.0), 12027.0), -3509.0), 60.0), -120.0), -54.0), 108.0)) * 0.001388888888888889);}
-inline float__N__ gamma_l6_4(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, 6090.0, -27405.0), 46851.0), -36084.0), 10548.0), -195.0), 195.0), 540.0), -540.0)) * 0.001388888888888889);}
-inline float__N__ delta_l6_4(float__N__ y){
-  return (fma(y * y, fma(y * y, fma(y, fma(y, fma(y, fma(y, fma(y, -10150.0, 45675.0), -78085.0), 60145.0), -17605.0), 280.0), -980.0), 720.0) * 0.001388888888888889);}
-inline float__N__ eta_l6_4(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, 10150.0, -45675.0), 78085.0), -60150.0), 17620.0), -195.0), -195.0), 540.0), 540.0)) * 0.001388888888888889);}
-inline float__N__ zeta_l6_4(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -6090.0, 27405.0), -46851.0), 36093.0), -10575.0), 60.0), 120.0), -54.0), -108.0)) * 0.001388888888888889);}
-inline float__N__ theta_l6_4(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, 2030.0, -9135.0), 15617.0), -12032.0), 3524.0), -5.0), -15.0), 4.0), 12.0)) * 0.001388888888888889);}
-inline float__N__ iota_l6_4(float__N__ y){
-  return ((y * y * y * y * y * fma(y, fma(y, fma(y, fma(y, -290.0, 1305.0), -2231.0), 1719.0), -503.0)) * 0.001388888888888889);}
-
-
-inline float__N__ alpha_l6_5(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -1006.0, 5533.0), -12285.0), 13785.0), -7829.0), 1803.0), -3.0), -5.0), 15.0), 4.0), -12.0)) * 0.001388888888888889);}
-inline float__N__ beta_l6_5(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, 7042.0, -38731.0), 85995.0), -96495.0), 54803.0), -12620.0), 12.0), 60.0), -120.0), -54.0), 108.0)) * 0.001388888888888889);}
-inline float__N__ gamma_l6_5(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -21126.0, 116193.0), -257985.0), 289485.0), -164409.0), 37857.0), -15.0), -195.0), 195.0), 540.0), -540.0)) * 0.001388888888888889);}
-inline float__N__ delta_l6_5(float__N__ y){
-  return (fma(y * y, fma(y * y, fma(y * y, fma(y, fma(y, fma(y, fma(y, fma(y, 35210.0, -193655.0), 429975.0), -482475.0), 274015.0), -63090.0), 280.0), -980.0), 720.0) * 0.001388888888888889);}
-inline float__N__ eta_l6_5(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -35210.0, 193655.0), -429975.0), 482475.0), -274015.0), 63085.0), 15.0), -195.0), -195.0), 540.0), 540.0)) * 0.001388888888888889);}
-inline float__N__ zeta_l6_5(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, 21126.0, -116193.0), 257985.0), -289485.0), 164409.0), -37848.0), -12.0), 60.0), 120.0), -54.0), -108.0)) * 0.001388888888888889);}
-inline float__N__ theta_l6_5(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -7042.0, 38731.0), -85995.0), 96495.0), -54803.0), 12615.0), 3.0), -5.0), -15.0), 4.0), 12.0)) * 0.001388888888888889);}
-inline float__N__ iota_l6_5(float__N__ y){
-  return ((y * y * y * y * y * y * fma(y, fma(y, fma(y, fma(y, fma(y, 1006.0, -5533.0), 12285.0), -13785.0), 7829.0), -1802.0)) * 0.001388888888888889);}
-
-
-inline float__N__ alpha_l6_6(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, 3604.0, -23426.0), 63866.0), -93577.0), 77815.0), -34869.0), 6587.0), 1.0), -3.0), -5.0), 15.0), 4.0), -12.0)) * 0.001388888888888889);}
-inline float__N__ beta_l6_6(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -25228.0, 163982.0), -447062.0), 655039.0), -544705.0), 244083.0), -46109.0), -6.0), 12.0), 60.0), -120.0), -54.0), 108.0)) * 0.001388888888888889);}
-inline float__N__ gamma_l6_6(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, 75684.0, -491946.0), 1341186.0), -1965117.0), 1634115.0), -732249.0), 138327.0), 15.0), -15.0), -195.0), 195.0), 540.0), -540.0)) * 0.001388888888888889);}
-inline float__N__ delta_l6_6(float__N__ y){
-  return (fma(y * y, fma(y * y, fma(y * y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -126140.0, 819910.0), -2235310.0), 3275195.0), -2723525.0), 1220415.0), -230545.0), -20.0), 280.0), -980.0), 720.0) * 0.001388888888888889);}
-inline float__N__ eta_l6_6(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, 126140.0, -819910.0), 2235310.0), -3275195.0), 2723525.0), -1220415.0), 230545.0), 15.0), 15.0), -195.0), -195.0), 540.0), 540.0)) * 0.001388888888888889);}
-inline float__N__ zeta_l6_6(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -75684.0, 491946.0), -1341186.0), 1965117.0), -1634115.0), 732249.0), -138327.0), -6.0), -12.0), 60.0), 120.0), -54.0), -108.0)) * 0.001388888888888889);}
-inline float__N__ theta_l6_6(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, 25228.0, -163982.0), 447062.0), -655039.0), 544705.0), -244083.0), 46109.0), 1.0), 3.0), -5.0), -15.0), 4.0), 12.0)) * 0.001388888888888889);}
-inline float__N__ iota_l6_6(float__N__ y){
-  return ((y * y * y * y * y * y * y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -3604.0, 23426.0), -63866.0), 93577.0), -77815.0), 34869.0), -6587.0)) * 0.001388888888888889);}
-
-
-
-inline float__N__ alpha_l8_4(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -3569.0, 16061.0), -27454.0), 21126.0), -6125.0), 49.0), -196.0), -36.0), 144.0)) * 2.48015873015873e-05);}
-inline float__N__ beta_l8_4(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, 32121.0, -144548.0), 247074.0), -190092.0), 55125.0), -672.0), 2016.0), 512.0), -1536.0)) * 2.48015873015873e-05);}
-inline float__N__ gamma_l8_4(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -128484.0, 578188.0), -988256.0), 760312.0), -221060.0), 4732.0), -9464.0), -4032.0), 8064.0)) * 2.48015873015873e-05);}
-inline float__N__ delta_l8_4(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, 299796.0, -1349096.0), 2305856.0), -1774136.0), 517580.0), -13664.0), 13664.0), 32256.0), -32256.0)) * 2.48015873015873e-05);}
-inline float__N__ eta_l8_4(float__N__ y){
-  return (fma(y * y, fma(y * y, fma(y, fma(y, fma(y, fma(y, fma(y, -449694.0, 2023630.0), -3458700.0), 2661540.0), -778806.0), 19110.0), -57400.0), 40320.0) * 2.48015873015873e-05);}
-inline float__N__ zeta_l8_4(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, 449694.0, -2023616.0), 3458644.0), -2662016.0), 780430.0), -13664.0), -13664.0), 32256.0), 32256.0)) * 2.48015873015873e-05);}
-inline float__N__ theta_l8_4(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -299796.0, 1349068.0), -2305744.0), 1775032.0), -520660.0), 4732.0), 9464.0), -4032.0), -8064.0)) * 2.48015873015873e-05);}
-inline float__N__ iota_l8_4(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, 128484.0, -578168.0), 988176.0), -760872.0), 223020.0), -672.0), -2016.0), 512.0), 1536.0)) * 2.48015873015873e-05);}
-inline float__N__ kappa_l8_4(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -32121.0, 144541.0), -247046.0), 190246.0), -55685.0), 49.0), 196.0), -36.0), -144.0)) * 2.48015873015873e-05);}
-inline float__N__ mu_l8_4(float__N__ y){
-  return ((y * y * y * y * y * fma(y, fma(y, fma(y, fma(y, 3569.0, -16060.0), 27450.0), -21140.0), 6181.0)) * 2.48015873015873e-05);}
diff --git a/hysop/backend/device/opencl/cl_src/remeshing/weights_noVec.cl b/hysop/backend/device/opencl/cl_src/remeshing/weights_noVec.cl
deleted file mode 100644
index a46f89e31324df10b568c2ca10d0923bc428ecbc..0000000000000000000000000000000000000000
--- a/hysop/backend/device/opencl/cl_src/remeshing/weights_noVec.cl
+++ /dev/null
@@ -1,198 +0,0 @@
-/**
- * @file weights_noVec.cl
- * Remeshing formulas, basic version.
- * Polynomials under Horner form.
- */
-
-inline float alpha_l2_1(float y){
-  return ((y * (y * (-y + 2.0) - 1.0)) * 0.5);}
-inline float beta_l2_1(float y){
-  return ((y * y * (3.0 * y - 5.0) + 2.0) * 0.5);}
-inline float gamma_l2_1(float y){
-  return ((y * (y * (-3.0 * y + 4.0) + 1.0)) * 0.5);}
-inline float delta_l2_1(float y){
-  return ((y * y * (y - 1.0)) * 0.5);}
-
-
-inline float alpha_l2_2(float y){
-  return ((y * (y * (y * (y * (2.0 * y - 5.0) + 3.0) + 1.0) - 1.0)) * 0.5);}
-inline float beta_l2_2(float y){
-  return ((y * y * (y * (y * (-6.0 * y + 15.0) - 9.0) - 2.0) + 2.0) * 0.5);}
-inline float gamma_l2_2(float y){
-  return ((y * (y * (y * (y * (6.0 * y - 15.0) + 9.0) + 1.0) + 1.0)) * 0.5);}
-inline float delta_l2_2(float y){
-  return ((y * y * y * (y * (-2.0 * y + 5.0) - 3.0)) * 0.5);}
-
-
-inline float alpha_l2_3(float y){
-  return ((y * (y * (y * y * (y * (y * (-6.0 * y + 21.0) - 25.0) + 10.0) + 1.0) - 1.0)) * 0.5);}
-inline float beta_l2_3(float y){
-  return ((y * y * (y * y * (y * (y * (18.0 * y - 63.0) + 75.0) - 30.0) - 2.0) + 2.0) * 0.5);}
-inline float gamma_l2_3(float y){
-  return ((y * (y * (y * y * (y * (y * (-18.0 * y + 63.0) - 75.0) + 30.0) + 1.0) + 1.0)) * 0.5);}
-inline float delta_l2_3(float y){
-  return ((y * y * y * y * (y * (y * (6.0 * y - 21.0) + 25.0) - 10.0)) * 0.5);}
-
-
-inline float alpha_l2_4(float y){
-  return ((y * (y * (y * y * y * (y * (y * (y * (20.0 * y - 90.0) + 154.0) - 119.0) + 35.0) + 1.0) - 1.0)) * 0.5);}
-inline float beta_l2_4(float y){
-  return ((y * y * (y * y * y * (y * (y * (y * (-60.0 * y + 270.0) - 462.0) + 357.0) - 105.0) - 2.0) + 2.0) * 0.5);}
-inline float gamma_l2_4(float y){
-  return ((y * (y * (y * y * y * (y * (y * (y * (60.0 * y - 270.0) + 462.0) - 357.0) + 105.0) + 1.0) + 1.0)) * 0.5);}
-inline float delta_l2_4(float y){
-  return ((y * y * y * y * y * (y * (y * (y * (-20.0 * y + 90.0) - 154.0) + 119.0) - 35.0)) * 0.5);}
-
-
-inline float alpha_l4_2(float y){
-  return ((y * (y * (y * (y * (-5.0 * y + 13.0) - 9.0) - 1.0) + 2.0)) * 0.041666666666666664);}
-inline float beta_l4_2(float y){
-  return ((y * (y * (y * (y * (25.0 * y - 64.0) + 39.0) + 16.0) - 16.0)) * 0.041666666666666664);}
-inline float gamma_l4_2(float y){
-  return ((y * y * (y * (y * (-50.0 * y + 126.0) - 70.0) - 30.0) + 24.0) * 0.041666666666666664);}
-inline float delta_l4_2(float y){
-  return ((y * (y * (y * (y * (50.0 * y - 124.0) + 66.0) + 16.0) + 16.0)) * 0.041666666666666664);}
-inline float eta_l4_2(float y){
-  return ((y * (y * (y * (y * (-25.0 * y + 61.0) - 33.0) - 1.0) - 2.0)) * 0.041666666666666664);}
-inline float zeta_l4_2(float y){
-  return ((y * y * y * (y * (5.0 * y - 12.0) + 7.0)) * 0.041666666666666664);}
-
-
-inline float alpha_l4_3(float y){
-  return ((y * (y * (y * (y * (y * (y * (14.0 * y - 49.0) + 58.0) - 22.0) - 2.0) - 1.0) + 2.0)) * 0.041666666666666664);}
-inline float beta_l4_3(float y){
-  return ((y * (y * (y * (y * (y * (y * (-70.0 * y + 245.0) - 290.0) + 111.0) + 4.0) + 16.0) - 16.0)) * 0.041666666666666664);}
-inline float gamma_l4_3(float y){
-  return ((y * y * (y * y * (y * (y * (140.0 * y - 490.0) + 580.0) - 224.0) - 30.0) + 24.0) * 0.041666666666666664);}
-inline float delta_l4_3(float y){
-  return ((y * (y * (y * (y * (y * (y * (-140.0 * y + 490.0) - 580.0) + 226.0) - 4.0) + 16.0) + 16.0)) * 0.041666666666666664);}
-inline float eta_l4_3(float y){
-  return ((y * (y * (y * (y * (y * (y * (70.0 * y - 245.0) + 290.0) - 114.0) + 2.0) - 1.0) - 2.0)) * 0.041666666666666664);}
-inline float zeta_l4_3(float y){
-  return ((y * y * y * y * (y * (y * (-14.0 * y + 49.0) - 58.0) + 23.0)) * 0.041666666666666664);}
-
-
-inline float alpha_l4_4(float y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (-46.0 * y + 207.0) - 354.0) + 273.0) - 80.0) + 1.0) - 2.0) - 1.0) + 2.0)) * 0.041666666666666664);}
-inline float beta_l4_4(float y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (230.0 * y - 1035.0) + 1770.0) - 1365.0) + 400.0) - 4.0) + 4.0) + 16.0) - 16.0)) * 0.041666666666666664);}
-inline float gamma_l4_4(float y){
-  return ((y * y * (y * y * (y * (y * (y * (y * (-460.0 * y + 2070.0) - 3540.0) + 2730.0) - 800.0) + 6.0) - 30.0) + 24.0) * 0.041666666666666664);}
-inline float delta_l4_4(float y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (460.0 * y - 2070.0) + 3540.0) - 2730.0) + 800.0) - 4.0) - 4.0) + 16.0) + 16.0)) * 0.041666666666666664);}
-inline float eta_l4_4(float y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (-230.0 * y + 1035.0) - 1770.0) + 1365.0) - 400.0) + 1.0) + 2.0) - 1.0) - 2.0)) * 0.041666666666666664);}
-inline float zeta_l4_4(float y){
-  return ((y * y * y * y * y * (y * (y * (y * (46.0 * y - 207.0) + 354.0) - 273.0) + 80.0)) * 0.041666666666666664);}
-
-
-inline float alpha_M8p(float y){
-  return ((y*(y*(y*(y*(y*(y*(-10.0*y + 21.0) + 28.0) - 105.0) + 70.0) + 35.0) - 56.0) + 17.0) * 0.00029761904761904765);}
-inline float beta_M8p(float y){
-  return ((y*(y*(y*(y*(y*(y*(70.0*y - 175.0) - 140.0) + 770.0) - 560.0) - 350.0) + 504.0) - 102.0) * 0.00029761904761904765);}
-inline float gamma_M8p(float y){
-  return ((y*(y*(y*(y*(y*(y*(-210.0*y + 609.0) + 224.0) - 2135.0) + 910.0) + 2765.0) - 2520.0) + 255.0) * 0.00029761904761904765);}
-inline float delta_M8p(float y){
-  return ((y*y* (y*y* (y*y* (70.0*y - 231.0) + 588.0) - 980.0) + 604.0) * 0.001488095238095238);}
-inline float eta_M8p(float y){
-  return ((y*(y*(y*(y*(y*(y*(-70.0*y+ 259.0) - 84.0) - 427.0) - 182.0)+ 553.0) + 504.0)+ 51.0) * 0.001488095238095238);}
-inline float zeta_M8p(float y){
-  return ((y*(y*(y*(y*(y*(y*(210.0*y- 861.0) + 532.0) + 770.0) + 560.0) - 350.0) - 504.0) - 102.0) * 0.00029761904761904765);}
-inline float theta_M8p(float y){
-  return ((y* (y* (y* (y* (y* (y* (-70.0* y+ 315.0) -280.0) -105.0) -70.0) +35.0)+ 56.0) +17.0) * 0.00029761904761904765);}
-inline float iota_M8p(float y){
-  return ((y * y * y * y * y * (y * (10.0 * y - 49.0) + 56.0)) * 0.00029761904761904765);}
-
-
-inline float alpha_l6_3(float y){
-  return ((y * (y * (y * (y * (y * (y * (-89.0 * y + 312.0) - 370.0) + 140.0) + 15.0) + 4.0) - 12.0)) * 0.001388888888888889);}
-inline float beta_l6_3(float y){
-  return ((y * (y * (y * (y * (y * (y * (623.0 * y - 2183.0) + 2581.0) - 955.0) - 120.0) - 54.0) + 108.0)) * 0.001388888888888889);}
-inline float gamma_l6_3(float y){
-  return ((y * (y * (y * (y * (y * (y * (-1869.0 * y + 6546.0) - 7722.0) + 2850.0) + 195.0) + 540.0) - 540.0)) * 0.001388888888888889);}
-inline float delta_l6_3(float y){
-  return ((y * y * (y * y * (y * (y * (3115.0 * y - 10905.0) + 12845.0) - 4795.0) - 980.0) + 720.0) * 0.001388888888888889);}
-inline float eta_l6_3(float y){
-  return ((y * (y * (y * (y * (y * (y * (-3115.0 * y + 10900.0) - 12830.0) + 4880.0) - 195.0) + 540.0) + 540.0)) * 0.001388888888888889);}
-inline float zeta_l6_3(float y){
-  return ((y * (y * (y * (y * (y * (y * (1869.0 * y - 6537.0) + 7695.0) - 2985.0) + 120.0) - 54.0) - 108.0)) * 0.001388888888888889);}
-inline float theta_l6_3(float y){
-  return ((y * (y * (y * (y * (y * (y * (-623.0 * y + 2178.0) - 2566.0) + 1010.0) - 15.0) + 4.0) + 12.0)) * 0.001388888888888889);}
-inline float iota_l6_3(float y){
-  return ((y * y * y * y * (y * (y * (89.0 * y - 311.0) + 367.0) - 145.0)) * 0.001388888888888889);}
-
-
-inline float alpha_l6_4(float y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (290.0 * y - 1305.0) + 2231.0) - 1718.0) + 500.0) - 5.0) + 15.0) + 4.0) - 12.0)) * 0.001388888888888889);}
-inline float beta_l6_4(float y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (-2030.0 * y + 9135.0) - 15617.0) + 12027.0) - 3509.0) + 60.0) - 120.0) - 54.0) + 108.0)) * 0.001388888888888889);}
-inline float gamma_l6_4(float y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (6090.0 * y - 27405.0) + 46851.0) - 36084.0) + 10548.0) - 195.0) + 195.0) + 540.0) - 540.0)) * 0.001388888888888889);}
-inline float delta_l6_4(float y){
-  return ((y * y * (y * y * (y * (y * (y * (y * (-10150.0 * y + 45675.0) - 78085.0) + 60145.0) - 17605.0) + 280.0) - 980.0) + 720.0) * 0.001388888888888889);}
-inline float eta_l6_4(float y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (10150.0 * y - 45675.0) + 78085.0) - 60150.0) + 17620.0) - 195.0) - 195.0) + 540.0) + 540.0)) * 0.001388888888888889);}
-inline float zeta_l6_4(float y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (-6090.0 * y + 27405.0) - 46851.0) + 36093.0) - 10575.0) + 60.0) + 120.0) - 54.0) - 108.0)) * 0.001388888888888889);}
-inline float theta_l6_4(float y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (2030.0 * y - 9135.0) + 15617.0) - 12032.0) + 3524.0) - 5.0) - 15.0) + 4.0) + 12.0)) * 0.001388888888888889);}
-inline float iota_l6_4(float y){
-  return ((y * y * y * y * y * (y * (y * (y * (-290.0 * y + 1305.0) - 2231.0) + 1719.0) - 503.0)) * 0.001388888888888889);}
-
-
-inline float alpha_l6_5(float y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (-1006.0 * y + 5533.0) - 12285.0) + 13785.0) - 7829.0) + 1803.0) - 3.0) - 5.0) + 15.0) + 4.0) - 12.0)) * 0.001388888888888889);}
-inline float beta_l6_5(float y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (7042.0 * y - 38731.0) + 85995.0) - 96495.0) + 54803.0) - 12620.0) + 12.0) + 60.0) - 120.0) - 54.0) + 108.0)) * 0.001388888888888889);}
-inline float gamma_l6_5(float y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (-21126.0 * y + 116193.0) - 257985.0) + 289485.0) - 164409.0) + 37857.0) - 15.0) - 195.0) + 195.0) + 540.0) - 540.0)) * 0.001388888888888889);}
-inline float delta_l6_5(float y){
-  return ((y * y * (y * y * (y * y * (y * (y * (y * (y * (35210.0 * y - 193655.0) + 429975.0) - 482475.0) + 274015.0) - 63090.0) + 280.0) - 980.0) + 720.0) * 0.001388888888888889);}
-inline float eta_l6_5(float y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (-35210.0 * y + 193655.0) - 429975.0) + 482475.0) - 274015.0) + 63085.0) + 15.0) - 195.0) - 195.0) + 540.0) + 540.0)) * 0.001388888888888889);}
-inline float zeta_l6_5(float y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (21126.0 * y - 116193.0) + 257985.0) - 289485.0) + 164409.0) - 37848.0) - 12.0) + 60.0) + 120.0) - 54.0) - 108.0)) * 0.001388888888888889);}
-inline float theta_l6_5(float y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (-7042.0 * y + 38731.0) - 85995.0) + 96495.0) - 54803.0) + 12615.0) + 3.0) - 5.0) - 15.0) + 4.0) + 12.0)) * 0.001388888888888889);}
-inline float iota_l6_5(float y){
-  return ((y * y * y * y * y * y * (y * (y * (y * (y * (1006.0 * y - 5533.0) + 12285.0) - 13785.0) + 7829.0) - 1802.0)) * 0.001388888888888889);}
-
-
-inline float alpha_l6_6(float y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (3604.0 * y - 23426.0) + 63866.0) - 93577.0) + 77815.0) - 34869.0) + 6587.0) + 1.0) - 3.0) - 5.0) + 15.0) + 4.0) - 12.0)) * 0.001388888888888889);}
-inline float beta_l6_6(float y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (-25228.0 * y + 163982.0) - 447062.0) + 655039.0) - 544705.0) + 244083.0) - 46109.0) - 6.0) + 12.0) + 60.0) - 120.0) - 54.0) + 108.0)) * 0.001388888888888889);}
-inline float gamma_l6_6(float y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (75684.0 * y - 491946.0) + 1341186.0) - 1965117.0) + 1634115.0) - 732249.0) + 138327.0) + 15.0) - 15.0) - 195.0) + 195.0) + 540.0) - 540.0)) * 0.001388888888888889);}
-inline float delta_l6_6(float y){
-  return ((y * y * (y * y * (y * y * (y * (y * (y * (y * (y * (y * (-126140.0 * y + 819910.0) - 2235310.0) + 3275195.0) - 2723525.0) + 1220415.0) - 230545.0) - 20.0) + 280.0) - 980.0) + 720.0) * 0.001388888888888889);}
-inline float eta_l6_6(float y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (126140.0 * y - 819910.0) + 2235310.0) - 3275195.0) + 2723525.0) - 1220415.0) + 230545.0) + 15.0) + 15.0) - 195.0) - 195.0) + 540.0) + 540.0)) * 0.001388888888888889);}
-inline float zeta_l6_6(float y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (-75684.0 * y + 491946.0) - 1341186.0) + 1965117.0) - 1634115.0) + 732249.0) - 138327.0) - 6.0) - 12.0) + 60.0) + 120.0) - 54.0) - 108.0)) * 0.001388888888888889);}
-inline float theta_l6_6(float y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (25228.0 * y - 163982.0) + 447062.0) - 655039.0) + 544705.0) - 244083.0) + 46109.0) + 1.0) + 3.0) - 5.0) - 15.0) + 4.0) + 12.0)) * 0.001388888888888889);}
-inline float iota_l6_6(float y){
-  return ((y * y * y * y * y * y * y * (y * (y * (y * (y * (y * (-3604.0 * y + 23426.0) - 63866.0) + 93577.0) - 77815.0) + 34869.0) - 6587.0)) * 0.001388888888888889);}
-
-
-inline float alpha_l8_4(float y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (-3569.0 * y + 16061.0) - 27454.0) + 21126.0) - 6125.0) + 49.0) - 196.0) - 36.0) + 144.0)) * 2.48015873015873e-05);}
-inline float beta_l8_4(float y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (32121.0 * y - 144548.0) + 247074.0) - 190092.0) + 55125.0) - 672.0) + 2016.0) + 512.0) - 1536.0)) * 2.48015873015873e-05);}
-inline float gamma_l8_4(float y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (-128484.0 * y + 578188.0) - 988256.0) + 760312.0) - 221060.0) + 4732.0) - 9464.0) - 4032.0) + 8064.0)) * 2.48015873015873e-05);}
-inline float delta_l8_4(float y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (299796.0 * y - 1349096.0) + 2305856.0) - 1774136.0) + 517580.0) - 13664.0) + 13664.0) + 32256.0) - 32256.0)) * 2.48015873015873e-05);}
-inline float eta_l8_4(float y){
-  return ((y * y * (y * y * (y * (y * (y * (y * (-449694.0 * y + 2023630.0) - 3458700.0) + 2661540.0) - 778806.0) + 19110.0) - 57400.0) + 40320.0) * 2.48015873015873e-05);}
-inline float zeta_l8_4(float y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (449694.0 * y - 2023616.0) + 3458644.0) - 2662016.0) + 780430.0) - 13664.0) - 13664.0) + 32256.0) + 32256.0)) * 2.48015873015873e-05);}
-inline float theta_l8_4(float y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (-299796.0 * y + 1349068.0) - 2305744.0) + 1775032.0) - 520660.0) + 4732.0) + 9464.0) - 4032.0) - 8064.0)) * 2.48015873015873e-05);}
-inline float iota_l8_4(float y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (128484.0 * y - 578168.0) + 988176.0) - 760872.0) + 223020.0) - 672.0) - 2016.0) + 512.0) + 1536.0)) * 2.48015873015873e-05);}
-inline float kappa_l8_4(float y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (-32121.0 * y + 144541.0) - 247046.0) + 190246.0) - 55685.0) + 49.0) + 196.0) - 36.0) - 144.0)) * 2.48015873015873e-05);}
-inline float mu_l8_4(float y){
-  return ((y * y * y * y * y * (y * (y * (y * (3569.0 * y - 16060.0) + 27450.0) - 21140.0) + 6181.0)) * 2.48015873015873e-05);}
diff --git a/hysop/backend/device/opencl/cl_src/remeshing/weights_noVec_builtin.cl b/hysop/backend/device/opencl/cl_src/remeshing/weights_noVec_builtin.cl
deleted file mode 100644
index 4c0e124803bf7a58c73fa796ccda0f0c18e919ba..0000000000000000000000000000000000000000
--- a/hysop/backend/device/opencl/cl_src/remeshing/weights_noVec_builtin.cl
+++ /dev/null
@@ -1,198 +0,0 @@
-/**
- * @file weights_noVec_builtin.cl
- * Remeshing formulas, vectorized version, use of builtin OpenCL fma.
- * Polynomials under Horner form.
- */
-
-inline float alpha_l2_1(float y){
-  return (y*fma(y,fma(y,-1.0, 2.0), - 1.0) * 0.5);}
-inline float beta_l2_1(float y){
-  return (fma(y*y, fma(y, 3.0, -5.0), 2.0) * 0.5);}
-inline float gamma_l2_1(float   y){
-  return ((y * fma(y , fma(-3.0, y, 4.0), 1.0)) * 0.5);}
-inline float delta_l2_1(float y){
-  return ((y * y * fma(1.0, y, - 1.0)) * 0.5);}
-
-
-inline float alpha_l2_2(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, 2.0, -5.0), 3.0), 1.0), -1.0)) * 0.5);}
-inline float beta_l2_2(float y){
-  return (fma(y * y, fma(y, fma(y, fma(y, -6.0, 15.0), -9.0), -2.0), 2.0) * 0.5);}
-inline float gamma_l2_2(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, 6.0, -15.0), 9.0), 1.0), 1.0)) * 0.5);}
-inline float delta_l2_2(float y){
-  return ((y * y * y * fma(y, fma(y, -2.0, 5.0), -3.0)) * 0.5);}
-
-
-inline float alpha_l2_3(float y){
-  return ((y * fma(y, fma(y * y, fma(y, fma(y, fma(y, -6.0, 21.0), -25.0), 10.0), 1.0), -1.0)) * 0.5);}
-inline float beta_l2_3(float y){
-  return (fma(y * y, fma(y * y, fma(y, fma(y, fma(y, 18.0, -63.0), 75.0), -30.0), -2.0), 2.0) * 0.5);}
-inline float gamma_l2_3(float y){
-  return ((y * fma(y, fma(y * y, fma(y, fma(y, fma(y, -18.0, 63.0), -75.0), 30.0), 1.0), 1.0)) * 0.5);}
-inline float delta_l2_3(float y){
-  return ((y * y * y * y * fma(y, fma(y, fma(y, 6.0, -21.0), 25.0), -10.0)) * 0.5);}
-
-
-inline float alpha_l2_4(float y){
-  return ((y * fma(y, fma(y * y * y, fma(y, fma(y, fma(y, fma(y, 20.0, -90.0), 154.0), -119.0), 35.0), 1.0), -1.0)) * 0.5);}
-inline float beta_l2_4(float y){
-  return (fma(y * y, fma(y * y * y, fma(y, fma(y, fma(y, fma(y, -60.0, 270.0), -462.0), 357.0), -105.0), -2.0), 2.0) * 0.5);}
-inline float gamma_l2_4(float y){
-  return ((y * fma(y, fma(y * y * y, fma(y, fma(y, fma(y, fma(y, 60.0, -270.0), 462.0), -357.0), 105.0), 1.0), 1.0)) * 0.5);}
-inline float delta_l2_4(float y){
-  return ((y * y * y * y * y * fma(y, fma(y, fma(y, fma(y, -20.0, 90.0), -154.0), 119.0), -35.0)) * 0.5);}
-
-
-inline float alpha_l4_2(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, -5.0, 13.0), -9.0), -1.0), 2.0)) * 0.041666666666666664);}
-inline float beta_l4_2(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, 25.0, -64.0), 39.0), 16.0), -16.0)) * 0.041666666666666664);}
-inline float gamma_l4_2(float y){
-  return (fma(y * y, fma(y, fma(y, fma(y, -50.0, 126.0), -70.0), -30.0), 24.0) * 0.041666666666666664);}
-inline float delta_l4_2(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, 50.0, -124.0), 66.0), 16.0), 16.0)) * 0.041666666666666664);}
-inline float eta_l4_2(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, -25.0, 61.0), -33.0), -1.0), -2.0)) * 0.041666666666666664);}
-inline float zeta_l4_2(float y){
-  return ((y * y * y * fma(y, fma(y, 5.0, -12.0), 7.0)) * 0.041666666666666664);}
-
-
-inline float alpha_l4_3(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, 14.0, -49.0), 58.0), -22.0), -2.0), -1.0), 2.0)) * 0.041666666666666664);}
-inline float beta_l4_3(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -70.0, 245.0), -290.0), 111.0), 4.0), 16.0), -16.0)) * 0.041666666666666664);}
-inline float gamma_l4_3(float y){
-  return (fma(y * y, fma(y * y, fma(y, fma(y, fma(y, 140.0, -490.0), 580.0), -224.0), -30.0), 24.0) * 0.041666666666666664);}
-inline float delta_l4_3(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -140.0, 490.0), -580.0), 226.0), -4.0), 16.0), 16.0)) * 0.041666666666666664);}
-inline float eta_l4_3(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, 70.0, -245.0), 290.0), -114.0), 2.0), -1.0), -2.0)) * 0.041666666666666664);}
-inline float zeta_l4_3(float y){
-  return ((y * y * y * y * fma(y, fma(y, fma(y, -14.0, 49.0), -58.0), 23.0)) * 0.041666666666666664);}
-
-
-inline float alpha_l4_4(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -46.0, 207.0), -354.0), 273.0), -80.0), 1.0), -2.0), -1.0), 2.0)) * 0.041666666666666664);}
-inline float beta_l4_4(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, 230.0, -1035.0), 1770.0), -1365.0), 400.0), -4.0), 4.0), 16.0), -16.0)) * 0.041666666666666664);}
-inline float gamma_l4_4(float y){
-  return (fma(y * y, fma(y * y, fma(y, fma(y, fma(y, fma(y, fma(y, -460.0, 2070.0), -3540.0), 2730.0), -800.0), 6.0), -30.0), 24.0) * 0.041666666666666664);}
-inline float delta_l4_4(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, 460.0, -2070.0), 3540.0), -2730.0), 800.0), -4.0), -4.0), 16.0), 16.0)) * 0.041666666666666664);}
-inline float eta_l4_4(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -230.0, 1035.0), -1770.0), 1365.0), -400.0), 1.0), 2.0), -1.0), -2.0)) * 0.041666666666666664);}
-inline float zeta_l4_4(float y){
-  return ((y * y * y * y * y * fma(y, fma(y, fma(y, fma(y, 46.0, -207.0), 354.0), -273.0), 80.0)) * 0.041666666666666664);}
-
-
-inline float alpha_M8p(float y){
-  return (fma(y,fma(y,fma(y,fma(y,fma(y,fma(y,fma(-10.0,y, + 21.0), + 28.0), - 105.0), + 70.0), + 35.0), - 56.0), + 17.0) * 0.00029761904761904765);}
-inline float beta_M8p(float y){
-  return (fma(y,fma(y,fma(y,fma(y,fma(y,fma(y,fma(70.0,y, - 175.0), - 140.0), + 770.0), - 560.0), - 350.0), + 504.0), - 102.0) * 0.00029761904761904765);}
-inline float gamma_M8p(float y){
-  return (fma(y,fma(y,fma(y,fma(y,fma(y,fma(y,fma(-210.0,y, + 609.0), + 224.0), - 2135.0), + 910.0), + 2765.0), - 2520.0), + 255.0) * 0.00029761904761904765);}
-inline float delta_M8p(float y){
-  return (fma(y*y, fma(y*y, fma(y*y, fma(70.0,y, - 231.0), + 588.0), - 980.0), + 604.0) * 0.001488095238095238);}
-inline float eta_M8p(float y){
-  return (fma(y,fma(y,fma(y,fma(y,fma(y,fma(y,fma(-70.0,y, 259.0), - 84.0), - 427.0), - 182.0), + 553.0), + 504.0), + 51.0) * 0.001488095238095238);}
-inline float zeta_M8p(float y){
-  return (fma(y,fma(y,fma(y,fma(y,fma(y,fma(y,fma(210.0,y,- 861.0), + 532.0), + 770.0), + 560.0), - 350.0), - 504.0), - 102.0) * 0.00029761904761904765);}
-inline float theta_M8p(float y){
-  return (fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(-70.0, y, 315.0), -280.0), -105.0), -70.0), 35.0), 56.0), 17.0) * 0.00029761904761904765);}
-inline float iota_M8p(float y){
-  return ((y * y * y * y * y * fma(y , fma(10.0 , y ,- 49.0) , 56.0)) * 0.00029761904761904765);}
-
-
-inline float alpha_l6_3(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -89.0, 312.0), -370.0), 140.0), 15.0), 4.0), -12.0)) * 0.001388888888888889);}
-inline float beta_l6_3(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, 623.0, -2183.0), 2581.0), -955.0), -120.0), -54.0), 108.0)) * 0.001388888888888889);}
-inline float gamma_l6_3(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -1869.0, 6546.0), -7722.0), 2850.0), 195.0), 540.0), -540.0)) * 0.001388888888888889);}
-inline float delta_l6_3(float y){
-  return (fma(y * y, fma(y * y, fma(y, fma(y, fma(y, 3115.0, -10905.0), 12845.0), -4795.0), -980.0), 720.0) * 0.001388888888888889);}
-inline float eta_l6_3(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -3115.0, 10900.0), -12830.0), 4880.0), -195.0), 540.0), 540.0)) * 0.001388888888888889);}
-inline float zeta_l6_3(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, 1869.0, -6537.0), 7695.0), -2985.0), 120.0), -54.0), -108.0)) * 0.001388888888888889);}
-inline float theta_l6_3(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -623.0, 2178.0), -2566.0), 1010.0), -15.0), 4.0), 12.0)) * 0.001388888888888889);}
-inline float iota_l6_3(float y){
-  return ((y * y * y * y * fma(y, fma(y, fma(y, 89.0, -311.0), 367.0), -145.0)) * 0.001388888888888889);}
-
-
-inline float alpha_l6_4(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, 290.0, -1305.0), 2231.0), -1718.0), 500.0), -5.0), 15.0), 4.0), -12.0)) * 0.001388888888888889);}
-inline float beta_l6_4(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -2030.0, 9135.0), -15617.0), 12027.0), -3509.0), 60.0), -120.0), -54.0), 108.0)) * 0.001388888888888889);}
-inline float gamma_l6_4(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, 6090.0, -27405.0), 46851.0), -36084.0), 10548.0), -195.0), 195.0), 540.0), -540.0)) * 0.001388888888888889);}
-inline float delta_l6_4(float y){
-  return (fma(y * y, fma(y * y, fma(y, fma(y, fma(y, fma(y, fma(y, -10150.0, 45675.0), -78085.0), 60145.0), -17605.0), 280.0), -980.0), 720.0) * 0.001388888888888889);}
-inline float eta_l6_4(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, 10150.0, -45675.0), 78085.0), -60150.0), 17620.0), -195.0), -195.0), 540.0), 540.0)) * 0.001388888888888889);}
-inline float zeta_l6_4(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -6090.0, 27405.0), -46851.0), 36093.0), -10575.0), 60.0), 120.0), -54.0), -108.0)) * 0.001388888888888889);}
-inline float theta_l6_4(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, 2030.0, -9135.0), 15617.0), -12032.0), 3524.0), -5.0), -15.0), 4.0), 12.0)) * 0.001388888888888889);}
-inline float iota_l6_4(float y){
-  return ((y * y * y * y * y * fma(y, fma(y, fma(y, fma(y, -290.0, 1305.0), -2231.0), 1719.0), -503.0)) * 0.001388888888888889);}
-
-
-inline float alpha_l6_5(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -1006.0, 5533.0), -12285.0), 13785.0), -7829.0), 1803.0), -3.0), -5.0), 15.0), 4.0), -12.0)) * 0.001388888888888889);}
-inline float beta_l6_5(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, 7042.0, -38731.0), 85995.0), -96495.0), 54803.0), -12620.0), 12.0), 60.0), -120.0), -54.0), 108.0)) * 0.001388888888888889);}
-inline float gamma_l6_5(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -21126.0, 116193.0), -257985.0), 289485.0), -164409.0), 37857.0), -15.0), -195.0), 195.0), 540.0), -540.0)) * 0.001388888888888889);}
-inline float delta_l6_5(float y){
-  return (fma(y * y, fma(y * y, fma(y * y, fma(y, fma(y, fma(y, fma(y, fma(y, 35210.0, -193655.0), 429975.0), -482475.0), 274015.0), -63090.0), 280.0), -980.0), 720.0) * 0.001388888888888889);}
-inline float eta_l6_5(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -35210.0, 193655.0), -429975.0), 482475.0), -274015.0), 63085.0), 15.0), -195.0), -195.0), 540.0), 540.0)) * 0.001388888888888889);}
-inline float zeta_l6_5(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, 21126.0, -116193.0), 257985.0), -289485.0), 164409.0), -37848.0), -12.0), 60.0), 120.0), -54.0), -108.0)) * 0.001388888888888889);}
-inline float theta_l6_5(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -7042.0, 38731.0), -85995.0), 96495.0), -54803.0), 12615.0), 3.0), -5.0), -15.0), 4.0), 12.0)) * 0.001388888888888889);}
-inline float iota_l6_5(float y){
-  return ((y * y * y * y * y * y * fma(y, fma(y, fma(y, fma(y, fma(y, 1006.0, -5533.0), 12285.0), -13785.0), 7829.0), -1802.0)) * 0.001388888888888889);}
-
-
-inline float alpha_l6_6(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, 3604.0, -23426.0), 63866.0), -93577.0), 77815.0), -34869.0), 6587.0), 1.0), -3.0), -5.0), 15.0), 4.0), -12.0)) * 0.001388888888888889);}
-inline float beta_l6_6(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -25228.0, 163982.0), -447062.0), 655039.0), -544705.0), 244083.0), -46109.0), -6.0), 12.0), 60.0), -120.0), -54.0), 108.0)) * 0.001388888888888889);}
-inline float gamma_l6_6(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, 75684.0, -491946.0), 1341186.0), -1965117.0), 1634115.0), -732249.0), 138327.0), 15.0), -15.0), -195.0), 195.0), 540.0), -540.0)) * 0.001388888888888889);}
-inline float delta_l6_6(float y){
-  return (fma(y * y, fma(y * y, fma(y * y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -126140.0, 819910.0), -2235310.0), 3275195.0), -2723525.0), 1220415.0), -230545.0), -20.0), 280.0), -980.0), 720.0) * 0.001388888888888889);}
-inline float eta_l6_6(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, 126140.0, -819910.0), 2235310.0), -3275195.0), 2723525.0), -1220415.0), 230545.0), 15.0), 15.0), -195.0), -195.0), 540.0), 540.0)) * 0.001388888888888889);}
-inline float zeta_l6_6(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -75684.0, 491946.0), -1341186.0), 1965117.0), -1634115.0), 732249.0), -138327.0), -6.0), -12.0), 60.0), 120.0), -54.0), -108.0)) * 0.001388888888888889);}
-inline float theta_l6_6(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, 25228.0, -163982.0), 447062.0), -655039.0), 544705.0), -244083.0), 46109.0), 1.0), 3.0), -5.0), -15.0), 4.0), 12.0)) * 0.001388888888888889);}
-inline float iota_l6_6(float y){
-  return ((y * y * y * y * y * y * y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -3604.0, 23426.0), -63866.0), 93577.0), -77815.0), 34869.0), -6587.0)) * 0.001388888888888889);}
-
-
-inline float alpha_l8_4(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -3569.0, 16061.0), -27454.0), 21126.0), -6125.0), 49.0), -196.0), -36.0), 144.0)) * 2.48015873015873e-05);}
-inline float beta_l8_4(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, 32121.0, -144548.0), 247074.0), -190092.0), 55125.0), -672.0), 2016.0), 512.0), -1536.0)) * 2.48015873015873e-05);}
-inline float gamma_l8_4(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -128484.0, 578188.0), -988256.0), 760312.0), -221060.0), 4732.0), -9464.0), -4032.0), 8064.0)) * 2.48015873015873e-05);}
-inline float delta_l8_4(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, 299796.0, -1349096.0), 2305856.0), -1774136.0), 517580.0), -13664.0), 13664.0), 32256.0), -32256.0)) * 2.48015873015873e-05);}
-inline float eta_l8_4(float y){
-  return (fma(y * y, fma(y * y, fma(y, fma(y, fma(y, fma(y, fma(y, -449694.0, 2023630.0), -3458700.0), 2661540.0), -778806.0), 19110.0), -57400.0), 40320.0) * 2.48015873015873e-05);}
-inline float zeta_l8_4(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, 449694.0, -2023616.0), 3458644.0), -2662016.0), 780430.0), -13664.0), -13664.0), 32256.0), 32256.0)) * 2.48015873015873e-05);}
-inline float theta_l8_4(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -299796.0, 1349068.0), -2305744.0), 1775032.0), -520660.0), 4732.0), 9464.0), -4032.0), -8064.0)) * 2.48015873015873e-05);}
-inline float iota_l8_4(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, 128484.0, -578168.0), 988176.0), -760872.0), 223020.0), -672.0), -2016.0), 512.0), 1536.0)) * 2.48015873015873e-05);}
-inline float kappa_l8_4(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -32121.0, 144541.0), -247046.0), 190246.0), -55685.0), 49.0), 196.0), -36.0), -144.0)) * 2.48015873015873e-05);}
-inline float mu_l8_4(float y){
-  return ((y * y * y * y * y * fma(y, fma(y, fma(y, fma(y, 3569.0, -16060.0), 27450.0), -21140.0), 6181.0)) * 2.48015873015873e-05);}
diff --git a/hysop/backend/device/opencl/clpeak.py b/hysop/backend/device/opencl/clpeak.py
index 1cbba49a1b34c66119a67918164e8f8ef94324ae..f0e5f929b05573035c94e066f640cbd6a3f3eba7 100644
--- a/hysop/backend/device/opencl/clpeak.py
+++ b/hysop/backend/device/opencl/clpeak.py
@@ -1,9 +1,11 @@
+try:
+   import cPickle as pickle
+except:
+   import pickle
 
-import tempfile, re, os, warnings, gzip, portalocker
-import subprocess32 as subprocess
+import tempfile, re, os, warnings, gzip, portalocker, subprocess
 from xml.dom import minidom
 from hysop import vprint
-from hysop.deps import pickle
 from hysop.tools.decorators import requires_cmd
 from hysop.tools.types import check_instance, first_not_None
 from hysop.tools.units import bdw2str, flops2str, iops2str
@@ -41,9 +43,9 @@ class ClPeakInfo(object):
     __int_results    = ('int',    'int2',    'int4',    'int8',    'int16')
     __float_results  = ('float',  'float2',  'float4',  'float8',  'float16')
     __double_results = ('double', 'double2', 'double4', 'double8', 'double16')
-        
+
     __cached_datakeys = ('_preferred_exec_params',
-                         '_transfer_bandwidth_values', '_global_bdw_values', 
+                         '_transfer_bandwidth_values', '_global_bdw_values',
                          '_sp_compute_values', '_dp_compute_values', '_int_compute_values')
 
     @classmethod
@@ -57,14 +59,14 @@ class ClPeakInfo(object):
 
     def _update_cache_data(self, key):
         filepath = self._cache_file()
-        cached_data = dict(zip(self.__cached_datakeys, (getattr(self, cdk) 
+        cached_data = dict(zip(self.__cached_datakeys, (getattr(self, cdk)
             for cdk in self.__cached_datakeys)))
         update_cache(filepath, key, cached_data)
-    
+
     def __init__(self, platform_name, device_name,
                        platform_id, device_id, is_cpu,
                        override_cache=False):
-        
+
         self.platform_name = platform_name
         self.device_name   = device_name
         self.platform_id   = platform_id
@@ -83,17 +85,17 @@ class ClPeakInfo(object):
             self._exec_cpu_id  = None
             self._preferred_exec_params = None
 
-            self._transfer_bandwidth_values = None 
+            self._transfer_bandwidth_values = None
             self._gather_transfer_bandwidth_info(is_cpu)
 
             self._global_bdw_values = None
             self._gather_global_bandwidth_info()
 
-            self._sp_compute_values  = None 
+            self._sp_compute_values  = None
             self._dp_compute_values  = None
             self._int_compute_values = None
             self._gather_compute_info()
-            
+
             self._update_cache_data(key)
 
             del self._exec_node_id
@@ -194,7 +196,7 @@ class ClPeakInfo(object):
             try:
                 assert (dev is not None)
                 tag = 'transfer_bandwidth'
-                result_tags = ('enqueuewritebuffer', 'enqueuereadbuffer', 'enqueuemapbuffer', 
+                result_tags = ('enqueuewritebuffer', 'enqueuereadbuffer', 'enqueuemapbuffer',
                                 'memcpy_from_mapped_ptr', 'enqueueunmap', 'memcpy_to_mapped_ptr')
                 bdw = dev.getElementsByTagName(tag)
                 assert len(bdw)==1
@@ -216,10 +218,10 @@ class ClPeakInfo(object):
                 raise
 
         transfer_bandwidth = {}
-        if is_cpu: 
+        if is_cpu:
             self._set_exec_params(None, None)
             try:
-                res = self._exec_clpeak(test='--transfer-bandwidth', 
+                res = self._exec_clpeak(test='--transfer-bandwidth',
                         handle_results=handle_results)
                 transfer_bandwidth[(None,None)] = res
             except (AssertionError,subprocess.CalledProcessError,subprocess.TimeoutExpired):
@@ -231,7 +233,7 @@ class ClPeakInfo(object):
             while (exception_counter < 2):
                 self._set_exec_params(membind=node_id, cpubind=core_id)
                 try:
-                    res = self._exec_clpeak(test='--transfer-bandwidth', 
+                    res = self._exec_clpeak(test='--transfer-bandwidth',
                             handle_results=handle_results, catch=False)
                     transfer_bandwidth[(node_id,core_id)] = res
                     exception_counter = 0
@@ -246,12 +248,12 @@ class ClPeakInfo(object):
                     core_id = 0
                 except AssertionError:
                     break
-            
+
         if len(transfer_bandwidth)>0:
             def criteria(key):
                 values = transfer_bandwidth[key]
                 return -(values['enqueuewritebuffer'] + values['enqueuereadbuffer'])
-            params = sorted(transfer_bandwidth.keys(), key=criteria)
+            params = tuple(sorted(transfer_bandwidth.keys(), key=criteria))
             self._set_exec_params(*params[0])
             self._preferred_exec_params = params[0]
             self._transfer_bandwidth_values = transfer_bandwidth
@@ -259,7 +261,7 @@ class ClPeakInfo(object):
             self._set_exec_params(None, None)
             self._transfer_bandwidth_values = None
             self._preferred_exec_params = (None, None)
-            
+
     def _gather_global_bandwidth_info(self):
         def handle_results(dev, cmd):
             try:
@@ -284,7 +286,7 @@ class ClPeakInfo(object):
                 self._global_bdw_values = None
 
         self._exec_clpeak(test='--global-bandwidth', handle_results=handle_results)
-    
+
     def _gather_compute_info(self):
         def handle_results(ctype):
             def _handle_results(dev, cmd):
@@ -345,7 +347,7 @@ class ClPeakInfo(object):
     def _set_exec_params(self, membind, cpubind):
         self._exec_node_id = membind
         self._exec_cpu_id  = cpubind
-    
+
     @requires_cmd('clpeak')
     @requires_cmd('hwloc-bind')
     def _exec_clpeak(self, test, handle_results, catch=True):
@@ -363,12 +365,12 @@ class ClPeakInfo(object):
                 if (self._exec_node_id is not None):
                     opts += ['--cpubind', 'core:{}'.format(self._exec_cpu_id)]
                 opts += ['--']
-            opts += ['clpeak', _platform, str(self.platform_id), 
-                              _device,   str(self.device_id), 
-                              test, _xml, 
+            opts += ['clpeak', _platform, str(self.platform_id),
+                              _device,   str(self.device_id),
+                              test, _xml,
                               _xml_file, tmp.name]
             cmd = ' '.join(opts)
-            print '[CMD] {}'.format(cmd)
+            print('[CMD] {}'.format(cmd))
             if catch:
                 try:
                     vprint(cmd)
@@ -406,7 +408,7 @@ class ClPeakInfo(object):
                 msg+='expected device \'{}\' but got device \'{}\'.'
                 msg=msg.format(self.device_name, dev_name)
                 raise RuntimeError(msg)
-            
+
             return handle_results(dev=dev, cmd=cmd)
 
 
@@ -415,15 +417,15 @@ class ClPeakInfo(object):
 
 
 class ClPeakStatistics(HardwareStatistics):
-        
+
     def __init__(self, info):
         self._platform_name = None
         self._device_name   = None
         self._counter       = 0
-            
-        self._transfer_bandwidth_values = [] 
+
+        self._transfer_bandwidth_values = []
         self._global_bdw_values = []
-        self._sp_compute_values  = [] 
+        self._sp_compute_values  = []
         self._dp_compute_values  = []
         self._int_compute_values = []
 
@@ -437,7 +439,7 @@ class ClPeakStatistics(HardwareStatistics):
             self._sp_compute_values  += [info.max_sp_compute]
             self._dp_compute_values  += [info.max_dp_compute]
             self._int_compute_values += [info.max_int_compute]
-        
+
     def __iadd__(self, other):
         if (other is None):
             return self
@@ -456,7 +458,7 @@ class ClPeakStatistics(HardwareStatistics):
             assert self._platform_name == other._platform_name
             assert self._device_name == other._device_name
         self._counter += other._counter
-        
+
         self._global_bdw_values  += other._global_bdw_values
         self._sp_compute_values  += other._sp_compute_values
         self._dp_compute_values  += other._dp_compute_values
@@ -472,7 +474,7 @@ class ClPeakStatistics(HardwareStatistics):
         ss += ['{:^14}'.format(self._mean(self._dp_compute_values,  op=flops2str))]
         ss += ['{:^14}'.format(self._mean(self._int_compute_values, op=iops2str))]
         return ' '.join(s.format(ind=ind, inc=inc) for s in ss)
-    
+
     @property
     def has_memory_bandwidth(self):
         return (len(self._global_bdw_values)>0)
@@ -488,7 +490,7 @@ class ClPeakStatistics(HardwareStatistics):
     @property
     def has_integer_compute(self):
         return (len(self._int_compute_values)>0)
-    
+
     @property
     def has_statistics(self):
         return (self.has_memory_bandwidth or self.has_transfer_bandwdith or
diff --git a/hysop/backend/device/opencl/device_config/__init__.py b/hysop/backend/device/opencl/device_config/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/hysop/backend/device/opencl/device_config/config_cayman.py b/hysop/backend/device/opencl/device_config/config_cayman.py
deleted file mode 100644
index a6dd8e322cf1ab6bbdfb2bbf047fccc03bdc8bcd..0000000000000000000000000000000000000000
--- a/hysop/backend/device/opencl/device_config/config_cayman.py
+++ /dev/null
@@ -1,176 +0,0 @@
-"""
-@file config_cayman.py
-
-OpenCL kernels configurations.
-"""
-from hysop.deps import np
-FLOAT_GPU, DOUBLE_GPU = np.float32, np.float64
-
-#build empty dictionaries
-kernels_config = {}
-kernels_config[2] = {FLOAT_GPU: {}, DOUBLE_GPU: {}}
-kernels_config[3] = {FLOAT_GPU: {}, DOUBLE_GPU: {}}
-
-# Copy kernel:
-def copy_space_index_2d(size, t_dim, b_rows, vec):
-    gwi = (int(size[0] / vec), int(b_rows * size[1] / t_dim), 1)
-    lwi = (t_dim / vec, b_rows, 1)
-    return gwi, lwi
-def copy_space_index_3d(size, t_dim, b_rows, vec):
-    gwi = (int(size[0] / vec), int(b_rows * size[1] / t_dim), int(size[2]))
-    lwi = (t_dim / vec, b_rows, 1)
-    return gwi, lwi
-# Configs : sources, tile size, block rows, vector size, index space function
-kernels_config[3][FLOAT_GPU]['copy'] = \
-    ('kernels/copy.cl', 16, 8, 4, copy_space_index_3d)
-kernels_config[3][DOUBLE_GPU]['copy'] = \
-    ('kernels/copy_locMem.cl', 32, 8, 1, copy_space_index_3d)
-kernels_config[2][FLOAT_GPU]['copy'] = \
-    ('kernels/copy.cl', 16, 8, 2, copy_space_index_2d)
-kernels_config[2][DOUBLE_GPU]['copy'] = \
-    ('kernels/copy.cl', 32, 2, 2, copy_space_index_2d)
-
-# Transpositions kernels:
-# XY transposition
-# Settings are taken from destination layout as current layout.
-# gwi is computed form input layout (appears as transposed layout)
-def xy_space_index_2d(size, t_dim, b_rows, vec):
-    gwi = (int(size[1] / vec), int(b_rows * size[0] / t_dim), 1)
-    lwi = (t_dim / vec, b_rows, 1)
-    return gwi, lwi
-def xy_space_index_3d(size, t_dim, b_rows, vec):
-    gwi = (int(size[1] / vec), int(b_rows * size[0] / t_dim), int(size[2]))
-    lwi = (t_dim / vec, b_rows, 1)
-    return gwi, lwi
-# Configs : sources, tile size, block rows, is padding, vector size,
-#              index space function
-kernels_config[3][FLOAT_GPU]['transpose_xy'] = \
-    ('kernels/transpose_xy.cl', 16, 8, True, 2, xy_space_index_3d)
-kernels_config[3][DOUBLE_GPU]['transpose_xy'] = \
-    ('kernels/transpose_xy.cl', 32, 4, True, 4, xy_space_index_3d)
-kernels_config[2][FLOAT_GPU]['transpose_xy'] = \
-    ('kernels/transpose_xy.cl', 32, 8, True, 4, xy_space_index_2d)
-kernels_config[2][DOUBLE_GPU]['transpose_xy'] = \
-    ('kernels/transpose_xy.cl', 32, 2, True, 4, xy_space_index_2d)
-
-# XZ transposition
-# Settings are taken from destination layout as current layout.
-# gwi is computed form input layout (appears as transposed layout)
-def xz_space_index_3d(size, t_dim, b_rows, b_deph, vec):
-    gwi = (int(size[2] / vec), int(b_rows * size[1] / t_dim), int(b_deph * size[0] / t_dim))
-    lwi = (t_dim / vec, b_rows, b_deph)
-    return gwi, lwi
-# Configs : sources, tile size, block rows, block depth, is padding,
-#              vector size, index space function
-kernels_config[3][FLOAT_GPU]['transpose_xz'] = \
-    ('kernels/transpose_xz.cl', 16, 4, 4, True, 1, xy_space_index_3d)
-kernels_config[3][DOUBLE_GPU]['transpose_xz'] = \
-    ('kernels/transpose_xz.cl', 8, 2, 2, False, 1, xy_space_index_3d)
-
-
-def computational_kernels_index_space(size, vec):
-    dim = len(size)
-    if dim == 3:
-        wi = 64
-    if dim == 2:
-        wi = 256
-    # Change work-item regarding problem size
-    if size[0] % wi > 0:
-        if dim == 3:
-            print "Warning : GPU best performances obtained for",
-            print "problem sizes multiples of 64"
-        else:
-            print "Warning : GPU best performances obtained for",
-            print "problem sizes multiples of 256"
-    while(size[0] % wi > 0):
-        wi = wi / 2
-    # Change work-item regarding vector_width
-    if wi * vec > size[0]:
-        if size[0] % vec > 0:
-            raise ValueError(
-                "Resolution ({0}) must be a multiple of {1}".format(
-                    size[0], vec))
-        wi = size[0] // vec
-    if dim == 3:
-        gwi = (int(wi), int(size[1]), int(size[2]))
-        lwi = (int(wi), 1, 1)
-    else:
-        gwi = (int(wi), int(size[1]))
-        lwi = (int(wi), 1)
-    return gwi, lwi
-
-# Advection kernel
-# Configs sources, is noBC, vector size, index space function
-kernels_config[3][FLOAT_GPU]['advec'] = \
-    (["common.cl",
-      "advection/velocity_cache.cl", "advection/builtin_RKN.cl",
-      "kernels/advection.cl"],
-     False, 4, computational_kernels_index_space)
-kernels_config[3][DOUBLE_GPU]['advec'] = \
-    (["common.cl",
-      "advection/velocity_cache.cl", "advection/builtin_RKN.cl",
-      "kernels/advection.cl"],
-     False, 2, computational_kernels_index_space)
-kernels_config[2][FLOAT_GPU]['advec'] = \
-    (["common.cl",
-      "advection/velocity_cache.cl", "advection/builtin_RKN.cl",
-      "kernels/advection.cl"],
-     False, 4, computational_kernels_index_space)
-kernels_config[2][DOUBLE_GPU]['advec'] = \
-    (["common.cl",
-      "advection/velocity_cache.cl", "advection/builtin_RKN_noVec.cl",
-      "kernels/advection_noVec.cl"],
-     False, 1, computational_kernels_index_space)
-
-# Remeshing kernel
-# Configs sources, is noBC, vector size, index space function
-kernels_config[3][FLOAT_GPU]['remesh'] = \
-    (["common.cl",
-      "remeshing/weights_builtin.cl", "remeshing/private.cl",
-      "kernels/remeshing.cl"],
-     False, 4, computational_kernels_index_space)
-kernels_config[3][DOUBLE_GPU]['remesh'] = \
-    (["common.cl",
-      "remeshing/weights_builtin.cl", "remeshing/private.cl",
-      "kernels/remeshing.cl"],
-     False, 4, computational_kernels_index_space)
-kernels_config[2][FLOAT_GPU]['remesh'] = \
-    (["common.cl",
-      "remeshing/weights_noVec_builtin.cl", "remeshing/basic.cl",
-      "kernels/remeshing.cl"],
-     True, 4, computational_kernels_index_space)
-kernels_config[2][DOUBLE_GPU]['remesh'] = \
-    (["common.cl",
-      "remeshing/weights_noVec_builtin.cl", "remeshing/basic.cl",
-      "kernels/remeshing.cl"],
-     True, 4, computational_kernels_index_space)
-
-# Advection and remeshing kernel
-# Configs sources, is noBC, vector size, index space function
-kernels_config[3][FLOAT_GPU]['advec_and_remesh'] = \
-    (["common.cl",
-      "remeshing/weights_builtin.cl", "remeshing/private.cl",
-      "advection/velocity_cache.cl","advection/builtin_RKN.cl",
-      "kernels/advection_and_remeshing.cl"],
-     False, 4, computational_kernels_index_space)
-kernels_config[3][DOUBLE_GPU]['advec_and_remesh'] = \
-    (["common.cl",
-      "remeshing/weights_builtin.cl", "remeshing/private.cl",
-      "advection/velocity_cache.cl", "advection/builtin_RKN.cl",
-      "kernels/advection_and_remeshing.cl"],
-     True, 4, computational_kernels_index_space)
-kernels_config[2][FLOAT_GPU]['advec_and_remesh'] = \
-    (["common.cl",
-      "remeshing/weights_noVec_builtin.cl", "remeshing/basic.cl",
-      "advection/velocity_cache.cl", "advection/builtin_RKN.cl",
-      "kernels/advection_and_remeshing.cl"],
-     True, 8, computational_kernels_index_space)
-kernels_config[2][DOUBLE_GPU]['advec_and_remesh'] = \
-    (["common.cl",
-      "remeshing/weights_noVec_builtin.cl", "remeshing/basic.cl",
-      "advection/velocity_cache.cl", "advection/builtin_RKN.cl",
-      "kernels/advection_and_remeshing.cl"],
-     True, 4, computational_kernels_index_space)
-
-
-
diff --git a/hysop/backend/device/opencl/device_config/config_default.py b/hysop/backend/device/opencl/device_config/config_default.py
deleted file mode 100644
index 79f224a7e7204f658bf81113b0c163178eca4c44..0000000000000000000000000000000000000000
--- a/hysop/backend/device/opencl/device_config/config_default.py
+++ /dev/null
@@ -1,248 +0,0 @@
-"""
-@file config_default.py
-
-OpenCL kernels default configurations.
-"""
-from hysop.deps import np
-FLOAT_GPU, DOUBLE_GPU = np.float32, np.float64
-MAX_GWI = (256, 256, 256)
-
-#build empty dictionaries
-kernels_config = {}
-kernels_config[2] = {FLOAT_GPU: {}, DOUBLE_GPU: {}}
-kernels_config[3] = {FLOAT_GPU: {}, DOUBLE_GPU: {}}
-
-def _clamp_max(w, m):
-    while w > m:
-        w /= 2
-    return int(w)
-
-
-def check_max(t_gwi):
-    return tuple([_clamp_max(w, m) for w, m in zip(t_gwi, MAX_GWI)])
-
-# Transpositions kernels:
-# XY transposition
-# Settings are taken from destination layout as current layout.
-# gwi is computed form input layout (appears as transposed layout)
-def xy_space_index_2d(size, t_dim, b_rows, vec):
-    gwi = check_max((size[1] / vec, b_rows * size[0] / t_dim, 1))
-    lwi = (t_dim / vec, b_rows, 1)
-    blocs_nb = ((size[1] / vec) / lwi[0],
-                (b_rows * size[0] / t_dim) / lwi[1], None)
-    return gwi, lwi, blocs_nb
-def xy_space_index_3d(size, t_dim, b_rows, vec):
-    gwi = check_max((size[1] / vec, b_rows * size[0] / t_dim, size[2]))
-    lwi = (t_dim / vec, b_rows, 1)
-    blocs_nb = ((size[1] / vec) / lwi[0],
-                (b_rows * size[0] / t_dim) / lwi[1], None)
-    return gwi, lwi, blocs_nb
-# Configs : sources, tile size, block rows, is padding, vector size,
-#              index space function
-kernels_config[3][FLOAT_GPU]['transpose_xy'] = \
-    ('kernels/transpose_xy_noVec.cl', 32, 2, True, 1, xy_space_index_3d)
-kernels_config[3][DOUBLE_GPU]['transpose_xy'] = \
-    ('kernels/transpose_xy_noVec.cl', 32, 2, True, 1, xy_space_index_3d)
-kernels_config[2][FLOAT_GPU]['transpose_xy'] = \
-    ('kernels/transpose_xy_noVec.cl', 32, 2, True, 1, xy_space_index_2d)
-kernels_config[2][DOUBLE_GPU]['transpose_xy'] = \
-    ('kernels/transpose_xy_noVec.cl', 32, 2, True, 1, xy_space_index_2d)
-
-# XZ transposition
-# Settings are taken from destination layout as current layout.
-# gwi is computed form input layout (appears as transposed layout)
-def xz_space_index_3d(size, t_dim, b_rows, b_deph, vec):
-    gwi = check_max((size[2] / vec, size[1], b_deph * size[0] / t_dim))
-    lwi = (t_dim / vec, 1, b_deph)
-    blocs_nb = (((size[2]) / vec) / lwi[0], None,
-                (b_deph * (size[0]) / t_dim) / lwi[2])
-    return gwi, lwi, blocs_nb
-# Configs : sources, tile size, block rows, is padding, vector size,
-#              index space function
-kernels_config[3][FLOAT_GPU]['transpose_xz'] = \
-    ('kernels/transpose_xz_slice_noVec.cl', 32, 1, 2, True, 1, xz_space_index_3d)
-kernels_config[3][DOUBLE_GPU]['transpose_xz'] = \
-    ('kernels/transpose_xz_slice_noVec.cl', 32, 1, 2, True, 1, xz_space_index_3d)
-
-def computational_kernels_index_space(wi, size, vec):
-    # Change work-item regarding vector_width
-    if wi * vec > size[0]:
-        if size[0] % vec > 0:
-            raise ValueError(
-                "Resolution ({0}) must be a multiple of {1}".format(
-                    size[0], vec))
-        wi = size[0] // vec
-
-    if len(size) == 3:
-        gwi = (int(wi),
-               _clamp_max(size[1], MAX_GWI[1]),
-               _clamp_max(size[2], MAX_GWI[2]))
-        lwi = (int(wi), 1, 1)
-    else:
-        gwi = (int(wi), _clamp_max(size[1], MAX_GWI[1]), 1)
-        lwi = (int(wi), 1, 1)
-    return gwi, lwi
-
-def advection_index_space_3d(size, vec):
-    wi = min(max(32, size[0] / 2), 128)
-    return computational_kernels_index_space(wi, size, vec)
-def advection_index_space_2d_SP(size, vec):
-    wi = min(max(32, size[0] / 2), 128)
-    return computational_kernels_index_space(wi, size, vec)
-def advection_index_space_2d_DP(size, vec):
-    wi = min(max(32, size[0] / 2), 128)
-    return computational_kernels_index_space(wi, size, vec)
-
-def remeshing_index_space_3d(size, vec):
-    wi = min(max(32, size[0] / 2), 128)
-    return computational_kernels_index_space(wi, size, vec)
-def remeshing_index_space_2d(size, vec):
-    wi = min(max(32, size[0] / 2), 128)
-    return computational_kernels_index_space(wi, size, vec)
-
-def advection_and_remeshing_index_space(size, vec):
-    wi = min(size[0] / 2, 128)
-    return computational_kernels_index_space(wi, size, vec)
-
-
-# Advection kernel
-# Configs sources, is noBC, vector size, index space function
-kernels_config[3][FLOAT_GPU]['advec'] = \
-    (["common.cl", "advection/velocity_cache_noVec.cl",
-      "advection/builtin_RKN_noVec.cl", "kernels/advection_noVec.cl"],
-     False, 1, advection_index_space_3d)
-kernels_config[3][DOUBLE_GPU]['advec'] = \
-    (["common.cl", "advection/velocity_cache_noVec.cl",
-      "advection/builtin_RKN_noVec.cl", "kernels/advection_noVec.cl"],
-     False, 1, advection_index_space_3d)
-kernels_config[2][FLOAT_GPU]['advec'] = \
-    (["common.cl", "advection/velocity_cache_noVec.cl",
-      "advection/builtin_RKN_noVec.cl", "kernels/advection_noVec.cl"],
-     False, 1, advection_index_space_2d_SP)
-kernels_config[2][DOUBLE_GPU]['advec'] = \
-    (["common.cl", "advection/velocity_cache_noVec.cl",
-      "advection/builtin_RKN_noVec.cl", "kernels/advection_noVec.cl"],
-     False, 1, advection_index_space_2d_DP)
-
-# Remeshing kernel
-# Configs sources, is noBC, vector size, index space function
-kernels_config[3][FLOAT_GPU]['remesh'] = \
-    (["common.cl", "remeshing/weights_noVec_builtin.cl",
-      "remeshing/basic_noVec.cl", "kernels/remeshing_noVec.cl"],
-     False, 1, remeshing_index_space_3d)
-kernels_config[3][DOUBLE_GPU]['remesh'] = \
-    (["common.cl", "remeshing/weights_noVec_builtin.cl",
-      "remeshing/basic_noVec.cl", "kernels/remeshing_noVec.cl"],
-     False, 1, remeshing_index_space_3d)
-kernels_config[2][FLOAT_GPU]['remesh'] = \
-    (["common.cl", "remeshing/weights_noVec_builtin.cl",
-      "remeshing/basic_noVec.cl", "kernels/remeshing_noVec.cl"],
-     False, 1, remeshing_index_space_2d)
-kernels_config[2][DOUBLE_GPU]['remesh'] = \
-    (["common.cl", "remeshing/weights_noVec_builtin.cl",
-      "remeshing/basic_noVec.cl", "kernels/remeshing_noVec.cl"],
-     False, 1, remeshing_index_space_2d)
-
-
-# Advection and remeshing kernel
-# Configs sources, is noBC, vector size, index space function
-kernels_config[3][FLOAT_GPU]['advec_and_remesh'] = \
-    (["common.cl",
-      "remeshing/weights_noVec_builtin.cl", "remeshing/private_noVec.cl",
-      "advection/velocity_cache_noVec.cl", "advection/builtin_RKN_noVec.cl",
-      "kernels/advection_and_remeshing_noVec.cl"],
-     False, 1, advection_and_remeshing_index_space)
-kernels_config[3][DOUBLE_GPU]['advec_and_remesh'] = \
-    (["common.cl",
-      "remeshing/weights_noVec_builtin.cl", "remeshing/private_noVec.cl",
-      "advection/velocity_cache_noVec.cl", "advection/builtin_RKN_noVec.cl",
-      "kernels/advection_and_remeshing_noVec.cl"],
-     False, 1, advection_and_remeshing_index_space)
-kernels_config[2][FLOAT_GPU]['advec_and_remesh'] = \
-    (["common.cl",
-      "remeshing/weights_noVec_builtin.cl", "remeshing/private_noVec.cl",
-      "advection/velocity_cache_noVec.cl", "advection/builtin_RKN_noVec.cl",
-      "kernels/advection_and_remeshing_noVec.cl"],
-     False, 1, advection_and_remeshing_index_space)
-kernels_config[2][DOUBLE_GPU]['advec_and_remesh'] = \
-    (["common.cl",
-      "remeshing/weights_noVec_builtin.cl", "remeshing/private_noVec.cl",
-      "advection/velocity_cache_noVec.cl", "advection/builtin_RKN_noVec.cl",
-      "kernels/advection_and_remeshing_noVec.cl"],
-     False, 1, advection_and_remeshing_index_space)
-
-
-def diffusion_space_index_3d(size, nb_part, tile):
-    gwi = check_max((size[0], size[1] / nb_part))
-    lwi = (tile, tile / nb_part)
-    blocs_nb = (size[0] / tile, size[1] / tile)
-    return gwi, lwi, blocs_nb
-
-
-kernels_config[3][FLOAT_GPU]['diffusion'] = \
-    (["common.cl", "kernels/diffusion.cl"],
-     16, 1, 1, diffusion_space_index_3d)
-
-
-kernels_config[3][DOUBLE_GPU]['advec_comm'] = \
-    (['common.cl', 'kernels/comm_advection_noVec.cl'],
-     False, 1, advection_index_space_3d)
-kernels_config[3][DOUBLE_GPU]['advec_MS_comm'] = \
-    (['common.cl', "remeshing/weights_noVec_builtin.cl",
-      'kernels/comm_MS_advection_noVec.cl'],
-     False, 1, advection_index_space_3d)
-kernels_config[3][DOUBLE_GPU]['remesh_comm'] = \
-    (['common.cl', 'remeshing/weights_noVec.cl',
-      'kernels/comm_remeshing_noVec.cl'],
-     False, 1, remeshing_index_space_3d)
-kernels_config[3][DOUBLE_GPU]['advec_and_remesh_comm'] = \
-    (['common.cl', 'remeshing/weights_noVec.cl',
-      'kernels/comm_advection_and_remeshing_noVec.cl'],
-     False, 1, advection_and_remeshing_index_space)
-kernels_config[3][DOUBLE_GPU]['advec_MS_and_remesh_comm'] = \
-    (['common.cl', 'remeshing/weights_noVec.cl',
-      'kernels/comm_advection_MS_and_remeshing_noVec.cl'],
-     False, 1, advection_and_remeshing_index_space)
-
-
-kernels_config[3][FLOAT_GPU]['advec_comm'] = \
-    (['common.cl', 'kernels/comm_advection_noVec.cl'],
-     False, 1, advection_index_space_3d)
-kernels_config[3][FLOAT_GPU]['advec_MS_comm'] = \
-    (['common.cl', "remeshing/weights_noVec_builtin.cl",
-      'kernels/comm_MS_advection_noVec.cl'],
-     False, 1, advection_index_space_3d)
-kernels_config[3][FLOAT_GPU]['remesh_comm'] = \
-    (['common.cl', 'remeshing/weights_noVec.cl',
-      'kernels/comm_remeshing_noVec.cl'],
-     False, 1, remeshing_index_space_3d)
-kernels_config[3][FLOAT_GPU]['advec_and_remesh_comm'] = \
-    (['common.cl', 'remeshing/weights_noVec.cl',
-      'kernels/comm_advection_and_remeshing_noVec.cl'],
-     False, 1, advection_and_remeshing_index_space)
-kernels_config[3][FLOAT_GPU]['advec_MS_and_remesh_comm'] = \
-    (['common.cl', 'remeshing/weights_noVec.cl',
-      'kernels/comm_advection_MS_and_remeshing_noVec.cl'],
-     False, 1, advection_and_remeshing_index_space)
-
-
-def fine_to_coarse_filter_index_space(size, stencil_width):
-    wg = size[0] / (2 * stencil_width)
-    return ((wg, size[1] / stencil_width, size[2] / stencil_width),
-            (wg, 1, 1))
-
-
-kernels_config[3][FLOAT_GPU]['fine_to_coarse_filter'] = \
-    (["common.cl", 'remeshing/weights_noVec.cl',
-      "kernels/fine_to_coarse_filter.cl"],
-     1, fine_to_coarse_filter_index_space)
-
-
-def multiphase_baroclinic_index_space(size, tile):
-    wg = (tile, tile, 1)
-    ws = (int(size[0]), int(size[1]), 1)
-    return ws, wg
-
-kernels_config[3][FLOAT_GPU]['multiphase_baroclinic'] = \
-    (["common.cl", "kernels/multiphase_baroclinic_rhs.cl"],
-     8, 1, multiphase_baroclinic_index_space)
diff --git a/hysop/backend/device/opencl/device_config/config_k20m.py b/hysop/backend/device/opencl/device_config/config_k20m.py
deleted file mode 100644
index f0d3857ab56b6b337a33f7dcfd1313ecc776ad23..0000000000000000000000000000000000000000
--- a/hysop/backend/device/opencl/device_config/config_k20m.py
+++ /dev/null
@@ -1,245 +0,0 @@
-"""
-@file config_k20m.py
-
-OpenCL kernels configurations.
-"""
-from hysop.deps import np
-FLOAT_GPU, DOUBLE_GPU = np.float32, np.float64
-MAX_GWI = (1024, 1024, 1024)
-
-
-def _clamp_max(w, m):
-    while w > m:
-        w /= 2
-    return int(w)
-
-
-def check_max(t_gwi):
-    return tuple([_clamp_max(w, m) for w, m in zip(t_gwi, MAX_GWI)])
-
-
-#build empty dictionaries
-kernels_config = {}
-kernels_config[2] = {FLOAT_GPU: {}, DOUBLE_GPU: {}}
-kernels_config[3] = {FLOAT_GPU: {}, DOUBLE_GPU: {}}
-
-# Transpositions kernels:
-# XY transposition
-# Settings are taken from destination layout as current layout.
-# gwi is computed form input layout (appears as transposed layout)
-def xy_space_index_2d(size, t_dim, b_rows, vec):
-    gwi = check_max((size[1] / vec, b_rows * size[0] / t_dim, 1))
-    lwi = (t_dim / vec, b_rows, 1)
-    blocs_nb = ((size[1] / vec) / lwi[0],
-                (b_rows * size[0] / t_dim) / lwi[1], None)
-    return gwi, lwi, blocs_nb
-def xy_space_index_3d(size, t_dim, b_rows, vec):
-    gwi = check_max((size[1] / vec, b_rows * size[0] / t_dim, size[2]))
-    lwi = (t_dim / vec, b_rows, 1)
-    block_nb = ((size[1] / vec) / lwi[0],
-                (b_rows * size[0] / t_dim) / lwi[1], None)
-    return gwi, lwi, block_nb
-# Configs : sources, tile size, block rows, is padding, vector size,
-#              index space function
-kernels_config[3][FLOAT_GPU]['transpose_xy'] = \
-    ('kernels/transpose_xy_noVec.cl', 32, 4, True, 1, xy_space_index_3d)
-kernels_config[3][DOUBLE_GPU]['transpose_xy'] = \
-    ('kernels/transpose_xy_noVec.cl', 32, 16, True, 1, xy_space_index_3d)
-kernels_config[2][FLOAT_GPU]['transpose_xy'] = \
-    ('kernels/transpose_xy_noVec.cl', 32, 2, True, 1, xy_space_index_2d)
-kernels_config[2][DOUBLE_GPU]['transpose_xy'] = \
-    ('kernels/transpose_xy_noVec.cl', 32, 8, True, 1, xy_space_index_2d)
-
-# XZ transposition
-# Settings are taken from destination layout as current layout.
-# gwi is computed form input layout (appears as transposed layout)
-def xz_space_index_3d(size, t_dim, b_rows, b_deph, vec):
-    gwi = check_max((size[2] / vec, size[1], b_deph * size[0] / t_dim))
-    lwi = (t_dim / vec, 1, b_deph)
-    blocs_nb = ((size[2] / vec) / lwi[0], None,
-                (b_deph * size[0] / t_dim) / lwi[2])
-    return gwi, lwi, blocs_nb
-# Configs : sources, tile size, block rows, is padding, vector size,
-#              index space function
-kernels_config[3][FLOAT_GPU]['transpose_xz'] = \
-    ('kernels/transpose_xz_slice_noVec.cl', 32, 1, 8, True, 1, xz_space_index_3d)
-kernels_config[3][DOUBLE_GPU]['transpose_xz'] = \
-    ('kernels/transpose_xz_slice_noVec.cl', 32, 1, 8, True, 1, xz_space_index_3d)
-
-def computational_kernels_index_space(wi, size, vec):
-    # Change work-item regarding vector_width
-    if wi * vec > size[0]:
-        if size[0] % vec > 0:
-            raise ValueError(
-                "Resolution ({0}) must be a multiple of {1}".format(
-                    size[0], vec))
-        wi = size[0] // vec
-
-    if len(size) == 3:
-        gwi = (int(wi),
-               _clamp_max(size[1], MAX_GWI[1]),
-               _clamp_max(size[2], MAX_GWI[2]))
-        lwi = (int(wi), 1, 1)
-    else:
-        gwi = (int(wi), _clamp_max(size[1], MAX_GWI[1]))
-        lwi = (int(wi), 1)
-    return gwi, lwi
-
-def advection_index_space_3d(size, vec):
-    wi = min(size[0] / 4, 1024)
-    return computational_kernels_index_space(wi, size, vec)
-def advection_index_space_2d_SP(size, vec):
-    wi = min(size[0] / 8, 1024)
-    return computational_kernels_index_space(wi, size, vec)
-def advection_index_space_2d_DP(size, vec):
-    wi = min(size[0] / 4, 1024)
-    return computational_kernels_index_space(wi, size, vec)
-
-def remeshing_index_space_3d(size, vec):
-    wi = min(size[0] / 2, 1024)
-    return computational_kernels_index_space(wi, size, vec)
-def remeshing_index_space_2d(size, vec):
-    wi = min(size[0] / 4, 1024)
-    return computational_kernels_index_space(wi, size, vec)
-
-def advection_and_remeshing_index_space(size, vec):
-    wi = min(size[0] / 2, 1024)
-    return computational_kernels_index_space(wi, size, vec)
-
-
-# Advection kernel
-# Configs sources, is noBC, vector size, index space function
-kernels_config[3][FLOAT_GPU]['advec'] = \
-    (["common.cl",
-      "advection/velocity_cache_noVec.cl", "advection/builtin_RKN_noVec.cl",
-      "kernels/advection_noVec.cl"],
-     False, 1, advection_index_space_3d)
-kernels_config[3][DOUBLE_GPU]['advec'] = \
-    (["common.cl",
-      "advection/velocity_cache_noVec.cl", "advection/builtin_RKN_noVec.cl",
-      "kernels/advection_noVec.cl"],
-     False, 1, advection_index_space_3d)
-kernels_config[2][FLOAT_GPU]['advec'] = \
-    (["common.cl",
-      "advection/velocity_cache_noVec.cl", "advection/builtin_RKN_noVec.cl",
-      "kernels/advection_noVec.cl"],
-     False, 1, advection_index_space_2d_SP)
-kernels_config[2][DOUBLE_GPU]['advec'] = \
-    (["common.cl",
-      "advection/velocity_cache_noVec.cl", "advection/builtin_RKN_noVec.cl",
-      "kernels/advection_noVec.cl"],
-     False, 1, advection_index_space_2d_DP)
-
-# Remeshing kernel
-# Configs sources, is noBC, vector size, index space function
-kernels_config[3][FLOAT_GPU]['remesh'] = \
-    (["common.cl",
-      "remeshing/weights_noVec_builtin.cl", "remeshing/basic_noVec.cl",
-      "kernels/remeshing_noVec.cl"],
-     False, 1, remeshing_index_space_3d)
-kernels_config[3][DOUBLE_GPU]['remesh'] = \
-    (["common.cl",
-      "remeshing/weights_noVec_builtin.cl", "remeshing/basic_noVec.cl",
-      "kernels/remeshing_noVec.cl"],
-     False, 1, remeshing_index_space_3d)
-kernels_config[2][FLOAT_GPU]['remesh'] = \
-    (["common.cl",
-      "remeshing/weights_noVec_builtin.cl", "remeshing/basic.cl",
-      "kernels/remeshing.cl"],
-     True, 2, remeshing_index_space_2d)
-kernels_config[2][DOUBLE_GPU]['remesh'] = \
-    (["common.cl",
-      "remeshing/weights_noVec_builtin.cl", "remeshing/basic.cl",
-      "kernels/remeshing.cl"],
-     True, 2, remeshing_index_space_2d)
-
-
-# Advection and remeshing kernel
-# Configs sources, is noBC, vector size, index space function
-kernels_config[3][FLOAT_GPU]['advec_and_remesh'] = \
-    (["common.cl",
-      "remeshing/weights_noVec_builtin.cl", "remeshing/private_noVec.cl",
-      "advection/velocity_cache_noVec.cl", "advection/builtin_RKN_noVec.cl",
-      "kernels/advection_and_remeshing_noVec.cl"],
-     False, 1, advection_and_remeshing_index_space)
-kernels_config[3][DOUBLE_GPU]['advec_and_remesh'] = \
-    (["common.cl",
-      "remeshing/weights_noVec_builtin.cl", "remeshing/private_noVec.cl",
-      "advection/velocity_cache_noVec.cl", "advection/builtin_RKN_noVec.cl",
-      "kernels/advection_and_remeshing_noVec.cl"],
-     False, 1, advection_and_remeshing_index_space)
-kernels_config[2][FLOAT_GPU]['advec_and_remesh'] = \
-    (["common.cl",
-      "remeshing/weights_noVec_builtin.cl", "remeshing/private_noVec.cl",
-      "advection/velocity_cache_noVec.cl", "advection/builtin_RKN_noVec.cl",
-      "kernels/advection_and_remeshing_noVec.cl"],
-     False, 1, advection_and_remeshing_index_space)
-kernels_config[2][DOUBLE_GPU]['advec_and_remesh'] = \
-    (["common.cl",
-      "remeshing/weights_noVec_builtin.cl", "remeshing/private_noVec.cl",
-      "advection/velocity_cache_noVec.cl", "advection/builtin_RKN_noVec.cl",
-      "kernels/advection_and_remeshing_noVec.cl"],
-     False, 1, advection_and_remeshing_index_space)
-
-
-def diffusion_space_index_3d(size, nb_part, tile):
-    gwi = check_max((size[0], size[1] / nb_part))
-    lwi = (tile, tile / nb_part)
-    blocs_nb = (size[0] / tile, size[1] / tile)
-    return gwi, lwi, blocs_nb
-
-
-kernels_config[3][DOUBLE_GPU]['diffusion'] = \
-    (["common.cl", "kernels/diffusion.cl"],
-     16, 4, 1, diffusion_space_index_3d)
-
-
-kernels_config[3][DOUBLE_GPU]['advec_comm'] = \
-    (['common.cl', 'kernels/comm_advection_noVec.cl'],
-     False, 1, advection_index_space_3d)
-kernels_config[3][DOUBLE_GPU]['advec_MS_comm'] = \
-    (['common.cl', "remeshing/weights_noVec_builtin.cl",
-      'kernels/comm_MS_advection_noVec.cl'],
-     False, 1, advection_index_space_3d)
-kernels_config[3][DOUBLE_GPU]['remesh_comm'] = \
-    (['common.cl', 'remeshing/weights_noVec.cl',
-      'kernels/comm_remeshing_noVec.cl'],
-     False, 1, remeshing_index_space_3d)
-kernels_config[3][DOUBLE_GPU]['advec_and_remesh_comm'] = \
-    (['common.cl', 'remeshing/weights_noVec.cl',
-      'kernels/comm_advection_and_remeshing_noVec.cl'],
-     False, 1, advection_and_remeshing_index_space)
-kernels_config[3][DOUBLE_GPU]['advec_MS_and_remesh_comm'] = \
-    (['common.cl', 'remeshing/weights_noVec.cl',
-      'kernels/comm_advection_MS_and_remeshing_noVec.cl'],
-     False, 1, advection_and_remeshing_index_space)
-
-
-def fine_to_coarse_filter_index_space(size, stencil_width):
-    wg = size[0] / (2 * stencil_width)
-    return ((wg, size[1] / stencil_width, size[2] / stencil_width),
-            (wg, 1, 1))
-
-
-kernels_config[3][FLOAT_GPU]['fine_to_coarse_filter'] = \
-    (["common.cl", 'remeshing/weights_noVec.cl',
-      "kernels/fine_to_coarse_filter.cl"],
-     1, fine_to_coarse_filter_index_space)
-kernels_config[3][DOUBLE_GPU]['fine_to_coarse_filter'] = \
-    (["common.cl", 'remeshing/weights_noVec.cl',
-      "kernels/fine_to_coarse_filter.cl"],
-     1, fine_to_coarse_filter_index_space)
-
-
-
-def multiphase_baroclinic_index_space(size, tile):
-    wg = (tile, tile, 1)
-    ws = (int(size[0]), int(size[1]), 1)
-    return ws, wg
-
-kernels_config[3][FLOAT_GPU]['multiphase_baroclinic'] = \
-    (["common.cl", "kernels/multiphase_baroclinic_rhs.cl"],
-     8, 1, multiphase_baroclinic_index_space)
-kernels_config[3][DOUBLE_GPU]['multiphase_baroclinic'] = \
-    (["common.cl", "kernels/multiphase_baroclinic_rhs.cl"],
-     8, 1, multiphase_baroclinic_index_space)
diff --git a/hysop/backend/device/opencl/opencl_allocator.py b/hysop/backend/device/opencl/opencl_allocator.py
index 44d8f8b0c7b6ee5fc8232ac7042cdacb156d9e00..696258758521f66ba6210346e836e7e3d52f29b4 100644
--- a/hysop/backend/device/opencl/opencl_allocator.py
+++ b/hysop/backend/device/opencl/opencl_allocator.py
@@ -1,15 +1,17 @@
-
 from abc import ABCMeta, abstractmethod
-from hysop.deps import np
+import numpy as np
+
 from hysop.backend.device.opencl import cl, cl_api
 from hysop.core.memory.allocator import AllocatorBase
 from hysop.backend.device.opencl.opencl_buffer import OpenClBuffer
 
-class OpenClAllocator(AllocatorBase):
+class OpenClAllocator(AllocatorBase, metaclass=ABCMeta):
     """
     Base class for OpenCl backend allocators.
     """
-    __metaclass__=ABCMeta
+
+    def __new__(cls, queue, mem_flags=cl.mem_flags.READ_WRITE, verbose=None):
+        return super(OpenClAllocator, cls).__new__(cls, verbose=verbose)
 
     def __init__(self, queue, mem_flags=cl.mem_flags.READ_WRITE, verbose=None):
         super(OpenClAllocator, self).__init__(verbose=verbose)
@@ -25,7 +27,7 @@ class OpenClAllocator(AllocatorBase):
     def max_alloc_size(self):
         """Max allocatable size in bytes."""
         return self._max_alloc_size
-    
+
     def get_queue(self):
         return self._queue
     def get_context(self):
@@ -47,10 +49,10 @@ class OpenClAllocator(AllocatorBase):
         """
         super(OpenClAllocator, self).allocate(nbytes=nbytes, **kwds)
         try:
-            return self._allocate_impl(nbytes=nbytes) 
+            return self._allocate_impl(nbytes=nbytes)
         except cl.Error as e:
             raise MemoryError(str(e))
-    
+
     def is_on_host(self):
         """
         Return true if buffers are allocated in host memory.
@@ -71,7 +73,7 @@ class OpenClDeferredAllocator(OpenClAllocator):
     is_deferred = True
 
     def _allocate_impl(self, nbytes):
-        assert isinstance(nbytes, (long,int))
+        assert isinstance(nbytes, int)
         return OpenClBuffer(context=self.context, mem_flags=self.mem_flags, size=nbytes)
 
 
@@ -84,7 +86,7 @@ class OpenClImmediateAllocator(OpenClAllocator):
     _zero = np.array([0, 0, 0, 0], dtype=np.int8)
 
     def _allocate_impl(self, nbytes):
-        assert isinstance(nbytes, (long,int))
+        assert isinstance(nbytes, int)
         buf = OpenClBuffer(context=self.context, mem_flags=self.mem_flags, size=nbytes)
 
         try:
@@ -98,7 +100,7 @@ class OpenClImmediateAllocator(OpenClAllocator):
             raise MemoryError(str(e))
 
         return buf
-   
+
     def memory_pool(self, name, **kwds):
         """
         Construct a memory pool from this allocator.
@@ -107,4 +109,4 @@ class OpenClImmediateAllocator(OpenClAllocator):
         if isinstance(self, MemoryPool):
             msg='allocator is already a memory pool.'
             raise RuntimeError(msg)
-        return OpenClMemoryPool(allocator=self, name=name, verbose=None, **kwds) 
+        return OpenClMemoryPool(allocator=self, name=name, verbose=None, **kwds)
diff --git a/hysop/backend/device/opencl/opencl_array.py b/hysop/backend/device/opencl/opencl_array.py
index 42df5ec55a7474bf3d93c4a8cb4da618f547881c..7e86a24583f39189afaadc3b43db3f09dc7a97d4 100644
--- a/hysop/backend/device/opencl/opencl_array.py
+++ b/hysop/backend/device/opencl/opencl_array.py
@@ -1,5 +1,3 @@
-
-
 import numpy as np
 from hysop.tools.types import check_instance, first_not_None, to_tuple
 from hysop.tools.numpywrappers import slices_empty
@@ -10,6 +8,7 @@ from hysop.core.arrays import MemoryType, MemoryOrdering
 from hysop.core.arrays import default_order
 from hysop.core.arrays.array import Array
 
+
 class OpenClArray(Array):
     """
     OpenCl memory array wrapper (pyopencl.array.Array).
@@ -22,37 +21,37 @@ class OpenClArray(Array):
         handle: pyopencl.array.Array, implementation of this array
         kargs: arguments for base classes.
         """
-        
+
         if not isinstance(handle, clArray.Array):
-            msg='Handle should be a pyopencl.array.Array but got a {}.'
-            msg=msg.format(handle.__class__)
+            msg = 'Handle should be a pyopencl.array.Array but got a {}.'
+            msg = msg.format(handle.__class__)
             raise ValueError(msg)
         if not isinstance(backend, OpenClArrayBackend):
-            msg='Backend should be a OpenClArrayBackend but got a {}.'
-            msg=msg.format(handle.__class__)
+            msg = 'Backend should be a OpenClArrayBackend but got a {}.'
+            msg = msg.format(handle.__class__)
             raise ValueError(msg)
-        
-        if handle.dtype in [np.float16, np.longdouble, np.bool]:
-            msg='{} unsupported yet for OpenCl arrays.'.format(handle.dtype)
+
+        if handle.dtype in [np.float16, np.longdouble, np.bool_]:
+            msg = '{} unsupported yet for OpenCl arrays.'.format(handle.dtype)
             raise TypeError(msg)
-        
-        super(OpenClArray,self).__init__(handle=handle, backend=backend, **kargs)
-        
-        # at this time the opencl backend works only with the default_queue 
+
+        super(OpenClArray, self).__init__(handle=handle, backend=backend, **kargs)
+
+        # at this time the opencl backend works only with the default_queue
         # so we enforce it.
         if (handle.queue is not None) and (handle.queue is not self.default_queue):
             msg = 'pyopencl.Array has been created with a non-default queue.'
             raise RuntimeError(msg)
         backend.check_queue(handle.queue)
         self.set_default_queue(self.default_queue)
-    
+
     def as_symbolic_array(self, name, **kwds):
         """
         Return a symbolic array variable that contain a reference to this array.
         """
         from hysop.symbolic.array import OpenClSymbolicArray
         return OpenClSymbolicArray(memory_object=self, name=name, **kwds)
-    
+
     def as_symbolic_buffer(self, name, **kwds):
         """
         Return a symbolic buffer variable that contain a reference to this array.
@@ -62,68 +61,82 @@ class OpenClArray(Array):
 
     def get_ndim(self):
         return self._handle.ndim
+
     def get_shape(self):
         return to_tuple(self._handle.shape)
+
     def set_shape(self, shape):
         self._handle.shape = shape
+
     def get_size(self):
         return self._handle.size
+
     def get_strides(self):
         return self._handle.strides
+
     def get_data(self):
         try:
             return self._handle.data
         except clArray.ArrayHasOffsetError:
-             offset = self.offset
-             alignment = self.backend.device.mem_base_addr_align
-             if (offset % alignment) == 0:
-                 # try to return a subbuffer
-                 try:
-                     buf = self.base_data[offset:]
-                     buf.__parent = self.base_data
-                     return buf
-                 except:
-                     raise clArray.ArrayHasOffsetError
-             else:
-                 raise
+            offset = self.offset
+            alignment = self.backend.device.mem_base_addr_align
+            if (offset % alignment) == 0:
+                # try to return a subbuffer
+                try:
+                    buf = self.base_data[offset:]
+                    buf.__parent = self.base_data
+                    return buf
+                except:
+                    raise clArray.ArrayHasOffsetError
+            else:
+                raise
 
     def get_base(self):
         return self._handle.base_data
+
     def get_offset(self):
         return self._handle.offset
+
     def get_dtype(self):
         return self._handle.dtype
+
     def get_flags(self):
         return self._handle.flags
+
     def get_T(self):
         return self.wrap(self._handle.T)
+
     def get_imag(self):
         return self.backend.imag(self)
+
     def get_real(self):
         return self.backend.real(self)
+
     def get_nbytes(self):
         return self._handle.nbytes
+
     def get_int_ptr(self):
         return self._handle.base_data.int_ptr + self.offset
-    
-    # array properties 
-    ndim     = property(get_ndim)
-    shape    = property(get_shape, set_shape)
-    offset   = property(get_offset)
-    strides  = property(get_strides)
-    data     = property(get_data)
-    base     = property(get_base)
-    dtype    = property(get_dtype)
-    flags    = property(get_flags)
-    T        = property(get_T)
-    imag     = property(get_imag)
-    real     = property(get_real)
-    size     = property(get_size)
-    nbytes   = property(get_nbytes)
-    int_ptr  = property(get_int_ptr)
-    
+
+    # array properties
+    ndim = property(get_ndim)
+    shape = property(get_shape, set_shape)
+    offset = property(get_offset)
+    strides = property(get_strides)
+    data = property(get_data)
+    base = property(get_base)
+    dtype = property(get_dtype)
+    flags = property(get_flags)
+    T = property(get_T)
+    imag = property(get_imag)
+    real = property(get_real)
+    size = property(get_size)
+    nbytes = property(get_nbytes)
+    int_ptr = property(get_int_ptr)
+
     def get_base_data(self):
         return self._handle.base_data
+
     def get_offset(self):
         return self._handle.offset
     base_data = property(get_base_data)
@@ -134,28 +147,26 @@ class OpenClArray(Array):
         Equivalent C type corresponding to the numpy.dtype.
         """
         return clTools.dtype_to_ctype(self.dtype)
-    
-    
+
     def get(self, handle=False,
-            queue=None, ary=None, async=False):
+            queue=None, ary=None):
         """
         Returns a HostArray, view or copy of this array.
         """
         queue = self.backend.check_queue(queue)
-        if self.size==0:
+        if self.size == 0:
             return None
         elif self.flags.forc:
-            host_array = self._call('get', queue=queue, ary=ary, async=async)
+            host_array = self._call('get', queue=queue, ary=ary)
         else:
             from hysop.backend.device.opencl.opencl_copy_kernel_launchers import \
-                    OpenClCopyBufferRectLauncher
-            assert not async
+                OpenClCopyBufferRectLauncher
             if (ary is not None):
                 host_array = ary
             else:
                 host_array = self.backend.host_array_backend.empty_like(self)
-            kl = OpenClCopyBufferRectLauncher.from_slices(varname='buffer', src=self, 
-                    dst=host_array)
+            kl = OpenClCopyBufferRectLauncher.from_slices(varname='buffer', src=self,
+                                                          dst=host_array)
             evt = kl(queue=queue)
             evt.wait()
 
@@ -167,19 +178,19 @@ class OpenClArray(Array):
             if host_array.ndim == 1:
                 return host_array._handle[0]
         return host_array
-    
+
     # event managment
     def events(self):
         """
-        A list of pyopencl.Event instances that the current content of this array depends on. 
-        User code may read, but should never modify this list directly. 
+        A list of pyopencl.Event instances that the current content of this array depends on.
+        User code may read, but should never modify this list directly.
         To update this list, instead use the following methods.
         """
         return self.handle.events
 
     def add_event(self, evt):
         """
-        Add evt to events. If events is too long, this method may implicitly wait 
+        Add evt to events. If events is too long, this method may implicitly wait
         for a subset of events and clear them from the list.
         """
         self._call('add_event', evt=evt)
@@ -196,35 +207,39 @@ class OpenClArray(Array):
         Get the opencl context associated to this array.
         """
         return self.backend.context
+
     def get_device(self):
         """
         Get the opencl device associated to this array.
         """
         return self.backend.device
+
     def set_default_queue(self, queue):
         """
         Sets the default queue for this array.
         """
-        # at this time the opencl backend works only with the default_queue 
+        # at this time the opencl backend works only with the default_queue
         # so we enforce it.
         if (queue is not self.default_queue):
-            msg='Default queue override has been disabled for non-default queues.'
+            msg = 'Default queue override has been disabled for non-default queues.'
             raise RuntimeError(msg)
         queue = self.backend.check_queue(queue)
         self._handle.queue = queue
+
     def reset_default_queue(self):
         """
         Resets the default queue for this array.
         """
         self._handle.queue = None
+
     def get_default_queue(self):
         """
         Get the default queue for this array.
         """
         return self._handle.queue or self.backend.default_queue
-    
+
     context = property(get_context)
-    device  = property(get_device)
+    device = property(get_device)
     default_queue = property(get_default_queue, set_default_queue)
 
     def with_queue(queue):
@@ -234,26 +249,26 @@ class OpenClArray(Array):
         queue = self.backend.check_queue(queue)
         yield self._call('with_queue', queue=queue)
 
-    
-    ## Array specific methods
+    # Array specific methods
+
     def view(self, dtype=None):
         """
-        Returns view of array with the same data. If dtype is different from current dtype, 
+        Returns view of array with the same data. If dtype is different from current dtype,
         the actual bytes of memory will be reinterpreted.
         """
         return self._call('view', dtype=dtype)
-    
+
     def reshape(self, shape, order=default_order):
         """
-        Returns view of array with the same data. If dtype is different from current dtype, 
+        Returns view of array with the same data. If dtype is different from current dtype,
         the actual bytes of memory will be reinterpreted.
         """
         shape = tuple(int(i) for i in shape)
         return self._call('reshape', *shape, order=order)
 
     def astype(self, dtype, queue=None,
-            order=MemoryOrdering.SAME_ORDER, 
-            casting='unsafe', subok=True, copy=True):
+               order=MemoryOrdering.SAME_ORDER,
+               casting='unsafe', subok=True, copy=True):
         """
         Copy of the array, cast to a specified type.
         """
@@ -263,119 +278,119 @@ class OpenClArray(Array):
         self._unsupported_argument('astype', 'copy', copy, True)
         queue = self.backend.check_queue(queue)
         return self._call('astype', dtype=dtype, queue=queue)
-   
 
-    ## Cached kernels for efficiency
-    def min(self, axis=None, out=None, 
-                queue=None, async=False, **kwds):
+    # Cached kernels for efficiency
+
+    def min(self, axis=None, out=None,
+            queue=None, synchronize=True, **kwds):
         """
         Return the minimum along a given axis.
         On the first call, a kernel launcher is built for efficiency.
         """
         if (axis is None) and (out is None):
             if not hasattr(self, '_OpenClArray__min_launcher'):
-                self.__min_launcher = self.backend.amin(a=self, axis=axis, out=out, 
-                        build_kernel_launcher=True, queue=queue, async=True, 
-                        **kwds)
-            evt = self.__min_launcher(queue=queue, async=True)
+                self.__min_launcher = self.backend.amin(a=self, axis=axis, out=out,
+                                                        build_kernel_launcher=True, queue=queue, synchronize=False,
+                                                        **kwds)
+            evt = self.__min_launcher(queue=queue, synchronize=False)
             out = self.__min_launcher.out
-            if async:
-                return evt
-            else:
+            if synchronize:
                 evt.wait()
                 return out.copy()
+            else:
+                return (evt, out)
         else:
             super(OpenClArray, self).min(self, axis=axis, out=out,
-                    queue=queue, async=async, **kwds)
+                                         queue=queue, synchronize=synchronize, **kwds)
 
-    def max(self, axis=None, out=None, 
-                queue=None, async=False, **kwds):
+    def max(self, axis=None, out=None,
+            queue=None, synchronize=True, **kwds):
         """
         Return the maximum along a given axis.
         On the first call, a kernel launcher is built for efficiency.
         """
         if (axis is None) and (out is None):
             if not hasattr(self, '_OpenClArray__max_launcher'):
-                self.__max_launcher = self.backend.amax(a=self, axis=axis, out=out, 
-                        build_kernel_launcher=True, queue=queue, async=True, 
-                        **kwds)
-            evt = self.__max_launcher(queue=queue, async=True)
+                self.__max_launcher = self.backend.amax(a=self, axis=axis, out=out,
+                                                        build_kernel_launcher=True, queue=queue, synchronize=False,
+                                                        **kwds)
+            evt = self.__max_launcher(queue=queue, synchronize=False)
             out = self.__max_launcher.out
-            if async:
-                return evt
-            else:
+            if synchronize:
                 evt.wait()
                 return out.copy()
+            else:
+                return (evt, out)
         else:
             super(OpenClArray, self).max(self, axis=axis, out=out,
-                    queue=queue, async=async, **kwds)
-    
-    def nanmin(self, axis=None, out=None, 
-                queue=None, async=False, **kwds):
+                                         queue=queue, synchronize=synchronize, **kwds)
+
+    def nanmin(self, axis=None, out=None,
+               queue=None, synchronize=True, **kwds):
         """
         Return the minimum along a given axis.
         On the first call, a kernel launcher is built for efficiency.
         """
         if (axis is None) and (out is None):
             if not hasattr(self, '_OpenClArray__nanmin_launcher'):
-                self.__nanmin_launcher = self.backend.nanmin(a=self, axis=axis, out=out, 
-                        build_kernel_launcher=True, queue=queue, async=True, 
-                        **kwds)
-            evt = self.__nanmin_launcher(queue=queue, async=True)
+                self.__nanmin_launcher = self.backend.nanmin(a=self, axis=axis, out=out,
+                                                             build_kernel_launcher=True, queue=queue, synchronize=False,
+                                                             **kwds)
+            evt = self.__nanmin_launcher(queue=queue, synchronize=False)
             out = self.__nanmin_launcher.out
-            if async:
-                return evt
-            else:
+            if synchronize:
                 evt.wait()
                 return out.copy()
+            else:
+                return (evt, out)
         else:
             super(OpenClArray, self).nanmin(self, axis=axis, out=out,
-                    queue=queue, async=async, **kwds)
+                                            queue=queue, synchronize=synchronize, **kwds)
 
-    def nanmax(self, axis=None, out=None, 
-                queue=None, async=False, **kwds):
+    def nanmax(self, axis=None, out=None,
+               queue=None, synchronize=True, **kwds):
         """
         Return the maximum along a given axis.
         On the first call, a kernel launcher is built for efficiency.
         """
         if (axis is None) and (out is None):
             if not hasattr(self, '_OpenClArray__nanmax_launcher'):
-                self.__nanmax_launcher = self.backend.nanmax(a=self, axis=axis, out=out, 
-                        build_kernel_launcher=True, queue=queue, async=True, 
-                        **kwds)
-            evt = self.__nanmax_launcher(queue=queue, async=True)
+                self.__nanmax_launcher = self.backend.nanmax(a=self, axis=axis, out=out,
+                                                             build_kernel_launcher=True, queue=queue, synchronize=False,
+                                                             **kwds)
+            evt = self.__nanmax_launcher(queue=queue, synchronize=False)
             out = self.__nanmax_launcher.out
-            if async:
-                return evt
-            else:
+            if synchronize:
                 evt.wait()
                 return out.copy()
+            else:
+                return (evt, out)
         else:
             super(OpenClArray, self).nanmax(self, axis=axis, out=out,
-                    queue=queue, async=async, **kwds)
-    
-    def sum(self, axis=None, out=None, 
-                queue=None, async=False, **kwds):
+                                            queue=queue, synchronize=synchronize, **kwds)
+
+    def sum(self, axis=None, out=None,
+            queue=None, synchronize=True, **kwds):
         """
         Return the sum along a given axis.
         On the first call, a kernel launcher is built for efficiency.
         """
         if (axis is None) and (out is None):
             if not hasattr(self, '_OpenClArray__sum_launcher'):
-                self.__sum_launcher = self.backend.sum(a=self, axis=axis, out=out, 
-                        build_kernel_launcher=True, queue=queue, async=True, 
-                        **kwds)
-            evt = self.__sum_launcher(queue=queue, async=True)
+                self.__sum_launcher = self.backend.sum(a=self, axis=axis, out=out,
+                                                       build_kernel_launcher=True, queue=queue, synchronize=False,
+                                                       **kwds)
+            evt = self.__sum_launcher(queue=queue, synchronize=False)
             out = self.__sum_launcher.out
-            if async:
-                return evt
-            else:
+            if synchronize:
                 evt.wait()
                 return out.copy()
+            else:
+                return (evt, out)
         else:
             super(OpenClArray, self).sum(self, axis=axis, out=out,
-                    queue=queue, async=async, **kwds)
-    
+                                         queue=queue, synchronize=synchronize, **kwds)
+
     def setitem(self, subscript, value, queue=None):
         queue = first_not_None(queue, self.default_queue)
         if np.isscalar(value):
@@ -384,22 +399,23 @@ class OpenClArray(Array):
                 a.fill(value=value, queue=queue)
         else:
             try:
-                self.handle.setitem(subscript=subscript, value=value, 
-                        queue=queue)
+                self.handle.setitem(subscript=subscript, value=value,
+                                    queue=queue)
             except:
                 from hysop.backend.device.opencl.opencl_copy_kernel_launchers import \
-                        OpenClCopyBufferRectLauncher
-                kl = OpenClCopyBufferRectLauncher.from_slices(varname='buffer', src=value, 
-                        dst=self, dst_slices=subscript)
+                    OpenClCopyBufferRectLauncher
+                kl = OpenClCopyBufferRectLauncher.from_slices(varname='buffer', src=value,
+                                                              dst=self, dst_slices=subscript)
                 evt = kl(queue=queue)
                 evt.wait()
 
     def __setitem__(self, subscript, value, **kwds):
-        if any( (s==0) for s in self[subscript].shape ):
+        if any((s == 0) for s in self[subscript].shape):
             return
         self.setitem(subscript=subscript, value=value, **kwds)
-    
+
     def __str__(self):
         return str(self.get())
+
     def __repr__(self):
         return repr(self.get())
diff --git a/hysop/backend/device/opencl/opencl_array_backend.py b/hysop/backend/device/opencl/opencl_array_backend.py
index bc60c191871f46e468d499799bfe0c96e061a622..d6df937593b0a358bddefb73a6bdeb7b4b8bfdc7 100644
--- a/hysop/backend/device/opencl/opencl_array_backend.py
+++ b/hysop/backend/device/opencl/opencl_array_backend.py
@@ -1,30 +1,32 @@
-
+import os
+import re
 import warnings
+import numpy as np
+
 from hysop import __KERNEL_DEBUG__
-from hysop.deps import re, np, os
 from hysop.tools.types import check_instance, to_tuple
 from hysop.tools.misc import prod
 from hysop.tools.numerics import is_complex, get_dtype, float_to_complex_dtype, \
-                                 complex_to_float_dtype, find_common_dtype
+    complex_to_float_dtype, find_common_dtype
 from hysop.constants import Backend
 from hysop.constants import HYSOP_REAL, HYSOP_INTEGER, HYSOP_INDEX, HYSOP_BOOL
 from hysop.backend.device.opencl import cl, clArray, clTools, clRandom, \
-                                        clReduction, clElementwise, clScan
+    clReduction, clElementwise, clScan
 
 from hysop.core.arrays import default_order
 from hysop.core.arrays import MemoryOrdering, MemoryType, QueuePolicy
 
 from hysop.core.arrays.array_backend import ArrayBackend
-from hysop.core.arrays.array         import Array
+from hysop.core.arrays.array import Array
 
 # from hysop.core.memory.mempool import MemoryPool
 
-from hysop.backend.device.opencl.opencl_allocator   import OpenClAllocator
+from hysop.backend.device.opencl.opencl_allocator import OpenClAllocator
 from hysop.backend.device.opencl.opencl_env import OpenClEnvironment
 from hysop.backend.device.opencl.opencl_kernel_launcher import OpenClKernelLauncherI, trace_kernel, profile_kernel
 
 from hysop.tools.numerics import is_fp, is_integer, is_signed, is_unsigned,\
-                                 get_dtype, match_dtype
+    get_dtype, match_dtype
 from hysop.tools.types import first_not_None
 
 from hysop.backend.device.opencl.opencl_tools import get_or_create_opencl_env
@@ -32,29 +34,32 @@ from hysop.backend.device.opencl.opencl_tools import get_or_create_opencl_env
 
 class _ElementwiseKernel(object):
     """
-    Evaluating involved expressions on OpenClArray instances by using overloaded 
+    Evaluating involved expressions on OpenClArray instances by using overloaded
     operators can be somewhat inefficient, because a new temporary is created for
-    each intermediate result. ElementwiseKernel generate kernels that evaluate 
+    each intermediate result. ElementwiseKernel generate kernels that evaluate
     multi-stage expressions on one or several operands in a single pass.
 
     clElementwise.ElementwiseKernel adapted for use with OpenClArrayBackend.elementwise(...)
     """
-    def __init__(self, context, arguments, operation, 
-                       name='elementwise', preamble='', options=[],
-                      **kwds):
-        self.kernel = clElementwise.ElementwiseKernel(context=context, arguments=arguments, 
-                                                      operation=operation, name=name, 
+
+    def __init__(self, context, arguments, operation,
+                 name='elementwise', preamble='', options=[],
+                 **kwds):
+        self.kernel = clElementwise.ElementwiseKernel(context=context, arguments=arguments,
+                                                      operation=operation, name=name,
                                                       preamble=preamble, options=options)
+
     def __call__(self, wait_for, args, **kwds):
-        return self.kernel.__call__(wait_for=wait_for, queue=kwds.get('queue',None), *args)
-    
+        return self.kernel.__call__(wait_for=wait_for, queue=kwds.get('queue', None), *args)
+
     def to_kernel_launcher(self, name, wait_for, queue, args, **kwds):
         """Build an _OpenClElementWiseKernelLauncher from self."""
 
         class OpenClElementwiseKernelLauncher(OpenClKernelLauncherI):
             """Utility class to build opencl reduction kernel launchers."""
-            __slots__ = ('_name', 'kernel', 'kernel_args', 'kernel_kwds', 
-                            'default_queue')
+            __slots__ = ('_name', 'kernel', 'kernel_args', 'kernel_kwds',
+                         'default_queue')
+
             def __init__(self, name, kernel, args, default_queue, **extra_kernel_kwds):
                 super(OpenClElementwiseKernelLauncher, self).__init__(name=name)
                 kernel_args = args
@@ -63,9 +68,11 @@ class _ElementwiseKernel(object):
                 self.kernel_args = kernel_args
                 self.kernel_kwds = kernel_kwds
                 self.default_queue = default_queue
-                self._apply_msg='  '+name+'<<<{}>>>()'.format(args[0].shape)
+                self._apply_msg = '  '+name+'<<<{}>>>()'.format(args[0].shape)
+
             def global_size_configured(self):
                 return True
+
             def __call__(self, queue=None, wait_for=None, **kwds):
                 trace_kernel(self._apply_msg)
                 queue = first_not_None(queue, self.default_queue)
@@ -77,53 +84,55 @@ class _ElementwiseKernel(object):
                 return evt
 
         return OpenClElementwiseKernelLauncher(name=name, kernel=self.kernel,
-                args=args, default_queue=queue, **kwds)
+                                               args=args, default_queue=queue, **kwds)
+
 
 class _ReductionKernel(object):
     """
-    Generate a kernel that takes a number of scalar or vector arguments 
-    (at least one vector argument), performs the map_expr on each entry 
-    of the vector argument and then the reduce_expr on the outcome. 
-    
-    neutral serves as an initial value and is specified as float or 
+    Generate a kernel that takes a number of scalar or vector arguments
+    (at least one vector argument), performs the map_expr on each entry
+    of the vector argument and then the reduce_expr on the outcome.
+
+    neutral serves as an initial value and is specified as float or
     integer formatted as string.
-    
-    preamble offers the possibility to add preprocessor directives 
-    and other code (such as helper functions) to be included before the 
+
+    preamble offers the possibility to add preprocessor directives
+    and other code (such as helper functions) to be included before the
     actual reduction kernel code.
 
-    vectors in map_expr should be indexed by the variable i. 
-    
-    reduce_expr uses the formal values 'a' and 'b' to indicate 
-    two operands of a binary reduction operation. 
-    
-    If you do not specify a map_expr, in[i] is automatically 
+    vectors in map_expr should be indexed by the variable i.
+
+    reduce_expr uses the formal values 'a' and 'b' to indicate
+    two operands of a binary reduction operation.
+
+    If you do not specify a map_expr, in[i] is automatically
     assumed and treated as the only one input argument.
 
-    dtype_out specifies the numpy.dtype in which the reduction is performed 
+    dtype_out specifies the numpy.dtype in which the reduction is performed
     and in which the result is returned.
-    
+
     clReduction.ReductionKernel adapted for use with OpenClArrayBackend.elementwise(...)
     """
+
     def __init__(self, context, arguments, reduce_expr, map_expr, neutral, dtype,
-                       name='reduce', preamble='', options=[],
-                      **kargs):
+                 name='reduce', preamble='', options=[],
+                 **kargs):
         # remove output argument from arguments list
         arguments = arguments.split(',')
         arguments = ','.join(arguments[1:])
 
-        self.kernel = clReduction.ReductionKernel(ctx=context, dtype_out=dtype, 
-                                              neutral=neutral,
-                                              reduce_expr=reduce_expr, map_expr=map_expr, 
-                                              arguments=arguments, 
-                                              name=name, preamble=preamble, options=options)
-    
+        self.kernel = clReduction.ReductionKernel(ctx=context, dtype_out=dtype,
+                                                  neutral=neutral,
+                                                  reduce_expr=reduce_expr, map_expr=map_expr,
+                                                  arguments=arguments,
+                                                  name=name, preamble=preamble, options=options)
+
     def __call__(self, wait_for, return_event, iargs, oargs, pargs, queue, **kargs):
-        assert len(oargs)==1
-        assert len(iargs)>=1
+        assert len(oargs) == 1
+        assert len(iargs) >= 1
         return self.kernel.__call__(*(iargs+pargs), out=oargs[0],
                                     queue=queue, wait_for=wait_for, return_event=return_event,
-                                    allocator=None, 
+                                    allocator=None,
                                     range=None, slice=None)
 
     def to_kernel_launcher(self, name, wait_for, queue, args, **kwds):
@@ -131,21 +140,24 @@ class _ReductionKernel(object):
 
         class OpenClReductionKernelLauncher(OpenClKernelLauncherI):
             """Utility class to build opencl reduction kernel launchers."""
-            __slots__ = ('_name', 'kernel', 'kernel_args', 'kernel_kwds', 
-                    'default_queue', 'return_event')
+            __slots__ = ('_name', 'kernel', 'kernel_args', 'kernel_kwds',
+                         'default_queue', 'return_event')
+
             def __init__(self, name, kernel, return_event,
-                    iargs, oargs, pargs, default_queue, **extra_kernel_kwds):
+                         iargs, oargs, pargs, default_queue, **extra_kernel_kwds):
                 super(OpenClReductionKernelLauncher, self).__init__(name=name)
                 kernel_args = (iargs+pargs)
                 kernel_kwds = dict(out=oargs[0], return_event=True,
-                        allocator=None, range=None, slice=None)
+                                   allocator=None, range=None, slice=None)
                 self.kernel = kernel
                 self.kernel_args = kernel_args
                 self.kernel_kwds = kernel_kwds
                 self.default_queue = default_queue
-                self._apply_msg='  '+name+'<<<{}>>>()'.format(self.kernel_args[0].shape)
+                self._apply_msg = '  '+name+'<<<{}>>>()'.format(self.kernel_args[0].shape)
+
             def global_size_configured(self):
                 return True
+
             def __call__(self, queue=None, wait_for=None, **kwds):
                 trace_kernel(self._apply_msg)
                 queue = first_not_None(queue, self.default_queue)
@@ -158,161 +170,163 @@ class _ReductionKernel(object):
                 return evt
 
         return OpenClReductionKernelLauncher(name=name, kernel=self.kernel,
-                default_queue=queue, **kwds)
+                                             default_queue=queue, **kwds)
+
 
 class _GenericScanKernel(object):
     """
-    Generates an OpenCL kernel that performs prefix sums ('scans') on arbitrary types, 
+    Generates an OpenCL kernel that performs prefix sums ('scans') on arbitrary types,
     with many possible tweaks.
-    
+
     Adapted for use with OpenClArrayBackend.elementwise(...)
-    
+
     Context and devices:
         - context: context within the code for this kernel will be generated.
-        - devices may be used to restrict the set of devices on which the kernel is meant to 
+        - devices may be used to restrict the set of devices on which the kernel is meant to
           run (defaults to all devices in the context ctx).
-    
+
     Prefix sum configuration:
-        - dtype specifies the numpy.dtype with which the scan will be performed. 
-          May be a structured type if that type was registered through 
+        - dtype specifies the numpy.dtype with which the scan will be performed.
+          May be a structured type if that type was registered through
           clTools.get_or_register_dtype().
-        
-        - arguments: An optional string of comma-separated C argument declarations. 
+
+        - arguments: An optional string of comma-separated C argument declarations.
                      If arguments is specified, then input_expr must also be specified.
 
-        - input_expr: A C expression, encoded as a string, resulting in the values to which 
+        - input_expr: A C expression, encoded as a string, resulting in the values to which
                       the scan is applied. This may be used to apply a mapping to values stored
-                      in arguments before being scanned. 
+                      in arguments before being scanned.
                       The result of this expression must match dtype.
 
-        - scan_expr: The associative, binary operation carrying out the scan, 
-                     represented as a C string. Its two arguments are available 
-                     as a and b when it is evaluated. b is guaranteed to be the 
-                     element being updated, and a is the increment. 
+        - scan_expr: The associative, binary operation carrying out the scan,
+                     represented as a C string. Its two arguments are available
+                     as a and b when it is evaluated. b is guaranteed to be the
+                     element being updated, and a is the increment.
 
         - neutral is the neutral element of scan_expr, obeying scan_expr(a, neutral) == a.
 
-        - output_statement: a C statement that writes the output of the scan. 
-          It has access to the scan result as item, the preceding scan result item as 
-          prev_item, and the current index as i. 
+        - output_statement: a C statement that writes the output of the scan.
+          It has access to the scan result as item, the preceding scan result item as
+          prev_item, and the current index as i.
           prev_item in a segmented scan will be the neutral element i
           at a segment boundary, not the immediately preceding item.
 
-          Using prev_item in output statement has a small run-time cost. 
+          Using prev_item in output statement has a small run-time cost.
           prev_item enables the construction of an exclusive scan.
 
-          For non-segmented scans, output_statement may also reference last_item, 
+          For non-segmented scans, output_statement may also reference last_item,
           which evaluates to the scan result of the last array entry.
-    
+
     Segmented scans:
-        - is_segment_start_expr: A C expression, encoded as a string, resulting in a C bool 
-          value that determines whether a new scan segments starts at index i. 
-          If given, makes the scan a segmented scan. Has access to the current index i, 
-          the result of input_expr as a, and in addition may use arguments and input_fetch_expr 
+        - is_segment_start_expr: A C expression, encoded as a string, resulting in a C bool
+          value that determines whether a new scan segments starts at index i.
+          If given, makes the scan a segmented scan. Has access to the current index i,
+          the result of input_expr as a, and in addition may use arguments and input_fetch_expr
           variables just like input_expr.
-        
-          If it returns true, then previous sums will not spill over into the item with 
+
+          If it returns true, then previous sums will not spill over into the item with
           index i or subsequent items.
 
-        - input_fetch_exprs: a list of tuples (NAME, ARG_NAME, OFFSET). 
-          An entry here has the effect of doing the equivalent of the 
+        - input_fetch_exprs: a list of tuples (NAME, ARG_NAME, OFFSET).
+          An entry here has the effect of doing the equivalent of the
           following before input_expr:   ARG_NAME_TYPE NAME = ARG_NAME[i+OFFSET];
-         
+
           OFFSET is allowed to be 0 or -1
           ARG_NAME_TYPE is the type of ARG_NAME.
-    
+
     Kernel generation and build options:
-        - name is used for kernel names to ensure recognizability in profiles and logs. 
-        - options is a list of compiler options to use when building. 
-        - preamble specifies a string of code that is inserted before the actual kernels. 
-    
+        - name is used for kernel names to ensure recognizability in profiles and logs.
+        - options is a list of compiler options to use when building.
+        - preamble specifies a string of code that is inserted before the actual kernels.
+
     clScan.GenericScanKernel adapted for use with OpenClArrayBackend.elementwise(...)
     """
-    def __init__(self, context, dtype, arguments, 
-                       input_expr, scan_expr, neutral,
-                       output_statement, 
-                       is_segment_start_expr=None, input_fetch_exprs=None,
-                       name='generic_scan', options=[], preamble='',
-                       devices=None, 
-                       index_dtype = np.int32,
-                       **kargs):
-
-        input_fetch_exprs = input_fetch_exprs or list()
-
-        self.kernel = clScan.GenericScanKernel(ctx=context, 
-                dtype=dtype, index_dtype=index_dtype,
-                arguments=arguments, neutral=neutral, 
-                scan_expr=scan_expr, input_expr=input_expr, output_statement=output_statement, 
-                is_segment_start_expr=is_segment_start_expr, 
-                input_fetch_exprs=input_fetch_exprs,
-                name_prefix=name, preamble=preamble, options=options)
+
+    def __init__(self, context, dtype, arguments,
+                 input_expr, scan_expr, neutral,
+                 output_statement,
+                 is_segment_start_expr=None, input_fetch_exprs=None,
+                 name='generic_scan', options=[], preamble='',
+                 devices=None,
+                 index_dtype=np.int32,
+                 **kargs):
+
+        input_fetch_exprs = first_not_None(input_fetch_exprs, [])
+
+        self.kernel = clScan.GenericScanKernel(ctx=context,
+                                               dtype=dtype, index_dtype=index_dtype,
+                                               arguments=arguments, neutral=neutral,
+                                               scan_expr=scan_expr, input_expr=input_expr, output_statement=output_statement,
+                                               is_segment_start_expr=is_segment_start_expr,
+                                               input_fetch_exprs=input_fetch_exprs,
+                                               name_prefix=name, preamble=preamble, options=options)
 
     def __call__(self, args, queue, wait_for=None, size=None, **kargs):
         """
         Call the kernel with arguments args.
-        size may specify the length of the scan to be carried out. 
+        size may specify the length of the scan to be carried out.
         If not given, this length is inferred from the first array argument passed.
         """
         return self.kernel.__call__(*args, queue=queue,
-                size=size, wait_for=wait_for)
-
+                                    size=size, wait_for=wait_for)
 
 
 class OpenClArrayBackend(ArrayBackend):
     """
     Opencl array backend, extending pyopencl.array.Array capabilities.
 
-    By default all methods are using the default opencl device (setup during 
-    build in hysop.__init__.py and hysop.backend.device.opencl.__init__.py) 
-    and an associated default command queue. An OpenClArrayBackend can be 
+    By default all methods are using the default opencl device (setup during
+    build in hysop.__init__.py and hysop.backend.device.opencl.__init__.py)
+    and an associated default command queue. An OpenClArrayBackend can be
     created with any context and default queue through an OpenClEnvironment.
-    
-    See hysop.backend.device.opencl.opencl_tools.get_or_create_opencl_env() for more 
+
+    See hysop.backend.device.opencl.opencl_tools.get_or_create_opencl_env() for more
     informations on how to create or get an OpenClEnvironment.
 
-    The default allocator is a hysop.backend.device.opencl.opencl_allocator.OpenClImmediateAllocator 
+    The default allocator is a hysop.backend.device.opencl.opencl_allocator.OpenClImmediateAllocator
     and the arrays are created within a hysop.core.memory.MemoryPool based on this allocator
     so that there is no surprise when the array is first used (out of memory).
     """
-    
+
     def get_kind(self):
         return Backend.OPENCL
     kind = property(get_kind)
-    
+
     def get_host_array_backend(self):
         return self._host_array_backend
-    host_array_backend=property(get_host_array_backend)
-    
+    host_array_backend = property(get_host_array_backend)
+
     def short_description(self):
         return ':OpenClBackend:  tag={}, cl_env={}, allocator={}, host_backend={}]'.format(
-                self.tag,
-                self.cl_env.tag,
-                self.allocator.full_tag,
-                self.host_array_backend.full_tag)
-    
+            self.tag,
+            self.cl_env.tag,
+            self.allocator.full_tag,
+            self.host_array_backend.full_tag)
+
     def __eq__(self, other):
         if not other.__class__ == self.__class__:
             return NotImplemented
-        eq  = (self._context           is other._context)
-        eq &= (self._default_queue     is other._default_queue)
-        eq &= (self._allocator         is other._allocator)
+        eq = (self._context is other._context)
+        eq &= (self._default_queue is other._default_queue)
+        eq &= (self._allocator is other._allocator)
         eq &= (self.host_array_backend is other.host_array_backend)
         return eq
+
     def __ne__(self, other):
         return not (self == other)
+
     def __hash__(self):
         return id(self._context) ^ id(self._default_queue) ^ id(self.allocator) ^ id(self.host_array_backend)
-    
-    
+
     __backends = {}
 
     @classmethod
-    def get_or_create(cls, cl_env, 
-                           queue=None, 
-                           allocator=None, 
-                           host_array_backend=None):
+    def get_or_create(cls, cl_env,
+                      queue=None,
+                      allocator=None,
+                      host_array_backend=None):
         from hysop.core.arrays.all import HostArrayBackend, \
-                default_host_array_backend
+            default_host_array_backend
         host_array_backend = first_not_None(host_array_backend, default_host_array_backend)
         if (queue is None):
             queue = cl_env.default_queue
@@ -322,21 +336,24 @@ class OpenClArrayBackend(ArrayBackend):
             return cls.__backends[key]
         else:
             obj = cls(cl_env=cl_env, queue=queue, allocator=allocator,
-                        host_array_backend=host_array_backend)
+                      host_array_backend=host_array_backend)
             cls.__backends[key] = obj
             return obj
 
+    def __new__(cls, cl_env=None, queue=None, allocator=None, host_array_backend=None):
+        return super(OpenClArrayBackend, cls).__new__(cls, allocator=None)
+
     def __init__(self, cl_env=None, queue=None, allocator=None,
-            host_array_backend=None): 
+                 host_array_backend=None):
         """
         Initialize and OpenClArrayBackend with OpenCL environment cl_env.
-        
+
         If cl_env and queue are not specified, the default environment returned
         by get_or_create_opencl_env() will be used along with its default queue.
-        
-        If cl_env is not specified but a queue is specified, this queue will be 
+
+        If cl_env is not specified but a queue is specified, this queue will be
         used instead along with its associated context.
-        
+
         If both queue and cl_env are speficied, context should match.
         Allocator can be used to override cl_env.memory_pool and should also match context.
 
@@ -345,35 +362,35 @@ class OpenClArrayBackend(ArrayBackend):
         check_instance(cl_env, OpenClEnvironment, allow_none=True)
         check_instance(queue, cl.CommandQueue, allow_none=True)
         check_instance(allocator, OpenClAllocator, allow_none=True)
-        
+
         # disable multi queue support at this time
-        msg='Non-default queue support has been disabled. Please provide a cl_env only.'
+        msg = 'Non-default queue support has been disabled. Please provide a cl_env only.'
         if (cl_env is None):
             raise RuntimeError(msg)
-        #if (queue is not None) and (queue is not cl_env.default_queue):
+        # if (queue is not None) and (queue is not cl_env.default_queue):
             #raise RuntimeError(msg)
-        
+
         if (queue is None):
             if (cl_env is None):
                 cl_env = get_or_create_opencl_env()
-            context   = cl_env.context
-            queue     = cl_env.default_queue
+            context = cl_env.context
+            queue = cl_env.default_queue
             allocator = allocator or cl_env.allocator
         elif (cl_env is None):
             context = queue.context
             if (allocator is None):
-                msg='A custom allocator should be given as input '
-                msg+='(cl_env was not specified but queue was).'
+                msg = 'A custom allocator should be given as input '
+                msg += '(cl_env was not specified but queue was).'
                 raise ValueError(msg)
         else:
             # cl_env and queue specified
-            context   = cl_env.context
+            context = cl_env.context
             allocator = allocator or cl_env.allocator
-        
+
         check_instance(allocator, OpenClAllocator)
         check_instance(context, cl.Context)
         check_instance(queue, cl.CommandQueue)
-        
+
         if queue.context != context:
             msg = 'Queue does not match context:'
             msg += '\n  *Given context is {}.'.format(context)
@@ -385,31 +402,34 @@ class OpenClArrayBackend(ArrayBackend):
             msg += '\n  *Allocator context is {}.'.format(allocator.context)
             raise ValueError(msg)
 
-        assert len(context.devices)==1, 'Multidevice contexts are not supported.'
-        
+        assert len(context.devices) == 1, 'Multidevice contexts are not supported.'
+
         from hysop.core.arrays.all import HostArrayBackend, \
-                default_host_array_backend
+            default_host_array_backend
         host_array_backend = host_array_backend or default_host_array_backend
         check_instance(host_array_backend, HostArrayBackend)
 
-        super(OpenClArrayBackend,self).__init__(allocator=allocator)
-        self._context       = context
+        super(OpenClArrayBackend, self).__init__(allocator=allocator)
+        self._context = context
         self._default_queue = queue
         self._host_array_backend = host_array_backend
         self._cl_env = cl_env
-        
+
         from hysop.backend.device.opencl.opencl_elementwise import OpenClElementwiseKernelGenerator
         self._kernel_generator = OpenClElementwiseKernelGenerator(cl_env=cl_env)
 
     def get_default_queue(self):
         return self._default_queue
+
     def get_context(self):
         return self._context
+
     def get_device(self):
         return self._context.devices[0]
+
     def get_cl_env(self):
         return self._cl_env
-    
+
     def any_backend_from_kind(self, *kinds):
         for kind in kinds:
             if (kind == self.kind):
@@ -430,9 +450,11 @@ class OpenClArrayBackend(ArrayBackend):
 
 # BACKEND SPECIFIC METHODS #
 ############################
+
+
     def can_wrap(self, handle):
         """
-        Should return True if handle is an Array or a array handle corresponding 
+        Should return True if handle is an Array or a array handle corresponding
         this backend.
         """
         from hysop.core.arrays.all import OpenClArray
@@ -443,33 +465,32 @@ class OpenClArrayBackend(ArrayBackend):
         Create an hysop.backend.device.OpenclArray from an pyopencl.array.Array instance.
         """
         from hysop.core.arrays.all import OpenClArray
-        
+
         if isinstance(handle, OpenClArray):
             return handle
-        
+
         check_instance(handle, clArray.Array)
 
         return OpenClArray(backend=self, handle=handle)
-   
+
     def _arg(self, arg):
         """
         Prepare one argument for a call to pyopencl backend.
         """
         if isinstance(arg, QueuePolicy):
-            if arg==QueuePolicy.COPY_QUEUE:
+            if arg == QueuePolicy.COPY_QUEUE:
                 return clArray._copy_queue
-            elif arg==QueuePolicy.SAME_AS_TRANSFER:
+            elif arg == QueuePolicy.SAME_AS_TRANSFER:
                 return clArray._same_as_transfer
-            elif arg==QueuePolicy.NO_QUEUE:
+            elif arg == QueuePolicy.NO_QUEUE:
                 return None
             else:
-                msg='Unknown queue policy {}.'.format(arg)
+                msg = 'Unknown queue policy {}.'.format(arg)
                 raise ValueError(msg)
         else:
-            return super(OpenClArrayBackend,self)._arg(arg)
+            return super(OpenClArrayBackend, self)._arg(arg)
 
-    
-    def copyto(self, dst, src, queue=None, async=False, **kargs):
+    def copyto(self, dst, src, queue=None, synchronize=True, **kargs):
         """
         src is a OpenClArray
         dst can be everything
@@ -477,7 +498,7 @@ class OpenClArrayBackend(ArrayBackend):
         from hysop.core.arrays.all import OpenClArray, HostArray
         self._check_argtype('copyto', 'src', src, OpenClArray)
         queue = first_not_None(queue, self._default_queue)
-        
+
         if dst.size != src.size:
             raise ValueError("'dst' has non-matching size.")
         if dst.dtype != src.dtype:
@@ -486,26 +507,27 @@ class OpenClArrayBackend(ArrayBackend):
             raise ValueError("'dst' has non-matching nbytes.")
         if (dst.size == 0):
             return
-        
+
         if isinstance(dst, OpenClArray):
             from hysop.backend.device.opencl.opencl_copy_kernel_launchers \
-                    import OpenClCopyBufferRectLauncher
+                import OpenClCopyBufferRectLauncher
             kl = OpenClCopyBufferRectLauncher.from_slices('buffer',
-                    src=src, dst=dst)
+                                                          src=src, dst=dst)
             evt = kl(queue=queue)
-            if async:
-                return evt
-            else:
-                evt.wait()
         elif isinstance(dst, Array):
-            return src.handle.get(queue=queue, ary=dst.handle, async=async)
+            (_, evt) = src.handle.get_async(queue=queue, ary=dst.handle)
         elif isinstance(dst, np.ndarray):
-            return src.handle.get(queue=queue, ary=dst, async=async)
+            (_, evt) = src.handle.get_async(queue=queue, ary=dst)
         else:
             msg = 'Unknown type to copy to ({}) for array of type {}.'
             msg = msg.format(dst.__class__.__name__, src.__class__.__name__)
             raise TypeError(msg)
 
+        if synchronize:
+            evt.wait()
+        else:
+            return evt
+
 
 # HELPER FUNCTIONS #
 ####################
@@ -518,7 +540,7 @@ class OpenClArrayBackend(ArrayBackend):
             if allow_none:
                 pass
             else:
-                msg='Got None argument and allow_none is not set.'
+                msg = 'Got None argument and allow_none is not set.'
                 raise ValueError(msg)
         elif np.isscalar(arg):
             dtype = get_dtype(arg)
@@ -531,29 +553,29 @@ class OpenClArrayBackend(ArrayBackend):
             ctype = arg.ctype()
             argument = '__global {} {}* {}'.format(ctype, const, argname)
         elif isinstance(arg, Array):
-            msg='Argument \'{}\' is an Array but not an OpenClArray '
-            msg+='(size={}, shape={}, dtype={}).'
-            msg=msg.format(argname, arg.size, arg.shape, arg.dtype)
+            msg = 'Argument \'{}\' is an Array but not an OpenClArray '
+            msg += '(size={}, shape={}, dtype={}).'
+            msg = msg.format(argname, arg.size, arg.shape, arg.dtype)
             raise TypeError(msg)
         elif isinstance(arg, np.ndarray):
-            if arg.size==1:
+            if arg.size == 1:
                 dtype = arg.dtype
                 ctype = clTools.dtype_to_ctype(arg.dtype)
                 argument = '{} {} {}'.format(const, ctype, argname)
                 is_scalar = True
             else:
-                msg='Argument \'{}\' is a non-scalar np.ndarray (size={}, shape={}, dtype={}).'
-                msg=msg.format(argname, arg.size, arg.shape, arg.dtype)
+                msg = 'Argument \'{}\' is a non-scalar np.ndarray (size={}, shape={}, dtype={}).'
+                msg = msg.format(argname, arg.size, arg.shape, arg.dtype)
                 raise TypeError(msg)
         else:
-            msg='Unknown argument type {} (value={}).'.format(arg.__class__, arg)
+            msg = 'Unknown argument type {} (value={}).'.format(arg.__class__, arg)
             raise TypeError(msg)
         return arg, argument, is_scalar, dtype
 
-    def format_kernel_args(self, kargs, argprefix, argflags, 
-            argcast=None, dtype=None, is_output=False,
-            arguments=None, const=False, filter_expr=None):
-        
+    def format_kernel_args(self, kargs, argprefix, argflags,
+                           argcast=None, dtype=None, is_output=False,
+                           arguments=None, const=False, filter_expr=None):
+
         if argflags is not None:
             assert 'has_fp16' in argflags
             assert 'has_fp64' in argflags
@@ -561,21 +583,20 @@ class OpenClArrayBackend(ArrayBackend):
             filter_expr = dict()
         else:
             self._check_argtype('format_kernel_args', 'filter_expr', filter_expr, dict)
-        
+
         if not isinstance(argcast, tuple):
             assert not isinstance(argcast, set) and not isinstance(argcast, list)
             argcast = (argcast,)*len(kargs)
         else:
-            if len(argcast)!=len(kargs):
+            if len(argcast) != len(kargs):
                 msg = 'Allocation dtypes input length mismatch:'
-                msg+='\n\tkargs: {}\n\targcast={}\n'
-                msg=msg.format(kargs, argcast)
+                msg += '\n\tkargs: {}\n\targcast={}\n'
+                msg = msg.format(kargs, argcast)
                 raise ValueError(msg)
-        
 
         # find common floating point type
         fdtypes = []
-        for i,arg in enumerate(kargs):
+        for i, arg in enumerate(kargs):
             if (argcast[i] == 'f') or (argcast[i] == 'c'):
                 cast_dtype = match_dtype(arg, argcast[i])
                 if is_complex(cast_dtype):
@@ -584,51 +605,51 @@ class OpenClArrayBackend(ArrayBackend):
                     fdtypes.append(cast_dtype)
         if fdtypes:
             fcast_dtype = find_common_dtype(*tuple(fdtypes))
-            #TODO fix np.float16 and np.longdouble support 
-            if fcast_dtype==np.float16:
-                fcast_dtype=np.float32
-            if fcast_dtype==np.longdouble:
-                fcast_dtype=np.float64
+            # TODO fix np.float16 and np.longdouble support
+            if fcast_dtype == np.float16:
+                fcast_dtype = np.float32
+            if fcast_dtype == np.longdouble:
+                fcast_dtype = np.float64
         else:
             fcast_dtype = np.float64
         ccast_dtype = float_to_complex_dtype(fcast_dtype)
         del fdtypes
 
         if (arguments is None):
-            arguments = [None,] * len(kargs)
+            arguments = [None, ] * len(kargs)
         else:
             self._check_argtype('format_kernel_args', 'arguments', arguments, list)
-            if len(kargs)!=len(arguments):
+            if len(kargs) != len(arguments):
                 msg = 'Argument count mismatch:\n\tkargs: {}\n\targuments={}\n'
-                msg=msg.format(kargs, arguments)
+                msg = msg.format(kargs, arguments)
                 raise ValueError(msg)
 
         kargs = list(kargs)
-        for i,arg in enumerate(kargs):
-            argname = '{}{}'.format(argprefix,i)
+        for i, arg in enumerate(kargs):
+            argname = '{}{}'.format(argprefix, i)
             arg, argument, is_scalar, dtype = self.format_kernel_arg(arg,
-                        argname=argname, const=const)
+                                                                     argname=argname, const=const)
             kargs[i] = arg
             if (arguments[i] is None):
                 arguments[i] = argument
             if is_output:
-                affectation = '{}\[i\]\s*=\s*([\s\S]*?)\s*$'.format(argname)
+                affectation = r'{}\[i\]\s*=\s*([\s\S]*?)\s*$'.format(argname)
                 affectation = re.compile(affectation)
             is_cast = (argcast[i] is not None)
-            for (k,v) in filter_expr.iteritems():
+            for (k, v) in filter_expr.items():
                 expr = filter_expr[k]
                 if expr is None:
                     continue
                 argi = argname+'[i]'
-                if is_output: #cast to nearest output value dtype
+                if is_output:  # cast to nearest output value dtype
                     if not is_complex(dtype):
                         ctype = clTools.dtype_to_ctype(dtype)
-                        convert = '{}=convert_{}_rte(\\1)'.format(argi,ctype)
+                        convert = '{}=convert_{}_rte(\\1)'.format(argi, ctype)
                         expr = expr.split(';')
-                        for i,subexpr in enumerate(expr):
-                            expr[i] = re.sub(affectation,convert,subexpr)
+                        for i, subexpr in enumerate(expr):
+                            expr[i] = re.sub(affectation, convert, subexpr)
                         expr = ';'.join(expr)
-                elif is_cast: #input variable should be cast
+                elif is_cast:  # input variable should be cast
                     if (is_complex(dtype)) or (argcast[i] == 'c'):
                         cast_dtype = ccast_dtype
                     elif (is_fp(dtype)) or (argcast[i] == 'f'):
@@ -641,20 +662,20 @@ class OpenClArrayBackend(ArrayBackend):
                             ctype = clTools.dtype_to_ctype(ftype)
                             if is_complex(dtype):
                                 argc = 'c{}_new({argi}.real,{argi}.imag)'.format(ctype,
-                                        argi=argi)
+                                                                                 argi=argi)
                             else:
                                 if dtype != ftype:
                                     argc = 'convert_{}({})'.format(ctype, argi)
                                 else:
                                     argc = argi
-                                argc = 'c{}_fromreal({})'.format(ctype,argc)
+                                argc = 'c{}_fromreal({})'.format(ctype, argc)
                             expr = expr.replace(argi, argc)
                         else:
                             ctype = clTools.dtype_to_ctype(cast_dtype)
                             argc = 'convert_{}({})'.format(ctype, argi)
                             expr = expr.replace(argi, argc)
                 if is_scalar:
-                    expr = expr.replace(argi,argname)
+                    expr = expr.replace(argi, argname)
                 filter_expr[k] = expr
             if argflags is not None:
                 if (dtype == np.float16):
@@ -663,36 +684,36 @@ class OpenClArrayBackend(ArrayBackend):
                     argflags['has_fp64'] = True
         kargs, _ = self._prepare_args(*kargs)
         return kargs, arguments
-    
+
     @classmethod
     def complex_fn(cls, fname, handle):
         """
         Return corresponding complex function name from pyopencl-complex.h
         matching handle complex datatype.
         """
-        #see pyopencl/cl/pyopencl-complex.h
+        # see pyopencl/cl/pyopencl-complex.h
         assert is_complex(handle)
         if fname not in ['real', 'imag', 'abs', 'abs_squared',
-                         'new', 'fromreal', 
-                         'neg', 'conj', 'add', 'addr', 
+                         'new', 'fromreal',
+                         'neg', 'conj', 'add', 'addr',
                          'radd', 'sub', 'mul', 'mulr', 'rdivide',
-                         'divide', 'divider', 'pow', 'powr', 'rpow', 
+                         'divide', 'divider', 'pow', 'powr', 'rpow',
                          'sqrt', 'exp', 'log', 'sin', 'cos', 'tan',
                          'sinh', 'cosh', 'tanh']:
-            msg='The function \'{}\' has not been implemented for complex numbers, '
-            msg+='see \'{}/cl/pyopencl-complex.h\' for more informations on '
-            msg+='available methods.'
+            msg = 'The function \'{}\' has not been implemented for complex numbers, '
+            msg += 'see \'{}/cl/pyopencl-complex.h\' for more informations on '
+            msg += 'available methods.'
             msg = msg.format(fname, os.path.dirname(cl.__file__))
             raise NotImplementedError(msg)
-        
+
         dtype = get_dtype(handle)
         if dtype not in [np.complex64, np.complex128]:
-            msg='{} complex type has not been implemented yet.'
-            msg=msg.format(dtype)
+            msg = '{} complex type has not been implemented yet.'
+            msg = msg.format(dtype)
             raise NotImplementedError(msg)
         ftype = complex_to_float_dtype(dtype)
         ctype = clTools.dtype_to_ctype(ftype)
-        prefix='c{}_'.format(ctype)
+        prefix = 'c{}_'.format(ctype)
 
         return prefix+fname
 
@@ -700,99 +721,98 @@ class OpenClArrayBackend(ArrayBackend):
     def binary_complex_fn(cls, fname, x0, x1):
         assert is_complex(x0) or is_complex(x1)
         if not is_complex(x0):
-            fname='r'+fname
+            fname = 'r'+fname
         elif not is_complex(x1):
-            fname=fname+'r'
-        
+            fname = fname+'r'
+
         # rsub and subr do not exist in complex header
         if (fname == 'subr'):
-            operands_prefix=('','-')
+            operands_prefix = ('', '-')
         elif (fname == 'rsub'):
-            operands_prefix=('', cls.complex_fn('neg',x1))
+            operands_prefix = ('', cls.complex_fn('neg', x1))
         else:
-            operands_prefix=('','')
+            operands_prefix = ('', '')
 
-        ftype = find_common_dtype(x0,x1)
+        ftype = find_common_dtype(x0, x1)
         cmplx_fname = cls.complex_fn(fname, ftype)
 
-        expr='{}({}(x0[i]), {}(x1[i]))'
+        expr = '{}({}(x0[i]), {}(x1[i]))'
         expr = expr.format(cmplx_fname, *operands_prefix)
-        
+
         # fix 0^0 and 0^x
-        if fname.find('pow')>=0:
-            default=expr
-            expr0='{}(1)'.format(cls.complex_fn('fromreal', ftype))
-            expr1='{}(0)'.format(cls.complex_fn('fromreal', ftype))
-            expr2='{}(NAN,NAN)'.format(cls.complex_fn('new', ftype))
-            if fname=='rpow':
-                cond0='(x0[i]==0)'
-                cond1='((x1[i].real==0) && (x1[i].imag==0))'
-            elif fname=='powr':
-                cond0='((x0[i].real==0) && (x0[i].imag==0))'
-                cond1='(x1[i]==0)'
-            elif fname=='pow':
-                cond0='((x0[i].real==0) && (x0[i].imag==0))'
-                cond1='(x1[i].imag==0)'
-                cond2='(x1[i].real==0)'
-            expr='({cond0} ? ({cond1} ? ({cond2} ? {expr0} : {expr1}) : {expr2}) : {default})'.format(
-                    cond0=cond0, cond1=cond1, cond2=cond2,
-                    expr0=expr0, expr1=expr1, expr2=expr2,
-                    default=default)
+        if fname.find('pow') >= 0:
+            default = expr
+            expr0 = '{}(1)'.format(cls.complex_fn('fromreal', ftype))
+            expr1 = '{}(0)'.format(cls.complex_fn('fromreal', ftype))
+            expr2 = '{}(NAN,NAN)'.format(cls.complex_fn('new', ftype))
+            if fname == 'rpow':
+                cond0 = '(x0[i]==0)'
+                cond1 = '((x1[i].real==0) && (x1[i].imag==0))'
+            elif fname == 'powr':
+                cond0 = '((x0[i].real==0) && (x0[i].imag==0))'
+                cond1 = '(x1[i]==0)'
+            elif fname == 'pow':
+                cond0 = '((x0[i].real==0) && (x0[i].imag==0))'
+                cond1 = '(x1[i].imag==0)'
+                cond2 = '(x1[i].real==0)'
+            expr = '({cond0} ? ({cond1} ? ({cond2} ? {expr0} : {expr1}) : {expr2}) : {default})'.format(
+                cond0=cond0, cond1=cond1, cond2=cond2,
+                expr0=expr0, expr1=expr1, expr2=expr2,
+                default=default)
 
         expr = 'y0[i]={}'.format(expr)
         return expr
 
-    
     def elementwise(self, ikargs, okargs, extra_kargs=None,
-            input_arguments=None, output_arguments=None, extra_arguments=None,
-            filter_expr=None, 
-            dtype=None, infer_dtype=False, 
-            queue=None, infer_queue=False, 
-            async=False, convert_inputs=None,
-            allocator=None, alloc_shapes=None, alloc_dtypes=None,
-            Kernel=_ElementwiseKernel, 
-            kernel_build_kwargs=None, kernel_call_kwargs=None,
-            build_kernel_launcher=False):
-        """
+                    input_arguments=None, output_arguments=None, extra_arguments=None,
+                    filter_expr=None,
+                    dtype=None, infer_dtype=False,
+                    queue=None, infer_queue=False,
+                    synchronize=True, convert_inputs=None,
+                    allocator=None, alloc_shapes=None, alloc_dtypes=None,
+                    Kernel=_ElementwiseKernel,
+                    kernel_build_kwargs=None, kernel_call_kwargs=None,
+                    build_kernel_launcher=False):
+        r"""
         Build and call kernel that takes:
                  n read-only scalars or OpenClArray as input arguments xi,
                  m OpenClArray as output arguments yi,
                  k read-only parameters pi
         and performs an operation specified as a snippet of C99 on these arguments,
-        depending on passed Kernel class and additional arguments passed 
+        depending on passed Kernel class and additional arguments passed
         in kernel_build_kwargs and kernel_call_kwargs.
-        
+
         This method performs:
             y0[i],...,ym[i] = f( x0[i],...,xn[i] ; p0,...,pk )
-            
+
             1) outputs allocation
             2) expression and arguments preprocessing
             3) kernel creation and compilation:
                 kernel = Kernel(**kernel_build_kwargs)
             4) kernel call:
                 return kernel(**kernel_call_kwargs)
-                
+
             The OpenCL kernel signature should match something like this:
                     kernel_name(*y0,...,*ym, const *x0,...,const *xn, const p0, ... const pk)
-            
+
         Kernel arguments:
-            For all input and output arguments, if they are OpenClArray, all shapes 
+            For all input and output arguments, if they are OpenClArray, all shapes
             and order should match or an error is raised.
 
-            If one of the output arg is set to None, it is allocated by the 
+            If one of the output arg is set to None, it is allocated by the
             given allocator, dtype and queue with the global shape and order.
-            
+
             Allocation shape can be overidden by using the argument 'alloc_shapes' which
             should be a list of the same size as okargs (output arguments) or None.
             If alloc_shapes is not None at allocated variable index, this shape is used
             instead of the common operand shape.
 
-            If infer_dtype is set to True and dtype is None, every None output argument will 
+            If infer_dtype is set to True and dtype is None, every None output argument will
             be allocated with the first OpenClArray input dtype contained in ikargs (input
             arguments). If there is no input OpenClArray the lookup is extended to okargs
-            (output arguments). This can be overhidden using alloc_dtypes witch should be 
+            (output arguments). This can be overridden using alloc_dtypes which should be
             a list of size of the output argument.
-        
+
             Default arguments names (generated in opencl kernel) are:
                 x0,x1,...,xn for inputs     (     ikargs -> input_arguments)
                 y0,y1,...,ym for outputs    (     okargs -> output_arguments)
@@ -802,16 +822,16 @@ class OpenClArrayBackend(ArrayBackend):
             input  arguments (x0,...,xn) can be scalars or OpenClArrays.
             output arguments (y0,...,ym) can only be OpenClArrays.
             extra  arguments (p0,...,pk) can be anything compatible with pyopencl.
-        
+
         Expression filtering:
             filter_expr is the dictionary of expr_name (str) -> expression (str)
             Expressions in filter_expr are filtered according the following rule:
-                If input xi is a scalar, elementwise access 'x[i]' is replaced by 'x' 
+                If input xi is a scalar, elementwise access 'x[i]' is replaced by 'x'
                 in given 'expr'.
             This allows broadcasting of scalars in operators.
-            
-            Filtered expressions are added to kernel_build_kwargs entries prior 
-            to kernel construction. This implies that kernel_build_kwargs keys and 
+
+            Filtered expressions are added to kernel_build_kwargs entries prior
+            to kernel construction. This implies that kernel_build_kwargs keys and
             filter_expr keys should not clash.
 
         Kernel build and kernel call:
@@ -823,10 +843,10 @@ class OpenClArrayBackend(ArrayBackend):
             User supplied 'kernel_build_kwargs' are completed with:
                 - all filtered expressions contained in dictionnary 'filter_expr'
                 - kernel_build_kwargs['context']   = context corresponding to queue
-                - kernel_build_kwargs['arguments'] = preprocessed arguments 
+                - kernel_build_kwargs['arguments'] = preprocessed arguments
                                                             (output+input+params)
                 - kernel_build_kwargs['dtype']     = dtype
-                
+
             User supplied 'kernel_call_kwargs' are completed with:
                 - kernel_call_kwargs['queue'] = queue
                 - kernel_call_kwargs['args']  = kargs (okargs+ikargs+extra_kargs), preprocessed.
@@ -839,49 +859,49 @@ class OpenClArrayBackend(ArrayBackend):
             If there is a key clash in those dictionaries, the method will fail.
 
         Queue priorities:
-             queue (function argument), 
+             queue (function argument),
              if infer_queue is set to True:
-                 x0.default_queue, ..., xn.default_queue 
+                 x0.default_queue, ..., xn.default_queue
                  y0.default_queue, ..., ym.default_queue
              /!\ default_cl_queue is not used in this function.
-        
+
         Call and events:
             wait_for specify a list a event to wait prior applying the kernel.
 
-            If async is set to False, this is a blocking opencl call and this functions returns 
+            If synchronize is set, this is a blocking opencl call and this function returns
             output arguments okargs as a tuple unless there is only one output.
-            
-            If async is set to True, this is a non blocking call and an event is returned in 
+
+            If synchronize is not set, this is a non-blocking call and an event is returned in
             addition to the outputs as last argument.
-            
-            The returned event may not be used for profiling purposes, because it only 
-            covers the last effective kernel call (reductions may use two kernels 
+
+            The returned event may not be used for profiling purposes, because it only
+            covers the last effective kernel call (reductions may use two kernels
             for example).
         """
         from hysop.core.arrays.all import OpenClArray
-        
-        extra_kargs         = extra_kargs         or tuple()
-        kernel_build_kwargs = kernel_build_kwargs or dict()
-        kernel_call_kwargs  = kernel_call_kwargs  or dict()
-        
+
+        extra_kargs = first_not_None(extra_kargs, ())
+        kernel_build_kwargs = first_not_None(kernel_build_kwargs, {})
+        kernel_call_kwargs = first_not_None(kernel_call_kwargs, {})
+
         self._check_argtype('elementwise', 'ikargs', ikargs, tuple)
         self._check_argtype('elementwise', 'okargs', okargs, tuple)
         self._check_argtype('elementwise', 'extra_kargs', extra_kargs, tuple)
         self._check_argtype('elementwise', 'kernel_build_kwargs', kernel_build_kwargs, dict)
         self._check_argtype('elementwise', 'kernel_call_kwargs', kernel_call_kwargs, dict)
-        
+
         if (alloc_dtypes is None):
             alloc_dtypes = (None,)*len(okargs)
         elif isinstance(alloc_dtypes, str):
             alloc_dtypes = (alloc_dtypes,)*len(okargs)
         else:
             self._check_argtype('elementwise', 'alloc_dtype', alloc_dtypes, tuple)
-            if len(alloc_dtypes)!=len(okargs):
+            if len(alloc_dtypes) != len(okargs):
                 msg = 'Allocation dtypes input length mismatch:'
-                msg+='\n\tokargs: {}\n\talloc_dtypes={}\n'
-                msg=msg.format(okargs, alloc_dtypes)
+                msg += '\n\tokargs: {}\n\talloc_dtypes={}\n'
+                msg = msg.format(okargs, alloc_dtypes)
                 raise ValueError(msg)
-        
+
         alloc_shapes = alloc_shapes or (None,)*len(okargs)
         self._check_argtype('elementwise', 'alloc_shapes', alloc_shapes, tuple)
 
@@ -897,86 +917,86 @@ class OpenClArrayBackend(ArrayBackend):
                     shape = arg.shape
                     order = arg.order
                 elif arg.shape != shape:
-                    msg='{} with shape {} does not match reference shape {}.'
-                    msg=msg.format(arg,args.shape,shape)
+                    msg = '{} with shape {} does not match reference shape {}.'
+                    msg = msg.format(arg, args.shape, shape)
                 elif arg.order != order:
-                    msg='{} with order {} does not match reference order {}.'
-                    msg=msg.format(arg,args.order,order)
+                    msg = '{} with order {} does not match reference order {}.'
+                    msg = msg.format(arg, args.order, order)
         if (shape is None):
-            msg='No OpenClArray argument was found in ikargs and okargs.'
+            msg = 'No OpenClArray argument was found in ikargs and okargs.'
             raise ValueError(msg)
         if (queue is None):
-            msg='Queue has not been specified.'
+            msg = 'Queue has not been specified.'
             raise ValueError(msg)
-        
-        alloc_dtypes = tuple([match_dtype(dtype,alloc_dtypes[i]) \
-                for i in xrange(len(okargs))])
-    
+
+        alloc_dtypes = tuple([match_dtype(dtype, alloc_dtypes[i])
+                              for i in range(len(okargs))])
+
         okargs = list(okargs)
-        for i, (arg, _shape, _dtype) in enumerate(zip(okargs,alloc_shapes,alloc_dtypes)):
+        for i, (arg, _shape, _dtype) in enumerate(zip(okargs, alloc_shapes, alloc_dtypes)):
             if (arg is None):
                 alloc_shape = first_not_None(_shape, shape)
                 alloc_dtype = first_not_None(_dtype, dtype)
                 if (alloc_dtype is None):
-                    msg='Output argument y{} has not been allocated and dtype was'
-                    msg+=' not set (dtype={}, infer_dtype={}, alloc_dtypes={}).'
-                    msg=msg.format(i,dtype,infer_dtype,alloc_dtypes)
+                    msg = 'Output argument y{} has not been allocated and dtype was'
+                    msg += ' not set (dtype={}, infer_dtype={}, alloc_dtypes={}).'
+                    msg = msg.format(i, dtype, infer_dtype, alloc_dtypes)
                     raise ValueError(msg)
                 if (order is None):
-                    msg='Output argument y{} has not been allocated and order was'
-                    msg+=' not set.'
-                    msg=msg.format(i)
+                    msg = 'Output argument y{} has not been allocated and order was'
+                    msg += ' not set.'
+                    msg = msg.format(i)
                     raise ValueError(msg)
-                okargs[i] = self.empty(shape=alloc_shape, dtype=alloc_dtype, 
-                                        queue=queue, order=order)
+                okargs[i] = self.empty(shape=alloc_shape, dtype=alloc_dtype,
+                                       queue=queue, order=order)
         okargs = tuple(okargs)
 
         argflags = {
             'has_fp16': (dtype == np.float16),
             'has_fp64': (dtype == np.float64)
         }
-    
+
         ikargs, input_arguments = \
-                self.format_kernel_args(kargs=ikargs, argprefix='x', is_output=False,
-                         arguments=input_arguments, filter_expr=filter_expr,
-                         const=True, argflags=argflags, 
-                         argcast=convert_inputs, dtype=dtype)
-        
+            self.format_kernel_args(kargs=ikargs, argprefix='x', is_output=False,
+                                    arguments=input_arguments, filter_expr=filter_expr,
+                                    const=True, argflags=argflags,
+                                    argcast=convert_inputs, dtype=dtype)
+
         okargs, output_arguments = \
-                self.format_kernel_args(kargs=okargs, argprefix='y', is_output=True,
-                         arguments=output_arguments, filter_expr=filter_expr,
-                         const=False, argflags=argflags)
-        
+            self.format_kernel_args(kargs=okargs, argprefix='y', is_output=True,
+                                    arguments=output_arguments, filter_expr=filter_expr,
+                                    const=False, argflags=argflags)
+
         extra_kargs, extra_arguments = \
-                self.format_kernel_args(kargs=extra_kargs, argprefix='p', is_output=False,
-                         arguments=extra_arguments,
-                         const=True, argflags=argflags)
+            self.format_kernel_args(kargs=extra_kargs, argprefix='p', is_output=False,
+                                    arguments=extra_arguments,
+                                    const=True, argflags=argflags)
 
-        kargs     = okargs + ikargs + extra_kargs
+        kargs = okargs + ikargs + extra_kargs
         arguments = output_arguments + input_arguments + extra_arguments
         arguments = ', '.join(arguments)
-        
+
         # kernel build keyword arguments
         assert not set(kernel_build_kwargs.keys()).intersection(filter_expr.keys())
         kernel_build_kwargs.update(filter_expr)
 
-        blacklist = ['context', 'arguments','dtype','argflags']
+        blacklist = ['context', 'arguments', 'dtype', 'argflags']
         assert not set(kernel_build_kwargs.keys()).intersection(blacklist)
 
-        kernel_build_kwargs['context']   = queue.context
+        kernel_build_kwargs['context'] = queue.context
         kernel_build_kwargs['arguments'] = arguments
-        kernel_build_kwargs['dtype']     = dtype
-        kernel_build_kwargs['argflags']  = argflags
-        
+        kernel_build_kwargs['dtype'] = dtype
+        kernel_build_kwargs['argflags'] = argflags
+
         # kernel call keyword arguments
-        blacklist = ['queue', 'args','iargs','oargs','pargs']
+        blacklist = ['queue', 'args', 'iargs', 'oargs', 'pargs']
         assert not set(kernel_call_kwargs.keys()).intersection(blacklist)
         kernel_call_kwargs['queue'] = queue
-        kernel_call_kwargs['args']  = kargs
+        kernel_call_kwargs['args'] = kargs
         kernel_call_kwargs['iargs'] = ikargs
         kernel_call_kwargs['oargs'] = okargs
         kernel_call_kwargs['pargs'] = extra_kargs
-                   
+
         try:
             knl = Kernel(**kernel_build_kwargs)
             ret = knl(**kernel_call_kwargs)
@@ -985,34 +1005,35 @@ class OpenClArrayBackend(ArrayBackend):
                 if val.__class__ in [list, tuple, set]:
                     out = '{}[{}]'.format(val.__class__.__name__,
                                           ', '.join([val2str(v) for v in val]))
-                elif isinstance(val,Array):
-                    out= '{}(shape={}, dtype={}, order={}, strides={}, offset={})'
-                    out=out.format(val.__class__.__name__, val.shape, 
-                                   val.dtype, val.order, val.strides, val.offset)
+                elif isinstance(val, Array):
+                    out = '{}(shape={}, dtype={}, order={}, strides={}, offset={})'
+                    out = out.format(val.__class__.__name__, val.shape,
+                                     val.dtype, val.order, val.strides, val.offset)
                 elif val.__class__ in self._registered_backends().keys():
-                    out= '{}(shape={}, dtype={}, strides={})'
-                    out=out.format(val.__class__.__name__, val.shape, 
-                                   val.dtype, val.strides)
+                    out = '{}(shape={}, dtype={}, strides={})'
+                    out = out.format(val.__class__.__name__, val.shape,
+                                     val.dtype, val.strides)
                 else:
-                    out=val
+                    out = val
                 return out
+
             def dic2str(dic):
-                entries=[]
-                for k,v in dic.iteritems():
+                entries = []
+                for k, v in dic.items():
                     e = '{}: {}'.format(k, val2str(v))
                     entries.append(e)
                 return '\n\t'+'\n\t'.join(entries)
             msg = 'Something went wrong during kernel build or kernel call:'
-            msg+= '\n  build kargs:{}'.format(dic2str(kernel_build_kwargs))
-            msg+= '\n  call kargs:{}'.format(dic2str(kernel_call_kwargs))
-            msg+= '\n'
-            print msg
+            msg += '\n  build kargs:{}'.format(dic2str(kernel_build_kwargs))
+            msg += '\n  call kargs:{}'.format(dic2str(kernel_call_kwargs))
+            msg += '\n'
+            print(msg)
             raise
-        
+
         if build_kernel_launcher:
             return knl.to_kernel_launcher(name=kernel_build_kwargs['name'],
-                    **kernel_call_kwargs)
-        
+                                          **kernel_call_kwargs)
+
         if isinstance(ret, tuple):
             evt = ret[-1]
         else:
@@ -1020,14 +1041,14 @@ class OpenClArrayBackend(ArrayBackend):
 
         okargs = self._return(okargs)
 
-        if not async:
+        if synchronize:
             evt.wait()
-            if len(okargs)==1:
+            if len(okargs) == 1:
                 return okargs[0]
             else:
                 return okargs
         else:
-            if len(okargs)==1:
+            if len(okargs) == 1:
                 return okargs[0], evt
             else:
                 return okargs, evt
@@ -1035,11 +1056,11 @@ class OpenClArrayBackend(ArrayBackend):
     def nary_op(self, ikargs, okargs, operation,
                 extra_kargs=None, extra_arguments=None,
                 input_arguments=None, output_arguments=None,
-                dtype=None, infer_dtype=True, 
-                queue=None, infer_queue=True, 
+                dtype=None, infer_dtype=True,
+                queue=None, infer_queue=True,
                 allocator=None,
                 alloc_shapes=None, alloc_dtypes=None,
-                async=False, wait_for=None,
+                synchronize=True, wait_for=None,
                 convert_inputs=None,
                 build_kernel_launcher=False,
                 name='nary_op', options=[], preamble=''):
@@ -1047,20 +1068,20 @@ class OpenClArrayBackend(ArrayBackend):
         A kernel that takes n vector as input and outputs m vectors.
         The operation is specified as a snippet of C99 on these arguments.
         Default arguments names are x0,x1,...,xn and y0,y1,...,ym.
-        
-        Queue priorities: queue (function argument), 
+
+        Queue priorities: queue (function argument),
                           if infer_queue:
-                              x0.default_queue, ..., xn.default_queue, 
-                              y0.default_queue, ..., yn.default_queue, 
+                              x0.default_queue, ..., xn.default_queue,
+                              y0.default_queue, ..., yn.default_queue,
 
         For all input and output arguments, if they are OpenClArray, all shapes should match.
         If out is None, it is allocated to input shape with specified dtype.
-        
+
         y0[i] = f( x0[i],...,xn[i] ; p0,...,pk )
         <=>
         y0[i] = ElementwiseKernel(const x0[i],...,const xn[i], const p0,..., const pk)
 
-        If async is set to True, evt is returned as last argument.
+        If synchronize is set to False, evt is returned as the last argument.
         If out and dtype is None it is set to xi.dtype where xi is the first OpenClArray
 
         options are opencl kernel build options.
@@ -1078,79 +1099,78 @@ class OpenClArrayBackend(ArrayBackend):
         filter_expr = {
             'operation': operation
         }
-        return self.elementwise(ikargs=ikargs, okargs=okargs, 
-            extra_kargs=extra_kargs, extra_arguments=extra_arguments,
-            input_arguments=input_arguments, output_arguments=output_arguments,
-            filter_expr=filter_expr, 
-            dtype=dtype, infer_dtype=infer_dtype, 
-            queue=queue, infer_queue=infer_queue, 
-            async=async, 
-            allocator=allocator, convert_inputs=convert_inputs, 
-            alloc_shapes=alloc_shapes, alloc_dtypes=alloc_dtypes,
-            Kernel=Kernel,
-            kernel_build_kwargs= kernel_build_kwargs, 
-            kernel_call_kwargs = kernel_call_kwargs,
-            build_kernel_launcher=build_kernel_launcher)
-
-    def unary_op(self, x0, expr, out=None, 
-                extra_kargs=None, extra_arguments=None,
-                input_arguments=None, output_arguments=None,
-                dtype=None, infer_dtype=True,
-                queue=None, infer_queue=True, 
-                convert_inputs=None,
-                allocator=None,
-                alloc_shapes=None, alloc_dtypes=None,
-                async=False, wait_for=None,
-                build_kernel_launcher=False,
-                name='unary_op', options=[], preamble=''):
+        return self.elementwise(ikargs=ikargs, okargs=okargs,
+                                extra_kargs=extra_kargs, extra_arguments=extra_arguments,
+                                input_arguments=input_arguments, output_arguments=output_arguments,
+                                filter_expr=filter_expr,
+                                dtype=dtype, infer_dtype=infer_dtype,
+                                queue=queue, infer_queue=infer_queue,
+                                synchronize=synchronize,
+                                allocator=allocator, convert_inputs=convert_inputs,
+                                alloc_shapes=alloc_shapes, alloc_dtypes=alloc_dtypes,
+                                Kernel=Kernel,
+                                kernel_build_kwargs=kernel_build_kwargs,
+                                kernel_call_kwargs=kernel_call_kwargs,
+                                build_kernel_launcher=build_kernel_launcher)
+
+    def unary_op(self, x0, expr, out=None,
+                 extra_kargs=None, extra_arguments=None,
+                 input_arguments=None, output_arguments=None,
+                 dtype=None, infer_dtype=True,
+                 queue=None, infer_queue=True,
+                 convert_inputs=None,
+                 allocator=None,
+                 alloc_shapes=None, alloc_dtypes=None,
+                 synchronize=True, wait_for=None,
+                 build_kernel_launcher=False,
+                 name='unary_op', options=[], preamble=''):
 
         return self.nary_op(ikargs=(x0,), okargs=(out,), operation=expr,
-                    extra_kargs=extra_kargs, extra_arguments=extra_arguments,
-                    input_arguments=input_arguments, output_arguments=output_arguments,
-                    dtype=dtype, infer_dtype=infer_dtype,
-                    queue=queue, infer_queue=infer_queue,
-                    allocator=allocator, convert_inputs=convert_inputs,
-                    alloc_shapes=alloc_shapes, alloc_dtypes=alloc_dtypes,
-                    async=async, wait_for=wait_for, 
-                    build_kernel_launcher=build_kernel_launcher,
-                    name=name, options=options, preamble=preamble)
-    
+                            extra_kargs=extra_kargs, extra_arguments=extra_arguments,
+                            input_arguments=input_arguments, output_arguments=output_arguments,
+                            dtype=dtype, infer_dtype=infer_dtype,
+                            queue=queue, infer_queue=infer_queue,
+                            allocator=allocator, convert_inputs=convert_inputs,
+                            alloc_shapes=alloc_shapes, alloc_dtypes=alloc_dtypes,
+                            synchronize=synchronize, wait_for=wait_for,
+                            build_kernel_launcher=build_kernel_launcher,
+                            name=name, options=options, preamble=preamble)
+
     def binary_op(self, x0, x1, expr, out=None,
-                    extra_kargs=None, extra_arguments=None,
-                    input_arguments=None, output_arguments=None,
-                    dtype=None, infer_dtype=True, 
-                    queue=None, infer_queue=True, 
-                    convert_inputs=None,
-                    allocator=None,
-                    alloc_shapes=None, alloc_dtypes=None,
-                    async=False, wait_for=None,
-                    build_kernel_launcher=False,
-                    name='binary_op', options=[], preamble=''):
-        
-        return self.nary_op(ikargs=(x0,x1), okargs=(out,), operation=expr,
-                    extra_kargs=extra_kargs, extra_arguments=extra_arguments,
-                    input_arguments=input_arguments, output_arguments=output_arguments,
-                    dtype=dtype, infer_dtype=infer_dtype,
-                    queue=queue, infer_queue=infer_queue,
-                    allocator=allocator, convert_inputs=convert_inputs,
-                    alloc_shapes=alloc_shapes, alloc_dtypes=alloc_dtypes,
-                    async=async, wait_for=wait_for,
-                    build_kernel_launcher=build_kernel_launcher,
-                    name=name, options=options, preamble=preamble)
-            
-    
+                  extra_kargs=None, extra_arguments=None,
+                  input_arguments=None, output_arguments=None,
+                  dtype=None, infer_dtype=True,
+                  queue=None, infer_queue=True,
+                  convert_inputs=None,
+                  allocator=None,
+                  alloc_shapes=None, alloc_dtypes=None,
+                  synchronize=True, wait_for=None,
+                  build_kernel_launcher=False,
+                  name='binary_op', options=[], preamble=''):
+
+        return self.nary_op(ikargs=(x0, x1), okargs=(out,), operation=expr,
+                            extra_kargs=extra_kargs, extra_arguments=extra_arguments,
+                            input_arguments=input_arguments, output_arguments=output_arguments,
+                            dtype=dtype, infer_dtype=infer_dtype,
+                            queue=queue, infer_queue=infer_queue,
+                            allocator=allocator, convert_inputs=convert_inputs,
+                            alloc_shapes=alloc_shapes, alloc_dtypes=alloc_dtypes,
+                            synchronize=synchronize, wait_for=wait_for,
+                            build_kernel_launcher=build_kernel_launcher,
+                            name=name, options=options, preamble=preamble)
+
     def reduce(self, kargs, neutral, reduce_expr, map_expr='x0[i]',
-                axis=None, out=None,
-                extra_kargs=None,     extra_arguments=None,
-                input_arguments=None, output_arguments=None,
-                convert_inputs=None, alloc_dtypes=None,
-                dtype=None, infer_dtype=True, 
-                queue=None, infer_queue=True, 
-                allocator=None,
-                async=False, wait_for=None,
-                build_kernel_launcher=False,
-                name='reduction', options=[], preamble=''):
-        """ 
+               axis=None, out=None,
+               extra_kargs=None,     extra_arguments=None,
+               input_arguments=None, output_arguments=None,
+               convert_inputs=None, alloc_dtypes=None,
+               dtype=None, infer_dtype=True,
+               queue=None, infer_queue=True,
+               allocator=None,
+               synchronize=True, wait_for=None,
+               build_kernel_launcher=False,
+               name='reduction', options=[], preamble=''):
+        """
         Reduce arrays elements over the whole array.
         kargs = kernel array or scalar arguments (tuple)
         y0 = ReductionKernel( MAP_EXPR(const x0[i],...,const xn[i], const p0,..., const pk) )
@@ -1159,40 +1179,40 @@ class OpenClArrayBackend(ArrayBackend):
 
         if dtype is None:
             if any([is_signed(k) for k in kargs]):
-                dtype=np.int64
+                dtype = np.int64
             elif any([is_unsigned(k) for k in kargs]):
-                dtype=np.uint64
+                dtype = np.uint64
             else:
                 pass
 
-        map_expr    = map_expr.strip()
+        map_expr = map_expr.strip()
         reduce_expr = reduce_expr.strip()
-        
+
         input_dtype = find_common_dtype(*kargs)
         if is_complex(input_dtype):
             map_ops = {
-                    'a+b': 'add',
-                    'a-b': 'sub',
-                    'a*b': 'mul',
-                }
+                'a+b': 'add',
+                'a-b': 'sub',
+                'a*b': 'mul',
+            }
             if reduce_expr in map_ops:
-                neutral= '{}({})'.format(self.complex_fn('fromreal',input_dtype), neutral)
-                reduce_expr = '{}(a,b)'.format( self.complex_fn(map_ops[reduce_expr], input_dtype))
+                neutral = '{}({})'.format(self.complex_fn('fromreal', input_dtype), neutral)
+                reduce_expr = '{}(a,b)'.format(self.complex_fn(map_ops[reduce_expr], input_dtype))
 
-        self._unsupported_argument('reduce','axis',axis)
+        self._unsupported_argument('reduce', 'axis', axis)
         if (out is not None):
             self._check_argtype('reduce', 'out', out, OpenClArray)
             dtype = out.dtype
-            assert out.size==1
+            assert out.size == 1
             out = out.handle
 
-        Kernel=_ReductionKernel
+        Kernel = _ReductionKernel
         kernel_build_kwargs = {
-                'name':     name,
-                'options':  options,
-                'preamble': preamble,
-                'reduce_expr': reduce_expr,
-                'neutral': neutral,
+            'name':     name,
+            'options':  options,
+            'preamble': preamble,
+            'reduce_expr': reduce_expr,
+            'neutral': neutral,
         }
         kernel_call_kwargs = {
             'return_event': True,
@@ -1201,33 +1221,32 @@ class OpenClArrayBackend(ArrayBackend):
         filter_expr = {
             'map_expr': map_expr
         }
-        return self.elementwise(ikargs=kargs, okargs=(out,), 
-            extra_kargs=extra_kargs, extra_arguments=extra_arguments,
-            input_arguments=input_arguments, output_arguments=output_arguments,
-            filter_expr=filter_expr, 
-            dtype=dtype, infer_dtype=infer_dtype, 
-            queue=queue, infer_queue=infer_queue, 
-            async=async,
-            convert_inputs=convert_inputs, alloc_dtypes=alloc_dtypes,
-            allocator=allocator, alloc_shapes=((1,),),
-            Kernel=Kernel,
-            build_kernel_launcher=build_kernel_launcher,
-            kernel_build_kwargs= kernel_build_kwargs, 
-            kernel_call_kwargs = kernel_call_kwargs)
-    
-    
-    def nanreduce(self, kargs, neutral, reduce_expr, map_expr='x0[i]', 
-            axis=None, out=None,
-            extra_kargs=None,     extra_arguments=None,
-            input_arguments=None, output_arguments=None,
-            dtype=None, infer_dtype=True, 
-            queue=None, infer_queue=True, 
-            allocator=None,
-            convert_inputs=None,
-            async=False, wait_for=None,
-            build_kernel_launcher=False,
-            name='nan_reduction', options=[], preamble=''):
-        """ 
+        return self.elementwise(ikargs=kargs, okargs=(out,),
+                                extra_kargs=extra_kargs, extra_arguments=extra_arguments,
+                                input_arguments=input_arguments, output_arguments=output_arguments,
+                                filter_expr=filter_expr,
+                                dtype=dtype, infer_dtype=infer_dtype,
+                                queue=queue, infer_queue=infer_queue,
+                                synchronize=synchronize,
+                                convert_inputs=convert_inputs, alloc_dtypes=alloc_dtypes,
+                                allocator=allocator, alloc_shapes=((1,),),
+                                Kernel=Kernel,
+                                build_kernel_launcher=build_kernel_launcher,
+                                kernel_build_kwargs=kernel_build_kwargs,
+                                kernel_call_kwargs=kernel_call_kwargs)
+
+    def nanreduce(self, kargs, neutral, reduce_expr, map_expr='x0[i]',
+                  axis=None, out=None,
+                  extra_kargs=None,     extra_arguments=None,
+                  input_arguments=None, output_arguments=None,
+                  dtype=None, infer_dtype=True,
+                  queue=None, infer_queue=True,
+                  allocator=None,
+                  convert_inputs=None,
+                  synchronize=True, wait_for=None,
+                  build_kernel_launcher=False,
+                  name='nan_reduction', options=[], preamble=''):
+        """
         Reduce arrays elements over the whole array, replacing mapped expr NaNs by the neutral.
         dtype should be a floating point type.
         kargs = kernel args as a scalar or tuple
@@ -1235,8 +1254,8 @@ class OpenClArrayBackend(ArrayBackend):
         if any([is_complex(k) for k in kargs]):
             ctype = find_common_dtype(*kargs)
             nan_map_expr = '((isnan({expr}.real) || isnan({expr}.imag)) ? {neutral} : {expr})'
-            nan_map_expr = nan_map_expr.format(expr=map_expr, 
-                    neutral='{}({})'.format(self.complex_fn('fromreal',ctype), neutral))
+            nan_map_expr = nan_map_expr.format(expr=map_expr,
+                                               neutral='{}({})'.format(self.complex_fn('fromreal', ctype), neutral))
         elif any([is_fp(k) for k in kargs]):
             nan_map_expr = '(isnan({expr}) ? {neutral} : {expr})'
             nan_map_expr = nan_map_expr.format(expr=map_expr, neutral=neutral)
@@ -1244,70 +1263,69 @@ class OpenClArrayBackend(ArrayBackend):
             nan_map_expr = map_expr
 
         return self.reduce(kargs=kargs, neutral=neutral, convert_inputs=convert_inputs,
-                reduce_expr=reduce_expr, map_expr=nan_map_expr,
-                axis=axis, out=out, extra_kargs=extra_kargs, extra_arguments=extra_arguments,
-                input_arguments=input_arguments, output_arguments=output_arguments,
-                dtype=dtype, infer_dtype=infer_dtype, queue=queue, infer_queue=infer_queue,
-                allocator=allocator, async=async, wait_for=wait_for, 
-                build_kernel_launcher=build_kernel_launcher,
-                name=name, options=options, preamble=preamble)
-    
-    def generic_scan(self, kargs, neutral, scan_expr, output_statement, 
-                input_expr='x0[i]', size=None,
-                is_segment_start_expr=None, input_fetch_exprs=None,
-                axis=None, out=None,
-                extra_kargs=None,     extra_arguments=None,
-                input_arguments=None, output_arguments=None,
-                dtype=None, infer_dtype=True, 
-                queue=None, infer_queue=True, 
-                allocator=None,
-                convert_inputs=None,
-                async=False, wait_for=None,
-                build_kernel_launcher=False,
-                name='generic_scan', options=[], preamble=''):
-        """ 
+                           reduce_expr=reduce_expr, map_expr=nan_map_expr,
+                           axis=axis, out=out, extra_kargs=extra_kargs, extra_arguments=extra_arguments,
+                           input_arguments=input_arguments, output_arguments=output_arguments,
+                           dtype=dtype, infer_dtype=infer_dtype, queue=queue, infer_queue=infer_queue,
+                           allocator=allocator, synchronize=synchronize, wait_for=wait_for,
+                           build_kernel_launcher=build_kernel_launcher,
+                           name=name, options=options, preamble=preamble)
+
+    def generic_scan(self, kargs, neutral, scan_expr, output_statement,
+                     input_expr='x0[i]', size=None,
+                     is_segment_start_expr=None, input_fetch_exprs=None,
+                     axis=None, out=None,
+                     extra_kargs=None,     extra_arguments=None,
+                     input_arguments=None, output_arguments=None,
+                     dtype=None, infer_dtype=True,
+                     queue=None, infer_queue=True,
+                     allocator=None,
+                     convert_inputs=None,
+                     synchronize=True, wait_for=None,
+                     build_kernel_launcher=False,
+                     name='generic_scan', options=[], preamble=''):
+        """
         Scan arrays elements over the whole array.
         kargs = kernel array or scalar arguments (tuple)
         """
         from hysop.core.arrays.all import OpenClArray
 
-        self._unsupported_argument('generic_scan','axis',axis)
-        
+        self._unsupported_argument('generic_scan', 'axis', axis)
+
         if dtype is None:
             if any([is_signed(k) for k in kargs]):
-                dtype=np.int64
+                dtype = np.int64
             elif any([is_unsigned(k) for k in kargs]):
-                dtype=np.uint64
+                dtype = np.uint64
             # numpy does not force np.float16 / np.float32 to np.float64
-        
-        
+
         scan_expr = scan_expr.strip()
         input_dtype = find_common_dtype(*kargs)
         if is_complex(input_dtype):
             if input_dtype == np.complex128:
-                define='#define PYOPENCL_DEFINE_CDOUBLE'
+                define = '#define PYOPENCL_DEFINE_CDOUBLE'
             else:
-                define=''
+                define = ''
             include = '#include <pyopencl-complex.h>\n'+preamble
-            preamble='{}\n{}\n{}'.format(define,include,preamble)
+            preamble = '{}\n{}\n{}'.format(define, include, preamble)
 
             map_ops = {
-                    'a+b': 'add',
-                    'a-b': 'sub',
-                    'a*b': 'mul',
-                }
+                'a+b': 'add',
+                'a-b': 'sub',
+                'a*b': 'mul',
+            }
             if scan_expr in map_ops:
-                neutral= '{}({})'.format(self.complex_fn('fromreal',input_dtype), neutral)
-                scan_expr = '{}(a,b)'.format( self.complex_fn(map_ops[scan_expr], input_dtype))
+                neutral = '{}({})'.format(self.complex_fn('fromreal', input_dtype), neutral)
+                scan_expr = '{}(a,b)'.format(self.complex_fn(map_ops[scan_expr], input_dtype))
 
-        Kernel=_GenericScanKernel
+        Kernel = _GenericScanKernel
         kernel_build_kwargs = {
-                'name':     name,
-                'options':  options,
-                'preamble': preamble,
-                'scan_expr': scan_expr,
-                'neutral': neutral,
-                'input_fetch_exprs': input_fetch_exprs
+            'name':     name,
+            'options':  options,
+            'preamble': preamble,
+            'scan_expr': scan_expr,
+            'neutral': neutral,
+            'input_fetch_exprs': input_fetch_exprs
         }
 
         kernel_call_kwargs = {
@@ -1319,221 +1337,220 @@ class OpenClArrayBackend(ArrayBackend):
             'output_statement': output_statement,
             'is_segment_start_expr': is_segment_start_expr,
         }
-        return self.elementwise(ikargs=kargs, okargs=(out,), 
-            extra_kargs=extra_kargs, extra_arguments=extra_arguments,
-            input_arguments=input_arguments, output_arguments=output_arguments,
-            filter_expr=filter_expr, 
-            dtype=dtype, infer_dtype=infer_dtype, 
-            queue=queue, infer_queue=infer_queue, 
-            async=async,
-            allocator=allocator, alloc_shapes=None,
-            convert_inputs=convert_inputs,
-            Kernel=Kernel,
-            build_kernel_launcher=build_kernel_launcher,
-            kernel_build_kwargs= kernel_build_kwargs, 
-            kernel_call_kwargs = kernel_call_kwargs)
-    
-    def inclusive_scan(self, kargs, neutral, scan_expr,
-                input_expr='x0[i]', size=None,
-                axis=None, out=None,
-                extra_kargs=None,     extra_arguments=None,
-                input_arguments=None, 
-                dtype=None, infer_dtype=True, 
-                queue=None, infer_queue=True, 
-                allocator=None,
-                convert_inputs=None,
-                async=False, wait_for=None,
-                build_kernel_launcher=False,
-                name='inclusive_scan', options=[], preamble=''):
+        return self.elementwise(ikargs=kargs, okargs=(out,),
+                                extra_kargs=extra_kargs, extra_arguments=extra_arguments,
+                                input_arguments=input_arguments, output_arguments=output_arguments,
+                                filter_expr=filter_expr,
+                                dtype=dtype, infer_dtype=infer_dtype,
+                                queue=queue, infer_queue=infer_queue,
+                                synchronize=synchronize,
+                                allocator=allocator, alloc_shapes=None,
+                                convert_inputs=convert_inputs,
+                                Kernel=Kernel,
+                                build_kernel_launcher=build_kernel_launcher,
+                                kernel_build_kwargs=kernel_build_kwargs,
+                                kernel_call_kwargs=kernel_call_kwargs)
 
-        output_arguments = None 
+    def inclusive_scan(self, kargs, neutral, scan_expr,
+                       input_expr='x0[i]', size=None,
+                       axis=None, out=None,
+                       extra_kargs=None,     extra_arguments=None,
+                       input_arguments=None,
+                       dtype=None, infer_dtype=True,
+                       queue=None, infer_queue=True,
+                       allocator=None,
+                       convert_inputs=None,
+                       synchronize=True, wait_for=None,
+                       build_kernel_launcher=False,
+                       name='inclusive_scan', options=[], preamble=''):
+
+        output_arguments = None
         output_statement = 'y0[i] = item'
-        
+
         return self.generic_scan(kargs=kargs, neutral=neutral, scan_expr=scan_expr,
-                input_expr=input_expr, output_statement=output_statement, 
-                size=size, axis=axis, out=out,
-                extra_kargs=extra_kargs, extra_arguments=extra_arguments,
-                input_arguments=input_arguments, output_arguments=output_arguments,
-                dtype=dtype, infer_dtype=infer_dtype, 
-                queue=queue, infer_queue=infer_queue, 
-                allocator=allocator, convert_inputs=convert_inputs,
-                async=async, wait_for=wait_for,
-                build_kernel_launcher=build_kernel_launcher,
-                name=name, options=options, preamble=preamble)
-    
-    def exclusive_scan(self, kargs, neutral, scan_expr,
-                input_expr='x0[i]', size=None,
-                axis=None, out=None,
-                extra_kargs=None,     extra_arguments=None,
-                input_arguments=None, 
-                dtype=None, infer_dtype=True, 
-                queue=None, infer_queue=True, 
-                allocator=None,
-                async=False, wait_for=None,
-                build_kernel_launcher=False,
-                name='exclusive_scan', options=[], preamble=''):
+                                 input_expr=input_expr, output_statement=output_statement,
+                                 size=size, axis=axis, out=out,
+                                 extra_kargs=extra_kargs, extra_arguments=extra_arguments,
+                                 input_arguments=input_arguments, output_arguments=output_arguments,
+                                 dtype=dtype, infer_dtype=infer_dtype,
+                                 queue=queue, infer_queue=infer_queue,
+                                 allocator=allocator, convert_inputs=convert_inputs,
+                                 synchronize=synchronize, wait_for=wait_for,
+                                 build_kernel_launcher=build_kernel_launcher,
+                                 name=name, options=options, preamble=preamble)
 
-        output_arguments = None 
+    def exclusive_scan(self, kargs, neutral, scan_expr,
+                       input_expr='x0[i]', size=None,
+                       axis=None, out=None,
+                       extra_kargs=None,     extra_arguments=None,
+                       input_arguments=None,
+                       dtype=None, infer_dtype=True,
+                       queue=None, infer_queue=True,
+                       allocator=None,
+                       synchronize=True, wait_for=None,
+                       build_kernel_launcher=False,
+                       name='exclusive_scan', options=[], preamble=''):
+
+        output_arguments = None
         output_statement = 'y0[i] = prev_item'
-        
+
         return self.generic_scan(ikargs=kargs, neutral=neutral, scan_expr=scan_expr,
-                input_expr=input_expr, output_statement=output_statement, 
-                size=size, axis=axis, out=out,
-                extra_kargs=extra_kargs, extra_arguments=extra_arguments,
-                input_arguments=input_arguments, output_arguments=output_arguments,
-                dtype=dtype, infer_dtype=infer_dtype, 
-                queue=queue, infer_queue=infer_queue, 
-                allocator=allocator,
-                async=async, wait_for=wait_for,
-                build_kernel_launcher=build_kernel_launcher,
-                name=name, options=options, preamble=preamble)
-    
+                                 input_expr=input_expr, output_statement=output_statement,
+                                 size=size, axis=axis, out=out,
+                                 extra_kargs=extra_kargs, extra_arguments=extra_arguments,
+                                 input_arguments=input_arguments, output_arguments=output_arguments,
+                                 dtype=dtype, infer_dtype=infer_dtype,
+                                 queue=queue, infer_queue=infer_queue,
+                                 allocator=allocator,
+                                 synchronize=synchronize, wait_for=wait_for,
+                                 build_kernel_launcher=build_kernel_launcher,
+                                 name=name, options=options, preamble=preamble)
+
     def inclusive_nanscan(self, kargs, neutral, scan_expr,
-                input_expr='x0[i]', size=None,
-                axis=None, out=None,
-                extra_kargs=None,     extra_arguments=None,
-                input_arguments=None, 
-                dtype=None, infer_dtype=True, 
-                queue=None, infer_queue=True, 
-                allocator=None,
-                async=False, wait_for=None,
-                build_kernel_launcher=False,
-                name='inclusive_nanscan', options=[], preamble=''):
+                          input_expr='x0[i]', size=None,
+                          axis=None, out=None,
+                          extra_kargs=None,     extra_arguments=None,
+                          input_arguments=None,
+                          dtype=None, infer_dtype=True,
+                          queue=None, infer_queue=True,
+                          allocator=None,
+                          synchronize=True, wait_for=None,
+                          build_kernel_launcher=False,
+                          name='inclusive_nanscan', options=[], preamble=''):
 
         if any([is_complex(k) for k in kargs]):
             ctype = find_common_dtype(*kargs)
             nan_input_expr = '((isnan({expr}.real) || isnan({expr}.imag)) ? {neutral} : {expr})'
-            nan_input_expr = nan_input_expr.format(expr=input_expr, 
-                    neutral='{}({})'.format(self.complex_fn('fromreal',ctype), neutral))
+            nan_input_expr = nan_input_expr.format(expr=input_expr,
+                                                   neutral='{}({})'.format(self.complex_fn('fromreal', ctype), neutral))
         elif any([is_fp(k) for k in kargs]):
             nan_input_expr = '(isnan({expr}) ? {neutral} : {expr})'
             nan_input_expr = nan_input_expr.format(expr=input_expr, neutral=neutral)
         else:
             nan_input_expr = input_expr
-        
+
         return self.inclusive_scan(kargs=kargs, neutral=neutral, scan_expr=scan_expr,
-                input_expr=nan_input_expr, 
-                size=size, axis=axis, out=out,
-                extra_kargs=extra_kargs, extra_arguments=extra_arguments,
-                input_arguments=input_arguments,
-                dtype=dtype, infer_dtype=infer_dtype, 
-                queue=queue, infer_queue=infer_queue, 
-                allocator=allocator,
-                async=async, wait_for=wait_for,
-                build_kernel_launcher=build_kernel_launcher,
-                name=name, options=options, preamble=preamble)
-    
+                                   input_expr=nan_input_expr,
+                                   size=size, axis=axis, out=out,
+                                   extra_kargs=extra_kargs, extra_arguments=extra_arguments,
+                                   input_arguments=input_arguments,
+                                   dtype=dtype, infer_dtype=infer_dtype,
+                                   queue=queue, infer_queue=infer_queue,
+                                   allocator=allocator,
+                                   synchronize=synchronize, wait_for=wait_for,
+                                   build_kernel_launcher=build_kernel_launcher,
+                                   name=name, options=options, preamble=preamble)
+
     def exclusive_nanscan(self, kargs, neutral, scan_expr,
-                input_expr='x0[i]', size=None,
-                axis=None, out=None,
-                extra_kargs=None,     extra_arguments=None,
-                input_arguments=None, 
-                dtype=None, infer_dtype=True, 
-                queue=None, infer_queue=True, 
-                allocator=None,
-                async=False, wait_for=None,
-                build_kernel_launcher=False,
-                name='exclusive_nanscan', options=[], preamble=''):
+                          input_expr='x0[i]', size=None,
+                          axis=None, out=None,
+                          extra_kargs=None,     extra_arguments=None,
+                          input_arguments=None,
+                          dtype=None, infer_dtype=True,
+                          queue=None, infer_queue=True,
+                          allocator=None,
+                          synchronize=True, wait_for=None,
+                          build_kernel_launcher=False,
+                          name='exclusive_nanscan', options=[], preamble=''):
 
         if any([is_complex(k) for k in kargs]):
             ctype = find_common_dtype(*kargs)
             nan_input_expr = '((isnan({expr}.real) || isnan({expr}.imag)) ? {neutral} : {expr})'
-            nan_input_expr = nan_input_expr.format(expr=input_expr, 
-                    neutral='{}({})'.format(self.complex_fn('fromreal',ctype), neutral))
+            nan_input_expr = nan_input_expr.format(expr=input_expr,
+                                                   neutral='{}({})'.format(self.complex_fn('fromreal', ctype), neutral))
         elif any([is_fp(k) for k in kargs]):
             nan_input_expr = '(isnan({expr}) ? {neutral} : {expr})'
             nan_input_expr = nan_input_expr.format(expr=input_expr, neutral=neutral)
         else:
             nan_input_expr = input_expr
-        
+
         return self.exclusive_scan(kargs=kargs, neutral=neutral, scan_expr=scan_expr,
-                input_expr=nan_input_expr, 
-                size=size, axis=axis, out=out,
-                extra_kargs=extra_kargs, extra_arguments=extra_arguments,
-                input_arguments=input_arguments,
-                dtype=dtype, infer_dtype=infer_dtype, 
-                queue=queue, infer_queue=infer_queue, 
-                allocator=allocator,
-                async=async, wait_for=wait_for,
-                build_kernel_launcher=build_kernel_launcher,
-                name=name, options=options, preamble=preamble)
+                                   input_expr=nan_input_expr,
+                                   size=size, axis=axis, out=out,
+                                   extra_kargs=extra_kargs, extra_arguments=extra_arguments,
+                                   input_arguments=input_arguments,
+                                   dtype=dtype, infer_dtype=infer_dtype,
+                                   queue=queue, infer_queue=infer_queue,
+                                   allocator=allocator,
+                                   synchronize=synchronize, wait_for=wait_for,
+                                   build_kernel_launcher=build_kernel_launcher,
+                                   name=name, options=options, preamble=preamble)
 
     def check_queue(self, queue):
         if (queue is None):
             return self.default_queue
         if queue.context != self.context:
-            msg='Given queue does not match backend context.'
+            msg = 'Given queue does not match backend context.'
             raise RuntimeError(msg)
         return queue
 
 ###########################
 # ARRAY CREATION ROUTINES #
     def array(self, shape, dtype=HYSOP_REAL, order=default_order,
-            queue=None, min_alignment=None,
-            buf=None, offset=0,
-            strides=None, events=None):
+              queue=None, min_alignment=None,
+              buf=None, offset=0,
+              strides=None, events=None):
         """
         Create an OpenClArray, see pyopencl.array.Array constructor.
         If a queue is specified, it is set as default queue,
         else it will be backend.default_queue.
         """
-        #FIXME OpenCL half float support
+        # FIXME OpenCL half float support
         if dtype == np.float16:
-            dtype=np.float32
-            msg='OpenClArrayBackend promoted float16 to float32 in array allocation of shape {}.'
-            msg=msg.format(shape)
+            dtype = np.float32
+            msg = 'OpenClArrayBackend promoted float16 to float32 in array allocation of shape {}.'
+            msg = msg.format(shape)
             warnings.warn(RuntimeWarning(msg))
-        #FIXME OpenCL long double support
+        # FIXME OpenCL long double support
         if dtype == np.longdouble:
-            msg='OpenClArrayBackend demoted {} to float64 in array allocation of shape {}.'
-            msg=msg.format(dtype, shape)
+            msg = 'OpenClArrayBackend demoted {} to float64 in array allocation of shape {}.'
+            msg = msg.format(dtype, shape)
             dtype = np.float64
             warnings.warn(RuntimeWarning(msg))
-        #FIXME OpenCL bool support
-        if dtype == np.bool:
-            dtype=HYSOP_BOOL
-            msg='OpenClArrayBackend promoted np.bool to {} in array allocation of shape {}.'
-            msg=msg.format(dtype, shape)
+        # FIXME OpenCL bool support
+        if dtype == np.bool_:
+            dtype = HYSOP_BOOL
+            msg = 'OpenClArrayBackend promoted np.bool_ to {} in array allocation of shape {}.'
+            msg = msg.format(dtype, shape)
             warnings.warn(RuntimeWarning(msg))
 
         shape = to_tuple(shape)
-        
-        # at this time the opencl backend works only with the default_queue 
+
+        # at this time the opencl backend works only with the default_queue
         # so we enforce it by not binding any Array to any queue.
         if (queue is not None) and (queue is not self.default_queue):
             msg = 'pyopencl.Array has been created with non-default queue.'
             raise RuntimeError(msg)
         queue = self.check_queue(queue)
-        cq = queue #force default queue
+        cq = queue  # force default queue
 
-        if buf is None:
-            assert offset==0
+        if (buf is None):
+            assert offset == 0
             assert strides is None
             allocator = self.allocator
-            if min_alignment < self.allocator.device.mem_base_addr_align:
-                alignment=1
+            if (min_alignment is None) or (min_alignment < self.allocator.device.mem_base_addr_align):
+                alignment = 1
                 dtype = np.dtype(dtype)
                 size = int(prod(shape)*dtype.itemsize)
                 # prod( shape=(,) ) will return 1.0, so we cast to int for scalars
             else:
-                (size,nbytes,alignment) = self.get_alignment_and_size(shape=shape,
-                        dtype=dtype, min_alignment=min_alignment)
+                (size, nbytes, alignment) = self.get_alignment_and_size(shape=shape,
+                                                                        dtype=dtype, min_alignment=min_alignment)
             buf = allocator.allocate_aligned(size, alignment=alignment)
-        
-    
-        handle = self._call(clArray.Array, cq=cq, shape=shape, dtype=dtype, order=order, 
-                                allocator=None, data=buf, offset=offset, 
-                                strides=strides, events=events)
+
+        handle = self._call(clArray.Array, cq=cq, shape=shape, dtype=dtype, order=order,
+                            allocator=None, data=buf, offset=offset,
+                            strides=strides, events=events)
         array = self.wrap(handle)
         return array
-    
-    def asarray(self, a, queue=None, async=False,
-            dtype=None, order=default_order, array_queue=QueuePolicy.SAME_AS_TRANSFER):
+
+    def asarray(self, a, queue=None, synchronize=True,
+                dtype=None, order=default_order, array_queue=QueuePolicy.SAME_AS_TRANSFER):
         """
         Convert the input to an OpenClArray.
         Queue is set as default queue.
         """
-        from hysop.backend.device.opencl.opencl_array  import Array, OpenClArray
+        from hysop.backend.device.opencl.opencl_array import Array, OpenClArray
         from hysop.backend.host.host_array import HostArray
         acls = a.__class__.__name__
         queue = self.check_queue(queue)
@@ -1543,12 +1560,12 @@ class OpenClArrayBackend(ArrayBackend):
         if isinstance(a, Array):
             dtype = a.dtype
         elif hasattr(a, 'dtype'):
-            dtype = a.dtype 
+            dtype = a.dtype
         else:
-            msg='Could not extract dtype from type {} and argument not set (dtype).'
-            msg=msg.format(acls)
+            msg = 'Could not extract dtype from type {} and argument not set (dtype).'
+            msg = msg.format(acls)
             raise ValueError(msg)
-        
+
         if isinstance(a, OpenClArray):
             array = a
         elif isinstance(a, clArray.Array):
@@ -1558,21 +1575,21 @@ class OpenClArrayBackend(ArrayBackend):
                 a = np.asarray(a)
             elif isinstance(a, HostArray):
                 a = a.handle
-                
-            array = self._call(clArray.to_device, queue=queue, ary=a, 
-                                    async=async, array_queue=array_queue)
+
+            array = self._call(clArray.to_device, queue=queue, ary=a,
+                               async_=(not synchronize), array_queue=array_queue)
         else:
-            msg='Unknown type to convert from {}.'
-            msg=msg.format(acls)
+            msg = 'Unknown type to convert from {}.'
+            msg = msg.format(acls)
             raise TypeError(msg)
-                
+
         return array.view(dtype)
 
     def copy(self, a, order=MemoryOrdering.SAME_ORDER, queue=None):
         """
         Return an array copy of the given object.
         """
-        self._unsupported_argument('copy','order',order,MemoryOrdering.SAME_ORDER)
+        self._unsupported_argument('copy', 'order', order, MemoryOrdering.SAME_ORDER)
         queue = queue = self.check_queue(queue)
         return self._call(a.handle.copy, queue=queue)
 
@@ -1590,18 +1607,18 @@ class OpenClArrayBackend(ArrayBackend):
             from hysop.symbolic.relational import Assignment
             a, = self._kernel_generator.arrays_to_symbols(a)
             expr = Assignment(a, value)
-            self._kernel_generator.elementwise_kernel('fill', expr, 
-                                queue=queue, call_only_once=True)
+            self._kernel_generator.elementwise_kernel('fill', expr,
+                                                      queue=queue, call_only_once=True)
 
 # Ones and zeros
     def empty(self, shape, dtype=HYSOP_REAL, order=default_order, queue=None, min_alignment=None):
         """
         Return a new array of given shape and type, without initializing entries.
-        If queue is specified, the queue becomes the default queue, else 
+        If queue is specified, the queue becomes the default queue, else
         backend.default_queue is used instead.
         """
         return self.array(shape=shape, dtype=dtype, order=order, queue=queue, min_alignment=min_alignment)
-    
+
     def full(self, shape, fill_value, dtype=HYSOP_REAL, order=default_order, queue=None, min_alignment=None):
         """
         Return a new array of given shape and type, filled with fill_value.
@@ -1610,21 +1627,22 @@ class OpenClArrayBackend(ArrayBackend):
         a = self.empty(shape=shape, dtype=dtype, order=order, queue=queue, min_alignment=min_alignment)
         self.fill(a=a, value=fill_value, queue=queue)
         return a
+
     def zeros(self, shape, dtype=HYSOP_REAL, order=default_order, queue=None, min_alignment=None):
         """
         Return a new array of given shape and type, filled with zeros.
         Queue is set as default queue.
         """
-        return self.full(shape=shape, dtype=dtype, order=order, queue=queue, 
-                 fill_value=0, min_alignment=min_alignment)
+        return self.full(shape=shape, dtype=dtype, order=order, queue=queue,
+                         fill_value=0, min_alignment=min_alignment)
+
     def ones(self, shape, dtype=HYSOP_REAL, order=default_order, queue=None, min_alignment=None):
         """
         Return a new array of given shape and type, filled with ones.
         Queue is set as default queue.
         """
-        return self.full(shape=shape, dtype=dtype, order=order, queue=queue, 
-                 fill_value=1, min_alignment=min_alignment)
-
+        return self.full(shape=shape, dtype=dtype, order=order, queue=queue,
+                         fill_value=1, min_alignment=min_alignment)
 
     def empty_like(self, a, shape=None, dtype=None, order=None, subok=True, queue=None, min_alignment=None):
         """
@@ -1634,31 +1652,34 @@ class OpenClArrayBackend(ArrayBackend):
         dtype = first_not_None(dtype, a.dtype)
         shape = first_not_None(shape, a.shape)
         order = first_not_None(order, getattr(a, 'order', default_order))
-        return self.array(shape=shape, queue=queue, 
-                            dtype=dtype, order=order, min_alignment=min_alignment)
+        return self.array(shape=shape, queue=queue,
+                          dtype=dtype, order=order, min_alignment=min_alignment)
+
     def full_like(self, a, fill_value, dtype=None, order=None, subok=True, queue=None, min_alignment=None, shape=None):
         """
         Return a new array with the same shape and type as a given array.
         Queue is set as default queue.
         """
-        a = self.empty_like(a=a, dtype=dtype, order=order, subok=subok, 
+        a = self.empty_like(a=a, dtype=dtype, order=order, subok=subok,
                             queue=queue, min_alignment=min_alignment, shape=shape)
         self.fill(a, value=fill_value, queue=queue)
         return a
+
     def zeros_like(self, a, dtype=None, order=None, subok=True, queue=None, min_alignment=None, shape=None):
         """
         Return an array of zeros with the same shape and type as a given array.
         Queue is set as default queue.
         """
-        return self.full_like(a=a,fill_value=0,dtype=dtype,order=order,subok=subok,
-                                 queue=queue, min_alignment=min_alignment, shape=shape)
+        return self.full_like(a=a, fill_value=0, dtype=dtype, order=order, subok=subok,
+                              queue=queue, min_alignment=min_alignment, shape=shape)
+
     def ones_like(self, a, dtype=None, order=None, subok=True, queue=None, min_alignment=None, shape=None):
         """
         Return an array of ones with the same shape and type as a given array.
         Queue is set as default queue.
         """
-        return self.full_like(a=a,fill_value=1,dtype=dtype,order=order,subok=subok,
-                                queue=queue, min_alignment=min_alignment, shape=shape)
+        return self.full_like(a=a, fill_value=1, dtype=dtype, order=order, subok=subok,
+                              queue=queue, min_alignment=min_alignment, shape=shape)
 
     def arange(self, *args, **kargs):
         """
@@ -1676,9 +1697,11 @@ class OpenClArrayBackend(ArrayBackend):
 
 ###############################
 # ARRAY MANIPULATION ROUTINES #
-## See https://docs.scipy.org/doc/numpy/reference/routines.array-manipulation.html
+# See https://docs.scipy.org/doc/numpy/reference/routines.array-manipulation.html
+
+# Changing array shape
+
 
-#Changing array shape
     def reshape(self, a, newshape, order=default_order, **kargs):
         """
         Gives a new shape to an array without changing its data.
@@ -1689,19 +1712,19 @@ class OpenClArrayBackend(ArrayBackend):
         """
         Return a contiguous flattened array.
         """
-        self._unsupported_argument('ravel','order',order,MemoryOrdering.SAME_ORDER)
+        self._unsupported_argument('ravel', 'order', order, MemoryOrdering.SAME_ORDER)
         return a._call('ravel')
-    
-#Changing number of dimensions
+
+# Changing number of dimensions
     def squeeze(self, a, axis=None):
         """
         Remove single-dimensional entries from the shape of an array.
         """
-        self._unsupported_argument('squeeze','axis',axis,None)
+        self._unsupported_argument('squeeze', 'axis', axis, None)
         return a._call('squeeze')
 
-#Transpose-like operations
-## /!\ those functions can alter the logical transposition state /!\
+# Transpose-like operations
+# /!\ those functions can alter the logical transposition state /!\
     def transpose(self, a, axes=None):
         """
         Permute the dimensions of an array (only array strides are permutated).
@@ -1710,82 +1733,91 @@ class OpenClArrayBackend(ArrayBackend):
 
 
 # BINARY OPERATIONS #
-## See https://docs.scipy.org/doc/numpy/reference/routines.bitwise.html
+# See https://docs.scipy.org/doc/numpy/reference/routines.bitwise.html
 #####################
 
 # Elementwise bit operations
+
+
     def bitwise_and(self, x1, x2, out=None,
-                queue=None, dtype=None):
+                    queue=None, dtype=None):
         """
         Compute the bit-wise AND of two arrays element-wise.
         """
         expr = 'y0[i] = (x0[i] & x1[i])'
-        return self.binary_op(x0=x1, x1=x2, expr=expr, out=out, 
-                queue=queue, dtype=dtype)
+        return self.binary_op(x0=x1, x1=x2, expr=expr, out=out,
+                              queue=queue, dtype=dtype)
+
     def bitwise_or(self, x1, x2, out=None,
-                queue=None, dtype=None):
+                   queue=None, dtype=None):
         """
         Compute the bit-wise OR of two arrays element-wise.
         """
         expr = 'y0[i] = (x0[i] | x1[i])'
-        return self.binary_op(x0=x1, x1=x2, expr=expr, out=out, 
-                queue=queue, dtype=dtype)
+        return self.binary_op(x0=x1, x1=x2, expr=expr, out=out,
+                              queue=queue, dtype=dtype)
+
     def bitwise_xor(self, x1, x2, out=None,
-                queue=None, dtype=None):
+                    queue=None, dtype=None):
         """
         Compute the bit-wise XOR of two arrays element-wise.
         """
         expr = 'y0[i] = (x0[i] ^ x1[i])'
-        return self.binary_op(x0=x1, x1=x2, expr=expr, out=out, 
-                queue=queue, dtype=dtype)
+        return self.binary_op(x0=x1, x1=x2, expr=expr, out=out,
+                              queue=queue, dtype=dtype)
 
     def invert(self, x, out=None,
-                queue=None, dtype=None):
+               queue=None, dtype=None):
         """
         Compute bit-wise inversion, or bit-wise NOT, element-wise.
         """
         expr = 'y0[i] = (~x0[i])'
-        return self.unary_op(x0=x, expr=expr, out=out, 
-                queue=queue, dtype=dtype)
+        return self.unary_op(x0=x, expr=expr, out=out,
+                             queue=queue, dtype=dtype)
+
     def left_shift(self, x1, x2, out=None,
-                queue=None, dtype=None):
+                   queue=None, dtype=None):
         """
         Shift the bits of an integer to the left.
         """
         expr = 'y0[i] = (x0[i] << x1[i])'
-        return self.binary_op(x0=x1, x1=x2, expr=expr, out=out, 
-                queue=queue, dtype=dtype)
+        return self.binary_op(x0=x1, x1=x2, expr=expr, out=out,
+                              queue=queue, dtype=dtype)
+
     def right_shift(self, x1, x2, out=None,
-                queue=None, dtype=None):
+                    queue=None, dtype=None):
         """
         Shift the bits of an integer to the right.
         """
         expr = 'y0[i] = (x0[i] >> x1[i])'
-        return self.binary_op(x0=x1, x1=x2, expr=expr, out=out, 
-                queue=queue, dtype=dtype)
+        return self.binary_op(x0=x1, x1=x2, expr=expr, out=out,
+                              queue=queue, dtype=dtype)
 
 
 # LOGIC FUNCTIONS #
 ###################
-## See https://docs.scipy.org/doc/numpy/reference/routines.logic.html
+# See https://docs.scipy.org/doc/numpy/reference/routines.logic.html
+
+# Truth value testing
+
 
-#Truth value testing
     def any(self, a, axis=None, out=None, queue=None, dtype=HYSOP_BOOL):
         """
         Test whether any array elements along a given axis evaluate to True.
         """
         return self.reduce(kargs=(a,), axis=axis, dtype=dtype, out=out, queue=queue,
-                            neutral='false', reduce_expr='a||b')
+                           neutral='false', reduce_expr='a||b')
+
     def all(self, a, axis=None, out=None, queue=None, dtype=HYSOP_BOOL):
         """
         Test whether all array elements along a given axis evaluate to True.
         """
         return self.reduce(kargs=(a,), axis=axis, dtype=dtype, out=out, queue=queue,
-                            neutral='true', reduce_expr='a&&b')
-    
-#Array contents
-    def isfinite(self, x, out=None, 
-            queue=None, dtype=HYSOP_BOOL):
+                           neutral='true', reduce_expr='a&&b')
+
+# Array contents
+    def isfinite(self, x, out=None,
+                 queue=None, dtype=HYSOP_BOOL):
         """
         Test element-wise for finiteness (not infinity or not Not a Number).
         """
@@ -1793,13 +1825,14 @@ class OpenClArrayBackend(ArrayBackend):
             expr = 'y0[i] = (!isinf(x0[i])) && (!isnan(x0[i]))'
         elif is_complex(x):
             expr = 'y0[i] = ((!isinf(x0[i].real)) && (!isnan(x0[i].real))'
-            expr+= '&& (!isinf(x0[i].imag)) && (!isnan(x0[i].imag)))'
+            expr += '&& (!isinf(x0[i].imag)) && (!isnan(x0[i].imag)))'
         else:
             expr = 'y0[i] = 1'
-        return self.unary_op(x0=x, expr=expr, out=out, 
-            queue=queue, dtype=dtype)
+        return self.unary_op(x0=x, expr=expr, out=out,
+                             queue=queue, dtype=dtype)
+
     def isinf(self, x, out=None,
-                queue=None, dtype=HYSOP_BOOL):
+              queue=None, dtype=HYSOP_BOOL):
         """
         Test element-wise for positive or negative infinity.
         """
@@ -1809,10 +1842,11 @@ class OpenClArrayBackend(ArrayBackend):
             expr = 'y0[i] = (isinf(x0[i].real) || isinf(x0[i].imag))'
         else:
             expr = 'y0[i] = 0'
-        return self.unary_op(x0=x, expr=expr, out=out, 
-                queue=queue, dtype=dtype)
+        return self.unary_op(x0=x, expr=expr, out=out,
+                             queue=queue, dtype=dtype)
+
     def isnan(self, x, out=None,
-                queue=None, dtype=HYSOP_BOOL):
+              queue=None, dtype=HYSOP_BOOL):
         """
         Test element-wise for NaN and return result as a boolean array.
         """
@@ -1822,10 +1856,11 @@ class OpenClArrayBackend(ArrayBackend):
             expr = 'y0[i] = (isnan(x0[i].real) || isnan(x0[i].imag))'
         else:
             expr = 'y0[i] = 0'
-        return self.unary_op(x0=x, expr=expr, out=out, 
-                queue=queue, dtype=dtype)
+        return self.unary_op(x0=x, expr=expr, out=out,
+                             queue=queue, dtype=dtype)
+
     def isneginf(self, x, out=None,
-                queue=None, dtype=HYSOP_BOOL):
+                 queue=None, dtype=HYSOP_BOOL):
         """
         Test element-wise for negative infinity, return result as bool array.
         """
@@ -1834,13 +1869,14 @@ class OpenClArrayBackend(ArrayBackend):
             expr = 'y0[i] = signbit(x0[i]) && isinf(x0[i])'
         elif is_complex(x):
             expr = 'y0[i] = ((signbit(x0[i].real) && isinf(x0[i].real))'
-            expr +=      '|| (signbit(x0[i].imag) && isinf(x0[i].imag)))'
+            expr += '|| (signbit(x0[i].imag) && isinf(x0[i].imag)))'
         else:
             expr = 'y0[i] = 0'
-        return self.unary_op(x0=x, expr=expr, out=out, 
-                queue=queue, dtype=dtype)
+        return self.unary_op(x0=x, expr=expr, out=out,
+                             queue=queue, dtype=dtype)
+
     def isposinf(self, x, out=None,
-                queue=None, dtype=HYSOP_BOOL):
+                 queue=None, dtype=HYSOP_BOOL):
         """
         Test element-wise for positive infinity, return result as bool array.
         """
@@ -1849,80 +1885,85 @@ class OpenClArrayBackend(ArrayBackend):
             expr = 'y0[i] = (!signbit(x0[i])) && isinf(x0[i])'
         elif is_complex(x):
             expr = 'y0[i] = ((!signbit(x0[i].real) && isinf(x0[i].real))'
-            expr +=      '|| (!signbit(x0[i].imag) && isinf(x0[i].imag)))'
+            expr += '|| (!signbit(x0[i].imag) && isinf(x0[i].imag)))'
         else:
             expr = 'y0[i] = 0'
-        return self.unary_op(x0=x, expr=expr, out=out, 
-                queue=queue, dtype=dtype)
+        return self.unary_op(x0=x, expr=expr, out=out,
+                             queue=queue, dtype=dtype)
 
-#Logical operations
-    def logical_and(self, x1, x2, out=None, 
-                queue=None, dtype=HYSOP_BOOL):
+# Logical operations
+    def logical_and(self, x1, x2, out=None,
+                    queue=None, dtype=HYSOP_BOOL):
         """
         Compute the truth value of x1 AND x2 element-wise.
         """
         expr = 'y0[i] = (x0[i] && x1[i])'
-        return self.binary_op(x0=x1, x1=x2, expr=expr, out=out, 
-                queue=queue, dtype=dtype)
+        return self.binary_op(x0=x1, x1=x2, expr=expr, out=out,
+                              queue=queue, dtype=dtype)
+
     def logical_or(self, x1, x2, out=None,
-                queue=None, dtype=HYSOP_BOOL):
+                   queue=None, dtype=HYSOP_BOOL):
         """
         Compute the truth value of x1 OR x2 element-wise.
         """
         expr = 'y0[i] = (x0[i] || x1[i])'
-        return self.binary_op(x0=x1, x1=x2, expr=expr, out=out, 
-                queue=queue, dtype=dtype)
+        return self.binary_op(x0=x1, x1=x2, expr=expr, out=out,
+                              queue=queue, dtype=dtype)
+
     def logical_not(self, x, out=None,
-                queue=None, dtype=HYSOP_BOOL):
+                    queue=None, dtype=HYSOP_BOOL):
         """
         Compute the truth value of NOT x element-wise.
         """
         expr = 'y0[i] = (!x0[i])'
-        return self.unary_op(x0=x, expr=expr, out=out, 
-                queue=queue, dtype=dtype)
+        return self.unary_op(x0=x, expr=expr, out=out,
+                             queue=queue, dtype=dtype)
+
     def logical_xor(self, x1, x2, out=None,
-                queue=None, dtype=HYSOP_BOOL):
+                    queue=None, dtype=HYSOP_BOOL):
         """
         Compute the truth value of x1 XOR x2, element-wise.
         """
         expr = 'y0[i] = (x0[i] ? (!x1[i]) : x1[i])'
-        return self.binary_op(x0=x1, x1=x2, expr=expr, out=out, 
-                queue=queue, dtype=dtype)
+        return self.binary_op(x0=x1, x1=x2, expr=expr, out=out,
+                              queue=queue, dtype=dtype)
 
-#Comparisson
+# Comparisson
     def allequal(self, a, b, equal_nan=False,
-                queue=None, dtype=HYSOP_BOOL):
+                 queue=None, dtype=HYSOP_BOOL):
         """
         Returns True if two arrays are element-wise equal within a tolerance.
         """
-        x0,x1 = a,b
-        map_expr ='( x0[i]==x1[i] )' 
+        x0, x1 = a, b
+        map_expr = '( x0[i]==x1[i] )'
         if is_fp(x0) and is_fp(x1) and equal_nan:
             map_expr += ' || ( isnan(x0[i]) && isnan(x1[i]) )'
-        return self.reduce(kargs=(x0,x1), neutral='true', reduce_expr='a&&b', 
-                            queue=queue, dtype=dtype,
-                            map_expr=map_expr,
-                            arguments=arguments)
+        return self.reduce(kargs=(x0, x1), neutral='true', reduce_expr='a&&b',
+                           queue=queue, dtype=dtype,
+                           map_expr=map_expr,
+                           arguments=arguments)
+
     def allclose(self, a, b, rtol=1e-05, atol=1e-08, equal_nan=False,
-                queue=None, dtype=HYSOP_BOOL):
+                 queue=None, dtype=HYSOP_BOOL):
         """
         Returns True if two arrays are element-wise equal within a tolerance.
         """
         if is_complex(a) or is_complex(b):
             map_expr = '({fabs}({sub}(x0[i],x1[i])) <= (atol + rtol*{fabs}(x1[i])))'
-            common_dtype = find_common_dtype(a,b)
-            map_expr = map_expr.format(fabs=self.complex_fn('abs',common_dtype),
-                    sub=self.complex_fn('sub',common_dtype)) 
-            convert_inputs='c'
+            common_dtype = find_common_dtype(a, b)
+            map_expr = map_expr.format(fabs=self.complex_fn('abs', common_dtype),
+                                       sub=self.complex_fn('sub', common_dtype))
+            convert_inputs = 'c'
         else:
             map_expr = '(fabs(x0[i]-x1[i]) <= (atol + rtol*fabs(x1[i])))'
-            convert_inputs='f'
-        return self.reduce(kargs=(a,b), neutral='true', reduce_expr='a&&b', 
-                            queue=queue, dtype=dtype,
-                            map_expr=map_expr,
-                            extra_kargs=(np.float64(rtol), np.float64(atol)),
-                            extra_arguments=['const double rtol', 'const double atol'],
-                            convert_inputs=convert_inputs, alloc_dtypes='f')
+            convert_inputs = 'f'
+        return self.reduce(kargs=(a, b), neutral='true', reduce_expr='a&&b',
+                           queue=queue, dtype=dtype,
+                           map_expr=map_expr,
+                           extra_kargs=(np.float64(rtol), np.float64(atol)),
+                           extra_arguments=['const double rtol', 'const double atol'],
+                           convert_inputs=convert_inputs, alloc_dtypes='f')
+
     def isclose(self, a, b, rtol=1e-05, atol=1e-08, equal_nan=False,
                 queue=None, dtype=HYSOP_BOOL):
         """
@@ -1930,50 +1971,51 @@ class OpenClArrayBackend(ArrayBackend):
         """
         if is_complex(a) or is_complex(b):
             map_expr = '({fabs}({sub}(x0[i],x1[i])) <= (atol + rtol*{fabs}(x1[i])))'
-            common_dtype = find_common_dtype(a,b)
-            map_expr = map_expr.format(fabs=self.complex_fn('abs',common_dtype),
-                    sub=self.complex_fn('sub',common_dtype)) 
-            convert_inputs='c'
+            common_dtype = find_common_dtype(a, b)
+            map_expr = map_expr.format(fabs=self.complex_fn('abs', common_dtype),
+                                       sub=self.complex_fn('sub', common_dtype))
+            convert_inputs = 'c'
         else:
             map_expr = '(fabs(x0[i]-x1[i]) <= (atol + rtol*fabs(x1[i])))'
-            convert_inputs='f'
-        return self.binary_op(x0=a, x1=b, expr=expr, out=out, 
-                extra_kargs=(np.float64(rtol), np.float64(atol)),
-                extra_arguments=['double rtol', 'double atol'],
-                queue=queue, dtype=dtype,
-                convert_inputs=convert_inputs)
+            convert_inputs = 'f'
+        return self.binary_op(x0=a, x1=b, expr=expr, out=out,
+                              extra_kargs=(np.float64(rtol), np.float64(atol)),
+                              extra_arguments=['double rtol', 'double atol'],
+                              queue=queue, dtype=dtype,
+                              convert_inputs=convert_inputs)
+
     def array_equal(self, a1, a2,
-                queue=None, dtype=HYSOP_BOOL):
+                    queue=None, dtype=HYSOP_BOOL):
         """
         True if two arrays have the same shape and elements, False otherwise.
         """
-        return (a1.shape==a2.shape) and \
-                self.all_equal(x1=a1,x2=a2,queue=queue,dtype=HYSOP_BOOL)
+        return (a1.shape == a2.shape) and \
+            self.all_equal(x1=a1, x2=a2, queue=queue, dtype=HYSOP_BOOL)
 
     @staticmethod
-    def _make_comparisson_expr(x1,x2,comp):
+    def _make_comparisson_expr(x1, x2, comp):
         iseq = (comp == '==')
         expr = 'y0[i] = (({}) || (({}) && ({})))'
         if is_complex(x1) and is_complex(x2):
             expr = expr.format(
-                    'false' if iseq else 'x0[i].real {comp}  x1[i].real',
-                    'x0[i].real == x1[i].real',
-                    'x0[i].imag {comp}  x1[i].imag')
+                'false' if iseq else 'x0[i].real {comp}  x1[i].real',
+                'x0[i].real == x1[i].real',
+                'x0[i].imag {comp}  x1[i].imag')
         elif is_complex(x1):
             expr = expr.format(
-                    'false' if iseq else 'x0[i].real {comp}  x1[i]',
-                    'x0[i].real == x1[i]',
-                    'x0[i].imag {comp}  0')
+                'false' if iseq else 'x0[i].real {comp}  x1[i]',
+                'x0[i].real == x1[i]',
+                'x0[i].imag {comp}  0')
         elif is_complex(x2):
             expr = expr.format(
-                    'false' if iseq else 'x0[i] {comp}  x1[i].real',
-                    'x0[i] == x1[i].real',
-                    '0     {comp}  x1[i].imag')
+                'false' if iseq else 'x0[i] {comp}  x1[i].real',
+                'x0[i] == x1[i].real',
+                '0     {comp}  x1[i].imag')
         else:
             expr = expr.format(
-                    'false' if iseq else 'x0[i] {comp}  x1[i]',
-                    'x0[i] == x1[i]',
-                    '0     {comp}  0')
+                'false' if iseq else 'x0[i] {comp}  x1[i]',
+                'x0[i] == x1[i]',
+                '0     {comp}  0')
         return expr.format(comp=comp)
 
     def greater(self, x1, x2, out=None,
@@ -1981,186 +2023,200 @@ class OpenClArrayBackend(ArrayBackend):
         """
         Return the truth value of (x1 > x2) element-wise.
         """
-        expr = self._make_comparisson_expr(x1,x2,'>')
-        return self.binary_op(x0=x1, x1=x2, expr=expr, out=out, 
-                queue=queue, dtype=dtype)
+        expr = self._make_comparisson_expr(x1, x2, '>')
+        return self.binary_op(x0=x1, x1=x2, expr=expr, out=out,
+                              queue=queue, dtype=dtype)
+
     def greater_equal(self, x1, x2, out=None,
-                queue=None, dtype=HYSOP_BOOL):
+                      queue=None, dtype=HYSOP_BOOL):
         """
         Return the truth value of (x1 >= x2) element-wise.
         """
-        expr = self._make_comparisson_expr(x1,x2,'>=')
-        return self.binary_op(x0=x1, x1=x2, expr=expr, out=out, 
-                queue=queue, dtype=dtype)
+        expr = self._make_comparisson_expr(x1, x2, '>=')
+        return self.binary_op(x0=x1, x1=x2, expr=expr, out=out,
+                              queue=queue, dtype=dtype)
+
     def less(self, x1, x2, out=None,
-                queue=None, dtype=HYSOP_BOOL):
+             queue=None, dtype=HYSOP_BOOL):
         """
         Return the truth value of (x1 < x2) element-wise.
         """
-        expr = self._make_comparisson_expr(x1,x2,'<')
-        return self.binary_op(x0=x1, x1=x2, expr=expr, out=out, 
-                queue=queue, dtype=dtype)
+        expr = self._make_comparisson_expr(x1, x2, '<')
+        return self.binary_op(x0=x1, x1=x2, expr=expr, out=out,
+                              queue=queue, dtype=dtype)
+
     def less_equal(self, x1, x2, out=None,
-                queue=None, dtype=HYSOP_BOOL):
+                   queue=None, dtype=HYSOP_BOOL):
         """
         Return the truth value of (x1 =< x2) element-wise.
         """
-        expr = self._make_comparisson_expr(x1,x2,'<=')
-        return self.binary_op(x0=x1, x1=x2, expr=expr, out=out, 
-                queue=queue, dtype=dtype)
+        expr = self._make_comparisson_expr(x1, x2, '<=')
+        return self.binary_op(x0=x1, x1=x2, expr=expr, out=out,
+                              queue=queue, dtype=dtype)
+
     def equal(self, x1, x2, out=None,
-                queue=None, dtype=HYSOP_BOOL):
+              queue=None, dtype=HYSOP_BOOL):
         """
         Return (x1 == x2) element-wise.
         """
-        expr = self._make_comparisson_expr(x1,x2,'==')
-        return self.binary_op(x0=x1, x1=x2, expr=expr, out=out, 
-                queue=queue, dtype=dtype)
+        expr = self._make_comparisson_expr(x1, x2, '==')
+        return self.binary_op(x0=x1, x1=x2, expr=expr, out=out,
+                              queue=queue, dtype=dtype)
+
     def not_equal(self, x1, x2, out=None,
-                queue=None, dtype=HYSOP_BOOL):
+                  queue=None, dtype=HYSOP_BOOL):
         """
         Return (x1 != x2) element-wise.
         """
-        expr = self._make_comparisson_expr(x1,x2,'!=')
-        return self.binary_op(x0=x1, x1=x2, expr=expr, out=out, 
-                queue=queue, dtype=dtype)
+        expr = self._make_comparisson_expr(x1, x2, '!=')
+        return self.binary_op(x0=x1, x1=x2, expr=expr, out=out,
+                              queue=queue, dtype=dtype)
 
 
 # MATHEMATICAL FUNCTIONS #
 ##########################
-## See https://docs.scipy.org/doc/numpy/reference/routines.math.html
-    
+# See https://docs.scipy.org/doc/numpy/reference/routines.math.html
+
     def _flt_or_cplx_unary_op(self, x, fname, **kargs):
         assert 'expr' not in kargs
         assert 'x0' not in kargs
         assert 'convert_inputs' not in kargs
         assert 'alloc_dtypes' not in kargs
-        expr='y0[i] = {}(x0[i])'
+        expr = 'y0[i] = {}(x0[i])'
         if is_complex(x):
             expr = expr.format(self.complex_fn(fname, x))
-            convert_inputs='c'
-            alloc_dtypes='c'
+            convert_inputs = 'c'
+            alloc_dtypes = 'c'
         else:
             expr = expr.format(fname)
-            convert_inputs='f'
-            alloc_dtypes='f'
+            convert_inputs = 'f'
+            alloc_dtypes = 'f'
         return self.unary_op(expr=expr, x0=x, convert_inputs=convert_inputs,
-                alloc_dtypes=alloc_dtypes, **kargs)
-    
+                             alloc_dtypes=alloc_dtypes, **kargs)
+
     def _flt_or_map_cplx_unary_op(self, x, fname, **kargs):
         assert 'expr' not in kargs
         assert 'x0' not in kargs
         if is_complex(x):
-            expr='y0[i] = {}({fn}(x0[i].real), {fn}(x0[i].imag))'
+            expr = 'y0[i] = {}({fn}(x0[i].real), {fn}(x0[i].imag))'
             expr = expr.format(self.complex_fn('new', x), fn=fname)
-            _convert_inputs=None
-            _alloc_dtypes=(x.dtype,)
+            _convert_inputs = None
+            _alloc_dtypes = (x.dtype,)
         else:
-            expr='y0[i] = {}(x0[i])'
+            expr = 'y0[i] = {}(x0[i])'
             expr = expr.format(fname)
-            _convert_inputs='f'
-            _alloc_dtypes='f'
+            _convert_inputs = 'f'
+            _alloc_dtypes = 'f'
         alloc_dtypes = kargs.pop('alloc_dtypes', None) or _alloc_dtypes
         convert_inputs = kargs.pop('convert_inputs', None) or _convert_inputs
         return self.unary_op(expr=expr, x0=x, convert_inputs=convert_inputs,
-                alloc_dtypes=alloc_dtypes, **kargs)
-    
+                             alloc_dtypes=alloc_dtypes, **kargs)
+
     def _cplx_binary_op(self, x0, x1, fname, **kargs):
         assert is_complex(x0) or is_complex(x1)
         assert 'expr' not in kargs
         assert 'convert_inputs' not in kargs
         assert 'alloc_dtypes' not in kargs
-        
-        convert_inputs='f'
-        alloc_dtypes='c'
-        expr = self.binary_complex_fn(fname,x0,x1)
+
+        convert_inputs = 'f'
+        alloc_dtypes = 'c'
+        expr = self.binary_complex_fn(fname, x0, x1)
 
         return self.binary_op(expr=expr, x0=x0, x1=x1,
-                convert_inputs=convert_inputs,
-                alloc_dtypes=alloc_dtypes, **kargs)
-    
+                              convert_inputs=convert_inputs,
+                              alloc_dtypes=alloc_dtypes, **kargs)
+
 
 # Trigonometric functions
-    def sin(self, x, out=None, queue=None, 
+
+
+    def sin(self, x, out=None, queue=None,
             dtype=None):
         """
         Trigonometric sine, element-wise.
         """
         return self._flt_or_cplx_unary_op(x, 'sin', out=out,
-                        queue=queue, dtype=dtype)
+                                          queue=queue, dtype=dtype)
+
     def cos(self, x, out=None, queue=None, dtype=None):
         """
         Cosine element-wise.
         """
         return self._flt_or_cplx_unary_op(x, 'cos', out=out,
-                        queue=queue, dtype=dtype)
+                                          queue=queue, dtype=dtype)
+
     def tan(self, x, out=None, queue=None, dtype=None):
         """
         Compute tangent element-wise.
         """
         return self._flt_or_cplx_unary_op(x, 'tan', out=out,
-                        queue=queue, dtype=dtype)
+                                          queue=queue, dtype=dtype)
 
     def arcsin(self, x, out=None, queue=None, dtype=None):
         """
         Inverse sine, element-wise.
         """
         return self._flt_or_cplx_unary_op(x, 'asin', out=out,
-                        queue=queue, dtype=dtype)
+                                          queue=queue, dtype=dtype)
+
     def arccos(self, x, out=None, queue=None, dtype=None):
         """
         Trigonometric inverse cosine, element-wise.
         """
         return self._flt_or_cplx_unary_op(x, 'acos', out=out,
-                        queue=queue, dtype=dtype)
+                                          queue=queue, dtype=dtype)
+
     def arctan(self, x, out=None, queue=None, dtype=None):
         """
         Trigonometric inverse tangent, element-wise.
         """
         return self._flt_or_cplx_unary_op(x, 'atan', out=out,
-                        queue=queue, dtype=dtype)
+                                          queue=queue, dtype=dtype)
+
     def arctan2(self, x1, x2, out=None, queue=None, dtype=None):
         """
         Element-wise arc tangent of x1/x2 choosing the quadrant correctly.
         """
-        expr='y0[i] = atan2(x0[i],x1[i])'
+        expr = 'y0[i] = atan2(x0[i],x1[i])'
         assert not is_complex(x1) and not is_complex(x2)
         return self.binary_op(x0=x1, x1=x2, expr=expr, out=out,
-                        queue=queue, dtype=dtype,
-                        convert_inputs='f', alloc_dtypes='f')
+                              queue=queue, dtype=dtype,
+                              convert_inputs='f', alloc_dtypes='f')
 
     def hypot(self, x1, x2, out=None, queue=None, dtype=None):
         """
         Given the legs of a right triangle, return its hypotenuse.
         """
         assert not is_complex(x1) and not is_complex(x2)
-        expr='y0[i] = hypot(x0[i],x1[i])'
+        expr = 'y0[i] = hypot(x0[i],x1[i])'
         return self.binary_op(x0=x1, x1=x2, expr=expr, out=out,
-                        queue=queue, dtype=dtype,
-                        convert_inputs='f', alloc_dtypes='f')
-    #def unwrap(self, p, discont=3.141592653589793, axis=-1):
-        #"""
-        #Unwrap by changing deltas between values to 2*pi complement.
-        #"""
-        #self.unary_op(x0=x, expr='y0[i] = unwrap(x0[i])', out=out,
-                        #queue=queue, dtype=dtype,
-                        # convert_inputs='f', alloc_dtypes='f')
+                              queue=queue, dtype=dtype,
+                              convert_inputs='f', alloc_dtypes='f')
+    # def unwrap(self, p, discont=3.141592653589793, axis=-1):
+        # """
+        # Unwrap by changing deltas between values to 2*pi complement.
+        # """
+        # self.unary_op(x0=x, expr='y0[i] = unwrap(x0[i])', out=out,
+        #queue=queue, dtype=dtype,
+        # convert_inputs='f', alloc_dtypes='f')
+
     def deg2rad(self, x, out=None, queue=None, dtype=None):
         """
         Convert angles from degrees to radians.
         """
         assert not is_complex(x)
         return self.unary_op(x0=x, expr='y0[i] = x0[i] * M_PI/180.0', out=out,
-                        queue=queue, dtype=dtype,
-                        convert_inputs='f', alloc_dtypes='f')
+                             queue=queue, dtype=dtype,
+                             convert_inputs='f', alloc_dtypes='f')
+
     def rad2deg(self, x, out=None, queue=None, dtype=None):
         """
         Convert angles from radians to degrees.
         """
         assert not is_complex(x)
         return self.unary_op(x0=x, expr='y0[i] = x0[i] * 180.0/M_PI', out=out,
-                        queue=queue, dtype=dtype,
-                        convert_inputs='f', alloc_dtypes='f')
+                             queue=queue, dtype=dtype,
+                             convert_inputs='f', alloc_dtypes='f')
 
 # Hyperbolic functions
     def sinh(self, x, out=None, queue=None, dtype=None):
@@ -2168,188 +2224,206 @@ class OpenClArrayBackend(ArrayBackend):
         Hyperbolic sine, element-wise.
         """
         return self._flt_or_cplx_unary_op(x, 'sinh', out=out,
-                        queue=queue, dtype=dtype)
+                                          queue=queue, dtype=dtype)
+
     def cosh(self, x, out=None, queue=None, dtype=None):
         """
         Hyperbolic cosine, element-wise.
         """
         return self._flt_or_cplx_unary_op(x, 'cosh', out=out,
-                        queue=queue, dtype=dtype)
+                                          queue=queue, dtype=dtype)
+
     def tanh(self, x, out=None, queue=None, dtype=None):
         """
         Compute hyperbolic tangent element-wise.
         """
         return self._flt_or_cplx_unary_op(x, 'tanh', out=out,
-                        queue=queue, dtype=dtype)
+                                          queue=queue, dtype=dtype)
 
     def arcsinh(self, x, out=None, queue=None, dtype=None):
         """
         Inverse hyperbolic sine element-wise.
         """
         return self._flt_or_cplx_unary_op(x, 'asinh', out=out,
-                        queue=queue, dtype=dtype)
+                                          queue=queue, dtype=dtype)
+
     def arccosh(self, x, out=None, queue=None, dtype=None):
         """
         Inverse hyperbolic cosine, element-wise.
         """
         return self._flt_or_cplx_unary_op(x, 'acosh', out=out,
-                        queue=queue, dtype=dtype)
+                                          queue=queue, dtype=dtype)
+
     def arctanh(self, x, out=None, queue=None, dtype=None):
         """
         Inverse hyperbolic tangent element-wise.
         """
         return self._flt_or_cplx_unary_op(x, 'atanh', out=out,
-                        queue=queue, dtype=dtype)
+                                          queue=queue, dtype=dtype)
 
 # Rounding
-    def around(self, a, decimals=0, out=None, 
-				queue=None, dtype=None):
+    def around(self, a, decimals=0, out=None,
+               queue=None, dtype=None):
         """
         Round an array to the given number of decimals.
         """
-        self._unsupported_argument('round','decimals',decimals,0)
+        self._unsupported_argument('round', 'decimals', decimals, 0)
         if is_complex(dtype):
             convert_inputs = (np.complex128,)
         else:
             convert_inputs = None
-        alloc_dtypes=(a.dtype,)
+        alloc_dtypes = (a.dtype,)
         return self._flt_or_map_cplx_unary_op(a, 'round', out=out,
-                        queue=queue, dtype=dtype, 
-                        alloc_dtypes=alloc_dtypes,
-                        convert_inputs=convert_inputs)
+                                              queue=queue, dtype=dtype,
+                                              alloc_dtypes=alloc_dtypes,
+                                              convert_inputs=convert_inputs)
 
-    def fix(self, x, y=None, out=None, 
-				queue=None, dtype=None):
+    def fix(self, x, y=None, out=None,
+            queue=None, dtype=None):
         """
         Round to nearest integer towards zero.
         """
         assert not is_complex(x)
         return self.unary_op(x0=x, expr='y0[i] = trunc(x0[i])', out=y,
-                        queue=queue, dtype=dtype,
-						convert_inputs='f', alloc_dtypes='f')
-    def rint(self, x, out=None, 
-				queue=None, dtype=None):
+                             queue=queue, dtype=dtype,
+                             convert_inputs='f', alloc_dtypes='f')
+
+    def rint(self, x, out=None,
+             queue=None, dtype=None):
         """
         Round elements of the array to the nearest integer.
         """
         return self._flt_or_map_cplx_unary_op(x, 'rint', out=out,
-                        queue=queue, dtype=dtype)
-    def floor(self, x, out=None, 
-				queue=None, dtype=None):
+                                              queue=queue, dtype=dtype)
+
+    def floor(self, x, out=None,
+              queue=None, dtype=None):
         """
         Return the floor of the input, element-wise.
         """
         assert not is_complex(x)
         if x.is_fp():
-            expr='y0[i] = floor(x0[i])'
+            expr = 'y0[i] = floor(x0[i])'
         else:
-            expr='y0[i] = x0[i]'
+            expr = 'y0[i] = x0[i]'
         return self.unary_op(x0=x, expr=expr, out=out,
-                        queue=queue, dtype=dtype,
-						convert_inputs='f', alloc_dtypes='f')
-    def ceil(self, x, out=None, 
-				queue=None, dtype=None):
+                             queue=queue, dtype=dtype,
+                             convert_inputs='f', alloc_dtypes='f')
+
+    def ceil(self, x, out=None,
+             queue=None, dtype=None):
         """
         Return the ceiling of the input, element-wise.
         """
         assert not is_complex(x)
         return self.unary_op(x0=x, expr='y0[i] = ceil(x0[i])', out=out,
-                        queue=queue, dtype=dtype,
-						convert_inputs='f', alloc_dtypes='f')
-    def trunc(self, x, out=None, 
-				queue=None, dtype=None):
+                             queue=queue, dtype=dtype,
+                             convert_inputs='f', alloc_dtypes='f')
+
+    def trunc(self, x, out=None,
+              queue=None, dtype=None):
         """
         Return the truncated value of the input, element-wise.
         """
         assert not is_complex(x)
         return self.unary_op(x0=x, expr='y0[i] = trunc(x0[i])', out=out,
-                        queue=queue, dtype=dtype,
-						convert_inputs='f', alloc_dtypes='f')
+                             queue=queue, dtype=dtype,
+                             convert_inputs='f', alloc_dtypes='f')
 
 
 # Sums, product, differences
+
+
     def sum(self, a, axis=None, dtype=None, out=None, queue=None, **kwds):
         """
         Sum of array elements over a given axis.
         """
         return self.reduce(kargs=(a,), axis=axis, dtype=dtype, out=out, queue=queue,
-                            neutral='0', reduce_expr='a+b', **kwds)
+                           neutral='0', reduce_expr='a+b', **kwds)
 
     def prod(self, a, axis=None, dtype=None, out=None, queue=None):
         """
         Return the product of array elements over a given axis.
         """
         return self.reduce(kargs=(a,), axis=axis, dtype=dtype, out=out, queue=queue,
-                            neutral='1', reduce_expr='a*b')
+                           neutral='1', reduce_expr='a*b')
 
     def nansum(self, a, axis=None, dtype=None, out=None, queue=None, **kwds):
         """
-        Return the sum of array elements over a given axis treating Not a Numbers (NaNs) 
+        Return the sum of array elements over a given axis treating Not a Numbers (NaNs)
         as zeros.
         """
         return self.nanreduce(kargs=(a,), axis=axis, dtype=dtype, out=out, queue=queue,
-                            neutral='0', reduce_expr='a+b', **kwds)
-    
+                              neutral='0', reduce_expr='a+b', **kwds)
+
     def nanprod(self, a, axis=None, dtype=None, out=None, queue=None):
         """
-        Return the product of array elements over a given axis treating 
+        Return the product of array elements over a given axis treating
         Not a Numbers (NaNs) as ones.
         """
         return self.nanreduce(kargs=(a,), axis=axis, dtype=dtype, out=out, queue=queue,
-                            neutral='1', reduce_expr='a*b')
+                              neutral='1', reduce_expr='a*b')
 
     def cumprod(self, a, axis=None, dtype=None, out=None,
-            queue=None):
-        """
+                queue=None):
+        r"""
         Return the cumulative product of elements along a given axis.
         /!\ precision loss because of operation ordering
         """
-        return self.inclusive_scan(kargs=(a,), axis=axis, dtype=dtype, out=out, 
-                neutral='1', scan_expr='a*b', queue=queue)
+        return self.inclusive_scan(kargs=(a,), axis=axis, dtype=dtype, out=out,
+                                   neutral='1', scan_expr='a*b', queue=queue)
+
     def cumsum(self, a, axis=None, dtype=None, out=None,
-            queue=None):
+               queue=None):
         """
         Return the cumulative sum of the elements along a given axis.
         """
-        return self.inclusive_scan(kargs=(a,), axis=axis, dtype=dtype, out=out, 
-                neutral='0', scan_expr='a+b', queue=queue)
+        return self.inclusive_scan(kargs=(a,), axis=axis, dtype=dtype, out=out,
+                                   neutral='0', scan_expr='a+b', queue=queue)
+
     def nancumprod(self, a, axis=None, dtype=None, out=None,
-            queue=None):
-        """
-        Return the cumulative product of array elements over a given axis treating 
+                   queue=None):
+        r"""
+        Return the cumulative product of array elements over a given axis treating
         Not a Numbers (NaNs) as one.
         /!\ precision loss because of operation ordering
         """
-        return self.inclusive_nanscan(kargs=(a,), axis=axis, dtype=dtype, out=out, 
-                neutral='1', scan_expr='a*b', queue=queue)
+        return self.inclusive_nanscan(kargs=(a,), axis=axis, dtype=dtype, out=out,
+                                      neutral='1', scan_expr='a*b', queue=queue)
+
     def nancumsum(self, a, axis=None, dtype=None, out=None,
-            queue=None):
+                  queue=None):
         """
-        Return the cumulative sum of array elements over a given axis treating 
+        Return the cumulative sum of array elements over a given axis treating
         Not a Numbers (NaNs) as zero.
         """
-        return self.inclusive_nanscan(kargs=(a,), axis=axis, dtype=dtype, out=out, 
-                neutral='0', scan_expr='a+b', queue=queue)
+        return self.inclusive_nanscan(kargs=(a,), axis=axis, dtype=dtype, out=out,
+                                      neutral='0', scan_expr='a+b', queue=queue)
+
     def diff(self, a, n=1, axis=-1):
         """
         Calculate the n-th discrete difference along given axis.
         """
         self._not_implemented_yet('diff')
+
     def ediff1d(self, ary, to_end=None, to_begin=None):
         """
         The differences between consecutive elements of an array.
         """
         self._not_implemented_yet('ediff1d')
+
     def gradient(self, f, *varargs, **kwargs):
         """
         Return the gradient of an N-dimensional array.
         """
         self._not_implemented_yet('gradient')
+
     def cross(self, a, b, axisa=-1, axisb=-1, axisc=-1, axis=None):
         """
         Return the cross product of two (arrays of) vectors.
         """
         self._not_implemented_yet('cross')
+
     def trapz(self, y, x=None, dx=1.0, axis=-1):
         """
         Integrate along the given axis using the composite trapezoidal rule.
@@ -2362,205 +2436,221 @@ class OpenClArrayBackend(ArrayBackend):
         """
         Calculate the exponential of all elements in the input array.
         """
-        return self._flt_or_cplx_unary_op(x, 'exp',out=out,
-                        queue=queue, dtype=dtype)
+        return self._flt_or_cplx_unary_op(x, 'exp', out=out,
+                                          queue=queue, dtype=dtype)
+
     def exp2(self, x, out=None,
-            queue=None, dtype=None):
+             queue=None, dtype=None):
         """
         Calculate 2**p for all p in the input array.
         """
-        expr='y0[i] = exp2(x0[i])'
-        return self._flt_or_cplx_unary_op(x, 'exp2',out=out,
-                        queue=queue, dtype=dtype)
+        expr = 'y0[i] = exp2(x0[i])'
+        return self._flt_or_cplx_unary_op(x, 'exp2', out=out,
+                                          queue=queue, dtype=dtype)
+
     def expm1(self, x, out=None,
-            queue=None, dtype=None):
+              queue=None, dtype=None):
         """
         Calculate exp(x) - 1 for all elements in the array.
         """
-        expr='y0[i] = expm1(x0[i])'
-        return self._flt_or_cplx_unary_op(x, 'expm1',out=out,
-                        queue=queue, dtype=dtype)
-    
+        expr = 'y0[i] = expm1(x0[i])'
+        return self._flt_or_cplx_unary_op(x, 'expm1', out=out,
+                                          queue=queue, dtype=dtype)
+
     def log(self, x, out=None,
             queue=None, dtype=None):
         """
         Natural logarithm, element-wise.
         """
-        return self._flt_or_cplx_unary_op(x, 'log',out=out,
-                        queue=queue, dtype=dtype)
+        return self._flt_or_cplx_unary_op(x, 'log', out=out,
+                                          queue=queue, dtype=dtype)
+
     def log2(self, x, out=None,
-            queue=None, dtype=None):
+             queue=None, dtype=None):
         """
         Base-2 logarithm of x.
         """
-        return self._flt_or_cplx_unary_op(x, 'log2',out=out,
-                        queue=queue, dtype=dtype)
+        return self._flt_or_cplx_unary_op(x, 'log2', out=out,
+                                          queue=queue, dtype=dtype)
+
     def log10(self, x, out=None,
-            queue=None, dtype=None):
+              queue=None, dtype=None):
         """
         Return the base 10 logarithm of the input array, element-wise.
         """
-        return self._flt_or_cplx_unary_op(x, 'log10',out=out,
-                        queue=queue, dtype=dtype)
+        return self._flt_or_cplx_unary_op(x, 'log10', out=out,
+                                          queue=queue, dtype=dtype)
+
     def log1p(self, x, out=None,
-            queue=None, dtype=None):
+              queue=None, dtype=None):
         """
         Return the natural logarithm of one plus the input array, element-wise.
         """
-        expr='y0[i] = log1p(x0[i])'
-        return self._flt_or_cplx_unary_op(x, 'log1p',out=out,
-                        queue=queue, dtype=dtype)
+        expr = 'y0[i] = log1p(x0[i])'
+        return self._flt_or_cplx_unary_op(x, 'log1p', out=out,
+                                          queue=queue, dtype=dtype)
 
     def logaddexp(self, x1, x2, out=None,
-            queue=None, dtype=None):
+                  queue=None, dtype=None):
         """
         Logarithm of the sum of exponentiations of the inputs.
         """
         assert not is_complex(x1) and not is_complex(x2)
-        expr='y0[i] = log(exp(x0[i]) + exp(x1[i]))'
+        expr = 'y0[i] = log(exp(x0[i]) + exp(x1[i]))'
         return self.binary_op(x0=x1, x1=x2, expr=expr, out=out,
-                    queue=queue, dtype=dtype,
-						convert_inputs='f', alloc_dtypes='f')
+                              queue=queue, dtype=dtype,
+                              convert_inputs='f', alloc_dtypes='f')
+
     def logaddexp2(self, x1, x2, out=None,
-            queue=None, dtype=None):
+                   queue=None, dtype=None):
         """
         Logarithm of the sum of exponentiations of the inputs in base-2.
         """
         assert not is_complex(x1) and not is_complex(x2)
-        expr='y0[i] = log2(pow(2,x0[i]) + pow(2,x1[i]))'
+        expr = 'y0[i] = log2(pow(2,x0[i]) + pow(2,x1[i]))'
         return self.binary_op(x0=x1, x1=x2, expr=expr, out=out,
-                    queue=queue, dtype=dtype,
-						convert_inputs='f', alloc_dtypes='f')
+                              queue=queue, dtype=dtype,
+                              convert_inputs='f', alloc_dtypes='f')
 
- #Other special functions
+ # Other special functions
     def i0(self, x, out=None,
             queue=None, dtype=None):
         """
         Modified Bessel function of the first kind, order 0.
         """
         self._not_implemented_yet('i0',
-						convert_inputs='f', alloc_dtypes='f')
+                                  convert_inputs='f', alloc_dtypes='f')
+
     def sinc(self, x, out=None,
-            queue=None, dtype=None):
+             queue=None, dtype=None):
         """
         Return the sinc function.
         """
         assert not is_complex(x)
-        expr='y0[i] = sin(M_PI*x0[i]) / (M_PI*x0[i])'
+        expr = 'y0[i] = sin(M_PI*x0[i]) / (M_PI*x0[i])'
         return self.unary_op(x0=x, expr=expr, out=out,
-                    queue=queue, dtype=dtype,
-						convert_inputs='f', alloc_dtypes='f')
+                             queue=queue, dtype=dtype,
+                             convert_inputs='f', alloc_dtypes='f')
 
- #Floating point routines
+ # Floating point routines
     def signbit(self, x, out=None,
-            queue=None, dtype=HYSOP_BOOL):
+                queue=None, dtype=HYSOP_BOOL):
         """
         Returns element-wise True where signbit is set (less than zero).
         """
         assert not is_complex(x)
-        expr='y0[i] = signbit(x0[i])'
+        expr = 'y0[i] = signbit(x0[i])'
         return self.unary_op(x0=x, expr=expr, out=out,
-                    queue=queue, dtype=dtype,
-						convert_inputs='f')
+                             queue=queue, dtype=dtype,
+                             convert_inputs='f')
+
     def copysign(self, x1, x2, out=None,
-            queue=None, dtype=None):
+                 queue=None, dtype=None):
         """
         Change the sign of x1 to that of x2, element-wise.
         """
         assert not is_complex(x1) and not is_complex(x2)
-        expr='y0[i] = copysign(x0[i], x1[i])'
+        expr = 'y0[i] = copysign(x0[i], x1[i])'
         return self.binary_op(x0=x1, x1=x2, expr=expr, out=out,
-                    queue=queue, dtype=dtype,
-						convert_inputs='f', alloc_dtypes='f')
+                              queue=queue, dtype=dtype,
+                              convert_inputs='f', alloc_dtypes='f')
+
     def frexp(self, x, out1=None, out2=None,
-            queue=None):
+              queue=None):
         """
         Decompose the elements of x into mantissa and twos exponent.
         """
         assert not is_complex(x)
-        expr='int buf; y0[i] = frexp(x0[i], &buf); y1[i]=buf;'
-        return self.nary_op(ikargs=(x,), okargs=(out1,out2), operation=expr,
-                    queue=queue, 
-                    convert_inputs='f', alloc_dtypes=('f',np.int32))
+        expr = 'int buf; y0[i] = frexp(x0[i], &buf); y1[i]=buf;'
+        return self.nary_op(ikargs=(x,), okargs=(out1, out2), operation=expr,
+                            queue=queue,
+                            convert_inputs='f', alloc_dtypes=('f', np.int32))
+
     def ldexp(self, x1, x2, out=None,
-            queue=None, dtype=None):
+              queue=None, dtype=None):
         """
         Returns x1 * 2**x2, element-wise.
         """
-        expr='y0[i] = ldexp(x0[i])'
+        expr = 'y0[i] = ldexp(x0[i])'
         return self.unary_op(x0=x, expr=expr, out=out,
-                    queue=queue, dtype=dtype,
-					convert_inputs='f', alloc_dtypes='f')
-        
+                             queue=queue, dtype=dtype,
+                             convert_inputs='f', alloc_dtypes='f')
+
 
 # Arithmetic operations
+
+
     def add(self, x1, x2, out=None,
-            queue=None, dtype=None, name='add',**kwds):
+            queue=None, dtype=None, name='add', **kwds):
         """
         Add arguments element-wise.
         """
         if is_complex(x1) or is_complex(x2):
-            return self._cplx_binary_op(x1,x2,'add',
-                    out=out,queue=queue,dtype=dtype,name=name,**kwds)
+            return self._cplx_binary_op(x1, x2, 'add',
+                                        out=out, queue=queue, dtype=dtype, name=name, **kwds)
         else:
             expr = 'y0[i] = (x0[i] + x1[i])'
-            return self.binary_op(x0=x1, x1=x2, expr=expr, out=out, 
-                    queue=queue, dtype=dtype,name=name,**kwds)
+            return self.binary_op(x0=x1, x1=x2, expr=expr, out=out,
+                                  queue=queue, dtype=dtype, name=name, **kwds)
+
     def reciprocal(self, x, out=None,
-                queue=None, dtype=None):
+                   queue=None, dtype=None):
         """
         Return the reciprocal of the argument, element-wise.
         """
         dt = get_dtype(x)
         if is_complex(x):
-            expr = 'y0[i] = {}(1,x0[i])'.format(self.complex_fn('rdivide',x))
+            expr = 'y0[i] = {}(1,x0[i])'.format(self.complex_fn('rdivide', x))
         elif is_integer(x):
             info = np.iinfo(dt)
-            if (dt==np.int32) or (dt==np.int64): # to match numpy output
-                suffix =  'u'  if is_unsigned(x)  else ''
-                suffix += 'L'  if (dt==np.int64)  else ''
-                imin='{}{}'.format(info.min,suffix)
-                if dt==np.int64:
-                    imin='{}{}'.format(info.min+1,suffix) #...
+            if (dt == np.int32) or (dt == np.int64):  # to match numpy output
+                suffix = 'u' if is_unsigned(x) else ''
+                suffix += 'L' if (dt == np.int64) else ''
+                imin = '{}{}'.format(info.min, suffix)
+                if dt == np.int64:
+                    imin = '{}{}'.format(info.min+1, suffix)  # ...
                 expr = 'y0[i] = (x0[i]==0 ? {imin} : (1{suffix}/x0[i]))'
-                expr = expr.format(imin=imin,suffix=suffix)
+                expr = expr.format(imin=imin, suffix=suffix)
             else:
                 expr = 'y0[i] = (x0[i]==0 ? 0 : (1/x0[i]))'
         else:
             expr = 'y0[i] = (1.0/x0[i])'
         return self.unary_op(x0=x, expr=expr, out=out,
-                    queue=queue, dtype=dtype)
+                             queue=queue, dtype=dtype)
+
     def negative(self, x, out=None,
-                queue=None, dtype=None):
+                 queue=None, dtype=None):
         """
         Numerical negative, element-wise.
         """
         if is_complex(x):
-            expr = 'y0[i] = {}(x0[i])'.format(self.complex_fn('neg',x))
+            expr = 'y0[i] = {}(x0[i])'.format(self.complex_fn('neg', x))
         else:
             expr = 'y0[i] = (-x0[i])'
         return self.unary_op(x0=x, expr=expr, out=out,
-                    queue=queue, dtype=dtype)
+                             queue=queue, dtype=dtype)
+
     def multiply(self, x1, x2, out=None,
-                queue=None, dtype=None):
+                 queue=None, dtype=None):
         """
         Multiply arguments element-wise.
         """
         if is_complex(x1) or is_complex(x2):
-            return self._cplx_binary_op(x1,x2,'mul',
-                    out=out,queue=queue,dtype=dtype,)
+            return self._cplx_binary_op(x1, x2, 'mul',
+                                        out=out, queue=queue, dtype=dtype,)
         else:
             expr = 'y0[i] = (x0[i] * x1[i])'
             return self.binary_op(x0=x1, x1=x2, expr=expr, out=out,
-                        queue=queue, dtype=dtype)
+                                  queue=queue, dtype=dtype)
+
     def divide(self, x1, x2, out=None,
-                queue=None, dtype=None):
+               queue=None, dtype=None):
         """
         Divide arguments element-wise.
         """
         if is_complex(x1) or is_complex(x2):
-            return self._cplx_binary_op(x1,x2,'divide',
-                    out=out,queue=queue,dtype=dtype)
+            return self._cplx_binary_op(x1, x2, 'divide',
+                                        out=out, queue=queue, dtype=dtype)
         elif is_integer(x2):
             expr = 'y0[i] = (x1[i]==0 ? 0 : floor(x0[i] / x1[i]))'
             convert_inputs = np.float64
@@ -2568,81 +2658,86 @@ class OpenClArrayBackend(ArrayBackend):
             expr = 'y0[i] = (x0[i] / x1[i])'
             convert_inputs = None
         return self.binary_op(x0=x1, x1=x2, expr=expr, out=out,
-                    queue=queue, dtype=dtype,
-                    convert_inputs=convert_inputs)
+                              queue=queue, dtype=dtype,
+                              convert_inputs=convert_inputs)
+
     def power(self, x1, x2, out=None,
-                queue=None, dtype=None):
+              queue=None, dtype=None):
         """
         First array elements raised to powers from second array, element-wise.
         """
         if is_complex(x1) or is_complex(x2):
-            return self._cplx_binary_op(x1,x2,'pow',
-                    out=out,queue=queue,dtype=dtype,)
+            return self._cplx_binary_op(x1, x2, 'pow',
+                                        out=out, queue=queue, dtype=dtype,)
         else:
             expr = 'pow(x0[i], x1[i])'
-            expr='y0[i] = ' + expr
+            expr = 'y0[i] = ' + expr
             return self.binary_op(x0=x1, x1=x2, expr=expr, out=out,
-                        queue=queue, dtype=dtype,
-                        convert_inputs='f')
+                                  queue=queue, dtype=dtype,
+                                  convert_inputs='f')
+
     def subtract(self, x1, x2, out=None,
-                queue=None, dtype=None):
+                 queue=None, dtype=None):
         """
         Subtract arguments, element-wise.
         """
         if is_complex(x1) or is_complex(x2):
-            return self._cplx_binary_op(x1,x2,'sub',
-                    out=out,queue=queue,dtype=dtype,)
+            return self._cplx_binary_op(x1, x2, 'sub',
+                                        out=out, queue=queue, dtype=dtype,)
         else:
             expr = 'y0[i] = (x0[i] - x1[i])'
             return self.binary_op(x0=x1, x1=x2, expr=expr, out=out,
-                        queue=queue, dtype=dtype)
+                                  queue=queue, dtype=dtype)
+
     def true_divide(self, x1, x2, out=None,
-                queue=None, dtype=None):
+                    queue=None, dtype=None):
         """
         Returns a true division of the inputs, element-wise.
         """
         if is_complex(x1) or is_complex(x2):
-            return self._cplx_binary_op(x1,x2,'divide',
-                    out=out,queue=queue,dtype=dtype)
+            return self._cplx_binary_op(x1, x2, 'divide',
+                                        out=out, queue=queue, dtype=dtype)
         else:
             expr = 'y0[i] = (x0[i] / x1[i])'
-            convert_inputs=None
+            convert_inputs = None
             if is_integer(x1) and is_integer(x2):
-                dtype=(dtype or HYSOP_REAL)
-                convert_inputs=dtype
+                dtype = (dtype or HYSOP_REAL)
+                convert_inputs = dtype
             return self.binary_op(x0=x1, x1=x2, expr=expr, out=out,
-                        queue=queue, dtype=dtype,
-                        convert_inputs=convert_inputs)
+                                  queue=queue, dtype=dtype,
+                                  convert_inputs=convert_inputs)
+
     def floor_divide(self, x1, x2, out=None,
-                queue=None, dtype=None):
+                     queue=None, dtype=None):
         """
         Return the largest integer smaller or equal to the division of the inputs.
         Returns y = floor(x1/x2)
-        
+
         Special floating point values are handled this way:
 
-        x1   x2   output 
+        x1   x2   output
         nan  ***   nan
         ***  nan   nan
         inf  ***   nan
         ---  inf   0 if (x1 and x2 have same sign) else -1
         ---  ---   floor(x1/x2)
-        
+
         Note: inf means +inf or -inf.
         """
         if is_complex(x1) or is_complex(x2):
             self._not_implemented_yet('floor_divide.')
         elif is_integer(x1):
             expr = 'y0[i] = (x1[i]==0 ? 0 : floor(x0[i]/x1[i]))'
-            convert_inputs=(np.float64,None)
+            convert_inputs = (np.float64, None)
         else:
             expr = 'y0[i] = ((isnan(x0[i])||isnan(x1[i])||isinf(x0[i]))?NAN:(isinf(x1[i])?((signbit(x0[i])^signbit(x1[i]))?-1:0):floor(x0[i]/x1[i])))'
-            convert_inputs='f'
+            convert_inputs = 'f'
         return self.binary_op(x0=x1, x1=x2, expr=expr, out=out,
-                    queue=queue, dtype=dtype,
-                    convert_inputs=convert_inputs)
+                              queue=queue, dtype=dtype,
+                              convert_inputs=convert_inputs)
+
     def fmod(self, x1, x2, out=None,
-                queue=None, dtype=None):
+             queue=None, dtype=None):
         """
         Return the element-wise remainder of division (REM).
         Remainder has the same sign as the dividend x1.
@@ -2656,41 +2751,43 @@ class OpenClArrayBackend(ArrayBackend):
         else:
             expr = 'y0[i] = (x1[i] == 0 ? 0 : (x0[i] % x1[i]))'
         return self.binary_op(x0=x1, x1=x2, expr=expr, out=out,
-                    queue=queue, dtype=dtype)
+                              queue=queue, dtype=dtype)
+
     def mod(self, x1, x2, out=None,
-                queue=None, dtype=None):
+            queue=None, dtype=None):
         """
         Return element-wise remainder of division (MOD).
         Remainder has the same sign as the divisor x2.
         Match Python modulus operator x1 % x2.
         Returns x - y*floor(x/y)
-        
+
         Special floating point values are handled this way:
 
-        x1   x2   output 
+        x1   x2   output
         nan  ***   nan
         ***  nan   nan
         inf  inf   nan
         inf  ---   x2 if (x1 and x2 have same sign) else x1
         ---  ---   x1 - x2 * floor(x1/x2)
-        
+
         Note: inf means +inf or -inf.
         """
         assert not is_complex(x1) and not is_complex(x2)
         expr = 'x0[i] - x1[i]*floor(x0[i]/x1[i])'
-        convert_inputs=None
+        convert_inputs = None
         if is_integer(x1):
             expr = '(x1[i]==0 ? 0 : {})'.format(expr)
-            convert_inputs=(np.float64,None)
+            convert_inputs = (np.float64, None)
         if is_fp(x1) or is_fp(x2):
             expr = '(isnan(x0[i])||isnan(x1[i])?NAN:(isinf(x1[i])?(isinf(x0[i])?NAN:(signbit(x0[i])^signbit(x1[i])?x1[i]:x0[i])):{}))'.format(expr)
-            convert_inputs='f'
-        expr='y0[i] = '+expr
+            convert_inputs = 'f'
+        expr = 'y0[i] = '+expr
         return self.binary_op(x0=x1, x1=x2, expr=expr, out=out,
-                queue=queue, dtype=dtype,
-                convert_inputs=convert_inputs)
+                              queue=queue, dtype=dtype,
+                              convert_inputs=convert_inputs)
+
     def modf(self, x, out1=None, out2=None,
-            queue=None, dtype=None):
+             queue=None, dtype=None):
         """
         Return the fractional and integral parts of an array, element-wise.
         The fractional and integral parts are negative if the given number is negative.
@@ -2703,12 +2800,12 @@ class OpenClArrayBackend(ArrayBackend):
             expr = 'y0[i] = modf(x0[i], &y1[i])'
         else:
             expr = 'y0[i] = 0; y1[i] = x0[i];'
-        return self.nary_op(ikargs=(x,), okargs=(out1,out2), operation=expr,
-                    queue=queue, dtype=dtype, alloc_dtypes='f')
+        return self.nary_op(ikargs=(x,), okargs=(out1, out2), operation=expr,
+                            queue=queue, dtype=dtype, alloc_dtypes='f')
 
 # Handling complex numbers
     def angle(self, z, deg=False,
-                queue=None, dtype=None):
+              queue=None, dtype=None):
         """
         Return the angle of the complex argument.
         """
@@ -2731,30 +2828,33 @@ class OpenClArrayBackend(ArrayBackend):
         if is_complex(val):
             expr = 'y0[i] = x0[i].real'
             dtype = dtype or complex_to_float_dtype(val.dtype)
-            return self.unary_op(x0=val, expr=expr, out=out, 
-                    queue=queue, dtype=dtype)
-        else: 
+            return self.unary_op(x0=val, expr=expr, out=out,
+                                 queue=queue, dtype=dtype)
+        else:
             return val
+
     def imag(self, val):
         """
         Return the imaginary part of the elements of the array.
         """
         return self.wrap(val.handle.imag)
+
     def conj(self, x, out=None):
         """
         Return the complex conjugate, element-wise.
         """
         return self.wrap(x.handle.conj())
-    
-# Miscellanous 
+
+# Miscellanous
     def convolve(self, a, v, mode='full',
-                queue=None, dtype=None):
+                 queue=None, dtype=None):
         """
         Returns the discrete, linear convolution of two one-dimensional sequences.
         """
         self._not_implemented_yet('convolve')
+
     def clip(self, a, a_min, a_max, out=None,
-                queue=None, dtype=None):
+             queue=None, dtype=None):
         """
         Clip (limit) the values in an array.
         """
@@ -2762,14 +2862,14 @@ class OpenClArrayBackend(ArrayBackend):
             expr = 'y0[i] = (isnan(x0[i]) ? NAN : clamp(x0[i], p0, p1))'
         elif is_complex(a):
             expr = 'y0[i] = ({cond0} ? {expr0} : ({cond1} ? {expr1} : {default}))'
-            cond0='({xr}<{p0r} || ({xr}=={p0r} && {xi}<{p0i}))'
-            cond1='({xr}>{p1r} || ({xr}=={p1r} && {xi}>{p1i}))'
-            expr0='p0'
-            expr1='p1'
-            default='x0[i]'
+            cond0 = '({xr}<{p0r} || ({xr}=={p0r} && {xi}<{p0i}))'
+            cond1 = '({xr}>{p1r} || ({xr}=={p1r} && {xi}>{p1i}))'
+            expr0 = 'p0'
+            expr1 = 'p1'
+            default = 'x0[i]'
             expr = expr.format(cond0=cond0, cond1=cond1, expr0=expr0, expr1=expr1, default=default)
-            expr = expr.format(xr = 'x0[i].real', p0r = 'p0.real', p1r='p1.real',
-                               xi = 'x0[i].imag', p0i = 'p0.imag', p1i='p1.imag')
+            expr = expr.format(xr='x0[i].real', p0r='p0.real', p1r='p1.real',
+                               xi='x0[i].imag', p0i='p0.imag', p1i='p1.imag')
         else:
             expr = 'y0[i] = clamp(x0[i], p0, p1)'
             atype = a.dtype
@@ -2778,10 +2878,10 @@ class OpenClArrayBackend(ArrayBackend):
         a_max = np.asarray(a_max, dtype=atype)
         assert a_min <= a_max
         return self.unary_op(x0=a, expr=expr, out=out, extra_kargs=(a_min, a_max),
-                    queue=queue, dtype=dtype)
+                             queue=queue, dtype=dtype)
 
     def clip_components(self, a, a_min, a_max, out=None,
-                queue=None, dtype=None):
+                        queue=None, dtype=None):
         """
         Clip (limit) the real and imaginary part of a complex number independantly.
         ie:
@@ -2797,39 +2897,42 @@ class OpenClArrayBackend(ArrayBackend):
         expr1 = 'y0[i].imag = (isnan(x0[i].imag) ? NAN : clamp(x0[i].imag, p0.imag, p1.imag));'
         expr = expr0+expr1
         return self.unary_op(x0=a, expr=expr, out=out, extra_kargs=(a_min, a_max),
-                    queue=queue, dtype=dtype)
+                             queue=queue, dtype=dtype)
 
     def sqrt(self, x, out=None,
-                queue=None, dtype=None):
+             queue=None, dtype=None):
         """
         Return the positive square-root of an array, element-wise.
         """
         return self._flt_or_cplx_unary_op(x, 'sqrt', out=out,
-                        queue=queue, dtype=dtype)
+                                          queue=queue, dtype=dtype)
+
     def cbrt(self, x, out=None,
-                queue=None, dtype=None):
+             queue=None, dtype=None):
         """
         Return the cube-root of an array, element-wise.
         """
         assert not is_complex(x)
         expr = 'y0[i] = cbrt(x0[i])'
         return self.unary_op(x0=x, expr=expr, out=out,
-                    queue=queue, dtype=dtype,
-                    convert_inputs='f', alloc_dtypes='f')
+                             queue=queue, dtype=dtype,
+                             convert_inputs='f', alloc_dtypes='f')
+
     def square(self, x, out=None,
-                queue=None, dtype=None):
+               queue=None, dtype=None):
         """
         Return the element-wise square of the input.
         """
         expr = 'y0[i] = x0[i]*x0[i]'
         if is_complex(x):
-            return self._cplx_binary_op(x,x,'mul', 
-                    queue=queue, dtype=dtype, out=out)
+            return self._cplx_binary_op(x, x, 'mul',
+                                        queue=queue, dtype=dtype, out=out)
         else:
             return self.unary_op(x0=x, expr=expr, out=out,
-                        queue=queue, dtype=dtype)
+                                 queue=queue, dtype=dtype)
+
     def nan_to_num(self, x, out=None,
-                queue=None, dtype=None):
+                   queue=None, dtype=None):
         """
         Replace nan with zero and inf with finite numbers.
         """
@@ -2837,9 +2940,9 @@ class OpenClArrayBackend(ArrayBackend):
         if is_fp(x) or is_complex(x):
             dtype = dtype or x.dtype
             if is_complex(dtype):
-                ftype=complex_to_float_dtype(dtype)
+                ftype = complex_to_float_dtype(dtype)
             else:
-                ftype=dtype
+                ftype = dtype
             info = np.finfo(ftype)
             max_val = np.asarray(info.max, dtype=ftype)
             min_val = np.asarray(info.min, dtype=ftype)
@@ -2849,14 +2952,15 @@ class OpenClArrayBackend(ArrayBackend):
                 real = '(isnan(x0[i].real) ? 0 : (isinf(x0[i].real) ? (signbit(x0[i].real) ? p0 : p1) : x0[i].real))'
                 imag = '(isnan(x0[i].imag) ? 0 : (isinf(x0[i].imag) ? (signbit(x0[i].imag) ? p0 : p1) : x0[i].imag))'
                 expr = 'y0[i] = {new}({real},{imag})'.format(real=real, imag=imag,
-                        new=self.complex_fn('new', dtype))
-            return self.unary_op(x0=x, expr=expr, out=out, 
-                        extra_kargs=(min_val, max_val),
-                        queue=queue, dtype=dtype)
+                                                             new=self.complex_fn('new', dtype))
+            return self.unary_op(x0=x, expr=expr, out=out,
+                                 extra_kargs=(min_val, max_val),
+                                 queue=queue, dtype=dtype)
         elif out:
             out.copyfrom(x)
         else:
             return x.astype(dtype=dtype or x.dtype, queue=queue)
+
     def real_if_close(self, a, tol=100, queue=None):
         """
         If complex input returns a real array if complex parts are close to zero.
@@ -2866,18 +2970,18 @@ class OpenClArrayBackend(ArrayBackend):
                 from numpy.core import getlimits
                 f = getlimits.finfo(a.dtype.type)
                 tol = f.eps * tol
-            
-            if self.reduce(kargs=(a,), neutral='true', reduce_expr='a&&b', 
-                        map_expr='(fabs(x0[i].imag) < tol)',
-                        queue=queue, dtype=HYSOP_BOOL,
-                        extra_kargs=(np.float64(tol),),
-                        extra_arguments=['const double tol']).get():
+
+            if self.reduce(kargs=(a,), neutral='true', reduce_expr='a&&b',
+                           map_expr='(fabs(x0[i].imag) < tol)',
+                           queue=queue, dtype=HYSOP_BOOL,
+                           extra_kargs=(np.float64(tol),),
+                           extra_arguments=['const double tol']).get():
                 return a.real
             else:
                 return a
         else:
             return a
-    
+
     def maximum(self, x1, x2, out=None,
                 queue=None, dtype=None):
         """
@@ -2892,28 +2996,29 @@ class OpenClArrayBackend(ArrayBackend):
         elif is_complex(x1) and is_complex(x2):
             default = '(((x0[i].real>x1[i].real) || ((x0[i].real==x1[i].real) && (x0[i].imag>x1[i].imag))) ? x0[i] : x1[i])'
             expr = '({} ? {} : ({} ? {} : {}))'.format(
-                    '(isnan(x0[i].real) || isnan(x0[i].imag))', 'x0[i]',
-                    '(isnan(x1[i].real) || isnan(x1[i].imag))', 'x1[i]',
-                    default)
+                '(isnan(x0[i].real) || isnan(x0[i].imag))', 'x0[i]',
+                '(isnan(x1[i].real) || isnan(x1[i].imag))', 'x1[i]',
+                default)
         elif is_complex(x1):
-            fromreal=self.complex_fn('fromreal', x1)
+            fromreal = self.complex_fn('fromreal', x1)
             default = '(((x0[i].real>x1[i]) || ((x0[i].real==x1[i]) && (x0[i].imag>0))) ? x0[i] : {}(x1[i])'.format(fromreal)
             expr = '({} ? {} : ({} ? {} : {}))'.format(
-                    '(isnan(x0[i].real) || isnan(x0[i].imag))', 'x0[i]',
-                    'isnan(x1[i])', '{}(NAN)'.format(fromreal),
-                    default)
+                '(isnan(x0[i].real) || isnan(x0[i].imag))', 'x0[i]',
+                'isnan(x1[i])', '{}(NAN)'.format(fromreal),
+                default)
         elif is_complex(x2):
-            fromreal=self.complex_fn('fromreal', x2)
+            fromreal = self.complex_fn('fromreal', x2)
             default = '(((x0[i]>x1[i].real) || ((x0[i]==x1[i].real) && (0>x1[i].imag))) ? {}(x0[i]) : x1[i])'.format(fromreal)
             expr = '({} ? {} : ({} ? {} : {}))'.format(
-                    'isnan(x0[i])', '{}(x0[i])'.format(fromreal),
-                    '(isnan(x1[i].real) || isnan(x1[i].imag))', 'x1[i]',
-                    default)
+                'isnan(x0[i])', '{}(x0[i])'.format(fromreal),
+                '(isnan(x1[i].real) || isnan(x1[i].imag))', 'x1[i]',
+                default)
         else:
             expr = 'max(x0[i], x1[i])'
-        expr='y0[i] = '+expr
+        expr = 'y0[i] = '+expr
         return self.binary_op(x0=x1, x1=x2, expr=expr, out=out,
-                    queue=queue, dtype=dtype)
+                              queue=queue, dtype=dtype)
+
     def minimum(self, x1, x2, out=None,
                 queue=None, dtype=None):
         """
@@ -2928,30 +3033,31 @@ class OpenClArrayBackend(ArrayBackend):
         elif is_complex(x1) and is_complex(x2):
             default = '(((x0[i].real<x1[i].real) || ((x0[i].real==x1[i].real) && (x0[i].imag<x1[i].imag))) ? x0[i] : x1[i])'
             expr = '({} ? {} : ({} ? {} : {}))'.format(
-                    '(isnan(x0[i].real) || isnan(x0[i].imag))', 'x0[i]',
-                    '(isnan(x1[i].real) || isnan(x1[i].imag))', 'x1[i]',
-                    default)
+                '(isnan(x0[i].real) || isnan(x0[i].imag))', 'x0[i]',
+                '(isnan(x1[i].real) || isnan(x1[i].imag))', 'x1[i]',
+                default)
         elif is_complex(x1):
-            fromreal=self.complex_fn('fromreal', x1)
+            fromreal = self.complex_fn('fromreal', x1)
             default = '(((x0[i].real<x1[i]) || ((x0[i].real==x1[i]) && (x0[i].imag<0))) ? x0[i] : {}(x1[i])'.format(fromreal)
             expr = '({} ? {} : ({} ? {} : {}))'.format(
-                    '(isnan(x0[i].real) || isnan(x0[i].imag))', 'x0[i]',
-                    'isnan(x1[i])', '{}(x1[i])'.format(fromreal),
-                    default)
+                '(isnan(x0[i].real) || isnan(x0[i].imag))', 'x0[i]',
+                'isnan(x1[i])', '{}(x1[i])'.format(fromreal),
+                default)
         elif is_complex(x2):
-            fromreal=self.complex_fn('fromreal', x2)
+            fromreal = self.complex_fn('fromreal', x2)
             default = '(((x0[i]<x1[i].real) || ((x0[i]==x1[i].real) && (0<x1[i].imag))) ? {}(x0[i]) : x1[i])'.format(fromreal)
             expr = '({} ? {} : ({} ? {} : {}))'.format(
-                    'isnan(x0[i])', '{}(x0[i])'.format(fromreal),
-                    '(isnan(x1[i].real) || isnan(x1[i].imag))', 'x1[i]',
-                    default)
+                'isnan(x0[i])', '{}(x0[i])'.format(fromreal),
+                '(isnan(x1[i].real) || isnan(x1[i].imag))', 'x1[i]',
+                default)
         else:
             expr = 'min(x0[i], x1[i])'
-        expr='y0[i] = '+expr
+        expr = 'y0[i] = '+expr
         return self.binary_op(x0=x1, x1=x2, expr=expr, out=out,
-                    queue=queue, dtype=dtype)
+                              queue=queue, dtype=dtype)
+
     def fmax(self, x1, x2, out=None,
-                queue=None, dtype=None, name='_fmax'):
+             queue=None, dtype=None, name='_fmax'):
         """
         Element-wise maximum of array elements, ignoring NaNs.
         """
@@ -2964,30 +3070,31 @@ class OpenClArrayBackend(ArrayBackend):
         elif is_complex(x1) and is_complex(x2):
             default = '(((x0[i].real>x1[i].real) || ((x0[i].real==x1[i].real) && (x0[i].imag>x1[i].imag))) ? x0[i] : x1[i])'
             expr = '({} ? {} : ({} ? {} : {}))'.format(
-                    '(isnan(x0[i].real) || isnan(x0[i].imag))', 'x1[i]',
-                    '(isnan(x1[i].real) || isnan(x1[i].imag))', 'x0[i]',
-                    default)
+                '(isnan(x0[i].real) || isnan(x0[i].imag))', 'x1[i]',
+                '(isnan(x1[i].real) || isnan(x1[i].imag))', 'x0[i]',
+                default)
         elif is_complex(x1):
-            fromreal=self.complex_fn('fromreal', x1)
+            fromreal = self.complex_fn('fromreal', x1)
             default = '(((x0[i].real>x1[i]) || ((x0[i].real==x1[i]) && (x0[i].imag>0))) ? x0[i] : {}(x1[i])'.format(fromreal)
             expr = '({} ? {} : ({} ? {} : {}))'.format(
-                    '(isnan(x0[i].real) || isnan(x0[i].imag))', '{}(x1[i])'.format(fromreal),
-                    'isnan(x1[i])', 'x0[i]',
-                    default)
+                '(isnan(x0[i].real) || isnan(x0[i].imag))', '{}(x1[i])'.format(fromreal),
+                'isnan(x1[i])', 'x0[i]',
+                default)
         elif is_complex(x2):
-            fromreal=self.complex_fn('fromreal', x2)
+            fromreal = self.complex_fn('fromreal', x2)
             default = '(((x0[i]>x1[i].real) || ((x0[i]==x1[i].real) && (0>x1[i].imag))) ? {}(x0[i]) : x1[i])'.format(fromreal)
             expr = '({} ? {} : ({} ? {} : {}))'.format(
-                    'isnan(x0[i])', 'x1[i]',
-                    '(isnan(x1[i].real) || isnan(x1[i].imag))', '{}(x0[i])'.format(fromreal),
-                    default)
+                'isnan(x0[i])', 'x1[i]',
+                '(isnan(x1[i].real) || isnan(x1[i].imag))', '{}(x0[i])'.format(fromreal),
+                default)
         else:
             expr = 'max(x0[i], x1[i])'
         expr = 'y0[i] = ' + expr
         return self.binary_op(x0=x1, x1=x2, expr=expr, out=out,
-                    queue=queue, dtype=dtype)
+                              queue=queue, dtype=dtype)
+
     def fmin(self, x1, x2, out=None,
-                queue=None, dtype=None, name='_fmin', **kwds):
+             queue=None, dtype=None, name='_fmin', **kwds):
         """
         Element-wise minimum of array elements, ignoring NaNs.
         """
@@ -3000,32 +3107,33 @@ class OpenClArrayBackend(ArrayBackend):
         elif is_complex(x1) and is_complex(x2):
             default = '(((x0[i].real<x1[i].real) || ((x0[i].real==x1[i].real) && (x0[i].imag<x1[i].imag))) ? x0[i] : x1[i])'
             expr = '({} ? {} : ({} ? {} : {}))'.format(
-                    '(isnan(x0[i].real) || isnan(x0[i].imag))', 'x1[i]',
-                    '(isnan(x1[i].real) || isnan(x1[i].imag))', 'x0[i]',
-                    default)
+                '(isnan(x0[i].real) || isnan(x0[i].imag))', 'x1[i]',
+                '(isnan(x1[i].real) || isnan(x1[i].imag))', 'x0[i]',
+                default)
         elif is_complex(x1):
-            fromreal=self.complex_fn('fromreal', x1)
+            fromreal = self.complex_fn('fromreal', x1)
             default = '(((x0[i].real<x1[i]) || ((x0[i].real==x1[i]) && (x0[i].imag<0))) ? x0[i] : {}(x1[i])'.format(fromreal)
             expr = '({} ? {} : ({} ? {} : {}))'.format(
-                    '(isnan(x0[i].real) || isnan(x0[i].imag))', '{}(x1[i])'.format(fromreal),
-                    'isnan(x1[i])', 'x0[i]',
-                    default)
+                '(isnan(x0[i].real) || isnan(x0[i].imag))', '{}(x1[i])'.format(fromreal),
+                'isnan(x1[i])', 'x0[i]',
+                default)
         elif is_complex(x2):
-            fromreal=self.complex_fn('fromreal', x2)
+            fromreal = self.complex_fn('fromreal', x2)
             default = '(((x0[i]<x1[i].real) || ((x0[i]==x1[i].real) && (0<x1[i].imag))) ? {}(x0[i]) : x1[i])'.format(fromreal)
             expr = '({} ? {} : ({} ? {} : {}))'.format(
-                    'isnan(x0[i])', 'x1[i]',
-                    '(isnan(x1[i].real) || isnan(x1[i].imag))', '{}(x0[i])'.format(fromreal),
-                    default)
+                'isnan(x0[i])', 'x1[i]',
+                '(isnan(x1[i].real) || isnan(x1[i].imag))', '{}(x0[i])'.format(fromreal),
+                default)
         else:
             expr = 'min(x0[i], x1[i])'
         expr = 'y0[i] = ' + expr
         return self.binary_op(x0=x1, x1=x2, expr=expr, out=out,
-                    queue=queue, dtype=dtype, name=name, **kwds)
+                              queue=queue, dtype=dtype, name=name, **kwds)
+
     def fabs(self, x, out=None,
-                queue=None, dtype=None):
+             queue=None, dtype=None):
         """
-        Calculate the absolute value element-wise, outputs real values unless out or dtype 
+        Calculate the absolute value element-wise, outputs real values unless out or dtype
         is set.
         """
         assert not is_complex(x)
@@ -3035,28 +3143,29 @@ class OpenClArrayBackend(ArrayBackend):
             expr = 'y0[i] = abs(x0[i])'
         alloc_dtypes = 'f'
         return self.unary_op(x0=x, expr=expr, out=out,
-                    queue=queue, dtype=dtype, 
-                    alloc_dtypes=alloc_dtypes)
+                             queue=queue, dtype=dtype,
+                             alloc_dtypes=alloc_dtypes)
+
     def absolute(self, x, out=None,
-                queue=None, dtype=None):
+                 queue=None, dtype=None):
         """
-        Calculate the absolute value element-wise, ouputs values as input type 
+        Calculate the absolute value element-wise, ouputs values as input type
         unless out or dtype is set.
         """
         if is_complex(x):
-            expr = 'y0[i] = {}(x0[i])'.format(self.complex_fn('abs',x))
-            alloc_dtypes=(complex_to_float_dtype(x),)
+            expr = 'y0[i] = {}(x0[i])'.format(self.complex_fn('abs', x))
+            alloc_dtypes = (complex_to_float_dtype(x),)
         elif is_fp(x):
             expr = 'y0[i] = fabs(x0[i])'
-            alloc_dtypes=None
+            alloc_dtypes = None
         else:
             expr = 'y0[i] = abs(x0[i])'
-            alloc_dtypes=None
+            alloc_dtypes = None
         return self.unary_op(x0=x, expr=expr, out=out,
-                    queue=queue, dtype=dtype, alloc_dtypes=alloc_dtypes)
-    
+                             queue=queue, dtype=dtype, alloc_dtypes=alloc_dtypes)
+
     def sign(self, x, out=None,
-                queue=None, dtype=None):
+             queue=None, dtype=None):
         """
         Returns an element-wise indication of the sign of a number.
         NaNs values are kept to NaNs to comply with numpy.
@@ -3069,18 +3178,20 @@ class OpenClArrayBackend(ArrayBackend):
         else:
             expr = 'y0[i] = (sign(x0[i]))'
         return self.unary_op(x0=x, expr=expr, out=out,
-                    queue=queue, dtype=dtype,
-                    convert_inputs='f')
+                             queue=queue, dtype=dtype,
+                             convert_inputs='f')
 
 
 # RANDOM SAMPLING #
 ###################
-## See https://docs.scipy.org/doc/numpy/reference/routines.random.html
+# See https://docs.scipy.org/doc/numpy/reference/routines.random.html
 
 # Simple random data
-    def rand(self, shape=None, queue=None, 
-            order=default_order, dtype=HYSOP_REAL, 
-            out=None, a=0.0, b=1.0, generator=None):
+
+
+    def rand(self, shape=None, queue=None,
+             order=default_order, dtype=HYSOP_REAL,
+             out=None, a=0.0, b=1.0, generator=None):
         """
         Return samples from the uniform distribution [a,b].
         Default generator is clRandom.PhiloxGenerator.
@@ -3093,11 +3204,11 @@ class OpenClArrayBackend(ArrayBackend):
             out = self.empty(shape=shape, dtype=dtype, order=order, queue=queue)
         generator.fill_uniform(ary=out.handle, a=a, b=b, queue=queue)
         return out
-    
-    def randn(self, shape=None, queue=None, 
-            order=default_order, dtype=HYSOP_REAL, 
-            out=None, mu=0.0, sigma=1.0, generator=None,
-            *args):
+
+    def randn(self, shape=None, queue=None,
+              order=default_order, dtype=HYSOP_REAL,
+              out=None, mu=0.0, sigma=1.0, generator=None,
+              *args):
         """
         Return samples from the standard normal distribution [mu,sigma].
         Default generator is clRandom.PhiloxGenerator.
@@ -3114,54 +3225,56 @@ class OpenClArrayBackend(ArrayBackend):
 
 # STATISTICS #
 ##############
-## See https://docs.scipy.org/doc/numpy/reference/routines.sort.html
+# See https://docs.scipy.org/doc/numpy/reference/routines.sort.html
+
+# Order statistics
+
 
-#Order statistics
     def amin(self, a, axis=None, out=None, queue=None, name='amin', **kwds):
         """
         Return the minimum of an array.
         """
-        return self.reduce(kargs=(a,), neutral='x0[0]', reduce_expr='min(a,b)', 
-                axis=axis, out=out, queue=queue, name=name, **kwds)
-    
+        return self.reduce(kargs=(a,), neutral='x0[0]', reduce_expr='min(a,b)',
+                           axis=axis, out=out, queue=queue, name=name, **kwds)
+
     def amax(self, a, axis=None, out=None, slice=None, queue=None, name='amax', **kwds):
         """
         Return the maximum of an array.
         """
-        return self.reduce(kargs=(a,), neutral='x0[0]', reduce_expr='max(a,b)', 
-                axis=axis, out=out, queue=queue, name=name, **kwds)
-    
+        return self.reduce(kargs=(a,), neutral='x0[0]', reduce_expr='max(a,b)',
+                           axis=axis, out=out, queue=queue, name=name, **kwds)
+
     def nanmin(self, a, axis=None, out=None, queue=None, name='nanmin', **kwds):
         """
         Return the minimum of an array.
         """
-        reduce_expr='(isnan(a) ? b : (isnan(b) ? a : min(a,b)))'
-        return self.reduce(kargs=(a,), neutral='x0[0]', reduce_expr=reduce_expr, 
-                axis=axis, out=out, queue=queue, name=name, **kwds)
-    
+        reduce_expr = '(isnan(a) ? b : (isnan(b) ? a : min(a,b)))'
+        return self.reduce(kargs=(a,), neutral='x0[0]', reduce_expr=reduce_expr,
+                           axis=axis, out=out, queue=queue, name=name, **kwds)
+
     def nanmax(self, a, axis=None, out=None, slice=None, queue=None, name='nanmax', **kwds):
         """
         Return the maximum of an array.
         """
-        reduce_expr='(isnan(a) ? b : (isnan(b) ? a : max(a,b)))'
-        return self.reduce(kargs=(a,), neutral='x0[0]', reduce_expr=reduce_expr, 
-                axis=axis, out=out, queue=queue, name=name, **kwds)
-    
+        reduce_expr = '(isnan(a) ? b : (isnan(b) ? a : max(a,b)))'
+        return self.reduce(kargs=(a,), neutral='x0[0]', reduce_expr=reduce_expr,
+                           axis=axis, out=out, queue=queue, name=name, **kwds)
+
     def average(self, a, axis=None, weights=None, returned=False, queue=None):
         """
         Compute the weighted average along the specified axis.
         """
-        self._unsupported_argument('average','returned', returned, False)
+        self._unsupported_argument('average', 'returned', returned, False)
         if (weights is None):
             return self.mean(a=a, axis=axis, queue=queue)
         else:
             arguments = '__global {ctype} const* x0, __global {ctype} const* w0'
             arguments = arguments.format(ctype=a.ctype())
-            return self.reduce(kargs=(a,), neutral='0', reduce_expr='a+b', 
-                                axis=axis, out=out, queue=queue, dtype=dtype,
-                                map_expr='w0[i]*x0[i]', 
-                                arguments=arguments) / float(a.size)
-    
+            return self.reduce(kargs=(a,), neutral='0', reduce_expr='a+b',
+                               axis=axis, out=out, queue=queue, dtype=dtype,
+                               map_expr='w0[i]*x0[i]',
+                               arguments=arguments) / float(a.size)
+
     def mean(self, a, axis=None, dtype=None, out=None, queue=None):
         """
         Compute the arithmetic mean along the specified axis.
@@ -3185,16 +3298,19 @@ class OpenClArrayBackend(ArrayBackend):
         Compute the median along the specified axis, while ignoring NaNs.
         """
         self._not_implemented_yet('nanmedian')
+
     def nanmean(self, a, axis=None, dtype=None, out=None):
         """
         Compute the arithmetic mean along the specified axis, ignoring NaNs.
         """
         self._not_implemented_yet('nanmean')
+
     def nanstd(self, a, axis=None, dtype=None, out=None, ddof=0):
         """
         Compute the standard deviation along the specified axis, while ignoring NaNs.
         """
         self._not_implemented_yet('nanstd')
+
     def nanvar(self, a, axis=None, dtype=None, out=None, ddof=0):
         """
         Compute the variance along the specified axis, while ignoring NaNs.
@@ -3202,40 +3318,40 @@ class OpenClArrayBackend(ArrayBackend):
         self._not_implemented_yet('nanvar')
 
     @staticmethod
-    def build_codegen_arguments(args, name, typegen, ctype, mesh_dim, 
-            known_vars, symbolic_mode,
-            itype='ulong', 
-            const=False, ptr=True, volatile=False, **kargs):
+    def build_codegen_arguments(args, name, typegen, ctype, mesh_dim,
+                                known_vars, symbolic_mode,
+                                itype='ulong',
+                                const=False, ptr=True, volatile=False, **kargs):
 
         from hysop.backend.device.codegen.base.utils import ArgDict
         from hysop.backend.device.codegen.base.variables import \
-                CodegenVariable, CodegenVectorClBuiltin
+            CodegenVariable, CodegenVectorClBuiltin
         check_instance(args, ArgDict)
 
         # strides are in number of elements, offset in bytes
-        base    = '{}_base'.format(name)
+        base = '{}_base'.format(name)
         strides = '{}_strides'.format(name)
-        offset  = '{}_offset'.format(name)
+        offset = '{}_offset'.format(name)
         assert base not in args
         assert offset not in args
         assert strides not in args
-        
+
         assert 'nl' not in kargs
         assert 'add_impl_const' not in kargs
         assert 'init' not in kargs
-        assert mesh_dim in [1,2,3,4,8,16]
+        assert mesh_dim in [1, 2, 3, 4, 8, 16]
+
+        args[base] = CodegenVariable(name=base, typegen=typegen,
+                                     ctype=ctype, ptr=ptr, const=const, volatile=volatile,
+                                     add_impl_const=True, nl=False, **kargs)
 
-        args[base] = CodegenVariable(name=base, typegen=typegen, 
-                ctype=ctype, ptr=ptr, const=const, volatile=volatile, 
-                add_impl_const=True, nl=False, **kargs)
-        
         args[strides] = CodegenVectorClBuiltin(name=strides,
-                typegen=typegen, btype='uint', dim=mesh_dim,
-                add_impl_const=True, nl=False)
+                                               typegen=typegen, btype='uint', dim=mesh_dim,
+                                               add_impl_const=True, nl=False)
 
         args[offset] = CodegenVariable(name=offset,
-                typegen=typegen, ctype=itype,
-                add_impl_const=True, nl=False)
+                                       typegen=typegen, ctype=itype,
+                                       add_impl_const=True, nl=False)
 
         if (offset in known_vars) and (strides in known_vars):
             args[base].nl = True
@@ -3244,8 +3360,7 @@ class OpenClArrayBackend(ArrayBackend):
         else:
             args[offset].nl = True
 
-        
-        char_alias  = args[base].full_ctype(ctype='char', cast=True, align=True)
+        char_alias = args[base].full_ctype(ctype='char', cast=True, align=True)
         ctype_alias = args[base].full_ctype(cast=True, align=True)
 
         if (not symbolic_mode) and (offset in known_vars):
@@ -3255,10 +3370,10 @@ class OpenClArrayBackend(ArrayBackend):
         # init = '({})(({})({})+{})'.format(ctype_alias, char_alias, base, offset_str)
         init = '{} $+ {}'.format(base, offset_str)
 
-        array = CodegenVariable(name=name, typegen=typegen, 
-                ctype=ctype, ptr=ptr, const=const, volatile=volatile,
-                add_impl_const=True, nl=False, 
-                init=init, **kargs)
+        array = CodegenVariable(name=name, typegen=typegen,
+                                ctype=ctype, ptr=ptr, const=const, volatile=volatile,
+                                add_impl_const=True, nl=False,
+                                init=init, **kargs)
         strides = args[strides]
 
         return array, strides
diff --git a/hysop/backend/device/opencl/opencl_autotunable_kernel.py b/hysop/backend/device/opencl/opencl_autotunable_kernel.py
index 50756c8be8f2eb959f587214dc2d624c53f240d5..7f73c0766207d5c71e2c353ccf27da3be54e5e88 100644
--- a/hysop/backend/device/opencl/opencl_autotunable_kernel.py
+++ b/hysop/backend/device/opencl/opencl_autotunable_kernel.py
@@ -1,7 +1,7 @@
-import subprocess, sys
+import os, subprocess, sys
 from abc import ABCMeta, abstractmethod
+
 from hysop import __KERNEL_DEBUG__, vprint
-from hysop.deps import os
 from hysop.constants import Backend
 from hysop.tools.numpywrappers import npw
 from hysop.tools.types import check_instance, first_not_None, to_tuple, to_list
@@ -62,7 +62,7 @@ class OpenClAutotunableKernel(AutotunableKernel):
         check_instance(extra_parameters, dict, keys=str)
         check_instance(extra_kwds, dict, keys=str)
         global_work_size = work.global_work_size
-        return ((global_work_size+local_work_size-1)/local_work_size)*local_work_size
+        return ((global_work_size+local_work_size-1)//local_work_size)*local_work_size
 
     @abstractmethod
     def generate_kernel_src(self, global_work_size, local_work_size,
@@ -87,12 +87,12 @@ class OpenClAutotunableKernel(AutotunableKernel):
 
     def format_best_candidate(self, autotuner,
             file_basename, from_cache, name,
-            extra_kwds, extra_parameters, 
+            extra_kwds, extra_parameters,
             work_size, work_load,
             global_work_size, local_work_size,
             args_mapping, args_list,
             program, kernel, kernel_name, kernel_src,
-            kernel_statistics, src_hash, 
+            kernel_statistics, src_hash,
             extra_kwds_hash, extra_kwds_hash_logs):
         """
         Post treatment callback for autotuner results.
@@ -118,7 +118,7 @@ class OpenClAutotunableKernel(AutotunableKernel):
         check_instance(src_hash, str)
         check_instance(extra_kwds_hash, str)
         check_instance(extra_kwds_hash_logs, str)
-        
+
         autotuner_config = autotuner.autotuner_config
         if autotuner_config.filter_statistics(file_basename):
             kernel_hash_logs = self.generate_hash_logs(file_basename, extra_kwds_hash_logs)
@@ -126,7 +126,7 @@ class OpenClAutotunableKernel(AutotunableKernel):
             kernel_isolation = self.generate_oclgrind_isolation_file(kernel,
                     file_basename, kernel_source,
                     global_work_size, local_work_size,
-                    args_list, args_mapping, 
+                    args_list, args_mapping,
                     extra_kwds['isolation_params'])
 
             if autotuner_config.postprocess_kernels:
@@ -135,40 +135,40 @@ class OpenClAutotunableKernel(AutotunableKernel):
                 del kernel_statistics
                 for i,arg in enumerate(args_list):
                     kernel.set_arg(i, arg)
-                kernel_statistics, _ = autotuner.bench_one_from_binary(kernel=kernel, 
+                kernel_statistics, _ = autotuner.bench_one_from_binary(kernel=kernel,
                         target_nruns=autotuner_config.postprocess_nruns,
                         old_stats=None, best_stats=None,
-                        global_work_size=global_work_size, 
+                        global_work_size=global_work_size,
                         local_work_size=local_work_size)
 
                 # execute command FILE_BASENAME FROM_CACHE
                 # AUTOTUNER_DUMP_DIR  AUTOTUNER_NAME  KERNEL_NAME
                 # MEAN_EXECUTION_TIME_NS  MIN_EXECUTION_TIME_NS  MAX_EXECUTION_TIME_NS
                 # KERNEL_SOURCE_FILE  KERNEL_ISOLATION_FILE  KERNEL_HASH_LOGS_FILE
-                # VENDOR_NAME  DEVICE_NAME  
-                # WORK_SIZE  WORK_LOAD  
-                # GLOBAL_WORK_SIZE  LOCAL_WORK_SIZE 
+                # VENDOR_NAME  DEVICE_NAME
+                # WORK_SIZE  WORK_LOAD
+                # GLOBAL_WORK_SIZE  LOCAL_WORK_SIZE
                 # EXTRA_PARAMETERS  EXTRA_KWDS_HASH  SRC_HASH
-                command = [str(autotuner_config.postprocess_kernels), 
+                command = [str(autotuner_config.postprocess_kernels),
                            str(file_basename),
                            '1' if from_cache else '0',
-                           str(autotuner_config.dump_folder), 
+                           str(autotuner_config.dump_folder),
                            str(autotuner.name),
-                           str(kernel_name), 
-                           str(kernel_statistics.mean), 
-                           str(kernel_statistics.min), 
-                           str(kernel_statistics.max), 
-                           str(kernel_source), 
-                           str(kernel_isolation), 
+                           str(kernel_name),
+                           str(kernel_statistics.mean),
+                           str(kernel_statistics.min),
+                           str(kernel_statistics.max),
+                           str(kernel_source),
+                           str(kernel_isolation),
                            str(kernel_hash_logs),
                            str(kernel.context.devices[0].platform.name.strip()),
                            str(kernel.context.devices[0].name.strip()),
                            str(work_size),
-                           str(work_load), 
+                           str(work_load),
                            str(global_work_size),
-                           str(local_work_size), 
+                           str(local_work_size),
                            str(extra_parameters),
-                           str(extra_kwds_hash), 
+                           str(extra_kwds_hash),
                            str(src_hash)]
                 if autotuner_config.debug:
                     print('POSTPROCESSING KERNEL {}:\n'.format(autotuner.name) + ' '.join(command))
@@ -176,8 +176,8 @@ class OpenClAutotunableKernel(AutotunableKernel):
                     subprocess.check_call(command)
                 except OSError as e:
                     msg="\nFATAL ERROR: Could not find or execute postprocessing script '{}'.".format(command[0])
-                    print msg
-                    print
+                    print(msg)
+                    print()
                     raise
                 except subprocess.CalledProcessError as e:
                     if (e.returncode == 10):
@@ -188,9 +188,9 @@ class OpenClAutotunableKernel(AutotunableKernel):
                         msg='\nFATAL ERROR: Failed to call autotuner postprocessing command.\n{}\n'
                         msg=msg.format(' '.join(command))
                         print(msg)
-                        print
+                        print()
                         raise
-        
+
         kernel = OpenClKernel(name=autotuner.name, program=program,
                 args_mapping=args_mapping,
                 default_queue=None,
@@ -199,7 +199,7 @@ class OpenClAutotunableKernel(AutotunableKernel):
                 default_args=None)
 
         args_dict = extra_kwds['kernel_args']
-        
+
         return (kernel, args_dict)
 
     def generate_source_file(self, kernel_name, kernel_src, force=False):
@@ -215,10 +215,10 @@ class OpenClAutotunableKernel(AutotunableKernel):
             os.makedirs(dump_folder)
         with open(dump_file, 'w+') as f:
             if self.autotuner_config.verbose:
-                print '  >Saving OpenCL kernel source to \'{}\'.'.format(dump_file)
+                print('  >Saving OpenCL kernel source to \'{}\'.'.format(dump_file))
             f.write(kernel_src)
         return dump_file
-    
+
     def generate_hash_logs(self, kernel_name, hash_logs, force=False):
         if (not force) and (not self.autotuner_config.dump_hash_logs):
             return None
@@ -231,7 +231,7 @@ class OpenClAutotunableKernel(AutotunableKernel):
             os.makedirs(dump_folder)
         with open(dump_file, 'w+') as f:
             if self.autotuner_config.verbose:
-                print '  >Saving hash logs to \'{}\'.'.format(dump_file)
+                print('  >Saving hash logs to \'{}\'.'.format(dump_file))
             f.write(hash_logs)
         return dump_file
 
@@ -246,7 +246,7 @@ class OpenClAutotunableKernel(AutotunableKernel):
             global_work_size = tuple(x for x in global_work_size) + (1,)*(3-dim)
             local_work_size  = tuple(x for x in local_work_size)  + (1,)*(3-dim)
 
-        sorted_args = sorted(args_mapping.items(), key=lambda x:x[1][0])
+        sorted_args = tuple(sorted(args_mapping.items(), key=lambda x:x[1][0]))
         assert len(sorted_args) == len(args_list)
 
         dump_folder = self.autotuner_config.dump_folder
@@ -285,7 +285,7 @@ class OpenClAutotunableKernel(AutotunableKernel):
                 msg+=self.format_oclgrind_isolation_argument(arg_name, arg_isol, arg_value)
                 msg+='\n'
             if self.autotuner_config.verbose:
-                print '  >Saving oclgrind kernel isolation file to \'{}\'.'.format(dump_file)
+                print('  >Saving oclgrind kernel isolation file to \'{}\'.'.format(dump_file))
             f.write(msg)
         return dump_file
 
@@ -352,7 +352,7 @@ class OpenClAutotunableKernel(AutotunableKernel):
             assert dtype in typemap.keys(), dtype
             ranges = list(slices.indices(count))
             assert (ranges[1]-ranges[0])//ranges[2] in (count, count//vect)
-            if ((ranges[1]-ranges[0])//ranges[2] == count//vect): 
+            if ((ranges[1]-ranges[0])//ranges[2] == count//vect):
                 ranges[0]*=vect
                 ranges[1]*=vect
             assert (ranges[1]-ranges[0])//ranges[2] == count, '{} != {}'.format((ranges[1]-ranges[0])//ranges[2], count)
@@ -540,7 +540,7 @@ class OpenClAutotunableKernel(AutotunableKernel):
 
     def build_array_args(self, hardcode_arrays=False, **arrays):
         kernel_args = {}
-        for name, data in arrays.iteritems():
+        for name, data in arrays.items():
             check_instance(data, (OpenClArray, clArray.Array))
             base = '{}_base'.format(name)
             kernel_args[base] = data.base_data
diff --git a/hysop/backend/device/opencl/opencl_buffer.py b/hysop/backend/device/opencl/opencl_buffer.py
index cf7dd81df468f8efed2a978188b6a9a6742b2eac..ff2e09c355e22b3108889a0cb6adc9217c34931e 100644
--- a/hysop/backend/device/opencl/opencl_buffer.py
+++ b/hysop/backend/device/opencl/opencl_buffer.py
@@ -9,7 +9,7 @@ class OpenClBuffer(DeviceBuffer, cl.Buffer):
     """
     def __init__(self, context, mem_flags, size=0, hostbuf=None):
         assert (hostbuf is not None) or (size>0)
-        super(OpenClBuffer,self).__init__(context=context, flags=mem_flags, 
+        super(OpenClBuffer,self).__init__(context=context, flags=mem_flags,
                 size=size, hostbuf=hostbuf)
 
     def get_int_ptr(self):
@@ -32,12 +32,12 @@ class OpenClBuffer(DeviceBuffer, cl.Buffer):
             return self
         else:
             if self._DEBUG:
-                print 'Taking aligned subbuffer with alignment {}.'.format(alignment)
+                print('Taking aligned subbuffer with alignment {}.'.format(alignment))
             return self[offset:offset+size]
 
     def __getitem__(self, key):
         if self._DEBUG:
-            print 'Getting opencl subbuffer view {}.'.format(key)
+            print('Getting opencl subbuffer view {}.'.format(key))
         return super(OpenClBuffer, self).__getitem__(key)
 
     def release(self):
diff --git a/hysop/backend/device/opencl/opencl_copy_kernel_launchers.py b/hysop/backend/device/opencl/opencl_copy_kernel_launchers.py
index 507bc1f491c0851523cb65f6cf5fe47415e4adc8..f16eea6994d0e230659986f19da2e2181436c674 100644
--- a/hysop/backend/device/opencl/opencl_copy_kernel_launchers.py
+++ b/hysop/backend/device/opencl/opencl_copy_kernel_launchers.py
@@ -1,5 +1,6 @@
+import numpy as np
+
 from hysop import vprint, dprint, __KERNEL_DEBUG__, __TRACE_KERNELS__
-from hysop.deps import np
 from hysop.constants import Backend
 from hysop.tools.profiler import FProfiler
 from hysop.tools.decorators import debug
@@ -36,8 +37,8 @@ class OpenClCopyKernelLauncher(OpenClKernelLauncher):
         if isinstance(src, np.ndarray) or isinstance(dst, np.ndarray):
             enqueue_copy_kwds['is_blocking'] = False
 
-        super(OpenClCopyKernelLauncher, self).__init__(name=name,
-                                                       kernel=None, args_list=(), **kwds)
+        super(OpenClCopyKernelLauncher, self).__init__(
+            name=name, kernel=None, args_list=(), **kwds)
 
         self._enqueue_copy_kwds = enqueue_copy_kwds
         self._apply_msg = apply_msg
@@ -134,14 +135,16 @@ class OpenClCopyBufferLauncher(OpenClCopyKernelLauncher):
                                '...')
 
         assert 'name' not in kwds
-        name = 'enqueue_copy_{}__{}_to_{}'.format(varname,
-                                                  'host' if isinstance(src, np.ndarray) else 'device',
-                                                  'host' if isinstance(dst, np.ndarray) else 'device')
+        name = 'enqueue_copy_{}__{}_to_{}'.format(
+            varname,
+            'host' if isinstance(src, np.ndarray) else 'device',
+            'host' if isinstance(dst, np.ndarray) else 'device')
         apply_msg = '{}<<<{}>>>'.format(name, shape)
 
-        super(OpenClCopyBufferLauncher, self).__init__(dst=dst, src=src,
-                                                       enqueue_copy_kwds=enqueue_copy_kwds,
-                                                       name=name, apply_msg=apply_msg, **kwds)
+        super(OpenClCopyBufferLauncher, self).__init__(
+            dst=dst, src=src,
+            enqueue_copy_kwds=enqueue_copy_kwds,
+            name=name, apply_msg=apply_msg, **kwds)
 
     def _format_host_arg(self, arg):
         if isinstance(arg, HostArray):
@@ -213,10 +216,11 @@ class OpenClCopyDevice2DeviceLauncher(OpenClCopyBufferLauncher):
         check_instance(dst_device_offset, (int, np.integer), allow_none=True)
         check_instance(byte_count, (int, np.integer), allow_none=True)
         assert (src_nbytes is None) or (dst_nbytes is None) or (src_nbytes == dst_nbytes)
-        super(OpenClCopyDevice2DeviceLauncher, self).__init__(varname=varname,
-                                                              src=src, dst=dst,
-                                                              src_device_offset=src_device_offset, dst_device_offset=dst_device_offset,
-                                                              byte_count=byte_count)
+        super(OpenClCopyDevice2DeviceLauncher, self).__init__(
+            varname=varname,
+            src=src, dst=dst,
+            src_device_offset=src_device_offset, dst_device_offset=dst_device_offset,
+            byte_count=byte_count)
 
 
 class OpenClCopyBufferRectLauncher(OpenClCopyKernelLauncher):
@@ -230,7 +234,8 @@ class OpenClCopyBufferRectLauncher(OpenClCopyKernelLauncher):
 
     def __init__(self, varname, src, dst,
                  copy_region, copy_src_origin, copy_dst_origin, copy_src_pitches, copy_dst_pitches,
-                 iter_region=None, iter_src_origin=None, iter_dst_origin=None, iter_src_pitches=None, iter_dst_pitches=None,
+                 iter_region=None, iter_src_origin=None, iter_dst_origin=None,
+                 iter_src_pitches=None, iter_dst_pitches=None,
                  **kwds):
         """
         Initialize a (HOST <-> DEVICE) or a (DEVICE <-> DEVICE) rectangle
@@ -331,9 +336,10 @@ class OpenClCopyBufferRectLauncher(OpenClCopyKernelLauncher):
             raise ValueError(msg)
 
         assert 'name' not in kwds
-        name = 'enqueue_copy_rect_{}__{}_to_{}'.format(varname,
-                                                       'host' if isinstance(src, np.ndarray) else 'device',
-                                                       'host' if isinstance(dst, np.ndarray) else 'device')
+        name = 'enqueue_copy_rect_{}__{}_to_{}'.format(
+            varname,
+            'host' if isinstance(src, np.ndarray) else 'device',
+            'host' if isinstance(dst, np.ndarray) else 'device')
         apply_msg = '{}<<<{}>>>()'
         apply_msg = apply_msg.format(name, copy_region)
 
@@ -345,9 +351,10 @@ class OpenClCopyBufferRectLauncher(OpenClCopyKernelLauncher):
             src_origin = enqueue_copy_kwds.pop(src_origin_kwd)
             dst_origin = enqueue_copy_kwds.pop(dst_origin_kwd)
 
-        super(OpenClCopyBufferRectLauncher, self).__init__(dst=dst, src=src,
-                                                           enqueue_copy_kwds=enqueue_copy_kwds,
-                                                           name=name, apply_msg=apply_msg, **kwds)
+        super(OpenClCopyBufferRectLauncher, self).__init__(
+            dst=dst, src=src,
+            enqueue_copy_kwds=enqueue_copy_kwds,
+            name=name, apply_msg=apply_msg, **kwds)
 
         if (n > 0):
             def call(queue=None, wait_for=None,
@@ -358,7 +365,7 @@ class OpenClCopyBufferRectLauncher(OpenClCopyKernelLauncher):
                      iter_dst_pitches=iter_dst_pitches,
                      **kwds):
                 if __KERNEL_DEBUG__ or __TRACE_KERNELS__:
-                    print '  '+self._apply_msg
+                    print('  '+self._apply_msg)
                 queue = first_not_None(queue, self._default_queue)
                 check_instance(queue, cl.CommandQueue)
 
@@ -403,7 +410,7 @@ class OpenClCopyBufferRectLauncher(OpenClCopyKernelLauncher):
             assert nellipsis == 1, msg
             eid = slices.index(Ellipsis)
             missing_count = ndim-len(slices)
-            missing_slices = tuple(slice(shape[i]) for i in xrange(eid, eid+missing_count+1))
+            missing_slices = tuple(slice(shape[i]) for i in range(eid, eid+missing_count+1))
             full_slices = slices[:eid]+missing_slices+slices[eid+1:]
             slices = full_slices
         check_instance(slices, tuple, values=(int, slice), size=ndim)
@@ -599,7 +606,6 @@ class OpenClCopyBufferRectLauncher(OpenClCopyKernelLauncher):
         msg0 = msg0.format(iter_region,
                            iter_src_origin, iter_src_pitches,
                            iter_dst_origin, iter_dst_pitches)
-        #print msg0
 
         return cls(varname=varname,
                    src=src_data, dst=dst_data,
diff --git a/hysop/backend/device/opencl/opencl_device.py b/hysop/backend/device/opencl/opencl_device.py
index cdd3290470a23f7d8c2ced892dd5292f92d42153..682baca1a3e7ccc1259124241975f94f0e98ddd7 100644
--- a/hysop/backend/device/opencl/opencl_device.py
+++ b/hysop/backend/device/opencl/opencl_device.py
@@ -1,7 +1,8 @@
 import re, fractions
+import numpy as np
+
 from hysop.tools.types import check_instance
 from hysop import vprint
-from hysop.deps import np
 from hysop.backend.device.opencl import cl, cl_api
 from hysop.constants import DeviceType, CacheType, MemoryType, FpConfig
 from hysop.tools.units import bytes2str, freq2str, time2str
@@ -23,7 +24,7 @@ def opencl_version_atleast(major, minor, returned=UnknownDeviceAttribute()):
                 return returned
         return wrap
     return decorator
-    
+
 def cl2hysop_device_type(cl_dev_type):
     entries = {
         cl.device_type.ACCELERATOR: DeviceType.ACCELERATOR,
@@ -36,7 +37,7 @@ def cl2hysop_device_type(cl_dev_type):
         entries[cl.device_type.CUSTOM] = DeviceType.CUSTOM
 
     if (cl_dev_type not in entries):
-        msg = 'Unknown opencl device type {}.'.format(cl_dev_type) 
+        msg = 'Unknown opencl device type {}.'.format(cl_dev_type)
         raise ValueError(msg)
     return entries[cl_dev_type]
 
@@ -47,7 +48,7 @@ def cl2hysop_mem_type(cl_mem_type):
     elif cl_mem_type == cl.device_local_mem_type.GLOBAL:
         return MemoryType.GLOBAL_MEMORY
     else:
-        msg = 'Unknown memory type {}.'.format(cl_mem_type) 
+        msg = 'Unknown memory type {}.'.format(cl_mem_type)
         raise ValueError(msg)
 
 def cl2hysop_cache_type(cl_cache_type):
@@ -59,7 +60,7 @@ def cl2hysop_cache_type(cl_cache_type):
     elif cl_cache_type == ct.READ_WRITE_CACHE:
         return CacheType.READ_WRITE_CACHE
     else:
-        msg = 'Unknown cache type {}.'.format(cl_cache_type) 
+        msg = 'Unknown cache type {}.'.format(cl_cache_type)
         raise ValueError(msg)
 
 def cl2hysop_fpconfig(cl_fpconfig):
@@ -84,42 +85,42 @@ class OpenClDevice(LogicalDevice):
 
     __attrs = (
         'version', 'opencl_c_version', 'spir_versions', 'driver_version', 'il_version',
-        'name', 'type', 'vendor', 'vendor_id', 
-        'extensions', 'built_in_kernels', 
-        'available', 'compiler_available', 'linker_available', 
-        'host_unified_memory',  'svm_capabilities', 
-        'partition_max_sub_devices', 'partition_properties', 
-        'partition_affinity_domain', 'partition_type', 
+        'name', 'type', 'vendor', 'vendor_id',
+        'extensions', 'built_in_kernels',
+        'available', 'compiler_available', 'linker_available',
+        'host_unified_memory',  'svm_capabilities',
+        'partition_max_sub_devices', 'partition_properties',
+        'partition_affinity_domain', 'partition_type',
         'pipe_max_active_reservations', 'pipe_max_packet_size', 'max_pipe_args',
         'queue_properties',
-        'queue_on_device_max_size', 'queue_on_device_preferred_size', 
+        'queue_on_device_max_size', 'queue_on_device_preferred_size',
         'queue_on_host_properties', 'queue_on_device_properties',
-        'max_on_device_events', 'max_on_device_queues', 
-        'max_global_variable_size',  'max_parameter_size', 'max_clock_frequency', 
-        'sub_group_independent_forward_progress', 'max_num_sub_groups', 
+        'max_on_device_events', 'max_on_device_queues',
+        'max_global_variable_size',  'max_parameter_size', 'max_clock_frequency',
+        'sub_group_independent_forward_progress', 'max_num_sub_groups',
         'address_bits', 'endian_little', 'error_correction_support', 'execution_capabilities',
-        'max_work_item_dimensions', 'max_work_item_sizes', 'max_work_group_size', 
-        'mem_base_addr_align', 'max_constant_args', 'max_constant_buffer_size', 
-        'global_mem_size', 'global_mem_cache_size', 
-        'global_mem_cacheline_size', 'global_mem_cache_type', 
-        'max_mem_alloc_size', 'local_mem_size', 'local_mem_type', 
-        'half_fp_config', 'single_fp_config', 'double_fp_config', 
+        'max_work_item_dimensions', 'max_work_item_sizes', 'max_work_group_size',
+        'mem_base_addr_align', 'max_constant_args', 'max_constant_buffer_size',
+        'global_mem_size', 'global_mem_cache_size',
+        'global_mem_cacheline_size', 'global_mem_cache_type',
+        'max_mem_alloc_size', 'local_mem_size', 'local_mem_type',
+        'half_fp_config', 'single_fp_config', 'double_fp_config',
         'image_support', 'max_samplers', 'image_max_array_size', 'image_max_buffer_size',
-        'max_read_image_args', 'max_write_image_args', 'max_read_write_image_args', 
-        'image2d_max_width', 'image2d_max_height', 
-        'image3d_max_depth', 'image3d_max_height', 'image3d_max_width', 
-        'image_max_buffer_size', 
-        'preferred_platform_atomic_alignment', 'preferred_local_atomic_alignment', 
-        'preferred_global_atomic_alignment', 
+        'max_read_image_args', 'max_write_image_args', 'max_read_write_image_args',
+        'image2d_max_width', 'image2d_max_height',
+        'image3d_max_depth', 'image3d_max_height', 'image3d_max_width',
+        'image_max_buffer_size',
+        'preferred_platform_atomic_alignment', 'preferred_local_atomic_alignment',
+        'preferred_global_atomic_alignment',
         'preferred_interop_user_sync',
-        'profiling_timer_resolution', 'printf_buffer_size', 
+        'profiling_timer_resolution', 'printf_buffer_size',
         'min_data_type_align_size',
-        'native_vector_width_char', 'native_vector_width_short', 'native_vector_width_int', 
-        'native_vector_width_long', 'native_vector_width_float', 'native_vector_width_double', 
-        'native_vector_width_half', 
-        'preferred_vector_width_char', 'preferred_vector_width_short', 
-        'preferred_vector_width_int', 'preferred_vector_width_long', 
-        'preferred_vector_width_half', 'preferred_vector_width_float', 
+        'native_vector_width_char', 'native_vector_width_short', 'native_vector_width_int',
+        'native_vector_width_long', 'native_vector_width_float', 'native_vector_width_double',
+        'native_vector_width_half',
+        'preferred_vector_width_char', 'preferred_vector_width_short',
+        'preferred_vector_width_int', 'preferred_vector_width_long',
+        'preferred_vector_width_half', 'preferred_vector_width_float',
         'preferred_vector_width_double',
         'pci_bus_id_nv', 'pci_slot_id_nv', 'attribute_async_engine_count_nv',
         'compute_capability_major_nv', 'compute_capability_minor_nv', 'gpu_overlap_nv',
@@ -138,10 +139,10 @@ class OpenClDevice(LogicalDevice):
         )
 
     __all_attrs = __attrs + ('_simd_lane_size', '_usable_local_mem_size', '_real_name')
-    
-    def __init__(self, platform, platform_handle, device_id, 
+
+    def __init__(self, platform, platform_handle, device_id,
                         device_handle, hardware_topo, **kwds):
-        
+
         # we do not keep a reference to platform_handle  or device_handle as we
         # need to pickle this object
         self._cl_version = self._extract_cl_version(device_handle)
@@ -164,13 +165,13 @@ class OpenClDevice(LogicalDevice):
         self._simd_lane_size = cl.characterize.get_simd_group_size(device_handle, np.int32)
         self._usable_local_mem_size = cl.characterize.usable_local_mem_size(device_handle)
         self._real_name = device_handle.name
-        
+
         super(OpenClDevice,self).__init__(
-                platform=platform, platform_handle=platform_handle, 
-                device_id=device_id, device_handle=device_handle, 
+                platform=platform, platform_handle=platform_handle,
+                device_id=device_id, device_handle=device_handle,
                 hardware_topo=hardware_topo, **kwds)
 
-    
+
     def _determine_performance_and_affinity(self, hardware_topo):
 
         # test device bandwidth using clpeak global memory bandwidth statistics
@@ -182,15 +183,15 @@ class OpenClDevice(LogicalDevice):
                           device_id=self.device_id,
                           is_cpu=(self.type()==DeviceType.CPU))
         self._clpeak_info = info
-       
+
     def _extract_cl_version(self, device_handle):
         version = device_handle.version
-        regexp = re.compile('OpenCL ([0-9]+)\.([0-9]+)')
+        regexp = re.compile(r'OpenCL ([0-9]+)\.([0-9]+)')
         match  = re.match(regexp, version)
         assert match, 'Could not match opencl version from \'{}\'.'.format(version)
         (major, minor) = int(match.group(1)), int(match.group(2))
         return (major, minor)
-        
+
     def _match_physical_devices(self, hardware_topo):
         if (self.type() == DeviceType.CPU):
             return hardware_topo.cpu_packages()
@@ -206,7 +207,7 @@ class OpenClDevice(LogicalDevice):
             msg=msg.format(self.name(), device.pci_busid())
             vprint(msg)
             return device
-        # else we may try to look to pci bus id 
+        # else we may try to look to pci bus id
         # at this time, this is only available for nvidia and amd via opencl extensions
         else:
             pci_bus_id  = self.pci_bus_id()
@@ -214,7 +215,7 @@ class OpenClDevice(LogicalDevice):
             if not isinstance(pci_bus_id, str):
                 msg='Could not get opencl pci device bus id.'
                 vprint(msg)
-                return None 
+                return None
             elif pci_bus_id not in pci_devices.keys():
                 msg='Could get opencl pci device bus id ({}), but it did not match '
                 msg='any in hardware topology.'
@@ -249,7 +250,7 @@ bytes2str(self.global_mem_size()),
 bytes2str(self.max_global_alloc_size()),
 bytes2str(self.local_mem_size(), decimal=False),
 'yes, up to {} subdevices'.format(self.max_subdevices()) if self.has_device_partition_support() \
-        else 'no', 'no match', ind=ind, inc=inc)   
+        else 'no', 'no match', ind=ind, inc=inc)
         msg += '\n'+self.clpeak_info_summary(indent=indent, increment=increment)
         return msg
 
@@ -288,14 +289,14 @@ bytes2str(self.local_mem_size(), decimal=False),
                 flops2str(info.max_dp_compute))]
             failed = False
         if (info.has_single_precision_compute and info.has_double_precision_compute):
-            ratio = info.max_dp_compute/info.max_sp_compute
+            ratio = info.max_dp_compute / float(info.max_sp_compute)
             ratio = fractions.Fraction(ratio).limit_denominator(64)
             ss += ['{{s}}double to float compute ratio:   {} (DP/SP)'.format(ratio)]
             failed = False
         if failed:
             ss += ['{s}>Error: All clpeak commands failed.']
         return '\n'.join(ss).format(s=(ind+inc+inc+' |-'))
-        
+
     def device_summary(self, indent=0, increment=2):
         ind=' '*indent
         inc=' '*increment
@@ -306,7 +307,7 @@ bytes2str(self.local_mem_size(), decimal=False),
 {ind}{inc}{inc}*device type:              {}
 {ind}{inc}{inc}*opencl   version:         {}
 {ind}{inc}{inc}*opencl C version:         {}
- 
+
 {ind}{inc}{inc}*address bits:             {}bit
 {ind}{inc}{inc}*little endian:            {}
 {ind}{inc}{inc}*ECC enabled:              {}
@@ -322,9 +323,9 @@ self.vendor(), self.vendor_id(),
 self.type(),
 self.version(), self.opencl_c_version(),
 self.address_bits(), self.little_endian(),
-self.error_correction_support(), 
+self.error_correction_support(),
 freq2str(self.max_clock_frequency)(),
-self.available(), self.compiler_available(), 
+self.available(), self.compiler_available(),
 self.linker_available(), self.has_spir_support(),
 ind=ind, inc=inc)
         return msg
@@ -385,7 +386,7 @@ ind=ind, inc=inc)
     def fp_support_summary(self, indent=0, increment=2):
         ind=' '*indent
         inc=' '*increment
-        
+
         def fmt(bits, has_fp, get_fp_flags):
             if not has_fp:
                 support = '\n{ind}{inc}{inc}*fp{} support: None'.format(bits, inc=inc, ind=ind)
@@ -417,7 +418,7 @@ ind=ind, inc=inc)
         msg=\
 '''
 {ind}{inc}Vector sizes (preferred{}):
-{ind}{inc}{inc}*char:                     {}{} 
+{ind}{inc}{inc}*char:                     {}{}
 {ind}{inc}{inc}*short:                    {}{}
 {ind}{inc}{inc}*int:                      {}{}
 {ind}{inc}{inc}*long:                     {}{}
@@ -481,11 +482,11 @@ ind=ind, inc=inc)
     self.max_image_args(),
     self.max_write_image_args(),
     self.max_samplers(),
-    fmt(self.has_1d_image_support(), self.has_1d_image_write_support(), 
+    fmt(self.has_1d_image_support(), self.has_1d_image_write_support(),
         self.max_1d_image_size()),
-    fmt(self.has_2d_image_support(), self.has_2d_image_write_support(), 
+    fmt(self.has_2d_image_support(), self.has_2d_image_write_support(),
         self.max_2d_image_size()),
-    fmt(self.has_3d_image_support(), self.has_3d_image_write_support(), 
+    fmt(self.has_3d_image_support(), self.has_3d_image_write_support(),
         self.max_3d_image_size()),
     fmt_array(self.has_1d_image_array_support(), self.max_1d_image_array_size()),
     fmt_array(self.has_2d_image_array_support(), self.max_2d_image_array_size()),
@@ -494,7 +495,7 @@ ind=ind, inc=inc)
     self.has_3d_image_from_buffer_support(),
     ind=ind, inc=inc)
         return msg
-    
+
     def atomics_summary(self, indent=0, increment=2):
         ind=' '*indent
         inc=' '*increment
@@ -507,29 +508,29 @@ ind=ind, inc=inc)
 {ind}{inc}{inc}*int64/uint64:             {}
 {ind}{inc}{inc}*float32:                  {}
 {ind}{inc}{inc}*float64:                  {}
- 
+
 {ind}{inc}{inc}*int32 hardware counters:  {}
 {ind}{inc}{inc}*int64 hardware counters:  {}
 '''.format(
-    fmt(self.has_global_int32_atomics(), self.has_local_int32_atomics(), 
+    fmt(self.has_global_int32_atomics(), self.has_local_int32_atomics(),
         self.has_mixed_int32_atomics()),
-    fmt(self.has_global_int64_atomics(), self.has_local_int64_atomics(), 
+    fmt(self.has_global_int64_atomics(), self.has_local_int64_atomics(),
         self.has_mixed_int64_atomics()),
-    fmt(self.has_global_float32_atomics(), self.has_local_float32_atomics(), 
+    fmt(self.has_global_float32_atomics(), self.has_local_float32_atomics(),
         self.has_mixed_float32_atomics()),
-    fmt(self.has_global_float64_atomics(), self.has_local_float64_atomics(), 
+    fmt(self.has_global_float64_atomics(), self.has_local_float64_atomics(),
         self.has_mixed_float64_atomics()),
     self.has_int32_hardware_atomic_counters(),
     self.has_int64_hardware_atomic_counters(),
     ind=ind, inc=inc)
         return msg
-    
+
     def misc_summary(self, indent=0, increment=2):
         ind=' '*indent
         inc=' '*increment
         if self.has_printf_support:
             pbs = self.printf_buffer_size()
-            pbs = 'up to {}'.format(bytes2str(pbs)) if isinstance(pbs,(int,long)) \
+            pbs = 'up to {}'.format(bytes2str(pbs)) if isinstance(pbs,int) \
                     else 'unknown buffer size'
         msg=\
 '''
@@ -562,7 +563,7 @@ ind=ind, inc=inc)
         if short:
             return \
     '''{ind}*Device {}:\n{}'''.format(
-            self.device_id, 
+            self.device_id,
             self.short_device_summary(indent=indent, increment=increment),
             ind=ind)
         else:
@@ -572,7 +573,7 @@ ind=ind, inc=inc)
     {}{}{}{}{}{}{}{}{}
     '''.format(
             self.device_id, self.name(),
-            self.device_summary(indent=indent, increment=increment), 
+            self.device_summary(indent=indent, increment=increment),
             self.memory_summary(indent=indent, increment=increment),
             self.kernel_summary(indent=indent, increment=increment),
             self.fp_support_summary(indent=indent, increment=increment),
@@ -605,7 +606,7 @@ ind=ind, inc=inc)
             if self.has_extension('cl_nv_device_attribute_query'):
                 bus_id  = self._pci_bus_id_nv
                 slot_id = self._pci_slot_id_nv
-                dev_id  = (slot_id >> 3) 
+                dev_id  = (slot_id >> 3)
                 fn_id   = (slot_id & 0x07)
                 bus_id0 = (bus_id >> 8 ) # not sure if usefull
                 bus_id1  = (bus_id & 0xff)
@@ -620,7 +621,7 @@ ind=ind, inc=inc)
                 return '{:04x}:{:02x}:{:02x}.{:01x}'.format(bus_id0,bus_id1,dev_id,fn_id)
             else:
                 return UnknownDeviceAttribute()
-   
+
 
     def extensions(self):
         return [ext.strip() for ext in self._extensions.split(' ') if ext.strip() != '']
@@ -654,7 +655,7 @@ ind=ind, inc=inc)
     @opencl_version_atleast(1,1)
     def native_vector_width_half(self):
         return self._native_vector_width_half
-    
+
     @opencl_version_atleast(1,2)
     def built_in_kernels(self):
         return self._built_in_kernels
@@ -732,13 +733,13 @@ ind=ind, inc=inc)
     def max_threads_per_block(self):
         return self._max_work_group_size
     def simd_lane_size(self):
-        return self._simd_lane_size 
+        return self._simd_lane_size
 
     def max_constant_args(self):
         return self._max_constant_args
     def max_constant_buffer_size(self):
         return self._max_constant_buffer_size
-   
+
 #MEMORY
     def global_mem_size(self):
         return self._global_mem_size
@@ -752,7 +753,7 @@ ind=ind, inc=inc)
         return self._max_mem_alloc_size
     def mem_base_addr_align(self):
         return self._mem_base_addr_align
-    
+
     def local_mem_size(self):
         return self._local_mem_size
     def local_mem_type(self):
@@ -792,7 +793,7 @@ ind=ind, inc=inc)
     def fp64_config(self):
         assert self.has_fp64
         return cl2hysop_fpconfig(self._double_fp_config)
-    
+
 #IMAGES
     def has_image_support(self):
         return bool(self._image_support)
@@ -808,14 +809,14 @@ ind=ind, inc=inc)
     def max_samplers(self):
         assert self.has_image_support
         return self._max_samplers
-    
+
     def has_1d_image_support(self):
         return self.has_image_support
     def has_2d_image_support(self):
         return self.has_image_support
     def has_3d_image_support(self):
         return self.has_image_support
-    
+
     def has_1d_image_write_support(self):
         return (self.max_write_image_args>0)
     def has_2d_image_write_support(self):
@@ -830,12 +831,12 @@ ind=ind, inc=inc)
     @opencl_version_atleast(1,2,False)
     def has_2d_image_array_support(self):
         return self.has_image_support
-    
+
     def image_max_array_size(self):
         assert self.has_1d_image_array_support or \
                self.has_2d_image_array_support
         return self._image_max_array_size
-    
+
     def max_1d_image_size(self):
         assert self.has_1d_image_support
         return np.asarray([self._image2d_max_width])
@@ -860,10 +861,10 @@ ind=ind, inc=inc)
     def max_3d_image_size(self):
         assert self.has_3d_image_support
         return np.asarray([
-                self._image3d_max_depth, 
+                self._image3d_max_depth,
                 self._image3d_max_height,
                 self._image3d_max_width])
-   
+
     @opencl_version_atleast(1,2,False)
     def has_1d_image_from_buffer_support(self):
         return self.has_image_support()
@@ -873,10 +874,10 @@ ind=ind, inc=inc)
             return self.has_image_support()
         else:
             return self.has_extension('cl_khr_image2d_from_buffer')
-    
+
     def has_3d_image_from_buffer_support(self):
         return False
-    
+
     @opencl_version_atleast(2,0,False)
     def has_2d_image_from_image_support(self):
         return self.has_image_support
@@ -890,7 +891,7 @@ ind=ind, inc=inc)
         return self._image_pitch_alignment
 
 
-    
+
 #ATOMICS
     def has_global_int32_atomics(self):
         if self.opencl_version() == (1,0):
@@ -903,7 +904,7 @@ ind=ind, inc=inc)
         return (self.opencl_version()[0] >= 2)
     def has_global_float64_atomics(self):
         return (self.opencl_version()[0] >= 2)
-    
+
     def has_local_int32_atomics(self):
         if self.opencl_version() == (1,0):
             return self.has_extension('cl_khr_local_int32_base_atomics')
@@ -915,7 +916,7 @@ ind=ind, inc=inc)
         return (self.opencl_version()[0] >= 2)
     def has_local_float64_atomics(self):
         return (self.opencl_version()[0] >= 2)
-    
+
     def has_mixed_int32_atomics(self):
         return False
     def has_mixed_int64_atomics(self):
@@ -1009,7 +1010,7 @@ class OpenClDeviceStatistics(HardwareStatistics):
             self._global_mem_size += [device._global_mem_size]
             self._type = device.type()
             self._clpeak_stats = device._clpeak_info.stats()
-        
+
     def __iadd__(self, other):
         if (other is None):
             return self
@@ -1035,6 +1036,6 @@ class OpenClDeviceStatistics(HardwareStatistics):
         ind = ' '*indent
         inc = ' '*increment
         ss = ['{{ind}}{:^4} {:^10} {:^10} {} {}'.format(self._counter, self._type,
-            bytes2str(np.mean(self._global_mem_size)), 
+            bytes2str(np.mean(self._global_mem_size)),
             self._clpeak_stats, self._name)]
         return '\n'.join(s.format(ind=ind, inc=inc) for s in ss)
diff --git a/hysop/backend/device/opencl/opencl_env.py b/hysop/backend/device/opencl/opencl_env.py
index 8668534bd72470c0246d6a81a1fe3c5ebc506e26..0b679eac3aeb4c2164d3c9aae5dfc30cb7f1ea96 100644
--- a/hysop/backend/device/opencl/opencl_env.py
+++ b/hysop/backend/device/opencl/opencl_env.py
@@ -1,6 +1,8 @@
+import hashlib, os, copy, re
+import numpy as np
+
 from hysop import vprint, dprint
 from hysop import __VERBOSE__, __KERNEL_DEBUG__, __DEFAULT_PLATFORM_ID__, __DEFAULT_DEVICE_ID__
-from hysop.deps import hashlib, np, os, copy, re
 from hysop.constants import Precision, DeviceType
 from hysop.tools.types import check_instance, first_not_None
 from hysop.tools.io_utils import IO
@@ -21,6 +23,17 @@ class OpenClEnvironment(TaggedObject):
     """
     OpenCL environment.
     """
+
+    def __new__(cls, mpi_params,
+                     platform_id = None,
+                     device_id   = None,
+                     device_type = None,
+                     gl_sharing=False,
+                     strict=True,
+                     name=None,
+                     **kwds):
+        return super(OpenClEnvironment, cls).__new__(cls, tag_prefix='clenv', **kwds)
+
     def __init__(self, mpi_params,
                        platform_id = None,
                        device_id   = None,
@@ -89,7 +102,7 @@ class OpenClEnvironment(TaggedObject):
         except:
             title=' while creating the following OpenCL environment '
             msg=framed_str(title=title, msg=msg)
-            print msg+'\n'
+            print(msg+'\n')
             raise
         # OpenCL context
         context = get_context(device, gl_sharing)
@@ -300,7 +313,7 @@ device.opencl_c_version, bytes2str(device.global_mem_size))
     def _parse_opencl_version(self):
         assert (self.device is not None)
         sversion = self.device.version.strip()
-        _regexp='OpenCL\s+(\d)\.(\d)'
+        _regexp=r'OpenCL\s+(\d)\.(\d)'
         regexp=re.compile(_regexp)
         match=re.match(regexp,sversion)
         if not match:
@@ -390,7 +403,7 @@ device.opencl_c_version, bytes2str(device.global_mem_size))
                 self.parse_file(f, vector_width, nb_remesh_components))
             f.close()
 
-        # print gpu_src
+        # output gpu_src
         if self.macros is not None:
             for k in self.macros:
                 gpu_src = gpu_src.replace(k, str(self.macros[k]))
@@ -510,7 +523,7 @@ Dumped OpenCL Kernel '{}'
         # --- Build kernel ---
         try:
             build = prg.build(build_options)
-        except Exception, e:
+        except Exception as e:
             print("Build files : ")
             for sf in file_list:
                 print("   - ", sf)
@@ -553,14 +566,14 @@ Dumped OpenCL Kernel '{}'
 
         gpu_src = src
 
-        src_hash = hashlib.sha1(gpu_src).hexdigest()
+        src_hash = hashlib.sha1(gpu_src.encode('utf-8')).hexdigest()
         if (kernel_name is None):
             kernel_name = src_hash
         else:
             kernel_name += '_{}'.format(src_hash[:4])
 
         if VERBOSE:
-            print '=== Kernel raw source compiling ==='
+            print('=== Kernel raw source compiling ===')
         prg = cl.Program(self.context, gpu_src)
 
         dump_folder=os.path.join(IO.default_path(), OPENCL_KERNEL_DUMP_FOLDER)
@@ -570,14 +583,14 @@ Dumped OpenCL Kernel '{}'
             if not os.path.exists(dump_folder) and (main_rank == 0):
                 os.makedirs(dump_folder)
             dump_file=os.path.join(dump_folder, 'rk{}_{}_dump.cl'.format(main_rank, kernel_name))
-            print 'Dumping kernel src at \'{}\'.'.format(dump_file)
+            print('Dumping kernel src at \'{}\'.'.format(dump_file))
             with open(dump_file, 'w+') as f:
                 f.write(gpu_src)
         s_build_opts = ' '.join(build_opts)
 
         if VERBOSE:
-            print 'Build options: {}'.format(s_build_opts)
-            print 'Building...'
+            print('Build options: {}'.format(s_build_opts))
+            print('Building...')
 
         # Build OpenCL program
         try:
@@ -594,10 +607,10 @@ Dumped OpenCL Kernel '{}'
             raise e
 
         if VERBOSE:
-            print 'Compiler status: {}'.format(
-                build.get_build_info(self.device, cl.program_build_info.STATUS))
-            print 'Compiler log: {}'.format(
-                build.get_build_info(self.device, cl.program_build_info.LOG))
+            print('Compiler status: {}'.format(
+                build.get_build_info(self.device, cl.program_build_info.STATUS)))
+            print('Compiler log: {}'.format(
+                build.get_build_info(self.device, cl.program_build_info.LOG)))
 
         return build
 
diff --git a/hysop/backend/device/opencl/opencl_fft.py b/hysop/backend/device/opencl/opencl_fft.py
index 07d3b680b3870ba47f0e250806d6424b90b287b0..f75018bfec126d1195d0863869bf2d7b0cbd0a4a 100644
--- a/hysop/backend/device/opencl/opencl_fft.py
+++ b/hysop/backend/device/opencl/opencl_fft.py
@@ -7,8 +7,9 @@ except ImportError as e:
         def __init__(self):
             assert False, "Du to gpyfft import error ({}), this class is useless".format(e)
     gfft, GFFT = None, None
-    print e
-    print "Some functionnalities may not work. It seems that hysop is called from non OpenCL machine."
+    print(e)
+    print("Some functionnalities may not work. It seems that hysop is called from non OpenCL machine.")
+
 from hysop import vprint
 from hysop.tools.types import first_not_None
 from hysop.tools.warning import HysopWarning
@@ -180,7 +181,7 @@ class OpenClFFT(FFT):
         return self
 
     def enqueue(self, queue=None, wait_for_events=None, direction_forward=True):
-        """
+        r"""
         Enqueue transform with array base_data.
         /!\ Do not forget to offset input and output by array.offset
             within custom user callbacks, only base_data is passed
diff --git a/hysop/backend/device/opencl/opencl_hardware_backend.py b/hysop/backend/device/opencl/opencl_hardware_backend.py
index 6f4cd5f9ce2cce45d13de9872170b1a3294064d8..60eb844b5b1893885f8bea741ffe7530dec3e09e 100644
--- a/hysop/backend/device/opencl/opencl_hardware_backend.py
+++ b/hysop/backend/device/opencl/opencl_hardware_backend.py
@@ -22,17 +22,17 @@ class OpenClBackend(HardwareBackend):
                                  platform_id=i,
                                  platform_handle=p)
             self._platforms[i] = plat
-    
+
     def to_string(self, indent=0, increment=2):
         new_indent = indent + increment
-        platforms = '\n'.join(x.to_string(indent=new_indent, increment=increment) 
+        platforms = '\n'.join(x.to_string(indent=new_indent, increment=increment)
                                         for x in self.platforms.values())
         ss=\
 '''
 {ind}::OpenClBackend::
 {ind}{inc}Data collected using pyopencl {} compiled against OpenCL headers v{}.
 
-{}'''.format(self._version_text, self._cl_header_version_text, platforms, 
+{}'''.format(self._version_text, self._cl_header_version_text, platforms,
         ind=' '*indent, inc=' '*increment)
         return ss
 
@@ -43,13 +43,13 @@ class OpenClBackend(HardwareBackend):
         return self.to_string()
 
 
-class OpenClBackendStatistics(HardwareStatistics): 
+class OpenClBackendStatistics(HardwareStatistics):
 
     def __init__(self, backend=None):
         self._platform_statistics = {}
         if (backend is not None):
             check_instance(backend, OpenClBackend)
-            for (pid,platform) in backend._platforms.iteritems():
+            for (pid,platform) in backend._platforms.items():
                 self._platform_statistics[platform._name] = platform.stats()
 
     @property
@@ -65,7 +65,7 @@ class OpenClBackendStatistics(HardwareStatistics):
             msg='Unknown type {}, expected OpenClBackend or OpenClBackendStatistics.'
             msg=msg.format(type(other))
             raise TypeError(msg)
-        for (pname, pstats) in other._platform_statistics.iteritems():
+        for (pname, pstats) in other._platform_statistics.items():
             if (pname in self._platform_statistics):
                 self._platform_statistics[pname] += pstats
             else:
@@ -76,7 +76,7 @@ class OpenClBackendStatistics(HardwareStatistics):
         ind = ' '*indent
         inc = ' '*increment
         ss = []
-        for (pname,plat) in self._platform_statistics.iteritems():
+        for (pname,plat) in self._platform_statistics.items():
             ss += ['{{ind}}Platform {}:'.format(pname)]
             ss += [plat.to_string(indent+increment, increment)]
         return '\n'.join(s.format(ind=ind, inc=inc) for s in ss)
diff --git a/hysop/backend/device/opencl/opencl_kernel.py b/hysop/backend/device/opencl/opencl_kernel.py
index c5a6fba8b3b5a6a7fc1ec7cb27448bb90d03514e..429da47d7a27479e2bf77ec7e997010594dd2030 100644
--- a/hysop/backend/device/opencl/opencl_kernel.py
+++ b/hysop/backend/device/opencl/opencl_kernel.py
@@ -65,9 +65,8 @@ class OpenClKernel(object):
 
         assert len(program.all_kernels())==1
         self._kernel = program.all_kernels()[0]
-        
-        default_args = default_args or {}
-        self.default_args = default_args
+
+        self.default_args = first_not_None(default_args, {})
 
     def _get_name(self):
         """Get the name of this program."""
@@ -87,7 +86,7 @@ class OpenClKernel(object):
     def _get_default_local_work_size(self):
         """Default default_local_work_size to launch the program."""
         return self._default_local_work_size
-        
+
     def _get_default_args(self):
         """Default keyword args_mapping to launch the program."""
         return self._default_args
@@ -95,14 +94,14 @@ class OpenClKernel(object):
         """Set default arguments."""
         args_mapping = self._args_mapping
         nargs = len(args_mapping)
-        positions = () 
+        positions = ()
         for argname in default_args:
             if argname not in args_mapping:
                 msg='Unkown default argument \'{}\', known ones are {}.'
-                msg=msg.format(argname, 
+                msg=msg.format(argname,
                         ', '.join('\'{}\''.format(a) for a in args_mapping.keys()))
                 raise ValueError(msg)
-        for argname, (argpos, argtype) in args_mapping.iteritems():
+        for argname, (argpos, argtype) in args_mapping.items():
             assert isinstance(argpos, int)
             if not isinstance(argtype, (type, npw.dtype)):
                 check_instance(argtype, tuple, values=type)
@@ -112,11 +111,11 @@ class OpenClKernel(object):
                     msg='Argument {} at position {} should be of type {} but got a {}.'
                     msg=msg.format(argname, argpos, argtype, type(argval))
                     raise TypeError(msg)
-                  
+
             positions += (argpos,)
 
         msg='Ill-formed argument positions {}.'.format(positions)
-        assert set(positions) == set(xrange(nargs)), msg
+        assert set(positions) == set(range(nargs)), msg
         self._default_args = default_args
 
 
@@ -127,7 +126,7 @@ class OpenClKernel(object):
     default_args = property(_get_default_args, _set_default_args)
     default_global_work_size = property(_get_default_global_work_size)
     default_local_work_size = property(_get_default_local_work_size)
-    
+
     def build_list_launcher(self, launcher_name=None, *args, **kwds):
         """
         Build a OpenClKernelLauncher and return it as a OpenClKernelListLauncher.
@@ -140,7 +139,7 @@ class OpenClKernel(object):
             queue=None, local_work_size=None, global_work_size=None, **kwds):
         """
         Build an OpenClKernel with more default arguments bound.
-        If all arguments are bound, return an OpenClKernelLauncher, 
+        If all arguments are bound, return an OpenClKernelLauncher,
         else return an OpenClKernel.
         """
         name_prefix  = first_not_None(name_prefix,  '')
@@ -152,9 +151,9 @@ class OpenClKernel(object):
         local_work_size  = first_not_None(local_work_size, self._default_local_work_size)
 
         args_list, parameters_map, iterated_parameters = self._compute_args_list(**kwds)
-        
+
         _kwds = dict(name=name, kernel=self._program,
-                    args_list=args_list, 
+                    args_list=args_list,
                     default_global_work_size=global_work_size,
                     default_local_work_size=local_work_size,
                     default_queue=queue)
@@ -175,22 +174,22 @@ class OpenClKernel(object):
     def _compute_args_list(self, **kwds):
         """
         Compute argument list from default arguments and input keywords.
-        If all arguments are not specified, also return a parameter_map 
+        If all arguments are not specified, also return a parameter_map
         which is args_mapping restricted to the missing arguments.
         """
 
         default_args = self.default_args
         args_mapping = self.args_mapping
         nargs        = len(args_mapping)
-        
-        arguments = { k:w for (k,w) in default_args.iteritems() }
+
+        arguments = { k:w for (k,w) in default_args.items() }
         arguments.update(kwds)
 
-        parameters_map = {k:v for (k,v) in args_mapping.iteritems() if (k not in arguments)}
+        parameters_map = {k:v for (k,v) in args_mapping.items() if (k not in arguments)}
         iterated_parameters = {}
-        
+
         args_list = {}
-        for (arg_name, arg_value) in arguments.iteritems():
+        for (arg_name, arg_value) in arguments.items():
             if (arg_name not in args_mapping):
                 msg='Unknown argument {}, valid ones are {}.'
                 msg=msg.format(arg_name, ', '.join(args_mapping.keys()))
@@ -218,10 +217,10 @@ class OpenClKernel(object):
                 msg=msg.format(arg_name, arg_index, arg_types, type(arg_value))
                 raise TypeError(msg)
             args_list[arg_index] = arg_value
-        
-        if parameters_map: 
+
+        if parameters_map:
             args_list = tuple(args_list.items())
         else:
-            args_list = tuple(args_list[i] for i in xrange(nargs))
+            args_list = tuple(args_list[i] for i in range(nargs))
 
         return args_list, parameters_map, iterated_parameters
diff --git a/hysop/backend/device/opencl/opencl_kernel_autotuner.py b/hysop/backend/device/opencl/opencl_kernel_autotuner.py
index 63495c5a149988102a97c6d3554993e662a0d910..fe974165b823c0d59162d6dc1b7cda87de54a334 100644
--- a/hysop/backend/device/opencl/opencl_kernel_autotuner.py
+++ b/hysop/backend/device/opencl/opencl_kernel_autotuner.py
@@ -17,17 +17,17 @@ class OpenClKernelAutotuner(KernelAutotuner):
 
     def autotuner_config_key(self):
         """Caching key for autotuner results."""
-        return (self.typegen.__repr__(), 
-                self.cl_env.platform.name.strip(), 
-                self.cl_env.device.name.strip(), 
+        return (self.typegen.__repr__(),
+                self.cl_env.platform.name.strip(),
+                self.cl_env.device.name.strip(),
                 self.build_opts)
-    
+
     def _print_header(self, *args, **kwds):
         cl_env = self.cl_env
         verbose = super(OpenClKernelAutotuner, self)._print_header(*args, **kwds)
         if verbose:
-            print '  *platform: {}'.format(cl_env.platform.name.strip())
-            print '  *device: {}'.format(cl_env.device.name.strip())
+            print('  *platform: {}'.format(cl_env.platform.name.strip()))
+            print('  *device: {}'.format(cl_env.device.name.strip()))
 
     def collect_kernel_infos(self, tkernel, extra_parameters, extra_kwds):
         """
@@ -36,7 +36,7 @@ class OpenClKernelAutotuner(KernelAutotuner):
         kernel_name, kernel_src = tkernel.generate_kernel_src(
                         global_work_size=None,
                         local_work_size=None,
-                        extra_parameters=extra_parameters, 
+                        extra_parameters=extra_parameters,
                         extra_kwds=extra_kwds,
                         tuning_mode=False,
                         dry_run=True)
@@ -44,7 +44,7 @@ class OpenClKernelAutotuner(KernelAutotuner):
         prg, kernel = self.build_from_source(kernel_name=kernel_name,
                 kernel_src=kernel_src, build_options=[],
                 force_verbose=False, force_debug=False)
-        
+
         check_instance(prg, cl.Program)
         check_instance(kernel, cl.Kernel)
 
@@ -53,26 +53,26 @@ class OpenClKernelAutotuner(KernelAutotuner):
         preferred_work_group_size_multiple =  kernel.get_work_group_info(kwgi.PREFERRED_WORK_GROUP_SIZE_MULTIPLE,
                     self.cl_env.device)
         return (max_kernel_wg_size, preferred_work_group_size_multiple)
-    
+
     def check_kernel(self, tkernel, kernel,
             global_work_size, local_work_size):
         check_instance(kernel, cl.Kernel)
-        
+
         cl_env = self.cl_env
         device = cl_env.device
         lmem = cl.characterize.usable_local_mem_size(device)
-        
+
         kwgi = cl.kernel_work_group_info
         max_kernel_wg_size     = kernel.get_work_group_info(kwgi.WORK_GROUP_SIZE, device)
         kernel_local_mem_size  = kernel.get_work_group_info(kwgi.LOCAL_MEM_SIZE,  device)
-        
+
         wgs = prod(local_work_size)
 
         if (wgs > max_kernel_wg_size):
             msg='Work group size {} exceeds maximum kernel work group size {} for kernel {}.'
             msg=msg.format(wgs, max_kernel_wg_size, kernel.function_name)
             raise RuntimeError(msg)
-        
+
         if (kernel_local_mem_size > lmem):
             msg='Maximum usable device local memory size {} exceeded for kernel {} which '
             msg+='needs {}.'
@@ -80,16 +80,17 @@ class OpenClKernelAutotuner(KernelAutotuner):
                     kernel.function_name,
                     bytes2str(kernel_local_mem_size))
             raise RuntimeError(msg)
-        
-        if cl_env.cl_version >= (1,2) and (device.type == cl.device_type.CUSTOM):
+
+        cl_version = tuple(map(int, cl_env.cl_version))
+        if cl_version >= (1,2) and (device.type == cl.device_type.CUSTOM):
             max_kernel_global_size = kernel.get_work_group_info(kwgi.GLOBAL_WORK_SIZE, device)
             if np.any(np.greater(global_work_size, max_kernel_global_size)):
                 msg='Global size {} exceeded for kernel {} which allows only {} for '
                 msg+='the custom device {}.'
-                msg=msg.format(global_size, kernel.function_name, 
+                msg=msg.format(global_size, kernel.function_name,
                                 max_kernel_global_size, device.name)
                 raise RuntimeError(msg)
-        
+
         if cl.characterize.has_struct_arg_count_bug(device, ctx=cl_env.context):
             msg='Device has struct argument counting bug.'
             raise RuntimeError(msg)
@@ -103,14 +104,14 @@ class OpenClKernelAutotuner(KernelAutotuner):
                 msg=msg.format(i, kernel.function_name)
                 raise RuntimeError(msg)
 
-    
-    def build_from_source(self, kernel_name, kernel_src, 
+
+    def build_from_source(self, kernel_name, kernel_src,
                             build_options, force_verbose, force_debug):
         """
         Compile and bench one kernel by executing it nruns times.
         Return a AutotunerKernelStatistics instance.
         """
-        prg = self.cl_env.build_raw_src(src=kernel_src, 
+        prg = self.cl_env.build_raw_src(src=kernel_src,
                                    build_options=build_options,
                                    kernel_name=kernel_name,
                                    force_verbose=force_verbose, force_debug=force_debug)
@@ -131,14 +132,14 @@ class OpenClKernelAutotuner(KernelAutotuner):
 
         global_size = tuple(global_work_size)
         local_size  = tuple(local_work_size)
-           
+
         assert(target_nruns>=1)
         if (old_stats is None):
             stats = OpenClKernelStatistics()
         else:
             stats = old_stats
         pruned = False
-        
+
         try:
             with cl.CommandQueue(ctx, device, properties=profiling_enable) as queue:
                 assert queue.properties & profiling_enable
@@ -152,17 +153,17 @@ class OpenClKernelAutotuner(KernelAutotuner):
                     if (best_stats is not None):
                         pruned = (stats.mean > self.autotuner_config.prune_threshold*best_stats.mean)
         except Exception as e:
-            print e
-            print
+            print(e)
+            print()
             msg ='\nFATAL ERROR: Failed to bench kernel global_work_size={}, local_work_size={}'
             msg=msg.format(global_work_size, local_work_size)
             # try to dump offending kernel
             msg+='\nTrying to dump source kernel to \'/tmp/hysop_kernel_dump.cl\'...\n'
-            print msg
+            print(msg)
             with open('/tmp/hysop_kernel_dump.cl', 'w') as f:
                 f.write(kernel.program.source)
             msg+='\n'
             raise
-        
-        return (stats, pruned) 
+
+        return (stats, pruned)
 
diff --git a/hysop/backend/device/opencl/opencl_kernel_launcher.py b/hysop/backend/device/opencl/opencl_kernel_launcher.py
index 54dac149dd66f2fe9a0e0df842aff158a1d62913..220eb2c2acd0570bf5d050167fe75f944eab995e 100644
--- a/hysop/backend/device/opencl/opencl_kernel_launcher.py
+++ b/hysop/backend/device/opencl/opencl_kernel_launcher.py
@@ -1,6 +1,8 @@
+import warnings
+import itertools as it
 from abc import ABCMeta, abstractmethod
+
 from hysop import __KERNEL_DEBUG__, __TRACE_KERNELS__, __TRACE_NOCOPY__, __TRACE_NOACCUMULATE__
-from hysop.deps import it, warnings
 from hysop.tools.decorators import debug
 from hysop.tools.types import check_instance, first_not_None
 from hysop.tools.numpywrappers import npw
@@ -26,7 +28,7 @@ should_profile_kernel = should_trace_kernel
 if (__KERNEL_DEBUG__ or __TRACE_KERNELS__):
     def trace_kernel(kernel_msg):
         if should_trace_kernel(kernel_msg):
-            print kernel_msg
+            print(kernel_msg)
 else:
     def trace_kernel(kernel_msg):
         pass
@@ -46,7 +48,7 @@ if __OPENCL_PROFILE__:
                     raise AttributeError(kernel_msg)
                 kernel_msg = kernel._apply_msg
         if __KERNEL_DEBUG__ and (kernel_msg is not None) and should_profile_kernel(kernel_msg):
-            print '{} | {}'.format(evt.profile.end - evt.profile.start, kernel_msg.strip())
+            print('{} | {}'.format(evt.profile.end - evt.profile.start, kernel_msg.strip()))
         if not fprofiler is None:
             fprofiler[kernel_msg] += (evt.profile.end - evt.profile.start)*1e-9
 
@@ -138,7 +140,7 @@ class OpenClKernelListLauncher(object):
                     msg = msg.format(launcher.name)
                     raise RuntimeError(msg)
                 if isinstance(launcher, OpenClParametrizedKernelLauncher):
-                    parameters = {k: v[1] for (k, v) in launcher.parameters_map.iteritems()}
+                    parameters = {k: v[1] for (k, v) in launcher.parameters_map.items()}
                     self._update_parameters_from_parametrized_kernel(launcher, parameters)
                 elif isinstance(launcher, HostLauncherI):
                     parameters = launcher.parameters()
@@ -187,7 +189,7 @@ class OpenClKernelListLauncher(object):
                 except:
                     msg = '\nFailed to call kernel {} of type {}.\n'
                     msg = msg.format(kernel.name, type(kernel).__name__)
-                    print msg
+                    print(msg)
                     raise
         else:
             if (__KERNEL_DEBUG__ or __TRACE_KERNELS__):
@@ -204,7 +206,7 @@ class OpenClKernelListLauncher(object):
         check_instance(kernel, (OpenClParametrizedKernelLauncher, HostLauncherI))
         check_instance(parameters, dict, keys=str, values=(type, npw.dtype))
         sparameters = self._parameters
-        for (pname, ptype) in parameters.iteritems():
+        for (pname, ptype) in parameters.items():
             if pname in sparameters:
                 (stype, op_names) = sparameters[pname]
                 if (stype != ptype):
@@ -222,7 +224,7 @@ class OpenClKernelListLauncher(object):
         check_instance(kernel_list_launcher, OpenClKernelListLauncher)
         parameters = kernel_list_launcher._parameters
         sparameters = self._parameters
-        for (pname, (ptype, knames)) in parameters.iteritems():
+        for (pname, (ptype, knames)) in parameters.items():
             if pname in sparameters:
                 (stype, op_names) = sparameters[pname]
                 if (stype != ptype):
@@ -259,11 +261,10 @@ class OpenClKernelListLauncher(object):
     statistics = property(_get_statistics)
 
 
-class LauncherI(object):
+class LauncherI(object, metaclass=ABCMeta):
     """
     Interface for any object that has the ability to be a launcher.
     """
-    __metaclass__ = ABCMeta
 
     def __init__(self, name, profiler=None, **kwds):
         """
@@ -599,7 +600,7 @@ class OpenClParametrizedKernelLauncher(OpenClKernelLauncher):
     def _set_kernel_args(self, **kwds):
         """Set the arguments of this kernel and return the kernel."""
         kernel = super(OpenClParametrizedKernelLauncher, self)._set_kernel_args()
-        for pname, (pindex, ptypes) in self._parameters_map.iteritems():
+        for pname, (pindex, ptypes) in self._parameters_map.items():
             assert pname in kwds, '{} was not given.'.format(pname)
             pval = kwds[pname]
             self.check_kernel_arg(pval, pindex, pname, ptypes)
@@ -609,11 +610,9 @@ class OpenClParametrizedKernelLauncher(OpenClKernelLauncher):
     parameters_map = property(_get_parameters_map)
 
 
-class OpenClKernelParameterGenerator(object):
+class OpenClKernelParameterGenerator(object, metaclass=ABCMeta):
     """Abstract base for opencl kernel parameter yielders."""
 
-    __metaclass__ = ABCMeta
-
     def __iter__(self):
         return self.new_generator()
 
@@ -636,7 +635,7 @@ class OpenClKernelParameterYielder(OpenClKernelParameterGenerator):
             which should return a Generator or an Iterator uppon call.
 
             Example:
-                lambda: xrange(10)
+                lambda: range(10)
         """
         assert callable(fn)
         self._fn = fn
@@ -681,7 +680,7 @@ class OpenClIterativeKernelLauncher(OpenClParametrizedKernelLauncher):
         iterated_parameter_arg_names = ()
         iterated_parameter_arg_types = ()
         iterated_parameter_generators = ()
-        for pname, pgen in iterated_parameters.iteritems():
+        for pname, pgen in iterated_parameters.items():
             assert pname in parameters_map
             arg_id, arg_type = parameters_map.pop(pname)
             iterated_parameter_arg_ids += (arg_id,)
diff --git a/hysop/backend/device/opencl/opencl_operator.py b/hysop/backend/device/opencl/opencl_operator.py
index 81a185833ea028fb9a03c55035d11826dd504c7a..4788250aa9072fcd6a96095a43ef7ba88af75215 100644
--- a/hysop/backend/device/opencl/opencl_operator.py
+++ b/hysop/backend/device/opencl/opencl_operator.py
@@ -22,11 +22,10 @@ from hysop.topology.topology_descriptor import TopologyDescriptor
 from hysop.fields.discrete_field import DiscreteScalarFieldView
 
 
-class OpenClOperator(ComputationalGraphOperator):
+class OpenClOperator(ComputationalGraphOperator, metaclass=ABCMeta):
     """
     Abstract class for discrete operators working on OpenCL backends.
     """
-    __metaclass__ = ABCMeta
 
     __default_method = {
         OpenClKernelConfig: OpenClKernelConfig()
@@ -48,6 +47,11 @@ class OpenClOperator(ComputationalGraphOperator):
         am.update(cls.__available_methods)
         return am
 
+    @debug
+    def __new__(cls, cl_env=None, mpi_params=None,
+                requested_symbolic_kernels=None, **kwds):
+        return super(OpenClOperator, cls).__new__(cls, mpi_params=mpi_params, **kwds)
+
     @debug
     def __init__(self, cl_env=None, mpi_params=None,
                  requested_symbolic_kernels=None, **kwds):
@@ -73,7 +77,8 @@ class OpenClOperator(ComputationalGraphOperator):
 
         msg = 'mpi_params was {} and cl_env was {}.'
 
-        for topo in kwds.get('input_fields', {}).values()+kwds.get('output_fields', {}).values():
+        for topo in set(kwds.get('input_fields', {}).values()).union(
+                kwds.get('output_fields', {}).values()):
             if isinstance(topo, Topology):
                 if (cl_env is None):
                     cl_env = topo.backend.cl_env
@@ -86,7 +91,7 @@ class OpenClOperator(ComputationalGraphOperator):
                 if not _vars:
                     msg = 'Cannot deduce domain without input or output fields.'
                     raise RuntimeError(msg)
-                domain = _vars.keys()[0].domain
+                domain = next(iter(_vars)).domain
                 mpi_params = MPIParams(comm=domain.task_comm(),
                                        task_id=domain.current_task())
                 cl_env = get_or_create_opencl_env(mpi_params)
@@ -170,7 +175,7 @@ class OpenClOperator(ComputationalGraphOperator):
     @debug
     def create_topology_descriptors(self):
         # by default we create OPENCL (gpu) TopologyDescriptors
-        for (field, topo_descriptor) in self.input_fields.iteritems():
+        for (field, topo_descriptor) in self.input_fields.items():
             topo_descriptor = TopologyDescriptor.build_descriptor(
                 backend=Backend.OPENCL,
                 operator=self,
@@ -179,7 +184,7 @@ class OpenClOperator(ComputationalGraphOperator):
                 cl_env=self.cl_env)
             self.input_fields[field] = topo_descriptor
 
-        for (field, topo_descriptor) in self.output_fields.iteritems():
+        for (field, topo_descriptor) in self.output_fields.items():
             topo_descriptor = TopologyDescriptor.build_descriptor(
                 backend=Backend.OPENCL,
                 operator=self,
@@ -251,7 +256,7 @@ class OpenClOperator(ComputationalGraphOperator):
         """
         build_options = self._cl_build_options
         defines = set()
-        for define, value in self._cl_defines.iteritems():
+        for define, value in self._cl_defines.items():
             if (value is not None):
                 define = '{}={}'.format(define.strip(), value.strip())
             else:
@@ -266,12 +271,15 @@ class OpenClOperator(ComputationalGraphOperator):
         Check if all topologies are on OpenCL backend and check that all opencl environments
         match.
         """
-        topo = (self.input_fields.values()+self.output_fields.values())[0]
+        topologies = set(self.input_fields.values()).union(self.output_fields.values())
+        if not topologies:
+            return
+        topo = next(iter(topologies))
         assert isinstance(topo, TopologyView)
         assert topo.backend.kind == Backend.OPENCL
         ref_env = self.cl_env
 
-        for topo in set(self.input_fields.values()+self.output_fields.values()):
+        for topo in topologies:
             assert isinstance(topo, TopologyView)
             assert topo.backend.kind == Backend.OPENCL
             assert topo.backend.cl_env == ref_env
diff --git a/hysop/backend/device/opencl/opencl_platform.py b/hysop/backend/device/opencl/opencl_platform.py
index 89bcc705b2a4115a7936f7979821bc341f14aecf..78e20228c6b1bdf9ba328813a7f32a15783a2152 100644
--- a/hysop/backend/device/opencl/opencl_platform.py
+++ b/hysop/backend/device/opencl/opencl_platform.py
@@ -24,19 +24,19 @@ class OpenClPlatform(Platform):
         # we do not keep a reference to platform_handle as we
         # need to pickle this object
 
-        super(OpenClPlatform, self).__init__(hardware_topo=hardware_topo, 
+        super(OpenClPlatform, self).__init__(hardware_topo=hardware_topo,
                 platform_id=platform_id, platform_handle=platform_handle, **kwds)
 
     @classmethod
     def handle_cls(cls):
         return cl.Platform
-    
+
     def _discover_devices(self, hardware_topo, platform_handle):
         for i, device_handle in enumerate(platform_handle.get_devices()):
             dev = OpenClDevice(platform=self,
                                platform_handle=platform_handle,
                                device_id=i,
-                               device_handle=device_handle, 
+                               device_handle=device_handle,
                                hardware_topo=hardware_topo)
             self._logical_devices[i] = dev
 
@@ -45,7 +45,7 @@ class OpenClPlatform(Platform):
         ind=' '*indent
         inc=' '*increment
         new_indent = indent + 2*increment
-        devices = '\n'.join(x.to_string(indent=new_indent, increment=increment, short=True) 
+        devices = '\n'.join(x.to_string(indent=new_indent, increment=increment, short=True)
                                         for x in self.logical_devices.values())
         sep = '\n{ind}{inc}{inc}{inc}|'.format(ind=ind, inc=inc)
         extensions = sep + sep.join(e.strip() for e in sorted(self._extensions.split(' ')) if e not in ('', ' ', '\t', '\n'))
@@ -54,7 +54,7 @@ class OpenClPlatform(Platform):
 {ind}{inc}*Profile: {}
 {}
 {ind}{inc}*Extensions: {}
-'''.format(self._platform_id, self._name, self._version, self._profile, devices, extensions, 
+'''.format(self._platform_id, self._name, self._version, self._profile, devices, extensions,
             ind=ind+inc, inc=inc)
         return ss
 
@@ -65,7 +65,7 @@ class OpenClPlatform(Platform):
         return OpenClPlatformStatistics(self)
 
 
-class OpenClPlatformStatistics(HardwareStatistics): 
+class OpenClPlatformStatistics(HardwareStatistics):
     def __init__(self, platform=None):
         self._name = None
         self._counter = 0
@@ -74,7 +74,7 @@ class OpenClPlatformStatistics(HardwareStatistics):
             check_instance(platform, OpenClPlatform)
             self._name = platform._name
             self._counter += 1
-            for (device_id,device) in platform._logical_devices.iteritems():
+            for (device_id,device) in platform._logical_devices.items():
                 self._device_statistics[device._name] = device.stats()
 
     @property
@@ -95,7 +95,7 @@ class OpenClPlatformStatistics(HardwareStatistics):
             self._name = other._name
         assert self._name == other._name
         self._counter += other._counter
-        for (dname, dstats) in other._device_statistics.iteritems():
+        for (dname, dstats) in other._device_statistics.items():
             if (dname in self._device_statistics):
                 self._device_statistics[dname] += dstats
             else:
@@ -107,11 +107,11 @@ class OpenClPlatformStatistics(HardwareStatistics):
         inc = ' '*increment
         if self._device_statistics:
             ss = []
-            for dname in sorted(self._device_statistics, 
+            for dname in sorted(self._device_statistics,
                     key=lambda k: -self._device_statistics[k]._counter):
                 dstats = self._device_statistics[dname]
                 ss += [dstats.to_string(indent+increment, increment)]
         else:
             ss += ['{ind}{inc}No device found.']
         return '\n'.join(s.format(ind=ind, inc=inc) for s in ss)
-    
+
diff --git a/hysop/backend/device/opencl/opencl_printer.py b/hysop/backend/device/opencl/opencl_printer.py
index 117f0c54699b59a8afa910a22b4bdf75b020834a..96d95101af75dd11744c36ec38c02003d5bf78d6 100644
--- a/hysop/backend/device/opencl/opencl_printer.py
+++ b/hysop/backend/device/opencl/opencl_printer.py
@@ -1,9 +1,11 @@
-
-
-import sympy as sm
-from sympy.printing.ccode import C99CodePrinter
-from hysop.tools.types import check_instance
 from hysop.backend.device.opencl.opencl_types import OpenClTypeGen
+from hysop.tools.types import check_instance
+import sympy as sm
+from packaging import version
+if version.parse(sm.__version__) > version.parse("1.7"):
+    from sympy.printing.c import C99CodePrinter
+else:
+    from sympy.printing.ccode import C99CodePrinter
 
 # /!\ TODO complete known_functions list with OpenCL builtins
 # - keys are sympy function names (beware to capital letters)
@@ -11,16 +13,16 @@ from hysop.backend.device.opencl.opencl_types import OpenClTypeGen
 #   that corresponds to OpenCL function builtins.
 # Here are some attributes that can be checked in predicates:
 #  is_zero
-#  is_finite 	 is_integer 	 
-#  is_negative 	 is_positive 	 
-#  is_rational 	 is_real 	 
+#  is_finite 	 is_integer
+#  is_negative 	 is_positive
+#  is_rational 	 is_real
 known_functions = {
-    'Abs': [(lambda x: x.is_integer, 'abs'),'fabs'],
-    'min': [(lambda x,y: x.is_integer and y.is_integer, 'min'),'fmin'],
-    'max': [(lambda x,y: x.is_integer and y.is_integer, 'max'),'fmax'],
+    'Abs': [(lambda x: x.is_integer, 'abs'), 'fabs'],
+    'min': [(lambda x, y: x.is_integer and y.is_integer, 'min'), 'fmin'],
+    'max': [(lambda x, y: x.is_integer and y.is_integer, 'max'), 'fmax'],
     'sqrt': 'sqrt',
     'gamma': 'tgamma',
-    
+
     'sin': 'sin',
     'cos': 'cos',
     'tan': 'tan',
@@ -28,14 +30,14 @@ known_functions = {
     'acos': 'acos',
     'atan': 'atan',
     'atan2': 'atan2',
-    
+
     'sinh': 'sinh',
     'cosh': 'cosh',
     'tanh': 'tanh',
     'asinh': 'asinh',
     'acosh': 'acosh',
     'atanh': 'atanh',
-    
+
     'exp': 'exp',
     'log': 'log',
     'erf': 'erf',
@@ -45,25 +47,25 @@ known_functions = {
 
 # OpenCl 2.2 reserved keywords (see opencl documentation)
 reserved_words = [
-    
+
     # C++14 keywords
-    'alignas', 'continue', 'friend', 'register', 'true', 
-    'alignof', 'decltype', 'goto', 'reinterpret_cast', 'try', 
-    'asm', 'default', 'if', 'return', 'typedef', 
-    'auto', 'delete', 'inline', 'short', 'typeid', 
-    'bool', 'do', 'int', 'signed', 'typename', 
-    'break', 'double', 'long', 'sizeof', 'union', 
-    'case', 'dynamic_cast', 'mutable', 'static', 'unsigned', 
-    'catch', 'else', 'namespace', 'static_assert', 'using', 
-    'char', 'enum', 'new', 'static_cast', 'virtual', 
-    'char16_t', 'explicit', 'noexcept', 'struct', 'void', 
-    'char32_t', 'export', 'nullptr', 'switch', 'volatile', 
-    'class', 'extern', 'operator', 'template', 'wchar_t', 
-    'const', 'false', 'private', 'this', 'while', 
-    'constexpr', 'float', 'protected', 'thread_local', 
+    'alignas', 'continue', 'friend', 'register', 'true',
+    'alignof', 'decltype', 'goto', 'reinterpret_cast', 'try',
+    'asm', 'default', 'if', 'return', 'typedef',
+    'auto', 'delete', 'inline', 'short', 'typeid',
+    'bool', 'do', 'int', 'signed', 'typename',
+    'break', 'double', 'long', 'sizeof', 'union',
+    'case', 'dynamic_cast', 'mutable', 'static', 'unsigned',
+    'catch', 'else', 'namespace', 'static_assert', 'using',
+    'char', 'enum', 'new', 'static_cast', 'virtual',
+    'char16_t', 'explicit', 'noexcept', 'struct', 'void',
+    'char32_t', 'export', 'nullptr', 'switch', 'volatile',
+    'class', 'extern', 'operator', 'template', 'wchar_t',
+    'const', 'false', 'private', 'this', 'while',
+    'constexpr', 'float', 'protected', 'thread_local',
     'const_cast', 'for', 'public', 'throw'
     'override', 'final',
-    
+
     # OpenCl data types
     'uchar', 'ushort', 'uint', 'ulong', 'half',
     'bool2', 'char2', 'uchar2', 'short2', 'ushort2', 'int2', 'uint2', 'long2', 'ulong2', 'half2', 'float2', 'double2',
@@ -71,10 +73,10 @@ reserved_words = [
     'bool4', 'char4', 'uchar4', 'short4', 'ushort4', 'int4', 'uint4', 'long4', 'ulong4', 'half4', 'float4', 'double4',
     'bool8', 'char8', 'uchar8', 'short8', 'ushort8', 'int8', 'uint8', 'long8', 'ulong8', 'half8', 'float8', 'double8',
     'bool16', 'char16', 'uchar16', 'short16', 'ushort16', 'int16', 'uint16', 'long16', 'ulong16', 'half16', 'float16', 'double16',
-    
+
     # function qualifiers
     'kernel', '__kernel',
-    
+
     # access qualifiers
     'read_only', 'write_only', 'read_write',
     '__read_only', '__write_only', '__read_write',
@@ -99,16 +101,16 @@ class OpenClPrinter(C99CodePrinter):
         'error_on_reserved': True,
         'reserved_word_suffix': None,
     }
-    
+
     def __init__(self, typegen, symbol2vars=None, **settings):
         check_instance(typegen, OpenClTypeGen)
         check_instance(symbol2vars, dict, keys=sm.Symbol, allow_none=True)
-        
-        super(OpenClPrinter,self).__init__(settings=settings)
+
+        super(OpenClPrinter, self).__init__(settings=settings)
 
         self.known_functions = dict(known_functions)
-        self.reserved_words  = set(reserved_words)
-        self.typegen     = typegen
+        self.reserved_words = set(reserved_words)
+        self.typegen = typegen
         self.symbol2vars = symbol2vars
 
     def dump_symbol(self, expr):
@@ -116,36 +118,44 @@ class OpenClPrinter(C99CodePrinter):
         if expr in symbol2vars:
             return self._print(symbol2vars[expr])
         else:
-            return super(OpenClPrinter,self)._print_Symbol(expr)
+            return super(OpenClPrinter, self)._print_Symbol(expr)
+
     def dump_rational(self, expr):
         return self.typegen.dump(expr)
+
     def dump_float(self, expr):
         return self.typegen.dump(expr)
-    
 
     def _print_Symbol(self, expr):
         return self.dump_symbol(expr)
+
     def _print_Rational(self, expr):
         return self.dump_rational(expr)
+
     def _print_PythonRational(self, expr):
         return self.dump_rational(expr)
+
     def _print_Fraction(self, expr):
         return self.dump_rational(expr)
+
     def _print_mpq(self, expr):
         return self.dump_rational(expr)
+
     def _print_Float(self, expr):
         return self.dump_float(expr)
 
     # last resort printer (if _print_CLASS is not found)
-    def emptyPrinter(self,expr):
+    def emptyPrinter(self, expr):
         return self.typegen.dump(expr)
 
+
 def dump_clcode(expr, typegen, **kargs):
     """Return OpenCL representation of the given expression as a string."""
     p = OpenClPrinter(typegen=typegen, **kargs)
     s = p.doprint(expr)
     return s
 
+
 def print_clcode(expr, typegen, **kargs):
     """Prints OpenCL representation of the given expression."""
-    print dump_clcode(expr,typegen=typegen,**kargs)
+    print(dump_clcode(expr, typegen=typegen, **kargs))
diff --git a/hysop/backend/device/opencl/opencl_symbolic.py b/hysop/backend/device/opencl/opencl_symbolic.py
index d0d414e6254fe9a5201862ee1c81af87b1ac6583..84cda9eeb35ab4c1eb743dcc853506045de10114 100644
--- a/hysop/backend/device/opencl/opencl_symbolic.py
+++ b/hysop/backend/device/opencl/opencl_symbolic.py
@@ -2,11 +2,11 @@
 """
 Abstract class providing a common interface to all
 discrete operators working on the OpenCl backend
-and using kernels generated on the fly from symbolic 
+and using kernels generated on the fly from symbolic
 expressions.
 
 * :class:`~hysop.backend.device.opencl.opencl_operator.OpenClSymbolic` is an abstract class
-    used to provide a common interface to all discrete operators working with the 
+    used to provide a common interface to all discrete operators working with the
     opencl backend and using kernels generated on the fly from symbolic expressions.
 """
 import numpy as np
@@ -31,13 +31,13 @@ from hysop.tools.sympy_utils import subscript, subscripts
 
 class OpenClSymbolic(OpenClOperator):
     """
-    Abstract class for discrete operators working on OpenCL backends 
+    Abstract class for discrete operators working on OpenCL backends
     that require custom symbolic kernels.
     """
     __default_method    = CustomSymbolicOperatorBase._CustomSymbolicOperatorBase__default_method
     __available_methods = \
             CustomSymbolicOperatorBase._CustomSymbolicOperatorBase__available_methods
-    
+
     @classmethod
     def default_method(cls):
         dm = super(OpenClSymbolic, cls).default_method()
@@ -50,6 +50,9 @@ class OpenClSymbolic(OpenClOperator):
         am.update(cls.__available_methods)
         return am
 
+    @debug
+    def __new__(cls, **kwds):
+        return super(OpenClSymbolic, cls).__new__(cls, **kwds)
 
     @debug
     def __init__(self, **kwds):
@@ -79,11 +82,11 @@ class OpenClSymbolic(OpenClOperator):
         check_instance(exprs, tuple, values=ValidExpressions, minsize=1)
         self.expressions[name] = exprs
         self.extra_kwds[name] = extra_kwds
-    
+
     @classmethod
     def __symbolic_variables(cls, *names, **kwds):
         scls = kwds.pop('scls')
-        
+
         arrays = ()
         shape = to_tuple(kwds.get('shape', kwds.get('count', ())))
         if shape:
@@ -100,7 +103,7 @@ class OpenClSymbolic(OpenClOperator):
             arr = scls(name=name, pretty_name=pname, var_name=var_name, **kwds)
             arrays += (arr,)
         return np.asarray(arrays).reshape(shape)
-    
+
     @classmethod
     def symbolic_ndbuffers(cls, *names, **kwds):
         from hysop.symbolic.array import OpenClSymbolicNdBuffer
@@ -109,7 +112,7 @@ class OpenClSymbolic(OpenClOperator):
         kwds['memory_object'] = None
         kwds['scls'] = OpenClSymbolicNdBuffer
         return cls.__symbolic_variables(*names, **kwds)
-    
+
     @classmethod
     def symbolic_buffers(cls, *names, **kwds):
         from hysop.symbolic.array import OpenClSymbolicBuffer
@@ -126,14 +129,14 @@ class OpenClSymbolic(OpenClOperator):
         assert 'scls' not in kwds
         kwds['scls'] = OpenClSymbolicArray
         return cls.__symbolic_variables(*names, **kwds)
-        
+
     @classmethod
     def symbolic_tmp_scalars(cls, *names, **kwds):
         from hysop.symbolic.tmp import TmpScalar
         assert 'scls' not in kwds
         kwds['scls'] = TmpScalar
         return cls.__symbolic_variables(*names, **kwds)
-    
+
     @classmethod
     def symbolic_constants(cls, *names, **kwds):
         from hysop.symbolic.constant import SymbolicConstant
@@ -163,7 +166,7 @@ class OpenClSymbolic(OpenClOperator):
         output_params = self.output_params
         input_tensor_fields  = self.input_tensor_fields
         output_tensor_fields = self.output_tensor_fields
-        
+
         check_instance(input_fields,  dict, keys=ScalarField, values=CartesianTopologyDescriptors)
         check_instance(output_fields, dict, keys=ScalarField, values=CartesianTopologyDescriptors)
         check_instance(input_params,  dict, keys=str, values=Parameter)
@@ -172,12 +175,12 @@ class OpenClSymbolic(OpenClOperator):
         check_instance(output_tensor_fields, tuple, values=Field)
 
         def _cmp(name, op_vars, expr_vars, exprs):
-            for (expr_var_key, expr_var_value) in expr_vars.iteritems():
+            for (expr_var_key, expr_var_value) in expr_vars.items():
                 vname = expr_var_key if isinstance(expr_var_key, str) else expr_var_key.name
                 if (expr_var_key not in op_vars):
                     msg='{} has not been set as {} in {}.__init__() but is required '
                     msg+='by one of the following symbolic expressions:\n  *{}'
-                    msg=msg.format(expr_var_key, name, self.name, 
+                    msg=msg.format(expr_var_key, name, self.name,
                             '\n  *{}'.join(str(x) for x in exprs))
                     raise RuntimeError(msg)
                 if (op_vars[expr_var_key] != expr_var_value):
@@ -190,16 +193,16 @@ class OpenClSymbolic(OpenClOperator):
         variables = input_fields.copy()
         variables.update(output_fields)
         direction = None
-        for (name, exprs) in self.expressions.iteritems():
+        for (name, exprs) in self.expressions.items():
             expr_info = SymbolicExpressionParser.parse(name, variables, *exprs)
             if expr_info.has_direction:
                 if (direction is None) :
                     direction = expr_info.direction
                 elif (expr_info.direction != direction):
-                    print
-                    print 'FATAL ERROR: Expressions cannot have different directions.'
-                    print expr_info
-                    print
+                    print()
+                    print('FATAL ERROR: Expressions cannot have different directions.')
+                    print(expr_info)
+                    print()
                     msg='{} contains a directional expression.'.format(name)
                     raise RuntimeError(msg)
             _cmp('input_fields',  input_fields,  expr_info.input_fields,  exprs)
@@ -212,7 +215,7 @@ class OpenClSymbolic(OpenClOperator):
             expr_info.interpolation        = self.interpolation
             expr_info.space_discretization = self.space_discretization
             expr_infos[name] = expr_info
-    
+
     @debug
     def get_field_requirements(self):
         """Extract field requirements from first expression parsing stage."""
@@ -232,14 +235,14 @@ class OpenClSymbolic(OpenClOperator):
                 axes = TranspositionState[dim].filter_axes(
                         lambda axes: (axes[-1] == dim-1-direction))
                 axes = tuple(axes)
-            
+
             min_ghosts_per_components = {}
 
             for (fields, expr_info_fields, is_input, iter_requirements) in \
-                    zip((self.input_fields, self.output_fields), 
-                        (expr_info.input_fields, expr_info.output_fields), 
+                    zip((self.input_fields, self.output_fields),
+                        (expr_info.input_fields, expr_info.output_fields),
                         (True, False),
-                        (requirements.iter_input_requirements, 
+                        (requirements.iter_input_requirements,
                         requirements.iter_output_requirements)):
                 if not fields:
                     continue
@@ -249,7 +252,7 @@ class OpenClSymbolic(OpenClOperator):
                     min_ghosts = npw.int_zeros(shape=(field.nb_components, field.dim))
                     if has_direction:
                         req.axes = axes
-                    for index in xrange(field.nb_components):
+                    for index in range(field.nb_components):
                         fname = '{}_{}'.format(field.name, index)
                         G = expr_info.min_ghosts_per_field_name.get(fname, 0)
                         if (field,index) in field_reqs:
@@ -279,11 +282,11 @@ class OpenClSymbolic(OpenClOperator):
                         min_ghosts_per_components[field] = min_ghosts
 
             expr_info.min_ghosts = {k:npw.max(v, axis=0) for (k,v) in \
-                                            min_ghosts_per_components.iteritems()}
-            expr_info.min_ghosts_per_components = {field:gpc[:,-1-direction] 
-                                for (field,gpc) in min_ghosts_per_components.iteritems()}
-        
-            for (array, reqs) in array_reqs.iteritems():
+                                            min_ghosts_per_components.items()}
+            expr_info.min_ghosts_per_components = {field:gpc[:,-1-direction]
+                                for (field,gpc) in min_ghosts_per_components.items()}
+
+            for (array, reqs) in array_reqs.items():
                 expr_info.min_ghosts[array] = reqs.min_ghosts.copy()
                 expr_info.min_ghosts_per_components[array] = reqs.min_ghosts[-1-direction]
         return requirements
@@ -310,17 +313,17 @@ class OpenClSymbolic(OpenClOperator):
             expr_info.check_buffers()
         super(OpenClSymbolic, self).setup(work=work)
         self._collect_symbolic_kernels(work)
-    
+
     def _collect_symbolic_kernels(self, work):
         cl_env = self.cl_env
         typegen = self.typegen
         autotuner_config = self.autotuner_config
         build_opts = self.build_options()
-        
-        kernel_autotuner = OpenClAutotunableCustomSymbolicKernel(cl_env=cl_env, typegen=typegen, 
+
+        kernel_autotuner = OpenClAutotunableCustomSymbolicKernel(cl_env=cl_env, typegen=typegen,
                 build_opts=build_opts, autotuner_config=autotuner_config)
-        
-        for (name, expr_info) in self.expr_infos.iteritems():
+
+        for (name, expr_info) in self.expr_infos.items():
             kernel, args_dict, update_input_parameters = \
                     kernel_autotuner.autotune(expr_info=expr_info, **self.extra_kwds[name])
             kl = kernel.build_launcher(**args_dict)
diff --git a/hysop/backend/device/opencl/opencl_tools.py b/hysop/backend/device/opencl/opencl_tools.py
index 155e8609a0acb7fcf8bb8aa62032631500854c0a..2d0259e5f145db9208299d49bf65ad8f8feb22ce 100644
--- a/hysop/backend/device/opencl/opencl_tools.py
+++ b/hysop/backend/device/opencl/opencl_tools.py
@@ -12,7 +12,8 @@
 
 
 """
-from hysop.deps import sys, os, re, itertools, hashlib, pickle, gzip, hashlib
+
+import sys, os, re, itertools, hashlib, gzip, hashlib
 
 from hysop import __VERBOSE__, __KERNEL_DEBUG__, \
     __DEFAULT_PLATFORM_ID__, __DEFAULT_DEVICE_ID__
@@ -237,7 +238,7 @@ def get_work_items(resolution, vector_width=1):
             print("Warning : GPU best performances obtained for",)
             print("problem sizes multiples of 256")
     while resolution[0] % workItemNumber > 0:
-        workItemNumber = workItemNumber / 2
+        workItemNumber = workItemNumber // 2
     # Change work-item regarding vector_width
     if workItemNumber * vector_width > resolution[0]:
         if resolution[0] % vector_width > 0:
@@ -438,8 +439,8 @@ def parse_opencl_file(f, n=8, nb_remesh_components=1):
     src = ""
     # replacement for floatN elements
     vec_floatn = re.compile(r'\(float__N__\)\(')
-    vec_nn = re.compile('__NN__')
-    vec_n = re.compile('__N__')
+    vec_nn = re.compile(r'__NN__')
+    vec_n = re.compile(r'__N__')
     for l in f.readlines():
         # Expand floatN items
         if vec_floatn.search(l) and vec_nn.search(l) and \
@@ -447,13 +448,13 @@ def parse_opencl_file(f, n=8, nb_remesh_components=1):
             sl = l.split("(float__N__)(")
             l = sl[0] + "(float" + str(n) + ")("
             el = sl[1].rsplit(',', 1)[0]
-            for i in xrange(n):
+            for i in range(n):
                 l += vec_nn.sub(str(i), el) + ','
             l = l[:-1] + '\n'
         # Expand floatN elements access
         elif vec_nn.search(l) and l[-2] == ';':
             el = ""
-            for i in xrange(n):
+            for i in range(n):
                 el += vec_nn.sub(str(i), l)
             l = el
         # Replace vector length
@@ -466,14 +467,14 @@ def parse_opencl_file(f, n=8, nb_remesh_components=1):
     def repl_instruction(m):
         return ''.join(
             [m.group(1).replace('__ID__', str(i))
-             for i in xrange(nb_remesh_components)])
+             for i in range(nb_remesh_components)])
     # __RCOMP_P ..., ou __RCOMP_P ...)
     re_param = re.compile(r'__RCOMP_P([\w\s\.\[\]+*/=-]+(?=,|\)))')
 
     def repl_parameter(m):
         return ', '.join(
             [m.group(1).replace('__ID__', str(i))
-             for i in xrange(nb_remesh_components)])
+             for i in range(nb_remesh_components)])
 
     src = re_instr.sub(repl_instruction, src)
     src = re_param.sub(repl_parameter, src)
diff --git a/hysop/backend/device/opencl/opencl_types.py b/hysop/backend/device/opencl/opencl_types.py
index 203d6e5df1923462b73fc7bdfcf5780bc240ff4d..455fef9bb763b09846e1946b875ef00a04273f54 100644
--- a/hysop/backend/device/opencl/opencl_types.py
+++ b/hysop/backend/device/opencl/opencl_types.py
@@ -1,6 +1,10 @@
+import string, re
+import sympy as sm
+import numpy as np
+import itertools as it
+
 from hysop import __KERNEL_DEBUG__, vprint, dprint
-from hysop.deps import sm, np, it, string, re
-from hysop.backend.device.opencl import cl, clArray
+from hysop.backend.device.opencl import cl, clArray, clTypes
 from hysop.tools.numerics import MPZ, MPQ, MPFR, F2Q
 from hysop.tools.types import first_not_None, to_tuple
 
@@ -55,9 +59,9 @@ FLT_BYTES = {
 }
 
 def basetype(fulltype):
-    return fulltype.translate(None,string.digits)
+    return fulltype.translate(str.maketrans('', '', string.digits))
 def components(fulltype):
-    comp = fulltype.translate(None,string.ascii_letters+'_')
+    comp = fulltype.translate(str.maketrans('', '', string.ascii_letters+'_'))
     return 1 if comp == '' else int(comp)
 def mangle_vtype(fulltype):
     return basetype(fulltype)[0]+str(components(fulltype))
@@ -131,7 +135,7 @@ def float_to_dec_str(f,fbtype):
 
 
 #pyopencl specific
-vec = clArray.vec
+vec = clTypes
 
 def npmake(dtype):
     return lambda scalar: dtype(scalar) #np.array([scalar], dtype=dtype)
@@ -265,7 +269,7 @@ class TypeGen(object):
         if isinstance(val, (float,np.floating,MPFR,sm.Float)):
             sval = self.float_to_str(val, self.fbtype)
             return '({})'.format(sval)
-        elif isinstance(val, (np.integer,int,long,MPZ,sm.Integer)):
+        elif isinstance(val, (np.integer,int,MPZ,sm.Integer)):
             suffix = ''
             if isinstance(val, np.unsignedinteger):
                 suffix+='u'
@@ -318,13 +322,15 @@ class TypeGen(object):
 
         if isinstance(val, (float,np.floating,MPFR,sm.Float)):
             return self.fbtype
-        elif isinstance(val, (np.integer,int,long,MPZ,sm.Integer)):
+        elif isinstance(val, (np.integer,int,MPZ,sm.Integer)):
             if isinstance(val, (np.int64, MPZ)):
                 return 'long'
             elif isinstance(val, np.uint64):
                 return 'ulong'
             elif isinstance(val, np.unsignedinteger):
                 return 'uint'
+            elif isinstance(val, int):
+                return 'long'
             else:
                 return 'int'
         elif isinstance(val, (bool,np.bool_)):
@@ -437,7 +443,7 @@ class OpenClTypeGen(TypeGen):
     def opencl_version(self):
         assert (self.device is not None)
         sversion = self.device.version.strip()
-        _regexp='OpenCL\s+(\d)\.(\d)'
+        _regexp=r'OpenCL\s+(\d)\.(\d)'
         regexp=re.compile(_regexp)
         match=re.match(regexp,sversion)
         if not match:
@@ -445,9 +451,9 @@ class OpenClTypeGen(TypeGen):
             msg += 'and regular expression \'{}\'.'
             msg=msg.format(sversion,_regexp)
             raise RuntimeError(msg)
-        major = match.group(1)
-        minor = match.group(2)
-        return (major,minor)
+        major = int(match.group(1))
+        minor = int(match.group(2))
+        return (major, minor)
 
     def dtype_from_str(self,stype):
         stype = stype.replace('ftype', self.fbtype).replace('fbtype',self.fbtype)
diff --git a/hysop/backend/device/opencl/operator/analytic.py b/hysop/backend/device/opencl/operator/analytic.py
index 916ea10ae34f194ad9928831115df67ae854e241..ff0d64cfc26a5592b8a62111bed3cdf0d34142ba 100644
--- a/hysop/backend/device/opencl/operator/analytic.py
+++ b/hysop/backend/device/opencl/operator/analytic.py
@@ -1,5 +1,5 @@
+import sympy as sm
 
-from hysop.deps import sm
 from hysop.tools.types import check_instance, first_not_None, to_tuple
 from hysop.tools.decorators import debug
 from hysop.fields.continuous_field import ScalarField, Field
@@ -14,11 +14,17 @@ class OpenClAnalyticField(OpenClCustomSymbolicOperator):
     """
 
     @debug
-    def __init__(self, field, formula, variables, **kwds): 
+    def __new__(cls, field, formula, variables, **kwds):
+        name = kwds.pop('name', 'analytic_{}'.format(field.name))
+        return super(OpenClAnalyticField, cls).__new__(cls, name=None, exprs=None,
+                variables=variables, **kwds)
+
+    @debug
+    def __init__(self, field, formula, variables, **kwds):
         """
         Initialize a Analytic operator on the python backend.
 
-        Apply a user-defined formula onto a field, possibly 
+        Apply a user-defined formula onto a field, possibly
         dependent on space variables and external fields/parameters.
 
         Parameters
@@ -36,23 +42,23 @@ class OpenClAnalyticField(OpenClCustomSymbolicOperator):
         check_instance(field, ScalarField)
         check_instance(formula, tuple, values=(type(None),sm.Basic), size=field.nb_components)
         check_instance(variables, dict, keys=Field, values=CartesianTopologyDescriptors)
-        
+
         exprs = ()
         Fs = to_tuple(field.s())
         for (lhs,rhs) in zip(Fs, formula):
             if (rhs is None):
                 continue
             exprs += (Assignment(lhs, rhs),)
-        
+
         if not exprs:
             msg='All formulas are None. Give at least one expression.'
             raise ValueError(msg)
 
         name = kwds.pop('name', 'analytic_{}'.format(field.name))
 
-        super(OpenClAnalyticField, self).__init__(name=name, exprs=exprs, 
+        super(OpenClAnalyticField, self).__init__(name=name, exprs=exprs,
                 variables=variables, **kwds)
-    
+
     @classmethod
     def supports_mpi(cls):
         return True
diff --git a/hysop/backend/device/opencl/operator/curl.py b/hysop/backend/device/opencl/operator/curl.py
index 30d3b5afc9f4cafd9e2e8bebcd70ddf0916e4844..da4c64dfa9d5ce1025f10b7bf4709d915ac43242 100644
--- a/hysop/backend/device/opencl/operator/curl.py
+++ b/hysop/backend/device/opencl/operator/curl.py
@@ -19,11 +19,15 @@ class OpenClSpectralCurl(SpectralCurlOperatorBase, OpenClSymbolic):
     """
     Compute the curl by using an OpenCL FFT backend.
     """
-    
+
+    @debug
+    def __new__(cls, **kwds):
+        return super(OpenClSpectralCurl, cls).__new__(cls, **kwds)
+
     @debug
     def __init__(self, **kwds):
         super(OpenClSpectralCurl, self).__init__(**kwds)
-        
+
 
         assert (len(self.forward_transforms) % 2 == 0)
         N = len(self.forward_transforms)//2
@@ -32,10 +36,10 @@ class OpenClSpectralCurl(SpectralCurlOperatorBase, OpenClSymbolic):
         kernel_names = ()
         for (i,(Ft,(tg,Ki))) in enumerate(zip(self.forward_transforms, self.K)):
             Fhs = Ft.output_symbolic_array('F{}_hat'.format(i))
-            
+
             kname = 'filter_curl_{}d_{}'.format(Fhs.dim, i)
             kernel_names += (kname,)
-            
+
             is_complex = Ki.is_complex
             Ki = tg._indexed_wave_numbers[Ki]
 
@@ -52,7 +56,7 @@ class OpenClSpectralCurl(SpectralCurlOperatorBase, OpenClSymbolic):
             self.require_symbolic_kernel(kname, expr)
 
         self._kernel_names = kernel_names
-    
+
     @debug
     def setup(self, work):
         super(OpenClSpectralCurl, self).setup(work)
diff --git a/hysop/backend/device/opencl/operator/custom_symbolic.py b/hysop/backend/device/opencl/operator/custom_symbolic.py
index 3dca49a142fc781c743a8ad6f5171649fede7c96..fe5112fa44a72d005edddbe85f952df2bd9d94ea 100644
--- a/hysop/backend/device/opencl/operator/custom_symbolic.py
+++ b/hysop/backend/device/opencl/operator/custom_symbolic.py
@@ -10,6 +10,10 @@ from hysop.backend.device.opencl.opencl_copy_kernel_launchers import OpenClCopyB
 
 class OpenClCustomSymbolicOperator(CustomSymbolicOperatorBase, OpenClOperator):
 
+    @debug
+    def __new__(cls, **kwds):
+        return super(OpenClCustomSymbolicOperator, cls).__new__(cls, **kwds)
+
     @debug
     def __init__(self, **kwds):
         super(OpenClCustomSymbolicOperator, self).__init__(**kwds)
@@ -33,8 +37,9 @@ class OpenClCustomSymbolicOperator(CustomSymbolicOperatorBase, OpenClOperator):
         autotuner_config = self.autotuner_config
         build_opts = self.build_options()
 
-        kernel_autotuner = OpenClAutotunableCustomSymbolicKernel(cl_env=cl_env, typegen=typegen,
-                                                                 build_opts=build_opts, autotuner_config=autotuner_config)
+        kernel_autotuner = OpenClAutotunableCustomSymbolicKernel(
+            cl_env=cl_env, typegen=typegen,
+            build_opts=build_opts, autotuner_config=autotuner_config)
 
         kernel, args_dict, update_input_parameters = kernel_autotuner.autotune(expr_info=self.expr_info)
 
diff --git a/hysop/backend/device/opencl/operator/derivative.py b/hysop/backend/device/opencl/operator/derivative.py
index 222811aecf57f4de378258b2ee1f8620cc7c3943..7de3304f76dfce9974cb10df77704645f7063714 100644
--- a/hysop/backend/device/opencl/operator/derivative.py
+++ b/hysop/backend/device/opencl/operator/derivative.py
@@ -1,5 +1,5 @@
+import sympy as sm
 
-from hysop.deps import sm
 from hysop.symbolic import space_symbols
 from hysop.symbolic.complex import ComplexMul
 from hysop.constants import DirectionLabels
@@ -20,12 +20,17 @@ from hysop.operator.base.custom_symbolic_operator import SymbolicExpressionParse
 from hysop.symbolic.relational import Assignment
 
 
-class OpenClFiniteDifferencesSpaceDerivative(FiniteDifferencesSpaceDerivativeBase, 
+class OpenClFiniteDifferencesSpaceDerivative(FiniteDifferencesSpaceDerivativeBase,
                                              OpenClSymbolic):
+
+    @debug
+    def __new__(cls, **kwds):
+        return super(OpenClFiniteDifferencesSpaceDerivative, cls).__new__(cls, require_tmp=False, **kwds)
+
     @debug
     def __init__(self, **kwds):
         super(OpenClFiniteDifferencesSpaceDerivative, self).__init__(require_tmp=False, **kwds)
-        
+
         Fin  = self.Fin.s()
         Fout = self.Fout.s()
         A = self.A
@@ -39,12 +44,12 @@ class OpenClFiniteDifferencesSpaceDerivative(FiniteDifferencesSpaceDerivativeBas
         xd = space_symbols[self.direction]
         expr = Assignment(Fout, A*Fin.diff(xd, self.directional_derivative))
         self.require_symbolic_kernel('compute_derivative', expr)
-    
+
     @debug
     def setup(self, work):
         super(OpenClFiniteDifferencesSpaceDerivative, self).setup(work)
         (self.kernel, self.update_parameters) = self.symbolic_kernels['compute_derivative']
-    
+
     @op_apply
     def apply(self, **kwds):
         queue = self.cl_env.default_queue
@@ -57,13 +62,17 @@ class OpenClSpectralSpaceDerivative(SpectralSpaceDerivativeBase, OpenClSymbolic)
     Compute a derivative of a scalar field in a given direction
     using spectral methods.
     """
-    
+
+    @debug
+    def __new__(cls, **kwds):
+        return super(OpenClSpectralSpaceDerivative, cls).__new__(cls, **kwds)
+
     @debug
     def __init__(self, **kwds):
         """
         Initialize a SpectralSpaceDerivative operator on the opencl backend.
 
-        See hysop.operator.base.derivative.SpectralSpaceDerivativeBase for 
+        See hysop.operator.base.derivative.SpectralSpaceDerivativeBase for
         more information.
 
         Parameters
@@ -93,10 +102,10 @@ class OpenClSpectralSpaceDerivative(SpectralSpaceDerivativeBase, OpenClSymbolic)
             self._do_scale = True
         else:
             self._do_scale = False
-        
+
         Kr = 1
         Kc = None
-        for (wn, indexed_wn) in self.tg._indexed_wave_numbers.iteritems():
+        for (wn, indexed_wn) in self.tg._indexed_wave_numbers.items():
             if wn.is_real:
                 Kr *= indexed_wn
             else:
@@ -118,7 +127,7 @@ class OpenClSpectralSpaceDerivative(SpectralSpaceDerivativeBase, OpenClSymbolic)
     @debug
     def discretize(self, **kwds):
         super(OpenClSpectralSpaceDerivative, self).discretize(**kwds)
-    
+
     @debug
     def setup(self, work):
         super(OpenClSpectralSpaceDerivative, self).setup(work=work)
@@ -126,10 +135,10 @@ class OpenClSpectralSpaceDerivative(SpectralSpaceDerivativeBase, OpenClSymbolic)
             (self.scale_kernel, self.scale_update_parameters) = self.symbolic_kernels['scale_derivative']
         else:
             self.scale_derivative_kernel = lambda **kwds: None
-            self.scale_update_parameters = lambda: {} 
+            self.scale_update_parameters = lambda: {}
         self.compute_derivative_kernel, _ = self.symbolic_kernels['compute_derivative']
 
-    
+
     @op_apply
     def apply(self, **kwds):
         queue = self.cl_env.default_queue
diff --git a/hysop/backend/device/opencl/operator/diffusion.py b/hysop/backend/device/opencl/operator/diffusion.py
index 9d7bbdc2cb6de7b33a65e851ec22114ccb1c8a17..48f2b697e7c35334c2c7ec5f3d5a9fae491fd169 100644
--- a/hysop/backend/device/opencl/operator/diffusion.py
+++ b/hysop/backend/device/opencl/operator/diffusion.py
@@ -17,7 +17,11 @@ class OpenClDiffusion(DiffusionOperatorBase, OpenClSymbolic):
     """
     Solves the diffusion equation using an OpenCL FFT backend.
     """
-    
+
+    @debug
+    def __new__(cls, **kwds):
+        return super(OpenClDiffusion, cls).__new__(cls, **kwds)
+
     @debug
     def __init__(self, **kwds):
         super(OpenClDiffusion, self).__init__(**kwds)
@@ -29,7 +33,7 @@ class OpenClDiffusion(DiffusionOperatorBase, OpenClSymbolic):
         for (i,(Ft,Wn)) in enumerate(zip(self.forward_transforms, self.wave_numbers)):
             Fhs = Ft.output_symbolic_array('F{}_hat'.format(i))
             indices = local_indices_symbols[:Fhs.dim]
-        
+
             kname = 'filter_diffusion_{}d_{}'.format(Fhs.dim, i)
             kernel_names += (kname,)
 
@@ -38,11 +42,11 @@ class OpenClDiffusion(DiffusionOperatorBase, OpenClSymbolic):
                 indexed_Wi = self.tg._indexed_wave_numbers[Wi]
                 F += indexed_Wi
             expr = Assignment(Fhs, Fhs / (1 - nu*dt*F))
-            
+
             self.require_symbolic_kernel(kname, expr)
 
         self._kernel_names = kernel_names
-    
+
     @debug
     def setup(self, work):
         super(OpenClDiffusion, self).setup(work)
diff --git a/hysop/backend/device/opencl/operator/directional/advection_dir.py b/hysop/backend/device/opencl/operator/directional/advection_dir.py
index acf1ad9fbd88108bab5eca3ee39047087950c000..cc995c320e386a91da24d72e91fb7a5225f1c7b1 100644
--- a/hysop/backend/device/opencl/operator/directional/advection_dir.py
+++ b/hysop/backend/device/opencl/operator/directional/advection_dir.py
@@ -15,8 +15,13 @@ class OpenClDirectionalAdvection(DirectionalAdvectionBase, OpenClDirectionalOper
     DEBUG = False
 
     @debug
-    def __init__(self, force_atomics=False, relax_min_particles=False, remesh_criteria_eps=None,
-                 **kwds):
+    def __new__(cls, force_atomics=False, relax_min_particles=False,
+                remesh_criteria_eps=None, **kwds):
+        return super(OpenClDirectionalAdvection, cls).__new__(cls, **kwds)
+
+    @debug
+    def __init__(self, force_atomics=False, relax_min_particles=False,
+                 remesh_criteria_eps=None, **kwds):
         """
         Particular advection of field(s) in a given direction,
         on opencl backend, with remeshing.
@@ -176,26 +181,28 @@ class OpenClDirectionalAdvection(DirectionalAdvectionBase, OpenClDirectionalOper
         if self.DEBUG:
             queue.flush()
             queue.finish()
-            print 'OPENCL_DT= {}'.format(dt)
+            dfin = tuple(self.dadvected_fields_in.values())
+            dfout = tuple(self.dadvected_fields_out.values())
+            print('OPENCL_DT= {}'.format(dt))
             self.advection_kernel_launcher(queue=queue, dt=dt).wait()
-            print 'OPENCL_P'
+            print('OPENCL_P')
             self.dposition.print_with_ghosts()
-            print 'OPENCL_Sin (before remesh)'
-            print self.dadvected_fields_in.values()[0].data[0].get(queue=queue)
-            print 'OPENCL_Sout (before remesh)'
-            print self.dadvected_fields_out.values()[0].data[0].get(queue=queue)
-            print
+            print('OPENCL_Sin (before remesh)')
+            print(dfin[0].data[0].get(queue=queue))
+            print('OPENCL_Sout (before remesh)')
+            print(dfout[0].data[0].get(queue=queue))
+            print()
             self.remesh_kernel_launcher(queue=queue).wait()
-            print 'OPENCL_Sout (before accumulation)'
-            data = self.dadvected_fields_out.values()[0].data[0]
-            print data.get(queue=queue)
-            print 'OPENCL_Sout (before accumulation, no ghosts)  ID={}'.format(id(data))
-            self.dadvected_fields_out.values()[0].print_with_ghosts()
+            print('OPENCL_Sout (before accumulation)')
+            data = dfout[0].data[0]
+            print(data.get(queue=queue))
+            print('OPENCL_Sout (before accumulation, no ghosts)  ID={}'.format(id(data)))
+            dfout[0].print_with_ghosts()
             self.accumulate_and_exchange(queue=queue).wait()
-            print 'OPENCL_Sout (after accumulation)'
-            print self.dadvected_fields_out.values()[0].data[0]
-            print 'OPENCL_Sout (after accumulation, no ghosts)'
-            self.dadvected_fields_out.values()[0].print_with_ghosts()
+            print('OPENCL_Sout (after accumulation)')
+            print(dfout[0].data[0])
+            print('OPENCL_Sout (after accumulation, no ghosts)')
+            dfout[0].print_with_ghosts()
         else:
             self.all_kernels(queue=queue, dt=dt)
 
diff --git a/hysop/backend/device/opencl/operator/directional/opencl_directional_operator.py b/hysop/backend/device/opencl/operator/directional/opencl_directional_operator.py
index c78c299d8754171b016d24e5e966cfeaeedb1c46..6dffc4fc2eb4f057ada157c7676191f871741d8c 100644
--- a/hysop/backend/device/opencl/operator/directional/opencl_directional_operator.py
+++ b/hysop/backend/device/opencl/operator/directional/opencl_directional_operator.py
@@ -9,14 +9,7 @@ class OpenClDirectionalOperator(DirectionalOperatorBase, OpenClOperator):
 
     OpenCL kernels are build once per dimension in order to handle
     directional splitting with resolution non uniform in directions.
-    
-    Field requirements are set such that the current direction will 
+
+    Field requirements are set such that the current direction will
     be contiguous in memory.
     """
-
-    @debug
-    def __init__(self, **kwds):
-        """
-        Create a directional operator in a given direction, OpenCl version.
-        """
-        super(OpenClDirectionalOperator,self).__init__(**kwds)
diff --git a/hysop/backend/device/opencl/operator/directional/stretching_dir.py b/hysop/backend/device/opencl/operator/directional/stretching_dir.py
index e07df8479f54910a270564e35571aa1a8030c4e9..478469e572dcd7b3fe998978c94f3dee54c4ab35 100644
--- a/hysop/backend/device/opencl/operator/directional/stretching_dir.py
+++ b/hysop/backend/device/opencl/operator/directional/stretching_dir.py
@@ -1,5 +1,6 @@
+import numpy as np
+
 from hysop import Field, TopologyDescriptor
-from hysop.deps import np
 from hysop.tools.decorators import debug
 from hysop.tools.types import check_instance
 from hysop.core.graph.graph import not_initialized, initialized, discretized, ready, op_apply
@@ -35,6 +36,12 @@ class OpenClDirectionalStretching(OpenClDirectionalOperator):
         SpaceDiscretization:   InstanceOf(SpaceDiscretization)
     }
 
+    @debug
+    def __new__(cls, velocity, vorticity, vorticity_out,
+                variables, **kwds):
+        return super(OpenClDirectionalStretching, cls).__new__(cls,
+                                                               input_fields=None, output_fields=None, **kwds)
+
     @debug
     def __init__(self, velocity, vorticity, vorticity_out,
                  variables, **kwds):
@@ -122,8 +129,8 @@ class OpenClDirectionalStretching(OpenClDirectionalOperator):
             rboundary = BoundaryCondition.NONE
         boundaries = (lboundary, rboundary)
 
-        v_ghosts, w_ghosts = DirectionalStretchingKernel.min_ghosts(boundaries,
-                                                                    formulation, order, time_integrator, direction)
+        v_ghosts, w_ghosts = DirectionalStretchingKernel.min_ghosts(
+            boundaries, formulation, order, time_integrator, direction)
 
         v_requirements.min_ghosts = v_ghosts
         win_requirements.min_ghosts = w_ghosts
diff --git a/hysop/backend/device/opencl/operator/enstrophy.py b/hysop/backend/device/opencl/operator/enstrophy.py
index c587f106276472f552b2debb08e9808a37f74e5c..8310362d8ed62e08a0e4446a6932711a4f6bb0ee 100644
--- a/hysop/backend/device/opencl/operator/enstrophy.py
+++ b/hysop/backend/device/opencl/operator/enstrophy.py
@@ -1,4 +1,5 @@
-from hysop.deps import sm
+import sympy as sm
+
 from hysop.constants import DirectionLabels
 from hysop.backend.device.opencl.opencl_array_backend import OpenClArrayBackend
 from hysop.tools.decorators import debug
@@ -16,6 +17,10 @@ from hysop.symbolic.relational import Assignment
 
 class OpenClEnstrophy(EnstrophyBase, OpenClSymbolic):
 
+    @debug
+    def __new__(cls, **kwds):
+        return super(OpenClEnstrophy, cls).__new__(cls, **kwds)
+
     @debug
     def __init__(self, **kwds):
         super(OpenClEnstrophy, self).__init__(**kwds)
@@ -48,7 +53,7 @@ class OpenClEnstrophy(EnstrophyBase, OpenClSymbolic):
         super(OpenClEnstrophy, self).setup(work)
         (self.WdotW_kernel, self.WdotW_update_parameters) = self.symbolic_kernels['WdotW']
         self.sum_kernel = self.dWdotW.backend.sum(a=self.dWdotW.sdata,
-                                build_kernel_launcher=True, async=True)
+                                build_kernel_launcher=True, synchronize=False)
 
     @op_apply
     def apply(self, **kwds):
diff --git a/hysop/backend/device/opencl/operator/external_force.py b/hysop/backend/device/opencl/operator/external_force.py
index 6bbce34dbd2f6df4a2f8bd5beba1b8b1c810efbd..35f0b356b74df7ec72e31686ecab90ea9d101bb5 100644
--- a/hysop/backend/device/opencl/operator/external_force.py
+++ b/hysop/backend/device/opencl/operator/external_force.py
@@ -28,20 +28,29 @@ class OpenClSpectralExternalForce(SpectralExternalForceOperatorBase, OpenClSymbo
     """
     Operator to compute the curl of a symbolic expression.
     """
-    
+
+    @debug
+    def __new__(cls, Fext, **kwds):
+        return super(OpenClSpectralExternalForce, cls).__new__(cls, Fext=Fext, **kwds)
+
     @debug
     def __init__(self, Fext, **kwds):
         check_instance(Fext, SymbolicExternalForce)
         super(OpenClSpectralExternalForce, self).__init__(Fext=Fext, **kwds)
 
 
-
 class SymbolicExternalForce(ExternalForce):
+    @debug
+    def __new__(cls, name, Fext, diffusion=None, **kwds):
+        return super(SymbolicExternalForce, cls).__new__(cls,
+                name=name, dim=None, Fext=Fext, **kwds)
+
+    @debug
     def __init__(self, name, Fext, diffusion=None, **kwds):
         """
         Specify an external force as a tuple of symbolic expressions.
 
-        2D ExternalForce example:  
+        2D ExternalForce example:
             1) Fext = -rho*g*e_y where rho is a field and g a constant
                 Fext = (0, -rho.s()*g)
             2) Fext = (Rs*S+C)*e_y
@@ -61,10 +70,10 @@ class SymbolicExternalForce(ExternalForce):
         Fext = tuple(Fext)
 
         super(SymbolicExternalForce, self).__init__(name=name, dim=dim, Fext=Fext, **kwds)
-        
+
         diffusion = first_not_None(diffusion, {})
-        diffusion = {k:v for (k,v) in diffusion.iteritems() if (v is not None)}
-        for (k,v) in diffusion.iteritems():
+        diffusion = {k:v for (k,v) in diffusion.items() if (v is not None)}
+        for (k,v) in diffusion.items():
             assert k in self.input_fields(), k.short_description()
             assert isinstance(v, ScalarParameter)
         self._diffusion = diffusion
@@ -79,7 +88,7 @@ class SymbolicExternalForce(ExternalForce):
         return p0.union(p1)
     def output_params(self):
         return set()
-    
+
     def initialize(self, op):
         tg = op.new_transform_group()
         fft_fields = tuple(self.input_fields())
@@ -108,15 +117,15 @@ class SymbolicExternalForce(ExternalForce):
                     if field in self._diffusion:
                         assert field in backward_transforms, field.name
                 replace = {sf:forward_transforms[sf.field].s for sf in efields}
-                frame = replace.values()[0].frame
+                frame = next(iter(replace.values())).frame
                 e = e.xreplace(replace)
             fft_expressions += (e,)
-        
+
         if (frame is None):
             msg='Could not extract frame from expressions.'
             raise RuntimeError(frame)
         fft_expressions = to_tuple(curl(fft_expressions, frame))
-        
+
         self.tg = tg
         self.forward_transforms  = forward_transforms
         self.backward_transforms = backward_transforms
@@ -128,7 +137,7 @@ class SymbolicExternalForce(ExternalForce):
         dts  = op.dt.s
         forces = op.force.s()
         diffusion_kernels = {}
-        for (f, nu) in self.diffusion.iteritems():
+        for (f, nu) in self.diffusion.items():
             nus = nu.s
             kname = 'filter_diffusion_{}d_{}'.format(f.dim, f.name)
             Ft = self.forward_transforms[f]
@@ -141,23 +150,23 @@ class SymbolicExternalForce(ExternalForce):
             expr = Assignment(Fs, Fs / (1 - nus*dts*E))
             op.require_symbolic_kernel(kname, expr)
             diffusion_kernels[f] = kname
-        
+
         force_kernels = ()
         vorticity_kernels = ()
         assert len(op.vorticity.fields)==len(op.force.fields)==len(self.fft_expressions)
         for (Fi,Wi,e) in zip(
                 op.force.fields,
-                op.vorticity.fields, 
+                op.vorticity.fields,
                 self.fft_expressions):
             if (e==0):
                 force_kernels     += (None,)
                 vorticity_kernels += (None,)
                 continue
-            
+
             Fi_hat = self.force_backward_transforms[Fi]
             Fi_buf = Fi_hat.input_symbolic_array('{}_hat'.format(Fi.name))
             Wn     = self.tg.push_expressions(Assignment(Fi_hat, e))
-            
+
             msg='Could not extract transforms.'
             try:
                 transforms = e.atoms(AppliedSpectralTransform)
@@ -165,9 +174,9 @@ class SymbolicExternalForce(ExternalForce):
                 raise RuntimeError(msg)
             assert len(transforms)>=1, msg
 
-            fft_buffers = { Ft.s: Ft.output_symbolic_array('{}_hat'.format(Ft.field.name)) 
+            fft_buffers = { Ft.s: Ft.output_symbolic_array('{}_hat'.format(Ft.field.name))
                                 for Ft in self.forward_transforms.values() }
-            wavenumbers = { Wi: self.tg._indexed_wave_numbers[Wi] 
+            wavenumbers = { Wi: self.tg._indexed_wave_numbers[Wi]
                                 for Wi in Wn }
 
             replace = {}
@@ -175,7 +184,7 @@ class SymbolicExternalForce(ExternalForce):
             replace.update(wavenumbers)
             expr = e.xreplace(replace)
             expr = Assignment(Fi_buf, expr)
-            
+
             kname = 'compute_{}'.format(Fi.var_name)
             op.require_symbolic_kernel(kname, expr)
             force_kernels += (kname,)
@@ -195,10 +204,10 @@ class SymbolicExternalForce(ExternalForce):
 
     def discretize(self, op):
         pass
-    
+
     def get_mem_requests(self, op):
         requests = {}
-        for Fi in self.forward_transforms.keys(): 
+        for Fi in self.forward_transforms.keys():
             Ft = self.forward_transforms[Fi]
             Bt = self.backward_transforms.get(Fi, None)
             if (Bt is not None):
@@ -207,15 +216,15 @@ class SymbolicExternalForce(ExternalForce):
                 assert (Ft.output_shape == Bt.input_shape), (Ft.output_shape, Bt.input_shape)
             shape = Ft.output_shape
             dtype = Ft.output_dtype
-            request = MemoryRequest(backend=self.tg.backend, dtype=dtype, 
+            request = MemoryRequest(backend=self.tg.backend, dtype=dtype,
                                     shape=shape, nb_components=1,
                                     alignment=op.min_fft_alignment)
             name = '{}_hat'.format(Ft.field.name)
             requests[name] = request
         return requests
-    
+
     def pre_setup(self, op, work):
-        for Fi in self.forward_transforms.keys(): 
+        for Fi in self.forward_transforms.keys():
             Ft = self.forward_transforms[Fi]
             Bt = self.backward_transforms.get(Fi, None)
             dtmp, = work.get_buffer(op, '{}_hat'.format(Ft.field.name))
@@ -236,14 +245,14 @@ class SymbolicExternalForce(ExternalForce):
                 kwds = update_params()
                 return knl(queue=queue, **kwds)
             return kernel_launcher
-        
-        for (field, kname) in self.diffusion_kernel_names.iteritems():
+
+        for (field, kname) in self.diffusion_kernel_names.items():
             dfield = op.get_input_discrete_field(field)
             knl, update_params = op.symbolic_kernels[kname]
             diffusion_kernels[field] = build_launcher(knl, update_params)
             ghost_exchangers[field] = functools.partial(dfield.build_ghost_exchanger(),
                                                                             queue=queue)
-        
+
         if (op.Fmin is not None):
             min_values = npw.asarray(op.Fmin()).copy()
         if (op.Fmax is not None):
@@ -258,23 +267,23 @@ class SymbolicExternalForce(ExternalForce):
             Fi  = op.force.fields[i]
             dWi = op.dW.dfields[i]
             dFi = op.dF.dfields[i]
-            
+
             knl, update_params = op.symbolic_kernels[kname0]
             force_kernels[(Fi,Wi)]  = build_launcher(knl, update_params)
-            
+
             knl, update_params = op.symbolic_kernels[kname1]
             vorticity_kernels[(Fi,Wi)] = build_launcher(knl, update_params)
 
             ghost_exchangers[Wi] = functools.partial(dWi.build_ghost_exchanger(), queue=queue)
-            
-            def compute_statistic(op=op, queue=queue, dFi=dFi, 
+
+            def compute_statistic(op=op, queue=queue, dFi=dFi,
                                 min_values=min_values, max_values=max_values):
                 if (op.Fmin is not None):
                     min_values[i] = dFi.sdata.min(queue=queue).get()
                 if (op.Fmax is not None):
                     max_values[i] = dFi.sdata.max(queue=queue).get()
             compute_statistics[Fi] = compute_statistic
-        
+
         def update_statistics(op=op, min_values=min_values, max_values=max_values):
             if (op.Fmin is not None):
                 op.Fmin.value = min_values
@@ -282,7 +291,7 @@ class SymbolicExternalForce(ExternalForce):
                 op.Fmax.value = max_values
             if (op.Finf is not None):
                 op.Finf.value = npw.maximum(npw.abs(min_values), npw.abs(max_values))
-                
+
         assert len(diffusion_kernels) == len(self.diffusion) == len(self.backward_transforms)
         assert len(vorticity_kernels) == len(force_kernels) == len(self.force_backward_transforms)
         assert len(ghost_exchangers) == len(diffusion_kernels) + len(vorticity_kernels)
@@ -293,16 +302,16 @@ class SymbolicExternalForce(ExternalForce):
         self.ghost_exchangers   = ghost_exchangers
         self.compute_statistics = compute_statistics
         self.update_statistics  = update_statistics
-    
+
     @op_apply
     def apply(self, op, **kwds):
-        for (field, Ft) in self.forward_transforms.iteritems():
+        for (field, Ft) in self.forward_transforms.items():
             evt = Ft()
             if (field in self.backward_transforms):
                 evt = self.diffusion_kernels[field]()
                 evt = self.backward_transforms[field]()
                 evt = self.ghost_exchangers[field]()
-        
+
         for (Fi,Wi) in self.force_kernels.keys():
             evt = self.force_kernels[(Fi,Wi)]()
             evt = self.force_backward_transforms[Fi]()
@@ -323,17 +332,17 @@ class SymbolicExternalForce(ExternalForce):
 
     def short_description(self):
         return 'SymbolicExternalForce[name={}]'.format(self.name)
-    
+
     def long_description(self):
         sep = '\n      *'
         expressions = sep + sep.join('F{} = {}'.format(x,e) for (x,e) in zip('xyz',self.Fext))
-        diffusion = sep + sep.join('{}: {}'.format(f.pretty_name, p.pretty_name) 
-                                        for (f,p) in self.diffusion.iteritems())
+        diffusion = sep + sep.join('{}: {}'.format(f.pretty_name, p.pretty_name)
+                                        for (f,p) in self.diffusion.items())
         input_fields  = ', '.join(f.pretty_name for f in self.input_fields())
         output_fields = ', '.join(f.pretty_name for f in self.output_fields())
         input_params  = ', '.join(p.pretty_name for p in self.input_params())
         output_params = ', '.join(p.pretty_name for p in self.output_params())
-        
+
         ss = \
         '''SymbolicExternalForce:
     name:          {}
@@ -345,8 +354,8 @@ class SymbolicExternalForce(ExternalForce):
     output_fields: {}
     input_params:  {}
     output_params: {}
-        '''.format(self.name, self.pretty_name, 
-                expressions, diffusion, 
+        '''.format(self.name, self.pretty_name,
+                expressions, diffusion,
                 input_fields, output_fields,
                 input_params, output_params)
         return ss
diff --git a/hysop/backend/device/opencl/operator/integrate.py b/hysop/backend/device/opencl/operator/integrate.py
index 6fb77abe3f8d3f25d05716b8ad6140c66ce9ce7a..6dd22e861b4ac622e4d480d63a396750a6d28cfc 100644
--- a/hysop/backend/device/opencl/operator/integrate.py
+++ b/hysop/backend/device/opencl/operator/integrate.py
@@ -6,10 +6,6 @@ import pyopencl
 
 class OpenClIntegrate(IntegrateBase, OpenClOperator):
 
-    @debug
-    def __init__(self, **kwds):
-        super(OpenClIntegrate, self).__init__(**kwds)
-
     @debug
     def get_field_requirements(self):
         # force 0 ghosts for the reduction (pyopencl reduction kernel)
@@ -21,11 +17,10 @@ class OpenClIntegrate(IntegrateBase, OpenClOperator):
     @debug
     def setup(self, work):
         super(OpenClIntegrate, self).setup(work)
-        if self.expr is None:
+        if (self.expr is None):
             self.sum_kernels = tuple(
-                self.dF.backend.sum(a=self.dF.data[i],
-                                    build_kernel_launcher=True, async=True)
-                for i in xrange(self.dF.nb_components))
+                self.dF.backend.sum(a=self.dF.data[i], build_kernel_launcher=True, synchronize=False)
+                for i in range(self.dF.nb_components))
         else:
             from hysop.backend.device.codegen.base.variables import dtype_to_ctype
             self.sum_kernels = tuple(
@@ -36,7 +31,7 @@ class OpenClIntegrate(IntegrateBase, OpenClOperator):
                     reduce_expr="a+b",
                     map_expr=self.expr,
                     arguments="__global {} *x".format(dtype_to_ctype(self.dF.dtype)))
-                for i in xrange(self.dF.nb_components))
+                for i in range(self.dF.nb_components))
 
     @op_apply
     def apply(self, **kwds):
diff --git a/hysop/backend/device/opencl/operator/memory_reordering.py b/hysop/backend/device/opencl/operator/memory_reordering.py
index 6a889d9f1d52f54409fceb31d73baaad684b6491..f770b060a3a5078703112e6396354b84ad08b485 100644
--- a/hysop/backend/device/opencl/operator/memory_reordering.py
+++ b/hysop/backend/device/opencl/operator/memory_reordering.py
@@ -10,7 +10,11 @@ class OpenClMemoryReordering(MemoryReorderingBase, OpenClOperator):
     """
 
     @debug
-    def __init__(self, **kwds): 
+    def __new__(cls, **kwds):
+        return super(OpenClMemoryReordering, cls).__new__(cls, **kwds)
+
+    @debug
+    def __init__(self, **kwds):
         """Initialize a MemoryReordering operator on the opencl backend."""
         super(OpenClMemoryReordering, self).__init__(**kwds)
         msg='FATAL ERROR: OpenClMemoryReordering has been deprecated.'
@@ -22,6 +26,6 @@ class OpenClMemoryReordering(MemoryReorderingBase, OpenClOperator):
         super(OpenClMemoryReordering,self).apply(**kwds)
         pass
 
-    def check_dfield_memory_order(self, dfield): 
+    def check_dfield_memory_order(self, dfield):
         # force no checks
         pass
diff --git a/hysop/backend/device/opencl/operator/poisson.py b/hysop/backend/device/opencl/operator/poisson.py
index 8f7607b2d31f32e1f38f5668e2f99c4f53a87db3..de60ea3d291c94cc3a58543282c9f66a04646d52 100644
--- a/hysop/backend/device/opencl/operator/poisson.py
+++ b/hysop/backend/device/opencl/operator/poisson.py
@@ -18,16 +18,20 @@ class OpenClPoisson(PoissonOperatorBase, OpenClSymbolic):
     """
     Solves the poisson equation using an OpenCL FFT backend.
     """
-    
+
+    @debug
+    def __new__(cls, **kwds):
+        return super(OpenClPoisson, cls).__new__(cls, **kwds)
+
     @debug
     def __init__(self, **kwds):
         super(OpenClPoisson, self).__init__(**kwds)
-        
+
         kernel_names = ()
         for (i,(Ft,Wn)) in enumerate(zip(self.forward_transforms, self.wave_numbers)):
             Fhs = Ft.output_symbolic_array('F{}_hat'.format(i))
             indices = local_indices_symbols[:Fhs.dim]
-            
+
             kname = 'filter_poisson_{}d_{}'.format(Fhs.dim, i)
             kernel_names += (kname,)
 
@@ -37,11 +41,11 @@ class OpenClPoisson(PoissonOperatorBase, OpenClSymbolic):
                 F += indexed_Wi
             cond = LogicalAND(*tuple(LogicalEQ(idx,0) for idx in indices))
             expr = Assignment(Fhs, Select(Fhs/F, 0, cond))
-            
+
             self.require_symbolic_kernel(kname, expr)
 
         self._kernel_names = kernel_names
-    
+
     @debug
     def setup(self, work):
         super(OpenClPoisson, self).setup(work)
diff --git a/hysop/backend/device/opencl/operator/poisson_curl.py b/hysop/backend/device/opencl/operator/poisson_curl.py
index 4f98a682eeefd41ec874c0c49669d6bd551c2f98..23c229a3791d0009707153b85619fbb1f3401c9c 100644
--- a/hysop/backend/device/opencl/operator/poisson_curl.py
+++ b/hysop/backend/device/opencl/operator/poisson_curl.py
@@ -21,7 +21,10 @@ class OpenClPoissonCurl(SpectralPoissonCurlOperatorBase, OpenClSymbolic):
     '''
     Solves the poisson-rotational equation using clFFT.
     '''
-    
+
+    def __new__(cls, **kwds):
+        return super(OpenClPoissonCurl, cls).__new__(cls, **kwds)
+
     def __init__(self, **kwds):
         super(OpenClPoissonCurl, self).__init__(**kwds)
         dim   = self.dim
@@ -29,7 +32,7 @@ class OpenClPoissonCurl(SpectralPoissonCurlOperatorBase, OpenClSymbolic):
         assert (dim in (2,3)), dim
 
         # request the poisson rotational kernel
-            
+
         W_Ft = self.W_forward_transforms
         U_Bt = self.U_backward_transforms
         W_Bt = self.W_backward_transforms
@@ -55,7 +58,7 @@ class OpenClPoissonCurl(SpectralPoissonCurlOperatorBase, OpenClSymbolic):
                 expr = Assignment(wout, win / (1 - nu*dt*F))
                 self.require_symbolic_kernel('diffusion_kernel__{}'.format(i), expr)
             Win = Wout
-        
+
         indices = local_indices_symbols[:dim]
         cond = LogicalAND(*tuple(LogicalEQ(idx,0) for idx in indices))
 
@@ -63,9 +66,9 @@ class OpenClPoissonCurl(SpectralPoissonCurlOperatorBase, OpenClSymbolic):
             exprs = ()
             dtype = find_common_dtype(*tuple(Ft.output_dtype for Ft in self.W_forward_transforms))
             Cs = self.symbolic_tmp_scalars('C', dtype=dtype, count=3)
-            for i in xrange(3):
+            for i in range(3):
                 expr = 0
-                for j in xrange(3):
+                for j in range(3):
                     e = Win[j]
                     if (i==j):
                         e = KK[j][j]*e
@@ -77,18 +80,18 @@ class OpenClPoissonCurl(SpectralPoissonCurlOperatorBase, OpenClSymbolic):
                 expr = Select(expr, 0, cond)
                 expr = Assignment(Cs[i], expr)
                 exprs += (expr,)
-            for i in xrange(3):
+            for i in range(3):
                 expr = Assignment(Wout[i], Win[i]-Cs[i])
                 exprs += (expr,)
             self.require_symbolic_kernel('projection_kernel', *exprs)
             Win = Wout
 
         exprs = ()
-        for i in xrange(wcomp):
+        for i in range(wcomp):
             F = sum(KK[i])
             expr = Assignment(Win[i], Select(Win[i]/F,0,cond))
             self.require_symbolic_kernel('poisson_kernel__{}'.format(i), expr)
-        
+
         if (dim == 2):
             assert wcomp==1
             e0 = Assignment(Uout[0], -mul(K[0][1], Win[0]))
@@ -105,7 +108,7 @@ class OpenClPoissonCurl(SpectralPoissonCurlOperatorBase, OpenClSymbolic):
         self.require_symbolic_kernel('curl_kernel__1', e1)
         if (dim==3):
             self.require_symbolic_kernel('curl_kernel__2', e2)
-    
+
 
     @debug
     def setup(self, work):
@@ -114,11 +117,11 @@ class OpenClPoissonCurl(SpectralPoissonCurlOperatorBase, OpenClSymbolic):
         self._build_projection_kernel()
         self._build_poisson_curl_kernel()
         self._build_ghost_exchangers()
-    
+
     def _build_diffusion_kernel(self):
         if self.should_diffuse:
             diffusion_filters = ()
-            for i in xrange(self.W.nb_components):
+            for i in range(self.W.nb_components):
                 knl, knl_kwds = \
                         self.symbolic_kernels['diffusion_kernel__{}'.format(i)]
                 knl = functools.partial(knl, queue=self.cl_env.default_queue)
@@ -131,17 +134,17 @@ class OpenClPoissonCurl(SpectralPoissonCurlOperatorBase, OpenClSymbolic):
 
     def _build_poisson_curl_kernel(self):
         poisson_filters = ()
-        for i in xrange(self.W.nb_components):
-            knl, __ = self.symbolic_kernels['poisson_kernel__{}'.format(i)] 
+        for i in range(self.W.nb_components):
+            knl, __ = self.symbolic_kernels['poisson_kernel__{}'.format(i)]
             Fi = functools.partial(knl, queue=self.cl_env.default_queue)
             poisson_filters += (Fi,)
-        
+
         curl_filters = ()
-        for i in xrange(self.U.nb_components):
-            knl, __ = self.symbolic_kernels['curl_kernel__{}'.format(i)] 
+        for i in range(self.U.nb_components):
+            knl, __ = self.symbolic_kernels['curl_kernel__{}'.format(i)]
             Fi = functools.partial(knl, queue=self.cl_env.default_queue)
             curl_filters += (Fi,)
-        
+
         self.poisson_filters = poisson_filters
         self.curl_filters = curl_filters
 
@@ -158,7 +161,7 @@ class OpenClPoissonCurl(SpectralPoissonCurlOperatorBase, OpenClSymbolic):
 
         exchange_U_ghosts = self.dU.exchange_ghosts(build_launcher=True)
         if (exchange_U_ghosts is not None):
-            self.exchange_U_ghosts = functools.partial(exchange_U_ghosts, 
+            self.exchange_U_ghosts = functools.partial(exchange_U_ghosts,
                     queue=self.cl_env.default_queue)
         else:
             self.exchange_U_ghosts = noop
@@ -166,7 +169,7 @@ class OpenClPoissonCurl(SpectralPoissonCurlOperatorBase, OpenClSymbolic):
         if (self.should_project or self.should_diffuse):
             exchange_W_ghosts = self.dW.exchange_ghosts(build_launcher=True)
             if (exchange_W_ghosts is not None):
-                self.exchange_W_ghosts = functools.partial(exchange_W_ghosts, 
+                self.exchange_W_ghosts = functools.partial(exchange_W_ghosts,
                         queue=self.cl_env.default_queue)
             else:
                 self.exchange_W_ghosts = noop
@@ -174,10 +177,10 @@ class OpenClPoissonCurl(SpectralPoissonCurlOperatorBase, OpenClSymbolic):
     @op_apply
     def apply(self, simulation, **kwds):
         '''Solve the PoissonCurl equation.'''
-        
+
         diffuse = self.should_diffuse
         project = self.do_project(simu=simulation)
-        
+
         for Ft in self.W_forward_transforms:
             evt = Ft(simulation=simulation)
         if diffuse:
@@ -195,5 +198,5 @@ class OpenClPoissonCurl(SpectralPoissonCurlOperatorBase, OpenClSymbolic):
             evt = Fc()
             evt = Bt(simulation=simulation)
         evt = self.exchange_U_ghosts()
-        self.update_energy(simulation=simulation)        
+        self.update_energy(simulation=simulation)
 
diff --git a/hysop/backend/device/opencl/operator/solenoidal_projection.py b/hysop/backend/device/opencl/operator/solenoidal_projection.py
index dd49bde13322db570365ba2351e23674d96efe55..730147a3285f3b9c49c6d89dc4a476751822a9ea 100644
--- a/hysop/backend/device/opencl/operator/solenoidal_projection.py
+++ b/hysop/backend/device/opencl/operator/solenoidal_projection.py
@@ -44,9 +44,9 @@ class OpenClSolenoidalProjection(SolenoidalProjectionOperatorBase, OpenClSymboli
         cond = LogicalAND(*tuple(LogicalEQ(Ik, 0) for Ik in I))
 
         exprs = ()
-        for i in xrange(3):
+        for i in range(3):
             expr = 0
-            for j in xrange(3):
+            for j in range(3):
                 e = Fin[j]
                 if (i == j):
                     e = K2s[j][j]*e
@@ -58,7 +58,7 @@ class OpenClSolenoidalProjection(SolenoidalProjectionOperatorBase, OpenClSymboli
             expr = Select(expr, 0, cond)
             expr = Assignment(Cs[i], expr)
             exprs += (expr,)
-        for i in xrange(3):
+        for i in range(3):
             expr = Assignment(Fout[i], Fin[i]-Cs[i])
             exprs += (expr,)
 
@@ -67,13 +67,13 @@ class OpenClSolenoidalProjection(SolenoidalProjectionOperatorBase, OpenClSymboli
         if self.compute_divFin:
             divFin = self.backward_divFin_transform.input_symbolic_array('divFin')
             expr = sum(ComplexMul(K1s[j][j], Fin[j]) if K1s[j][j].Wn.is_complex else K1s[j][j]*Fin[j]
-                       for j in xrange(3))
+                       for j in range(3))
             expr = Assignment(divFin, expr)
             self.require_symbolic_kernel('compute_divFin', expr)
 
         if self.compute_divFout:
             expr = sum(ComplexMul(K1s[j][j], Fout[j]) if K1s[j][j].Wn.is_complex else K1s[j][j]*Fout[j]
-                       for j in xrange(3))
+                       for j in range(3))
             divFout = self.backward_divFout_transform.input_symbolic_array('divFout')
             expr = Assignment(divFout, expr)
             self.require_symbolic_kernel('compute_divFout', expr)
diff --git a/hysop/backend/device/opencl/operator/spatial_filtering.py b/hysop/backend/device/opencl/operator/spatial_filtering.py
index f5df7d162042b3ca361d5880ed6622935257dcd3..40067a36777916f97de2b223f9e4adb629597a09 100644
--- a/hysop/backend/device/opencl/operator/spatial_filtering.py
+++ b/hysop/backend/device/opencl/operator/spatial_filtering.py
@@ -85,7 +85,7 @@ class OpenClPolynomialRestrictionFilter(PolynomialRestrictionFilterBase, OpenClO
         assert self.subgrid_restrictor.gr == gr
 
         ekg = self.elementwise_kernel_generator
-        Rr = self.subgrid_restrictor.Rr / self.subgrid_restrictor.GR
+        Rr = self.subgrid_restrictor.Rr / self.subgrid_restrictor.gr
         ghosts = np.asarray(self.subgrid_restrictor.ghosts)
 
         I = np.asarray(local_indices_symbols[:dim][::-1])
@@ -156,7 +156,7 @@ class OpenClSubgridRestrictionFilter(SubgridRestrictionFilterBase, OpenClSymboli
         dFin, dFout = self.dFin, self.dFout
         ibuffer, obuffer = dFin.sbuffer, dFout.sbuffer
         self.symbolic_input_buffer.bind_memory_object(ibuffer)
-        for i in xrange(dFin.dim):
+        for i in range(dFin.dim):
             self.symbolic_grid_ratio[i].bind_value(self.grid_ratio[i])
             self.symbolic_input_strides[i].bind_value(ibuffer.strides[i] // ibuffer.dtype.itemsize)
             self.symbolic_input_ghosts[i].bind_value(dFin.ghosts[i])
diff --git a/hysop/backend/device/opencl/operator/transpose.py b/hysop/backend/device/opencl/operator/transpose.py
index 2bd6d8307c856db849ded75f40966c66458f6cd2..67d443687a45f85245bbca53de44786cb07159aa 100644
--- a/hysop/backend/device/opencl/operator/transpose.py
+++ b/hysop/backend/device/opencl/operator/transpose.py
@@ -7,10 +7,6 @@ from hysop.backend.device.opencl.opencl_kernel_launcher import OpenClKernelListL
 
 class OpenClTranspose(TransposeOperatorBase, OpenClOperator):
 
-    @debug
-    def __init__(self, **kwds):
-        super(OpenClTranspose, self).__init__(**kwds)
-
     @debug
     def setup(self, work):
         super(OpenClTranspose, self).setup(work)
@@ -48,7 +44,7 @@ class OpenClTranspose(TransposeOperatorBase, OpenClOperator):
                                        output_buffer=output_field.sbuffer)
 
         launcher = OpenClKernelListLauncher(name=transpose.name, profiler=self._profiler)
-        for i in xrange(self.nb_components):
+        for i in range(self.nb_components):
             if compute_inplace:
                 assert hardcode_arrays
                 launcher += transpose.build_launcher(inout_base=input_field.data[i].base_data)
diff --git a/hysop/backend/hardware/hardware_backend.py b/hysop/backend/hardware/hardware_backend.py
index 21fa5ab05a1aaad52ea4acc2393367c6721fc7c8..7c80319116777024ad6c075c76e3cf07d1600498 100644
--- a/hysop/backend/hardware/hardware_backend.py
+++ b/hysop/backend/hardware/hardware_backend.py
@@ -1,19 +1,17 @@
 
 from abc import ABCMeta, abstractmethod
 
-class HardwareBackend(object):
-
-    __metaclass__ = ABCMeta
+class HardwareBackend(object, metaclass=ABCMeta):
 
     def __init__(self, hardware_topo, **kargs):
         super(HardwareBackend,self).__init__(**kargs)
         self._platforms = {}
         self._discover_platforms(hardware_topo)
-    
+
     @property
     def platforms(self):
         return self._platforms
-    
+
     @abstractmethod
     def _discover_platforms(self, hardware_topo):
         pass
diff --git a/hysop/backend/hardware/hwinfo.py b/hysop/backend/hardware/hwinfo.py
index 08eaee62f030e0dde041097b5619daeaba8a4385..3bfdf6bd3b473162f334fc4f22e3b410e30b6328 100644
--- a/hysop/backend/hardware/hwinfo.py
+++ b/hysop/backend/hardware/hwinfo.py
@@ -1,11 +1,11 @@
-
-import sys
+import sys, subprocess, math, copy
+import itertools as it
+import numpy as np
 from xml.etree import cElementTree as ElementTree
 from abc import abstractmethod, ABCMeta
 
 from hysop import vprint
 from hysop.backend import __HAS_OPENCL_BACKEND__
-from hysop.deps import subprocess, math, np, it, copy
 
 from hysop.tools.enum import EnumFactory
 from hysop.tools.decorators import requires_cmd
@@ -18,14 +18,12 @@ from hysop.tools.cache import load_data_from_cache, update_cache, machine_id
 from hysop.backend.hardware.pci_ids import PCIIds
 from hysop.core.mpi import is_multihost, interhost_comm, host_rank
 
-class TopologyObject(object):
+class TopologyObject(object, metaclass=ABCMeta):
     """
     XML parser base to parse lstopo (hardware info) xml output.
     See hwloc(7) and lstopo(1) man.
     """
 
-    __metaclass__ = ABCMeta
-        
     _print_indent = ' '*2
 
     def __init__(self, parent, element, pciids=None):
@@ -42,16 +40,16 @@ class TopologyObject(object):
         for child in element:
             self._handle_child(child)
         self._post_init()
-    
+
     def indent(self, string, extra_spaces=0):
         return prepend(string, self._print_indent + ' '*extra_spaces)
 
     def pop_attr(self, name):
         return self._attributes.pop(name)
-            
+
     def attributes(self):
         return self._attributes
-    
+
     def attribute(self, name, default=None, cast=lambda x:x):
         if name in self._attributes:
             return cast(self._attributes[name])
@@ -60,7 +58,7 @@ class TopologyObject(object):
 
     def update_attributes(self, attr):
         self._attributes.update(attr)
-    
+
     def cpu_set(self):
         return  self.attribute('cpuset')
 
@@ -75,7 +73,7 @@ class TopologyObject(object):
             return self.physical_cores_count()
         else:
             return self.parent._physical_cores_count()
-    
+
     def _processing_units_count(self):
         if self._parsed_type() in ['Machine']:
             return self.processing_units_count()
@@ -87,9 +85,9 @@ class TopologyObject(object):
         mask_length = self._processing_units_count()
         _cpuset = '|{0:0{length}b}|'.format(cpuset, length=mask_length)
         _cpuset = _cpuset.replace('0','.').replace('1', 'x')
-        _cpuset += '  0x{0:0{length}x}'.format(cpuset, length=mask_length/4)
+        _cpuset += '  0x{0:0{length}x}'.format(cpuset, length=mask_length//4)
         return _cpuset
-    
+
     def all_cpu_set(self):
         return '0x{:x} (complete=0x{:x}, online=0x{:x}, allowed=0x{:x})'.format(
                 self.attribute('cpuset'),
@@ -97,15 +95,15 @@ class TopologyObject(object):
                 self.attribute('online_cpuset'),
                 self.attribute('allowed_cpuset'),
             )
-    
+
     def os_index(self):
         return self.attribute('os_index')
-    
+
     def print_attributes(self):
-        print '{} attributes:'.format(self.__class__.__name__)
-        for k,v in self.attributes().iteritems():
-            print ' {} -> {}'.format(k,v)
-        print
+        print('{} attributes:'.format(self.__class__.__name__))
+        for k,v in self.attributes().items():
+            print(' {} -> {}'.format(k,v))
+        print()
 
     def _post_init(self):
         pass
@@ -123,11 +121,11 @@ class TopologyObject(object):
             self._parse_page_type(attr)
         else:
             raise ValueError('Unknown tag {}.'.format(tag))
-    
+
     @abstractmethod
     def _parsed_type(self):
         pass
-    
+
     @abstractmethod
     def _parse_object(self, it):
         pass
@@ -139,7 +137,7 @@ class TopologyObject(object):
             msg='Type \'{}\' does not match parsed type \'{}\'.'
             msg=msg.format(_type, self._parsed_type())
             raise ValueError(msg)
-        for k,v in attributes.iteritems():
+        for k,v in attributes.items():
             if (k.find('cpuset')>=0) or (k.find('nodeset')>=0):
                 vv = tuple(int(x,16) if x != '' else 0 for x in v.split(','))
                 v = 0
@@ -150,7 +148,7 @@ class TopologyObject(object):
                 info[k] = int(v)
             elif k in ['pci_link_speed']:
                 info[k] = float(v)
-            elif k in ['bridge_pci', 'bridge_type', 'depth', 
+            elif k in ['bridge_pci', 'bridge_type', 'depth',
                     'pci_type', 'pci_busid', 'name']:
                 info[k] = v.strip()
             else:
@@ -176,21 +174,20 @@ class TopologyObject(object):
             assert v.tag == 'latency'
             val = v.attrib['value']
             values.append(val)
-        
+
         assert len(values) == nbobjs*nbobjs
         self._attributes['distances'] = np.reshape(np.asarray(values, dtype=np.float32),(nbobjs,nbobjs,))
 
 
-class HardwareStatistics(object):
-    __metaclass__ = ABCMeta
+class HardwareStatistics(object, metaclass=ABCMeta):
 
     def _minmax(self, values, op=lambda x: x, dtype=np.int32):
-        return 'mean={}, min={}, max={}'.format(op(np.mean(values).astype(dtype)), 
-                                                op(np.min(values).astype(dtype)), 
+        return 'mean={}, min={}, max={}'.format(op(np.mean(values).astype(dtype)),
+                                                op(np.min(values).astype(dtype)),
                                                 op(np.max(values).astype(dtype)))
-    
+
     def _total_minmax(self, values, op=lambda x: x, dtype=np.int32):
-        return '{:<10}  ({})'.format(op(np.sum(values).astype(dtype)), 
+        return '{:<10}  ({})'.format(op(np.sum(values).astype(dtype)),
                 self._minmax(values=values, op=op, dtype=dtype))
 
     def _pct(self, values):
@@ -230,7 +227,7 @@ class TopologyStatistics(HardwareStatistics):
         self._backend_statistics = {}
         if (topo is not None):
             self._count+=1
-            
+
             machine = topo.machine()
             self._numa_nodes.append(machine.numa_nodes_count())
             for node in machine.numa_nodes():
@@ -238,7 +235,7 @@ class TopologyStatistics(HardwareStatistics):
                 self._packages.append(node.cpu_packages_count())
                 self._physical_cores.append(node.physical_cores_count())
                 self._processing_units.append(node.processing_units_count())
-            
+
             self._has_opencl.append(topo.has_opencl())
             self._has_cuda.append(topo.has_cuda())
             if any(self._has_opencl):
@@ -263,7 +260,7 @@ class TopologyStatistics(HardwareStatistics):
         self._processing_units  += other._processing_units
         self._has_opencl        += other._has_opencl
         self._has_cuda          += other._has_cuda
-        for (k,v) in other._backend_statistics.iteritems():
+        for (k,v) in other._backend_statistics.items():
             if k in self._backend_statistics:
                 self._backend_statistics[k] += v
             else:
@@ -275,11 +272,11 @@ class TopologyStatistics(HardwareStatistics):
         inc = ' '*increment
         if 'opencl' in self._backend_statistics:
             opencl = '\n'+self._backend_statistics['opencl'].to_string(indent+2*increment,
-                    increment) 
+                    increment)
         else:
             opencl = ''
         if 'cuda' in self._backend_statistics:
-            cuda = '\n'+self._backend_statistics['cuda'].to_string(indent+2*increment,increment) 
+            cuda = '\n'+self._backend_statistics['cuda'].to_string(indent+2*increment,increment)
         else:
             cuda = ''
         msg=\
@@ -305,7 +302,7 @@ ind=ind, inc=inc)
 
 
 class Topology(TopologyObject):
-    
+
     @classmethod
     @requires_cmd('lstopo')
     def parse(cls, pciids=None, override_cache=False):
@@ -343,10 +340,10 @@ class Topology(TopologyObject):
             # ss += '\n    Cuda backend support not implemented yet.'
             # ss += '\n    Cuda backend not found on this system.'
         return ss
-    
+
     def _post_init(self):
         self._find_logical_devices()
-    
+
     def machine(self):
         return self._machine
 
@@ -376,11 +373,11 @@ class Topology(TopologyObject):
         and bind them to physical devices
         """
         if __HAS_OPENCL_BACKEND__:
-            from hysop.backend.device.opencl.opencl_hardware_backend import OpenClBackend 
+            from hysop.backend.device.opencl.opencl_hardware_backend import OpenClBackend
             self._opencl_backend = OpenClBackend(hardware_topo=self.machine())
-        else: 
+        else:
             self._opencl_backend = None
-    
+
     def __str__(self):
         return '{}\n{}'.format(self.machine(),self._backend_report())
 
@@ -390,4 +387,5 @@ if __name__ == '__main__':
     else:
         pciids = PCIIds(path=sys.argv[1])
     topo = Topology.parse(pciids)
-    print topo
+    print(topo)
+
diff --git a/hysop/backend/hardware/machine.py b/hysop/backend/hardware/machine.py
index 80c910198cb3d86280fdb5e262d5eabf92a0dd45..c4a2eab443e15177a322433a2a865cb879549d8a 100644
--- a/hysop/backend/hardware/machine.py
+++ b/hysop/backend/hardware/machine.py
@@ -1,7 +1,5 @@
-
-from hysop.deps import platform
+import platform, math, itertools as it
 from hysop.constants import System
-from hysop.deps import it, math
 from hysop.tools.units import bytes2str
 from hysop.tools.contexts import printoptions
 from hysop.tools.cache import machine_id
@@ -11,7 +9,7 @@ from hysop.backend.hardware.pci import PciBridge
 
 class NumaNode(TopologyObject):
     """
-    A set of processors around memory which the processors can directly access. 
+    A set of processors around memory which the processors can directly access.
     """
     def __init__(self, parent, node, package=None, bridge=None):
         if (node is not None):
@@ -39,7 +37,7 @@ class NumaNode(TopologyObject):
         mask_length = int(math.ceil(math.log(self.parent.full_node_set(),2)))
         _nodeset = '|{0:0{length}b}|'.format(nodeset, length=mask_length)
         _nodeset = _nodeset.replace('0','.').replace('1', 'x')
-        _nodeset += '  0x{0:0{length}x}'.format(nodeset, length=mask_length/4)
+        _nodeset += '  0x{0:0{length}x}'.format(nodeset, length=mask_length//4)
         return _nodeset
 
 
@@ -54,7 +52,7 @@ class NumaNode(TopologyObject):
         return len(self._bridges)
     def pci_devices_count(self):
         return sum(x.pci_device_count() for x in self._bridges)
-    
+
     def cpu_packages(self):
         return self.packages()
     def pci_devices(self):
@@ -80,10 +78,10 @@ node memory: {}
             for bridge in self.bridges():
                 content += self.indent(bridge.to_string(expand_pci_tree=expand_pci_tree))
         return header + self.indent(content)
-    
+
     def _parsed_type(self):
         return 'NUMANode'
-    
+
     def _parse_object(self, it):
         _type = it.attrib['type']
         if _type == 'Package':
@@ -110,7 +108,7 @@ class Machine(TopologyObject):
     """
     Class describing a physical machine (a set of processors and memory).
     """
-    
+
     def __init__(self, parent, machine):
         if platform.system() == 'Windows':
             system = System.WINDOWS
@@ -121,30 +119,30 @@ class Machine(TopologyObject):
         else:
             msg='Unknown platform system {}.'.format(platform.system())
             raise ValueError(msg)
-        
+
         self._system = system
         self._bridge, self._package = None, None
         self._numa_nodes = []
 
         super(Machine,self).__init__(parent, machine)
-    
+
     def _post_init(self):
             if self._package:
                 self._attributes['nodeset'] = 1
-                attr = {'local_memory': self.pop_attr('local_memory'), 
+                attr = {'local_memory': self.pop_attr('local_memory'),
                         'os_index': self.pop_attr('os_index'),
                         'cpuset':  self.cpu_set(),
                         'nodeset': 1}
-                self._numa_nodes = [ NumaNode.from_package(parent=self, attributes=attr, 
+                self._numa_nodes = [ NumaNode.from_package(parent=self, attributes=attr,
                                     package=self._package, bridge=self._bridge) ]
             elif self._numa_nodes:
                 pass
             else:
                 raise RuntimeError('Something went wrong during parsing.')
-    
+
     def _parsed_type(self):
         return 'Machine'
-    
+
     def _handle_child(self, child):
         if child.tag=='page_type':
             self._parse_page_type(child)
@@ -167,7 +165,7 @@ class Machine(TopologyObject):
             else:
                 raise ValueError('Unknown object type {} obtained during Machine parsing.'.format(_type))
 
-    
+
     ## Machine information
     def system(self):
         return self._system
@@ -205,9 +203,9 @@ class Machine(TopologyObject):
     def pci_devices(self, vendor_id=None, device_id=None):
         devices = it.chain.from_iterable([x.pci_devices()  for x in self.numa_nodes()])
         if (vendor_id is not None):
-            devices = it.ifilter(lambda x: x.pci_system_vendor_id() == vendor_id, devices)
+            devices = filter(lambda x: x.pci_system_vendor_id() == vendor_id, devices)
         if (device_id is not None):
-            devices = it.ifilter(lambda x: x.pci_system_device_id() == device_id, devices)
+            devices = filter(lambda x: x.pci_system_device_id() == device_id, devices)
         return tuple(devices)
 
     def architecture(self):
@@ -217,7 +215,7 @@ class Machine(TopologyObject):
         return '{} {}'.format(
                     self.attribute('backend'),
                     self.attribute('architecture'))
-    
+
     def os(self):
         return '{} {} ({})'.format(
                     self.attribute('os_name'),
@@ -246,7 +244,7 @@ class Machine(TopologyObject):
         return self.to_string(expand_pci_tree=True)
 
     def to_string(self, expand_pci_tree=True):
-        header = '== Physical Hardware Report =='  
+        header = '== Physical Hardware Report =='
         content = \
 '''
 bios:     {}
@@ -282,6 +280,6 @@ physical memory:  {}
 
         content += '\nHardware info gathered with {}'.format(self.hwinfo_version())
 
-        footer = '\n====================' 
-        
+        footer = '\n===================='
+
         return header + self.indent(content) + footer
diff --git a/hysop/backend/hardware/pci.py b/hysop/backend/hardware/pci.py
index 094312720a7ddb872b122f1ab86901ce6ba2e83d..28de9168e4143cc000b9746392e75be7a406313d 100644
--- a/hysop/backend/hardware/pci.py
+++ b/hysop/backend/hardware/pci.py
@@ -1,38 +1,39 @@
-from hysop.deps import re, it
+import re, itertools as it
+
 from hysop.tools.enum import EnumFactory
 from hysop.tools.string_utils import prepend
 from hysop.backend.hardware.hwinfo import TopologyObject, bytes2str
 
 OperatingSystemDeviceType = EnumFactory.create('OperatingSystemDeviceType', {
-    'BLOCK_DEVICE':       0x0, #Operating system block device. 
-                               #For instance "sda" on Linux. 
+    'BLOCK_DEVICE':       0x0, #Operating system block device.
+                               #For instance "sda" on Linux.
 
-    'GPU_DEVICE':         0x1, #Operating system GPU device. 
+    'GPU_DEVICE':         0x1, #Operating system GPU device.
                                #For instance ":0.0" for a GL display, "card0" for a Linux DRM dev.
 
-    'NETWORK_DEVICE':     0x2, #Operating system network device. 
-                               #For instance the "eth0" interface on Linux. 
+    'NETWORK_DEVICE':     0x2, #Operating system network device.
+                               #For instance the "eth0" interface on Linux.
 
-    'OPENFABRICS_DEVICE': 0x3, #Operating system openfabrics device. 
-                               #For instance the "mlx4_0" InfiniBand HCA device on Linux. 
+    'OPENFABRICS_DEVICE': 0x3, #Operating system openfabrics device.
+                               #For instance the "mlx4_0" InfiniBand HCA device on Linux.
 
-    'DMA_DEVICE':         0x4, #Operating system dma engine device. 
-                               #For instance the "dma0chan0" DMA channel on Linux. 
+    'DMA_DEVICE':         0x4, #Operating system dma engine device.
+                               #For instance the "dma0chan0" DMA channel on Linux.
 
-    'COPROCESSOR_DEVICE': 0x5  #Operating system co-processor device. 
-                               #For instance "mic0" for a Xeon Phi (MIC) on Linux, 
-                               # "opencl0d0" for a OpenCL device, "cuda0" for a CUDA device. 
+    'COPROCESSOR_DEVICE': 0x5  #Operating system co-processor device.
+                               #For instance "mic0" for a Xeon Phi (MIC) on Linux,
+                               # "opencl0d0" for a OpenCL device, "cuda0" for a CUDA device.
 })
 """
 Type of an Operating System device (pci device function).
 See hwloc documentation on type 'hwloc_obj_osdev_type_e'.
 """
 
-    
+
 class OperatingSystemDevice(TopologyObject):
     def __init__(self, parent, device):
         super(OperatingSystemDevice,self).__init__(parent, device)
-    
+
     def _parsed_type(self):
         return 'OSDev'
     def _parse_object(self, it):
@@ -62,12 +63,12 @@ class OperatingSystemDevice(TopologyObject):
     def linux_device_id(self):
         assert self.osdev_type()==0
         return self.attribute('linux_device_id')
-    
+
     # type 2 specific
     def address(self):
         assert self.osdev_type()==2
         return self.attribute('address')
-    
+
     # type 6 specific
     def backend(self):
         assert self.osdev_type()==5
@@ -86,7 +87,7 @@ class OperatingSystemDevice(TopologyObject):
                 'multi_processors': multi_processors,
                 'cores_per_mp': cores_per_mp,
                 'cores': cores_per_mp*multi_processors,
-                'global_memory_size': ', '.join(tuple(bytes2str(int(mem)*1024*1024/1000) for mem in global_mem)),
+                'global_memory_size': ', '.join(tuple(bytes2str(int(mem)*1024*1024//1000) for mem in global_mem)),
                 'shared_memory_size_per_mp': ', '.join(tuple(bytes2str(int(mem)*1000) for mem in shared_mem_per_mp)),
                 'l2_cache_size': ', '.join(tuple(bytes2str(int(mem)*1000) for mem in l2_cache_size)),
             }
@@ -97,7 +98,7 @@ class OperatingSystemDevice(TopologyObject):
     def __str__(self):
         _type = self.osdev_type()
         osdev_type = OperatingSystemDeviceType[_type]
-        if (osdev_type == OperatingSystemDeviceType.BLOCK_DEVICE): 
+        if (osdev_type == OperatingSystemDeviceType.BLOCK_DEVICE):
             return 'block_device: {} {} /dev/{}'.format(self.type(), self.model(), self.name())
         elif (osdev_type == OperatingSystemDeviceType.GPU_DEVICE):
             return 'gpu_device: {}'.format(self.name())
@@ -113,7 +114,7 @@ class OperatingSystemDevice(TopologyObject):
             backend_info = self.backend_info()
             if backend_info:
                 content=''
-                for k,v in backend_info.iteritems():
+                for k,v in backend_info.items():
                     content += '\n*{}: {}'.format(k,v)
                 return header+self.indent(content)
             else:
@@ -122,20 +123,20 @@ class OperatingSystemDevice(TopologyObject):
             self.print_attributes()
             raise ValueError('Unimplemented osdev printing for type {} ({}).'.format(
                 osdev_type, _type))
-        return content 
+        return content
 
 class PciDevice(TopologyObject):
     def __init__(self, parent, device):
         self._os_devices = []
         super(PciDevice,self).__init__(parent, device)
-    
+
     def leaf_pci_devices(self):
         return [self]
 
     def _post_init(self):
         pci_type = self.pci_type()
-        regexp =  '([a-f0-9]{4})\s+\[([a-f0-9]{4}):([a-f0-9]{4})\]\s+'
-        regexp += '\[([a-f0-9]{4}):([a-f0-9]{4})\]\s+([a-f0-9]{2})'
+        regexp =  r'([a-f0-9]{4})\s+\[([a-f0-9]{4}):([a-f0-9]{4})\]\s+'
+        regexp += r'\[([a-f0-9]{4}):([a-f0-9]{4})\]\s+([a-f0-9]{2})'
         regexp = re.compile(regexp)
         match  = re.match(regexp, pci_type)
         if not match:
@@ -144,7 +145,7 @@ class PciDevice(TopologyObject):
             raise ValueError(msg)
 
         pci_device_class_id = match.group(1)
-        
+
         vendor_id           = match.group(2)
         device_id           = match.group(3)
 
@@ -152,9 +153,9 @@ class PciDevice(TopologyObject):
         subdevice_id        = match.group(5)
 
         revision            = match.group(6)
-        
+
         pci_device_class = self.pciids.find_device_class_by_id(pci_device_class_id)
-        
+
         vendor = self.pciids.find_vendor(vendor_id)
         if not vendor:
             vendor = vendor_id
@@ -179,20 +180,20 @@ class PciDevice(TopologyObject):
         self._attributes['pci_subsystem_vendor_sid']   = subvendor_id
         self._attributes['pci_subsystem_device_sid']   = subdevice_id
         self._attributes['pci_device_revision_string'] = revision
-        
+
         self._attributes['pci_device_class_id']       = int(pci_device_class_id,16)
         self._attributes['pci_system_vendor_id']      = int(vendor_id, 16)
         self._attributes['pci_system_device_id']      = int(device_id, 16)
         self._attributes['pci_subsystem_vendor_id']   = int(subvendor_id, 16)
         self._attributes['pci_subsystem_device_id']   = int(subdevice_id, 16)
         self._attributes['pci_device_revision_value'] = int(revision, 16)
-       
+
         self._attributes['pci_device_class']     = pci_device_class
         self._attributes['pci_system_vendor']    = vendor
         self._attributes['pci_system_device']    = device
         self._attributes['pci_subsystem_vendor'] = subvendor
         self._attributes['pci_subsystem_device'] = subdevice
-        
+
     def pci_link_speed(self):
         return self.attribute('pci_link_speed')
     def pci_busid(self):
@@ -208,7 +209,7 @@ class PciDevice(TopologyObject):
         return self.pci_system_vendor_id()
     def device_id(self):
         return self.pci_system_device_id()
-    
+
     def subdevices(self):
         return self.operating_system_devices()
     def subdevices_count(self):
@@ -225,40 +226,40 @@ class PciDevice(TopologyObject):
         return self.attribute('pci_device_revision_value')
     def pci_device_revision_str(self):
         return self.attribute('pci_device_revision_string')
-    
+
     def pci_system_vendor_id(self):
         return self.attribute('pci_system_vendor_id')
     def pci_system_vendor_sid(self):
         return self.attribute('pci_system_vendor_sid')
     def pci_system_vendor(self):
         return self.attribute('pci_system_vendor')
-    
+
     def pci_system_device_id(self):
         return self.attribute('pci_system_device_id')
     def pci_system_device_sid(self):
         return self.attribute('pci_system_device_sid')
     def pci_system_device(self):
         return self.attribute('pci_system_device')
-    
+
     def pci_subsystem_vendor_id(self):
         return self.attribute('pci_subsystem_vendor_id')
     def pci_subsystem_vendor_sid(self):
         return self.attribute('pci_subsystem_vendor_sid')
     def pci_subsystem_vendor(self):
         return self.attribute('pci_subsystem_vendor')
-    
+
     def pci_subsystem_device_id(self):
         return self.attribute('pci_subsystem_device_id')
     def pci_subsystem_device_sid(self):
         return self.attribute('pci_subsystem_device_sid')
     def pci_subsystem_device(self):
         return self.attribute('pci_subsystem_device')
-    
+
     def operating_system_devices_count(self):
         return len(self._os_devices)
     def operating_system_devices(self):
         return self._os_devices
-    
+
     def to_string(self, expand_pci_tree=True, **kargs):
         if expand_pci_tree:
             header = '{} {}'.format(self.pci_busid(), self.pci_device_class())
@@ -276,7 +277,7 @@ class PciDevice(TopologyObject):
             content += '\n'
             return header + prepend(content, 5*' ')
         else:
-            return '{} {} ({})'.format(self.pci_busid(), 
+            return '{} {} ({})'.format(self.pci_busid(),
                                   self.device(),
                                   self.pci_device_class().name)
 
@@ -299,7 +300,7 @@ class PciBridge(TopologyObject):
         super(PciBridge, self).__init__(parent, bridge)
 
     def pci_devices(self, split=False):
-        devs = sorted(self._pci_devices, key=lambda x: x.os_index())
+        devs = tuple(sorted(self._pci_devices, key=lambda x: x.os_index()))
         if split:
             devices = [dev for dev in devs if isinstance(dev,PciBridge)]
             devices +=[dev for dev in devs if isinstance(dev,PciDevice)]
@@ -325,13 +326,13 @@ class PciBridge(TopologyObject):
         header = 'Bridge {}'.format(self.bridge_pci())
         content=''
         devices = self.pci_devices(split=True)
-        
+
         is_root = (self.bridge_depth()==0)
         if is_root:
             prefix = 'x-'
         else:
             prefix = ''
-        
+
         if (not is_root) and (is_last):
             extra_pad = 3
         else:
@@ -341,12 +342,12 @@ class PciBridge(TopologyObject):
             extra_bar = '|\n'
         else:
             extra_bar = ''
-        
+
         for dev_id, pci_device in enumerate(devices[:-1]):
             pci_device = pci_device.to_string(expand_pci_tree)
             pci_device = pci_device.split('\n')
             pci_device[0] = '|--'+pci_device[0]
-            for i in xrange(1,len(pci_device)):
+            for i in range(1, len(pci_device)):
                 pci_device[i] = '|  '+pci_device[i]
             if (dev_id==0) and expand_pci_tree:
                 pci_device = ['|']+pci_device
@@ -355,12 +356,12 @@ class PciBridge(TopologyObject):
             content += '\n'+branch
         pci_device = devices[-1].to_string(expand_pci_tree, is_last=True)
         branch = self.indent('{}|__{}'.format(extra_bar, pci_device), extra_pad+len(prefix))
-        content += '\n'+branch           
+        content += '\n'+branch
         return prefix + header + content
-    
+
     def _parsed_type(self):
         return 'Bridge'
-    
+
     def _parse_object(self, it):
         _type = it.attrib['type']
         if _type == 'PCIDev':
diff --git a/hysop/backend/hardware/pci_ids.py b/hysop/backend/hardware/pci_ids.py
index 1c19e91af252430d3d8e5107044b4b8dcd294dc8..a3ebdbd2b1e0551c1a73658f698068757e9d17e8 100644
--- a/hysop/backend/hardware/pci_ids.py
+++ b/hysop/backend/hardware/pci_ids.py
@@ -14,13 +14,13 @@ def _fmt_name(name):
         return name.strip().lower()
 
 class PciVendor(object):
-    _regexp = re.compile('([0-9a-f]{4})\s+(.*)')
+    _regexp = re.compile(r'([0-9a-f]{4})\s+(.*)')
 
     def __init__(self, vendor):
         """
         Class initializes with the raw line from pci.ids
         """
-        
+
         vendor = vendor.strip()
         match = re.match(self._regexp, vendor)
         if not match:
@@ -44,7 +44,7 @@ class PciVendor(object):
 
         subdevice = device.subdevices[subdevice_id]
         return subdevice
-    
+
     def find_device_by_name(self, device_name, subdevice_name=None):
         device_name    = _fmt_name(device_name)
         subdevice_name = _fmt_name(subdevice_name)
@@ -63,7 +63,6 @@ class PciVendor(object):
         idx = npw.argmin(npw.fromiter(
             (editdistance.eval(vn, device_name) for vn in self.device_names),
             dtype=npw.int16))
-        print self.device_names
         if isinstance(idx, npw.int64):
             device_name = self.device_names[idx]
         else:
@@ -81,17 +80,17 @@ class PciVendor(object):
         device_sid  = device.sid
         device_name = _fmt_name(device.name)
         if (device_id not in self.devices):
-            self.devices[device_id]   = device 
-            self.devices[device_sid]  = device 
-            self.devices[device_name] = device 
+            self.devices[device_id]   = device
+            self.devices[device_sid]  = device
+            self.devices[device_name] = device
             self.device_names.append(device_name)
         return device_id
-    
+
     def __str__(self):
         return self.name
 
 class PciDevice(object):
-    _regexp = re.compile('([0-9a-f]{4})\s+(.*)')
+    _regexp = re.compile(r'([0-9a-f]{4})\s+(.*)')
 
     def __init__(self, device, vendor):
         device = device.strip()
@@ -100,13 +99,13 @@ class PciDevice(object):
             msg='pci device could not match regexp: {}.'
             msg=msg.format(device)
             raise valueerror(msg)
-            
+
         self.vendor = vendor
         self.sid    = match.group(1)
         self.id     = int(self.sid, 16)
         self.name   = match.group(2)
         self.subdevices = {}
-    
+
     def __str__(self):
         return self.name
 
@@ -120,15 +119,15 @@ class PciDevice(object):
         subdevice_id  = subdevice.subdevice_id
         subdevice_sid = subdevice.subdevice_sid
         if (subdevice_id not in self.subdevices):
-            self.subdevices[subdevice_id]  = subdevice 
-            self.subdevices[subdevice_sid] = subdevice 
+            self.subdevices[subdevice_id]  = subdevice
+            self.subdevices[subdevice_sid] = subdevice
         return subdevice_id
-    
+
     def __str__(self):
         return self.name
 
 class PciSubDevice(object):
-    _regexp = re.compile('([0-9a-f]{4})\s+([0-9a-f]{4})\s+(.*)')
+    _regexp = re.compile(r'([0-9a-f]{4})\s+([0-9a-f]{4})\s+(.*)')
     def __init__(self, subdevice, device, vendor):
         subdevice = subdevice.strip()
         match = re.match(self._regexp, subdevice)
@@ -136,7 +135,7 @@ class PciSubDevice(object):
             msg='PCI device could not match regexp: {}.'
             msg=msg.format(subdevice)
             raise ValueError(msg)
-            
+
         self.vendor = vendor
         self.device = device
         self.subvendor_sid = match.group(1)
@@ -144,12 +143,12 @@ class PciSubDevice(object):
         self.subdevice_sid = match.group(2)
         self.subdevice_id  = int(self.subdevice_sid, 16)
         self.name = match.group(3)
-    
+
     def __str__(self):
         return self.name
 
 class PciDeviceClass(object):
-    _regexp = re.compile('C\s+([0-9a-f]{2})\s+(.*)')
+    _regexp = re.compile(r'C\s+([0-9a-f]{2})\s+(.*)')
 
     def __init__(self, device_class):
         device_class = device_class.strip()
@@ -158,7 +157,7 @@ class PciDeviceClass(object):
             msg='Pci device class could not match regexp: {}.'
             msg=msg.format(device_class)
             raise ValueError(msg)
-            
+
         self.sid  = match.group(1)
         self.id   = int(self.sid, 16)
         self.name = match.group(2)
@@ -174,15 +173,15 @@ class PciDeviceClass(object):
         subclass_id  = subclass.id
         subclass_sid = subclass.sid
         if (subclass_id not in self.device_subclasses):
-            self.device_subclasses[subclass_id]  = subclass 
-            self.device_subclasses[subclass_sid] = subclass 
+            self.device_subclasses[subclass_id]  = subclass
+            self.device_subclasses[subclass_sid] = subclass
         return subclass_id
-    
+
     def __str__(self):
         return self.name
 
 class PciDeviceSubClass(object):
-    _regexp = re.compile('([0-9a-f]{2})\s+(.*)')
+    _regexp = re.compile(r'([0-9a-f]{2})\s+(.*)')
 
     def __init__(self, device_subclass, device_class):
         device_subclass = device_subclass.strip()
@@ -191,7 +190,7 @@ class PciDeviceSubClass(object):
             msg='Pci device subclass could not match regexp: {}.'
             msg=msg.format(device_subclass)
             raise ValueError(msg)
-        
+
         self.device_class = device_class
         self.sid  = match.group(1)
         self.id   = int(self.sid, 16)
@@ -203,15 +202,15 @@ class PciDeviceSubClass(object):
         interface_id  = interface.id
         interface_sid = interface.sid
         if (interface_id not in self.programming_interfaces):
-            self.programming_interfaces[interface_id]  = interface 
-            self.programming_interfaces[interface_sid] = interface 
+            self.programming_interfaces[interface_id]  = interface
+            self.programming_interfaces[interface_sid] = interface
         return interface_id
-    
+
     def __str__(self):
         return '{} ({})'.format(self.name, self.device_class.name)
 
 class PciProgrammingInterface(object):
-    _regexp = re.compile('([0-9a-f]{2})\s+(.*)')
+    _regexp = re.compile(r'([0-9a-f]{2})\s+(.*)')
 
     def __init__(self, interface, device_subclass, device_class):
         interface = interface.strip()
@@ -220,9 +219,9 @@ class PciProgrammingInterface(object):
             msg='Pci device interface could not match regexp: {}.'
             msg=msg.format(interface)
             raise ValueError(msg)
-        
+
         self.device_class    = device_class
-        self.device_subclass = device_subclass 
+        self.device_subclass = device_subclass
         self.sid  = match.group(1)
         self.id   = int(self.sid, 16)
         self.name = match.group(2)
@@ -272,7 +271,7 @@ class PCIIds(object):
         self._parsed = False
 
         path = first_not_None(path, '/usr/share/hwdata/pci.ids')
-            
+
         if (url is not None):
             self.load_from_url(url=url)
         elif (path is not None):
@@ -333,7 +332,7 @@ class PCIIds(object):
             return None
         else:
             return self.vendors[vendor_name]
-    
+
     def find_vendor_by_distance_to_name(self, vendor_name):
         assert self._parsed
         vendor_name = _fmt_name(vendor_name)
@@ -344,13 +343,13 @@ class PCIIds(object):
         if not isinstance(vendor_name, str):
             vendor_name = vendor_name[0]
         return self.vendors[vendor_name]
-    
+
     def find_device(self, vendor_id, device_id, subdevice_id=None):
         vendor = self.find_vendor(vendor_id)
         if (vendor is None):
             return None
         return vendor.find_device(device_id, subdevice_id)
-    
+
     def find_device_by_name(self, vendor_name, device_name, subdevice_name=None):
         vendor = self.find_vendor_by_name(vendor_name)
         if (vendor is None):
@@ -376,8 +375,8 @@ class PCIIds(object):
             subdevice_id = None
 
         return self.find_device(vendor_id, device_id, subdevice_id)
-    
-    def find_device_class(self, device_class_id, device_subclass_id=None, 
+
+    def find_device_class(self, device_class_id, device_subclass_id=None,
             programming_interface=None):
         assert self._parsed
         if (device_class_id is None) or (device_class_id not in self.device_classes):
@@ -395,7 +394,7 @@ class PCIIds(object):
 
         programming_interface = device_subclass.programming_interfaces[programming_interface]
         return programming_interface
-    
+
     def find_device_class_by_id(self, id):
         if isinstance(id,str):
             id = int(id,16)
@@ -414,7 +413,7 @@ class PCIIds(object):
             device_subclass_id       = None
             programming_interface_id = None
 
-        return self.find_device_class(device_class_id, device_subclass_id, 
+        return self.find_device_class(device_class_id, device_subclass_id,
                 programming_interface_id)
 
     def _parse(self, content):
@@ -460,7 +459,7 @@ class PCIIds(object):
             self.vendors[vendor_name] = vendor
             self.vendor_names.append(vendor_name)
         return vendor_id
-    
+
     def _parse_device_class(self, line):
         device_class = PciDeviceClass(line)
         device_class_id   = device_class.id
diff --git a/hysop/backend/host/fortran/fortran_operator.py b/hysop/backend/host/fortran/fortran_operator.py
index a7a1d68c56d39f49c8771368c62fca1373ddf642..fbf4d5af2aa60ad849dbd159910520c0540c66c3 100644
--- a/hysop/backend/host/fortran/fortran_operator.py
+++ b/hysop/backend/host/fortran/fortran_operator.py
@@ -2,7 +2,7 @@
 discrete operators working with fortran.
 
 * :class:`~hysop.backend.host.fortran.FortranOperator` is an abstract class
-    used to provide a common interface to all discrete operators working with 
+    used to provide a common interface to all discrete operators working with
     fortran.
 """
 from abc import ABCMeta
@@ -12,12 +12,11 @@ from hysop.constants import MemoryOrdering
 from hysop.backend.host.host_operator import HostOperator
 from hysop.fields.discrete_field import DiscreteScalarFieldView
 
-class FortranOperator(HostOperator):
+class FortranOperator(HostOperator, metaclass=ABCMeta):
     """
     Abstract class for discrete operators working with fortran.
     """
-    __metaclass__ = ABCMeta
-    
+
     @debug
     def get_field_requirements(self):
         requirements = super(FortranOperator, self).get_field_requirements()
@@ -33,7 +32,7 @@ class FortranOperator(HostOperator):
         if self.discretized:
             return
         super(FortranOperator, self).discretize()
-    
+
     @debug
     def setup(self, work):
         super(FortranOperator, self).setup(work)
diff --git a/hysop/backend/host/fortran/operator/diffusion.py b/hysop/backend/host/fortran/operator/diffusion.py
index 631584a942eea3b08ffbb66eec0a0875ecf86cc0..036a2b77d86b410dda389d4cb86316e38429005d 100644
--- a/hysop/backend/host/fortran/operator/diffusion.py
+++ b/hysop/backend/host/fortran/operator/diffusion.py
@@ -10,8 +10,13 @@ from hysop.core.graph.graph import op_apply
 class DiffusionFFTW(FortranFFTWOperator):
 
     @debug
-    def __init__(self, Fin, Fout,
-                       nu, variables, dt, **kargs):
+    def __new__(cls, Fin, Fout, nu, variables, dt, **kargs):
+        return super(DiffusionFFTW, cls).__new__(cls,
+                input_fields=None, output_fields=None,
+                input_params=None, **kargs)
+
+    @debug
+    def __init__(self, Fin, Fout, nu, variables, dt, **kargs):
         """Diffusion operator base.
 
         Parameters
@@ -124,7 +129,7 @@ class DiffusionFFTW(FortranFFTWOperator):
             self.dFout.copy(self.dFin, compute_slice=True)
 
         nfields_per_call = self._nfields_per_call
-        for i in xrange(self.dFout.nb_components // nfields_per_call):
+        for i in range(self.dFout.nb_components // nfields_per_call):
             bufs = buffers[nfields_per_call*i:nfields_per_call*(i+1)]
             self._solve(nudt, ghosts, *bufs)
 
diff --git a/hysop/backend/host/fortran/operator/fortran_fftw.py b/hysop/backend/host/fortran/operator/fortran_fftw.py
index 87c552aabc51fff77872767178d61e902d5b5e97..03067d585a452f59bd99a9c889f71d34f99e28aa 100644
--- a/hysop/backend/host/fortran/operator/fortran_fftw.py
+++ b/hysop/backend/host/fortran/operator/fortran_fftw.py
@@ -25,6 +25,11 @@ class FortranFFTWOperator(FortranOperator):
     fftw2py interface.
     """
 
+    @debug
+    def __new__(cls, input_fields, output_fields, **kwds):
+        return super(FortranFFTWOperator, cls).__new__(cls,
+            input_fields=input_fields, output_fields=output_fields, **kwds)
+
     @debug
     def __init__(self, input_fields, output_fields, **kwds):
         """
@@ -44,8 +49,8 @@ class FortranFFTWOperator(FortranOperator):
         check_instance(output_fields, dict, keys=Field,
                        values=CartesianTopologyDescriptors)
 
-        nb_components =input_fields.keys()[0].nb_components
-        tensor_fields = set(input_fields.keys()+output_fields.keys())
+        nb_components = next(iter(input_fields)).nb_components
+        tensor_fields = set(input_fields.keys()).union(output_fields.keys())
         for tf in tensor_fields:
             for fi in tf.fields:
                 if (fi.dtype != HYSOP_REAL):
@@ -62,11 +67,11 @@ class FortranFFTWOperator(FortranOperator):
                     msg+='\n  lboundaries: {}'.format(fi.lboundaries)
                     msg+='\n  rboundaries: {}'.format(fi.rboundaries)
                     raise RuntimeError(msg)
-    
-        # Special case: 3D diffusion of a scalar 
+
+        # Special case: 3D diffusion of a scalar
         self._scalar_3d = (nb_components == 1) and all(tf.nb_components==1 for tf in tensor_fields)
 
-        domain = self.input_fields.keys()[0].domain
+        domain = next(iter(self.input_fields)).domain
         self.dim      = domain.dim
         self.domain   = domain
 
@@ -92,10 +97,10 @@ class FortranFFTWOperator(FortranOperator):
     def handle_topologies(self, input_topology_states, output_topology_states):
         super(FortranFFTWOperator,self).handle_topologies(input_topology_states, output_topology_states)
 
-        topology = self.input_fields.values()[0].topology
-        for (field,topoview) in self.input_fields.iteritems():
+        topology = next(iter(self.input_fields.values())).topology
+        for (field,topoview) in self.input_fields.items():
             assert all(topoview.topology.cart_shape == topology.cart_shape), 'topology mismatch'
-        for (field,topoview) in self.output_fields.iteritems():
+        for (field,topoview) in self.output_fields.items():
             assert all(topoview.topology.cart_shape == topology.cart_shape), 'topology mismatch'
         self.topology = topology
 
@@ -104,7 +109,7 @@ class FortranFFTWOperator(FortranOperator):
         if self.discretized:
             return
         super(FortranFFTWOperator,self).discretize()
-        topo_view = self.input_discrete_fields.values()[0].topology
+        topo_view = next(iter(self.input_discrete_fields.values())).topology
         self._fftw_discretize(topo_view)
 
     @debug
diff --git a/hysop/backend/host/fortran/operator/poisson.py b/hysop/backend/host/fortran/operator/poisson.py
index 6f5638ff9506973ba2f8af09ad16fa54d695e4d5..c44b8608be01c43486960b7f433e2d889f3764fe 100644
--- a/hysop/backend/host/fortran/operator/poisson.py
+++ b/hysop/backend/host/fortran/operator/poisson.py
@@ -9,6 +9,11 @@ from hysop.constants import HYSOP_REAL
 
 class PoissonFFTW(FortranFFTWOperator):
 
+    def __new__(cls, Fin, Fout, variables,
+                 extra_input_kwds=None, **kwds):
+        return super(PoissonFFTW, cls).__new__(cls,
+            input_fields=None, output_fields=None, **kwds)
+
     def __init__(self, Fin, Fout, variables,
                  extra_input_kwds=None, **kwds):
         """Operator to solve Poisson equation using FFTW in Fortran.
diff --git a/hysop/backend/host/fortran/operator/scales_advection.py b/hysop/backend/host/fortran/operator/scales_advection.py
index fe175eb419301fe765740a00d71a10dcf33b2699..1b930d04e04afce81b2ed7029f2930498e0aa727 100644
--- a/hysop/backend/host/fortran/operator/scales_advection.py
+++ b/hysop/backend/host/fortran/operator/scales_advection.py
@@ -78,6 +78,16 @@ class ScalesAdvection(FortranOperator):
         am.update(cls.__available_methods)
         return am
 
+    @debug
+    def __new__(cls, velocity,
+                 advected_fields_in, advected_fields_out,
+                 variables, dt, **kwds):
+        return super(ScalesAdvection, cls).__new__(cls,
+            input_fields=None, output_fields=None,
+            input_params=None, output_params=None,
+            **kwds)
+
+
     @debug
     def __init__(self, velocity,
                  advected_fields_in, advected_fields_out,
@@ -171,8 +181,8 @@ class ScalesAdvection(FortranOperator):
         try:
             self._dim_split = self.__dim_splitting_to_scales[strang_order]
         except KeyError as e:
-            print "Unknown dimenstional splitting method for Scales ({} given).".format(
-                strang_order)
+            print("Unknown dimensional splitting method for Scales ({} given).".format(
+                strang_order))
             raise e
 
         # Translate hysop remesh kernels into Scales configuration
@@ -180,8 +190,8 @@ class ScalesAdvection(FortranOperator):
         try:
             self._scales_kernel = self.__rmsh_to_scales__[self.remesh_kernel]
         except KeyError as e:
-            print "Unknown remesh method for Scales ({} given).".format(
-                self.remesh_kernel)
+            print("Unknown remesh method for Scales ({} given).".format(
+                self.remesh_kernel))
             raise e
 
         # Translate hysop multi scale interpolation to Scales interpolation
@@ -189,8 +199,8 @@ class ScalesAdvection(FortranOperator):
         try:
             self._scales_interp = self.__interpol_to_scales[ms_interp]
         except KeyError as e:
-            print "Unknown multi scale interpolation method for Scales ({} given)".format(
-                ms_interp)
+            print("Unknown multi scale interpolation method for Scales ({} given)".format(
+                ms_interp))
             raise e
 
         self.time_integrator = method.pop(TimeIntegrator)
@@ -253,7 +263,7 @@ class ScalesAdvection(FortranOperator):
 
         dSin, dSout, all_buffers = (), (), ()
         # 3-components fields
-        for i in xrange(nscalars//3):
+        for i in range(nscalars//3):
             dfields_in  = dadvected_fields_in[3*i:3*(i+1)]
             dfields_out = dadvected_fields_out[3*i:3*(i+1)]
             sin = CartesianDiscreteTensorField.from_dfields(name='Sin{}'.format(i),
diff --git a/hysop/backend/host/host_allocator.py b/hysop/backend/host/host_allocator.py
index b84f9e092614c326a1191ab16966916d54516b11..f2cd99d2eb1dda4039e9c1b38e74004c37fc2112 100644
--- a/hysop/backend/host/host_allocator.py
+++ b/hysop/backend/host/host_allocator.py
@@ -1,6 +1,6 @@
-
+import cpuinfo
 from psutil import virtual_memory
-from hysop.deps import cpuinfo
+
 from hysop.constants                import default_order
 from hysop.core.memory.allocator import AllocatorBase
 from hysop.backend.host.host_buffer import HostBuffer
@@ -10,27 +10,30 @@ class HostAllocator(AllocatorBase):
     Allocator that allocates HostBuffers
     """
 
+    def __new__(cls, verbose=None):
+        return super(HostAllocator, cls).__new__(cls, verbose=verbose)
+
     def __init__(self, verbose=None):
         super(HostAllocator, self).__init__(verbose=verbose)
         self.mem_size = virtual_memory().total
-    
+
     def max_alloc_size(self):
         """Max allocatable size in bytes."""
         return self.mem_size
-    
+
     def allocate(self, nbytes, **kwds):
         super(HostAllocator, self).allocate(nbytes=nbytes, **kwds)
         return HostBuffer(size=nbytes)
-   
+
     def prefix(self):
         return '{}: '.format(self.full_tag)
-    
+
     def is_on_host(self):
         """
         Return true if buffers are allocated in host memory.
         """
         return True
-    
+
     def memory_pool(self, name, **kwds):
         """
         Construct a memory pool from this allocator.
@@ -39,15 +42,15 @@ class HostAllocator(AllocatorBase):
         if isinstance(self, MemoryPool):
             msg='allocator is already a memory pool.'
             raise RuntimeError(msg)
-        return HostMemoryPool(allocator=self, name=name, **kwds) 
+        return HostMemoryPool(allocator=self, name=name, **kwds)
 
-def __get_default_name():   
+def __get_default_name():
     try:
         cpu_name = cpuinfo.cpuinfo.get_cpu_info()['brand']
         pos = cpu_name.find('@')
         if pos>0:
             cpu_name = cpu_name[:pos]
-    except: 
+    except:
         cpu_name = 'CPU0'
     return cpu_name.strip()
 
diff --git a/hysop/backend/host/host_array.py b/hysop/backend/host/host_array.py
index dcaf51d8abc2c62d9b53d3a49945b8a70ce6d660..070256fe2aa7e5ea7dfc71c93608413c3d2ff6e0 100644
--- a/hysop/backend/host/host_array.py
+++ b/hysop/backend/host/host_array.py
@@ -1,4 +1,5 @@
-from hysop.deps import np
+import numpy as np
+
 from hysop.core.arrays import MemoryType, MemoryOrdering
 from hysop.core.arrays import default_order
 from hysop.core.arrays.array import Array
diff --git a/hysop/backend/host/host_array_backend.py b/hysop/backend/host/host_array_backend.py
index 50d1a160f4b56d70564ebc4bb1e0d884d1e5e8fb..c391ba3ad352f9cf07ae71c600f6d2924298a651 100644
--- a/hysop/backend/host/host_array_backend.py
+++ b/hysop/backend/host/host_array_backend.py
@@ -1,5 +1,6 @@
 import warnings
-from hysop.deps import np
+import numpy as np
+
 from hysop.constants import Backend
 from hysop.constants import HYSOP_REAL, HYSOP_INTEGER, HYSOP_BOOL
 from hysop.tools.decorators import wraps
@@ -37,8 +38,7 @@ def numpy_method(f):
         for k in kargs.keys():
             if k not in argnames:
                 msg = 'Unknown argument {} in function {}::{}(), possible ones are {}.'
-                msg = msg.format(k, getattr(args[0], '__name__', type(
-                    args[0]).__name__), f.__name__, argnames)
+                msg = msg.format(k, getattr(args[0], '__name__', type(args[0]).__name__), f.__name__, argnames)
                 raise ValueError(msg)
 
         # format input arguments for numpy
@@ -134,7 +134,7 @@ class HostArrayBackend(ArrayBackend):
         # return HostArray(backend=self, handle=handle)
         return HostArray(backend=self, handle=handle)
 
-    def copyto(self, dst, src, reshape=False, queue=None, async=False, **kwds):
+    def copyto(self, dst, src, reshape=False, queue=None, synchronize=True, **kwds):
         """
         src is a HostArray
         dst can be everything
@@ -161,13 +161,12 @@ class HostArrayBackend(ArrayBackend):
             queue = first_not_None(queue, dst.default_queue)
             from hysop.backend.device.opencl.opencl_copy_kernel_launchers \
                 import OpenClCopyBufferRectLauncher
-            kl = OpenClCopyBufferRectLauncher.from_slices('copyto',
-                                                          src=src, dst=dst)
+            kl = OpenClCopyBufferRectLauncher.from_slices('copyto', src=src, dst=dst)
             evt = kl(queue=queue)
-            if async:
-                return evt
-            else:
+            if synchronize:
                 evt.wait()
+            else:
+                return evt
         else:
             msg = 'Unknown type to copy to ({}) for array of type {}.'
             msg = msg.format(dst.__class__, src.__class__)
@@ -181,7 +180,6 @@ class HostArrayBackend(ArrayBackend):
 
 ## ALLOCATED WITH BACKEND ALLOCATOR ##
 
-
     def array(self, shape, dtype=HYSOP_REAL, order=default_order,
               min_alignment=None, buf=None, offset=0):
         """
@@ -192,7 +190,7 @@ class HostArrayBackend(ArrayBackend):
         order = self._arg(order)
         shape = to_tuple(shape)
 
-        if dtype == np.bool:
+        if dtype == np.bool_:
             dtype = HYSOP_BOOL
             import warning
             msg = 'HostArrayBackend: numpy bool array converted to hysop_bool={}.'.format(dtype)
@@ -261,24 +259,18 @@ class HostArrayBackend(ArrayBackend):
         self._unsupported_argument('empty_like', 'subok', subok, True)
         if (order is None) or (order == MemoryOrdering.SAME_ORDER):
             try:
-                if a.flags['C_CONTIGUOUS']:
+                if a.flags.c_contiguous:
                     order = MemoryOrdering.C_CONTIGUOUS
-                elif a.flags['F_CONTIGUOUS']:
+                elif a.flags.f_contiguous:
                     order = MemoryOrdering.F_CONTIGUOUS
                 else:
                     order = default_order
             except AttributeError:
                 order = default_order
-        try:
-            return self.empty(
-                shape=first_not_None(shape, a.shape),
-                dtype=first_not_None(dtype, a.dtype),
-                order=order)
-        except AttributeError:
-            return self.empty(
-                shape=first_not_None(shape, a.resolution),
-                dtype=first_not_None(dtype, a.dtype),
-                order=order)
+        return self.empty(
+            shape=first_not_None(shape, a.shape),
+            dtype=first_not_None(dtype, a.dtype),
+            order=order)
 
     def full_like(self, a, fill_value, dtype=None, order=None, subok=True, shape=None):
         """
@@ -1808,7 +1800,7 @@ class HostArrayBackend(ArrayBackend):
         """
         pass
 
-#Exponents and logarithms
+# Exponents and logarithms
     @numpy_method
     def exp(self, x, out=None):
         """
@@ -2155,7 +2147,7 @@ class HostArrayBackend(ArrayBackend):
 
 # Simple random data
 
-    def rand(self, shape=None, out=None):
+    def rand(self, shape=None, out=None):
         """
         Random values in a given shape.
         """
@@ -2757,7 +2749,7 @@ class HostArrayBackend(ArrayBackend):
         """
         pass
 
-#Averages and variances
+# Averages and variances
     @numpy_method
     def median(self, a, axis=None, out=None, overwrite_input=False):
         """
diff --git a/hysop/backend/host/host_buffer.py b/hysop/backend/host/host_buffer.py
index a3e6c08be3be9b194de407bfbc84b54c1ce2c6dd..7e44b22bae8632508da8946b433f8949f64c3aff 100644
--- a/hysop/backend/host/host_buffer.py
+++ b/hysop/backend/host/host_buffer.py
@@ -1,35 +1,37 @@
+import numpy as np
+import ctypes as C
 
-from hysop.deps import np
 from hysop.constants import MemoryOrdering, default_order
 from hysop.tools.types import check_instance
 from hysop.core.memory.buffer import Buffer, PooledBuffer
 
+
 class HostBuffer(np.ndarray, Buffer):
     """
     Host buffer class.
     """
     __array_priority__ = -1.0
-    
+
     def __new__(cls, size,
-            shape=None, dtype=np.uint8, order=None,
-            buffer=None, offset=0, strides=None):
+                shape=None, dtype=np.uint8, order=None,
+                buffer=None, offset=0, strides=None):
 
         from_buffer = False
         if isinstance(buffer, Buffer):
             __buffer = buffer
-            buffer   = buffer.buf
+            buffer = buffer.buf
             from_buffer = True
 
-        obj = super(HostBuffer,cls).__new__(cls, 
-                shape=shape or (size,), dtype=dtype, order=order,
-                buffer=buffer, offset=offset, strides=strides)
+        obj = super(HostBuffer, cls).__new__(cls,
+                                             shape=shape or (size,), dtype=dtype, order=order,
+                                             buffer=buffer, offset=offset, strides=strides)
 
-        #keep a reference to the buffer (usefull for pooled buffers)
-        #such that buffer.__del__ will only be called when all views
-        #on this HostBuffer have been destroyed.
+        # keep a reference to the buffer (usefull for pooled buffers)
+        # such that buffer.__del__ will only be called when all views
+        # on this HostBuffer have been destroyed.
         if from_buffer and isinstance(__buffer, HostPooledBuffer):
             obj._hysop_base_data = __buffer
-        
+
         return obj
 
     def __array_finalize__(self, obj):
@@ -38,20 +40,22 @@ class HostBuffer(np.ndarray, Buffer):
 
     def __str__(self):
         return self.view(np.ndarray).__str__()
+
     def __repr__(self):
         return self.view(np.ndarray).__repr__()
 
     def get_int_ptr(self):
         return self.ctypes.data
+
     def release(self):
         pass
-    
+
     @classmethod
     def from_int_ptr(cls, int_ptr_value, size):
         """
         Given int ptr should never be freed, numpy take ownership.
         """
-        buf = np.core.multiarray.int_asbuffer(int_ptr_value, size)
+        buf = np.ctypeslib.as_array(C.cast(int_ptr_value, C.POINTER(C.c_uint8)), (size,))
         return cls.from_buffer(buf)
 
     @classmethod
@@ -62,7 +66,7 @@ class HostBuffer(np.ndarray, Buffer):
     def aligned_view(self, alignment, size=None):
         assert self.ndim == 1
         assert self.dtype == np.uint8
-        assert alignment>0
+        assert alignment > 0
         assert not (alignment & (alignment-1)), 'alignment is not a power of 2.'
         ptr = self.get_int_ptr()
         offset = -ptr % alignment
@@ -70,7 +74,7 @@ class HostBuffer(np.ndarray, Buffer):
             size = self.size-offset
         else:
             assert self.size >= (offset+size)
-        buf = self.__getitem__(slice(offset,offset+size))
+        buf = self.__getitem__(slice(offset, offset+size))
         return buf
 
     @classmethod
@@ -79,9 +83,10 @@ class HostBuffer(np.ndarray, Buffer):
         assert handle.ndim == 1
         assert handle.dtype == np.uint8
         return handle.view(cls)
-    
+
     int_ptr = property(get_int_ptr)
-    
+
+
 class HostPooledBuffer(PooledBuffer):
     def get_array(self):
         return self._bufview
diff --git a/hysop/backend/host/host_directional_operator.py b/hysop/backend/host/host_directional_operator.py
index dc4e9816f00d3f3bd7b3fcd4d711b49e90863ed2..f43dccf690e0dff5a384e3933c65e4169fb02497 100644
--- a/hysop/backend/host/host_directional_operator.py
+++ b/hysop/backend/host/host_directional_operator.py
@@ -4,15 +4,13 @@ from hysop.tools.decorators  import debug
 from hysop.operator.directional.directional import DirectionalOperatorBase
 from hysop.backend.host.host_operator import HostOperator
 
-class HostDirectionalOperator(DirectionalOperatorBase, HostOperator):
+class HostDirectionalOperator(DirectionalOperatorBase, HostOperator, metaclass=ABCMeta):
     """
     Abstract class for discrete directional operators working on host backends.
-    
-    Field requirements are set such that the current direction will 
+
+    Field requirements are set such that the current direction will
     be contiguous in memory.
     """
-    
-    __metaclass__ = ABCMeta
 
     @debug
     def __init__(self, **kwds):
diff --git a/hysop/backend/host/host_mempool.py b/hysop/backend/host/host_mempool.py
index 44c05fff3e1482b4e6627d83ee1ff786b4d9cf7b..8d3b991147f2f07d44a0a42641604b8233b39920 100644
--- a/hysop/backend/host/host_mempool.py
+++ b/hysop/backend/host/host_mempool.py
@@ -5,11 +5,14 @@ from hysop.backend.host.host_buffer import HostPooledBuffer
 from hysop.core.memory.mempool import MemoryPool
 
 class HostMemoryPool(MemoryPool, HostAllocator):
-    
+
+    def __new__(cls, allocator, **kwds):
+        return super(HostMemoryPool, cls).__new__(cls, allocator=allocator, **kwds)
+
     def __init__(self, allocator, **kwds):
         check_instance(allocator, HostAllocator)
         super(HostMemoryPool,self).__init__(allocator=allocator, **kwds)
 
     def _wrap_buffer(self, buf, alloc_sz, size, alignment):
-        return HostPooledBuffer(pool=self, buf=buf, alloc_sz=alloc_sz, 
+        return HostPooledBuffer(pool=self, buf=buf, alloc_sz=alloc_sz,
                 size=size, alignment=alignment)
diff --git a/hysop/backend/host/host_operator.py b/hysop/backend/host/host_operator.py
index 926e604b9c55afda10acfa65f61ae6888cd309d0..cd20d22233f24b9523d5168e058fc57f4d715acc 100644
--- a/hysop/backend/host/host_operator.py
+++ b/hysop/backend/host/host_operator.py
@@ -2,7 +2,7 @@
 discrete operators working on the Host backend.
 
 * :class:`~hysop.backend.host.host_operator.HostOperator` is an abstract class
-    used to provide a common interface to all discrete operators working with the 
+    used to provide a common interface to all discrete operators working with the
     opencl backend.
 """
 from abc import ABCMeta
@@ -14,22 +14,32 @@ from hysop.core.graph.computational_operator import ComputationalGraphOperator
 from hysop.topology.topology_descriptor import TopologyDescriptor
 
 
-class HostOperator(ComputationalGraphOperator):
+class HostOperatorBase(ComputationalGraphOperator, metaclass=ABCMeta):
     """
-    Abstract class for discrete operators working on OpenCL backends.
+    Abstract class for discrete operators working on cpu.
+    HostOperatorBase ignore the extra cl_env keyword parameter.
     """
-    __metaclass__ = ABCMeta
-    
+
+    @debug
+    def __new__(cls, cl_env=None, **kwds):
+        return super(HostOperatorBase, cls).__new__(cls, **kwds)
+
     @debug
-    def __init__(self, **kwds):
+    def __init__(self, cl_env=None, **kwds):
         """
         Create the common attributes of all host operators.
-        
+
         All input and output variable topologies should be of kind
         Backend.HOST and share the same HostEnvironment.
         """
-        super(HostOperator, self).__init__(**kwds)
-    
+        super(HostOperatorBase, self).__init__(**kwds)
+
+
+class HostOperator(HostOperatorBase, metaclass=ABCMeta):
+    """
+    Abstract class for discrete operators working on cpu.
+    HostOperator extra cl_env keyword parameter and enforces HOST backend.
+    """
     @classmethod
     def supported_backends(cls):
         """
@@ -38,34 +48,40 @@ class HostOperator(ComputationalGraphOperator):
         return set([Backend.HOST])
 
 
+class OpenClMappedMemoryObjectGetter(object):
+    def __new__(cls, obj, evt, **kwds):
+        return super(OpenClMappedMemoryObjectGetter, cls).__new__(cls, **kwds)
+
+    def __init__(self, obj, evt, **kwds):
+        super(OpenClMappedMemoryObjectGetter, self).__init__(**kwds)
+        check_instance(obj, OpenClMappable)
+        self.__obj = obj
+        self.__evt = evt
+
+    def __getitem__(self, key):
+        return self.__obj.get_mapped_object(key=key)
+
+    @property
+    def evt(self):
+        return self.__evt
+
 
 class OpenClMappable(object):
     """
     Extend host operator capabilities to work on mapped opencl buffers
     """
 
-    class OpenClMappedMemoryObjectGetter(object):
-        def __init__(self, obj, evt):
-            check_instance(obj, OpenClMappable)
-            self.__obj = obj
-            self.__evt = evt
-        def __getitem__(self, key):
-            return obj.get_mapped_object(key=key)
-        @property
-        def evt(self):
-            return self.__evt
-    
     @classmethod
     def supported_backends(cls):
         sb = super(OpenClMappable, cls).supported_backends()
         sb.add(Backend.OPENCL)
         return sb
-    
+
     @debug
-    def create_topology_descriptors(self): 
+    def create_topology_descriptors(self):
         if self.enable_opencl_host_buffer_mapping:
             # enforce opencl topology on host operator
-            for (field, topo_descriptor) in self.input_fields.iteritems():
+            for (field, topo_descriptor) in self.input_fields.items():
                 topo_descriptor = TopologyDescriptor.build_descriptor(
                         backend=Backend.OPENCL,
                         operator=self,
@@ -74,7 +90,7 @@ class OpenClMappable(object):
                         cl_env=self.cl_env)
                 self.input_fields[field] = topo_descriptor
 
-            for (field, topo_descriptor) in self.output_fields.iteritems():
+            for (field, topo_descriptor) in self.output_fields.items():
                 topo_descriptor = TopologyDescriptor.build_descriptor(
                         backend=Backend.OPENCL,
                         operator=self,
@@ -85,9 +101,13 @@ class OpenClMappable(object):
         else:
             super(OpenClMappable, self).create_topology_descriptors()
 
+    def __new__(cls, cl_env=None, mpi_params=None,
+            enable_opencl_host_buffer_mapping=False, **kwds):
+        return super(OpenClMappable, cls).__new__(cls, mpi_params=mpi_params, **kwds)
+
     def __init__(self, cl_env=None, mpi_params=None,
             enable_opencl_host_buffer_mapping=False, **kwds):
-        
+
         if enable_opencl_host_buffer_mapping:
             msg = 'OpenClMappable is an interface dedicated to extend HostOperator.'
             assert isinstance(self, HostOperator), msg
@@ -99,22 +119,22 @@ class OpenClMappable(object):
                 assert (mpi_params == cl_env.mpi_params)
 
         super(OpenClMappable, self).__init__(mpi_params=mpi_params, **kwds)
-        
+
         self.__cl_env = cl_env
         self.__enable_opencl_host_buffer_mapping = enable_opencl_host_buffer_mapping
-        
+
         self.__mapped = False
         self.__registered_objects = {}
         self.__registered_getters = {}
         self.__mapped_objects     = {}
 
     def __del__(self):
-        self.unmap_objects()
+        self.unmap_objects(force=True)
 
     @property
     def cl_env(self):
         return self.__cl_env
-    
+
     @property
     def enable_opencl_host_buffer_mapping(self):
         return self.__enable_opencl_host_buffer_mapping
@@ -148,7 +168,7 @@ class OpenClMappable(object):
                 if field in ofields:
                     flags |= cl.map_flags.WRITE
                 assert (field._data is not None)
-                self.register_mappable_object(key=field, obj=field._data.handle, 
+                self.register_mappable_object(key=field, obj=field._data.handle,
                         flags=flags)
             for vfield in vfields:
                 self.register_data_getter(get_key=vfield, obj_key=vfield.dfield,
@@ -161,7 +181,7 @@ class OpenClMappable(object):
         assert (key not in self.__registered_objects), msg
         check_instance(obj, clArray.Array)
         self.__registered_objects[key] = (obj, flags)
-    
+
     def register_data_getter(self, get_key, obj_key, getter):
         assert callable(getter)
         msg='Device memory getter "{}" has already been registered as an object.'
@@ -180,26 +200,26 @@ class OpenClMappable(object):
         msg='Device memory objects have already been mapped to host.'
         assert not self.__mapped, msg
         evt = None
-        for (obj_key, (dev_buf, flags)) in self.__registered_objects.iteritems():
+        for (obj_key, (dev_buf, flags)) in self.__registered_objects.items():
             if DEBUG:
                 msg='Mapping {}...'.format(obj_key.full_tag)
-                print msg
+                print(msg)
             if is_blocking:
                 host_buf = dev_buf.map_to_host(queue=queue, is_blocking=is_blocking, flags=flags)
             else:
                 host_buf, evt = dev_buf.map_to_host(queue=queue, is_blocking=is_blocking, flags=flags)
             self.__mapped_objects[obj_key] = host_buf
-        for (get_key, (obj_key, getter)) in self.__registered_getters.iteritems():
+        for (get_key, (obj_key, getter)) in self.__registered_getters.items():
             if DEBUG:
                 msg='Applying getter {} to mapped buffer {}...'.format(get_key.full_tag, obj_key.full_tag)
-                print msg
+                print(msg)
             self.__mapped_objects[get_key] = getter(self.__mapped_objects[obj_key])
         self.__mapped = True
         return evt
 
-    def unmap_objects(self):
+    def unmap_objects(self, force=False):
         msg='Device memory objects have already been unmapped from host.'
-        assert self.__mapped, msg
+        assert force or self.__mapped, msg
         self.__mapped_objects.clear()
         self.__mapped = False
 
@@ -216,7 +236,7 @@ class OpenClMappable(object):
         msg=msg.format(key)
         assert key in self.__registered_objects, msg
         return functools.partial(self.get_mapped_object, key=key)
-   
+
     @contextmanager
     def map_objects_to_host(self, queue=None, is_blocking=True):
         if self.__registered_objects:
@@ -224,7 +244,7 @@ class OpenClMappable(object):
             queue = first_not_None(queue, self.cl_env.default_queue)
             try:
                 evt = self.map_objects(queue, is_blocking)
-                yield self.OpenClMappedMemoryObjectGetter(self, evt)
+                yield OpenClMappedMemoryObjectGetter(self, evt)
             except:
                 raise
             finally:
diff --git a/hysop/backend/host/python/operator/analytic.py b/hysop/backend/host/python/operator/analytic.py
index 7d9dd1a93dbb7473cfdbc16c6ad8ae4047955b7f..10eb14efd0aa4561702d698a981676eb33df962e 100644
--- a/hysop/backend/host/python/operator/analytic.py
+++ b/hysop/backend/host/python/operator/analytic.py
@@ -14,12 +14,20 @@ class PythonAnalyticField(HostOperator):
     """
 
     @debug
-    def __init__(self, field, formula, variables, 
-            extra_input_kwds=None, **kwds): 
+    def __new__(cls, field, formula, variables,
+            extra_input_kwds=None, **kwds):
+        return super(PythonAnalyticField, cls).__new__(cls,
+                input_fields=None,
+                output_fields=None,
+                input_params=None, **kwds)
+
+    @debug
+    def __init__(self, field, formula, variables,
+            extra_input_kwds=None, **kwds):
         """
         Initialize a Analytic operator on the python backend.
 
-        Apply a user-defined formula onto a field, possibly 
+        Apply a user-defined formula onto a field, possibly
         dependent on space variables and external fields/parameters.
 
         Parameters
@@ -37,7 +45,7 @@ class PythonAnalyticField(HostOperator):
         extra_input_kwds: dict, optional
             Extra inputs that will be forwarded to the formula.
             Fields and Parameters are handled correctly as input requirements.
-            If the output field is modified inplace, it should be added 
+            If the output field is modified inplace, it should be added
             to extra_input_kwds.
         kwds: dict, optional
             Base class arguments.
@@ -55,7 +63,7 @@ class PythonAnalyticField(HostOperator):
 
         extra_kwds = {}
         map_fields = {}
-        for (k,v) in extra_input_kwds.iteritems():
+        for (k,v) in extra_input_kwds.items():
             if isinstance(v, Field):
                 input_fields[v] = self.get_topo_descriptor(variables, v)
                 map_fields[v] = k
@@ -65,7 +73,7 @@ class PythonAnalyticField(HostOperator):
             else:
                 extra_kwds[k] = v
 
-        super(PythonAnalyticField, self).__init__(input_fields=input_fields, 
+        super(PythonAnalyticField, self).__init__(input_fields=input_fields,
                 output_fields=output_fields,
                 input_params=input_params, **kwds)
 
@@ -73,7 +81,7 @@ class PythonAnalyticField(HostOperator):
         self.formula = formula
         self.extra_kwds = extra_kwds
         self.map_fields = map_fields
-    
+
     @debug
     def discretize(self):
         if self.discretized:
@@ -86,7 +94,7 @@ class PythonAnalyticField(HostOperator):
         assert 'coords' not in extra_kwds
         extra_kwds['data']   = dfield.compute_data[0]
         extra_kwds['coords'] = dfield.compute_mesh_coords
-        for (field, dfield) in self.input_discrete_fields.iteritems():
+        for (field, dfield) in self.input_discrete_fields.items():
             assert field.name not in extra_kwds, field.name
             extra_kwds[map_fields[field]] = dfield.compute_data
         self.dfield = dfield
@@ -97,7 +105,7 @@ class PythonAnalyticField(HostOperator):
         super(PythonAnalyticField, self).apply(**kwds)
         self.formula(**self.extra_kwds)
         self.dfield.exchange_ghosts()
-    
+
     @classmethod
     def supports_mpi(cls):
         return True
diff --git a/hysop/backend/host/python/operator/convergence.py b/hysop/backend/host/python/operator/convergence.py
index 309ea9595428dc701df55ea98277a1f946d7830e..47409133695d3291e29addba395df41113307020 100644
--- a/hysop/backend/host/python/operator/convergence.py
+++ b/hysop/backend/host/python/operator/convergence.py
@@ -16,6 +16,10 @@ import numpy as np
 class PythonConvergence(ConvergenceBase, HostOperator):
     """Computes convergence of a field through iterations"""
 
+    @debug
+    def __new__(cls, **kwds):
+        return super(PythonConvergence, cls).__new__(cls, **kwds)
+
     @debug
     def __init__(self, **kwds):
         super(PythonConvergence, self).__init__(**kwds)
diff --git a/hysop/backend/host/python/operator/curl.py b/hysop/backend/host/python/operator/curl.py
index 25aa3a3e70d87dde42a4240981296903452a4d32..305017c3df0518f931836e68e72655388d14d056 100644
--- a/hysop/backend/host/python/operator/curl.py
+++ b/hysop/backend/host/python/operator/curl.py
@@ -10,13 +10,13 @@ from hysop.core.graph.graph import op_apply
 from hysop.operator.base.curl import SpectralCurlOperatorBase
 
 @nb.guvectorize([
-    nb.void(nb.float32[:,::-1],    nb.float32[::-1],    nb.float32[:,::-1]), 
-    nb.void(nb.complex64[:,::-1],  nb.float32[::-1],    nb.complex64[:,::-1]), 
-    nb.void(nb.complex64[:,::-1],  nb.complex64[::-1],  nb.complex64[:,::-1]), 
-    nb.void(nb.float64[:,::-1],    nb.float64[::-1],    nb.float64[:,::-1]), 
-    nb.void(nb.complex128[:,::-1], nb.float64[::-1],    nb.complex128[:,::-1]), 
-    nb.void(nb.complex128[:,::-1], nb.complex128[::-1], nb.complex128[:,::-1]), 
-    ],  '(n,m),(m)->(n,m)', 
+    nb.void(nb.float32[:,::-1],    nb.float32[::-1],    nb.float32[:,::-1]),
+    nb.void(nb.complex64[:,::-1],  nb.float32[::-1],    nb.complex64[:,::-1]),
+    nb.void(nb.complex64[:,::-1],  nb.complex64[::-1],  nb.complex64[:,::-1]),
+    nb.void(nb.float64[:,::-1],    nb.float64[::-1],    nb.float64[:,::-1]),
+    nb.void(nb.complex128[:,::-1], nb.float64[::-1],    nb.complex128[:,::-1]),
+    nb.void(nb.complex128[:,::-1], nb.complex128[::-1], nb.complex128[:,::-1]),
+    ],  '(n,m),(m)->(n,m)',
     target=__DEFAULT_NUMBA_TARGET__, nopython=True, cache=True)
 def filter_curl_2d__0(Fin, K, Fout):
     for i in range(0, Fin.shape[0]):
@@ -24,13 +24,13 @@ def filter_curl_2d__0(Fin, K, Fout):
             Fout[i,j] = +K[j]*Fin[i,j]
 
 @nb.guvectorize([
-    nb.void(nb.float32[:,::-1],    nb.float32[::-1],    nb.float32[:,::-1]), 
-    nb.void(nb.complex64[:,::-1],  nb.float32[::-1],    nb.complex64[:,::-1]), 
-    nb.void(nb.complex64[:,::-1],  nb.complex64[::-1],  nb.complex64[:,::-1]), 
-    nb.void(nb.float64[:,::-1],    nb.float64[::-1],    nb.float64[:,::-1]), 
-    nb.void(nb.complex128[:,::-1], nb.float64[::-1],    nb.complex128[:,::-1]), 
-    nb.void(nb.complex128[:,::-1], nb.complex128[::-1], nb.complex128[:,::-1]), 
-    ],  '(n,m),(m)->(n,m)', 
+    nb.void(nb.float32[:,::-1],    nb.float32[::-1],    nb.float32[:,::-1]),
+    nb.void(nb.complex64[:,::-1],  nb.float32[::-1],    nb.complex64[:,::-1]),
+    nb.void(nb.complex64[:,::-1],  nb.complex64[::-1],  nb.complex64[:,::-1]),
+    nb.void(nb.float64[:,::-1],    nb.float64[::-1],    nb.float64[:,::-1]),
+    nb.void(nb.complex128[:,::-1], nb.float64[::-1],    nb.complex128[:,::-1]),
+    nb.void(nb.complex128[:,::-1], nb.complex128[::-1], nb.complex128[:,::-1]),
+    ],  '(n,m),(m)->(n,m)',
     target=__DEFAULT_NUMBA_TARGET__, nopython=True, cache=True)
 def filter_curl_2d__1(Fin, K, Fout):
     for i in range(0, Fin.shape[0]):
@@ -39,13 +39,13 @@ def filter_curl_2d__1(Fin, K, Fout):
 
 
 @nb.guvectorize([
-    nb.void(nb.float32[:,:,::-1],    nb.float32[::-1],    nb.float32[:,:,::-1]), 
-    nb.void(nb.complex64[:,:,::-1],  nb.float32[::-1],    nb.complex64[:,:,::-1]), 
-    nb.void(nb.complex64[:,:,::-1],  nb.complex64[::-1],  nb.complex64[:,:,::-1]), 
-    nb.void(nb.float64[:,:,::-1],    nb.float64[::-1],    nb.float64[:,:,::-1]), 
-    nb.void(nb.complex128[:,:,::-1], nb.float64[::-1],    nb.complex128[:,:,::-1]), 
-    nb.void(nb.complex128[:,:,::-1], nb.complex128[::-1], nb.complex128[:,:,::-1]), 
-    ],  '(n,m,p),(p)->(n,m,p)', 
+    nb.void(nb.float32[:,:,::-1],    nb.float32[::-1],    nb.float32[:,:,::-1]),
+    nb.void(nb.complex64[:,:,::-1],  nb.float32[::-1],    nb.complex64[:,:,::-1]),
+    nb.void(nb.complex64[:,:,::-1],  nb.complex64[::-1],  nb.complex64[:,:,::-1]),
+    nb.void(nb.float64[:,:,::-1],    nb.float64[::-1],    nb.float64[:,:,::-1]),
+    nb.void(nb.complex128[:,:,::-1], nb.float64[::-1],    nb.complex128[:,:,::-1]),
+    nb.void(nb.complex128[:,:,::-1], nb.complex128[::-1], nb.complex128[:,:,::-1]),
+    ],  '(n,m,p),(p)->(n,m,p)',
     target=__DEFAULT_NUMBA_TARGET__, nopython=True, cache=True)
 def filter_curl_3d__0(Fin, K, Fout):
     for i in range(0, Fin.shape[0]):
@@ -54,13 +54,13 @@ def filter_curl_3d__0(Fin, K, Fout):
                 Fout[i,j,k] = +K[k]*Fin[i,j,k]
 
 @nb.guvectorize([
-    nb.void(nb.float32[:,:,::-1],    nb.float32[::-1],    nb.float32[:,:,::-1]), 
-    nb.void(nb.complex64[:,:,::-1],  nb.float32[::-1],    nb.complex64[:,:,::-1]), 
-    nb.void(nb.complex64[:,:,::-1],  nb.complex64[::-1],  nb.complex64[:,:,::-1]), 
-    nb.void(nb.float64[:,:,::-1],    nb.float64[::-1],    nb.float64[:,:,::-1]), 
-    nb.void(nb.complex128[:,:,::-1], nb.float64[::-1],    nb.complex128[:,:,::-1]), 
-    nb.void(nb.complex128[:,:,::-1], nb.complex128[::-1], nb.complex128[:,:,::-1]), 
-    ],  '(n,m,p),(p)->(n,m,p)', 
+    nb.void(nb.float32[:,:,::-1],    nb.float32[::-1],    nb.float32[:,:,::-1]),
+    nb.void(nb.complex64[:,:,::-1],  nb.float32[::-1],    nb.complex64[:,:,::-1]),
+    nb.void(nb.complex64[:,:,::-1],  nb.complex64[::-1],  nb.complex64[:,:,::-1]),
+    nb.void(nb.float64[:,:,::-1],    nb.float64[::-1],    nb.float64[:,:,::-1]),
+    nb.void(nb.complex128[:,:,::-1], nb.float64[::-1],    nb.complex128[:,:,::-1]),
+    nb.void(nb.complex128[:,:,::-1], nb.complex128[::-1], nb.complex128[:,:,::-1]),
+    ],  '(n,m,p),(p)->(n,m,p)',
     target=__DEFAULT_NUMBA_TARGET__, nopython=True, cache=True)
 def filter_curl_3d__1(Fin, K, Fout):
     for i in range(0, Fin.shape[0]):
@@ -68,15 +68,15 @@ def filter_curl_3d__1(Fin, K, Fout):
             for k in range(0, Fin.shape[2]):
                 Fout[i,j,k] = -K[k]*Fin[i,j,k]
 
+
 class PythonSpectralCurl(SpectralCurlOperatorBase, HostOperator):
     """
     Compute the curl by using an python FFT backend.
     """
-        
-        
+
     def setup(self, work):
         super(PythonSpectralCurl, self).setup(work=work)
-        
+
         dim = self.dim
         Fin, Fout = self.Fin, self.Fout
         K, FIN, FOUT = self.dK, self.FIN, self.FOUT
@@ -93,10 +93,10 @@ class PythonSpectralCurl(SpectralCurlOperatorBase, HostOperator):
             if (Fin.nb_components == 3):
                 assert (Fout.nb_components == 3), Fout.nb_components
                 curl_filters = ()
-                for i in xrange(3):
+                for i in range(3):
                     curl_Fi = functools.partial(filter_curl_3d__0, FIN[i], K[i], FOUT[i])
                     curl_filters += (curl_Fi,)
-                for i in xrange(3):
+                for i in range(3):
                     curl_Fi = functools.partial(filter_curl_3d__1, FIN[3+i], K[3+i], FOUT[3+i])
                     curl_filters += (curl_Fi,)
             else:
@@ -111,7 +111,7 @@ class PythonSpectralCurl(SpectralCurlOperatorBase, HostOperator):
     def apply(self, simulation=None, **kwds):
         """Apply analytic formula."""
         super(PythonSpectralCurl, self).apply(**kwds)
-        for (Ft, curl_filter, Bt) in zip(self.forward_transforms, 
+        for (Ft, curl_filter, Bt) in zip(self.forward_transforms,
                                          self.curl_filters,
                                          self.backward_transforms):
             Ft()
diff --git a/hysop/backend/host/python/operator/custom.py b/hysop/backend/host/python/operator/custom.py
index 7c756e89a1a08f00d9da581bd276d5a44d943665..feb947cf553948ea08fa7bbd6ee8906a14bfae70 100644
--- a/hysop/backend/host/python/operator/custom.py
+++ b/hysop/backend/host/python/operator/custom.py
@@ -8,6 +8,15 @@ from hysop.core.graph.graph import op_apply
 
 
 class PythonCustomOperator(HostOperator):
+
+    @debug
+    def __new__(cls, func, invars=None, outvars=None,
+                 extra_args=None, variables=None, ghosts=None, **kwds):
+        return super(PythonCustomOperator, cls).__new__(cls,
+            input_fields=None, output_fields=None,
+            input_params=None, output_params=None,
+            **kwds)
+
     @debug
     def __init__(self, func, invars=None, outvars=None,
                  extra_args=None, variables=None, ghosts=None, **kwds):
diff --git a/hysop/backend/host/python/operator/derivative.py b/hysop/backend/host/python/operator/derivative.py
index e4a16f48b8bced67f5530d57c1d3b23ece44b8f2..f7c63ce0ee8e95362f6166062ae8e5c64c968c4d 100644
--- a/hysop/backend/host/python/operator/derivative.py
+++ b/hysop/backend/host/python/operator/derivative.py
@@ -16,9 +16,8 @@ class PythonSpectralSpaceDerivative(SpectralSpaceDerivativeBase, HostOperator):
         super(PythonSpectralSpaceDerivative, self).setup(work=work)
         dA = self.dA
         if self.scale_by_field:
-            assert isinstance(self.scaling_view, int)
             aview = dA.compute_slices
-            self.scale = dA.sbuffer[self.scaling_view][aview]
+            self.scale = dA.sbuffer[aview]
         else:
             self.scale = dA
 
@@ -41,10 +40,7 @@ class PythonSpectralSpaceDerivative(SpectralSpaceDerivativeBase, HostOperator):
         if self.scale_by_field:
             out[...] *= scale
         elif self.scale_by_parameter:
-            if (self.scaling_view is not None):
-                out[...] *= scale[self.scaling_view]
-            else:
-                out[...] *= scale()
+            out[...] *= scale()
         elif self.scale_by_value:
             out[...] *= scale
 
@@ -56,6 +52,10 @@ class PythonFiniteDifferencesSpaceDerivative(FiniteDifferencesSpaceDerivativeBas
     using explicit finite differences.
     """
 
+    @debug
+    def __new__(cls, **kwds):
+        return super(PythonFiniteDifferencesSpaceDerivative, cls).__new__(cls, **kwds)
+
     @debug
     def __init__(self, **kwds):
         """
@@ -120,9 +120,8 @@ class PythonFiniteDifferencesSpaceDerivative(FiniteDifferencesSpaceDerivativeBas
         self.iview = iview
 
         if self.scale_by_field:
-            assert isinstance(self.scaling_view, int)
             aview = dA.compute_slices
-            self.scale = dA.buffers[self.scaling_view][aview]
+            self.scale = dA.buffers[aview]
         else:
             self.scale = dA
 
@@ -146,11 +145,9 @@ class PythonFiniteDifferencesSpaceDerivative(FiniteDifferencesSpaceDerivativeBas
         if self.scale_by_field:
             out[...] *= scale
         elif self.scale_by_parameter:
-            if (self.scaling_view is not None):
-                out[...] *= scale[self.scaling_view]
-            else:
-                out[...] *= scale()
+            out[...] *= scale()
         elif self.scale_by_value:
             out[...] *= scale
 
         self.dFout.exchange_ghosts()
+
diff --git a/hysop/backend/host/python/operator/diffusion.py b/hysop/backend/host/python/operator/diffusion.py
index ec36df387b3d16c7dda75e911adcd4017e6cb02d..9f8e5d0b969ae0ee96ec466177c8c422a26b6fa9 100644
--- a/hysop/backend/host/python/operator/diffusion.py
+++ b/hysop/backend/host/python/operator/diffusion.py
@@ -20,7 +20,7 @@ class PythonDiffusion(DiffusionOperatorBase, OpenClMappable, HostOperator):
     """
     Solves the implicit diffusion equation using numpy fft.
     """
-        
+
     @classmethod
     def build_diffusion_filter(cls, dim, *args, **kwds):
         target = kwds.get('target', __DEFAULT_NUMBA_TARGET__)
@@ -71,18 +71,18 @@ class PythonDiffusion(DiffusionOperatorBase, OpenClMappable, HostOperator):
             raise NotImplementedError(msg)
         return functools.partial(F, *args)
 
-    
+
     def setup(self, work):
         super(PythonDiffusion, self).setup(work=work)
         diffusion_filters = ()
-        for (Fo,Ft,Kd) in zip(self.dFout.dfields, 
-                              self.forward_transforms, 
-                              self.all_dkds): 
+        for (Fo,Ft,Kd) in zip(self.dFout.dfields,
+                              self.forward_transforms,
+                              self.all_dkds):
             args = (Ft.full_output_buffer,) + tuple(Kd)
             F = self.build_diffusion_filter(Fo.dim, *args)
             diffusion_filters += (F,)
         self.diffusion_filters = diffusion_filters
-   
+
 
     @op_apply
     def apply(self, simulation, **kwds):
diff --git a/hysop/backend/host/python/operator/directional/advection_dir.py b/hysop/backend/host/python/operator/directional/advection_dir.py
index 9501cf42915b76dbec0e3128fbc6bc6df28cb800..39f37f0d4bcc0f8c30a2940bd3150850c890e15f 100644
--- a/hysop/backend/host/python/operator/directional/advection_dir.py
+++ b/hysop/backend/host/python/operator/directional/advection_dir.py
@@ -17,6 +17,10 @@ DEBUG = False
 class PythonDirectionalAdvection(DirectionalAdvectionBase, HostDirectionalOperator):
     counter = 0
 
+    @debug
+    def __new__(cls, **kwds):
+        return super(PythonDirectionalAdvection, cls).__new__(cls, **kwds)
+
     @debug
     def __init__(self, **kwds):
         super(PythonDirectionalAdvection, self).__init__(**kwds)
@@ -24,7 +28,7 @@ class PythonDirectionalAdvection(DirectionalAdvectionBase, HostDirectionalOperat
 
     def get_work_properties(self):
         requests = super(PythonDirectionalAdvection, self).get_work_properties()
-        
+
         V  = self.dvelocity
         Vr = self.relative_velocity
         check_instance(V, DiscreteScalarFieldView)
@@ -92,7 +96,7 @@ class PythonDirectionalAdvection(DirectionalAdvectionBase, HostDirectionalOperat
         velo_compute_view = velo_mesh.local_compute_slices
         self._velocity_mesh_attributes = (velo_mesh_iterator, dx, inv_dx, velo_compute_view, X0)
 
-        dsinputs0 = dsinputs.values()[0]
+        dsinputs0 = next(iter(dsinputs.values()))
         scalar_mesh = dsinputs0.mesh
         scalar_mesh_iterator = scalar_mesh.build_compute_mesh_iterator(cr)
         N0          = scalar_mesh.global_start[-1]
@@ -135,38 +139,38 @@ class PythonDirectionalAdvection(DirectionalAdvectionBase, HostDirectionalOperat
                     it = simulation.current_iteration
                     t = simulation.t()
                     _file,_line = inspect.stack()[1][1:3]
-                    debug_dumper(it, t, tag, tuple(df.sdata.get().handle[df.compute_slices] 
+                    debug_dumper(it, t, tag, tuple(df.sdata.get().handle[df.compute_slices]
                         for df in dfield.dfields), description=None)
             else:
                 def dump(*args, **kwds):
                     pass
-            Sin  = self.dadvected_fields_in.values()[0]
-            Sout = self.dadvected_fields_out.values()[0]
+            Sin  = next(iter(self.dadvected_fields_in.values()))
+            Sout = next(iter(self.dadvected_fields_out.values()))
             P    = self.dposition
-            print 'DT= {}'.format(dt)
+            print('DT= {}'.format(dt))
             self._compute_advection(dt)
-            print 'P'
-            print P.collect_data()
+            print('P')
+            print(P.collect_data())
             dump(P, 'P')
-            print 'S (before remesh)'
-            print Sin.collect_data()
+            print('S (before remesh)')
+            print(Sin.collect_data())
             dump(Sin, 'Sin before remesh')
             self._compute_remesh()
-            print 'S (before accumulation)'
-            print Sout[0].sbuffer[Sout[0].local_slices(ghosts=(0,self.remesh_ghosts))]
+            print('S (before accumulation)')
+            print(Sout[0].sbuffer[Sout[0].local_slices(ghosts=(0,self.remesh_ghosts))])
             dump(Sin, 'Sout (after remesh)')
             for sout in dsoutputs.values():
-                print 'Accumulate {}'.format(sout.short_description())
+                print('Accumulate {}'.format(sout.short_description()))
                 ghosts = tuple(sout.ghosts[:-1])+(self.remesh_ghosts,)
                 sout.accumulate_ghosts(directions=sout.dim-1, ghosts=ghosts)
-            print 'S (after accumulation, before ghost exchange)'
-            print Sout.collect_data()
+            print('S (after accumulation, before ghost exchange)')
+            print(Sout.collect_data())
             dump(Sin, 'Sout (after accumulation)')
             for sout in dsoutputs.values():
-                print 'Exchange {}'.format(sout.short_description())
+                print('Exchange {}'.format(sout.short_description()))
                 sout.exchange_ghosts()
-            print 'S (after ghost exchange)'
-            print Sout.collect_data()
+            print('S (after ghost exchange)')
+            print(Sout.collect_data())
             dump(Sin, 'Sout (after exchange)')
         else:
             self._compute_advection(dt)
@@ -176,7 +180,7 @@ class PythonDirectionalAdvection(DirectionalAdvectionBase, HostDirectionalOperat
                 sout.accumulate_ghosts(directions=sout.dim-1, ghosts=ghosts)
             for sout in dsoutputs.values():
                 sout.exchange_ghosts()
-        
+
         self.counter += 1
 
     def _interp_velocity(self, Vin, Vout, dX, I, Ig, lidx, ridx, inv_dx, is_periodic, Vr):
@@ -222,7 +226,7 @@ class PythonDirectionalAdvection(DirectionalAdvectionBase, HostDirectionalOperat
         Vout[...] -= Vr
 
     def _compute_advection(self, dt):
-        P   = self.dpos 
+        P   = self.dpos
         Vd  = self.dvelocity.sbuffer
         rtmp = self.drtmp
         itmp = self.ditmp
@@ -259,7 +263,7 @@ class PythonDirectionalAdvection(DirectionalAdvectionBase, HostDirectionalOperat
                 (lidx,ridx) = itmp
                 (dX0,V1)    = rtmp
                 dX0[...]  = _Vi - Vr
-                dX0[...] *= (0.5*dt) 
+                dX0[...] *= (0.5*dt)
                 self._interp_velocity(Vi,V1,dX0,I,Ig,lidx,ridx,inv_dx,is_periodic,Vr)
                 Pi[...]  = V1
                 Pi[...] *= dt
@@ -276,7 +280,7 @@ class PythonDirectionalAdvection(DirectionalAdvectionBase, HostDirectionalOperat
                 dXk[...] = V2
                 dXk[...] *= (1.0*dt)
                 self._interp_velocity(Vi,V3,dXk,I,Ig,lidx,ridx,inv_dx,is_periodic,Vr)
-                
+
                 V0 = dXk
                 V0[...] = _Vi  - Vr
 
@@ -327,15 +331,15 @@ class PythonDirectionalAdvection(DirectionalAdvectionBase, HostDirectionalOperat
         (mesh_it, dx, inv_dx, compute_view, N0) = self._scalar_mesh_attributes
 
         if DEBUG:
-            print 'GLOBAL START'
-            print 'X0: {}'.format(X0[0])
-            print 'N0: {}'.format(N0)
-            print '***********'
+            print('GLOBAL START')
+            print('X0: {}'.format(X0[0]))
+            print('N0: {}'.format(N0))
+            print('***********')
 
         scalar_advection_ghosts = self.scalar_advection_ghosts
         remesh_ghosts = self.remesh_ghosts
         remesh_kernel = self.remesh_kernel
-        P = 1 + remesh_kernel.n/2
+        P = 1 + remesh_kernel.n//2
 
         is_periodic = self.is_periodic
 
@@ -385,11 +389,11 @@ class PythonDirectionalAdvection(DirectionalAdvectionBase, HostDirectionalOperat
                 Sin  = dsinputs[ifield]
                 Sout = dsoutputs[ofield]
 
-                sin_view  = tuple(idx[i] + in_ghosts[Sin][i]  for i in xrange(cr))
+                sin_view  = tuple(idx[i] + in_ghosts[Sin][i]  for i in range(cr))
                 sin_view  += in_compute_slices[Sin][cr:]
 
                 dG = out_ghosts[Sout][-1] - remesh_ghosts
-                sout_view = tuple(idx[i] + out_ghosts[Sout][i] for i in xrange(cr))
+                sout_view = tuple(idx[i] + out_ghosts[Sout][i] for i in range(cr))
                 sout_view += out_compute_slices[Sout][cr:-1]
                 sout_view += (slice(dG, out_shapes[Sout][-1]-dG),)
 
@@ -398,14 +402,14 @@ class PythonDirectionalAdvection(DirectionalAdvectionBase, HostDirectionalOperat
                 input_buffer_views[ifield] = in_views
                 output_buffer_views[ofield] = out_views
 
-            for q in xrange(-P+1, P+1):
+            for q in range(-P+1, P+1):
                 I0[...] += 1
                 R0[...] += 1
                 R1[...] = remesh_kernel.gamma(R0)
 
                 sid=0
                 for ifield, ofield in zip(sinputs, soutputs):
-                    for k in xrange(ifield.nb_components):
+                    for k in range(ifield.nb_components):
                         sin  = input_buffer_views[ifield][k]
                         sout = output_buffer_views[ofield][k]
                         Si = (S[sid] if is_inplace else sout)
diff --git a/hysop/backend/host/python/operator/directional/stretching_dir.py b/hysop/backend/host/python/operator/directional/stretching_dir.py
index 3112bbb3983364cc2d162c1d730c87fa0e635a06..ead3d9c42dd9fa2a5d5afa848383e453cba2ab4f 100644
--- a/hysop/backend/host/python/operator/directional/stretching_dir.py
+++ b/hysop/backend/host/python/operator/directional/stretching_dir.py
@@ -14,6 +14,11 @@ from hysop.numerics.odesolvers.runge_kutta import ExplicitRungeKutta, Euler, RK2
 
 class PythonDirectionalStretching(DirectionalStretchingBase, HostDirectionalOperator):
 
+    @debug
+    def __new__(cls, formulation, **kwds):
+        return super(PythonDirectionalStretching, cls).__new__(cls,
+                formulation=formulation, **kwds)
+
     @debug
     def __init__(self, formulation, **kwds):
         super(PythonDirectionalStretching, self).__init__(formulation=formulation, **kwds)
@@ -88,7 +93,7 @@ class PythonDirectionalStretching(DirectionalStretchingBase, HostDirectionalOper
         wdir = Wnames[self.splitting_direction]
 
         def rhs(out, X, **kwds):
-            for i in xrange(3):
+            for i in range(3):
                 wn = Wnames[i]
                 Vi = V[wn]
                 Wd = X[wdir]
diff --git a/hysop/backend/host/python/operator/enstrophy.py b/hysop/backend/host/python/operator/enstrophy.py
index a11a3b5cbc3f66bdebed9f0e7d6ce6141c2b1c03..7944e382eb662d30ff895125c5a45edcbb0ffda2 100644
--- a/hysop/backend/host/python/operator/enstrophy.py
+++ b/hysop/backend/host/python/operator/enstrophy.py
@@ -11,6 +11,11 @@ from hysop.tools.numpywrappers import npw
 
 class PythonEnstrophy(EnstrophyBase, HostOperator):
     """Compute enstrophy of the given vorticity field."""
+
+    @debug
+    def __new__(cls, **kwds):
+        return super(PythonEnstrophy, cls).__new__(cls, **kwds)
+
     @debug
     def __init__(self, **kwds):
         super(PythonEnstrophy, self).__init__(**kwds)
diff --git a/hysop/backend/host/python/operator/flowrate_correction.py b/hysop/backend/host/python/operator/flowrate_correction.py
index 9ed4274584801523e25207a038513728489a39ee..f380575a348d67e1c8b2cf74edadeca84bf7211f 100644
--- a/hysop/backend/host/python/operator/flowrate_correction.py
+++ b/hysop/backend/host/python/operator/flowrate_correction.py
@@ -21,6 +21,14 @@ class PythonFlowRateCorrection(HostOperator):
     Velocity is corrected in Y and Z-direction from mean vorticity
     """
 
+    @debug
+    def __new__(cls, velocity, vorticity,
+                 dt, flowrate, variables, absorption_start=None,
+                 implementation=None, **kwds):
+        return super(PythonFlowRateCorrection, cls).__new__(cls,
+            input_fields=None, output_fields=None,
+            input_params=None, **kwds)
+
     @debug
     def __init__(self, velocity, vorticity,
                  dt, flowrate, variables, absorption_start=None,
@@ -71,8 +79,8 @@ class PythonFlowRateCorrection(HostOperator):
         # Compute volume and surface integration coefficients
         spaceStep = vmesh.space_step
         lengths = vtopo.domain.length
-        self._inv_ds = 1. / np.prod(lengths[:-1])
-        self._inv_dvol = 1. / (lengths[0]*lengths[1] *
+        self._inv_ds = 1.0 / np.prod(lengths[:-1])
+        self._inv_dvol = 1.0 / (lengths[0]*lengths[1] *
                                (self.absorption_start-vtopo.domain.origin[-1]))
         self.coeff_mean = np.prod(spaceStep) / np.prod(lengths)
 
diff --git a/hysop/backend/host/python/operator/integrate.py b/hysop/backend/host/python/operator/integrate.py
index 044e07038a88f3694865f50234951f6e28f44686..5521b4f8894e500848d7b2ba2f9d3cfc70e7665e 100644
--- a/hysop/backend/host/python/operator/integrate.py
+++ b/hysop/backend/host/python/operator/integrate.py
@@ -7,6 +7,10 @@ import numpy as np
 
 class PythonIntegrate(IntegrateBase, HostOperator):
 
+    @debug
+    def __new__(cls, **kwds):
+        return super(PythonIntegrate, cls).__new__(cls, **kwds)
+
     @debug
     def __init__(self, **kwds):
         super(PythonIntegrate, self).__init__(**kwds)
@@ -15,7 +19,7 @@ class PythonIntegrate(IntegrateBase, HostOperator):
     @op_apply
     def apply(self, **kwds):
         value = self.parameter._value.copy()
-        for i in xrange(self.dF.nb_components):
+        for i in range(self.dF.nb_components):
             Pi = np.sum(self.dF.data[i][self.dF.compute_slices])
             if (self.scaling_coeff[i] is None):
                 self.scaling_coeff[i] = 1.0 / Pi
diff --git a/hysop/backend/host/python/operator/memory_reordering.py b/hysop/backend/host/python/operator/memory_reordering.py
index 478a20bc6af6a1677a3e8e556e2cc7fffd1adbf9..8fd67436f91a6d78311f18421b1e66cbec10b23d 100644
--- a/hysop/backend/host/python/operator/memory_reordering.py
+++ b/hysop/backend/host/python/operator/memory_reordering.py
@@ -1,4 +1,4 @@
-from hysop.deps import np
+import numpy as np
 
 from hysop.tools.decorators import debug, profile
 from hysop.backend.host.host_operator import HostOperator
@@ -12,7 +12,11 @@ class PythonMemoryReordering(MemoryReorderingBase, HostOperator):
     """
 
     @debug
-    def __init__(self, **kwds): 
+    def __new__(cls, **kwds):
+        return super(PythonMemoryReordering, cls).__new__(cls, **kwds)
+
+    @debug
+    def __init__(self, **kwds):
         """Initialize a MemoryReordering operator on the python backend."""
         super(PythonMemoryReordering, self).__init__(**kwds)
 
diff --git a/hysop/backend/host/python/operator/min_max.py b/hysop/backend/host/python/operator/min_max.py
index f16aa7be7f52359b7c84f9036dddef1f199bdbb3..4774846b8427a1b11e4d6021dd411b6d5f6e8496 100644
--- a/hysop/backend/host/python/operator/min_max.py
+++ b/hysop/backend/host/python/operator/min_max.py
@@ -6,10 +6,14 @@ from hysop.backend.host.host_operator import HostOperator
 from hysop.backend.host.python.operator.derivative import PythonSpectralSpaceDerivative, \
         PythonFiniteDifferencesSpaceDerivative
 
-class PythonMinMaxFieldStatistics(MinMaxFieldStatisticsBase,
-                                  HostOperator):
+
+class PythonMinMaxFieldStatistics(MinMaxFieldStatisticsBase, HostOperator):
     """Python implementation backend of operator MinMaxFieldStatistics."""
 
+    @debug
+    def __new__(cls, **kwds):
+        return super(PythonMinMaxFieldStatistics, cls).__new__(cls, **kwds)
+
     @debug
     def __init__(self, **kwds):
         """See MinMaxFieldStatisticsBase.__init__()."""
diff --git a/hysop/backend/host/python/operator/penalization.py b/hysop/backend/host/python/operator/penalization.py
index 747f2118473e2544bfb4b0bbdb21868fe9c71fa6..396a300000b7b1b26dabf97522f0f46c0df9b753 100644
--- a/hysop/backend/host/python/operator/penalization.py
+++ b/hysop/backend/host/python/operator/penalization.py
@@ -35,7 +35,7 @@ class CommonPenalization(object):
 
         if isinstance(obstacles, dict):
             obs = {}
-            for c, o in obstacles.iteritems():
+            for c, o in obstacles.items():
                 if isinstance(c, ScalarParameter):
                     obs[lambda x: c()*x] = o
                 elif isinstance(c, type(lambda x: x)):
@@ -100,6 +100,14 @@ class PythonPenalizeVorticity(HostOperator, CommonPenalization):
         am.update(cls.__available_methods)
         return am
 
+    @debug
+    def __new__(cls, obstacles, variables,
+                velocity, vorticity,
+                dt, coeff=None, ubar=None, formulation=None, **kwds):
+        return super(PythonPenalizeVorticity, cls).__new__(cls,
+                                                           input_fields=None, output_fields=None,
+                                                           input_params=None, **kwds)
+
     @debug
     def __init__(self, obstacles, variables,
                  velocity, vorticity,
@@ -171,7 +179,7 @@ class PythonPenalizeVorticity(HostOperator, CommonPenalization):
         W = tuple(Wi[view] for Wi in dw.buffers)
         self.W, self.V = W, V
         self.dobstacles = {}
-        for c, o in self.obstacles.iteritems():
+        for c, o in self.obstacles.items():
             o_df = self.input_discrete_fields[o]
             self.dobstacles[c] = o_df.data[0][o_df.local_slices(
                 ghosts=(G, )*dim)]
diff --git a/hysop/backend/host/python/operator/poisson_curl.py b/hysop/backend/host/python/operator/poisson_curl.py
index 5e724b48c35a283de97d93325cf4f7224397c2ba..3e81829385acfffe240882eb3eca7505471e2244 100644
--- a/hysop/backend/host/python/operator/poisson_curl.py
+++ b/hysop/backend/host/python/operator/poisson_curl.py
@@ -9,7 +9,7 @@ from hysop.tools.numba_utils import make_numba_signature, prange
 from hysop.core.graph.graph import op_apply
 from hysop.backend.host.host_operator import HostOperator, OpenClMappable
 from hysop.operator.base.poisson_curl import SpectralPoissonCurlOperatorBase
-        
+
 from hysop.backend.host.python.operator.diffusion import PythonDiffusion
 from hysop.backend.host.python.operator.poisson import PythonPoisson
 from hysop.backend.host.python.operator.solenoidal_projection import PythonSolenoidalProjection
@@ -19,7 +19,7 @@ class PythonPoissonCurl(SpectralPoissonCurlOperatorBase, OpenClMappable, HostOpe
     """
     Solves the poisson rotational equation using numpy fftw.
     """
-        
+
     @classmethod
     def build_filter_curl_2d__0_m(cls, FIN, K, FOUT, target=__DEFAULT_NUMBA_TARGET__):
         args=(FIN,K,FOUT)
@@ -51,7 +51,7 @@ class PythonPoissonCurl(SpectralPoissonCurlOperatorBase, OpenClMappable, HostOpe
     def build_filter_curl_3d__0_n(cls, FIN, K, FOUT, target=__DEFAULT_NUMBA_TARGET__):
         args=(FIN,K,FOUT)
         signature, _ = make_numba_signature(*args)
-        layout='(n,m,p),(n)->(n,m,p)' 
+        layout='(n,m,p),(n)->(n,m,p)'
         @nb.guvectorize([signature], layout,
             target=target, nopython=True, cache=True)
         def filter_curl_3d__0_n(Fin, K, Fout):
@@ -93,7 +93,7 @@ class PythonPoissonCurl(SpectralPoissonCurlOperatorBase, OpenClMappable, HostOpe
     def build_filter_curl_3d__1_n(cls, FIN, K, FOUT, target=__DEFAULT_NUMBA_TARGET__):
         args=(FIN,K,FOUT)
         signature, _ = make_numba_signature(*args)
-        layout='(n,m,p),(n)->(n,m,p)' 
+        layout='(n,m,p),(n)->(n,m,p)'
         @nb.guvectorize([signature], layout,
             target=target, nopython=True, cache=True)
         def filter_curl_3d__1_n(Fin, K, Fout):
@@ -107,7 +107,7 @@ class PythonPoissonCurl(SpectralPoissonCurlOperatorBase, OpenClMappable, HostOpe
     def build_filter_curl_3d__1_m(cls, FIN, K, FOUT, target=__DEFAULT_NUMBA_TARGET__):
         args=(FIN,K,FOUT)
         signature, _ = make_numba_signature(*args)
-        layout='(n,m,p),(m)->(n,m,p)' 
+        layout='(n,m,p),(m)->(n,m,p)'
         @nb.guvectorize([signature], layout,
             target=target, nopython=True, cache=True)
         def filter_curl_3d__1_m(Fin, K, Fout):
@@ -121,7 +121,7 @@ class PythonPoissonCurl(SpectralPoissonCurlOperatorBase, OpenClMappable, HostOpe
     def build_filter_curl_3d__1_p(cls, FIN, K, FOUT, target=__DEFAULT_NUMBA_TARGET__):
         args=(FIN,K,FOUT)
         signature, _ = make_numba_signature(*args)
-        layout='(n,m,p),(p)->(n,m,p)' 
+        layout='(n,m,p),(p)->(n,m,p)'
         @nb.guvectorize([signature], layout,
             target=target, nopython=True, cache=True)
         def filter_curl_3d__1_p(Fin, K, Fout):
@@ -130,17 +130,17 @@ class PythonPoissonCurl(SpectralPoissonCurlOperatorBase, OpenClMappable, HostOpe
                     for k in range(0, Fin.shape[2]):
                         Fout[i,j,k] += K[k]*Fin[i,j,k]
         return functools.partial(filter_curl_3d__1_p, *args)
-        
+
     def setup(self, work):
         super(PythonPoissonCurl, self).setup(work=work)
-        
+
         dim = self.dim
         WIN, UIN, UOUT = self.WIN, self.UIN, self.UOUT
         K, KK = self.K, self.KK
         UK = self.UK
         assert len(WIN)==len(KK), (len(WIN),len(KK))
         assert len(UIN)==len(UOUT)==len(UK), (len(UIN),len(UOUT),len(UK))
-        
+
         # diffusion filters
         if self.should_diffuse:
             diffusion_filters = ()
@@ -166,7 +166,7 @@ class PythonPoissonCurl(SpectralPoissonCurlOperatorBase, OpenClMappable, HostOpe
             F = PythonPoisson.build_poisson_filter(dim, *((Wi,)+KKi+(Wi,)))
             poisson_filters += (F,)
         self.poisson_filters = poisson_filters
-        
+
         # curl filter
         if (dim==2):
             curl0 = self.build_filter_curl_2d__0_m(UIN[0], UK[0], UOUT[0])
@@ -194,7 +194,7 @@ class PythonPoissonCurl(SpectralPoissonCurlOperatorBase, OpenClMappable, HostOpe
     def apply(self, simulation=None, **kwds):
         """Apply analytic formula."""
         super(PythonPoissonCurl, self).apply(**kwds)
-        
+
         diffuse=self.should_diffuse
         project=self.do_project(simu=simulation)
 
@@ -216,8 +216,8 @@ class PythonPoissonCurl(SpectralPoissonCurlOperatorBase, OpenClMappable, HostOpe
                 for curl_filter in curl_filters:
                     curl_filter()
                 Bt(simulation=simulation)
-        
+
         self.dU.exchange_ghosts()
         if (diffuse or project):
             self.dW.exchange_ghosts()
-        self.update_energy(simulation=simulation) 
+        self.update_energy(simulation=simulation)
diff --git a/hysop/backend/host/python/operator/solenoidal_projection.py b/hysop/backend/host/python/operator/solenoidal_projection.py
index a73a02da477b7f7f5ca004e553a09a49167e6ea6..b635f1ca6152b7620dddc213e4bf76627713061c 100644
--- a/hysop/backend/host/python/operator/solenoidal_projection.py
+++ b/hysop/backend/host/python/operator/solenoidal_projection.py
@@ -1,6 +1,6 @@
 
 import functools
-import numba as nb 
+import numba as nb
 
 from hysop import __DEFAULT_NUMBA_TARGET__
 from hysop.tools.types import check_instance, first_not_None
@@ -11,80 +11,81 @@ from hysop.core.graph.graph import op_apply
 from hysop.operator.base.solenoidal_projection import SolenoidalProjectionOperatorBase
 from hysop.tools.numba_utils import make_numba_signature, prange
 
+def build_projection_filter(FIN, FOUT, K, KK, target=__DEFAULT_NUMBA_TARGET__):
+    assert len(FIN)==len(FOUT)==3
+    assert len(K)==len(KK)==9
+    args = FIN+K+KK+FOUT
+
+    signature, _ = make_numba_signature(*args)
+    layout =  '(m,n,p),(m,n,p),(m,n,p), '
+    layout += '(m),(n),(p), (m),(n),(p), (m),(n),(p), '
+    layout += '(m),(n),(p), (m),(n),(p), (m),(n),(p) '
+    layout += '-> (m,n,p),(m,n,p),(m,n,p)'
+
+    @nb.guvectorize([signature], layout,
+        target=target, nopython=True, cache=True)
+    def filter_projection_3d(Fin0, Fin1, Fin2,
+                             K00, K01, K02,
+                             K10, K11, K12,
+                             K20, K21, K22,
+                             KK00, KK01, KK02,
+                             KK10, KK11, KK12,
+                             KK20, KK21, KK22,
+                             Fout0, Fout1, Fout2):
+        for i in prange(0, Fin0.shape[0]):
+            for j in prange(0, Fin0.shape[1]):
+                for k in range(0, Fin0.shape[2]):
+                    F0 = Fin0[i,j,k]
+                    F1 = Fin1[i,j,k]
+                    F2 = Fin2[i,j,k]
+                    G0 = K00[i]*F0
+                    G1 = K11[j]*F1
+                    G2 = K22[k]*F2
+                    L0 = KK00[i]*F0
+                    L1 = KK11[j]*F1
+                    L2 = KK22[k]*F2
+                    if ((i!=0) or (j!=0) or (k!=0)):
+                        C0 = ((L0        + K10[i]*G1 + K20[i]*G2) / (KK00[i] + KK01[j] + KK02[k]))
+                        C1 = ((K01[j]*G0 + L1        + K21[j]*G2) / (KK10[i] + KK11[j] + KK12[k]))
+                        C2 = ((K02[k]*G0 + K12[k]*G1 + L2       ) / (KK20[i] + KK21[j] + KK22[k]))
+                    else:
+                        C0 = 0
+                        C1 = 0
+                        C2 = 0
+                    Fout0[i,j,k] = Fin0[i,j,k] - C0
+                    Fout1[i,j,k] = Fin1[i,j,k] - C1
+                    Fout2[i,j,k] = Fin2[i,j,k] - C2
+
+    return functools.partial(filter_projection_3d, *args)
+
+
+def make_div_filter(target, *args):
+    signature, _ = make_numba_signature(*args)
+    layout =  '(m,n,p), (m,n,p), (m,n,p), (m),(n),(p) -> (m,n,p)'
+
+    @nb.guvectorize([signature], layout,
+        target=target, nopython=True, cache=True)
+    def compute_div_3d(Fin0, Fin1, Fin2, K0, K1, K2, Fout):
+        for i in prange(0, Fin0.shape[0]):
+            for j in prange(0, Fin0.shape[1]):
+                for k in range(0, Fin0.shape[2]):
+                    Fout[i,j,k] = (K0[i]*Fin0[i,j,k] + K1[j]*Fin1[i,j,k] + K2[k]*Fin2[i,j,k])
+
+    return functools.partial(compute_div_3d, *args)
+
+
 class PythonSolenoidalProjection(SolenoidalProjectionOperatorBase, OpenClMappable, HostOperator):
     """
     Solves solenoidal projection (project a 3d field F such that div(F)=0),
     using spectral methods.
     """
-    
-    @classmethod
-    def build_projection_filter(cls, FIN, FOUT, K, KK, target=__DEFAULT_NUMBA_TARGET__):
-        assert len(FIN)==len(FOUT)==3
-        assert len(K)==len(KK)==9
-        args = FIN+K+KK+FOUT
-
-        signature, _ = make_numba_signature(*args)
-        layout =  '(m,n,p),(m,n,p),(m,n,p), '
-        layout += '(m),(n),(p), (m),(n),(p), (m),(n),(p), '
-        layout += '(m),(n),(p), (m),(n),(p), (m),(n),(p) '
-        layout += '-> (m,n,p),(m,n,p),(m,n,p)'
-
-        @nb.guvectorize([signature], layout,
-            target=target, nopython=True, cache=True)
-        def filter_projection_3d(Fin0, Fin1, Fin2, 
-                                 K00, K01, K02, 
-                                 K10, K11, K12, 
-                                 K20, K21, K22, 
-                                 KK00, KK01, KK02, 
-                                 KK10, KK11, KK12, 
-                                 KK20, KK21, KK22, 
-                                 Fout0, Fout1, Fout2):
-            for i in prange(0, Fin0.shape[0]):
-                for j in prange(0, Fin0.shape[1]):
-                    for k in range(0, Fin0.shape[2]):
-                        F0 = Fin0[i,j,k]
-                        F1 = Fin1[i,j,k]
-                        F2 = Fin2[i,j,k]
-                        G0 = K00[i]*F0
-                        G1 = K11[j]*F1
-                        G2 = K22[k]*F2
-                        L0 = KK00[i]*F0
-                        L1 = KK11[j]*F1
-                        L2 = KK22[k]*F2
-                        if ((i!=0) or (j!=0) or (k!=0)):
-                            C0 = ((L0        + K10[i]*G1 + K20[i]*G2) / (KK00[i] + KK01[j] + KK02[k]))
-                            C1 = ((K01[j]*G0 + L1        + K21[j]*G2) / (KK10[i] + KK11[j] + KK12[k]))
-                            C2 = ((K02[k]*G0 + K12[k]*G1 + L2       ) / (KK20[i] + KK21[j] + KK22[k]))
-                        else:
-                            C0 = 0
-                            C1 = 0
-                            C2 = 0
-                        Fout0[i,j,k] = Fin0[i,j,k] - C0
-                        Fout1[i,j,k] = Fin1[i,j,k] - C1
-                        Fout2[i,j,k] = Fin2[i,j,k] - C2
-
-        return functools.partial(filter_projection_3d, *args)
 
     def build_divergence_filters(self, target=__DEFAULT_NUMBA_TARGET__):
-        def make_div_filter(*args):
-            signature, _ = make_numba_signature(*args)
-            layout =  '(m,n,p), (m,n,p), (m,n,p), (m),(n),(p) -> (m,n,p)'
-
-            @nb.guvectorize([signature], layout,
-                target=target, nopython=True, cache=True)
-            def compute_div_3d(Fin0, Fin1, Fin2, K0, K1, K2, Fout):
-                for i in prange(0, Fin0.shape[0]):
-                    for j in prange(0, Fin0.shape[1]):
-                        for k in range(0, Fin0.shape[2]):
-                            Fout[i,j,k] = (K0[i]*Fin0[i,j,k] + K1[j]*Fin1[i,j,k] + K2[k]*Fin2[i,j,k])
-
-            return functools.partial(compute_div_3d, *args)
-
         if self.compute_divFin:
             FIN, K, DIV_IN = self.FIN, self.K, self.DIV_IN
             K = (K[0], K[4], K[8])
             args = FIN+K+DIV_IN
-            self.pre_filter_div = make_div_filter(*args)
+            self.pre_filter_div = make_div_filter(target, *args)
         else:
             self.pre_filter_div = None
 
@@ -92,16 +93,16 @@ class PythonSolenoidalProjection(SolenoidalProjectionOperatorBase, OpenClMappabl
             FOUT, K, DIV_OUT = self.FOUT, self.K, self.DIV_OUT
             K = (K[0], K[4], K[8])
             args = FOUT+K+DIV_OUT
-            self.post_filter_div = make_div_filter(*args)
+            self.post_filter_div = make_div_filter(target, *args)
         else:
             self.post_filter_div = None
-    
+
     @debug
     def setup(self, work):
         super(PythonSolenoidalProjection, self).setup(work=work)
         FIN, FOUT = self.FIN, self.FOUT
         K, KK = self.K, self.KK
-        self.filter_projection = self.build_projection_filter(FIN, FOUT, K, KK)
+        self.filter_projection = build_projection_filter(FIN, FOUT, K, KK)
         self.build_divergence_filters()
 
     @op_apply
@@ -115,7 +116,7 @@ class PythonSolenoidalProjection(SolenoidalProjectionOperatorBase, OpenClMappabl
             if self.compute_divFin:
                 self.pre_filter_div()
                 self.backward_divFin_transform()
-            
+
             self.filter_projection()
 
             if self.compute_divFout:
diff --git a/hysop/backend/host/python/operator/spatial_filtering.py b/hysop/backend/host/python/operator/spatial_filtering.py
index 811384d313979d622506af035fe4ac1a0e0e19ba..77d23cef744900aca14cf536df59ac8d212697ec 100644
--- a/hysop/backend/host/python/operator/spatial_filtering.py
+++ b/hysop/backend/host/python/operator/spatial_filtering.py
@@ -11,11 +11,12 @@ from hysop.operator.base.spatial_filtering import (
         PolynomialInterpolationFilterBase,
         RemeshRestrictionFilterBase,
         SpectralRestrictionFilterBase,
-        SubgridRestrictionFilterBase, 
+        SubgridRestrictionFilterBase,
         PolynomialRestrictionFilterBase)
 
 
 class PythonPolynomialInterpolationFilter(PolynomialInterpolationFilterBase, HostOperator):
+
     def discretize(self, **kwds):
         if self.discretized:
             return
@@ -31,16 +32,17 @@ class PythonPolynomialInterpolationFilter(PolynomialInterpolationFilterBase, Hos
         periodicity = self.dFin.periodicity
         gr, n  = self.subgrid_interpolator.gr, self.subgrid_interpolator.n
         Wr     = self.Wr
-        
+
         for idx in np.ndindex(*self.iter_shape):
-            oslc = tuple(slice(j*gr[i], (j+1)*gr[i], 1) for i,j in enumerate(idx)) 
-            islc = tuple(slice(periodicity[i]+j, periodicity[i]+j+n[i], 1) 
+            oslc = tuple(slice(j*gr[i], (j+1)*gr[i], 1) for i,j in enumerate(idx))
+            islc = tuple(slice(periodicity[i]+j, periodicity[i]+j+n[i], 1)
                     for i,j in enumerate(idx))
             fout[oslc] = Wr.dot(fin[islc].ravel()).reshape(gr)
         self.dFout.exchange_ghosts()
 
 
 class PythonPolynomialRestrictionFilter(PolynomialRestrictionFilterBase, HostOperator):
+
     def discretize(self, **kwds):
         if self.discretized:
             return
@@ -58,12 +60,13 @@ class PythonPolynomialRestrictionFilter(PolynomialRestrictionFilterBase, HostOpe
         gr     = self.subgrid_restrictor.gr
         Rr     = self.Rr
         rshape = Rr.shape
-        
+
         for idx in np.ndindex(*self.iter_shape):
-            islc = tuple(slice(j*gr[i], j*gr[i]+rshape[i], 1) for i,j in enumerate(idx)) 
+            islc = tuple(slice(j*gr[i], j*gr[i]+rshape[i], 1) for i,j in enumerate(idx))
             fout[idx] = (Rr*fin[islc]).sum()
         self.dFout.exchange_ghosts()
 
+
 class PythonRemeshRestrictionFilter(RemeshRestrictionFilterBase, HostOperator):
     """
     Python implementation for lowpass spatial filtering: small grid -> coarse grid
@@ -77,7 +80,7 @@ class PythonRemeshRestrictionFilter(RemeshRestrictionFilterBase, HostOperator):
         oshape = self.fout.shape
 
         dviews = ()
-        for (idx, Wi) in self.nz_weights.iteritems():
+        for (idx, Wi) in self.nz_weights.items():
             slc = tuple(slice(i, i+r*s, r) for (i,s,r) in zip(idx, oshape, iratio))
             dviews += ((Wi, fin[slc]),)
         self.data_views = dviews
@@ -87,7 +90,7 @@ class PythonRemeshRestrictionFilter(RemeshRestrictionFilterBase, HostOperator):
         """Apply analytic formula."""
         super(PythonRemeshRestrictionFilter, self).apply(**kwds)
         fin, fout = self.fin, self.fout
-        
+
         fout[...] = 0
         for (Wi, iview) in self.data_views:
             fout[...] += Wi*iview
@@ -104,11 +107,11 @@ class PythonSpectralRestrictionFilter(SpectralRestrictionFilterBase, HostOperato
     def apply(self, simulation, **kwds):
         """Apply spectral filter (which is just a square window centered on low frequencies)."""
         super(PythonSpectralRestrictionFilter, self).apply(**kwds)
-        self.Ft(simulation=simulation) 
+        self.Ft(simulation=simulation)
         for i, (src_slc, dst_slc) in enumerate(zip(*self.fslices)):
             self.FOUT[dst_slc] = self.FIN[src_slc]
         self.FOUT[...] *= self.scaling
-        self.Bt(simulation=simulation) 
+        self.Bt(simulation=simulation)
         self.dFout.exchange_ghosts()
 
     def _compute_scaling_coefficient(self):
diff --git a/hysop/backend/host/python/operator/transpose.py b/hysop/backend/host/python/operator/transpose.py
index 840ac9c97c7edcc481d596cee270ec8f094e6405..de9da3702d8116672c4a2b3ec32de65318453b59 100644
--- a/hysop/backend/host/python/operator/transpose.py
+++ b/hysop/backend/host/python/operator/transpose.py
@@ -23,11 +23,15 @@ class PythonTranspose(TransposeOperatorBase, HostOperator):
     """
 
     @debug
-    def __init__(self, **kwds): 
+    def __new__(cls, **kwds):
+        return super(PythonTranspose, cls).__new__(cls, **kwds)
+
+    @debug
+    def __init__(self, **kwds):
         """Initialize a Transpose operator on the python backend."""
         super(PythonTranspose, self).__init__(**kwds)
 
-    
+
     def discretize(self):
         super(PythonTranspose, self).discretize()
         assert self.din.dtype == self.dout.dtype
@@ -52,7 +56,7 @@ class PythonTranspose(TransposeOperatorBase, HostOperator):
             (field, td, req) = reqs
             req.memory_order = MemoryOrdering.ANY
         return requirements
-    
+
     @classmethod
     def supports_mpi(cls):
         return True
@@ -62,34 +66,34 @@ class PythonTranspose(TransposeOperatorBase, HostOperator):
         """ Transpose in or out of place."""
         super(PythonTranspose,self).apply(**kwds)
         self.exec_transpose(**kwds)
-    
+
     def transpose_hptt_inplace(self, **kwds):
         axes = self.axes
         din, dout, dtmp = self.din, self.dout, self.dtmp.handle.view(np.ndarray)
         assert self.din.dfield is self.dout.dfield
-        for i in xrange(din.nb_components):
+        for i in range(din.nb_components):
             hptt.transpose(a=din.buffers[i], out=dtmp, axes=axes)
             dout.buffers[i][...] = dtmp
-            
+
     def transpose_hptt_outofplace(self, **kwds):
         axes = self.axes
         din, dout = self.din, self.dout
         assert self.din.dfield is not self.dout.dfield
-        for i in xrange(din.nb_components):
+        for i in range(din.nb_components):
             hptt.transpose(a=din.buffers[i], out=dout.buffers[i], axes=axes)
 
     def transpose_np_inplace(self, **kwds):
         axes = self.axes
         din, dout, dtmp = self.din, self.dout, self.dtmp.handle.view(np.ndarray)
         assert self.din.dfield is self.dout.dfield
-        for i in xrange(din.nb_components):
+        for i in range(din.nb_components):
             dtmp[...] = np.transpose(din.buffers[i], axes=axes)
             dout.buffers[i][...] = dtmp
-            
+
     def transpose_np_outofplace(self, **kwds):
         axes = self.axes
         din, dout = self.din, self.dout
         assert self.din.dfield is not self.dout.dfield
-        for i in xrange(din.nb_components):
+        for i in range(din.nb_components):
             dout.buffers[i][...] = np.transpose(din.buffers[i], axes=axes)
 
diff --git a/hysop/backend/host/python/operator/vorticity_absorption.py b/hysop/backend/host/python/operator/vorticity_absorption.py
index ae33f507a3319dc947272cbe45776fd88cb5f15e..1ee44d97d17ab05e49339f60426e9c8bc2a08bf6 100644
--- a/hysop/backend/host/python/operator/vorticity_absorption.py
+++ b/hysop/backend/host/python/operator/vorticity_absorption.py
@@ -1,3 +1,7 @@
+import numpy as np
+import sympy as sm
+from sympy.utilities.lambdify import lambdify
+
 from hysop.backend.host.host_operator import HostOperator
 from hysop.tools.types import check_instance
 from hysop.tools.decorators import debug
@@ -7,9 +11,6 @@ from hysop.parameters.tensor_parameter import TensorParameter
 from hysop.topology.cartesian_descriptor import CartesianTopologyDescriptors
 from hysop.core.graph.graph import op_apply
 from hysop.core.memory.memory_request import MemoryRequest
-import numpy as np
-from hysop.deps import sm
-from sympy.utilities.lambdify import lambdify
 
 
 class PythonVorticityAbsorption(HostOperator):
@@ -18,6 +19,15 @@ class PythonVorticityAbsorption(HostOperator):
     at domain outlet.
     """
 
+    @debug
+    def __new__(cls, velocity, vorticity,
+                 dt, flowrate, start_coord, variables,
+                 custom_filter=None,
+                 implementation=None, **kwds):
+        return super(PythonVorticityAbsorption, cls).__new__(cls,
+            input_fields=None, output_fields=None,
+            input_params=None, **kwds)
+
     @debug
     def __init__(self, velocity, vorticity,
                  dt, flowrate, start_coord, variables,
@@ -102,7 +112,7 @@ class PythonVorticityAbsorption(HostOperator):
 
         domain = self.dvelocity.domain
         lengths = domain.length
-        self._inv_ds = 1. / np.prod(lengths[:-1])
+        self._inv_ds = 1.0 / np.prod(lengths[:-1])
 
         # Compute slice and x_coords for the absorption region,
         # for given start_coord to domain end.
@@ -142,7 +152,7 @@ class PythonVorticityAbsorption(HostOperator):
         self._filter, self._filter_diff = None, None
         self._apply = self._false_apply
         if ind is not None:
-            x = sm.abc.x
+            x = sm.Symbol('x')
             if self.custom_filter is None:
                 # Default filter
                 xl = mesh_local_compute_slices[-1].stop-ind[-1].start
diff --git a/hysop/constants.py.in b/hysop/constants.py.in
index 21923e645f8f021dd1380abb2cc4a048b3726090..8556a1fc6a2f2499b73160de6ab052e8f27a5047 100644
--- a/hysop/constants.py.in
+++ b/hysop/constants.py.in
@@ -8,11 +8,13 @@ This file is generated from constants.py.in
 and it's probably a bad idea to modify it.
 """
 
+import math, inspect, os
+import itertools as it
+import numpy as np
 
 import hysop
 from hysop import __VERBOSE__, __DEBUG__, __PROFILE__,\
                   __KERNEL_DEBUG__, __MPI_ENABLED__
-from hysop.deps        import math, np, it, inspect, os
 from hysop.tools.enum  import EnumFactory
 
 if __MPI_ENABLED__:
diff --git a/hysop/core/arrays/array.py b/hysop/core/arrays/array.py
index 27a8f7b48266d1e6fcbb87381cb659168a05eb93..1c6dcac266db2c599a5347b0b7311fb17682711b 100644
--- a/hysop/core/arrays/array.py
+++ b/hysop/core/arrays/array.py
@@ -7,26 +7,26 @@ from hysop.tools.types import check_instance
 from hysop.tools.numpywrappers import slices_empty
 from hysop.tools.decorators import required_property, optional_property
 
-    
-class Array(object):
+
+class Array(object, metaclass=ABCMeta):
     """
     Interface of an abstract array.
-    An array is a numpy.ndarray work-alike that stores its data and performs 
+    An array is a numpy.ndarray work-alike that stores its data and performs
     its computations on various devices, depending on the backend.
-    All exposed functions should work exactly as in numpy. 
-    Arithmetic methods in Array, when available, should at least support the 
+    All exposed functions should work exactly as in numpy.
+    Arithmetic methods in Array, when available, should at least support the
     broadcasting of scalars.
 
-    For Fortran users, reverse the usual order of indices when accessing elements of an array. 
+    For Fortran users, reverse the usual order of indices when accessing elements of an array.
     to be in line with Python semantics and the natural order of the data.
-    The fact is that Python indexing on lists and other sequences naturally leads to an 
-    outside-to inside ordering (the first index gets the largest grouping, and the last 
+    The fact is that Python indexing on lists and other sequences naturally leads to an
+    outside-to inside ordering (the first index gets the largest grouping, and the last
     gets the smallest element).
-    
+
     See https://docs.scipy.org/doc/numpy-1.10.0/reference/internals.html for more information
     about C versus Fortran ordering in numpy.
-        
-    Numpy notation are used for axes, axe 0 is the slowest varying index and last axe is 
+
+    Numpy notation are used for axes, axe 0 is the slowest varying index and last axe is
     the fastest varying index.
     By default:
         3D C-ordering       is [0,1,2] which corresponds to ZYX transposition state.
@@ -35,13 +35,11 @@ class Array(object):
     This means that when taking array byte strides, in the axis order, the strides are
     decreasing until the last stride wich is the size of array dtype in bytes.
     """
-    
-    __metaclass__ = ABCMeta
-        
+
     def __init__(self, handle, backend, **kwds):
         """
         Build an Array instance.
-        
+
         Parameters
         ----------
         handle:  buffer backend implementation
@@ -51,7 +49,7 @@ class Array(object):
         Notes
         -----
         This should never be called directly by the user.
-        Arrays should be constructed using array backend facilities, like zeros or empty. 
+        Arrays should be constructed using array backend facilities, like zeros or empty.
         The parameters given here refer to a low-level method for instantiating an array.
         """
         from hysop.core.arrays.all import ArrayBackend
@@ -84,7 +82,7 @@ class Array(object):
             msg=msg.format(self.__class__)
             raise RuntimeError(msg)
         return self._backend
-   
+
     handle  = property(get_handle)
     backend = property(get_backend)
 
@@ -92,10 +90,6 @@ class Array(object):
         """Return scalar value as an int."""
         assert self.size==1
         return int(self.get())
-    def __long__(self):
-        """Return scalar value as a long."""
-        assert self.size==1
-        return long(self.get())
     def __float__(self):
         assert self.size==1
         """Return scalar value as a float."""
@@ -111,13 +105,13 @@ class Array(object):
             return self.handle.__nonzero__()
         else:
             return True
-    
+
     @classmethod
     def _not_implemented_yet(cls, funname):
         msg = '{}::{} has not been implemented yet.'
         msg=msg.format(cls.__name__, funname)
         raise NotImplementedError(msg)
-        
+
     @classmethod
     def _unsupported_argument(cls, fname, argname, arg, default_value=None):
         if arg != default_value:
@@ -125,34 +119,34 @@ class Array(object):
             msg+= 'supported and should be set to {}.'
             msg=msg.format(cls.__name__, fname, argname, default_value)
             raise NotImplementedError(msg)
-    
+
     def wrap(self, handle):
         """
         Wrap handle with the same initialization arguments as this instance.
         """
         return self.backend.wrap(handle=handle)
-    
+
     def _call(self, fname, *args, **kargs):
         """
         Calls a handle function.
         """
         f = getattr(self.handle, fname)
         return self.backend._call(f, *args, **kargs)
-    
+
     @abstractmethod
     def as_symbolic_array(self, name, **kwds):
         """
         Return a symbolic array variable that contain a reference to this array.
         """
         pass
-    
+
     @abstractmethod
     def as_symbolic_buffer(self, name, **kwds):
         """
         Return a symbolic buffer variable that contain a reference to this array.
         """
         pass
-    
+
     @abstractmethod
     @required_property
     def get_ndim(self):
@@ -160,7 +154,7 @@ class Array(object):
         Number of array dimensions.
         """
         pass
-    
+
     @abstractmethod
     @required_property
     def get_int_ptr(self):
@@ -182,8 +176,8 @@ class Array(object):
     def set_shape(self):
         """
         Set the shape of this buffer.
-        From the numpy doc: It is not always possible to change the shape of an array without 
-        copying the data. If you want an error to be raised if the data is copied, you should   
+        From the numpy doc: It is not always possible to change the shape of an array without
+        copying the data. If you want an error to be raised if the data is copied, you should
         assign the new shape to the shape attribute of the array.
         """
         pass
@@ -203,7 +197,7 @@ class Array(object):
         Tuple of ints that represents the byte step in each dimension when traversing an array.
         """
         pass
-    
+
     @abstractmethod
     @required_property
     def get_data(self):
@@ -219,7 +213,7 @@ class Array(object):
         Base object if memory is from some other object.
         """
         pass
-   
+
     @abstractmethod
     @required_property
     def get_dtype(self):
@@ -227,21 +221,21 @@ class Array(object):
         numpy.dtype representing the type stored into this buffer.
         """
         pass
-    
+
     @optional_property
     def get_flags(self):
         """
         Information about the memory layout of the array.
         """
         pass
-    
+
     @optional_property
     def get_imag(self):
         """
         The imaginary part of the array.
         """
         pass
-    
+
     @optional_property
     def get_real(self):
         """
@@ -255,7 +249,7 @@ class Array(object):
         An object to simplify the interaction of the array with the ctypes module.
         """
         pass
-    
+
     def get_T(self):
         """
         Same as self.transpose(), except that self is returned if self.ndim < 2.
@@ -264,13 +258,13 @@ class Array(object):
             return self
         else:
             return self.transpose()
-    
+
     def get_size(self):
         """
         Number of elements in the array.
         """
         return prod(self.get_shape())
-    
+
     def get_itemsize(self):
         """
         Number of bytes per element.
@@ -282,8 +276,8 @@ class Array(object):
         Number of bytes in the whole buffer.
         """
         return self.itemsize*self.size
-    
-    
+
+
     # array properties to be (re)defined
     ndim    = property(get_ndim)
     shape   = property(get_shape, set_shape)
@@ -293,19 +287,19 @@ class Array(object):
     base    = property(get_base)
     dtype   = property(get_dtype)
     int_ptr = property(get_int_ptr)
-    
+
     # optional array properties
     flags    = property(get_flags)
     imag     = property(get_imag)
     real     = property(get_real)
     ctypes   = property(get_ctypes)
-    
+
     # deduced array properties, may be redefined
     size     = property(get_size)
     itemsize = property(get_itemsize)
     nbytes   = property(get_nbytes)
     T        = property(get_T)
-    
+
     @abstractmethod
     def get(self, handle=False):
         """
@@ -330,18 +324,18 @@ class Array(object):
         physical memory as other.
         """
         return self.get_data_base() is other.get_data_base()
-    
+
     def ctype(self):
         """
         Equivalent C type corresponding to the numpy.dtype.
         """
         self.__class__.not_implemented_yet('ctype')
-    
+
     def get_order(self):
         """
         Memory ordering.
-        Determine whether the array view is written in C-contiguous order 
-        (last index varies the fastest), or FORTRAN-contiguous order 
+        Determine whether the array view is written in C-contiguous order
+        (last index varies the fastest), or FORTRAN-contiguous order
         in memory (first index varies the fastest).
         If dimension is one, default_order is returned.
         """
@@ -366,7 +360,7 @@ class Array(object):
              Axe 0 is the slowest varying index, last axe is the fastest varying index.
              ie 3D C-ordering       is [2,1,0]
                 3D fortran-ordering is [0,1,2]
-             
+
              Thoses are the axes seen as a numpy view on memory, *only* strides are permutated for access,
              Those axes are found by reverse argsorting the array strides, using a stable sorting algorithm.
 
@@ -416,14 +410,14 @@ class Array(object):
         Copy data from buffer src
         """
         self.backend.memcpy(self, src, **kargs)
-    
+
     def copy_to(self, dst, **kargs):
         """
         Copy data from buffer to dst
         """
         self.backend.memcpy(dst,self, **kargs)
-    
-    
+
+
     def transpose_to_state(self, state, **kargs):
         """
         Transpose buffer to specified transposition state.
@@ -437,7 +431,7 @@ class Array(object):
         for axe in target:
             axes.append(origin.index(axe))
         return self.transpose(axes=axes)
-   
+
 
 
     # np.ndarray like methods
@@ -470,7 +464,7 @@ class Array(object):
         """
         Returns the indices that would partition this array.
         """
-        return self.backend.argpartition(a=self, kth=kth, axis=axis, kind=kind, 
+        return self.backend.argpartition(a=self, kth=kth, axis=axis, kind=kind,
                 order=order, **kargs)
 
     def argsort(self, axis=-1, kind='quicksort', order=None, **kargs):
@@ -478,8 +472,8 @@ class Array(object):
         Returns the indices that would sort this array.
         """
         return self.backend.argsort(a=self, axis=axis, kind=kind, order=order, **kargs)
-    
-    def astype(self, dtype, order=MemoryOrdering.SAME_ORDER, casting='unsafe', subok=True, 
+
+    def astype(self, dtype, order=MemoryOrdering.SAME_ORDER, casting='unsafe', subok=True,
             copy=True, **kargs):
         """
         Copy of the array, cast to a specified type.
@@ -490,7 +484,7 @@ class Array(object):
     def byteswap(self, inplace=False, **kargs):
         """
         Swap the bytes of the array elements
-        Toggle between low-endian and big-endian data representation by returning 
+        Toggle between low-endian and big-endian data representation by returning
         a byteswapped array, optionally swapped in-place.
         """
         return self.backend.byteswap(a=self, inplace=inplace, **kargs)
@@ -519,7 +513,7 @@ class Array(object):
         Complex-conjugate of all elements.
         """
         return self.backend.conj(x=self, out=out, **kargs)
-    
+
     def conjugate(self, out=None, **kargs):
         """
         Return the complex conjugate, element-wise.
@@ -537,7 +531,7 @@ class Array(object):
         Return the cumulative sum of the elements along the given axis.
         """
         return self.backend.cumsum(a=self, axis=axis, dtype=dtype, out=out, **kargs)
-    
+
     def copy(self, order=MemoryOrdering.SAME_ORDER, **kargs):
         """
         Return a copy of the array.
@@ -610,7 +604,7 @@ class Array(object):
         Rearranges the elements in the array in such a way that value of the element i
         in kth position is in the position it would be in a sorted array.
         """
-        return self.backend.partition(a=self, kth=kth, axis=axis, kind=kind, 
+        return self.backend.partition(a=self, kth=kth, axis=axis, kind=kind,
                 order=order, **kargs)
 
     def prod(self, axis=None, dtype=None, out=None, **kargs):
@@ -630,56 +624,56 @@ class Array(object):
         Return a flattened array.
         """
         return self.backend.ravel(a=self, order=order, **kargs)
-    
+
     def repeat(self, repeats, axis=None, **kargs):
         """
         Repeat elements of an array.
         """
         return self.backend.repeat(a=self, repeats=repeats, axis=axis, **kargs)
-    
+
     def reshape(self, new_shape, order=default_order, **kargs):
         """
         Returns an array containing the same data with a new shape.
         """
         return self.backend.reshape(a=self, newshape=new_shape, order=order, **kargs)
-    
+
     def resize(self, new_shape, refcheck=True, **kargs):
         """
         Change shape and size of array in-place.
         """
         return self.backend.resize(a=self, new_shape=new_shape, refcheck=refcheck, **kargs)
-    
+
     def round(self, decimals=0, out=None, **kargs):
         """
         Return a with each element rounded to the given number of decimals.
         """
         return self.backend.around(a=self, decimals=decimals, out=out, **kargs)
-    
+
     def searchsorted(self, v, side='left', sorter=None, **kargs):
         """
         Find indices where elements of v should be inserted in a to maintain order.
         """
         return self.backend.searchsorted(a=self, v=v, side=side, sorter=sorter, **kargs)
-    
+
     def sort(self, axis=-1, kind='quicksort', order=None, **kargs):
         """
         Sort an array, in-place.
         """
         return self.backend.sort(a=self, axis=axis, kind=kind, order=order, **kargs)
-    
+
     def squeeze(self, axis=None, **kargs):
         """
         Remove single-dimensional entries from the shape of a.
         """
         return self.backend.squeeze(a=self, axis=axis, **kargs)
-    
+
     def std(self, axis=None, dtype=None, out=None, ddof=0, **kargs):
         """
         Returns the standard deviation of the array elements along given axis.
         """
-        return self.backend.std(a=self, axis=axis, dtype=dtype, out=out, 
+        return self.backend.std(a=self, axis=axis, dtype=dtype, out=out,
                 ddof=ddof)
-    
+
     def sum(self, axis=None, dtype=None, out=None, **kargs):
         """
         Return the sum of the array elements over the given axis.
@@ -691,18 +685,18 @@ class Array(object):
         Return a view of the array with axis1 and axis2 interchanged.
         """
         return self.backend.swapaxes(axis1=axis1, axis2=axis2, **kargs)
-    
+
     def trace(self, offset=0, axis1=0, axis2=1, dtype=None, out=None, **kargs):
         """
         Return the sum along diagonals of the array.
         """
-        return self.backend.trace(a=self, offset=offset, 
+        return self.backend.trace(a=self, offset=offset,
                 axis1=axis1, axis2=axis2, dtype=dtype, out=out, **kargs)
-    
+
     def transpose(self, axes=None, **kargs):
         """
         Returns a view of the array with axes transposed.
-        """ 
+        """
         return self.backend.transpose(a=self, axes=axes, **kargs)
 
     def var(self, axis=None, dtype=None, out=None, ddof=0, **kargs):
@@ -710,7 +704,7 @@ class Array(object):
         Returns the variance of the array elements, along given axis.
         """
         return self.backend.var(a=self, axis=axis, dtype=dtype, out=out, ddof=ddof, **kargs)
-   
+
 
     ## Array restricted methods
     def setflags(self, write=None, align=None, uic=None):
@@ -720,13 +714,13 @@ class Array(object):
         msg='{}::set_flags() should not be called.'
         msg=msg.format(self.__class__.__name__)
         raise RuntimeError(msg)
-   
+
 
     ## Array specific unimplemented methods
     def tofile(self, fid, sep='', format='%s', **kargs):
         """
         Write array to a file as text or binary (default).
-        This is a convenience function for quick storage of array data. 
+        This is a convenience function for quick storage of array data.
         Information on endianness and precision is lost.
         """
         self.__class__.not_implemented_yet('tofile')
@@ -780,7 +774,7 @@ class Array(object):
         New view of array with the same data.
         """
         self._not_implemented_yet('view')
-    def astype(self, dtype, order=MemoryOrdering.SAME_ORDER, 
+    def astype(self, dtype, order=MemoryOrdering.SAME_ORDER,
                      casting='unsafe', subok=True, copy=True, **kargs):
         """
         Copy of the array, cast to a specified type.
@@ -793,7 +787,7 @@ class Array(object):
         """
         self._not_implemented_yet('tobytes')
 
-   
+
    # logical operators
     def __eq__(self, other):
         return self.backend.equal(self, other)
@@ -826,11 +820,11 @@ class Array(object):
         return self.backend.power(self, other)
     def __floordiv__(self, other):
         return self.backend.floor_divide(self, other)
-    def __div__(self, other):
+    def __truediv__(self, other):
         return self.backend.divide(self, other)
     def __mod__(self, other):
         return self.backend.mod(self, other)
-    
+
     def __and__ (self, other):
         return self.backend.bitwise_and(self,other)
     def __xor__ (self, other):
@@ -852,11 +846,11 @@ class Array(object):
         return self.backend.power(other, self)
     def __rfloordiv__(self, other):
         return self.backend.floor_divide(other, self)
-    def __rdiv__(self, other):
+    def __rtruediv__(self, other):
         return self.backend.divide(other, self)
     def __rmod__(self, other):
         return self.backend.mod(other, self)
-    
+
     def __rand__ (other, self):
         return self.backend.bitwise_and(other, self)
     def __rxor__ (other, self):
@@ -867,7 +861,7 @@ class Array(object):
         return self.backend.left_shift(other, self)
     def __rrshift__ (other, self):
         return self.backend.right_shift(other, self)
-    
+
     def __iadd__(self, other):
         return self.backend.add(self, other, out=self)
     def __isub__(self, other):
@@ -883,7 +877,7 @@ class Array(object):
     def __imod__(self, other):
         return self.backend.mod(self, other, out=self)
 
-    
+
     def __str__(self):
         return self._handle.__str__()
     def __repr__(self):
diff --git a/hysop/core/arrays/array_backend.py b/hysop/core/arrays/array_backend.py
index 534855a98ec74029b7bdb14476bdc36a467de0ae..a349765ef0899bc8ebeb18a66c2d1aa7a39cd26f 100644
--- a/hysop/core/arrays/array_backend.py
+++ b/hysop/core/arrays/array_backend.py
@@ -1,6 +1,7 @@
-
+import sys
+import numpy as np
 from abc import ABCMeta, abstractmethod
-from hysop.deps import np, sys
+
 from hysop.constants import default_order, MemoryOrdering, Backend
 from hysop.constants import HYSOP_REAL, HYSOP_COMPLEX
 from hysop.constants import HYSOP_INTEGER, HYSOP_INDEX, HYSOP_DIM, HYSOP_BOOL
@@ -11,26 +12,26 @@ from hysop.tools.numerics import is_fp, is_complex, match_float_type, \
                                  match_complex_type, complex_to_float_dtype
 from hysop.core.memory.allocator import AllocatorBase
 
-class ArrayBackend(TaggedObject):
+class ArrayBackend(TaggedObject, metaclass=ABCMeta):
     """
     Interface of an abstract array backend.
-    An array backend is a numpy work-alike collection of functions that 
+    An array backend is a numpy work-alike collection of functions that
     performs its computations on arrays on various devices.
-    
+
     Most of exposed functions should work exactly as in numpy,
     some default arguments are changed to match HySoP parameters
-    (default ordering, default floating point type, 
-      default integer type, default device, ...) 
+    (default ordering, default floating point type,
+      default integer type, default device, ...)
     All exposed functions are @classmethods, and this class cannot
     be instanciated.
 
-    Arithmetic methods, when available, should at least support the 
+    Arithmetic methods, when available, should at least support the
     broadcasting of scalars.
-    
+
     See this link for more information about numpy routines:
     https://docs.scipy.org/doc/numpy/reference/routines.html
-    
-    A backend implementation *may* expose subsets of the following 
+
+    A backend implementation *may* expose subsets of the following
     routine sections (as listed on previous link):
         1)  Array creation routines
         2)  Array manipulation routines
@@ -64,13 +65,11 @@ class ArrayBackend(TaggedObject):
         15) Polynomials
         16) Test support
         17) Window functions
-    
-    By default, all exposed methods raise a NotImplementedError with an 
+
+    By default, all exposed methods raise a NotImplementedError with an
     explicit message through the _not_implemented_yet method.
     """
 
-    __metaclass__ = ABCMeta
-    
     __registered_backends = {}
     """
     Contains all registered backends.
@@ -87,7 +86,7 @@ class ArrayBackend(TaggedObject):
     @classmethod
     def get_or_create(cls, **kwds):
         cls._not_implemented_yet('get_or_create')
-    
+
     @staticmethod
     def get_alignment_and_size(shape, dtype, min_alignment=None):
         """
@@ -113,41 +112,41 @@ class ArrayBackend(TaggedObject):
         nbytes = min_alloc_bytes + alignment - 1
 
         return (size,nbytes,alignment)
-    
+
     @staticmethod
     def _register_backend(handle, backend_cls):
         ArrayBackend.__registered_backends[handle] = backend_cls
-    
+
     @staticmethod
     def _registered_backends():
         return ArrayBackend.__registered_backends.copy()
-    
+
     @classmethod
     def _not_implemented_yet(cls, funname):
         msg = '{}::{}() has not been implemented yet.'
         msg=msg.format(cls.__name__, funname)
         raise NotImplementedError(msg)
-    
-    @classmethod 
+
+    @classmethod
     def _unsupported_argument(cls, fname, argname, arg, default_value=None):
         if arg != default_value:
             msg = '{}::{}() has been implemented but argument \'{}\' is not '
             msg+= 'supported and should be set to {}.'
             msg=msg.format(cls.__name__, fname, argname, default_value)
             raise NotImplementedError(msg)
-    
-    @classmethod 
+
+    @classmethod
     def _check_argtype(cls, fname, argname, arg, argself):
         if not isinstance(argself, tuple):
             argself=(argself,)
-        if not arg.__class__ in argself: 
+        if not arg.__class__ in argself:
             msg = '{}::{}(): argument type mismatch, expected a {}'
             msg+= ' for argument \'{}\' but got a {}.'
             msg=msg.format(cls.__name__, fname, argself, argname, arg.__class__)
             raise TypeError(msg)
 
     def __new__(cls, allocator, **kwds):
-        return super(ArrayBackend, cls).__new__(cls, tag_prefix='bk', 
+        return super(ArrayBackend, cls).__new__(cls, tag_prefix='bk',
                                                      tagged_cls=ArrayBackend,
                                                      **kwds)
 
@@ -186,7 +185,7 @@ class ArrayBackend(TaggedObject):
         return ne
     def __hash__(self):
         return id(self._allocator)
-    
+
     @abstractmethod
     def get_host_array_backend(self):
         msg='get_host_array_backend() not implemented in {}.'.format(self.__class__)
@@ -202,37 +201,40 @@ class ArrayBackend(TaggedObject):
         Get the allocated associated to this backend.
         """
         return self._allocator
-    
+
     @property
     def max_alloc_size(self):
         """
         Get the maximal size of allocatable contiguous chunk of memory in bytes.
         """
         return self._allocator.max_alloc_size()
-    
+
     allocator = property(get_allocator)
 
-    
+
     def _prepare_args(self, *args, **kargs):
         """
         Prepare all arguments for a call.
         """
         if ArrayBackend.__DEBUG:
-            print '__prepare_args'
+            print('__prepare_args')
         args = list(args)
         for i,arg in enumerate(args):
             args[i]  = self._arg(arg)
-        for k,arg in kargs.iteritems():
+        for k,arg in kargs.items():
             kargs[k] = self._arg(arg)
+        if ('synchronize' in kargs):
+            msg='synchronize cannot be an argument to pyopencl.'
+            raise RuntimeError(msg)
         return tuple(args), kargs
-        
+
     def _return(self, ret, **kargs):
         """
         Wrap returned value(s) if they match a backend array.
         """
         if isinstance(ret,tuple):
             if ArrayBackend.__DEBUG:
-                print '__return', [r.__class__.__name__ for r in ret]
+                print('__return', [r.__class__.__name__ for r in ret])
             values = list(ret)
             for i,val in enumerate(values):
                 if self.can_wrap(val):
@@ -240,34 +242,33 @@ class ArrayBackend(TaggedObject):
                 elif self.host_array_backend.can_wrap(val):
                     values[i] = self.host_array_backend.wrap(val)
             return tuple(values)
-        
+
         if ArrayBackend.__DEBUG:
-            print '__return', ret.__class__.__name__
+            print('__return', ret.__class__.__name__)
         if self.can_wrap(ret):
             ret = self.wrap(ret, **kargs)
         elif self.host_array_backend.can_wrap(ret):
             ret = self.host_array_backend.wrap(ret)
         return ret
-   
+
     def _call(self, functor, *varargs, **kwargs):
         """
         Prepare arguments for a call to functor and calls it.
-        If returned value contains an array handle, it is wrapped into 
+        If returned value contains an array handle, it is wrapped into
         the corresponding hysop.core.arrays.
         """
 
         if ArrayBackend.__DEBUG:
-            print '__call {}'.format(functor.__name__)
+            print('__call {}'.format(functor.__name__))
 
-        args, kargs, _ret, ret = None, None, None, None 
+        args, kargs, _ret, ret = None, None, None, None
         try:
             args, kargs = self._prepare_args(*varargs, **kwargs)
-            
+
             _ret = functor(*args, **kargs)
 
             ret = self._return(_ret)
         except Exception as e:
-            _type, _value, _traceback = sys.exc_info()
             def get_name(val):
                 if hasattr(val, '__class__'):
                     name = val.__class__.__name__
@@ -276,7 +277,7 @@ class ArrayBackend(TaggedObject):
                 else:
                     name = val
                 if hasattr(val,'__class__') and \
-                        val.__class__ in [type,int,long,float,np.dtype,list,tuple,set]:
+                        val.__class__ in [type,int,float,np.dtype,list,tuple,set]:
                     name += ' = {}'.format(val)
                 return name
 
@@ -286,7 +287,7 @@ class ArrayBackend(TaggedObject):
                 if isinstance(val,tuple):
                     return ', '.join([get_name(v) for v in val])
                 elif isinstance(val, dict):
-                    val = ['{} => {}'.format(k,get_name(val[k])) 
+                    val = ['{} => {}'.format(k,get_name(val[k]))
                             for k in sorted(val.keys())]
                     return '\n\t'.join(val)
 
@@ -295,7 +296,7 @@ class ArrayBackend(TaggedObject):
 Call of {} ({}) from class self={} failed
  Before argument processing:
     args:  {}
-    kargs: 
+    kargs:
         {}
     returns: {}
  After argument processing:
@@ -306,23 +307,24 @@ Call of {} ({}) from class self={} failed
 
 Exception was:
     {}
-'''.format(functor, functor.__name__, self.__class__.__name__, 
-        format(varargs), format(kwargs), format(_ret), 
+'''.format(functor, functor.__name__, self.__class__.__name__,
+        format(varargs), format(kwargs), format(_ret),
         format(args), format(kargs), format(ret),
         e)
-            raise _type, _value, _traceback
+            print(msg)
+            raise
 
         return ret
 
 
     def _alloc_outputs(self, fname, kargs):
         if ArrayBackend.__DEBUG:
-            print '__begin allocs'
+            print('__begin allocs')
         shapes = []
         orders = []
         input_dtypes = {}
         output_arg_names = []
-        for argname, arg in kargs.iteritems():
+        for argname, arg in kargs.items():
             if (argname.find('out')>=0):
                 if (arg is None):
                     output_arg_names.append(argname)
@@ -340,12 +342,12 @@ Exception was:
             raise RuntimeError(msg)
         else:
             shape = shapes[0]
-        
+
         if not all(order==orders[0] for order in orders):
             order=MemoryOrdering.C_CONTIGUOUS
         else:
             order = orders[0]
-        
+
         if ('axis' in kargs):
             # this is a reduction, we get rid of reduced axis
             axis = kargs['axis']
@@ -365,15 +367,15 @@ Exception was:
         if not shape:
             # scalar output, do not allocate an array
             return
-        
+
         output_dtypes = self._find_output_dtypes(fname, input_dtypes, output_arg_names)
-        
+
         if ArrayBackend.__DEBUG:
-            print '__allocating outputs for function {}'.format(fname)
-            print '   *shape: {} (input shape={}, axis={})'.format(shape, _shape, axis)
-            print '   *order: {}'.format(order)
-            print '   *input  dtypes: {}'.format(input_dtypes)
-            print '   *deduced output dtypes: {}'.format(output_dtypes)
+            print('__allocating outputs for function {}'.format(fname))
+            print('   *shape: {} (input shape={}, axis={})'.format(shape, _shape, axis))
+            print('   *order: {}'.format(order))
+            print('   *input  dtypes: {}'.format(input_dtypes))
+            print('   *deduced output dtypes: {}'.format(output_dtypes))
             f = getattr(np, fname)
             if isinstance(f, np.ufunc):
                 ftypes = f.types
@@ -386,22 +388,22 @@ Exception was:
                             type_info[typechar] = np.typename(typechar)
                         except:
                             type_info[typechar] = 'unknown type'
-                    ss = '{}->{} ({})'.format(fin,fout, ', '.join('{}={}'.format(k,v) 
-                        for (k,v) in type_info.iteritems()))
+                    ss = '{}->{} ({})'.format(fin,fout, ', '.join('{}={}'.format(k,v)
+                        for (k,v) in type_info.items()))
                     ftypes_str.append(ss)
 
-                print '   *ufunc available signatures:\n     {}'.format('\n     '.join(ftypes_str))
-        
+                print('   *ufunc available signatures:\n     {}'.format('\n     '.join(ftypes_str)))
+
         for argname in output_arg_names:
             dtype = output_dtypes[argname]
             kargs[argname] = self.empty(shape=shape, dtype=dtype, order=order).handle
         if ArrayBackend.__DEBUG:
-            print '__end allocs'
-    
+            print('__end allocs')
+
     def _find_output_dtypes(self, fname, input_dtypes, output_arg_names):
         output_dtypes = {}
-        
-        dtypes =  input_dtypes.values()
+
+        dtypes = tuple(input_dtypes.values())
         dtype  = np.find_common_type([], dtypes)
 
         if fname.find('frexp')==0:
@@ -409,10 +411,10 @@ Exception was:
             output_dtypes['out2'] = np.int32
         else:
             # all outputs share the same dtype
-            if fname in ['rint', 'floor', 'ceil', 'trunc', 
-                    'exp', 'exp2', 'expm1', 
-                    'log', 'log1p', 'log2', 'log10', 
-                    'logaddexp', 'logaddexp2', 'ldexp', 
+            if fname in ['rint', 'floor', 'ceil', 'trunc',
+                    'exp', 'exp2', 'expm1',
+                    'log', 'log1p', 'log2', 'log10',
+                    'logaddexp', 'logaddexp2', 'ldexp',
                     'sqrt', 'cbrt', 'hypot',
                     'fabs', 'copysign', 'modf',
                     'sin',  'cos',  'tan',  'arcsin',  'arccos',  'arctan', 'arctan2',
@@ -443,22 +445,22 @@ Exception was:
 ############################
 # BACKEND SPECIFIC METHODS #
 
-    
+
     @abstractmethod
     def wrap(self, handle, **kargs):
         """
         Create a backend specific Array from the corresponding array handle.
         """
         pass
-    
+
     @abstractmethod
     def can_wrap(self, handle, **kargs):
         """
-        Should return True if handle is an Array or a array handle corresponding 
+        Should return True if handle is an Array or a array handle corresponding
         this backend.
         """
         pass
-     
+
     def _arg(self, arg):
         """
         Prepare one argument for a call (non backend specific argument conversion).
@@ -483,24 +485,24 @@ Exception was:
         else:
             return arg
 
-    
+
     def copyto(self, dst, src, **kargs):
         """
         src is an Array
         dst can be everything
         """
         self._not_implemented_yet('copyto')
-    
+
 ##############################
 # EXTRA AND MODIFIED METHODS #
 
-    
+
     def fill(self, a, value):
         """
         Fill the array with given value
         """
         self._not_implemented_yet('fill')
-    
+
     def memcpy(self, dst, src, **kargs):
         """
         Copy memory from src buffer to dst buffer .
@@ -528,75 +530,75 @@ Exception was:
                 src = dst.backend.asarray(src)
                 dst.backend.copyto(dst, src, **kargs)
             else:
-                print src.__class__, dst.__class__
+                print(src.__class__, dst.__class__)
                 msg='src cannot be converted to type Array.'
                 raise TypeError(msg)
         else:
             msg='Neither src nor dst are of type Array.'
             raise TypeError(msg)
-            
 
-    
+
+
 ###########################
 # ARRAY CREATION ROUTINES #
-## See https://docs.scipy.org/doc/numpy/reference/routines.array-creation.html 
+## See https://docs.scipy.org/doc/numpy/reference/routines.array-creation.html
 
 # Ones and zeros
-    
+
     def empty(self, shape, dtype=HYSOP_REAL, order=default_order):
         """
         Return a new array of given shape and type, without initializing entries.
         """
         self._not_implemented_yet('empty')
-    
+
     def empty_like(self, a, dtype=None, order=MemoryOrdering.SAME_ORDER, subok=True, shape=None):
         """
         Return a new array with the same shape and type as a given array.
         """
         self._not_implemented_yet('empty_like')
-    
+
     def eye(self, N, M, k, dtype=None):
         """
         Return a 2-D array with ones on the diagonal and zeros elsewhere.
         """
         self._not_implemented_yet('eye')
-    
+
     def identity(self, n, dtype=None):
         """
         Return the identity array.
         """
         self._not_implemented_yet('identity')
-    
+
     def ones(self, shape, dtype=None, order=default_order):
         """
         Return a new array of given shape and type, filled with ones.
         """
         self._not_implemented_yet('ones')
-    
+
     def ones_like(self, a, dtype=None, order=MemoryOrdering.SAME_ORDER, subok=True, shape=None):
         """
         Return an array of ones with the same shape and type as a given array.
         """
         self._not_implemented_yet('ones_like')
-    
+
     def zeros(self, shape, dtype=None, order=default_order):
         """
         Return a new array of given shape and type, filled with zeros.
         """
         self._not_implemented_yet('zeros')
-    
+
     def zeros_like(self, a, dtype=None, order=MemoryOrdering.SAME_ORDER, subok=True, shape=None):
         """
         Return an array of zeros with the same shape and type as a given array.
         """
         self._not_implemented_yet('zeros_like')
-    
+
     def full(self, shape, fill_value, dtype=None, order=default_order):
         """
         Return a new array of given shape and type, filled with fill_value.
         """
         self._not_implemented_yet('full')
-    
+
     def full_like(self, a, fill_value, dtype=None, order=MemoryOrdering.SAME_ORDER, subok=True, shape=None):
         """
         Return a full array with the same shape and type as a given array.
@@ -604,68 +606,68 @@ Exception was:
         self._not_implemented_yet('full_like')
 
 #From existing data
-    
-    def array(self, object, dtype=None, copy=True, order=default_order, 
+
+    def array(self, object, dtype=None, copy=True, order=default_order,
             subok=False, ndmin=0):
         """
         Create an array.
         """
         self._not_implemented_yet('array')
-    
+
     def asarray(self, a, dtype=None, order=default_order, **kargs):
         """
         Convert the input to an array.
         """
         self._not_implemented_yet('asarray')
-    
+
     def asanyarray(self, a, dtype=None, order=default_order):
         """
         Convert the input to an ndarray, but pass ndarray subclasses through.
         """
         self._not_implemented_yet('asanyarray')
-    
+
     def asmatrix(self, data, dtype=None):
         """
         Interpret the input as a matrix.
         """
         self._not_implemented_yet('asmatrix')
-    
+
     def copy(self, a, order=MemoryOrdering.SAME_ORDER):
         """
         Return an array copy of the given object.
         """
         self._not_implemented_yet('copy')
-    
+
     def frombuffer(self, afer, dtype=HYSOP_REAL, count=-1, offset=0):
         """
         Interpret a afer as a 1-dimensional array.
         """
         self._not_implemented_yet('fromafer')
-    
+
     def fromfile(self, file, dtype=HYSOP_REAL, count=-1, sep=''):
         """
         Construct an array from data in a text or binary file.
         """
         self._not_implemented_yet('fromfile')
-    
+
     def fromfunction(self, function, shape, dtype=HYSOP_REAL):
         """
         Construct an array by executing a function over each coordinate.
         """
         self._not_implemented_yet('fromfunction')
-    
+
     def fromiter(self, iterable, dtype=HYSOP_REAL, count=-1):
         """
         Create a new 1-dimensional array from an iterable object.
         """
         self._not_implemented_yet('fromiter')
-    
+
     def fromstring(self, string, dtype=HYSOP_REAL, count=-1, sep=''):
         """
         A new 1-D array initialized from raw binary or text data in a string.
         """
         self._not_implemented_yet('fromstring')
-    
+
     def loadtxt(self, fname, dtype=HYSOP_REAL, comments='#', delimiter=None,
             converters=None, skiprows=0, usecols=None, unpack=False, ndmin=0):
         """
@@ -674,32 +676,32 @@ Exception was:
         self._not_implemented_yet('loadtxt')
 
 #Numerical ranges
-    
+
     def arange(self, dtype=HYSOP_INTEGER, *args, **kargs):
         """
         Return evenly spaced values within a given interval.
         """
         self._not_implemented_yet('arange')
-    
-    
+
+
     def linspace(self, start, stop, num=50, endpoint=True, retstep=False, dtype=HYSOP_REAL):
         """
         Return evenly spaced numbers over a specified interval.
         """
         self._not_implemented_yet('linspace')
-    
+
     def logspace(self, start, stop, num=50, endpoint=True, base=10.0, dtype=HYSOP_REAL):
         """
         Return numbers spaced evenly on a log scale.
         """
         self._not_implemented_yet('logspace')
-    
+
     def geomspace(self, start, stop, num=50, endpoint=True, dtype=HYSOP_REAL):
         """
         Return numbers spaced evenly on a log scale (a geometric progression).
         """
         self._not_implemented_yet('geomspace')
-    
+
     def meshgrid(self, *xi, **kwargs):
         """
         Return coordinate matrices from coordinate vectors.
@@ -707,37 +709,37 @@ Exception was:
         self._not_implemented_yet('meshgrid')
 
 #Building matrices
-    
+
     def diag(self, v, k=0):
         """
         Extract a diagonal or construct a diagonal array.
         """
         self._not_implemented_yet('diag')
-    
+
     def diagflat(self, v, k=0):
         """
         Create a two-dimensional array with the flattened input as a diagonal.
         """
         self._not_implemented_yet('diagflat')
-    
+
     def tri(self, N, M=None, k=0, dtype=HYSOP_REAL):
         """
         An array with ones at and below the given diagonal and zeros elsewhere.
         """
         self._not_implemented_yet('tri')
-    
+
     def tril(self, m, k):
         """
         Lower triangle of an array.
         """
         self._not_implemented_yet('tril')
-    
+
     def triu(self, m, k=0):
         """
         Upper triangle of an array.
         """
         self._not_implemented_yet('triu')
-    
+
     def vander(self, x, N=None, increasing=False):
         """
         Generate a Vandermonde matrix.
@@ -750,13 +752,13 @@ Exception was:
 ## See https://docs.scipy.org/doc/numpy/reference/routines.array-manipulation.html
 
 #Changing array shape
-    
+
     def reshape(self, a, newshape, order=default_order):
         """
         Gives a new shape to an array without changing its data.
         """
         self._not_implemented_yet('reshape')
-    
+
     def ravel(self, a, order=MemoryOrdering.SAME_ORDER):
         """
         Return a contiguous flattened array.
@@ -765,38 +767,38 @@ Exception was:
 
 #Transpose-like operations
 ## /!\ those functions alter the logical transposition state /!\
-    
+
     def moveaxis(self, a, source, destination):
         """
         Move axes of an array to new positions.
         Axe 0 is the slowest varying index, last axe is the fastest varying index.
         """
-        axes = tuple(i for i in xrange(a.ndim))
+        axes = tuple(i for i in range(a.ndim))
         if source>destination:
             axes = axes[:destination] + (source,) + axes[destination:source] + axes[source+1:]
         else:
             axes = axes[:source] + axes[source+1:destination] + (source,) + axes[destination:]
         return self.transpose(a=a, axes=axes)
-    
+
     def rollaxis(self, a, axis, start=0):
         """
         Roll the specified axis backwards, until it lies in a given position.
         Axe 0 is the slowest varying index, last axe is the fastest varying index.
         """
-        axes = tuple(np.roll(xrange(a.ndim), shift=-start).tolist())
+        axes = tuple(np.roll(range(a.ndim), shift=-start).tolist())
         return self.transpose(a=a, axes=axes)
-    
+
     def swapaxes(self, a, axis1, axis2):
         """
         Interchange two axes of an array.
         Axe 0 is the slowest varying index, last axe is the fastest varying index.
         """
-        axes = list(xrange(a.ndim))
+        axes = list(range(a.ndim))
         axes[axis1] = axis2
         axes[axis2] = axis1
         axes = tuple(axes)
         return self.transpose(a=a, axes=axes)
-    
+
     def transpose(self, a, axes=None):
         """
         Permute the dimensions of an array.
@@ -807,43 +809,43 @@ Exception was:
 
 
 #Changing number of dimensions
-    
+
     def atleast_1d(self, *arys):
         """
         Convert inputs to arrays with at least one dimension.
         """
         self._not_implemented_yet('atleast_1d')
-    
+
     def atleast_2d(self, *arys):
         """
         View inputs as arrays with at least two dimensions.
         """
         self._not_implemented_yet('atleast_2d')
-    
+
     def atleast_3d(self, *arys):
         """
         View inputs as arrays with at least three dimensions.
         """
         self._not_implemented_yet('atleast_3d')
-    
+
     def broadcast_to(self, array, shape, subok=False):
         """
         Broadcast an array to a new shape.
         """
         self._not_implemented_yet('broadcast_to')
-    
+
     def broadcast_arrays(self, *args, **kwargs):
         """
         Broadcast any number of arrays against each other.
         """
         self._not_implemented_yet('broadcast_arrays')
-    
+
     def expand_dims(self, a, axis):
         """
         Expand the shape of an array.
         """
         self._not_implemented_yet('expand_dims')
-    
+
     def squeeze(self, a, axis=None):
         """
         Remove single-dimensional entries from the shape of an array.
@@ -856,25 +858,25 @@ Exception was:
         Return an array laid out in Fortran order in memory.
         """
         self._not_implemented_yet('asfortranarray')
-    
+
     def ascontiguousarray(self, a, dtype=None):
         """
         Return a contiguous array in memory (C order).
         """
         self._not_implemented_yet('ascontiguousarray')
-    
+
     def asarray_chkfinite(self, a, dtype=None, order=default_order):
         """
         Convert the input to an array, checking for NaNs or Infs.
         """
         self._not_implemented_yet('asarray_chkfinite')
-    
+
     def asscalar(self, a):
         """
         Convert an array of size 1 to its scalar equivalent.
         """
         self._not_implemented_yet('asscalar')
-    
+
     def require(self, a, dtype=None, requirements=None):
         """
         Return an ndarray of the provided type that satisfies requirements.
@@ -882,37 +884,37 @@ Exception was:
         self._not_implemented_yet('require')
 
 #Joining arrays
-    
+
     def concatenate(self, a, axis=0):
         """
         Join a sequence of arrays along an existing axis.
         """
         self._not_implemented_yet('concatenate')
-    
+
     def stack(self, arrays, axis=0):
         """
         Join a sequence of arrays along a new axis.
         """
         self._not_implemented_yet('stack')
-    
+
     def column_stack(self, tup):
         """
         Stack 1-D arrays as columns into a 2-D array.
         """
         self._not_implemented_yet('column_stack')
-    
+
     def dstack(self, tup):
         """
         Stack arrays in sequence depth wise (along third axis).
         """
         self._not_implemented_yet('dstack')
-    
+
     def hstack(self, tup):
         """
         Stack arrays in sequence horizontally (column wise).
         """
         self._not_implemented_yet('hstack')
-    
+
     def vstack(self, tup):
         """
         Stack arrays in sequence vertically (row wise).
@@ -920,31 +922,31 @@ Exception was:
         self._not_implemented_yet('vstack')
 
 #Splitting arrays
-    
+
     def split(self, ary, indices_or_sections, axis=0):
         """
         Split an array into multiple sub-arrays.
         """
         self._not_implemented_yet('split')
-    
+
     def array_split(self, ary, indices_or_sections, axis=0):
         """
         Split an array into multiple sub-arrays.
         """
         self._not_implemented_yet('array_split')
-    
+
     def dsplit(self, ary, indices_or_sections):
         """
         Split array into multiple sub-arrays along the 3rd axis (depth).
         """
         self._not_implemented_yet('dsplit')
-    
+
     def hsplit(self, ary, indices_or_sections):
         """
         Split an array into multiple sub-arrays horizontally (column-wise).
         """
         self._not_implemented_yet('hsplit')
-    
+
     def vsplit(self, ary, indices_or_sections):
         """
         Split an array into multiple sub-arrays vertically (row-wise).
@@ -952,13 +954,13 @@ Exception was:
         self._not_implemented_yet('vsplit')
 
 #Tiling arrays
-    
+
     def tile(self, A, reps):
         """
         Construct an array by repeating A the number of times given by reps.
         """
         self._not_implemented_yet('tile')
-    
+
     def repeat(self, a, repeats, axis=None):
         """
         Repeat elements of an array.
@@ -966,37 +968,37 @@ Exception was:
         self._not_implemented_yet('repeat')
 
 #Adding and removing elements
-    
+
     def delete(self, arr, obj, axis=None):
         """
         Return a new array with sub-arrays along an axis deleted.
         """
         self._not_implemented_yet('delete')
-    
+
     def insert(self, arr, obj, values, axis=None):
         """
         Insert values along the given axis before the given indices.
         """
         self._not_implemented_yet('insert')
-    
+
     def append(self, arr, values, axis=None):
         """
         Append values to the end of an array.
         """
         self._not_implemented_yet('append')
-    
+
     def resize(self, a, new_shape):
         """
         Return a new array with the specified shape.
         """
         self._not_implemented_yet('resize')
-    
+
     def trim_zeros(self, filt, trim='fb'):
         """
         Trim the leading and/or trailing zeros from a 1-D array or sequence.
         """
         self._not_implemented_yet('trim_zeros')
-    
+
     def unique(self, ar, return_index=False, return_inverse=False, return_counts=False):
         """
         Find the unique elements of an array.
@@ -1004,31 +1006,31 @@ Exception was:
         self._not_implemented_yet('unique')
 
 #Rearranging elements
-    
+
     def flip(self, m, axis):
         """
         Reverse the order of elements in an array along the given axis.
         """
         self._not_implemented_yet('flip')
-    
+
     def fliplr(self, m):
         """
         Flip array in the left/right direction.
         """
         self._not_implemented_yet('fliplr')
-    
+
     def flipud(self, m):
         """
         Flip array in the up/down direction.
         """
         self._not_implemented_yet('flipud')
-    
+
     def roll(self, a, shift, axis=None):
         """
         Roll array elements along a given axis.
         """
         self._not_implemented_yet('roll')
-    
+
     def rot90(self, m, k=1, axes=(0,1)):
         """
         Rotate an array by 90 degrees in the plane specified by axes.
@@ -1041,38 +1043,38 @@ Exception was:
 ## See https://docs.scipy.org/doc/numpy/reference/routines.bitwise.html
 
 # Elementwise bit operations
-    
+
     def bitwise_and(self, x1, x2, out=None):
         """
         Compute the bit-wise AND of two arrays element-wise.
         """
         self._not_implemented_yet('bitwise_and')
-    
+
     def bitwise_or(self, x1, x2, out=None):
         """
         Compute the bit-wise OR of two arrays element-wise.
         """
         self._not_implemented_yet('bitwise_or')
-    
+
     def bitwise_xor(self, x1, x2, out=None):
         """
         Compute the bit-wise XOR of two arrays element-wise.
         """
         self._not_implemented_yet('bitwise_xor')
 
-    
+
     def invert(self, x, out=None):
         """
         Compute bit-wise inversion, or bit-wise NOT, element-wise.
         """
         self._not_implemented_yet('invert')
-    
+
     def left_shift(self, x1, x2, out=None):
         """
         Shift the bits of an integer to the left.
         """
         self._not_implemented_yet('left_shift')
-    
+
     def right_shift(self, x1, x2, out=None):
         """
         Shift the bits of an integer to the right.
@@ -1080,13 +1082,13 @@ Exception was:
         self._not_implemented_yet('right_shift')
 
 #Bit packing
-    
+
     def packbits(self, myarray, axis=None):
         """
         Packs the elements of a binary-valued array into bits in a uint8 array.
         """
         self._not_implemented_yet('packbits')
-    
+
     def unpackbits(self, myarray, axis=None):
         """
         Unpacks elements of a uint8 array into a binary-valued output array.
@@ -1094,7 +1096,7 @@ Exception was:
         self._not_implemented_yet('unpackbits')
 
 #Output formatting
-    
+
     def binary_repr(self, num, width=None):
         """
         Return the binary representation of the input number as a string.
@@ -1107,37 +1109,37 @@ Exception was:
 ## See https://docs.scipy.org/doc/numpy/reference/routines.fft.html
 
 #Standard FFTs
-    
+
     def fft(self, a, n=None, axis=-1, norm=None):
         """
         Compute the one-dimensional discrete Fourier Transform.
         """
         self._not_implemented_yet('fft')
-    
+
     def ifft(self, a, n=None, axis=-1, norm=None):
         """
         Compute the one-dimensional inverse discrete Fourier Transform.
         """
         self._not_implemented_yet('ifft')
-    
+
     def fft2(self, a, s=None, axes=None, norm=None):
         """
         Compute the 2-dimensional discrete Fourier Transform
         """
         self._not_implemented_yet('fft2')
-    
+
     def ifft2(self, a, s=None, axes=None, norm=None):
         """
         Compute the 2-dimensional inverse discrete Fourier Transform.
         """
         self._not_implemented_yet('ifft2')
-    
+
     def fftn(self, a, s=None, axes=None, norm=None):
         """
         Compute the N-dimensional discrete Fourier Transform.
         """
         self._not_implemented_yet('fftn')
-    
+
     def ifftn(self, a, s=None, axes=None, norm=None):
         """
         Compute the N-dimensional inverse discrete Fourier Transform.
@@ -1145,37 +1147,37 @@ Exception was:
         self._not_implemented_yet('ifftn')
 
 #Real FFTs
-    
+
     def rfft(self, a, n=None, axis=-1, norm=None):
         """
         Compute the one-dimensional discrete Fourier Transform for real input.
         """
         self._not_implemented_yet('rfft')
-    
+
     def irfft(self, a, n=None, axis=-1, norm=None):
         """
         Compute the inverse of the n-point DFT for real input.
         """
         self._not_implemented_yet('irfft')
-    
+
     def rfft2(self, a, s=None, axes=(-2,-1), norm=None):
         """
         Compute the 2-dimensional FFT of a real array.
         """
         self._not_implemented_yet('rfft2')
-    
+
     def irfft2(self, a, s=None, axes=(-2,-1), norm=None):
         """
         Compute the 2-dimensional inverse FFT of a real array.
         """
         self._not_implemented_yet('irfft2')
-    
+
     def rfftn(self, a, s=None, axes=None, norm=None):
         """
         Compute the N-dimensional discrete Fourier Transform for real input.
         """
         self._not_implemented_yet('rfftn')
-    
+
     def irfftn(self, a, s=None, axes=None, norm=None):
         """
         Compute the inverse of the N-dimensional FFT of real input.
@@ -1183,13 +1185,13 @@ Exception was:
         self._not_implemented_yet('irfftn')
 
 #Hermitian FFTs
-    
+
     def hfft(self, a, n=None, axis=-1, norm=None):
         """
         Compute the FFT of a signal that has Hermitian symmetry, i.e., a real spectrum.
         """
         self._not_implemented_yet('hfft')
-    
+
     def ihfft(self, a, n=None, axis=-1, norm=None):
         """
         Compute the inverse FFT of a signal that has Hermitian symmetry.
@@ -1197,25 +1199,25 @@ Exception was:
         self._not_implemented_yet('ihfft')
 
 #Helper routines
-    
+
     def fftfreq(self, n=None, d=1.0):
         """
         Return the Discrete Fourier Transform sample frequencies.
         """
         self._not_implemented_yet('fftfreq')
-    
+
     def rfftfreq(self, n=None, d=1.0):
         """
         Return the Discrete Fourier Transform sample frequencies (for usage with rfft, irfft).
         """
         self._not_implemented_yet('rfftfreq')
-    
+
     def fftshift(self, x, axes=None):
         """
         Shift the zero-frequency component to the center of the spectrum.
         """
         self._not_implemented_yet('fftshift')
-    
+
     def ifftshift(self, x, axes=None):
         """
         The inverse of fftshift.
@@ -1227,31 +1229,31 @@ Exception was:
 # FUNCTIONAL PROGRAMMING #
 ## See https://docs.scipy.org/doc/numpy/reference/routines.functional.html
 
-    
+
     def apply_along_axis(self, func1d, axis, arr, *args, **kwargs):
         """
         Apply a function to 1-D slices along the given axis.
         """
         self._not_implemented_yet('apply_along_axis')
-    
+
     def apply_over_axes(self, func, a, axes):
         """
         Apply a function repeatedly over multiple axes.
         """
         self._not_implemented_yet('apply_over_axes')
-    
+
     def vectorize(self, pyfunc, otypes=None, doc=None, excluded=None, cache=False, signature=None):
         """
         Generalized function class.
         """
         self._not_implemented_yet('vectorize')
-    
+
     def frompyfunc(self, func, nin, nout):
         """
         Takes an arbitrary Python function and returns a NumPy ufunc.
         """
         self._not_implemented_yet('frompyfunc')
-    
+
     def piecewise(self, x, condlist, funclist, *args, **kw):
         """
         Evaluate a piecewise-defined function.
@@ -1264,25 +1266,25 @@ Exception was:
 ## See https://docs.scipy.org/doc/numpy/reference/routines.io.html
 
 # NumPy binary files (NPY, NPZ)
-    
+
     def load(self, mmap_mode=None, allow_pickle=True, fix_imports=True, encoding='ASCII'):
         """
         Load arrays or pickled objects from .npy, .npz or pickled files.
         """
         self._not_implemented_yet('load')
-    
+
     def save(self, arr, file, allow_pickle=True, fix_imports=True):
         """
         Save an array to a binary file in NumPy .npy format.
         """
         self._not_implemented_yet('save')
-    
+
     def savez(self, file, *args, **kwds):
         """
         Save several arrays into a single file in uncompressed .npz format.
         """
         self._not_implemented_yet('savez')
-    
+
     def savez_compressed(self, file, *args, **kwds):
         """
         Save several arrays into a single file in compressed .npz format.
@@ -1290,37 +1292,37 @@ Exception was:
         self._not_implemented_yet('savez_compressed')
 
 # Text files
-    
-    def loadtxt(self, dtype=HYSOP_REAL, comments='#', delimiter=None, 
+
+    def loadtxt(self, dtype=HYSOP_REAL, comments='#', delimiter=None,
             converters=None, skiprows=0, usecols=None, unpack=False, ndmin=0):
         """
         Load data from a text file.
         """
         self._not_implemented_yet('loadtxt')
-    
-    def savetxt(self, fname, X,  fmt='%.18e', delimiter=' ', newline='\n', 
+
+    def savetxt(self, fname, X,  fmt='%.18e', delimiter=' ', newline='\n',
             header='', footer='', comments='# '):
         """
         Save an array to a text file.
         """
         self._not_implemented_yet('savetxt')
-    
-    def genfromtxt(self, fname, dtype=HYSOP_REAL, comments='#', delimiter=None, 
-            skip_header=0, skip_footer=0, converters=None, missing_values=None, 
-            filling_values=None, usecols=None, names=None, excludelist=None, deletechars=None, 
-            replace_space='_', autostrip=False, case_sensitive=True, defaultfmt='f%i', 
+
+    def genfromtxt(self, fname, dtype=HYSOP_REAL, comments='#', delimiter=None,
+            skip_header=0, skip_footer=0, converters=None, missing_values=None,
+            filling_values=None, usecols=None, names=None, excludelist=None, deletechars=None,
+            replace_space='_', autostrip=False, case_sensitive=True, defaultfmt='f%i',
             unpack=None, usemask=False, loose=True, invalid_raise=True, max_rows=None):
         """
         Load data from a text file, with missing values handled as specified.
         """
         self._not_implemented_yet('genfromtxt')
-    
+
     def fromregex(self, file, regexp, dtype):
         """
         Construct an array from a text file, using regular expression parsing.
         """
         self._not_implemented_yet('fromregex')
-    
+
     def fromstring(self, string, dtype=HYSOP_REAL, count=-1, sep=''):
         """
         A new 1-D array initialized from raw binary or text data in a string.
@@ -1328,54 +1330,54 @@ Exception was:
         self._not_implemented_yet('fromstring')
 
 # String formatting
-    
-    def array2string(self, a,  max_line_width=None, precision=None, suppress_small=None, 
+
+    def array2string(self, a,  max_line_width=None, precision=None, suppress_small=None,
             separator=' ', prefix='', style=repr, formatter=None):
         """
         Return a string representation of an array.
         """
         self._not_implemented_yet('array2string')
-    
+
     def array_repr(self, arr, max_line_width=None, precision=None, supress_small=None):
         """
         Return the string representation of an array.
         """
         self._not_implemented_yet('array_repr')
-    
+
     def array_str(self, a, max_line_width=None, precision=None, suppress_small=None):
         """
         Return a string representation of the data in an array.
         """
         self._not_implemented_yet('array_str')
 #Text formatting options
-    
-    def set_printoptions(self, precision=None, threshold=None, edgeitems=None, 
-                              linewidth=None, suppress=None, nanstr=None, 
+
+    def set_printoptions(self, precision=None, threshold=None, edgeitems=None,
+                              linewidth=None, suppress=None, nanstr=None,
                               infstr=None, formatter=None):
         """
         Set printing options.
         """
         self._not_implemented_yet('set_printoptions')
-    
+
     def get_printoptions(self):
         """
         Return the current print options.
         """
         self._not_implemented_yet('get_printoptions')
-    
+
     def set_string_function(self, f, repr=True):
         """
         Set a Python function to be used when pretty printing arrays.
         """
         self._not_implemented_yet('set_string_function')
 #Base-n representations
-    
+
     def binary_repr(self, num, width=None):
         """
         Return the binary representation of the input number as a string.
         """
         self._not_implemented_yet('binary_repr')
-    
+
     def base_repr(self, number, base=2, padding=0):
         """
         Return a string representation of a number in the given base system.
@@ -1388,56 +1390,56 @@ Exception was:
 ## See https://docs.scipy.org/doc/numpy/reference/routines.linalg.html
 
 #Matrix and vector products
-    
+
     def dot(self, a, b, out=None):
         """
         Dot product of two arrays.
         """
         self._not_implemented_yet('dot')
-    
+
     def vdot(self, a, b):
         """
         Return the dot product of two vectors.
         """
         self._not_implemented_yet('vdot')
-    
+
     def inner(self, a, b):
         """
         Inner product of two arrays.
         """
         self._not_implemented_yet('inner')
-    
+
     def outer(self, a, b, out=None):
         """
         Compute the outer product of two vectors.
         """
         self._not_implemented_yet('outer')
-    
+
     def matmul(self, a, b, out=None):
         """
         Matrix product of two arrays.
         """
         self._not_implemented_yet('matmul')
-    
+
     def tensordot(self, a, b, axes=2):
         """
         Compute tensor dot product along specified axes for arrays >= 1-D.
         """
         self._not_implemented_yet('tensordot')
-    
-    def einsum(self, subscripts, out=None, dtype=None, order=MemoryOrdering.SAME_ORDER, 
+
+    def einsum(self, subscripts, out=None, dtype=None, order=MemoryOrdering.SAME_ORDER,
             casting='safe', optimize=False, *operands):
         """
         Evaluates the Einstein summation convention on the operands.
         """
         self._not_implemented_yet('einsum')
-    
+
     def matrix_power(self, M, n):
         """
         Raise a square matrix to the integer power n.
         """
         self._not_implemented_yet('matrix_power')
-    
+
     def kron(self, a, b):
         """
         Kronecker product of two arrays.
@@ -1445,19 +1447,19 @@ Exception was:
         self._not_implemented_yet('kron')
 
 #Decompositions
-    
+
     def cholesky(self, a):
         """
         Cholesky decomposition.
         """
         self._not_implemented_yet('cholesky')
-    
+
     def qr(self, a, mode='reduced'):
         """
         Compute the qr factorization of a matrix.
         """
         self._not_implemented_yet('qr')
-    
+
     def svd(self, a, full_matrices=True, compute_uv=True):
         """
         Singular Value Decomposition.
@@ -1465,25 +1467,25 @@ Exception was:
         self._not_implemented_yet('svd')
 
 #Matrix eigenvalues
-    
+
     def eig(self, a):
         """
         Compute the eigenvalues and right eigenvectors of a square array.
         """
         self._not_implemented_yet('eig')
-    
+
     def eigh(self, a, UPLO='L'):
         """
         Return the eigenvalues and eigenvectors of a Hermitian or symmetric matrix.
         """
         self._not_implemented_yet('eigh')
-    
+
     def eigvals(self, a):
         """
         Compute the eigenvalues of a general matrix.
         """
         self._not_implemented_yet('eigvals')
-    
+
     def eigvalsh(self, a, UPLO='L'):
         """
         Compute the eigenvalues of a Hermitian or real symmetric matrix.
@@ -1491,37 +1493,37 @@ Exception was:
         self._not_implemented_yet('eigvalsh')
 
 #Norms and other numbers
-    
+
     def norm(self, x, ord=None, axis=None, keepdims=False):
         """
         Matrix or vector norm.
         """
         self._not_implemented_yet('norm')
-    
+
     def cond(self, x, p=None):
         """
         Compute the condition number of a matrix.
         """
         self._not_implemented_yet('cond')
-    
+
     def det(self, a):
         """
         Compute the determinant of an array.
         """
         self._not_implemented_yet('det')
-    
+
     def matrix_rank(self, M, tol=None):
         """
         Return matrix rank of array using SVD method
         """
         self._not_implemented_yet('matrix_rank')
-    
-    def slogdet(self, a):    
+
+    def slogdet(self, a):
         """
         Compute the sign and natural logarithm of the determinant of an array.
         """
         self._not_implemented_yet('slogdet')
-    
+
     def trace(self, a, offset=0, axis1=0, axis2=1, dtype=None, out=None):
         """
         Return the sum along diagonals of the array.
@@ -1529,37 +1531,37 @@ Exception was:
         self._not_implemented_yet('trace')
 
 #Solving equations and inverting matrices
-    
+
     def solve(self, a, b):
         """
         Solve a linear matrix equation, or system of linear scalar equations.
         """
         self._not_implemented_yet('solve')
-    
+
     def tensorsolve(self, a, b, axes=None):
         """
         Solve the tensor equation a x = b for x.
         """
         self._not_implemented_yet('tensorsolve')
-    
+
     def lstsq(self, a, b, rcond=-1):
         """
         Return the least-squares solution to a linear matrix equation.
         """
         self._not_implemented_yet('lstsq')
-    
+
     def inv(self, a):
         """
         Compute the (multiplicative) inverse of a matrix.
         """
         self._not_implemented_yet('inv')
-    
+
     def pinv(self, a, rcond=1e-15):
         """
         Compute the (Moore-Penrose) pseudo-inverse of a matrix.
         """
         self._not_implemented_yet('pinv')
-    
+
     def tensorinv(self, a, ind=2):
         """
         Compute the 'inverse' of an N-dimensional array.
@@ -1572,45 +1574,45 @@ Exception was:
 ## See https://docs.scipy.org/doc/numpy/reference/routines.logic.html
 
 #Truth value testing
-    
+
     def any(self, a, axis=None, out=None):
         """
         Test whether any array elements along a given axis evaluate to True.
         """
         self._not_implemented_yet('any')
-    
+
     def all(self, a, axis=None, out=None):
         """
         Test whether all array elements along a given axis evaluate to True.
         """
         self._not_implemented_yet('all')
-    
+
 #Array contents
-    
+
     def isfinite(self, x, out=None):
         """
         Test element-wise for finiteness (not infinity or not Not a Number).
         """
         self._not_implemented_yet('isfinite')
-    
+
     def isinf(self, x, out=None):
         """
         Test element-wise for positive or negative infinity.
         """
         self._not_implemented_yet('isinf')
-    
+
     def isnan(self, x, out=None):
         """
         Test element-wise for NaN and return result as a boolean array.
         """
         self._not_implemented_yet('isnan')
-    
+
     def isneginf(self, x, out=None):
         """
         Test element-wise for negative infinity, return result as bool array.
         """
         self._not_implemented_yet('isneginf')
-    
+
     def isposinf(self, x, out=None):
         """
         Test element-wise for positive infinity, return result as bool array.
@@ -1618,25 +1620,25 @@ Exception was:
         self._not_implemented_yet('isposinf')
 
 #Logical operations
-    
+
     def logical_and(self, x1, x2, out=None):
         """
         Compute the truth value of x1 AND x2 element-wise.
         """
         self._not_implemented_yet('logical_and')
-    
+
     def logical_or(self, x1, x2, out=None):
         """
         Compute the truth value of x1 OR x2 element-wise.
         """
         self._not_implemented_yet('logical_or')
-    
+
     def logical_not(self, x, out=None):
         """
         Compute the truth value of NOT x element-wise.
         """
         self._not_implemented_yet('logical_not')
-    
+
     def logical_xor(self, x1, x2, out=None):
         """
         Compute the truth value of x1 XOR x2, element-wise.
@@ -1644,61 +1646,61 @@ Exception was:
         self._not_implemented_yet('logical_xor')
 
 #Comparisson
-    
+
     def allclose(self, a, b, rtol=1e-05, atol=1e-08, equal_nan=False):
         """
         Returns True if two arrays are element-wise equal within a tolerance.
         """
         self._not_implemented_yet('allclose')
-    
+
     def isclose(self, a, b, rtol=1e-05, atol=1e-08, equal_nan=False):
         """
         Returns a boolean array where two arrays are element-wise equal within a tolerance.
         """
         self._not_implemented_yet('isclose')
-    
+
     def array_equal(self, a1, a2):
         """
         True if two arrays have the same shape and elements, False otherwise.
         """
         self._not_implemented_yet('array_equal')
-    
+
     def array_equiv(self, a1, a2):
         """
         returns True if input arrays are shape consistent and all elements equal.
         """
         self._not_implemented_yet('array_equiv')
-    
+
     def greater(self, x1, x2, out=None):
         """
         Return the truth value of (x1 > x2) element-wise.
         """
         self._not_implemented_yet('greater')
-    
+
     def greater_equal(self, x1, x2, out=None):
         """
         Return the truth value of (x1 >= x2) element-wise.
         """
         self._not_implemented_yet('greater_equal')
-    
+
     def less(self, x1, x2, out):
         """
         Return the truth value of (x1 < x2) element-wise.
         """
         self._not_implemented_yet('less')
-    
+
     def less_equal(self, x1, x2, out):
         """
         Return the truth value of (x1 =< x2) element-wise.
         """
         self._not_implemented_yet('less_equal')
-    
+
     def equal(self, x1, x2, out=None):
         """
         Return (x1 == x2) element-wise.
         """
         self._not_implemented_yet('equal')
-    
+
     def not_equal(self, x1, x2, out=None):
         """
         Return (x1 != x2) element-wise.
@@ -1711,69 +1713,69 @@ Exception was:
 ## See https://docs.scipy.org/doc/numpy/reference/routines.math.html
 
 # Trigonometric functions
-    
+
     def sin(self, x, out=None):
         """
         Trigonometric sine, element-wise.
         """
         self._not_implemented_yet('sin')
-    
+
     def cos(self, x, out=None):
         """
         Cosine element-wise.
         """
         self._not_implemented_yet('cos')
-    
+
     def tan(self, x, out=None):
         """
         Compute tangent element-wise.
         """
         self._not_implemented_yet('tan')
 
-    
+
     def arcsin(self, x, out=None):
         """
         Inverse sine, element-wise.
         """
         self._not_implemented_yet('arcsin')
-    
+
     def arccos(self, x, out=None):
         """
         Trigonometric inverse cosine, element-wise.
         """
         self._not_implemented_yet('arccos')
-    
+
     def arctan(self, x, out=None):
         """
         Trigonometric inverse tangent, element-wise.
         """
         self._not_implemented_yet('arctan')
-    
+
     def arctan2(self, x1, x2, out=None):
         """
         Element-wise arc tangent of x1/x2 choosing the quadrant correctly.
         """
         self._not_implemented_yet('arctan2')
 
-    
+
     def hypot(self, x1, x2, out=None):
         """
         Given the legs of a right triangle, return its hypotenuse.
         """
         self._not_implemented_yet('hypot')
-    
+
     def unwrap(self, p, discont=3.141592653589793, axis=-1):
         """
         Unwrap by changing deltas between values to 2*pi complement.
         """
         self._not_implemented_yet('unwrap')
-    
+
     def deg2rad(self, x, out=None):
         """
         Convert angles from degrees to radians.
         """
         self._not_implemented_yet('deg2rad')
-    
+
     def rad2deg(self, x, out=None):
         """
         Convert angles from radians to degrees.
@@ -1781,38 +1783,38 @@ Exception was:
         self._not_implemented_yet('rad2deg')
 
 # Hyperbolic functions
-    
+
     def sinh(self, x, out=None):
         """
         Hyperbolic sine, element-wise.
         """
         self._not_implemented_yet('sinh')
-    
+
     def cosh(self, x, out=None):
         """
         Hyperbolic cosine, element-wise.
         """
         self._not_implemented_yet('cosh')
-    
+
     def tanh(self, x, out=None):
         """
         Compute hyperbolic tangent element-wise.
         """
         self._not_implemented_yet('tanh')
 
-    
+
     def arcsinh(self, x, out=None):
         """
         Inverse hyperbolic sine element-wise.
         """
         self._not_implemented_yet('arcsinh')
-    
+
     def arccosh(self, x, out=None):
         """
         Inverse hyperbolic cosine, element-wise.
         """
         self._not_implemented_yet('arccosh')
-    
+
     def arctanh(self, x, out=None):
         """
         Inverse hyperbolic tangent element-wise.
@@ -1820,39 +1822,39 @@ Exception was:
         self._not_implemented_yet('arctanh')
 
 # Rounding
-    
+
     def around(self, a, decimals=0, out=None):
         """
         Evenly round to the given number of decimals, returns HYSOP_INTEGER.
         """
         self._not_implemented_yet('around')
 
-    
+
     def fix(self, x, y=None):
         """
         Round to nearest integer towards zero.
         """
         self._not_implemented_yet('fix')
 
-    
+
     def rint(self, x, out=None):
         """
         Round elements of the array to the nearest integer.
         """
         self._not_implemented_yet('rint')
-    
+
     def floor(self, x, out=None):
         """
         Return the floor of the input, element-wise.
         """
         self._not_implemented_yet('floor')
-    
+
     def ceil(self, x, out=None):
         """
         Return the ceiling of the input, element-wise.
         """
         self._not_implemented_yet('ceil')
-    
+
     def trunc(self, x, out=None):
         """
         Return the truncated value of the input, element-wise.
@@ -1861,82 +1863,82 @@ Exception was:
 
 
 # Sums, product, differences
-    
+
     def prod(self, a, axis=None, dtype=None, out=None):
         """
         Return the product of array elements over a given axis.
         """
         self._not_implemented_yet('prod')
-    
+
     def sum(self, a, axis=None, dtype=None, out=None):
         """
         Sum of array elements over a given axis.
         """
         self._not_implemented_yet('sum')
-    
+
     def nanprod(self, a, axis=None, dtype=None, out=None):
         """
-        Return the product of array elements over a given axis treating 
+        Return the product of array elements over a given axis treating
         Not a Numbers (NaNs) as ones.
         """
         self._not_implemented_yet('nanprod')
-    
+
     def nansum(self, a, axis=None, dtype=None, out=None):
         """
         Return the sum of array elements over a given axis treating Not a Numbers (NaNs) as zero.
         """
         self._not_implemented_yet('nansum')
-    
+
     def cumprod(self, a, axis=None, dtype=None, out=None):
         """
         Return the cumulative product of elements along a given axis.
         """
         self._not_implemented_yet('cumprod')
-    
+
     def cumsum(self, a, axis=None, dtype=None, out=None):
         """
         Return the cumulative sum of the elements along a given axis.
         """
         self._not_implemented_yet('cumsum')
-    
+
     def nancumprod(self, a, axis=None, dtype=None, out=None):
         """
-        Return the cumulative product of array elements over a given axis treating 
+        Return the cumulative product of array elements over a given axis treating
         Not a Numbers (NaNs) as one.
         """
         self._not_implemented_yet('nancumprod')
-    
+
     def nancumsum(self, a, axis=None, dtype=None, out=None):
         """
-        Return the cumulative sum of array elements over a given axis treating 
+        Return the cumulative sum of array elements over a given axis treating
         Not a Numbers (NaNs) as zero.
         """
         self._not_implemented_yet('nancumsum')
-    
+
     def diff(self, a, n=1, axis=-1):
         """
         Calculate the n-th discrete difference along given axis.
         """
         self._not_implemented_yet('diff')
-    
+
     def ediff1d(self, ary, to_end=None, to_begin=None):
         """
         The differences between consecutive elements of an array.
         """
         self._not_implemented_yet('ediff1d')
-    
+
     def gradient(self, f, *varargs, **kwargs):
         """
         Return the gradient of an N-dimensional array.
         """
         self._not_implemented_yet('gradient')
-    
+
     def cross(self, a, b, axisa=-1, axisb=-1, axisc=-1, axis=None):
         """
         Return the cross product of two (arrays of) vectors.
         """
         self._not_implemented_yet('cross')
-    
+
     def trapz(self, y, x=None, dx=1.0, axis=-1):
         """
         Integrate along the given axis using the composite trapezoidal rule.
@@ -1944,57 +1946,57 @@ Exception was:
         self._not_implemented_yet('trapz')
 
 #Exponents and logarithms
-    
+
     def exp(self, x, out=None):
         """
         Calculate the exponential of all elements in the input array.
         """
         self._not_implemented_yet('exp')
-    
+
     def exp2(self, x, out=None):
         """
         Calculate 2**p for all p in the input array.
         """
         self._not_implemented_yet('exp2')
-    
+
     def expm1(self, x, out=None):
         """
         Calculate exp(x) - 1 for all elements in the array.
         """
         self._not_implemented_yet('expm1')
-    
-    
+
+
     def log(self, x, out=None):
         """
         Natural logarithm, element-wise.
         """
         self._not_implemented_yet('log')
-    
+
     def log2(self, x, out=None):
         """
         Base-2 logarithm of x.
         """
         self._not_implemented_yet('log2')
-    
+
     def log10(self, x, out=None):
         """
         Return the base 10 logarithm of the input array, element-wise.
         """
         self._not_implemented_yet('log10')
-    
+
     def log1p(self, x, out=None):
         """
         Return the natural logarithm of one plus the input array, element-wise.
         """
         self._not_implemented_yet('log1p')
 
-    
+
     def logaddexp(self, x1, x2, out=None):
         """
         Logarithm of the sum of exponentiations of the inputs.
         """
         self._not_implemented_yet('logaddexp')
-    
+
     def logaddexp2(self, x1, x2, out=None):
         """
         Logarithm of the sum of exponentiations of the inputs in base-2.
@@ -2002,13 +2004,13 @@ Exception was:
         self._not_implemented_yet('logaddexp2')
 
 # Other special functions
-    
+
     def i0(self, x):
         """
         Modified Bessel function of the first kind, order 0.
         """
         self._not_implemented_yet('i0')
-    
+
     def sinc(self, x):
         """
         Return the sinc function.
@@ -2016,88 +2018,88 @@ Exception was:
         self._not_implemented_yet('sinc')
 
 # Floating point routines
-    
+
     def signbit(self, x, out=None):
         """
         Returns element-wise True where signbit is set (less than zero).
         """
         self._not_implemented_yet('signbit')
-    
+
     def copysign(self, x1, x2, out=None):
         """
         Change the sign of x1 to that of x2, element-wise.
         """
         self._not_implemented_yet('copysign')
-    
+
     def frexp(self, x, out1=None, out2=None):
         """
         Decompose the elements of x into mantissa and twos exponent.
         """
         self._not_implemented_yet('frexp')
-    
+
     def ldexp(self, x1, x2, out=None):
         """
         Returns x1 * 2**x2, element-wise.
         """
         self._not_implemented_yet('ldexp')
-        
+
 
 # Arithmetic operations
-    
+
     def add(self, x1, x2, out=None):
         """
         Add arguments element-wise.
         """
         self._not_implemented_yet('add')
-    
+
     def reciprocal(self, x, out=None):
         """
         Return the reciprocal of the argument, element-wise.
         """
         self._not_implemented_yet('reciprocal')
-    
+
     def negative(self, x, out=None):
         """
         Numerical negative, element-wise.
         """
         self._not_implemented_yet('negative')
-    
+
     def multiply(self, x1, x2, out=None):
         """
         Multiply arguments element-wise.
         """
         self._not_implemented_yet('multiply')
-    
+
     def divide(self, x1, x2, out=None):
         """
         Divide arguments element-wise.
         """
         self._not_implemented_yet('divide')
-    
+
     def power(self, x1, x2, out=None):
         """
         First array elements raised to powers from second array, element-wise.
         """
         self._not_implemented_yet('power')
-    
+
     def subtract(self, x1, x2, out=None):
         """
         Subtract arguments, element-wise.
         """
         self._not_implemented_yet('subtract')
-    
+
     def true_divide(self, x1, x2, out=None):
         """
         Returns a true division of the inputs, element-wise.
         """
         self._not_implemented_yet('true_divide')
-    
+
     def floor_divide(self, x1, x2, out=None):
         """
         Return the largest integer smaller or equal to the division of the inputs.
         """
         self._not_implemented_yet('floor_divide')
-    
+
     def fmod(self, x1, x2, out=None):
         """
         Return the element-wise remainder of division (REM).
@@ -2105,16 +2107,16 @@ Exception was:
         This should not be confused with the Python modulus operator x1 % x2.
         """
         self._not_implemented_yet('fmod')
-    
+
     def mod(self, x1, x2, out=None):
         """
         Return element-wise remainder of division (MOD).
         Remainder has the same sign as the divident x1.
-        It is complementary to the function floor_divide and 
+        It is complementary to the function floor_divide and
         match Python modfulus operator x1 % x2.
         """
         self._not_implemented_yet('mod')
-    
+
     def modf(self, x,  out1=None, out2=None):
         """
         Return the fractional and integral parts of an array, element-wise.
@@ -2122,120 +2124,120 @@ Exception was:
         self._not_implemented_yet('modf')
 
 # Handling complex numbers
-    
+
     def angle(self, z, deg=False):
         """
         Return the angle of the complex argument.
         """
         self._not_implemented_yet('angle')
-    
+
     def real(self, val):
         """
         Return the real part of the elements of the array.
         """
         self._not_implemented_yet('real')
-    
+
     def imag(self, val):
         """
         Return the imaginary part of the elements of the array.
         """
         self._not_implemented_yet('imag')
-    
+
     def conj(self, x, out=None):
         """
         Return the complex conjugate, element-wise.
         """
         self._not_implemented_yet('conj')
-    
-# Miscellanous 
-    
+
+# Miscellanous
+
     def convolve(self, a, v, mode='full'):
         """
         Returns the discrete, linear convolution of two one-dimensional sequences.
         """
         self._not_implemented_yet('convolve')
-    
+
     def clip(self, a, a_min, a_max, out=None):
         """
         Clip (limit) the values in an array.
         """
         self._not_implemented_yet('clip')
-    
+
     def sqrt(self, x, out=None):
         """
         Return the positive square-root of an array, element-wise.
         """
         self._not_implemented_yet('sqrt')
-    
+
     def cbrt(self, x, out=None):
         """
         Return the cube-root of an array, element-wise.
         """
         self._not_implemented_yet('cbrt')
-    
+
     def square(self, x, out=None):
         """
         Return the element-wise square of the input.
         """
         self._not_implemented_yet('square')
-    
+
     def nan_to_num(self, x):
         """
         Replace nan with zero and inf with finite numbers.
         """
         self._not_implemented_yet('nan_to_num')
-    
+
     def real_if_close(self, a, tol=100):
         """
         If complex input returns a real array if complex parts are close to zero.
         """
         self._not_implemented_yet('real_if_close')
-    
+
     def interp(self, x, xp, fp, left=None, right=None, period=None):
         """
         One-dimensional linear interpolation.
         """
         self._not_implemented_yet('interp')
-    
-    
+
+
     def maximum(self, x1, x2, out=None):
         """
         Element-wise maximum of array elements.
         """
         self._not_implemented_yet('maximum')
-    
+
     def minimum(self, x1, x2, out=None):
         """
         Element-wise minimum of array elements.
         """
         self._not_implemented_yet('minimum')
-    
+
     def fmin(self, x1, x2, out=None):
         """
         Element-wise maximum of array elements, ignore NaNs.
         """
         self._not_implemented_yet('fmin')
-    
+
     def fmax(self, x1, x2, out=None):
         """
         Element-wise minimum of array elements, ignore NaNs.
         """
         self._not_implemented_yet('fmax')
-    
+
     def fabs(self, x, out=None):
         """
         Calculate the absolute value element-wise, outputs HYSOP_REAL unless out is set.
         """
         self._not_implemented_yet('fabs')
-    
-    
+
+
     def absolute(self, x, out=None):
         """
         Calculate the absolute value element-wise.
         """
         self._not_implemented_yet('absolute')
-    
-    
+
+
     def sign(self, x, out=None):
         """
         Returns an element-wise indication of the sign of a number.
@@ -2248,61 +2250,61 @@ Exception was:
 ## See https://docs.scipy.org/doc/numpy/reference/routines.random.html
 
 # Simple random data
-    
+
     def rand(self, shape, **kwds):
         """
         Random values in a given shape between 0.0 and 1.0.
         """
         self._not_implemented_yet('rand')
-    
+
     def randn(self, shape, **kwds):
         """
         Return a sample (or samples) from the 'standard normal' distribution.
         """
         self._not_implemented_yet('randn')
-    
+
     def randint(self, low, high=None, size=None, dtype=HYSOP_INTEGER):
         """
         Return random integers from low (inclusive) to high (exclusive).
         """
         self._not_implemented_yet('randint')
-    
+
     def random_integers(self, low, high=None, size=None):
         """
         Random integers of type np.int between low and high, inclusive.
         """
         self._not_implemented_yet('random_integers')
-    
+
     def random_sample(self, size=None):
         """
         Return random floats in the half-open interval 0.0, 1.0).
         """
         self._not_implemented_yet('random_sample')
-    
+
     def random(self, size=None):
         """
         Return random floats in the half-open interval 0.0, 1.0).
         """
         self._not_implemented_yet('random')
-    
+
     def ranf(self, size=None):
         """
         Return random floats in the half-open interval 0.0, 1.0).
         """
         self._not_implemented_yet('ranf')
-    
+
     def sample(self, size=None):
         """
         Return random floats in the half-open interval 0.0, 1.0).
         """
         self._not_implemented_yet('sample')
-    
+
     def choice(self, a, size=None, replace=True, p=None):
         """
         Generates a random sample from a given 1-D array
         """
         self._not_implemented_yet('choice')
-    
+
     def bytes(self, length):
         """
         Return random bytes.
@@ -2310,13 +2312,13 @@ Exception was:
         self._not_implemented_yet('bytes')
 
 # Permutations
-    
+
     def shuffle(self, x):
         """
         Modify a sequence in-place by shuffling its contents.
         """
         self._not_implemented_yet('shuffle')
-    
+
     def permutation(self, x):
         """
         Randomly permute a sequence, or return a permuted range.
@@ -2324,211 +2326,211 @@ Exception was:
         self._not_implemented_yet('permutation')
 
 # Distributions
-    
+
     def beta(self, a, b, size=None):
         """
         Draw samples from a Beta distribution.
         """
         self._not_implemented_yet('beta')
-    
+
     def binomial(self, n, p, size=None):
         """
         Draw samples from a binomial distribution.
         """
         self._not_implemented_yet('binomial')
-    
+
     def chisquare(self, df, size=None):
         """
         Draw samples from a chi-square distribution.
         """
         self._not_implemented_yet('chisquare')
-    
+
     def dirichlet(self, alpha, size=None):
         """
         Draw samples from the Dirichlet distribution.
         """
         self._not_implemented_yet('dirichlet')
-    
+
     def exponential(self, scale=1.0, size=None):
         """
         Draw samples from an exponential distribution.
         """
         self._not_implemented_yet('exponential')
-    
+
     def f(self, dfnum, dfden, size=None):
         """
         Draw samples from an F distribution.
         """
         self._not_implemented_yet('f')
-    
+
     def gamma(self, shape, scale=1.0, size=None):
         """
         Draw samples from a Gamma distribution.
         """
         self._not_implemented_yet('gamma')
-    
+
     def geometric(self, p, size=None):
         """
         Draw samples from the geometric distribution.
         """
         self._not_implemented_yet('geometric')
-    
+
     def gumbel(self, loc=0.0, scale=1.0, size=None):
         """
         Draw samples from a Gumbel distribution.
         """
         self._not_implemented_yet('gumbel')
-    
+
     def hypergeometric(self, ngood, nbad, nsample, size=None):
         """
         Draw samples from a Hypergeometric distribution.
         """
         self._not_implemented_yet('hypergeometric')
-    
+
     def laplace(self, loc=0.0, scale=1.0, size=None):
         """
         Draw samples from the Laplace or double exponential distribution with specified location (or mean=0.0) and scale (decay).
         """
         self._not_implemented_yet('laplace')
-    
+
     def logistic(self, loc=0.0, scale=1.0, size=None):
         """
         Draw samples from a logistic distribution.
         """
         self._not_implemented_yet('logistic')
-    
+
     def lognormal(self, mean=0.0, sigma=1.0, size=None):
         """
         Draw samples from a log-normal distribution.
         """
         self._not_implemented_yet('lognormal')
-    
+
     def logseries(self, p, size=None):
         """
         Draw samples from a logarithmic series distribution.
         """
         self._not_implemented_yet('logseries')
-    
+
     def multinomial(self, n, pvals, size=None):
         """
         Draw samples from a multinomial distribution.
         """
         self._not_implemented_yet('multinomial')
-    
+
     def multivariate_normal(self, mean, cov, size=None):
         """
         Draw random samples from a multivariate normal distribution.
         """
         self._not_implemented_yet('multivariate_normal')
-    
+
     def negative_binomial(self, n, p, size=None):
         """
         Draw samples from a negative binomial distribution.
         """
         self._not_implemented_yet('negative_binomial')
-    
+
     def noncentral_chisquare(self, df, nonc, size=None):
         """
         Draw samples from a noncentral chi-square distribution.
         """
         self._not_implemented_yet('noncentral_chisquare')
-    
+
     def noncentral_f(self, dfnum, dfden, nonc, size=None):
         """
         Draw samples from the noncentral F distribution.
         """
         self._not_implemented_yet('noncentral_f')
-    
+
     def normal(self, loc=0.0, scale=1.0, size=None):
         """
         Draw random samples from a normal (Gaussian) distribution.
         """
         self._not_implemented_yet('normal')
-    
+
     def pareto(self, a, size=None):
         """
         Draw samples from a Pareto II or Lomax distribution with specified shape.
         """
         self._not_implemented_yet('pareto')
-    
+
     def poisson(self, lam, size=None):
         """
         Draw samples from a Poisson distribution.
         """
         self._not_implemented_yet('poisson')
-    
+
     def power(self, a, size=None):
         """
         Draws samples in 0, 1 from a power distribution with positive exponent a - 1.
         """
         self._not_implemented_yet('power')
-    
+
     def rayleigh(self, scale=1.0, size=None):
         """
         Draw samples from a Rayleigh distribution.
         """
         self._not_implemented_yet('rayleigh')
-    
+
     def standard_cauchy(self, size=None):
         """
         Draw samples from a standard Cauchy distribution with mode = 0.
         """
         self._not_implemented_yet('standard_cauchy')
-    
+
     def standard_exponential(self, size=None):
         """
         Draw samples from the standard exponential distribution.
         """
         self._not_implemented_yet('standard_exponential')
-    
+
     def standard_gamma(self, shape, size=None):
         """
         Draw samples from a standard Gamma distribution.
         """
         self._not_implemented_yet('standard_gamma')
-    
+
     def standard_normal(self, size=None):
         """
         Draw samples from a standard Normal distribution (mean=0.0, stdev=1).
         """
         self._not_implemented_yet('standard_normal')
-    
+
     def standard_t(self, df, size=None):
         """
         Draw samples from a standard Student's t distribution with df degrees of freedom.
         """
         self._not_implemented_yet('standard_t')
-    
+
     def triangular(self, left, mode, right, size=None):
         """
         Draw samples from the triangular distribution over the interval left, right.
         """
         self._not_implemented_yet('triangular')
-    
+
     def uniform(self, low, high, size=None):
         """
         Draw samples from a uniform distribution.
         """
         self._not_implemented_yet('uniform')
-    
+
     def vonmises(self, mu, kappa, size=None):
         """
         Draw samples from a von Mises distribution.
         """
         self._not_implemented_yet('vonmises')
-    
+
     def wald(self, mean=0.0, scale=1.0, size=None):
         """
         Draw samples from a Wald, or inverse Gaussian, distribution.
         """
         self._not_implemented_yet('wald')
-    
+
     def weibull(self, a, size=None):
         """
         Draw samples from a Weibull distribution.
         """
         self._not_implemented_yet('weibull')
-    
+
     def zipf(self, a, size=None):
         """
         Draw samples from a Zipf distribution.
@@ -2536,19 +2538,19 @@ Exception was:
         self._not_implemented_yet('zipf')
 
 # Random generator
-    
+
     def seed(self, seed=None):
         """
         Seed the generator.
         """
         self._not_implemented_yet('seed')
-    
+
     def get_state(self):
         """
         Return a tuple representing the internal state of the generator.
         """
         self._not_implemented_yet('get_state')
-    
+
     def set_state(self, state):
         """
         Set the internal state of the generator from a tuple.
@@ -2561,7 +2563,7 @@ Exception was:
 ## See https://docs.scipy.org/doc/numpy/reference/routines.set.html
 
 # Making proper sets
-    
+
     def unique(self, ar, return_index=False, return_inverse=False, return_counts=False):
         """
         Find the unique elements of an array.
@@ -2569,31 +2571,31 @@ Exception was:
         self._not_implemented_yet('unique')
 
 # Boolean operations
-    
+
     def in1d(self, ar1, ar2, assume_unique=False, invert=False):
         """
         Test whether each element of a 1-D array is also present in a second array.
         """
         self._not_implemented_yet('in1d')
-    
+
     def intersect1d(self, ar1, ar2, assume_unique=False):
         """
         Find the intersection of two arrays.
         """
         self._not_implemented_yet('intersect1d')
-    
+
     def setdiff1d(self, ar1, ar2, assume_unique=False):
         """
         Find the set difference of two arrays.
         """
         self._not_implemented_yet('setdiff1d')
-    
+
     def setxor1d(self, ar1, ar2, assume_unique=False):
         """
         Find the set exclusive-or of two arrays.
         """
         self._not_implemented_yet('setxor1d')
-    
+
     def union1d(self, ar1, ar2):
         """
         Find the union of two arrays.
@@ -2606,43 +2608,43 @@ Exception was:
 ## See https://docs.scipy.org/doc/numpy/reference/routines.sort.html
 
 # Sorting
-    
+
     def sort(self, a, axis=-1, kind='quicksort', order=None):
         """
         Return a sorted copy of an array.
         """
         self._not_implemented_yet('sort')
-    
+
     def lexsort(self, keys, axis=-1):
         """
         Perform an indirect sort using a sequence of keys.
         """
         self._not_implemented_yet('lexsort')
-    
+
     def argsort(self, a, axis=-1, kind='quicksort', order=None):
         """
         Returns the indices that would sort an array.
         """
         self._not_implemented_yet('argsort')
-    
+
     def msort(self, a):
         """
         Return a copy of an array sorted along the first axis.
         """
         self._not_implemented_yet('msort')
-    
+
     def sort_complex(self, a):
         """
         Sort a complex array using the real part first, then the imaginary part.
         """
         self._not_implemented_yet('sort_complex')
-    
+
     def partition(self, a, kth, axis=-1, kind='quicksort', order=None):
         """
         Return a partitioned copy of an array.
         """
         self._not_implemented_yet('partition')
-    
+
     def argpartition(self, a, kth, axis=-1, kind='quicksort', order=None):
         """
         Perform an indirect partition along the given axis using the algorithm specified by the kind keyword.
@@ -2650,61 +2652,61 @@ Exception was:
         self._not_implemented_yet('argpartition')
 
 #Searching
-    
+
     def argmax(self, a, axis, out=None):
         """
         Returns the indices of the maximum values along an axis.
         """
         self._not_implemented_yet('argmax')
-    
+
     def nanargmax(self, a, axis=None):
         """
         Return the indices of the maximum values in the specified axis ignoring NaNs.
         """
         self._not_implemented_yet('nanargmax')
-    
+
     def argmin(self, a, axis, out=None):
         """
         Returns the indices of the minimum values along an axis.
         """
         self._not_implemented_yet('argmin')
-    
+
     def nanargmin(self, a, axis=None):
         """
         Return the indices of the minimum values in the specified axis ignoring NaNs.
         """
         self._not_implemented_yet('nanargmin')
-    
+
     def argwhere(self, a):
         """
         Find the indices of array elements that are non-zero, grouped by element.
         """
         self._not_implemented_yet('argwhere')
-    
+
     def nonzero(self, a):
         """
         Return the indices of the elements that are non-zero.
         """
         self._not_implemented_yet('nonzero')
-    
+
     def flatnonzero(self, a):
         """
         Return indices that are non-zero in the flattened version of a.
         """
         self._not_implemented_yet('flatnonzero')
-    
+
     def where(self, condition, x, y):
         """
         Return elements, either from x or y, depending on condition.
         """
         self._not_implemented_yet('where')
-    
+
     def searchsorted(self, a, v, side='left', sorter=None):
         """
         Find indices where elements should be inserted to maintain order.
         """
         self._not_implemented_yet('searchsorted')
-    
+
     def extract(self, condition, arr):
         """
         Return the elements of an array that satisfy some condition.
@@ -2712,7 +2714,7 @@ Exception was:
         self._not_implemented_yet('extract')
 
 #Counting
-    
+
     def count_nonzero(self, a, axis=None):
         """
         Counts the number of non-zero values in the array a.
@@ -2724,102 +2726,102 @@ Exception was:
 ## See https://docs.scipy.org/doc/numpy/reference/routines.sort.html
 
 #Order statistics
-    
+
     def amin(self, a, axis=None, out=None):
         """
         Return the minimum of an array or minimum along an axis.
         """
         self._not_implemented_yet('amin')
-    
+
     def amax(self, a, axis=None, out=None):
         """
         Return the maximum of an array or maximum along an axis.
         """
         self._not_implemented_yet('amax')
-    
+
     def nanmin(self, a, axis=None, out=None):
         """
         Return minimum of an array or minimum along an axis, ignoring any NaNs.
         """
         self._not_implemented_yet('nanmin')
-    
+
     def nanmax(self, a, axis=None, out=None):
         """
         Return the maximum of an array or maximum along an axis, ignoring any NaNs.
         """
         self._not_implemented_yet('nanmax')
-    
+
     def ptp(self, a, axis=None, out=None):
         """
         Range of values (maximum - minimum) along an axis.
         """
         self._not_implemented_yet('ptp')
-    
-    def percentile(self, a, q, axis=None, out=None, overwrite_input=False, 
+
+    def percentile(self, a, q, axis=None, out=None, overwrite_input=False,
             interpolation='linear'):
         """
         Compute the qth percentile of the data along the specified axis.
         """
         self._not_implemented_yet('percentile')
-    
-    def nanpercentile(self, a, q, axis=None, out=None, overwrite_input=False, 
+
+    def nanpercentile(self, a, q, axis=None, out=None, overwrite_input=False,
             interpolation='linear'):
         """
-        Compute the qth percentile of the data along the specified axis, 
+        Compute the qth percentile of the data along the specified axis,
         while ignoring nan values.
         """
         self._not_implemented_yet('nanpercentile')
 
 #Averages and variances
-    
+
     def median(self, a, axis=None, out=None, overwrite_input=False):
         """
         Compute the median along the specified axis.
         """
         self._not_implemented_yet('median')
-    
+
     def average(self, a, axis=None, weights=None, returned=False):
         """
         Compute the weighted average along the specified axis.
         """
         self._not_implemented_yet('average')
-    
+
     def mean(self, a, axis=None, dtype=None, out=None):
         """
         Compute the arithmetic mean along the specified axis.
         """
         self._not_implemented_yet('mean')
-    
+
     def std(self, a, axis=None, dtype=None, out=None, ddof=0):
         """
         Compute the standard deviation along the specified axis.
         """
         self._not_implemented_yet('std')
-    
+
     def var(self, a, axis=None, dtype=None, out=None, ddof=0):
         """
         Compute the variance along the specified axis.
         """
         self._not_implemented_yet('var')
-    
+
     def nanmedian(self, a, axis=None, out=None, overwrite_input=False):
         """
         Compute the median along the specified axis, while ignoring NaNs.
         """
         self._not_implemented_yet('nanmedian')
-    
+
     def nanmean(self, a, axis=None, dtype=None, out=None):
         """
         Compute the arithmetic mean along the specified axis, ignoring NaNs.
         """
         self._not_implemented_yet('nanmean')
-    
+
     def nanstd(self, a, axis=None, dtype=None, out=None, ddof=0):
         """
         Compute the standard deviation along the specified axis, while ignoring NaNs.
         """
         self._not_implemented_yet('nanstd')
-    
+
     def nanvar(self, a, axis=None, dtype=None, out=None, ddof=0):
         """
         Compute the variance along the specified axis, while ignoring NaNs.
@@ -2832,13 +2834,13 @@ Exception was:
         Return Pearson product-moment correlation coefficients.
         """
         self._not_implemented_yet('corrcoef')
-    
+
     def correlate(self, a, v, mode='valid'):
         """
         Cross-correlation of two 1-dimensional sequences.
         """
         self._not_implemented_yet('correlate')
-    
+
     def cov(self, m, y=None, rowvar=True, bias=False, ddof=None, fweights=None, aweights=None):
         """
         Estimate a covariance matrix, given data and weights.
@@ -2846,31 +2848,31 @@ Exception was:
         self._not_implemented_yet('cov')
 
 # Histograms
-    
+
     def histogram(self, a, bins=10, range=None, normed=False, weights=None, density=None):
         """
         Compute the histogram of a set of data.
         """
         self._not_implemented_yet('histogram')
-    
+
     def histogram2d(self, x, y, bins, range=None, normed=False, weights=None):
         """
         Compute the bi-dimensional histogram of two data samples.
         """
         self._not_implemented_yet('histogram2d')
-    
+
     def histogramdd(self, sample, bins, range=None, normed=False, weights=None):
         """
         Compute the multidimensional histogram of some data.
         """
         self._not_implemented_yet('histogramdd')
-    
+
     def bincount(self, x, weights=None, minlength=None):
         """
         Count number of occurrences of each value in array of non-negative ints.
         """
         self._not_implemented_yet('bincount')
-    
+
     def digitize(self, x, bins, right=False):
         """
         Return the indices of the bins to which each value in input array belongs.
@@ -2884,14 +2886,14 @@ Exception was:
         Convert angles from radians to degrees.
         """
         return self.rad2deg(x=x,out=out, **kargs)
-    
+
     def radians(self, x, out=None, **kargs):
         """
         Convert angles from degrees to radians.
         """
         return self.deg2rad(x=x,out=out, **kargs)
-    
-    
+
+
     def remainder(self, x1, x2, out=None,**kargs):
         """
         Return element-wise remainder of division (MOD).
@@ -2906,12 +2908,12 @@ Exception was:
 ### EXTRA HYSOP METHODS ##
 
 def __generate_hysop_type_functions():
-    
+
     functions = {
 
             'as{type}array':
 '''
-def __hysop_array_generated_method(self, a, order=default_order, **kargs):
+def hysop_array_generated_method(self, a, order=default_order, **kargs):
     """
     Convert the input to an array of dtype HYSOP_{TYPE}.
     """
@@ -2920,7 +2922,7 @@ def __hysop_array_generated_method(self, a, order=default_order, **kargs):
 ''',
             '{type}_prod':
 '''
-def __hysop_array_generated_method(self, a, axis=None, out=None, **kargs):
+def hysop_array_generated_method(self, a, axis=None, out=None, **kargs):
     """
     Sum of array elements over a given axis.
     """
@@ -2929,7 +2931,7 @@ def __hysop_array_generated_method(self, a, axis=None, out=None, **kargs):
 ''',
             '{type}_sum':
 '''
-def __hysop_array_generated_method(self, a, axis=None, out=None, **kargs):
+def hysop_array_generated_method(self, a, axis=None, out=None, **kargs):
     """
     Sum of array elements over a given axis.
     """
@@ -2939,7 +2941,7 @@ def __hysop_array_generated_method(self, a, axis=None, out=None, **kargs):
 
             '{type}_empty':
 '''
-def __hysop_array_generated_method(self, shape, order=default_order, **kargs):
+def hysop_array_generated_method(self, shape, order=default_order, **kargs):
     """
     Return a new array of given shape and type, without initializing entries.
     """
@@ -2949,17 +2951,17 @@ def __hysop_array_generated_method(self, shape, order=default_order, **kargs):
 
             '{type}_ones':
 '''
-def __hysop_array_generated_method(self, shape, order=default_order, **kargs):
+def hysop_array_generated_method(self, shape, order=default_order, **kargs):
     """
     Return a new array of given shape filled with ones of type HYSOP_{TYPE}.
     """
     dtype = HYSOP_{TYPE}
     return self.ones(shape=shape, order=order, dtype=dtype, **kargs)
 ''',
-    
+
             '{type}_zeros':
 '''
-def __hysop_array_generated_method(self, shape, order=default_order, **kargs):
+def hysop_array_generated_method(self, shape, order=default_order, **kargs):
     """
     Return a new array of given shape, filled with zeros of type HYSOP_{TYPE}.
     """
@@ -2969,7 +2971,7 @@ def __hysop_array_generated_method(self, shape, order=default_order, **kargs):
 
             '{type}_full':
 '''
-def __hysop_array_generated_method(self, shape, fill_value, order=default_order, **kargs):
+def hysop_array_generated_method(self, shape, fill_value, order=default_order, **kargs):
     """
     Return a new array of given shape, filled with fill_value of type HYSOP_{TYPE}.
     """
@@ -2977,15 +2979,22 @@ def __hysop_array_generated_method(self, shape, fill_value, order=default_order,
     return self.full(shape=shape, fill_value=filling_value, order=order, dtype=dtype, **kargs)
 '''
 }
-    
+
     hysop_types = ['real', 'complex', 'integer', 'index', 'dim', 'bool']
 
     for ht in hysop_types:
-        for fname, fdefinition in functions.iteritems():
+        for fname, fdefinition in functions.items():
             fname = fname.format(type=ht, TYPE=ht.upper())
-            fdef  = fdefinition.format(type=ht, TYPE=ht.upper())
-            exec(fdef)
-            setattr(ArrayBackend, fname, __hysop_array_generated_method)
+            fdef = \
+'''
+from hysop.constants import default_order, MemoryOrdering, Backend
+from hysop.constants import HYSOP_REAL, HYSOP_COMPLEX, HYSOP_ORDER
+from hysop.constants import HYSOP_INTEGER, HYSOP_INDEX, HYSOP_DIM, HYSOP_BOOL
+{}
+'''.format(fdefinition.format(type=ht, TYPE=ht.upper()))
+            namespace = dict()
+            exec(fdef, namespace)
+            setattr(ArrayBackend, fname, namespace['hysop_array_generated_method'])
 
 __generate_hysop_type_functions()
 
diff --git a/hysop/core/arrays/tests/test_array.py b/hysop/core/arrays/tests/test_array.py
index cfe8cfeff084fd8997b14c180ba5837342b6cb13..9a11c8520576c79b434c7b14a85114fcfb433a6a 100644
--- a/hysop/core/arrays/tests/test_array.py
+++ b/hysop/core/arrays/tests/test_array.py
@@ -106,8 +106,8 @@ class TestArray(object):
                                                          order=MemoryOrdering.C_CONTIGUOUS)
         B = backend.arange(2*4*8, dtype=np.int8).reshape((2, 4, 8),
                                                          order=MemoryOrdering.F_CONTIGUOUS)
-        i1, j1, k1 = (randint(0, A.shape[i]-1) for i in xrange(3))
-        i0, j0, k0 = (randint(0, A.shape[i]-1) for i in xrange(3))
+        i1, j1, k1 = (randint(0, A.shape[i]-1) for i in range(3))
+        i0, j0, k0 = (randint(0, A.shape[i]-1) for i in range(3))
 
         assert A.dtype.itemsize == 1
         assert A.shape == (2, 4, 8)
@@ -116,7 +116,7 @@ class TestArray(object):
         assert A[0][1][0] == 8
         assert A[1][0][0] == 8*4
         assert A.strides == (8*4, 8, 1)
-        assert A[1][1][1] == np.sum(np.asarray(A.strides) / A.dtype.itemsize)
+        assert A[1][1][1] == np.sum(np.asarray(A.strides) // A.dtype.itemsize)
         assert A[i1][j1][k1] == np.sum(np.asarray(A.strides) * (i1, j1, k1))
         assert A[i0][j0][k0] == np.sum(np.asarray(A.strides) * (i0, j0, k0))
         assert (A[i1][j1][k1]-A[i0][j0][k0]) == np.dot(A.strides, (i1-i0, j1-j0, k1-k0))
@@ -128,7 +128,7 @@ class TestArray(object):
         assert B[0][1][0] == 2
         assert B[1][0][0] == 1
         assert B.strides == (1, 2, 2*4)
-        assert B[1][1][1] == np.sum(np.asarray(B.strides) / B.dtype.itemsize)
+        assert B[1][1][1] == np.sum(np.asarray(B.strides) // B.dtype.itemsize)
         assert B[i1][j1][k1] == np.sum(np.asarray(B.strides) * (i1, j1, k1))
         assert B[i0][j0][k0] == np.sum(np.asarray(B.strides) * (i0, j0, k0))
         assert (B[i1][j1][k1]-B[i0][j0][k0]) == np.dot(B.strides, (i1-i0, j1-j0, k1-k0))
@@ -317,7 +317,6 @@ class TestArray(object):
         assert backend.allclose(a-b, -(b-a))
         assert backend.allclose(a+b-c-d, -c-d+b+a)
         assert backend.allclose(a*b*c*d, d*c*a*b)
-        #assert backend.allclose( (a/b)*(c/d), (a*c)/(b*d) )
         a = a % b
         a = a//b
 
@@ -401,9 +400,9 @@ class TestArray(object):
         array_binary_ops = [
             '__eq__', '__ne__', '__le__', '__ge__', '__lt__', '__gt__',
             '__add__', '__sub__', '__mul__', '__pow__',
-            '__floordiv__', '__div__', '__mod__',
+            '__floordiv__', '__truediv__', '__mod__',
             '__radd__', '__rsub__', '__rmul__', '__rpow__',
-            '__rfloordiv__', '__rdiv__', '__rmod__'
+            '__rfloordiv__', '__rtruediv__', '__rmod__'
         ]
 
         # real_skip_list = ['angle', 'real', 'imag', 'conj', 'real_if_close']
@@ -431,7 +430,7 @@ class TestArray(object):
 
         def clamp(_amin, _amax):
             def _filter(variables):
-                for k, _vars in variables.iteritems():
+                for k, _vars in variables.items():
                     for i, var in enumerate(_vars):
                         if is_complex(var):
                             if isinstance(var, np.ndarray):
@@ -478,7 +477,7 @@ class TestArray(object):
                 # if there is a specific constraint we copy everything
                 dtypes = {}
                 if opname in input_constraints:
-                    for vname, vargs in variables.iteritems():
+                    for vname, vargs in variables.items():
                         for i, var in enumerate(vargs):
                             variables[vname][i] = variables[vname][i].copy()
                     filters = to_list(input_constraints[opname])
@@ -486,7 +485,7 @@ class TestArray(object):
                         f(variables)
                 self.dtypes = dtypes
 
-                for vname, vargs in variables.iteritems():
+                for vname, vargs in variables.items():
                     dtypes[vname] = variables[vname][0].dtype
                     for i, var in enumerate(vargs):
                         varname = '{}{}'.format(vname, i)
@@ -497,20 +496,21 @@ class TestArray(object):
                 return self
 
             def __exit__(self, exception, e, traceback):
-                if (e is not None):
-                    msg = '\nTESTING: Test failed in at {}::{}() with dtypes {}\n'
-                    msg = msg.format(backend.__class__.__name__, self.opname, self.dtypes)
-                    print msg
-                    raise exception, e, traceback
+                if (e is None):
+                    return True
+                msg = '\nTESTING: Test failed in at {}::{}() with dtypes {}\n'
+                msg = msg.format(backend.__class__.__name__, self.opname, self.dtypes)
+                print(msg)
+                return False
 
         def check_inputs(name, _in):
             isclose = np.isclose(_in[0], _in[1].get(handle=True), equal_nan=True)
             if not isclose.all():
-                print '{} inputs mismatch...'.format(name)
-                print '{} NUMPY INPUT:'.format(name.upper())
-                print _in[0][~isclose]
-                print '{} BACKEND INPUT:'.format(name.upper())
-                print _in[1].get()[~isclose]
+                print('{} inputs mismatch...'.format(name))
+                print('{} NUMPY INPUT:'.format(name.upper()))
+                print(_in[0][~isclose])
+                print('{} BACKEND INPUT:'.format(name.upper()))
+                print(_in[1].get()[~isclose])
                 raise RuntimeError('Inputs did not match... Fix your input filters.')
 
         def check_close(lhs, rhs, r0, r1, opname):
@@ -529,10 +529,10 @@ class TestArray(object):
                         # FIXME OpenCl support for float16
                         if r0.dtype == np.float16:
                             r1 = r1.astype(np.float16)
-                    if r0.dtype == np.bool:
-                        r1 = r1.astype(np.bool)
+                    if r0.dtype == np.bool_:
+                        r1 = r1.astype(np.bool_)
 
-                if (r0.dtype == np.bool) and (r1.dtype == np.bool):
+                if (r0.dtype == np.bool_) and (r1.dtype == np.bool_):
                     l2 = np.sqrt(np.nansum(r0 ^ r1)) / r0.size
                     linf = np.nanmax(r0 ^ r1)
                 else:
@@ -568,45 +568,45 @@ class TestArray(object):
                         if close:
                             msg = 'WARNING: test passed with relaxed precision for {}::{}.'
                             msg = msg.format(backend.__class__.__name__, opname)
-                            print msg
+                            print(msg)
                     if not close:
                         msg = '\n{}::{} returned dtypes did match (got {}) '
                         msg += 'but failed to match numpy output,'
                         msg += '\n absolute tolerance was set to {}.'
                         msg = msg.format(backend.__class__.__name__, opname, r1.dtype, tol)
-                        print msg
+                        print(msg)
                         if isinstance(r0, np.ndarray) and isinstance(r1, np.ndarray):
                             failed = (~np.isclose(r0, r1, equal_nan=True, atol=atol[0]))
                             if (lhs is not None):
                                 check_inputs('lhs', lhs)
-                                print 'LHS_INPUT'
-                                print lhs[0][failed]
+                                print('LHS_INPUT')
+                                print(lhs[0][failed])
                             if (rhs is not None):
                                 check_inputs('rhs', rhs)
-                                print 'RHS INPUT'
-                                print rhs[0][failed]
-                            print 'EXPECTED'
-                            print r0[failed]
-                            print 'GOT'
-                            print r1[failed]
+                                print('RHS INPUT')
+                                print(rhs[0][failed])
+                            print('EXPECTED')
+                            print(r0[failed])
+                            print('GOT')
+                            print(r1[failed])
                         else:
-                            print 'r0 => {}'.format(r0.__class__)
-                            print 'r1 => {}'.format(r1.__class__)
+                            print('r0 => {}'.format(r0.__class__))
+                            print('r1 => {}'.format(r1.__class__))
                         msg0 = 'Method {}::{} failed to match numpy output'
                         msg0 = msg0.format(backend.__class__.__name__, opname)
                         msg = msg0+msg1
-                        print
-                        print msg
+                        print()
+                        print(msg)
                         raise ValueError(msg)
                     else:
                         msg0 = '{}::{} matched numpy output '
                         msg0 = msg0.format(backend.__class__.__name__, opname)
                         msg = msg0+msg1
-                        print msg
+                        print(msg)
                 else:
                     msg = '\n{}::{} returned dtypes didn\'t match (expected {} but got {}).'
                     msg = msg.format(backend.__class__.__name__, opname, r0.dtype, r1.dtype)
-                    print msg
+                    print(msg)
 
                     msg = '{}::{} returned dtypes did not match, '\
                         'got {} but numpy returned {}.'
@@ -682,7 +682,7 @@ class TestArray(object):
         def make_arrays(dtype):
             ftype = match_float_type(dtype)
             atol[0] = np.finfo(ftype).eps
-            print '::atol switched to {} epsilon: {}.'.format(ftype, atol)
+            print('::atol switched to {} epsilon: {}.'.format(ftype, atol))
 
             a = (np.random.rand(8192)-0.5)*100
             b = (np.random.rand(8192)-0.5)*100
@@ -716,26 +716,26 @@ class TestArray(object):
             complex_types = (np.complex64,)
 
         for dtype in signed_types:
-            print '\n== SIGNED INTEGER OPS {} =='.format(dtype)
+            print('\n== SIGNED INTEGER OPS {} =='.format(dtype))
             a, b, A, B = make_arrays(dtype)
             test_operators(a, b, A, B)
 
         for dtype in unsigned_types:
-            print '\n== UNSIGNED INTEGER OPS {} =='.format(dtype)
+            print('\n== UNSIGNED INTEGER OPS {} =='.format(dtype))
             a, b, A, B = make_arrays(dtype)
             test_operators(a, b, A, B)
 
         # FIXME OpenCl backend half float and long double support
         for dtype in float_types:
-            print '\n== FLOAT OPS {} =='.format(dtype)
+            print('\n== FLOAT OPS {} =='.format(dtype))
             if isinstance(backend, OpenClArrayBackend) and (dtype in [np.float16, np.longdouble]):
-                print '  -- NO SUPPORT PROVIDED BY BACKEND --'
+                print('  -- NO SUPPORT PROVIDED BY BACKEND --')
                 continue
 
             a, b, A, B = make_arrays(dtype)
             test_operators(a, b, A, B)
 
-            print '\n== POLLUTED FLOAT OPS {} =='.format(dtype)
+            print('\n== POLLUTED FLOAT OPS {} =='.format(dtype))
             pollute(a)
             pollute(b)
 
@@ -744,10 +744,10 @@ class TestArray(object):
 
         # FIXME OpenCL complex functions: arcsin, arccos, floordix, pow, ...
         for dtype in complex_types:
-            print '\n== COMPLEX OPS {} =='.format(dtype)
+            print('\n== COMPLEX OPS {} =='.format(dtype))
             if isinstance(backend, OpenClArrayBackend):
                 if dtype in [np.clongdouble]:
-                    print '  -- NO SUPPORT PROVIDED BY BACKEND --'
+                    print('  -- NO SUPPORT PROVIDED BY BACKEND --')
                     continue
 
                 skip_list = [x for x in complex_skip_list]
@@ -762,7 +762,7 @@ class TestArray(object):
             a, b, A, B = make_arrays(dtype)
             test_operators(a, b, A, B, skip=skip_list)
 
-            print '\n== POLLUTED COMPLEX OPS {} =='.format(dtype)
+            print('\n== POLLUTED COMPLEX OPS {} =='.format(dtype))
             pollute(a)
             pollute(b)
 
@@ -789,50 +789,19 @@ class TestArray(object):
         backend = HostArrayBackend(allocator=allocator)
         self._test_backend(backend)
 
-    # def test_host_array_backend_mempool(self):
-    #     allocator = HostAllocator()
-    #     pool = allocator.memory_pool(name='host')
-    #     backend = HostArrayBackend(allocator=pool)
-
-    #     self._test_backend(backend)
-
-    #     backend.allocator.print_allocation_report()
-    #     assert backend.allocator.active_blocks == 0
-    #     backend.allocator.stop_holding()
-    #     assert backend.allocator.held_blocks == 0
-    #     backend.allocator.print_allocation_report()
-
     @opencl_failed
     def test_opencl_array_backend_allocator(self):
         from hysop.backend.device.opencl.opencl_allocator import OpenClImmediateAllocator
         for cl_env in iter_clenv():
-            print
-            print 'TESTING OPENCL PLATFORM {}'.format(cl_env.platform.name)
+            print()
+            print('TESTING OPENCL PLATFORM {}'.format(cl_env.platform.name))
             allocator = OpenClImmediateAllocator(queue=cl_env.default_queue)
             backend = OpenClArrayBackend(cl_env=cl_env, allocator=allocator)
             self._test_backend(backend)
 
-    # @opencl_failed
-    # def test_opencl_array_backend_pool(self):
-    #     from hysop.backend.device.opencl.opencl_allocator import OpenClImmediateAllocator
-    #     for cl_env in iter_clenv():
-    #         allocator = OpenClImmediateAllocator(queue=cl_env.default_queue)\
-    #                                     .memory_pool(name=cl_env.device.name)
-    #         backend = OpenClArrayBackend(cl_env=cl_env, allocator=allocator)
-
-    #         self._test_backend(backend)
-
-    #         backend.allocator.print_allocation_report()
-    #         assert backend.allocator.active_blocks == 0
-    #         backend.allocator.stop_holding()
-    #         assert backend.allocator.held_blocks == 0
-    #         backend.allocator.print_allocation_report()
-
 
 if __name__ == '__main__':
     test = TestArray()
     test.test_host_array_backend_allocator()
-    # test.test_host_array_backend_mempool()
     if __HAS_OPENCL_BACKEND__:
         test.test_opencl_array_backend_allocator()
-        # test.test_opencl_array_backend_pool()
diff --git a/hysop/core/checkpoints.py b/hysop/core/checkpoints.py
index 592ee6dec62e77e589890bdaba48a86c639f7bef..ca69863e749277fd819aeda859a1b44a73848bcc 100644
--- a/hysop/core/checkpoints.py
+++ b/hysop/core/checkpoints.py
@@ -11,7 +11,7 @@ from hysop.parameters import ScalarParameter, TensorParameter, BufferParameter
 from hysop.fields.cartesian_discrete_field import CartesianDiscreteScalarField
 
 class CheckpointHandler(object):
-    def __init__(self, load_checkpoint_path, save_checkpoint_path, 
+    def __init__(self, load_checkpoint_path, save_checkpoint_path,
             compression_method, compression_level,
             io_params, relax_constraints):
         check_instance(load_checkpoint_path, str, allow_none=True)
@@ -41,7 +41,7 @@ class CheckpointHandler(object):
 
         self._checkpoint_template   = None
         self._checkpoint_compressor = None
-    
+
     @property
     def load_checkpoint_path(self):
         return self._load_checkpoint_path
@@ -60,17 +60,17 @@ class CheckpointHandler(object):
     @property
     def relax_constraints(self):
         return self._relax_constraints
-      
+
     def get_mpio_parameters(self, mpi_params):
         io_params    = self.io_params
         comm         = mpi_params.comm
-        io_leader    = io_params.io_leader 
+        io_leader    = io_params.io_leader
         is_io_leader = (io_leader == mpi_params.rank)
         return (io_params, mpi_params, comm, io_leader, is_io_leader)
 
     def is_io_leader(self, mpi_params):
         return (self.io_params.io_leader == mpi_params.rank)
-    
+
     def finalize(self, mpi_params):
         if ((self._checkpoint_template is not None)
              and os.path.exists(self._checkpoint_template)
@@ -81,17 +81,17 @@ class CheckpointHandler(object):
                 pass
         self._checkpoint_template   = None
         self._checkpoint_compressor = None
-        
+
     def load_checkpoint(self, problem, simulation):
-        from hysop.problem import Problem 
+        from hysop.problem import Problem
         from hysop.simulation import Simulation
         check_instance(problem, Problem)
         check_instance(simulation, Simulation)
-        
+
         load_checkpoint_path = self.load_checkpoint_path
         if (load_checkpoint_path is None):
             return
-        
+
         vprint('\n>Loading {}problem checkpoint from \'{}\'...'.format(
             'relaxed' if self.relax_constraints else '', load_checkpoint_path))
         if not os.path.exists(load_checkpoint_path):
@@ -100,19 +100,19 @@ class CheckpointHandler(object):
         if (self.io_params is None):
             msg='Load checkpoint has been set to \'{}\' but checkpoint_io_params has not been specified.'
             raise RuntimeError(msg.format(load_checkpoint_path))
-        
+
         (io_params, mpi_params, comm, io_leader, is_io_leader) = self.get_mpio_parameters(problem.mpi_params)
         start = Wtime()
-        
+
         # extract checkpoint to directory if required
         if os.path.isfile(load_checkpoint_path):
             if load_checkpoint_path.endswith('.tar'):
                 if is_io_leader:
-                    load_checkpoint_dir = os.path.join(os.path.dirname(load_checkpoint_path), 
+                    load_checkpoint_dir = os.path.join(os.path.dirname(load_checkpoint_path),
                                           os.path.basename(load_checkpoint_path).replace('.tar', ''))
                     while os.path.exists(load_checkpoint_dir):
                         # ok, use another directory name to avoid dataloss...
-                        load_checkpoint_dir = os.path.join(os.path.dirname(load_checkpoint_path), 
+                        load_checkpoint_dir = os.path.join(os.path.dirname(load_checkpoint_path),
                                                            '{}'.format(uuid.uuid4().hex))
                     tf = tarfile.open(load_checkpoint_path, mode='r')
                     tf.extractall(path=load_checkpoint_dir)
@@ -125,16 +125,16 @@ class CheckpointHandler(object):
                 raise NotImplementedError(msg.format(load_checkpoint_path))
         elif os.path.isdir(load_checkpoint_path):
             load_checkpoint_dir = load_checkpoint_path
-            should_remove_dir   = False 
+            should_remove_dir   = False
         else:
             raise RuntimeError
-        
+
         # import checkpoint data
         self._import_checkpoint(problem, simulation, load_checkpoint_dir)
 
         if (is_io_leader and should_remove_dir):
             shutil.rmtree(load_checkpoint_dir)
-                
+
         ellapsed = Wtime() - start
         msg=' > Successfully imported checkpoint in {}.'
         vprint(msg.format(time2str(ellapsed)))
@@ -146,11 +146,11 @@ class CheckpointHandler(object):
         if (io_params is None):
             return False
         return io_params.should_dump(simulation)
-    
+
 
     # Checkpoint is first exported as a directory containing a hierarchy of arrays (field and parameters data + metadata)
     # This folder is than tarred (without any form of compression) so that a checkpoint consists in a single movable file.
-    # Data is already compressed during data export by the zarr module, using the blosc compressor (snappy, clevel=9). 
+    # Data is already compressed during data export by the zarr module, using the blosc compressor (snappy, clevel=9).
     def save_checkpoint(self, problem, simulation):
         save_checkpoint_path = self.save_checkpoint_path
         if (self.save_checkpoint_path is None):
@@ -159,16 +159,16 @@ class CheckpointHandler(object):
         if (self.io_params is None):
             msg='Load checkpoint has been set to \'{}\' but checkpoint io_params has not been specified.'
             raise RuntimeError(msg.format(load_checkpoint_path))
-        
+
         vprint('>Exporting problem checkpoint to \'{}\':'.format(save_checkpoint_path))
         if not save_checkpoint_path.endswith('.tar'):
             msg='Can only export checkpoint with tar extension, got {}.'
             raise NotImplementedError(msg.format(save_checkpoint_path))
         save_checkpoint_tar = save_checkpoint_path
-        
+
         (io_params, mpi_params, comm, io_leader, is_io_leader) = self.get_mpio_parameters(problem.mpi_params)
         start = Wtime()
-        
+
         # create a backup of last checkpoint just in case things go wrong
         if is_io_leader and os.path.exists(save_checkpoint_tar):
             backup_checkpoint_tar = save_checkpoint_tar + '.bak'
@@ -180,16 +180,16 @@ class CheckpointHandler(object):
 
         # determine checkpoint dump directory
         if is_io_leader:
-            save_checkpoint_dir = os.path.join(os.path.dirname(save_checkpoint_tar), 
+            save_checkpoint_dir = os.path.join(os.path.dirname(save_checkpoint_tar),
                                                os.path.basename(save_checkpoint_tar).replace('.tar', ''))
             while os.path.exists(save_checkpoint_dir):
                 # ok, use another directory name to avoid dataloss...
-                save_checkpoint_dir = os.path.join(os.path.dirname(save_checkpoint_tar), 
+                save_checkpoint_dir = os.path.join(os.path.dirname(save_checkpoint_tar),
                                                    '{}'.format(uuid.uuid4().hex))
         else:
             save_checkpoint_dir = None
         save_checkpoint_dir = mpi_params.comm.bcast(save_checkpoint_dir, root=io_leader)
-        
+
         # try to create the checkpoint directory, this is a collective MPI operation
         try:
             success, reason, nbytes = self._export_checkpoint(problem, simulation, save_checkpoint_dir)
@@ -198,7 +198,7 @@ class CheckpointHandler(object):
             success = False
             reason  = str(e)
         success = comm.allreduce(int(success)) == comm.size
-        
+
         # Compress checkpoint directory to tar (easier to copy/move between clusters)
         # Note that there is no effective compression here, zarr already compressed field/param data
         if success and is_io_leader and os.path.isdir(save_checkpoint_dir):
@@ -213,7 +213,7 @@ class CheckpointHandler(object):
                     shutil.rmtree(save_checkpoint_dir)
                 else:
                     raise RuntimeError('Could not tar checkpoint datadir.')
-                
+
                 ellapsed = Wtime() - start
                 effective_nbytes = os.path.getsize(save_checkpoint_tar)
                 compression_ratio = max(1.0, float(nbytes)/effective_nbytes)
@@ -224,15 +224,15 @@ class CheckpointHandler(object):
                 success = False
                 reason = str(e)
         success = comm.allreduce(int(success)) == comm.size
-        
+
         if success:
             if (backup_checkpoint_tar is not None) and os.path.isfile(backup_checkpoint_tar) and is_io_leader:
                 os.remove(backup_checkpoint_tar)
             return
-        
+
         from hysop.tools.warning import HysopDumpWarning
         msg='Failed to export checkpoint because: {}.'.format(reason)
-        warnings.warn(msg, HysopDumpWarning) 
+        warnings.warn(msg, HysopDumpWarning)
 
         # Something went wrong (I/O error or other) so we rollback to previous checkpoint (if there is one)
         vprint(' | An error occured during checkpoint creation, rolling back to previous checkpoint...')
@@ -243,30 +243,30 @@ class CheckpointHandler(object):
                 os.remove(save_checkpoint_tar)
             if (backup_checkpoint_tar is not None) and os.path.exists(backup_checkpoint_tar):
                 os.rename(backup_checkpoint_tar, save_checkpoint_tar)
-    
+
 
     def create_checkpoint_template(self, problem, simulation):
         # Create groups of arrays on disk (only hierarchy and array metadata is stored in the template)
-        # /!\ ZipStores are not safe from multiple processes so we use a DirectoryStore 
+        # /!\ ZipStores are not safe from multiple processes so we use a DirectoryStore
         #      that can then be tarred manually by io_leader.
-        
+
         save_checkpoint_path = self.save_checkpoint_path
         if (save_checkpoint_path is None):
             return
-        
+
         if not save_checkpoint_path.endswith('.tar'):
             msg='Can only export checkpoint with tar extension, got {}.'
             raise NotImplementedError(msg.format(save_checkpoint_path))
-        
+
         (io_params, mpi_params, comm, io_leader, is_io_leader) = self.get_mpio_parameters(problem.mpi_params)
-        
+
         # determine an empty directory for the template
         if is_io_leader:
-            checkpoint_template = os.path.join(os.path.dirname(save_checkpoint_path), 
+            checkpoint_template = os.path.join(os.path.dirname(save_checkpoint_path),
                                                os.path.basename(save_checkpoint_path).replace('.tar', '.template'))
             while os.path.exists(checkpoint_template):
                 # ok, use another directory name to avoid dataloss...
-                checkpoint_template = os.path.join(os.path.dirname(save_checkpoint_path), 
+                checkpoint_template = os.path.join(os.path.dirname(save_checkpoint_path),
                                                '{}'.format(uuid.uuid4().hex))
         else:
             checkpoint_template = None
@@ -277,13 +277,13 @@ class CheckpointHandler(object):
         import zarr
         from numcodecs import blosc, Blosc
         blosc.use_threads = (mpi_params.size == 1) # disable threads for multiple processes (can deadlock)
-        
+
         # array data compressor
         self._compression_method = first_not_None(self._compression_method, 'zstd')
         self._compression_level  = first_not_None(self._compression_level, 6)
         compressor = Blosc(cname=self._compression_method, clevel=self._compression_level, shuffle=Blosc.SHUFFLE)
         self._checkpoint_compressor = compressor
-        
+
         # io_leader creates a directory layout on (hopefully) shared filesystem
         if is_io_leader:
             if os.path.exists(checkpoint_template):
@@ -304,7 +304,7 @@ class CheckpointHandler(object):
         # count number of total data bytes without compression
         nbytes = 0
         fmt_key = self._format_zarr_key
-        
+
         # operators
         for op in problem.nodes:
             if not op.checkpoint_required():
@@ -329,7 +329,7 @@ class CheckpointHandler(object):
                 assert isinstance(param._value, np.ndarray), type(param._value)
                 value = param._value
                 array = params_group.create_dataset(name=fmt_key(param.name),
-                            overwrite=False, data=None, synchronizer=None, 
+                            overwrite=False, data=None, synchronizer=None,
                             compressor=compressor, shape=value.shape, chunks=None,
                             dtype=value.dtype, fill_value=default_invalid_value(value.dtype))
                 array.attrs['kind'] = param.__class__.__name__
@@ -337,11 +337,11 @@ class CheckpointHandler(object):
             else:
                 msg = 'Cannot export parameter of type {}.'.format(param.__class__.__name__)
                 raise NotImplementedError(msg)
-        
+
         # Generate discrete field arrays
         # Here we assume that each process has a non-empty chunk of data
         for field in sorted(problem.fields, key=operator.attrgetter('name')):
-            
+
             # we do not care about fields discretized only on temporary fields
             if all(df.is_tmp for df in field.discrete_fields.values()):
                 continue
@@ -353,7 +353,7 @@ class CheckpointHandler(object):
 
             dim = field.dim
             domain = field.domain._domain
-            
+
             if isinstance(domain, Box):
                 if (field_group is not None):
                     field_group.attrs['domain'] = 'Box'
@@ -362,17 +362,17 @@ class CheckpointHandler(object):
                     field_group.attrs['end']    = to_tuple(domain.end)
                     field_group.attrs['length'] = to_tuple(domain.length)
             else:
-                # for now we just handle Boxed domains 
+                # for now we just handle Boxed domains
                 raise NotImplementedError
 
             for (k, topo) in enumerate(sorted(field.discrete_fields, key=operator.attrgetter('full_tag'))):
                 dfield = field.discrete_fields[topo]
                 mesh   = topo.mesh._mesh
-                
+
                 # we do not care about temporary fields
                 if dfield.is_tmp:
                     continue
-                
+
                 if not isinstance(dfield, CartesianDiscreteScalarField):
                     # for now we just handle CartesianDiscreteScalarFields.
                     raise NotImplementedError
@@ -380,9 +380,9 @@ class CheckpointHandler(object):
                 global_resolution = topo.global_resolution  # logical grid size
                 grid_resolution   = topo.grid_resolution    # effective grid size
                 ghosts            = topo.ghosts
-           
+
                 # get local resolutions exluding ghosts
-                compute_resolutions = comm.gather(to_tuple(mesh.compute_resolution), root=io_leader) 
+                compute_resolutions = comm.gather(to_tuple(mesh.compute_resolution), root=io_leader)
 
                 # is the current process handling a right boundary data block on a distributed axe ?
                 is_at_right_boundary = (mesh.is_at_right_boundary*(mesh.proc_shape>1)).any()
@@ -390,14 +390,14 @@ class CheckpointHandler(object):
 
                 if not is_io_leader:
                     continue
-                
-                # io_leader can now determine wether the cartesian discretization is uniformly distributed 
+
+                # io_leader can now determine wether the cartesian discretization is uniformly distributed
                 # between processes or not
-                inner_compute_resolutions = tuple(compute_resolutions[i] for i in range(len(compute_resolutions)) 
+                inner_compute_resolutions = tuple(compute_resolutions[i] for i in range(len(compute_resolutions))
                                                                          if not is_at_right_boundary[i])
-                grid_is_uniformly_distributed = all(res == inner_compute_resolutions[0] 
+                grid_is_uniformly_distributed = all(res == inner_compute_resolutions[0]
                                                             for res in inner_compute_resolutions)
-                
+
                 if grid_is_uniformly_distributed:
                     # We divide the array in 'compute_resolution' chunks, no sychronization is required.
                     # Here there is no need to use the process locker to write this array data.
@@ -412,29 +412,29 @@ class CheckpointHandler(object):
                     if dim == 1:
                         chunks = 1024*1024    # at least 1MB / chunk
                     elif dim == 2:
-                        chunks = (1024,1024)  # at least 1MB / chunk 
+                        chunks = (1024,1024)  # at least 1MB / chunk
                     elif dim == 3:
                         chunks = (64,128,128) # at least 1MB / chunk
                     else:
                         raise NotImplementedError(dim)
-                
+
                 if should_sync:
                     raise NotImplementedError
-                
+
                 # Create array (no memory is allocated here, even on disk because data blocks are empty)
                 dtype = dfield.dtype
                 shape = grid_resolution
-                
+
                 # We scale the keys up to 100 topologies, which seams to be a pretty decent upper limit
                 # on a per field basis.
-                array = field_group.create_dataset(name='topo_{:02d}'.format(k), 
-                            overwrite=False, data=None, synchronizer=None, 
-                            compressor=compressor, shape=shape, chunks=chunks, 
+                array = field_group.create_dataset(name='topo_{:02d}'.format(k),
+                            overwrite=False, data=None, synchronizer=None,
+                            compressor=compressor, shape=shape, chunks=chunks,
                             dtype=dtype, fill_value=default_invalid_value(dtype))
                 array.attrs['should_sync'] = should_sync
 
                 # We cannot rely on discrete mesh name because of topology names
-                # so we save some field metadata to be able to differentiate between 
+                # so we save some field metadata to be able to differentiate between
                 # discrete fields with the exact same grid resolution.
                 # proc_shape and name are used in last resort to differentiate discrete fields.
                 array.attrs['lboundaries'] = to_tuple(map(str, mesh.global_lboundaries))
@@ -442,11 +442,11 @@ class CheckpointHandler(object):
                 array.attrs['ghosts']      = to_tuple(mesh.ghosts)
                 array.attrs['proc_shape']  = to_tuple(mesh.proc_shape)
                 array.attrs['name']        = dfield.name
-                
+
                 nbytes += np.prod(shape, dtype=np.int64) * dtype.itemsize
-        
+
         if (root is not None):
-            root.attrs['nbytes'] = nbytes
+            root.attrs['nbytes'] = int(nbytes)
             msg='>Maximum checkpoint size will be {}, without compression and metadata.'
             vprint(root.tree())
             vprint(msg.format(bytes2str(nbytes)))
@@ -457,7 +457,7 @@ class CheckpointHandler(object):
                 root.close()
         except AttributeError:
             pass
-        
+
 
     def _export_checkpoint(self, problem, simulation, save_checkpoint_dir):
         # Given a template, fill field and parameters data from all processes.
@@ -475,13 +475,13 @@ class CheckpointHandler(object):
                 shutil.rmtree(save_checkpoint_dir)
             shutil.copytree(checkpoint_template, save_checkpoint_dir)
         comm.Barrier()
-        
+
         if not os.path.isdir(save_checkpoint_dir):
             msg='Could not find checkpoint directory \'{}\'. Are you using a network file system ?'.format(save_checkpoint_dir)
             raise RuntimeError(msg)
 
         #Every process now loads the same dataset template
-        import zarr 
+        import zarr
         try:
             store = zarr.DirectoryStore(save_checkpoint_dir)
             root  = zarr.open_group(store=store, mode='r+', synchronizer=None, path='data')
@@ -494,14 +494,14 @@ class CheckpointHandler(object):
             msg='A fatal error occured during checkpoint export, checkpoint template may be illformed.'
             vprint(msg)
             vprint()
-            raise 
+            raise
 
         fmt_key = self._format_zarr_key
 
         # Export simulation data
         if is_io_leader:
             simulation.save_checkpoint(simu_group, mpi_params, io_params, checkpoint_compressor)
-        
+
         # Export operator data
         for op in problem.nodes:
             if not op.checkpoint_required():
@@ -524,14 +524,14 @@ class CheckpointHandler(object):
                 else:
                     msg = 'Cannot dump parameter of type {}.'.format(param.__class__.__name__)
                     raise NotImplementedError(msg)
-        
+
         # Unlike parameter all processes participate for fields
         for field in sorted(problem.fields, key=operator.attrgetter('name')):
 
             # we do not care about fields discretized only on temporary fields
             if all(df.is_tmp for df in field.discrete_fields.values()):
                 continue
-                
+
             msg = ' | dumping field {}...'.format(field.pretty_name)
             vprint(msg)
 
@@ -539,7 +539,7 @@ class CheckpointHandler(object):
             for (k, topo) in enumerate(sorted(field.discrete_fields, key=operator.attrgetter('full_tag'))):
                 dfield = field.discrete_fields[topo]
                 mesh   = topo.mesh._mesh
-                
+
                 # we do not care about temporary fields
                 if dfield.is_tmp:
                     continue
@@ -552,18 +552,18 @@ class CheckpointHandler(object):
                 assert (array.shape == mesh.grid_resolution).all(), (array.shape, mesh.grid_resolution)
                 assert array.dtype == dfield.dtype, (array.dtype, dfield.dtype)
 
-                if should_sync: 
+                if should_sync:
                     # Should not be required untill we allow non-uniform discretizations
                     global_start = mesh.global_start
                     global_stop  = mesh.global_stop
                     raise NotImplementedError('Synchronized multiprocess write has not been implemented yet.')
                 else:
-                    assert ((mesh.compute_resolution == array.chunks).all() 
+                    assert ((mesh.compute_resolution == array.chunks).all()
                          or (mesh.is_at_right_boundary*(mesh.proc_shape>1)).any())
-                    local_data = dfield.compute_data[0].get()
+                    local_data = dfield.compute_data[0].get().handle
                     global_slices = mesh.global_compute_slices
                     array[global_slices] = local_data # ok, every process writes to an independent data blocks
-        
+
         # Some zarr store formats require a final close to flush data
         try:
             root.close()
@@ -571,20 +571,20 @@ class CheckpointHandler(object):
             pass
 
         return True, None, nbytes
-    
+
 
     # On data import, there is no need to synchronize read-only arrays
     # so we are good with multiple processes reading overlapping data blocks
     def _import_checkpoint(self, problem, simulation, load_checkpoint_dir):
-        
+
         (io_params, mpi_params, comm, io_leader, is_io_leader) = self.get_mpio_parameters(problem.mpi_params)
         mpi_params.comm.Barrier()
 
         if not os.path.isdir(load_checkpoint_dir):
             msg='Could not find checkpoint directory \'{}\'. Are you using a network file system ?'.format(load_checkpoint_dir)
             raise RuntimeError(msg)
-        
-        import zarr 
+
+        import zarr
         store = zarr.DirectoryStore(load_checkpoint_dir)
         try:
             root = zarr.open_group(store=store, mode='r', synchronizer=None, path='data')
@@ -595,8 +595,8 @@ class CheckpointHandler(object):
             msg='A fatal error occured during checkpoint import, checkpoint data may be illformed.'
             vprint(msg)
             vprint()
-            raise 
-        
+            raise
+
         # Define helper functions
         relax_constraints = self.relax_constraints
         raise_error = self._raise_error
@@ -606,12 +606,12 @@ class CheckpointHandler(object):
             raise_warning = self._raise_error
         load_array_data = functools.partial(self._load_array_data, on_mismatch=raise_warning)
         fmt_key = self._format_zarr_key
-        
+
         # Import simulation data after parameters are up to date
         msg = ' | importing simulation...'
         vprint(msg)
         simulation.load_checkpoint(simu_group, mpi_params, io_params, relax_constraints)
-        
+
         # Import operator data
         for op in problem.nodes:
             if not op.checkpoint_required():
@@ -623,7 +623,7 @@ class CheckpointHandler(object):
                 continue
             operator_group = operators_group[key]
             op.load_checkpoint(operator_group, mpi_params, io_params, relax_constraints)
-    
+
         # Import parameters, hopefully parameter names match the ones in the checkpoint
         msg = ' | importing parameters...'
         vprint(msg)
@@ -634,7 +634,7 @@ class CheckpointHandler(object):
                 msg='Checkpoint directory \'{}\' does not contain any data regarding to parameter {}'
                 msg=msg.format(load_checkpoint_dir, param.name)
                 raise_error(msg)
-            
+
             array = params_group[key]
 
             if array.attrs['kind'] != param.__class__.__name__:
@@ -644,7 +644,7 @@ class CheckpointHandler(object):
 
             if isinstance(param, (ScalarParameter, TensorParameter, BufferParameter)):
                 value = param._value
-                
+
                 if (array.shape != value.shape):
                     msg='Parameter shape does not match with checkpointed parameter {}, loaded shape {} but expected {}.'
                     msg=msg.format(param.name, array.shape, value.shape)
@@ -654,15 +654,15 @@ class CheckpointHandler(object):
                     msg='Parameter datatype does not match with checkpointed parameter {}, loaded dtype {} but expected {}.'
                     msg=msg.format(param.name, array.dtype, value.dtype)
                     raise_warning(msg)
-                
+
                 value[...] = array[...]
             else:
                 msg = 'Cannot import parameter of type {}.'.format(param.__class__.__name__)
                 raise NotImplementedError(msg)
-        
+
         # Import discrete fields, this is a bit more tricky because topologies or simply topology
-        # names can change. Moreover there is currently no waranty that the same operator graph is 
-        # generated for the exact same problem configuration each time. We just emit user warnings 
+        # names can change. Moreover there is currently no waranty that the same operator graph is
+        # generated for the exact same problem configuration each time. We just emit user warnings
         # if we find a way to match topologies that do not match exactly checkpointed ones.
         for field in sorted(problem.fields, key=operator.attrgetter('name')):
             domain = field.domain._domain
@@ -673,7 +673,7 @@ class CheckpointHandler(object):
 
             msg = ' | importing field {}...'.format(field.pretty_name)
             vprint(msg)
-            
+
             field_key = fmt_key(field.name)
             if (field_key not in fields_group):
                 msg='Checkpoint directory \'{}\' does not contain any data regarding to field {}'
@@ -685,7 +685,7 @@ class CheckpointHandler(object):
             # check that domain matches
             if field_group.attrs['domain'] != domain.__class__.__name__:
                 msg='Domain kind does not match with checkpointed field {}, loaded kind {} but expected {}.'
-                msg=msg.format(field.name, field_group.attrs['domain'], domain.__class__.__name__) 
+                msg=msg.format(field.name, field_group.attrs['domain'], domain.__class__.__name__)
                 raise_error(msg)
             if field_group.attrs['dim'] != domain.dim:
                 msg='Domain dim does not match with checkpointed field {}, loaded dim {} but expected {}.'
@@ -711,7 +711,7 @@ class CheckpointHandler(object):
                 # we do not care about temporary fields
                 if dfield.is_tmp:
                     continue
-                
+
                 # for now we just handle CartesianDiscreteScalarFields.
                 if not isinstance(dfield, CartesianDiscreteScalarField):
                     raise NotImplementedError
@@ -720,7 +720,7 @@ class CheckpointHandler(object):
                 candidates = tuple(filter(lambda d: np.equal(d.shape, mesh.grid_resolution).all(), field_group.values()))
                 if len(candidates)==0:
                     msg='Could not find any topology with shape {} for field {}, available discretizations are: {}.'
-                    msg=msg.format(to_tuple(mesh.grid_resolution), field.name, 
+                    msg=msg.format(to_tuple(mesh.grid_resolution), field.name,
                             ', '.join(set(str(d.shape) for d in field_group.values())))
                     raise_error(msg)
                 elif len(candidates)==1:
@@ -737,7 +737,7 @@ class CheckpointHandler(object):
                 elif len(candidates)==1:
                     load_array_data(candidates[0], dfield)
                     continue
-                
+
                 # From now on multiple topologies have the same grid resolution and boundary conditions
                 # We try to match exact ghost count, user did likely not change the order of the methods.
                 old_candidates = candidates
@@ -748,7 +748,7 @@ class CheckpointHandler(object):
                 elif len(candidates)==1:
                     load_array_data(candidates[0], dfield)
                     continue
-                
+
                 # Now we try to differentiate by using zero ghost info (ghosts may change with method order, but zero-ghost is very specific)
                 # Topology containing zero ghost layer usually target Fortran topologies for FFT operators or method that do not require any ghosts.
                 old_candidates = candidates
@@ -759,7 +759,7 @@ class CheckpointHandler(object):
                 elif len(candidates)==1:
                     load_array_data(candidates[0], dfield)
                     continue
-                
+
                 # Now we try to match exact topology shape (the MPICart grid of processes)
                 # We try this late because use may run the simulation again with a different number of processes.
                 old_candidates = candidates
@@ -770,7 +770,7 @@ class CheckpointHandler(object):
                 elif len(candidates)==1:
                     load_array_data(candidates[0], dfield)
                     continue
-                
+
                 # Now we try to differentiate by using topo splitting info (axes on which data is distributed)
                 # This again is very specific and can differentiate topologies used for spectral transforms.
                 old_candidates = candidates
@@ -781,7 +781,7 @@ class CheckpointHandler(object):
                 elif len(candidates)==1:
                     load_array_data(candidates[0], dfield)
                     continue
-                
+
                 # Ok now, our last hope is to match the discrete field name
                 old_candidates = candidates
                 candidates = tuple(filter(lambda d: d.attrs['name'] == dfield.name, candidates))
@@ -791,19 +791,19 @@ class CheckpointHandler(object):
                 elif len(candidates)==1:
                     load_array_data(candidates[0], dfield)
                     continue
-                
+
                 assert len(candidates) > 1, 'Something went wrong.'
 
                 msg='Could not discriminate checkpointed topologies for field {}, got {} candidates remaining.'
                 msg=msg.format(field.name, len(candidates))
                 raise_error(msg)
-                
-       
+
+
     @staticmethod
     def _load_array_data(array, dfield, on_mismatch):
         mesh = dfield.mesh._mesh
         assert np.equal(array.shape, mesh.grid_resolution).all()
-        
+
         # compare attributes but ignore name because this can be annoying
         attr_names = ('left boundaries', 'right boundaries', 'ghost layers', 'process shape', 'datatype')
         array_attributes = (array.attrs['lboundaries'], array.attrs['rboundaries'], array.attrs['ghosts'],
@@ -822,7 +822,7 @@ class CheckpointHandler(object):
         data = np.asarray(array[global_slices], dtype=dfield.dtype)
         dfield.compute_data[0][...] = data
         dfield.exchange_ghosts()
-        
+
     @staticmethod
     def _raise_error(msg):
         vprint(' |   error: {}\n'.format(msg))
@@ -834,7 +834,7 @@ class CheckpointHandler(object):
     def _raise_warning(msg):
         msg = ' |   warning: {}'.format(msg)
         vprint(msg)
-    
+
     @staticmethod
     def _format_zarr_key(k):
         # note keys that contains the special characters '/' and '\' do not work well with zarr
diff --git a/hysop/core/graph/allocator.py b/hysop/core/graph/allocator.py
index bf80ce7962c76d8b2fd864e71ecd814ee22c98df..154b38e7386b036c7aa97340e911831ae9b7bce6 100644
--- a/hysop/core/graph/allocator.py
+++ b/hysop/core/graph/allocator.py
@@ -1,21 +1,20 @@
 
 from abc import ABCMeta, abstractmethod
-from hysop.deps import np
-from hysop.tools.types import check_instance
+import numpy as np
 
+from hysop.tools.types import check_instance
 from hysop.core.arrays.all import ArrayBackend, HostArrayBackend, OpenClArrayBackend
 
 
-class MemoryRequestsAllocator(object):
-    __metaclass__ = ABCMeta
+class MemoryRequestsAllocator(object, metaclass=ABCMeta):
 
     @classmethod
     @not_implemented
     def handle_requests(cls, requests):
         pass
-    
+
 class StandardArrayAllocator(MemoryAllocator):
-    
+
     def __init__(self, array_backend):
         check_instance(self, ArrayBackend)
         self.array_backend = array_backend
@@ -25,20 +24,20 @@ class StandardArrayAllocator(MemoryAllocator):
         dtype = np.dtype(np.uint8)
         assert dtype.itemsize == 1
         return npb.empty(shape=(nbytes,), dtype=dtype)
-    
+
     def handle_requests(self, requests):
         from hysop.core.graph.mem_request import MultipleOperatorMemoryRequests
         if not isinstance(requests, MultipleOperatorMemoryRequests):
             msg = 'requests is not an instance of MultipleOperatorMemoryRequests (got a {}).'
             raise ValueError(msg.format(requests.__class__))
-        
+
         cls = self.__class__
 
         total_bytes = requests.min_bytes_to_allocate(self)
         if total_bytes == 0:
             return
         array = self.allocate(total_bytes)
-        
+
         op_requests = requests._all_requests_per_allocator[self]
         views       = requests._allocated_buffers
         self.build_array_views(array, op_requests, views)
@@ -47,19 +46,19 @@ class StandardArrayAllocator(MemoryAllocator):
         from hysop.core.graph.mem_request import OperatorMemoryRequests
         check_instance(op_requests, dict)
         ptr = array.ctypes.data
-        for (op,requests) in op_requests.iteritems():
+        for (op,requests) in op_requests.items():
             check_instance(requests, dict, values=OperatorMemoryRequests)
             start_idx = 0
-            for (req_id, req) in requests.iteritems():
+            for (req_id, req) in requests.items():
                 align_offset = (-ptr % req.alignment)
-                start_idx += align_offset 
-                end_idx    = start_idx + req.data_bytes() 
-                
+                start_idx += align_offset
+                end_idx    = start_idx + req.data_bytes()
+
                 view = data[start_idx:end_idx].view(req.dtype).reshape(req.shape)
                 if op not in views:
                     views[op] = {}
                 views[op][req_id] = view
-                
+
                 start_idx = end_idx
                 ptr += align_offset + req.data_bytes()
             assert end_idx <= total_bytes
@@ -74,7 +73,7 @@ if __name__ == '__main__':
     m1 = NumpyMemRequest(shape=(2,),     dtype=np.int32)
     m2 = NumpyMemRequest(shape=(2,2),    dtype=np.int32)
     m3 = NumpyMemRequest(shape=(2,2,2,), dtype=np.int32, alignment=64)
-     
+
     m4 = NumpyMemRequest(count=3,    dtype=np.int32)
     m5 = NumpyMemRequest(shape=(4,), dtype=np.int32)
     m6 = NumpyMemRequest(shape=(8,), dtype=np.int32)
@@ -84,7 +83,7 @@ if __name__ == '__main__':
     opm0.add_mem_request('reqA1', m1)
     opm0.add_mem_request('reqA2', m2)
     opm0.add_mem_request('reqA3', m3)
-    
+
     opm1 = OperatorMemoryRequests('opB')
     opm1.add_mem_request('reqB0', m4)
     opm1.add_mem_request('reqB1', m5)
@@ -93,26 +92,26 @@ if __name__ == '__main__':
     all_reqs = MultipleOperatorMemoryRequests()
     all_reqs.add_mem_requests(opm0)
     all_reqs.add_mem_requests(opm1)
-    
+
     all_reqs.allocate()
-    
+
     m0.data[...] = 1
     m1.data[...] = 2
     m2.data[...] = 3
     m3.data[...] = 4
-    
-    print m0.data
-    print m1.data
-    print m2.data
-    print m3.data
-    print
-    print m4.data
-    print m5.data
-    print m6.data
-    print
-    print all_reqs.buffers[NumpyMemoryAllocator][0].dtype
-    print all_reqs.buffers[NumpyMemoryAllocator][0].shape
-    print all_reqs.buffers[NumpyMemoryAllocator][0].view(dtype=np.int32)
+
+    print(m0.data)
+    print(m1.data)
+    print(m2.data)
+    print(m3.data)
+    print()
+    print(m4.data)
+    print(m5.data)
+    print(m6.data)
+    print()
+    print(all_reqs.buffers[NumpyMemoryAllocator][0].dtype)
+    print(all_reqs.buffers[NumpyMemoryAllocator][0].shape)
+    print(all_reqs.buffers[NumpyMemoryAllocator][0].view(dtype=np.int32))
 
     assert m3.data.ctypes.data % 64 == 0
 
diff --git a/hysop/core/graph/computational_graph.py b/hysop/core/graph/computational_graph.py
index 16af5b5f023d842992c0e01077b781fa708d6cfc..85989d63b67adafaf6723dc6c05b7052a135687b 100644
--- a/hysop/core/graph/computational_graph.py
+++ b/hysop/core/graph/computational_graph.py
@@ -20,15 +20,20 @@ from hysop.core.mpi.redistribute import RedistributeInter
 from abc import ABCMeta, abstractmethod
 
 
-class ComputationalGraph(ComputationalGraphNode):
+class ComputationalGraph(ComputationalGraphNode, metaclass=ABCMeta):
     """
     Interface of an abstract graph of continuous operators (ie. a computational graph).
     """
 
-    __metaclass__ = ABCMeta
-
     __FORCE_REPORTS__ = False
 
+    @debug
+    def __new__(cls, candidate_input_tensors=None,
+                 candidate_output_tensors=None,
+                 **kwds):
+        return super(ComputationalGraph, cls).__new__(cls,
+                input_fields=None, output_fields=None, **kwds)
+
     @debug
     def __init__(self, candidate_input_tensors=None,
                  candidate_output_tensors=None,
@@ -76,24 +81,24 @@ class ComputationalGraph(ComputationalGraphNode):
             self._profiler += node._profiler
 
     def node_requirements_report(self, requirements):
-        values = [(u'OPERATOR', u'TOPOLOGY', u'TSTATE', u'GHOSTS', u'MEMORY ORDER',
-                   u'NODE.MRO[0]', u'NODE.MRO[1]', u'NODE.MRO[2]')]
+        values = [('OPERATOR', 'TOPOLOGY', 'TSTATE', 'GHOSTS', 'MEMORY ORDER',
+                   'NODE.MRO[0]', 'NODE.MRO[1]', 'NODE.MRO[2]')]
         for node in self.nodes:
             reqs = node.get_node_requirements()
             if not isinstance(reqs, OperatorRequirements):
                 continue
-            opname = node.pretty_name.decode('utf-8')
+            opname = node.pretty_name
             optypes = type(node).__mro__
             n = len(optypes)
-            optypes = tuple(_.__name__ for _ in optypes[:min(3, n)]) + (u'',)*(3-n)
+            optypes = tuple(_.__name__ for _ in optypes[:min(3, n)]) + ('',)*(3-n)
             vals = (opname,
                     reqs.enforce_unique_topology_shape, reqs.enforce_unique_transposition_state,
                     reqs.enforce_unique_ghosts, reqs.enforce_unique_memory_order,
                     ) + optypes
-            vals = tuple(unicode(x) for x in vals)
+            vals = tuple(map(str, vals))
             values.append(vals)
 
-        template = u'\n   {:<{name_size}}   {:^{topology_size}}      {:^{tstates_size}}      {:^{ghosts_size}}      {:^{order_size}}      {:<{type_size0}}      {:<{type_size1}}      {:<{type_size2}}'
+        template = '\n   {:<{name_size}}   {:^{topology_size}}      {:^{tstates_size}}      {:^{ghosts_size}}      {:^{order_size}}      {:<{type_size0}}      {:<{type_size1}}      {:<{type_size2}}'
         name_size = max(strlen(s[0]) for s in values)
         topology_size = max(strlen(s[1]) for s in values)
         tstates_size = max(strlen(s[2]) for s in values)
@@ -103,7 +108,7 @@ class ComputationalGraph(ComputationalGraphNode):
         type_size1 = max(strlen(s[6]) for s in values)
         type_size2 = max(strlen(s[7]) for s in values)
 
-        ss = u''
+        ss = ''
         for (opname,  enforce_unique_topology_shape, enforce_unique_transposition_state,
                 enforce_unique_ghosts, enforce_unique_memory_order, optype0, optype1, optype2) in values:
             ss += template.format(
@@ -122,9 +127,9 @@ class ComputationalGraph(ComputationalGraphNode):
                 type_size1=type_size1,
                 type_size2=type_size2)
 
-        title = u'ComputationalGraph {} node requirements report '.format(
-            self.pretty_name.decode('utf-8'))
-        return u'\n{}\n'.format(framed_str(title=title, msg=ss[1:])).encode('utf-8')
+        title = 'ComputationalGraph {} node requirements report '.format(
+            self.pretty_name)
+        return '\n{}\n'.format(framed_str(title=title, msg=ss[1:]))
 
     def field_requirements_report(self, requirements):
         inputs, outputs = {}, {}
@@ -133,76 +138,75 @@ class ComputationalGraph(ComputationalGraphNode):
         def sorted_reqs(reqs):
             return sorted(reqs, key=lambda x:
                           '{}::{}'.format(x.field.name, x.operator.name))
-        for field, mreqs in requirements.input_field_requirements.iteritems():
-            for td, reqs in mreqs.requirements.iteritems():
+        for field, mreqs in requirements.input_field_requirements.items():
+            for td, reqs in mreqs.requirements.items():
                 for req in reqs:
                     inputs.setdefault(td, {}).setdefault(field, []).append(req)
-        for td, td_reqs in inputs.iteritems():
+        for td, td_reqs in inputs.items():
             sin = sinputs.setdefault(td, [])
-            for field, reqs in td_reqs.iteritems():
+            for field, reqs in td_reqs.items():
                 for req in sorted_reqs(reqs):
-                    opname = getattr(req.operator, 'pretty_name', 'UnknownOperator').decode('utf-8')
-                    fname = getattr(req.field,    'pretty_name', 'UnknownField').decode('utf-8')
+                    opname = getattr(req.operator, 'pretty_name', 'UnknownOperator')
+                    fname = getattr(req.field,    'pretty_name', 'UnknownField')
                     min_ghosts = req.ghost_str(req.min_ghosts)
                     max_ghosts = req.ghost_str(req.max_ghosts+1)
                     discr = str(req.operator.input_fields[field].grid_resolution)
-                    ghosts = u'{}<=ghosts<{}'.format(min_ghosts, max_ghosts)
+                    ghosts = '{}<=ghosts<{}'.format(min_ghosts, max_ghosts)
                     can_split = req.can_split.view(npw.int8)
-                    memory_order = u'{}'.format(req.memory_order) if req.memory_order else u'ANY'
-                    can_split = u'[{}]'.format(
-                        u','.join('1' if cs else '0' for cs in req.can_split))
-                    tstates = u'{}'.format(u','.join(str(ts) for ts in req.tstates)) \
+                    memory_order = '{}'.format(req.memory_order) if req.memory_order else 'ANY'
+                    can_split = '[{}]'.format(
+                        ','.join('1' if cs else '0' for cs in req.can_split))
+                    tstates = '{}'.format(','.join(str(ts) for ts in req.tstates)) \
                         if req.tstates else 'ANY'
                     sin.append((opname, fname, discr, ghosts, memory_order, can_split, tstates))
-        for field, mreqs in requirements.output_field_requirements.iteritems():
-            for td, reqs in mreqs.requirements.iteritems():
+        for field, mreqs in requirements.output_field_requirements.items():
+            for td, reqs in mreqs.requirements.items():
                 for req in reqs:
                     outputs.setdefault(td, {}).setdefault(field, []).append(req)
-        for td, td_reqs in outputs.iteritems():
+        for td, td_reqs in outputs.items():
             sout = soutputs.setdefault(td, [])
-            for field, reqs in td_reqs.iteritems():
+            for field, reqs in td_reqs.items():
                 for req in sorted_reqs(reqs):
-                    opname = getattr(req.operator, 'pretty_name', 'UnknownOperator').decode('utf-8')
-                    fname = getattr(req.field,    'pretty_name', 'UnknownField').decode('utf-8')
+                    opname = getattr(req.operator, 'pretty_name', 'UnknownOperator')
+                    fname = getattr(req.field,    'pretty_name', 'UnknownField')
                     min_ghosts = req.ghost_str(req.min_ghosts)
                     max_ghosts = req.ghost_str(req.max_ghosts+1)
                     discr = str(req.operator.output_fields[field].grid_resolution)
-                    ghosts = u'{}<=ghosts<{}'.format(min_ghosts, max_ghosts)
+                    ghosts = '{}<=ghosts<{}'.format(min_ghosts, max_ghosts)
                     can_split = req.can_split.view(npw.int8)
-                    memory_order = u'{}'.format(req.memory_order) if req.memory_order else u'ANY'
-                    can_split = u'[{}]'.format(
-                        u','.join('1' if cs else '0' for cs in req.can_split))
-                    tstates = u'{}'.format(u','.join(str(ts) for ts in req.tstates)) \
-                        if req.tstates else u'ANY'
+                    memory_order = '{}'.format(req.memory_order) if req.memory_order else 'ANY'
+                    can_split = '[{}]'.format(
+                        ','.join('1' if cs else '0' for cs in req.can_split))
+                    tstates = '{}'.format(','.join(str(ts) for ts in req.tstates)) \
+                        if req.tstates else 'ANY'
                     sout.append((opname, fname, discr, ghosts, memory_order, can_split, tstates))
 
-        titles = [[(u'OPERATOR', u'FIELD', u'DISCRETIZATION', u'GHOSTS',
-                    u'MEMORY ORDER', u'CAN_SPLIT', u'TSTATES')]]
-        name_size = max(len(s[0]) for ss in sinputs.values()+soutputs.values()+titles for s in ss)
-        field_size = max(len(s[1]) for ss in sinputs.values()+soutputs.values()+titles for s in ss)
-        discr_size = max(len(s[2]) for ss in sinputs.values()+soutputs.values()+titles for s in ss)
-        ghosts_size = max(len(s[3]) for ss in sinputs.values()+soutputs.values()+titles for s in ss)
-        order_size = max(len(s[4]) for ss in sinputs.values()+soutputs.values()+titles for s in ss)
-        cansplit_size = max(len(s[5]) for ss in sinputs.values() +
-                            soutputs.values()+titles for s in ss)
-        tstates_size = max(len(s[6]) for ss in sinputs.values() +
-                           soutputs.values()+titles for s in ss)
-
-        template = u'\n   {:<{name_size}}   {:^{field_size}}     {:^{discr_size}}      {:^{ghosts_size}}      {:^{order_size}}      {:^{cansplit_size}}      {:^{tstates_size}}'
-
-        ss = u'>INPUTS:'
+        titles = [[('OPERATOR', 'FIELD', 'DISCRETIZATION', 'GHOSTS',
+                    'MEMORY ORDER', 'CAN_SPLIT', 'TSTATES')]]
+        vals = tuple(sinputs.values()) + tuple(soutputs.values()) + tuple(titles)
+        name_size = max(len(s[0]) for ss in vals for s in ss)
+        field_size = max(len(s[1]) for ss in vals for s in ss)
+        discr_size = max(len(s[2]) for ss in vals for s in ss)
+        ghosts_size = max(len(s[3]) for ss in vals for s in ss)
+        order_size = max(len(s[4]) for ss in vals for s in ss)
+        cansplit_size = max(len(s[5]) for ss in vals for s in ss)
+        tstates_size = max(len(s[6]) for ss in vals for s in ss)
+
+        template = '\n   {:<{name_size}}   {:^{field_size}}     {:^{discr_size}}      {:^{ghosts_size}}      {:^{order_size}}      {:^{cansplit_size}}      {:^{tstates_size}}'
+
+        ss = '>INPUTS:'
         if sinputs:
-            for (td, sreqs) in sinputs.iteritems():
+            for (td, sreqs) in sorted(sinputs.items()):
                 if isinstance(td, Topology):
-                    ss += u'\n {}'.format(td.short_description())
+                    ss += '\n {}'.format(td.short_description())
                 else:
-                    ss += u'\n {}'.format(td)
+                    ss += '\n {}'.format(td)
                 ss += template.format(*titles[0][0],
                                       name_size=name_size, field_size=field_size,
                                       discr_size=discr_size, ghosts_size=ghosts_size,
                                       order_size=order_size, cansplit_size=cansplit_size,
                                       tstates_size=tstates_size)
-                for (opname, fname, discr, ghosts, order, can_split, tstates) in sreqs:
+                for (opname, fname, discr, ghosts, order, can_split, tstates) in sorted(sreqs):
                     ss += template.format(
                         opname, fname, discr, ghosts, order, can_split, tstates,
                         name_size=name_size, field_size=field_size,
@@ -210,20 +214,20 @@ class ComputationalGraph(ComputationalGraphNode):
                         order_size=order_size, cansplit_size=cansplit_size,
                         tstates_size=tstates_size)
         else:
-            ss += u' None'
-        ss += u'\n>OUTPUTS:'
+            ss += ' None'
+        ss += '\n>OUTPUTS:'
         if soutputs:
-            for (td, sreqs) in soutputs.iteritems():
+            for (td, sreqs) in sorted(soutputs.items()):
                 if isinstance(td, Topology):
-                    ss += u'\n {}'.format(td.short_description())
+                    ss += '\n {}'.format(td.short_description())
                 else:
-                    ss += u'\n {}'.format(td)
+                    ss += '\n {}'.format(td)
                 ss += template.format(*titles[0][0],
                                       name_size=name_size, field_size=field_size,
                                       discr_size=discr_size, ghosts_size=ghosts_size,
                                       order_size=order_size, cansplit_size=cansplit_size,
                                       tstates_size=tstates_size)
-                for (opname, fname, discr, ghosts, order, can_split, tstates) in sreqs:
+                for (opname, fname, discr, ghosts, order, can_split, tstates) in sorted(sreqs):
                     ss += template.format(
                         opname, fname, discr, ghosts, order, can_split, tstates,
                         name_size=name_size, field_size=field_size,
@@ -231,11 +235,11 @@ class ComputationalGraph(ComputationalGraphNode):
                         order_size=order_size, cansplit_size=cansplit_size,
                         tstates_size=tstates_size)
         else:
-            ss += u' None'
+            ss += ' None'
 
-        title = u'ComputationalGraph {} field requirements report '.format(
-            self.pretty_name.decode('utf-8'))
-        return u'\n{}\n'.format(framed_str(title=title, msg=ss)).encode('utf-8')
+        title = 'ComputationalGraph {} field requirements report '.format(
+            self.pretty_name)
+        return '\n{}\n'.format(framed_str(title=title, msg=ss))
 
     def domain_report(self):
         domains = self.get_domains()
@@ -244,36 +248,36 @@ class ComputationalGraph(ComputationalGraphNode):
         maxlen = (None, 40,  None, 40,  None)
         split_sep = (None, ',', None, ',', None)
         newline_prefix = (None, ' ', '',   ' ', None)
-        replace = ('',   '', '-',   '',  '')
+        replace = ('', '', '-', '', '')
 
-        for (domain, operators) in domains.iteritems():
+        for (domain, operators) in domains.items():
             if (domain is None):
                 continue
             for op in sorted(operators, key=lambda x: x.pretty_name):
-                finputs = u','.join(sorted([f.pretty_name.decode('utf-8')
+                finputs = ','.join(sorted([f.pretty_name
                                             for f in op.iter_input_fields() if f.domain is domain]))
-                foutputs = u','.join(sorted([f.pretty_name.decode('utf-8')
+                foutputs = ','.join(sorted([f.pretty_name
                                              for f in op.iter_output_fields() if f.domain is domain]))
-                pinputs = u','.join(sorted([p.pretty_name.decode('utf-8')
+                pinputs = ','.join(sorted([p.pretty_name
                                             for p in op.input_params.values()]))
-                poutputs = u','.join(sorted([p.pretty_name.decode('utf-8')
+                poutputs = ','.join(sorted([p.pretty_name
                                              for p in op.output_params.values()]))
-                infields = u'[{}]'.format(finputs) if finputs else u''
-                outfields = u'[{}]'.format(foutputs) if foutputs else u''
-                inparams = u'[{}]'.format(pinputs) if pinputs else u''
-                outparams = u'[{}]'.format(poutputs) if poutputs else u''
+                infields = '[{}]'.format(finputs) if finputs else ''
+                outfields = '[{}]'.format(foutputs) if foutputs else ''
+                inparams = '[{}]'.format(pinputs) if pinputs else ''
+                outparams = '[{}]'.format(poutputs) if poutputs else ''
 
-                inputs = u'{}{}{}'.format(
-                    infields,  u'x' if infields and inparams else u'', inparams)
-                outputs = u'{}{}{}'.format(
-                    outfields, u'x' if outfields and outparams else u'', outparams)
+                inputs = '{}{}{}'.format(
+                    infields,  'x' if infields and inparams else '', inparams)
+                outputs = '{}{}{}'.format(
+                    outfields, 'x' if outfields and outparams else '', outparams)
 
-                if inputs == u'':
-                    inputs = u'no inputs'
-                if outputs == u'':
-                    outputs = u'no outputs'
+                if inputs == '':
+                    inputs = 'no inputs'
+                if outputs == '':
+                    outputs = 'no outputs'
                 if op.mpi_params.on_task:
-                    opname = op.pretty_name.decode('utf-8')
+                    opname = op.pretty_name
                     optype = type(op).__name__
                     strdata = (opname, inputs, '->', outputs, optype)
 
@@ -283,27 +287,24 @@ class ComputationalGraph(ComputationalGraphNode):
         if (None in domains):
             operators = domains[None]
             for op in sorted(operators, key=lambda x: x.pretty_name):
-                pinputs = u','.join(sorted([p.pretty_name.decode('utf-8')
+                pinputs = ','.join(sorted([p.pretty_name
                                             for p in op.input_params.values()]))
-                poutputs = u','.join(sorted([p.pretty_name.decode('utf-8')
+                poutputs = ','.join(sorted([p.pretty_name
                                              for p in op.output_params.values()]))
-                inparams = u'[{}]'.format(pinputs) if pinputs else ''
-                outparams = u'[{}]'.format(poutputs) if poutputs else ''
+                inparams = '[{}]'.format(pinputs) if pinputs else ''
+                outparams = '[{}]'.format(poutputs) if poutputs else ''
 
-                inputs = u'{}'.format(inparams)
-                outputs = u'{}'.format(outparams)
+                inputs = '{}'.format(inparams)
+                outputs = '{}'.format(outparams)
                 if inputs == '':
-                    inputs = u'no inputs'
+                    inputs = 'no inputs'
                 if outputs == '':
-                    outputs = u'no outputs'
+                    outputs = 'no outputs'
                 if op.mpi_params.on_task:
-                    opname = op.pretty_name.decode('utf-8')
+                    opname = op.pretty_name
                     optype = type(op).__name__
                     strdata = (opname, inputs, '->', outputs, optype)
 
-                    op_data = ops.setdefault(None, [])
-                    op_data += multiline_split(strdata, maxlen, split_sep, replace, newline_prefix)
-
         title = u'ComputationalGraph {} domain and operator report '.format(
             self.pretty_name.decode('utf-8'))
         if len(ops) == 0:
@@ -315,48 +316,49 @@ class ComputationalGraph(ComputationalGraphNode):
         out_size = max(strlen(s[3]) for ss in ops.values() for s in ss)
         type_size = max(strlen(s[4]) for ss in ops.values() for s in ss)
 
-        ss = u''
-        for (domain, dops) in ops.iteritems():
+        ss = ''
+        for (domain, dops) in ops.items():
             if (domain is None):
                 continue
-            ss += u'\n>{}'.format(domain.short_description())
-            ss += u'\n   {:<{name_size}}  {:<{in_size}}  {:<{arrow_size}}   {:<{out_size}}    {:<{type_size}}'.format(
+            ss += '\n>{}'.format(domain.short_description())
+            ss += '\n   {:<{name_size}}  {:<{in_size}}  {:<{arrow_size}}   {:<{out_size}}    {:<{type_size}}'.format(
                 'OPERATOR', 'INPUTS', '', 'OUTPUTS', 'OPERATOR TYPE',
                 name_size=name_size, in_size=in_size,
                 arrow_size=arrow_size,
                 out_size=out_size, type_size=type_size)
             for (opname, inputs, arrow, outputs, optype) in dops:
-                ss += u'\n   {:<{name_size}}  {:<{in_size}}  {:<{arrow_size}}   {:<{out_size}}    {:<{type_size}}'.format(
+                ss += '\n   {:<{name_size}}  {:<{in_size}}  {:<{arrow_size}}   {:<{out_size}}    {:<{type_size}}'.format(
                     opname, inputs, arrow, outputs, optype,
                     name_size=name_size, in_size=in_size,
                     arrow_size=arrow_size,
                     out_size=out_size, type_size=type_size)
-        if (None in ops):
-            ss += u'\n>Domainless operators:'
-            ss += u'\n   {:<{name_size}}  {:<{in_size}}  {:<{arrow_size}}   {:<{out_size}}    {:<{type_size}}'.format(
+        if (None in ops):
+            ss += '\n>Domainless operators:'
+            ss += '\n   {:<{name_size}}  {:<{in_size}}  {:<{arrow_size}}   {:<{out_size}}    {:<{type_size}}'.format(
                 'OPERATOR', 'INPUTS', '', 'OUTPUTS', 'OPERATOR TYPE',
                 name_size=name_size, in_size=in_size,
                 arrow_size=arrow_size,
                 out_size=out_size, type_size=type_size)
             for (opname, inputs, arrow, outputs, optype) in ops[None]:
-                ss += u'\n   {:<{name_size}}  {:<{in_size}}  {:<{arrow_size}}   {:<{out_size}}    {:<{type_size}}'.format(
+                ss += '\n   {:<{name_size}}  {:<{in_size}}  {:<{arrow_size}}   {:<{out_size}}    {:<{type_size}}'.format(
                     opname, inputs, arrow, outputs, optype,
                     name_size=name_size, in_size=in_size,
                     arrow_size=arrow_size,
                     out_size=out_size, type_size=type_size)
-        return u'\n{}\n'.format(framed_str(title=title, msg=ss[1:])).encode('utf-8')
+
+        title = 'ComputationalGraph {} domain and operator report '.format(self.pretty_name)
+        return '\n{}\n'.format(framed_str(title=title, msg=ss[1:]))
 
     def topology_report(self):
         ss = ''
-        for (backend, topologies) in self.get_topologies().iteritems():
-            ss += u'\n {}:'.format(backend.short_description())
-            ss += u'\n  *'+'\n  *'.join(t.short_description()
+        for (backend, topologies) in self.get_topologies().items():
+            ss += '\n {}:'.format(backend.short_description())
+            ss += '\n  *'+'\n  *'.join(t.short_description()
                                         for t in sorted(topologies, key=lambda x: x.id))
         if ss == '':
             ss = ' Empty'
-        title = u'ComputationalGraph {} topology report '.format(
-            self.pretty_name.decode('utf-8'))
-        return u'\n{}\n'.format(framed_str(title=title, msg=ss[1:]))
+        title = 'ComputationalGraph {} topology report '.format(self.pretty_name)
+        return '\n{}\n'.format(framed_str(title=title, msg=ss[1:]))
 
     def variable_report(self):
         fields = self.fields
@@ -372,14 +374,14 @@ class ComputationalGraph(ComputationalGraphNode):
                     topo = node.output_fields[field]
                     field_topologies.setdefault(topo, []).append(node)
             for topo in sorted((_ for _ in field_topologies.keys() if not _ is None), key=lambda x: x.tag):
-                pnames = set(node.pretty_name.decode('utf-8') for node in field_topologies[topo])
-                pnames = sorted(pnames)
+                pnames = set(node.pretty_name for node in field_topologies[topo])
+                pnames = tuple(sorted(pnames))
                 nbyline = 4
                 nentries = len(pnames)//nbyline
                 n0 = len(str(topo.backend.kind).lower())
                 n1 = len(str(topo.tag))
-                for i in xrange(nentries):
-                    sops = u', '.join(pnames[nbyline*i:nbyline*(i+1)])
+                for i in range(nentries):
+                    sops = ', '.join(pnames[nbyline*i:nbyline*(i+1)])
                     if (i != nentries-1) or (len(pnames) % nbyline != 0):
                         sops += ','
                     if (i == 0):
@@ -388,23 +390,24 @@ class ComputationalGraph(ComputationalGraphNode):
                         entries = ('', '-'*n1, sops)
                     topologies.setdefault(field, []).append(entries)
                 if (len(pnames) % nbyline != 0):
-                    sops = u', '.join(pnames[nbyline*nentries:])
+                    sops = ', '.join(pnames[nbyline*nentries:])
                     if (nentries == 0):
                         entries = (str(topo.backend.kind).lower(), topo.tag, sops)
                     else:
                         entries = ('', '-'*n1, sops)
                     topologies.setdefault(field, []).append(entries)
 
-        titles = [[(u'BACKEND', u'TOPOLOGY', u'OPERATORS')]]
-        backend_size = max(len(s[0]) for ss in topologies.values()+titles for s in ss)
-        topo_size = max(len(s[1]) for ss in topologies.values()+titles for s in ss)
-        template = u'\n   {:<{backend_size}}   {:<{topo_size}}   {}'
+        titles = [[('BACKEND', 'TOPOLOGY', 'OPERATORS')]]
+        vals = tuple(topologies.values()) + tuple(titles)
+        backend_size = max(len(s[0]) for ss in vals for s in ss)
+        topo_size = max(len(s[1]) for ss in vals for s in ss)
+        template = '\n   {:<{backend_size}}   {:<{topo_size}}   {}'
         sizes = {'backend_size': backend_size,
                  'topo_size': topo_size}
 
-        ss = u''
+        ss = ''
         for field in sorted(self.fields, key=lambda x: x.name):
-            ss += u'\n>FIELD {}::{}'.format(field.name, field.pretty_name.decode('utf-8'))
+            ss += '\n>FIELD {}::{}'.format(field.name, field.pretty_name)
             ss += template.format(*titles[0][0], **sizes)
             field_topologies = topologies[field]
             for entries in field_topologies:
@@ -412,10 +415,10 @@ class ComputationalGraph(ComputationalGraphNode):
 
         if ss == '':
             ss = ' Empty'
-        title = u'ComputationalGraph {} fields report '.format(
-            self.pretty_name.decode('utf-8'))
-        ss = u'\n{}\n'.format(framed_str(title=title, msg=ss[1:]))
-        return ss.encode('utf-8')
+        title = 'ComputationalGraph {} fields report '.format(
+            self.pretty_name)
+        ss = '\n{}\n'.format(framed_str(title=title, msg=ss[1:]))
+        return ss
 
     def operator_report(self):
         from hysop.problem import Problem
@@ -453,15 +456,57 @@ class ComputationalGraph(ComputationalGraphNode):
                 finputs = u','.join(sorted(finputs))
                 foutputs = u','.join(sorted(foutputs))
 
-                pinputs = u','.join(sorted([p.pretty_name.decode('utf-8')
-                                            for p in node.input_params.values()]))
-                poutputs = u','.join(sorted([p.pretty_name.decode('utf-8')
-                                             for p in node.output_params.values()]))
-
-                infields = u'[{}]'.format(finputs) if finputs else u''
-                outfields = u'[{}]'.format(foutputs) if foutputs else u''
-                inparams = u'[{}]'.format(pinputs) if pinputs else u''
-                outparams = u'[{}]'.format(poutputs) if poutputs else u''
+        reduced_graph = self.reduced_graph
+        ops = []
+        for (i, node) in enumerate(self.nodes):
+            handled_inputs, handled_outputs = (), ()
+            finputs, foutputs = [], []
+            for f in node.input_tensor_fields:
+                f0 = f.fields[0]
+                t0 = node.input_fields[f0]
+                if all((node.input_fields[fi] is t0) for fi in f.fields):
+                    finputs.append('{}.{}'.format(f.pretty_name,
+                                                   t0.pretty_tag))
+                    handled_inputs += f.fields
+            for f in node.output_tensor_fields:
+                f0 = f.fields[0]
+                t0 = node.output_fields[f0]
+                if all((node.output_fields[fi] is t0) for fi in f.fields):
+                    foutputs.append('{}.{}'.format(f.pretty_name,
+                                                    t0.pretty_tag))
+                    handled_outputs += f.fields
+            finputs += ['{}.{}'.format(f.pretty_name,
+                                        t.pretty_tag)
+                        for (f, t) in node.input_fields.items()
+                        if f not in handled_inputs]
+            foutputs += ['{}.{}'.format(f.pretty_name,
+                                         t.pretty_tag)
+                         for (f, t) in node.output_fields.items()
+                         if f not in handled_outputs]
+            finputs = ','.join(sorted(finputs))
+            foutputs = ','.join(sorted(foutputs))
+
+            pinputs = ','.join(sorted([p.pretty_name
+                                        for p in node.input_params.values()]))
+            poutputs = ','.join(sorted([p.pretty_name
+                                         for p in node.output_params.values()]))
+
+            infields = '[{}]'.format(finputs) if finputs else ''
+            outfields = '[{}]'.format(foutputs) if foutputs else ''
+            inparams = '[{}]'.format(pinputs) if pinputs else ''
+            outparams = '[{}]'.format(poutputs) if poutputs else ''
+
+            inputs = '{}{}{}'.format(infields,  'x' if infields and inparams else '', inparams)
+            outputs = '{}{}{}'.format(
+                outfields, 'x' if outfields and outparams else '', outparams)
+            if inputs == '':
+                inputs = 'no inputs'
+            if outputs == '':
+                outputs = 'no outputs'
+
+            opname = node.pretty_name
+            optype = type(node).__name__
+            strdata = (str(i), opname, inputs, '->', outputs, optype)
 
                 inputs = u'{}{}{}'.format(infields,  u'x' if infields and inparams else u'', inparams)
                 outputs = u'{}{}{}'.format(
@@ -562,14 +607,14 @@ class ComputationalGraph(ComputationalGraphNode):
         for _ in ops:
             ss += ss_fmt.format(_[0], '' if notasks else _[1], *tuple(_[2:]), **ln)
 
-        title = u'ComputationalGraph {} task profiling report '.format(
-            self.pretty_name.decode('utf-8'))
-        return u'\n{}\n'.format(framed_str(title=title, msg=ss)).encode('utf-8')
+        title = 'ComputationalGraph {} task profiling report '.format(
+            self.pretty_name)
+        return '\n{}\n'.format(framed_str(title=title, msg=ss))
 
     def get_domains(self):
         domains = {}
         for node in self.nodes:
-            for (domain, ops) in node.get_domains().iteritems():
+            for (domain, ops) in node.get_domains().items():
                 domains.setdefault(domain, set()).update(ops)
         return domains
 
@@ -578,7 +623,7 @@ class ComputationalGraph(ComputationalGraphNode):
         topologies = {}
         for node in self.nodes:
             node_topologies = node.get_topologies()
-            for (backend, topos) in node_topologies.iteritems():
+            for (backend, topos) in node_topologies.items():
                 topos = to_set(topos)
                 if backend not in topologies:
                     topologies[backend] = set()
@@ -656,12 +701,12 @@ class ComputationalGraph(ComputationalGraphNode):
         avail_methods = {}
         if not self.nodes:
             from hysop.problem import Problem
-            msg = u'No nodes present in ComputationalGraph {}.'.format(
-                self.pretty_name.decode('utf-8'))
+            msg = 'No nodes present in ComputationalGraph {}.'.format(
+                self.pretty_name)
             if not isinstance(self, Problem):
-                raise RuntimeError(msg.encode('utf-8'))
+                raise RuntimeError(msg)
         for node in self.nodes:
-            for (k, v) in node.available_methods().iteritems():
+            for (k, v) in node.available_methods().items():
                 v = to_set(v)
                 if (k in avail_methods):
                     avail_methods[k].update(v)
@@ -686,9 +731,9 @@ class ComputationalGraph(ComputationalGraphNode):
         if self.is_root:
             self.pre_initialize(**kwds)
 
-        msg = u'ComputationalGraph {} is empty.'
+        msg = 'ComputationalGraph {} is empty.'
         if len(self.nodes) == 0:
-            vprint(msg.format(self.pretty_name.decode('utf-8')).encode('utf-8'))
+            vprint(msg.format(self.pretty_name))
 
         for node in self.nodes:
             node.pre_initialize(**kwds)
@@ -705,7 +750,7 @@ class ComputationalGraph(ComputationalGraphNode):
         for node in self.nodes:
             node.post_initialize(**kwds)
         if (self.is_root and __VERBOSE__) or __DEBUG__ or self.__FORCE_REPORTS__:
-            print self.domain_report()
+            print(self.domain_report())
 
         if self.is_root:
             field_requirements = self.get_and_set_field_requirements()
@@ -745,13 +790,13 @@ class ComputationalGraph(ComputationalGraphNode):
     def get_field_requirements(self):
         requirements = super(ComputationalGraph, self).get_field_requirements()
         if (self.is_root and __VERBOSE__) or __DEBUG__ or self.__FORCE_REPORTS__:
-            print self.node_requirements_report(requirements)
+            print(self.node_requirements_report(requirements))
         for node in self.nodes:
             if node.mpi_params is None or node.mpi_params.on_task:
                 node_requirements = node.get_and_set_field_requirements()
                 requirements.update(node_requirements)
         if (self.is_root and __VERBOSE__) or __DEBUG__ or self.__FORCE_REPORTS__:
-            print self.field_requirements_report(requirements)
+            print(self.field_requirements_report(requirements))
         return requirements
 
     @debug
@@ -824,9 +869,9 @@ class ComputationalGraph(ComputationalGraphNode):
         self.graph_built = True
 
         if (self.is_root and __VERBOSE__) or __DEBUG__ or self.__FORCE_REPORTS__:
-            print self.topology_report()
-            print self.variable_report()
-            print self.operator_report()
+            print(self.topology_report())
+            print(self.variable_report())
+            print(self.operator_report())
 
     def display(self, visu_rank=0, show_buttons=False):
         """
@@ -839,7 +884,7 @@ class ComputationalGraph(ComputationalGraphNode):
         net = self.to_pyvis()
 
         import tempfile
-        with tempfile.NamedTemporaryFile(suffix='.html') as f:
+        with tempfile.NamedTemporaryFile(suffix='.html', delete=False) as f:
             net.show(f.name)
 
     def to_html(self, path, io_rank=0, show_buttons=False):
@@ -851,6 +896,8 @@ class ComputationalGraph(ComputationalGraphNode):
             return
 
         net = self.to_pyvis()
+        if (net is None):
+            return
         net.write_html(path)
 
     @graph_built
@@ -862,9 +909,9 @@ class ComputationalGraph(ComputationalGraphNode):
             import pyvis
             import matplotlib
         except ImportError:
-            msg = '\nFATAL ERROR: Graph vizualization requires pyvis and matplotlib.\n'
+            msg = '\nGraph vizualization requires pyvis and matplotlib, which are not present on your system.\n'
             print(msg)
-            raise
+            return
 
         width = first_not_None(width,  1920)
         height = first_not_None(height, 1080)
@@ -877,8 +924,8 @@ class ComputationalGraph(ComputationalGraphNode):
             node_id = int(node)
             if node_id not in known_nodes:
                 network.add_node(node_id, label=node.label,
                                  title=node.title, color=node.color,
                                  shape=node.shape(with_custom_nodes))
                 known_nodes.add(node_id)
 
         def add_edge(from_node, to_node):
@@ -886,7 +933,7 @@ class ComputationalGraph(ComputationalGraphNode):
             to_node_id = int(to_node)
             edge = graph[from_node][to_node]
             network.add_edge(from_node_id, to_node_id,
                              title=str(edge.get('data', 'no edge data')))
 
         for node in graph:
             add_node(node)
@@ -914,7 +961,7 @@ class ComputationalGraph(ComputationalGraphNode):
             node.discretize()
 
         input_discrete_fields = {}
-        for (field, topo) in self.input_fields.iteritems():
+        for (field, topo) in self.input_fields.items():
             istate = self.initial_input_topology_states[field][1]
             # problem inputs are writeable for initialization
             istate = istate.copy(is_read_only=False)
@@ -922,7 +969,7 @@ class ComputationalGraph(ComputationalGraphNode):
             input_discrete_fields[field] = dfield
 
         output_discrete_fields = {}
-        for field, topo in self.output_fields.iteritems():
+        for field, topo in self.output_fields.items():
             ostate = self.final_output_topology_states[field][1]
             dfield = field.discretize(topo, ostate)
             output_discrete_fields[field] = dfield
@@ -947,62 +994,62 @@ class ComputationalGraph(ComputationalGraphNode):
             tdfield = DiscreteTensorField(field=tfield, dfields=dfields)
             output_discrete_tensor_fields[tfield] = tdfield
 
-        discrete_fields = tuple(set(input_discrete_fields.values() +
+        discrete_fields = tuple(set(input_discrete_fields.values()).union(
                                     output_discrete_fields.values()))
 
-        discrete_tensor_fields = tuple(set(input_discrete_tensor_fields.values() +
+        discrete_tensor_fields = tuple(set(input_discrete_tensor_fields.values()).union(
                                            output_discrete_tensor_fields.values()))
 
         self.input_discrete_fields = input_discrete_fields
         self.output_discrete_fields = output_discrete_fields
         self.input_discrete_tensor_fields = input_discrete_tensor_fields
         self.output_discrete_tensor_fields = output_discrete_tensor_fields
         self.discrete_fields = discrete_fields
         self.discrete_tensor_fields = discrete_tensor_fields
 
         self.discretized = True
 
     @debug
     @discretized
     def get_work_properties(self):
         requests = MultipleOperatorMemoryRequests()
 
         for node in self.nodes:
             if node not in requests.operators():
                 wp = node.get_work_properties()
                 requests += node.get_work_properties()
         if __DEBUG__ or (__VERBOSE__ and self.level == 0) or self.__FORCE_REPORTS__:
-            srequests = requests.sreport()
-            ss = (srequests if (srequests != u'') else u' *no extra work requested*')
-            title = u'ComputationalGraph {} work properties report '.format(
-                self.pretty_name.decode('utf-8'))
-            vprint(u'\n{}\n'.format(framed_str(title=title, msg=ss)).encode('utf-8'))
+            srequests = requests.sreport()
+            ss = (srequests if (srequests != '') else ' *no extra work requested*')
+            title = 'ComputationalGraph {} work properties report '.format(
+                self.pretty_name)
+            vprint('\n{}\n'.format(framed_str(title=title, msg=ss)))
         return requests
 
     @debug
     @discretized
     def setup(self, work=None, allow_subbuffers=False):
         if self.ready:
             return
         if (work is None):
             work = self.get_work_properties()
             work.allocate(allow_subbuffers=allow_subbuffers)
         for node in self.nodes:
             if not node.ready:
                 node.setup(work=work)
         self.ready = True
 
     def build(self, outputs_are_inputs=True, method=None, allow_subbuffers=False):
         """
         Shortcut for initialize(), discretize(), get_work_properties(), setup()
         for quick graph initialization.
         """
         self.initialize(outputs_are_inputs=outputs_are_inputs,
                         topgraph_method=method)
         assert self.is_root, 'Only root graph can be built.'
         self.discretize()
         work = self.get_work_properties()
         work.allocate(allow_subbuffers=allow_subbuffers)
         self.setup(work)
         return self
 
@@ -1018,11 +1065,11 @@ class ComputationalGraph(ComputationalGraphNode):
     @debug
     @ready
     def finalize(self, **kwds):
         reduced_graph = self.reduced_graph
         for node in self.nodes:
             if node.ready:
                 node.finalize(**kwds)
         self.ready = False
 
     @classmethod
     def supports_multiple_field_topologies(cls):
diff --git a/hysop/core/graph/computational_node.py b/hysop/core/graph/computational_node.py
index b444038ab1f282043d39c6dec7a684f6daa6bda6..8db7fb382ed7c15fc1c0c5a46834b9e163680cae 100644
--- a/hysop/core/graph/computational_node.py
+++ b/hysop/core/graph/computational_node.py
@@ -3,10 +3,11 @@
 Base for directionally splitted advection solvers (pure-python and GPU version).
 """
 
+import copy
+import warnings
 from abc import ABCMeta, abstractmethod
 
 from hysop import dprint
-from hysop.deps import copy, warnings
 from hysop.tools.types import InstanceOf, to_set, check_instance, first_not_None
 from hysop.tools.io_utils import IOParams
 from hysop.parameters.parameter import Parameter
@@ -54,12 +55,22 @@ def to_be_skipped_default(*args, **kwargs):
     return False
 
 
-class ComputationalGraphNode(OperatorBase):
+class ComputationalGraphNode(OperatorBase, metaclass=ABCMeta):
     """
     Interface of an abstract computational graph node.
     """
 
-    __metaclass__ = ABCMeta
+    @debug
+    def __new__(cls, input_fields=None, output_fields=None,
+                input_params=None, output_params=None,
+                input_tensor_fields=None, output_tensor_fields=None,
+                name=None, pretty_name=None, method=None, to_be_skipped_func=None, **kwds):
+        return super(ComputationalGraphNode, cls).__new__(cls,
+                                                          name=None,
+                                                          fields=None,
+                                                          tensor_fields=None,
+                                                          parameters=None,
+                                                          **kwds)
 
     @debug
     def __init__(self, input_fields=None, output_fields=None,
@@ -90,7 +101,7 @@ class ComputationalGraphNode(OperatorBase):
         name: str, optional
             name of this node (string), optional, defaults to top class name.
             Default name will be the top class name (ie. self.__class__.__name__).
-        pretty_name: str or unicode, optional
+        pretty_name: str, optional
             Pretty name of this node (string), optional, defaults to name.
         method: dict, optional
             user method specification for this graph node, optional, defaults to None.
@@ -176,7 +187,7 @@ class ComputationalGraphNode(OperatorBase):
                         raise RuntimeError(msg)
         elif (input_fields is not None):
             input_tensor_fields = tuple(filter(lambda x: x.is_tensor, input_fields.keys()))
-            input_fields = {sfield: topod for (tfield, topod) in input_fields.iteritems()
+            input_fields = {sfield: topod for (tfield, topod) in input_fields.items()
                             for sfield in tfield.fields}
         else:
             input_tensor_fields = ()
@@ -191,7 +202,7 @@ class ComputationalGraphNode(OperatorBase):
                         raise RuntimeError(msg)
         elif (output_fields is not None):
             output_tensor_fields = tuple(filter(lambda x: x.is_tensor, output_fields.keys()))
-            output_fields = {sfield: topod for (tfield, topod) in output_fields.iteritems()
+            output_fields = {sfield: topod for (tfield, topod) in output_fields.items()
                              for sfield in tfield.fields}
         else:
             output_tensor_fields = ()
@@ -205,13 +216,10 @@ class ComputationalGraphNode(OperatorBase):
         name = first_not_None(name, self.__class__.__name__)
         pretty_name = first_not_None(pretty_name, name)
 
-        if isinstance(pretty_name, unicode):
-            pretty_name = pretty_name.encode('utf-8')
-
         if not isinstance(name, str):
             msg = 'name is not a string but a {}.'
             raise ValueError(msg.format(name.__class__))
-        if not isinstance(pretty_name, (str, unicode)):
+        if not isinstance(pretty_name, str):
             msg = 'pretty_name is not a string but a {}.'
             raise ValueError(msg.format(name.__class__))
         if not isinstance(input_fields, dict):
@@ -375,7 +383,7 @@ class ComputationalGraphNode(OperatorBase):
 
         if ('mpi_params' in self.__kwds) and ('ComputationalGraph' not in map(lambda c: c.__name__, self.__class__.__mro__)):
             mpi_params = self.__kwds['mpi_params']
-            for topo in set(self.input_fields.values() + self.output_fields.values()):
+            for topo in set(self.input_fields.values()).union(self.output_fields.values()):
                 if isinstance(topo, Topology) and (topo.mpi_params != mpi_params):
                     d = topo.mpi_params.diff(mpi_params)
                     msg = 'MPI parameters mismatch between already specified topology mpi_params '
@@ -406,7 +414,7 @@ class ComputationalGraphNode(OperatorBase):
             method.update(user_method)
 
         available_methods = self.available_methods()
-        for (k, v) in method.iteritems():
+        for (k, v) in method.items():
             if k not in available_methods.keys():
                 msg = '{} is not an available method key for computational node {}.'
                 msg = msg.format(k, self.name)
@@ -449,7 +457,7 @@ class ComputationalGraphNode(OperatorBase):
         Called automatically in ComputationalGraphNode.check()
         """
         for variables in [self.input_fields, self.output_fields]:
-            for (k, v) in variables.iteritems():
+            for (k, v) in variables.items():
                 if not isinstance(k, Field):
                     msg = 'Given key is not a continuous Field (got a {}).'
                     raise TypeError(msg.format(k.__class__))
@@ -476,11 +484,11 @@ class ComputationalGraphNode(OperatorBase):
         has_multiple_field_topologies = False
         multi_topo_fields = set()
 
-        topos = (self.input_fields.values()+self.output_fields.values())
+        topos = tuple(self.input_fields.values()) + tuple(self.output_fields.values())
         if topos:
             topo_ref = first_not_None(topos).topology
             for variables in [self.input_fields, self.output_fields]:
-                for field, topo in variables.iteritems():
+                for field, topo in variables.items():
                     if topo is not None and (topo.topology != topo_ref):
                         has_multiple_topologies = True
 
@@ -526,10 +534,10 @@ class ComputationalGraphNode(OperatorBase):
             msg = 'Graph operator {} does not support multiple field topologies yet.'
             msg = msg.format(self.node_tag)
             msg += '\n>Input topologies:'
-            for (field, topo) in self.input_fields.iteritems():
+            for (field, topo) in self.input_fields.items():
                 msg += '\n  *{} -> {}'.format(field.short_description(), topo.short_description())
             msg += '\n>Output topologies:'
-            for (field, topo) in self.output_fields.iteritems():
+            for (field, topo) in self.output_fields.items():
                 msg += '\n  *{} -> {}'.format(field.short_description(), topo.short_description())
             raise NotImplementedError(msg)
         if (self._is_distributed) and (not cls.supports_mpi()):
@@ -546,7 +554,7 @@ class ComputationalGraphNode(OperatorBase):
         Topologies are organized by backend in a dictionnary.
         """
         topologies = {}
-        for topo in set(self.input_fields.values()+self.output_fields.values()):
+        for topo in set(self.input_fields.values()).union(self.output_fields.values()):
             if topo is not None:
                 topologies.setdefault(topo.backend, set()).add(topo)
         return topologies
@@ -560,7 +568,7 @@ class ComputationalGraphNode(OperatorBase):
         if fills the 'None' domain.
         """
         domains = {}
-        for field in set(self.input_fields.keys()+self.output_fields.keys()):
+        for field in set(self.input_fields.keys()).union(self.output_fields.keys()):
             domains.setdefault(field.domain, set()).add(self)
         if self.is_domainless:
             domains.setdefault(None, set()).add(self)
@@ -605,7 +613,7 @@ class ComputationalGraphNode(OperatorBase):
             3) check method against available_methods.
         The result of this process is fed as argument of this function.
         """
-        self.method = {k: v for (k, v) in method.iteritems()}
+        self.method = {k: v for (k, v) in method.items()}
 
     @abstractmethod
     @debug
@@ -700,7 +708,7 @@ class ComputationalGraphNode(OperatorBase):
     def get_topo_descriptor(cls, variables, field):
         if (field in variables):
             return variables[field]
-        tfields = filter(lambda x: x.is_tensor, variables.keys())
+        tfields = tuple(filter(lambda x: x.is_tensor, variables.keys()))
         for tfield in tfields:
             if field in tfield:
                 return variables[tfield]
@@ -951,10 +959,10 @@ class ComputationalGraphNode(OperatorBase):
                                                for field in tfield.fields)
 
         if with_tensors and (not as_scalars):
-            for (tfield, tdfield) in self.input_discrete_tensor_fields.iteritems():
+            for (tfield, tdfield) in self.input_discrete_tensor_fields.items():
                 yield (tfield, tdfield)
 
-        for (field, dfield) in self.input_discrete_fields.iteritems():
+        for (field, dfield) in self.input_discrete_fields.items():
             if field in input_scalar_fields_from_tensors:
                 # field is contained in a tensor field
                 if with_tensors and as_scalars:
@@ -981,10 +989,10 @@ class ComputationalGraphNode(OperatorBase):
                                                 for field in tfield.fields)
 
         if with_tensors and (not as_scalars):
-            for (tfield, tdfield) in self.output_discrete_tensor_fields.iteritems():
+            for (tfield, tdfield) in self.output_discrete_tensor_fields.items():
                 yield (tfield, tdfield)
 
-        for (field, dfield) in self.output_discrete_fields.iteritems():
+        for (field, dfield) in self.output_discrete_fields.items():
             if field in output_scalar_fields_from_tensors:
                 # field is contained in a tensor field
                 if with_tensors and as_scalars:
diff --git a/hysop/core/graph/computational_node_frontend.py b/hysop/core/graph/computational_node_frontend.py
index ddbd5b377eda362d462694c688cc40c604038f26..1badb7896eabea95f8159cf04326dc9c96fb82f9 100644
--- a/hysop/core/graph/computational_node_frontend.py
+++ b/hysop/core/graph/computational_node_frontend.py
@@ -11,6 +11,16 @@ from hysop.topology.topology import Topology
 
 class ComputationalGraphNodeFrontend(ComputationalGraphNodeGenerator):
 
+    @debug
+    def __new__(cls, implementation=None, base_kwds=None,
+                candidate_input_tensors=None, candidate_output_tensors=None,
+                **impl_kwds):
+        base_kwds = {} if (base_kwds is None) else base_kwds
+        return super(ComputationalGraphNodeFrontend, cls).__new__(cls,
+                                                                  candidate_input_tensors=candidate_input_tensors,
+                                                                  candidate_output_tensors=candidate_output_tensors,
+                                                                  **base_kwds)
+
     @debug
     def __init__(self, implementation=None, base_kwds=None,
                  candidate_input_tensors=None, candidate_output_tensors=None,
@@ -90,9 +100,9 @@ class ComputationalGraphNodeFrontend(ComputationalGraphNodeGenerator):
             raise ValueError(msg)
         elif (self.implementations()[implementation] is None):
             msg = 'Specified implementation \'{}\' is registered as an available implementation for operator \'{}\', '
-            msg+= 'but no underlying implementation was found. This may be due to missing dependency or a catched '
-            msg+= 'import error in file file://{}.'
-            msg=msg.format(implementation, self.__class__.__name__, inspect.getfile(self.__class__)[:-1])
+            msg += 'but no underlying implementation was found. This may be due to missing dependency or a catched '
+            msg += 'import error in file file://{}.'
+            msg = msg.format(implementation, self.__class__.__name__, inspect.getfile(self.__class__)[:-1])
             raise ValueError(msg)
 
         self.implementation = implementation
@@ -103,19 +113,19 @@ class ComputationalGraphNodeFrontend(ComputationalGraphNodeGenerator):
         self._input_fields_to_dump = []
         self._output_fields_to_dump = []
 
-
     @debug
     def _generate(self):
         try:
             op = self.impl(**self.impl_kwds)
         except:
             sargs = ['*{} = {}'.format(k, v.__class__)
-                     for (k, v) in self.impl_kwds.iteritems()]
+                     for (k, v) in self.impl_kwds.items()]
             msg = 'FATAL ERROR during {}.generate():\n'
-            msg += ' => failed to call {}.__init__()\n    with the following keywords:'
+            msg += ' => failed to initialize an instance of type {}'
+            msg += '\n    by using the following keyword arguments:'
             msg += '\n     '+'\n     '.join(sargs)
             msg = msg.format(self.__class__, self.impl)
-            print '\n{}\n'.format(msg)
+            print('\n{}'.format(msg))
             raise
 
         for kwds in self._input_fields_to_dump:
@@ -190,6 +200,11 @@ class MultiComputationalGraphNodeFrontend(ComputationalGraphNodeFrontend):
     (ComputationalGraphNodeFrontend only generates OPERATOR from IMPLEMENTATION).
     """
 
+    @debug
+    def __new__(cls, implementation_key, implementation=None, **kwds):
+        return super(MultiComputationalGraphNodeFrontend, cls).__new__(cls,
+                                                                       implementation=implementation, **kwds)
+
     @debug
     def __init__(self, implementation_key, implementation=None, **kwds):
         """
diff --git a/hysop/core/graph/computational_operator.py b/hysop/core/graph/computational_operator.py
index 644c75d0e6e085d012a0eef8df7506d5cfb4e117..01b5ddceccec760314de5f64dedd3de22d603f96 100644
--- a/hysop/core/graph/computational_operator.py
+++ b/hysop/core/graph/computational_operator.py
@@ -10,7 +10,7 @@ from hysop.fields.field_requirements import DiscreteFieldRequirements
 from abc import ABCMeta
 
 
-class ComputationalGraphOperator(ComputationalGraphNode):
+class ComputationalGraphOperator(ComputationalGraphNode, metaclass=ABCMeta):
     """
     Interface of an abstract computational graph operator.
     An operator is a single graph node with its own inputs and outputs.
@@ -120,7 +120,10 @@ class ComputationalGraphOperator(ComputationalGraphNode):
     or at least, a child class of hysop.core.graph.computational_graph.ComputationalGraph.
     """
 
-    __metaclass__ = ABCMeta
+    @debug
+    def __new__(cls, input_fields=None, output_fields=None, **kwds):
+        return super(ComputationalGraphOperator, cls).__new__(cls,
+                                                              input_fields=input_fields, output_fields=output_fields, **kwds)
 
     @debug
     def __init__(self, input_fields=None, output_fields=None, **kwds):
@@ -181,7 +184,7 @@ class ComputationalGraphOperator(ComputationalGraphNode):
              2) allowed splitting directions for cartesian topologies
         """
         # by default we create HOST (cpu) TopologyDescriptors
-        for (field, topo_descriptor) in self.input_fields.iteritems():
+        for (field, topo_descriptor) in self.input_fields.items():
             topo_descriptor = TopologyDescriptor.build_descriptor(
                 backend=Backend.HOST,
                 operator=self,
@@ -189,7 +192,7 @@ class ComputationalGraphOperator(ComputationalGraphNode):
                 handle=topo_descriptor)
             self.input_fields[field] = topo_descriptor
 
-        for (field, topo_descriptor) in self.output_fields.iteritems():
+        for (field, topo_descriptor) in self.output_fields.items():
             topo_descriptor = TopologyDescriptor.build_descriptor(
                 backend=Backend.HOST,
                 operator=self,
@@ -215,7 +218,7 @@ class ComputationalGraphOperator(ComputationalGraphNode):
         # can_split set to True in all directions, all TranspositionStates
         # and C memory ordering).
         input_field_requirements = {}
-        for (field, topo_descriptor) in self.input_fields.iteritems():
+        for (field, topo_descriptor) in self.input_fields.items():
             if (topo_descriptor is None):
                 req = None
             else:
@@ -226,7 +229,7 @@ class ComputationalGraphOperator(ComputationalGraphNode):
             input_field_requirements[field] = req
 
         output_field_requirements = {}
-        for (field, topo_descriptor) in self.output_fields.iteritems():
+        for (field, topo_descriptor) in self.output_fields.items():
             if (topo_descriptor is None):
                 req = None
             else:
@@ -373,29 +376,29 @@ class ComputationalGraphOperator(ComputationalGraphNode):
             try:
                 ireq.check_topology(itopo)
             except RuntimeError:
-                msg = u'\nFATAL ERROR: {}::{}.handle_topologies() input topology mismatch.\n\n'
+                msg = '\nFATAL ERROR: {}::{}.handle_topologies() input topology mismatch.\n\n'
                 msg = msg.format(type(self).__name__, self.name)
-                msg += u'Operator expected field {} topology to match the following requirements\n'
+                msg += 'Operator expected field {} topology to match the following requirements\n'
                 msg = msg.format(ifield.name)
-                msg += u'  {}\n'.format(str(ireq).decode('utf-8'))
-                msg += u'but input effective discrete field topology is\n'
-                msg += u'  {}\n'.format(itopo)
-                msg += u'\n'
-                print msg
+                msg += '  {}\n'.format(str(ireq))
+                msg += 'but input effective discrete field topology is\n'
+                msg += '  {}\n'.format(itopo)
+                msg += '\n'
+                print(msg)
                 raise
 
             try:
                 ireq.check_discrete_topology_state(istate)
             except RuntimeError:
-                msg = u'\nFATAL ERROR: {}::{}.handle_topologies() input state mismatch.\n\n'
+                msg = '\nFATAL ERROR: {}::{}.handle_topologies() input state mismatch.\n\n'
                 msg = msg.format(type(self).__name__, self.name)
-                msg += u'Operator expected field {} on topology id {} to match the following '
-                msg += u'requirements\n'
+                msg += 'Operator expected field {} on topology id {} to match the following '
+                msg += 'requirements\n'
                 msg = msg.format(ifield.name, itopo.id)
-                msg += u'  {}\n'.format(str(ireq).decode('utf-8'))
-                msg += u'but input discrete topology state determined by the graph builder is\n'
-                msg += u'  {}\n'.format(istate)
-                msg += u'\n'
+                msg += '  {}\n'.format(str(ireq))
+                msg += 'but input discrete topology state determined by the graph builder is\n'
+                msg += '  {}\n'.format(istate)
+                msg += '\n'
                 raise
 
             istate_report = 'Field {}: {} (topo={})'.format(ifield.name, istate, itopo.id)
@@ -422,7 +425,7 @@ class ComputationalGraphOperator(ComputationalGraphNode):
                 msg += 'but output effective discrete field topology is\n'
                 msg += '  {}\n'.format(otopo)
                 msg += '\n'
-                print msg
+                print(msg)
                 raise
 
             try:
@@ -444,13 +447,13 @@ class ComputationalGraphOperator(ComputationalGraphNode):
 
         # replace topologies by topology view using topology states
         input_fields = self.input_fields
-        for field, topology in input_fields.iteritems():
+        for field, topology in input_fields.items():
             if topology is not None:
                 topology_state = input_topology_states[field].copy(is_read_only=True)
                 input_fields[field] = topology.view(topology_state)
 
         output_fields = self.output_fields
-        for field, topology in output_fields.iteritems():
+        for field, topology in output_fields.items():
             if topology is not None:
                 topology_state = output_topology_states[field].copy(is_read_only=False)
                 output_fields[field] = topology.view(topology_state)
@@ -491,11 +494,11 @@ class ComputationalGraphOperator(ComputationalGraphNode):
         super(ComputationalGraphOperator, self).discretize()
 
         # discretize ScalarFields
-        for (field, topology_view) in self.input_fields.iteritems():
+        for (field, topology_view) in self.input_fields.items():
             if topology_view is not None:
                 topology, topology_state = topology_view.topology, topology_view.topology_state
                 self.input_discrete_fields[field] = field.discretize(topology, topology_state)
-        for (field, topology_view) in self.output_fields.iteritems():
+        for (field, topology_view) in self.output_fields.items():
             if topology_view is not None:
                 topology, topology_state = topology_view.topology, topology_view.topology_state
                 self.output_discrete_fields[field] = field.discretize(topology, topology_state)
@@ -518,11 +521,11 @@ class ComputationalGraphOperator(ComputationalGraphNode):
             tdfield = DiscreteTensorField(field=tfield, dfields=dfields)
             self.output_discrete_tensor_fields[tfield] = tdfield
 
-        self.discrete_fields = tuple(set(self.input_discrete_fields.values() +
-                                         self.output_discrete_fields.values()))
+        self.discrete_fields = tuple(set(self.input_discrete_fields.values()).union(
+            self.output_discrete_fields.values()))
 
-        self.discrete_tensor_fields = tuple(set(self.input_discrete_tensor_fields.values() +
-                                                self.output_discrete_tensor_fields.values()))
+        self.discrete_tensor_fields = tuple(set(self.input_discrete_tensor_fields.values()).union(
+            self.output_discrete_tensor_fields.values()))
         self.discretized = True
 
     @debug
@@ -633,7 +636,7 @@ class ComputationalGraphOperator(ComputationalGraphNode):
         assert set(input_topology_states.keys()) == set(self.input_fields.keys())
 
         if input_topology_states:
-            (ref_field, ref_state) = input_topology_states.items()[0]
+            (ref_field, ref_state) = tuple(input_topology_states.items())[0]
             ref_topo = self.input_fields[ref_field]
         else:
             ref_state = self.output_fields[output_field].topology_state
@@ -676,7 +679,7 @@ class ComputationalGraphOperator(ComputationalGraphNode):
                     for fi in f.fields:
                         ifields[fi] = input_fields[fi]
             variables = {k: (v if v.backend.kind in HDF_Writer.supported_backends() else None)
-                         for (k, v) in ifields.iteritems()}
+                         for (k, v) in ifields.items()}
             op = HDF_Writer(io_params=io_params, variables=variables, **op_kwds)
             op.initialize(topgraph_method=self.method)
             op.get_and_set_field_requirements()
@@ -694,7 +697,7 @@ class ComputationalGraphOperator(ComputationalGraphNode):
                     for fi in f.fields:
                         ofields[fi] = output_fields[fi]
             variables = {k: (v if v.backend.kind in HDF_Writer.supported_backends() else None)
-                         for (k, v) in ofields.iteritems()}
+                         for (k, v) in ofields.items()}
             op = HDF_Writer(io_params=io_params, variables=variables, **op_kwds)
             op.initialize(topgraph_method=self.method)
             op.get_and_set_field_requirements()
@@ -708,7 +711,7 @@ class ComputationalGraphOperator(ComputationalGraphNode):
         Can be overriden to enable operator checkpointing.
         """
         return False
-    
+
     def checkpoint_datagroup_key(self):
         """
         By default the checkpoint datagroup key is based on operator name.
@@ -716,10 +719,10 @@ class ComputationalGraphOperator(ComputationalGraphNode):
         Note that all keys are post-processing by using CheckpointHandler._format_zarr_key.
         """
         return self.name
-    
+
     def save_checkpoint(self, datagroup, mpi_params, io_params, compressor):
         """
-        Save custom operator data to a checkpoint. 
+        Save custom operator data to a checkpoint.
 
         Datagroup is a zarr.hierarchy.Datagroup object, see hysop.core.checkpoints.CheckpointHandler for example usage.
         Parameters mpi_params and io_params are MPIParams and IOParams coming from the CheckpointHandler.
@@ -732,38 +735,38 @@ class ComputationalGraphOperator(ComputationalGraphNode):
         or
           array.attrs[key] = value
         where key is a string that does not contain '\' or '/', see hysop.core.checkpoints.CheckpointHandler._format_zarr_key.
-        
+
         Only io_leader should write metadata, io_leader can be determined as (mpi_params.rank == io_params.io_leader)
         Multiple processes array writes should be synchronized unless they write to different blocks of data.
         See https://zarr.readthedocs.io/en/stable/tutorial.html#parallel-computing-and-synchronization for more information.
         """
         if self.checkpoint_required():
-            msg='Operator {} does require checkpointing but {}.save_checkpoint() has not been overriden.'
+            msg = 'Operator {} does require checkpointing but {}.save_checkpoint() has not been overriden.'
             raise NotImplementedError(msg.format(self.name, self.__class__.__name__))
         else:
-            msg='{}.load_checkpoint() called but operator {} does not seem to require a checkpoint...'
+            msg = '{}.load_checkpoint() called but operator {} does not seem to require a checkpoint...'
             raise RuntimeError(msg.format(self.__class__.__name__, self.name))
-    
+
     def load_checkpoint(self, datagroup, mpi_params, io_params, relax_constraints):
         """
-        Reload custom operator data from a checkpoint. 
+        Reload custom operator data from a checkpoint.
 
         Datagroup is a zarr.hierarchy.Datagroup object, see hysop.core.checkpoints.CheckpointHandler for example usage.
         Parameters mpi_params and io_params are MPIParams and IOParams coming from the CheckpointHandler.
         If relax_constraints is set, you can ignore data discreapencies such as datatype, else should an error should be raised.
 
         Data arrays or subgroups can be accessed with the dict-like datagroup[key] syntax.
-        Group or array metadata can be retrieved by using the group.attrs[key] or array.attrs[key] syntax where key is a 
+        Group or array metadata can be retrieved by using the group.attrs[key] or array.attrs[key] syntax where key is a
         string that does not contain '\' or '/', see hysop.core.checkpoints.CheckpointHandler._format_zarr_key.
 
         As this operation read-only, there is no need to synchronize processes.
         Also note that metadata type is not always the same when deserialized (for example tuples become lists).
         """
         if self.checkpoint_required():
-            msg='Operator {} does require checkpointing but {}.load_checkpoint() has not been overriden.'
+            msg = 'Operator {} does require checkpointing but {}.load_checkpoint() has not been overriden.'
             raise NotImplementedError(msg.format(self.name, self.__class__.__name__))
         else:
-            msg='{}.load_checkpoint() called but operator {} does not seem to require a checkpoint...'
+            msg = '{}.load_checkpoint() called but operator {} does not seem to require a checkpoint...'
             raise RuntimeError(msg.format(self.__class__.__name__, self.name))
 
     def _check_backend(self):
@@ -772,13 +775,13 @@ class ComputationalGraphOperator(ComputationalGraphNode):
         """
         topologies_per_backend = self.get_topologies()
         supported_backends = self.supported_backends()
-        for (backend, topologies) in topologies_per_backend.iteritems():
+        for (backend, topologies) in topologies_per_backend.items():
             if (backend.kind not in supported_backends):
                 bad_fields = set()
-                for (field, topo) in self.input_fields.iteritems():
+                for (field, topo) in self.input_fields.items():
                     if topo and (topo.backend is backend):
                         bad_fields.add(field)
-                for (field, topo) in self.output_fields.iteritems():
+                for (field, topo) in self.output_fields.items():
                     if topo and (topo.backend is backend):
                         bad_fields.add(field)
                 msg = '\n\nOperator {} topology backend mismatch:'.format(self.node_tag)
@@ -790,10 +793,10 @@ class ComputationalGraphOperator(ComputationalGraphNode):
                 msg += '\n     *'+'\n     *'.join([str(b) for b in supported_backends])
                 msg += '\n -> bad fields were:'
                 msg += '\n     *'+'\n     *'.join([f.full_tag for f in bad_fields])
-                print '\nFATAL ERROR: Topology backend mismatch.\n'
-                print 'Offending topologies were:'
+                print('\nFATAL ERROR: Topology backend mismatch.\n')
+                print('Offending topologies were:')
                 for t in topologies:
-                    print '\n', t
+                    print('\n', t)
                 raise RuntimeError(msg)
 
     def to_graph(self, name=None):
diff --git a/hysop/core/graph/continuous.py b/hysop/core/graph/continuous.py
index ab70b0d52aa9c4c9f712857e1428aa2eaf211e9b..86e48338c82b64a6d3dc67202d119028b39ea045 100755
--- a/hysop/core/graph/continuous.py
+++ b/hysop/core/graph/continuous.py
@@ -18,17 +18,18 @@ from hysop.parameters.parameter import Parameter
 import hysop.tools.io_utils as io
 
 
-class OperatorBase(TaggedObject):
+class OperatorBase(TaggedObject, metaclass=ABCMeta):
     """
     Abstract interface to continuous operators.
     """
-    __metaclass__ = ABCMeta
 
     @debug
-    def __init__(self, name, fields, tensor_fields, parameters,
-                 mpi_params=None,
-                 io_params=False,
-                 **kwds):
+    def __new__(cls, name, fields, tensor_fields, parameters,
+                mpi_params=None, io_params=False, **kwds):
+        return super(OperatorBase, cls).__new__(cls, tagged_cls=OperatorBase, tag_prefix='node', **kwds)
+
+    @debug
+    def __init__(self, name, fields, tensor_fields, parameters, mpi_params=None, io_params=False, **kwds):
         """
         Parameters
         ----------
@@ -55,8 +56,7 @@ class OperatorBase(TaggedObject):
         mpi_params: MPIParams
             File i/o config (filename, format ...)
         """
-        super(OperatorBase, self).__init__(tagged_cls=OperatorBase, tag_prefix='node',
-                                           **kwds)
+        super(OperatorBase, self).__init__(tagged_cls=OperatorBase, tag_prefix='node', **kwds)
 
         check_instance(fields, tuple, values=ScalarField)
         check_instance(tensor_fields, tuple, values=TensorField)
diff --git a/hysop/core/graph/graph.py b/hysop/core/graph/graph.py
index f5ca3f7d6833293c20bbda34c2fc5e262220ba41..825416c16ef66b15fb9cf5be94c09352139c5891 100644
--- a/hysop/core/graph/graph.py
+++ b/hysop/core/graph/graph.py
@@ -113,7 +113,7 @@ class VertexAttributes(object):
         return self
 
     # hashing for networkx
-    def hash(self):
+    def __hash__(self):
         return self.node_id
 
     def __eq__(self, other):
@@ -144,7 +144,7 @@ class VertexAttributes(object):
             MemoryReorderingBase:     'box'
         }
         if with_custom_nodes:
-            for (op_type, shape) in special_shapes.iteritems():
+            for (op_type, shape) in special_shapes.items():
                 if isinstance(self.operator, op_type):
                     return shape
         return 'circle'
@@ -204,25 +204,26 @@ class VertexAttributes(object):
         suffix = '</b>&nbsp;&nbsp'
         sep = '\n'+'&nbsp'*14
 
-        ss = '<h2>Operator {}</h2>{}{}{}{}{}\n{}'.format(op.name,
-                                                         '{p}Rank:{s}{}\n\n'.format(self.op_ordering, p=prefix, s=suffix)
-                                                         if self.op_ordering else '',
-                                                         '{p}Pin:{s}{}\n'.format(sep.join(ipinfo(param)
-                                                                                          for param in iparams.values()), p=prefix, s=suffix+'&nbsp&nbsp')
-                                                         if iparams else '',
-                                                         '{p}Fin:{s}{}\n'.format(sep.join([ifinfo(f, topo)
-                                                                                           for (f, topo) in ifields.iteritems()]), p=prefix,
-                                                                                 s=suffix+'&nbsp&nbsp')
-                                                         if ifields else '',
-                                                         '{p}Pout:{s}{}\n'.format(sep.join([opinfo(param)
-                                                                                            for param in oparams.values()]), p=prefix, s=suffix)
-                                                         if oparams else '',
-                                                         '{p}Fout:{s}{}\n'.format(sep.join([ofinfo(f, topo)
-                                                                                            for (f, topo) in ofields.iteritems()]), p=prefix, s=suffix)
-                                                         if ofields else '',
-                                                         '{p}Type:{s} {}'.format(
-                                                             sep.join(map(lambda x: x.__name__, type(op).__mro__[:-2])),
-                                                             p=prefix, s=suffix))
+        ss = '<h2>Operator {}</h2>{}{}{}{}{}\n{}'.format(
+            op.name,
+            '{p}Rank:{s}{}\n\n'.format(self.op_ordering, p=prefix, s=suffix)
+            if self.op_ordering else '',
+            '{p}Pin:{s}{}\n'.format(sep.join(ipinfo(param)
+                                             for param in iparams.values()), p=prefix, s=suffix+'&nbsp&nbsp')
+            if iparams else '',
+            '{p}Fin:{s}{}\n'.format(sep.join([ifinfo(f, topo)
+                                              for (f, topo) in ifields.items()]), p=prefix,
+                                    s=suffix+'&nbsp&nbsp')
+            if ifields else '',
+            '{p}Pout:{s}{}\n'.format(sep.join([opinfo(param)
+                                               for param in oparams.values()]), p=prefix, s=suffix)
+            if oparams else '',
+            '{p}Fout:{s}{}\n'.format(sep.join([ofinfo(f, topo)
+                                               for (f, topo) in ofields.items()]), p=prefix, s=suffix)
+            if ofields else '',
+            '{p}Type:{s} {}'.format(
+                sep.join(map(lambda x: x.__name__, type(op).__mro__[:-2])),
+                p=prefix, s=suffix))
         return ss
 
 
diff --git a/hysop/core/graph/graph_builder.py b/hysop/core/graph/graph_builder.py
index 7764b221e23b9f5105577d46bb5065844dca8cf6..d4d65ecc46f45d4228fb83bcbf6e3acd028d2bce 100644
--- a/hysop/core/graph/graph_builder.py
+++ b/hysop/core/graph/graph_builder.py
@@ -1,6 +1,6 @@
-import networkx as nx
+import numpy as np
+
 from hysop import vprint, dprint, Problem
-from hysop.deps import np, __builtin__, print_function
 from hysop.tools.types import check_instance, first_not_None
 from hysop.tools.io_utils import IOParams
 
@@ -38,7 +38,7 @@ GRAPH_BUILDER_DEBUG_LEVEL = 0
 def gprint(*args, **kwds):
     level = kwds.pop('level', 1)
     if GRAPH_BUILDER_DEBUG_LEVEL >= level:
-        __builtin__.print(*args, **kwds)
+        print(*args, **kwds)
 
 
 def gprint2(*args, **kwds):
@@ -185,7 +185,7 @@ class GraphBuilder(object):
             if not isinstance(op, Problem) and not isinstance(op, RedistributeInter):
                 # try to fill in undertermined topologies (experimental feature)
                 backends = op.supported_backends()
-                for (ifield, itopo) in sorted(ifields.iteritems(),
+                for (ifield, itopo) in sorted(ifields.items(),
                                               key=lambda x: x[0].name):
                     if (itopo is not None):
                         continue
@@ -543,7 +543,7 @@ class GraphBuilder(object):
             input_states = op_input_topology_states[op]
             output_states = op_output_topology_states[op]
             field_requirements = op.field_requirements
-            for (ifield, itopo) in sorted(ifields.iteritems(), key=lambda x: x[0].name):
+            for (ifield, itopo) in sorted(ifields.items(), key=lambda x: x[0].name):
                 if (itopo is not None):
                     continue
                 msg = '\nGraphBuilder {} could not automatically determine the '
@@ -571,7 +571,7 @@ class GraphBuilder(object):
             for (fields, io_params, op_kwds) in target_node._output_fields_to_dump:
                 if not fields:
                     fields = self.output_fields.keys()
-                fields = sorted(fields, key=lambda x: x.name)
+                fields = tuple(sorted(fields, key=lambda x: x.name))
                 for field in fields:
                     msg = '{} is not an output field.'.format(field.name)
                     assert field in self.output_fields, msg
@@ -722,8 +722,9 @@ class GraphBuilder(object):
             idx_range = (nodes[0].op_ordering, nodes[-1].op_ordering)
 
             if queues:
-                color = queues.keys()[-1]+1
-                for k in queues.keys()[::-1]:
+                qkeys = tuple(sorted(queues.keys()))
+                color = qkeys[-1]+1
+                for k in qkeys[::-1]:
                     paths = queues[k]
                     if (paths[-1][1] < idx_range[0]):
                         src = sorted_nodes[paths[-1][1]]
@@ -890,10 +891,10 @@ class GraphBuilder(object):
 
                 assert len(op.input_fields) == 1
                 assert len(op.output_fields) == 1
-                assert op.input_fields.keys()[0] == field
-                assert op.output_fields.keys()[0] == field
-                assert op.input_fields.values()[0] == src_topo
-                dst_topo = op.output_fields.values()[0]
+                assert next(iter(op.input_fields)) == field
+                assert next(iter(op.output_fields)) == field
+                assert next(iter(op.input_fields.values())) == src_topo
+                dst_topo = next(iter(op.output_fields.values()))
 
                 op_node = self.add_vertex(graph, op)
 
@@ -1162,7 +1163,7 @@ class GraphBuilder(object):
                 if (target_topo.backend.kind is Backend.HOST) and write_nodes:
                     # give source topo priority according to topology_affinity
                     src_topos = write_nodes.keys()
-                    src_topos = sorted(src_topos, key=topology_affinity, reverse=True)
+                    src_topos = tuple(sorted(src_topos, key=topology_affinity, reverse=True))
                     src_topo = src_topos[0]
                     if target_topo.mpi_params.task_id != src_topo.mpi_params.task_id:
                         dtopology_states[target_topo] = src_topo.topology_state
@@ -1180,7 +1181,7 @@ class GraphBuilder(object):
                 elif (target_topo.backend.kind is Backend.OPENCL) and write_nodes:
                     # give source topo priority according to topology_affinity
                     src_topos = write_nodes.keys()
-                    src_topos = sorted(src_topos, key=topology_affinity, reverse=True)
+                    src_topos = tuple(sorted(src_topos, key=topology_affinity, reverse=True))
                     src_topo = src_topos[0]
                     if target_topo.mpi_params.task_id != src_topo.mpi_params.task_id:
                         dtopology_states[target_topo] = src_topo.topology_state
diff --git a/hysop/core/graph/node_generator.py b/hysop/core/graph/node_generator.py
index 46a77d1b9908a8dc088f558717adffcdf58b2d7d..71640c4fa26b9b13e07756af06122eb5370d5600 100644
--- a/hysop/core/graph/node_generator.py
+++ b/hysop/core/graph/node_generator.py
@@ -6,17 +6,19 @@ from hysop.core.graph.computational_node import ComputationalGraphNode
 from hysop.core.mpi.redistribute import RedistributeInter
 
 
-class ComputationalGraphNodeGenerator(object):
+class ComputationalGraphNodeGenerator(object, metaclass=ABCMeta):
     """
     A class that can generate multiple hysop.core.graph.ComputationalGraphNode.
     """
 
-    __metaclass__ = ABCMeta
+    @debug
+    def __new__(cls, candidate_input_tensors, candidate_output_tensors,
+                name=None, pretty_name=None, **kwds):
+        return super(ComputationalGraphNodeGenerator, cls).__new__(cls, **kwds)
 
     @debug
     def __init__(self, candidate_input_tensors, candidate_output_tensors,
-                 name=None, pretty_name=None,
-                 **kwds):
+                 name=None, pretty_name=None, **kwds):
         super(ComputationalGraphNodeGenerator, self).__init__(**kwds)
         candidate_input_tensors = first_not_None(candidate_input_tensors, ())
         candidate_output_tensors = first_not_None(candidate_output_tensors, ())
@@ -82,7 +84,7 @@ class ComputationalGraphNodeGenerator(object):
                     self.nodes += nodes
             except:
                 msg = '\nFailed to call generate() in class {}.\n'.format(self.__class__)
-                print msg
+                print(msg)
                 raise
 
             self.candidate_input_tensors = set(filter(lambda x: x.is_tensor,
diff --git a/hysop/core/graph/node_requirements.py b/hysop/core/graph/node_requirements.py
index 243ec321728ad0f6290eab290710cb042058491b..273c087ec8c5d19f0aa595d9b62548556b5c22ae 100644
--- a/hysop/core/graph/node_requirements.py
+++ b/hysop/core/graph/node_requirements.py
@@ -21,12 +21,12 @@ class NodeRequirements(object):
 
 
 class OperatorRequirements(NodeRequirements):
-    __slots__ = ('_enforce_unique_transposition_state', 
+    __slots__ = ('_enforce_unique_transposition_state',
                  '_enforce_unique_topology_shape',
                  '_enforce_unique_memory_order',
                  '_enforce_unique_ghosts')
 
-    def __init__(self, operator, 
+    def __init__(self, operator,
             enforce_unique_transposition_state=None,
             enforce_unique_topology_shape=None,
             enforce_unique_memory_order=None,
@@ -85,7 +85,7 @@ class OperatorRequirements(NodeRequirements):
         return self._enforce_unique_ghosts
     enforce_unique_ghosts = property(_get_enforce_unique_ghosts,
                                      _set_enforce_unique_ghosts)
-    
+
     def check_and_update_reqs(self, field_requirements):
         """Check and possibly update field requirements against global node requirements."""
         if (field_requirements is None):
@@ -93,7 +93,7 @@ class OperatorRequirements(NodeRequirements):
         axes = set()
         memory_order, can_split, min_ghosts, max_ghosts = None, None, None, None
         field_names = ''
-        for (is_input, freqs) in field_requirements.iter_requirements(): 
+        for (is_input, freqs) in field_requirements.iter_requirements():
             if (freqs is None):
                 continue
             (field, td, req) = freqs
@@ -103,8 +103,8 @@ class OperatorRequirements(NodeRequirements):
                         msg ='::GLOBAL OPERATOR REQUIREMENTS ERROR::\n'
                         msg+='Previous axes: \n  {} required by fields {}\nare incompatible '
                         msg+='with axes requirements \n  {} enforced by {} field {}.\n'
-                        msg=msg.format(tuple(axes),     field_names, 
-                                       tuple(req.axes), 
+                        msg=msg.format(tuple(axes),     field_names,
+                                       tuple(req.axes),
                                       'input' if is_input else 'output',
                                        field.name)
                         raise RuntimeError(msg)
@@ -119,8 +119,8 @@ class OperatorRequirements(NodeRequirements):
                             msg ='::GLOBAL OPERATOR REQUIREMENTS ERROR::\n'
                             msg+='Previous memory order: \n  {} required by fields {}\nare incompatible '
                             msg+='with memory order requirements \n  {} enforced by {} field {}.\n'
-                            msg=msg.format(memory_order, field_names, 
-                                           req.memory_order, 
+                            msg=msg.format(memory_order, field_names,
+                                           req.memory_order,
                                           'input' if is_input else 'output',
                                            field.name)
                             raise RuntimeError(msg)
@@ -136,8 +136,8 @@ class OperatorRequirements(NodeRequirements):
                             msg ='::GLOBAL OPERATOR REQUIREMENTS ERROR::\n'
                             msg+='Previous cartesian split directions: \n  {} required by fields {}\nare incompatible '
                             msg+='with cartesian split directions requirements \n  {} enforced by {} field {}.'
-                            msg=msg.format(can_split, field_names, 
-                                           req.can_split, 
+                            msg=msg.format(can_split, field_names,
+                                           req.can_split,
                                           'input' if is_input else 'output',
                                            field.name)
                             msg+='\nDomain cannot be splitted accross multiple processes.\n'
@@ -154,14 +154,14 @@ class OperatorRequirements(NodeRequirements):
                     if any(maxg<min_ghosts):
                         msg+='Previous cartesian min_ghosts: \n  {} required by fields {}\nare incompatible '
                         msg+='with cartesian max_ghosts requirements \n  {} enforced by {} field {}.\n'
-                        msg=msg.format(min_ghosts, field_names, 
+                        msg=msg.format(min_ghosts, field_names,
                                        maxg, 'input' if is_input else 'output',
                                        field.name)
                         raise RuntimeError(msg)
                     elif any(ming>max_ghosts):
                         msg+='Previous cartesian max_ghosts: \n  {} required by fields {}\nare incompatible '
                         msg+='with cartesian min_ghosts requirements \n  {} enforced by {} field {}.\n'
-                        msg=msg.format(max_ghosts, field_names, 
+                        msg=msg.format(max_ghosts, field_names,
                                        ming, 'input' if is_input else 'output',
                                        field.name)
                         raise RuntimeError(msg)
@@ -175,19 +175,18 @@ class OperatorRequirements(NodeRequirements):
             field_names += field.name + ', '
 
         axes = tuple(axes)
-        
+
         has_single_input = (len(field_requirements._input_field_requirements)<=1)
-    
+
         # enforce global operator requirements onto field requirements
-        for (is_input, freqs) in field_requirements.iter_requirements(): 
+        for (is_input, freqs) in field_requirements.iter_requirements():
             if (freqs is None):
                 continue
             (field, td, req) = freqs
-            
+
             # enforce transposition axes
             if self.enforce_unique_transposition_state:
                 if axes:
-                    #axes = sorted(axes, key=lambda x: sum((xi-i)**2 for (i,xi) in zip(range(len(x)),x)))
                     req.axes = (axes[0],)
                 elif not has_single_input:
                     req.axes = (TranspositionState[field.dim].default_axes(),)
@@ -198,7 +197,7 @@ class OperatorRequirements(NodeRequirements):
                     req.memory_order = memory_order
                 elif not has_single_input:
                     req.memory_order = MemoryOrdering.C_CONTIGUOUS
-            
+
             # enforce topology shape (indirectly by enforcing split directions)
             if self.enforce_unique_topology_shape:
                 assert (can_split is not None)
@@ -213,4 +212,4 @@ class OperatorRequirements(NodeRequirements):
             if self.enforce_unique_ghosts:
                 req.min_ghosts = min_ghosts
                 req.max_ghosts = min_ghosts
-            
+
diff --git a/hysop/core/graph/tests/test_graph.py b/hysop/core/graph/tests/test_graph.py
index 52b0b754ea44e545b6063fc6bbcb6a0978a2e0e4..f6de4e55ae9fc0831593690256269ec944d5f8fd 100644
--- a/hysop/core/graph/tests/test_graph.py
+++ b/hysop/core/graph/tests/test_graph.py
@@ -45,12 +45,12 @@ class TestGraph(object):
         rho1g = Field(domain=box, name='rho1g')
         rho1p = Field(domain=box, name='rho1p')
 
-        d3d0 = CartesianDiscretization(resolution=(64,64,64), 
+        d3d0 = CartesianDiscretization(resolution=(64,64,64),
                 ghosts=None, default_boundaries=True)
-        d3d1 = CartesianDiscretization(resolution=(128,128,128), 
+        d3d1 = CartesianDiscretization(resolution=(128,128,128),
                 ghosts=None, default_boundaries=True)
-        t0  = CartesianTopology(domain=box, discretization=d3d0)
-        t1  = CartesianTopology(domain=box, discretization=d3d1)
+        t0 = CartesianTopology(domain=box, discretization=d3d0)
+        t1 = CartesianTopology(domain=box, discretization=d3d1)
 
         ops = [
                     ('copyW',     [Wg],         [Wp]),
@@ -98,7 +98,7 @@ class TestGraph(object):
         g.discretize()
         g.setup(None)
         g.apply()
-    
+
         with tempfile.NamedTemporaryFile(suffix='.html') as f:
             g.to_html(f.name)
 
diff --git a/hysop/core/memory/allocator.py b/hysop/core/memory/allocator.py
index c59fd543ae73b96d594c64ee958c3e3c2085e5f3..4f93e42c0683aec94570f1f83bbbe26897a487e3 100644
--- a/hysop/core/memory/allocator.py
+++ b/hysop/core/memory/allocator.py
@@ -1,4 +1,3 @@
-
 import traceback
 from abc import ABCMeta, abstractmethod
 from hysop import __VERBOSE__, __TRACE_MEMALLOCS__, __BACKTRACE_BIG_MEMALLOCS__
@@ -7,24 +6,28 @@ from hysop.tools.units import bytes2str
 from hysop.tools.types import first_not_None
 from hysop.tools.handle import TaggedObject
 
-class AllocatorBase(TaggedObject):
+
+class AllocatorBase(TaggedObject, metaclass=ABCMeta):
     """
     Base class for allocators.
     """
-    
-    __metaclass__=ABCMeta
 
-    is_deferred=False
+    is_deferred = False
+
+    def __new__(cls, verbose, **kwds):
+        return super(AllocatorBase, cls).__new__(cls, tag_prefix='al', **kwds)
 
     def __init__(self, verbose, **kwds):
-        super(AllocatorBase,self).__init__(tag_prefix='al', **kwds)
+        super(AllocatorBase, self).__init__(tag_prefix='al', **kwds)
         verbose = first_not_None(verbose, __TRACE_MEMALLOCS__)
         self._verbose = verbose
 
     def __eq__(self, other):
         return (self is other)
+
     def __ne__(self, other):
         return (self is not other)
+
     def __hash__(self):
         return id(self)
 
@@ -34,16 +37,16 @@ class AllocatorBase(TaggedObject):
         If the first allocation fails, call the garbage collector and try again.
         It this fails a second time, raise a MemoryError.
         """
-        
+
         try_count = 0
         while try_count < 2:
             try:
-                if (alignment > 1):
-                    buf = self.allocate_aligned(size=size, alignment=alignment)
-                else:
+                if alignment is None or alignment == 1:
                     buf = self.allocate(nbytes=size)
+                else:
+                    buf = self.allocate_aligned(size=size, alignment=alignment)
                 if (buf is None):
-                    msg='{}.allocate(): returned allocation is None.'.format(self.__class__)
+                    msg = '{}.allocate(): returned allocation is None.'.format(self.__class__)
                     raise ValueError(msg)
                 return buf
             except MemoryError:
@@ -64,12 +67,12 @@ class AllocatorBase(TaggedObject):
         Return true if buffers are allocated in host memory.
         """
         pass
-    
+
     @abstractmethod
     def max_alloc_size(self):
         """Max allocatable size in bytes."""
         pass
-    
+
     @abstractmethod
     def allocate(self, nbytes, verbose=True):
         """
@@ -78,24 +81,24 @@ class AllocatorBase(TaggedObject):
         If allocation fails, this method has to raise a MemoryError.
         """
         if (self._verbose or __BACKTRACE_BIG_MEMALLOCS__) and verbose:
-            print '{}allocating block of size {}.'.format(
-                    self.prefix(), bytes2str(nbytes))
-        if __BACKTRACE_BIG_MEMALLOCS__ and nbytes>64*1024*1024:
-            print '[BIG ALLOCATION BACKTRACE]'
-            print ''.join(traceback.format_stack())
-            print '[END OF TRACE]'
-            print
+            print('{}allocating block of size {}.'.format(
+                self.prefix(), bytes2str(nbytes)))
+        if __BACKTRACE_BIG_MEMALLOCS__ and nbytes > 64*1024*1024:
+            print('[BIG ALLOCATION BACKTRACE]')
+            print(''.join(traceback.format_stack()))
+            print('[END OF TRACE]')
+            print()
 
     def allocate_aligned(self, size, alignment):
         """
         Allocate size bytes aligned on alignment.
         """
-        assert alignment>0
-        assert (alignment & (alignment-1))==0, 'alignment is not a power of 2.'
+        assert alignment > 0
+        assert (alignment & (alignment-1)) == 0, 'alignment is not a power of 2.'
         nbytes = size + alignment - 1
         if self._verbose:
-            print '{}allocating block of size {}, to satisfy {} aligned on {} bytes.'.format(
-                    self.prefix(), bytes2str(nbytes), bytes2str(size), alignment)
+            print('{}allocating block of size {}, to satisfy {} aligned on {} bytes.'.format(
+                self.prefix(), bytes2str(nbytes), bytes2str(size), alignment))
         return self.allocate(nbytes, verbose=False).aligned_view(alignment=alignment, size=size)
 
     def try_release_blocks(self):
@@ -110,4 +113,3 @@ class AllocatorBase(TaggedObject):
         Release the allocated buffer.
         """
         buf.release()
-
diff --git a/hysop/core/memory/buffer.py b/hysop/core/memory/buffer.py
index d84e41ff0f3255c0710b4435b5440432a7ce9800..a887d9ff9529955bca62df28aea481dc689e4459 100644
--- a/hysop/core/memory/buffer.py
+++ b/hysop/core/memory/buffer.py
@@ -1,6 +1,6 @@
-
 from abc import ABCMeta, abstractmethod
-from hysop.deps import np
+import numpy as np
+
 from hysop.tools.types import check_instance
 from hysop.tools.units import bytes2str
 
@@ -8,13 +8,13 @@ class Buffer(object):
     """
     Base class for releasable buffers.
     A buffer should just offer a release method to be compatible with allocators.
-    
+
     Host buffers are numpy buffers.
     Cuda and OpenCl buffers are the one provided by pycuda and pyopencl.
 
     Buffers should be obtained through allocators or memory pools.
     """
-    
+
     #/!\ ptr, size and int_ptr properties should be redefined in child classes.
     _DEBUG=False
 
@@ -22,7 +22,7 @@ class Buffer(object):
         assert (size is not None)
         self._size = size
         if self._DEBUG:
-            print 'Initializing {} of size {}.'.format(self.__class__.__name__, bytes2str(size))
+            print('Initializing {} of size {}.'.format(self.__class__.__name__, bytes2str(size)))
         try:
             super(Buffer, self).__init__(size=size, **kwds)
         except TypeError:
@@ -30,9 +30,9 @@ class Buffer(object):
                 super(Buffer, self).__init__(**kwds)
             except:
                 pass
-    def __del__(self):    
+    def __del__(self):
         if self._DEBUG:
-            print 'Releasing {}[{}].'.format(self.__class__.__name__, id(self))
+            print('Releasing {}[{}].'.format(self.__class__.__name__, id(self)))
 
     @abstractmethod
     def release(self):
@@ -44,15 +44,15 @@ class Buffer(object):
     @classmethod
     def from_int_ptr(cls, int_ptr_value, **kargs):
         """
-        Constructs a pyopencl handle from a C-level pointer (given as the integer int_ptr_value). 
-        If the previous owner of the object owns the handle and will not release it, 
+        Constructs a pyopencl handle from a C-level pointer (given as the integer int_ptr_value).
+        If the previous owner of the object owns the handle and will not release it,
         on can set retain to False, to effectively transfer ownership.
         Setting retain to True should increase an implementation specific reference counter.
         The buffer will be freed when buffer reference counter is 0.
         """
         msg='Buffer.from_int_ptr() is not implemented and should never have been called.'
         raise RuntimeError(msg)
-    
+
     @abstractmethod
     def aligned_view(self, alignment, size=None):
         """
@@ -61,7 +61,7 @@ class Buffer(object):
         and has now given size.
         """
         pass
-    
+
     def get_buffer(self):
         return self
     def get_size(self):
@@ -86,7 +86,7 @@ class PooledBuffer(Buffer):
         from hysop.core.memory.mempool import MemoryPool
         super(PooledBuffer,self).__init__(size=size, **kwds)
         if PooledBuffer._DEBUG:
-            print 'pooled buffer size={}, alignment={}, real_size={}, id={}, (src_buffer={}, src_id={})'.format(size,alignment,alloc_sz,id(self),type(buf),id(buf))
+            print('pooled buffer size={}, alignment={}, real_size={}, id={}, (src_buffer={}, src_id={})'.format(size,alignment,alloc_sz,id(self),type(buf),id(buf)))
         assert alloc_sz >= size + alignment - 1
         check_instance(pool, MemoryPool)
         check_instance(buf, Buffer)
@@ -118,14 +118,14 @@ class PooledBuffer(Buffer):
         Tells the memory pool that this buffer has no longer to be held.
         """
         if PooledBuffer._DEBUG:
-            print 'pooled buffer release() (id={})'.format(id(self))
+            print('pooled buffer release() (id={})'.format(id(self)))
         self._pool.free(self._buf, self._size)
         self._buf     = None
         self._bufview = None
-    
+
     def __del__(self):
         if PooledBuffer._DEBUG:
-            print 'pooled buffer __del__() (id={})'.format(id(self))
+            print('pooled buffer __del__() (id={})'.format(id(self)))
         if hasattr(self, '_buf') and (self._buf is not None):
             self.release()
 
diff --git a/hysop/core/memory/memory_request.py b/hysop/core/memory/memory_request.py
index 747f7b870597e37e1e78bfe46a380c972ee79bfb..6ff03f07dae04b6290669e9fb5a3d578253884e5 100644
--- a/hysop/core/memory/memory_request.py
+++ b/hysop/core/memory/memory_request.py
@@ -1,6 +1,7 @@
 from abc import ABCMeta, abstractmethod
+import copy
+import numpy as np
 
-from hysop.deps import np, copy
 from hysop.tools.types import check_instance, first_not_None
 from hysop.tools.units import bytes2str
 from hysop.tools.numerics import get_dtype
@@ -8,15 +9,16 @@ from hysop.tools.misc import prod
 from hysop.core.arrays.array_backend import ArrayBackend
 from hysop.constants import HYSOP_BOOL, Backend
 
+
 class MemoryRequest(object):
     """Memory request that can be requested in get_work_properties()"""
 
     __slots__ = ('backend', 'alignment', 'dtype', 'size', 'shape', 'nb_components', 'id')
 
     def __init__(self, backend,
-                       size=None, shape=None,
-                       dtype=None, alignment=None,
-                       nb_components=1):
+                 size=None, shape=None,
+                 dtype=None, alignment=None,
+                 nb_components=1):
         """
         Creates a memory request to be served from given backend.
 
@@ -29,14 +31,14 @@ class MemoryRequest(object):
 
         Every np.bool request is converted to HYSOP_BOOL dtype (ie. some integer type).
         """
-        if dtype == np.bool:
+        if dtype == np.bool_:
             dtype = HYSOP_BOOL
 
         check_instance(backend, ArrayBackend)
-        check_instance(size,          (int,long,np.integer),   allow_none=True)
-        check_instance(alignment,     (int,long,np.integer),   allow_none=True)
-        check_instance(nb_components, (int,long,np.integer),   allow_none=True)
-        check_instance(shape,         (tuple,list,np.ndarray), allow_none=True)
+        check_instance(size,          (int, np.integer),   allow_none=True)
+        check_instance(alignment,     (int, np.integer),   allow_none=True)
+        check_instance(nb_components, (int, np.integer),   allow_none=True)
+        check_instance(shape,         (tuple, list, np.ndarray), allow_none=True)
 
         if (dtype is None):
             dtype = np.dtype(np.uint8)
@@ -49,57 +51,57 @@ class MemoryRequest(object):
         if (size is not None) and (shape is not None):
             pass
         if (size is not None):
-            if size<1:
+            if size < 1:
                 raise ValueError('size < 1.')
             shape = (size,)
         elif (shape is not None):
             size = 1
             for s in shape:
-                if s<1:
+                if s < 1:
                     raise ValueError('shape {} < 1'.format(shape))
                 size *= s
         else:
             raise ValueError('size and shape have not been specified.')
 
-        dtype_alignment    = self.min_dtype_alignment(dtype)
+        dtype_alignment = self.min_dtype_alignment(dtype)
         hardware_alignment = self.min_hardware_alignment(backend)
-        alignment          = first_not_None(alignment, hardware_alignment)
+        alignment = first_not_None(alignment, hardware_alignment)
 
-        min_alignment   = min(hardware_alignment, dtype_alignment, alignment)
-        max_alignment   = max(hardware_alignment, dtype_alignment, alignment)
+        min_alignment = min(hardware_alignment, dtype_alignment, alignment)
+        max_alignment = max(hardware_alignment, dtype_alignment, alignment)
         if (max_alignment % min_alignment != 0):
-            msg =  'Incompatible aligmnents, specified an alignment of {} '
+            msg = 'Incompatible aligmnents, specified an alignment of {} '
             msg += 'but given dtype should be aligned on {} bytes.'
             msg = msg.format(alignment, dtype_alignment)
             raise ValueError(msg)
         alignment = max_alignment
 
         if prod(shape) != size:
-            msg='Shape does not match size (size={}, prod(shape)={}).'
+            msg = 'Shape does not match size (size={}, prod(shape)={}).'
             msg.format(size, prod(shape))
             raise ValueError(msg)
         if alignment <= 0:
             msg = 'Alignment should be positive (got {}).'.format(alignment)
             raise ValueError(alignment)
-        if (alignment&(alignment-1)!=0):
+        if (alignment & (alignment-1) != 0):
             msg = 'Alignment is not a power of two (got {}).'.format(alignment)
             raise ValueError(alignment)
 
-        self.backend    = backend
-        self.alignment  = alignment
-        self.dtype      = dtype
-        self.size       = size
-        self.shape      = tuple(shape)
+        self.backend = backend
+        self.alignment = alignment
+        self.dtype = dtype
+        self.size = size
+        self.shape = tuple(shape)
         self.nb_components = nb_components
 
-        max_bytes      = self.max_bytes()
+        max_bytes = self.max_bytes()
         max_alloc_size = self.max_alloc_size(backend)
         if (max_bytes > max_alloc_size):
-            msg='Memory request size {} ({}B) exceeds maximal allocatable size {} ({}B) '
-            msg+='for backend {}.'
-            msg=msg.format(bytes2str(max_bytes), max_bytes,
-                           bytes2str(max_alloc_size), max_alloc_size,
-                           backend.full_tag)
+            msg = 'Memory request size {} ({}B) exceeds maximal allocatable size {} ({}B) '
+            msg += 'for backend {}.'
+            msg = msg.format(bytes2str(max_bytes), max_bytes,
+                             bytes2str(max_alloc_size), max_alloc_size,
+                             backend.full_tag)
             raise RuntimeError(msg)
 
     def data_bytes_per_component(self):
@@ -107,26 +109,31 @@ class MemoryRequest(object):
         Bytes to be allocated per components.
         """
         return self.size * self.bytes_per_element(self.dtype)
+
     def min_bytes(self):
         """
         Minimum number bytes that may be allocated for all components.
         """
         return self.nb_components*(self.data_bytes_per_component())
+
     def max_bytes(self):
         """
         Real number bytes that will be allocated for all components.
         """
         return self.nb_components*(self.data_bytes_per_component() + self.alignment - 1)
+
     def max_bytes_per_component(self):
         """
         Real number bytes that will be allocated for all components.
         """
         return (self.data_bytes_per_component() + self.alignment - 1)
-    def min_dtype_alignment(self,dtype):
+
+    def min_dtype_alignment(self, dtype):
         """
         Returns the minimum alignement of the allocated buffer (in bytes).
         """
         return self.bytes_per_element(dtype)
+
     def min_hardware_alignment(self, backend):
         """
         Returns the minimum alignement to be hardware aligned (in bytes).
@@ -134,12 +141,14 @@ class MemoryRequest(object):
         if backend.kind == Backend.OPENCL:
             return backend.cl_env.device.mem_base_addr_align
         else:
-            return 8 # 64 bits by default
-    def bytes_per_element(self,dtype):
+            return 8  # 64 bits by default
+
+    def bytes_per_element(self, dtype):
         """
         Returns the size in bytes of the allocated data type.
         """
         return dtype.itemsize
+
     def max_alloc_size(self, backend):
         """
         Returns the maximal alloc size supported by backend.
@@ -148,29 +157,29 @@ class MemoryRequest(object):
 
     @classmethod
     def cartesian_dfield_like(cls, name, dfield,
-            nb_components=None, initial_values=None, dtype=None,
-            grid_resolution=None, ghosts=None,
-            backend=None, is_read_only=None):
+                              nb_components=None, initial_values=None, dtype=None,
+                              grid_resolution=None, ghosts=None,
+                              backend=None, is_read_only=None):
         from hysop.fields.cartesian_discrete_field import CartesianDiscreteScalarFieldView
         check_instance(dfield,  CartesianDiscreteScalarFieldView)
 
         ghosts = first_not_None(ghosts, dfield.ghosts)
         if np.isscalar(ghosts):
-            ghosts=(ghosts,)*dfield.dim
+            ghosts = (ghosts,)*dfield.dim
         elif isinstance(ghosts, bool):
             ghosts = dfield.ghosts if (ghosts is True) else (0,)*dfield.dim
-        assert len(ghosts)==dfield.dim
+        assert len(ghosts) == dfield.dim
         ghosts = np.asarray(ghosts)
 
         (dfield, request, request_id) = dfield.tmp_dfield_like(name=name, backend=backend,
-                nb_components=nb_components, initial_values=initial_values, dtype=dtype,
-                grid_resolution=grid_resolution, ghosts=ghosts, is_read_only=is_read_only)
+                                                               nb_components=nb_components, initial_values=initial_values, dtype=dtype,
+                                                               grid_resolution=grid_resolution, ghosts=ghosts, is_read_only=is_read_only)
 
         return (dfield, request, request_id)
 
     def stuple(self):
-        if not hasattr(self,'id'):
-            id='None'
+        if not hasattr(self, 'id'):
+            id = 'None'
         else:
             id = self.id
         size = bytes2str(self.min_bytes(), decimal=False)
@@ -178,20 +187,20 @@ class MemoryRequest(object):
         return tuple(map(str, ret))
 
     def __str__(self):
-        if not hasattr(self,'id'):
-            id='None'
+        if not hasattr(self, 'id'):
+            id = 'None'
         else:
             id = self.id
-        msg= 'request of size {:<9} (ncomp={}, shape={:<12}, '
-        msg+='dtype={:<8}, align={:<2}, id={})'
-        msg=msg.format(bytes2str(self.min_bytes(), decimal=False),
-                self.nb_components, self.shape,
-                self.dtype, self.alignment, id)
+        msg = 'request of size {:<9} (ncomp={}, shape={:<12}, '
+        msg += 'dtype={:<8}, align={:<2}, id={})'
+        msg = msg.format(bytes2str(self.min_bytes(), decimal=False),
+                         self.nb_components, self.shape,
+                         self.dtype, self.alignment, id)
         return msg
 
     @classmethod
     def empty_like(cls, a, backend=None, alignment=None,
-            dtype=None, size=None, shape=None, nb_components=None):
+                   dtype=None, size=None, shape=None, nb_components=None):
 
         if hasattr(a, 'backend'):
             backend = first_not_None(backend, a.backend)
@@ -213,8 +222,8 @@ class MemoryRequest(object):
             nb_components = first_not_None(nb_components, a.nb_components)
 
         return MemoryRequest(backend=backend, alignment=alignment,
-                dtype=dtype, size=size, shape=shape,
-                nb_components=nb_components)
+                             dtype=dtype, size=size, shape=shape,
+                             nb_components=nb_components)
 
     def __call__(self, op=None, request_identifier=None):
         """
@@ -229,9 +238,10 @@ class OperatorMemoryRequests(object):
     """
     Set of memory requests originating from one operator, sorted by backend.
     """
+
     def __init__(self, operator):
         self._operator = operator
-        self._requests_per_backend  = {}
+        self._requests_per_backend = {}
         self._requests_per_identifier = {}
 
     def push_mem_request(self, request_identifier, mem_request):
@@ -241,7 +251,7 @@ class OperatorMemoryRequests(object):
             cls = mem_request.__class__.__name__
             raise ValueError('Input is not a MemoryRequest (got a {}).'.format(cls))
         backend = mem_request.backend
-        mem_request.id  = request_identifier
+        mem_request.id = request_identifier
         if backend not in self._requests_per_backend:
             self._requests_per_backend[backend] = []
         self._requests_per_backend[backend].append(mem_request)
@@ -249,7 +259,7 @@ class OperatorMemoryRequests(object):
         mem_request.id = request_identifier
 
     def min_bytes_to_allocate(self, backend):
-        return sum( [req.max_bytes() for req in self._requests_per_backend[backend]] )
+        return sum([req.max_bytes() for req in self._requests_per_backend[backend]])
 
     def __call__(self):
         """
@@ -265,6 +275,7 @@ class MultipleOperatorMemoryRequests(object):
     """
     Set of memory requests originating from one or more operators.
     """
+
     def __init__(self):
         self._allocated_buffers = {}
         self._all_requests_per_backend = {}
@@ -274,33 +285,33 @@ class MultipleOperatorMemoryRequests(object):
         for mem_requests in requests:
             if isinstance(mem_requests, MultipleOperatorMemoryRequests):
                 for backend, op_requests in \
-                        mem_requests._all_requests_per_backend.iteritems():
+                        mem_requests._all_requests_per_backend.items():
                     if backend not in self._all_requests_per_backend.keys():
                         self._all_requests_per_backend[backend] = {}
-                    for (op, op_reqs) in op_requests.iteritems():
+                    for (op, op_reqs) in op_requests.items():
                         if op in self._all_requests_per_backend[backend].keys():
-                            msg='Operator {} has already requested memory.'.format(op)
+                            msg = 'Operator {} has already requested memory.'.format(op)
                             raise ValueError(msg)
                         self._all_requests_per_backend[backend][op] = op_reqs
             elif isinstance(mem_requests, OperatorMemoryRequests):
                 operator = mem_requests._operator
-                for (backend, requests) in mem_requests._requests_per_backend.iteritems():
+                for (backend, requests) in mem_requests._requests_per_backend.items():
                     if backend not in self._all_requests_per_backend.keys():
                         self._all_requests_per_backend[backend] = {}
                     if operator in self._all_requests_per_backend[backend].keys():
-                        msg='Operator {} has already requested memory.'.format(operator)
+                        msg = 'Operator {} has already requested memory.'.format(operator)
                         raise ValueError(msg)
                     self._all_requests_per_backend[backend][operator] = requests
             else:
                 cls = mem_requests.__class__
-                msg='Input is not an OperatorMemoryRequests (got a {}).'.format(cls)
+                msg = 'Input is not an OperatorMemoryRequests (got a {}).'.format(cls)
                 raise ValueError(msg)
         return self
 
     def operators(self):
         ops = []
         for requests in self._all_requests_per_backend.values():
-            ops += requests.keys()
+            ops += list(requests.keys())
         return ops
 
     def __iadd__(self, other):
@@ -311,8 +322,8 @@ class MultipleOperatorMemoryRequests(object):
     def min_bytes_to_allocate(self, backend):
         max_bytes = 0
         for mem_requests in self._all_requests_per_backend[backend].values():
-            req_bytes = sum( [req.max_bytes() for req in mem_requests] )
-            max_bytes = max(req_bytes,max_bytes)
+            req_bytes = sum([req.max_bytes() for req in mem_requests])
+            max_bytes = max(req_bytes, max_bytes)
         return max_bytes
 
     def allocate(self, allow_subbuffers):
@@ -337,64 +348,64 @@ class MultipleOperatorMemoryRequests(object):
 
         if allow_subbuffers:
             data = backend.empty(shape=(total_bytes,), dtype=np.uint8)
-            for (op,requests) in op_requests.iteritems():
+            for (op, requests) in op_requests.items():
                 check_instance(requests, list, values=MemoryRequest)
                 start_idx, end_idx = 0, 0
                 for req in requests:
                     req_views = []
                     size = req.data_bytes_per_component()
-                    for i in xrange(req.nb_components):
+                    for i in range(req.nb_components):
                         # align on offset and not on pointer anymore (see issue #1)
                         align_offset = (-start_idx % req.alignment)
 
                         start_idx += align_offset
-                        end_idx    = start_idx + size
+                        end_idx = start_idx + size
 
                         view = data[start_idx:end_idx].view(dtype=req.dtype).reshape(req.shape)
                         req_views.append(view)
 
                         if (view.base is not data.base):
-                            msg  = 'FATAL ERROR: Could not create views on data because base '
+                            msg = 'FATAL ERROR: Could not create views on data because base '
                             msg += 'differs on backend {}.'
-                            msg=msg.format(backend.kind)
+                            msg = msg.format(backend.kind)
                             raise RuntimeError(msg)
 
                         if (view.int_ptr != data.int_ptr + start_idx):
-                            msg='FATAL ERROR: Point arithmetic is wrong.'
-                            msg+=' Expected ptr: {}'
-                            msg+=' Actual ptr:   {}'
-                            msg=msg.format(data.int_ptr+start_idx, view.int_ptr)
+                            msg = 'FATAL ERROR: Point arithmetic is wrong.'
+                            msg += ' Expected ptr: {}'
+                            msg += ' Actual ptr:   {}'
+                            msg = msg.format(data.int_ptr+start_idx, view.int_ptr)
                             raise RuntimeError(msg)
 
                         if ((view.int_ptr-data.int_ptr) % req.alignment) != 0:
-                            msg='FATAL ERROR: Could not provide requested offset alignment.'
-                            msg=msg.format(req.alignment)
+                            msg = 'FATAL ERROR: Could not provide requested offset alignment.'
+                            msg = msg.format(req.alignment)
                             raise RuntimeError(msg)
 
                         start_idx = end_idx
                     if (op not in views):
                         views[op] = {}
-                    if req.nb_components>=1:
+                    if req.nb_components >= 1:
                         views[op][req.id] = tuple(req_views)
                 assert end_idx <= total_bytes
         else:
             buffer_sizes = []
             ordered_requests = {}
-            for (op,requests) in op_requests.iteritems():
+            for (op, requests) in op_requests.items():
                 assert op not in ordered_requests
                 check_instance(requests, list, values=MemoryRequest)
                 op_buffer_sizes = ()
                 op_reqs = ()
                 for req in requests:
                     nbytes = req.max_bytes_per_component()
-                    for i in xrange(req.nb_components):
+                    for i in range(req.nb_components):
                         op_buffer_sizes += (nbytes,)
-                        op_reqs         += (req,)
+                        op_reqs += (req,)
                 idx = np.argsort(op_buffer_sizes, kind='mergesort')[::-1]
                 op_buffer_sizes = tuple(op_buffer_sizes[i] for i in idx)
-                op_sorted_reqs  = tuple(op_reqs[i] for i in idx)
-                for (i,size) in enumerate(op_buffer_sizes):
-                    if (i>=len(buffer_sizes)):
+                op_sorted_reqs = tuple(op_reqs[i] for i in idx)
+                for (i, size) in enumerate(op_buffer_sizes):
+                    if (i >= len(buffer_sizes)):
                         buffer_sizes.append(size)
                     else:
                         buffer_sizes[i] = max(buffer_sizes[i], size)
@@ -405,10 +416,10 @@ class MultipleOperatorMemoryRequests(object):
                 return
 
             buffers = tuple(backend.empty(shape=(nbytes,), dtype=np.uint8)
-                                                    for nbytes in buffer_sizes)
+                            for nbytes in buffer_sizes)
 
-            for (op, requests) in ordered_requests.iteritems():
-                assert len(buffers)>= len(requests)
+            for (op, requests) in ordered_requests.items():
+                assert len(buffers) >= len(requests)
                 views.setdefault(op, {})
                 old_req = None
                 for (buf, req) in zip(buffers, requests):
@@ -436,18 +447,18 @@ class MultipleOperatorMemoryRequests(object):
 
     def get_buffer(self, operator, request_identifier, handle=False):
         if not self._allocated:
-            msg='Memory request have not been allocated yet.'
+            msg = 'Memory request have not been allocated yet.'
             raise RuntimeError(msg)
         if operator not in self._allocated_buffers:
-            msg='Operator {} did not request any extra memory. \nOperators that requested memory are:\n  *{}'
-            msg=msg.format(operator, '\n  *'.join(str(op)
-                for op in self._allocated_buffers.keys()))
+            msg = 'Operator {} did not request any extra memory. \nOperators that requested memory are:\n  *{}'
+            msg = msg.format(operator, '\n  *'.join(str(op)
+                                                    for op in self._allocated_buffers.keys()))
             raise RuntimeError(msg)
         op_buffers = self._allocated_buffers[operator]
         if request_identifier not in op_buffers:
-            msg='Unknown request id {} for operator {}.'
-            msg+='\nValid identifiers are: ' + ','.join(str(op) for op in op_buffers.keys())
-            msg=msg.format(request_identifier, operator)
+            msg = 'Unknown request id {} for operator {}.'
+            msg += '\nValid identifiers are: ' + ','.join(str(op) for op in op_buffers.keys())
+            msg = msg.format(request_identifier, operator)
             raise ValueError(msg)
         buffers = op_buffers[request_identifier]
         if handle:
@@ -461,55 +472,55 @@ class MultipleOperatorMemoryRequests(object):
     def sreport(self):
         all_requests = {}
         totals = {}
-        for (backend, backend_requests) in self._all_requests_per_backend.iteritems():
-            total=0
+        for (backend, backend_requests) in self._all_requests_per_backend.items():
+            total = 0
             for op in sorted(backend_requests.keys(), key=lambda op: getattr(op, 'name', None)):
                 op_requests = backend_requests[op]
                 sop_request = all_requests.setdefault(backend, {}).setdefault(op, [])
-                local_total=0
+                local_total = 0
                 try:
-                    opname=u'{}'.format(op.pretty_name.decode('utf-8'))
+                    opname = '{}'.format(op.pretty_name)
                 except AttributeError:
                     opname = None
                 for req in op_requests:
                     sop_request.append((opname,)+req.stuple())
-                    local_total+=req.max_bytes()
-                if local_total>total:
-                    total=local_total
+                    local_total += req.max_bytes()
+                if local_total > total:
+                    total = local_total
             totals[backend] = total
 
         if len(all_requests):
             sizes = {}
-            template = u'\n'
-            titles=(u'OPERATOR', u'REQUEST_ID', u'SIZE', u'COMPONENTS', u'SHAPE', u'DTYPE', u'ALIGNMENT')
-            for (i,k) in enumerate(titles):
-                k=k.lower()
-                template += u'    '
+            template = '\n'
+            titles = ('OPERATOR', 'REQUEST_ID', 'SIZE', 'COMPONENTS', 'SHAPE', 'DTYPE', 'ALIGNMENT')
+            for (i, k) in enumerate(titles):
+                k = k.lower()
+                template += '    '
                 size = max(len(req[i]) for breqs in all_requests.values()
-                                       for reqs in breqs.values() for req in reqs)
+                           for reqs in breqs.values() for req in reqs)
                 size = max(size, len(k))
-                name=k+u'_len'
+                name = k+'_len'
                 sizes[name] = size
-                template += u'{:'+(u'<' if i==0 else u'^')+u'{'+name+u'}}'
+                template += '{:'+('<' if i == 0 else '^')+'{'+name+'}}'
 
-            ss=''
-            for (backend, backend_srequests) in all_requests.iteritems():
+            ss = ''
+            for (backend, backend_srequests) in all_requests.items():
                 total = totals[backend]
                 kind = backend.kind
                 if (kind == Backend.OPENCL):
-                    precision = u' on device {}'.format(backend.device.name.strip())
+                    precision = ' on device {}'.format(backend.device.name.strip())
                 else:
-                    precision = u''
-                ss+= u'\n {}{}:'.format(backend.full_tag, precision)
-                ss+= template.format(*titles, **sizes)
+                    precision = ''
+                ss += '\n {}{}:'.format(backend.full_tag, precision)
+                ss += template.format(*titles, **sizes)
                 for op in sorted(backend_srequests.keys(), key=lambda op: getattr(op, 'name', None)):
                     sop_reqs = backend_srequests[op]
                     for sreq in sop_reqs:
-                        ss+= template.format(*sreq, **sizes)
-                ss +=u'\n  Total extra work buffers requested: {} ({})'.format(
-                        bytes2str(total,decimal=False),
-                        bytes2str(total,decimal=True))
-                ss += u'\n'
+                        ss += template.format(*sreq, **sizes)
+                ss += '\n  Total extra work buffers requested: {} ({})'.format(
+                    bytes2str(total, decimal=False),
+                    bytes2str(total, decimal=True))
+                ss += '\n'
             return ss[1:-1]
         else:
-            return u' No extra buffers have been requested.'
+            return ' No extra buffers have been requested.'
diff --git a/hysop/core/memory/mempool.py b/hysop/core/memory/mempool.py
index 75e5f62afcded9600b71e1da433de6156624814e..b16bb1c39139df54d61c91c77dee6364bc1bb734 100644
--- a/hysop/core/memory/mempool.py
+++ b/hysop/core/memory/mempool.py
@@ -1,6 +1,6 @@
+import math
 
 from abc import ABCMeta, abstractmethod
-from hysop.deps import math, six
 from hysop.constants import __VERBOSE__, __DEBUG__
 from hysop.backend import __HAS_OPENCL_BACKEND__, __HAS_CUDA_BACKEND__
 from hysop.tools.units import bytes2str, time2str
@@ -22,21 +22,23 @@ else:
             p+=1
             x>>=1
         return p-1
-        
 
-class MemoryPool(object):
+
+class MemoryPool(object, metaclass=ABCMeta):
     """
     pyopencl/pycuda like memory pool extended to be compatible for all backends.
     """
 
-    __metaclass__ = ABCMeta   
+    def __new__(cls, name, allocator, max_alloc_bytes=None,
+                mantissa_bits=4, verbose=None, **kwds):
+        return super(MemoryPool, cls).__new__(cls, **kwds)
 
     def __init__(self, name, allocator, max_alloc_bytes=None,
                 mantissa_bits=4, verbose=None, **kwds):
         """
         Builds a MemoryPool from an allocator.
         Provides an allocator like interface.
-        
+
         Parameters
         ----------
         name: str
@@ -47,7 +49,7 @@ class MemoryPool(object):
             turn on or off allocator messages (defaults to hysop verbosity configuration)
         max_alloc_bytes: int
             maximum number of bytes this pool will try to allocate before raising a MemoryError.
-            default value is: 
+            default value is:
                 -80% of physical host memory if allocator is a HostAllocator.
                 -None (no limit) if allocator is a DeviceAllocator
         mantissa_bits: int
@@ -59,9 +61,9 @@ class MemoryPool(object):
         An allocator that fails to allocate memory should raise a MemoryError
         to expect the pool to work correctly.
 
-        Some allocators may fail to raise a MemoryError when there is no more memory 
+        Some allocators may fail to raise a MemoryError when there is no more memory
         left and will just trigger a SIGKILL from operating system or deadlock instead.
-        To avoid such situations, put an artificial software allocation imit trough the 
+        To avoid such situations, put an artificial software allocation imit trough the
         max_alloc_bytes parameter. Exceeding this allocation limit will throw a proper
         MemoryError.
 
@@ -71,18 +73,16 @@ class MemoryPool(object):
             *OpenCl Nvidia 375.20 driver on ubuntu 16.04        => work as expected
         """
         super(MemoryPool, self).__init__(**kwds)
-            
+
         check_instance(allocator, AllocatorBase)
         default_limit = int(0.80*virtual_memory().total) if allocator.is_on_host() else None
 
         max_alloc_bytes = max_alloc_bytes or default_limit
         verbose = verbose if isinstance(verbose,bool) else __DEBUG__
-        
-        if isinstance(name, unicode):
-            name = str(name)
+
         check_instance(name, str)
         check_instance(mantissa_bits, int)
-        check_instance(max_alloc_bytes,(int,long), allow_none=True)
+        check_instance(max_alloc_bytes, int, allow_none=True)
         check_instance(verbose, bool)
 
         self.name      = name.strip()
@@ -91,10 +91,10 @@ class MemoryPool(object):
 
         self.bin_nr_to_bin = {}
         self.alloc_statistics = {}
-    
+
         self.mantissa_bits = mantissa_bits
         self.mantissa_mask = (1 << mantissa_bits) - 1
-        
+
         self.allocated_bytes = 0
         self.max_alloc_bytes = max_alloc_bytes
 
@@ -107,14 +107,14 @@ class MemoryPool(object):
 
         self.active_blocks = 0
         self.stop_holding_flag = False
-        
+
     @abstractmethod
     def _wrap_buffer(self, buf, alloc_sz, size, alignment):
         """
         Wrap allocated buffer into a PooledBuffer.
         """
         pass
-    
+
     def may_alloc(self, size):
         """
         Return true if this pool may allocate a buffer of given size (in bytes).
@@ -164,13 +164,13 @@ class MemoryPool(object):
         else:
             ones = 0
             head = ((1 << mantissa_bits) | mantissa) >> -exp_minus_mbits
-        
+
         assert not (ones & head)
         return head | ones
 
     def stop_holding(self):
         """
-        Tells the pool to stop holding back freed buffer 
+        Tells the pool to stop holding back freed buffer
         and direclty free all unused buffers.
         """
         self.stop_holding_flag = True
@@ -178,7 +178,7 @@ class MemoryPool(object):
 
     def free_held(self):
         """
-        Free all unused held buffers but does not set 
+        Free all unused held buffers but does not set
         the stop_holding_flag flag.
         """
         for _ in self._try_to_free_memory():
@@ -190,7 +190,7 @@ class MemoryPool(object):
         Returns the number of held blocks.
         """
         return sum(len(bin_list)
-                for bin_list in six.itervalues(self.bin_nr_to_bin))
+                for bin_list in self.bin_nr_to_bin.values())
 
     def header(self):
         """
@@ -207,23 +207,23 @@ class MemoryPool(object):
     def allocate(self, nbytes, alignment=None):
         """
         Allocate a buffer of size nbytes and given alignment.
-        The real size of the allocated buffer may be greater and wasted memory mostly 
+        The real size of the allocated buffer may be greater and wasted memory mostly
         depend on configured mantissa_bits (default is 2).
-        The returned buffer is an instance of hysop.core.memory.buffer.PooledBuffer. 
+        The returned buffer is an instance of hysop.core.memory.buffer.PooledBuffer.
         While a reference to the returned object is kept, it won't return to the pool.
         """
 
         alignment = alignment or 1
         assert alignment>0
         assert not (alignment & alignment-1), 'alignment is not a power of 2.'
-        
+
         # we may need more memory to align returned ptr
         min_alloc_size = nbytes + alignment - 1
-        
+
         # maybe bin allocation size will be sufficient
         bin_nr   = self.bin_number(nbytes)
         alloc_sz = self.alloc_size(bin_nr)
-        
+
         # else we choose a bin that can provide min_alloc_size bytes
         if (alloc_sz < min_alloc_size):
             bin_nr   = self.bin_number(min_alloc_size)
@@ -231,7 +231,7 @@ class MemoryPool(object):
 
         bin_list = self.bin_nr_to_bin.setdefault(bin_nr, [])
         assert self.bin_number(alloc_sz) == bin_nr
-        
+
         size = nbytes
         stat_nr  = bitlog2(size)
         statistic = self.alloc_statistics.setdefault(stat_nr, PoolAllocationStatistics())
@@ -240,16 +240,14 @@ class MemoryPool(object):
         if bin_list:
             if verbose:
                 msg='{} allocation request of size {} served from bin {}.'
-                # which contained {} entries.'
                 msg=msg.format(self.header(),
                         bytes2str(size,decimal=False),
                         bin_nr)
-                        #len(bin_list))
-                print msg
+                print(msg)
             self.active_blocks += 1
             statistic.push_reuse(alloc_sz)
             return self._wrap_buffer(bin_list.pop(), alloc_sz, size, alignment)
-        
+
         if self._may_alloc(alloc_sz):
             try:
                 with Timer() as t:
@@ -259,17 +257,17 @@ class MemoryPool(object):
                 statistic.push_alloc(alloc_sz, t.interval)
                 if verbose:
                     msg='{} allocated new block of size {} to serve a {} request.'
-                    msg=msg.format(self.header(), 
+                    msg=msg.format(self.header(),
                             bytes2str(alloc_sz, decimal=False),
-                            bytes2str(size, decimal=False)) 
-                    print msg
+                            bytes2str(size, decimal=False))
+                    print(msg)
 
                 return self._wrap_buffer(result, alloc_sz, size, alignment)
             except MemoryError as e:
                 if verbose:
                     msg='{} allocation of size {} failed, freeing unused blocks.'
                     msg=msg.format(self.header(), bytes2str(alloc_sz, decimal=False))
-                    print msg
+                    print(msg)
         else:
             prefix = ' '*len(self.header())
             allocated_bytes = self.allocated_bytes
@@ -286,8 +284,8 @@ class MemoryPool(object):
                            bytes2str(max_alloc_bytes, decimal=False),
                            bytes2str(available, decimal=False),
                            p=prefix)
-            print msg
-        
+            print(msg)
+
         freed_bytes = 0
         try_last_alloc = False
         for fb in self._try_to_free_memory():
@@ -311,10 +309,10 @@ class MemoryPool(object):
                            bytes2str(max_alloc_bytes, decimal=False),
                            bytes2str(available, decimal=False),
                            p=prefix)
-                    print msg
+                    print(msg)
             else:
                 freed_bytes += fb
-            
+
             if ((freed_bytes>=alloc_sz) and may_alloc) or try_last_alloc:
                 try:
                     with Timer() as t:
@@ -325,18 +323,18 @@ class MemoryPool(object):
                     if verbose:
                        msg='{} allocation succeded after block destruction.'
                        msg=msg.format(self.header())
-                       print msg
+                       print(msg)
                     return (self, result, alloc_sz, size, alignment)
                 except MemoryError:
                     pass
-        
+
         msg='{} no more free blocks left, allocation failed.'
         msg=msg.format(self.header())
-                
-        print msg
-        print 
-        print memory_repport()
-        
+
+        print(msg)
+        print()
+        print(memory_repport())
+
         if statistic.nallocs==0:
             self.alloc_statistics.pop(stat_nr)
         self.print_allocation_report()
@@ -353,25 +351,25 @@ class MemoryPool(object):
 
         if not self.stop_holding_flag:
             self.bin_nr_to_bin.setdefault(bin_nr, []).append(buf)
-            
+
             if self.verbose:
                 msg = '{} block of size {} returned to bin {}.'# wich now contains {} entries.'
                 msg = msg.format(self.header(), bytes2str(size,decimal=False), bin_nr)
                         #len(self.bin_nr_to_bin[bin_nr]))
-                print msg
+                print(msg)
             statistics.push_return(size)
         else:
             if self.verbose:
                 msg = '{} freeing block of size {} in bin {}.'
                 msg = msg.format(self.header(), bytes2str(size), bin_nr)
-                print msg
+                print(msg)
             with Timer() as t:
                 self.allocator.free(buf)
             self.allocated_bytes -= size
             statistics.push_free(size, t.interval)
 
     def _try_to_free_memory(self):
-        for bin_nr, bin_list in six.iteritems(self.bin_nr_to_bin):
+        for bin_nr, bin_list in self.bin_nr_to_bin.items():
             if not bin_list:
                 continue
             size = bin_list[0].size
@@ -382,7 +380,7 @@ class MemoryPool(object):
                 if self.verbose:
                     msg = '{} freeing block of size {}.'
                     msg=msg.format(self.header(), bytes2str(size,decimal=False))
-                    print msg
+                    print(msg)
                 with Timer() as t:
                     self.allocator.free(block)
                 self.allocated_bytes -= size
@@ -397,7 +395,7 @@ class MemoryPool(object):
         """
 
         stats = self.alloc_statistics.values()
-        
+
         nrequests = sum(v.nrequests for v in stats)
         nallocs   = sum(v.nallocs   for v in stats)
         nreuses   = sum(v.nreuses   for v in stats)
@@ -412,13 +410,13 @@ class MemoryPool(object):
         active_blocks   = self.active_blocks
         held_blocks     = self.held_blocks
         allocated_bytes = self.allocated_bytes
-        
+
         assert active_blocks+held_blocks == nallocs-nfrees
         assert allocated_bytes == ballocs - bfrees
         assert active_blocks == nallocs+nreuses-nreturns
         assert held_blocks   == nreturns-nreuses-nfrees
         assert nrequests     == nallocs + nreuses
-        
+
         width=0
         for n in [active_blocks,held_blocks,nrequests,nallocs,nreuses,nfrees]:
             if n==0:
@@ -429,26 +427,26 @@ class MemoryPool(object):
         ss += '\n    {:>{width}} blocks active     {} ({})'.format(active_blocks,
                 bytes2str(ballocs+breuses-breturns, decimal=False),
                 bytes2str(ballocs+breuses-breturns), width=width)
-        ss += '\n    {:>{width}} blocks held       {} ({})'.format(held_blocks, 
+        ss += '\n    {:>{width}} blocks held       {} ({})'.format(held_blocks,
                 bytes2str(breturns-breuses-bfrees, decimal=False),
                 bytes2str(breturns-breuses-bfrees), width=width)
         ss += '\n'
         ss += '\n    {:>{width}} blocks requested  {} ({})'.format(nrequests,
                 bytes2str(ballocs+breuses, decimal=False),
                 bytes2str(ballocs+breuses), width=width)
-        ss += '\n    {:>{width}} blocks reused     {} ({})'.format(nreuses, 
+        ss += '\n    {:>{width}} blocks reused     {} ({})'.format(nreuses,
                 bytes2str(breuses, decimal=False),
                 bytes2str(breuses), width=width)
-        ss += '\n    {:>{width}} blocks allocated  {} ({})'.format(nallocs, 
+        ss += '\n    {:>{width}} blocks allocated  {} ({})'.format(nallocs,
                 bytes2str(ballocs, decimal=False),
                 bytes2str(ballocs), width=width)
-        ss += '\n    {:>{width}} blocks freed      {} ({})'.format(nfrees, 
+        ss += '\n    {:>{width}} blocks freed      {} ({})'.format(nfrees,
                 bytes2str(bfrees, decimal=False),
                 bytes2str(bfrees), width=width)
         ss += '\n'
         ss += '\n  Detailed pool statistics:'
         has_stats=False
-        for stat_nr, stat in self.alloc_statistics.iteritems():
+        for stat_nr, stat in self.alloc_statistics.items():
             has_stats=True
             ss += '\n  {:>10} <{} x <= {:<10} => {}'.format(
                     bytes2str(2**(stat_nr), decimal=False),
@@ -465,22 +463,22 @@ class MemoryPool(object):
             if nblocks == 0:
                 continue
             has_block = True
-            mean_bytes = sum([b.size for b in blocks]) / nblocks
-            ss += '\n    *bin {}:  nblocks={}  mean_block_size={}'.format(bin_nr, nblocks, 
+            mean_bytes = sum([b.size for b in blocks]) / float(nblocks)
+            ss += '\n    *bin {}:  nblocks={}  mean_block_size={}'.format(bin_nr, nblocks,
                     bytes2str(mean_bytes))
         if not has_block:
             ss += '\n    *no held blocks, all blocks are in use*'
         ss += '\n=========================================='
         return ss
-    
+
     def print_allocation_report(self):
         """
         Print various statistics of this pool.
         """
-        print
-        print self.allocation_report()
-        print 
-    
+        print()
+        print(self.allocation_report())
+        print()
+
 class PoolAllocationStatistics(object):
     def __init__(self):
         #counters
@@ -489,13 +487,13 @@ class PoolAllocationStatistics(object):
         self.nfrees   = 0
         self.nreturns = 0
         self.nreuses  = 0
-        
+
         # bytes
         self.ballocs  = 0
         self.bfrees   = 0
         self.breuses  = 0
         self.breturns = 0
-            
+
         # profiling
         self.tallocs = 0
         self.tfrees  = 0
@@ -540,8 +538,8 @@ class PoolAllocationStatistics(object):
     def __str__(self):
         ss = '{:>4} requests | {:>4} reuse | {} allocs | {} frees'.format(
                 self.nrequests,
-                '{}%'.format(int(self.reuse_factor()*1000)/10.0) if self.nreuses else '   no', 
-                time2str(self.mean_alloc_time(), on_zero='   no'), 
+                '{}%'.format(int(self.reuse_factor()*1000)/10.0) if self.nreuses else '   no',
+                time2str(self.mean_alloc_time(), on_zero='   no'),
                 time2str(self.mean_free_time(), on_zero='   no'))
         return ss
 
diff --git a/hysop/core/memory/tests/test_buffer.py b/hysop/core/memory/tests/test_buffer.py
index bb50c103f2c9782f26d206827c9c773ed9becc15..9dc4afe62d8964afc9177e1893853baaa1a9c652 100644
--- a/hysop/core/memory/tests/test_buffer.py
+++ b/hysop/core/memory/tests/test_buffer.py
@@ -1,4 +1,5 @@
-from hysop.deps import np
+import numpy as np
+
 from hysop.backend.host.host_buffer import HostBuffer
 from hysop.core.mpi import default_mpi_params
 from hysop.backend import __HAS_OPENCL_BACKEND__, __HAS_CUDA_BACKEND__
diff --git a/hysop/core/memory/tests/test_mempool.py b/hysop/core/memory/tests/test_mempool.py
index 2646442f55e4875757aa84f569eeaf987435413c..4cdb3b80ce91301eb86aed8e2b39bf64ae4e6157 100644
--- a/hysop/core/memory/tests/test_mempool.py
+++ b/hysop/core/memory/tests/test_mempool.py
@@ -1,4 +1,5 @@
-from hysop.deps import np
+import numpy as np
+
 from hysop.testsenv import opencl_failed, iter_clenv, \
     __HAS_OPENCL_BACKEND__, __ENABLE_LONG_TESTS__
 from hysop.core.memory.mempool import MemoryPool
diff --git a/hysop/core/mpi/__init__.py b/hysop/core/mpi/__init__.py
index d268d085bed74b70b4149d155e0f3d53b8fbd537..2591d0abd4d448a5e27a0fd582d67db3cb5e20a5 100644
--- a/hysop/core/mpi/__init__.py
+++ b/hysop/core/mpi/__init__.py
@@ -24,7 +24,7 @@ from mpi4py import MPI as MPI
 
 processor_name = MPI.Get_processor_name()
 """MPI processor name"""
-processor_hash = int(hashlib.sha1(processor_name).hexdigest(), 16) % (1<<31)
+processor_hash = int(hashlib.sha1(processor_name.encode('utf-8')).hexdigest(), 16) % (1<<31)
 """MPI hashed processor name as integer (fits into a 32bit signed integer)"""
 
 main_comm = MPI.COMM_WORLD.Dup()
@@ -48,7 +48,7 @@ if (shm_rank!=0):
     intershm_comm.Free()
     intershm_comm = None
     intershm_rank = None
-    intershm_size = shm_comm.bcast(None, root=0) 
+    intershm_size = shm_comm.bcast(None, root=0)
     is_multishm   = False
 else:
     intershm_rank = intershm_comm.Get_rank()
@@ -72,7 +72,7 @@ if (host_rank!=0):
     interhost_comm.Free()
     interhost_comm = None
     interhost_rank = None
-    interhost_size = main_comm.bcast(None, root=0) 
+    interhost_size = main_comm.bcast(None, root=0)
 else:
     interhost_rank = interhost_comm.Get_rank()
     """Communicator rank between hosts"""
diff --git a/hysop/core/mpi/redistribute.py b/hysop/core/mpi/redistribute.py
index 1f0f7c9dfd939ecb74193e5c3413050664bcf985..a0effda6d393d2a55bd7ffc59976eeba4e1d1d03 100644
--- a/hysop/core/mpi/redistribute.py
+++ b/hysop/core/mpi/redistribute.py
@@ -110,6 +110,9 @@ class RedistributeIntra(RedistributeOperatorBase):
 
         return True
 
+    def __new__(cls, **kwds):
+        return super(RedistributeIntra, cls).__new__(cls, **kwds)
+
     def __init__(self, **kwds):
         """Data transfer between two operators/topologies defined on the
         same communicator
@@ -246,18 +249,18 @@ class RedistributeIntra(RedistributeOperatorBase):
                 self._s_request[rk].Wait()
         self._has_requests = False
 
-        if DEBUG_REDISTRIBUTE != 0:
-            print 'resolution, compute_resolution, ghosts, compute_slices'
-            print dFin.resolution, dFin.compute_resolution, dFin.ghosts, dFin.compute_slices
-            print dFout.resolution, dFout.compute_resolution, dFout.ghosts, dFout.compute_slices
-            print
-            print 'BEFORE'
+        if DEBUG_REDISTRIBUTE:
+            print('resolution, compute_resolution, ghosts, compute_slices')
+            print(dFin.resolution, dFin.compute_resolution, dFin.ghosts, dFin.compute_slices)
+            print(dFout.resolution, dFout.compute_resolution, dFout.ghosts, dFout.compute_slices)
+            print()
+            print('BEFORE')
             dFout.print_with_ghosts()
 
         dFout.exchange_ghosts()
 
-        if DEBUG_REDISTRIBUTE != 0:
-            print 'AFTER'
+        if DEBUG_REDISTRIBUTE:
+            print('AFTER')
             dFout.print_with_ghosts()
             mean_in = refcomm.allreduce(
                 dFin.sdata[dFin.compute_slices].sum().get()) / float(refcomm.size)
diff --git a/hysop/core/mpi/topo_tools.py b/hysop/core/mpi/topo_tools.py
index fe296d1e8c6d57cf5014f99a78fa2d022361f311..5f644a167ca27394ffaca07a76adccbbaaca6ab8 100644
--- a/hysop/core/mpi/topo_tools.py
+++ b/hysop/core/mpi/topo_tools.py
@@ -154,7 +154,6 @@ class TopoTools(object):
         # Get the list of processes
         assert child is not None
         assert parent is not None
-        #child_ranks = [i for i in xrange(child.Get_size())]
         child_group = child.Get_group()
         parent_group = parent.Get_group()
         inter_group = MPI.Group.Intersect(child_group, parent_group)
@@ -218,9 +217,9 @@ class TopoTools(object):
         g_source = source.Get_group()
         g_target = target.Get_group()
         size_source = g_source.Get_size()
-        r_source = [i for i in xrange(size_source)]
+        r_source = [i for i in range(size_source)]
         res = MPI.Group.Translate_ranks(g_source, r_source, g_target)
-        return {r_source[i]: res[i] for i in xrange(size_source)}
+        return {r_source[i]: res[i] for i in range(size_source)}
 
     @staticmethod
     def create_subarray(sl_dict, data_shape, order,
@@ -245,11 +244,11 @@ class TopoTools(object):
 
         def _create_subarray(slc, data_shape):
             dim = len(data_shape)
-            slc = tuple(slc[i].indices(data_shape[i]) for i in xrange(dim))
-            subvshape = tuple((slc[i][1] - slc[i][0] for i in xrange(dim)))
-            substart = tuple((slc[i][0] for i in xrange(dim)))
-            substep = tuple((slc[i][2] for i in xrange(dim)))
-            assert all(substep[i] == 1 for i in xrange(dim))
+            slc = tuple(slc[i].indices(data_shape[i]) for i in range(dim))
+            subvshape = tuple((slc[i][1] - slc[i][0] for i in range(dim)))
+            substart = tuple((slc[i][0] for i in range(dim)))
+            substep = tuple((slc[i][2] for i in range(dim)))
+            assert all(substep[i] == 1 for i in range(dim))
             subtype = mpi_type.Create_subarray(data_shape, subvshape,
                                                substart, order=order)
             subtype.Commit()
@@ -257,7 +256,7 @@ class TopoTools(object):
 
         if isinstance(sl_dict, dict):
             subtypes = {}
-            for (rk, slc) in sl_dict.iteritems():
+            for (rk, slc) in sl_dict.items():
                 subtypes[rk] = _create_subarray(slc, data_shape)
             return subtypes
         else:
@@ -272,13 +271,13 @@ class TopoTools(object):
         order = get_mpi_order(data)
 
         assert len(slices) == dim
-        slices = tuple(slices[i].indices(shape[i]) for i in xrange(dim))
-        subshape = tuple((slices[i][1] - slices[i][0] for i in xrange(dim)))
-        substart = tuple((slices[i][0] for i in xrange(dim)))
-        substep = tuple((slices[i][2] for i in xrange(dim)))
-        assert all(0 <= substart[i] < shape[i] for i in xrange(dim))
-        assert all(0 < subshape[i] <= shape[i] for i in xrange(dim))
-        assert all(substep[i] == 1 for i in xrange(dim))
+        slices = tuple(slices[i].indices(shape[i]) for i in range(dim))
+        subshape = tuple((slices[i][1] - slices[i][0] for i in range(dim)))
+        substart = tuple((slices[i][0] for i in range(dim)))
+        substep = tuple((slices[i][2] for i in range(dim)))
+        assert all(0 <= substart[i] < shape[i] for i in range(dim))
+        assert all(0 < subshape[i] <= shape[i] for i in range(dim))
+        assert all(substep[i] == 1 for i in range(dim))
 
         basetype = dtype_to_mpi_type(dtype)
         subtype = basetype.Create_subarray(shape, subshape, substart, order=order)
@@ -335,9 +334,9 @@ class TopoTools(object):
         reduced_res = np.asarray(topo.mesh.local_resolution - topo.ghosts())
         n_group = npw.zeros_like(group_size)
         dimension = topo.domain.dim
-        for i in xrange(dimension):
-            ind = [j for j in xrange(len(reduced_res)) if j != i]
-            n_group[:, i] = reduced_res[ind] / group_size[:, i]
+        for i in range(dimension):
+            ind = [j for j in range(len(reduced_res)) if j != i]
+            n_group[:, i] = reduced_res[ind] // group_size[:, i]
 
         tag_size = npw.asintegerarray(np.ceil(np.log10(n_group)))
         tag_rank = max(2, math.ceil(math.log10(3 * max(topo.shape))))
diff --git a/hysop/core/tests/test_checkpoint.sh b/hysop/core/tests/test_checkpoint.sh
index d6cfc4919b1901987aedf81742a1ac9bf1bfb687..17c86157dc19a7429c9290c2abf576031dbfc3b0 100755
--- a/hysop/core/tests/test_checkpoint.sh
+++ b/hysop/core/tests/test_checkpoint.sh
@@ -1,7 +1,7 @@
 #!/usr/bin/env bash
 set -feu -o pipefail
-PYTHON_EXECUTABLE=${PYTHON_EXECUTABLE:-python2.7}
-MPIRUN_EXECUTABLE=${MPIRUN_EXECUTABLE:-mpirun --allow-run-as-root}
+PYTHON_EXECUTABLE=${PYTHON_EXECUTABLE:-python3.8}
+MPIRUN_EXECUTABLE=${MPIRUN_EXECUTABLE:-mpirun}
 
 SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
 EXAMPLE_DIR="$(realpath ${SCRIPT_DIR}/../../../hysop_examples/examples)"
@@ -42,8 +42,8 @@ if [[ ! -f "${EXAMPLE_FILE}" ]]; then
 fi
 
 echo ' Running simulations...'
-"${PYTHON_EXECUTABLE}" "${EXAMPLE_FILE}" ${COMMON_OPTIONS} -S "${TEST_DIR}/checkpoint0.tar" --dump-dir "${TEST_DIR}/run0" --checkpoint-dump-time 0.05 --checkpoint-dump-freq 0
-"${PYTHON_EXECUTABLE}" "${EXAMPLE_FILE}" ${COMMON_OPTIONS} -S "${TEST_DIR}/checkpoint1.tar" --dump-dir "${TEST_DIR}/run1" --checkpoint-dump-time 0.05 --checkpoint-dump-freq 0
+${PYTHON_EXECUTABLE} "${EXAMPLE_FILE}" ${COMMON_OPTIONS} -S "${TEST_DIR}/checkpoint0.tar" --dump-dir "${TEST_DIR}/run0" --checkpoint-dump-time 0.05 --checkpoint-dump-freq 0
+${PYTHON_EXECUTABLE} "${EXAMPLE_FILE}" ${COMMON_OPTIONS} -S "${TEST_DIR}/checkpoint1.tar" --dump-dir "${TEST_DIR}/run1" --checkpoint-dump-time 0.05 --checkpoint-dump-freq 0
 
 echo ' Comparing solutions...'
 echo "  >debug dumps match"
@@ -56,8 +56,8 @@ done
 
 echo
 echo ' Running simulations from checkpoints...'
-"${PYTHON_EXECUTABLE}" "${EXAMPLE_FILE}" ${COMMON_OPTIONS} -L "${TEST_DIR}/checkpoint0.tar" --dump-dir "${TEST_DIR}/run2"
-"${PYTHON_EXECUTABLE}" "${EXAMPLE_FILE}" ${COMMON_OPTIONS} -L "${TEST_DIR}/checkpoint1.tar" --dump-dir "${TEST_DIR}/run3"
+${PYTHON_EXECUTABLE} "${EXAMPLE_FILE}" ${COMMON_OPTIONS} -L "${TEST_DIR}/checkpoint0.tar" --dump-dir "${TEST_DIR}/run2"
+${PYTHON_EXECUTABLE} "${EXAMPLE_FILE}" ${COMMON_OPTIONS} -L "${TEST_DIR}/checkpoint1.tar" --dump-dir "${TEST_DIR}/run3"
 
 echo ' Comparing solutions...'
 compare_files "${TEST_DIR}/run2/dump/run.txt" "${TEST_DIR}/run3/dump/run.txt"
@@ -76,10 +76,9 @@ done
 #
 # Basic test with 2D diffusion (MPI)
 #
-
 EXAMPLE_FILE="${EXAMPLE_DIR}/scalar_diffusion/scalar_diffusion.py"
 TEST_DIR='/tmp/hysop_tests/checkpoints/scalar_diffusion_mpi'
-COMMON_OPTIONS="-NC -impl opencl -cp fp32 -d64 --debug-dump-target dump -nu 0.02 -niter 20 -te 0.1 --dump-tstart 0.05 --dump-freq 1 "
+COMMON_OPTIONS="-NC -impl opencl -cp fp64 -d64 --debug-dump-target dump -nu 0.02 -niter 20 -te 0.1 --dump-tstart 0.05 --dump-freq 1 "
 
 echo
 echo "TEST SCALAR DIFFUSION CHECKPOINT (MPI)"
@@ -89,22 +88,22 @@ if [[ ! -f "${EXAMPLE_FILE}" ]]; then
 fi
 
 echo ' Running simulations...'
-${MPIRUN_EXECUTABLE} -np 4 "${PYTHON_EXECUTABLE}" "${EXAMPLE_FILE}" ${COMMON_OPTIONS} -S "${TEST_DIR}/checkpoint0.tar" --dump-dir "${TEST_DIR}/run0" --checkpoint-dump-time 0.05 --checkpoint-dump-freq 0
-${MPIRUN_EXECUTABLE} -np 4 "${PYTHON_EXECUTABLE}" "${EXAMPLE_FILE}" ${COMMON_OPTIONS} -S "${TEST_DIR}/checkpoint1.tar" --dump-dir "${TEST_DIR}/run1" --checkpoint-dump-time 0.05 --checkpoint-dump-freq 0
+${MPIRUN_EXECUTABLE} -np 4 ${PYTHON_EXECUTABLE} "${EXAMPLE_FILE}" ${COMMON_OPTIONS} -S "${TEST_DIR}/checkpoint0.tar" --dump-dir "${TEST_DIR}/run0" --checkpoint-dump-time 0.05 --checkpoint-dump-freq 0
+${MPIRUN_EXECUTABLE} -np 4 ${PYTHON_EXECUTABLE} "${EXAMPLE_FILE}" ${COMMON_OPTIONS} -S "${TEST_DIR}/checkpoint1.tar" --dump-dir "${TEST_DIR}/run1" --checkpoint-dump-time 0.05 --checkpoint-dump-freq 0
 
 echo ' Comparing solutions...'
 echo "  >debug dumps match"
 compare_files "${TEST_DIR}/run0/dump/run.txt" "${TEST_DIR}/run1/dump/run.txt"
 for f0 in $(find "${TEST_DIR}/run0" -name '*.h5' | sort -n); do
     f1=$(echo "${f0}" | sed 's/run0/run1/')
-    compare_files "${f0}" "${f1}"
+    h5diff -d '1e-15' "${f0}" "${f1}"
     echo "  >$(basename ${f0}) match"
 done
 
 echo
 echo ' Running simulations from checkpoints...'
-${MPIRUN_EXECUTABLE} -np 4 "${PYTHON_EXECUTABLE}" "${EXAMPLE_FILE}" ${COMMON_OPTIONS} -L "${TEST_DIR}/checkpoint0.tar" --dump-dir "${TEST_DIR}/run2"
-${MPIRUN_EXECUTABLE} -np 4 "${PYTHON_EXECUTABLE}" "${EXAMPLE_FILE}" ${COMMON_OPTIONS} -L "${TEST_DIR}/checkpoint1.tar" --dump-dir "${TEST_DIR}/run3"
+${MPIRUN_EXECUTABLE} -np 4 ${PYTHON_EXECUTABLE} "${EXAMPLE_FILE}" ${COMMON_OPTIONS} -L "${TEST_DIR}/checkpoint0.tar" --dump-dir "${TEST_DIR}/run2"
+${MPIRUN_EXECUTABLE} -np 4 ${PYTHON_EXECUTABLE} "${EXAMPLE_FILE}" ${COMMON_OPTIONS} -L "${TEST_DIR}/checkpoint1.tar" --dump-dir "${TEST_DIR}/run3"
 
 echo ' Comparing solutions...'
 compare_files "${TEST_DIR}/run2/dump/run.txt" "${TEST_DIR}/run3/dump/run.txt"
@@ -113,9 +112,9 @@ for f0 in $(find "${TEST_DIR}/run2" -name '*.h5' | sort -n); do
     f1=$(echo "${f0}" | sed 's/run2/run3/')
     f2=$(echo "${f0}" | sed 's/run2/run0/')
     f3=$(echo "${f0}" | sed 's/run2/run1/')
-    compare_files "${f0}" "${f1}"
-    compare_files "${f0}" "${f2}"
-    compare_files "${f0}" "${f3}"
+    h5diff -d '1e-15' "${f0}" "${f1}"
+    h5diff -d '1e-15' "${f0}" "${f2}"
+    h5diff -d '1e-15' "${f0}" "${f3}"
     echo "  >$(basename ${f0}) match"
 done
 
@@ -139,9 +138,9 @@ fi
 
 # Fortran FFTW does not yield exactly the same results in parallel so we use h5diff with an absolute tolerance of 10^-12
 echo ' Running simulations...'
-${MPIRUN_EXECUTABLE} -np 1 "${PYTHON_EXECUTABLE}" "${EXAMPLE_FILE}" ${COMMON_OPTIONS} -impl fortran -cp fp64 -S "${TEST_DIR}/checkpoint0.tar" --dump-dir "${TEST_DIR}/run0" --checkpoint-dump-time 0.15 --checkpoint-dump-freq 0
-${MPIRUN_EXECUTABLE} -np 2 "${PYTHON_EXECUTABLE}" "${EXAMPLE_FILE}" ${COMMON_OPTIONS} -impl fortran -cp fp64 -S "${TEST_DIR}/checkpoint1.tar" --dump-dir "${TEST_DIR}/run1" --checkpoint-dump-time 0.15 --checkpoint-dump-freq 0
-${MPIRUN_EXECUTABLE} -np 3 "${PYTHON_EXECUTABLE}" "${EXAMPLE_FILE}" ${COMMON_OPTIONS} -impl fortran -cp fp64 -S "${TEST_DIR}/checkpoint2.tar" --dump-dir "${TEST_DIR}/run2" --checkpoint-dump-time 0.15 --checkpoint-dump-freq 0
+${MPIRUN_EXECUTABLE} -np 1 ${PYTHON_EXECUTABLE} "${EXAMPLE_FILE}" ${COMMON_OPTIONS} -impl fortran -cp fp64 -S "${TEST_DIR}/checkpoint0.tar" --dump-dir "${TEST_DIR}/run0" --checkpoint-dump-time 0.15 --checkpoint-dump-freq 0
+${MPIRUN_EXECUTABLE} -np 2 ${PYTHON_EXECUTABLE} "${EXAMPLE_FILE}" ${COMMON_OPTIONS} -impl fortran -cp fp64 -S "${TEST_DIR}/checkpoint1.tar" --dump-dir "${TEST_DIR}/run1" --checkpoint-dump-time 0.15 --checkpoint-dump-freq 0
+${MPIRUN_EXECUTABLE} -np 3 ${PYTHON_EXECUTABLE} "${EXAMPLE_FILE}" ${COMMON_OPTIONS} -impl fortran -cp fp64 -S "${TEST_DIR}/checkpoint2.tar" --dump-dir "${TEST_DIR}/run2" --checkpoint-dump-time 0.15 --checkpoint-dump-freq 0
 echo ' Comparing solutions...'
 for f0 in $(find "${TEST_DIR}/run0" -name '*.h5' | sort -n); do
     f1=$(echo "${f0}" | sed 's/run0/run1/')
@@ -153,9 +152,9 @@ done
 
 echo ' Running simulations from checkpoints using different MPI topologies...'
 COMMON_OPTIONS="-NC -d24 --tend 0.3 --dump-tstart 0.15 --dump-freq 1 --hdf5-disable-slicing --hdf5-disable-compression --checkpoint-relax-constraints"
-${MPIRUN_EXECUTABLE} -np 3 "${PYTHON_EXECUTABLE}" "${EXAMPLE_FILE}" ${COMMON_OPTIONS} -impl fortran -cp fp64 -L "${TEST_DIR}/checkpoint0.tar" --dump-dir "${TEST_DIR}/run3"
-${MPIRUN_EXECUTABLE} -np 2 "${PYTHON_EXECUTABLE}" "${EXAMPLE_FILE}" ${COMMON_OPTIONS} -impl fortran -cp fp64 -L "${TEST_DIR}/checkpoint1.tar" --dump-dir "${TEST_DIR}/run4"
-${MPIRUN_EXECUTABLE} -np 1 "${PYTHON_EXECUTABLE}" "${EXAMPLE_FILE}" ${COMMON_OPTIONS} -impl fortran -cp fp64 -L "${TEST_DIR}/checkpoint2.tar" --dump-dir "${TEST_DIR}/run5"
+${MPIRUN_EXECUTABLE} -np 3 ${PYTHON_EXECUTABLE} "${EXAMPLE_FILE}" ${COMMON_OPTIONS} -impl fortran -cp fp64 -L "${TEST_DIR}/checkpoint0.tar" --dump-dir "${TEST_DIR}/run3"
+${MPIRUN_EXECUTABLE} -np 2 ${PYTHON_EXECUTABLE} "${EXAMPLE_FILE}" ${COMMON_OPTIONS} -impl fortran -cp fp64 -L "${TEST_DIR}/checkpoint1.tar" --dump-dir "${TEST_DIR}/run4"
+${MPIRUN_EXECUTABLE} -np 1 ${PYTHON_EXECUTABLE} "${EXAMPLE_FILE}" ${COMMON_OPTIONS} -impl fortran -cp fp64 -L "${TEST_DIR}/checkpoint2.tar" --dump-dir "${TEST_DIR}/run5"
 echo ' Comparing solutions...'
 for f3 in $(find "${TEST_DIR}/run3" -name '*.h5' | sort -n); do
     f0=$(echo "${f3}" | sed 's/run3/run0/')
@@ -168,8 +167,8 @@ for f3 in $(find "${TEST_DIR}/run3" -name '*.h5' | sort -n); do
 done
 
 echo ' Running simulations from checkpoints using OpenCL and different datatypes...'
-${MPIRUN_EXECUTABLE} -np 1 "${PYTHON_EXECUTABLE}" "${EXAMPLE_FILE}" ${COMMON_OPTIONS} -cp fp64 -impl opencl -L "${TEST_DIR}/checkpoint0.tar" --dump-dir "${TEST_DIR}/run6"
-${MPIRUN_EXECUTABLE} -np 1 "${PYTHON_EXECUTABLE}" "${EXAMPLE_FILE}" ${COMMON_OPTIONS} -cp fp32 -impl opencl -L "${TEST_DIR}/checkpoint1.tar" --dump-dir "${TEST_DIR}/run7"
+${MPIRUN_EXECUTABLE} -np 1 ${PYTHON_EXECUTABLE} "${EXAMPLE_FILE}" ${COMMON_OPTIONS} -cp fp64 -impl opencl -L "${TEST_DIR}/checkpoint0.tar" --dump-dir "${TEST_DIR}/run6"
+${MPIRUN_EXECUTABLE} -np 1 ${PYTHON_EXECUTABLE} "${EXAMPLE_FILE}" ${COMMON_OPTIONS} -cp fp32 -impl opencl -L "${TEST_DIR}/checkpoint1.tar" --dump-dir "${TEST_DIR}/run7"
 echo ' Comparing solutions...'
 for f6 in $(find "${TEST_DIR}/run6" -name '*.h5' | sort -n); do
     f7=$(echo "${f6}" | sed 's/run0/run7/')
diff --git a/hysop/deps.py b/hysop/deps.py
deleted file mode 100644
index 52a7937c227a56281ab6de18e5a45853fee08874..0000000000000000000000000000000000000000
--- a/hysop/deps.py
+++ /dev/null
@@ -1,39 +0,0 @@
-from __future__ import absolute_import 
-from __future__ import print_function
-
-try:
-    import __builtin__
-except:
-    import builtins as __builtin__
-
-try:
-   import cPickle as pickle
-except:
-   import pickle
-
-try:
-    import h5py
-except ImportError as e:
-    h5py = None
-    msg =  'Warning: h5py not found, you may not be able to'
-    msg += ' use hdf5 I/O functionnalities.'
-    print(msg)
-    raise
-
-import sys, os, subprocess, platform, warnings, traceback
-import resource, psutil, tempfile, cpuinfo, time
-import inspect, functools, operator, random
-import hashlib, gzip, copy, types, string
-import math, re, contextlib
-import six, itertools
-
-import itertools as it
-import numpy as np
-
-import sympy as sm
-import sympy.abc, sympy.polys, sympy.solvers, sympy.functions
-
-import scipy as sp
-import scipy.linalg, scipy.interpolate
-
-import gmpy2 as gmp
diff --git a/hysop/domain/box.py b/hysop/domain/box.py
index c687872010f84f2a358662781ad84e47baacb8dd..56ca2f9518463996b4d60aa1b2ec65e68316eb22 100644
--- a/hysop/domain/box.py
+++ b/hysop/domain/box.py
@@ -1,7 +1,8 @@
 """Box-shaped domains definition.
 """
 import warnings
-from hysop.deps import np
+import numpy as np
+
 from hysop.constants import BoxBoundaryCondition, HYSOP_REAL
 from hysop.domain.domain import Domain, DomainView
 from hysop.tools.decorators import debug
@@ -10,24 +11,27 @@ from hysop.tools.types import check_instance, first_not_None, to_tuple
 from hysop.tools.warning import HysopWarning
 
 class BoxView(DomainView):
-    
+
     __slots__ = ('_domain', '_topology_state')
-    
+
     @debug
     def __new__(cls, topology_state, domain=None, **kwds):
         """Create and initialize a BoxView."""
         from hysop.topology.cartesian_topology import CartesianTopologyState
         check_instance(topology_state, CartesianTopologyState)
         check_instance(domain, Box, allow_none=True)
-        obj = super(BoxView, cls).__new__(cls, topology_state=topology_state, 
+        obj = super(BoxView, cls).__new__(cls, topology_state=topology_state,
                 domain=domain, **kwds)
         check_instance(obj._domain, Box)
         return obj
 
+    def __init__(self, topology_state, domain=None, **kwds):
+        super(BoxView, self).__init__(topology_state=topology_state, domain=domain, **kwds)
+
     def __get_domain_attr(self, name):
         """Get a transposed domain attribute."""
         return self._topology_state.transposed(getattr(self._domain, name))
-    
+
     def _get_length(self):
         """Box sides lengthes."""
         return self.__get_domain_attr('_length')
@@ -46,9 +50,9 @@ class BoxView(DomainView):
         return self.__get_domain_attr('_rboundaries')
     def _get_boundaries(self):
         """Left and right boundary conditions as a tuple."""
-        return (self.__get_domain_attr('_lboundaries'), 
+        return (self.__get_domain_attr('_lboundaries'),
                 self.__get_domain_attr('_rboundaries'),)
-       
+
     def _get_periodicity(self):
         """Numpy array mask, True is axis is periodic, else False."""
         periodic = BoxBoundaryCondition.PERIODIC
@@ -66,14 +70,14 @@ class BoxView(DomainView):
         """
         s= '{} | {}D rectangular box domain:'.format(self.full_tag, self.dim)
         s+= '\n  *origin:  {}'.format(self.origin)
-        s+= '\n  *max_pos: {}'.format(self.end) 
+        s+= '\n  *max_pos: {}'.format(self.end)
         s+= '\n  *length:  {}'.format(self.length)
         s+= '\n  *left  boundary conditions: {}'.format(self.lboundaries.tolist())
         s+= '\n  *right boundary conditions: {}'.format(self.rboundaries.tolist())
         s+= '\n'
         return s
 
-    def short_description(self):    
+    def short_description(self):
         """
         Return a short description of this Box as a string.
         """
@@ -83,11 +87,11 @@ class BoxView(DomainView):
                 ','.join(('{:1.1f}'.format(val) for val in self.length)),
                 self.format_boundaries(),
                 self.current_task())
-    
+
     def format_boundaries(self):
         return ','.join(('{}/{}'.format(str(lb),str(rb)) for (lb,rb) in \
                         zip(*self.boundaries)))
-    
+
     length = property(_get_length)
     origin = property(_get_origin)
     end = property(_get_end)
@@ -98,12 +102,19 @@ class BoxView(DomainView):
 
 
 class Box(BoxView, Domain):
-    """ 
+    """
     Box-shaped domain description.
     """
-   
+
     @debug
-    def __new__(cls, length=None, origin=None, dim=None, 
+    def __init__(self, length=None, origin=None, dim=None,
+            lboundaries=None, rboundaries=None, **kwds):
+        super(Box, self).__init__(length=length, origin=origin, dim=dim,
+                lboundaries=lboundaries, rboundaries=rboundaries, domain=None,
+                topology_state=None, **kwds)
+
+    @debug
+    def __new__(cls, length=None, origin=None, dim=None,
             lboundaries=None, rboundaries=None, **kwds):
         """
         Create or get an existing Box from a dimension, length and origin with specified
@@ -129,9 +140,9 @@ class Box(BoxView, Domain):
         length : np.ndarray of HYSOP_REAL
             Box sides lengthes.
         origin: np.ndarray of HYSOP_REAL
-            Position of the lowest point of the box. 
+            Position of the lowest point of the box.
         end: np.ndarray of HYSOP_REAL
-            Position of the greatest point of the box. 
+            Position of the greatest point of the box.
         lboundaries: np.ndarray of BoxBoundaryCondition
             Left boundary conditions.
         rboundaries: np.ndarray of BoxBoundaryCondition
@@ -142,19 +153,20 @@ class Box(BoxView, Domain):
             Numpy array mask, True is axis is periodic, else False.
         """
         from hysop.topology.cartesian_topology import CartesianTopologyState
-        
-        check_instance(dim, (int,long), minval=1, allow_none=True)
-        check_instance(length, (np.ndarray,list,tuple), values=(int,long,float), allow_none=True)
-        check_instance(origin, (np.ndarray,list,tuple), values=(int,long,float), allow_none=True)
-        check_instance(lboundaries, (np.ndarray,list,tuple), values=BoxBoundaryCondition, 
+
+        check_instance(dim, int, minval=1, allow_none=True)
+        check_instance(length, (np.ndarray,list,tuple), values=(np.integer,int,float), allow_none=True)
+        check_instance(origin, (np.ndarray,list,tuple), values=(np.integer,int,float), allow_none=True)
+        check_instance(lboundaries, (np.ndarray,list,tuple), values=BoxBoundaryCondition,
                 allow_none=True)
-        check_instance(rboundaries, (np.ndarray,list,tuple), values=BoxBoundaryCondition, 
+        check_instance(rboundaries, (np.ndarray,list,tuple), values=BoxBoundaryCondition,
                 allow_none=True)
 
         if (length is None) and (origin is None) and (dim is None):
             msg='At least one of the following parameters should be given: length, origin, dim.'
             raise ValueError(msg)
-        
+
+        dim = first_not_None(dim, 0)
         length = to_tuple(first_not_None(length, 1.0))
         origin = to_tuple(first_not_None(origin, 0.0))
         dim = max(dim, len(length), len(origin))
@@ -169,13 +181,13 @@ class Box(BoxView, Domain):
         check_instance(origin, np.ndarray, size=dim)
         assert (length>=0.0).all(), 'length < 0'
 
-        lboundaries = npw.asarray( first_not_None(lboundaries, 
+        lboundaries = npw.asarray( first_not_None(lboundaries,
                                                  (BoxBoundaryCondition.PERIODIC,)*dim ) )
-        rboundaries = npw.asarray( first_not_None(rboundaries, 
+        rboundaries = npw.asarray( first_not_None(rboundaries,
                                                  (BoxBoundaryCondition.PERIODIC,)*dim ) )
 
         assert lboundaries.size == rboundaries.size == dim
-        
+
         for i,(lb,rb) in enumerate(zip(lboundaries,rboundaries)):
             if (lb==BoxBoundaryCondition.PERIODIC) ^ (rb==BoxBoundaryCondition.PERIODIC):
                 msg='FATAL ERROR: Periodic BoxBoundaryCondition mismatch on axis {}.'.format(i)
@@ -194,32 +206,32 @@ class Box(BoxView, Domain):
             msg+='\n  *lboundaries: {}'.format(lboundaries)
             msg+='\n  *rboundaries: {}'.format(rboundaries)
             warnings.warn(msg, HysopWarning)
-        
+
         # double check types, to be sure RegisteredObject will work as expected
         check_instance(dim, int)
         check_instance(length, np.ndarray, dtype=HYSOP_REAL)
         check_instance(origin, np.ndarray, dtype=HYSOP_REAL)
         check_instance(lboundaries, np.ndarray, dtype=object)
         check_instance(rboundaries, np.ndarray, dtype=object)
-        
+
         npw.set_readonly(length, origin, lboundaries, rboundaries)
-           
+
         topology_state = CartesianTopologyState(dim)
-        
-        obj = super(Box,cls).__new__(cls, 
+
+        obj = super(Box,cls).__new__(cls,
             length=length, origin=origin, dim=dim,
-            lboundaries=lboundaries, rboundaries=rboundaries, 
+            lboundaries=lboundaries, rboundaries=rboundaries,
             domain=None, topology_state=topology_state,
             **kwds)
 
-        if not obj.obj_initialized: 
+        if not obj.obj_initialized:
             obj._length = length
             obj._origin = origin
             obj._lboundaries = lboundaries
             obj._rboundaries = rboundaries
 
         return obj
-    
+
     def view(self, topology_state):
         """Return a view of this domain altered by some topology_state."""
-        return BoxView(domain=self, topology_state=topology_state) 
+        return BoxView(domain=self, topology_state=topology_state)
diff --git a/hysop/domain/control_box.py b/hysop/domain/control_box.py
index 8707cfa3e79676dfe0aafa19c9e915a1488d4478..2441899af5326615f442b7ee62e4e6dc9802d101 100644
--- a/hysop/domain/control_box.py
+++ b/hysop/domain/control_box.py
@@ -31,7 +31,7 @@ class ControlBox(SubBox):
         # Create a mesh for each side
         dim = topo.domain.dim
         ilist = np.arange(dim)
-        for direction in xrange(dim):
+        for direction in range(dim):
             ndir = np.where(ilist == direction)[0]
             length = self.real_length[topo].copy()
             length[ndir] = 0.0
@@ -120,7 +120,7 @@ class ControlBox(SubBox):
         for ndir in list_dir:
             surf = self.surf[ndir]
             assert self._check_boundaries(surf, topo)
-            for i in xrange(nbc):
+            for i in range(nbc):
                 res[i] += surf.integrate_field_on_proc(field, topo, i)
         if root is None:
             topo.comm.Allreduce(res.handle(), gres.handle())
diff --git a/hysop/domain/domain.py b/hysop/domain/domain.py
index 09a423f656f88acf051cced1f06b29ae76c5bfdd..7c679655610d5903426930d9a7b26cf1682613aa 100644
--- a/hysop/domain/domain.py
+++ b/hysop/domain/domain.py
@@ -3,7 +3,10 @@ Abstract interfaces for physical domains description.
 * :class:`~hysop.domain.domain.Domain`
 * :class:`~hysop.domain.domain.DomainView`
 """
+import hashlib
+import numpy as np
 from abc import ABCMeta, abstractmethod
+
 from hysop.constants import HYSOP_DEFAULT_TASK_ID, HYSOP_DIM
 from hysop.core.mpi import main_comm, MPI
 from hysop.tools.parameters import MPIParams
@@ -12,15 +15,17 @@ from hysop.tools.handle import RegisteredObject, TaggedObjectView
 from hysop.tools.types import check_instance
 from hysop.tools.numpywrappers import npw
 from hysop.symbolic.frame import SymbolicFrame
-from hysop.deps import hashlib, np
 
 
-class DomainView(TaggedObjectView):
+class DomainView(TaggedObjectView, metaclass=ABCMeta):
     """Abstract base class for views on domains. """
-    __metaclass__ = ABCMeta
 
     __slots__ = ('_domain', '_topology_state')
 
+    @debug
+    def __init__(self, topology_state, domain=None, **kwds):
+        super(DomainView, self).__init__(obj_view=domain, **kwds)
+
     @debug
     def __new__(cls, topology_state, domain=None, **kwds):
         """Create and initialize a DomainView."""
@@ -145,7 +150,7 @@ class DomainView(TaggedObjectView):
         """Test if the current process corresponds to param task."""
         if isinstance(params, MPIParams):
             task_id = params.task_id
-        elif isinstance(params, (int, long, npw.integer)):
+        elif isinstance(params, (int, npw.integer)):
             task_id = params
         else:
             msg = 'Could not extract task_id from type {}.'
@@ -155,9 +160,9 @@ class DomainView(TaggedObjectView):
 
     def print_topologies(self):
         """Print all topologies registered on the domain."""
-        print self.short_description() + ' defined the following topologies:'
+        print(self.short_description() + ' defined the following topologies:')
         for topo in self._domain._registered_topologies.values():
-            print '  *'+topo.short_description()
+            print('  *'+topo.short_description())
 
     @abstractmethod
     def short_description(self):
@@ -206,9 +211,13 @@ class DomainView(TaggedObjectView):
     frame = property(_get_frame)
 
 
-class Domain(RegisteredObject):
+class Domain(RegisteredObject, metaclass=ABCMeta):
     """Abstract base class for the description of physical domains. """
-    __metaclass__ = ABCMeta
+
+    @debug
+    def __init__(self, dim, parent_comm=None, proc_tasks=None, **kwds):
+        super(Domain, self).__init__(dim=dim, parent_comm=parent_comm,
+                proc_tasks=proc_tasks, tag_prefix='d', **kwds)
 
     @debug
     def __new__(cls, dim, parent_comm=None, proc_tasks=None, **kwds):
@@ -374,7 +383,7 @@ class Domain(RegisteredObject):
         # Build a per-machine communicator in order to get a rank on local machines
         # Split accoring to machine name hashed and converted to integer (strings generally differs only from a single character)
         machine_comm = parent_comm.Split(
-            color=np.int32(int(hashlib.md5(MPI.Get_processor_name()).hexdigest(), 16) %
+            color=np.int32(int(hashlib.md5(MPI.Get_processor_name().encode('utf-8')).hexdigest(), 16) %
                            np.iinfo(np.int32).max),
             key=parent_rank)
         machine_rank = machine_comm.Get_rank()
diff --git a/hysop/domain/porous.py b/hysop/domain/porous.py
index ff0d5a202516e03a6660c3aed13b7be88c31fbb0..92878655c8bc475f15865cf7b6e471e7fd850322 100644
--- a/hysop/domain/porous.py
+++ b/hysop/domain/porous.py
@@ -92,7 +92,7 @@ class Porous(Subset):
                     [out_set], poles, topo)
         else:
             out_radius = max_radius
-            for i in xrange(nb_layers - 1):
+            for i in range(nb_layers - 1):
                 in_radius = out_radius - self.layers[i]
                 in_set = self._source(parent=self._parent, origin=self.origin,
                                       radius=in_radius)
@@ -249,7 +249,7 @@ class QuadriPole(Porous):
         # dimension of top/bottom boxes
         poles = [None] * 4
         p = 0
-        for ndir in xrange(1, 3):
+        for ndir in range(1, 3):
             current = 2 * p
             downpos = self.origin - max_radius
             lbox = [2 * max_radius, ] * dim
@@ -327,7 +327,7 @@ class RingPole(Porous):
                                                              topo)
         else:
             out_radius = max_radius
-            for i in xrange(nb_layers - 1):
+            for i in range(nb_layers - 1):
                 in_radius = out_radius - self.layers[i]
                 in_set = self._source(parent=self._parent, origin=self.origin,
                                       radius=in_radius)
@@ -409,7 +409,7 @@ class Ring(Porous):
                 np.where(np.logical_and(iring, iout)))
         else:
             out_radius = max_radius
-            for i in xrange(nb_layers - 1):
+            for i in range(nb_layers - 1):
                 in_radius = out_radius - self.layers[i]
                 in_set = self._source(parent=self._parent, origin=self.origin,
                                       radius=in_radius)
diff --git a/hysop/fields/cartesian_discrete_field.py b/hysop/fields/cartesian_discrete_field.py
index 0dacc4105b6513778b188fdff99552952829c915..949d6e7670f5a7eae7ed0bbcda3973eb65e12b05 100644
--- a/hysop/fields/cartesian_discrete_field.py
+++ b/hysop/fields/cartesian_discrete_field.py
@@ -8,8 +8,10 @@ Discrete fields (scalars or vectors) descriptions.
 Documentation and examples can be found in :ref:`fields`.
 """
 
+import hashlib
+import numpy as np
+
 from hysop import vprint, dprint, MPI
-from hysop.deps import np, hashlib
 from hysop.core.arrays.all import HostArray, OpenClArray
 from hysop.constants import Backend, DirectionLabels, GhostOperation, GhostMask, ExchangeMethod, \
     MemoryOrdering
@@ -76,7 +78,7 @@ class CartesianDiscreteScalarFieldViewContainerI(object):
         def filter_components(x, components=components):
             assert isinstance(x, tuple)
             assert len(x) == self.nb_components
-            return tuple(x[i] if (i in components) else None for i in xrange(self.nb_components))
+            return tuple(x[i] if (i in components) else None for i in range(self.nb_components))
 
         reorder = to_tuple(first_not_None(reorder, ()))
         check_instance(reorder, tuple, values=str)
@@ -125,7 +127,7 @@ class CartesianDiscreteScalarFieldViewContainerI(object):
         dfields = ()
         buffers = ()
         coords = ()
-        for i in xrange(self.nb_components):
+        for i in range(self.nb_components):
             if (i not in components):
                 dfields += (None,)
                 buffers += (None,)
@@ -230,7 +232,7 @@ class CartesianDiscreteScalarFieldViewContainerI(object):
                 if np.isnan(d).any():
                     msg = 'Initialization of {} on component {} failed, got NaNs.'
                     msg = msg.format(self.pretty_name, i)
-                    print d
+                    print(d)
                     raise RuntimeError(msg)
                 if not np.isfinite(d).all():
                     msg = ('Initialization of {} on component {} failed, '
@@ -369,7 +371,7 @@ class CartesianDiscreteScalarFieldViewContainerI(object):
             for ndir in self.all_outer_ghost_slices:
                 for directions in self.all_outer_ghost_slices[ndir]:
                     for disp, (slc, _) in \
-                            self.all_outer_ghost_slices[ndir][directions].iteritems():
+                            self.all_outer_ghost_slices[ndir][directions].items():
                         if (sum(d != 0 for d in disp) == ndir) and ndir:
                             if callable(outer_ghosts):
                                 outer_ghosts = np.vectorize(outer_ghosts)
@@ -390,7 +392,7 @@ class CartesianDiscreteScalarFieldViewContainerI(object):
 
         from hysop.tools.contexts import printoptions
         with printoptions(**_print_opts):
-            print strarr
+            print(strarr)
 
     @property
     def compute_data(self):
@@ -630,6 +632,12 @@ class CartesianDiscreteScalarFieldView(CartesianDiscreteScalarFieldViewContainer
         obj._data_view = None
         return obj
 
+    @debug
+    def __init__(self, dfield, topology_state, **kwds):
+        super(CartesianDiscreteScalarFieldView, self).__init__(dfield=dfield,
+            topology_state=topology_state, **kwds)
+
+
     def _compute_data_view(self, data=None):
         """
         Compute transposed views of underlying discrete field data
@@ -1064,7 +1072,7 @@ class CartesianDiscreteScalarFieldView(CartesianDiscreteScalarFieldViewContainer
         from hysop.tools.sympy_utils import subscript
         default_name = '{}__{}'.format(self.name, self._dfield._clone_id)
         default_pname = '{}__{}'.format(self.pretty_name,
-                                        subscript(self._dfield._clone_id).encode('utf-8'))
+                                        subscript(self._dfield._clone_id))
         default_vname = '{}__{}'.format(self.var_name, self._dfield._clone_id)
         default_lname = '{}__{}'.format(self.latex_name, self._dfield._clone_id)
         self._dfield._clone_id += 1
@@ -1099,7 +1107,7 @@ class CartesianDiscreteScalarFieldView(CartesianDiscreteScalarFieldViewContainer
                         grid_resolution=None, ghosts=None, tstate=None,
                         lboundaries=None, rboundaries=None,
                         register_discrete_field=False, **kwds):
-        """
+        r"""
         Create a new Field and a new temporary CartesianDiscreteScalarField.
         like the current object, possibly on a different backend.
         /!\ The returned discrete field is not allocated.
@@ -1110,13 +1118,13 @@ class CartesianDiscreteScalarFieldView(CartesianDiscreteScalarFieldViewContainer
             tstate._is_read_only = is_read_only
 
         bfield = self._dfield._field
-        btopo = self._dfield._topology
+        btopo  = self._dfield._topology
 
         field = bfield.field_like(name=name, pretty_name=pretty_name,
                                   latex_name=latex_name, var_name=var_name,
                                   initial_values=initial_values, dtype=dtype,
                                   lboundaries=lboundaries, rboundaries=rboundaries,
-                                  register_object=register_discrete_field)
+                                  nb_components=kwds.pop('nb_components', None))
 
         topology = btopo.topology_like(backend=backend,
                                        grid_resolution=grid_resolution, ghosts=ghosts,
@@ -1163,7 +1171,7 @@ class CartesianDiscreteScalarFieldView(CartesianDiscreteScalarFieldViewContainer
 
     def randomize(self, **kwds):
         """Initialize a the with random values."""
-        for d in xrange(self.nb_components):
+        for d in range(self.nb_components):
             self.backend.rand(out=self.data[d], **kwds)
         return self
 
@@ -1268,7 +1276,7 @@ class CartesianDiscreteScalarFieldView(CartesianDiscreteScalarFieldViewContainer
         """
         assert ('data' not in kwds)
         msg = 'Passing ghosts as an integer is not supported anymore, use a tuple of size dim instead.'
-        if isinstance(ghosts, (int, long)):
+        if isinstance(ghosts, int):
             raise RuntimeError(msg)
 
         directions = to_tuple(first_not_None(directions, range(self.dim)), cast=int)
@@ -1320,7 +1328,7 @@ class CartesianDiscreteScalarFieldView(CartesianDiscreteScalarFieldViewContainer
         Build a ghost exchanger for cartesian discrete fields, possibly on different data.
         """
         msg = 'Passing ghosts as an integer is not supported anymore, use a tuple of size dim instead.'
-        if isinstance(ghosts, (int, long)):
+        if isinstance(ghosts, int):
             raise RuntimeError(msg)
 
         ghost_op = first_not_None(ghost_op, GhostOperation.EXCHANGE)
@@ -1527,6 +1535,12 @@ class CartesianDiscreteScalarField(CartesianDiscreteScalarFieldView, DiscreteSca
             obj._mem_tag = field.mem_tag
         return obj
 
+    @debug
+    def __init__(self, field, topology, init_topology_state=None,
+                allocate_data=True, **kwds):
+        super(CartesianDiscreteScalarField, self).__init__(field=field, topology=topology,
+                topology_state=None, dfield=None, **kwds)
+
     def _handle_data(self, data):
         assert (self._data is None)
         from hysop.core.arrays.array import Array
@@ -1576,6 +1590,7 @@ class TmpCartesianDiscreteScalarField(CartesianDiscreteScalarField):
                                                                   register_discrete_field=True, **kwds)
         return obj
 
+
     @debug
     def __init__(self, **kwds):
         super(TmpCartesianDiscreteScalarField, self).__init__(allocate_data=False,
diff --git a/hysop/fields/continuous_field.py b/hysop/fields/continuous_field.py
index 8eb8e09c08b03a079bfcd7820f76ec50e03a95c1..399f3354e1888b7b41d8617b6290e8f399ad175b 100644
--- a/hysop/fields/continuous_field.py
+++ b/hysop/fields/continuous_field.py
@@ -1,3 +1,5 @@
+# coding: utf-8
+
 """
 Continuous fields description and containers.
 * :class:`~hysop.fields.continuous.FieldContainerI`
@@ -11,28 +13,27 @@ import sympy as sm
 import numpy as np
 from abc import ABCMeta, abstractmethod
 
-from hysop.constants         import HYSOP_REAL, HYSOP_BOOL, BoundaryCondition, BoundaryConditionConfig, DirectionLabels
-from hysop.tools.decorators  import debug
-from hysop.tools.types       import check_instance, first_not_None, to_tuple
-from hysop.tools.warning    import HysopWarning
+from hysop.constants import HYSOP_REAL, HYSOP_BOOL, BoundaryCondition, BoundaryConditionConfig, DirectionLabels
+from hysop.tools.decorators import debug
+from hysop.tools.types import check_instance, first_not_None, to_tuple
+from hysop.tools.warning import HysopWarning
 from hysop.tools.handle import TaggedObject
 from hysop.tools.numpywrappers import npw
 from hysop.domain.domain import Domain
 from hysop.domain.box import BoxBoundaryCondition
 from hysop.topology.topology import Topology, TopologyState
 from hysop.tools.sympy_utils import nabla, partial, subscript, subscripts, \
-                                    exponent, exponents, xsymbol
+    exponent, exponents, xsymbol
 from hysop.symbolic import SpaceSymbol
 from hysop.tools.interface import NamedObjectI, SymbolContainerI, \
-        NamedScalarContainerI, NamedTensorContainerI
+    NamedScalarContainerI, NamedTensorContainerI
 
 
 class FieldContainerI(TaggedObject):
     """Common abstract interface for scalar and tensor-like fields."""
 
     @debug
-    def __new__(cls, domain,
-            name=None, nb_components=None, shape=None, is_vector=None, **kwds):
+    def __new__(cls, domain, name=None, nb_components=None, shape=None, is_vector=None, **kwds):
         """
         Create a FieldContainer on a specific domain.
         domain : domain.Domain
@@ -49,12 +50,12 @@ class FieldContainerI(TaggedObject):
             check_instance(name, str, allow_none=False)
             if (shape is not None):
                 return TensorField(domain=domain, name=name, shape=shape, **kwds)
-            elif ((is_vector is True) or \
+            elif ((is_vector is True) or
                   ((nb_components is not None) and (nb_components > 1))):
                 nb_components = first_not_None(nb_components, domain.dim)
                 assert (is_vector is not True) or (nb_components == domain.dim)
                 return VectorField(domain=domain, name=name,
-                                        nb_components=nb_components, **kwds)
+                                   nb_components=nb_components, **kwds)
             else:
                 return ScalarField(domain=domain, name=name, **kwds)
 
@@ -64,6 +65,10 @@ class FieldContainerI(TaggedObject):
         obj._dim = int(domain.dim)
         return obj
 
+    @debug
+    def __init__(self, domain, name=None, nb_components=None, shape=None, is_vector=None, **kwds):
+        super(FieldContainerI, self).__init__(**kwds)
+
     @property
     def is_scalar(self):
         return (not self.is_tensor)
@@ -107,8 +112,8 @@ class FieldContainerI(TaggedObject):
 
     @classmethod
     def from_sympy_expressions(cls, name, exprs, space_symbols,
-                                    scalar_name_prefix=None, scalar_pretty_name_prefix=None,
-                                    pretty_name=None,  **kwds):
+                               scalar_name_prefix=None, scalar_pretty_name_prefix=None,
+                               pretty_name=None,  **kwds):
         """
         Create a field wich has the same shape as exprs, with optional names.
         Expressions should be of kind sympy.Expr and are converted to FieldExpression: this
@@ -121,13 +126,9 @@ class FieldContainerI(TaggedObject):
             raise NotImplementedError('Call self.from_sympy_expression instead.')
         check_instance(exprs, npw.ndarray, values=sm.Expr)
         check_instance(name, str)
-        check_instance(pretty_name, (str, unicode), allow_none=True)
+        check_instance(pretty_name, str, allow_none=True)
         check_instance(scalar_name_prefix, str, allow_none=True)
-        check_instance(scalar_pretty_name_prefix, (str, unicode), allow_none=True)
-        if isinstance(pretty_name, unicode):
-            pretty_name = pretty_name.encode('utf-8')
-        if isinstance(scalar_pretty_name_prefix, unicode):
-            scalar_pretty_name_prefix = scalar_pretty_name_prefix.encode('utf-8')
+        check_instance(scalar_pretty_name_prefix, str, allow_none=True)
 
         fields = npw.empty(shape=exprs.shape, dtype=object)
         fields[...] = None
@@ -136,18 +137,18 @@ class FieldContainerI(TaggedObject):
                 sname = TensorField.default_name_formatter(scalar_name_prefix, idx)
                 if (scalar_pretty_name_prefix is not None):
                     spname = TensorField.default_pretty_name_formatter(
-                            scalar_pretty_name_prefix, idx)
+                        scalar_pretty_name_prefix, idx)
                 else:
                     spname = TensorField.default_pretty_name_formatter(
-                            scalar_name_prefix, idx)
+                        scalar_name_prefix, idx)
             else:
                 # names will be autogenerated from sympy expression
-                sname  = None
+                sname = None
                 spname = None
 
             fields[idx] = cls.from_sympy_expression(expr=exprs[idx],
-                            space_symbols=space_symbols,
-                            name=sname, pretty_name=spname, **kwds)
+                                                    space_symbols=space_symbols,
+                                                    name=sname, pretty_name=spname, **kwds)
         return TensorField.from_field_array(name=name, pretty_name=pretty_name,
                                             fields=fields)
 
@@ -169,8 +170,8 @@ class FieldContainerI(TaggedObject):
 
         # determine domain and boundary conditions
         fe = FieldExpressionBuilder.to_field_expression(
-                expr=expr, space_symbols=space_symbols, strict=True)
-        kwds['domain']      = fe.domain
+            expr=expr, space_symbols=space_symbols, strict=True)
+        kwds['domain'] = fe.domain
         kwds['lboundaries'] = fe.lboundaries
         kwds['rboundaries'] = fe.rboundaries
 
@@ -180,18 +181,17 @@ class FieldContainerI(TaggedObject):
         # finally return create and return the ScalarField
         return ScalarField(**kwds)
 
-
     def gradient(self, name=None, pretty_name=None,
-                       scalar_name_prefix=None, scalar_pretty_name_prefix=None,
-                       directions=None, axis=-1,
-                       space_symbols=None,
-                       dtype=None, **kwds):
+                 scalar_name_prefix=None, scalar_pretty_name_prefix=None,
+                 directions=None, axis=-1,
+                 space_symbols=None,
+                 dtype=None, **kwds):
         """
         Create a field capable of storing the gradient of self,
         possibly altered.
         """
-        dim    = self.dim  # dimension of the domain
-        ndim   = self.ndim # number of dimension of the np.ndarray
+        dim = self.dim  # dimension of the domain
+        ndim = self.ndim  # number of dimension of the np.ndarray
         frame = self.domain.frame
 
         directions = to_tuple(first_not_None(directions, range(dim)))
@@ -201,17 +201,17 @@ class FieldContainerI(TaggedObject):
         check_instance(space_symbols, tuple, values=SpaceSymbol, size=dim, unique=True)
 
         ndirs = len(directions)
-        if ndim>0:
-            axis  = (axis+ndim)%ndim
+        if ndim > 0:
+            axis = (axis+ndim) % ndim
             shape = self.shape[:axis+1] + (ndirs,) + self.shape[axis+1:]
         else:
             shape = (ndirs,)
 
         name = first_not_None(name, 'grad_{}'.format(self.name))
-        pretty_name = first_not_None(pretty_name, '{}{}'.format(nabla.encode('utf8'),
-                                     self.pretty_name))
+        pretty_name = first_not_None(pretty_name, '{}{}'.format(nabla,
+                                                                self.pretty_name))
 
-        if shape==(1,):
+        if shape == (1,):
             expr = self.symbol(frame.time, *space_symbols).diff(space_symbols[directions[0]])
             return self.from_sympy_expression(expr=expr, space_symbols=space_symbols,
                                               name=name, pretty_name=pretty_name,
@@ -223,16 +223,16 @@ class FieldContainerI(TaggedObject):
                 d = directions[idx[axis+1]]
                 if self.is_tensor:
                     exprs[idx] = self[i].symbol(frame.time,
-                            *space_symbols).diff(space_symbols[d])
+                                                *space_symbols).diff(space_symbols[d])
                 else:
-                    assert i==(), i
+                    assert i == (), i
                     exprs[idx] = self.symbol(frame.time, *space_symbols).diff(space_symbols[d])
             return self.from_sympy_expressions(
-                    exprs=exprs, space_symbols=space_symbols,
-                    name=name, pretty_name=pretty_name,
-                    scalar_name_prefix=scalar_name_prefix,
-                    scalar_pretty_name_prefix=scalar_pretty_name_prefix,
-                    dtype=dtype, **kwds)
+                exprs=exprs, space_symbols=space_symbols,
+                name=name, pretty_name=pretty_name,
+                scalar_name_prefix=scalar_name_prefix,
+                scalar_pretty_name_prefix=scalar_pretty_name_prefix,
+                dtype=dtype, **kwds)
 
     def laplacian(self, name=None, pretty_name=None,
                   scalar_name_prefix=None, scalar_pretty_name_prefix=None,
@@ -242,8 +242,8 @@ class FieldContainerI(TaggedObject):
         exprs = laplacian(self.symbol(*frame.vars), frame)
 
         name = first_not_None(name, 'laplacian_{}'.format(self.name))
-        pretty_name = first_not_None(pretty_name, u'\u0394{}'.format(
-                                     self.pretty_name.decode('utf-8')))
+        pretty_name = first_not_None(pretty_name, 'Δ{}'.format(
+                                     self.pretty_name))
 
         if isinstance(exprs, npw.ndarray):
             if (exprs.size == 1):
@@ -253,21 +253,20 @@ class FieldContainerI(TaggedObject):
                                                   dtype=dtype, **kwds)
             else:
                 return self.from_sympy_expressions(
-                        exprs=exprs, space_symbols=frame.coords,
-                        name=name, pretty_name=pretty_name,
-                        scalar_name_prefix=scalar_name_prefix,
-                        scalar_pretty_name_prefix=scalar_pretty_name_prefix,
-                        dtype=dtype, **kwds)
+                    exprs=exprs, space_symbols=frame.coords,
+                    name=name, pretty_name=pretty_name,
+                    scalar_name_prefix=scalar_name_prefix,
+                    scalar_pretty_name_prefix=scalar_pretty_name_prefix,
+                    dtype=dtype, **kwds)
         else:
             expr = exprs
             return self.from_sympy_expression(expr=expr, space_symbols=frame.coords,
                                               name=name, pretty_name=pretty_name,
                                               dtype=dtype, **kwds)
 
-
     def div(self, name=None, pretty_name=None,
-                  scalar_name_prefix=None, scalar_pretty_name_prefix=None,
-                  axis=-1, dtype=None, **kwds):
+            scalar_name_prefix=None, scalar_pretty_name_prefix=None,
+            axis=-1, dtype=None, **kwds):
         """
         Create a field capable of storing the divergence of self,
         on chosen axis.
@@ -277,11 +276,11 @@ class FieldContainerI(TaggedObject):
         exprs = npw.asarray(div(self.symbol(*frame.vars), frame))
 
         name = first_not_None(name, 'div_{}'.format(self.name))
-        pretty_name = first_not_None(pretty_name, u'{}\u22c5{}'.format(nabla,
-                                     self.pretty_name.decode('utf-8')))
+        pretty_name = first_not_None(pretty_name, '{}⋅{}'.format(nabla,
+                                                                 self.pretty_name))
 
-        if exprs.size in (0,1):
-            expr = npw.asscalar(exprs)
+        if exprs.size in (0, 1):
+            expr = exprs.item()
             return self.from_sympy_expression(expr=expr, space_symbols=frame.coords,
                                               name=name, pretty_name=pretty_name,
                                               dtype=dtype, **kwds)
@@ -293,8 +292,8 @@ class FieldContainerI(TaggedObject):
                                                dtype=dtype, **kwds)
 
     def curl(self, name=None, pretty_name=None,
-                   scalar_name_prefix=None, scalar_pretty_name_prefix=None,
-                   dtype=None, **kwds):
+             scalar_name_prefix=None, scalar_pretty_name_prefix=None,
+             dtype=None, **kwds):
         """
         Create a field capable of storing the curl of self,
 
@@ -308,31 +307,30 @@ class FieldContainerI(TaggedObject):
         """
         from hysop.symbolic.field import curl
 
-
-        if (self.dim==2):
-            msg='Can only take curl for a 2D field with one or two components.'
-            assert self.nb_components in (1,2), msg
-        elif (self.dim==3):
-            msg='Can only take curl for a 3D field with three components.'
+        if (self.dim == 2):
+            msg = 'Can only take curl for a 2D field with one or two components.'
+            assert self.nb_components in (1, 2), msg
+        elif (self.dim == 3):
+            msg = 'Can only take curl for a 3D field with three components.'
             assert self.nb_components in (3,), msg
         else:
-            msg='Can only take curl for a 2D or 3D vector field.'
-            assert (self.dim in (2,3)), msg
+            msg = 'Can only take curl for a 2D or 3D vector field.'
+            assert (self.dim in (2, 3)), msg
 
         frame = self.domain.frame
         exprs = curl(self.symbol(*frame.vars), frame)
 
         name = first_not_None(name, 'curl_{}'.format(self.name))
-        pretty_name = first_not_None(pretty_name, u'{}\u2227{}'.format(nabla,
-                                     self.pretty_name.decode('utf-8')))
+        pretty_name = first_not_None(pretty_name, '{}∧{}'.format(nabla,
+                                                                 self.pretty_name))
 
         if isinstance(exprs, npw.ndarray):
             return self.from_sympy_expressions(
-                    exprs=exprs, space_symbols=frame.coords,
-                    name=name, pretty_name=pretty_name,
-                    scalar_name_prefix=scalar_name_prefix,
-                    scalar_pretty_name_prefix=scalar_pretty_name_prefix,
-                    dtype=dtype, **kwds)
+                exprs=exprs, space_symbols=frame.coords,
+                name=name, pretty_name=pretty_name,
+                scalar_name_prefix=scalar_name_prefix,
+                scalar_pretty_name_prefix=scalar_pretty_name_prefix,
+                dtype=dtype, **kwds)
         else:
             return self.from_sympy_expression(expr=exprs, space_symbols=frame.coords,
                                               name=name, pretty_name=pretty_name,
@@ -342,7 +340,6 @@ class FieldContainerI(TaggedObject):
         """See curl."""
         return self.curl(*args, **kwds)
 
-
     def get_attributes(self, *attrs):
         """
         Return all matching attributes contained in self.fields,
@@ -367,12 +364,12 @@ class FieldContainerI(TaggedObject):
             if type(a) != type(b):
                 return False
             if isinstance(a, (list, tuple, set, frozenset)):
-                for (ai,bi) in zip(a, b):
+                for (ai, bi) in zip(a, b):
                     if not are_equal(ai, bi):
                         return False
                 return True
             if isinstance(a, dict):
-                for k in set(a.keys()+b.keys()):
+                for k in set(a.keys()).union(b.keys()):
                     if (k not in a) or (k not in b):
                         return False
                     ak, bk = a[k], b[k]
@@ -380,8 +377,8 @@ class FieldContainerI(TaggedObject):
                         return False
                     return True
             if isinstance(a, npw.ndarray):
-                return npw.array_equal(a,b)
-            return (a==b)
+                return npw.array_equal(a, b)
+            return (a == b)
         objects = self.get_attributes(*attr)
         obj0 = objects[0]
         for obj in objects[1:]:
@@ -396,32 +393,39 @@ class FieldContainerI(TaggedObject):
         field views.
         """
         if self.has_unique_attribute(*attr):
-           return self.fields[0].get_attributes(*attr)[0]
-        msg='{} is not unique accross contained fields.'
-        msg=msg.format('.'.join(str(x) for x in attr))
+            return self.fields[0].get_attributes(*attr)[0]
+        msg = '{} is not unique across contained fields.'
+        msg = msg.format('.'.join(str(x) for x in attr))
         raise AttributeError(msg)
 
     def has_unique_dtype(self):
         """Return true if all contained discrete fields share the same dtype."""
         return self.has_unique_attribute('dtype')
+
     def has_unique_lboundaries(self):
         """Return true if all contained continuous fields share the same lboundaries."""
         return self.has_unique_attribute("lboundaries")
+
     def has_unique_rboundaries(self):
         """Return true if all contained continuous fields share the same rboundaries."""
         return self.has_unique_attribute("rboundaries")
+
     def has_unique_boundaries(self):
         """Return true if all contained continuous fields share the same boundaries."""
         return self.has_unique_attribute("boundaries")
+
     def has_unique_lboundaries_kind(self):
         """Return true if all contained continuous fields share the same lboundaries kind."""
         return self.has_unique_attribute("lboundaries_kind")
+
     def has_unique_rboundaries_kind(self):
         """Return true if all contained continuous fields share the same rboundaries kind."""
         return self.has_unique_attribute("rboundaries_kind")
+
     def has_unique_boundaries_kind(self):
         """Return true if all contained continuous fields share the same boundaries kind."""
         return self.has_unique_attribute("boundaries_kind")
+
     def has_unique_periodicity(self):
         """Return true if all contained continuous fields share the same periodicity."""
         return self.has_unique_attribute("periodicity")
@@ -433,6 +437,7 @@ class FieldContainerI(TaggedObject):
         else raise an AttributeError.
         """
         return self.get_unique_attribute('dtype')
+
     @property
     def lboundaries(self):
         """
@@ -440,6 +445,7 @@ class FieldContainerI(TaggedObject):
         else raise an AttributeError.
         """
         return self.get_unique_attribute("lboundaries")
+
     @property
     def rboundaries(self):
         """
@@ -447,6 +453,7 @@ class FieldContainerI(TaggedObject):
         else raise an AttributeError.
         """
         return self.get_unique_attribute("rboundaries")
+
     @property
     def boundaries(self):
         """
@@ -454,6 +461,7 @@ class FieldContainerI(TaggedObject):
         else raise an AttributeError.
         """
         return self.get_unique_attribute("boundaries")
+
     @property
     def lboundaries_kind(self):
         """
@@ -461,6 +469,7 @@ class FieldContainerI(TaggedObject):
         else raise an AttributeError.
         """
         return self.get_unique_attribute("lboundaries_kind")
+
     @property
     def rboundaries_kind(self):
         """
@@ -468,6 +477,7 @@ class FieldContainerI(TaggedObject):
         else raise an AttributeError.
         """
         return self.get_unique_attribute("rboundaries_kind")
+
     @property
     def boundaries_kind(self):
         """
@@ -475,6 +485,7 @@ class FieldContainerI(TaggedObject):
         else raise an AttributeError.
         """
         return self.get_unique_attribute("boundaries_kind")
+
     @property
     def periodicity(self):
         """
@@ -485,15 +496,17 @@ class FieldContainerI(TaggedObject):
 
     def __eq__(self, other):
         return (self is other)
+
     def __ne__(self, other):
         return (self is not other)
+
     def __hash__(self):
         return id(self)
 
-
     def _get_domain(self):
         """Return the physical domain where this field is defined."""
         return self._domain
+
     def _get_dim(self):
         """Return the dimension of the physical domain."""
         return self._dim
@@ -532,7 +545,7 @@ class ScalarField(NamedScalarContainerI, FieldContainerI):
                 initial_values=None, dtype=HYSOP_REAL,
                 lboundaries=None, rboundaries=None,
                 is_tmp=False, mem_tag=None, **kwds):
-        """
+        r"""
         Create or get an existing continuous ScalarField (scalar or vector) on a specific domain.
 
         Parameters
@@ -541,7 +554,7 @@ class ScalarField(NamedScalarContainerI, FieldContainerI):
             Physical domain where this field is defined.
         name : string
             A name for the field.
-        pretty_name: string or unicode, optional.
+        pretty_name: str, optional.
             A pretty name used for display whenever possible.
             Defaults to name.
         var_name: string, optional.
@@ -587,7 +600,7 @@ class ScalarField(NamedScalarContainerI, FieldContainerI):
             Numpy array mask, True is axis is periodic, else False.
         """
         check_instance(name, str)
-        check_instance(pretty_name, (str, unicode), allow_none=True)
+        check_instance(pretty_name, str, allow_none=True)
         check_instance(latex_name, str, allow_none=True)
         check_instance(var_name, str, allow_none=True)
         check_instance(is_tmp, bool)
@@ -596,54 +609,53 @@ class ScalarField(NamedScalarContainerI, FieldContainerI):
             assert is_tmp, 'Can only specify mem_tag for temporary fields.'
 
         # Data type of the field
-        if (dtype==npw.bool) or (dtype==bool):
+        if (dtype == npw.bool_) or (dtype == bool):
             import warnings
-            msg='Parameter dtype=npw.bool has been converted '
-            msg+='to HYSOP_BOOL={}.'.format(HYSOP_BOOL.__name__)
+            msg = 'Parameter dtype=npw.bool_ has been converted '
+            msg += 'to HYSOP_BOOL={}.'.format(HYSOP_BOOL.__name__)
             warnings.warn(msg, HysopWarning)
             dtype = HYSOP_BOOL
         dtype = npw.dtype(dtype)
 
         # Name and pretty name
         pretty_name = first_not_None(pretty_name, name)
-        if isinstance(pretty_name, unicode):
-            pretty_name = pretty_name.encode('utf-8')
         check_instance(pretty_name, str)
 
         # Initial values
-        if not isinstance(initial_values,(list,tuple)):
+        if not isinstance(initial_values, (list, tuple)):
             initial_values = (initial_values, initial_values)
-        assert len(initial_values)==2
+        assert len(initial_values) == 2
         initial_values = tuple(initial_values)
         check_instance(initial_values, tuple, size=2)
 
         # Field boundary conditions
         lboundaries = npw.asarray(first_not_None(lboundaries,
-            cls.default_boundaries_from_domain(domain.lboundaries)))
+                                                 cls.default_boundaries_from_domain(domain.lboundaries)))
         rboundaries = npw.asarray(first_not_None(rboundaries,
-            cls.default_boundaries_from_domain(domain.rboundaries)))
+                                                 cls.default_boundaries_from_domain(domain.rboundaries)))
         check_instance(lboundaries, npw.ndarray, values=(BoundaryCondition, BoundaryConditionConfig),
-                ndim=1, size=domain.dim, dtype=object, allow_none=True)
+                       ndim=1, size=domain.dim, dtype=object, allow_none=True)
         check_instance(rboundaries, npw.ndarray, values=(BoundaryCondition, BoundaryConditionConfig),
-                ndim=1, size=domain.dim, dtype=object, allow_none=True)
+                       ndim=1, size=domain.dim, dtype=object, allow_none=True)
         assert lboundaries.size == rboundaries.size == domain.dim
-        for i,(lb,rb) in enumerate(zip(lboundaries,rboundaries)):
-            if (lb.bc==BoundaryCondition.PERIODIC) ^ (rb.bc==BoundaryCondition.PERIODIC):
-                msg='Periodic BoundaryCondition mismatch on axis {}.'.format(i)
+        for i, (lb, rb) in enumerate(zip(lboundaries, rboundaries)):
+            if (lb.bc == BoundaryCondition.PERIODIC) ^ (rb.bc == BoundaryCondition.PERIODIC):
+                msg = 'Periodic BoundaryCondition mismatch on axis {}.'.format(i)
                 raise ValueError(msg)
         check_instance(lboundaries, npw.ndarray, values=(BoundaryCondition, BoundaryConditionConfig),
-                ndim=1, size=domain.dim, dtype=object)
+                       ndim=1, size=domain.dim, dtype=object)
         check_instance(rboundaries, npw.ndarray, values=(BoundaryCondition, BoundaryConditionConfig),
-                ndim=1, size=domain.dim, dtype=object)
+                       ndim=1, size=domain.dim, dtype=object)
 
-        periodic    = BoundaryCondition.PERIODIC
-        periodicity = np.asarray(map(lambda x: x.bc, lboundaries))==periodic
+        periodic = BoundaryCondition.PERIODIC
+        periodicity = np.asarray(tuple(map(lambda x: x.bc, lboundaries))) == periodic
 
+        kwds.pop('make_field', None)
         obj = super(ScalarField, cls).__new__(cls, domain=domain,
-                name=name, pretty_name=pretty_name,
-                var_name=var_name, latex_name=latex_name,
-                tag_prefix='f', tagged_cls=ScalarField, **kwds)
-        obj._dtype  = dtype
+                                              name=name, pretty_name=pretty_name,
+                                              var_name=var_name, latex_name=latex_name,
+                                              tag_prefix='f', tagged_cls=ScalarField, **kwds)
+        obj._dtype = dtype
         obj._initial_values = initial_values
         obj._is_tmp = is_tmp
         obj._mem_tag = mem_tag
@@ -662,23 +674,34 @@ class ScalarField(NamedScalarContainerI, FieldContainerI):
         cls.__check_vars(obj)
         return obj
 
+    def __init__(self, domain, name, pretty_name=None,
+                 var_name=None, latex_name=None,
+                 initial_values=None, dtype=HYSOP_REAL,
+                 lboundaries=None, rboundaries=None,
+                 is_tmp=False, mem_tag=None, **kwds):
+        kwds.pop('make_field', None)
+        super(ScalarField, self).__init__(domain=domain,
+                                          name=name, pretty_name=pretty_name,
+                                          var_name=var_name, latex_name=latex_name,
+                                          tag_prefix='f', tagged_cls=ScalarField, **kwds)
+
     @classmethod
     def default_boundaries_from_domain(cls, boundaries):
         check_instance(boundaries, npw.ndarray, values=BoxBoundaryCondition)
         field_boundaries = npw.empty_like(boundaries)
         field_boundaries[...] = None
-        for (i,bd) in enumerate(boundaries):
+        for (i, bd) in enumerate(boundaries):
             if (bd is BoxBoundaryCondition.PERIODIC):
                 fbd = BoundaryCondition.PERIODIC
-            elif (bd is BoxBoundaryCondition.SYMMETRIC): # (normal to boundary velocity = 0)
+            elif (bd is BoxBoundaryCondition.SYMMETRIC):  # (normal to boundary velocity = 0)
                 # let any advected scalar to be 0 in boundaries
                 fbd = BoundaryCondition.HOMOGENEOUS_DIRICHLET
-            elif (bd is BoxBoundaryCondition.OUTFLOW): # (velocity normal to boundary)
+            elif (bd is BoxBoundaryCondition.OUTFLOW):  # (velocity normal to boundary)
                 # let any advected scalar to go trough the boundary
                 fbd = BoundaryCondition.HOMOGENEOUS_NEUMANN
             else:
-                msg='FATAL ERROR: Unknown domain boundary condition {}.'
-                msg=msg.format(bd)
+                msg = 'FATAL ERROR: Unknown domain boundary condition {}.'
+                msg = msg.format(bd)
                 raise NotImplementedError(msg)
             field_boundaries[i] = fbd
         return field_boundaries
@@ -695,31 +718,31 @@ class ScalarField(NamedScalarContainerI, FieldContainerI):
         check_instance(obj.discrete_fields, dict)
         check_instance(obj.initial_values, tuple, size=2)
         check_instance(obj.lboundaries, npw.ndarray, values=(BoundaryCondition, BoundaryConditionConfig),
-                ndim=1, size=obj.domain.dim, dtype=object)
+                       ndim=1, size=obj.domain.dim, dtype=object)
         check_instance(obj.rboundaries, npw.ndarray, values=(BoundaryCondition, BoundaryConditionConfig),
-                ndim=1, size=obj.domain.dim, dtype=object)
+                       ndim=1, size=obj.domain.dim, dtype=object)
         check_instance(obj.periodicity, npw.ndarray, dtype=bool,
-                ndim=1, size=obj.domain.dim)
+                       ndim=1, size=obj.domain.dim)
         check_instance(obj.is_tmp, bool)
 
     def field_like(self, name, pretty_name=None,
-            latex_name=None, var_name=None,
-            domain=None, dtype=None, is_tmp=None,
-            lboundaries=None, rboundaries=None,
-            initial_values=None, **kwds):
+                   latex_name=None, var_name=None,
+                   domain=None, dtype=None, is_tmp=None,
+                   lboundaries=None, rboundaries=None,
+                   initial_values=None, **kwds):
         """Create a ScalarField like this object, possibly altered."""
         check_instance(name, str)
-        domain         = first_not_None(domain, self.domain)
-        dtype          = first_not_None(dtype, self.dtype)
-        is_tmp         = first_not_None(is_tmp, self.is_tmp)
-        lboundaries    = first_not_None(lboundaries, self.lboundaries)
-        rboundaries    = first_not_None(rboundaries, self.rboundaries)
+        domain = first_not_None(domain, self.domain)
+        dtype = first_not_None(dtype, self.dtype)
+        is_tmp = first_not_None(is_tmp, self.is_tmp)
+        lboundaries = first_not_None(lboundaries, self.lboundaries)
+        rboundaries = first_not_None(rboundaries, self.rboundaries)
         initial_values = first_not_None(initial_values, self.initial_values)
         return ScalarField(name=name, pretty_name=pretty_name,
-                var_name=var_name, latex_name=latex_name,
-                domain=domain, dtype=dtype, is_tmp=is_tmp,
-                lboundaries=lboundaries, rboundaries=rboundaries,
-                initial_values=initial_values, **kwds)
+                           var_name=var_name, latex_name=latex_name,
+                           domain=domain, dtype=dtype, is_tmp=is_tmp,
+                           lboundaries=lboundaries, rboundaries=rboundaries,
+                           initial_values=initial_values, **kwds)
 
     def tmp_like(self, name, **kwds):
         """Create a TemporaryField like self, possibly altered."""
@@ -741,7 +764,7 @@ class ScalarField(NamedScalarContainerI, FieldContainerI):
     def long_description(self):
         """Long description of this field."""
         s = textwrap.dedent(
-        '''
+            '''
         {}
           *name:           {}
           *pretty_name:    {}
@@ -754,15 +777,14 @@ class ScalarField(NamedScalarContainerI, FieldContainerI):
           *initial values: {}
           *topology tags:  [{}]
         ''').format(self.full_tag,
-                self.name, self.pretty_name,
-                self.var_name, self.latex_name,
-                self.dim, self.dtype,
-                self.lboundaries.tolist(), self.rboundaries.tolist(),
-                self.initial_values,
-                ','.join([k.full_tag for k in self.discrete_fields.keys()]))
+                    self.name, self.pretty_name,
+                    self.var_name, self.latex_name,
+                    self.dim, self.dtype,
+                    self.lboundaries.tolist(), self.rboundaries.tolist(),
+                    self.initial_values,
+                    ','.join([k.full_tag for k in self.discrete_fields.keys()]))
         return s[1:]
 
-
     @debug
     def discretize(self, topology, topology_state=None):
         """
@@ -802,39 +824,50 @@ class ScalarField(NamedScalarContainerI, FieldContainerI):
     def _get_dtype(self):
         """Return the default allocation dtype of this ScalarField."""
         return self._dtype
+
     def _get_initial_values(self):
         """Return initial value of this field (compute_val, ghost_val)."""
         return self._initial_values
+
     def _get_discrete_fields(self):
         """
         Return the dictionnary containing all the discretizations
         of this field.
         """
         return self._discrete_fields
+
     def _get_lboundaries(self):
         """Left boundary conditions."""
         return self._lboundaries
+
     def _get_rboundaries(self):
         """Right boundary conditions."""
         return self._rboundaries
+
     def _get_lboundaries_kind(self):
         """Left boundary condition kind."""
-        return np.asarray(map(lambda x: x.bc, self._lboundaries))
+        return np.asarray(tuple(map(lambda x: x.bc, self._lboundaries)))
+
     def _get_rboundaries_kind(self):
         """Right boundary condition kind."""
-        return np.asarray(map(lambda x: x.bc, self._rboundaries))
+        return np.asarray(tuple(map(lambda x: x.bc, self._rboundaries)))
+
     def _get_boundaries(self):
         """Left and right boundary conditions as a tuple."""
         return (self._lboundaries, self._rboundaries)
+
     def _get_boundaries_kind(self):
         """Left and right boundary condition kind as a tuple."""
         return (self.lboundaries_kind, self._get_lboundaries_kind)
+
     def _get_periodicity(self):
         """Numpy array mask, True is axis is periodic, else False."""
         return self._periodicity
+
     def _get_is_tmp(self):
         """Is this ScalarField a temporary field ?"""
         return self._is_tmp
+
     def _get_mem_tag(self):
         return self._mem_tag
 
@@ -865,10 +898,13 @@ class ScalarField(NamedScalarContainerI, FieldContainerI):
 
     def __str__(self):
         return self.long_description()
+
     def __eq__(self, other):
         return (self is other)
+
     def __ne__(self, other):
         return (self is not other)
+
     def __hash__(self):
         return id(self)
 
@@ -895,28 +931,26 @@ class TensorField(NamedTensorContainerI, FieldContainerI):
         return True
 
     def __new__(cls, domain, name, shape,
-                    pretty_name=None,
-                    name_formatter=None,
-                    pretty_name_formatter=None,
-                    skip_field=None,
-                    make_field=None,
-                    fields=None,
-                    base_kwds=None, **kwds):
+                pretty_name=None,
+                name_formatter=None,
+                pretty_name_formatter=None,
+                skip_field=None,
+                make_field=None,
+                fields=None,
+                base_kwds=None, **kwds):
         pretty_name = first_not_None(pretty_name, name)
         check_instance(name, str)
-        check_instance(pretty_name, (str, unicode))
-        if isinstance(pretty_name, unicode):
-            pretty_name = pretty_name.encode('utf-8')
+        check_instance(pretty_name, str)
 
         check_instance(shape, tuple, values=int)
-        if (len(shape)==1) and not issubclass(cls, VectorField):
+        if (len(shape) == 1) and not issubclass(cls, VectorField):
             obj = VectorField(domain=domain, shape=shape,
-                                name=name,
-                                name_formatter=name_formatter,
-                                pretty_name=pretty_name,
-                                pretty_name_formatter=pretty_name_formatter,
-                                skip_field=skip_field, make_field=make_field,
-                                fields=fields, base_kwds=base_kwds, **kwds)
+                              name=name,
+                              name_formatter=name_formatter,
+                              pretty_name=pretty_name,
+                              pretty_name_formatter=pretty_name_formatter,
+                              skip_field=skip_field, make_field=make_field,
+                              fields=fields, base_kwds=base_kwds, **kwds)
             return obj
 
         name_formatter = first_not_None(name_formatter, cls.default_name_formatter)
@@ -928,9 +962,9 @@ class TensorField(NamedTensorContainerI, FieldContainerI):
 
         check_instance(domain, Domain)
 
-        if npw.prod(shape)<=0:
-            msg='Invalid shape for a tensor-like field, got {}.'
-            msg=msg.format(shape)
+        if npw.prod(shape) <= 0:
+            msg = 'Invalid shape for a tensor-like field, got {}.'
+            msg = msg.format(shape)
             raise ValueError(msg)
 
         if (fields is None):
@@ -939,10 +973,9 @@ class TensorField(NamedTensorContainerI, FieldContainerI):
                 if skip_field(idx):
                     field = None
                 else:
-                    fname  = name_formatter(basename=name, idx=idx)
+                    fname = name_formatter(basename=name, idx=idx)
                     pfname = pretty_name_formatter(basename=pretty_name, idx=idx)
-                    field  = make_field(idx, domain=domain, name=fname, pretty_name=pfname,
-                                        **kwds)
+                    field = make_field(idx, domain=domain, name=fname, pretty_name=pfname, **kwds)
                 fields += (field,)
             cls._check_fields(*fields)
             fields = npw.asarray(fields, dtype=object).reshape(shape)
@@ -953,9 +986,9 @@ class TensorField(NamedTensorContainerI, FieldContainerI):
         assert npw.array_equal(fields.shape, shape)
 
         obj = super(TensorField, cls).__new__(cls, domain=domain,
-                name=name, pretty_name=pretty_name,
-                tag_prefix='tf', tagged_cls=TensorField,
-                contained_objects=fields, **base_kwds)
+                                              name=name, pretty_name=pretty_name,
+                                              tag_prefix='tf', tagged_cls=TensorField,
+                                              contained_objects=fields, **base_kwds)
         obj._fields = fields
         obj._name_formatter = name_formatter
         obj._pretty_name_formatter = pretty_name_formatter
@@ -968,6 +1001,20 @@ class TensorField(NamedTensorContainerI, FieldContainerI):
         obj._check_names()
         return obj
 
+    def __init__(self, domain, name, shape,
+                 pretty_name=None,
+                 name_formatter=None,
+                 pretty_name_formatter=None,
+                 skip_field=None,
+                 make_field=None,
+                 fields=None,
+                 base_kwds=None, **kwds):
+        base_kwds = first_not_None(base_kwds, {})
+        super(TensorField, self).__init__(domain=domain,
+                                          name=name, pretty_name=pretty_name,
+                                          tag_prefix='tf', tagged_cls=TensorField,
+                                          contained_objects=fields, **base_kwds)
+
     def discretize(self, topology, topology_state=None):
         from hysop.fields.discrete_field import DiscreteTensorField
         dfields = npw.empty(shape=self.shape, dtype=object)
@@ -980,12 +1027,12 @@ class TensorField(NamedTensorContainerI, FieldContainerI):
     def from_fields(cls, name, fields, shape, pretty_name=None, **kwds):
         """Create a TensorField from a list of fields and a shape."""
         fields = to_tuple(fields)
-        shape  = to_tuple(shape)
+        shape = to_tuple(shape)
 
         check_instance(fields, tuple, values=(ScalarField,), minsize=1)
         check_instance(shape, tuple, values=int)
         check_instance(name, str)
-        check_instance(pretty_name, (str, unicode), allow_none=True)
+        check_instance(pretty_name, str, allow_none=True)
 
         cls._check_fields(*fields)
 
@@ -995,14 +1042,14 @@ class TensorField(NamedTensorContainerI, FieldContainerI):
         fields = npw.asarray(fields, dtype=object).reshape(shape)
 
         return Field(domain=domain, name=name, shape=shape, pretty_name=pretty_name,
-                    fields=fields, **kwds)
+                     fields=fields, **kwds)
 
     @classmethod
     def from_field_array(cls, name, fields, pretty_name=None, **kwds):
         """Create a TensorField from numpy.ndarray of fields."""
         assert (fields.size > 1)
         check_instance(name, str)
-        check_instance(pretty_name, (str, unicode), allow_none=True)
+        check_instance(pretty_name, str, allow_none=True)
         check_instance(fields, npw.ndarray, dtype=object, values=ScalarField)
         shape = fields.shape
 
@@ -1013,21 +1060,21 @@ class TensorField(NamedTensorContainerI, FieldContainerI):
         domain = field0.domain
 
         return Field(domain=domain, name=name, pretty_name=pretty_name,
-                shape=shape, fields=fields, **kwds)
+                     shape=shape, fields=fields, **kwds)
 
     @classmethod
     def _check_fields(cls, *fields):
         """Check that at least one field is specified."""
         field0 = first_not_None(*fields)
         if (field0 is None):
-            msg='Tensor field {} should at least contain a valid ScalarField.'
-            msg=msg.format(name)
+            msg = 'Tensor field {} should at least contain a valid ScalarField.'
+            msg = msg.format(name)
             raise ValueError(msg)
 
     @classmethod
     def default_name_formatter(cls, basename, idx):
 
-        assert len(basename)>0
+        assert len(basename) > 0
         if basename[-1] in '0123456789':
             sep = '_'
         else:
@@ -1038,8 +1085,8 @@ class TensorField(NamedTensorContainerI, FieldContainerI):
     @classmethod
     def default_pretty_name_formatter(cls, basename, idx):
         check_instance(basename, str)
-        assert len(basename)>0
-        pname = basename + subscripts(ids=idx, sep='').encode('utf-8')
+        assert len(basename) > 0
+        pname = basename + subscripts(ids=idx, sep='')
         return pname
 
     @classmethod
@@ -1055,7 +1102,7 @@ class TensorField(NamedTensorContainerI, FieldContainerI):
         for field in self:
             check_instance(field, ScalarField)
             if (field.domain.domain is not domain):
-                msg='Domain mismatch for field {}.'.format(field.name)
+                msg = 'Domain mismatch for field {}.'.format(field.name)
                 raise ValueError(msg)
 
     def _check_names(self):
@@ -1063,17 +1110,17 @@ class TensorField(NamedTensorContainerI, FieldContainerI):
         names = {}
         pnames = {}
         for field in self:
-            name  = field.name
+            name = field.name
             pname = field.pretty_name
             if (name in names) and (names[name] is not field):
-                msg='Name {} was already used by another field.'
-                msg=msg.format(name)
+                msg = 'Name {} was already used by another field.'
+                msg = msg.format(name)
                 raise ValueError(msg)
             if (pname in pnames) and (pretty_name[name] is not field):
-                msg='Name {} was already used by another field.'
-                msg=msg.format(pname)
+                msg = 'Name {} was already used by another field.'
+                msg = msg.format(pname)
                 raise ValueError(msg)
-            names[name]  = field
+            names[name] = field
             pnames[name] = field
 
     @property
@@ -1098,13 +1145,13 @@ class TensorField(NamedTensorContainerI, FieldContainerI):
         """Short description of this tensor field."""
         s = '{}[name={}, pname={}, dim={}, shape={}]'
         s = s.format(self.full_tag, self.name, self.pretty_name, self.dim,
-                        self.shape)
+                     self.shape)
         return s
 
     def long_description(self):
         """Long description of this tensor field as a string."""
-        s=textwrap.dedent(
-        '''
+        s = textwrap.dedent(
+            '''
             {}
               *name:           {}
               *pretty_name:    {}
@@ -1113,13 +1160,13 @@ class TensorField(NamedTensorContainerI, FieldContainerI):
               *nb_components:  {}
               *symbolic repr.:
         '''.format(self.full_tag,
-            self.name, self.pretty_name, self.dim, self.shape, self.nb_components))[1:]
-        s+='      '+'\n      '.join(str(self.symbol).split('\n'))
+                   self.name, self.pretty_name, self.dim, self.shape, self.nb_components))[1:]
+        s += '      '+'\n      '.join(str(self.symbol).split('\n'))
         return s
 
     def field_like(self, name, pretty_name=None,
-                        shape=None, nb_components=None,
-                        fn='field_like', **kwds):
+                   shape=None, nb_components=None,
+                   fn='field_like', **kwds):
         """Create a TensorField like this object, possibly altered."""
         if (shape is None) and (nb_components is not None):
             shape = (nb_components,)
@@ -1129,23 +1176,21 @@ class TensorField(NamedTensorContainerI, FieldContainerI):
 
         pretty_name = first_not_None(pretty_name, name)
         check_instance(name, str)
-        check_instance(pretty_name, (str,unicode))
-        if not isinstance(pretty_name, str):
-            pretty_name = pretty_name.encode('utf-8')
+        check_instance(pretty_name, str)
 
         if (nb_components == 1):
             return getattr(self.fields[0], fn)(name=name, pretty_name=pretty_name, **kwds)
         else:
             fields = npw.empty(shape=shape, dtype=object)
             if (self.shape == shape):
-                for (idx,field) in self.nd_iter():
-                    fname  = self._name_formatter(basename=name, idx=idx)
+                for (idx, field) in self.nd_iter():
+                    fname = self._name_formatter(basename=name, idx=idx)
                     pfname = self._pretty_name_formatter(basename=pretty_name, idx=idx)
                     fields[idx] = getattr(field, fn)(name=fname, pretty_name=pfname, **kwds)
             else:
                 field = self.fields[0]
                 for idx in npw.ndindex(*shape):
-                    fname  = self._name_formatter(basename=name, idx=idx)
+                    fname = self._name_formatter(basename=name, idx=idx)
                     pfname = self._pretty_name_formatter(basename=pretty_name, idx=idx)
                     fields[idx] = getattr(field, fn)(name=fname, pretty_name=pfname, **kwds)
             return self.from_field_array(name=name, pretty_name=pretty_name, fields=fields)
@@ -1172,20 +1217,24 @@ class VectorField(TensorField):
             check_instance(nb_components, int, minval=1)
             shape = (nb_components,)
         check_instance(shape, tuple, values=int, size=1)
-        if shape[0]==1:
+        if shape[0] == 1:
             return ScalarField(domain=domain, name=name, **kwds)
         obj = super(VectorField, cls).__new__(cls, domain=domain, name=name,
-                                                   shape=shape, **kwds)
+                                              shape=shape, **kwds)
         return obj
 
+    def __init__(self, domain, name, nb_components=None, shape=None, **kwds):
+        super(VectorField, self).__init__(domain=domain, name=name,
+                                          shape=shape, **kwds)
+
     @classmethod
     def default_name_formatter(cls, basename, idx):
-        assert len(basename)>0
+        assert len(basename) > 0
         if basename[-1] in '0123456789':
             sep = '_'
         else:
             sep = ''
-        if len(idx)==1:
+        if len(idx) == 1:
             name = basename + sep + '_'.join(DirectionLabels[i] for i in idx)
         else:
             name = basename + sep + '_'.join(str(i) for i in idx)
diff --git a/hysop/fields/default_fields.py b/hysop/fields/default_fields.py
index 39d9f2db563394553648a2fe66a1ba870211c02f..85b118d96258058e177110e9ab83f338daf0c31f 100644
--- a/hysop/fields/default_fields.py
+++ b/hysop/fields/default_fields.py
@@ -1,6 +1,6 @@
 from hysop.tools.types import first_not_None, check_instance
 from hysop.tools.sympy_utils import greak, Greak, subscripts
-from hysop.fields.continuous_field import Field, TensorField 
+from hysop.fields.continuous_field import Field, TensorField
 from hysop.tools.numpywrappers import npw
 from hysop.constants import BoxBoundaryCondition, BoundaryCondition
 
@@ -35,7 +35,7 @@ def VelocityField(domain, name=None, pretty_name=None, **kwds):
             fboundaries[i] = fbd
         check_instance(fboundaries, npw.ndarray, values=BoundaryCondition)
         return fboundaries
-    def _make_field(idx, **fkwds): 
+    def _make_field(idx, **fkwds):
         # Adapt velocity boundaries to domain boundaries
         component, = idx
         fkwds['lboundaries'] = velocity_boundaries(lboundaries, component)
@@ -53,7 +53,7 @@ def VorticityField(velocity, name=None, pretty_name=None, **kwds):
     assert velocity.nb_components == domain.dim, 'Invalid velocity Field.'
     name        = first_not_None(name, 'W')
     pretty_name = first_not_None(pretty_name, greak[24])
-    return velocity.curl(name=name, pretty_name=pretty_name, 
+    return velocity.curl(name=name, pretty_name=pretty_name,
             scalar_name_prefix=name, scalar_pretty_name_prefix=pretty_name,
             **kwds)
 
diff --git a/hysop/fields/discrete_field.py b/hysop/fields/discrete_field.py
index 1490a15a88cf54eb1ce05e4d2a67137514ef9370..d3babc3a02cdba3dde4ed7e8d4b38207e7da7c2b 100644
--- a/hysop/fields/discrete_field.py
+++ b/hysop/fields/discrete_field.py
@@ -7,8 +7,9 @@ Discrete fields (scalars or vectors) descriptions.
 """
 
 from abc import ABCMeta, abstractmethod
+import numpy as np
+
 from hysop import vprint
-from hysop.deps import np
 from hysop.tools.decorators import debug
 from hysop.tools.types import check_instance, first_not_None
 from hysop.tools.variable import VariableTag, Variable
@@ -23,18 +24,20 @@ from hysop.fields.continuous_field import Field, VectorField, TensorField, \
 from hysop.domain.domain import DomainView
 from hysop.mesh.mesh import MeshView
 
-class DiscreteScalarFieldViewContainerI(object):
+class DiscreteScalarFieldViewContainerI(object, metaclass=ABCMeta):
     """
     Common abstract interface for scalar and tensor-like container of
     discrete field views.
     """
 
-    __metaclass__ = ABCMeta
+    @debug
+    def __init__(self, **kwds):
+        super(DiscreteScalarFieldViewContainerI, self).__init__(**kwds)
 
     @debug
     def __new__(cls, **kwds):
         return super(DiscreteScalarFieldViewContainerI, cls).__new__(cls, **kwds)
-    
+
     @property
     def is_scalar(self):
         return (not self.is_tensor)
@@ -73,7 +76,7 @@ class DiscreteScalarFieldViewContainerI(object):
         but including duplicate fields.
         """
         return len(self.discrete_field_views())
-    
+
     def ids_to_components(self, ids):
         """Convert tensor coordinates into 1d offsets."""
         check_instance(ids, tuple, values=(int,tuple), allow_none=True)
@@ -122,7 +125,7 @@ class DiscreteScalarFieldViewContainerI(object):
 
     @abstractmethod
     def tmp_dfield_like(self, name, **kwds):
-        """
+        r"""
         Create a new Field container and a new temporary CartesianDiscreteField.
         like the current object, possibly on a different backend.
         /!\ The returned discrete field is not allocated.
@@ -267,7 +270,7 @@ class DiscreteScalarFieldViewContainerI(object):
             objects[idx] = obj
 
     def has_unique_attribute(self, *attr):
-        """
+        r"""
         Return true if all contained discrete fields share the same attribute
         (as stated by the == comparisson operator).
 
@@ -284,7 +287,7 @@ class DiscreteScalarFieldViewContainerI(object):
                         return False
                 return True
             if isinstance(a, dict):
-                for k in set(a.keys()+b.keys()):
+                for k in set(a.keys()).union(b.keys()):
                     if (k not in a) or (k not in b):
                         return False
                     ak, bk = a[k], b[k]
@@ -323,7 +326,7 @@ class DiscreteScalarFieldViewContainerI(object):
         return self.has_unique_attribute('dtype')
 
     def get_unique_attribute(self, *attr):
-        """
+        r"""
         Try to return the unique attribute common to all contained discrete fields.
         Raise an AttributeError if a attribute is not unique accross contained
         discrete field views.
@@ -412,19 +415,22 @@ class DiscreteScalarFieldViewContainerI(object):
         return self.long_description()
 
 
-class DiscreteScalarFieldView(DiscreteScalarFieldViewContainerI, TaggedObjectView, VariableTag):
+class DiscreteScalarFieldView(DiscreteScalarFieldViewContainerI, TaggedObjectView, VariableTag, metaclass=ABCMeta):
     """
     View over a DiscreteScalarField (taking into account a topology state).
     """
 
-    __metaclass__ = ABCMeta
-
     __slots__ = ('_dfield', '_topology_state', '_topology_view', '_symbol')
 
     @property
     def is_tensor(self):
         return False
 
+    @debug
+    def __init__(self, dfield, topology_state, **kwds):
+        super(DiscreteScalarFieldView, self).__init__(obj_view=dfield,
+                variable_kind=Variable.DISCRETE_FIELD, **kwds)
+
     @debug
     def __new__(cls, dfield, topology_state, **kwds):
         check_instance(dfield, DiscreteScalarField, allow_none=issubclass(cls, DiscreteScalarField))
@@ -448,7 +454,7 @@ class DiscreteScalarFieldView(DiscreteScalarFieldViewContainerI, TaggedObjectVie
         """Check properties and types."""
         check_instance(self.dtype, np.dtype)
         check_instance(self.name, str)
-        check_instance(self.pretty_name, (str,unicode))
+        check_instance(self.pretty_name, str)
         check_instance(self.dim, int, minval=1)
         check_instance(self.topology, TopologyView)
         check_instance(self.backend, ArrayBackend)
@@ -479,7 +485,7 @@ class DiscreteScalarFieldView(DiscreteScalarFieldViewContainerI, TaggedObjectVie
         return (obj is self)
     def __getitem__(self, slc):
         return self
-    
+
     def discrete_field_views(self):
         return (self,)
 
@@ -600,7 +606,7 @@ class DiscreteScalarFieldView(DiscreteScalarFieldViewContainerI, TaggedObjectVie
     memory_request_id = property(_get_memory_request_id)
 
 
-class DiscreteScalarField(NamedScalarContainerI, TaggedObject):
+class DiscreteScalarField(NamedScalarContainerI, TaggedObject, metaclass=ABCMeta):
     """
     Discrete representation of scalar or vector fields,
 
@@ -618,7 +624,14 @@ class DiscreteScalarField(NamedScalarContainerI, TaggedObject):
     depending on the discrete field topology backend and the ghost exchange strategy.
     """
 
-    __metaclass__ = ABCMeta
+    @debug
+    def __init__(self, field, topology, register_discrete_field=True,
+                name=None, pretty_name=None,
+                var_name=None, latex_name=None,
+                **kwds):
+        super(DiscreteScalarField, self).__init__(name=name, pretty_name=pretty_name,
+                                                  var_name=var_name, latex_name=latex_name,
+                                                  tag_prefix='df', **kwds)
 
     @debug
     def __new__(cls, field, topology, register_discrete_field=True,
@@ -638,7 +651,7 @@ class DiscreteScalarField(NamedScalarContainerI, TaggedObject):
             If set register input topology to input continuous field.
         name : string, optional
             A name for the field.
-        pretty_name: string or unicode, optional.
+        pretty_name: str, optional.
             A pretty name used for display whenever possible.
             Defaults to name.
         var_name: string, optional.
@@ -653,7 +666,8 @@ class DiscreteScalarField(NamedScalarContainerI, TaggedObject):
         check_instance(field, Field)
         check_instance(topology, Topology)
         check_instance(name, str, allow_none=True)
-        check_instance(pretty_name, (str,unicode), allow_none=True)
+        check_instance(pretty_name, str, allow_none=True)
+
 
         _name, _pretty_name, _var_name, _latex_name = \
                 cls.format_discrete_names(field.name,
@@ -703,8 +717,8 @@ class DiscreteScalarField(NamedScalarContainerI, TaggedObject):
             # Scalar discrete field names
             name        = '{}_t{}'.format(name, topology.id)
             pretty_name = '{}.{}{}'.format(pretty_name,
-                                            u'\u209c'.encode('utf-8'),
-                                            subscript(topology.id).encode('utf-8'))
+                                            'ₜ',
+                                            subscript(topology.id))
             var_name    = var_name + '_t{}'.format(topology.id)
             latex_name  = latex_name + '.t_{{{}}}'.format(0)
         return (name, pretty_name, var_name, latex_name)
@@ -728,11 +742,18 @@ class DiscreteTensorField(NamedTensorContainerI, DiscreteScalarFieldViewContaine
     Is also garanties that all fields shares the same domain, but contained
     discrete fields may be defined on different topologies.
     """
-    
+
     @property
     def is_tensor(self):
         return True
 
+    def __init__(self, field, dfields, name=None,
+                pretty_name=None, latex_name=None, **kwds):
+        super(DiscreteTensorField, self).__init__(name=name,
+                pretty_name=pretty_name, latex_name=latex_name,
+                tag_prefix='tdf', tagged_cls=DiscreteTensorField,
+                contained_objects=dfields, **kwds)
+
     def __new__(cls, field, dfields, name=None,
                 pretty_name=None, latex_name=None, **kwds):
         check_instance(field, TensorField)
@@ -807,7 +828,7 @@ class DiscreteTensorField(NamedTensorContainerI, DiscreteScalarFieldViewContaine
         Create a TensorField and a DiscreteTensorField from np.ndarray of discrete fields.
         """
         check_instance(name, str)
-        check_instance(pretty_name, (str, unicode), allow_none=True)
+        check_instance(pretty_name, str, allow_none=True)
         check_instance(dfields, npw.ndarray, dtype=object, values=DiscreteScalarFieldView)
 
         shape = dfields.shape
@@ -905,7 +926,7 @@ class DiscreteTensorField(NamedTensorContainerI, DiscreteScalarFieldViewContaine
                                         **kwds)
 
     def tmp_dfield_like(self, name, pretty_name=None, **kwds):
-        """
+        r"""
         Create a new Field container and a new temporary CartesianDiscreteField.
         like the current object, possibly on a different backend.
         /!\ The returned discrete field is not allocated.
diff --git a/hysop/fields/field_requirements.py b/hysop/fields/field_requirements.py
index a3e8f3fdfb59e3a8b637279c1d39623ed4912a51..5e0d60e35ce8b3079db39869497a46ad0a8e1931 100644
--- a/hysop/fields/field_requirements.py
+++ b/hysop/fields/field_requirements.py
@@ -1,8 +1,12 @@
+# coding: utf-8
+
+import numpy as np
+import itertools as it
+
 from hysop import __DEBUG__, main_size
-from hysop.deps import np, it, __builtin__, print_function
 from hysop.constants import MemoryOrdering
 from hysop.tools.transposition_states import TranspositionState
-from hysop.tools.types import to_list, to_tuple, check_instance
+from hysop.tools.types import to_list, to_tuple, check_instance, first_not_None
 from hysop.tools.numpywrappers import npw
 from hysop.tools.decorators import debug
 from hysop.topology.topology import Topology
@@ -21,7 +25,7 @@ TOPO_CREATION_DEBUG_LEVEL = 0
 def gprint(*args, **kwds):
     level = kwds.pop('level', 2)
     if TOPO_CREATION_DEBUG_LEVEL >= level:
-        __builtin__.print(*args, **kwds)
+        print(*args, **kwds)
 
 
 class DiscreteFieldRequirements(object):
@@ -114,20 +118,20 @@ class DiscreteFieldRequirements(object):
                   self.memory_order, self.tstates))
 
     def ghost_str(self, array):
-        inf = u'+\u221e'
-        vals = [u''+str(x) if np.isfinite(x) else inf for x in array]
-        return u'[{}]'.format(u','.join(vals)).strip()
+        inf = '+∞'
+        vals = [''+str(x) if np.isfinite(x) else inf for x in array]
+        return '[{}]'.format(','.join(vals)).strip()
 
     def __str__(self):
-        return u'{:15s}  {:>10s}<=ghosts<{:<10s}  memory_order={}  can_split={}  tstates={}'.format(
-               u'{}::{}'.format(getattr(self.operator, 'name', u'UnknownOperator'),
-                                getattr(self.field, 'name', u'UnknownField')),
+        return '{:15s}  {:>10s}<=ghosts<{:<10s}  memory_order={}  can_split={}  tstates={}'.format(
+               '{}::{}'.format(getattr(self.operator, 'name', 'UnknownOperator'),
+                               getattr(self.field, 'name', 'UnknownField')),
                self.ghost_str(self.min_ghosts),
                self.ghost_str(self.max_ghosts+1),
                self.memory_order,
-               u''+str(self.can_split.view(np.int8)),
-               u'[{}]'.format(u','.join(u''+str(ts) for ts in self.tstates))
-            if self.tstates else u'ANY').encode('utf-8')
+               ''+str(self.can_split.view(np.int8)),
+               '[{}]'.format(','.join(''+str(ts) for ts in self.tstates))
+            if self.tstates else 'ANY')
 
     def get_axes(self):
         return self._axes
@@ -353,7 +357,7 @@ class MultiFieldRequirements(object):
             return None
         else:
             assert self.nrequirements() == 1
-            return next(iter(self.requirements.values()[0]))
+            return next(iter(tuple(self.requirements.values())[0]))
 
     def nrequirements(self):
         return sum(len(lreqs) for lreqs in self.requirements.values())
@@ -378,7 +382,7 @@ class MultiFieldRequirements(object):
             return
 
         gprint("  1) SPLITTING REQUIREMENTS IN COMPATIBLE SUBGROUPS:")
-        multi_process = (self.requirements.keys()[0].mpi_params.size > 1)
+        multi_process = (tuple(self.requirements.keys())[0].mpi_params.size > 1)
         splitted_reqs = self._split(multi_process)
 
         gprint("  2) DETERMINING COMMON CARTESIAN TOPOLOGY SPLITTING AXES (if possible):")
@@ -441,7 +445,7 @@ class MultiFieldRequirements(object):
                     sub_field_requirements.append(new_group)
         assert self.nrequirements() == sum(sf.nrequirements() for sf in sub_field_requirements)
         for multi_reqs in sub_field_requirements:
-            for topology_descriptor, reqs in multi_reqs.requirements.iteritems():
+            for topology_descriptor, reqs in multi_reqs.requirements.items():
                 if isinstance(topology_descriptor, Topology):
                     dim = topology_descriptor.domain_dim
                 else:
@@ -459,7 +463,7 @@ class MultiFieldRequirements(object):
     def _build_compatible_topologies(self):
         assert self.all_compatible()
         all_topologies = set()
-        for topology_descriptor, reqs in self.requirements.iteritems():
+        for topology_descriptor, reqs in self.requirements.items():
             if isinstance(topology_descriptor, Topology):
                 gprint("     -Topology {}".format(topology_descriptor.short_description()))
                 dim = topology_descriptor.domain_dim
@@ -509,11 +513,11 @@ class OperatorFieldRequirements(object):
 
         check_instance(input_field_requirements, dict, keys=ScalarField,
                        values=MultiFieldRequirements, allow_none=True)
-        self._input_field_requirements = input_field_requirements or dict()
+        self._input_field_requirements = first_not_None(input_field_requirements, {})
 
         check_instance(output_field_requirements, dict, keys=ScalarField,
                        values=MultiFieldRequirements, allow_none=True)
-        self._output_field_requirements = output_field_requirements or dict()
+        self._output_field_requirements = first_not_None(output_field_requirements, {})
 
     def get_input_field_requirements(self):
         return self._input_field_requirements
@@ -554,9 +558,9 @@ class OperatorFieldRequirements(object):
         """
         Iterates over (field, topology_descriptor, field_requirement) for all input requirements.
         """
-        for (field, freqs) in self.input_field_requirements.iteritems():
+        for (field, freqs) in self.input_field_requirements.items():
             freqs = freqs.requirements
-            for (td, reqs) in freqs.iteritems():
+            for (td, reqs) in freqs.items():
                 for req in reqs:
                     yield field, td, req
 
@@ -564,9 +568,9 @@ class OperatorFieldRequirements(object):
         """
         Iterates over (field, topology_descriptor, field_requirement) for all output requirements.
         """
-        for (field, freqs) in self.output_field_requirements.iteritems():
+        for (field, freqs) in self.output_field_requirements.items():
             freqs = freqs.requirements
-            for (td, reqs) in freqs.iteritems():
+            for (td, reqs) in freqs.items():
                 for req in reqs:
                     yield (field, td, req)
 
@@ -575,8 +579,8 @@ class OperatorFieldRequirements(object):
         Iterates over (is_input, field, topology_descriptor, field_requirement) for
         all inputs and outputs.
         """
-        it0 = it.izip_longest((True,),  self.iter_input_requirements())
-        it1 = it.izip_longest((False,), self.iter_output_requirements())
+        it0 = it.zip_longest((True,),  self.iter_input_requirements())
+        it1 = it.zip_longest((False,), self.iter_output_requirements())
         return it.chain(it0, it1)
 
     def _get_requirement(self, field, field_requirements):
@@ -600,7 +604,7 @@ class OperatorFieldRequirements(object):
         if len(freqs.keys()) == 0:
             msg = 'No topology descriptors are present for field {}.'.format(field.name)
             raise RuntimeError(msg)
-        td = freqs.keys()[0]
+        td = tuple(freqs.keys())[0]
         reqs = freqs[td]
         if len(reqs) > 1:
             msg = 'Multiple requirements are present for field {}.'.format(field.name)
@@ -615,8 +619,7 @@ class OperatorFieldRequirements(object):
 
     @debug
     def build_topologies(self):
-        fields = set(self._input_field_requirements.keys()
-                     + self._output_field_requirements.keys())
+        fields = set(self._input_field_requirements.keys()).union(self._output_field_requirements.keys())
         # enforce deterministic iteration
         for field in sorted(fields, key=lambda x: '{}::{}'.format(x.name, x.short_description())):
             reqs = MultiFieldRequirements(field)
diff --git a/hysop/fields/ghost_exchangers.py b/hysop/fields/ghost_exchangers.py
index 66ecc18ad97f524792528c17e7fcd9ad46a5cbc2..258b03cba69d1c1b6b351003984e47d31f23f2f5 100644
--- a/hysop/fields/ghost_exchangers.py
+++ b/hysop/fields/ghost_exchangers.py
@@ -1,5 +1,7 @@
 from abc import ABCMeta, abstractmethod
-from hysop.deps import np, hashlib, __builtin__, print_function
+import hashlib
+import numpy as np
+
 from hysop.tools.types import check_instance, to_tuple, first_not_None
 from hysop.tools.numerics import default_invalid_value
 from hysop.tools.mpi_utils import iter_mpi_requests, dtype_to_mpi_type
@@ -18,7 +20,7 @@ GHOST_EXCHANGE_DEBUG_LEVEL=0
 def gprint(*args, **kwds):
     level = kwds.pop('level', 2)
     if GHOST_EXCHANGE_DEBUG_LEVEL >= level:
-        __builtin__.print(*args, **kwds)
+        print(*args, **kwds)
 def gprint_buffer(msg, buf, *args, **kwds):
     no_data = kwds.pop('no_data', False)
     if isinstance(buf, list):
@@ -27,7 +29,7 @@ def gprint_buffer(msg, buf, *args, **kwds):
     else:
         mpi_type=None
     gprint('{}: mpi_type={}, shape={}, dtype={}, c_contiguous={}, f_contiguous={}'.format(msg,
-        mpi_type, buf.shape, buf.dtype, buf.flags['C_CONTIGUOUS'], buf.flags['F_CONTIGUOUS']))
+        mpi_type, buf.shape, buf.dtype, buf.flags.c_contiguous, buf.flags.f_contiguous))
     if no_data:
         return
     gprint('   ' + '\n   '.join(str(buf).split('\n')))
@@ -97,7 +99,7 @@ class  LocalBoundaryExchanger(object):
         H = (0,)+(-1,)*(S-1)
         return cls.build_exchanger(shape=shape, direction=direction,
                                    H=H, to_left=to_left)
-    
+
     @classmethod
     def build_scalar_exchanger(cls, value, shape, direction, to_left):
         def exchange_ghosts(X, value=value):
@@ -106,9 +108,8 @@ class  LocalBoundaryExchanger(object):
         return exchange_ghosts
 
 
-class GhostExchangerI(object):
+class GhostExchangerI(object, metaclass=ABCMeta):
     """Abstract interface for a ghost exchanger."""
-    __metaclass__ = ABCMeta
 
     @abstractmethod
     def exchange_ghosts(self, **kwds):
@@ -212,7 +213,7 @@ class GhostExchanger(GhostExchangerI):
         self.mesh = topology.mesh
         self.backend = data[0].backend if hasattr(data[0], 'backend') else topology.backend
         self.host_backend = self.backend.host_array_backend
-        self.base_tag = int(hashlib.sha1(name).hexdigest(), 16) % (104729)
+        self.base_tag = int(hashlib.sha1(name.encode('utf-8')).hexdigest(), 16) % (104729)
         self.exchange_method = exchange_method
         self.ghost_op = ghost_op
         self.ghost_mask = ghost_mask
@@ -287,7 +288,7 @@ class CartesianDiscreteFieldGhostExchanger(GhostExchanger):
         assert all(0<=d<dim for d in directions)
         assert len(ghosts)==dim or len(ghosts)==1
         if len(ghosts)==1:
-            ghosts = tuple(ghosts[0] if (i in directions) else 0 for i in xrange(dim))
+            ghosts = tuple(ghosts[0] if (i in directions) else 0 for i in range(dim))
 
         self.directions = directions
         self.outer_ghosts = mesh.get_local_outer_ghost_slices(ghosts=ghosts,
@@ -300,13 +301,13 @@ class CartesianDiscreteFieldGhostExchanger(GhostExchanger):
         self.all_outer_ghost_slices = mesh.get_all_local_outer_ghost_slices(ghosts=ghosts)
         self.dim = dim
         self.ghosts = ghosts
-        
+
         # check that enforced boundaries kind matches topology boundaries
         if (global_lboundaries_config is not None):
-            global_lboundaries = np.asarray(map(lambda x: x.bc, global_lboundaries_config))
+            global_lboundaries = np.asarray(tuple(map(lambda x: x.bc, global_lboundaries_config)))
             assert (global_lboundaries == mesh.global_lboundaries).all(), (global_lboundaries, mesh.global_lboundaries)
         if (global_rboundaries_config is not None):
-            global_rboundaries = np.asarray(map(lambda x: x.bc, global_rboundaries_config))
+            global_rboundaries = np.asarray(tuple(map(lambda x: x.bc, global_rboundaries_config)))
             assert (global_rboundaries == mesh.global_rboundaries).all(), (global_rboundaries, mesh.global_rboundaries)
         self.global_lboundaries_config = global_lboundaries_config
         self.global_rboundaries_config = global_rboundaries_config
@@ -330,7 +331,7 @@ class CartesianDiscreteFieldGhostExchanger(GhostExchanger):
             for i, (lslcs,rslcs,shape) in enumerate(slices):
                 s+='{}direction {}  ||  LEFT: {}  ||  RIGHT: {}  ||  SHAPE: {}'.format(prefix, i, lslcs, rslcs, shape)
             return s
-        
+
         msg=\
 '''
     TOPOLOGY INFO:
@@ -350,8 +351,8 @@ class CartesianDiscreteFieldGhostExchanger(GhostExchanger):
         global rboundaries: {}
 '''.format(self.dim, topology.proc_shape, topology.proc_coords,
             mesh.is_at_left_boundary, mesh.is_at_right_boundary,
-           self.kind, self.directions, self.ghosts, 
-           fmt_slices(self.outer_ghosts), fmt_slices(self.inner_ghosts), 
+           self.kind, self.directions, self.ghosts,
+           fmt_slices(self.outer_ghosts), fmt_slices(self.inner_ghosts),
            self.boundary_layers, global_lboundaries_config, global_rboundaries_config)
         gprint(msg)
 
@@ -431,7 +432,7 @@ class CartesianDiscreteFieldGhostExchanger(GhostExchanger):
                     elif (bc is BoundaryCondition.HOMOGENEOUS_NEUMANN):
                         if isinstance(boundary, BoundaryConditionConfig) and (boundary.data is not None):
                             # allow to force boundary ghosts to a specific scalar value
-                            if isinstance(boundary.data, (int,long,float,np.number)):
+                            if isinstance(boundary.data, (int,float,np.number)):
                                 fn = LocalBoundaryExchanger.build_scalar_exchanger(value=boundary.data,
                                         shape=shape, direction=d, to_left=to_left)
                             else:
@@ -496,13 +497,13 @@ class CartesianDiscreteFieldGhostExchanger(GhostExchanger):
                             slc, shape = all_outer_ghost_slices[dim][directions][displacements]
                             value = default_invalid_value(dtype=base_dtype)
                             lp.diagonal_ghosts.append((buf,slc,shape,value))
-            
+
             assert self.directions
             assert any(ghosts[d]>0 for d in self.directions)
             for d in self.directions:
                 if ghosts[d]==0:
                     continue
-                
+
                 lboundary = left_boundaries_kind[d]
                 rboundary = right_boundaries_kind[d]
                 at_left   = is_at_left_boundary[d]
@@ -529,7 +530,7 @@ class CartesianDiscreteFieldGhostExchanger(GhostExchanger):
                 nprocs = proc_shape[d]
                 lp.has_mpi_exchanges |= (nprocs > 1)
                 assert (nprocs==1) or should_exchange_to_left or should_exchange_to_right
-                
+
                 msg=\
 '''      DATA {} EXCHANGES FOR DIRECTION {}:
         nprocs (directional procs):   {}
@@ -545,11 +546,11 @@ class CartesianDiscreteFieldGhostExchanger(GhostExchanger):
         associated_shape:             {}
         left_blayer  / right_blayer:  {} / {}
         boundary_layer_shape:         {}
-        Send/Receive:'''.format(i, d, nprocs, 
-        src_data_on_device, dst_data_on_device,        
-        local_rank, left_rank, right_rank, 
-        at_left, at_right, lboundary, rboundary, 
-        should_exchange_to_left, should_exchange_to_right, 
+        Send/Receive:'''.format(i, d, nprocs,
+        src_data_on_device, dst_data_on_device,
+        local_rank, left_rank, right_rank,
+        at_left, at_right, lboundary, rboundary,
+        should_exchange_to_left, should_exchange_to_right,
         inner_left, inner_right, outer_left, outer_right, shape,
         left_boundary_layer, right_boundary_layer, bl_shape)
                 gprint(msg)
@@ -653,7 +654,7 @@ class CartesianDiscreteFieldGhostExchanger(GhostExchanger):
                                          'source':left_rank,
                                          'tag':recvtag}
                             lp.irecv_kwds.append(recv_kwds)
-                            
+
                             msg='\t\t>P{}.IRecv(shape={}, dtype={}, tag={}) outer left data from left neighbour process P{}.'
                             msg=msg.format(local_rank, shape, buf.dtype, recvtag, left_rank)
                             gprint(msg)
@@ -677,7 +678,7 @@ class CartesianDiscreteFieldGhostExchanger(GhostExchanger):
                                          'dest':right_rank,
                                          'tag':sendtag}
                             lp.isend_kwds.append(send_kwds)
-                            
+
                             msg='\t\t>P{}.ISend(shape={}, dtype={}, tag={}) inner right data to right neighbour process P{}.'
                             msg=msg.format(local_rank, shape, buf.dtype, sendtag, right_rank)
                             gprint(msg)
@@ -697,7 +698,7 @@ class CartesianDiscreteFieldGhostExchanger(GhostExchanger):
                                          'source':right_rank,
                                          'tag':recvtag}
                             lp.irecv_kwds.append(recv_kwds)
-                            
+
                             msg='\t\t>P{}.IRecv(shape={}, dtype={}, tag={}) outer right data from right neighbour process P{}.'
                             msg=msg.format(local_rank, shape, buf.dtype, recvtag, right_rank)
                             gprint(msg)
@@ -842,7 +843,7 @@ class CartesianDiscreteFieldGhostExchanger(GhostExchanger):
                 else:
                     msg='Unknown GhostOperation {}.'.format(ghost_op)
                     raise NotImplementedError(msg)
-            
+
             msg='Something went wrong while initializing LauncherParameters::{}::{}, got value {{}}.'
             msg=msg.format(ghost_op, exchange_method)
             assert (lp.from_buffer is not None), msg.format(lp.from_buffer)
@@ -857,7 +858,7 @@ class CartesianDiscreteFieldGhostExchanger(GhostExchanger):
                 scount, rcount = 0, 0
 
                 src_buffers, dst_buffers = (), ()
-                for direction in xrange(comm.Get_dim()):
+                for direction in range(comm.Get_dim()):
                     for (tag,rank) in zip(('left','right'), comm.Shift(direction, 1)):
                         displ = 0
                         if ((tag,rank) in lp.w_send_requests):
@@ -943,7 +944,7 @@ class CartesianDiscreteFieldGhostExchanger(GhostExchanger):
             send_disp, recv_disp = 0,0     # in type counts
             src_buffers, dst_buffers = (), ()
             send_buffer, recv_buffer = None, None
-            for direction in xrange(comm.Get_dim()):
+            for direction in range(comm.Get_dim()):
                 for (tag,rank) in zip(('left','right'), comm.Shift(direction, 1)):
                     send_count, recv_count = 0, 0
                     if ((tag,rank) in lp.v_send_requests):
diff --git a/hysop/fields/tests/test_cartesian.py b/hysop/fields/tests/test_cartesian.py
index 43a947fd7c8c49fe8e4cdc6e6298ccbe1b64e1b8..f09eba05d4bd0cb62c8d2777e9b49582ac279d10 100644
--- a/hysop/fields/tests/test_cartesian.py
+++ b/hysop/fields/tests/test_cartesian.py
@@ -1,8 +1,13 @@
-import os, subprocess, sys, time
+import os
+import subprocess
+import sys
+import time
+import itertools as it
+import numpy as np
+
 from hysop import __ENABLE_LONG_TESTS__
-from hysop.deps import it, np
 from hysop.constants import Backend, ExchangeMethod, GhostOperation, \
-                        GhostMask, DirectionLabels, BoundaryCondition
+    GhostMask, DirectionLabels, BoundaryCondition
 from hysop.tools.parameters import CartesianDiscretization
 from hysop.tools.numerics import is_integer, is_fp
 from hysop.tools.numpywrappers import npw
@@ -12,6 +17,7 @@ from hysop.topology.cartesian_topology import CartesianTopology, CartesianTopolo
 from hysop.testsenv import iter_clenv, test_context, domain_boundary_iterator
 from hysop.tools.numerics import is_fp, is_integer
 
+
 def __random_init(data, coords, component):
     shape = data.shape
     dtype = data.dtype
@@ -23,37 +29,40 @@ def __random_init(data, coords, component):
         msg = 'Unknown dtype {}.'.format(dtype)
         raise NotImplementedError(msg)
 
+
 def __zero_init(data, coords, component):
     data[...] = 0
 
+
 def __cst_init(cst):
-    def __init(data,coords,component):
+    def __init(data, coords, component):
         data[...] = cst
     return __init
 
+
 def test_serial_initialization_1d():
-    print
-    print 'test_serial_initialization_1d()'
+    print()
+    print('test_serial_initialization_1d()')
     dim = 1
     npts = (10,)
     nghosts = (2,)
 
     for (lbd, rbd) in domain_boundary_iterator(dim):
         domain = Box(dim=dim, lboundaries=lbd,
-                              rboundaries=rbd)
+                     rboundaries=rbd)
         F0 = Field(domain=domain, name='F0', nb_components=1)
         F1 = Field(domain=domain, name='F1', nb_components=2)
-        F2 = Field(domain=domain, name='F2', shape=(2,2))
-        print '[{}]'.format(F0.format_boundaries())
+        F2 = Field(domain=domain, name='F2', shape=(2, 2))
+        print('[{}]'.format(F0.format_boundaries()))
 
         discretization = CartesianDiscretization(npts, nghosts,
-                                            lboundaries=F0.lboundaries,
-                                            rboundaries=F0.rboundaries)
+                                                 lboundaries=F0.lboundaries,
+                                                 rboundaries=F0.rboundaries)
 
         topo0 = CartesianTopology(domain=domain, discretization=discretization,
-                        backend=Backend.HOST)
+                                  backend=Backend.HOST)
         topos = (topo0,) + tuple(CartesianTopology(domain=domain, discretization=discretization,
-            backend=Backend.OPENCL, cl_env=cl_env) for cl_env in iter_clenv())
+                                                   backend=Backend.OPENCL, cl_env=cl_env) for cl_env in iter_clenv())
 
         assert all(t.mesh.global_lboundaries == t.mesh.local_lboundaries == F0.lboundaries for t in topos)
         assert all(t.mesh.global_rboundaries == t.mesh.local_rboundaries == F0.rboundaries for t in topos)
@@ -81,54 +90,54 @@ def test_serial_initialization_1d():
             Lx, = tuple(discretization.lboundaries)
             Rx, = tuple(discretization.rboundaries)
             try:
-                for i,d in enumerate(data):
+                for i, d in enumerate(data):
                     b = d.get().handle
-                    assert b.shape==(Nx+2*Gx,)
-                    if (Lx==BoundaryCondition.PERIODIC):
+                    assert b.shape == (Nx+2*Gx,)
+                    if (Lx == BoundaryCondition.PERIODIC):
                         assert (b[Gx:2*Gx] == b[Gx+Nx:]).all(), b
-                    elif (Lx==BoundaryCondition.HOMOGENEOUS_DIRICHLET):
+                    elif (Lx == BoundaryCondition.HOMOGENEOUS_DIRICHLET):
                         assert (b[Gx] == 0).all(), b
                         assert (b[:Gx] == -b[Gx+1:2*Gx+1][::-1]).all(), b
-                    elif (Lx==BoundaryCondition.HOMOGENEOUS_NEUMANN):
+                    elif (Lx == BoundaryCondition.HOMOGENEOUS_NEUMANN):
                         assert (b[:Gx] == +b[Gx+1:2*Gx+1][::-1]).all(), b
                     else:
                         raise NotImplementedError('Unknown boundary condition {}.'.format(Lx))
-                    if (Rx==BoundaryCondition.PERIODIC):
+                    if (Rx == BoundaryCondition.PERIODIC):
                         assert (b[:Gx] == b[Nx:Gx+Nx]).all(), b
-                    elif (Rx==BoundaryCondition.HOMOGENEOUS_DIRICHLET):
+                    elif (Rx == BoundaryCondition.HOMOGENEOUS_DIRICHLET):
                         assert (b[Nx+Gx-1] == 0).all(), b
                         assert (b[Nx-1:Nx+Gx-1] == -b[Nx+Gx:][::-1]).all(), b
-                    elif (Rx==BoundaryCondition.HOMOGENEOUS_NEUMANN):
+                    elif (Rx == BoundaryCondition.HOMOGENEOUS_NEUMANN):
                         assert (b[Nx-1:Nx+Gx-1] == +b[Nx+Gx:][::-1]).all(), b
                     else:
                         raise NotImplementedError('Unknown boundary condition {}.'.format(Rx))
                     sys.stdout.write('.')
                     sys.stdout.flush()
             finally:
-                print
+                print()
 
 
 def test_serial_initialization_2d():
-    print
-    print 'test_serial_initialization_2d()'
+    print()
+    print('test_serial_initialization_2d()')
     dim = 2
-    npts = (4,8)
-    nghosts = (1,2)
+    npts = (4, 8)
+    nghosts = (1, 2)
     for (lbd, rbd) in domain_boundary_iterator(dim):
         domain = Box(dim=dim, lboundaries=lbd,
-                              rboundaries=rbd)
+                     rboundaries=rbd)
         F0 = Field(domain=domain, name='F0', nb_components=1)
         F1 = Field(domain=domain, name='F1', nb_components=2)
-        print '[{}]'.format(F0.format_boundaries())
+        print('[{}]'.format(F0.format_boundaries()))
 
         discretization = CartesianDiscretization(npts, nghosts,
-                                            lboundaries=F0.lboundaries,
-                                            rboundaries=F0.rboundaries)
+                                                 lboundaries=F0.lboundaries,
+                                                 rboundaries=F0.rboundaries)
 
         topo0 = CartesianTopology(domain=domain, discretization=discretization,
-                        backend=Backend.HOST)
+                                  backend=Backend.HOST)
         topos = (topo0,) + tuple(CartesianTopology(domain=domain, discretization=discretization,
-            backend=Backend.OPENCL, cl_env=cl_env) for cl_env in iter_clenv())
+                                                   backend=Backend.OPENCL, cl_env=cl_env) for cl_env in iter_clenv())
 
         assert all(np.all(t.mesh.local_lboundaries == F0.lboundaries) for t in topos)
         assert all(np.all(t.mesh.local_rboundaries == F0.rboundaries) for t in topos)
@@ -144,24 +153,24 @@ def test_serial_initialization_2d():
             assert all(np.all(d.local_lboundaries == F0.lboundaries) for d in dfields)
             assert all(np.all(d.local_rboundaries == F0.rboundaries) for d in dfields)
 
-            Ny,Nx = npts
-            Gy,Gx = nghosts
-            Ly,Lx = tuple(discretization.lboundaries)
-            Ry,Rx = tuple(discretization.rboundaries)
-            Xo = (slice(0,Gx),
-                   slice(Gx,Gx+Nx),
-                   slice(Gx+Nx,None))
-            Yo = (slice(0,Gy),
-                   slice(Gy,Gy+Ny),
-                   slice(Gy+Ny,None))
+            Ny, Nx = npts
+            Gy, Gx = nghosts
+            Ly, Lx = tuple(discretization.lboundaries)
+            Ry, Rx = tuple(discretization.rboundaries)
+            Xo = (slice(0, Gx),
+                  slice(Gx, Gx+Nx),
+                  slice(Gx+Nx, None))
+            Yo = (slice(0, Gy),
+                  slice(Gy, Gy+Ny),
+                  slice(Gy+Ny, None))
             Xi = (slice(Gx, 2*Gx),
-                   slice(2*Gx,Nx),
-                   slice(Nx, Gx+Nx))
+                  slice(2*Gx, Nx),
+                  slice(Nx, Gx+Nx))
             Yi = (slice(Gy, 2*Gy),
-                   slice(2*Gy,Ny),
-                   slice(Ny, Gy+Ny))
-            Ix = (slice(None,None,+1), slice(None,None,-1))
-            Iy = (slice(None,None,-1), slice(None,None,+1))
+                  slice(2*Gy, Ny),
+                  slice(Ny, Gy+Ny))
+            Ix = (slice(None, None, +1), slice(None, None, -1))
+            Iy = (slice(None, None, -1), slice(None, None, +1))
             data = dF0.data + dF1.data
 
             for ghost_mask in GhostMask.all:
@@ -172,8 +181,8 @@ def test_serial_initialization_2d():
                 sys.stdout.flush()
 
                 if ghost_mask is GhostMask.FULL:
-                    Fx = slice(None,None)
-                    Fy = slice(None,None)
+                    Fx = slice(None, None)
+                    Fy = slice(None, None)
                 elif ghost_mask is GhostMask.CROSS:
                     # we exclude exterior ghosts because pattern is CROSS
                     # we exclude interior ghost because of boundary clashes:
@@ -186,110 +195,110 @@ def test_serial_initialization_2d():
                     raise NotImplementedError(ghost_mask)
 
                 try:
-                    for (i,d) in enumerate(data):
+                    for (i, d) in enumerate(data):
                         b = d.get().handle
 
-                        assert b.shape==(Ny+2*Gy,Nx+2*Gx)
-                        assert b[Yo[1],Xo[1]].shape == npts
+                        assert b.shape == (Ny+2*Gy, Nx+2*Gx)
+                        assert b[Yo[1], Xo[1]].shape == npts
 
-                        assert b[Yo[0],Xo[0]].shape == nghosts
-                        assert b[Yo[0],Xo[2]].shape == nghosts
-                        assert b[Yo[2],Xo[0]].shape == nghosts
-                        assert b[Yo[2],Xo[2]].shape == nghosts
+                        assert b[Yo[0], Xo[0]].shape == nghosts
+                        assert b[Yo[0], Xo[2]].shape == nghosts
+                        assert b[Yo[2], Xo[0]].shape == nghosts
+                        assert b[Yo[2], Xo[2]].shape == nghosts
 
-                        assert b[Yi[0],Xi[0]].shape == nghosts
-                        assert b[Yi[0],Xi[2]].shape == nghosts
-                        assert b[Yi[2],Xi[0]].shape == nghosts
-                        assert b[Yi[2],Xi[2]].shape == nghosts
+                        assert b[Yi[0], Xi[0]].shape == nghosts
+                        assert b[Yi[0], Xi[2]].shape == nghosts
+                        assert b[Yi[2], Xi[0]].shape == nghosts
+                        assert b[Yi[2], Xi[2]].shape == nghosts
 
                         if ghost_mask is GhostMask.FULL:
-                            assert b[Fy,Fx].shape == b.shape
+                            assert b[Fy, Fx].shape == b.shape
                         elif ghost_mask is GhostMask.CROSS:
-                            assert b[Fy,Fx].shape == (Ny-2*Gy, Nx-2*Gx)
+                            assert b[Fy, Fx].shape == (Ny-2*Gy, Nx-2*Gx)
                         else:
                             raise NotImplementedError(ghost_mask)
 
-                        if (Lx==BoundaryCondition.PERIODIC):
-                            assert np.all(b[Fy,Xo[0]] == b[Fy,Xi[2]]), '\n'+str(d)
-                        elif (Lx==BoundaryCondition.HOMOGENEOUS_DIRICHLET):
-                            assert (b[Fy,Gx] == 0).all(), '\n'+str(d)
-                            assert (b[Fy,:Gx] == -b[Fy,Gx+1:2*Gx+1][Ix]).all(), '\n'+str(d)
-                        elif (Lx==BoundaryCondition.HOMOGENEOUS_NEUMANN):
-                            assert (b[Fy,:Gx] == +b[Fy,Gx+1:2*Gx+1][Ix]).all(), '\n'+str(d)
+                        if (Lx == BoundaryCondition.PERIODIC):
+                            assert np.all(b[Fy, Xo[0]] == b[Fy, Xi[2]]), '\n'+str(d)
+                        elif (Lx == BoundaryCondition.HOMOGENEOUS_DIRICHLET):
+                            assert (b[Fy, Gx] == 0).all(), '\n'+str(d)
+                            assert (b[Fy, :Gx] == -b[Fy, Gx+1:2*Gx+1][Ix]).all(), '\n'+str(d)
+                        elif (Lx == BoundaryCondition.HOMOGENEOUS_NEUMANN):
+                            assert (b[Fy, :Gx] == +b[Fy, Gx+1:2*Gx+1][Ix]).all(), '\n'+str(d)
                         else:
                             raise NotImplementedError('Unknown boundary condition {}.'.format(Lx))
 
-                        if (Rx==BoundaryCondition.PERIODIC):
-                            assert np.all(b[Fy,Xo[2]] == b[Fy,Xi[0]]), '\n'+str(d)
-                        elif (Rx==BoundaryCondition.HOMOGENEOUS_DIRICHLET):
-                            assert (b[Fy,Nx+Gx-1] == 0).all(), '\n'+str(d)
-                            assert (b[Fy,Nx-1:Nx+Gx-1] == -b[Fy,Nx+Gx:][Ix]).all(), '\n'+str(d)
-                        elif (Rx==BoundaryCondition.HOMOGENEOUS_NEUMANN):
-                            assert (b[Fy,Nx-1:Nx+Gx-1] == +b[Fy,Nx+Gx:][Ix]).all(), '\n'+str(d)
+                        if (Rx == BoundaryCondition.PERIODIC):
+                            assert np.all(b[Fy, Xo[2]] == b[Fy, Xi[0]]), '\n'+str(d)
+                        elif (Rx == BoundaryCondition.HOMOGENEOUS_DIRICHLET):
+                            assert (b[Fy, Nx+Gx-1] == 0).all(), '\n'+str(d)
+                            assert (b[Fy, Nx-1:Nx+Gx-1] == -b[Fy, Nx+Gx:][Ix]).all(), '\n'+str(d)
+                        elif (Rx == BoundaryCondition.HOMOGENEOUS_NEUMANN):
+                            assert (b[Fy, Nx-1:Nx+Gx-1] == +b[Fy, Nx+Gx:][Ix]).all(), '\n'+str(d)
                         else:
                             raise NotImplementedError('Unknown boundary condition {}.'.format(Rx))
 
-                        if (Ly==BoundaryCondition.PERIODIC):
-                            assert np.all(b[Yo[0],Fx] == b[Yi[2],Fx]), '\n'+str(d)
-                        elif (Ly==BoundaryCondition.HOMOGENEOUS_DIRICHLET):
-                            assert (b[Gy,Fx] == 0).all(), '\n'+str(d)
-                            assert (b[:Gy,Fx] == -b[Gy+1:2*Gy+1,Fx][Iy]).all(), '\n'+str(d)
-                        elif (Ly==BoundaryCondition.HOMOGENEOUS_NEUMANN):
-                            assert (b[:Gy,Fx] == +b[Gy+1:2*Gy+1,Fx][Iy]).all(), '\n'+str(d)
+                        if (Ly == BoundaryCondition.PERIODIC):
+                            assert np.all(b[Yo[0], Fx] == b[Yi[2], Fx]), '\n'+str(d)
+                        elif (Ly == BoundaryCondition.HOMOGENEOUS_DIRICHLET):
+                            assert (b[Gy, Fx] == 0).all(), '\n'+str(d)
+                            assert (b[:Gy, Fx] == -b[Gy+1:2*Gy+1, Fx][Iy]).all(), '\n'+str(d)
+                        elif (Ly == BoundaryCondition.HOMOGENEOUS_NEUMANN):
+                            assert (b[:Gy, Fx] == +b[Gy+1:2*Gy+1, Fx][Iy]).all(), '\n'+str(d)
                         else:
                             raise NotImplementedError('Unknown boundary condition {}.'.format(Ly))
 
-                        if (Ry==BoundaryCondition.PERIODIC):
-                            assert np.all(b[Yo[2],Fx] == b[Yi[0],Fx]), '\n'+str(d)
-                        elif (Ry==BoundaryCondition.HOMOGENEOUS_DIRICHLET):
-                            assert (b[Ny+Gy-1,Fx] == 0).all(), '\n'+str(d)
-                            assert (b[Ny-1:Ny+Gy-1,Fx] == -b[Ny+Gy:,Fx][Iy]).all(), '\n'+str(d)
-                        elif (Ry==BoundaryCondition.HOMOGENEOUS_NEUMANN):
-                            assert (b[Ny-1:Ny+Gy-1,Fx] == +b[Ny+Gy:,Fx][Iy]).all(), '\n'+str(d)
+                        if (Ry == BoundaryCondition.PERIODIC):
+                            assert np.all(b[Yo[2], Fx] == b[Yi[0], Fx]), '\n'+str(d)
+                        elif (Ry == BoundaryCondition.HOMOGENEOUS_DIRICHLET):
+                            assert (b[Ny+Gy-1, Fx] == 0).all(), '\n'+str(d)
+                            assert (b[Ny-1:Ny+Gy-1, Fx] == -b[Ny+Gy:, Fx][Iy]).all(), '\n'+str(d)
+                        elif (Ry == BoundaryCondition.HOMOGENEOUS_NEUMANN):
+                            assert (b[Ny-1:Ny+Gy-1, Fx] == +b[Ny+Gy:, Fx][Iy]).all(), '\n'+str(d)
                         else:
                             raise NotImplementedError('Unknown boundary condition {}.'.format(Ry))
 
                         if (ghost_mask is GhostMask.FULL):
-                            if (Lx==Ly==Rx==Ry==BoundaryCondition.PERIODIC):
-                                assert np.all(b[Yo[0],Xo[0]]==b[Yi[2],Xi[2]]), '\n'+str(d)
-                                assert np.all(b[Yo[2],Xo[0]]==b[Yi[0],Xi[2]]), '\n'+str(d)
-                                assert np.all(b[Yo[2],Xo[2]]==b[Yi[0],Xi[0]]), '\n'+str(d)
-                                assert np.all(b[Yo[0],Xo[2]]==b[Yi[2],Xi[0]]), '\n'+str(d)
+                            if (Lx == Ly == Rx == Ry == BoundaryCondition.PERIODIC):
+                                assert np.all(b[Yo[0], Xo[0]] == b[Yi[2], Xi[2]]), '\n'+str(d)
+                                assert np.all(b[Yo[2], Xo[0]] == b[Yi[0], Xi[2]]), '\n'+str(d)
+                                assert np.all(b[Yo[2], Xo[2]] == b[Yi[0], Xi[0]]), '\n'+str(d)
+                                assert np.all(b[Yo[0], Xo[2]] == b[Yi[2], Xi[0]]), '\n'+str(d)
                         elif (ghost_mask is GhostMask.CROSS):
-                            assert np.all(np.isnan(b[Yo[0],Xo[0]])), '\n'+str(d)
-                            assert np.all(np.isnan(b[Yo[2],Xo[0]])), '\n'+str(d)
-                            assert np.all(np.isnan(b[Yo[2],Xo[2]])), '\n'+str(d)
-                            assert np.all(np.isnan(b[Yo[0],Xo[2]])), '\n'+str(d)
+                            assert np.all(np.isnan(b[Yo[0], Xo[0]])), '\n'+str(d)
+                            assert np.all(np.isnan(b[Yo[2], Xo[0]])), '\n'+str(d)
+                            assert np.all(np.isnan(b[Yo[2], Xo[2]])), '\n'+str(d)
+                            assert np.all(np.isnan(b[Yo[0], Xo[2]])), '\n'+str(d)
                         else:
-                            msg='Unknown ghost mask {}.'.format(ghost_mask)
+                            msg = 'Unknown ghost mask {}.'.format(ghost_mask)
                             raise NotImplementedError(msg)
                         sys.stdout.write('.')
                         sys.stdout.flush()
                 finally:
-                    print
+                    print()
 
 
 def test_serial_initialization_3d():
-    print
-    print 'test_serial_initialization_3d()'
+    print()
+    print('test_serial_initialization_3d()')
     dim = 3
-    npts = (8,5,5)
-    nghosts = (3,1,2)
+    npts = (8, 5, 5)
+    nghosts = (3, 1, 2)
     for (lbd, rbd) in domain_boundary_iterator(dim):
         domain = Box(dim=dim, lboundaries=lbd,
-                              rboundaries=rbd)
+                     rboundaries=rbd)
         F0 = Field(domain=domain, name='F0', nb_components=1)
         F1 = Field(domain=domain, name='F1', nb_components=3)
-        print '[{}]'.format(F0.format_boundaries())
+        print('[{}]'.format(F0.format_boundaries()))
 
         discretization = CartesianDiscretization(npts, nghosts,
-                                            lboundaries=F0.lboundaries,
-                                            rboundaries=F0.rboundaries)
+                                                 lboundaries=F0.lboundaries,
+                                                 rboundaries=F0.rboundaries)
 
         topo0 = CartesianTopology(domain=domain, discretization=discretization,
-                        backend=Backend.HOST)
+                                  backend=Backend.HOST)
         topos = (topo0,) + tuple(CartesianTopology(domain=domain, discretization=discretization,
-            backend=Backend.OPENCL, cl_env=cl_env) for cl_env in iter_clenv())
+                                                   backend=Backend.OPENCL, cl_env=cl_env) for cl_env in iter_clenv())
 
         assert all(np.all(t.mesh.local_lboundaries == F0.lboundaries) for t in topos)
         assert all(np.all(t.mesh.local_rboundaries == F0.rboundaries) for t in topos)
@@ -305,31 +314,31 @@ def test_serial_initialization_3d():
             assert all(np.all(d.local_lboundaries == F0.lboundaries) for d in dfields)
             assert all(np.all(d.local_rboundaries == F0.rboundaries) for d in dfields)
 
-            Nz,Ny,Nx = npts
-            Gz,Gy,Gx = nghosts
-            Lz,Ly,Lx = tuple(discretization.lboundaries)
-            Rz,Ry,Rx = tuple(discretization.rboundaries)
-            Xo = (slice(0,Gx),
-                   slice(Gx,Gx+Nx),
-                   slice(Gx+Nx,None))
-            Yo = (slice(0,Gy),
-                   slice(Gy,Gy+Ny),
-                   slice(Gy+Ny,None))
-            Zo = (slice(0,Gz),
-                   slice(Gz,Gz+Nz),
-                   slice(Gz+Nz,None))
+            Nz, Ny, Nx = npts
+            Gz, Gy, Gx = nghosts
+            Lz, Ly, Lx = tuple(discretization.lboundaries)
+            Rz, Ry, Rx = tuple(discretization.rboundaries)
+            Xo = (slice(0, Gx),
+                  slice(Gx, Gx+Nx),
+                  slice(Gx+Nx, None))
+            Yo = (slice(0, Gy),
+                  slice(Gy, Gy+Ny),
+                  slice(Gy+Ny, None))
+            Zo = (slice(0, Gz),
+                  slice(Gz, Gz+Nz),
+                  slice(Gz+Nz, None))
             Xi = (slice(Gx, 2*Gx),
-                   slice(2*Gx,Nx),
-                   slice(Nx, Gx+Nx))
+                  slice(2*Gx, Nx),
+                  slice(Nx, Gx+Nx))
             Yi = (slice(Gy, 2*Gy),
-                   slice(2*Gy,Ny),
-                   slice(Ny, Gy+Ny))
+                  slice(2*Gy, Ny),
+                  slice(Ny, Gy+Ny))
             Zi = (slice(Gz, 2*Gz),
-                   slice(2*Gz,Nz),
-                   slice(Nz, Gz+Nz))
-            Ix = (slice(None,None,+1), slice(None,None,+1), slice(None,None,-1))
-            Iy = (slice(None,None,+1), slice(None,None,-1), slice(None,None,+1))
-            Iz = (slice(None,None,-1), slice(None,None,+1), slice(None,None,+1))
+                  slice(2*Gz, Nz),
+                  slice(Nz, Gz+Nz))
+            Ix = (slice(None, None, +1), slice(None, None, +1), slice(None, None, -1))
+            Iy = (slice(None, None, +1), slice(None, None, -1), slice(None, None, +1))
+            Iz = (slice(None, None, -1), slice(None, None, +1), slice(None, None, +1))
             data = dF0.data + dF1.data
 
             for ghost_mask in GhostMask.all:
@@ -340,9 +349,9 @@ def test_serial_initialization_3d():
                 sys.stdout.flush()
 
                 if ghost_mask is GhostMask.FULL:
-                    Fx = slice(None,None)
-                    Fy = slice(None,None)
-                    Fz = slice(None,None)
+                    Fx = slice(None, None)
+                    Fy = slice(None, None)
+                    Fz = slice(None, None)
                 elif ghost_mask is GhostMask.CROSS:
                     # we exclude exterior ghosts because pattern is CROSS
                     # we exclude interior ghost because of boundary clashes:
@@ -356,122 +365,122 @@ def test_serial_initialization_3d():
                     raise NotImplementedError(ghost_mask)
 
                 try:
-                    for (i,d) in enumerate(data):
+                    for (i, d) in enumerate(data):
                         b = d.get().handle
-                        assert b.shape==(Nz+2*Gz,Ny+2*Gy,Nx+2*Gx)
-                        assert b[Zo[1],Yo[1],Xo[1]].shape == npts
-
-                        assert b[Zo[0],Yo[0],Xo[0]].shape == nghosts
-                        assert b[Zo[0],Yo[0],Xo[2]].shape == nghosts
-                        assert b[Zo[0],Yo[2],Xo[0]].shape == nghosts
-                        assert b[Zo[0],Yo[2],Xo[2]].shape == nghosts
-                        assert b[Zo[2],Yo[0],Xo[0]].shape == nghosts
-                        assert b[Zo[2],Yo[0],Xo[2]].shape == nghosts
-                        assert b[Zo[2],Yo[2],Xo[0]].shape == nghosts
-                        assert b[Zo[2],Yo[2],Xo[2]].shape == nghosts
-
-                        assert b[Zi[0],Yi[0],Xi[0]].shape == nghosts
-                        assert b[Zi[0],Yi[0],Xi[2]].shape == nghosts
-                        assert b[Zi[0],Yi[2],Xi[0]].shape == nghosts
-                        assert b[Zi[0],Yi[2],Xi[2]].shape == nghosts
-                        assert b[Zi[2],Yi[0],Xi[0]].shape == nghosts
-                        assert b[Zi[2],Yi[0],Xi[2]].shape == nghosts
-                        assert b[Zi[2],Yi[2],Xi[0]].shape == nghosts
-                        assert b[Zi[2],Yi[2],Xi[2]].shape == nghosts
+                        assert b.shape == (Nz+2*Gz, Ny+2*Gy, Nx+2*Gx)
+                        assert b[Zo[1], Yo[1], Xo[1]].shape == npts
+
+                        assert b[Zo[0], Yo[0], Xo[0]].shape == nghosts
+                        assert b[Zo[0], Yo[0], Xo[2]].shape == nghosts
+                        assert b[Zo[0], Yo[2], Xo[0]].shape == nghosts
+                        assert b[Zo[0], Yo[2], Xo[2]].shape == nghosts
+                        assert b[Zo[2], Yo[0], Xo[0]].shape == nghosts
+                        assert b[Zo[2], Yo[0], Xo[2]].shape == nghosts
+                        assert b[Zo[2], Yo[2], Xo[0]].shape == nghosts
+                        assert b[Zo[2], Yo[2], Xo[2]].shape == nghosts
+
+                        assert b[Zi[0], Yi[0], Xi[0]].shape == nghosts
+                        assert b[Zi[0], Yi[0], Xi[2]].shape == nghosts
+                        assert b[Zi[0], Yi[2], Xi[0]].shape == nghosts
+                        assert b[Zi[0], Yi[2], Xi[2]].shape == nghosts
+                        assert b[Zi[2], Yi[0], Xi[0]].shape == nghosts
+                        assert b[Zi[2], Yi[0], Xi[2]].shape == nghosts
+                        assert b[Zi[2], Yi[2], Xi[0]].shape == nghosts
+                        assert b[Zi[2], Yi[2], Xi[2]].shape == nghosts
 
                         if ghost_mask is GhostMask.FULL:
-                            assert b[Fz,Fy,Fx].shape == b.shape
+                            assert b[Fz, Fy, Fx].shape == b.shape
                         elif ghost_mask is GhostMask.CROSS:
-                            assert b[Fz,Fy,Fx].shape == (Nz-2*Gz,Ny-2*Gy, Nx-2*Gx)
+                            assert b[Fz, Fy, Fx].shape == (Nz-2*Gz, Ny-2*Gy, Nx-2*Gx)
                         else:
                             raise NotImplementedError(ghost_mask)
 
-                        if (Lx==BoundaryCondition.PERIODIC):
-                            assert np.all(b[Fz,Fy,Xo[0]] == b[Fz,Fy,Xi[2]]), '\n'+str(d)
-                        elif (Lx==BoundaryCondition.HOMOGENEOUS_DIRICHLET):
-                            assert (b[Fz,Fy,Gx] == 0).all(), '\n'+str(d)
-                            assert (b[Fz,Fy,:Gx] == -b[Fz,Fy,Gx+1:2*Gx+1][Ix]).all(), '\n'+str(d)
-                        elif (Lx==BoundaryCondition.HOMOGENEOUS_NEUMANN):
-                            assert (b[Fz,Fy,:Gx] == +b[Fz,Fy,Gx+1:2*Gx+1][Ix]).all(), '\n'+str(d)
+                        if (Lx == BoundaryCondition.PERIODIC):
+                            assert np.all(b[Fz, Fy, Xo[0]] == b[Fz, Fy, Xi[2]]), '\n'+str(d)
+                        elif (Lx == BoundaryCondition.HOMOGENEOUS_DIRICHLET):
+                            assert (b[Fz, Fy, Gx] == 0).all(), '\n'+str(d)
+                            assert (b[Fz, Fy, :Gx] == -b[Fz, Fy, Gx+1:2*Gx+1][Ix]).all(), '\n'+str(d)
+                        elif (Lx == BoundaryCondition.HOMOGENEOUS_NEUMANN):
+                            assert (b[Fz, Fy, :Gx] == +b[Fz, Fy, Gx+1:2*Gx+1][Ix]).all(), '\n'+str(d)
                         else:
                             raise NotImplementedError('Unknown boundary condition {}.'.format(Lx))
 
-                        if (Rx==BoundaryCondition.PERIODIC):
-                            assert np.all(b[Fz,Fy,Xo[2]] == b[Fz,Fy,Xi[0]]), '\n'+str(d)
-                        elif (Rx==BoundaryCondition.HOMOGENEOUS_DIRICHLET):
-                            assert (b[Fz,Fy,Nx+Gx-1] == 0).all(), '\n'+str(d)
-                            assert (b[Fz,Fy,Nx-1:Nx+Gx-1] == -b[Fz,Fy,Nx+Gx:][Ix]).all(), '\n'+str(d)
-                        elif (Rx==BoundaryCondition.HOMOGENEOUS_NEUMANN):
-                            assert (b[Fz,Fy,Nx-1:Nx+Gx-1] == +b[Fz,Fy,Nx+Gx:][Ix]).all(), '\n'+str(d)
+                        if (Rx == BoundaryCondition.PERIODIC):
+                            assert np.all(b[Fz, Fy, Xo[2]] == b[Fz, Fy, Xi[0]]), '\n'+str(d)
+                        elif (Rx == BoundaryCondition.HOMOGENEOUS_DIRICHLET):
+                            assert (b[Fz, Fy, Nx+Gx-1] == 0).all(), '\n'+str(d)
+                            assert (b[Fz, Fy, Nx-1:Nx+Gx-1] == -b[Fz, Fy, Nx+Gx:][Ix]).all(), '\n'+str(d)
+                        elif (Rx == BoundaryCondition.HOMOGENEOUS_NEUMANN):
+                            assert (b[Fz, Fy, Nx-1:Nx+Gx-1] == +b[Fz, Fy, Nx+Gx:][Ix]).all(), '\n'+str(d)
                         else:
                             raise NotImplementedError('Unknown boundary condition {}.'.format(Rx))
 
-                        if (Ly==BoundaryCondition.PERIODIC):
-                            assert np.all(b[Fz,Yo[0],Fx] == b[Fz,Yi[2],Fx]), '\n'+str(d)
-                        elif (Ly==BoundaryCondition.HOMOGENEOUS_DIRICHLET):
-                            assert (b[Fz,Gy,Fx] == 0).all(), '\n'+str(d)
-                            assert (b[Fz,:Gy,Fx] == -b[Fz,Gy+1:2*Gy+1,Fx][Iy]).all(), '\n'+str(d)
-                        elif (Ly==BoundaryCondition.HOMOGENEOUS_NEUMANN):
-                            assert (b[Fz,:Gy,Fx] == +b[Fz,Gy+1:2*Gy+1,Fx][Iy]).all(), '\n'+str(d)
+                        if (Ly == BoundaryCondition.PERIODIC):
+                            assert np.all(b[Fz, Yo[0], Fx] == b[Fz, Yi[2], Fx]), '\n'+str(d)
+                        elif (Ly == BoundaryCondition.HOMOGENEOUS_DIRICHLET):
+                            assert (b[Fz, Gy, Fx] == 0).all(), '\n'+str(d)
+                            assert (b[Fz, :Gy, Fx] == -b[Fz, Gy+1:2*Gy+1, Fx][Iy]).all(), '\n'+str(d)
+                        elif (Ly == BoundaryCondition.HOMOGENEOUS_NEUMANN):
+                            assert (b[Fz, :Gy, Fx] == +b[Fz, Gy+1:2*Gy+1, Fx][Iy]).all(), '\n'+str(d)
                         else:
                             raise NotImplementedError('Unknown boundary condition {}.'.format(Ly))
 
-                        if (Ry==BoundaryCondition.PERIODIC):
-                            assert np.all(b[Fz,Yo[2],Fx] == b[Fz,Yi[0],Fx]), '\n'+str(d)
-                        elif (Ry==BoundaryCondition.HOMOGENEOUS_DIRICHLET):
-                            assert (b[Fz,Ny+Gy-1,Fx] == 0).all(), '\n'+str(d)
-                            assert (b[Fz,Ny-1:Ny+Gy-1,Fx] == -b[Fz,Ny+Gy:,Fx][Iy]).all(), '\n'+str(d)
-                        elif (Ry==BoundaryCondition.HOMOGENEOUS_NEUMANN):
-                            assert (b[Fz,Ny-1:Ny+Gy-1,Fx] == +b[Fz,Ny+Gy:,Fx][Iy]).all(), '\n'+str(d)
+                        if (Ry == BoundaryCondition.PERIODIC):
+                            assert np.all(b[Fz, Yo[2], Fx] == b[Fz, Yi[0], Fx]), '\n'+str(d)
+                        elif (Ry == BoundaryCondition.HOMOGENEOUS_DIRICHLET):
+                            assert (b[Fz, Ny+Gy-1, Fx] == 0).all(), '\n'+str(d)
+                            assert (b[Fz, Ny-1:Ny+Gy-1, Fx] == -b[Fz, Ny+Gy:, Fx][Iy]).all(), '\n'+str(d)
+                        elif (Ry == BoundaryCondition.HOMOGENEOUS_NEUMANN):
+                            assert (b[Fz, Ny-1:Ny+Gy-1, Fx] == +b[Fz, Ny+Gy:, Fx][Iy]).all(), '\n'+str(d)
                         else:
                             raise NotImplementedError('Unknown boundary condition {}.'.format(Ry))
 
-                        if (Lz==BoundaryCondition.PERIODIC):
-                            assert np.all(b[Zo[0],Fy,Fx] == b[Zi[2],Fy,Fx]), '\n'+str(d)
-                        elif (Lz==BoundaryCondition.HOMOGENEOUS_DIRICHLET):
-                            assert (b[Gz,Fy,Fx] == 0).all(), '\n'+str(d)
-                            assert (b[:Gz,Fy,Fx] == -b[Gz+1:2*Gz+1,Fy,Fx][Iz]).all(), '\n'+str(d)
-                        elif (Lz==BoundaryCondition.HOMOGENEOUS_NEUMANN):
-                            assert (b[:Gz,Fy,Fx] == +b[Gz+1:2*Gz+1,Fy,Fx][Iz]).all(), '\n'+str(d)
+                        if (Lz == BoundaryCondition.PERIODIC):
+                            assert np.all(b[Zo[0], Fy, Fx] == b[Zi[2], Fy, Fx]), '\n'+str(d)
+                        elif (Lz == BoundaryCondition.HOMOGENEOUS_DIRICHLET):
+                            assert (b[Gz, Fy, Fx] == 0).all(), '\n'+str(d)
+                            assert (b[:Gz, Fy, Fx] == -b[Gz+1:2*Gz+1, Fy, Fx][Iz]).all(), '\n'+str(d)
+                        elif (Lz == BoundaryCondition.HOMOGENEOUS_NEUMANN):
+                            assert (b[:Gz, Fy, Fx] == +b[Gz+1:2*Gz+1, Fy, Fx][Iz]).all(), '\n'+str(d)
                         else:
                             raise NotImplementedError('Unknown boundary condition {}.'.format(Lz))
 
-                        if (Rz==BoundaryCondition.PERIODIC):
-                            assert np.all(b[Zo[2],Fy,Fx] == b[Zi[0],Fy,Fx]), '\n'+str(d)
-                        elif (Rz==BoundaryCondition.HOMOGENEOUS_DIRICHLET):
-                            assert (b[Nz+Gz-1,Fy,Fx] == 0).all(), '\n'+str(d)
-                            assert (b[Nz-1:Nz+Gz-1,Fy,Fx] == -b[Nz+Gz:,Fy,Fx][Iz]).all(), '\n'+str(d)
-                        elif (Rz==BoundaryCondition.HOMOGENEOUS_NEUMANN):
-                            assert (b[Nz-1:Nz+Gz-1,Fy,Fx] == +b[Nz+Gz:,Fy,Fx][Iz]).all(), '\n'+str(d)
+                        if (Rz == BoundaryCondition.PERIODIC):
+                            assert np.all(b[Zo[2], Fy, Fx] == b[Zi[0], Fy, Fx]), '\n'+str(d)
+                        elif (Rz == BoundaryCondition.HOMOGENEOUS_DIRICHLET):
+                            assert (b[Nz+Gz-1, Fy, Fx] == 0).all(), '\n'+str(d)
+                            assert (b[Nz-1:Nz+Gz-1, Fy, Fx] == -b[Nz+Gz:, Fy, Fx][Iz]).all(), '\n'+str(d)
+                        elif (Rz == BoundaryCondition.HOMOGENEOUS_NEUMANN):
+                            assert (b[Nz-1:Nz+Gz-1, Fy, Fx] == +b[Nz+Gz:, Fy, Fx][Iz]).all(), '\n'+str(d)
                         else:
                             raise NotImplementedError('Unknown boundary condition {}.'.format(Rz))
 
                         if (ghost_mask is GhostMask.FULL):
-                            if (Lx==Ly==Lz==Rx==Ry==Rz==BoundaryCondition.PERIODIC):
-                                assert np.all(b[Zo[0],Yo[0],Xo[0]]==b[Zi[2],Yi[2],Xi[2]])
-                                assert np.all(b[Zo[2],Yo[0],Xo[0]]==b[Zi[0],Yi[2],Xi[2]])
-                                assert np.all(b[Zo[2],Yo[2],Xo[0]]==b[Zi[0],Yi[0],Xi[2]])
-                                assert np.all(b[Zo[0],Yo[2],Xo[0]]==b[Zi[2],Yi[0],Xi[2]])
-                                assert np.all(b[Zo[0],Yo[0],Xo[2]]==b[Zi[2],Yi[2],Xi[0]])
-                                assert np.all(b[Zo[2],Yo[0],Xo[2]]==b[Zi[0],Yi[2],Xi[0]])
-                                assert np.all(b[Zo[2],Yo[2],Xo[2]]==b[Zi[0],Yi[0],Xi[0]])
-                                assert np.all(b[Zo[0],Yo[2],Xo[2]]==b[Zi[2],Yi[0],Xi[0]])
+                            if (Lx == Ly == Lz == Rx == Ry == Rz == BoundaryCondition.PERIODIC):
+                                assert np.all(b[Zo[0], Yo[0], Xo[0]] == b[Zi[2], Yi[2], Xi[2]])
+                                assert np.all(b[Zo[2], Yo[0], Xo[0]] == b[Zi[0], Yi[2], Xi[2]])
+                                assert np.all(b[Zo[2], Yo[2], Xo[0]] == b[Zi[0], Yi[0], Xi[2]])
+                                assert np.all(b[Zo[0], Yo[2], Xo[0]] == b[Zi[2], Yi[0], Xi[2]])
+                                assert np.all(b[Zo[0], Yo[0], Xo[2]] == b[Zi[2], Yi[2], Xi[0]])
+                                assert np.all(b[Zo[2], Yo[0], Xo[2]] == b[Zi[0], Yi[2], Xi[0]])
+                                assert np.all(b[Zo[2], Yo[2], Xo[2]] == b[Zi[0], Yi[0], Xi[0]])
+                                assert np.all(b[Zo[0], Yo[2], Xo[2]] == b[Zi[2], Yi[0], Xi[0]])
                         elif (ghost_mask is GhostMask.CROSS):
-                            assert np.all(np.isnan(b[Zo[0],Yo[0],Xo[0]]))
-                            assert np.all(np.isnan(b[Zo[2],Yo[0],Xo[0]]))
-                            assert np.all(np.isnan(b[Zo[2],Yo[2],Xo[0]]))
-                            assert np.all(np.isnan(b[Zo[0],Yo[2],Xo[0]]))
-                            assert np.all(np.isnan(b[Zo[0],Yo[0],Xo[2]]))
-                            assert np.all(np.isnan(b[Zo[2],Yo[0],Xo[2]]))
-                            assert np.all(np.isnan(b[Zo[2],Yo[2],Xo[2]]))
-                            assert np.all(np.isnan(b[Zo[0],Yo[2],Xo[2]]))
+                            assert np.all(np.isnan(b[Zo[0], Yo[0], Xo[0]]))
+                            assert np.all(np.isnan(b[Zo[2], Yo[0], Xo[0]]))
+                            assert np.all(np.isnan(b[Zo[2], Yo[2], Xo[0]]))
+                            assert np.all(np.isnan(b[Zo[0], Yo[2], Xo[0]]))
+                            assert np.all(np.isnan(b[Zo[0], Yo[0], Xo[2]]))
+                            assert np.all(np.isnan(b[Zo[2], Yo[0], Xo[2]]))
+                            assert np.all(np.isnan(b[Zo[2], Yo[2], Xo[2]]))
+                            assert np.all(np.isnan(b[Zo[0], Yo[2], Xo[2]]))
                         else:
-                            msg='Unknown ghost mask {}.'.format(ghost_mask)
+                            msg = 'Unknown ghost mask {}.'.format(ghost_mask)
                             raise NotImplementedError(msg)
                         sys.stdout.write('.')
                         sys.stdout.flush()
                 finally:
-                    print
+                    print()
 
 
 def iter_backends():
@@ -479,6 +488,7 @@ def iter_backends():
     for cl_env in iter_clenv():
         yield (Backend.OPENCL, cl_env)
 
+
 def test_mpi_ghost_exchange_periodic(comm=None):
     if comm is None:
         from mpi4py import MPI
@@ -489,107 +499,108 @@ def test_mpi_ghost_exchange_periodic(comm=None):
               np.int16,  np.int32,  np.int64,
               np.uint16, np.uint32, np.uint64)
     assert size-1 < len(dtypes)
-    if rank==0:
-        print
+    if rank == 0:
+        print()
         msg = '*** COMM_WORLD_SIZE {} ***'.format(size)
-        print
-        print '*'*len(msg)
-        print msg
-        print '*'*len(msg)
-        print 'test_mpi_ghost_exchange_periodic()'.format(size)
-    for dim in xrange(1,3+__ENABLE_LONG_TESTS__):
-        if rank==0:
-            print('  >DIM={}'.format(dim))
-
-        npts = (53,47,59,23)[:dim]
-        nghosts = (2,1,0,3)[:dim]
+        print()
+        print('*'*len(msg))
+        print(msg)
+        print('*'*len(msg))
+        print('test_mpi_ghost_exchange_periodic()'.format(size))
+    for dim in range(1, 3+__ENABLE_LONG_TESTS__):
+        if rank == 0:
+            print(('  >DIM={}'.format(dim)))
+
+        npts = (53, 47, 59, 23)[:dim]
+        nghosts = (2, 1, 0, 3)[:dim]
         discretization = CartesianDiscretization(npts, nghosts,
-                                        default_boundaries=True)
+                                                 default_boundaries=True)
         domain = Box(dim=dim)
 
         for dtype in dtypes[size-1:size]:
-            if rank==0:
+            if rank == 0:
                 print('    >DTYPE={}'.format(dtype))
 
-            F0 = Field(domain=domain, name='F0', nb_components=1, dtype=dtype, _register=False)
-            F1 = Field(domain=domain, name='F1', nb_components=2, dtype=dtype, _register=False)
-            F2 = Field(domain=domain, name='F2', shape=(2,2), dtype=dtype, _register=False)
+            F0 = Field(domain=domain, name='F0', nb_components=1, dtype=dtype)
+            F1 = Field(domain=domain, name='F1', nb_components=2, dtype=dtype)
+            F2 = Field(domain=domain, name='F2', shape=(2, 2), dtype=dtype)
 
             for (backend, cl_env) in iter_backends():
-                if rank==0:
-                    print '      >BACKEND.{}{}'.format(backend,
-                            '' if (cl_env is None) else '::{}.{}'.format(
-                                cl_env.platform.name.strip(), cl_env.device.name.strip()))
-                for shape in it.product(xrange(0,size+1), repeat=dim):
-                    if np.prod(shape, dtype=np.uint32)!=size:
+                if rank == 0:
+                    print('      >BACKEND.{}{}'.format(backend,
+                                                       '' if (cl_env is None) else '::{}.{}'.format(
+                                                           cl_env.platform.name.strip(), cl_env.device.name.strip())))
+                for shape in it.product(range(0, size+1), repeat=dim):
+                    if np.prod(shape, dtype=np.uint32) != size:
                         continue
-                    if rank==0:
-                        print '         *cart_shape: {}'.format(shape)
+                    if rank == 0:
+                        print('         *cart_shape: {}'.format(shape))
                     topo = CartesianTopology(domain=domain, discretization=discretization,
-                                               backend=backend, cart_shape=shape, cl_env=cl_env)
-                    assert (topo.proc_shape==shape).all()
+                                             backend=backend, cart_shape=shape, cl_env=cl_env)
+                    assert (topo.proc_shape == shape).all()
 
-                    def ghost_base(i,d,rank,local_dir):
+                    def ghost_base(i, d, rank, local_dir):
                         return (i+1)*17 + (d+1)*13 + (local_dir+1)*11 + (rank+1)*7
-                    def ghost_vals(shape,dtype,i,d,rank,local_dir):
-                        if (shape is None) or (len(shape)==0):
+
+                    def ghost_vals(shape, dtype, i, d, rank, local_dir):
+                        if (shape is None) or (len(shape) == 0):
                             raise ValueError('Shape is None or an empty tuple: {}'.format(shape))
                         base = np.full(shape=shape, dtype=dtype, fill_value=ghost_base(i, d, rank,
-                                                                                local_dir))
-                        I = np.ix_(*tuple(np.arange(shape[direction], dtype=dtype) \
-                                                for direction in xrange(len(shape))))
+                                                                                       local_dir))
+                        I = np.ix_(*tuple(np.arange(shape[direction], dtype=dtype)
+                                          for direction in range(len(shape))))
                         return base + I[d]
 
                     for F in (F0, F1, F2):
                         dF = F.discretize(topo)
-                        if rank==0:
-                            print '          |{} COMPONENT(S)'.format(F.nb_components)
+                        if rank == 0:
+                            print('          |{} COMPONENT(S)'.format(F.nb_components))
                         for exchange_method in (ExchangeMethod.ISEND_IRECV,
                                                 ExchangeMethod.NEIGHBOR_ALL_TO_ALL_V,
                                                 ExchangeMethod.NEIGHBOR_ALL_TO_ALL_W):
-                            if rank==0:
-                                print '             ExchangeMethod.{} |'.format(exchange_method),
+                            if rank == 0:
+                                print('             ExchangeMethod.{} |'.format(exchange_method), end=' ')
                             sys.stdout.flush()
-                            for d in xrange(dim):
+                            for d in range(dim):
                                 dF.initialize(__zero_init, exchange_ghosts=False)
-                                if rank==0:
-                                    print DirectionLabels[dim-1-d],
+                                if rank == 0:
+                                    print(DirectionLabels[dim-1-d], end=' ')
                                 sys.stdout.flush()
                                 lghosts, rghosts, shape = dF.inner_ghost_slices[d]
                                 _lghosts, _rghosts, shape = dF.outer_ghost_slices[d]
 
                                 if (shape is not None):
-                                    for (i,data) in enumerate(dF.data):
-                                            data[lghosts] = ghost_vals(shape, dtype, i,d,rank,0)
-                                            data[_lghosts] = -10
-                                            data[rghosts] = ghost_vals(shape, dtype, i,d,rank,1)
-                                            data[_rghosts] = +10
+                                    for (i, data) in enumerate(dF.data):
+                                        data[lghosts] = ghost_vals(shape, dtype, i, d, rank, 0)
+                                        data[_lghosts] = -10
+                                        data[rghosts] = ghost_vals(shape, dtype, i, d, rank, 1)
+                                        data[_rghosts] = +10
 
                                 dF.exchange_ghosts(directions=d, exchange_method=exchange_method)
 
                                 lghosts, rghosts, shape = dF.outer_ghost_slices[d]
-                                left_rank, right_rank = topo.proc_neighbour_ranks[:,d]
+                                left_rank, right_rank = topo.proc_neighbour_ranks[:, d]
                                 if (left_rank == -1):
-                                    assert right_rank==-1
+                                    assert right_rank == -1
                                     left_rank, right_rank = rank, rank
 
                                 if (shape is not None):
-                                    for (i,data) in enumerate(dF.data):
+                                    for (i, data) in enumerate(dF.data):
                                         ldata = data[lghosts]
                                         rdata = data[rghosts]
 
                                         ldata = np.atleast_1d(ldata.get())
                                         target_vals = ghost_vals(ldata.shape, dtype, i, d,
-                                                left_rank,1)
+                                                                 left_rank, 1)
                                         assert np.allclose(ldata, target_vals), (rank,
-                                                                                    target_vals)
+                                                                                 target_vals)
                                         rdata = np.atleast_1d(rdata.get())
                                         target_vals = ghost_vals(rdata.shape, dtype, i, d,
-                                                                                right_rank, 0)
+                                                                 right_rank, 0)
                                         assert np.allclose(rdata, target_vals), (rank,
-                                                                                        target_vals)
-                            if rank==0:
-                                print
+                                                                                 target_vals)
+                            if rank == 0:
+                                print()
 
 
 def test_mpi_ghost_exchange_runtime(comm=None):
@@ -601,76 +612,75 @@ def test_mpi_ghost_exchange_runtime(comm=None):
     size = comm.Get_size()
     dtype = np.float32
 
-    if rank==0:
-        print
+    if rank == 0:
+        print()
         msg = '*** COMM_WORLD_SIZE {} ***'.format(size)
-        print
-        print '*'*len(msg)
-        print msg
-        print '*'*len(msg)
-        print 'test_mpi_ghost_exchange_runtime()'.format(size)
-
-    for dim in xrange(1,3+__ENABLE_LONG_TESTS__):
-        if rank==0:
+        print()
+        print('*'*len(msg))
+        print(msg)
+        print('*'*len(msg))
+        print('test_mpi_ghost_exchange_runtime()'.format(size))
+
+    for dim in range(1, 3+__ENABLE_LONG_TESTS__):
+        if rank == 0:
             sys.stdout.write('>DIM={}\n'.format(dim))
 
-        npts = (17,16,19)[:dim]
-        nghosts = (2,1,3)[:dim]
+        npts = (17, 16, 19)[:dim]
+        nghosts = (2, 1, 3)[:dim]
 
-        for shape in it.product(xrange(0,size+1), repeat=dim):
-            if np.prod(shape, dtype=np.uint32)!=size:
+        for shape in it.product(range(0, size+1), repeat=dim):
+            if np.prod(shape, dtype=np.uint32) != size:
                 continue
-            if rank==0:
+            if rank == 0:
                 sys.stdout.write('  >CART SHAPE: {}\n'.format(shape))
 
             for (backend, cl_env) in iter_backends():
-                if rank==0:
+                if rank == 0:
                     sys.stdout.write('    >BACKEND.{:<7} '.format(str(backend)+':'))
 
                 def breakline(i):
-                    if (rank==0) and ((i+1)%63==0):
+                    if (rank == 0) and ((i+1) % 63 == 0):
                         sys.stdout.write('\n' + ' '*21)
                         sys.stdout.flush()
                         return True
                     return False
 
-                i=0
+                i = 0
                 brk = False
                 try:
                     for (lbd, rbd) in domain_boundary_iterator(dim):
-                        domain = Box(dim=dim, lboundaries=lbd,
-                                              rboundaries=rbd)
+                        domain = Box(dim=dim, lboundaries=lbd, rboundaries=rbd)
 
-                        F = Field(domain=domain, name='F', nb_components=1,
-                                    dtype=dtype, _register=False)
+                        F = Field(domain=domain, name='F', nb_components=1, dtype=dtype)
 
                         discretization = CartesianDiscretization(npts, nghosts,
-                                                lboundaries=F.lboundaries,
-                                                rboundaries=F.rboundaries)
+                                                                 lboundaries=F.lboundaries,
+                                                                 rboundaries=F.rboundaries)
 
                         topo = CartesianTopology(domain=domain, discretization=discretization,
-                                                   backend=backend, cart_shape=shape, cl_env=cl_env)
-                        assert (topo.proc_shape==shape).all()
+                                                 backend=backend, cart_shape=shape, cl_env=cl_env)
+                        assert (topo.proc_shape == shape).all()
 
                         dF = F.discretize(topo)
+                        dF.initialize(__random_init, exchange_ghosts=False)
                         for exchange_method in (ExchangeMethod.ISEND_IRECV,
                                                 ExchangeMethod.NEIGHBOR_ALL_TO_ALL_V,
                                                 ExchangeMethod.NEIGHBOR_ALL_TO_ALL_W):
-                                for d in xrange(dim):
-                                    dF.exchange_ghosts(directions=d, exchange_method=exchange_method)
-                                    if rank==0:
-                                        sys.stdout.write('.')
-                                        brk = breakline(i)
-                                        i+=1
-
-                                    dF.accumulate_ghosts(directions=d, exchange_method=exchange_method)
-                                    if rank==0:
-                                        sys.stdout.write('.')
-                                        brk = breakline(i)
-                                        i+=1
-                                        sys.stdout.flush()
+                            for d in range(dim):
+                                dF.exchange_ghosts(directions=d, exchange_method=exchange_method)
+                                if rank == 0:
+                                    sys.stdout.write('.')
+                                    brk = breakline(i)
+                                    i += 1
+
+                                dF.accumulate_ghosts(directions=d, exchange_method=exchange_method)
+                                if rank == 0:
+                                    sys.stdout.write('.')
+                                    brk = breakline(i)
+                                    i += 1
+                                    sys.stdout.flush()
                 finally:
-                    if (rank==0):
+                    if (rank == 0):
                         sys.stdout.write('\n')
                         sys.stdout.flush()
 
@@ -681,119 +691,121 @@ def test_mpi_ghost_accumulate_periodic(comm=None):
         comm = MPI.COMM_WORLD
     rank = comm.Get_rank()
     size = comm.Get_size()
-    if rank==0:
+    if rank == 0:
         msg = '*** COMM_WORLD_SIZE {} ***'.format(size)
-        print
-        print '*'*len(msg)
-        print msg
-        print '*'*len(msg)
-        print 'test_mpi_ghost_accumulate_periodic()'.format(size)
+        print()
+        print('*'*len(msg))
+        print(msg)
+        print('*'*len(msg))
+        print('test_mpi_ghost_accumulate_periodic()'.format(size))
 
     dtypes = (np.float32, np.float32, np.float64,
               np.complex64, np.complex128,
               np.int16,  np.int32,  np.int64,
               np.uint16, np.uint32, np.uint64)
     assert size-1 < len(dtypes)
-    for dim in xrange(1,3+__ENABLE_LONG_TESTS__):
-        if rank==0:
-            print('  >DIM={}'.format(dim))
+    for dim in range(1, 3+__ENABLE_LONG_TESTS__):
+        if rank == 0:
+            print(('  >DIM={}'.format(dim)))
 
-        npts    = (53,57,51,49)[:dim]
-        nghosts = (1,3,0,2)[:dim]
+        npts = (53, 57, 51, 49)[:dim]
+        nghosts = (1, 3, 0, 2)[:dim]
         discretization = CartesianDiscretization(npts, nghosts,
-                                        default_boundaries=True)
+                                                 default_boundaries=True)
         domain = Box(dim=dim)
 
         for dtype in dtypes[size-1:size]:
-            if rank==0:
+            if rank == 0:
                 print('    >DTYPE={}'.format(dtype))
 
-            F0 = Field(domain=domain, name='F0', nb_components=1, dtype=dtype, _register=False)
-            F1 = Field(domain=domain, name='F1', nb_components=2, dtype=dtype, _register=False)
-            F2 = Field(domain=domain, name='F2', shape=(2,2), dtype=dtype, _register=False)
+            F0 = Field(domain=domain, name='F0', nb_components=1, dtype=dtype)
+            F1 = Field(domain=domain, name='F1', nb_components=2, dtype=dtype)
+            F2 = Field(domain=domain, name='F2', shape=(2, 2), dtype=dtype)
 
             for (backend, cl_env) in iter_backends():
-                if rank==0:
-                    print '      >BACKEND.{}{}'.format(backend,
-                            '' if (cl_env is None) else '::{}.{}'.format(
-                                cl_env.platform.name.strip(), cl_env.device.name.strip()))
-                for shape in it.product(xrange(0,size+1), repeat=dim):
-                    if np.prod(shape, dtype=np.uint32)!=size:
+                if rank == 0:
+                    print('      >BACKEND.{}{}'.format(backend,
+                                                       '' if (cl_env is None) else '::{}.{}'.format(
+                                                           cl_env.platform.name.strip(), cl_env.device.name.strip())))
+                for shape in it.product(range(0, size+1), repeat=dim):
+                    if np.prod(shape, dtype=np.uint32) != size:
                         continue
-                    if rank==0:
-                        print '        *cart_shape: {}'.format(shape)
+                    if rank == 0:
+                        print('        *cart_shape: {}'.format(shape))
                     topo = CartesianTopology(domain=domain, discretization=discretization,
-                                               backend=backend, cart_shape=shape, cl_env=cl_env)
-                    assert (topo.proc_shape==shape).all()
+                                             backend=backend, cart_shape=shape, cl_env=cl_env)
+                    assert (topo.proc_shape == shape).all()
 
                     def ghost_base(rank, directions, displacements, i):
-                        disweight     = np.asarray([19,23,29,31], dtype=np.int32)
-                        dirweight     = np.asarray([37,41,43,51], dtype=np.int32)
-                        directions    = np.asarray(directions, dtype=np.int32) + 1
+                        disweight = np.asarray([19, 23, 29, 31], dtype=np.int32)
+                        dirweight = np.asarray([37, 41, 43, 51], dtype=np.int32)
+                        directions = np.asarray(directions, dtype=np.int32) + 1
                         displacements = np.asarray(displacements, dtype=np.int32) + 2
-                        tag =  (rank+1)*17 + (i+1)*13
+                        tag = (rank+1)*17 + (i+1)*13
                         tag += dirweight[:directions.size].dot(directions)
                         tag += disweight[:displacements.size].dot(displacements)
                         return tag
+
                     def ghost_vals(shape, dtype, rank, directions, displacements, i):
-                        if (shape is None) or (len(shape)==0):
+                        if (shape is None) or (len(shape) == 0):
                             raise ValueError('Shape is None or an empty tuple: {}'.format(shape))
-                        base_value = ghost_base(rank,directions,displacements,i)
+                        base_value = ghost_base(rank, directions, displacements, i)
                         vals = np.full(shape=shape, fill_value=base_value, dtype=dtype)
                         return vals
 
-                    for F in (F0,F1,F2):
+                    for F in (F0, F1, F2):
                         dF = F.discretize(topo)
-                        proc_ranks  = topo.proc_ranks
-                        proc_shape  = topo.proc_shape
+                        dF.initialize(__random_init, exchange_ghosts=False)
+                        proc_ranks = topo.proc_ranks
+                        proc_shape = topo.proc_shape
                         proc_coords = tuple(topo.proc_coords.tolist())
                         assert proc_ranks[proc_coords] == rank
                         all_inner_ghost_slices = dF.all_inner_ghost_slices
                         all_outer_ghost_slices = dF.all_outer_ghost_slices
-                        if rank==0:
-                            print '          |{} COMPONENT(S)'.format(F.nb_components)
+                        if rank == 0:
+                            print('          |{} COMPONENT(S)'.format(F.nb_components))
                         for exchange_method in (ExchangeMethod.ISEND_IRECV,
                                                 ExchangeMethod.NEIGHBOR_ALL_TO_ALL_V,
                                                 ExchangeMethod.NEIGHBOR_ALL_TO_ALL_W,):
-                            if rank==0:
-                                print '             ExchangeMethod.{:<25} |'.format(exchange_method),
+                            if rank == 0:
+                                print('             ExchangeMethod.{:<25} |'.format(str(exchange_method)), end=' ')
                             sys.stdout.flush()
 
                             # test one direction at a time
                             max_displacements = 1
-                            for ndirections in xrange(1,dim+1):
-                                all_displacements = tuple(it.product((-1,0,+1), repeat=ndirections))
-                                all_directions    = tuple(it.combinations(range(dim), ndirections))
-                                masks             = tuple(it.product((0,1), repeat=ndirections))
+                            for ndirections in range(1, dim+1):
+                                all_displacements = tuple(it.product((-1, 0, +1), repeat=ndirections))
+                                all_directions = tuple(it.combinations(range(dim), ndirections))
+                                masks = tuple(it.product((0, 1), repeat=ndirections))
                                 for directions in all_directions:
-                                    if rank==0:
+                                    if rank == 0:
                                         if directions:
-                                            print ''.join(DirectionLabels[dim-1-d]
-                                                            for d in directions),
+                                            print(''.join(DirectionLabels[dim-1-d]
+                                                          for d in directions), end=' ')
                                         else:
-                                            print '--',
+                                            print('--', end=' ')
                                         sys.stdout.flush()
 
-                                    for (i,data) in enumerate(dF.data):
+                                    for (i, data) in enumerate(dF.data):
                                         data[...] = (rank+1)*(i+1)
                                         for displacements in all_displacements:
-                                            if sum(d!=0 for d in displacements) == 0:
+                                            if sum(d != 0 for d in displacements) == 0:
                                                 continue
-                                            (iview,ishape) = all_inner_ghost_slices[ndirections][directions][displacements]
-                                            (oview,oshape) = all_outer_ghost_slices[ndirections][directions][displacements]
+                                            (iview, ishape) = all_inner_ghost_slices[ndirections][directions][displacements]
+                                            (oview, oshape) = all_outer_ghost_slices[ndirections][directions][displacements]
                                             if (oshape is not None):
                                                 assert (ishape is not None)
                                                 data[oview] = ghost_vals(oshape, dtype, rank, directions,
-                                                                                            displacements, i)
+                                                                         displacements, i)
 
                                     dF.accumulate_ghosts(directions=directions,
                                                          exchange_method=exchange_method)
 
-                                    for (i,data) in enumerate(dF.data):
+                                    for (i, data) in enumerate(dF.data):
                                         for displacements in all_displacements:
-                                            ndisplacements = sum(d!=0 for d in displacements)
-                                            (iview,ishape) = all_inner_ghost_slices[ndirections][directions][displacements]
-                                            (oview,oshape) = all_outer_ghost_slices[ndirections][directions][displacements]
+                                            ndisplacements = sum(d != 0 for d in displacements)
+                                            (iview, ishape) = all_inner_ghost_slices[ndirections][directions][displacements]
+                                            (oview, oshape) = all_outer_ghost_slices[ndirections][directions][displacements]
 
                                             if (ishape is None):
                                                 assert (oshape is None)
@@ -803,16 +815,17 @@ def test_mpi_ghost_accumulate_periodic(comm=None):
                                             assert np.array_equal(data[oview].shape, oshape)
 
                                             overlaping_neighbours = set(tuple((np.asarray(mask, dtype=np.int32)*displacements).tolist()) for mask in masks)
-                                            overlaping_neighbours = filter(lambda x: 0<sum(_!=0 for _ in x)<=max_displacements, overlaping_neighbours) #diagonals
+                                            overlaping_neighbours = filter(lambda x: 0 < sum(_ != 0 for _ in x) <= max_displacements, overlaping_neighbours)  # diagonals
+                                            overlaping_neighbours = tuple(overlaping_neighbours)
 
                                             expected_values = (rank+1)*(i+1)
                                             for disp in overlaping_neighbours:
                                                 ncoords = ()
-                                                j=0
-                                                for _ in xrange(dim):
+                                                j = 0
+                                                for _ in range(dim):
                                                     if _ in directions:
                                                         ci = (proc_shape[_]+proc_coords[_]+disp[j]) % proc_shape[_]
-                                                        j+=1
+                                                        j += 1
                                                     else:
                                                         ci = proc_coords[_]
                                                     ncoords += (ci,)
@@ -822,11 +835,12 @@ def test_mpi_ghost_accumulate_periodic(comm=None):
                                             idata = data[iview].get()
                                             odata = data[oview].get()
                                             assert np.allclose(idata, expected_values)
-                                            if (ndisplacements>0):
+                                            if (ndisplacements > 0):
                                                 assert np.allclose(odata, ghost_vals(oshape, dtype, rank, directions, displacements, i))
                                 continue
-                            if rank==0:
-                                print
+                            if rank == 0:
+                                print()
+
 
 if __name__ == '__main__':
     from mpi4py import MPI
@@ -836,7 +850,7 @@ if __name__ == '__main__':
     from hysop.tools.warning import disable_hysop_warnings
 
     with test_context():
-        if (size==1):
+        if (size == 1):
             test_serial_initialization_1d()
             test_serial_initialization_2d()
             if __ENABLE_LONG_TESTS__:
diff --git a/hysop/fields/tests/test_cartesian.sh b/hysop/fields/tests/test_cartesian.sh
index f73967d46af4fe6601d2b2e1ffda51363cdd7d71..6c840f628d22f3d41b307f80fdd73be858fd1c11 100755
--- a/hysop/fields/tests/test_cartesian.sh
+++ b/hysop/fields/tests/test_cartesian.sh
@@ -1,5 +1,7 @@
 #!/usr/bin/env bash
 set -feu -o pipefail
+PYTHON_EXECUTABLE=${PYTHON_EXECUTABLE:-python3.8}
+MPIRUN_EXECUTABLE=${MPIRUN_EXECUTABLE:-mpirun}
 
 SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
 TEST_FILE=${SCRIPT_DIR}/test_cartesian.py
@@ -9,5 +11,5 @@ export HYSOP_DEBUG=0
 export KERNEL_DEBUG=0
 
 for i in 2; do
-     mpirun -np $i --allow-run-as-root python2.7 ${TEST_FILE}
+     ${MPIRUN_EXECUTABLE} -np $i ${PYTHON_EXECUTABLE} ${TEST_FILE}
 done
diff --git a/hysop/fields/tests/test_fields.py b/hysop/fields/tests/test_fields.py
index 76ad49e456743df0f228c9681a18b5b4f4dfcbcb..7271023637dd6da395f47aab38c28469f221a662 100644
--- a/hysop/fields/tests/test_fields.py
+++ b/hysop/fields/tests/test_fields.py
@@ -63,8 +63,8 @@ def test_field():
 
     assert F0.domain is domain
     assert F0.dim is domain.dim
-    assert F0.name is 'F0'
-    assert F0.pretty_name is 'F0'
+    assert F0.name == 'F0'
+    assert F0.pretty_name == 'F0'
     assert F0 is not F1
     assert F0 == F0
     assert F0 != F1
@@ -73,8 +73,8 @@ def test_field():
     assert hash(F0) != hash(F2)
     assert str(F0) == F0.long_description()
     assert len(F0.short_description()) < len(F0.long_description())
-    assert F1.name is 'F1'
-    assert F1.pretty_name is 'test'
+    assert F1.name == 'F1'
+    assert F1.pretty_name == 'test'
     assert F0.nb_components == 1
     assert len(F0.fields) == 1
     assert F0.fields[0] is F0
@@ -159,8 +159,8 @@ def test_tensor_field():
         assert t1.dtype != t0.dtype
         assert t1.name.replace('1', '0', 1) == t0.name
         assert t1.pretty_name.replace('1', '0', 1) == t0.pretty_name
-        assert t0.dim is 3
-        assert t1.dim is 3
+        assert t0.dim == 3
+        assert t1.dim == 3
     assert np.array_equal(T0._fields[1:, 1:], T2._fields)
     assert T0._fields[1, 1] is T3
     assert T0._fields[0, 0] is T4[0, 0]
@@ -196,10 +196,10 @@ def test_tensor_field():
 
     work = requests11().allocate(True)
     DT11.honor_memory_request(work)
-    
+
     work = requests12().allocate(True)
     DT12.honor_memory_request(work)
-    
+
     for df in DT11.data:
         df[...] = 11
     for df in DT12.data:
@@ -208,7 +208,6 @@ def test_tensor_field():
         assert np.all(df == 11)
     for df in DT12.data:
         assert np.all(df == 12)
-    
 
     str(DT0)
     DT0.short_description()
@@ -222,9 +221,9 @@ def test_tensor_field():
 
     DT9.rename('foo')
     DT9.initialize(func)
+    DT10.randomize()
     DT9.initialize(DT10.data)
     DT9.fill(4)
-    DT9.randomize()
     DT9.copy(DT0[1:, 1:])
 
     DT9.has_ghosts()
@@ -301,13 +300,13 @@ def test_tensor_field():
     assert (DT0.global_boundaries[0] == dfield.global_boundaries[0]).all()
     assert (DT0.global_boundaries[1] == dfield.global_boundaries[1]).all()
     assert (DT0.space_step == dfield.space_step).all()
-    for i in xrange(DT0.dim):
+    for i in range(DT0.dim):
         assert (DT0.coords[i] == dfield.coords[i]).all()
         assert (DT0.mesh_coords[i] == dfield.mesh_coords[i]).all()
     assert DT0.compute_slices == dfield.compute_slices
-    assert np.array_equal(DT0.inner_ghost_slices, dfield.inner_ghost_slices)
-    assert np.array_equal(DT0.inner_ghost_slices, dfield.inner_ghost_slices)
-    assert np.array_equal(DT0.outer_ghost_slices, dfield.outer_ghost_slices)
+    assert DT0.inner_ghost_slices == dfield.inner_ghost_slices
+    assert DT0.inner_ghost_slices == dfield.inner_ghost_slices
+    assert DT0.outer_ghost_slices == dfield.outer_ghost_slices
     assert DT0.grid_npoints == dfield.grid_npoints
     assert DT0.axes == dfield.axes
     assert DT0.tstate == dfield.tstate
@@ -361,30 +360,30 @@ def test_boundaries():
             divV = V.div()
             gradV = V.gradient()
             lapV = V.laplacian()
-            print
-            print 'DOMAIN BOUNDARIES:'
-            print ' *boundaries=[{}]'.format(domain.format_boundaries())
-            print 'SCALAR BOUNDARIES:'
-            print ' *{} boundaries=[{}]'.format(S.pretty_name, S.format_boundaries())
-            print 'VELOCITY BOUNDARIES:'
+            print()
+            print('DOMAIN BOUNDARIES:')
+            print(' *boundaries=[{}]'.format(domain.format_boundaries()))
+            print('SCALAR BOUNDARIES:')
+            print(' *{} boundaries=[{}]'.format(S.pretty_name, S.format_boundaries()))
+            print('VELOCITY BOUNDARIES:')
             for Vi in V.fields:
-                print ' *{} boundaries=[{}]'.format(Vi.pretty_name, Vi.format_boundaries())
-            print '{} BOUNDARIES:'.format(divV.pretty_name)
-            print ' *{} boundaries=[{}]'.format(divV.pretty_name, divV.format_boundaries())
-            print '{} BOUNDARIES:'.format(gradV.pretty_name)
+                print(' *{} boundaries=[{}]'.format(Vi.pretty_name, Vi.format_boundaries()))
+            print('{} BOUNDARIES:'.format(divV.pretty_name))
+            print(' *{} boundaries=[{}]'.format(divV.pretty_name, divV.format_boundaries()))
+            print('{} BOUNDARIES:'.format(gradV.pretty_name))
             for gVi in gradV.fields:
-                print ' *{} boundaries=[{}]'.format(gVi.pretty_name, gVi.format_boundaries())
-            print '{} BOUNDARIES:'.format(lapV.pretty_name)
+                print(' *{} boundaries=[{}]'.format(gVi.pretty_name, gVi.format_boundaries()))
+            print('{} BOUNDARIES:'.format(lapV.pretty_name))
             for lVi in lapV.fields:
-                print ' *{} boundaries=[{}]'.format(lVi.pretty_name, lVi.format_boundaries())
+                print(' *{} boundaries=[{}]'.format(lVi.pretty_name, lVi.format_boundaries()))
             if (dim > 1):
                 rotV = V.curl()
-                print '{} (VORTICITY) BOUNDARIES:'.format(rotV.pretty_name)
+                print('{} (VORTICITY) BOUNDARIES:'.format(rotV.pretty_name))
                 for Wi in rotV.fields:
-                    print ' *{} boundaries=[{}]'.format(Wi.pretty_name, Wi.format_boundaries())
+                    print(' *{} boundaries=[{}]'.format(Wi.pretty_name, Wi.format_boundaries()))
 
 
 if __name__ == '__main__':
-    test_field()
+    # test_field()
     test_tensor_field()
-    test_boundaries()
+    # test_boundaries()
diff --git a/hysop/iterative_method.py b/hysop/iterative_method.py
index 2fce0bfc10a738d10e2b4ef7e13c8c0cde7593c7..e8e7cead36420041acb1a0e30525c3486522bfe7 100644
--- a/hysop/iterative_method.py
+++ b/hysop/iterative_method.py
@@ -62,7 +62,7 @@ class PseudoSimulation(Simulation):
                                    formatter={'float_kind': lambda x: '{:.8g}'.format(x)})
         msg = "=== PseudoSimulation : {0:6d}, criteria = {1} =="
         if verbose:
-            print msg.format(self.current_iteration, crit)
+            print(msg.format(self.current_iteration, crit))
         else:
             vprint(msg.format(self.current_iteration, crit))
 
diff --git a/hysop/mesh/cartesian_mesh.py b/hysop/mesh/cartesian_mesh.py
index dbeb0ba105f3c31d595ceaefe7f08547f43e4e1d..bef6f8fcf9327c3100ac67765853fb9cedaff6cc 100644
--- a/hysop/mesh/cartesian_mesh.py
+++ b/hysop/mesh/cartesian_mesh.py
@@ -9,9 +9,9 @@ See also
 * :class:`~hysop.topology.topology.CartesianTopology`
 * :ref:`topologies` in HySoP user guide.
 """
+import itertools as it
 
 from hysop.constants import np, BoundaryCondition, GhostMask, HYSOP_INTEGER, HYSOP_REAL, MemoryOrdering
-from hysop.deps import it
 from hysop.tools.decorators import debug
 from hysop.tools.numpywrappers import npw
 from hysop.tools.types import check_instance, to_tuple
@@ -269,15 +269,15 @@ class CartesianMeshView(MeshView):
             raise NotImplementedError(msg_mask)
 
         local_inner_ghost_slices = []
-        for i in xrange(dim):
+        for i in range(dim):
             inner_lslices = tuple(
                 gh_slices(j) if j!=i else
                 slice(local_start[i], local_start[i]+ghosts[i])
-                for j in xrange(dim) )
+                for j in range(dim) )
             inner_rslices = tuple(
                 gh_slices(j) if j!=i else
                 slice(local_stop[i]-ghosts[i], local_stop[i])
-                for j in xrange(dim) )
+                for j in range(dim) )
             if (ghosts[i] > 0):
                 inner_shape = resolution.copy()
                 inner_shape[i] = ghosts[i]
@@ -319,15 +319,15 @@ class CartesianMeshView(MeshView):
             raise NotImplementedError(msg_mask)
 
         local_outer_ghost_slices = []
-        for i in xrange(dim):
+        for i in range(dim):
             outer_lslices = tuple(
                 gh_slices(j) if j!=i else
                 slice(local_start[i]-ghosts[i], local_start[i])
-                for j in xrange(dim) )
+                for j in range(dim) )
             outer_rslices = tuple(
                 gh_slices(j) if j!=i else
                 slice(local_stop[i], local_stop[i]+ghosts[i])
-                for j in xrange(dim) )
+                for j in range(dim) )
             if (ghosts[i] > 0):
                 outer_shape = resolution.copy()
                 outer_shape[i] = ghosts[i]
@@ -374,7 +374,7 @@ class CartesianMeshView(MeshView):
             raise NotImplementedError(msg_mask)
 
         boundary_layer_slices = []
-        for i in xrange(dim):
+        for i in range(dim):
             (lbd, rbd) = local_lboundaries[i], local_rboundaries[i]
             has_left_layer  = (lbd not in blacklisted_boundaries)
             has_right_layer = (rbd not in blacklisted_boundaries)
@@ -383,7 +383,7 @@ class CartesianMeshView(MeshView):
                 layer_lslices = tuple(
                     gh_slices(j) if j!=i else
                     slice(local_start[i]-ghosts[i], local_start[i]+ghosts[i]+1)
-                    for j in xrange(dim) )
+                    for j in range(dim) )
             else:
                 layer_lslices = None
 
@@ -391,7 +391,7 @@ class CartesianMeshView(MeshView):
                 layer_rslices = tuple(
                     gh_slices(j) if j!=i else
                     slice(local_stop[i]-ghosts[i]-1, local_stop[i]+ghosts[i])
-                    for j in xrange(dim) )
+                    for j in range(dim) )
             else:
                 layer_rslices = None
 
@@ -428,15 +428,15 @@ class CartesianMeshView(MeshView):
         assert len(ghosts)==dim
 
         views = {}
-        for ndirections in xrange(dim+1):
-            all_directions = tuple(it.combinations(xrange(dim), ndirections))
+        for ndirections in range(dim+1):
+            all_directions = tuple(it.combinations(range(dim), ndirections))
             for directions in all_directions:
                 all_displacements = tuple(it.product((-1,0,+1), repeat=ndirections))
                 for displacements in all_displacements:
                     ai = 0
                     view  = ()
                     shape = ()
-                    for d in xrange(dim):
+                    for d in range(dim):
                         if (d in directions):
                             if displacements[ai] == -1:
                                 view  += (slice(1*ghosts[d], 2*ghosts[d]),)
@@ -483,15 +483,15 @@ class CartesianMeshView(MeshView):
         assert len(ghosts)==dim
 
         views = {}
-        for ndirections in xrange(dim+1):
-            all_directions = tuple(it.combinations(xrange(dim), ndirections))
+        for ndirections in range(dim+1):
+            all_directions = tuple(it.combinations(range(dim), ndirections))
             for directions in all_directions:
                 all_displacements = tuple(it.product((-1,0,+1), repeat=ndirections))
                 for displacements in all_displacements:
                     ai = 0
                     view  = ()
                     shape = ()
-                    for d in xrange(dim):
+                    for d in range(dim):
                         if (d in directions):
                             if displacements[ai] == -1:
                                 view  += (slice(0, ghosts[d]),)
@@ -559,7 +559,7 @@ class CartesianMeshView(MeshView):
         views = {}
         for ndirections in all_views:
             for directions in all_views[ndirections]:
-                for ncenters in xrange(ndirections+1):
+                for ncenters in range(ndirections+1):
                     for displacements in it.product((-1,0,+1), repeat=ndirections):
                         if sum(d==0 for d in displacements)!=ncenters:
                             continue
@@ -703,7 +703,7 @@ class CartesianMeshView(MeshView):
             def __init__(self, dim, ghosts, compute_resolution, local_compute_indices):
                 iter_shape = tuple(compute_resolution[:axes])
                 gI = np.ix_(*local_compute_indices[axes:])
-                I = tuple(gI[i]-ghosts[j]  for i,j in enumerate(xrange(axes, dim)))
+                I = tuple(gI[i]-ghosts[j] for i,j in enumerate(range(axes, dim)))
 
                 self._iter_shape = iter_shape
                 self._data = (dim, ghosts, I, gI)
@@ -725,7 +725,7 @@ class CartesianMeshView(MeshView):
             def iter_compute_mesh(self):
                 (dim, ghosts, I, gI) = self._data
                 for idx in self._new_ndindex_iterator():
-                    gidx = tuple(idx[i]+ghosts[i] for i in xrange(axes))
+                    gidx = tuple(idx[i]+ghosts[i] for i in range(axes))
                     yield (idx, gidx, I, gI)
 
         return __CartesianMeshComputeAxeIterator(dim=self.dim,
@@ -799,7 +799,7 @@ class CartesianMeshView(MeshView):
         return [slice(slc[i].start + self.global_start[i] - self.ghosts[i],
                       slc[i].stop  + self.global_start[i] - self.ghosts[i],
                       slc[i].step)
-                for i in xrange(self._dim)]
+                for i in range(self._dim)]
 
     def global_to_local(self, global_slices):
         """
@@ -824,14 +824,14 @@ class CartesianMeshView(MeshView):
             return tuple( slice( slc[i].start - self.global_start[i] + self.ghosts[i],
                                  slc[i].stop  - self.global_start[i] + self.ghosts[i],
                                  slc[i].step )
-                                 for i in xrange(self.dim) )
+                                 for i in range(self.dim) )
 
     def local_shift(self, indices):
         """Shift input indices (tuple of integer array as obtained from np.where)
         with ghost layer size
         """
-        shift = [self.local_start[d] for d in xrange(self.dim)]
-        return tuple([indices[d] + shift[d] for d in xrange(self.dim)])
+        shift = [self.local_start[d] for d in range(self.dim)]
+        return tuple([indices[d] + shift[d] for d in range(self.dim)])
 
     def compute_integ_point(self, is_last, ic, n_dir=None):
         """Compute indices corresponding to integration points
@@ -848,7 +848,7 @@ class CartesianMeshView(MeshView):
         dim = len(ic)
         # We must find which points must be used
         # when we integrate on this submesh
-        stops = npw.asdimarray([ic[d].stop for d in xrange(dim)])
+        stops = npw.asdimarray([ic[d].stop for d in range(dim)])
         # when 'is_last', the last point must be removed for integration
         stops[is_last] -= 1
         # and finally, for direction where subset length is zero,
@@ -856,7 +856,7 @@ class CartesianMeshView(MeshView):
         if n_dir is not None:
             stops[n_dir] = npw.asdimarray([ic[d].start + 1 for d in n_dir])
 
-        return [slice(ic[d].start, stops[d]) for d in xrange(dim)]
+        return [slice(ic[d].start, stops[d]) for d in range(dim)]
 
     def reduce_coords(self, coords, reduced_index):
         """Compute a reduced set of coordinates
@@ -875,14 +875,14 @@ class CartesianMeshView(MeshView):
         assert isinstance(coords, tuple)
         assert isinstance(reduced_index, tuple)
         dim = len(coords)
-        shapes = [list(coords[i].shape) for i in xrange(dim)]
+        shapes = [list(coords[i].shape) for i in range(dim)]
         res = [reduced_index[i].stop - reduced_index[i].start
-               for i in xrange(dim)]
-        for i in xrange(dim):
+               for i in range(dim)]
+        for i in range(dim):
             shapes[i][i] = res[i]
         shapes = tuple(shapes)
         return [coords[i].flat[reduced_index[i]].reshape(shapes[i])
-                for i in xrange(dim)]
+                for i in range(dim)]
 
     def short_description(self):
         """
@@ -1074,9 +1074,9 @@ class CartesianMesh(CartesianMeshView, Mesh):
         # domain. These variables do not depend on the mpi distribution.
         from hysop.topology.cartesian_topology import CartesianTopology
         check_instance(topology, CartesianTopology)
-        check_instance(local_resolution, (list,tuple,np.ndarray), values=(int,long,np.integer),
+        check_instance(local_resolution, (list,tuple,np.ndarray), values=(int,np.integer),
                             minval=1)
-        check_instance(global_start, (list,tuple,np.ndarray), values=(int,long,np.integer),
+        check_instance(global_start, (list,tuple,np.ndarray), values=(int,np.integer),
                             minval=0)
 
         super(CartesianMesh, self).__init__(topology=topology,
@@ -1127,11 +1127,11 @@ class CartesianMesh(CartesianMeshView, Mesh):
         global_stop   = global_start + compute_resolution
         global_compute_slices = tuple( slice(i,j) for (i,j) in zip(global_start, global_stop) )
         global_ghost_slices = []
-        for i in xrange(dim):
+        for i in range(dim):
             lslices = tuple( slice(None) if j!=i else slice(global_start[i]-ghosts[i], \
-                    global_start[i]) for j in xrange(dim) )
+                    global_start[i]) for j in range(dim) )
             rslices = tuple( slice(None) if j!=i else slice(global_stop[i],
-                global_stop[i]+ghosts[i]) for j in xrange(dim) )
+                global_stop[i]+ghosts[i]) for j in range(dim) )
             global_ghost_slices.append( (lslices,rslices) )
         global_ghost_slices = tuple(global_ghost_slices)
 
@@ -1153,8 +1153,8 @@ class CartesianMesh(CartesianMeshView, Mesh):
         local_indices = tuple(np.arange(Ni, dtype=HYSOP_INTEGER) for Ni in local_resolution)
         local_coords = tuple(
                 npw.asrealarray(tuple(local_origin[d] + i*space_step[d]
-                            for i in xrange(local_resolution[d])))
-                            for d in xrange(dim))
+                            for i in range(local_resolution[d])))
+                            for d in range(dim))
         local_compute_indices = tuple(local_indices[d][local_start[d]:local_stop[d]]
                                         for d in range(dim))
         local_compute_coords = tuple(local_coords[d][local_start[d]:local_stop[d]]
@@ -1246,7 +1246,7 @@ class CartesianMesh(CartesianMeshView, Mesh):
             check_instance(lg, tuple, values=slice, size=dim)
             check_instance(rg, tuple, values=slice, size=dim)
         check_instance(self.global_boundaries, tuple, values=np.ndarray, size=2)
-        for i in xrange(2):
+        for i in range(2):
             check_instance(self.global_boundaries[i], np.ndarray, dtype=object, size=dim,
                                 values=BoundaryCondition)
         assert np.array_equal(self.global_boundaries[0], self.global_lboundaries)
@@ -1270,7 +1270,7 @@ class CartesianMesh(CartesianMeshView, Mesh):
             check_instance(rg, tuple, values=slice, size=dim)
             check_instance(sh, np.ndarray, dtype=np.int32, shape=(dim,), allow_none=True)
         check_instance(self.local_boundaries, tuple, values=np.ndarray, size=2)
-        for i in xrange(2):
+        for i in range(2):
             check_instance(self.local_boundaries[i], np.ndarray, dtype=object, size=dim,
                                 values=BoundaryCondition)
         assert np.array_equal(self.local_boundaries[0], self.local_lboundaries)
@@ -1284,7 +1284,7 @@ class CartesianMesh(CartesianMeshView, Mesh):
         check_instance(self.local_mesh_coords, tuple, size=dim, values=np.ndarray)
         check_instance(self.local_compute_coords, tuple, size=dim, values=np.ndarray)
         check_instance(self.local_compute_mesh_coords, tuple, size=dim, values=np.ndarray)
-        for i in xrange(dim):
+        for i in range(dim):
             check_instance(self.local_indices[i], np.ndarray, dtype=HYSOP_INTEGER,
                     ndim=1, size=self.local_resolution[i])
             check_instance(self.local_mesh_indices[i], np.ndarray, dtype=HYSOP_INTEGER,
diff --git a/hysop/mesh/mesh.py b/hysop/mesh/mesh.py
index 4f30aa6003a63553b00f5b6a5cf9e27c54d72401..2b7993d7311299e973e5ae17ac1c0be83cd64441 100644
--- a/hysop/mesh/mesh.py
+++ b/hysop/mesh/mesh.py
@@ -17,16 +17,15 @@ from hysop.tools.decorators import debug
 from hysop.tools.types import check_instance
 from hysop.tools.handle import TaggedObject, TaggedObjectView
 
-class MeshView(TaggedObjectView):
+class MeshView(TaggedObjectView, metaclass=ABCMeta):
     """Abstract base class for views on meshes. """
-    __metaclass__ = ABCMeta
 
     __slots__ = ('_mesh', '_topology_state')
-    
+
     @debug
     def __new__(cls, mesh, topology_state, **kwds):
         return super(MeshView, cls).__new__(cls, obj_view=mesh, **kwds)
-    
+
     @debug
     def __init__(self, mesh, topology_state, **kwds):
         """Initialize a MeshView."""
@@ -39,11 +38,11 @@ class MeshView(TaggedObjectView):
     def _get_mesh(self):
         """Return the original mesh on which the view is."""
         return self._mesh
-    
+
     def _get_topology_state(self):
         """Return the topology state"""
         return self._topology_state
-    
+
     def _get_dim(self):
         """Return the dimension of the domain."""
         return self._mesh._topology.domain.dim
@@ -61,30 +60,29 @@ class MeshView(TaggedObjectView):
     def __str__(self):
         """Equivalent to self.long_description()"""
         return self.long_description()
-        
+
     mesh = property(_get_mesh)
     topology_state = property(_get_topology_state)
     dim = property(_get_dim)
 
-class Mesh(TaggedObject):
+class Mesh(TaggedObject, metaclass=ABCMeta):
     """Abstract base class for local to process meshes."""
-    __metaclass__ = ABCMeta
 
     def __new__(cls, topology, **kwds):
         return super(Mesh, cls).__new__(cls, **kwds)
-    
+
     @debug
     def __init__(self, topology, **kwds):
         """Initialize a mesh."""
         check_instance(topology, Topology)
         super(Mesh, self).__init__(tag_prefix='m', **kwds)
         self._topology = topology
-    
+
     @abstractmethod
     def view(self, topology_state):
         """Return a view on this mesh using a topology state."""
         pass
-    
+
     def _get_topology(self):
         """Return a topology view on the original topology that defined this mesh."""
         return self._topology
diff --git a/hysop/numerics/fft/_mkl_fft.py b/hysop/numerics/fft/_mkl_fft.py
index a095dfa05644a2400b13ad1a485efe401cfe4bff..3e7cc39abc001b6d72832087d222088959e9366c 100644
--- a/hysop/numerics/fft/_mkl_fft.py
+++ b/hysop/numerics/fft/_mkl_fft.py
@@ -1,23 +1,23 @@
-"""
+r"""
 FFT interface for fast Fourier Transforms using Intel MKL (numpy interface).
 :class:`~hysop.numerics.MklFFT`
 :class:`~hysop.numerics.MklFFTPlan`
 
-/!\ -- OPENMP CONFLICT WITH GRAPHTOOLS -- 
+/!\ -- OPENMP CONFLICT WITH GRAPHTOOLS --
 /!\ Only works if MKL_THREADING_LAYER is set to OMP if some
     dependencies are compiled against GNU OpenMP.
 /!\ May also work with MKL_THREADING_LAYER=TBB and SEQUENCIAL but not INTEL.
 
-Required version of mkl_fft is: https://gitlab.com/keckj/mkl_fft 
+Required version of mkl_fft is: https://gitlab.com/keckj/mkl_fft
 If MKL_THREADING_LAYER is not set, or is set to INTEL, FFT tests will fail.
 """
 
 import functools, warnings
 import numpy as np
 import numba as nb
-from mkl_fft import (ifft as mkl_ifft, 
-                     fft as mkl_fft, 
-                     rfft_numpy as mkl_rfft, 
+from mkl_fft import (ifft as mkl_ifft,
+                     fft as mkl_fft,
+                     rfft_numpy as mkl_rfft,
                      irfft_numpy as mkl_irfft)
 
 from hysop.numerics.fft.host_fft import HostFFTPlanI, HostFFTI, HostArray
@@ -60,7 +60,7 @@ def setup_transform(x, axis, transform, inverse, type):
             dout = dtype
         else:
             raise NotImplementedError
-    elif (transform is 'dst'):
+    elif (transform == 'dst'):
         ctype = float_to_complex_dtype(x.dtype)
         if (type==1):
             sin = mk_shape(shape, axis, 2*N+2)
@@ -165,7 +165,7 @@ def dct(x, out=None, type=2, axis=-1, input_tmp=None, output_tmp=None):
         assert output_tmp.shape == sout
         assert output_tmp.dtype == dout
         output_tmp *= (2*np.exp(-1j*np.pi*np.arange(n0)/(2*N)))[slc4]
-        
+
         if (out is None):
             out = np.empty_like(x)
         else:
@@ -187,7 +187,7 @@ def dct(x, out=None, type=2, axis=-1, input_tmp=None, output_tmp=None):
         slc7  = mk_view(ndim, axis, None, None, None, default=None)
         slc8  = mk_view(ndim, axis, n0  , None, None)
         slc9  = mk_view(ndim, axis, None, 1   , None)
-        
+
         if (input_tmp is None):
             input_tmp = np.empty(shape=sin, dtype=din)
         if (output_tmp is None):
@@ -272,7 +272,7 @@ def dst(x, out=None, type=2, axis=-1, input_tmp=None, output_tmp=None):
         np.concatenate((x[slc1], -x[slc2][slc3]), axis=axis, out=input_tmp)
         rfft(x=input_tmp, out=output_tmp, axis=axis)
         output_tmp *= (2*np.exp(-1j*np.pi*np.arange(n0)/(2*N)))[slc4]
-        
+
         if (out is None):
             out = np.empty_like(x)
         else:
@@ -297,7 +297,7 @@ def dst(x, out=None, type=2, axis=-1, input_tmp=None, output_tmp=None):
         slc9  = mk_view(ndim, axis, n0,   None,   None)
         slc10 = mk_view(ndim, axis, 0, 1, None)
         s0    = mk_shape(shape, axis, n0)
-        
+
         if (input_tmp is None):
             input_tmp = np.empty(shape=sin, dtype=din)
         if (output_tmp is None):
@@ -341,19 +341,19 @@ def idst(x, out=None, type=2, axis=-1, **kwds):
 
 class MklFFTPlan(HostFFTPlanI):
     """
-    Wrap a mkl fft call (mkl.fft does not offer real planning capabilities). 
+    Wrap a mkl fft call (mkl.fft does not offer real planning capabilities).
     """
 
     def __init__(self, planner, fn, a, out, axis, scaling=None, **kwds):
         super(MklFFTPlan, self).__init__()
-        
+
         self.planner      = planner
         self.fn           = fn
         self.a            = a
         self.out          = out
         self.scaling      = scaling
 
-        (sin, din, sout, dout) = setup_transform(a, axis, 
+        (sin, din, sout, dout) = setup_transform(a, axis,
                 'dct' if fn in (dct, idct) else 'dst' if fn in (dst, idst) else None,
                 fn in (idct, idst),
                 kwds.get('type', None))
@@ -367,7 +367,7 @@ class MklFFTPlan(HostFFTPlanI):
             self._required_output_tmp = None
         else:
             self._required_output_tmp = {'size': np.prod(sout, dtype=np.int64), 'shape':sout, 'dtype':dout}
-        
+
         self._allocated = False
 
         if isinstance(a, HostArray):
@@ -377,7 +377,7 @@ class MklFFTPlan(HostFFTPlanI):
 
         self.rescale = self.bake_scaling_plan(out, scaling)
 
-    
+
         kwds = kwds.copy()
         kwds['x']    = a
         kwds['out']  = out
@@ -392,34 +392,34 @@ class MklFFTPlan(HostFFTPlanI):
         target =  __DEFAULT_NUMBA_TARGET__
         signature, layout = make_numba_signature(x, scaling)
         if (x.ndim == 1):
-            @nb.guvectorize([signature], layout, 
+            @nb.guvectorize([signature], layout,
                 target=target, nopython=True, cache=True)
             def scale(x, scaling):
-                for i in xrange(0, x.shape[0]):
+                for i in range(0, x.shape[0]):
                     x[i] *= scaling
         elif (x.ndim == 2):
-            @nb.guvectorize([signature], layout, 
+            @nb.guvectorize([signature], layout,
                 target=target, nopython=True, cache=True)
             def scale(x, scaling):
                 for i in prange(0, x.shape[0]):
-                    for j in xrange(0, x.shape[1]):
+                    for j in range(0, x.shape[1]):
                         x[i,j] *= scaling
         elif (x.ndim == 3):
-            @nb.guvectorize([signature], layout, 
+            @nb.guvectorize([signature], layout,
                 target=target, nopython=True, cache=True)
             def scale(x, scaling):
                 for i in prange(0, x.shape[0]):
                     for j in prange(0, x.shape[1]):
-                        for k in xrange(0, x.shape[2]):
+                        for k in range(0, x.shape[2]):
                             x[i,j,k] *= scaling
         elif (x.ndim == 4):
-            @nb.guvectorize([signature], layout, 
+            @nb.guvectorize([signature], layout,
                 target=target, nopython=True, cache=True)
             def scale(x, scaling):
                 for i in prange(0, x.shape[0]):
                     for j in prange(0, x.shape[1]):
                         for k in prange(0, x.shape[2]):
-                            for l in xrange(0, x.shape[3]):
+                            for l in range(0, x.shape[3]):
                                 x[i,j,k,l] *= scaling
         else:
             raise NotImplementedError(x.ndim)
@@ -455,16 +455,16 @@ class MklFFTPlan(HostFFTPlanI):
             sout, dout = self._required_output_tmp['size'], self._required_output_tmp['dtype']
             sout *= get_itemsize(dout)
             Bout = ((sout+alignment-1)//alignment)*alignment
-        else: 
+        else:
             Bout = 0
         return Bin+Bout
-        
+
     def allocate(self, buf=None):
         """Allocate plan extra memory, possibly with a custom buffer."""
         if self._allocated:
             msg='Plan was already allocated.'
             raise RuntimeError(msg)
-        
+
         if (buf is not None):
             alignment = simd_alignment
             if self._required_input_tmp:
@@ -477,7 +477,7 @@ class MklFFTPlan(HostFFTPlanI):
                 sout, dout, ssout = self._required_output_tmp['size'], self._required_output_tmp['dtype'], self._required_output_tmp['shape']
                 sout *= get_itemsize(dout)
                 Bout = ((sout+alignment-1)//alignment)*alignment
-            else: 
+            else:
                 Bout = 0
             assert buf.dtype.itemsize == 1
             assert buf.size == Bin+Bout
@@ -487,7 +487,7 @@ class MklFFTPlan(HostFFTPlanI):
             input_buf = None
             output_buf = None
 
-        for (k, buf, required_tmp) in zip(('input', 'output'), 
+        for (k, buf, required_tmp) in zip(('input', 'output'),
                                           (input_buf, output_buf),
                                           (self._required_input_tmp, self._required_output_tmp)):
             if (required_tmp is None):
@@ -505,7 +505,7 @@ class MklFFTPlan(HostFFTPlanI):
                             raise RuntimeError(msg)
                         else:
                             warnings.warn(msg, HysopMKLFftWarning)
-                    buf = self.planner.backend.empty(shape=shape, 
+                    buf = self.planner.backend.empty(shape=shape,
                                                      dtype=dtype)
                 elif (buf.shape != shape) or (buf.dtype != dtype):
                     msg='Buffer does not match required shape: {} != {}'
@@ -526,69 +526,69 @@ class MklFFT(HostFFTI):
     """
     Interface to compute local to process FFT-like transforms using the mkl fft backend.
 
-    Mkl fft backend has some disadvantages: 
+    Mkl fft backend has some disadvantages:
       - creates intermediate temporary buffers at each call (out and tmp for real-to-real transforms)
       - no planning capabilities (mkl.fft methods are just wrapped into fake plans)
     """
 
-    def __init__(self, backend=None, allocator=None, 
-                        warn_on_allocation=True, error_on_allocation=False, 
+    def __init__(self, backend=None, allocator=None,
+                        warn_on_allocation=True, error_on_allocation=False,
                         destroy_input=None, **kwds):
-        super(MklFFT, self).__init__(backend=backend, allocator=allocator, 
+        super(MklFFT, self).__init__(backend=backend, allocator=allocator,
                 warn_on_allocation=warn_on_allocation, error_on_allocation=error_on_allocation, **kwds)
         self.supported_ftypes = (np.float32, np.float64,)
         self.supported_ctypes = (np.complex64, np.complex128,)
-    
+
     def fft(self, a, out=None, axis=-1, **kwds):
         (shape, dtype) = super(MklFFT, self).fft(a=a, out=out, axis=axis, **kwds)
-        out = self.allocate_output(out, shape, dtype) 
+        out = self.allocate_output(out, shape, dtype)
         plan = MklFFTPlan(self, fn=fft, a=a, out=out, axis=axis, **kwds)
         return plan
 
     def ifft(self, a, out=None, axis=-1, **kwds):
         (shape, dtype, s) = super(MklFFT, self).ifft(a=a, out=out, axis=axis, **kwds)
-        out = self.allocate_output(out, shape, dtype) 
+        out = self.allocate_output(out, shape, dtype)
         plan = MklFFTPlan(self, fn=ifft, a=a, out=out, axis=axis, **kwds)
         return plan
 
     def rfft(self, a, out=None, axis=-1, **kwds):
         (shape, dtype) = super(MklFFT, self).rfft(a=a, out=out, axis=axis, **kwds)
-        out = self.allocate_output(out, shape, dtype) 
-        plan = MklFFTPlan(self, fn=rfft, a=a, out=out, axis=axis, 
+        out = self.allocate_output(out, shape, dtype)
+        plan = MklFFTPlan(self, fn=rfft, a=a, out=out, axis=axis,
                             **kwds)
         return plan
 
     def irfft(self, a, out=None, n=None, axis=-1, **kwds):
         (shape, dtype, s) = super(MklFFT, self).irfft(a=a, out=out, n=n, axis=axis, **kwds)
-        out = self.allocate_output(out, shape, dtype) 
-        plan = MklFFTPlan(self, fn=irfft, a=a, out=out, axis=axis, 
+        out = self.allocate_output(out, shape, dtype)
+        plan = MklFFTPlan(self, fn=irfft, a=a, out=out, axis=axis,
                             n=shape[axis], **kwds)
         return plan
-    
+
     def dct(self, a, out=None, type=2, axis=-1, **kwds):
         (shape, dtype) = super(MklFFT, self).dct(a=a, out=out, type=type, axis=axis, **kwds)
-        out = self.allocate_output(out, shape, dtype) 
+        out = self.allocate_output(out, shape, dtype)
         plan = MklFFTPlan(self, fn=dct, a=a, out=out, axis=axis, type=type, **kwds)
         return plan
-    
+
     def idct(self, a, out=None, type=2, axis=-1, scaling=None, **kwds):
-        (shape, dtype, _, s) = super(MklFFT, self).idct(a=a, out=out, type=type, 
+        (shape, dtype, _, s) = super(MklFFT, self).idct(a=a, out=out, type=type,
                                     axis=axis, **kwds)
-        out = self.allocate_output(out, shape, dtype) 
-        plan = MklFFTPlan(self, fn=idct, a=a, out=out, axis=axis, type=type, 
+        out = self.allocate_output(out, shape, dtype)
+        plan = MklFFTPlan(self, fn=idct, a=a, out=out, axis=axis, type=type,
                                 scaling=first_not_None(scaling, 1.0/s), **kwds)
         return plan
-    
+
     def dst(self, a, out=None, type=2, axis=-1, **kwds):
         (shape, dtype) = super(MklFFT, self).dst(a=a, out=out, type=type, axis=axis, **kwds)
-        out = self.allocate_output(out, shape, dtype) 
+        out = self.allocate_output(out, shape, dtype)
         plan = MklFFTPlan(self, fn=dst, a=a, out=out, axis=axis, type=type, **kwds)
         return plan
 
     def idst(self, a, out=None, type=2, axis=-1, scaling=None, **kwds):
-        (shape, dtype, _, s) = super(MklFFT, self).idst(a=a, out=out, type=type, axis=axis, 
+        (shape, dtype, _, s) = super(MklFFT, self).idst(a=a, out=out, type=type, axis=axis,
                 **kwds)
-        out = self.allocate_output(out, shape, dtype) 
+        out = self.allocate_output(out, shape, dtype)
         plan = MklFFTPlan(self, fn=idst, a=a, out=out, axis=axis, type=type,
                                 scaling=first_not_None(scaling, 1.0/s), **kwds)
         return plan
diff --git a/hysop/numerics/fft/fft.py b/hysop/numerics/fft/fft.py
index 494fb6550b4082ff1d1d9317362b08b7dfb63c7a..c54495669d0252bb8109006c7c4487c89d5fbd9b 100644
--- a/hysop/numerics/fft/fft.py
+++ b/hysop/numerics/fft/fft.py
@@ -89,7 +89,7 @@ class FFTQueueI(object):
     def execute(self, wait_for=None):
         """Execute all planned plans."""
         pass
-    
+
     @abstractmethod
     def __iadd__(self, *plans):
         """Add a plan to the queue."""
@@ -100,13 +100,12 @@ class FFTQueueI(object):
         return self.execute(**kwds)
 
 
-class FFTPlanI(object):
+class FFTPlanI(object, metaclass=ABCMeta):
     """
     Common inteface for FFT plans.
     Basically just a functor that holds relevant data
     to execute a preconfigurated FFT-like tranform.
     """
-    __metaclass__ = ABCMeta
 
     def __init__(self, verbose=__VERBOSE__):
         self.verbose = verbose
@@ -119,14 +118,14 @@ class FFTPlanI(object):
         Return currently planned input array.
         """
         pass
-    
+
     @abstractmethod
     def output_array(self):
         """
         Return currently planned output array.
         """
         pass
-    
+
     def setup(self, queue=None):
         """
         Method that has to be called before any call to execute.
@@ -136,11 +135,11 @@ class FFTPlanI(object):
             raise RuntimeError(msg)
         self._setup = True
         return self
-    
+
     @property
     def required_buffer_size(self):
         """
-        Return the required temporary buffer size in bytes to 
+        Return the required temporary buffer size in bytes to
         compute the transform.
         """
         assert self._setup
@@ -151,7 +150,7 @@ class FFTPlanI(object):
         assert self._setup
         assert not self._allocated
         self._allocated = True
-    
+
 
     @abstractmethod
     def execute(self):
@@ -171,7 +170,7 @@ class FFTPlanI(object):
         self.execute(**kwds)
 
 
-class FFTI(object):
+class FFTI(object, metaclass=ABCMeta):
     """
     Interface to compute local to process FFT-like transforms.
     Common inteface for all array backends, based on the numpy.fft interface.
@@ -194,15 +193,15 @@ class FFTI(object):
     Other R2R transforms:
         DCT-IV and DCT-IV are only supported by the FFTW backend at this time.
         DCT-V to DCT-VIII and DST-V to DST-VII are not supported by any FFT backend.
-    
+
     About floating point precision:
-        By default, both simple and double precision are supported. 
+        By default, both simple and double precision are supported.
         numpy only supports double precision (simple precision is supported by casting).
         FFTW also supports long double precision.
-    
+
     Normalization:
         The default normalization has the direct transforms unscaled and the inverse transform
-        is scaled by 1/N where N is the logical size of the transform. 
+        is scaled by 1/N where N is the logical size of the transform.
         N should not to be confused with the physical size of the input arrays n:
 
         FFT, RFFT:               N = n
@@ -240,8 +239,7 @@ class FFTI(object):
         *Zero fill
     Those methods will be used by the n-dimensional planner.
     """
-    __metaclass__ = ABCMeta
-        
+
     __transform2fn = {
         TransformType.FFT:      ('fft',   {}),
         TransformType.IFFT:     ('ifft',  {}),
@@ -264,9 +262,9 @@ class FFTI(object):
         TransformType.IDST_III: ('idst',  {'type': 3}),
         TransformType.IDST_IV:  ('idst',  {'type': 4}),
     }
-    
+
     @classmethod
-    def default_interface_from_backend(cls, backend, 
+    def default_interface_from_backend(cls, backend,
             enable_opencl_host_buffer_mapping, **kwds):
         check_instance(backend, ArrayBackend)
         if (backend.kind is Backend.HOST):
@@ -296,7 +294,7 @@ class FFTI(object):
                 msg='Backend mismatch {} vs {}.'
                 msg=msg.format(self.backend, backend)
                 raise RuntimeError(msg)
-    
+
     def get_transform(self, transform):
         check_instance(transform, TransformType)
         if (transform not in self.__transform2fn):
@@ -308,7 +306,7 @@ class FFTI(object):
             fn = functools.partial(fn, **fkwds)
         return fn
 
-    def __init__(self, backend, 
+    def __init__(self, backend,
             warn_on_allocation=True,
             error_on_allocation=False):
         """Initializes the interface and default supported real and complex types."""
@@ -325,7 +323,7 @@ class FFTI(object):
         self.backend = backend
         self.warn_on_allocation  = warn_on_allocation
         self.error_on_allocation = error_on_allocation
-   
+
     def allocate_output(self, out, shape, dtype):
         """Alocate output if required and check shape and dtype."""
         if (out is None):
@@ -342,7 +340,7 @@ class FFTI(object):
             assert out.dtype == dtype
             assert out.shape == shape
         return out
-    
+
     @classmethod
     def default_interface(cls, **kwds):
         """Get the default FFT interface."""
@@ -399,17 +397,17 @@ class FFTI(object):
         out: array_like of np.complex64 or np.complex128
             Complex output array of the same shape and dtype as the input.
         axis: int, optional
-            Axis over witch to compute the FFT. 
+            Axis over which to compute the FFT.
             Defaults to last axis.
 
         Returns
         -------
         (shape, dtype) of the output array determined from the input array.
-        
+
         Notes
         -----
         N = a.shape[axis]
-        out[0] will contain the sum of the signal (zero-frequency term always real for 
+        out[0] will contain the sum of the signal (zero-frequency term always real for
         real inputs).
 
         If N is even:
@@ -439,9 +437,9 @@ class FFTI(object):
         out: array_like of np.complex64 or np.complex128
             Complex output array of the same shape and dtype as the input.
         axis: int, optional
-            Axis over witch to compute the FFT. 
+            Axis over which to compute the FFT.
             Defaults to last axis.
-        
+
         Returns
         -------
         (shape, dtype, logical_size) of the output array determined from the input array.
@@ -456,7 +454,7 @@ class FFTI(object):
     @abstractmethod
     def rfft(self, a, out, axis=-1, **kwds):
         """
-        Compute the unscaled one-dimensional real to hermitian complex discrete Fourier 
+        Compute the unscaled one-dimensional real to hermitian complex discrete Fourier
         Transform.
 
         Parameters
@@ -468,17 +466,17 @@ class FFTI(object):
             out.shape[...]  = a.shape[...]
             out.shape[axis] = a.shape[axis]//2 + 1
         axis: int, optional
-            Axis over witch to compute the transform. 
+            Axis over which to compute the transform.
             Defaults to last axis.
-        
+
         Returns
         -------
         (shape, dtype) of the output array determined from the input array.
-        
+
         Notes
         -----
-        For real inputs there is no information in the negative frequency components that 
-        is not already  available from the positive frequency component because of the 
+        For real inputs there is no information in the negative frequency components that
+        is not already available from the positive frequency component because of the
         Hermitian symmetry.
 
         N = out.shape[axis] = a.shape[axis]//2 + 1
@@ -504,7 +502,7 @@ class FFTI(object):
     @abstractmethod
     def irfft(self, a, out, n=None, axis=-1, **kwds):
         """
-        Compute the one-dimensional hermitian complex to real discrete Fourier Transform 
+        Compute the one-dimensional hermitian complex to real discrete Fourier Transform
         scaled by 1/N.
 
         Parameters
@@ -521,22 +519,22 @@ class FFTI(object):
             Length of the transformed axis of the output.
             ie: n should be in [2*(a.shape[axis]-1), 2*(a.shape[axis]-1)+1]
         axis: int, optional
-            Axis over witch to compute the transform. 
+            Axis over which to compute the transform.
             Defaults to last axis.
-        
+
         Notes
         -----
         To get an odd number of output points, n or out must be specified.
-        
+
         Returns
         -------
-        (shape, dtype, logical_size) of the output array determined from the input array, 
+        (shape, dtype, logical_size) of the output array determined from the input array,
         out and n.
         """
         assert a.dtype   in self.supported_ctypes
         cshape = a.shape
         rtype  = complex_to_float_dtype(a.dtype)
-        
+
         rshape_even, rshape_odd = list(a.shape), list(a.shape)
         rshape_even[axis] = 2*(cshape[axis]-1)
         rshape_odd[axis]  = 2*(cshape[axis]-1) + 1
@@ -561,12 +559,12 @@ class FFTI(object):
             n = rshape[axis]
         else:
             rshape = rshape_odd
-        
+
         rshape = tuple(rshape)
         logical_size = n
         assert rshape[axis] == logical_size
         return (rshape, rtype, logical_size)
-    
+
     @abstractmethod
     def dct(self, a, out=None, type=2, axis=-1, **kwds):
         """
@@ -579,7 +577,7 @@ class FFTI(object):
         out: array_like
             Real output array of matching input type and shape.
         axis: int, optional
-            Axis over witch to compute the transform. 
+            Axis over which to compute the transform.
             Defaults to last axis.
         Returns
         -------
@@ -591,13 +589,13 @@ class FFTI(object):
             assert a.dtype == out.dtype
             assert np.array_equal(a.shape, out.shape)
         return (a.shape, a.dtype)
-    
+
     @abstractmethod
     def idct(self, a, out=None, type=2, axis=-1, **kwds):
         """
         Compute the one-dimensional Inverse Cosine Transform of specified type.
-        
-        Default scaling is 1/(2*N)   for IDCT type (2,3,4) and 
+
+        Default scaling is 1/(2*N)   for IDCT type (2,3,4) and
                            1/(2*N-2) for IDCT type 1.
 
         Parameters
@@ -607,11 +605,11 @@ class FFTI(object):
         out: array_like
             Real output array of matching input type and shape.
         axis: int, optional
-            Axis over witch to compute the transform. 
+            Axis over which to compute the transform.
             Defaults to last axis.
         Returns
         -------
-        (shape, dtype, inverse_type, logical_size) of the output array determined from the input 
+        (shape, dtype, inverse_type, logical_size) of the output array determined from the input
         array.
         """
         itype = [1,3,2,4][type-1]
@@ -624,7 +622,7 @@ class FFTI(object):
             assert a.dtype == out.dtype
             assert np.array_equal(a.shape, out.shape)
         return (a.shape, a.dtype, itype, logical_size)
-    
+
     @abstractmethod
     def dst(self, a, out=None, type=2, axis=-1, **kwds):
         """
@@ -637,7 +635,7 @@ class FFTI(object):
         out: array_like
             Real output array of matching input type and shape.
         axis: int, optional
-            Axis over witch to compute the transform. 
+            Axis over which to compute the transform.
             Defaults to last axis.
         Returns
         -------
@@ -655,7 +653,7 @@ class FFTI(object):
         """
         Compute the one-dimensional Inverse Sine Transform of specified type.
 
-        Default scaling is 1/(2*N)   for IDST type (2,3,4) and 
+        Default scaling is 1/(2*N)   for IDST type (2,3,4) and
                            1/(2*N+2) for IDST type 1.
 
         Parameters
@@ -665,11 +663,11 @@ class FFTI(object):
         out: array_like
             Real output array of matching input type and shape.
         axis: int, optional
-            Axis over witch to compute the transform. 
+            Axis over which to compute the transform.
             Defaults to last axis.
         Returns
         -------
-        (shape, dtype, inverse_type, logical_size) of the output array determined from the input 
+        (shape, dtype, inverse_type, logical_size) of the output array determined from the input
         array.
         """
         itype = [1,3,2,4][type-1]
@@ -692,7 +690,7 @@ class FFTI(object):
     def plan_copy(self, tg, src, dst):
         """Plan a copy from src to dst."""
         pass
-    
+
     @abstractmethod
     def plan_accumulate(self, tg, src, dst):
         """Plan an accumulation from src into dst."""
@@ -707,7 +705,7 @@ class FFTI(object):
     def plan_fill_zeros(self, tg, a, slices):
         """Plan to fill every input slices of input array a with zeroes."""
         pass
-    
+
     @abstractmethod
     def plan_compute_energy(self, tg, fshape, src, dst, transforms, mutexes=None):
         """Plan to compute energy from src to energy."""
diff --git a/hysop/numerics/fft/fftw_fft.py b/hysop/numerics/fft/fftw_fft.py
index 6af22b3144fae997a757e872a751ed4e8c754302..dbf09acfea1baab0948e1241e7efb66531ad5791 100644
--- a/hysop/numerics/fft/fftw_fft.py
+++ b/hysop/numerics/fft/fftw_fft.py
@@ -9,7 +9,7 @@ import pyfftw
 import numpy as np
 
 from hysop import __FFTW_NUM_THREADS__, __FFTW_PLANNER_EFFORT__, __FFTW_PLANNER_TIMELIMIT__, __VERBOSE__
-from hysop.tools.io_utils   import IO 
+from hysop.tools.io_utils   import IO
 from hysop.tools.types import first_not_None
 from hysop.tools.misc import prod
 from hysop.tools.string_utils import framed_str
@@ -25,7 +25,7 @@ class FftwFFTPlan(HostFFTPlanI):
     """
 
     __FFTW_USE_CACHE__=True
-    
+
     @classmethod
     def cache_file(cls):
         _cache_dir  = IO.cache_path() + '/numerics'
@@ -40,7 +40,7 @@ class FftwFFTPlan(HostFFTPlanI):
                 pyfftw.import_wisdom(wisdom)
                 return True
         return False
-        
+
     @classmethod
     def save_wisdom(cls, h, plan):
         if cls.__FFTW_USE_CACHE__:
@@ -60,17 +60,17 @@ class FftwFFTPlan(HostFFTPlanI):
             plan_kwds['output_array'] = out.handle
         else:
             plan_kwds['output_array'] = out
-        
+
         def fmt_arg(name):
             return plan_kwds[name]
         def fmt_array(name):
             arr = fmt_arg(name)
             return 'shape={:<16} strides={:<16} dtype={:<16}'.format(
-                    str(arr.shape)+',', 
+                    str(arr.shape)+',',
                     str(arr.strides)+',',
-                    arr.dtype)
+                    str(arr.dtype))
 
-        title=' Planning {} '.format(self.__class__.__name__) 
+        title=' Planning {} '.format(self.__class__.__name__)
         msg = \
         '''    in_array:           {}
     out_array:          {}
@@ -87,9 +87,9 @@ class FftwFFTPlan(HostFFTPlanI):
             ' | '.join(fmt_arg('flags')),
             fmt_arg('planning_timelimit'))
         if self.verbose:
-            print
-            print framed_str(title, msg, c='*')
-        
+            print()
+            print(framed_str(title, msg, c='*'))
+
         def hash_arg(name):
             return hash(plan_kwds[name])
         def hash_array(name):
@@ -97,7 +97,7 @@ class FftwFFTPlan(HostFFTPlanI):
             return hash(arr.shape) ^ hash(arr.strides)
         #h = hash_array('input_array') ^ hash_array('output_array') ^ hash_arg('axes') ^ hash_arg('direction')
         h = None
-        
+
         plan = None
         may_have_wisdom = self.load_wisdom(h)
         if may_have_wisdom:
@@ -120,7 +120,7 @@ class FftwFFTPlan(HostFFTPlanI):
         self.scaling = scaling
         self.out = out
         self.a = a
-    
+
     @property
     def input_array(self):
         return self.a
@@ -156,11 +156,11 @@ class FftwFFTPlan(HostFFTPlanI):
         self.plan.__call__()
         if (self.scaling is not None):
             self.output_array[...] *= self.scaling
-    
+
     def __call__(self):
         """
         Execute the plan on possibly different input and output arrays.
-        Input array updates with arrays that are not aligned on original byte boundary 
+        Input array updates with arrays that are not aligned on original byte boundary
         will result in a copy being made.
         Return output array for convenience.
         """
@@ -171,7 +171,7 @@ class FftwFFT(HostFFTI):
     """
     Interface to compute local to process FFT-like transforms using the FFTW backend.
 
-    Fftw fft backend has many advantages: 
+    Fftw fft backend has many advantages:
         - single, double and long double precision supported
         - no intermediate temporary buffers created at each call.
         - planning capability with caching
@@ -180,7 +180,7 @@ class FftwFFT(HostFFTI):
     Planning destroys initial arrays content.
     """
 
-    def __init__(self, threads=None, 
+    def __init__(self, threads=None,
                        planner_effort=None,
                        planning_timelimit=None,
                        destroy_input=False,
@@ -192,7 +192,7 @@ class FftwFFT(HostFFTI):
         threads            = first_not_None(threads,            __FFTW_NUM_THREADS__)
         planner_effort     = first_not_None(planner_effort,     __FFTW_PLANNER_EFFORT__)
         planning_timelimit = first_not_None(planning_timelimit, __FFTW_PLANNER_TIMELIMIT__)
-        super(FftwFFT, self).__init__(backend=backend, allocator=allocator, 
+        super(FftwFFT, self).__init__(backend=backend, allocator=allocator,
                 warn_on_allocation=warn_on_allocation, error_on_allocation=error_on_allocation,
                 **kwds)
         self.supported_ftypes = (np.float32, np.float64, np.longdouble)
@@ -215,7 +215,7 @@ class FftwFFT(HostFFTI):
         if (a is not None) and not pyfftw.is_byte_aligned(array=a):
             msg=msg0.format('Input')
             warnings.warn(msg, HysopFFTWarning)
-        elif (out is not None) and not pyfftw.is_byte_aligned(out): 
+        elif (out is not None) and not pyfftw.is_byte_aligned(out):
             msg=msg0.format('Output')
             warnings.warn(msg, HysopFFTWarning)
 
@@ -231,7 +231,7 @@ class FftwFFT(HostFFTI):
 
         flags = ()
         flags += (kwds.pop('planner_effort', self.planner_effort),)
-        if kwds.pop('destroy_input', self.destroy_input) is True: 
+        if kwds.pop('destroy_input', self.destroy_input) is True:
             flags += ('FFTW_DESTROY_INPUT',)
         if kwds.pop('wisdom_only', False) is True:
             flags += ('FFTW_WISDOM_ONLY',)
@@ -241,14 +241,14 @@ class FftwFFT(HostFFTI):
             msg='Unknown keyword arguments: {}'
             msg=msg.format(', '.join('\'{}\''.format(kwd) for kwd in kwds.keys()))
             raise RuntimeError(msg)
-        
+
         return plan_kwds
 
-    
+
     def fft(self, a, out=None, axis=-1, **kwds):
         """Planning destroys initial arrays content."""
         (shape, dtype) = super(FftwFFT, self).fft(a=a, out=out, axis=axis, **kwds)
-        out = self.allocate_output(out, shape, dtype) 
+        out = self.allocate_output(out, shape, dtype)
         if self.warn_on_misalignment:
             self.check_alignment(a, out)
         kwds = self.bake_kwds(a=a, out=out, axis=axis, direction='FFTW_FORWARD', **kwds)
@@ -258,7 +258,7 @@ class FftwFFT(HostFFTI):
     def ifft(self, a, out=None, axis=-1, **kwds):
         """Planning destroys initial arrays content."""
         (shape, dtype, s) = super(FftwFFT, self).ifft(a=a, out=out, axis=axis, **kwds)
-        out = self.allocate_output(out, shape, dtype) 
+        out = self.allocate_output(out, shape, dtype)
         if self.warn_on_misalignment:
             self.check_alignment(a, out)
         kwds = self.bake_kwds(a=a, out=out, axis=axis, direction='FFTW_BACKWARD', **kwds)
@@ -268,7 +268,7 @@ class FftwFFT(HostFFTI):
     def rfft(self, a, out=None, axis=-1, **kwds):
         """Planning destroys initial arrays content."""
         (shape, dtype) = super(FftwFFT, self).rfft(a=a, out=out, axis=axis, **kwds)
-        out = self.allocate_output(out, shape, dtype) 
+        out = self.allocate_output(out, shape, dtype)
         if self.warn_on_misalignment:
             self.check_alignment(a, out)
         kwds = self.bake_kwds(a=a, out=out, axis=axis, direction='FFTW_FORWARD', **kwds)
@@ -277,9 +277,9 @@ class FftwFFT(HostFFTI):
 
     def irfft(self, a, out=None, n=None, axis=-1, **kwds):
         """Planning destroys initial arrays content."""
-        (shape, dtype, s) = super(FftwFFT, self).irfft(a=a, out=out, axis=axis, 
+        (shape, dtype, s) = super(FftwFFT, self).irfft(a=a, out=out, axis=axis,
                                                         n=n, **kwds)
-        out = self.allocate_output(out, shape, dtype) 
+        out = self.allocate_output(out, shape, dtype)
         if self.warn_on_misalignment:
             self.check_alignment(a, out)
         kwds = self.bake_kwds(a=a, out=out, axis=axis, direction='FFTW_BACKWARD', **kwds)
@@ -289,7 +289,7 @@ class FftwFFT(HostFFTI):
     def dct(self, a, out=None, type=2, axis=-1, **kwds):
         """Planning destroys initial arrays content."""
         (shape, dtype) = super(FftwFFT, self).dct(a=a, out=out, type=type, axis=axis, **kwds)
-        out = self.allocate_output(out, shape, dtype) 
+        out = self.allocate_output(out, shape, dtype)
         if self.warn_on_misalignment:
             self.check_alignment(a, out)
         dct_types = ['FFTW_REDFT00', 'FFTW_REDFT10', 'FFTW_REDFT01', 'FFTW_REDFT11']
@@ -297,13 +297,13 @@ class FftwFFT(HostFFTI):
         kwds = self.bake_kwds(a=a, out=out, axis=axis, direction=direction, **kwds)
         plan = FftwFFTPlan(**kwds)
         return plan
-    
+
     def idct(self, a, out=None, type=2, axis=-1, scaling=None, **kwds):
         """Planning destroys initial arrays content."""
-        (shape, dtype, itype, s) = super(FftwFFT, self).idct(a=a, out=out, type=type, axis=axis, 
+        (shape, dtype, itype, s) = super(FftwFFT, self).idct(a=a, out=out, type=type, axis=axis,
                                         **kwds)
         scaling = first_not_None(scaling, 1.0/s)
-        out = self.allocate_output(out, shape, dtype) 
+        out = self.allocate_output(out, shape, dtype)
         if self.warn_on_misalignment:
             self.check_alignment(a, out)
         dct_types = ['FFTW_REDFT00', 'FFTW_REDFT10', 'FFTW_REDFT01', 'FFTW_REDFT11']
@@ -311,11 +311,11 @@ class FftwFFT(HostFFTI):
         kwds = self.bake_kwds(a=a, out=out, axis=axis, direction=direction, **kwds)
         plan = FftwFFTPlan(scaling=scaling, **kwds)
         return plan
-    
+
     def dst(self, a, out=None, type=2, axis=-1, **kwds):
         """Planning destroys initial arrays content."""
         (shape, dtype) = super(FftwFFT, self).dst(a=a, out=out, type=type, axis=axis, **kwds)
-        out = self.allocate_output(out, shape, dtype) 
+        out = self.allocate_output(out, shape, dtype)
         if self.warn_on_misalignment:
             self.check_alignment(a, out)
         dst_types = ['FFTW_RODFT00', 'FFTW_RODFT10', 'FFTW_RODFT01', 'FFTW_RODFT11']
@@ -326,10 +326,10 @@ class FftwFFT(HostFFTI):
 
     def idst(self, a, out=None, type=2, axis=-1, scaling=None, **kwds):
         """Planning destroys initial arrays content."""
-        (shape, dtype, itype, s) = super(FftwFFT, self).idst(a=a, out=out, type=type, 
+        (shape, dtype, itype, s) = super(FftwFFT, self).idst(a=a, out=out, type=type,
                 axis=axis, **kwds)
         scaling = first_not_None(scaling, 1.0/s)
-        out = self.allocate_output(out, shape, dtype) 
+        out = self.allocate_output(out, shape, dtype)
         if self.warn_on_misalignment:
             self.check_alignment(a, out)
         dst_types = ['FFTW_RODFT00', 'FFTW_RODFT10', 'FFTW_RODFT01', 'FFTW_RODFT11']
diff --git a/hysop/numerics/fft/gpyfft_fft.py b/hysop/numerics/fft/gpyfft_fft.py
index ef74d4ade5e3e61b577076571bde9cbd9a87340a..efa7072b5dcdce09234263ccdb5bc7dce815c9ad 100644
--- a/hysop/numerics/fft/gpyfft_fft.py
+++ b/hysop/numerics/fft/gpyfft_fft.py
@@ -34,6 +34,8 @@ from hysop.backend.device.opencl.opencl_kernel_launcher import trace_kernel, pro
 class HysopGpyFftWarning(HysopWarning):
     pass
 
+# fix a weird bug in clfft/gpyfft
+keep_plans_ref = []
 
 class GpyFFTPlan(OpenClFFTPlanI):
     """
@@ -43,7 +45,22 @@ class GpyFFTPlan(OpenClFFTPlanI):
 
     DEBUG=False
 
-    def __init__(self, cl_env, queue, 
+    def __new__(cls, cl_env, queue,
+            in_array, out_array, axes,
+            scaling=None, scale_by_size=None,
+            fake_input=None, fake_output=None,
+            callback_kwds=None,
+            direction_forward=True,
+            hardcode_twiddles=False,
+            warn_on_unaligned_output_offset=True,
+            warn_on_allocation=True,
+            error_on_allocation=False,
+            **kwds):
+        obj = super(GpyFFTPlan, cls).__new__(cls)
+        keep_plans_ref.append(obj)
+        return obj
+
+    def __init__(self, cl_env, queue,
             in_array, out_array, axes,
             scaling=None, scale_by_size=None,
             fake_input=None, fake_output=None,
@@ -62,14 +79,14 @@ class GpyFFTPlan(OpenClFFTPlanI):
         ----------
         cl_env: OpenClEnvironment
             OpenCL environment that will provide a context and a default queue.
-        queue: 
+        queue:
             OpenCL queue that will be used by default.
         in_array: cl.Array or OpenClArray
             Real input array for this transform.
         out_array: cl.Array or OpenClArray
             Real output array for this transform.
         axes: array_like of ints
-            Axis over witch to compute the transform. 
+            Axis over which to compute the transform.
         scaling: float, optional
             Force the scaling of the transform.
             If not given, no scaling is applied (unlike clfft default behaviour).
@@ -88,9 +105,9 @@ class GpyFFTPlan(OpenClFFTPlanI):
         direction_forward: bool, optional, defaults to True
             The direction of the transform. True <=> forward transform.
         hardcode_twiddles: bool, optional, defaults to False
-            Hardcode twiddles as a __constant static array of complex directly 
-            in the opencl code. Only used by DCT-II, DCT-III, DST-II and DST-III. 
-            If set to False, the twiddles will be computed by the device on the 
+            Hardcode twiddles as a __constant static array of complex directly
+            in the opencl code. Only used by DCT-II, DCT-III, DST-II and DST-III.
+            If set to False, the twiddles will be computed by the device on the
             fly, freeing device __constant memory banks.
         warn_on_unaligned_output_offset: bool, optional, defaults to True
             Emit a warning if the planner encounter an output array that has
@@ -107,7 +124,7 @@ class GpyFFTPlan(OpenClFFTPlanI):
         callback_kwds = first_not_None(callback_kwds, {})
 
         if (queue is None):
-            queue = cl_env.default_queue 
+            queue = cl_env.default_queue
         if (queue.context != cl_env.context):
             msg = 'Queue does not match context:'
             msg += '\n  *Given context is {}.'.format(cl_env.context)
@@ -127,15 +144,15 @@ class GpyFFTPlan(OpenClFFTPlanI):
 
         self.in_array  = in_array
         self.out_array = out_array
-        
+
         axes = np.asarray(axes)
         axes = (axes + in_array.ndim) % in_array.ndim
-        
+
         assert in_array.ndim == out_array.ndim
         assert fake_input.ndim == in_array.ndim
         assert fake_output.ndim == out_array.ndim
         assert 0 < axes.size <= in_array.ndim, axes.size
-        
+
         scale_by_size = first_not_None(scale_by_size, 1)
 
         self._setup_kwds = {
@@ -150,7 +167,7 @@ class GpyFFTPlan(OpenClFFTPlanI):
                 'hardcode_twiddles': hardcode_twiddles,
                 'callback_kwds': callback_kwds
             }
-    
+
     def setup(self, queue=None):
         super(GpyFFTPlan, self).setup(queue=queue)
         self.setup_plan(**self._setup_kwds)
@@ -159,9 +176,9 @@ class GpyFFTPlan(OpenClFFTPlanI):
             self.bake(queue=queue._queue)
         return self
 
-    def setup_plan(self, in_array, out_array, 
+    def setup_plan(self, in_array, out_array,
             fake_input, fake_output,
-            axes, direction_forward, 
+            axes, direction_forward,
             scaling, scale_by_size,
             hardcode_twiddles, callback_kwds):
 
@@ -207,14 +224,14 @@ class GpyFFTPlan(OpenClFFTPlanI):
             msg='Unsupported precision {}.'
             msg=msg.format(in_array.dtype)
             raise NotImplementedError(msg)
-        
+
         for array in (out_array, fake_input, fake_output):
             if (array.dtype not in valid_precision_types):
                 msg='Incompatible precisions: Got {} but valid precisions are {} '
                 msg+='based on input_array datatype which has been determined to be of kind {}.'
                 msg=msg.format(array.dtype, valid_precision_types, h_precision)
                 raise RuntimeError(msg)
-        
+
         # Determine transform layout and expected output shape and dtype
         float_types   = (np.float32, np.float64)
         complex_types = (np.complex64, np.complex128)
@@ -222,7 +239,7 @@ class GpyFFTPlan(OpenClFFTPlanI):
         if fake_input.dtype in float_types:
             layout_in  = gfft.CLFFT_REAL
             layout_out = gfft.CLFFT_HERMITIAN_INTERLEAVED
-            expected_output_shape = mk_shape(fake_input.shape, 
+            expected_output_shape = mk_shape(fake_input.shape,
                     axe0, fake_input.shape[axe0]//2 +1)
             expected_output_dtype = float_to_complex_dtype(fake_input.dtype)
             t_shape = t_shape_in
@@ -248,7 +265,7 @@ class GpyFFTPlan(OpenClFFTPlanI):
             msg='dtype {} is currently not handled.'
             msg=msg.format(fake_input.dtype)
             raise NotImplementedError(msg)
-           
+
         if (fake_output.dtype != expected_output_dtype):
             msg='Output array dtype {} does not match expected dtype {}.'
             msg=msg.format(fake_output.dtype, expected_output_dtype)
@@ -269,7 +286,7 @@ class GpyFFTPlan(OpenClFFTPlanI):
             assert ((in_array.strides[t_axes_in[0]] == in_array.dtype.itemsize) and \
                     (out_array.strides[t_axes_in[0]] == out_array.dtype.itemsize)), \
                     'Inplace real transforms need stride 1 for first transform axis.'
-        
+
         self.check_transform_shape(t_shape)
         plan = GFFT.create_plan(self.context, t_shape[::-1])
         plan.inplace       = t_inplace
@@ -279,7 +296,7 @@ class GpyFFTPlan(OpenClFFTPlanI):
         plan.batch_size    = t_batchsize_in
         plan.precision     = t_precision
         plan.layouts       = (layout_in, layout_out)
-        if (scaling is 'DEFAULT'):
+        if (scaling == 'DEFAULT'):
             pass
         elif (scaling is not None):
             plan.scale_forward  = scale
@@ -288,21 +305,21 @@ class GpyFFTPlan(OpenClFFTPlanI):
             plan.scale_forward  = 1.0
             plan.scale_backward = 1.0
 
-        
+
         # last transformed axis real output array size
         N = out_array.shape[axes[-1]]
-        
+
         typegen = self.cl_env.build_typegen(precision=h_precision,
-                                            float_dump_mode='dec', 
-                                            use_short_circuit_ops=False, 
+                                            float_dump_mode='dec',
+                                            use_short_circuit_ops=False,
                                             unroll_loops=False)
 
         (in_data, out_data) = self.set_callbacks(plan=plan, axes=axes,
-                                                 in_array=in_array, out_array=out_array, 
-                                                 fake_input=fake_input, 
+                                                 in_array=in_array, out_array=out_array,
+                                                 fake_input=fake_input,
                                                  fake_output=fake_output,
                                                  layout_in=layout_in, layout_out=layout_out,
-                                                 N=N, S=scale_by_size, 
+                                                 N=N, S=scale_by_size,
                                                  typegen=typegen, fp=fp,
                                                  hardcode_twiddles=hardcode_twiddles,
                                                  **callback_kwds)
@@ -312,7 +329,7 @@ class GpyFFTPlan(OpenClFFTPlanI):
         self.out_data   = out_data
         self.is_inplace = t_inplace
         self.direction_forward = direction_forward
-        
+
         if self.DEBUG:
             def estrides(array):
                 s = array.dtype.itemsize
@@ -357,18 +374,18 @@ Post callback source code:
 {}
 '''.format(in_array.shape, in_array.dtype, estrides(in_array), in_array.offset,
             out_array.shape, out_array.dtype, estrides(out_array), out_array.offset,
-            fake_input.shape, fake_input.dtype, estrides(fake_input), 
-            fake_output.shape, fake_output.dtype, estrides(fake_output), 
+            fake_input.shape, fake_input.dtype, estrides(fake_input),
+            fake_output.shape, fake_output.dtype, estrides(fake_output),
             t_distance_in, t_distance_out, t_axes_in, t_axes_out, t_batchsize_in, t_batchsize_out,
             t_shape_in, t_shape_out, t_strides_in, t_strides_out,
-            plan.inplace, plan.precision, plan.layouts[0], plan.layouts[1],
-            plan.shape, plan.strides_in, plan.strides_out, plan.batch_size, 
-            plan.distances[0], plan.distances[1], 
-            plan.scale_forward, plan.scale_backward, 
-            self.pre_callback_src, self.post_callback_src)
-            print msg
-        
-        if (scaling is 'DEFAULT'):
+            plan.inplace, None, plan.layouts[0], plan.layouts[1],
+            plan.shape, plan.strides_in, plan.strides_out, plan.batch_size,
+            plan.distances[0], plan.distances[1],
+            plan.scale_forward, plan.scale_backward,
+            self.pre_callback_src.decode(), self.post_callback_src.decode())
+            print(msg)
+
+        if (scaling == 'DEFAULT'):
             pass
         elif (scaling is not None):
             plan.scale_forward  = scale
@@ -376,17 +393,17 @@ Post callback source code:
         else:
             plan.scale_forward  = 1.0
             plan.scale_backward = 1.0
-        
+
         # profiling info is delegated to this class, inform the KernelListLauncher
         self._show_profiling_info = False
-    
+
         # custom apply msg
         self._apply_msg_template = '  fft_{}2{}_{}_{}_{{}}<<<>>>'.format(
                 'C' if is_complex(in_array) else 'R',
                 'C' if is_complex(out_array) else 'R',
                 'forward' if direction_forward else 'backward',
                 self.__class__.__name__.replace('Gpy','').replace('Plan','_plan').replace('FFT','DFT'))
-       
+
 
     def set_callbacks(self, plan, axes, N,
             in_array, out_array, fake_input, fake_output,
@@ -399,31 +416,33 @@ Post callback source code:
             # ********************************************************************************
             # CLFFT C2R BUGFIX
             # Force the zero and the Nyquist frequency of the input to be purely real.
-            (pre_src, user_data) = self.pre_offset_callback_C2R(offset_input_pointer=oip, 
+            (pre_src, user_data) = self.pre_offset_callback_C2R(offset_input_pointer=oip,
                     in_fp=in_fp, N=N, **kwds)
             # ********************************************************************************
         else:
-            (pre_src, user_data) = self.pre_offset_callback(offset_input_pointer=oip, 
+            (pre_src, user_data) = self.pre_offset_callback(offset_input_pointer=oip,
                     in_fp=in_fp, N=N, **kwds)
 
-        (out_data, out_fp, oop) = self.compute_output_array_offset(out_array, fake_output, 
+        (out_data, out_fp, oop) = self.compute_output_array_offset(out_array, fake_output,
                 axes)
-        (post_src, user_data) = self.post_offset_callback(offset_output_pointer=oop, 
+        (post_src, user_data) = self.post_offset_callback(offset_output_pointer=oop,
                 out_fp=out_fp, N=N, **kwds)
-        
+
         # ***********************************************************************************
         # GPYFFT BUGFIX
         # Keep a reference to callback source code to prevent dangling const char* pointers.
-        # Do not remove because clfft only get the pointer and gpyfft does not increase the 
+        # Do not remove because clfft only get the pointer and gpyfft does not increase the
         # refcount of those strings, resulting in random code injection into the fft kernels.
+        pre_src  = pre_src.encode('utf-8') if (pre_src is not None) else None
+        post_src = post_src.encode('utf-8') if (post_src is not None) else None
         self.pre_callback_src  = pre_src
         self.post_callback_src = post_src
         # ***********************************************************************************
-        
+
         if (pre_src is not None):
-            plan.set_callback('pre_callback',  pre_src,  'pre',  user_data=user_data)
+            plan.set_callback(b'pre_callback',  pre_src,  'pre',  user_data=user_data)
         if (post_src is not None):
-            plan.set_callback('post_callback', post_src, 'post', user_data=user_data)
+            plan.set_callback(b'post_callback', post_src, 'post', user_data=user_data)
         return (in_data, out_data)
 
     @classmethod
@@ -434,7 +453,7 @@ Post callback source code:
             factors = tuple( primefac.primefac(int(Ni)) )
             invalid_factors = set(factors) - valid_factors
             if invalid_factors:
-                factorization = ' * '.join('{}^{}'.format(factor, factors.count(factor)) 
+                factorization = ' * '.join('{}^{}'.format(factor, factors.count(factor))
                                                                 for factor in set(factors))
                 candidates = ', '.join(str(vf) for vf in valid_factors)
                 msg ='\nInvalid transform shape {} for clFFT:'
@@ -443,14 +462,14 @@ Post callback source code:
                 msg+='\n'
                 msg=msg.format(shape, Ni, factorization, candidates)
                 raise ValueError(msg)
-    
+
     @classmethod
     def calculate_transform_strides(cls, taxes, array):
         """Redefine gpyfft.FFT.calculate_transform_strides"""
         shape   = np.asarray(array.shape, dtype=np.uint32)
         strides = np.asarray(array.strides, dtype=np.uint32)
         dtype   = array.dtype
-        
+
         # array dimension and transform dimension
         ndim = len(shape)
         tdim = len(taxes)
@@ -462,7 +481,7 @@ Post callback source code:
 
         # sort untransformed axes by strides.
         baxes = baxes[np.argsort(strides[baxes][::-1])][::-1]
-        
+
         # compute a list of collapsable axes:  [ [x,y], [z] ]
         cal = []                 # collaspsable axes list
         cac = baxes[:1].tolist() # collaspsable axes candidates
@@ -473,68 +492,68 @@ Post callback source code:
                 cal.append(cac)
                 cac = [a]
         cal.append(cac)
-        
-        msg='Data layout not supported (only single non-transformed axis allowed)' 
+
+        msg='Data layout not supported (only single non-transformed axis allowed)'
         if (len(cal)!=1):
             raise HysopFFTDataLayoutError(msg)
         baxes = cal[0]
-        
+
         t_distances = strides[baxes]//dtype.itemsize
-                
+
         if len(t_distances) == 0:
             t_distance = 0
         else:
-            t_distance = t_distances[0] 
-                       
+            t_distance = t_distances[0]
+
         batchsize = np.prod(shape[baxes])
-        
+
         t_shape   = shape[taxes]
         t_strides = strides[taxes]//dtype.itemsize
-        
+
         return (tuple(t_strides), t_distance, batchsize, tuple(t_shape), tuple(taxes))
-        
+
     @classmethod
-    def compute_input_array_offset(cls, real_input, fake_input, axes, 
+    def compute_input_array_offset(cls, real_input, fake_input, axes,
             transform_offset='K', idx='k{}', batch_id='b',
             void_ptr='input',  casted_ptr='in'):
-        
+
         new_input    = cls.extract_array(real_input)
         input_offset = cls.get_array_offset(new_input, emit_warning=False)
         input_data = new_input.base_data
         input_fp   = dtype_to_ctype(new_input.dtype)
-        
+
         offset_input_pointer = \
                 cls.compute_pointer_offset(real_array=real_input, fake_array=fake_input,
                     axes=axes, base_offset=input_offset,
-                    transform_offset=transform_offset, idx=idx, batch_id=batch_id, 
+                    transform_offset=transform_offset, idx=idx, batch_id=batch_id,
                     fp='const '+input_fp, void_ptr=void_ptr, casted_ptr=casted_ptr,
                     is_input=True)
 
         return (input_data, input_fp, offset_input_pointer)
-    
+
     @classmethod
-    def compute_output_array_offset(cls, real_output, fake_output, axes, 
+    def compute_output_array_offset(cls, real_output, fake_output, axes,
             transform_offset='K', idx='k{}', batch_id='b',
             void_ptr='output',  casted_ptr='out'):
-        
+
         new_output    = cls.extract_array(real_output)
         output_offset = cls.get_array_offset(new_output, emit_warning=True)
         output_data = new_output.base_data
         output_fp   = dtype_to_ctype(new_output.dtype)
-        
+
         offset_output_pointer = \
                 cls.compute_pointer_offset(real_array=real_output, fake_array=fake_output,
                     axes=axes, base_offset=output_offset,
-                    transform_offset=transform_offset, idx=idx, batch_id=batch_id, 
+                    transform_offset=transform_offset, idx=idx, batch_id=batch_id,
                     fp=output_fp, void_ptr=void_ptr, casted_ptr=casted_ptr,
                     is_input=False)
 
         return (output_data, output_fp, offset_output_pointer)
-    
+
     @classmethod
-    def compute_pointer_offset(cls, real_array, fake_array, 
+    def compute_pointer_offset(cls, real_array, fake_array,
             axes, base_offset,
-            transform_offset, idx, batch_id, 
+            transform_offset, idx, batch_id,
             fp, void_ptr, casted_ptr,
             is_input):
 
@@ -550,7 +569,7 @@ Post callback source code:
         cptr = casted_ptr
         D = fake_distance
         S = fake_strides[::-1]
-        
+
         oip = ()
         oip += ('uint {K} = offset;'.format(K=K),)
         if (fake_batchsize > 1):
@@ -558,17 +577,16 @@ Post callback source code:
             # FIX ANOTHER CLFFT BUG (wrong batch size scheduling...)
             if is_input:
                 oip += ('if (b>={}) {{ return ({})(NAN); }};'.format(fake_batchsize, fp),)
-                #oip += ('printf("\\noffset=%u", offset);',)
             else:
                 oip += ('if (b>={}) {{ return; }};'.format(fake_batchsize, fp),)
             oip += ('{K} -= {b}*{D};'.format(K=K, b=b, D=D),)
-        for i in xrange(ndim-1,-1,-1):
+        for i in range(ndim-1, -1, -1):
             Ki = idx.format('xyz'[i])
             Si = S[i]
             oip += ('const uint {Ki} = {K}/{Si};'.format(Ki=Ki, K=K, Si=Si),)
             if (i>0):
                 oip += ('{K} -= {Ki}*{Si};'.format(K=K, Ki=Ki, Si=Si),)
-        
+
         if (real_array is fake_array):
             offset = '{base_offset} + offset - {k}'.format(base_offset=base_offset,
                                                             K=K, k=k.format('x'))
@@ -581,12 +599,12 @@ Post callback source code:
             real_offset = ('{base_offset}uL'.format(base_offset=base_offset),)
             if (fake_batchsize > 1):
                 real_offset += ('{b}*{D}'.format(b=b, D=real_distance),)
-            for i in xrange(ndim-1,0,-1):
+            for i in range(ndim-1, 0, -1):
                 Ki = idx.format('xyz'[i])
                 Si = real_strides[i]
                 real_offset += ('{Ki}*{Si}'.format(Ki, Si),)
             offset = ' + '.join(real_offset)
-            
+
         oip += ('__global {fp}* {cptr} = (__global {fp}*)({vptr}) + {offset};'.format(
                         cptr=cptr, vptr=vptr,
                         fp=fp, offset=offset),)
@@ -607,7 +625,7 @@ Post callback source code:
         except:
             new_array = array
         return new_array
-    
+
     @classmethod
     def get_array_offset(cls, array, emit_warning):
         """
@@ -623,7 +641,7 @@ Post callback source code:
         if emit_warning and (base_offset != 0):
             msg='OpenCl array offset is not zero and will be injected into a clFFT pre or '
             msg+= 'post callback. This could entail bad results if this buffer is used as '
-            msg+= 'an output: the beginning of this buffer may be used as a temporary ' 
+            msg+= 'an output: the beginning of this buffer may be used as a temporary '
             msg+= 'buffer during the transform before actual results are stored at the right '
             msg+= 'offset through the callback.'
             warnings.warn(msg, HysopGpyFftWarning)
@@ -639,10 +657,10 @@ Post callback source code:
         def fmt_array(name):
             arr = fmt_arg(name)
             return 'shape={:<16} strides={:<16} dtype={:<16}'.format(
-                    str(arr.shape)+',', 
+                    str(arr.shape)+',',
                     str(arr.strides)+',',
-                    arr.dtype)
-        title=' Baking {} '.format(self.__class__.__name__) 
+                    str(arr.dtype))
+        title=' Baking {} '.format(self.__class__.__name__)
         msg = \
         '''    in_array:          {}
     out_array:         {}
@@ -659,8 +677,8 @@ Post callback source code:
             fmt_arg('direction_forward'),
             fmt_arg('hardcode_twiddles'))
         if self.verbose:
-            print
-            print framed_str(title, msg, c='*')
+            print()
+            print(framed_str(title, msg, c='*'))
         queue = first_not_None(queue, self.queue)
         self.plan.bake(queue)
         self._baked = True
@@ -694,10 +712,11 @@ Post callback source code:
         self._allocated = True
         return self
 
-    
+
     def profile(self, events):
         for (i,evt) in enumerate(events):
             profile_kernel(None, evt, self._apply_msg_template.format(i))
+            evt.wait()
         return evt
 
     def enqueue(self, queue=None, wait_for=None):
@@ -716,16 +735,18 @@ Post callback source code:
         trace_kernel(self._apply_msg_template.format('kernels'))
 
         if self.is_inplace:
-            events = self.plan.enqueue_transform((queue,), 
-                                            (in_data,), 
-                                            direction_forward=direction_forward, 
-                                            temp_buffer=self.temp_buffer, 
+            events = self.plan.enqueue_transform((queue,),
+                                            (in_data,),
+                                            direction_forward=direction_forward,
+                                            temp_buffer=self.temp_buffer,
                                             wait_for_events=wait_for)
         else:
-            events = self.plan.enqueue_transform((queue,), 
+            #print(self.in_array)
+            #print(self.out_array)
+            events = self.plan.enqueue_transform((queue,),
                                             (in_data,), (out_data),
-                                            direction_forward=direction_forward, 
-                                            temp_buffer=self.temp_buffer, 
+                                            direction_forward=direction_forward,
+                                            temp_buffer=self.temp_buffer,
                                             wait_for_events=wait_for)
         evt = self.profile(events)
         return evt
@@ -771,10 +792,10 @@ Post callback source code:
             return in[kx];
         }}'''.format(fp=in_fp, offset_input_pointer=offset_input_pointer)
         return callback, None
-    
+
     def pre_offset_callback_C2R(self, offset_input_pointer, fp, N, **kwds):
         """
-        C2R specific pre_offset_callback, inject input array offset 
+        C2R specific pre_offset_callback, inject input array offset
         and force the nyquist frequency to be purely real (fixes a bug
         in clfft for even C2R transform of dimension > 1).
         """
@@ -797,7 +818,7 @@ Post callback source code:
 
     def post_offset_callback(self, offset_output_pointer, out_fp, S, **kwds):
         """
-        Default post_offset_callback, just inject output array offset and scale by size 
+        Default post_offset_callback, just inject output array offset and scale by size
         (divide by some integer, which will often be the logical size of the
         transform or 1).
         """
@@ -808,8 +829,8 @@ Post callback source code:
             out[kx] = R / {S};
         }}'''.format(fp=out_fp, offset_output_pointer=offset_output_pointer, S=S)
         return callback, None
-    
-    
+
+
     @classmethod
     def fake_array(cls, shape, dtype, strides=None):
         """
@@ -876,16 +897,16 @@ class GpyR2RPlan(GpyFFTPlan):
     custom pre and post processing callbacks.
     """
 
-    def __init__(self, in_array, out_array, 
-            fake_input, fake_output, 
-            scale_by_size, axes, 
+    def __init__(self, in_array, out_array,
+            fake_input, fake_output,
+            scale_by_size, axes,
             **kwds):
         """
         Handmade R2R transforms rely on fake input and output that will
         never really be read or written. This is necessary because
         clFFT do not handle R2R transforms and we use pre and post processing
         to compute an equivalent R2C or C2R problem.
-        
+
         Fake arrays are used to compute transform size, batch size and strides.
         Real arrays pointer are passed to the kernels and pre and post callbacks
         map the input and output data from those real arrays, adjusting the stride
@@ -902,11 +923,11 @@ class GpyR2RPlan(GpyFFTPlan):
         assert (fake_input is not None), msg
         msg='Fake output has not been set.'
         assert (fake_output is not None), msg
-        
+
         axis = self.check_r2r_axes(in_array, axes)
         axes = np.asarray([axis])
-        
-        super(GpyR2RPlan, self).__init__(in_array=in_array, out_array=out_array, 
+
+        super(GpyR2RPlan, self).__init__(in_array=in_array, out_array=out_array,
                 fake_input=fake_input, fake_output=fake_output,
                 axes=axes, scale_by_size=scale_by_size, **kwds)
 
@@ -915,7 +936,7 @@ class GpyR2RPlan(GpyFFTPlan):
         if self.is_inplace:
             msg='R2R transforms cannot be compute inplace on this backend.'
             raise NotImplementedError(msg)
-    
+
     @classmethod
     def prepare_r2r(cls, in_array, axes):
         """Return all the required variables to build fake arrays for a all R2R transforms."""
@@ -925,7 +946,7 @@ class GpyR2RPlan(GpyFFTPlan):
         dtype  = in_array.dtype
         ctype  = float_to_complex_dtype(dtype)
         return (dtype, ctype, shape, axis, N)
-    
+
     @classmethod
     def check_r2r_axes(cls, in_array, axes):
         """Check that only the last axis is transformed."""
@@ -938,6 +959,7 @@ class GpyR2RPlan(GpyFFTPlan):
 class GpyDCTIPlan(GpyR2RPlan):
 
     def __init__(self, in_array, axes, **kwds):
+        #print('\ncreate DCT-I plan {}'.format(id(self)))
         (dtype, ctype, shape, axis, N) = self.prepare_r2r(in_array, axes)
         rshape = mk_shape(shape, axis, 2*N-2)
         cshape = mk_shape(shape, axis, N)
@@ -946,9 +968,12 @@ class GpyDCTIPlan(GpyR2RPlan):
         super(GpyDCTIPlan, self).__init__(in_array=in_array, axes=axes,
                 fake_input=fake_input, fake_output=fake_output, **kwds)
 
+    #def __del__(self):
+        #print('\ndelete DCT-I plan {}'.format(id(self)))
+
     def pre_offset_callback(self, N, fp, offset_input_pointer, **kwds):
         pre = \
-        '''{fp} pre_callback(const __global void* input, const uint offset, 
+        '''{fp} pre_callback(const __global void* input, const uint offset,
                                    __global void* userdata) {{
 {offset_input_pointer}
             {fp} ret;
@@ -972,6 +997,8 @@ class GpyDCTIPlan(GpyR2RPlan):
         return post, None
 
 
+
+
 class GpyDCTIIPlan(GpyR2RPlan):
     def __init__(self, in_array, axes, **kwds):
         (dtype, ctype, shape, axis, N) = self.prepare_r2r(in_array, axes)
@@ -986,7 +1013,7 @@ class GpyDCTIIPlan(GpyR2RPlan):
         n = (N-1)//2 + 1
         pre = \
         '''
-        {fp} pre_callback(const __global void* input, uint offset, 
+        {fp} pre_callback(const __global void* input, uint offset,
                                 __global void* userdata) {{
 {offset_input_pointer}
             {fp} ret;
@@ -1003,14 +1030,14 @@ class GpyDCTIIPlan(GpyR2RPlan):
     def post_offset_callback(self, N, S, fp, offset_output_pointer,
             typegen, hardcode_twiddles, **kwds):
         n = (N-1)//2 + 1
-        (twiddle, twiddles) = self.generate_twiddles('dct2_twiddles', 
+        (twiddle, twiddles) = self.generate_twiddles('dct2_twiddles',
                                             base=-np.pi/(2*N), count=N//2+1,
                                             fp=fp, typegen=typegen,
                                             hardcode_twiddles=hardcode_twiddles)
         post = \
         '''
         {twiddles}
-        void post_callback(__global void* output, const uint offset, 
+        void post_callback(__global void* output, const uint offset,
                            __global void* userdata, const {fp}2 R) {{
             {offset_output_pointer}
             {twiddle}
@@ -1020,7 +1047,7 @@ class GpyDCTIIPlan(GpyR2RPlan):
             if (kx > 0) {{
                 out[{N}-kx] = -2*(R.x*T.y + R.y*T.x)/{S};
             }}
-        }}'''.format(N=N, S=S, n=n, fp=fp, 
+        }}'''.format(N=N, S=S, n=n, fp=fp,
                 twiddle=twiddle, twiddles=twiddles,
                 offset_output_pointer=offset_output_pointer)
         return post, None
@@ -1035,14 +1062,14 @@ class GpyDCTIIIPlan(GpyR2RPlan):
         fake_output = self.fake_array(shape=rshape, dtype=dtype)
         super(GpyDCTIIIPlan, self).__init__(in_array=in_array, axes=axes,
                 fake_input=fake_input, fake_output=fake_output, **kwds)
-    
+
     def pre_offset_callback(self, **kwds):
         msg='pre_offset_callback_C2R should be used instead.'
         raise NotImplementedError(msg)
 
-    def pre_offset_callback_C2R(self, N, S, fp, typegen, 
+    def pre_offset_callback_C2R(self, N, S, fp, typegen,
             offset_input_pointer, hardcode_twiddles, **kwds):
-        (twiddle, twiddles) = self.generate_twiddles('dct3_twiddles', 
+        (twiddle, twiddles) = self.generate_twiddles('dct3_twiddles',
                                             base=+np.pi/(2*N), count=N//2+1,
                                             fp=fp, typegen=typegen,
                                             hardcode_twiddles=hardcode_twiddles)
@@ -1052,7 +1079,7 @@ class GpyDCTIIIPlan(GpyR2RPlan):
         pre = \
         '''
         {twiddles}
-        {fp}2 pre_callback(const __global void* input, const uint offset, 
+        {fp}2 pre_callback(const __global void* input, const uint offset,
                                  __global void* userdata) {{
             {offset_input_pointer}
             {twiddle}
@@ -1072,18 +1099,18 @@ class GpyDCTIIIPlan(GpyR2RPlan):
                 C.y = R.x*T.y + R.y*T.x;
             }}
             return C;
-        }}'''.format(N=N, fp=fp, 
+        }}'''.format(N=N, fp=fp,
                 offset_input_pointer=offset_input_pointer,
                 twiddle=twiddle, twiddles=twiddles,
                 force_real_input=force_real_input)
         return pre, None
 
-    def post_offset_callback(self, N, S, fp, 
+    def post_offset_callback(self, N, S, fp,
             offset_output_pointer, **kwds):
         n = (N-1)//2 + 1
         post = \
         '''
-        void post_callback(__global void* output, const uint offset, 
+        void post_callback(__global void* output, const uint offset,
                            __global void* userdata, const {fp} R) {{
             {offset_output_pointer}
             if (kx < {n}) {{
@@ -1092,7 +1119,7 @@ class GpyDCTIIIPlan(GpyR2RPlan):
             else {{
                 out[2*({N}-kx)-1] = R/{S};
             }}
-        }}'''.format(N=N, S=S, n=n, fp=fp, 
+        }}'''.format(N=N, S=S, n=n, fp=fp,
                         offset_output_pointer=offset_output_pointer)
         return post, None
 
@@ -1110,7 +1137,7 @@ class GpyDSTIPlan(GpyR2RPlan):
 
     def pre_offset_callback(self, N, fp, offset_input_pointer, **kwds):
         pre = \
-        '''{fp} pre_callback(const __global void* input, const uint offset, 
+        '''{fp} pre_callback(const __global void* input, const uint offset,
                                    __global void* userdata) {{
 {offset_input_pointer}
             {fp} ret;
@@ -1153,7 +1180,7 @@ class GpyDSTIIPlan(GpyR2RPlan):
         n = (N-1)//2 + 1
         pre = \
         '''
-        {fp} pre_callback(const __global void* input, uint offset, 
+        {fp} pre_callback(const __global void* input, uint offset,
                                 __global void* userdata) {{
 {offset_input_pointer}
             {fp} ret;
@@ -1170,14 +1197,14 @@ class GpyDSTIIPlan(GpyR2RPlan):
     def post_offset_callback(self, N, S, fp, offset_output_pointer,
             typegen, hardcode_twiddles, **kwds):
         n = (N-1)//2 + 1
-        (twiddle, twiddles) = self.generate_twiddles('dst2_twiddles', 
+        (twiddle, twiddles) = self.generate_twiddles('dst2_twiddles',
                                             base=-np.pi/(2*N), count=N//2+1,
                                             fp=fp, typegen=typegen,
                                             hardcode_twiddles=hardcode_twiddles)
         post = \
         '''
         {twiddles}
-        void post_callback(__global void* output, const uint offset, 
+        void post_callback(__global void* output, const uint offset,
                            __global void* userdata, const {fp}2 R) {{
             {offset_output_pointer}
             {twiddle}
@@ -1187,7 +1214,7 @@ class GpyDSTIIPlan(GpyR2RPlan):
             if (kx < {n}) {{
                 out[{N}-kx-1] = +2*(R.x*T.x - R.y*T.y)/{S};
             }}
-        }}'''.format(N=N, S=S, n=n, fp=fp, 
+        }}'''.format(N=N, S=S, n=n, fp=fp,
                 twiddle=twiddle, twiddles=twiddles,
                 offset_output_pointer=offset_output_pointer)
         return post, None
@@ -1207,9 +1234,9 @@ class GpyDSTIIIPlan(GpyR2RPlan):
         msg='pre_offset_callback_C2R should be used instead.'
         raise NotImplementedError(msg)
 
-    def pre_offset_callback_C2R(self, N, S, fp, typegen, 
+    def pre_offset_callback_C2R(self, N, S, fp, typegen,
             offset_input_pointer, hardcode_twiddles, **kwds):
-        (twiddle, twiddles) = self.generate_twiddles('dst3_twiddles', 
+        (twiddle, twiddles) = self.generate_twiddles('dst3_twiddles',
                                             base=+np.pi/(2*N), count=N//2+1,
                                             fp=fp, typegen=typegen,
                                             hardcode_twiddles=hardcode_twiddles)
@@ -1219,7 +1246,7 @@ class GpyDSTIIIPlan(GpyR2RPlan):
         pre = \
         '''
         {twiddles}
-        {fp}2 pre_callback(const __global void* input, const uint offset, 
+        {fp}2 pre_callback(const __global void* input, const uint offset,
                                  __global void* userdata) {{
             {offset_input_pointer}
             {twiddle}
@@ -1239,18 +1266,18 @@ class GpyDSTIIIPlan(GpyR2RPlan):
                 C.y = R.x*T.y + R.y*T.x;
             }}
             return C;
-        }}'''.format(N=N, fp=fp, 
+        }}'''.format(N=N, fp=fp,
                 offset_input_pointer=offset_input_pointer,
                 twiddle=twiddle, twiddles=twiddles,
                 force_real_input=force_real_input)
         return pre, None
 
-    def post_offset_callback(self, N, S, fp, 
+    def post_offset_callback(self, N, S, fp,
             offset_output_pointer, **kwds):
         n = (N-1)//2 + 1
         post = \
         '''
-        void post_callback(__global void* output, const uint offset, 
+        void post_callback(__global void* output, const uint offset,
                            __global void* userdata, const {fp} R) {{
             {offset_output_pointer}
             if (kx < {n}) {{
@@ -1259,7 +1286,7 @@ class GpyDSTIIIPlan(GpyR2RPlan):
             else {{
                 out[2*({N}-kx)-1] = -R/{S};
             }}
-        }}'''.format(N=N, S=S, n=n, fp=fp, 
+        }}'''.format(N=N, S=S, n=n, fp=fp,
                         offset_output_pointer=offset_output_pointer)
         return post, None
 
@@ -1269,7 +1296,7 @@ class GpyFFT(OpenClFFTI):
     Interface to compute local to process FFT-like transforms using the clFFT backend
     through the gpyfft python interface.
 
-    clFFT backend has many advantages: 
+    clFFT backend has many advantages:
         - single and double precision supported
         - no intermediate temporary buffers created at each call.
         - all required temporary buffers can be supplied or are auto-allocated only once.
@@ -1290,31 +1317,31 @@ class GpyFFT(OpenClFFTI):
 
     Notes
     -----
-    Output array is used during transform and if out.data is not aligned 
-    on device.MEM_BASE_ADDR_ALIGN the begining of the buffer may be overwritten by 
+    Output array is used during transform and if out.data is not aligned
+    on device.MEM_BASE_ADDR_ALIGN the beginning of the buffer may be overwritten by
     intermediate transform results.
 
-    out.data = out.base_data + out.offset 
+    out.data = out.base_data + out.offset
     if (offset%alignment > 0)
-        out.base_data[0:out.size] 
-        may be trashed during computation and the result of the transform will go to 
+        out.base_data[0:out.size]
+        may be trashed during computation and the result of the transform will go to
         out.base_data[out.offset:out.offset+out.size]
 
+    Thus for every transform out.base_data[0:min(out.offset,out.size)] may be overwritten with
+    Thus for every transforms out.base_data[0:min(out.offset,out.size)] may be overwritten with
     trash data. The default behaviour is to emmit a warning when output data is not aligned on
     device memory boundary.
     """
 
-    def __init__(self, cl_env, 
+    def __init__(self, cl_env,
                        backend=None, allocator=None,
                        warn_on_allocation=True,
-                       warn_on_unaligned_output_offset=True, 
+                       warn_on_unaligned_output_offset=True,
                        error_on_allocation=False,
                        **kwds):
 
-        super(GpyFFT, self).__init__(cl_env=cl_env, 
-                backend=backend, allocator=allocator, 
-                warn_on_allocation=warn_on_allocation, 
+        super(GpyFFT, self).__init__(cl_env=cl_env,
+                backend=backend, allocator=allocator,
+                warn_on_allocation=warn_on_allocation,
                 error_on_allocation=error_on_allocation, **kwds)
 
         self.supported_ftypes = (np.float32, np.float64)
@@ -1353,7 +1380,7 @@ class GpyFFT(OpenClFFTI):
         plan_kwds['warn_on_allocation']  = kwds.pop('warn_on_allocation',  self.warn_on_allocation)
         plan_kwds['error_on_allocation'] = kwds.pop('error_on_allocation', self.error_on_allocation)
         plan_kwds['warn_on_unaligned_output_offset'] = \
-                kwds.pop('warn_on_unaligned_output_offset', 
+                kwds.pop('warn_on_unaligned_output_offset',
                         self.warn_on_unaligned_output_offset)
 
         if kwds:
@@ -1365,35 +1392,35 @@ class GpyFFT(OpenClFFTI):
 
     def fft(self, a, out=None, axis=-1, **kwds):
         (shape, dtype) = super(GpyFFT, self).fft(a=a, out=out, axis=axis, **kwds)
-        out = self.allocate_output(out, shape, dtype) 
+        out = self.allocate_output(out, shape, dtype)
         kwds = self.bake_kwds(a=a, out=out, axis=axis, **kwds)
         plan = GpyFFTPlan(**kwds)
         return plan
 
     def ifft(self, a, out=None, axis=-1, **kwds):
         (shape, dtype, s) = super(GpyFFT, self).ifft(a=a, out=out, axis=axis, **kwds)
-        out = self.allocate_output(out, shape, dtype) 
+        out = self.allocate_output(out, shape, dtype)
         kwds = self.bake_kwds(a=a, out=out, axis=axis, scaling='DEFAULT', **kwds)
         plan = GpyFFTPlan(direction_forward=False, **kwds)
         return plan
 
     def rfft(self, a, out=None, axis=-1, **kwds):
         (shape, dtype) = super(GpyFFT, self).rfft(a=a, out=out, axis=axis, **kwds)
-        out = self.allocate_output(out, shape, dtype) 
+        out = self.allocate_output(out, shape, dtype)
         kwds = self.bake_kwds(a=a, out=out, axis=axis, **kwds)
         plan = GpyFFTPlan(**kwds)
         return plan
 
     def irfft(self, a, out=None, n=None, axis=-1, **kwds):
         (shape, dtype, s) = super(GpyFFT, self).irfft(a=a, out=out, axis=axis, n=n, **kwds)
-        out = self.allocate_output(out, shape, dtype) 
+        out = self.allocate_output(out, shape, dtype)
         kwds = self.bake_kwds(a=a, out=out, axis=axis, scale_by_size=s, **kwds)
         plan = GpyFFTPlan(**kwds)
         return plan
 
     def dct(self, a, out=None, type=2, axis=-1, **kwds):
         (shape, dtype) = super(GpyFFT, self).dct(a=a, out=out, type=type, axis=axis, **kwds)
-        out = self.allocate_output(out, shape, dtype) 
+        out = self.allocate_output(out, shape, dtype)
         kwds = self.bake_kwds(a=a, out=out, axis=axis, **kwds)
         if type==1:
             plan = GpyDCTIPlan(**kwds)
@@ -1408,7 +1435,7 @@ class GpyFFT(OpenClFFTI):
 
     def dst(self, a, out=None, type=2, axis=-1, **kwds):
         (shape, dtype) = super(GpyFFT, self).dst(a=a, out=out, type=type, axis=axis, **kwds)
-        out = self.allocate_output(out, shape, dtype) 
+        out = self.allocate_output(out, shape, dtype)
         kwds = self.bake_kwds(a=a, out=out, axis=axis, **kwds)
         if type==1:
             plan = GpyDSTIPlan(**kwds)
@@ -1420,14 +1447,14 @@ class GpyFFT(OpenClFFTI):
             msg='Unimplemented sine transform type {}'.format(itype)
             raise RuntimeError(msg)
         return plan
-    
+
     def idct(self, a, out=None, type=2, axis=-1, **kwds):
         (shape, dtype, itype, s) = super(GpyFFT, self).idct(a=a, out=out, type=type,
                 axis=axis, **kwds)
         return self.dct(a=a, out=out, type=itype, axis=axis, scale_by_size=s, **kwds)
 
     def idst(self, a, out=None, type=2, axis=-1, **kwds):
-        (shape, dtype, itype, s) = super(GpyFFT, self).idst(a=a, out=out, type=type, 
+        (shape, dtype, itype, s) = super(GpyFFT, self).idst(a=a, out=out, type=type,
                 axis=axis, **kwds)
         return self.dst(a=a, out=out, type=itype, axis=axis, scale_by_size=s, **kwds)
 
diff --git a/hysop/numerics/fft/host_fft.py b/hysop/numerics/fft/host_fft.py
index ceb5c70e65751bbfde54060854c52b9f11302580..2e19a8e31c93585d50a147bbff24243eb84d31a8 100644
--- a/hysop/numerics/fft/host_fft.py
+++ b/hysop/numerics/fft/host_fft.py
@@ -70,9 +70,9 @@ class HostFFTI(FFTI):
         super(HostFFTI, self).__init__(backend=backend, **kwds)
 
     @classmethod
-    def default_interface(cls, threads=None, 
+    def default_interface(cls, threads=None,
                        backend=None, allocator=None,
-                       planner_effort=None, 
+                       planner_effort=None,
                        planning_timelimit=None,
                        destroy_input=False,
                        warn_on_allocation=True,
@@ -96,8 +96,8 @@ class HostFFTI(FFTI):
             threads            = first_not_None(threads,            __FFTW_NUM_THREADS__)
             planner_effort     = first_not_None(planner_effort,     __FFTW_PLANNER_EFFORT__)
             planning_timelimit = first_not_None(planning_timelimit, __FFTW_PLANNER_TIMELIMIT__)
-            return FftwFFT(threads=threads,  
-                           planner_effort=planner_effort, 
+            return FftwFFT(threads=threads,
+                           planner_effort=planner_effort,
                            planning_timelimit=planning_timelimit,
                            backend=backend, allocator=allocator,
                            destroy_input=destroy_input,
@@ -105,19 +105,19 @@ class HostFFTI(FFTI):
                            warn_on_misalignment=warn_on_misalignment,
                            error_on_allocation=error_on_allocation,
                            **kwds)
-    
+
     def new_queue(self, tg, name):
         return HostFFTQueue(name=name)
-                       
+
     def plan_copy(self, tg, src, dst):
         src = self.ensure_callable(src)
         dst = self.ensure_callable(dst)
-        
+
         @static_vars(numba_copy=None)
         def exec_copy(src=src, dst=dst):
             src, dst = src(), dst()
             if can_exec_hptt(src, dst):
-                hptt.tensorTransposeAndUpdate(perm=range(src.ndim),
+                hptt.tensorTransposeAndUpdate(perm=tuple(range(src.ndim)),
                         alpha=1.0, A=src, beta=0.0, B=dst)
             elif HAS_NUMBA:
                 if (exec_copy.numba_copy is None):
@@ -126,11 +126,11 @@ class HostFFTI(FFTI):
             else:
                 dst[...] = src
         return exec_copy
-    
+
     def plan_accumulate(self, tg, src, dst):
         src = self.ensure_callable(src)
         dst = self.ensure_callable(dst)
-        
+
         @static_vars(numba_accumulate=None)
         def exec_accumulate(src=src, dst=dst):
             src, dst = src(), dst()
@@ -162,7 +162,7 @@ class HostFFTI(FFTI):
             else:
                 dst[...] = np.transpose(a=src, axes=axes)
         return exec_transpose
-    
+
     def plan_fill_zeros(self, tg, a, slices):
         assert slices
         a = self.ensure_callable(a)
@@ -171,21 +171,21 @@ class HostFFTI(FFTI):
             for slc in slices:
                 buf[slc] = 0
         return exec_fill_zeros
-    
-    def plan_compute_energy(self, tg, fshape, src, dst, transforms, 
+
+    def plan_compute_energy(self, tg, fshape, src, dst, transforms,
             method='round', target=None, **kwds):
         """
-        Plan to compute energy from src array to dst array using given transforms, 
+        Plan to compute energy from src array to dst array using given transforms,
         method (round or weighted) and numba target.
         """
         (N, NS2, C2C, R2C, S) = super(HostFFTI, self).plan_compute_energy(tg, fshape, src, dst, transforms, **kwds)
         dim = src.ndim
-        
+
         # We do not forget the hermitian symmetry:
-        #  normally: E = 1/2 |Fi|**2 
+        #  normally: E = 1/2 |Fi|**2
         #  but for a R2C transform:  E = 1/2 |Fi|**2 + 1/2 |Fi*|**2 = 1 |Fi|**2
         C = 2**(R2C-1)
-        
+
         target = first_not_None(target, __DEFAULT_NUMBA_TARGET__)
         args = (src,)+N+NS2+C2C+(C, S, dst)
         signature, layout  = make_numba_signature(*args)
diff --git a/hysop/numerics/interpolation/polynomial.py b/hysop/numerics/interpolation/polynomial.py
index 5f088ee4b8036d9d9287d039f626175c019c5184..3f2d446ff7c0bee12cfa449fe6fba9e2e48038b2 100644
--- a/hysop/numerics/interpolation/polynomial.py
+++ b/hysop/numerics/interpolation/polynomial.py
@@ -44,10 +44,10 @@ PolynomialInterpolation = EnumFactory.create('PolynomialInterpolation',
          'QUINTIC_FDC2', # requires 1 ghosts
          'QUINTIC_FDC4', # requires 2 ghosts
          'QUINTIC_FDC6', # requires 3 ghosts
-         'SEPTIC_FDC2',  # requires 2 ghosts  
+         'SEPTIC_FDC2',  # requires 2 ghosts
          'SEPTIC_FDC4',  # requires 3 ghosts
          'SEPTIC_FDC6',  # requires 4 ghosts
-         'NONIC_FDC2',   # requires 2 ghosts  
+         'NONIC_FDC2',   # requires 2 ghosts
          'NONIC_FDC4',   # requires 3 ghosts
          'NONIC_FDC6'    # requires 4 ghosts
         ])
@@ -74,7 +74,7 @@ class PolynomialInterpolator(object):
         else:
             msg='Unknown PolynomialInterpolation value {}.'.format(pi)
             raise NotImplementedError(msg)
-        for i in xrange(1,5):
+        for i in range(1, 5):
             if spi.endswith(str(2*i)):
                 fd=2*i
                 break
@@ -84,7 +84,7 @@ class PolynomialInterpolator(object):
         obj = cls(deg=deg, fd=fd, **kwds)
         obj.spi = spi
         return obj
-    
+
     @classmethod
     def cache_file(cls):
         _cache_dir  = IO.cache_path() + '/numerics'
@@ -117,9 +117,9 @@ class PolynomialInterpolator(object):
             Number of dimensions to interpolate.
         deg: tuple of ints
             Polynomial degree (1=linear, 3=cubic, 5=quintic, 7=septic, ...)
-            Degree should be odd: deg=2k+1 
+            Degree should be odd: deg=2k+1
         fd: tuple of ints
-            Order of centered finite differences stencils used to compute derivatives for 
+            Order of centered finite differences stencils used to compute derivatives for
             each direction.
         p: tuple of ints
             Corresponds to deg+1.
@@ -127,7 +127,7 @@ class PolynomialInterpolator(object):
         P: int
             The total number of polynomial coefficients P=p0*p1*...*p(dim-1)
         k: tuple of ints
-            Max derivative order required to compute the polynomial interpolator coefficients 
+            Max derivative order required to compute the polynomial interpolator coefficients
             in each direction.
             Also the regularity of the resulting interpolant. Corresponds to (deg-1)/2.
         ghosts: tuple of ints
@@ -141,7 +141,7 @@ class PolynomialInterpolator(object):
             nonic:     9     4      2     |  2     3     4
         n: tuple of ints
             Corresponds to 2*(ghosts+1), the number of required nodes to generate the
-            polynomial coefficients (in each direction). 
+            polynomial coefficients (in each direction).
             In total we have N=n0*n1*...*n(dim-1) input nodes.
 
             G1    G1
@@ -157,7 +157,7 @@ class PolynomialInterpolator(object):
         M: np.ndarray
             Grid values to polynomial coefficient matrix:
                 M.dot(F.ravel()) will give C.ravel(), coefficients of P(x0,x1,...)
-                     N 
+                     N
                 <--------->
                 X X X X X X ^                         |f0| ^                   |c0| ^
                 X X X X X X |                         |f1| |                   |c1| |
@@ -174,33 +174,33 @@ class PolynomialInterpolator(object):
         :class:`PolynomialSubgridInterpolator`: Precompute weights for fixed subgrid
         interpolation.
         """
-        
+
         assert dim>0, 'dim<=0'
 
         deg = to_tuple(deg)
         if len(deg)==1:
             deg*=dim
         check_instance(deg, tuple, values=int, size=dim)
-        
+
         fd = to_tuple(fd)
         if len(fd)==1:
             fd*=dim
         check_instance(fd, tuple, values=int, size=dim)
 
         p = tuple(degi+1 for degi in deg)
-        k = tuple((degi-1)/2 for degi in deg)
+        k = tuple((degi-1)//2 for degi in deg)
 
         ghosts = ()
         n = ()
         for (fdi,ki) in zip(fd,k):
             if (ki>0):
-                gi = (fdi/2) - 1 + (ki+1)/2
+                gi = (fdi//2) - 1 + (ki+1)//2
             else:
                 gi = 0
             ni = 2*(gi+1)
             ghosts += (gi,)
             n += (ni,)
-        
+
         check_instance(deg,   tuple, values=int, size=dim)
         check_instance(fd,    tuple, values=int, size=dim)
         check_instance(p,     tuple, values=int, size=dim)
@@ -211,7 +211,7 @@ class PolynomialInterpolator(object):
         assert all(degi%2==1 for degi in deg), 'deg % 2 != 1'
         assert all(pi%2==0 for pi in p), 'p % 2 != 0'
         assert all(ni%2==0 for ni in n), 'n % 2 != 0'
-        
+
         P = np.prod(p, dtype=np.int32)
         N = np.prod(n, dtype=np.int32)
 
@@ -239,22 +239,22 @@ class PolynomialInterpolator(object):
         k = self.k
         n = self.n
         ghosts = self.ghosts
-        
+
         if verbose:
-            print '\nCollecting 1D stencils:'
-        
+            print('\nCollecting 1D stencils:')
+
         SG = CenteredStencilGenerator()
         SG.configure(dim=1, dtype=np.float64)
         S = {}
-        for direction in xrange(dim):
+        for direction in range(dim):
             if verbose:
-                print ' Direction {}'.format(direction)
+                print(' Direction {}'.format(direction))
             Sd  = S.setdefault(direction, [])
             nd  = n[direction]
             kd  = k[direction]
             fdd = fd[direction]
             gd  = ghosts[direction]
-            for i in xrange(kd+1):
+            for i in range(kd+1):
                 msg='Failed to compute stencil derivative={}, order={}, origin={}'
                 msg=msg.format(i, fdd, gd)
                 try:
@@ -264,7 +264,7 @@ class PolynomialInterpolator(object):
                         Si = SG.generate_exact_stencil(order=fdd, derivative=i)
                     Si.replace_symbols({Si.dx:1})
                 except:
-                    print msg
+                    print(msg)
                     raise
                 msg+=' got {}.'.format(Si.coeffs)
                 assert (not Si.is_symbolic()), msg
@@ -273,11 +273,11 @@ class PolynomialInterpolator(object):
                 Si = Si.coeffs
                 Sd.append(Si)
                 if verbose:
-                    print '  {}-th derivative: {}'.format(i,Si)
+                    print('  {}-th derivative: {}'.format(i,Si))
         return S
 
     def _build_stencil(self, dvec):
-        dvec = np.asarray(dvec)    
+        dvec = np.asarray(dvec)
         k = self.k
         S = self.S
         assert dvec.size == self.dim, 'dvec.size != dim'
@@ -306,7 +306,7 @@ class PolynomialInterpolator(object):
         self.xvals,  self.xvars  = xvals, xvars
         self.fvals,  self.fvars  = fvals, fvars
         self.pvals,  self.pvars  = pvals, pvars
-        
+
         try:
             data = load_data_from_cache(self.cache_file(), self.key)
             if (data is not None):
@@ -319,59 +319,59 @@ class PolynomialInterpolator(object):
         except Exception as e:
             msg='Failed to load data from cache because:\n{}'.format(e)
             warnings.warn(msg, HysopCacheWarning)
-        
+
         P0 = 0
-        for idx in it.product(*tuple(range(0,pi) for pi in p)):
+        for idx in it.product(*tuple(range(0, pi) for pi in p)):
             P0 += pvals[idx] * np.prod(np.power(xvals, idx))
         self.P0 = P0
-        
+
         S = self._collect_stencils()
         self.S = S
-        
+
         if verbose:
-            print '\nGenerating variables:'
-            print '  *space vars: '
-            print xvals
-            print '  *grid values:'
-            print fvals
-            print '  *polynomial coefficients:'
-            print pvals
-            print '  *polynomial patch:'
-            print P0
-            print '\nBuilding system...'
-        
+            print('\nGenerating variables:')
+            print('  *space vars: ')
+            print(xvals)
+            print('  *grid values:')
+            print(fvals)
+            print('  *polynomial coefficients:')
+            print(pvals)
+            print('  *polynomial patch:')
+            print(P0)
+            print('\nBuilding system...')
+
         eqs = []
-        for dvec in it.product(*tuple(range(0,ki+1) for ki in k)):
+        for dvec in it.product(*tuple(range(0, ki+1) for ki in k)):
             if verbose:
-                print '  => derivative {}'.format(dvec)
-            
+                print('  => derivative {}'.format(dvec))
+
             dP0 = P0
             for i,deg in enumerate(dvec):
                 dP0 = sm.diff(dP0, xvals[i], deg)
-            
+
             stencil = self._build_stencil(dvec)
             if verbose:
-                print '     stencil:'
-                print stencil
+                print('     stencil:')
+                print(stencil)
 
-            for idx in it.product(*tuple(range(gi,gi+2) for gi in ghosts)):
+            for idx in it.product(*tuple(range(gi, gi+2) for gi in ghosts)):
                 if verbose:
-                    print '    -> point {}'.format(idx)
-                
+                    print('    -> point {}'.format(idx))
+
                 pos = np.asarray(idx)-ghosts
                 pos = dict(zip(xvals, pos))
                 eq = dP0.xreplace(pos)
-                
+
                 for offset in it.product(*tuple(range(-gi, gi+1) for gi in ghosts)):
                     fidx = tuple(np.add(idx, offset))
                     sidx = tuple(np.add(offset, ghosts))
                     eq -= fvals[fidx]*stencil[sidx]
-                
+
                 eqs.append(eq)
                 if verbose:
-                    print '        {}'.format(eq)
-       
-        # Build system such that A*c = B*f where c are the polynomial coefficients and 
+                    print('        {}'.format(eq))
+
+        # Build system such that A*c = B*f where c are the polynomial coefficients and
         # f the node values
         dtype = (np.float64 if approximative else object)
         A = np.empty((self.P,self.P), dtype=dtype)
@@ -382,11 +382,11 @@ class PolynomialInterpolator(object):
                 A[i,j] = +eq.coeff(ci)
             for (j,fi) in enumerate(fvars):
                 B[i,j] = -eq.coeff(fi)
-        
+
         # C = Ainv*B*f = M*f
         if verbose:
-            print '\nSolving system...'
-        
+            print('\nSolving system...')
+
         if approximative:
             Ainv = np.linalg.inv(A)
         elif has_flint:
@@ -400,11 +400,11 @@ class PolynomialInterpolator(object):
             Ainv = np.asarray(sm.Matrix(A).inv())
 
         if verbose:
-            print '\nBuilding matrix...'
+            print('\nBuilding matrix...')
         M = Ainv.dot(B)
         self.M = M
         update_cache(self.cache_file(), self.key, (P0,S,M))
-        
+
 
     def interpolate(self, fvals):
         """Return the polynomial interpolating input node values"""
@@ -415,9 +415,9 @@ class PolynomialInterpolator(object):
         return sm.utilities.lambdify(self.xvars, P0)
 
     def generate_subgrid_interpolator(self, grid_ratio, dtype=None):
-        return PolynomialSubgridInterpolator(interpolator=self, 
+        return PolynomialSubgridInterpolator(interpolator=self,
                 grid_ratio=grid_ratio, dtype=dtype)
-    
+
     def __hash__(self):
         objs = (self.dim, self.deg, self.fd, self.approximative)
         return hash(objs)
@@ -437,7 +437,7 @@ class PolynomialSubgridInterpolator(object):
             Tuple of integers representing the ratio between the coarse and the fine grid.
         dtype: np.dtype
             Force to cast dtype for all matrices (interpolator.M may contain rationals).
-        
+
         Attributes
         ----------
         dim: int
@@ -446,15 +446,15 @@ class PolynomialSubgridInterpolator(object):
             Number of required ghosts.
         n: tuple of int
             Corresponds to 2*(ghosts+1), the number of required nodes to generate the
-            polynomial coefficients (same as interpolator.n). 
+            polynomial coefficients (same as interpolator.n).
         N: int
             Total number of input nodes N including ghosts (same as interpolator.N).
             N = n0*n1*...*n[dim-1]
         s: tuple of int
             Corresponds to grid_ratio + 1, number of points of the subgrid in each directions.
-            Example for a grid ratio=(3,3), we have s=(4,4): 
+            Example for a grid ratio=(3,3), we have s=(4,4):
                O=coarse grid nodes, X=fine grid nodes
-            
+
             Coarse grid:               Fine grid:
 
            ^  O-----O                  ^  O X X O
@@ -467,11 +467,11 @@ class PolynomialSubgridInterpolator(object):
             Represents the number of fine grid points contained in a coarse grid cell.
             S = s0*s1*...*s[dim-1]
         gr: tuple of int
-            Corresponds to grid_ratio, number of points of the subgrid in each directions, 
+            Corresponds to grid_ratio, number of points of the subgrid in each directions,
             minus one.
-            Example for a grid ratio=(3,3), we have gr=(3,3) and s=(4,4): 
+            Example for a grid ratio=(3,3), we have gr=(3,3) and s=(4,4):
                O=coarse grid nodes, X=fine grid nodes, -=excluded find grid nodes
-            
+
             Coarse grid:               Fine grid:
 
            ^  O-----O                  ^  O X X O  ^
@@ -489,8 +489,8 @@ class PolynomialSubgridInterpolator(object):
                 Let F be the vector of N known coarse grid node values (including required
                 ghosts).
                 Let G be the vector of S unknown fine grid node values.
-                
-                         N                 
+
+                         N
                     <--------->
                     X X X X X X ^                                         |g0| ^
                     X X X X X X |                |f0| ^                   |g1| |
@@ -507,12 +507,12 @@ class PolynomialSubgridInterpolator(object):
 
         Wr: np.ndarray
             Reduced W that exludes rightmost output points of ndimensional output vector.
-                
+
             Pre computed weights to interpolate directly from coarse to inner fine grid.
             Let F be the vector of N known coarse grid node values (including required ghosts).
             Let G be the vector of GR unknown fine inner grid node values (see gr attribute).
-            
-                     N                 
+
+                     N
                 <--------->
                 X X X X X X ^                                         |g0| ^
                 X X X X X X |                |f0| ^                   |g1| |
@@ -539,13 +539,13 @@ class PolynomialSubgridInterpolator(object):
         gr = grid_ratio
         GR = np.prod(gr, dtype=np.int32)
         del grid_ratio
-        
+
         s = tuple(gri+1 for gri in gr)
         S = np.prod(s, dtype=np.int32)
 
         dim = interpolator.dim
         key = ('PolynomialSubgridInterpolator', interpolator.key, gr, str(dtype))
-        
+
         self.p = p
         self.P = p
         self.s = s
@@ -558,7 +558,7 @@ class PolynomialSubgridInterpolator(object):
         self.dim = dim
         self.interpolator = interpolator
         self.key = key
-        
+
         cache_file = interpolator.cache_file()
         try:
             data = load_data_from_cache(cache_file, key)
@@ -571,15 +571,15 @@ class PolynomialSubgridInterpolator(object):
             msg='Failed to load data from cache because:\n{}'.format(e)
             warnings.warn(msg, HysopCacheWarning)
 
-        X = tuple(np.asarray(tuple(sm.Rational(j,gr) for j in xrange(0,si))) 
+        X = tuple(np.asarray(tuple(sm.Rational(j,gr) for j in range(0, si)))
                 for i,(gr,si) in enumerate(zip(gr, s)))
         V = np.vander(X[0], N=p[0], increasing=True)
-        for i in xrange(1, dim):
+        for i in range(1, dim):
             Vi = np.vander(X[i], N=p[i], increasing=True)
             V = np.multiply.outer(V,Vi)
-        
-        even_axes = range(0,V.ndim,2)
-        odd_axes  = range(1,V.ndim,2)
+
+        even_axes = tuple(range(0, V.ndim,2))
+        odd_axes  = tuple(range(1, V.ndim,2))
         axes = even_axes + odd_axes
 
         V = np.transpose(V, axes=axes).copy()
@@ -622,7 +622,7 @@ class PolynomialSubgridRestrictor(object):
         ----------
         subgrid_interpolator: PolynomialSubgridInterpolator
             Interpolant used to compute restrictor weights.
-        
+
         Attributes
         ----------
         g: tuple of int
@@ -648,7 +648,7 @@ class PolynomialSubgridRestrictor(object):
         g = tuple(ni*gri+1 for (ni,gri) in zip(n,gr))
         G = np.prod(g, dtype=np.int64)
         assert all(gi%2==1 for gi in g)
-        origin = tuple(gi/2 for gi in g)
+        origin = tuple(gi//2 for gi in g)
         gvals, gvars = tensor_symbol('g',g,origin)
         I = 0
         for idx in np.ndindex(*gr):
@@ -681,161 +681,137 @@ class PolynomialSubgridRestrictor(object):
 if __name__ == '__main__':
     np.set_printoptions(precision=4, linewidth=1e8, threshold=1e8,
             formatter={'float': lambda x: "{0:+0.3f}".format(x)})
-   
+
     # 2D tests
     grid_ratio = (2,2)
-    F = [[1,1], 
+    F = [[1,1],
          [1,1]]
     F = np.asarray(F)
-    
-    #print 'Solving bilinear...'
-    #PI = PolynomialInterpolator(dim=2, deg=1, fd=2, verbose=False)
-    #GI0 = PI.generate_subgrid_interpolator(grid_ratio=grid_ratio)
-    #GI1 = PI.generate_subgrid_interpolator(grid_ratio=grid_ratio, dtype=np.float64)
-    #print 'Bilinear (Rational)'
-    #print GI0(F)
-    #print
-    #print 'Bilinear (np.float64)'
-    #print GI1(F)
-    #print
-
-    #F = [[0,0,0,0],
-         #[0,1,1,0], 
-         #[0,1,1,0],
-         #[0,0,0,0]]
-    #F = np.asarray(F)
-
-    #print 'Solving bicubic2...'
-    #PI0 = PolynomialInterpolator(dim=2, deg=3, fd=2, verbose=False)
-    #GI0 = PI0.generate_subgrid_interpolator(grid_ratio=grid_ratio, dtype=np.float64)
-    #print 'Bicubic (FDC2)'
-    #print GI0(F)
-    #print
-    
-    #print 'Solving biquintic2...'
-    #PI1 = PolynomialInterpolator(dim=2, deg=5, fd=2, verbose=False)
-    #GI1 = PI1.generate_subgrid_interpolator(grid_ratio=grid_ratio, dtype=np.float64)
-    #print 'Biquintic (FDC2)'
-    #print GI1(F)
-    #print
-    
-    #F = [[0,1,1,0], 
-         #[0,1,1,0]]
-    #F = np.asarray(F)
-    
-    #print 'Solving linear/cubic...'
-    #PI0 = PolynomialInterpolator(dim=2, deg=(1,3), fd=2, verbose=False)
-    #GI0 = PI0.generate_subgrid_interpolator(grid_ratio=grid_ratio, dtype=np.float64)
-    #print 'Linear/Cubic (FDC2)'
-    #print GI0(F)
-    #print
-    
-    #F = [[0,0],
-         #[1,1], 
-         #[1,1],
-         #[0,0]]
-    #F = np.asarray(F)
-    
-    #print 'Solving cubic/linear...'
-    #PI0 = PolynomialInterpolator(dim=2, deg=(3,1), fd=2, verbose=False)
-    #GI0 = PI0.generate_subgrid_interpolator(grid_ratio=grid_ratio, dtype=np.float64)
-    #print 'Cubic/Linear (FDC2)'
-    #print GI0(F)
-    #print
-    
-
-    #F = [[0,0,0,0,0,0],
-         #[0,0,0,0,0,0],
-         #[0,0,1,1,0,0], 
-         #[0,0,1,1,0,0],
-         #[0,0,0,0,0,0],
-         #[0,0,0,0,0,0]]
-    #F = np.asarray(F)
-
-    #print 'Solving bicubic4...'
-    #PI0 = PolynomialInterpolator(dim=2, deg=3, fd=4, verbose=False)
-    #GI0 = PI0.generate_subgrid_interpolator(grid_ratio=grid_ratio, dtype=np.float64)
-    #print 'Bicubic (FDC4)'
-    #print GI0(F)
-    #print
-    
-    #print 'Solving biquintic4...'
-    #PI1 = PolynomialInterpolator(dim=2, deg=5, fd=4, verbose=False)
-    #GI1 = PI1.generate_subgrid_interpolator(grid_ratio=grid_ratio, dtype=np.float64)
-    #print 'Biquintic (FDC4)'
-    #print GI1(F)
-    #print
-    
-    #print 'Solving biseptic2...'
-    #PI2 = PolynomialInterpolator(dim=2, deg=7, fd=2, verbose=False, 
-		#approximative=(not has_flint))
-    #GI2 = PI2.generate_subgrid_interpolator(grid_ratio=grid_ratio, dtype=np.float64)
-    #print 'Biseptic (FDC2)'
-    #print GI2(F)
-    #print
-
-    #print 'Solving binonic2...'
-    #PI3 = PolynomialInterpolator(dim=2, deg=9, fd=2, verbose=False, 
-		#approximative=(not has_flint))
-    #GI3 = PI3.generate_subgrid_interpolator(grid_ratio=grid_ratio, dtype=np.float64)
-    #print 'Binonic (FDC2)'
-    #print GI3(F)
-    #print
-    
-    #print 'Solving septic2/nonic2 ...'
-    #PI4 = PolynomialInterpolator(dim=2, deg=(7,9), fd=2, verbose=False, 
-		#approximative=(not has_flint))
-    #GI4 = PI4.generate_subgrid_interpolator(grid_ratio=grid_ratio, dtype=np.float64)
-    #print 'septic/nonic (FDC2)'
-    #print GI4(F)
-    #print
-    
-    #print 'Solving septic2/quintic4 ...'
-    #PI5 = PolynomialInterpolator(dim=2, deg=(7,5), fd=(2,4), verbose=False, 
-		#approximative=(not has_flint))
-    #GI5 = PI5.generate_subgrid_interpolator(grid_ratio=grid_ratio, dtype=np.float64)
-    #print 'septic/nonic (FDC2/FDC4)'
-    #print GI5(F)
-    #print
-    
-    # 3D test
-    #grid_ratio = (2,2,2)
-    #print 'Solving trilinear...'
-    #PI = PolynomialInterpolator(dim=3, deg=1, fd=2, verbose=False)
-    #GI0 = PI.generate_subgrid_interpolator(grid_ratio=grid_ratio)
-    #print
-
-    #print 'Solving tricubic2...'
-    #PI0 = PolynomialInterpolator(dim=3, deg=3, fd=2, verbose=False)
-    #GI0 = PI0.generate_subgrid_interpolator(grid_ratio=grid_ratio, dtype=np.float64)
-    #print
-    
-    #print 'Solving triquintic2...'
-    #PI0 = PolynomialInterpolator(dim=3, deg=5, fd=2, verbose=False, 
-		#approximative=(not has_flint))
-    #GI0 = PI0.generate_subgrid_interpolator(grid_ratio=grid_ratio, dtype=np.float64)
-    #print
-    
-    # Delaurier-Dubuc interpolating wavelets
-    from matplotlib import pyplot as plt
-    grid_ratio = (32,)
-    fig, axes = plt.subplots(ncols=2, nrows=1)
-    #for k,(deg, sdeg) in enumerate(zip((1,3,5,7),('cubic','quintic','septic','nonic'))):
-        #print k, deg, sdeg
-    for k,(deg, sdeg) in enumerate(zip((1,3,),('linear','cubic'))):
-        ax = axes[k]
-        for fd in (2,4,6):
-            PI = PolynomialInterpolator(dim=1, deg=deg, fd=fd, verbose=False)
-            SI = PI.generate_subgrid_interpolator(grid_ratio=grid_ratio)
-            SR = SI.generate_subgrid_restrictor()
-            X = np.linspace(-SR.n[0]/2, +SR.n[0]/2, SR.g[0])
-            if (k==0):
-                ax.plot(X, SR.R, label='{}'.format(sdeg))
-                break
-            else:
-                ax.plot(X, SR.R, label='{}_fdc{}'.format(sdeg, fd))
-        ax.legend()
 
-        ax.plot(X, np.zeros(SR.g[0]), '--')
-    plt.show()
-    
+    print('Solving bilinear...')
+    PI = PolynomialInterpolator(dim=2, deg=1, fd=2, verbose=False)
+    GI0 = PI.generate_subgrid_interpolator(grid_ratio=grid_ratio)
+    GI1 = PI.generate_subgrid_interpolator(grid_ratio=grid_ratio, dtype=np.float64)
+    print('Bilinear (Rational)')
+    print(GI0(F))
+    print()
+    print('Bilinear (np.float64)')
+    print(GI1(F))
+    print()
+
+    F = [[0,0,0,0],
+         [0,1,1,0],
+         [0,1,1,0],
+         [0,0,0,0]]
+    F = np.asarray(F)
+
+    print('Solving bicubic2...')
+    PI0 = PolynomialInterpolator(dim=2, deg=3, fd=2, verbose=False)
+    GI0 = PI0.generate_subgrid_interpolator(grid_ratio=grid_ratio, dtype=np.float64)
+    print('Bicubic (FDC2)')
+    print(GI0(F))
+    print()
+
+    print('Solving biquintic2...')
+    PI1 = PolynomialInterpolator(dim=2, deg=5, fd=2, verbose=False)
+    GI1 = PI1.generate_subgrid_interpolator(grid_ratio=grid_ratio, dtype=np.float64)
+    print('Biquintic (FDC2)')
+    print(GI1(F))
+    print()
+
+    F = [[0,1,1,0],
+         [0,1,1,0]]
+    F = np.asarray(F)
+
+    print('Solving linear/cubic...')
+    PI0 = PolynomialInterpolator(dim=2, deg=(1,3), fd=2, verbose=False)
+    GI0 = PI0.generate_subgrid_interpolator(grid_ratio=grid_ratio, dtype=np.float64)
+    print('Linear/Cubic (FDC2)')
+    print(GI0(F))
+    print()
+
+    F = [[0,0],
+         [1,1],
+         [1,1],
+         [0,0]]
+    F = np.asarray(F)
+
+    print('Solving cubic/linear...')
+    PI0 = PolynomialInterpolator(dim=2, deg=(3,1), fd=2, verbose=False)
+    GI0 = PI0.generate_subgrid_interpolator(grid_ratio=grid_ratio, dtype=np.float64)
+    print('Cubic/Linear (FDC2)')
+    print(GI0(F))
+    print()
+
+
+    F = [[0,0,0,0,0,0],
+         [0,0,0,0,0,0],
+         [0,0,1,1,0,0],
+         [0,0,1,1,0,0],
+         [0,0,0,0,0,0],
+         [0,0,0,0,0,0]]
+    F = np.asarray(F)
+
+    print('Solving bicubic4...')
+    PI0 = PolynomialInterpolator(dim=2, deg=3, fd=4, verbose=False)
+    GI0 = PI0.generate_subgrid_interpolator(grid_ratio=grid_ratio, dtype=np.float64)
+    print('Bicubic (FDC4)')
+    print(GI0(F))
+    print()
+
+    print('Solving biquintic4...')
+    PI1 = PolynomialInterpolator(dim=2, deg=5, fd=4, verbose=False)
+    GI1 = PI1.generate_subgrid_interpolator(grid_ratio=grid_ratio, dtype=np.float64)
+    print('Biquintic (FDC4)')
+    print(GI1(F))
+    print()
+
+    print('Solving biseptic2...')
+    PI2 = PolynomialInterpolator(dim=2, deg=7, fd=2, verbose=False,
+        approximative=(not has_flint))
+    GI2 = PI2.generate_subgrid_interpolator(grid_ratio=grid_ratio, dtype=np.float64)
+    print('Biseptic (FDC2)')
+    print(GI2(F))
+    print()
+
+    print('Solving binonic2...')
+    PI3 = PolynomialInterpolator(dim=2, deg=9, fd=2, verbose=False,
+        approximative=(not has_flint))
+    GI3 = PI3.generate_subgrid_interpolator(grid_ratio=grid_ratio, dtype=np.float64)
+    print('Binonic (FDC2)')
+    print(GI3(F))
+    print()
+
+    print('Solving septic2/nonic2 ...')
+    PI4 = PolynomialInterpolator(dim=2, deg=(7,9), fd=2, verbose=False,
+        approximative=(not has_flint))
+    GI4 = PI4.generate_subgrid_interpolator(grid_ratio=grid_ratio, dtype=np.float64)
+    print('septic/nonic (FDC2)')
+    print(GI4(F))
+    print()
+
+    print('Solving septic2/quintic4 ...')
+    PI5 = PolynomialInterpolator(dim=2, deg=(7,5), fd=(2,4), verbose=False,
+        approximative=(not has_flint))
+    GI5 = PI5.generate_subgrid_interpolator(grid_ratio=grid_ratio, dtype=np.float64)
+    print('septic/quintic (FDC2/FDC4)')
+    print(GI5(F))
+    print()
+
+    # 3D test
+    grid_ratio = (2,2,2)
+    print('Solving trilinear...')
+    PI = PolynomialInterpolator(dim=3, deg=1, fd=2, verbose=False)
+    GI0 = PI.generate_subgrid_interpolator(grid_ratio=grid_ratio)
+    print()
+
+    print('Solving tricubic2...')
+    PI0 = PolynomialInterpolator(dim=3, deg=3, fd=2, verbose=False)
+    GI0 = PI0.generate_subgrid_interpolator(grid_ratio=grid_ratio, dtype=np.float64)
+    print()
+
+    print('Solving triquintic2...')
+    PI0 = PolynomialInterpolator(dim=3, deg=5, fd=2, verbose=False,
+        approximative=(not has_flint))
+    GI0 = PI0.generate_subgrid_interpolator(grid_ratio=grid_ratio, dtype=np.float64)
+    print()
diff --git a/hysop/numerics/odesolvers/runge_kutta.py b/hysop/numerics/odesolvers/runge_kutta.py
index edf6861a566b01bc641ea270358899a79db23985..cefb09bbcab95a3043a4752045c0c8825846d62c 100644
--- a/hysop/numerics/odesolvers/runge_kutta.py
+++ b/hysop/numerics/odesolvers/runge_kutta.py
@@ -1,5 +1,5 @@
+import numpy as np
 
-from hysop.deps import np
 from hysop.tools.types import check_instance, first_not_None
 from hysop.tools.numerics import is_fp
 from hysop.numerics.odesolvers.runge_kutta_coeffs import Itype,Qtype,rk_params, available_methods
@@ -20,7 +20,7 @@ class RungeKutta(TimeIntegrator):
 # Tni = Tn + alpha_i*dt
 # Xni = Xn + dt*sum(j=0,i-1,gamma_ij*Knj)
 # Kni = F(Tni,Xni)
-# X_n+1 = Xn + dt*sum(i=0,M-1,beta_i*Ki) 
+# X_n+1 = Xn + dt*sum(i=0,M-1,beta_i*Ki)
 class RhsFunction(object):
     def __call__(out, X, t, dt, **kwds):
         msg='{}.__call__() has not been overrided.'
@@ -28,16 +28,16 @@ class RhsFunction(object):
         raise NotImplementedError(msg)
 
 
-class ExplicitRungeKutta(RungeKutta): 
+class ExplicitRungeKutta(RungeKutta):
     def __init__(self,method,I_dump=I_dump,Q_dump=Q_dump):
 
         if method not in available_methods():
             msg='{} was not implemented yet! Valid values are {}.'
             msg=msg.format(name,implemented_methods.keys())
             raise ValueError(msg)
-        
+
         params = rk_params[method]
-         
+
         self.method = method
 
         self.order  = params['order']
@@ -56,21 +56,21 @@ class ExplicitRungeKutta(RungeKutta):
         """Buffers = dict of (nb_stages+1) np.ndarray of size compatible with Xin"""
         check_instance(Xin, dict, keys=str, values=np.ndarray)
         varnames = Xin.keys()
-        views   = first_not_None(views,   { k:Ellipsis for (k,v) in Xin.iteritems() })
-        Xout    = first_not_None(Xout,    { k:np.empty_like(v[views[k]]) 
-                                                for (k,v) in Xin.iteritems() })
-        buffers = first_not_None(buffers, { k:tuple(np.empty_like(v) 
-                                                for i in xrange(self.stages+1)) 
-                                                for (k,v) in Xin.iteritems()})
+        views   = first_not_None(views,   { k: Ellipsis for (k,v) in Xin.items() })
+        Xout    = first_not_None(Xout,    { k: np.empty_like(v[views[k]])
+                                                for (k,v) in Xin.items() })
+        buffers = first_not_None(buffers, { k: tuple(np.empty_like(v)
+                                                for i in range(self.stages+1))
+                                                for (k,v) in Xin.items()})
         check_instance(views,   dict, keys=str, values=(type(Ellipsis),slice,tuple))
         check_instance(Xout,    dict, keys=str, values=np.ndarray)
         check_instance(buffers, dict, keys=str, values=tuple)
         assert callable(RHS), type(RHS)
         if __debug__:
-            compute_shape = Xout.values()[0].shape
-            assert varnames == Xout.keys() 
-            assert varnames == views.keys() 
-            assert varnames == buffers.keys() 
+            compute_shape = next(iter(Xout.values())).shape
+            assert varnames == Xout.keys()
+            assert varnames == views.keys()
+            assert varnames == buffers.keys()
             for vname in varnames:
                 ivar = Xin[vname]
                 ovar = Xout[vname]
@@ -84,11 +84,11 @@ class ExplicitRungeKutta(RungeKutta):
                 for buf in buffers[vname]:
                     assert is_fp(buf.dtype), buf.dtype
                     assert np.all(buf.shape == ivar.shape)
-        
-        Xtmp = {k: v[0] for (k,v) in buffers.iteritems()}
-        K = tuple( {k: v[i] for (k,v) in buffers.iteritems()} 
-                            for i in xrange(1, self.stages+1) )
-        for i in xrange(self.stages):
+
+        Xtmp = {k: v[0] for (k,v) in buffers.items()}
+        K = tuple( {k: v[i] for (k,v) in buffers.items()}
+                            for i in range(1, self.stages+1) )
+        for i in range(self.stages):
             ai = self.alpha[i]
             ti = t + float(ai)*dt
             if (i==0):
@@ -96,7 +96,7 @@ class ExplicitRungeKutta(RungeKutta):
             else:
                 for vname in varnames:
                     Xtmp[vname][...] = 0
-                    for j in xrange(i):
+                    for j in range(i):
                         gij = float(self.gamma[i-1, j])
                         Xtmp[vname] += (float(gij)*K[j][vname])
                     Xtmp[vname] *= dt
@@ -109,7 +109,7 @@ class ExplicitRungeKutta(RungeKutta):
             Xout[vname] *= dt
             Xout[vname] += Xin[vname][views[vname]]
         return Xout
-    
+
     def __eq__(self, other):
         if not isinstance(other, ExplicitRungeKutta):
             return NotImplemented
@@ -147,19 +147,19 @@ class ExplicitRungeKutta(RungeKutta):
             return '{} + {}'.format(Tn,dt)
         else:
             return '{} + {}*{}'.format(Tn,self.dump(alpha),dt)
-    
+
     # Xni = Xn + dt*sum(j=0,i-1,gamma_ij*Knj)
     def Xni_sum(self,i):
         _sum = ''
         gamma = self.gamma[i-1,:]
-        for j in xrange(i):
+        for j in range(i):
             g = gamma[j]
             if g == 0:
                 continue
             elif g == 1:
-                _sum += 'K[{}]'.format(j) 
+                _sum += 'K[{}]'.format(j)
             else:
-                _sum += '{}*K[{}]'.format(self.dump(g),j) 
+                _sum += '{}*K[{}]'.format(self.dump(g),j)
             _sum += ' + '
         _sum = _sum[:-3]
         return _sum
@@ -170,10 +170,10 @@ class ExplicitRungeKutta(RungeKutta):
         else:
             return '{}'.format(Xn)
 
-    # X_n+1 = Xn + dt*sum(i=0,M-1,beta_i*Ki) 
+    # X_n+1 = Xn + dt*sum(i=0,M-1,beta_i*Ki)
     def step_sum(self):
         _sum = ''
-        for i in xrange(self.stages):
+        for i in range(self.stages):
             beta = self.beta[i]
             if beta == 0:
                 continue
@@ -197,46 +197,46 @@ RK4_38 = ExplicitRungeKutta('RK4_38')
 
 if __name__ == '__main__':
     R = ExplicitRungeKutta('RK4')
-    for i in xrange(4):
-        print R.Tni(i,Tn='To',dt='dt')
-    print
-    for i in xrange(4):
-        print R.Xni(i,Xn='Xo',dt='dt')
-    print 
-    print R.step(Xn='Xo',dt='dt')
-    
+    for i in range(4):
+        print(R.Tni(i,Tn='To',dt='dt'))
+    print()
+    for i in range(4):
+        print(R.Xni(i,Xn='Xo',dt='dt'))
+    print()
+    print(R.step(Xn='Xo',dt='dt'))
+
     Xin  = {'a': np.random.rand(10,10).astype(np.float32)}
     Xout = {'a': np.empty_like(Xin['a'])}
 
-    print '\nRHS=0'
+    print('\nRHS=0')
     class Rhs(RhsFunction):
         def __call__(self, out, X, t, dt, **kwds):
             out['a'][...] = 0
     rhs = Rhs()
     for Integrator in (Euler, RK2, RK3, RK4, RK4_38):
-        print ' {}'.format(Integrator.name().ljust(8)),
+        print(' {}'.format(Integrator.name().ljust(8)), end=' ')
         dt = 0.1
         Integrator(Xin, rhs, dt=dt, Xout=Xout)
         d = np.max(np.abs((Xout['a']-Xin['a'])))
-        print ' d={}'.format(d)
+        print(' d={}'.format(d))
         assert (d<1e-7), d
 
-    print '\nRHS=1'
+    print('\nRHS=1')
     def rhs(out, X, t, dt, **kwds):
         out['a'][...] = 1
     for Integrator in (Euler, RK2, RK3, RK4, RK4_38):
-        print ' {}'.format(Integrator.name().ljust(8)),
+        print(' {}'.format(Integrator.name().ljust(8)), end=' ')
         dt = 0.1
         Integrator(Xin, rhs, dt=dt, Xout=Xout)
         d = np.max(np.abs((Xout['a']-Xin['a'])-dt))
-        print ' d={}'.format(d)
+        print(' d={}'.format(d))
         assert (d<1e-7), d
-    
-    print '\nRHS=cos(t)'
+
+    print('\nRHS=cos(t)')
     def rhs(out, X, t, dt, **kwds):
         out['a'][...] = np.cos(t)
     for Integrator in (Euler, RK2, RK3, RK4, RK4_38):
-        print ' {}'.format(Integrator.name().ljust(8)),
+        print(' {}'.format(Integrator.name().ljust(8)), end=' ')
         dt = 0.1
         Integrator(Xin, rhs, dt=dt, Xout=Xout)
 
@@ -244,14 +244,14 @@ if __name__ == '__main__':
         beta  = np.asarray(tuple(float(x) for x in Integrator.beta), dtype=np.float32)
         dX = np.dot(np.cos(alpha*dt), beta)
         d = np.max(np.abs((Xout['a']-Xin['a'])-dX*dt))
-        print ' d={}'.format(d)
+        print(' d={}'.format(d))
         assert (d<1e-7), d
 
-    print '\nRHS=f(x)'
+    print('\nRHS=f(x)')
     def rhs(out, X, t, dt, **kwds):
         out['a'][...] = 0.01*X['a'] + 0.02
     for Integrator in (Euler, RK2, RK3, RK4, RK4_38):
-        print ' {}'.format(Integrator.name().ljust(8)),
+        print(' {}'.format(Integrator.name().ljust(8)), end=' ')
         dt = 0.1
         Integrator(Xin, rhs, dt=dt, Xout=Xout)
 
@@ -259,12 +259,12 @@ if __name__ == '__main__':
         beta  = np.asarray(tuple(float(x) for x in Integrator.beta), dtype=np.float32)
         K = [None,]*Integrator.stages
         beta = np.asarray(tuple(float(x) for x in Integrator.beta), dtype=np.float32)
-        for i in xrange(Integrator.stages):
+        for i in range(Integrator.stages):
             if i==0:
                 X = Xin['a'].copy()
             else:
                 X[...] = 0
-                for j in xrange(i):
+                for j in range(i):
                     gij = float(Integrator.gamma[i-1,j])
                     X[...] += float(gij)*K[j]
                 X[...] *= dt
@@ -274,14 +274,14 @@ if __name__ == '__main__':
         for i,bi in enumerate(beta):
             dX += bi*K[i]
         d = np.max(np.abs((Xout['a']-Xin['a'])-dX*dt))
-        print ' d={}'.format(d)
+        print(' d={}'.format(d))
         assert (d<1e-7), d
 
-    print '\nRHS=f(x, t)'
+    print('\nRHS=f(x, t)')
     def rhs(out, X, t, dt, **kwds):
         out['a'][...] = 0.01*X['a'] + 2*t
     for Integrator in (Euler, RK2, RK3, RK4, RK4_38):
-        print ' {}'.format(Integrator.name().ljust(8)),
+        print(' {}'.format(Integrator.name().ljust(8)), end=' ')
         dt = 0.1
         Integrator(Xin, rhs, dt=dt, Xout=Xout)
 
@@ -290,13 +290,13 @@ if __name__ == '__main__':
         K = [None,]*Integrator.stages
         alpha0 = np.asarray(tuple(float(x) for x in Integrator.alpha), dtype=np.float32)
         beta = np.asarray(tuple(float(x) for x in Integrator.beta), dtype=np.float32)
-        for i in xrange(Integrator.stages):
+        for i in range(Integrator.stages):
             ti = 0+alpha[i]*dt
             if i==0:
                 X = Xin['a'].copy()
             else:
                 X[...] = 0
-                for j in xrange(i):
+                for j in range(i):
                     gij = float(Integrator.gamma[i-1,j])
                     X[...] += float(gij)*K[j]
                 X[...] *= dt
@@ -306,5 +306,5 @@ if __name__ == '__main__':
         for i,bi in enumerate(beta):
             dX += bi*K[i]
         d = np.max(np.abs((Xout['a']-Xin['a'])-dX*dt))
-        print ' d={}'.format(d)
+        print(' d={}'.format(d))
         assert (d<1e-7), d
diff --git a/hysop/numerics/remesh/kernel_generator.py b/hysop/numerics/remesh/kernel_generator.py
index 364b83a32c17cfab49479b2f9f4d588cf08f34e0..7333e96b206d1fef23e638751244c5327a8987d5 100644
--- a/hysop/numerics/remesh/kernel_generator.py
+++ b/hysop/numerics/remesh/kernel_generator.py
@@ -1,4 +1,9 @@
-from hysop.deps             import os, hashlib, sm, np, gzip, sp
+import os, hashlib, gzip
+import numpy as np
+import scipy as sp
+import sympy as sm
+from scipy import interpolate
+
 from hysop.tools.io_utils   import IO
 from hysop.tools.numerics   import mpq,mpfr,mpqize,f2q
 from hysop.tools.cache      import load_data_from_cache, update_cache
@@ -28,7 +33,7 @@ class Kernel(object):
     @classmethod
     def hash_kernel_key(cls,n,r,deg,Ms,H,remesh):
         s = '{}_{}_{}_{}_{}_{}'.format(n,r,deg,Ms,H,int(remesh))
-        return hashlib.sha256(s).hexdigest()
+        return hashlib.sha256(s.encode('utf-8')).hexdigest()
 
     @classmethod
     def cache_file(cls):
@@ -57,8 +62,8 @@ class Kernel(object):
     def _build(self,verbose,split_polys):
 
         #polynom symbolic variables
-        x = sm.abc.x
-        t = sm.abc.t
+        x = sm.Symbol('x')
+        t = sm.Symbol('t')
 
         #usefull vars
         Ms = self.Ms
@@ -67,34 +72,34 @@ class Kernel(object):
         self.Px = P
 
         if verbose:
-            print '  => Substitution in Polynomials'
+            print('  => Substitution in Polynomials')
             for Pix in P:
-                print '  ',Pix.all_coeffs()[::-1]
-            print
+                print('  ',Pix.all_coeffs()[::-1])
+            print()
             for Pix in P:
-                print '  ',sm.horner(Pix)
-            print
+                print('  ',sm.horner(Pix))
+            print()
 
         #split polynomials
-        X = np.arange(-Ms,+Ms+1)
+        X = np.arange(-Ms, +Ms+1)
         if split_polys:
             Pt_l = []
             Pt_r = []
             Pt_L = []
             Pt_R = []
-            Cl = np.empty(shape=(deg+1,2*Ms),dtype=float)
-            Cr = np.empty(shape=(deg+1,2*Ms),dtype=float)
-            CL = np.empty(shape=(deg+1,2*Ms),dtype=float)
-            CR = np.empty(shape=(deg+1,2*Ms),dtype=float)
+            Cl = np.empty(shape=(deg+1,2*Ms), dtype=np.float64)
+            Cr = np.empty(shape=(deg+1,2*Ms), dtype=np.float64)
+            CL = np.empty(shape=(deg+1,2*Ms), dtype=np.float64)
+            CR = np.empty(shape=(deg+1,2*Ms), dtype=np.float64)
             for i,Pix in enumerate(P):
-                Pit_l = Pix.xreplace({x:t+X[i]})
-                Pit_r = Pix.xreplace({x:X[i]+1-t})
-                Pit_L = Pix.xreplace({x:+1-t+X[i]})
-                Pit_R = Pix.xreplace({x:X[i]-t})
-                Cl[:,i] = np.asarray(Pit_l.all_coeffs(), dtype=float)
-                Cr[:,i] = np.asarray(Pit_r.all_coeffs(), dtype=float)
-                CL[:,i] = np.asarray(Pit_L.all_coeffs(), dtype=float)
-                CR[:,i] = np.asarray(Pit_R.all_coeffs(), dtype=float)
+                Pit_l = sm.polys.polytools.poly(Pix.as_expr().xreplace({x:t+X[i]}), t, domain='QQ')
+                Pit_r = sm.polys.polytools.poly(Pix.as_expr().xreplace({x:X[i]+1-t}), t, domain='QQ')
+                Pit_L = sm.polys.polytools.poly(Pix.as_expr().xreplace({x:+1-t+X[i]}), t, domain='QQ')
+                Pit_R = sm.polys.polytools.poly(Pix.as_expr().xreplace({x:X[i]-t}), t, domain='QQ')
+                Cl[:,i] = np.asarray(Pit_l.all_coeffs(), dtype=np.float64)
+                Cr[:,i] = np.asarray(Pit_r.all_coeffs(), dtype=np.float64)
+                CL[:,i] = np.asarray(Pit_L.all_coeffs(), dtype=np.float64)
+                CR[:,i] = np.asarray(Pit_R.all_coeffs(), dtype=np.float64)
                 Pt_l.append(Pit_l)
                 Pt_r.append(Pit_r)
                 Pt_L.append(Pit_L)
@@ -109,29 +114,29 @@ class Kernel(object):
             self.Pt_R = Pt_R
 
             if verbose:
-                print '  => Splitting polynomials'
+                print('  => Splitting polynomials')
                 for p in Pt_l:
-                    print '  ',p.all_coeffs()
-                print
+                    print('  ',p.all_coeffs())
+                print()
                 for p in Pt_r:
-                    print '  ',p.all_coeffs()
-                print
-                print
+                    print('  ',p.all_coeffs())
+                print()
+                print()
                 for p in Pt_l:
-                    print '  ',sm.horner(p)
-                print
+                    print('  ',sm.horner(p))
+                print()
                 for p in Pt_r:
-                    print '  ',sm.horner(p)
+                    print('  ',sm.horner(p))
         else:
             Pt   = []
             Pt_L = []
-            C = np.empty(shape=(deg+1,2*Ms),dtype=float)
-            CL = np.empty(shape=(deg+1,2*Ms),dtype=float)
+            C  = np.empty(shape=(deg+1,2*Ms), dtype=np.float64)
+            CL = np.empty(shape=(deg+1,2*Ms), dtype=np.float64)
             for i,Pix in enumerate(P):
-                Pit = Pix.xreplace({x:t+X[i]})
-                Pit_L = Pix.xreplace({x:1-t+X[i]})
-                C[:,i] = np.asarray(Pit.all_coeffs(), dtype=float)
-                CL[:,i] = np.asarray(Pit_L.all_coeffs(), dtype=float)
+                Pit   = sm.polys.polytools.poly(Pix.as_expr().xreplace({x:t+X[i]}), t, domain='QQ')
+                Pit_L = sm.polys.polytools.poly(Pix.as_expr().xreplace({x:1-t+X[i]}), t, domain='QQ')
+                C[:,i]  = np.asarray(Pit.all_coeffs(), dtype=np.float64)
+                CL[:,i] = np.asarray(Pit_L.all_coeffs(), dtype=np.float64)
                 Pt.append(Pit)
                 Pt_L.append(Pit_L)
             self.Pt = Pt
@@ -140,17 +145,17 @@ class Kernel(object):
             self.CL = CL
 
             if verbose:
-                print '  => Substituing x = t+i'
+                print('  => Substituing x = t+i')
                 for Pit in Pt:
-                    print '  ',Pit.all_coeffs()[::-1]
-                print
+                    print('  ',Pit.all_coeffs()[::-1])
+                print()
                 for Pit in Pt:
-                    print '  ',sm.horner(Pit)
-                print
+                    print('  ',sm.horner(Pit))
+                print()
 
         if verbose:
-            print '  => Generating lambdas'
-        
+            print('  => Generating lambdas')
+
         imin=-Ms
         imax=+Ms
         if split_polys:
@@ -179,7 +184,7 @@ class Kernel(object):
             self.poly_splitted = False
 
         if verbose:
-            print '  All done.'
+            print('  All done.')
 
     def _register(self, dic):
         key = self.hash_kernel_key(self.n,self.r,self.deg,self.Ms,self.H,self.remesh)
@@ -235,7 +240,7 @@ class SymmetricKernelGenerator(object):
             self.Mh = None
         else:
             Mh = []
-            for q in xrange(n+1):
+            for q in range(n+1):
                 mq = mpq(0)
                 for i,h in enumerate(H,start=-Ms):
                     mq += (i**q) * h
@@ -267,16 +272,16 @@ class SymmetricKernelGenerator(object):
             cls = Kernel
 
         if verbose:
-            print '\nSymmetricKernelGenerator was called with the following parameters:'
-            print '  n   = {} (preserved discrete moments)'.format(n)
-            print '  r   = {} (overall kernel regularity)'.format(r)
-            print '  Ms  = {} (piecewise polynomial count)'.format(Ms)
-            print '  deg = {} (local polynomial regularity)'.format(deg)
-            print
-            print '  H   = ['+','.join([h.__str__() for h in H])+']'
+            print('\nSymmetricKernelGenerator was called with the following parameters:')
+            print('  n   = {} (preserved discrete moments)'.format(n))
+            print('  r   = {} (overall kernel regularity)'.format(r))
+            print('  Ms  = {} (piecewise polynomial count)'.format(Ms))
+            print('  deg = {} (local polynomial regularity)'.format(deg))
+            print()
+            print('  H   = ['+','.join([h.__str__() for h in H])+']')
             if not self.remesh:
-                print '  Mh  = ['+','.join([m.__str__() for m in Mh])+']'
-            print
+                print('  Mh  = ['+','.join([m.__str__() for m in Mh])+']')
+            print()
 
         H  = mpqize(np.asarray(H))
         if not self.remesh:
@@ -286,34 +291,34 @@ class SymmetricKernelGenerator(object):
         key = Kernel.hash_kernel_key(n,r,deg,Ms,H,remesh)
         if override_cache:
             if verbose:
-                print '  Cache overwrite requested.'
+                print('  Cache overwrite requested.')
         else:
             if verbose:
-                print '  Checking if kernel was already computed... ',
+                print('  Checking if kernel was already computed... ', end=' ')
             data = load_data_from_cache(Kernel.cache_file(), key)
             if (data is not None):
                 if verbose:
-                    print 'True'
-                    print '  Loading kernel from cache.'
+                    print('True')
+                    print('  Loading kernel from cache.')
                 if (data == self.SINGULAR_SYSTEM):
                     raise RuntimeError('Could not solve linear system.')
                 kernel = cls(verbose=verbose,register=False,split_polys=split_polys,**data)
                 return kernel
             elif verbose:
-                print 'False'
-                print '  Building linear system...'
+                print('False')
+                print('  Building linear system...')
         #polynom symbolic variable
-        x = sm.abc.x
+        x = sm.Symbol('x')
 
         #build Ms*(deg+1) symbolic coefficients (polynomial unknowns)
         coeffs = []
-        for k in xrange(Ms):
-            coeffs.append([sm.symbols('C{k}_{d}'.format(k=k,d=d)) for d in xrange(deg+1)])
+        for k in range(Ms):
+            coeffs.append([sm.symbols('C{k}_{d}'.format(k=k,d=d)) for d in range(deg+1)])
         #build discrete moments rhs values
         M = []
-        for i in xrange(n+1):
+        for i in range(n+1):
             Mi = []
-            for j in xrange(deg+1):
+            for j in range(deg+1):
                 if remesh and i==j:
                     Mij = f2q(1)
                 elif not remesh and i-j>=0 and Mh[i-j]!=0:
@@ -335,16 +340,12 @@ class SymmetricKernelGenerator(object):
             Pp.append(sm.polys.polytools.poly(pexpr,x))
             Pm.append(sm.polys.polytools.poly(mexpr,x))
         P    = Pm[::-1] + Pp
-        Pbeg   = -Ms
-        Pend   = +Ms-1
-        Prange = lambda: xrange(-Ms,+Ms)
-        Penum  = lambda: enumerate(P,start=Pbeg)
 
         #precompute the r first polynomial derivatives
         dPs = []
         for p in Pp:
             dP = [p]
-            for i in xrange(r):
+            for i in range(r):
                 p = p.diff()
                 dP.append(p)
             dPs.append(dP)
@@ -354,32 +355,32 @@ class SymmetricKernelGenerator(object):
 
         ### Continuity equations (Gamma is Cr continuous)
         # Parity in x=0 -- Gamma is EVEN     -> (r+1)//2 eqs
-        for d in xrange(1,r+1,2):
+        for d in range(1, r+1,2):
             eq = coeffs[0][d] # =0
             eqs.append(eq)
         # Right-most point, zero derivatives -> (r+1)    eqs
-        for d in xrange(r+1):
-            eq = dPs[-1][d].xreplace({x:Ms}) # =0
+        for d in range(r+1):
+            eq = dPs[-1][d].xreplace({x:sm.Integer(Ms)}) # =0
             eqs.append(eq.as_expr())
         # Cr-continuity on inner points      -> (Ms-1)*(r+1) eqs
-        for d in xrange(r+1):
-            for i in xrange(Ms-1):
-                eq = dPs[i][d].xreplace({x:i+1}) - dPs[i+1][d].xreplace({x:i+1}) # = 0
+        for d in range(r+1):
+            for i in range(Ms-1):
+                eq = dPs[i][d].xreplace({x:sm.Integer(i+1)}) - dPs[i+1][d].xreplace({x:sm.Integer(i+1)}) # = 0
                 eqs.append(eq.as_expr())
 
         ### Interpolation condition on the left -> Ms equations
-        for i in xrange(Ms):
-            eq = Pp[i].xreplace({x:i}) - H[Ms+i] # = 0
+        for i in range(Ms):
+            eq = Pp[i].xreplace({x:sm.Integer(i)}) - H[Ms+i] # = 0
             eqs.append(eq.as_expr())
 
         ### Discrete moments
         s = sm.symbols('s')
-        for m in xrange(0,n+1):
+        for m in range(0, n+1):
             expr = f2q(0)
-            for l in xrange(-Ms+1,Ms+1):
+            for l in range(-Ms+1, Ms+1):
                 if m>0 and l==0: continue
                 i = Ms-l
-                e = P[i].xreplace({x:s-f2q(l)})
+                e = P[i].xreplace({x:s-f2q(l)}).as_expr()
                 if m>0: e *= f2q(l**m)
                 expr += e
             Pm = sm.polys.polytools.poly(expr,s)
@@ -387,23 +388,23 @@ class SymmetricKernelGenerator(object):
                 eqs.append(Cmi-M[m][i])
 
         if verbose:
-            print '  => System built.'
+            print('  => System built.')
 
         unknowns = [c for cl in coeffs for c in cl]\
                   +[m for ml in M for m in ml if isinstance(m,sm.Symbol)]
         if verbose:
-            print '  Unknowns: ',unknowns
+            print('  Unknowns: ',unknowns)
 
         sol = sm.solve(eqs,unknowns)
         if len(sol)!=len(unknowns):
             if verbose:
-                print 'sol=',sol
+                print('sol=',sol)
             update_cache(Kernel.cache_file(), key, self.SINGULAR_SYSTEM)
             raise RuntimeError('Could not solve linear system.')
         elif verbose:
-            print '  => System solved.'
+            print('  => System solved.')
             for k in sorted(sol.keys(), key=lambda x: x.name):
-                print '  {}: {}'.format(k, sol[k])
+                print('  {}: {}'.format(k, sol[k]))
 
         for i,Pix in enumerate(P):
             P[i] = Pix.xreplace(sol)
@@ -422,10 +423,10 @@ if __name__=='__main__':
     sg = StencilGenerator()
     sg.configure(dim=1, derivative=2)
 
-    for i in xrange(1,5):
+    for i in range(1, 5):
         p = 2*i
         H = sg.generate_exact_stencil(order=p-1, origin=i)
-        print H.coeffs
+        print(H.coeffs)
         assert H.is_centered()
         H = [0] + H.coeffs.tolist() + [0]
         kg = SymmetricKernelGenerator(verbose).configure(p, H=H)
@@ -434,17 +435,18 @@ if __name__=='__main__':
             try:
                 kernels.append(kg.solve(r,override_cache=False))
             except RuntimeError:
-                print 'Solver failed fro p={} and r={}.'.format(p,r)
+                print('Solver failed for p={} and r={}.'.format(p,r))
 
         if len(kernels)==0:
             continue
+        continue
         k0 = kernels[0]
 
         fig = plt.figure()
         plt.xlabel(r'$x$')
         plt.ylabel(r'$\Lambda_{'+'{},{}'.format(p,'r')+'}$')
         X = np.linspace(-k0.Ms-1,+k0.Ms+1,1000)
-        s = plt.subplot(1,1,1)
+        s = plt.subplot(1,1,1, label=i)
         for i,k in enumerate(kernels):
             s.plot(X,k(X),label=r'$\Lambda_{'+'{},{}'.format(p,k.r)+'}$')
         s.plot(k0.I,k0.H,'or')
diff --git a/hysop/numerics/remesh/remesh.py b/hysop/numerics/remesh/remesh.py
index c830e0d1dbce0a9c7e844a4b25693e1987a28a3e..a53b27952edb8cd9fa28def5bfe93b1b99940287 100644
--- a/hysop/numerics/remesh/remesh.py
+++ b/hysop/numerics/remesh/remesh.py
@@ -1,3 +1,5 @@
+import sympy as sm
+
 from hysop.constants import __VERBOSE__, __DEBUG__
 from hysop.tools.enum import EnumFactory
 from hysop.tools.types import check_instance
@@ -42,8 +44,7 @@ class RemeshKernel(Kernel):
             'Only lambda remesh kernels are supported.'
         if remesh in ('M4', 'M8'):
             # given M4 or M8 kernels
-            from hysop.deps import sm
-            x = sm.abc.x
+            x = sm.Symbol('x')
             if remesh == 'M4':
                 M4 = (sm.Poly((1/sm.Rational(6))*((2-x)**3-4*(1-x)**3), x),
                       sm.Poly((1/sm.Rational(6))*((2-x)**3), x))
@@ -70,7 +71,7 @@ if __name__ == '__main__':
     import numpy as np
     from matplotlib import pyplot as plt
 
-    for i in xrange(1, 5):
+    for i in range(1, 5):
         p = 2*i
         kernels = []
         for r in [1, 2, 4, 8]:
@@ -78,7 +79,7 @@ if __name__ == '__main__':
                 kernel = RemeshKernel(p, r)
                 kernels.append(kernel)
             except RuntimeError:
-                print 'Solver failed for p={} and r={}.'.format(p, r)
+                print('Solver failed for p={} and r={}.'.format(p, r))
 
         if len(kernels) == 0:
             continue
diff --git a/hysop/numerics/splitting/directional_splitting.py b/hysop/numerics/splitting/directional_splitting.py
index 694322f9ec655921a95f67a8e8c5802aeef1e492..2a2228b9e0200bdd43d17c390af3b45bd25df85f 100644
--- a/hysop/numerics/splitting/directional_splitting.py
+++ b/hysop/numerics/splitting/directional_splitting.py
@@ -7,6 +7,11 @@ from hysop.tools.types import first_not_None
 
 class DirectionalSplitting(ComputationalGraphNodeGenerator):
 
+    @debug
+    def __new__(cls, splitting_dim, extra_kwds, **kargs):
+        return super(DirectionalSplitting, cls).__new__(cls,
+                candidate_input_tensors=None, candidate_output_tensors=None, **kargs)
+
     @debug
     def __init__(self, splitting_dim, extra_kwds, **kargs):
         super(DirectionalSplitting, self).__init__(candidate_input_tensors=None,
diff --git a/hysop/numerics/splitting/strang.py b/hysop/numerics/splitting/strang.py
index 3d9fbec979d86687ae6ec43c3d5000e9d166326f..ac842f22e1eda6eca4fcaf78d1816085c7c12a62 100644
--- a/hysop/numerics/splitting/strang.py
+++ b/hysop/numerics/splitting/strang.py
@@ -8,6 +8,12 @@ StrangOrder = EnumFactory.create('StrangOrder',
 
 class StrangSplitting(DirectionalSplitting):
 
+    @debug
+    def __new__(cls, splitting_dim, order, extra_kwds=None, **kargs):
+        return super(StrangSplitting, cls).__new__(cls,
+                splitting_dim=splitting_dim,
+                extra_kwds=extra_kwds, **kargs)
+
     @debug
     def __init__(self, splitting_dim, order, extra_kwds=None, **kargs):
         super(StrangSplitting,self).__init__(splitting_dim=splitting_dim,
@@ -24,12 +30,12 @@ class StrangSplitting(DirectionalSplitting):
             msg=msg.format(order)
             raise ValueError(msg)
 
-        directions = range(splitting_dim)
+        directions = tuple(range(splitting_dim))
         if order==1:
             dt_coeff = (1.0,)*splitting_dim
         elif order==2:
             dt_coeff = (0.5,)*(2*splitting_dim)
-            directions += range(splitting_dim-1,-1,-1)
+            directions += tuple(range(splitting_dim-1,-1,-1))
         self.directions = directions
         self.dt_coeff = dt_coeff
 
diff --git a/hysop/numerics/splitting/test/test_strang.py b/hysop/numerics/splitting/test/test_strang.py
index b6c543fb082f7523ca16d127dc3dc88b3f9c51cf..719b3a6b0f18b76adc996a69533974cc87bc7803 100644
--- a/hysop/numerics/splitting/test/test_strang.py
+++ b/hysop/numerics/splitting/test/test_strang.py
@@ -40,7 +40,7 @@ class TestStrang(object):
                     name='W', is_vector=True)
 
         scalars = [ Field(domain=box, name='S{}'.format(i))
-                           for i in xrange(nscalars)]
+                           for i in range(nscalars)]
 
         return box, velo, vorti, tuple(scalars)
 
diff --git a/hysop/numerics/stencil/stencil.py b/hysop/numerics/stencil/stencil.py
index 5cbb22ca253901e3cd0afac320507d5023b7dc94..fab19aa662bdfbd2bd0bb30aa1ce1176e67c422d 100644
--- a/hysop/numerics/stencil/stencil.py
+++ b/hysop/numerics/stencil/stencil.py
@@ -7,7 +7,12 @@
 
 """
 
-from hysop.deps import sm, sp, it, np, hashlib
+import hashlib
+import numpy as np
+import scipy as sp
+import sympy as sm
+import itertools as it
+
 from hysop.tools.types import check_instance, first_not_None, to_tuple
 from hysop.tools.sympy_utils import recurse_expression_tree
 
@@ -102,7 +107,7 @@ class Stencil(object):
         assert (new_shape.ndim==shape.ndim)
         assert (new_shape>=shape).all()
         assert ((new_shape-shape)%2 == 0).all()
-        zeros = (new_shape-shape)/2
+        zeros = (new_shape-shape)//2
         slc = tuple(slice(z,z+s,1) for (z,s) in zip(zeros, shape))
 
         new_origin = zeros + self.origin
@@ -140,7 +145,7 @@ class Stencil(object):
         assert sdim<=adim, 'Stencil dimension greater than array dimension.'
         assert set(symbols.keys())==self.variables(), 'Missing symbols {}.'.format(self.variables()-set(symbols.keys()))
         out  = first_not_None(out, np.empty_like(a[iview]))
-        axis = first_not_None(to_tuple(axis), range(adim)[-sdim:])
+        axis = first_not_None(to_tuple(axis), tuple(range(adim))[-sdim:])
         assert len(axis) == sdim
         assert out.ndim  == a.ndim
         assert out.shape == a[iview].shape
@@ -181,7 +186,7 @@ class Stencil(object):
         else:
             mask = (coeffs==0)
         keep_mask = np.ones_like(coeffs,dtype=bool)
-        for d in xrange(dim):
+        for d in range(dim):
             laccess = [slice(None)]*dim
             raccess = [slice(None)]*dim
             laccess[d] = slice(0,1)
@@ -276,8 +281,8 @@ class Stencil(object):
             value = factor*self.coeffs[x]
             return (offset,value)
         iterator = np.ndindex(self.shape)
-        iterator = it.imap(mapfun, iterator)
-        iterator = it.ifilter(lambda x: x[1]!=0, iterator)
+        iterator = map(mapfun, iterator)
+        iterator = filter(lambda x: x[1]!=0, iterator)
         return iterator
 
     def refactor(self, factor):
@@ -377,7 +382,7 @@ class Stencil(object):
             return True
         else:
             access = self.origin.tolist()
-            for i in xrange(self.dim):
+            for i in range(self.dim):
                 acc = [x for x in access]
                 acc[i] = slice(0,self.shape[i])
                 mask[acc] = False
@@ -425,9 +430,9 @@ class CenteredStencil(Stencil):
         shape = np.asarray(coeffs.shape)
         if (shape%2==0).any():
             raise ValueError('Shape compnonent even!')
-        if (origin!=(shape-1)/2).any():
-            print origin
-            print (shape-1)/2
+        if (origin!=(shape-1)//2).any():
+            print(origin)
+            print((shape-1)//2)
             raise ValueError('Origin is not centered!')
         super(CenteredStencil,self).__init__(coeffs, origin, order, dx, factor, error, **kwds)
 
diff --git a/hysop/numerics/stencil/stencil_generator.py b/hysop/numerics/stencil/stencil_generator.py
index 7bd1e91f5e42fc32a65e4db2ee940d6417690cd5..7e2d9e1200a168cf5ef7910ed94e06a37b2ba947 100644
--- a/hysop/numerics/stencil/stencil_generator.py
+++ b/hysop/numerics/stencil/stencil_generator.py
@@ -4,8 +4,17 @@
 * :class:`~hysop.numerics.stencil.StencilGenerator`
 
 """
-import fractions
-from hysop.deps              import it, np, sp, sm, os, copy, math, gzip, pickle
+import fractions, os, copy, math, gzip
+import itertools as it
+import numpy as np
+import scipy as sp
+import sympy as sm
+
+try:
+   import cPickle as pickle
+except ImportError:
+   import pickle
+
 from hysop.tools.misc        import prod
 from hysop.tools.io_utils    import IO
 from hysop.tools.numerics    import MPQ, MPZ, MPFR, F2Q, mpqize, mpq, mpz
@@ -148,16 +157,16 @@ class StencilGeneratorConfiguration(object):
 
         if mask==StencilGenerator.CROSS:
             mask = np.zeros(shape,dtype=bool)
-            for d in xrange(dim):
-                access = [slice(origin[dd],origin[dd]+1) for dd in xrange(dim)]
+            for d in range(dim):
+                access = [slice(origin[dd],origin[dd]+1) for dd in range(dim)]
                 access[d] = slice(None)
                 access = tuple(access)
                 mask[access] = True
                 mask[tuple(access)] = True
         elif mask==StencilGenerator.DIAG:
             mask = np.ones(shape,dtype=bool)
-            for d in xrange(dim):
-                access = [slice(origin[dd],origin[dd]+1) for dd in xrange(dim)]
+            for d in range(dim):
+                access = [slice(origin[dd],origin[dd]+1) for dd in range(dim)]
                 access[d] = slice(None)
                 access = tuple(access)
                 mask[access] = False
@@ -182,7 +191,7 @@ StencilGeneratorConfiguration
     mask_type:  {}
     mask:       {}
     shape:      {}
-'''.format(self.dim, self.dtype, self.dx, self.user_eqs, 
+'''.format(self.dim, self.dtype, self.dx, self.user_eqs,
            self.derivative, self.order, self.mask_type, self._mask,
            self.shape())
         return ss
@@ -254,7 +263,7 @@ class StencilGenerator(object):
         >>> generator = StencilGenerator(dim=1,dtype=np.float64)
         >>> s0 = generator.generate_approximative_stencil(origin=0, derivative=1, min_order=1)
         >>> s1 = generator.generate_approximative_stencil(origin=0, derivative=1, min_order=2)
-        >>> print '{}\\n{}'.format(s0.coeffs,s1.coeffs)
+        >>> print('{}\\n{}'.format(s0.coeffs,s1.coeffs))
         [-1.  1.]
         [-1.5  2.  -0.5]
 
@@ -265,7 +274,7 @@ class StencilGenerator(object):
         >>> generator = StencilGenerator(dim=1)
         >>> s0 = generator.generate_exact_stencil(origin=-1, derivative=1, min_order=1)
         >>> s1 = generator.generate_exact_stencil(origin=-1, derivative=1, min_order=2)
-        >>> print '{}\\n{}'.format(s0.coeffs,s1.coeffs)
+        >>> print('{}\\n{}'.format(s0.coeffs,s1.coeffs))
         [-1 1]
         [1/2 -2 3/2]
 
@@ -275,7 +284,7 @@ class StencilGenerator(object):
         >>> from hysop.numerics.stencil import StencilGenerator
         >>> generator = StencilGenerator(dim=2)
         >>> laplacian = generator.generate_exact_stencil(origin=1, derivative=2, min_order=2)
-        >>> print laplacian.coeffs
+        >>> print(laplacian.coeffs)
         [[0 1 0]
          [1 -4 1]
          [0 1 0]]
@@ -299,7 +308,7 @@ class StencilGenerator(object):
     def _hash_equations(cls, eqs):
         hasher = cls._hash_algorithm()
         for e in eqs:
-            hasher.update(str(e))
+            hasher.update(str(e).encode('utf-8'))
         return hasher.hexdigest()
 
     def get_config(self):
@@ -344,26 +353,26 @@ class StencilGenerator(object):
             solve_dtype = np.float64
         else:
             solve_dtype = dtype
-        
+
         dx     = config.dx[0]
         k      = config.derivative[0]
         order  = config.order[0]
-        
+
         N      = config.shape()[0]
         origin = StencilGenerator._format_origin(origin,N)
 
         L      = config.L(origin)
         R      = config.R(origin)
-        
+
         if k == 0:
             return Stencil([1],[0],0,dx=dx,error=None)
-        
+
         A = np.empty((N,N),dtype=solve_dtype)
         b = np.empty(N,dtype=solve_dtype)
-        for i in xrange(N):
-            b[i] = solve_dtype(long(i==k))
-            for j in xrange(N):
-                A[i,j] = solve_dtype(long((j-origin)**i))
+        for i in range(N):
+            b[i] = solve_dtype(int(i==k))
+            for j in range(N):
+                A[i,j] = solve_dtype(int((j-origin)**i))
 
         try:
             if has_flint:
@@ -375,18 +384,18 @@ class StencilGenerator(object):
             else:
                 S = sp.linalg.solve(A,b,overwrite_a=True,overwrite_b=True)
         except:
-            print '\nError: Cannot generate stencil (singular system).\n'
+            print('\nError: Cannot generate stencil (singular system).\n')
             raise
 
         S *= math.factorial(k)
-        
+
         actual_dtype = type(S.ravel()[0])
         target_dtype = dtype
         if actual_dtype != target_dtype:
             if target_dtype in [np.float16, np.float32, np.float64]:
                 if has_flint and (actual_dtype is flint.fmpq):
                     def convert(x):
-                        return target_dtype(float(long(x.p)) / float(long(x.q)))
+                        return target_dtype(float(int(x.p)) / float(int(x.q)))
                     S = np.vectorize(convert)(S)
                 else:
                     S = S.astype(target_dtype)
@@ -439,7 +448,7 @@ class StencilGenerator(object):
         if len(user_eqs)==0:
             for i,d in enumerate(derivative):
                 if dim>1:
-                    access = [slice(0,1) for _ in xrange(dim)]
+                    access = [slice(0,1) for _ in range(dim)]
                     access[i] = slice(d,d+1)
                     access = tuple(access)
                     user_eqs[df[access].ravel()[0]]=1
@@ -448,7 +457,7 @@ class StencilGenerator(object):
 
         def taylor(df,dx,N):
             expr = 0
-            for n in xrange(max(N)):
+            for n in range(max(N)):
                 expr += taylorn(df,dx,n,N)
             return expr
 
@@ -458,9 +467,9 @@ class StencilGenerator(object):
             def preficate(it):
                 return (it<=N).all() and sum(it)==n
 
-            nn = range(n+1)
-            itd = it.product(nn,repeat=dim)
-            itd = it.ifilter(preficate,itd)
+            nn  = range(n+1)
+            itd = it.product(nn, repeat=dim)
+            itd = filter(preficate, itd)
             expr = 0
             for der in itd:
                 expr += taylorn_term(df,dx,der)
@@ -502,7 +511,7 @@ class StencilGenerator(object):
                     order+=1
                 i+=1
             eqs = []
-            for k,eq in factor_split(err,df_vars).iteritems():
+            for k,eq in factor_split(err,df_vars).items():
                 if isinstance(eq,int):
                     continue
                 eq = sm.simplify(eq.xreplace(sol))
@@ -537,7 +546,7 @@ class CenteredStencilGenerator(StencilGenerator):
         config.configure(**kargs)
         shape = config.shape()
 
-        origin = (shape-1)/2
+        origin = (shape-1)//2
         stencil = super(CenteredStencilGenerator,self)\
                 .generate_exact_stencil(origin=origin, **kargs)
         if stencil.is_centered():
@@ -549,8 +558,8 @@ class CenteredStencilGenerator(StencilGenerator):
         config = self._config.copy()
         config.configure(**kargs)
         shape = config.shape()
-        
-        origin = (shape-1)/2
+
+        origin = (shape-1)//2
         stencil = super(CenteredStencilGenerator,self)\
                 .generate_approximative_stencil(origin, **kargs)
         if stencil.is_centered():
@@ -568,59 +577,59 @@ if __name__ == '__main__':
 
     with printoptions(precision=4):
         sg.configure(dim=1, derivative=0, order=2, dtype=np.float64)
-        print '\ndim=1, 0th derivative, np.float64, approximative, shape=(5,):'
-        for i in xrange(sg.get_config().shape()):
+        print('\ndim=1, 0th derivative, np.float64, approximative, shape=(5,):')
+        for i in range(sg.get_config().shape()):
             stencil = sg.generate_approximative_stencil(origin=i).reshape((5,))
-            print '  origin: {} =>  {} . {}'.format(i, stencil.factor, stencil.coeffs)
+            print('  origin: {} =>  {} . {}'.format(i, stencil.factor, stencil.coeffs))
 
         sg.configure(dim=1, derivative=1, order=2, dtype=np.float64)
-        print '\ndim=1, 1st order first derivative, np.float64, approximative, shape=(5,):'
-        for i in xrange(sg.get_config().shape()):
+        print('\ndim=1, 1st order first derivative, np.float64, approximative, shape=(5,):')
+        for i in range(sg.get_config().shape()):
             stencil = sg.generate_approximative_stencil(origin=i).reshape((5,))
-            print '  origin: {} =>  {} . {}'.format(i, stencil.factor, stencil.coeffs)
+            print('  origin: {} =>  {} . {}'.format(i, stencil.factor, stencil.coeffs))
 
         sg.configure(dim=1, derivative=2, order=2, dtype=np.float64)
-        print '\ndim=1, 2nd order first derivative, np.float64, approximative:'
-        for i in xrange(sg.get_config().shape()):
+        print('\ndim=1, 2nd order first derivative, np.float64, approximative:')
+        for i in range(sg.get_config().shape()):
             stencil = sg.generate_approximative_stencil(origin=i)
-            print '  origin: {} =>  {} . {}'.format(i, stencil.factor, stencil.coeffs)
-        
-        print '\ndim=1, 2nd order first derivative, exact:'
+            print('  origin: {} =>  {} . {}'.format(i, stencil.factor, stencil.coeffs))
+
+        print('\ndim=1, 2nd order first derivative, exact:')
         sg.configure(dtype=MPQ, derivative=1)
-        for i in xrange(sg.get_config().shape()):
+        for i in range(sg.get_config().shape()):
             stencil = sg.generate_exact_stencil(origin=i)
-            print '  origin: {} => {} . {}'.format(i, stencil.factor, stencil.coeffs)
+            print('  origin: {} => {} . {}'.format(i, stencil.factor, stencil.coeffs))
 
-        print '\ndim=1, 2nd order second derivative, exact:'
+        print('\ndim=1, 2nd order second derivative, exact:')
         sg.configure(dtype=MPQ, derivative=2)
-        for i in xrange(sg.get_config().shape()):
+        for i in range(sg.get_config().shape()):
             stencil = sg.generate_exact_stencil(origin=i)
-            print '  origin: {} => {} . {}'.format(i, stencil.factor, stencil.coeffs)
+            print('  origin: {} => {} . {}'.format(i, stencil.factor, stencil.coeffs))
 
-        print '\n 2D Laplacian, 2nd order'
+        print('\n 2D Laplacian, 2nd order')
         sg.configure(dim=2, order=2)
         laplacian = sg.generate_exact_stencil(origin=1)
-        print laplacian.coeffs
+        print(laplacian.coeffs)
 
         laplacian = sg.generate_exact_stencil(origin=1, dx=[sm.Symbol('dy'), sm.Symbol('dx')])
-        print '\n',laplacian.coeffs
+        print('\n',laplacian.coeffs)
 
         df,_ = sg.get_config().symbolic_derivatives()
         user_eqs = {df[0][2] : sm.Symbol('Cx'), df[2][0] : sm.Symbol('Cy')}
         stencil = sg.generate_exact_stencil(origin=1,user_eqs=user_eqs)
-        print '\n',stencil.coeffs
+        print('\n',stencil.coeffs)
 
         sg = CenteredStencilGenerator()
         sg.configure(derivative=2)
-        print '\nCentered second order derivative stencils:'
-        for i in xrange(1,4):
+        print('\nCentered second order derivative stencils:')
+        for i in range(1,4):
             stencil = sg.generate_approximative_stencil(order=2*i, dtype=np.float16)
-            print '  {}'.format(stencil.coeffs)
-        print
-        for i in xrange(1,4):
+            print('  {}'.format(stencil.coeffs))
+        print()
+        for i in range(1,4):
             stencil = sg.generate_approximative_stencil(order=2*i, dtype=MPQ)
-            print '  {}'.format(stencil.coeffs)
-        print
-        for i in xrange(1,4):
+            print('  {}'.format(stencil.coeffs))
+        print()
+        for i in range(1,4):
             stencil = sg.generate_exact_stencil(order=2*i)
-            print '  {}'.format(stencil.coeffs)
+            print('  {}'.format(stencil.coeffs))
diff --git a/hysop/numerics/tests/bench_fft.py b/hysop/numerics/tests/bench_fft.py
index 0a38d06c520f94a0ff9b670c780b43a0d94a1c4a..f038a317b6305992df81a61e65c7db496264641a 100644
--- a/hysop/numerics/tests/bench_fft.py
+++ b/hysop/numerics/tests/bench_fft.py
@@ -1,17 +1,13 @@
 """
 Test of fields defined with an analytic formula.
 """
-import os
-import random
-import gc
-import pyfftw
-import gpyfft
+import os, random, gc, pyfftw, gpyfft
 
+import sympy as sm
 import numpy as np
 import itertools as it
 import pyopencl as cl
 
-from hysop.deps import it, sm, random
 from hysop.constants import Implementation, Backend, HYSOP_REAL
 from hysop.testsenv import __ENABLE_LONG_TESTS__, __HAS_OPENCL_BACKEND__
 from hysop.testsenv import opencl_failed, iter_clenv
@@ -42,12 +38,12 @@ class BenchFFT(object):
     }
     nruns = 2
 
-    print
-    print ':: STARTING FFT BENCH ::'
+    print()
+    print(':: STARTING FFT BENCH ::')
     for (i,cl_env) in enumerate(iter_clenv(all_platforms=True)):
-        print '> Registering opencl backend {} as:\n{}'.format(
-                i, cl_env)
-        print
+        print('> Registering opencl backend {} as:\n{}'.format(
+                i, cl_env))
+        print()
         name = 'clfft{}'.format(i)
         queue = cl.CommandQueue(cl_env.context,
                     properties=cl.command_queue_properties.PROFILING_ENABLE)
@@ -58,8 +54,8 @@ class BenchFFT(object):
                    warn_on_unaligned_output_offset=True)
 
     def _bench_1d(self, dtype):
-        print
-        print '::Benching 1D transforms, precision {}::'.format(dtype.__name__)
+        print()
+        print('::Benching 1D transforms, precision {}::'.format(dtype.__name__))
         nruns = self.nruns
         ctype = float_to_complex_dtype(dtype)
 
@@ -75,25 +71,25 @@ class BenchFFT(object):
             (' C2R: hermitian complex to real transform',  ('irfft',{}), ('cshape','ctype'), ('rshape', 'dtype'), {}),
         ]
         for (itype,stype) in enumerate(types, 1):
-            b = (' DCT-{}: real to real discrete cosine transform {}'.format(stype, itype), 
+            b = (' DCT-{}: real to real discrete cosine transform {}'.format(stype, itype),
                     ('dct',{'type':itype}), ('shape','dtype'), ('shape','dtype'), {'offset':+(itype==1)})
             bench.append(b)
         for (itype,stype) in enumerate(types, 1):
-            b = (' DST-{}: real to real discrete sine transform {}'.format(stype, itype), 
+            b = (' DST-{}: real to real discrete sine transform {}'.format(stype, itype),
                     ('dst',{'type':itype}), ('shape','dtype'), ('shape','dtype'), {'offset':-(itype==1)})
             bench.append(b)
 
         results = {}
         for (descr, fn, Bi, Bo, it_kwds) in bench:
-            print descr
-            for (kind, implementations) in self.implementations.iteritems():
-                for (name, impl) in implementations.iteritems():
+            print(descr)
+            for (kind, implementations) in self.implementations.items():
+                for (name, impl) in implementations.items():
                     results[name] = ()
                     if dtype not in impl.supported_ftypes:
                         continue
-                    print '  {:<8}: '.format(name),
+                    print('  {:<8}: '.format(name), end=' ')
                     for (shape, cshape, rshape, N, Nc, Nr, mk_buffer) in self.iter_shapes(**it_kwds):
-                        print '.',
+                        print('.', end=' ')
                         with stderr_redirected(devnull): # get rid of Intel opencl warnings
                             try:
                                 Bin  = mk_buffer(backend=impl.backend, shape=locals()[Bi[0]], dtype=locals()[Bi[1]]).handle
@@ -101,47 +97,47 @@ class BenchFFT(object):
                                 plan = getattr(impl,fn[0])(a=Bin, out=Bout, **fn[1]).setup()
                                 gc.disable() # disable garbage collector for timing (like timeit)
                                 if (kind==Implementation.OPENCL):
-                                    for i in xrange(nruns):
+                                    for i in range(nruns):
                                         _ = plan.execute()
                                     _.wait()
                                     evt = plan.execute()
                                     evts = (evt,)
-                                    for i in xrange(nruns-1):
+                                    for i in range(nruns-1):
                                         evt = plan.execute(wait_for=[evt])
                                         evts += (evt,)
                                     evt.wait()
-                                    res = OpenClKernelStatistics(events=evts).mean/1e6 # ms
+                                    res = OpenClKernelStatistics(events=evts).mean / 1.0e6 # ms
                                 else:
                                     plan.execute()
                                     with Timer() as t:
-                                        for i in xrange(nruns):
+                                        for i in range(nruns):
                                             plan.execute()
                                     res = (1e3*t.interval) / float(nruns) # ms
                                 res = round(res, 2)
                                 gc.enable()
                             except HysopFFTDataLayoutError as e:
                                 res = DATA_LAYOUT_ERROR
-                                print e
+                                print(e)
                             except MemoryError as e:
-                                print e
+                                print(e)
                                 res = ALLOCATION_ERROR
                             except gpyfft.gpyfftlib.GpyFFT_Error as e:
                                 if str(e)=='MEM_OBJECT_ALLOCATION_FAILURE':
                                     res = ALLOCATION_ERROR
                                 else:
                                     res = UNKNOWN_ERROR
-                                print e
+                                print(e)
                             except Exception as e:
-                                print e
+                                print(e)
                                 res = UNKNOWN_ERROR
                             results[name] += (res,)
-                    print results[name]
+                    print(results[name])
 
 
     def iter_shapes(self, offset=0):
         minj=12
         maxj=27
-        for j in xrange(minj,maxj):
+        for j in range(minj, maxj):
             shape = (2**j+offset,)
             cshape = list(shape)
             cshape[-1] = cshape[-1]//2 + 1
diff --git a/hysop/numerics/tests/test_fft.py b/hysop/numerics/tests/test_fft.py
index e20e217817a24d0908773c471ae192339ac8afa8..b4244926a8507d273562d49771f26af4fa8ddae4 100644
--- a/hysop/numerics/tests/test_fft.py
+++ b/hysop/numerics/tests/test_fft.py
@@ -1,13 +1,12 @@
 """
 Test of fields defined with an analytic formula.
 """
-import random
-import pyfftw
+import random, pyfftw
 
 import numpy as np
+import sympy as sm
 import itertools as it
 
-from hysop.deps import it, sm, random
 from hysop.constants import Implementation, HYSOP_REAL
 from hysop.testsenv import __ENABLE_LONG_TESTS__, __HAS_OPENCL_BACKEND__
 from hysop.testsenv import opencl_failed, iter_clenv
@@ -39,21 +38,20 @@ class TestFFT(object):
         },
         Implementation.OPENCL: {}
     }
-    
+
     if HAS_MKLFFT:
         implementations[Implementation.PYTHON]['mkl'] = MklFFT(warn_on_allocation=False)
 
-    print
-    print ':: STARTING FFT BACKEND TESTS ::'
-    #for (i,cl_env) in enumerate(iter_clenv()):
-        #print '> Registering opencl backend {} as:\n{}'.format(
-                #i, cl_env)
-        #print
-        #name = 'clfft{}'.format(i)
-        #implementations[Implementation.OPENCL][name] = \
-            #GpyFFT(cl_env=cl_env,
-                   #warn_on_allocation=False,
-                   #warn_on_unaligned_output_offset=False)
+    print()
+    print(':: STARTING FFT BACKEND TESTS ::')
+    for (i,cl_env) in enumerate(iter_clenv()):
+        print('> Registering opencl backend {} as:\n{}'.format(i, cl_env))
+        print()
+        name = 'clfft{}'.format(i)
+        implementations[Implementation.OPENCL][name] = \
+            GpyFFT(cl_env=cl_env,
+                   warn_on_allocation=False,
+                   warn_on_unaligned_output_offset=False)
 
     msg_shape = 'Expected output array shape to be {} but got {} for implementation {}.'
     msg_dtype = 'Expected output array dtype to be {} but got {} for implementation {}.'
@@ -66,18 +64,18 @@ class TestFFT(object):
     stop_on_error = True
 
     def _test_1d(self, dtype, failures):
-        print
-        print '::Testing 1D transform, precision {}::'.format(dtype.__name__)
+        print()
+        print('::Testing 1D transform, precision {}::'.format(dtype.__name__))
         eps = np.finfo(dtype).eps
         ctype = float_to_complex_dtype(dtype)
 
         def check_distances(results, eps, report_eps, tag, failures):
             if len(results.keys())==0:
-                print 'no support'
+                print('no support')
                 return
             elif len(results.keys())==1:
-                impl = results.keys()[0]
-                print 'cannot compare'
+                impl = next(iter(results.keys()))
+                print('cannot compare')
                 return
             ss=()
             for (r0,r1) in it.combinations(results.keys(), 2):
@@ -87,9 +85,9 @@ class TestFFT(object):
                     s='|{}-{}|=N.A.'.format(r0,r1)
                     failed=False
                 elif not (E0.shape == E1.shape):
-                        print
-                        msg='Output shapes do not match.'
-                        raise RuntimeError(msg)
+                    print()
+                    msg='Output shapes do not match.'
+                    raise RuntimeError(msg)
                 else:
                     E = results[r1] - results[r0]
                     Einf = np.max(np.abs(E))
@@ -105,19 +103,19 @@ class TestFFT(object):
                 if failed:
                     shape=results[r0].shape
                     failures.setdefault(tag, []).append((r0, r1, shape, Einf, Eeps))
-            print ', '.join(ss)
+            print(', '.join(ss))
             if failed and raise_on_failure:
                 raise RuntimeError
 
 
-        print '\n FORWARD C2C: complex to complex forward transform'
+        print('\n FORWARD C2C: complex to complex forward transform')
         for (shape, cshape, rshape, N, Nc, Nr,
                 ghosts, mk_buffer) in self.iter_shapes():
-            print '   FFT shape={:12s} ghosts={:12s} '.format(shape, str(ghosts)+':'),
+            print('   FFT shape={:12s} ghosts={:12s} '.format(str(shape), str(ghosts)+':'), end=' ')
             Href = np.random.rand(2*N).astype(dtype).view(dtype=ctype).reshape(shape)
             results = {}
-            for (kind, implementations) in self.implementations.iteritems():
-                for (name, impl) in implementations.iteritems():
+            for (kind, implementations) in self.implementations.items():
+                for (name, impl) in implementations.items():
                     if dtype not in impl.supported_ftypes:
                         continue
                     try:
@@ -144,14 +142,14 @@ class TestFFT(object):
                         results[name] = e
             check_distances(results, eps, self.report_eps, 'forward C2C', failures)
 
-        print '\n BACKWARD C2C: complex to complex backward transform'
+        print('\n BACKWARD C2C: complex to complex backward transform')
         for (shape, cshape, rshape, N, Nc, Nr,
                 ghosts, mk_buffer) in self.iter_shapes():
-            print '   IFFT shape={:12s} ghosts={:12s} '.format(shape, str(ghosts)+':'),
+            print('   IFFT shape={:12s} ghosts={:12s} '.format(str(shape), str(ghosts)+':'), end=' ')
             Href = np.random.rand(2*N).astype(dtype).view(dtype=ctype).reshape(shape)
             results = {}
-            for (kind, implementations) in self.implementations.iteritems():
-                for (name, impl) in implementations.iteritems():
+            for (kind, implementations) in self.implementations.items():
+                for (name, impl) in implementations.items():
                     if dtype not in impl.supported_ftypes:
                         continue
                     try:
@@ -178,14 +176,14 @@ class TestFFT(object):
                         results[name] = e
             check_distances(results, eps, self.report_eps, 'backward C2C', failures)
 
-        print '\n FORWARD R2C: real to hermitian complex transform'
+        print('\n FORWARD R2C: real to hermitian complex transform')
         for (shape, cshape, rshape, N, Nc, Nr,
                 ghosts, mk_buffer) in self.iter_shapes():
-            print '   RFFT shape={:12s} ghosts={:12s} '.format(shape, str(ghosts)+':'),
+            print('   RFFT shape={:12s} ghosts={:12s} '.format(str(shape), str(ghosts)+':'), end=' ')
             Href = np.random.rand(*shape).astype(dtype).reshape(shape)
             results = {}
-            for (kind, implementations) in self.implementations.iteritems():
-                for (name, impl) in implementations.iteritems():
+            for (kind, implementations) in self.implementations.items():
+                for (name, impl) in implementations.items():
                     if dtype not in impl.supported_ftypes:
                         continue
                     try:
@@ -212,14 +210,14 @@ class TestFFT(object):
                         results[name] = e
             check_distances(results, eps, self.report_eps, 'R2C', failures)
 
-        print '\n BACKWARD C2R: real to hermitian complex transform'
+        print('\n BACKWARD C2R: real to hermitian complex transform')
         for (shape, cshape, rshape, N, Nc, Nr,
                 ghosts, mk_buffer) in self.iter_shapes():
-            print '   IRFFT shape={:12s} ghosts={:12s} '.format(shape, str(ghosts)+':'),
+            print('   IRFFT shape={:12s} ghosts={:12s} '.format(str(shape), str(ghosts)+':'), end=' ')
             Href = np.random.rand(2*Nc).astype(dtype).view(dtype=ctype).reshape(cshape)
             results = {}
-            for (kind, implementations) in self.implementations.iteritems():
-                for (name, impl) in implementations.iteritems():
+            for (kind, implementations) in self.implementations.items():
+                for (name, impl) in implementations.items():
                     if dtype not in impl.supported_ftypes:
                         continue
                     try:
@@ -246,15 +244,14 @@ class TestFFT(object):
                         results[name] = e
             check_distances(results, eps, self.report_eps, 'normal C2R', failures)
 
-        print ('\n BACKWARD FORCED C2R: real to hermitian complex transform with specified '
-                +'shape')
+        print('\n BACKWARD FORCED C2R: real to hermitian complex transform with specified shape')
         for (shape, cshape, rshape, N, Nc, Nr,
                 ghosts, mk_buffer) in self.iter_shapes():
-            print '   IRFFT shape={:12s} ghosts={:12s} '.format(shape, str(ghosts)+':'),
+            print('   IRFFT shape={:12s} ghosts={:12s} '.format(str(shape), str(ghosts)+':'), end=' ')
             Href = np.random.rand(2*Nc).astype(dtype).view(dtype=ctype).reshape(cshape)
             results = {}
-            for (kind, implementations) in self.implementations.iteritems():
-                for (name, impl) in implementations.iteritems():
+            for (kind, implementations) in self.implementations.items():
+                for (name, impl) in implementations.items():
                     if dtype not in impl.supported_ftypes:
                         continue
                     try:
@@ -283,17 +280,17 @@ class TestFFT(object):
 
         types = ['I  ','II ','III','IV ']
         for (itype,stype) in enumerate(types, 1):
-            print '\n DCT-{}: real to real discrete cosine transform {}'.format(
-                    stype.strip(), itype)
+            print('\n DCT-{}: real to real discrete cosine transform {}'.format(
+                    stype.strip(), itype))
             for (shape, cshape, rshape, N, Nc, Nr,
                     ghosts, mk_buffer) in self.iter_shapes():
-                print '   DCT-{} shape={:12s} ghosts={:12s} '.format(stype, shape, str(ghosts)+':'),
+                print('   DCT-{} shape={:12s} ghosts={:12s} '.format(stype, str(shape), str(ghosts)+':'), end=' ')
                 if (itype==1): # real size is 2*(N-1)
                     shape = mk_shape(shape, -1, shape[-1] + 1)
                 Href = np.random.rand(*shape).astype(dtype).reshape(shape)
                 results = {}
-                for (kind, implementations) in self.implementations.iteritems():
-                    for (name, impl) in implementations.iteritems():
+                for (kind, implementations) in self.implementations.items():
+                    for (name, impl) in implementations.items():
                         if dtype not in impl.supported_ftypes:
                             continue
                         if itype not in impl.supported_cosine_transforms:
@@ -325,17 +322,17 @@ class TestFFT(object):
 
         for (itype,stype) in enumerate(types, 1):
             iitype = [1,3,2,4][itype-1]
-            print '\n IDCT-{}: real to real inverse discrete cosine transform {}'.format(
-                    stype.strip(), itype)
+            print('\n IDCT-{}: real to real inverse discrete cosine transform {}'.format(
+                    stype.strip(), itype))
             for (shape, cshape, rshape, N, Nc, Nr,
                     ghosts, mk_buffer) in self.iter_shapes():
-                print '   IDCT-{} shape={:12s} ghosts={:12s} '.format(stype, shape, str(ghosts)+':'),
+                print('   IDCT-{} shape={:12s} ghosts={:12s} '.format(stype, str(shape), str(ghosts)+':'), end=' ')
                 if (iitype==1): # real size is 2*(N-1)
                     shape = mk_shape(shape, -1, shape[-1] + 1)
                 Href = np.random.rand(*shape).astype(dtype).reshape(shape)
                 results = {}
-                for (kind, implementations) in self.implementations.iteritems():
-                    for (name, impl) in implementations.iteritems():
+                for (kind, implementations) in self.implementations.items():
+                    for (name, impl) in implementations.items():
                         if dtype not in impl.supported_ftypes:
                             continue
                         if iitype not in impl.supported_cosine_transforms:
@@ -367,17 +364,17 @@ class TestFFT(object):
 
         types = ['I  ','II ','III','IV ']
         for (itype,stype) in enumerate(types, 1):
-            print '\n DST-{}: real to real discrete sine transform {}'.format(
-                    stype.strip(), itype)
+            print('\n DST-{}: real to real discrete sine transform {}'.format(
+                    stype.strip(), itype))
             for (shape, cshape, rshape, N, Nc, Nr,
                     ghosts, mk_buffer) in self.iter_shapes():
-                print '   DST-{} shape={:12s} ghosts={:12s} '.format(stype, shape, str(ghosts)+':'),
+                print('   DST-{} shape={:12s} ghosts={:12s} '.format(stype, str(shape), str(ghosts)+':'), end=' ')
                 if (itype==1): # real size will be 2*(N+1)
                     shape = mk_shape(shape, -1, shape[-1] - 1)
                 Href = np.random.rand(*shape).astype(dtype).reshape(shape)
                 results = {}
-                for (kind, implementations) in self.implementations.iteritems():
-                    for (name, impl) in implementations.iteritems():
+                for (kind, implementations) in self.implementations.items():
+                    for (name, impl) in implementations.items():
                         if dtype not in impl.supported_ftypes:
                             continue
                         if itype not in impl.supported_sine_transforms:
@@ -409,17 +406,17 @@ class TestFFT(object):
 
         for (itype,stype) in enumerate(types, 1):
             iitype = [1,3,2,4][itype-1]
-            print '\n IDST-{}: real to real inverse discrete sine transform {}'.format(
-                    stype.strip(), itype)
+            print('\n IDST-{}: real to real inverse discrete sine transform {}'.format(
+                    stype.strip(), itype))
             for (shape, cshape, rshape, N, Nc, Nr,
                     ghosts, mk_buffer) in self.iter_shapes():
-                print '   IDST-{} shape={:12s} ghosts={:12s} '.format(stype, shape, str(ghosts)+':'),
+                print('   IDST-{} shape={:12s} ghosts={:12s} '.format(stype, str(shape), str(ghosts)+':'), end=' ')
                 if (iitype==1): # real size will be 2*(N+1)
                     shape = mk_shape(shape, -1, shape[-1] - 1)
                 Href = np.random.rand(*shape).astype(dtype).reshape(shape)
                 results = {}
-                for (kind, implementations) in self.implementations.iteritems():
-                    for (name, impl) in implementations.iteritems():
+                for (kind, implementations) in self.implementations.items():
+                    for (name, impl) in implementations.items():
                         if dtype not in impl.supported_ftypes:
                             continue
                         if iitype not in impl.supported_sine_transforms:
@@ -453,15 +450,15 @@ class TestFFT(object):
 
 
     def _test_forward_backward_1d(self, dtype):
-        print
-        print '::Testing 1D forward-backward transforms, precision {}::'.format(dtype.__name__)
+        print()
+        print('::Testing 1D forward-backward transforms, precision {}::'.format(dtype.__name__))
         eps = np.finfo(dtype).eps
         ctype = float_to_complex_dtype(dtype)
 
         def check_distances(distances):
             failed = False
             ss=()
-            for (name, Einf) in distances.iteritems():
+            for (name, Einf) in distances.items():
                 if isinstance(Einf, HysopFFTDataLayoutError):
                     s='{}=UNSUPPORTED_STRIDES'.format(name)
                 elif np.isfinite(Einf):
@@ -473,21 +470,21 @@ class TestFFT(object):
                     failed |= True
                     s='{}={}'.format(name,str(Eeps).upper())
                 ss += (s,)
-            print ', '.join(ss)
+            print(', '.join(ss))
             if failed:
-                print
+                print()
                 msg='Some implementations failed !'
                 if raise_on_failure:
                     raise RuntimeError(msg)
 
-        print '\n C2C-C2C transform'
+        print('\n C2C-C2C transform')
         for (shape, cshape, rshape, N, Nc, Nr,
                 ghosts, mk_buffer) in self.iter_shapes():
-            print '   X - IFFT(FFT(X)) shape={:12s} ghosts={:12s}'.format(shape, str(ghosts)+':'),
+            print('   X - IFFT(FFT(X)) shape={:12s} ghosts={:12s}'.format(str(shape), str(ghosts)+':'), end=' ')
             Href = np.random.rand(2*N).astype(dtype).view(dtype=ctype).reshape(shape)
             results = {}
-            for (kind, implementations) in self.implementations.iteritems():
-                for (name, impl) in implementations.iteritems():
+            for (kind, implementations) in self.implementations.items():
+                for (name, impl) in implementations.items():
                     if dtype not in impl.supported_ftypes:
                         continue
                     D0 = mk_buffer(backend=impl.backend, shape=shape, dtype=ctype)
@@ -510,14 +507,14 @@ class TestFFT(object):
                         results[name] = e
             check_distances(results)
 
-        print '\n R2C-C2R transform'
+        print('\n R2C-C2R transform')
         for (shape, cshape, rshape, N, Nc, Nr,
                 ghosts, mk_buffer) in self.iter_shapes():
-            print '   X - IRFFT(RFFT(X)) shape={:12s} ghosts={:12s} '.format(shape, str(ghosts)+':'),
+            print('   X - IRFFT(RFFT(X)) shape={:12s} ghosts={:12s} '.format(str(shape), str(ghosts)+':'), end=' ')
             Href = np.random.rand(*shape).astype(dtype).reshape(shape)
             results = {}
-            for (kind, implementations) in self.implementations.iteritems():
-                for (name, impl) in implementations.iteritems():
+            for (kind, implementations) in self.implementations.items():
+                for (name, impl) in implementations.items():
                     if dtype not in impl.supported_ftypes:
                         continue
                     D0 = mk_buffer(backend=impl.backend, shape=shape,  dtype=dtype)
@@ -540,22 +537,22 @@ class TestFFT(object):
                         results[name] = e
             check_distances(results)
 
-        print '\n R2R-R2R transforms'
+        print('\n R2R-R2R transforms')
 
         types = ['I  ','II ','III','IV ']
         for (itype,stype) in enumerate(types, 1):
-            print '\n DCT-{}: real to real discrete cosine transform {}'.format(
-                    stype.strip(), itype)
+            print('\n DCT-{}: real to real discrete cosine transform {}'.format(
+                    stype.strip(), itype))
             ttype = 'COS{}'.format(itype)
             for (shape, cshape, rshape, N, Nc, Nr,
                     ghosts, mk_buffer) in self.iter_shapes():
-                print '   X - I{}({}(X)) shape={:12s} ghosts={:12s} '.format(ttype, ttype, shape, str(ghosts)+':'),
+                print('   X - I{}({}(X)) shape={:12s} ghosts={:12s} '.format(ttype, ttype, str(shape), str(ghosts)+':'), end=' ')
                 if (itype==1): # real size is 2*(N-1)
                     shape = mk_shape(shape, -1, shape[-1] + 1)
                 Href = np.random.rand(*shape).astype(dtype).reshape(shape)
                 results = {}
-                for (kind, implementations) in self.implementations.iteritems():
-                    for (name, impl) in implementations.iteritems():
+                for (kind, implementations) in self.implementations.items():
+                    for (name, impl) in implementations.items():
                         iitype = [1,3,2,4][itype-1]
                         if dtype not in impl.supported_ftypes:
                             continue
@@ -584,18 +581,19 @@ class TestFFT(object):
                 check_distances(results)
 
         for (itype,stype) in enumerate(types, 1):
-            print '\n DST-{}: real to real discrete sine transform {}'.format(
-                    stype.strip(), itype)
+            print('\n DST-{}: real to real discrete sine transform {}'.format(
+                    stype.strip(), itype))
             ttype = 'SIN{}'.format(itype)
             for (shape, cshape, rshape, N, Nc, Nr,
                     ghosts, mk_buffer) in self.iter_shapes():
-                print '   X - I{}({}(X)) shape={:12s} ghosts={:12s} '.format(ttype, ttype, shape, str(ghosts)+':'),
+                print('   X - I{}({}(X)) shape={:12s} ghosts={:12s} '.format(ttype, ttype, str(shape), str(ghosts)+':'),
+                        end=' ')
                 if (itype==1): # real size is 2*(N+1)
                     shape = mk_shape(shape, -1, shape[-1] - 1)
                 Href = np.random.rand(*shape).astype(dtype).reshape(shape)
                 results = {}
-                for (kind, implementations) in self.implementations.iteritems():
-                    for (name, impl) in implementations.iteritems():
+                for (kind, implementations) in self.implementations.items():
+                    for (name, impl) in implementations.items():
                         iitype = [1,3,2,4][itype-1]
                         if dtype not in impl.supported_ftypes:
                             continue
@@ -633,11 +631,11 @@ class TestFFT(object):
         def _mk_view(shape, ghosts):
             assert len(shape)==len(ghosts)
             return tuple(slice(Gi, Si+Gi) for (Si,Gi) in zip(shape, ghosts))
-        for i in xrange(2):
+        for i in range(2):
             base = 2+i
-            print '  '+msg[i]
+            print('  '+msg[i])
             for ghosts in ((0,0,0),): #(2,0,0),(0,1,0),(0,0,3)):
-                for j1 in xrange(minj[i],maxj[i]):
+                for j1 in range(minj[i], maxj[i]):
                     shape = (3,2,base**j1,)
                     cshape = list(shape)
                     cshape[-1] = cshape[-1]//2 + 1
@@ -659,27 +657,27 @@ class TestFFT(object):
                     yield (shape, cshape, rshape, N, Nc, Nr, ghosts, mk_buffer)
 
     def report_failures(self, failures):
-        print
-        print '== TEST FAILURES REPORT =='
-        print '  Report error has been set to {} epsilons.'.format(self.report_eps)
-        print '  Test failure has been set to {} epsilons.'.format(self.fail_eps)
+        print()
+        print('== TEST FAILURES REPORT ==')
+        print('  Report error has been set to {} epsilons.'.format(self.report_eps))
+        print('  Test failure has been set to {} epsilons.'.format(self.fail_eps))
         assert self.report_eps <= self.fail_eps
         failed = False
         cnt = 0
-        for (dtype, fails) in failures.iteritems():
+        for (dtype, fails) in failures.items():
             if not fails:
                 continue
-            print '::{} precision tests'.format(dtype)
-            for (tag, tfails) in fails.iteritems():
-                print '  |{} transform errors:'.format(tag)
+            print('::{} precision tests'.format(dtype))
+            for (tag, tfails) in fails.items():
+                print('  |{} transform errors:'.format(tag))
                 for (r0,r1, shape, Einf, Eeps) in sorted(tfails,
                             key=lambda x: -x[-2]):
                     failed |= (Eeps >= self.fail_eps)
                     cnt+=1
                     msg='    {} vs {}:\tshape {}\t->\tE={}\t({} eps)'
                     msg=msg.format(r0, r1, shape, Einf, Eeps)
-                    print msg
-        print '==========================='
+                    print(msg)
+        print('===========================')
 
         if failed:
             msg =''
@@ -687,7 +685,7 @@ class TestFFT(object):
             msg+='\n** One or more test exceeded failure error. **'
             msg+='\n**********************************************'
             msg+='\n'
-            print msg
+            print(msg)
             raise RuntimeError
         else:
             msg =''
@@ -695,7 +693,7 @@ class TestFFT(object):
             msg+='\n** Some tests may have exceeded reporting error. **'
             msg+='\n***************************************************'
             msg+='\n'
-            print msg
+            print(msg)
 
 
     def perform_tests(self):
diff --git a/hysop/old/gpu.old/QtRendering.py b/hysop/old/gpu.old/QtRendering.py
deleted file mode 100644
index 71179be651491ad4890459b0b17af08c02a8461c..0000000000000000000000000000000000000000
--- a/hysop/old/gpu.old/QtRendering.py
+++ /dev/null
@@ -1,325 +0,0 @@
-"""
-@file QtRendering.py
-
-Contains all stuff to perform real-time rendering on GPU.
-"""
-from hysop.constants import debug, np, HYSOP_REAL
-import sys
-from PyQt4 import QtGui, QtCore
-from PyQt4.QtOpenGL import QGLWidget
-import OpenGL.GL as gl
-from hysop.backend.device.opencl.opencl_tools import get_opengl_shared_environment
-from hysop.backend.device.opencl import cl
-from hysop.backend.device.opencl.opencl_discrete import OpenClDiscreteField
-from hysop.backend.device.opencl.opencl_kernel import OpenClKernelLauncher
-from hysop.core.mpi import main_rank
-from hysop.operator.computational import Computational
-from hysop.tools.numpywrappers import npw
-
-
-class QtOpenGLRendering(Computational):
-    """
-    Monitor that performs the rendering.
-
-    Rendering is handled by OpenGL instructions. Context is shared between
-    OpenGL and OpenCL.
-
-    Vertex Buffer Objects are created on the OpenGL side and bound to OpenCL
-    GLBuffers. VBOs store points coordinates and color that contains
-    respectively X,Y coordinates and RGBA color definition. An OpenCL kernel
-    computes once the coordinates and an other colorize regarding values of
-    a given scalar field.
-
-    Redering is displayed asynchronously. Main tread handle Qt application
-    execution. A secondary thread, a QThread, performs the computations loop.
-    A redrawing singal is emmited to synchronize threads.
-
-    @remark Rendering is implemented only for 2D problems
-    @remark Rendering is implemented only in single precision
-
-    @see http://cyrille.rossant.net/2d-graphics-rendering-tutorial-with-pyopengl/
-    @see http://enja.org/2011/03/22/adventures-in-pyopencl-part-2-particles-with-pyopengl/
-    """
-
-    @debug
-    def __init__(self, field, component=0):
-        """
-        Build an OpenGL rendering object.
-
-        @param field : Scalar field to render.
-
-        Store a QApplication and a QMainWindow objects.
-        """
-        super(QtOpenGLRendering, self)._init__([field],
-                                               frequency=1, name="QtRendering")
-        if not field.dimension == 2:
-            raise ValueError("Rendering implemented in 2D only.")
-        ## Qt application
-        self.app = QtGui.QApplication(sys.argv)
-        ## Visualization window
-        self.window = TestWindow()
-        self.isGLRender = True
-        self.input = [field]
-        self.component = component if field.nb_components > 1 else 0
-        self.output = []
-        self.ctime = 0.
-        self.mtime = 0.
-
-    @debug
-    def setup(self):
-        """
-        Create two VBOs buffers: GL_STATIC_DRAW and GL_COLOR_ARRAY.
-        Create two OpenCL GLBuffers bound to VBOs.
-        Pass buffers to QGLWidget and compile OpenCL kernels.
-        """
-        ## GPU scalar field
-        for df in self.variables[0].discrete_fields.values():
-            if isinstance(df, OpenClDiscreteField):
-                self.gpu_field = df
-        # Create OpenGL VBOs
-        ## VBO for coordinates
-        if self.gpu_field.nb_components > 1:
-            self.pos_vbo = gl.glGenBuffers(1)
-            gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.pos_vbo)
-            gl.glBufferData(gl.GL_ARRAY_BUFFER, self.gpu_field.data[self.component].nbytes * 2,
-                            None, gl.GL_STATIC_DRAW)  # gl.GL_DYNAMIC_DRAW
-            gl.glEnableClientState(gl.GL_VERTEX_ARRAY)
-            gl.glVertexPointer(2, gl.GL_FLOAT, 0, None)
-            ## VBO for color
-            self.color_vbo = gl.glGenBuffers(1)
-            gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.color_vbo)
-            gl.glBufferData(gl.GL_ARRAY_BUFFER, self.gpu_field.data[self.component].nbytes * 4,
-                            None, gl.GL_STREAM_DRAW)  # gl.GL_DYNAMIC_DRAW
-            gl.glEnableClientState(gl.GL_COLOR_ARRAY)
-            gl.glColorPointer(4, gl.GL_FLOAT, 0, None)
-        else:
-            self.pos_vbo = gl.glGenBuffers(1)
-            gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.pos_vbo)
-            gl.glBufferData(gl.GL_ARRAY_BUFFER, self.gpu_field.data[0].nbytes * 2,
-                            None, gl.GL_STATIC_DRAW)  # gl.GL_DYNAMIC_DRAW
-            gl.glEnableClientState(gl.GL_VERTEX_ARRAY)
-            gl.glVertexPointer(2, gl.GL_FLOAT, 0, None)
-            ## VBO for color
-            self.color_vbo = gl.glGenBuffers(1)
-            gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.color_vbo)
-            gl.glBufferData(gl.GL_ARRAY_BUFFER, self.gpu_field.data[0].nbytes * 4,
-                            None, gl.GL_STREAM_DRAW)  # gl.GL_DYNAMIC_DRAW
-            gl.glEnableClientState(gl.GL_COLOR_ARRAY)
-            gl.glColorPointer(4, gl.GL_FLOAT, 0, None)
-
-        # Create OpenCL GLBuffers
-        ## OpenCL GLBuffer for coordinates
-        self.pos = cl.GLBuffer(
-            self.window.widget.cl_env.ctx, cl.mem_flags.READ_WRITE,
-            int(self.pos_vbo))
-        ## OpenCL GLBuffer for color
-        self.color = cl.GLBuffer(
-            self.window.widget.cl_env.ctx, cl.mem_flags.READ_WRITE,
-            int(self.color_vbo))
-        # Pass VBO and GLBuffers to the QGLWidget
-        self.window.widget.setup(gl_buffers=[self.pos, self.color],
-                                 color_vbo=self.color_vbo,
-                                 pos_vbo=self.pos_vbo,
-                                 partNumber=np.prod(
-                self.gpu_field.topology.mesh.local_resolution))
-
-        total_mem_used = self.pos.size + \
-            self.color.size
-        print "Total Device Global Memory used  for rendering: ",
-        print total_mem_used, "Bytes (", total_mem_used / (1024 ** 2), "MB)",
-        print "({0:.3f} %)".format(
-            100 * total_mem_used / (
-                self.window.widget.cl_env.device.global_mem_size * 1.))
-        ## OpenCL kernel binaries
-        self.prg = self.window.widget.cl_env.build_src(
-            'kernels/rendering.cl')
-        ## OpenCL kernel for computing coordinates
-        if self.gpu_field.nb_components > 1:
-            gwi = self.gpu_field.data[self.component].shape
-        else:
-            gwi = self.gpu_field.data[0].shape
-        self.initCoordinates = OpenClKernelLauncher(
-            self.prg.initPointCoordinates, self.window.widget.cl_env.queue,
-            gwi, None)
-        ## OpenCL kernel for computing colors
-        self.numMethod = OpenClKernelLauncher(
-            self.prg.colorize, self.window.widget.cl_env.queue,
-            gwi, None)
-
-        self.window.show()
-        ## Text label of the window StatusBar
-        self.labelText = str(self.gpu_field.topology.mesh.local_resolution)
-        self.labelText += " particles, "
-        coord_min = npw.ones(4)
-        mesh_size = npw.ones(4)
-        coord_min[0:2] = npw.asrealarray(self.gpu_field.topology.mesh.origin)
-        mesh_size[0:2] = npw.asrealarray(self.gpu_field.topology.mesh.space_step)
-        self.initCoordinates(self.pos, coord_min, mesh_size)
-
-    @debug
-    def apply(self, simulation):
-        """
-        Update the color GLBuffer and redraw the QGLWidget.
-        """
-        t = simulation.time
-        dt = simulation.time_step
-        if main_rank == 0:
-            simulation.print_state()
-        # OpenCL update
-        self.numMethod(self.gpu_field.gpu_data[self.component],
-                       self.color)
-        self.window.widget.updateGL()
-        if simulation.current_iteration > 1:
-            self.window.label.setText(
-                self.labelText + "t={0:6.2f}, fps={1:6.2f}".format(
-                    t + dt,
-                    1. / (self.timer.f_timers.values()[0].t - self.ctime)))
-        self.ctime = self.timer.f_timers.values()[0].t
-
-    @debug
-    def finalize(self):
-        """
-        Terminates the thread containing the computations loop.
-        """
-        self.thread.quit()
-        self.color.release()
-        self.pos.release()
-
-        if self.initCoordinates.f_timer is not None:
-            for f_timer in self.initCoordinates.f_timer:
-                self.timer.addFunctionTimer(f_timer)
-        if self.numMethod.f_timer is not None:
-            for f_timer in self.numMethod.f_timer:
-                self.timer.addFunctionTimer(f_timer)
-
-    @debug
-    def startMainLoop(self):
-        """
-        Starts the secondary thread, that handle computation loop, and the main
-        Qt application.
-        """
-        self.thread.start()
-        self.theMainLoop.emit(QtCore.SIGNAL("step()"))
-        self.app.exec_()
-
-    def setMainLoop(self, problem):
-        """
-        Set a secondary QThread to performs problem solve computations.
-        Synchronims between threads is done by signal emmission.
-        @param problem : Problem to set the Qt main loop.
-        """
-        def problem_step():
-            if not problem.simulation.is_over:
-                problem.simulation.print_state()
-                for op in problem.operators:
-                    op.apply(problem.simulation)
-                problem.simulation.advance()
-                self.theMainLoop.emit(QtCore.SIGNAL("step()"))
-        ## Object handling main loop in a secondary thread
-        self.theMainLoop = MainLoop(problem_step)
-        ## Secondary thread
-        self.thread = QtCore.QThread()
-        self.theMainLoop.moveToThread(self.thread)
-        QtCore.QObject.connect(self.theMainLoop,
-                               QtCore.SIGNAL("step()"),
-                               self.theMainLoop.step)
-
-
-class MainLoop(QtCore.QObject):
-    """
-    Object that handle steps of the main computational loop.
-    """
-
-    def __init__(self, function):
-        """
-        Set the step function
-
-        @param function : the step function
-        """
-        super(MainLoop, self).__init__()
-        self.function = function
-
-    def step(self):
-        """Call the step function"""
-        self.function()
-
-
-class TestWindow(QtGui.QMainWindow):
-    """
-    Window definiton.
-
-    This window contains a central widget which is the QGLWidget displaying
-    OpenGL buffers and a StatusBar containing simulation informations.
-    """
-
-    def __init__(self):
-        super(TestWindow, self).__init__()
-        self.widget = GLWidget()
-        self.setGeometry(100, 100, self.widget.width, self.widget.height)
-        self.setCentralWidget(self.widget)
-        self.label = QtGui.QLabel("")
-        self.statusBar().setSizeGripEnabled(False)
-        self.statusBar().addWidget(self.label)
-        self.show()
-
-
-class GLWidget(QGLWidget):
-    """
-    Qt widget that display OpenGL content.
-    """
-    def __init__(self):
-        super(GLWidget, self).__init__()
-        self.gl_objects = None
-        self.color_vbo, self.pos_vbo = None, None
-        self.partNumber = None
-        self.width, self.height = 600, 600
-
-    def setup(self, gl_buffers, color_vbo, pos_vbo, partNumber):
-        """Set up VBOs and GLBuffers"""
-        self.gl_objects = gl_buffers
-        self.color_vbo, self.pos_vbo = color_vbo, pos_vbo
-        self.partNumber = partNumber
-
-    @debug
-    def initializeGL(self):
-        """GL content initialization"""
-        self.buffer = gl.glGenBuffers(1)
-        gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.buffer)
-        gl.glBufferData(gl.GL_ARRAY_BUFFER, npw.zeros(2),
-                        gl.GL_DYNAMIC_DRAW)
-        self.cl_env = get_opengl_shared_environment(
-            platform_id=0, device_id=0,device_type='gpu',
-            precision=HYSOP_REAL, resolution=None)
-
-    @debug
-    def paintGL(self):
-        """Drawing function"""
-        if not self.gl_objects is None:
-            cl.enqueue_release_gl_objects(self.cl_env.queue, self.gl_objects)
-            self.cl_env.queue.finish()
-            # OpenGL draw
-            gl.glClear(gl.GL_COLOR_BUFFER_BIT)
-
-            gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.color_vbo)
-            gl.glColorPointer(4, gl.GL_FLOAT, 0, None)
-            gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.pos_vbo)
-            gl.glVertexPointer(2, gl.GL_FLOAT, 0, None)
-
-            gl.glEnableClientState(gl.GL_VERTEX_ARRAY)
-            gl.glEnableClientState(gl.GL_COLOR_ARRAY)
-            gl.glDrawArrays(gl.GL_POINTS, 0, self.partNumber)
-            gl.glDisableClientState(gl.GL_COLOR_ARRAY)
-            gl.glDisableClientState(gl.GL_VERTEX_ARRAY)
-
-            gl.glFlush()
-            cl.enqueue_acquire_gl_objects(self.cl_env.queue, self.gl_objects)
-
-    @debug
-    def resizeGL(self, width, height):
-        """Call on resizing the window"""
-        self.width, self.height = width, height
-        gl.glViewport(0, 0, width, height)
-        gl.glMatrixMode(gl.GL_PROJECTION)
-        gl.glLoadIdentity()
-        gl.glOrtho(0, 1, 0, 1, 0, 1)
-
diff --git a/hysop/old/gpu.old/__init__.py b/hysop/old/gpu.old/__init__.py
deleted file mode 100644
index 8100c6042e1580efbffda9c752ade8bd27434272..0000000000000000000000000000000000000000
--- a/hysop/old/gpu.old/__init__.py
+++ /dev/null
@@ -1,97 +0,0 @@
-"""Everything concerning GPU in hysop.
-
-OpenCL sources are located in the cl_src directory and organized as follows
-  - kernels/
-  Contains kernels src
-    - advection.cl
-    - remeshing.cl
-    - advection_and_remeshing.cl
-    - rendering.cl
-  Functions used by kernels
-  - advection/
-    - builtin.cl
-    - builtin_noVec.cl
-  - remeshing/
-    - basic.cl
-    - basic_noVec.cl
-    - private.cl
-    - weights.cl
-    - weights_builtin.cl
-    - weights_noVec.cl
-    - weights_noVec_builtin.cl
-  - common.cl
-
-Some sources are parsed at build to handle several OpenCL features.
-Other sources are generated and optimized at runtime.
-see hysop.gpu.tools.parse_file
-see hysop.backend.device.codegen
-
-"""
-import os
-import pyopencl
-import pyopencl.tools
-import pyopencl.array
-import pyopencl.characterize
-import pyopencl.reduction
-import pyopencl.clrandom
-import pyopencl.elementwise
-import pyopencl.scan
-
-from hysop import __DEFAULT_PLATFORM_ID__, __DEFAULT_DEVICE_ID__, __VERBOSE__
-
-## open cl underlying implementation
-cl = pyopencl
-"""open cl underlying implementation"""
-
-clTools = pyopencl.tools
-"""PyOpencl tools"""
-
-clArray = pyopencl.array
-"""PyOpenCL arrays"""
-
-clRandom = pyopencl.clrandom
-"""PyOpenCL random"""
-
-clReduction = pyopencl.reduction
-"""PyOpenCL reductions"""
-
-clScan = pyopencl.scan
-"""PyOpenCL scan"""
-
-clElementwise = pyopencl.elementwise
-"""PyOpenCL reductions"""
-
-clCharacterize = pyopencl.characterize
-"""PyOpenCL characterize"""
-
-GPU_SRC = os.path.join(__path__[0], "cl_src", '')
-"""Default path to cl kernels source files"""
-
-KERNEL_DUMP_FOLDER='generated_kernels'
-"""Default folder to dump debug opencl kernel sources"""
-
-CL_PROFILE = False
-"""Boolean, true to enable OpenCL profiling events to time computations"""
-
-if __DEFAULT_PLATFORM_ID__ < len(cl.get_platforms()):
-    default_cl_platform = cl.get_platforms()[__DEFAULT_PLATFORM_ID__]
-else:
-    default_cl_platform = cl.get_platforms()[0]
-    msg= 'Warning: default opencl platform not available,'
-    msg+=' switched to platform {}.'.format(default_cl_platform.name)
-    print msg
-
-if __DEFAULT_DEVICE_ID__ < len(default_cl_platform.get_devices()):
-    default_cl_device   = default_cl_platform.get_devices()[__DEFAULT_DEVICE_ID__]
-else:
-    default_cl_device   = default_cl_platform.get_devices()[0]
-    msg= 'Warning: default device not available,'
-    msg+=' switched to device {}.'.format(default_cl_device.name)
-    print msg
-
-default_cl_context  = cl.Context(devices=[default_cl_device])
-default_cl_queue    = cl.CommandQueue(context=default_cl_context, device=default_cl_device)
-
-default_cl_allocator = clTools.ImmediateAllocator(queue=default_cl_queue, 
-                                                    mem_flags=cl.mem_flags.READ_WRITE)
-default_cl_pool = clTools.MemoryPool(default_cl_allocator)
diff --git a/hysop/old/gpu.old/cl_src/advection/basic_rk2.cl b/hysop/old/gpu.old/cl_src/advection/basic_rk2.cl
deleted file mode 100644
index 559638ff61919ef65d8a5e6c6013320f56976495..0000000000000000000000000000000000000000
--- a/hysop/old/gpu.old/cl_src/advection/basic_rk2.cl
+++ /dev/null
@@ -1,72 +0,0 @@
-/**
- * @file advection/basic.cl
- * Advection function, vectorized version, no use of builtins functions.
- */
-
-float__N__ advection(uint i, float dt, __local float* velocity_cache, __constant struct AdvectionMeshInfo* mesh);
-
-
-/**
- * Compute the position of a particle with a RK2 integration scheme. Velocity is linearly interpolated from the global field.
- * Use of builtin OpenCL functions fma and mix. Computations through OpenCL vector types.
- *
- * @param i Particle index.
- * @param dt Time step.
- * @param dx Space step.
- * @param invdx 1/dx.
- * @param velocity_cache Local velocity cache.
- * @return Particle position.
- *
- * @remark <code>NB_I</code>, <code>NB_II</code>, <code>NB_III</code> : points number in directions from 1st varying index to last.
- * @remark <code>__N__</code> is expanded at compilation time by vector width.
- * @remark <code>__NN__</code> is expanded at compilation time by a sequence of integer for each vector component.
- * @see hysop.gpu.tools.parse_file
- */
-float__N__ advection(uint i, float dt, __local float* velocity_cache, __constant struct AdvectionMeshInfo* mesh)
-{
-  float__N__ v,        		/* Velocity at point */
-    vp,				/* Velocity at right point */
-    p,				/* Intermediary position */
-    c,				/* initial coordinate */
-    hdt = (float__N__)(0.5*dt);	/* half time step */
-  int__N__ i_ind,		/* Interpolation left point */
-    i_ind_p;			/* Interpolation right point */
-
-  c = (float__N__)((i+__NN__)*mesh->dx.x,
-		   );
-  c = c + mesh->min_position;
-
-#if !(ADVEC_IS_MULTISCALE)
-  // single-scale:
-  v = (float__N__)(velocity_cache[noBC_id(i+__NN__)],
-		   );
-#else
-  // multi-scale : interpolate v from velocity buffer (of length V_NB_I)
-  p = c * mesh->v_invdx;
-  i_ind = convert_int__N___rtn(p);
-  p = p - convert_float__N__(i_ind);
-  i_ind = i_ind + V_GHOSTS_NB;
-  i_ind_p = i_ind + 1;
-  v = (float__N__)(velocity_cache[noBC_id(i_ind.s__NN__)],
-		   );
-  vp = (float__N__)(velocity_cache[noBC_id(i_ind_p.s__NN__)],
-		    );
-  v = (p*(vp-v) + v);
-#endif
-
-  p = (c + hdt * v) * mesh->v_invdx;
-
-  i_ind = convert_int__N___rtn(p);
-  p = p - convert_float__N__(i_ind);
-
-  i_ind = ((i_ind + V_GHOSTS_NB + V_NB_I) % V_NB_I);
-  i_ind_p = ((i_ind + 1) % V_NB_I);
-
-  v = (float__N__)(velocity_cache[noBC_id(i_ind.s__NN__)],
-		   );
-  vp = (float__N__)(velocity_cache[noBC_id(i_ind_p.s__NN__)],
-		    );
-  v = (p*(vp-v) + v);
-
-  return c + dt * v;
-}
diff --git a/hysop/old/gpu.old/cl_src/advection/basic_rk2_noVec.cl b/hysop/old/gpu.old/cl_src/advection/basic_rk2_noVec.cl
deleted file mode 100644
index a635dd9d4d064de1157d853e2758b0f18d1eeb7f..0000000000000000000000000000000000000000
--- a/hysop/old/gpu.old/cl_src/advection/basic_rk2_noVec.cl
+++ /dev/null
@@ -1,63 +0,0 @@
-/**
- * @file advection/basic_noVec.cl
- * Advection function, basic version
- */
-
-float advection(uint i, float dt, __local float* velocity_cache, __constant struct AdvectionMeshInfo* mesh);
-
-
-/**
- * Compute the position of a particle with a RK2 integration scheme. Velocity is linearly interpolated from the global field.
- * Use of builtin OpenCL functions fma and mix.
- *
- * @param i Particle index.
- * @param dt Time step.
- * @param dx Space step.
- * @param invdx 1/dx.
- * @param gvelo Global velocity field.
- * @return Particle position
- *
- * @remark NB_I, NB_II, NB_III : points number in directions from 1st varying index to last.
- */
-float advection(uint i, float dt, __local float* velocity_cache, __constant struct AdvectionMeshInfo* mesh)
-{
-  float v, 			/* Velocity at point */
-    vp,				/* Velocity at right point */
-    p,				/* Normalized intermediary position */
-    c = i * mesh->dx.x + mesh->min_position,  /* initial coordinate */
-    hdt = 0.5 * dt;		/* half time step */
-  int i_ind,			/* Interpolation left point */
-    i_ind_p;			/* Interpolation right point */
-
-#if !(ADVEC_IS_MULTISCALE)
-  // single-scale:
-  v = velocity_cache[noBC_id(i)]; 	/* k = k1 */
-#else
-  // multi-scale : interpolate v from velocity buffer (of length V_NB_I)
-  p = c * mesh->v_invdx;
-  i_ind = convert_int_rtn(p);
-  p = p - convert_float(i_ind);
-  i_ind = i_ind + V_GHOSTS_NB;
-  i_ind_p = i_ind + 1;
-  v = mix(velocity_cache[noBC_id(i_ind)],
-	  velocity_cache[noBC_id(i_ind_p)],p);
-#endif
-
-  p = (c + hdt*v) * mesh->v_invdx;
-
-  i_ind = convert_int_rtn(p);
-  p = p - convert_float(i_ind);
-
-  i_ind = ((i_ind + V_GHOSTS_NB + V_NB_I) % V_NB_I);
-  i_ind_p = ((i_ind + 1) % V_NB_I);
-
-  v = velocity_cache[noBC_id(i_ind)];
-  vp = velocity_cache[noBC_id(i_ind_p)];
-  v = (p*(vp-v) + v);
-
-  return c + dt * v;
-}
-/* Operations number :  */
-/*   - 2 positions = 2 * 2 */
-/*   - 1 iterpolation = 9 */
-/* Total = 13 */
diff --git a/hysop/old/gpu.old/cl_src/advection/basic_rk4.cl b/hysop/old/gpu.old/cl_src/advection/basic_rk4.cl
deleted file mode 100644
index 853873ba47e171acfffef036b37c7eafd72388ac..0000000000000000000000000000000000000000
--- a/hysop/old/gpu.old/cl_src/advection/basic_rk4.cl
+++ /dev/null
@@ -1,109 +0,0 @@
-/**
- * @file basic_rk4.cl
- * Advection function (RK4 scheme), vectorized version, no use of builtins functions.
- */
-
-float__N__ advection(uint i, float dt, __local float* velocity_cache, __constant struct AdvectionMeshInfo* mesh);
-
-
-/**
- * Compute the position of a particle with a RK2 integration scheme. Velocity is linearly interpolated from the global field.
- * Use of builtin OpenCL functions fma and mix. Computations through OpenCL vector types.
- *
- * @param i Particle index.
- * @param dt Time step.
- * @param dx Space step.
- * @param invdx 1/dx.
- * @param velocity_cache Local velocity cache.
- * @return Particle position.
- *
- * @remark <code>NB_I</code>, <code>NB_II</code>, <code>NB_III</code> : points number in directions from 1st varying index to last.
- * @remark <code>__N__</code> is expanded at compilation time by vector width.
- * @remark <code>__NN__</code> is expanded at compilation time by a sequence of integer for each vector component.
- * @see hysop.gpu.tools.parse_file
- */
-float__N__ advection(uint i, float dt, __local float* velocity_cache, __constant struct AdvectionMeshInfo* mesh)
-{
-  float__N__ v,        		/* Velocity at point */
-    vp,				/* Velocity at right point */
-    p,				/* Intermediary position */
-    k,				/* rk averaged velocity */
-    kn,				/* rk intermediate velocity */
-    c,				/* initial coordinate */
-    hdt = (float__N__)(0.5*dt);	/* half time step */
-  int__N__ i_ind,		/* Interpolation left point */
-    i_ind_p;			/* Interpolation right point */
-
-  c = (float__N__)((i+__NN__)*mesh->dx.x,
-		   );
-  c = c + mesh->min_position;
-
-  //k1 = f(t,y)
-  //k2 = f(t + dt/2, y + dt/2 * k1)
-  //k3 = f(t + dt/2, y + dt/2 * k2)
-  //k4 = f(t + dt, y + dt * k3)
-  //result = y + dt/6( k1 + 2 * k2 + 2 * k3 + k4)
-
-#if !(ADVEC_IS_MULTISCALE)
-  // single-scale:
-  k = (float__N__)(velocity_cache[noBC_id(i+__NN__)],
-		   );
-#else
-  // multi-scale : interpolate v from velocity buffer (of length V_NB_I)
-  p = c * mesh->v_invdx;
-  i_ind = convert_int__N___rtn(p);
-  p = p - convert_float__N__(i_ind);
-  i_ind = i_ind + V_GHOSTS_NB;
-  i_ind_p = i_ind + 1;
-  v = (float__N__)(velocity_cache[noBC_id(i_ind.s__NN__)],
-		   );
-  vp = (float__N__)(velocity_cache[noBC_id(i_ind_p.s__NN__)],
-		    );
-  k = p*(vp-v) + v;
-#endif
-
-  p = (c + hdt * k) * mesh->v_invdx;
-  i_ind = convert_int__N___rtn(p);
-  p = p - convert_float__N__(i_ind);
-
-  i_ind = ((i_ind + V_GHOSTS_NB + V_NB_I) % V_NB_I);
-  i_ind_p = ((i_ind + 1) % V_NB_I);
-  v = (float__N__)(velocity_cache[noBC_id(i_ind.s__NN__)],
-		   );
-  vp = (float__N__)(velocity_cache[noBC_id(i_ind_p.s__NN__)],
-		    );
-  kn = p*(vp-v) + v;
-
-  k += 2.0 * kn;
-
-  p = (c + hdt * kn) * mesh->v_invdx;
-  i_ind = convert_int__N___rtn(p);
-  p = p - convert_float__N__(i_ind);
-
-  i_ind = ((i_ind + V_GHOSTS_NB + V_NB_I) % V_NB_I);
-  i_ind_p = ((i_ind + 1) % V_NB_I);
-  v = (float__N__)(velocity_cache[noBC_id(i_ind.s__NN__)],
-		   );
-  vp = (float__N__)(velocity_cache[noBC_id(i_ind_p.s__NN__)],
-		    );
-  kn = p*(vp-v) + v;
-
-  k += 2.0 * kn;
-
-  p = (c + dt * kn) * mesh->v_invdx;
-  i_ind = convert_int__N___rtn(p);
-  p = p - convert_float__N__(i_ind);
-
-  i_ind = ((i_ind + V_GHOSTS_NB + V_NB_I) % V_NB_I);
-  i_ind_p = ((i_ind + 1) % V_NB_I);
-  v = (float__N__)(velocity_cache[noBC_id(i_ind.s__NN__)],
-		   );
-  vp = (float__N__)(velocity_cache[noBC_id(i_ind_p.s__NN__)],
-		    );
-  kn = p*(vp-v) + v;
-
-  k += kn;
-
-
-  return c + (float__N__)(dt *0.16666666666666666) * k;
-}
diff --git a/hysop/old/gpu.old/cl_src/advection/basic_rk4_noVec.cl b/hysop/old/gpu.old/cl_src/advection/basic_rk4_noVec.cl
deleted file mode 100644
index 7b7d4b2752c7611d3c58c0eddfd08ca78fdc58a6..0000000000000000000000000000000000000000
--- a/hysop/old/gpu.old/cl_src/advection/basic_rk4_noVec.cl
+++ /dev/null
@@ -1,93 +0,0 @@
-/**
- * @file basic_rk4_noVec.cl
- * Advection function (RK4 scheme), basic version
- */
-
-float advection(uint i, float dt, __local float* velocity_cache, __constant struct AdvectionMeshInfo* mesh);
-
-
-/**
- * Compute the position of a particle with a RK2 integration scheme. Velocity is linearly interpolated from the global field.
- * Use of builtin OpenCL functions fma and mix.
- *
- * @param i Particle index.
- * @param dt Time step.
- * @param dx Space step.
- * @param invdx 1/dx.
- * @param velocity_cache Local velocity field.
- * @return Particle position
- *
- * @remark NB_I, NB_II, NB_III : points number in directions from 1st varying index to last.
- */
-float advection(uint i, float dt, __local float* velocity_cache, __constant struct AdvectionMeshInfo* mesh)
-{
-  float v, 			/* Velocity at point */
-    vp,				/* Velocity at right point */
-    p,				/* Intermediary position */
-    k,				/* rk averaged velocity */
-    kn,				/* rk intermediate velocity */
-    c = i * mesh->dx.x + mesh->min_position,  /* initial coordinate */
-    hdt = 0.5 * dt;		/* half time step */
-  int i_ind,			/* Interpolation left point */
-    i_ind_p;			/* Interpolation right point */
-
-  //k1 = f(t,y)
-  //k2 = f(t + dt/2, y + dt/2 * k1)
-  //k3 = f(t + dt/2, y + dt/2 * k2)
-  //k4 = f(t + dt, y + dt * k3)
-  //result = y + dt/6( k1 + 2 * k2 + 2 * k3 + k4)
-
-#if !(ADVEC_IS_MULTISCALE)
-  // single-scale:
-  k = velocity_cache[noBC_id(i)]; 	/* k = k1 */
-#else
-  // multi-scale : interpolate v from velocity buffer (of length V_NB_I)
-  p = c * mesh->v_invdx;
-  i_ind = convert_int_rtn(p);
-  p = p - convert_float(i_ind);
-  i_ind = i_ind + V_GHOSTS_NB;
-  i_ind_p = i_ind + 1;
-  k = mix(velocity_cache[noBC_id(i_ind)],
-	  velocity_cache[noBC_id(i_ind_p)],p);
-#endif
-
-  p = (c + hdt * k) * mesh->v_invdx;
-  i_ind = convert_int_rtn(p);
-  p = p - convert_float(i_ind);
-  i_ind = ((i_ind + V_GHOSTS_NB + V_NB_I) % V_NB_I);
-  i_ind_p = ((i_ind + 1) % V_NB_I);
-  v = velocity_cache[noBC_id(i_ind)];
-  vp = velocity_cache[noBC_id(i_ind_p)];
-  kn = p*(vp-v) + v;		/* kn = k2 */
-
-  k += 2.0 * kn;		/* k = k1 + 2*k2 */
-
-  p = (c + hdt * kn) * mesh->v_invdx;
-  i_ind = convert_int_rtn(p);
-  p = p - convert_float(i_ind);
-  i_ind = ((i_ind + V_GHOSTS_NB + V_NB_I) % V_NB_I);
-  i_ind_p = ((i_ind + 1) % V_NB_I);
-  v = velocity_cache[noBC_id(i_ind)];
-  vp = velocity_cache[noBC_id(i_ind_p)];
-  kn = p*(vp-v) + v;		/* kn = k3 */
-
-  k += 2.0 * kn;		/* k = k1 + 2*k2 + 2*k3 */
-
-  p = (c + dt * kn) * mesh->v_invdx;
-  i_ind = convert_int_rtn(p);
-  p = p - convert_float(i_ind);
-  i_ind = ((i_ind + V_GHOSTS_NB + V_NB_I) % V_NB_I);
-  i_ind_p = ((i_ind + 1) % V_NB_I);
-  v = velocity_cache[noBC_id(i_ind)];
-  vp = velocity_cache[noBC_id(i_ind_p)];
-  kn = p*(vp-v) + v;		/* kn = k4 */
-
-  k += kn;			/* k = k1 + 2*k2 + 2*k3 + k4 */
-
-  return c + dt * k*0.16666666666666666;
-}
-/* Operations number :  */
-/*   - 4 positions = 4 * 2 + 3 */
-/*   - 3 iterpolation = 3 * 9 */
-/*   - velocity weights = 5*/
-/* Total = 41 */
diff --git a/hysop/old/gpu.old/cl_src/advection/builtin_euler_noVec.cl b/hysop/old/gpu.old/cl_src/advection/builtin_euler_noVec.cl
deleted file mode 100644
index aecb5ff383230c6c454733a3ebb564acba7c7e7f..0000000000000000000000000000000000000000
--- a/hysop/old/gpu.old/cl_src/advection/builtin_euler_noVec.cl
+++ /dev/null
@@ -1,51 +0,0 @@
-/**
- * @file builtin_noVec.cl
- * Advection function, basic version
- */
-
-float advection(uint i, float dt, __local float* velocity_cache, __constant struct AdvectionMeshInfo* mesh);
-
-
-/**
- * Compute the position of a particle with a RK2 integration scheme. Velocity is linearly interpolated from the global field.
- * Use of builtin OpenCL functions fma and mix.
- *
- * @param i Particle index.
- * @param dt Time step.
- * @param dx Space step.
- * @param invdx 1/dx.
- * @param velocity_cache Local velocity field.
- * @return Particle position
- *
- * @remark NB_I, NB_II, NB_III : points number in directions from 1st varying index to last.
- */
-float advection(uint i, float dt, __local float* velocity_cache, __constant struct AdvectionMeshInfo* mesh)
-{
-  float v, 			/* Velocity at point */
-    c = fma(i, mesh->dx.x, mesh->min_position);	/* initial coordinate */
-
-#if !(ADVEC_IS_MULTISCALE)
-  // single-scale:
-  v = velocity_cache[noBC_id(i)];
-#else
-  float p;
-  int i_ind,			/* Interpolation left point */
-    i_ind_p;			/* Interpolation right point */
-  // multi-scale : interpolate v from velocity buffer (of length V_NB_I)
-  p = c * mesh->v_invdx;
-  i_ind = convert_int_rtn(p);
-  p = p - convert_float(i_ind);
-  i_ind = i_ind + V_GHOSTS_NB;
-  i_ind_p = i_ind + 1;
-  v = mix(velocity_cache[noBC_id(i_ind)],
-	  velocity_cache[noBC_id(i_ind_p)],p);
-#endif
-
-  return fma(dt, v, c);
-}
-/* Operations number :  */
-/*   - 3 positions = 3 * fma */
-/*   - 1 iterpolation = 2 + 1 * mix */
-/*   - dt/2 = 1 */
-/* 1mix <=> 3flop : mix(x,y,a) = x+(y-x)*a */
-/* Total = 3 fma + 1 mix + 3 = 12flop */
diff --git a/hysop/old/gpu.old/cl_src/advection/builtin_rk2.cl b/hysop/old/gpu.old/cl_src/advection/builtin_rk2.cl
deleted file mode 100644
index c45d5cee4b78861d35ecb891efc51a3116bbc1af..0000000000000000000000000000000000000000
--- a/hysop/old/gpu.old/cl_src/advection/builtin_rk2.cl
+++ /dev/null
@@ -1,68 +0,0 @@
-/**
- * @file builtin.cl
- * Advection function, vectorized version.
- */
-
-float__N__ advection(uint i, float dt, __local float* velocity_cache, __constant struct AdvectionMeshInfo* mesh);
-
-
-/**
- * Compute the position of a particle with a RK2 integration scheme. Velocity is linearly interpolated from the global field.
- * Use of builtin OpenCL functions fma and mix. Computations through OpenCL vector types.
- *
- * @param i Particle index.
- * @param dt Time step.
- * @param dx Space step.
- * @param invdx 1/dx.
- * @param velocity_cache Local velocity cache.
- * @return Particle position.
- *
- * @remark <code>NB_I</code>, <code>NB_II</code>, <code>NB_III</code> : points number in directions from 1st varying index to last.
- * @remark <code>__N__</code> is expanded at compilation time by vector width.
- * @remark <code>__NN__</code> is expanded at compilation time by a sequence of integer for each vector component.
- * @see hysop.gpu.tools.parse_file
- */
-float__N__ advection(uint i, float dt, __local float* velocity_cache, __constant struct AdvectionMeshInfo* mesh)
-{
-  float__N__ v,        		/* Velocity at point */
-    vp,				/* Velocity at right point */
-    p,				/* Intermediary position */
-    c,				/* initial coordinate */
-    hdt = (float__N__)(0.5*dt);	/* half time step */
-  int__N__ i_ind,		/* Interpolation left point */
-    i_ind_p;			/* Interpolation right point */
-
-  c = (float__N__)((i+__NN__)*mesh->dx.x,
-		       );
-  c = c + mesh->min_position;
-
-#if !(ADVEC_IS_MULTISCALE)
-  // single-scale:
-  v = (float__N__)(velocity_cache[noBC_id(i+__NN__)],
-		   );
-  p = fma(hdt, v, c) * mesh->v_invdx;
-#else
-  // multi-scale : interpolate v from velocity buffer (of length V_NB_I)
-  p = c * mesh->v_invdx;
-  i_ind = convert_int__N___rtn(p);
-  p = p - convert_float__N__(i_ind);
-  i_ind = i_ind + V_GHOSTS_NB;
-  i_ind_p = i_ind + 1;
-  v = (float__N__)(velocity_cache[noBC_id(i_ind.s__NN__)],
-		   );
-  vp = (float__N__)(velocity_cache[noBC_id(i_ind_p.s__NN__)],
-		    );
-  p = fma(hdt, mix(v,vp,p), c) * v_invdx;
-#endif
-
-  i_ind = convert_int__N___rtn(p);
-  p = p - convert_float__N__(i_ind);
-
-  i_ind = ((i_ind + V_GHOSTS_NB + V_NB_I) % V_NB_I);
-  i_ind_p = ((i_ind + 1) % V_NB_I);
-  v = (float__N__)(velocity_cache[noBC_id(i_ind.s__NN__)],
-		   );
-  vp = (float__N__)(velocity_cache[noBC_id(i_ind_p.s__NN__)],
-		    );
-  return fma(mix(v,vp,p),dt,c);
-}
diff --git a/hysop/old/gpu.old/cl_src/advection/builtin_rk2_noVec.cl b/hysop/old/gpu.old/cl_src/advection/builtin_rk2_noVec.cl
deleted file mode 100644
index 415eec1d1b257836c576b6928015c5dabb2c41a2..0000000000000000000000000000000000000000
--- a/hysop/old/gpu.old/cl_src/advection/builtin_rk2_noVec.cl
+++ /dev/null
@@ -1,62 +0,0 @@
-/**
- * @file builtin_noVec.cl
- * Advection function, basic version
- */
-
-float advection(uint i, float dt, __local float* velocity_cache, __constant struct AdvectionMeshInfo* mesh);
-
-
-/**
- * Compute the position of a particle with a RK2 integration scheme. Velocity is linearly interpolated from the global field.
- * Use of builtin OpenCL functions fma and mix.
- *
- * @param i Particle index.
- * @param dt Time step.
- * @param dx Space step.
- * @param invdx 1/dx.
- * @param velocity_cache Local velocity field.
- * @return Particle position
- *
- * @remark NB_I, NB_II, NB_III : points number in directions from 1st varying index to last.
- */
-float advection(uint i, float dt, __local float* velocity_cache, __constant struct AdvectionMeshInfo* mesh)
-{
-  float v, 			/* Velocity at point */
-    p,				/* Intermediary position */
-    c = i*mesh->dx.x, //fma(i, mesh->dx.x, mesh->min_position),	/* initial coordinate */
-    hdt = 0.5 * dt;		/* half time step */
-  int i_ind,			/* Interpolation left point */
-    i_ind_p;			/* Interpolation right point */
-
-#if !(ADVEC_IS_MULTISCALE)
-  // single-scale:
-  v = velocity_cache[noBC_id(i)];
-#else
-  // multi-scale : interpolate v from velocity buffer (of length V_NB_I)
-  p = c * mesh->v_invdx;
-  i_ind = convert_int_rtn(p);
-  p = p - convert_float(i_ind);
-  i_ind = i_ind + V_GHOSTS_NB;
-  i_ind_p = i_ind + 1;
-  v = mix(velocity_cache[noBC_id(i_ind)],
-	  velocity_cache[noBC_id(i_ind_p)],p);
-#endif
-
-  p = fma(hdt, v, c) * mesh->v_invdx;
-
-  i_ind = convert_int_rtn(p);
-  p = p - convert_float(i_ind);
-
-  i_ind = ((i_ind + V_GHOSTS_NB + V_NB_I) % V_NB_I);
-  i_ind_p = ((i_ind + 1) % V_NB_I);
-  v = mix(velocity_cache[noBC_id(i_ind)],
-	  velocity_cache[noBC_id(i_ind_p)],p);
-
-  return fma(dt, v, c) + mesh->min_position;
-}
-/* Operations number :  */
-/*   - 3 positions = 3 * fma */
-/*   - 1 iterpolation = 2 + 1 * mix */
-/*   - dt/2 = 1 */
-/* 1mix <=> 3flop : mix(x,y,a) = x+(y-x)*a */
-/* Total = 3 fma + 1 mix + 3 = 12flop */
diff --git a/hysop/old/gpu.old/cl_src/advection/builtin_rk4.cl b/hysop/old/gpu.old/cl_src/advection/builtin_rk4.cl
deleted file mode 100644
index 2dcc7dc1e77817fa753a0cc48a15929f03e360f9..0000000000000000000000000000000000000000
--- a/hysop/old/gpu.old/cl_src/advection/builtin_rk4.cl
+++ /dev/null
@@ -1,103 +0,0 @@
-/**
- * @file builtin_rk4.cl
- * Advection function, vectorized version.
- */
-
-float__N__ advection(uint i, float dt, __local float* velocity_cache, __constant struct AdvectionMeshInfo* mesh);
-
-
-/**
- * Compute the position of a particle with a RK2 integration scheme. Velocity is linearly interpolated from the global field.
- * Use of builtin OpenCL functions fma and mix. Computations through OpenCL vector types.
- *
- * @param i Particle index.
- * @param dt Time step.
- * @param dx Space step.
- * @param invdx 1/dx.
- * @param velocity_cache Local velocity cache.
- * @return Particle position.
- *
- * @remark <code>NB_I</code>, <code>NB_II</code>, <code>NB_III</code> : points number in directions from 1st varying index to last.
- * @remark <code>__N__</code> is expanded at compilation time by vector width.
- * @remark <code>__NN__</code> is expanded at compilation time by a sequence of integer for each vector component.
- * @see hysop.gpu.tools.parse_file
- */
-float__N__ advection(uint i, float dt, __local float* velocity_cache, __constant struct AdvectionMeshInfo* mesh)
-{
-  float__N__ v,        		/* Velocity at point */
-    vp,				/* Velocity at right point */
-    p,				/* Intermediary position */
-    k,				/* rk averaged velocity */
-    kn,				/* rk intermediate velocity */
-    c,				/* initial coordinate */
-    hdt = (float__N__)(0.5*dt);	/* half time step */
-  int__N__ i_ind,		/* Interpolation left point */
-    i_ind_p;			/* Interpolation right point */
-
-  c = (float__N__)((i+__NN__)*mesh->dx.x,
-		       );
-  c = c + mesh->min_position;
-
-#if !(ADVEC_IS_MULTISCALE)
-  // single-scale:
-  v = (float__N__)(velocity_cache[noBC_id(i+__NN__)],
-		   );
-  p = fma(hdt, v, c) * mesh->v_invdx;
-#else
-  // multi-scale : interpolate v from velocity buffer (of length V_NB_I)
-  p = c * mesh->v_invdx;
-  i_ind = convert_int__N___rtn(p);
-  p = p - convert_float__N__(i_ind);
-  i_ind = i_ind + V_GHOSTS_NB;
-  i_ind_p = i_ind + 1;
-  v = (float__N__)(velocity_cache[noBC_id(i_ind.s__NN__)],
-		   );
-  vp = (float__N__)(velocity_cache[noBC_id(i_ind_p.s__NN__)],
-		    );
-  p = fma(hdt, mix(v,vp,p), c) * mesh->v_invdx;
-#endif
-
-  i_ind = convert_int__N___rtn(p);
-  p = p - convert_float__N__(i_ind);
-
-  i_ind = ((i_ind + V_GHOSTS_NB + V_NB_I) % V_NB_I);
-  i_ind_p = ((i_ind + 1) % V_NB_I);
-  v = (float__N__)(velocity_cache[noBC_id(i_ind.s__NN__)],
-		   );
-  vp = (float__N__)(velocity_cache[noBC_id(i_ind_p.s__NN__)],
-		    );
-  kn = mix(v,vp,p);
-
-  k += 2.0 * kn;
-
-  p = fma(hdt, kn, c) * mesh->v_invdx;
-  i_ind = convert_int__N___rtn(p);
-  p = p - convert_float__N__(i_ind);
-
-  i_ind = ((i_ind + V_GHOSTS_NB + V_NB_I) % V_NB_I);
-  i_ind_p = ((i_ind + 1) % V_NB_I);
-  v = (float__N__)(velocity_cache[noBC_id(i_ind.s__NN__)],
-		   );
-  vp = (float__N__)(velocity_cache[noBC_id(i_ind_p.s__NN__)],
-		    );
-  kn = mix(v,vp,p);
-
-  k += 2.0 * kn;
-
-  p = fma((float__N__)(dt), kn, c) * mesh->v_invdx;
-  i_ind = convert_int__N___rtn(p);
-  p = p - convert_float__N__(i_ind);
-
-  i_ind = ((i_ind + V_GHOSTS_NB + V_NB_I) % V_NB_I);
-  i_ind_p = ((i_ind + 1) % V_NB_I);
-  v = (float__N__)(velocity_cache[noBC_id(i_ind.s__NN__)],
-		   );
-  vp = (float__N__)(velocity_cache[noBC_id(i_ind_p.s__NN__)],
-		    );
-  kn = mix(v,vp,p);
-
-  k += kn;
-
-
-  return fma(k,(float__N__)(dt*0.16666666666666666),c);
-}
diff --git a/hysop/old/gpu.old/cl_src/advection/builtin_rk4_noVec.cl b/hysop/old/gpu.old/cl_src/advection/builtin_rk4_noVec.cl
deleted file mode 100644
index 170fa90c81026c26b8e61396a0e14c8b471d4d91..0000000000000000000000000000000000000000
--- a/hysop/old/gpu.old/cl_src/advection/builtin_rk4_noVec.cl
+++ /dev/null
@@ -1,90 +0,0 @@
-/**
- * @file builtin_rk4_noVec.cl
- * Advection function, basic version
- */
-
-float advection(uint i, float dt, __local float* velocity_cache, __constant struct AdvectionMeshInfo* mesh);
-
-
-/**
- * Compute the position of a particle with a RK2 integration scheme. Velocity is linearly interpolated from the global field.
- * Use of builtin OpenCL functions fma and mix.
- *
- * @param i Particle index.
- * @param dt Time step.
- * @param dx Space step.
- * @param invdx 1/dx.
- * @param velocity_cache Local velocity field.
- * @return Particle position
- *
- * @remark NB_I, NB_II, NB_III : points number in directions from 1st varying index to last.
- */
-float advection(uint i, float dt, __local float* velocity_cache, __constant struct AdvectionMeshInfo* mesh)
-{
-  float p,		       /* Intermediary position */
-    k,			       /* rk averaged velocity */
-    kn,			       /* rk intermediate velocity */
-    c = fma(i, mesh->dx.x, mesh->min_position), /* initial coordinate */
-    hdt = 0.5 * dt;	       /* half time step */
-  int i_ind,		       /* Interpolation left point */
-    i_ind_p;		       /* Interpolation right point */
-
-  //k1 = f(t,y)
-  //k2 = f(t + dt/2, y + dt/2 * k1)
-  //k3 = f(t + dt/2, y + dt/2 * k2)
-  //k4 = f(t + dt, y + dt * k3)
-  //result = y + dt/6( k1 + 2 * k2 + 2 * k3 + k4)
-
-#if !(ADVEC_IS_MULTISCALE)
-  // single-scale:
-  k = velocity_cache[noBC_id(i)]; 	/* k = k1 */
-#else
-  // multi-scale : interpolate v from velocity buffer (of length V_NB_I)
-  p = c * mesh->v_invdx;
-  i_ind = convert_int_rtn(p);
-  p = p - convert_float(i_ind);
-  i_ind = i_ind + V_GHOSTS_NB;
-  i_ind_p = i_ind + 1;
-  k = mix(velocity_cache[noBC_id(i_ind)],
-	  velocity_cache[noBC_id(i_ind_p)],p);
-#endif
-
-  p = fma(hdt, k, c) * mesh->v_invdx;
-  i_ind = convert_int_rtn(p);
-  p = p - convert_float(i_ind);
-  i_ind = ((i_ind + V_GHOSTS_NB + V_NB_I) % V_NB_I);
-  i_ind_p = ((i_ind + 1) % V_NB_I);
-  kn = mix(velocity_cache[noBC_id(i_ind)],
-	   velocity_cache[noBC_id(i_ind_p)],p);		/* kn = k2 */
-
-  k += 2.0 * kn;		/* k = k1 + 2*k2 */
-
-  p = fma(hdt, kn, c) * mesh->v_invdx;
-  i_ind = convert_int_rtn(p);
-  p = p - convert_float(i_ind);
-  i_ind = ((i_ind + V_GHOSTS_NB + V_NB_I) % V_NB_I);
-  i_ind_p = ((i_ind + 1) % V_NB_I);
-  kn = mix(velocity_cache[noBC_id(i_ind)],
-	   velocity_cache[noBC_id(i_ind_p)],p);		/* kn = k3 */
-
-  k += 2.0 * kn;		/* k = k1 + 2*k2 + 2*k3 */
-
-  p = fma(dt, kn, c) * mesh->v_invdx;
-  i_ind = convert_int_rtn(p);
-  p = p - convert_float(i_ind);
-  i_ind = ((i_ind + V_GHOSTS_NB + V_NB_I) % V_NB_I);
-  i_ind_p = ((i_ind + 1) % V_NB_I);
-  kn = mix(velocity_cache[noBC_id(i_ind)],
-	   velocity_cache[noBC_id(i_ind_p)],p);		/* kn = k4 */
-
-  k += kn;			/* k = k1 + 2*k2 + 2*k3 + k4 */
-
-  return fma(k, dt*0.16666666666666666, c);
-}
-
-/* Operations number :  */
-/*   - 5 positions = 5 * fma*/
-/*   - 3 iterpolation = 3 * (1 * mix + 2) */
-/*   - velocity weights = 7 */
-/*   - dt/2, dt/6 = 2 */
-/* Total = 5 fma + 3 mix + 13 = 32flop */
diff --git a/hysop/old/gpu.old/cl_src/advection/comm_basic_rk2_noVec.cl b/hysop/old/gpu.old/cl_src/advection/comm_basic_rk2_noVec.cl
deleted file mode 100644
index 037d19052fe5b1627b36080d852a9100cbd024bb..0000000000000000000000000000000000000000
--- a/hysop/old/gpu.old/cl_src/advection/comm_basic_rk2_noVec.cl
+++ /dev/null
@@ -1,74 +0,0 @@
-/**
- * @file advection/comm_basic_noVec.cl
- * Advection function, basic version, mpi communications on the host side
- */
-
-float advection(uint i, float dt, __local float* velocity_cache, __constant struct AdvectionMeshInfo* mesh);
-
-
-/**
- * Compute the position of a particle with a RK2 integration scheme. Velocity is linearly interpolated from the global field.
- * Use of builtin OpenCL functions fma and mix.
- *
- * @param i Particle index.
- * @param dt Time step.
- * @param dx Space step.
- * @param invdx 1/dx.
- * @param velocity_cache Local velocity field.
- * @return Particle position
- *
- * @remark NB_I, NB_II, NB_III : points number in directions from 1st varying index to last.
- * @remark T_NB_I: global points number in the 1st direction (mpi cutted direction)
- * @remark START_INDEX Global staring index for computational points
- * @remark STOP_INDEX Global stop index for computational points
- */
-float advection(uint i, float dt, __local float* velocity_cache, __constant struct AdvectionMeshInfo* mesh)
-{
-  float v, 			/* Velocity at point */
-    vp,				/* Velocity at right point */
-    p,				/* Normalized intermediary position */
-    c = i * mesh->dx.x + mesh->min_position, /* initial coordinate */
-    hdt = 0.5 * dt;		/* half time step */
-  int i_ind,			/* Interpolation left point */
-    i_ind_p;			/* Interpolation right point */
-
-#if (V_NB_I-2*V_GHOSTS_NB) == NB_I
-  // single-scale:
-  v = velocity_cache[noBC_id(i + V_GHOSTS_NB)]; 	/* k = k1 */
-#else
-  // multi-scale : interpolate v from velocity buffer (of length V_NB_I)
-  p = c * mesh->v_invdx;
-  i_ind = convert_int_rtn(p);
-  p = p - convert_float(i_ind);
-  i_ind = i_ind - (V_START_INDEX-V_GHOSTS_NB);
-  i_ind_p = i_ind + 1;
-  v = mix(velocity_cache[noBC_id(i_ind)],
-	  velocity_cache[noBC_id(i_ind_p)],p);
-#endif
-  p = (c + hdt*v) * mesh->v_invdx;
-
-  i_ind = convert_int_rtn(p);
-  if( i_ind>=(V_START_INDEX-MS_INTERPOL_SHIFT) && i_ind < (V_STOP_INDEX-V_GHOSTS_NB))
-    {
-      p = p - convert_float(i_ind);
-
-      i_ind = i_ind - (V_START_INDEX-V_GHOSTS_NB);
-      i_ind_p = i_ind + 1;
-
-      v = velocity_cache[noBC_id(i_ind)];
-      vp = velocity_cache[noBC_id(i_ind_p)];
-      v = (p*(vp-v) + v);
-
-      p = c + dt * v;
-    }
-  else
-    {
-      p = (1000*T_NB_I)*1.0 + p;
-    }
-
-  return p;
-}
-/* Operations number :  */
-/*   - 2 positions = 2 * 2 */
-/*   - 1 iterpolation = 9 */
-/* Total = 13 */
diff --git a/hysop/old/gpu.old/cl_src/advection/comm_builtin_rk2_noVec.cl b/hysop/old/gpu.old/cl_src/advection/comm_builtin_rk2_noVec.cl
deleted file mode 100644
index a9a717f8088de07317e5eac2c46d787442880f0f..0000000000000000000000000000000000000000
--- a/hysop/old/gpu.old/cl_src/advection/comm_builtin_rk2_noVec.cl
+++ /dev/null
@@ -1,72 +0,0 @@
-/**
- * @file comm_builtin_noVec.cl
- * Advection function, basic version, mpi communications on the host side
- */
-
-float advection(uint i, float dt, __local float* velocity_cache, __constant struct AdvectionMeshInfo* mesh);
-
-
-/**
- * Compute the position of a particle with a RK2 integration scheme. Velocity is linearly interpolated from the global field.
- * Use of builtin OpenCL functions fma and mix.
- *
- * @param i Particle index (without velocity ghosts considering).
- * @param dt Time step.
- * @param dx Space step.
- * @param invdx 1/dx.
- * @param velocity_cache Local velocity field.
- * @return Particle position
- *
- * @remark NB_I, NB_II, NB_III : points number in directions from 1st varying index to last.
- * @remark T_NB_I: global points number in the 1st direction (mpi cutted direction)
- * @remark START_INDEX Global staring index for computational points
- * @remark STOP_INDEX Global stop index for computational points
- */
-float advection(uint i, float dt, __local float* velocity_cache, __constant struct AdvectionMeshInfo* mesh)
-{
-  float v, 			/* Velocity at point */
-    p,				/* Intermediary position */
-    c = i * dx + min_position,  /* initial coordinate */
-    hdt = 0.5 * dt;		/* half time step */
-  int i_ind,			/* Interpolation left point */
-    i_ind_p;			/* Interpolation right point */
-
-#if (V_NB_I-2*V_GHOSTS_NB) == NB_I
-  // single scale:
-  v = velocity_cache[noBC_id(i + V_GHOSTS_NB)];
-#else
-  // multi-scale : interpolate v from velocity buffer (of length V_NB_I)
-  p = c * mesh->v_invdx;
-  i_ind = convert_int_rtn(p);
-  p = p - convert_float(i_ind);
-  i_ind = i_ind - (V_START_INDEX-V_GHOSTS_NB);
-  i_ind_p = i_ind + 1;
-  v = mix(velocity_cache[noBC_id(i_ind)],
-  	  velocity_cache[noBC_id(i_ind_p)],p);
-#endif
-
-  p = fma(hdt, v, c) * mesh->v_invdx;
-  i_ind = convert_int_rtn(p);
-  if( i_ind>=(V_START_INDEX-MS_INTERPOL_SHIFT) && i_ind < (V_STOP_INDEX-V_GHOSTS_NB))
-    {
-      p = p - convert_float(i_ind);
-
-      i_ind = i_ind - (V_START_INDEX-V_GHOSTS_NB);
-      i_ind_p = i_ind + 1;
-
-      v = mix(velocity_cache[noBC_id(i_ind)],
-      	      velocity_cache[noBC_id(i_ind_p)],p);
-
-      p = fma(dt, v, c);
-    }
-  else
-    {
-      p = (1000*T_NB_I)*1.0 + p;
-    }
-
-  return p;
-}
-/* Operations number :  */
-/*   - 2 positions = 2 * fma */
-/*   - 1 iterpolation = 6 + 1 * mix */
-/* Total = 2 fma + 1 mix + 6 */
diff --git a/hysop/old/gpu.old/cl_src/advection/velocity_cache.cl b/hysop/old/gpu.old/cl_src/advection/velocity_cache.cl
deleted file mode 100644
index 6fd4ae6e6046ca8d3db8c2472c18b0dcd7483ad9..0000000000000000000000000000000000000000
--- a/hysop/old/gpu.old/cl_src/advection/velocity_cache.cl
+++ /dev/null
@@ -1,217 +0,0 @@
-void fill_velocity_cache(__global const float* gvelo,
-			 uint gidX, uint gidY, uint gidZ,
-			 __local float* gvelo_loc,
-#if ADVEC_IS_MULTISCALE
-			 float inv_v_dx_y, float inv_v_dx_z,
-#endif
-			 __constant struct AdvectionMeshInfo* mesh);
-
-void fill_velocity_cache(__global const float* gvelo,
-			 uint gidX, uint gidY, uint gidZ,
-			 __local float* velocity_cache,
-#if ADVEC_IS_MULTISCALE
-			 float inv_v_dx_y, float inv_v_dx_z,
-#endif
-			 __constant struct AdvectionMeshInfo* mesh)
-{
-  uint i;
-  float__N__ v;
-#if !(ADVEC_IS_MULTISCALE)
-  // Single scale : Velocity and scalar grids are identical : cache is just read from global
-  uint line_index = gidY*V_NB_I + gidZ*V_NB_I*V_NB_II; /* Current 1D problem index */
-  for(i=gidX*__N__; i<V_NB_I; i+=(WI_NB*__N__))
-    {
-      /* Read velocity */
-      v = vload__N__((i+line_index)/__N__, gvelo);
-      /* Fill the cache */
-      velocity_cache[noBC_id(i+__NN__)] = v.s__NN__;
-    }
-#else
-  // Multi-scale: Velocity cache is interpolated from global
-
-#if NB_III == 1
-  // 2D case
-
-
-  float line_posY, hY;
-  int indY;
-#if MS_FORMULA == MS_LINEAR
-  int2 v_line_index;
-  float2 wY;
-#elif MS_FORMULA == MS_L2_1
-  int4 v_line_index;
-  float4 wY;
-#elif MS_FORMULA == MS_L4_2 ||  MS_FORMULA == MS_L4_4
-  // Only the 6 first elements will be used
-  int8 v_line_index;
-  float8 wY;
-#endif
-
-  line_posY = (gidY * mesh->dx.y) * inv_v_dx_y;// mesh->v_dx.y;
-  indY = convert_int_rtn(line_posY);
-  hY = line_posY - convert_float(indY);
-
-#if MS_FORMULA == MS_LINEAR
-  wY.s1 = hY;
-  wY.s0 = 1.0 - wY.s1;
-#else
-  wY.s0 = MS_INTERPOL(alpha)(hY);
-  wY.s1 = MS_INTERPOL(beta)(hY);
-  wY.s2 = MS_INTERPOL(gamma)(hY);
-#if MS_INTERPOL_SHIFT > 1
-  wY.s3 = MS_INTERPOL(delta)(hY);
-  wY.s4 = MS_INTERPOL(eta)(hY);
-  wY.s5 = 1.0 - wY.s0 - wY.s1 - wY.s2 - wY.s3 - wY.s4;
-#else
-  wY.s3 = 1.0 - wY.s0 - wY.s1 - wY.s2;
-#endif
-#endif
-
-  indY = indY + V_GHOSTS_NB - MS_INTERPOL_SHIFT;
-
-  v_line_index.s0 = indY * V_NB_I;
-  v_line_index.s1 = (indY + 1) * V_NB_I;
-#if MS_INTERPOL_SHIFT > 0
-  v_line_index.s2 = (indY + 2) * V_NB_I;
-  v_line_index.s3 = (indY + 3) * V_NB_I;
-#elif MS_INTERPOL_SHIFT > 1
-  v_line_index.s4 = (indY + 4) * V_NB_I;
-  v_line_index.s5 = (indY + 5) * V_NB_I;
-#endif
-
-  for(i=gidX*__N__; i<V_NB_I; i+=(WI_NB*__N__))
-    {
-    gvelo_loc[noBC_id(i)] = wY.s0 * gvelo[i + v_line_index.s0];
-    gvelo_loc[noBC_id(i)] += wY.s1 * gvelo[i + v_line_index.s1];
-#if MS_INTERPOL_SHIFT > 0
-    gvelo_loc[noBC_id(i)] += wY.s2 * gvelo[i + v_line_index.s2];
-    gvelo_loc[noBC_id(i)] += wY.s3 * gvelo[i + v_line_index.s3];
-#elif MS_INTERPOL_SHIFT > 1
-    gvelo_loc[noBC_id(i)] += wY.s4 * gvelo[i + v_line_index.s4];
-    gvelo_loc[noBC_id(i)] += wY.s5 * gvelo[i + v_line_index.s5];
-#endif
-    }
-
-#else
-  // 3D case
-
-
-  float line_posY, hY;
-  float line_posZ, hZ;
-  int indY, indZ;
-#if MS_FORMULA == MS_LINEAR
-  int2 v_line_indexY, v_line_indexZ;
-  float2 wY, wZ;
-#elif MS_FORMULA == MS_L2_1
-  int4 v_line_indexY, v_line_indexZ;
-  float4 wY, wZ;
-#elif MS_FORMULA == MS_L4_2 || MS_FORMULA == MS_L4_4
-  int8 v_line_indexY, v_line_indexZ;
-  float8 wY, wZ;
-#endif
-
-  line_posY = (gidY * mesh->dx.y) * inv_v_dx_y;// mesh->v_dx.y;
-  line_posZ = (gidZ * mesh->dx.z) * inv_v_dx_z;// mesh->v_dx.z;
-  indY = convert_int_rtn(line_posY);
-  indZ = convert_int_rtn(line_posZ);
-  hY = line_posY - convert_float(indY);
-  hZ = line_posZ - convert_float(indZ);
-
-#if MS_FORMULA == MS_LINEAR
-  wY.s1 = hY;
-  wY.s0 = 1.0 - wY.s1;
-  wZ.s1 = hZ;
-  wZ.s0 = 1.0 - wZ.s1;
-#else
-  wY.s0 = MS_INTERPOL(alpha)(hY);
-  wY.s1 = MS_INTERPOL(beta)(hY);
-  wY.s2 = MS_INTERPOL(gamma)(hY);
-  wZ.s0 = MS_INTERPOL(alpha)(hZ);
-  wZ.s1 = MS_INTERPOL(beta)(hZ);
-  wZ.s2 = MS_INTERPOL(gamma)(hZ);
-#if MS_INTERPOL_SHIFT > 1
-  wY.s3 = MS_INTERPOL(delta)(hY);
-  wY.s4 = MS_INTERPOL(eta)(hY);
-  wY.s5 = 1.0 - wY.s0 - wY.s1 - wY.s2 - wY.s3 - wY.s4;
-  wZ.s3 = MS_INTERPOL(delta)(hZ);
-  wZ.s4 = MS_INTERPOL(eta)(hZ);
-  wZ.s5 = 1.0 - wZ.s0 - wZ.s1 - wZ.s2 - wZ.s3 - wZ.s4;
-#else
-  wY.s3 = 1.0 - wY.s0 - wY.s1 - wY.s2;
-  wZ.s3 = 1.0 - wZ.s0 - wZ.s1 - wZ.s2;
-#endif
-#endif
-
- indY = indY + V_GHOSTS_NB - MS_INTERPOL_SHIFT;
- indZ = indZ + V_GHOSTS_NB - MS_INTERPOL_SHIFT;
-
-  v_line_indexY.s0 = indY * V_NB_I;
-  v_line_indexY.s1 = (indY + 1) * V_NB_I;
-  v_line_indexZ.s0 = indZ * V_NB_I * V_NB_II;
-  v_line_indexZ.s1 = (indZ + 1) * V_NB_I * V_NB_II;
-#if MS_INTERPOL_SHIFT > 0
-  v_line_indexY.s2 = (indY + 2) * V_NB_I;
-  v_line_indexY.s3 = (indY + 3) * V_NB_I;
-  v_line_indexZ.s2 = (indZ + 2) * V_NB_I * V_NB_II;
-  v_line_indexZ.s3 = (indZ + 3) * V_NB_I * V_NB_II;
-#elif MS_INTERPOL_SHIFT > 1
-  v_line_indexY.s4 = (indY + 4) * V_NB_I;
-  v_line_indexY.s5 = (indY + 5) * V_NB_I;
-  v_line_indexZ.s4 = (indZ + 4) * V_NB_I * V_NB_II;
-  v_line_indexZ.s5 = (indZ + 5) * V_NB_I * V_NB_II;
-#endif
-
-
-  for(i=gidX*__N__; i<V_NB_I; i+=(WI_NB*__N__))
-    {
-    gvelo_loc[noBC_id(i)] = wY.s0 * wZ.s0 * gvelo[i + v_line_indexY.s0 + v_line_indexZ.s0];
-    gvelo_loc[noBC_id(i)] += wY.s0 * wZ.s1 * gvelo[i + v_line_indexY.s0 + v_line_indexZ.s1];
-    gvelo_loc[noBC_id(i)] += wY.s1 * wZ.s0 * gvelo[i + v_line_indexY.s1 + v_line_indexZ.s0];
-    gvelo_loc[noBC_id(i)] += wY.s1 * wZ.s1 * gvelo[i + v_line_indexY.s1 + v_line_indexZ.s1];
-#if MS_INTERPOL_SHIFT > 0
-    gvelo_loc[noBC_id(i)] += wY.s0 * wZ.s2 * gvelo[i + v_line_indexY.s0 + v_line_indexZ.s2];
-    gvelo_loc[noBC_id(i)] += wY.s0 * wZ.s3 * gvelo[i + v_line_indexY.s0 + v_line_indexZ.s3];
-
-    gvelo_loc[noBC_id(i)] += wY.s1 * wZ.s2 * gvelo[i + v_line_indexY.s1 + v_line_indexZ.s2];
-    gvelo_loc[noBC_id(i)] += wY.s1 * wZ.s3 * gvelo[i + v_line_indexY.s1 + v_line_indexZ.s3];
-
-    gvelo_loc[noBC_id(i)] += wY.s2 * wZ.s0 * gvelo[i + v_line_indexY.s2 + v_line_indexZ.s0];
-    gvelo_loc[noBC_id(i)] += wY.s2 * wZ.s1 * gvelo[i + v_line_indexY.s2 + v_line_indexZ.s1];
-    gvelo_loc[noBC_id(i)] += wY.s2 * wZ.s2 * gvelo[i + v_line_indexY.s2 + v_line_indexZ.s2];
-    gvelo_loc[noBC_id(i)] += wY.s2 * wZ.s3 * gvelo[i + v_line_indexY.s2 + v_line_indexZ.s3];
-
-    gvelo_loc[noBC_id(i)] += wY.s3 * wZ.s0 * gvelo[i + v_line_indexY.s3 + v_line_indexZ.s0];
-    gvelo_loc[noBC_id(i)] += wY.s3 * wZ.s1 * gvelo[i + v_line_indexY.s3 + v_line_indexZ.s1];
-    gvelo_loc[noBC_id(i)] += wY.s3 * wZ.s2 * gvelo[i + v_line_indexY.s3 + v_line_indexZ.s2];
-    gvelo_loc[noBC_id(i)] += wY.s3 * wZ.s3 * gvelo[i + v_line_indexY.s3 + v_line_indexZ.s3];
-#elif MS_INTERPOL_SHIFT > 1
-    gvelo_loc[noBC_id(i)] += wY.s0 * wZ.s4 * gvelo[i + v_line_indexY.s0 + v_line_indexZ.s4];
-    gvelo_loc[noBC_id(i)] += wY.s0 * wZ.s5 * gvelo[i + v_line_indexY.s0 + v_line_indexZ.s5];
-
-    gvelo_loc[noBC_id(i)] += wY.s1 * wZ.s4 * gvelo[i + v_line_indexY.s1 + v_line_indexZ.s4];
-    gvelo_loc[noBC_id(i)] += wY.s1 * wZ.s5 * gvelo[i + v_line_indexY.s1 + v_line_indexZ.s5];
-
-    gvelo_loc[noBC_id(i)] += wY.s2 * wZ.s4 * gvelo[i + v_line_indexY.s2 + v_line_indexZ.s4];
-    gvelo_loc[noBC_id(i)] += wY.s2 * wZ.s5 * gvelo[i + v_line_indexY.s2 + v_line_indexZ.s5];
-
-    gvelo_loc[noBC_id(i)] += wY.s3 * wZ.s4 * gvelo[i + v_line_indexY.s3 + v_line_indexZ.s4];
-    gvelo_loc[noBC_id(i)] += wY.s3 * wZ.s5 * gvelo[i + v_line_indexY.s3 + v_line_indexZ.s5];
-
-    gvelo_loc[noBC_id(i)] += wY.s4 * wZ.s0 * gvelo[i + v_line_indexY.s4 + v_line_indexZ.s0];
-    gvelo_loc[noBC_id(i)] += wY.s4 * wZ.s1 * gvelo[i + v_line_indexY.s4 + v_line_indexZ.s1];
-    gvelo_loc[noBC_id(i)] += wY.s4 * wZ.s2 * gvelo[i + v_line_indexY.s4 + v_line_indexZ.s2];
-    gvelo_loc[noBC_id(i)] += wY.s4 * wZ.s3 * gvelo[i + v_line_indexY.s4 + v_line_indexZ.s3];
-    gvelo_loc[noBC_id(i)] += wY.s4 * wZ.s4 * gvelo[i + v_line_indexY.s4 + v_line_indexZ.s4];
-    gvelo_loc[noBC_id(i)] += wY.s4 * wZ.s5 * gvelo[i + v_line_indexY.s4 + v_line_indexZ.s5];
-
-    gvelo_loc[noBC_id(i)] += wY.s5 * wZ.s0 * gvelo[i + v_line_indexY.s5 + v_line_indexZ.s0];
-    gvelo_loc[noBC_id(i)] += wY.s5 * wZ.s1 * gvelo[i + v_line_indexY.s5 + v_line_indexZ.s1];
-    gvelo_loc[noBC_id(i)] += wY.s5 * wZ.s2 * gvelo[i + v_line_indexY.s5 + v_line_indexZ.s2];
-    gvelo_loc[noBC_id(i)] += wY.s5 * wZ.s3 * gvelo[i + v_line_indexY.s5 + v_line_indexZ.s3];
-    gvelo_loc[noBC_id(i)] += wY.s5 * wZ.s4 * gvelo[i + v_line_indexY.s5 + v_line_indexZ.s4];
-    gvelo_loc[noBC_id(i)] += wY.s5 * wZ.s5 * gvelo[i + v_line_indexY.s5 + v_line_indexZ.s5];
-#endif
-    }
-#endif
-#endif
-}
diff --git a/hysop/old/gpu.old/cl_src/advection/velocity_cache_noVec.cl b/hysop/old/gpu.old/cl_src/advection/velocity_cache_noVec.cl
deleted file mode 100644
index 1677b2546e42b36ebc28dacf7288989461c9c559..0000000000000000000000000000000000000000
--- a/hysop/old/gpu.old/cl_src/advection/velocity_cache_noVec.cl
+++ /dev/null
@@ -1,231 +0,0 @@
-void fill_velocity_cache(__global const float* gvelo,
-			 uint gidX, uint gidY, uint gidZ,
-			 __local float* gvelo_loc,
-#if ADVEC_IS_MULTISCALE
-			 float inv_v_dx_y, float inv_v_dx_z,
-#endif
-			 __constant struct AdvectionMeshInfo* mesh);
-
-void fill_velocity_cache(__global const float* gvelo,
-			 uint gidX, uint gidY, uint gidZ,
-			 __local float* gvelo_loc,
-#if ADVEC_IS_MULTISCALE
-			 float inv_v_dx_y, float inv_v_dx_z,
-#endif
-			 __constant struct AdvectionMeshInfo* mesh)
-{
-  uint i;
-
-  // ********************************
-  // **    Single Scale
-  // ********************************
-#if !(ADVEC_IS_MULTISCALE)
-  // Single scale : Velocity and scalar grids are identical : cache is just read from global
-  uint line_index = gidY*V_NB_I + gidZ*V_NB_I*V_NB_II; /* Current 1D problem index */
-  for(i=gidX; i<V_NB_I; i+=(WI_NB))
-    {
-      /* Read velocity */
-      /* Fill velocity cache */
-      gvelo_loc[noBC_id(i)] = gvelo[i+line_index];
-    }
-
-  // ********************************
-  // **    Multi-Scale
-  // ********************************
-  // Velocity cache is interpolated from global memory
-#else
-
-
-#if NB_III == 1
-  //  Multi-Scale (2D)
-
-  float line_posY, hY;
-  int indY;
-#if MS_FORMULA == LINEAR
-  int2 v_line_index;
-  float2 wY;
-#elif MS_FORMULA == L2_1
-  int4 v_line_index;
-  float4 wY;
-#elif MS_FORMULA == L4_2 ||  MS_FORMULA == L4_4
-  // Only the 6 first elements will be used
-  int8 v_line_index;
-  float8 wY;
-#endif
-
-  line_posY = (gidY * mesh->dx.y) * inv_v_dx_y; // mesh->v_dx.y;
-  indY = convert_int_rtn(line_posY);
-  hY = line_posY - convert_float(indY);
-
-
-#if MS_FORMULA == LINEAR
-  wY.s1 = hY;
-  wY.s0 = 1.0 - wY.s1;
-#else
-  wY.s0 = MS_INTERPOL(alpha)(hY);
-  wY.s1 = MS_INTERPOL(beta)(hY);
-  wY.s2 = MS_INTERPOL(gamma)(hY);
-#if MS_INTERPOL_SHIFT > 1
-  wY.s3 = MS_INTERPOL(delta)(hY);
-  wY.s4 = MS_INTERPOL(eta)(hY);
-  wY.s5 = 1.0 - wY.s0 - wY.s1 - wY.s2 - wY.s3 - wY.s4;
-#else
-  wY.s3 = 1.0 - wY.s0 - wY.s1 - wY.s2;
-#endif
-#endif
-
-  indY = indY + V_GHOSTS_NB - MS_INTERPOL_SHIFT;
-
-  v_line_index.s0 = indY * V_NB_I;
-  v_line_index.s1 = (indY + 1) * V_NB_I;
-#if MS_INTERPOL_SHIFT > 0
-  v_line_index.s2 = (indY + 2) * V_NB_I;
-  v_line_index.s3 = (indY + 3) * V_NB_I;
-#elif MS_INTERPOL_SHIFT > 1
-  v_line_index.s4 = (indY + 4) * V_NB_I;
-  v_line_index.s5 = (indY + 5) * V_NB_I;
-#endif
-
-
-  for(i=gidX; i<V_NB_I; i+=(WI_NB)){
-    gvelo_loc[noBC_id(i)] = wY.s0 * gvelo[i + v_line_index.s0];
-    gvelo_loc[noBC_id(i)] += wY.s1 * gvelo[i + v_line_index.s1];
-#if MS_INTERPOL_SHIFT > 0
-    gvelo_loc[noBC_id(i)] += wY.s2 * gvelo[i + v_line_index.s2];
-    gvelo_loc[noBC_id(i)] += wY.s3 * gvelo[i + v_line_index.s3];
-#elif MS_INTERPOL_SHIFT > 1
-    gvelo_loc[noBC_id(i)] += wY.s4 * gvelo[i + v_line_index.s4];
-    gvelo_loc[noBC_id(i)] += wY.s5 * gvelo[i + v_line_index.s5];
-#endif
-  }
-			 /* nombre d'opérations 2D Linéaire:
-			    - calcul des poids de ligne : 4flop (par wi)
-			    - calcul de la vitesse : 3flop par point de grille de vitesse
-			 */
-
-
-#else
-  //  Multi-Scale (3D)
-
-  float line_posY, hY;
-  float line_posZ, hZ;
-  int indY, indZ;
-#if MS_FORMULA == LINEAR
-  int2 v_line_indexY, v_line_indexZ;
-  float2 wY, wZ;
-#elif MS_FORMULA == L2_1
-  int4 v_line_indexY, v_line_indexZ;
-  float4 wY, wZ;
-#elif MS_FORMULA == L4_2 || MS_FORMULA == L4_4
-  int8 v_line_indexY, v_line_indexZ;
-  float8 wY, wZ;
-#endif
-
-  line_posY = (gidY * mesh->dx.y) * inv_v_dx_y; // mesh->v_dx.y;
-  line_posZ = (gidZ * mesh->dx.z) * inv_v_dx_z;// mesh->v_dx.z;
-  indY = convert_int_rtn(line_posY);
-  indZ = convert_int_rtn(line_posZ);
-  hY = line_posY - convert_float(indY);
-  hZ = line_posZ - convert_float(indZ);
-
-#if MS_FORMULA == LINEAR
-  wY.s1 = hY;
-  wY.s0 = 1.0 - wY.s1;
-  wZ.s1 = hZ;
-  wZ.s0 = 1.0 - wZ.s1;
-#else
-  wY.s0 = MS_INTERPOL(alpha)(hY);
-  wY.s1 = MS_INTERPOL(beta)(hY);
-  wY.s2 = MS_INTERPOL(gamma)(hY);
-  wZ.s0 = MS_INTERPOL(alpha)(hZ);
-  wZ.s1 = MS_INTERPOL(beta)(hZ);
-  wZ.s2 = MS_INTERPOL(gamma)(hZ);
-#if MS_INTERPOL_SHIFT > 1
-  wY.s3 = MS_INTERPOL(delta)(hY);
-  wY.s4 = MS_INTERPOL(eta)(hY);
-  wY.s5 = 1.0 - wY.s0 - wY.s1 - wY.s2 - wY.s3 - wY.s4;
-  wZ.s3 = MS_INTERPOL(delta)(hZ);
-  wZ.s4 = MS_INTERPOL(eta)(hZ);
-  wZ.s5 = 1.0 - wZ.s0 - wZ.s1 - wZ.s2 - wZ.s3 - wZ.s4;
-#else
-  wY.s3 = 1.0 - wY.s0 - wY.s1 - wY.s2;
-  wZ.s3 = 1.0 - wZ.s0 - wZ.s1 - wZ.s2;
-#endif
-#endif
-
-  indY = indY + V_GHOSTS_NB - MS_INTERPOL_SHIFT;
-  indZ = indZ + V_GHOSTS_NB - MS_INTERPOL_SHIFT;
-
-  v_line_indexY.s0 = indY * V_NB_I;
-  v_line_indexY.s1 = (indY + 1) * V_NB_I;
-  v_line_indexZ.s0 = indZ * V_NB_I * V_NB_II;
-  v_line_indexZ.s1 = (indZ + 1) * V_NB_I * V_NB_II;
-#if MS_INTERPOL_SHIFT > 0
-  v_line_indexY.s2 = (indY + 2) * V_NB_I;
-  v_line_indexY.s3 = (indY + 3) * V_NB_I;
-  v_line_indexZ.s2 = (indZ + 2) * V_NB_I * V_NB_II;
-  v_line_indexZ.s3 = (indZ + 3) * V_NB_I * V_NB_II;
-#elif MS_INTERPOL_SHIFT > 1
-  v_line_indexY.s4 = (indY + 4) * V_NB_I;
-  v_line_indexY.s5 = (indY + 5) * V_NB_I;
-  v_line_indexZ.s4 = (indZ + 4) * V_NB_I * V_NB_II;
-  v_line_indexZ.s5 = (indZ + 5) * V_NB_I * V_NB_II;
-#endif
-
-  for(i=gidX; i<V_NB_I; i+=(WI_NB)){
-    gvelo_loc[noBC_id(i)] = wY.s0 * wZ.s0 * gvelo[i + v_line_indexY.s0 + v_line_indexZ.s0];
-    gvelo_loc[noBC_id(i)] += wY.s0 * wZ.s1 * gvelo[i + v_line_indexY.s0 + v_line_indexZ.s1];
-    gvelo_loc[noBC_id(i)] += wY.s1 * wZ.s0 * gvelo[i + v_line_indexY.s1 + v_line_indexZ.s0];
-    gvelo_loc[noBC_id(i)] += wY.s1 * wZ.s1 * gvelo[i + v_line_indexY.s1 + v_line_indexZ.s1];
-#if MS_INTERPOL_SHIFT > 0
-    gvelo_loc[noBC_id(i)] += wY.s0 * wZ.s2 * gvelo[i + v_line_indexY.s0 + v_line_indexZ.s2];
-    gvelo_loc[noBC_id(i)] += wY.s0 * wZ.s3 * gvelo[i + v_line_indexY.s0 + v_line_indexZ.s3];
-
-    gvelo_loc[noBC_id(i)] += wY.s1 * wZ.s2 * gvelo[i + v_line_indexY.s1 + v_line_indexZ.s2];
-    gvelo_loc[noBC_id(i)] += wY.s1 * wZ.s3 * gvelo[i + v_line_indexY.s1 + v_line_indexZ.s3];
-
-    gvelo_loc[noBC_id(i)] += wY.s2 * wZ.s0 * gvelo[i + v_line_indexY.s2 + v_line_indexZ.s0];
-    gvelo_loc[noBC_id(i)] += wY.s2 * wZ.s1 * gvelo[i + v_line_indexY.s2 + v_line_indexZ.s1];
-    gvelo_loc[noBC_id(i)] += wY.s2 * wZ.s2 * gvelo[i + v_line_indexY.s2 + v_line_indexZ.s2];
-    gvelo_loc[noBC_id(i)] += wY.s2 * wZ.s3 * gvelo[i + v_line_indexY.s2 + v_line_indexZ.s3];
-
-    gvelo_loc[noBC_id(i)] += wY.s3 * wZ.s0 * gvelo[i + v_line_indexY.s3 + v_line_indexZ.s0];
-    gvelo_loc[noBC_id(i)] += wY.s3 * wZ.s1 * gvelo[i + v_line_indexY.s3 + v_line_indexZ.s1];
-    gvelo_loc[noBC_id(i)] += wY.s3 * wZ.s2 * gvelo[i + v_line_indexY.s3 + v_line_indexZ.s2];
-    gvelo_loc[noBC_id(i)] += wY.s3 * wZ.s3 * gvelo[i + v_line_indexY.s3 + v_line_indexZ.s3];
-#elif MS_INTERPOL_SHIFT > 1
-    gvelo_loc[noBC_id(i)] += wY.s0 * wZ.s4 * gvelo[i + v_line_indexY.s0 + v_line_indexZ.s4];
-    gvelo_loc[noBC_id(i)] += wY.s0 * wZ.s5 * gvelo[i + v_line_indexY.s0 + v_line_indexZ.s5];
-
-    gvelo_loc[noBC_id(i)] += wY.s1 * wZ.s4 * gvelo[i + v_line_indexY.s1 + v_line_indexZ.s4];
-    gvelo_loc[noBC_id(i)] += wY.s1 * wZ.s5 * gvelo[i + v_line_indexY.s1 + v_line_indexZ.s5];
-
-    gvelo_loc[noBC_id(i)] += wY.s2 * wZ.s4 * gvelo[i + v_line_indexY.s2 + v_line_indexZ.s4];
-    gvelo_loc[noBC_id(i)] += wY.s2 * wZ.s5 * gvelo[i + v_line_indexY.s2 + v_line_indexZ.s5];
-
-    gvelo_loc[noBC_id(i)] += wY.s3 * wZ.s4 * gvelo[i + v_line_indexY.s3 + v_line_indexZ.s4];
-    gvelo_loc[noBC_id(i)] += wY.s3 * wZ.s5 * gvelo[i + v_line_indexY.s3 + v_line_indexZ.s5];
-
-    gvelo_loc[noBC_id(i)] += wY.s4 * wZ.s0 * gvelo[i + v_line_indexY.s4 + v_line_indexZ.s0];
-    gvelo_loc[noBC_id(i)] += wY.s4 * wZ.s1 * gvelo[i + v_line_indexY.s4 + v_line_indexZ.s1];
-    gvelo_loc[noBC_id(i)] += wY.s4 * wZ.s2 * gvelo[i + v_line_indexY.s4 + v_line_indexZ.s2];
-    gvelo_loc[noBC_id(i)] += wY.s4 * wZ.s3 * gvelo[i + v_line_indexY.s4 + v_line_indexZ.s3];
-    gvelo_loc[noBC_id(i)] += wY.s4 * wZ.s4 * gvelo[i + v_line_indexY.s4 + v_line_indexZ.s4];
-    gvelo_loc[noBC_id(i)] += wY.s4 * wZ.s5 * gvelo[i + v_line_indexY.s4 + v_line_indexZ.s5];
-
-    gvelo_loc[noBC_id(i)] += wY.s5 * wZ.s0 * gvelo[i + v_line_indexY.s5 + v_line_indexZ.s0];
-    gvelo_loc[noBC_id(i)] += wY.s5 * wZ.s1 * gvelo[i + v_line_indexY.s5 + v_line_indexZ.s1];
-    gvelo_loc[noBC_id(i)] += wY.s5 * wZ.s2 * gvelo[i + v_line_indexY.s5 + v_line_indexZ.s2];
-    gvelo_loc[noBC_id(i)] += wY.s5 * wZ.s3 * gvelo[i + v_line_indexY.s5 + v_line_indexZ.s3];
-    gvelo_loc[noBC_id(i)] += wY.s5 * wZ.s4 * gvelo[i + v_line_indexY.s5 + v_line_indexZ.s4];
-    gvelo_loc[noBC_id(i)] += wY.s5 * wZ.s5 * gvelo[i + v_line_indexY.s5 + v_line_indexZ.s5];
-#endif
-  }
-			 /* nombre d'opérations 3D Linéaire:
-			    - calcul des poids de ligne : 8flop (par wi)
-			    - calcul de la vitesse : 11flop par point de grille de vitesse
-			 */
-
-#endif
-#endif
-}
diff --git a/hysop/old/gpu.old/cl_src/common.cl b/hysop/old/gpu.old/cl_src/common.cl
deleted file mode 100644
index 4f67d2aa000011e2dc5fe8b875e5c5521107a283..0000000000000000000000000000000000000000
--- a/hysop/old/gpu.old/cl_src/common.cl
+++ /dev/null
@@ -1,166 +0,0 @@
-/**
- * @file common.cl
- * Common parameters for advection and remeshing kernels.
- */
-
-inline uint noBC_id(int id);
-
-#ifdef WITH_NOBC
-/**
- * Mapping to local memory arrays to avoir banck conflics.
- * 1D buffer is taken as 2D one with wor-items vs. particles.
- *
- * @param id 1D index
- *
- * @return 2D index
- */
-inline uint noBC_id(int id){
-  return (id%PART_NB_PER_WI)*WI_NB+(id/PART_NB_PER_WI);
-}
-#else
-/**
- * Leave mapping unchanged, 1D.
- *
- * @param id 1D index
- *
- * @return 1D index
- */
-inline uint noBC_id(int id){
-  return id;
-}
-#endif
-
-/**
- * Constants for remeshing formulas:
- *   - L2_1 1
- *   - L2_2 2
- *   - L2_3 3
- *   - L2_4 4
- *   - L4_2 5
- *   - L4_3 6
- *   - L4_4 7
- *   - L6_3 8
- *   - L6_4 9
- *   - L6_5 10
- *   - L6_6 11
- *   - L8_4 12
- *   - M8PRIME 13
- */
-#define L2_1 1
-#define L2_2 2
-#define L2_3 3
-#define L2_4 4
-#define L4_2 5
-#define L4_3 6
-#define L4_4 7
-#define L6_3 8
-#define L6_4 9
-#define L6_5 10
-#define L6_6 11
-#define L8_4 12
-#define M8PRIME 13
-#define LINEAR 14
-
-/**
- * Remeshing configuration
- */
-#if FORMULA == L2_1
-#define REMESH_SHIFT 1
-#define REMESH(greek) greek##_l2_1
-#elif FORMULA == L2_2
-#define REMESH_SHIFT 1
-#define REMESH(greek) greek##_l2_2
-#elif FORMULA == L2_3
-#define REMESH_SHIFT 1
-#define REMESH(greek) greek##_l2_3
-#elif FORMULA == L2_4
-#define REMESH_SHIFT 1
-#define REMESH(greek) greek##_l2_4
-
-#elif FORMULA == L4_2
-#define REMESH_SHIFT 2
-#define REMESH(greek) greek##_l4_2
-#elif FORMULA == L4_3
-#define REMESH_SHIFT 2
-#define REMESH(greek) greek##_l4_3
-#elif FORMULA == L4_4
-#define REMESH_SHIFT 2
-#define REMESH(greek) greek##_l4_4
-
-#elif FORMULA == M8PRIME
-#define REMESH_SHIFT 3
-#define REMESH(greek) greek##_M8p
-#elif FORMULA == L6_3
-#define REMESH_SHIFT 3
-#define REMESH(greek) greek##_l6_3
-#elif FORMULA == L6_4
-#define REMESH_SHIFT 3
-#define REMESH(greek) greek##_l6_4
-#elif FORMULA == L6_5
-#define REMESH_SHIFT 3
-#define REMESH(greek) greek##_l6_5
-#elif FORMULA == L6_6
-#define REMESH_SHIFT 3
-#define REMESH(greek) greek##_l6_6
-
-#elif FORMULA == L8_4
-#define REMESH_SHIFT 4
-#define REMESH(greek) greek##_l8_4
-#endif
-
-
-/**
- * Multi-scale configuration
- */
-
-#ifndef ADVEC_IS_MULTISCALE
-#define ADVEC_IS_MULTISCALE (V_NB_I-2*V_GHOSTS_NB) != NB_I
-#endif
-
-#if MS_FORMULA == LINEAR
-#define MS_INTERPOL_SHIFT 0
-// MS_INTERPOL not used
-#elif MS_FORMULA == L2_1
-#define MS_INTERPOL_SHIFT 1
-#define MS_INTERPOL(greek) greek##_l2_1
-#elif MS_FORMULA == L4_2
-#define MS_INTERPOL_SHIFT 2
-#define MS_INTERPOL(greek) greek##_l4_2
-#elif MS_FORMULA == L4_4
-#define MS_INTERPOL_SHIFT 2
-#define MS_INTERPOL(greek) greek##_l4_4
-#else
-//Default case for single-scale (only used in comm advection)
-#define MS_INTERPOL_SHIFT 0
-#endif
-
-/*
-a minmax element is a 12 int defined as follows:
-*/
-#define L_MIN_X 0
-#define L_MAX_X 1
-#define L_MIN_Y 2
-#define L_MAX_Y 3
-#define L_MIN_Z 4
-#define L_MAX_Z 5
-#define R_MIN_X 6
-#define R_MAX_X 7
-#define R_MIN_Y 8
-#define R_MAX_Y 9
-#define R_MIN_Z 10
-#define R_MAX_Z 11
-
-/* Structure to store __constants advection parameters */
-typedef struct AdvectionMeshInfo
-{
-  float4 dx;                   /* Mesh step (advected grid) */
-  float4 v_dx;                 /* Mesh step (velocity) */
-  float min_position;           /* Domain minimum coordinate in current direction */
-  float invdx;                 /* Store 1./dx.x */
-  float v_invdx;               /* Store 1./v_dx.x */
-  float x;                     /* Padding */
-} AdvectionMeshInfo;
-
-/* Finite differences constants */
-#define FD_C_2 88
-#define FD_C_4 99
diff --git a/hysop/old/gpu.old/cl_src/kernels/advection.cl b/hysop/old/gpu.old/cl_src/kernels/advection.cl
deleted file mode 100644
index 2e9e341e3df1cbc898a89b882e7ee83ddf4c2a77..0000000000000000000000000000000000000000
--- a/hysop/old/gpu.old/cl_src/kernels/advection.cl
+++ /dev/null
@@ -1,77 +0,0 @@
-/**
- * @file advection.cl
- * Advection kernel, vectorized version.
- */
-
-/**
- * Computes particles positions from the velocity field.
- * A work-group is handling a 1D problem. Thus, gidY and gidZ are constants among work-items of a work-group. Computations of 1D problems are placed in loops over gidY and gidZ to adjust local workload and handle the work-item maximum size.
- * Each work-item computes <code>NB_I/WI_NB</code> particles positions in each 1D problem.
- * Particle are computed through OpenCL vector types of length 2, 4 or 8.
- * Velocity data are copied to a local buffer as a cache.
- *
- * @param gvelo Velocity.
- * @param ppos Particle position.
- * @param dt Time step.
- * @param mesh Mesh description.
- * @param inv_v_dx_y velocity grid 1/dy
- * @param inv_v_dx_z velocity grid 1/dz
- *
- * @remark <code>NB_I</code>, <code>NB_II</code>, <code>NB_III</code> : points number in directions from 1st varying index to last.
- * @remark <code>NB_X</code>, <code>NB_Y</code>, <code>NB_Z</code> : points number in physical space directions.
- * @remark <code>WI_NB</code> corresponds to the work-item number.
- * @remark <code>ADVEC_IS_MULTISCALE</code> is a flag for multiscale.
- * @remark <code>V_NB_I</code>, <code>V_NB_II</code>, <code>V_NB_III</code> : points number for velocity grid in directions from 1st varying index to last.
- * @remark <code>__N__</code> is expanded at compilation time by vector width.
- * @remark <code>__NN__</code> is expanded at compilation time by a sequence of integer for each vector component.
- * @see hysop.gpu.tools.parse_file
- */
-__kernel void advection_kernel(__global const float* gvelo,
-			       __global float* ppos,
-			       float dt,
-#if ADVEC_IS_MULTISCALE
-			       float inv_v_dx_y, float inv_v_dx_z,
-#endif
-			       __constant struct AdvectionMeshInfo* mesh)
-{
-  uint gidX = get_global_id(0);	/* OpenCL work-itme global index (X) */
-  uint gidY; /* OpenCL work-itme global index (Y) */
-  uint gidZ; /* OpenCL work-itme global index (Z) */
-  uint i;			/* Particle index in 1D problem */
-  float__N__ p;				/* Particle position */
-  uint line_index; /* Current 1D problem index */
-
-  __local float velocity_cache[V_NB_I]; /* Velocity cache */
-
-  for(gidZ=get_global_id(2);
-#ifdef NB_Z
-      gidZ<NB_III;
-#else
-      gidZ<=get_global_id(2); // Single element loop
-#endif
-      gidZ+=get_global_size(2)) {
-    for(gidY=get_global_id(1); gidY<NB_II; gidY+=get_global_size(1)) {
-
-      // 1D problem computations
-      line_index = gidY*NB_I+ gidZ*NB_I*NB_II;
-
-#if ADVEC_IS_MULTISCALE
-      fill_velocity_cache(gvelo, gidX, gidY, gidZ, velocity_cache, inv_v_dx_y, inv_v_dx_z, mesh);
-#else
-      fill_velocity_cache(gvelo, gidX, gidY, gidZ, velocity_cache, mesh);
-#endif
-
-      /* Synchronize work-group */
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      for(i=gidX*__N__; i<NB_I; i+=WI_NB*__N__) {
-	/* Compute position */
-	p = advection(i, dt, velocity_cache, mesh);
-	/* Store result */
-	vstore__N__(p, (i+line_index)/__N__, ppos);
-      }
-
-      barrier(CLK_LOCAL_MEM_FENCE);
-    }
-  }
-}
diff --git a/hysop/old/gpu.old/cl_src/kernels/advection_and_remeshing.cl b/hysop/old/gpu.old/cl_src/kernels/advection_and_remeshing.cl
deleted file mode 100644
index c9fb52ebdcd758d33088e60acefcce35d640ecf7..0000000000000000000000000000000000000000
--- a/hysop/old/gpu.old/cl_src/kernels/advection_and_remeshing.cl
+++ /dev/null
@@ -1,101 +0,0 @@
-/**
- * @file advection_and_remeshing.cl
- * Advection and remeshing kernel, vectorized version.
- */
-
-/**
- * Performs advection and then remeshing of the particles scalar.
- * A work-group is handling a 1D problem. Thus, gidY and gidZ are constants among work-items of a work-group. Computations of 1D problems are placed in loops over gidY and gidZ to adjust local workload and handle the work-item maximum size.
- * Each work-item computes NB_I/WI_NB particles positions. To avoid concurrent witings, in case of strong velocity gradients, work-items computes contiguous particles.
- * Particle are computed through OpenCL vector types of lenght 2, 4 or 8.
- * Scalar results are stored in a local buffer as a cache and then copied to global memory buffer.
- *
- * @param gvelo Velocity field
- * @param pscal Particle scalar
- * @param gscal Grid scalar
- * @param dt Time step
- * @param mesh Mesh description.
- * @param inv_v_dx_y velocity grid 1/dy
- * @param inv_v_dx_z velocity grid 1/dz
- *
- * @remark <code>NB_I</code>, <code>NB_II</code>, <code>NB_III</code> : points number in directions from 1st varying index to last.
- * @remark <code>NB_X</code>, <code>NB_Y</code>, <code>NB_Z</code> : points number in physical space directions.
- * @remark <code>WI_NB</code> corresponds to the work-item number.
- * @remark <code>ADVEC_IS_MULTISCALE</code> is a flag for multiscale.
- * @remark <code>V_NB_I</code>, <code>V_NB_II</code>, <code>V_NB_III</code> : points number for velocity grid in directions from 1st varying index to last.
- * @remark <code>__N__</code> is expanded at compilation time by vector width.
- * @remark <code>__NN__</code> is expanded at compilation time by a sequence of integer for each vector component.
- * @remark <code>__RCOMP_I</code> flag is for instruction expansion for the different remeshed components.
- * @remark <code>__RCOMP_P</code> flag is for function parameter expansion for the different remeshed components.
- * @remark <code>__ID__</code> is replaced by the remeshed component id in an expansion.
- * @see hysop.gpu.tools.parse_file
- */
-__kernel void advection_and_remeshing(__global const float* gvelo,
-				      __RCOMP_P__global const float* pscal__ID__,
-				      __RCOMP_P__global float* gscal__ID__,
-				      float dt,
-#if ADVEC_IS_MULTISCALE
-				      float inv_v_dx_y, float inv_v_dx_z,
-#endif
-				      __constant struct AdvectionMeshInfo* mesh)
-{
-  uint gidX = get_global_id(0);	/* OpenCL work-itme global index (X) */
-  uint gidY; /* OpenCL work-itme global index (Y) */
-  uint gidZ; /* OpenCL work-itme global index (Z) */
-  uint i;			/* Particle index in 1D problem */
-  float__N__ p;			/* Particle position */
-  __RCOMP_I float__N__ s__ID__; /* Particle scalar */
-  uint line_index; /* Current 1D problem index */
-
-  __RCOMP_I__local float gscal_loc__ID__[NB_I]; /* Local buffer for result */
-  __local float velocity_cache[V_NB_I]; /* Velocity cache */
-
-  for(gidZ=get_global_id(2);
-#ifdef NB_Z
-      gidZ<NB_III;
-#else
-      gidZ<=get_global_id(2); // Single element loop
-#endif
-      gidZ+=get_global_size(2)) {
-    for(gidY=get_global_id(1); gidY<NB_II; gidY+=get_global_size(1)) {
-
-      // 1D problem computations
-      line_index = gidY*NB_I+ gidZ*NB_I*NB_II;
-
-#if ADVEC_IS_MULTISCALE
-      fill_velocity_cache(gvelo, gidX, gidY, gidZ, velocity_cache, inv_v_dx_y, inv_v_dx_z, mesh);
-#else
-      fill_velocity_cache(gvelo, gidX, gidY, gidZ, velocity_cache, mesh);
-#endif
-
-      for(i=gidX*__N__; i<NB_I; i+=(WI_NB*__N__)) {
-	/* Initialize result buffer */
-	__RCOMP_Igscal_loc__ID__[noBC_id(i+__NN__)] = 0.0;
-      }
-
-      /* Synchronize work-group */
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      for(i=gidX*PART_NB_PER_WI; i<(gidX + 1)*PART_NB_PER_WI; i+=__N__) {
-	/* Read Particle scalar */
-	__RCOMP_Is__ID__ = vload__N__((i + line_index)/__N__, pscal__ID__);
-	/* Compute particle position */
-	p = advection(i, dt, velocity_cache, mesh);
-	/* Remesh particle */
-	remesh(i, __RCOMP_Ps__ID__, p, __RCOMP_Pgscal_loc__ID__, mesh);
-      }
-
-      /* Synchronize work-group */
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      for(i=gidX*__N__; i<NB_I; i+=(WI_NB*__N__)) {
-	/* Store result */
-	__RCOMP_Ivstore__N__((float__N__)(gscal_loc__ID__[noBC_id(i+__NN__)],
-					  ), (i + line_index)/__N__, gscal__ID__);
-      }
-
-      /* Synchronize work-group */
-      barrier(CLK_LOCAL_MEM_FENCE);
-    }
-  }
-}
diff --git a/hysop/old/gpu.old/cl_src/kernels/advection_and_remeshing_noVec.cl b/hysop/old/gpu.old/cl_src/kernels/advection_and_remeshing_noVec.cl
deleted file mode 100644
index 5759dc6f56f201d248c1ec794598533d648aae38..0000000000000000000000000000000000000000
--- a/hysop/old/gpu.old/cl_src/kernels/advection_and_remeshing_noVec.cl
+++ /dev/null
@@ -1,94 +0,0 @@
-/**
- * @file advection_and_remeshing.cl
- * Advection and remeshing kernel.
- */
-
-/**
- * Performs advection and then remeshing of the particles' scalar.
- * A work-group is handling a 1D problem. Thus, gidY and gidZ are constants among work-items of a work-group.
- * Each work-item computes NB_I/WI_NB particles positions. To avoid concurrent witings, in case of strong velocity gradients, work-items computes contiguous particles.
- * Particle are computed through OpenCL vector types of lenght 2, 4 or 8.
- * Scalar results are stored in a local buffer as a cache and then copied to global memory buffer.
- *
- * @param gvelo Velocity field
- * @param pscal Particle scalar
- * @param gscal Grid scalar
- * @param dt Time step
- * @param min_position Domain lower coordinate
- * @param dx Space step
- *
- * @remark NB_I, NB_II, NB_III : points number in directions from 1st varying index to last.
- * @remark WI_NB corresponds to the work-item number.
- * @remark \__N__ is expanded at compilation time by vector width.
- * @remark \__NN__ is expanded at compilation time by a sequence of integer for each vector component.
- * @remark <code>__RCOMP_I</code> flag is for instruction expansion for the different remeshed components.
- * @remark <code>__RCOMP_P</code> flag is for function parameter expansion for the different remeshed components.
- * @remark <code>__ID__</code> is replaced by the remeshed component id in an expansion.
- * @see hysop.gpu.tools.parse_file
- */
-__kernel void advection_and_remeshing(__global const float* gvelo,
-				      __RCOMP_P__global const float* pscal__ID__,
-				      __RCOMP_P__global float* gscal__ID__,
-				      float dt,
-#if ADVEC_IS_MULTISCALE
-				      float inv_v_dx_y, float inv_v_dx_z,
-#endif
-				      __constant struct AdvectionMeshInfo* mesh)
-{
-  uint gidX = get_global_id(0);	/* OpenCL work-itme global index (X) */
-  uint gidY; /* OpenCL work-itme global index (Y) */
-  uint gidZ; /* OpenCL work-itme global index (Z) */
-  uint i;			/* Particle index in 1D problem */
-  float p;			/* Particle position */
-  __RCOMP_I float s__ID__;	/* Particle scalar */
-  uint line_index; /* Current 1D problem index */
-
-  __RCOMP_I__local float gscal_loc__ID__[NB_I]; /* Local buffer for result */
-  __local float velocity_cache[V_NB_I]; /* Velocity cache */
-
-#ifdef NB_Z
-  for(gidZ=get_global_id(2); gidZ<NB_III; gidZ+=get_global_size(2)) {
-#else
-  gidZ=get_global_id(2); {
-#endif
-  for(gidY=get_global_id(1); gidY<NB_II; gidY+=get_global_size(1)) {
-  line_index = gidY*NB_I+ gidZ*NB_I*NB_II;
-
-#if ADVEC_IS_MULTISCALE
-  fill_velocity_cache(gvelo, gidX, gidY, gidZ, velocity_cache, inv_v_dx_y, inv_v_dx_z, mesh);
-#else
-  fill_velocity_cache(gvelo, gidX, gidY, gidZ, velocity_cache, mesh);
-#endif
-
-  for(i=gidX; i<NB_I; i+=(WI_NB))
-    {
-      /* Initialize result buffer */
-      __RCOMP_Igscal_loc__ID__[noBC_id(i)] = 0.0;
-    }
-
-  /* Synchronize work-group */
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  for(i=gidX*PART_NB_PER_WI; i<(gidX + 1)*PART_NB_PER_WI; i+=1)
-    {
-      /* Read Particle scalar */
-      __RCOMP_Is__ID__ = pscal__ID__[i + line_index];
-      /* Compute particle position */
-      p = advection(i, dt, velocity_cache, mesh);
-      /* Remesh particle */
-      remesh(i, __RCOMP_Ps__ID__, p, __RCOMP_Pgscal_loc__ID__, mesh);
-    }
-
-  /* Synchronize work-group */
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  for(i=gidX; i<NB_I; i+=(WI_NB))
-    {
-      /* Store result */
-      __RCOMP_Igscal__ID__[i + line_index] = gscal_loc__ID__[noBC_id(i)];
-    }
-
-  barrier(CLK_LOCAL_MEM_FENCE);
-}
-}
-}
diff --git a/hysop/old/gpu.old/cl_src/kernels/advection_euler_and_remeshing_noVec.cl b/hysop/old/gpu.old/cl_src/kernels/advection_euler_and_remeshing_noVec.cl
deleted file mode 100644
index 99565be69e6702c197cb77d021df46636c59620f..0000000000000000000000000000000000000000
--- a/hysop/old/gpu.old/cl_src/kernels/advection_euler_and_remeshing_noVec.cl
+++ /dev/null
@@ -1,85 +0,0 @@
-/**
- * @file advection_and_remeshing.cl
- * Euler advection and remeshing kernel.
- */
-
-/**
- * Performs advection and then remeshing of the particles scalar.
- * A work-group is handling a 1D problem. Thus, gidY and gidZ are constants among work-items of a work-group. Computations of 1D problems are placed in loops over gidY and gidZ to adjust local workload and handle the work-item maximum size.
- * Each work-item computes NB_I/WI_NB particles positions. To avoid concurrent witings, in case of strong velocity gradients, work-items computes contiguous particles.
- * Particle are computed through OpenCL vector types of lenght 2, 4 or 8.
- * Scalar results are stored in a local buffer as a cache and then copied to global memory buffer.
- *
- * @param gvelo Velocity field
- * @param pscal Particle scalar
- * @param gscal Grid scalar
- * @param dt Time step
- * @param min_position Domain lower coordinate
- * @param dx Space step
- *
- * @remark NB_I, NB_II, NB_III : points number in directions from 1st varying index to last.
- * @remark WI_NB corresponds to the work-item number.
- * @remark \__N__ is expanded at compilation time by vector width.
- * @remark \__NN__ is expanded at compilation time by a sequence of integer for each vector component.
- * @remark <code>__RCOMP_I</code> flag is for instruction expansion for the different remeshed components.
- * @remark <code>__RCOMP_P</code> flag is for function parameter expansion for the different remeshed components.
- * @remark <code>__ID__</code> is replaced by the remeshed component id in an expansion.
- * @see hysop.gpu.tools.parse_file
- */
-__kernel void advection_and_remeshing(__global const float* gvelo,
-				      __RCOMP_P__global const float* pscal__ID__,
-				      __RCOMP_P__global float* gscal__ID__,
-				      float dt,
-				      __constant struct AdvectionMeshInfo* mesh)
-{
-  uint gidX = get_global_id(0);	/* OpenCL work-itme global index (X) */
-  uint gidY; /* OpenCL work-itme global index (Y) */
-  uint gidZ; /* OpenCL work-itme global index (Z) */
-  uint i;			/* Particle index in 1D problem */
-  float p,c;			/* Particle position */
-  __RCOMP_I float s__ID__;	/* Particle scalar */
-  uint line_index; /* Current 1D problem index */
-
-  __RCOMP_I__local float gscal_loc__ID__[NB_I]; /* Local buffer for result */
-
-  for(gidZ=get_global_id(2);
-#ifdef NB_Z
-      gidZ<NB_III;
-#else
-      gidZ<=get_global_id(2); // Single element loop
-#endif
-      gidZ+=get_global_size(2)) {
-    for(gidY=get_global_id(1); gidY<NB_II; gidY+=get_global_size(1)) {
-
-      // 1D computations
-      line_index = gidY*NB_I+ gidZ*NB_I*NB_II;
-
-      for(i=gidX; i<NB_I; i+=(WI_NB)) {
-	/* Initialize result buffer */
-	__RCOMP_Igscal_loc__ID__[noBC_id(i)] = 0.0;
-      }
-
-      /* Synchronize work-group */
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      for(i=gidX*PART_NB_PER_WI; i<(gidX + 1)*PART_NB_PER_WI; i+=1) {
-	/* Read Particle scalar */
-	__RCOMP_Is__ID__ = pscal__ID__[i + line_index];
-	/* Compute particle position */
-	c = fma(i, mesh->dx.x, mesh->min_position);
-	p = fma(dt, gvelo[i+line_index], c);
-	/* Remesh particle */
-	remesh(i, __RCOMP_Ps__ID__, p, __RCOMP_Pgscal_loc__ID__, mesh);
-      }
-
-      /* Synchronize work-group */
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      for(i=gidX; i<NB_I; i+=(WI_NB)) {
-	/* Store result */
-	__RCOMP_Igscal__ID__[i + line_index] = gscal_loc__ID__[noBC_id(i)];
-      }
-      barrier(CLK_LOCAL_MEM_FENCE);
-    }
-  }
-}
diff --git a/hysop/old/gpu.old/cl_src/kernels/advection_euler_noVec.cl b/hysop/old/gpu.old/cl_src/kernels/advection_euler_noVec.cl
deleted file mode 100644
index df90575a224d57ebd3485bd9a8e3f9128249c0be..0000000000000000000000000000000000000000
--- a/hysop/old/gpu.old/cl_src/kernels/advection_euler_noVec.cl
+++ /dev/null
@@ -1,51 +0,0 @@
-/**
- * @file advection_euler_noVec.cl
- * Advection kernel, basic version for Euler integrator for simple scale problems (no need velocity cache).
- */
-
-/**
- * Computes particles positions from the velocity field.
- * A work-group is handling a 1D problem. Thus, gidY and gidZ are constants among work-items of a work-group.
- * Each work-item computes NB_I/WI_NB particles positions.
- *
- * @param gvelo Velocity.
- * @param ppos Particle position.
- * @param dt Time step.
- * @param min_position Domain lower coordinate.
- * @param dx Space step.
- *
- * @remark NB_I, NB_II, NB_III : points number in directions from 1st varying index to last.
- * @remark WI_NB corresponds to the work-item number.
- */
-__kernel void advection_kernel(__global const float* gvelo,
-			       __global float* ppos,
-			       float dt,
-			       __constant struct AdvectionMeshInfo* mesh)
-{
-  uint gidX = get_global_id(0);	/* OpenCL work-itme global index (X) */
-  uint gidY; /* OpenCL work-itme global index (Y) */
-  uint gidZ; /* OpenCL work-itme global index (Z) */
-  uint i;			/* Particle index in 1D problem */
-  uint line_index; /* Current 1D problem index */
-  float c;
-
-  for(gidZ=get_global_id(2);
-#ifdef NB_Z
-      gidZ<NB_III;
-#else
-      gidZ<=get_global_id(2); // Single element loop
-#endif
-      gidZ+=get_global_size(2)) {
-    for(gidY=get_global_id(1); gidY<NB_II; gidY+=get_global_size(1)) {
-
-      //1D computations
-      line_index = gidY*NB_I+ gidZ*NB_I*NB_II;
-
-      for(i=gidX; i<NB_I; i+=WI_NB) {
-	c = fma(i, mesh->dx.x, mesh->min_position);
-	ppos[i+line_index] =  fma(dt, gvelo[i+line_index], c);
-      }
-      barrier(CLK_LOCAL_MEM_FENCE);
-    }
-  }
-}
diff --git a/hysop/old/gpu.old/cl_src/kernels/advection_noVec.cl b/hysop/old/gpu.old/cl_src/kernels/advection_noVec.cl
deleted file mode 100644
index 78ca64d6108809df5d4db707017f94a7d96d5b72..0000000000000000000000000000000000000000
--- a/hysop/old/gpu.old/cl_src/kernels/advection_noVec.cl
+++ /dev/null
@@ -1,63 +0,0 @@
-/**
- * @file advection_noVec.cl
- * Advection kernel, basic version.
- */
-
-/**
- * Computes particles positions from the velocity field.
- * A work-group is handling a 1D problem. Thus, gidY and gidZ are constants among work-items of a work-group.
- * Each work-item computes NB_I/WI_NB particles positions.
- *
- * @param gvelo Velocity.
- * @param ppos Particle position.
- * @param dt Time step.
- * @param min_position Domain lower coordinate.
- * @param dx Space step.
- *
- * @remark NB_I, NB_II, NB_III : points number in directions from 1st varying index to last.
- * @remark WI_NB corresponds to the work-item number.
- */
-__kernel void advection_kernel(__global const float* gvelo,
-			       __global float* ppos,
-			       float dt,
-#if ADVEC_IS_MULTISCALE
-			       float inv_v_dx_y, float inv_v_dx_z,
-#endif
-			       __constant struct AdvectionMeshInfo* mesh)
-{
-  uint gidX = get_global_id(0);	/* OpenCL work-itme global index (X) */
-  uint gidY; /* OpenCL work-itme global index (Y) */
-  uint gidZ; /* OpenCL work-itme global index (Z) */
-  uint i;			/* Particle index in 1D problem */
-  uint line_index; /* Current 1D problem index */
-
-  __local float velocity_cache[V_NB_I]; /* Velocity cache */
-
-  for(gidZ=get_global_id(2);
-#ifdef NB_Z
-      gidZ<NB_III;
-#else
-      gidZ<=get_global_id(2);  // Single element loop
-#endif
-      gidZ+=get_global_size(2)) {
-    for(gidY=get_global_id(1); gidY<NB_II; gidY+=get_global_size(1)) {
-
-      // 1D computation
-      line_index = gidY*NB_I+ gidZ*NB_I*NB_II;
-
-#if ADVEC_IS_MULTISCALE
-      fill_velocity_cache(gvelo, gidX, gidY, gidZ, velocity_cache, inv_v_dx_y, inv_v_dx_z, mesh);
-#else
-      fill_velocity_cache(gvelo, gidX, gidY, gidZ, velocity_cache, mesh);
-#endif
-
-      /* Synchronize work-group */
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      for(i=gidX; i<NB_I; i+=WI_NB) {
-	ppos[i+line_index] = advection(i, dt, velocity_cache, mesh);
-      }
-      barrier(CLK_LOCAL_MEM_FENCE);
-    }
-  }
-}
diff --git a/hysop/old/gpu.old/cl_src/kernels/comm_MS_advection_noVec.cl b/hysop/old/gpu.old/cl_src/kernels/comm_MS_advection_noVec.cl
deleted file mode 100644
index 2f7193be4698c21f37cb2e49ebd7b9ae18c7364f..0000000000000000000000000000000000000000
--- a/hysop/old/gpu.old/cl_src/kernels/comm_MS_advection_noVec.cl
+++ /dev/null
@@ -1,87 +0,0 @@
-
-
-
-
-__kernel void buff_advec(__global const float* gvelo,
-			 __global float* ppos,
-			 __global float* buffer_l,
-			 __global float* buffer_r,
-			 float dt,
-			 float inv_v_dx_y, float inv_v_dx_z,
-			 __constant struct AdvectionMeshInfo* mesh)
-{
-  int gidX = get_global_id(0);	/* OpenCL work-itme global index (X) */
-  int gidY = get_global_id(1); /* OpenCL work-itme global index (Y) */
-  int gidZ = get_global_id(2); /* OpenCL work-itme global index (Z) */
-  int i;			/* Particle index in 1D problem */
-  int line_index = gidY*NB_I+gidZ*NB_I*NB_II; /* Current 1D problem index */
-  float p,v,c;
-  float2 hY, hZ;
-  int i_ind, i_indY, i_indZ;
-
-
-  __local float velocity_cache[V_NB_I];
-  __local float buff_l_loc[V_BUFF_WIDTH];
-  __local float buff_r_loc[V_BUFF_WIDTH];
-  __local float* loc_ptr;
-
-
-  hY.s0 = (gidY * mesh->dx.y) * inv_v_dx_y;
-  hZ.s0 = (gidZ * mesh->dx.z) * inv_v_dx_z;
-  i_indY = convert_int_rtn(hY.s0);
-  i_indZ = convert_int_rtn(hZ.s0);
-  hY.s0 = hY.s0 - convert_float(i_indY);
-  hZ.s0 = hZ.s0 - convert_float(i_indZ);
-  hY.s1 = (1.0-hY.s0);
-  hZ.s1 = (1.0-hZ.s0);
-
-  i_indY = i_indY + V_GHOSTS_NB;
-  i_indZ = i_indZ + V_GHOSTS_NB;
-
-  for(i=gidX; i<V_NB_I; i+=(WI_NB)){
-    velocity_cache[noBC_id(i)] = hY.s1*hZ.s1 * gvelo[i + i_indY * V_NB_I + i_indZ * V_NB_I * V_NB_II];
-    velocity_cache[noBC_id(i)] += hY.s1*hZ.s0 * gvelo[i + i_indY * V_NB_I + (i_indZ + 1) * V_NB_I * V_NB_II];
-    velocity_cache[noBC_id(i)] += hY.s0*hZ.s1 * gvelo[i + (i_indY + 1) * V_NB_I + i_indZ * V_NB_I * V_NB_II];
-    velocity_cache[noBC_id(i)] += hY.s0*hZ.s0 * gvelo[i + (i_indY + 1) * V_NB_I + (i_indZ + 1) * V_NB_I * V_NB_II];
-  }
-
-  for(i=gidX; i<V_BUFF_WIDTH; i+=(WI_NB)){
-    buff_l_loc[i] = hY.s1*hZ.s1*buffer_l[i + V_BUFF_WIDTH*(i_indY + i_indZ*V_NB_II)];
-    buff_l_loc[i] += hY.s1*hZ.s0*buffer_l[i + V_BUFF_WIDTH*(i_indY + (i_indZ+1)*V_NB_II)];
-    buff_l_loc[i] += hY.s0*hZ.s1*buffer_l[i + V_BUFF_WIDTH*(i_indY+1 + i_indZ*V_NB_II)];
-    buff_l_loc[i] += hY.s0*hZ.s0*buffer_l[i + V_BUFF_WIDTH*(i_indY+1 + (i_indZ+1)*V_NB_II)];
-  }
-
-  for(i=gidX; i<V_BUFF_WIDTH; i+=(WI_NB)){
-    buff_r_loc[i] = hY.s1*hZ.s1*buffer_r[i + V_BUFF_WIDTH*(i_indY + i_indZ*V_NB_II)];
-    buff_r_loc[i] += hY.s1*hZ.s0*buffer_r[i + V_BUFF_WIDTH*(i_indY + (i_indZ+1)*V_NB_II)];
-    buff_r_loc[i] += hY.s0*hZ.s1*buffer_r[i + V_BUFF_WIDTH*(i_indY+1 + i_indZ*V_NB_II)];
-    buff_r_loc[i] += hY.s0*hZ.s0*buffer_r[i + V_BUFF_WIDTH*(i_indY+1 + (i_indZ+1)*V_NB_II)];
-  }
-
-  /* Synchronize work-group */
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  for(i=gidX; i<NB_I; i+=WI_NB)
-    {
-      c = i * mesh->dx.x + mesh->min_position;
-      // multi-scale : interpolate v from velocity buffer (of length V_NB_I)
-      p = c * mesh->v_invdx;
-      i_ind = convert_int_rtn(p);
-      p = p - convert_float(i_ind);
-      i_ind = i_ind - (V_START_INDEX-V_GHOSTS_NB) - MS_INTERPOL_SHIFT;
-      v = mix(velocity_cache[noBC_id(i_ind)],
-	      velocity_cache[noBC_id(i_ind+1)],p);
-      p = (c + 0.5*dt*v) * mesh->v_invdx;
-
-      i_ind = convert_int_rtn(p) - MS_INTERPOL_SHIFT;
-      p = p - convert_float(i_ind);
-      loc_ptr = (i_ind>=(V_START_INDEX-V_GHOSTS_NB) && i_ind <= (V_STOP_INDEX+V_GHOSTS_NB)) ? velocity_cache+noBC_id(i_ind - (V_START_INDEX-V_GHOSTS_NB)) : (i_ind<(V_START_INDEX-V_GHOSTS_NB)) ? buff_l_loc+i_ind-(V_START_INDEX-V_GHOSTS_NB-1-V_BUFF_WIDTH+1)  : buff_r_loc+i_ind-(V_STOP_INDEX+V_GHOSTS_NB+1) ;
-      v = (1.0-p)*(*loc_ptr);
-      i_ind = i_ind + 1;
-      loc_ptr = (i_ind>=(V_START_INDEX-V_GHOSTS_NB) && i_ind <= (V_STOP_INDEX+V_GHOSTS_NB)) ? velocity_cache+noBC_id(i_ind - (V_START_INDEX-V_GHOSTS_NB)) : (i_ind<(V_START_INDEX-V_GHOSTS_NB)) ? buff_l_loc+i_ind-(V_START_INDEX-V_GHOSTS_NB-1-V_BUFF_WIDTH+1)  : buff_r_loc+i_ind-(V_STOP_INDEX+V_GHOSTS_NB+1) ;
-      v += p*(*loc_ptr);
-      ppos[i+line_index] = c + dt * v;
-    }
-
-}
diff --git a/hysop/old/gpu.old/cl_src/kernels/comm_advection_MS_and_remeshing_noVec.cl b/hysop/old/gpu.old/cl_src/kernels/comm_advection_MS_and_remeshing_noVec.cl
deleted file mode 100644
index 8e9ea780a2aafb23ff5067028ee7dfb3d7966667..0000000000000000000000000000000000000000
--- a/hysop/old/gpu.old/cl_src/kernels/comm_advection_MS_and_remeshing_noVec.cl
+++ /dev/null
@@ -1,547 +0,0 @@
-
-
-
-
-__kernel void buff_advec_and_remesh_l(__global const float* gvelo,
-				      __global float* v_l_buff,
-				      __global const float* pscal,
-				      __global float* s_l_buff,
-				      int used_width,
-				      float dt,
-				      float inv_v_dx_y, float inv_v_dx_z,
-				      __constant struct AdvectionMeshInfo* mesh)
-{
-  int gidY = get_global_id(0); /* OpenCL work-itme global index (Y) */
-  int gidZ = get_global_id(1); /* OpenCL work-itme global index (Z) */
-  int i;			/* Particle index in 1D problem */
-  int line_index = gidY*NB_I+gidZ*NB_I*NB_II; /* Current 1D problem index */
-  float p,v,c,s,y,w;
-  float2 hY, hZ;
-  int i_ind, i_indY, i_indZ;
-  int ind, index;
-
-
-  float velocity_cache[V_NB_I];
-  float v_l_buff_loc[V_BUFF_WIDTH];
-  float s_l_buff_loc[BUFF_WIDTH];
-  float* loc_ptr;
-
-  // Initialize buffers
-  for (i=0; i<used_width; i++)
-    s_l_buff_loc[i] = 0.0;
-
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  hY.s0 = (gidY * mesh->dx.y) * inv_v_dx_y;
-  hZ.s0 = (gidZ * mesh->dx.z) * inv_v_dx_z;
-  i_indY = convert_int_rtn(hY.s0);
-  i_indZ = convert_int_rtn(hZ.s0);
-  hY.s0 = hY.s0 - convert_float(i_indY);
-  hZ.s0 = hZ.s0 - convert_float(i_indZ);
-  hY.s1 = (1.0-hY.s0);
-  hZ.s1 = (1.0-hZ.s0);
-
-  i_indY = i_indY + V_GHOSTS_NB;
-  i_indZ = i_indZ + V_GHOSTS_NB;
-
-  for (i=0; i<V_NB_I; i++){
-    velocity_cache[noBC_id(i)] = hY.s1*hZ.s1 * gvelo[i + i_indY * V_NB_I + i_indZ * V_NB_I * V_NB_II];
-    velocity_cache[noBC_id(i)] += hY.s1*hZ.s0 * gvelo[i + i_indY * V_NB_I + (i_indZ + 1) * V_NB_I * V_NB_II];
-    velocity_cache[noBC_id(i)] += hY.s0*hZ.s1 * gvelo[i + (i_indY + 1) * V_NB_I + i_indZ * V_NB_I * V_NB_II];
-    velocity_cache[noBC_id(i)] += hY.s0*hZ.s0 * gvelo[i + (i_indY + 1) * V_NB_I + (i_indZ + 1) * V_NB_I * V_NB_II];
-  }
-
-  for (i=0; i<V_BUFF_WIDTH; i++){
-    v_l_buff_loc[i] = hY.s1*hZ.s1*v_l_buff[i + V_BUFF_WIDTH*(i_indY + i_indZ*V_NB_II)];
-    v_l_buff_loc[i] += hY.s1*hZ.s0*v_l_buff[i + V_BUFF_WIDTH*(i_indY + (i_indZ+1)*V_NB_II)];
-    v_l_buff_loc[i] += hY.s0*hZ.s1*v_l_buff[i + V_BUFF_WIDTH*(i_indY+1 + i_indZ*V_NB_II)];
-    v_l_buff_loc[i] += hY.s0*hZ.s0*v_l_buff[i + V_BUFF_WIDTH*(i_indY+1 + (i_indZ+1)*V_NB_II)];
-  }
-
-  /* Synchronize work-group */
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  for(i=0; i<2*BUFF_WIDTH; i++)
-    {
-      c = i * mesh->dx.x + mesh->min_position;
-      // multi-scale : interpolate v from velocity buffer (of length V_NB_I)
-      p = c * mesh->v_invdx;
-      i_ind = convert_int_rtn(p);
-      p = p - convert_float(i_ind);
-      i_ind = i_ind - (V_START_INDEX-V_GHOSTS_NB) - MS_INTERPOL_SHIFT;
-      v = mix(velocity_cache[noBC_id(i_ind)],
-	      velocity_cache[noBC_id(i_ind+1)],p);
-      p = (c + 0.5*dt*v) * mesh->v_invdx;
-
-      i_ind = convert_int_rtn(p) - MS_INTERPOL_SHIFT;
-      p = p - convert_float(i_ind);
-      loc_ptr = (i_ind>=(V_START_INDEX-V_GHOSTS_NB)) ? velocity_cache+noBC_id(i_ind - (V_START_INDEX-V_GHOSTS_NB)) : v_l_buff_loc+i_ind-(V_START_INDEX-V_GHOSTS_NB-1-V_BUFF_WIDTH+1);
-      v = (1.0-p)*(*loc_ptr);
-      i_ind = i_ind + 1;
-      loc_ptr = (i_ind>=(V_START_INDEX-V_GHOSTS_NB)) ? velocity_cache+noBC_id(i_ind - (V_START_INDEX-V_GHOSTS_NB)) : v_l_buff_loc+i_ind-(V_START_INDEX-V_GHOSTS_NB-1-V_BUFF_WIDTH+1);
-      v += p*(*loc_ptr);
-      p = c + dt * v;
-
-
-      /* Read particle scalar */
-      s = pscal[i + line_index];
-
-
-
-
-      ind = convert_int_rtn(p * mesh->invdx);
-      y = (p - convert_float(ind) * mesh->dx.x) * mesh->invdx;
-
-      index = ind - REMESH_SHIFT;
-
-      w = REMESH(alpha)(y);
-      if (index<START_INDEX) {loc_ptr = s_l_buff_loc+index-(START_INDEX-1-used_width+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(beta)(y);
-      if (index<START_INDEX) {loc_ptr = s_l_buff_loc+index-(START_INDEX-1-used_width+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(gamma)(y);
-      if (index<START_INDEX) {loc_ptr = s_l_buff_loc+index-(START_INDEX-1-used_width+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(delta)(y);
-      if (index<START_INDEX) {loc_ptr = s_l_buff_loc+index-(START_INDEX-1-used_width+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-#if REMESH_SHIFT > 1
-      index = index + 1;
-      w = REMESH(eta)(y);
-      if (index<START_INDEX) {loc_ptr = s_l_buff_loc+index-(START_INDEX-1-used_width+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(zeta)(y);
-      if (index<START_INDEX) {loc_ptr = s_l_buff_loc+index-(START_INDEX-1-used_width+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-#endif
-
-#if REMESH_SHIFT > 2
-      index = index + 1;
-      w = REMESH(theta)(y);
-      if (index<START_INDEX) {loc_ptr = s_l_buff_loc+index-(START_INDEX-1-used_width+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(iota)(y);
-      if (index<START_INDEX) {loc_ptr = s_l_buff_loc+index-(START_INDEX-1-used_width+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-#endif
-
-#if REMESH_SHIFT > 3
-      index = index + 1;
-      w = REMESH(kappa)(y);
-      if (index<START_INDEX) {loc_ptr = s_l_buff_loc+index-(START_INDEX-1-used_width+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(mu)(y);
-      if (index<START_INDEX) {loc_ptr = s_l_buff_loc+index-(START_INDEX-1-used_width+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-#endif
-    }
-
-  /* Synchronize work-group */
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  // Store buffers
-  for (i=0; i<used_width; i++)
-    s_l_buff[i + gidY*used_width + gidZ*used_width*NB_II] = s_l_buff_loc[i];
-
-}
-
-__kernel void buff_advec_and_remesh_r(__global const float* gvelo,
-				      __global float* v_r_buff,
-				      __global const float* pscal,
-				      __global float* s_r_buff,
-				      int used_width,
-				      float dt,
-				      float inv_v_dx_y, float inv_v_dx_z,
-				      __constant struct AdvectionMeshInfo* mesh)
-{
-  int gidY = get_global_id(0); /* OpenCL work-itme global index (Y) */
-  int gidZ = get_global_id(1); /* OpenCL work-itme global index (Z) */
-  int i;			/* Particle index in 1D problem */
-  int line_index = gidY*NB_I+gidZ*NB_I*NB_II; /* Current 1D problem index */
-  float p,v,c,s,y,w;
-  float2 hY, hZ;
-  int i_ind, i_indY, i_indZ;
-  int ind, index;
-
-
-  float velocity_cache[V_NB_I];
-  float v_r_buff_loc[V_BUFF_WIDTH];
-  float s_r_buff_loc[BUFF_WIDTH];
-  float* loc_ptr;
-
-  // Initialize buffers
-  for(i=0; i<used_width; i++)
-    s_r_buff_loc[i] = 0.0;
-
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  hY.s0 = (gidY * mesh->dx.y) * inv_v_dx_y;
-  hZ.s0 = (gidZ * mesh->dx.z) * inv_v_dx_z;
-  i_indY = convert_int_rtn(hY.s0);
-  i_indZ = convert_int_rtn(hZ.s0);
-  hY.s0 = hY.s0 - convert_float(i_indY);
-  hZ.s0 = hZ.s0 - convert_float(i_indZ);
-  hY.s1 = (1.0-hY.s0);
-  hZ.s1 = (1.0-hZ.s0);
-
-  i_indY = i_indY + V_GHOSTS_NB;
-  i_indZ = i_indZ + V_GHOSTS_NB;
-
-  for(i=0;i<V_NB_I; i++){
-    velocity_cache[noBC_id(i)] = hY.s1*hZ.s1 * gvelo[i + i_indY * V_NB_I + i_indZ * V_NB_I * V_NB_II];
-    velocity_cache[noBC_id(i)] += hY.s1*hZ.s0 * gvelo[i + i_indY * V_NB_I + (i_indZ + 1) * V_NB_I * V_NB_II];
-    velocity_cache[noBC_id(i)] += hY.s0*hZ.s1 * gvelo[i + (i_indY + 1) * V_NB_I + i_indZ * V_NB_I * V_NB_II];
-    velocity_cache[noBC_id(i)] += hY.s0*hZ.s0 * gvelo[i + (i_indY + 1) * V_NB_I + (i_indZ + 1) * V_NB_I * V_NB_II];
-  }
-
-  for(i=0;i<V_BUFF_WIDTH; i++){
-    v_r_buff_loc[i] = hY.s1*hZ.s1*v_r_buff[i + V_BUFF_WIDTH*(i_indY + i_indZ*V_NB_II)];
-    v_r_buff_loc[i] += hY.s1*hZ.s0*v_r_buff[i + V_BUFF_WIDTH*(i_indY + (i_indZ+1)*V_NB_II)];
-    v_r_buff_loc[i] += hY.s0*hZ.s1*v_r_buff[i + V_BUFF_WIDTH*(i_indY+1 + i_indZ*V_NB_II)];
-    v_r_buff_loc[i] += hY.s0*hZ.s0*v_r_buff[i + V_BUFF_WIDTH*(i_indY+1 + (i_indZ+1)*V_NB_II)];
-  }
-
-  /* Synchronize work-group */
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  for(i=NB_I-2*BUFF_WIDTH; i<NB_I; i++)
-    {
-      c = i * mesh->dx.x + mesh->min_position;
-      // multi-scale : interpolate v from velocity buffer (of length V_NB_I)
-      p = c * mesh->v_invdx;
-      i_ind = convert_int_rtn(p);
-      p = p - convert_float(i_ind);
-      i_ind = i_ind - (V_START_INDEX-V_GHOSTS_NB) - MS_INTERPOL_SHIFT;
-      v = mix(velocity_cache[noBC_id(i_ind)],
-	      velocity_cache[noBC_id(i_ind+1)],p);
-      p = (c + 0.5*dt*v) * mesh->v_invdx;
-
-      i_ind = convert_int_rtn(p) - MS_INTERPOL_SHIFT;
-      p = p - convert_float(i_ind);
-      loc_ptr = (i_ind <= (V_STOP_INDEX+V_GHOSTS_NB)) ? velocity_cache+noBC_id(i_ind - (V_START_INDEX-V_GHOSTS_NB)) : v_r_buff_loc+i_ind-(V_STOP_INDEX+V_GHOSTS_NB+1) ;
-      v = (1.0-p)*(*loc_ptr);
-      i_ind = i_ind + 1;
-      loc_ptr = (i_ind <= (V_STOP_INDEX+V_GHOSTS_NB)) ? velocity_cache+noBC_id(i_ind - (V_START_INDEX-V_GHOSTS_NB)) : v_r_buff_loc+i_ind-(V_STOP_INDEX+V_GHOSTS_NB+1) ;
-      v += p*(*loc_ptr);
-      p = c + dt * v;
-
-
-      /* Read particle scalar */
-      s = pscal[i + line_index];
-
-
-
-
-      ind = convert_int_rtn(p * mesh->invdx);
-      y = (p - convert_float(ind) * mesh->dx.x) * mesh->invdx;
-
-      index = ind - REMESH_SHIFT;
-
-      w = REMESH(alpha)(y);
-      if (index > STOP_INDEX){ loc_ptr = s_r_buff_loc + index-(STOP_INDEX+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(beta)(y);
-      if (index > STOP_INDEX){ loc_ptr = s_r_buff_loc + index-(STOP_INDEX+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(gamma)(y);
-      if (index > STOP_INDEX){ loc_ptr = s_r_buff_loc + index-(STOP_INDEX+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(delta)(y);
-      if (index > STOP_INDEX){ loc_ptr = s_r_buff_loc + index-(STOP_INDEX+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-#if REMESH_SHIFT > 1
-      index = index + 1;
-      w = REMESH(eta)(y);
-      if (index > STOP_INDEX){ loc_ptr = s_r_buff_loc + index-(STOP_INDEX+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(zeta)(y);
-      if (index > STOP_INDEX){ loc_ptr = s_r_buff_loc + index-(STOP_INDEX+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-#endif
-
-#if REMESH_SHIFT > 2
-      index = index + 1;
-      w = REMESH(theta)(y);
-      if (index > STOP_INDEX){ loc_ptr = s_r_buff_loc + index-(STOP_INDEX+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(iota)(y);
-      if (index > STOP_INDEX){ loc_ptr = s_r_buff_loc + index-(STOP_INDEX+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-#endif
-
-#if REMESH_SHIFT > 3
-      index = index + 1;
-      w = REMESH(kappa)(y);
-      if (index > STOP_INDEX){ loc_ptr = s_r_buff_loc + index-(STOP_INDEX+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(mu)(y);
-      if (index > STOP_INDEX){ loc_ptr = s_r_buff_loc + index-(STOP_INDEX+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-#endif
-    }
-
-  /* Synchronize work-group */
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  // Store buffers
-  for(i=0;i<used_width;i++)
-    s_r_buff[i + gidY*used_width + gidZ*used_width*NB_II] = s_r_buff_loc[i];
-
-}
-
-__kernel void buff_advec_and_remesh(__global const float* gvelo,
-				      __global float* v_l_buff,
-				      __global float* v_r_buff,
-				      __global const float* pscal,
-				      __global float* gscal,
-				      float dt,
-				      float inv_v_dx_y, float inv_v_dx_z,
-				      __constant struct AdvectionMeshInfo* mesh)
-{
-  int gidX = get_global_id(0);	/* OpenCL work-itme global index (X) */
-  int gidY = get_global_id(1); /* OpenCL work-itme global index (Y) */
-  int gidZ = get_global_id(2); /* OpenCL work-itme global index (Z) */
-  int i;			/* Particle index in 1D problem */
-  int line_index = gidY*NB_I+gidZ*NB_I*NB_II; /* Current 1D problem index */
-  float p,v,c,s,y,w;
-  float2 hY, hZ;
-  int i_ind, i_indY, i_indZ;
-  int ind, index;
-
-
-  __local float velocity_cache[V_NB_I];
-  __local float v_l_buff_loc[V_BUFF_WIDTH];
-  __local float v_r_buff_loc[V_BUFF_WIDTH];
-  __local float gscal_loc[NB_I];
-  __local float* loc_ptr;
-
-  for(i=gidX; i<NB_I; i+=WI_NB)
-    {
-      /* Initialize result buffer */
-      gscal_loc[i] = 0.0;
-    }
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-
-
-  hY.s0 = (gidY * mesh->dx.y) * inv_v_dx_y;
-  hZ.s0 = (gidZ * mesh->dx.z) * inv_v_dx_z;
-  i_indY = convert_int_rtn(hY.s0);
-  i_indZ = convert_int_rtn(hZ.s0);
-  hY.s0 = hY.s0 - convert_float(i_indY);
-  hZ.s0 = hZ.s0 - convert_float(i_indZ);
-  hY.s1 = (1.0-hY.s0);
-  hZ.s1 = (1.0-hZ.s0);
-
-  i_indY = i_indY + V_GHOSTS_NB;
-  i_indZ = i_indZ + V_GHOSTS_NB;
-
-  for(i=gidX; i<V_NB_I; i+=(WI_NB)){
-    velocity_cache[noBC_id(i)] = hY.s1*hZ.s1 * gvelo[i + i_indY * V_NB_I + i_indZ * V_NB_I * V_NB_II];
-    velocity_cache[noBC_id(i)] += hY.s1*hZ.s0 * gvelo[i + i_indY * V_NB_I + (i_indZ + 1) * V_NB_I * V_NB_II];
-    velocity_cache[noBC_id(i)] += hY.s0*hZ.s1 * gvelo[i + (i_indY + 1) * V_NB_I + i_indZ * V_NB_I * V_NB_II];
-    velocity_cache[noBC_id(i)] += hY.s0*hZ.s0 * gvelo[i + (i_indY + 1) * V_NB_I + (i_indZ + 1) * V_NB_I * V_NB_II];
-  }
-
-  for(i=gidX; i<V_BUFF_WIDTH; i+=(WI_NB)){
-    v_l_buff_loc[i] = hY.s1*hZ.s1*v_l_buff[i + V_BUFF_WIDTH*(i_indY + i_indZ*V_NB_II)];
-    v_l_buff_loc[i] += hY.s1*hZ.s0*v_l_buff[i + V_BUFF_WIDTH*(i_indY + (i_indZ+1)*V_NB_II)];
-    v_l_buff_loc[i] += hY.s0*hZ.s1*v_l_buff[i + V_BUFF_WIDTH*(i_indY+1 + i_indZ*V_NB_II)];
-    v_l_buff_loc[i] += hY.s0*hZ.s0*v_l_buff[i + V_BUFF_WIDTH*(i_indY+1 + (i_indZ+1)*V_NB_II)];
-  }
-
-  for(i=gidX; i<V_BUFF_WIDTH; i+=(WI_NB)){
-    v_r_buff_loc[i] = hY.s1*hZ.s1*v_r_buff[i + V_BUFF_WIDTH*(i_indY + i_indZ*V_NB_II)];
-    v_r_buff_loc[i] += hY.s1*hZ.s0*v_r_buff[i + V_BUFF_WIDTH*(i_indY + (i_indZ+1)*V_NB_II)];
-    v_r_buff_loc[i] += hY.s0*hZ.s1*v_r_buff[i + V_BUFF_WIDTH*(i_indY+1 + i_indZ*V_NB_II)];
-    v_r_buff_loc[i] += hY.s0*hZ.s0*v_r_buff[i + V_BUFF_WIDTH*(i_indY+1 + (i_indZ+1)*V_NB_II)];
-  }
-
-  /* Synchronize work-group */
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  for(i=gidX*PART_NB_PER_WI; i<(gidX + 1)*PART_NB_PER_WI; i+=1)
-    {
-      c = i * mesh->dx.x + mesh->min_position;
-      // multi-scale : interpolate v from velocity buffer (of length V_NB_I)
-      p = c * mesh->v_invdx;
-      i_ind = convert_int_rtn(p);
-      p = p - convert_float(i_ind);
-      i_ind = i_ind - (V_START_INDEX-V_GHOSTS_NB) - MS_INTERPOL_SHIFT;
-      v = mix(velocity_cache[noBC_id(i_ind)],
-	      velocity_cache[noBC_id(i_ind+1)],p);
-      p = (c + 0.5*dt*v) * mesh->v_invdx;
-
-      i_ind = convert_int_rtn(p) - MS_INTERPOL_SHIFT;
-      p = p - convert_float(i_ind);
-      loc_ptr = (i_ind>=(V_START_INDEX-V_GHOSTS_NB) && i_ind <= (V_STOP_INDEX+V_GHOSTS_NB)) ? velocity_cache+noBC_id(i_ind - (V_START_INDEX-V_GHOSTS_NB)) : (i_ind<(V_START_INDEX-V_GHOSTS_NB)) ? v_l_buff_loc+i_ind-(V_START_INDEX-V_GHOSTS_NB-1-V_BUFF_WIDTH+1)  : v_r_buff_loc+i_ind-(V_STOP_INDEX+V_GHOSTS_NB+1) ;
-      v = (1.0-p)*(*loc_ptr);
-      i_ind = i_ind + 1;
-      loc_ptr = (i_ind>=(V_START_INDEX-V_GHOSTS_NB) && i_ind <= (V_STOP_INDEX+V_GHOSTS_NB)) ? velocity_cache+noBC_id(i_ind - (V_START_INDEX-V_GHOSTS_NB)) : (i_ind<(V_START_INDEX-V_GHOSTS_NB)) ? v_l_buff_loc+i_ind-(V_START_INDEX-V_GHOSTS_NB-1-V_BUFF_WIDTH+1)  : v_r_buff_loc+i_ind-(V_STOP_INDEX+V_GHOSTS_NB+1) ;
-      v += p*(*loc_ptr);
-      p = c + dt * v;
-
-
-      /* Read particle scalar */
-      s = pscal[i + line_index];
-
-
-
-
-      ind = convert_int_rtn(p * mesh->invdx);
-      y = (p - convert_float(ind) * mesh->dx.x) * mesh->invdx;
-
-      index = ind - REMESH_SHIFT;
-
-      w = REMESH(alpha)(y);
-      if (index>=START_INDEX && index <= STOP_INDEX){ loc_ptr = gscal_loc +index-START_INDEX;
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(beta)(y);
-      if (index>=START_INDEX && index <= STOP_INDEX){ loc_ptr = gscal_loc +index-START_INDEX;
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(gamma)(y);
-      if (index>=START_INDEX && index <= STOP_INDEX){ loc_ptr = gscal_loc +index-START_INDEX;
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(delta)(y);
-      if (index>=START_INDEX && index <= STOP_INDEX){ loc_ptr = gscal_loc +index-START_INDEX;
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-#if REMESH_SHIFT > 1
-      index = index + 1;
-      w = REMESH(eta)(y);
-      if (index>=START_INDEX && index <= STOP_INDEX){ loc_ptr = gscal_loc +index-START_INDEX;
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(zeta)(y);
-      if (index>=START_INDEX && index <= STOP_INDEX){ loc_ptr = gscal_loc +index-START_INDEX;
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-#endif
-
-#if REMESH_SHIFT > 2
-      index = index + 1;
-      w = REMESH(theta)(y);
-      if (index>=START_INDEX && index <= STOP_INDEX){ loc_ptr = gscal_loc +index-START_INDEX;
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(iota)(y);
-      if (index>=START_INDEX && index <= STOP_INDEX){ loc_ptr = gscal_loc +index-START_INDEX;
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-#endif
-
-#if REMESH_SHIFT > 3
-      index = index + 1;
-      w = REMESH(kappa)(y);
-      if (index>=START_INDEX && index <= STOP_INDEX){ loc_ptr = gscal_loc +index-START_INDEX;
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(mu)(y);
-      if (index>=START_INDEX && index <= STOP_INDEX){ loc_ptr = gscal_loc +index-START_INDEX;
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-#endif
-    }
-
-  /* Synchronize work-group */
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  for(i=gidX; i<NB_I; i+=WI_NB)
-    {
-      /* Store result */
-      gscal[i + line_index] = gscal_loc[i];
-    }
-}
diff --git a/hysop/old/gpu.old/cl_src/kernels/comm_advection_and_remeshing_noVec.cl b/hysop/old/gpu.old/cl_src/kernels/comm_advection_and_remeshing_noVec.cl
deleted file mode 100644
index 1648c70e4d99e9145364ff331bfff2f13695f92c..0000000000000000000000000000000000000000
--- a/hysop/old/gpu.old/cl_src/kernels/comm_advection_and_remeshing_noVec.cl
+++ /dev/null
@@ -1,468 +0,0 @@
-
-
-
-__kernel void buff_advec_and_remesh_l(__global const float* gvelo,
-				      __global float* v_buffer_l,
-				      __global const float* pscal,
-				      __global float* s_buffer_l,
-				      int used_width,
-				      float dt, __constant struct AdvectionMeshInfo* mesh)
-{
-  int gidY = get_global_id(0); /* OpenCL work-itme global index (Y) */
-  int gidZ = get_global_id(1); /* OpenCL work-itme global index (Z) */
-  int i;			/* Particle index in 1D problem */
-  int line_index ; /* Current 1D problem index */
-
-  float v,vp,p,c,s,y,w, hdt = 0.5 * dt;
-  int i_ind, i_ind_p, ind, index;
-
-  float velocity_cache[V_NB_I];
-  float v_buff_l_loc[V_BUFF_WIDTH];
-  float s_buff_l_loc[BUFF_WIDTH];
-  float* loc_ptr;
-
-  // Initialize buffers
-  for (i=0;i<used_width;i++)
-    s_buff_l_loc[i] = 0.0;
-
-  for(i=0; i<V_BUFF_WIDTH; i++)
-    v_buff_l_loc[i] = v_buffer_l[i + V_BUFF_WIDTH*(gidY + gidZ*V_NB_II)];
-
-  line_index = gidY*V_NB_I + gidZ*V_NB_I*V_NB_II;
-  /* Read velocity */
-  /* Fill velocity cache */
-  for(i=0;i<V_NB_I;i++)
-      velocity_cache[i] = gvelo[i+line_index];
-
-  /* Synchronize work-group */
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  line_index = gidY*NB_I+gidZ*NB_I*NB_II;
-  for(i=0; i<2*BUFF_WIDTH; i++)
-    {
-      /* Read particle scalar */
-      s = pscal[i + line_index];
-
-      c = i * mesh->dx.x + mesh->min_position;
-      v = velocity_cache[i + V_GHOSTS_NB];
-      p = (c + hdt*v) * mesh->v_invdx;
-
-      i_ind = convert_int_rtn(p);
-      p = p - convert_float(i_ind);
-      i_ind_p = i_ind + 1;
-      loc_ptr = (i_ind>=(V_START_INDEX-V_GHOSTS_NB)) ? velocity_cache + i_ind - (V_START_INDEX-V_GHOSTS_NB) : v_buff_l_loc+i_ind-(V_START_INDEX-V_GHOSTS_NB-1-V_BUFF_WIDTH+1);
-      v = *loc_ptr;
-
-      loc_ptr = (i_ind_p>=(V_START_INDEX-V_GHOSTS_NB)) ? velocity_cache+i_ind_p - (V_START_INDEX-V_GHOSTS_NB) : v_buff_l_loc+i_ind_p-(V_START_INDEX-V_GHOSTS_NB-1-V_BUFF_WIDTH+1);
-      vp = *loc_ptr;
-
-      v = (p*(vp-v) + v);
-      p = c + dt * v;
-
-
-
-      ind = convert_int_rtn(p * mesh->invdx);
-      y = (p - convert_float(ind) * mesh->dx.x) * mesh->invdx;
-
-      index = ind - REMESH_SHIFT;
-
-      w = REMESH(alpha)(y);
-      if (index<START_INDEX){ loc_ptr = s_buff_l_loc+index-(START_INDEX-1-used_width+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(beta)(y);
-      if (index<START_INDEX){ loc_ptr = s_buff_l_loc+index-(START_INDEX-1-used_width+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(gamma)(y);
-      if (index<START_INDEX){ loc_ptr = s_buff_l_loc+index-(START_INDEX-1-used_width+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(delta)(y);
-      if (index<START_INDEX){ loc_ptr = s_buff_l_loc+index-(START_INDEX-1-used_width+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-#if REMESH_SHIFT > 1
-      index = index + 1;
-      w = REMESH(eta)(y);
-      if (index<START_INDEX){ loc_ptr = s_buff_l_loc+index-(START_INDEX-1-used_width+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(zeta)(y);
-      if (index<START_INDEX){ loc_ptr = s_buff_l_loc+index-(START_INDEX-1-used_width+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-#endif
-
-#if REMESH_SHIFT > 2
-      index = index + 1;
-      w = REMESH(theta)(y);
-      if (index<START_INDEX){ loc_ptr = s_buff_l_loc+index-(START_INDEX-1-used_width+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(iota)(y);
-      if (index<START_INDEX){ loc_ptr = s_buff_l_loc+index-(START_INDEX-1-used_width+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-#endif
-
-#if REMESH_SHIFT > 3
-      index = index + 1;
-      w = REMESH(kappa)(y);
-      if (index<START_INDEX){ loc_ptr = s_buff_l_loc+index-(START_INDEX-1-used_width+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(mu)(y);
-      if (index<START_INDEX){ loc_ptr = s_buff_l_loc+index-(START_INDEX-1-used_width+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-#endif
-
-    }
-
-  /* Synchronize work-group */
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  // Store buffers
-  for(i=0;i<used_width;i++)
-    s_buffer_l[i + gidY*used_width + gidZ*used_width*NB_II] = s_buff_l_loc[i];
-}
-
-
-
-
-
-
-
-
-__kernel void buff_advec_and_remesh_r(__global const float* gvelo,
-				      __global float* v_buffer_r,
-				      __global const float* pscal,
-				      __global float* s_buffer_r,
-				      int used_width,
-				      float dt, __constant struct AdvectionMeshInfo* mesh)
-{
-  int gidY = get_global_id(0); /* OpenCL work-itme global index (Y) */
-  int gidZ = get_global_id(1); /* OpenCL work-itme global index (Z) */
-  int i;			/* Particle index in 1D problem */
-  int line_index ; /* Current 1D problem index */
-
-  float v,vp,p,c,s,y,w, hdt = 0.5 * dt;
-  int i_ind, i_ind_p, ind, index;
-
-  float velocity_cache[V_NB_I];
-  float v_buff_r_loc[V_BUFF_WIDTH];
-  float s_buff_r_loc[BUFF_WIDTH];
-  float* loc_ptr;
-
-  // Initialize buffers
-  for(i=0;i<used_width;i++)
-    s_buff_r_loc[i] = 0.0;
-
-  for(i=0;i<V_BUFF_WIDTH;i++)
-    v_buff_r_loc[i] = v_buffer_r[i + V_BUFF_WIDTH*(gidY + gidZ*V_NB_II)];
-
-  line_index = gidY*V_NB_I + gidZ*V_NB_I*V_NB_II;
-  /* Read velocity */
-  /* Fill velocity cache */
-  for(i=0;i<V_NB_I; i++)
-      velocity_cache[i] = gvelo[i+line_index];
-
-  /* Synchronize work-group */
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  line_index = gidY*NB_I+gidZ*NB_I*NB_II;
-  for(i=NB_I-2*BUFF_WIDTH; i<NB_I; i++)
-    {
-      /* Read particle scalar */
-      s = pscal[i + line_index];
-
-      c = i * mesh->dx.x + mesh->min_position;
-      v = velocity_cache[i + V_GHOSTS_NB];
-      p = (c + hdt*v) * mesh->v_invdx;
-
-      i_ind = convert_int_rtn(p);
-      p = p - convert_float(i_ind);
-      i_ind_p = i_ind + 1;
-      loc_ptr = (i_ind <= (V_STOP_INDEX+V_GHOSTS_NB)) ? velocity_cache + i_ind - (V_START_INDEX-V_GHOSTS_NB) : v_buff_r_loc+i_ind-(V_STOP_INDEX+V_GHOSTS_NB+1) ;
-      v = *loc_ptr;
-
-      loc_ptr = (i_ind_p <= (V_STOP_INDEX+V_GHOSTS_NB)) ? velocity_cache+i_ind_p - (V_START_INDEX-V_GHOSTS_NB) : v_buff_r_loc+i_ind_p-(V_STOP_INDEX+V_GHOSTS_NB+1) ;
-      vp = *loc_ptr;
-
-      v = (p*(vp-v) + v);
-      p = c + dt * v;
-
-
-
-      ind = convert_int_rtn(p * mesh->invdx);
-      y = (p - convert_float(ind) * mesh->dx.x) * mesh->invdx;
-
-      index = ind - REMESH_SHIFT;
-
-      w = REMESH(alpha)(y);
-      if (index > STOP_INDEX){ loc_ptr = s_buff_r_loc + index-(STOP_INDEX+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(beta)(y);
-      if (index > STOP_INDEX){ loc_ptr = s_buff_r_loc + index-(STOP_INDEX+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(gamma)(y);
-      if (index > STOP_INDEX){ loc_ptr = s_buff_r_loc + index-(STOP_INDEX+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(delta)(y);
-      if (index > STOP_INDEX){ loc_ptr = s_buff_r_loc + index-(STOP_INDEX+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-#if REMESH_SHIFT > 1
-      index = index + 1;
-      w = REMESH(eta)(y);
-      if (index > STOP_INDEX){ loc_ptr = s_buff_r_loc + index-(STOP_INDEX+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(zeta)(y);
-      if (index > STOP_INDEX){ loc_ptr = s_buff_r_loc + index-(STOP_INDEX+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-#endif
-
-#if REMESH_SHIFT > 2
-      index = index + 1;
-      w = REMESH(theta)(y);
-      if (index > STOP_INDEX){ loc_ptr = s_buff_r_loc + index-(STOP_INDEX+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(iota)(y);
-      if (index > STOP_INDEX){ loc_ptr = s_buff_r_loc + index-(STOP_INDEX+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-#endif
-
-#if REMESH_SHIFT > 3
-      index = index + 1;
-      w = REMESH(kappa)(y);
-      if (index > STOP_INDEX){ loc_ptr = s_buff_r_loc + index-(STOP_INDEX+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(mu)(y);
-      if (index > STOP_INDEX){ loc_ptr = s_buff_r_loc + index-(STOP_INDEX+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-#endif
-
-    }
-
-  /* Synchronize work-group */
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  for(i=0;i<used_width;i++)
-    s_buffer_r[i + gidY*used_width + gidZ*used_width*NB_II] = s_buff_r_loc[i];
-
-}
-
-
-__kernel void buff_advec_and_remesh(__global const float* gvelo,
-				    __global float* v_buffer_l,
-				    __global float* v_buffer_r,
-				    __global const float* pscal,
-				    __global float* gscal,
-				    float dt, __constant struct AdvectionMeshInfo* mesh)
-{
-  int gidX = get_global_id(0);	/* OpenCL work-itme global index (X) */
-  int gidY = get_global_id(1); /* OpenCL work-itme global index (Y) */
-  int gidZ = get_global_id(2); /* OpenCL work-itme global index (Z) */
-  int i;			/* Particle index in 1D problem */
-  int line_index ; /* Current 1D problem index */
-
-  float v,vp,p,c,s,y,w, hdt = 0.5 * dt;
-  int i_ind, i_ind_p, ind, index;
-
-  __local float velocity_cache[V_NB_I];
-  __local float v_buff_l_loc[V_BUFF_WIDTH];
-  __local float v_buff_r_loc[V_BUFF_WIDTH];
-  __local float gscal_loc[NB_I];
-  __local float* loc_ptr;
-
-
-  for(i=gidX; i<NB_I; i+=WI_NB)
-    /* Initialize result buffer */
-    gscal_loc[i] = 0.0;
-
-  for(i=gidX; i<V_BUFF_WIDTH; i+=(WI_NB))
-    v_buff_l_loc[i] = v_buffer_l[i + V_BUFF_WIDTH*(gidY + gidZ*V_NB_II)];
-
-  for(i=gidX; i<V_BUFF_WIDTH; i+=(WI_NB))
-    v_buff_r_loc[i] = v_buffer_r[i + V_BUFF_WIDTH*(gidY + gidZ*V_NB_II)];
-
-  line_index = gidY*V_NB_I + gidZ*V_NB_I*V_NB_II;
-  /* Read velocity */
-  /* Fill velocity cache */
-  for(i=gidX; i<V_NB_I; i+=(WI_NB))
-      velocity_cache[i] = gvelo[i+line_index];
-
-  /* Synchronize work-group */
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  line_index = gidY*NB_I+gidZ*NB_I*NB_II;
-  for(i=gidX*PART_NB_PER_WI; i<(gidX + 1)*PART_NB_PER_WI; i+=1)
-    {
-      /* Read particle scalar */
-      s = pscal[i + line_index];
-
-      c = i * mesh->dx.x + mesh->min_position;
-      v = velocity_cache[i + V_GHOSTS_NB];
-      p = (c + hdt*v) * mesh->v_invdx;
-
-      i_ind = convert_int_rtn(p);
-      p = p - convert_float(i_ind);
-      i_ind_p = i_ind + 1;
-      loc_ptr = (i_ind>=(V_START_INDEX-V_GHOSTS_NB) && i_ind <= (V_STOP_INDEX+V_GHOSTS_NB)) ? velocity_cache + i_ind - (V_START_INDEX-V_GHOSTS_NB) : (i_ind<(V_START_INDEX-V_GHOSTS_NB)) ? v_buff_l_loc+i_ind-(V_START_INDEX-V_GHOSTS_NB-1-V_BUFF_WIDTH+1) : v_buff_r_loc+i_ind-(V_STOP_INDEX+V_GHOSTS_NB+1) ;
-      v = *loc_ptr;
-
-      loc_ptr = (i_ind_p>=(V_START_INDEX-V_GHOSTS_NB) && i_ind_p <= (V_STOP_INDEX+V_GHOSTS_NB)) ? velocity_cache+i_ind_p - (V_START_INDEX-V_GHOSTS_NB) : (i_ind_p<(V_START_INDEX-V_GHOSTS_NB)) ? v_buff_l_loc+i_ind_p-(V_START_INDEX-V_GHOSTS_NB-1-V_BUFF_WIDTH+1) : v_buff_r_loc+i_ind_p-(V_STOP_INDEX+V_GHOSTS_NB+1) ;
-      vp = *loc_ptr;
-
-      v = (p*(vp-v) + v);
-      p = c + dt * v;
-
-
-
-      ind = convert_int_rtn(p * mesh->invdx);
-      y = (p - convert_float(ind) * mesh->dx.x) * mesh->invdx;
-
-      index = ind - REMESH_SHIFT;
-
-      w = REMESH(alpha)(y);
-      if (index>=START_INDEX && index <= STOP_INDEX){ loc_ptr = gscal_loc +index-START_INDEX;
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(beta)(y);
-      if (index>=START_INDEX && index <= STOP_INDEX){ loc_ptr = gscal_loc +index-START_INDEX;
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(gamma)(y);
-      if (index>=START_INDEX && index <= STOP_INDEX){ loc_ptr = gscal_loc +index-START_INDEX;
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(delta)(y);
-      if (index>=START_INDEX && index <= STOP_INDEX){ loc_ptr = gscal_loc +index-START_INDEX;
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-#if REMESH_SHIFT > 1
-      index = index + 1;
-      w = REMESH(eta)(y);
-      if (index>=START_INDEX && index <= STOP_INDEX){ loc_ptr = gscal_loc +index-START_INDEX;
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(zeta)(y);
-      if (index>=START_INDEX && index <= STOP_INDEX){ loc_ptr = gscal_loc +index-START_INDEX;
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-#endif
-
-#if REMESH_SHIFT > 2
-      index = index + 1;
-      w = REMESH(theta)(y);
-      if (index>=START_INDEX && index <= STOP_INDEX){ loc_ptr = gscal_loc +index-START_INDEX;
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(iota)(y);
-      if (index>=START_INDEX && index <= STOP_INDEX){ loc_ptr = gscal_loc +index-START_INDEX;
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-#endif
-
-#if REMESH_SHIFT > 3
-      index = index + 1;
-      w = REMESH(kappa)(y);
-      if (index>=START_INDEX && index <= STOP_INDEX){ loc_ptr = gscal_loc +index-START_INDEX;
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(mu)(y);
-      if (index>=START_INDEX && index <= STOP_INDEX){ loc_ptr = gscal_loc +index-START_INDEX;
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-#endif
-
-    }
-
-  /* Synchronize work-group */
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  for(i=gidX; i<NB_I; i+=WI_NB)
-    {
-      /* Store result */
-      gscal[i + line_index] = gscal_loc[i];
-    }
-}
diff --git a/hysop/old/gpu.old/cl_src/kernels/comm_advection_noVec.cl b/hysop/old/gpu.old/cl_src/kernels/comm_advection_noVec.cl
deleted file mode 100644
index d10675fede5770453fa488f5472fc9b90b06f372..0000000000000000000000000000000000000000
--- a/hysop/old/gpu.old/cl_src/kernels/comm_advection_noVec.cl
+++ /dev/null
@@ -1,64 +0,0 @@
-
-
-
-__kernel void buff_advec(__global const float* gvelo,
-			 __global float* ppos,
-			 __global float* buffer_l,
-			 __global float* buffer_r,
-			 float dt, __constant struct AdvectionMeshInfo* mesh)
-{
-  int gidX = get_global_id(0);	/* OpenCL work-itme global index (X) */
-  int gidY = get_global_id(1); /* OpenCL work-itme global index (Y) */
-  int gidZ = get_global_id(2); /* OpenCL work-itme global index (Z) */
-  int i;			/* Particle index in 1D problem */
-  int line_index ; /* Current 1D problem index */
-
-  float v,vp,p,c, hdt = 0.5 * dt;
-  int i_ind, i_ind_p;
-
-  __local float velocity_cache[V_NB_I];
-  __local float buff_l_loc[V_BUFF_WIDTH];
-  __local float buff_r_loc[V_BUFF_WIDTH];
-  __local float* loc_ptr;
-
-    for(i=gidX; i<V_BUFF_WIDTH; i+=(WI_NB)){
-      buff_l_loc[i] = buffer_l[i + V_BUFF_WIDTH*(gidY + gidZ*V_NB_II)];
-    }
-
-    for(i=gidX; i<V_BUFF_WIDTH; i+=(WI_NB)){
-      buff_r_loc[i] = buffer_r[i + V_BUFF_WIDTH*(gidY + gidZ*V_NB_II)];
-    }
-
-  line_index = gidY*V_NB_I + gidZ*V_NB_I*V_NB_II;
-  for(i=gidX; i<V_NB_I; i+=(WI_NB))
-    {
-      /* Read velocity */
-      /* Fill velocity cache */
-      velocity_cache[i] = gvelo[i+line_index];
-    }
-
-  /* Synchronize work-group */
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  line_index = gidY*NB_I+gidZ*NB_I*NB_II;
-  for(i=gidX; i<NB_I; i+=WI_NB)
-    {
-      c = i * mesh->dx.x + mesh->min_position;
-      v = velocity_cache[i + V_GHOSTS_NB];
-      p = (c + hdt*v) * mesh->v_invdx;
-
-      i_ind = convert_int_rtn(p);
-      p = p - convert_float(i_ind);
-      i_ind_p = i_ind + 1;
-      loc_ptr = (i_ind>=(V_START_INDEX-V_GHOSTS_NB) && i_ind <= (V_STOP_INDEX+V_GHOSTS_NB)) ? velocity_cache + i_ind - (V_START_INDEX-V_GHOSTS_NB) : (i_ind<(V_START_INDEX-V_GHOSTS_NB)) ? buff_l_loc+i_ind-(V_START_INDEX-V_GHOSTS_NB-1-V_BUFF_WIDTH+1) : buff_r_loc+i_ind-(V_STOP_INDEX+V_GHOSTS_NB+1) ;
-      v = *loc_ptr;
-
-      loc_ptr = (i_ind_p>=(V_START_INDEX-V_GHOSTS_NB) && i_ind_p <= (V_STOP_INDEX+V_GHOSTS_NB)) ? velocity_cache+i_ind_p - (V_START_INDEX-V_GHOSTS_NB) : (i_ind_p<(V_START_INDEX-V_GHOSTS_NB)) ? buff_l_loc+i_ind_p-(V_START_INDEX-V_GHOSTS_NB-1-V_BUFF_WIDTH+1) : buff_r_loc+i_ind_p-(V_STOP_INDEX+V_GHOSTS_NB+1) ;
-      vp = *loc_ptr;
-
-      v = (p*(vp-v) + v);
-      p = c + dt * v;
-      ppos[i+line_index] = p;
-    }
-
-}
diff --git a/hysop/old/gpu.old/cl_src/kernels/comm_remeshing_noVec.cl b/hysop/old/gpu.old/cl_src/kernels/comm_remeshing_noVec.cl
deleted file mode 100644
index 89a3dac244ff1cf5a6f72da66ccf9cc8514087ae..0000000000000000000000000000000000000000
--- a/hysop/old/gpu.old/cl_src/kernels/comm_remeshing_noVec.cl
+++ /dev/null
@@ -1,409 +0,0 @@
-/**
- * @file comm_remeshing_noVec.cl
- * Remeshing kernel.
- */
-/**
- * Performs remeshing of the particles' scalar.
- * A work-group is handling a 1D problem. Thus, gidY and gidZ are constants among work-items of a work-group.
- * Each work-item computes <code>NB_I/WI_NB</code> particles positions. To avoid concurrent witings, in case of strong velocity gradients, work-items computes contiguous particles.
- * Particle are computed through OpenCL vector types of lenght 2, 4 or 8.
- * Scalar results are stored in a local buffer as a cache and then copied to global memory buffer.
- *
- * @param ppos Particle position
- * @param pscal Particle scalar
- * @param gscal Grid scalar
- * @param buffer_l Buffer for storing out of domain contributions (to left)
- * @param buffer_r Buffer for storing out of domain contributions (to right)
- * @param min_position Domain lower coordinate
- * @param dx Space step
- * @param l_nb buffer_l sizes
- * @param r_nb buffer_r sizes
- *
- * @remark <code>NB_I</code>, <code>NB_II</code>, <code>NB_III</code> : points number in directions from 1st varying index to last.
- * @remark <code>WI_NB</code> corresponds to the work-item number.
- * @remark <code>__N__</code> is expanded at compilation time by vector width.
- * @remark <code>__NN__</code> is expanded at compilation time by a sequence of integer for each vector component.
- * @remark <code>__RCOMP_I</code> flag is for instruction expansion for the different remeshed components.
- * @remark <code>__RCOMP_P</code> flag is for function parameter expansion for the different remeshed components.
- * @remark <code>__ID__</code> is replaced by the remeshed component id in an expansion.
- * @see hysop.gpu.tools.parse_file
- */
-__kernel void buff_remesh_l(__global const float* ppos,
-			    __global const float* pscal,
-			    __global float* buffer_l,
-			    int used_width,
-			    __constant struct AdvectionMeshInfo* mesh
-			    )
-{
-  int gidY = get_global_id(0); /* OpenCL work-itme global index (Y) */
-  int gidZ = get_global_id(1); /* OpenCL work-itme global index (Z) */
-  int i;			/* Particle index in 1D problem */
-  float p;			/* Particle position */
-  float s;      /* Particle scalar */
-float y;			/* Normalized distance to nearest left grid point */
-  int ind;			/* Integer coordinate */
-  int index;		/* Remeshing index */
-  float w;
-
-  uint line_index = gidY*NB_I+ gidZ*NB_I*NB_II; /* Current 1D problem index */
-
-  float l_buff_loc[BUFF_WIDTH];
-  float* loc_ptr;
-
-  // Initialize buffers
-  for(i=0; i<used_width; i++)
-    l_buff_loc[i] = 0.0;
-
-  /* Synchronize work-group */
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  //for(i=lid*PART_NB_PER_WI; i<(lid + 1)*PART_NB_PER_WI; i+=1)
-  for(i=0; i<2*BUFF_WIDTH; i++)
-    {
-      /* Read particle position */
-      p = ppos[i + line_index];
-      /* Read particle scalar */
-      s = pscal[i + line_index];
-      /* Remesh particle */
-
-      ind = convert_int_rtn(p * mesh->invdx);
-      y = (p - convert_float(ind) * mesh->dx.x) * mesh->invdx;
-
-      index = ind - REMESH_SHIFT;
-
-      w = REMESH(alpha)(y);
-      if(index<START_INDEX){ loc_ptr = l_buff_loc+index-(START_INDEX-1-used_width+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(beta)(y);
-      if(index<START_INDEX){ loc_ptr = l_buff_loc+index-(START_INDEX-1-used_width+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(gamma)(y);
-      if(index<START_INDEX){ loc_ptr = l_buff_loc+index-(START_INDEX-1-used_width+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(delta)(y);
-      if(index<START_INDEX){ loc_ptr = l_buff_loc+index-(START_INDEX-1-used_width+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-#if REMESH_SHIFT > 1
-      index = index + 1;
-      w = REMESH(eta)(y);
-      if(index<START_INDEX){ loc_ptr = l_buff_loc+index-(START_INDEX-1-used_width+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(zeta)(y);
-      if(index<START_INDEX){ loc_ptr = l_buff_loc+index-(START_INDEX-1-used_width+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-#endif
-
-#if REMESH_SHIFT > 2
-      index = index + 1;
-      w = REMESH(theta)(y);
-      if(index<START_INDEX){ loc_ptr = l_buff_loc+index-(START_INDEX-1-used_width+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(iota)(y);
-      if(index<START_INDEX){ loc_ptr = l_buff_loc+index-(START_INDEX-1-used_width+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-#endif
-
-#if REMESH_SHIFT > 3
-      index = index + 1;
-      w = REMESH(kappa)(y);
-      if(index<START_INDEX){ loc_ptr = l_buff_loc+index-(START_INDEX-1-used_width+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(mu)(y);
-      if(index<START_INDEX){ loc_ptr = l_buff_loc+index-(START_INDEX-1-used_width+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-#endif
-    }
-
-  /* Synchronize work-group */
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  // Store buffers
-  for(i=0; i<used_width; i++)
-    buffer_l[i + gidY*used_width + gidZ*used_width*NB_II] = l_buff_loc[i];
-}
-
-__kernel void buff_remesh_r(__global const float* ppos,
-			    __global const float* pscal,
-			    __global float* buffer_r,
-			    int used_width,
-			    __constant struct AdvectionMeshInfo* mesh
-			    )
-{
-  int gidY = get_global_id(0); /* OpenCL work-itme global index (Y) */
-  int gidZ = get_global_id(1); /* OpenCL work-itme global index (Z) */
-  int i;			/* Particle index in 1D problem */
-  float p;			/* Particle position */
-  float s;      /* Particle scalar */
-  float y;			/* Normalized distance to nearest left grid point */
-  int ind;			/* Integer coordinate */
-  int index;		/* Remeshing index */
-  float w;
-
-  uint line_index = gidY*NB_I+ gidZ*NB_I*NB_II; /* Current 1D problem index */
-
-  float r_buff_loc[BUFF_WIDTH];
-  float* loc_ptr;
-
-  // Initialize buffers
-  for(i=0; i<used_width; i++)
-    r_buff_loc[i] = 0.0;
-
-  /* Synchronize work-group */
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  for(i=NB_I-2*BUFF_WIDTH; i<NB_I; i++)
-    {
-      /* Read particle position */
-      p = ppos[i + line_index];
-      /* Read particle scalar */
-      s = pscal[i + line_index];
-      /* Remesh particle */
-
-      ind = convert_int_rtn(p * mesh->invdx);
-      y = (p - convert_float(ind) * mesh->dx.x) * mesh->invdx;
-
-      index = ind - REMESH_SHIFT;
-
-      w = REMESH(alpha)(y);
-      if(index > STOP_INDEX){ loc_ptr = loc_ptr = r_buff_loc + index-(STOP_INDEX+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(beta)(y);
-      if(index > STOP_INDEX){ loc_ptr = loc_ptr = r_buff_loc + index-(STOP_INDEX+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(gamma)(y);
-      if(index > STOP_INDEX){ loc_ptr = loc_ptr = r_buff_loc + index-(STOP_INDEX+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(delta)(y);
-      if(index > STOP_INDEX){ loc_ptr = loc_ptr = r_buff_loc + index-(STOP_INDEX+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-#if REMESH_SHIFT > 1
-      index = index + 1;
-      w = REMESH(eta)(y);
-      if(index > STOP_INDEX){ loc_ptr = loc_ptr = r_buff_loc + index-(STOP_INDEX+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(zeta)(y);
-      if(index > STOP_INDEX){ loc_ptr = loc_ptr = r_buff_loc + index-(STOP_INDEX+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-#endif
-
-#if REMESH_SHIFT > 2
-      index = index + 1;
-      w = REMESH(theta)(y);
-      if(index > STOP_INDEX){ loc_ptr = loc_ptr = r_buff_loc + index-(STOP_INDEX+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(iota)(y);
-      if(index > STOP_INDEX){ loc_ptr = loc_ptr = r_buff_loc + index-(STOP_INDEX+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-#endif
-
-#if REMESH_SHIFT > 3
-      index = index + 1;
-      w = REMESH(kappa)(y);
-      if(index > STOP_INDEX){ loc_ptr = loc_ptr = r_buff_loc + index-(STOP_INDEX+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(mu)(y);
-      if(index > STOP_INDEX){ loc_ptr = loc_ptr = r_buff_loc + index-(STOP_INDEX+1);
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-#endif
-    }
-
-  /* Synchronize work-group */
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  // Store buffers
-  for(i=0; i<used_width; i++)
-    buffer_r[i + gidY*used_width + gidZ*used_width*NB_II] = r_buff_loc[i];
-
-}
-
-__kernel void remesh(__global const float* ppos,
-			  __global const float* pscal,
-			  __global float* gscal,
-			  __constant struct AdvectionMeshInfo* mesh
-			  )
-{
-  int lid = get_local_id(0);	/* OpenCL work-itme global index (X) */
-  int gidY = get_global_id(1); /* OpenCL work-itme global index (Y) */
-  int gidZ = get_global_id(2); /* OpenCL work-itme global index (Z) */
-  int i;			/* Particle index in 1D problem */
-  float p;			/* Particle position */
-  float s;      /* Particle scalar */
-float y;			/* Normalized distance to nearest left grid point */
-  int ind;			/* Integer coordinate */
-  int index;		/* Remeshing index */
-  float w;
-
-  uint line_index = gidY*NB_I+ gidZ*NB_I*NB_II; /* Current 1D problem index */
-
-  __local float gscal_loc[NB_I];
-  __local float* loc_ptr;
-
-  /* Initialize result buffer */
-  for(i=lid; i<NB_I; i+=WI_NB)
-      gscal_loc[i] = 0.0;
-
-  /* Synchronize work-group */
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  for(i=lid*PART_NB_PER_WI; i<(lid + 1)*PART_NB_PER_WI; i+=1)
-    {
-      /* Read particle position */
-      p = ppos[i + line_index];
-      /* Read particle scalar */
-      s = pscal[i + line_index];
-      /* Remesh particle */
-
-      ind = convert_int_rtn(p * mesh->invdx);
-      y = (p - convert_float(ind) * mesh->dx.x) * mesh->invdx;
-
-      index = ind - REMESH_SHIFT;
-
-      w = REMESH(alpha)(y);
-      if (index>=START_INDEX && index <= STOP_INDEX) {loc_ptr = gscal_loc +index-START_INDEX;
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(beta)(y);
-      if (index>=START_INDEX && index <= STOP_INDEX) {loc_ptr = gscal_loc +index-START_INDEX;
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(gamma)(y);
-      if (index>=START_INDEX && index <= STOP_INDEX) {loc_ptr = gscal_loc +index-START_INDEX;
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(delta)(y);
-      if (index>=START_INDEX && index <= STOP_INDEX) {loc_ptr = gscal_loc +index-START_INDEX;
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-#if REMESH_SHIFT > 1
-      index = index + 1;
-      w = REMESH(eta)(y);
-      if (index>=START_INDEX && index <= STOP_INDEX) {loc_ptr = gscal_loc +index-START_INDEX;
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(zeta)(y);
-      if (index>=START_INDEX && index <= STOP_INDEX) {loc_ptr = gscal_loc +index-START_INDEX;
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-#endif
-
-#if REMESH_SHIFT > 2
-      index = index + 1;
-      w = REMESH(theta)(y);
-      if (index>=START_INDEX && index <= STOP_INDEX) {loc_ptr = gscal_loc +index-START_INDEX;
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(iota)(y);
-      if (index>=START_INDEX && index <= STOP_INDEX) {loc_ptr = gscal_loc +index-START_INDEX;
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-#endif
-
-#if REMESH_SHIFT > 3
-      index = index + 1;
-      w = REMESH(kappa)(y);
-      if (index>=START_INDEX && index <= STOP_INDEX) {loc_ptr = gscal_loc +index-START_INDEX;
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      index = index + 1;
-      w = REMESH(mu)(y);
-      if (index>=START_INDEX && index <= STOP_INDEX) {loc_ptr = gscal_loc +index-START_INDEX;
-      w = w * s;
-      (*loc_ptr) += w;}
-      barrier(CLK_LOCAL_MEM_FENCE);
-#endif
-    }
-
-  /* Synchronize work-group */
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  /* Store result */
-  for(i=lid; i<NB_I; i+=WI_NB)
-      gscal[i + line_index] = gscal_loc[i];
-}
diff --git a/hysop/old/gpu.old/cl_src/kernels/copy.cl b/hysop/old/gpu.old/cl_src/kernels/copy.cl
deleted file mode 100644
index 41faadc113a169365148846fad602bd8efa64961..0000000000000000000000000000000000000000
--- a/hysop/old/gpu.old/cl_src/kernels/copy.cl
+++ /dev/null
@@ -1,28 +0,0 @@
-/**
- * @file copy.cl
- * Copy kernel, vectorized version.
- */
-
-/**
- * Performs a copy from in to out. Data are read by blocs of <code>__N__</code> contiguously.
- *
- * @param in Input data.
- * @param out Output data
- *
- * @remark <code>NB_I</code>, <code>NB_II</code>, <code>NB_III</code> : points number in directions from 1st varying index to last.
- */
-__kernel void copy(__global const float* in,
-		   __global float* out)
-{
-  uint xIndex = (get_group_id(0) * TILE_DIM_COPY + get_local_id(0)*__N__);
-  uint yIndex = get_group_id(1) * TILE_DIM_COPY + get_local_id(1);
-  uint zIndex = get_global_id(2);
-  uint index = xIndex + yIndex * NB_I + zIndex*NB_I*NB_II;
-  float x__NN__;
-
-  for(uint i=0; i<TILE_DIM_COPY; i+=BLOCK_ROWS_COPY)
-    {
-      x__NN__ = in[index + __NN__ + i*NB_I];
-      out[index + __NN__ + i*NB_I] = x__NN__;
-    }
-}
diff --git a/hysop/old/gpu.old/cl_src/kernels/copy_locMem.cl b/hysop/old/gpu.old/cl_src/kernels/copy_locMem.cl
deleted file mode 100644
index ea51b77ab13c8100dd27011bba351c76129ab43a..0000000000000000000000000000000000000000
--- a/hysop/old/gpu.old/cl_src/kernels/copy_locMem.cl
+++ /dev/null
@@ -1,33 +0,0 @@
-/**
- * @file copy_locMem.cl
- * Copy kernel, use local memory.
- */
-
-/**
- * Performs a copy from in to out. Data are moved to local memory buffer.
- *
- * @param in Input data.
- * @param out Output data
- *
- * @remark NB_I, NB_II, NB_III : points number in directions from 1st varying index to last.
- */
-__kernel void copy(__global const float* in,
-		   __global float* out)
-{
-  uint xIndex = get_group_id(0) * TILE_DIM_COPY + get_local_id(0);
-  uint yIndex = get_group_id(1) * TILE_DIM_COPY + get_local_id(1);
-  uint zIndex = get_global_id(2);
-  uint index = xIndex + yIndex * NB_I + zIndex*NB_I*NB_II;
-
-  __local float tile[TILE_DIM_COPY][TILE_DIM_COPY];
-
-  for(uint i=0; i<TILE_DIM_COPY; i+=BLOCK_ROWS_COPY)
-    {
-      tile[get_local_id(1)+i][get_local_id(0)] = in[index + i*NB_I];
-    }
-  barrier(CLK_LOCAL_MEM_FENCE);
-  for(uint i=0; i<TILE_DIM_COPY; i+=BLOCK_ROWS_COPY)
-    {
-      out[index + i*NB_I] = tile[get_local_id(1)+i][get_local_id(0)];
-    }
-}
diff --git a/hysop/old/gpu.old/cl_src/kernels/copy_noVec.cl b/hysop/old/gpu.old/cl_src/kernels/copy_noVec.cl
deleted file mode 100644
index fb4cea67ba2882f759e857234236ec9b4b5c4049..0000000000000000000000000000000000000000
--- a/hysop/old/gpu.old/cl_src/kernels/copy_noVec.cl
+++ /dev/null
@@ -1,25 +0,0 @@
-/**
- * @file copy.cl
- * Copy kernel, basic version.
- */
-
-/**
- * Performs a copy from in to out.
- *
- * @param in Input data.
- * @param out Output data
- * @remark NB_I, NB_II, NB_III : points number in directions from 1st varying index to last.
- */
-__kernel void copy(__global const float* in,
-		   __global float* out)
-{
-  uint xIndex = get_group_id(0) * TILE_DIM_COPY + get_local_id(0);
-  uint yIndex = get_group_id(1) * TILE_DIM_COPY + get_local_id(1);
-  uint zIndex = get_global_id(2);
-  uint index = xIndex + yIndex * NB_I + zIndex*NB_I*NB_II;
-
-  for(uint i=0; i<TILE_DIM_COPY; i+=BLOCK_ROWS_COPY)
-    {
-        out[index + i*NB_I] = in[index + i*NB_I];
-    }
-}
diff --git a/hysop/old/gpu.old/cl_src/kernels/diffusion.cl b/hysop/old/gpu.old/cl_src/kernels/diffusion.cl
deleted file mode 100644
index edbfa572a5e09fb97d792c3860d77ba5b6e93215..0000000000000000000000000000000000000000
--- a/hysop/old/gpu.old/cl_src/kernels/diffusion.cl
+++ /dev/null
@@ -1,183 +0,0 @@
-/**
- * @file diffusion.cl
- * Diffusion kernel.
- */
-
-/**
- * Computes diffusion operator with finite differences.
- * Stencil computation is performed within a 2D index space of size <code>TILE_SIZE</code> by a work-group. The 3rd direction is traversed in a loop for data reuse.
- *
- * @param scal_in Input scalar field
- * @param ghostsX Ghosts array if X is a communication direction
- * @param ghostsY Ghosts array if Y is a communication direction
- * @param ghostsZ Ghosts array if Z is a communication direction
- * @param scal_out Output scalar field
- * @param nudt Diffusion coefficient
- * @param dx Mesh space step
- *
- * @remark <code>NB_X</code>, <code>NB_Y</code>, <code>NB_Z</code> : points number in physical space directions.
- * @remark <code>NB_PART</code> Particles number per work-item in computing direction
- * @remark <code>CUT_DIT_X</code>, <code>CUT_DIT_Y</code> and <code>CUT_DIT_Z</code> : flags for communication direction
- * @remark <code>NB_GROUPS_I</code> and <code>NB_GROUPS_II</code> : tiles number in X and Y directions.
- * @remark <code>L_WIDTH</code> : work-item number in tile.
- */
-
-__kernel void diffusion(__global const float* scal_in,
-#if CUT_DIR_X == 1
-			__global const float* ghostsX,
-#endif
-#if CUT_DIR_Y == 1
-			__global const float* ghostsY,
-#endif
-#if CUT_DIR_Z == 1
-			__global const float* ghostsZ,
-#endif
-			__global float* scal_out,
-			float nudt,
-			float4 dx)
-{
-  int t_gidX, t_gidY;
-  int lidX, lidY;
-  int gidX, gidY, gidZ;
-  float cx, cy, cz;
-  float scal_z_m[NB_PART];
-  float scal_z[NB_PART];
-  float scal_z_p[NB_PART];
-  float s;
-  uint i;
-
-  __local float tile_XY[TILE_SIZE+2][TILE_SIZE+2];
-
-  for (t_gidX=get_group_id(0); t_gidX<NB_GROUPS_I; t_gidX+=get_num_groups(0)) {
-    for (t_gidY=get_group_id(1); t_gidY<NB_GROUPS_II; t_gidY+=get_num_groups(1)) {
-
-      // Tile computation
-      lidX = get_local_id(0);
-      lidY = get_local_id(1);
-      gidX = t_gidX*TILE_SIZE + lidX; /* OpenCL work-item global index (X) */
-      gidY = t_gidY*TILE_SIZE + lidY; /* OpenCL work-item global index (Y) */
-      cx = nudt/(dx.x*dx.x);
-      cy = nudt/(dx.y*dx.y);
-      cz = nudt/(dx.z*dx.z);
-
-      for(i=0;i<NB_PART;i++) {
-#if CUT_DIR_Z == 1
-	scal_z_m[i] = ghostsZ[gidX + (gidY+i*L_WIDTH)*NB_X + NB_X*NB_Y];
-#else
-	scal_z_m[i] = scal_in[gidX + (gidY+i*L_WIDTH)*NB_X + (NB_Z-1)*NB_X*NB_Y];
-#endif
-	scal_z[i] = scal_in[gidX + (gidY+i*L_WIDTH)*NB_X];
-      }
-
-      lidX += 1;
-      lidY += 1;
-
-      // loop over Z indices but last.
-      for (gidZ=0; gidZ<(NB_Z-1); gidZ++) {
-	for(i=0;i<NB_PART;i++) {
-	  // fill the tile
-	  tile_XY[lidX][lidY+i*L_WIDTH] = scal_in[gidX + (gidY+i*L_WIDTH)*NB_X + gidZ*NB_X*NB_Y];
-
-	  /* // fill tile edges */
-#if CUT_DIR_X == 1
-	  tile_XY[0][lidY+i*L_WIDTH] = (t_gidX*TILE_SIZE>=1) ? scal_in[t_gidX*TILE_SIZE-1 + (gidY+i*L_WIDTH)*NB_X + gidZ*NB_X*NB_Y] : ghostsX[1 + (gidY+i*L_WIDTH)*2 + gidZ*2*NB_Y];
-	  tile_XY[TILE_SIZE+1][lidY+i*L_WIDTH] = ((t_gidX+1)*TILE_SIZE<NB_X) ? scal_in[(t_gidX+1)*TILE_SIZE + (gidY+i*L_WIDTH)*NB_X + gidZ*NB_X*NB_Y]: ghostsX[(gidY+i*L_WIDTH)*2 + gidZ*2*NB_Y];
-#else
-	  tile_XY[0][lidY+i*L_WIDTH] = scal_in[((t_gidX*TILE_SIZE-1+NB_X)%NB_X) + (gidY+i*L_WIDTH)*NB_X + gidZ*NB_X*NB_Y];
-	  tile_XY[TILE_SIZE+1][lidY+i*L_WIDTH] = scal_in[(((t_gidX+1)*TILE_SIZE+NB_X)%NB_X) + (gidY+i*L_WIDTH)*NB_X + gidZ*NB_X*NB_Y];
-#endif
-	}
-#if CUT_DIR_Y == 1
-	tile_XY[lidX][0] = (t_gidY*TILE_SIZE>=1)? scal_in[gidX + (t_gidY*TILE_SIZE-1)*NB_X + gidZ*NB_X*NB_Y] : ghostsY[gidX + NB_X + gidZ*NB_X*2];
-	tile_XY[lidX][TILE_SIZE+1] = ((t_gidY+1)*TILE_SIZE<NB_Y) ? scal_in[gidX + (t_gidY+1)*TILE_SIZE*NB_X + gidZ*NB_X*NB_Y] : ghostsY[gidX + gidZ*NB_X*2];
-#else
-	tile_XY[lidX][0] = scal_in[gidX + ((t_gidY*TILE_SIZE-1+NB_Y)%NB_Y)*NB_X + gidZ*NB_X*NB_Y];
-	tile_XY[lidX][TILE_SIZE+1] = scal_in[gidX + (((t_gidY+1)*TILE_SIZE+NB_Y)%NB_Y)*NB_X + gidZ*NB_X*NB_Y];
-#endif
-
-	/* Synchronize work-group */
-	barrier(CLK_LOCAL_MEM_FENCE);
-
-	for(i=0;i<NB_PART;i++) {
-	  /* get scalar value in Z direction */
-	  scal_z_p[i] = scal_in[gidX + (gidY+i*L_WIDTH)*NB_X + (gidZ+1)*NB_X*NB_Y];
-
-	  // Compute stencil
-	  // central point
-	  s = scal_z[i] * (1.0 - 2.0 * (cx + cy + cz));
-
-	  s += cz*(scal_z_m[i] + scal_z_p[i]);
-
-	  s += cy * tile_XY[lidX][lidY+i*L_WIDTH-1];
-	  s += cy * tile_XY[lidX][lidY+i*L_WIDTH+1];
-	  s += cx * tile_XY[lidX-1][lidY+i*L_WIDTH];
-	  s += cx * tile_XY[lidX+1][lidY+i*L_WIDTH];
-
-	  // write result
-	  scal_out[gidX + (gidY+i*L_WIDTH)*NB_X + gidZ*NB_X*NB_Y] = s;
-	}
-
-	/* Synchronize work-group */
-	barrier(CLK_LOCAL_MEM_FENCE);
-
-	for(i=0;i<NB_PART;i++) {
-	  // Shift Z values
-	  scal_z_m[i] = scal_z[i];
-	  scal_z[i] = scal_z_p[i];
-	}
-      }
-
-      // Compute last point (from ghosts)
-      gidZ = NB_Z - 1;
-
-      for(i=0;i<NB_PART;i++) {
-	// fill the tile
-	tile_XY[lidX][lidY+i*L_WIDTH] = scal_in[gidX + (gidY+i*L_WIDTH)*NB_X + gidZ*NB_X*NB_Y];
-
-	/* // fill tile edges */
-#if CUT_DIR_X == 1
-	tile_XY[0][lidY+i*L_WIDTH] = (t_gidX*TILE_SIZE>=1) ? scal_in[t_gidX*TILE_SIZE-1 + (gidY+i*L_WIDTH)*NB_X + gidZ*NB_X*NB_Y] : ghostsX[1 + (gidY+i*L_WIDTH)*2 + gidZ*2*NB_Y];
-	tile_XY[TILE_SIZE+1][lidY+i*L_WIDTH] = ((t_gidX+1)*TILE_SIZE<NB_X) ? scal_in[(t_gidX+1)*TILE_SIZE + (gidY+i*L_WIDTH)*NB_X + gidZ*NB_X*NB_Y]: ghostsX[(gidY+i*L_WIDTH)*2 + gidZ*2*NB_Y];
-#else
-	tile_XY[0][lidY+i*L_WIDTH] = scal_in[((t_gidX*TILE_SIZE-1+NB_X)%NB_X) + (gidY+i*L_WIDTH)*NB_X + gidZ*NB_X*NB_Y];
-	tile_XY[TILE_SIZE+1][lidY+i*L_WIDTH] = scal_in[(((t_gidX+1)*TILE_SIZE+NB_X)%NB_X) + (gidY+i*L_WIDTH)*NB_X + gidZ*NB_X*NB_Y];
-#endif
-      }
-#if CUT_DIR_Y == 1
-      tile_XY[lidX][0] = (t_gidY*TILE_SIZE>=1)? scal_in[gidX + (t_gidY*TILE_SIZE-1)*NB_X + gidZ*NB_X*NB_Y] : ghostsY[gidX + NB_X + gidZ*NB_X*2];
-      tile_XY[lidX][TILE_SIZE+1] = ((t_gidY+1)*TILE_SIZE<NB_Y) ? scal_in[gidX + (t_gidY+1)*TILE_SIZE*NB_X + gidZ*NB_X*NB_Y] : ghostsY[gidX + gidZ*NB_X*2];
-#else
-      tile_XY[lidX][0] = scal_in[gidX + ((t_gidY*TILE_SIZE-1+NB_Y)%NB_Y)*NB_X + gidZ*NB_X*NB_Y];
-      tile_XY[lidX][TILE_SIZE+1] = scal_in[gidX + (((t_gidY+1)*TILE_SIZE+NB_Y)%NB_Y)*NB_X + gidZ*NB_X*NB_Y];
-#endif
-
-      /* Synchronize work-group */
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      for(i=0;i<NB_PART;i++) {
-	/* // get scalar value in Z direction */
-#if CUT_DIR_Z == 1
-	scal_z_p[i] = ghostsZ[gidX + (gidY+i*L_WIDTH)*NB_X];
-#else
-	scal_z_p[i] = scal_in[gidX + (gidY+i*L_WIDTH)*NB_X];
-#endif
-
-	// Compute stencil
-	/* // central point */
-	s = scal_z[i] * (1.0 - 2.0 * (cx + cy + cz));
-
-	s += cz*(scal_z_m[i] + scal_z_p[i]);
-
-	s += cy * tile_XY[lidX][lidY+i*L_WIDTH-1];
-	s += cy * tile_XY[lidX][lidY+i*L_WIDTH+1];
-	s += cx * tile_XY[lidX-1][lidY+i*L_WIDTH];
-	s += cx * tile_XY[lidX+1][lidY+i*L_WIDTH];
-
-	// write result
-	scal_out[gidX + (gidY+i*L_WIDTH)*NB_X + gidZ*NB_X*NB_Y] = s;
-      }
-      /* Synchronize work-group */
-      barrier(CLK_LOCAL_MEM_FENCE);
-    }
-  }
-}
diff --git a/hysop/old/gpu.old/cl_src/kernels/fine_to_coarse_filter.cl b/hysop/old/gpu.old/cl_src/kernels/fine_to_coarse_filter.cl
deleted file mode 100644
index 2851c60383e2d634a729ab5cbeb0360e5c5da5a9..0000000000000000000000000000000000000000
--- a/hysop/old/gpu.old/cl_src/kernels/fine_to_coarse_filter.cl
+++ /dev/null
@@ -1,178 +0,0 @@
-__kernel void initialize_output(__global float* scal_out) {
-  scal_out[get_global_id(0) + get_global_id(1)*NB_OUT_X + get_global_id(2)*NB_OUT_X*NB_OUT_Y] = 0.0;
-}
-
-__kernel void coarse_to_fine_filter(__global const float* scal_in,
-				    __global float* scal_out,
-				    float scale_factor,
-				    float4 dx_in, float4 dx_out, float4 origin,
-				    int offset_y, int offset_z) {
-  // Work-group is computed from coarse grid (without ghosts)
-  // globalsize(1) = (NB_OUT_Y - 2*GHOSTS_OUT_Y) / PTS_PER_CELL_Y
-  // globalsize(2) = (NB_OUT_Z - 2*GHOSTS_OUT_Z) / PTS_PER_CELL_X
-  // Resolutions are linked by: (NB_OUT - 2*GHOSTS_OUT) * PTS_PER_CELL = NB_IN
-  // A work-group is in charge of a subdomain corresponding to:
-  //   - [NB_OUT_X, L_STENCIL, L_STENCIL] for the coarse grid
-  //   - [NB_IN_X, PTS_PER_CELL_Y, PTS_PER_CELL] for the fine grid
-  // Data in the fine grid are read only once for the whole computation.
-  // Because of the stencil, these data are spread over multiple coarse grid cells -> we need a global memory synchronization.
-  // The global synchronization is obtained by several kernel launch with an offset
-  unsigned int lid = get_local_id(0);
-  unsigned int gid_y = get_global_id(1);
-  unsigned int gid_z = get_global_id(2);
-  unsigned int iy_c = gid_y*L_STENCIL+offset_y;
-  unsigned int iz_c = gid_z*L_STENCIL+offset_z;
-  unsigned int iy_f = iy_c*PTS_PER_CELL_Y;
-  unsigned int iz_f = iz_c*PTS_PER_CELL_Z;
-  unsigned int i, j, k, b_id, pt_x, pt_y, pt_z;
-  float4 coord_in;
-  float4 coord_out;
-  float4 d;
-#if FORMULA==L2_1
-  float4 wx, wy, wz;
-#endif
-  __local float line[WG*PTS_PER_CELL_X];
-  __local float result[NB_OUT_X][L_STENCIL][L_STENCIL];
-  __private float p_res[L_STENCIL][L_STENCIL][L_STENCIL];
-
-  // Fill local arrays
-  // Output data
-  for (k=0;k<L_STENCIL;k++)
-    for (j=0;j<L_STENCIL;j++)
-      for (i=lid;i<NB_OUT_X;i+=WG)
-  	result[i][j][k] = scal_out[i + (GHOSTS_OUT_Y+iy_c-SHIFT_STENCIL+j)*NB_OUT_X +
-				   (GHOSTS_OUT_Z+iz_c-SHIFT_STENCIL+k)*NB_OUT_X*NB_OUT_Y];
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  for (b_id=0;b_id<NB_IN_X/(WG*PTS_PER_CELL_X);b_id++)
-    {
-      // Compute a bloc of: (b_id is the bloc number in X direction)
-      //   - [WG*PTS_PER_CELL_X, PTS_PER_CELL_Y, PTS_PER_CELL_Z] points in fine grid
-      //   - [WG, L_STENCIL, L_STENCIL] points in coarse grid
-      // Each work-item is computing a coarse cell (looping in 3D over PTS_PER_CELL thanks to pt_x, pt_y and pt_z indices)
-      // global fine grid data are cached line by line in the X direction
-      coord_out = ((float4)(b_id*WG+lid, iy_c, iz_c, 0.0)) * dx_out;
-      // Initialize the register corresponding to the current cell
-      for (pt_z=0;pt_z<L_STENCIL;pt_z++)
-	for (pt_y=0;pt_y<L_STENCIL;pt_y++)
-	  for (pt_x=0;pt_x<L_STENCIL;pt_x++)
-	    p_res[pt_x][pt_y][pt_z] = 0.0;
-
-      // Loop over PTS_PER_CELL_Z: fine grid points in the curent cell
-      for (pt_z=0;pt_z<PTS_PER_CELL_Z;pt_z++)
-	{
-	  // Loop over PTS_PER_CELL_Y: fine grid points in the curent cell
-	  for (pt_y=0;pt_y<PTS_PER_CELL_Y;pt_y++)
-	    {
-	      // Input cache
-	      for (i=lid;i<WG*PTS_PER_CELL_X;i+=WG)
-		line[i] = scal_in[b_id*(WG*PTS_PER_CELL_X) + i + (iy_f+pt_y)*NB_IN_X + (iz_f+pt_z)*NB_IN_X*NB_IN_Y];
-	      barrier(CLK_LOCAL_MEM_FENCE);
-
-	      // Loop over PTS_PER_CELL_X: fine grid points in the curent cell
-	      for (pt_x=0;pt_x<PTS_PER_CELL_X;pt_x++)
-		{
-		  coord_in = ((float4)(b_id*(WG*PTS_PER_CELL_X) + lid*PTS_PER_CELL_X + pt_x, iy_f+pt_y, iz_f+pt_z, 0.0)) * dx_in;
-		  d = (coord_in  - coord_out) / dx_out;
-		  #if FORMULA==LINEAR
-		  p_res[0][0][0] += scale_factor * (1.0 - d.x) * (1.0 - d.y) * (1.0 - d.z) * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[0][0][1] += scale_factor * (1.0 - d.x) * (1.0 - d.y) * (d.z) * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[0][1][0] += scale_factor * (1.0 - d.x) * (d.y) * (1.0 - d.z) * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[0][1][1] += scale_factor * (1.0 - d.x) * (d.y) * (d.z) * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[1][0][0] += scale_factor * (d.x) * (1.0 - d.y) * (1.0 - d.z) * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[1][0][1] += scale_factor * (d.x) * (1.0 - d.y) * (d.z) * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[1][1][0] += scale_factor * (d.x) * (d.y) * (1.0 - d.z) * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[1][1][1] += scale_factor * (d.x) * (d.y) * (d.z) * line[lid*PTS_PER_CELL_X+pt_x];
-		  #elif FORMULA==L2_1
-		  wx = (float4)(alpha_l2_1(d.x), beta_l2_1(d.x), gamma_l2_1(d.x), delta_l2_1(d.x));
-		  wy = (float4)(alpha_l2_1(d.y), beta_l2_1(d.y), gamma_l2_1(d.y), delta_l2_1(d.y));
-		  wz = (float4)(alpha_l2_1(d.z), beta_l2_1(d.z), gamma_l2_1(d.z), delta_l2_1(d.z));
-		  p_res[0][0][0] += scale_factor * wx.x * wy.x * wz.x * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[0][0][1] += scale_factor * wx.x * wy.x * wz.y * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[0][0][2] += scale_factor * wx.x * wy.x * wz.z * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[0][0][3] += scale_factor * wx.x * wy.x * wz.w * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[0][1][0] += scale_factor * wx.x * wy.y * wz.x * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[0][1][1] += scale_factor * wx.x * wy.y * wz.y * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[0][1][2] += scale_factor * wx.x * wy.y * wz.z * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[0][1][3] += scale_factor * wx.x * wy.y * wz.w * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[0][2][0] += scale_factor * wx.x * wy.z * wz.x * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[0][2][1] += scale_factor * wx.x * wy.z * wz.y * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[0][2][2] += scale_factor * wx.x * wy.z * wz.z * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[0][2][3] += scale_factor * wx.x * wy.z * wz.w * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[0][3][0] += scale_factor * wx.x * wy.w * wz.x * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[0][3][1] += scale_factor * wx.x * wy.w * wz.y * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[0][3][2] += scale_factor * wx.x * wy.w * wz.z * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[0][3][3] += scale_factor * wx.x * wy.w * wz.w * line[lid*PTS_PER_CELL_X+pt_x];
-
-		  p_res[1][0][0] += scale_factor * wx.y * wy.x * wz.x * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[1][0][1] += scale_factor * wx.y * wy.x * wz.y * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[1][0][2] += scale_factor * wx.y * wy.x * wz.z * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[1][0][3] += scale_factor * wx.y * wy.x * wz.w * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[1][1][0] += scale_factor * wx.y * wy.y * wz.x * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[1][1][1] += scale_factor * wx.y * wy.y * wz.y * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[1][1][2] += scale_factor * wx.y * wy.y * wz.z * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[1][1][3] += scale_factor * wx.y * wy.y * wz.w * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[1][2][0] += scale_factor * wx.y * wy.z * wz.x * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[1][2][1] += scale_factor * wx.y * wy.z * wz.y * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[1][2][2] += scale_factor * wx.y * wy.z * wz.z * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[1][2][3] += scale_factor * wx.y * wy.z * wz.w * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[1][3][0] += scale_factor * wx.y * wy.w * wz.x * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[1][3][1] += scale_factor * wx.y * wy.w * wz.y * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[1][3][2] += scale_factor * wx.y * wy.w * wz.z * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[1][3][3] += scale_factor * wx.y * wy.w * wz.w * line[lid*PTS_PER_CELL_X+pt_x];
-
-		  p_res[2][0][0] += scale_factor * wx.z * wy.x * wz.x * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[2][0][1] += scale_factor * wx.z * wy.x * wz.y * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[2][0][2] += scale_factor * wx.z * wy.x * wz.z * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[2][0][3] += scale_factor * wx.z * wy.x * wz.w * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[2][1][0] += scale_factor * wx.z * wy.y * wz.x * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[2][1][1] += scale_factor * wx.z * wy.y * wz.y * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[2][1][2] += scale_factor * wx.z * wy.y * wz.z * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[2][1][3] += scale_factor * wx.z * wy.y * wz.w * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[2][2][0] += scale_factor * wx.z * wy.z * wz.x * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[2][2][1] += scale_factor * wx.z * wy.z * wz.y * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[2][2][2] += scale_factor * wx.z * wy.z * wz.z * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[2][2][3] += scale_factor * wx.z * wy.z * wz.w * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[2][3][0] += scale_factor * wx.z * wy.w * wz.x * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[2][3][1] += scale_factor * wx.z * wy.w * wz.y * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[2][3][2] += scale_factor * wx.z * wy.w * wz.z * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[2][3][3] += scale_factor * wx.z * wy.w * wz.w * line[lid*PTS_PER_CELL_X+pt_x];
-
-		  p_res[3][0][0] += scale_factor * wx.w * wy.x * wz.x * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[3][0][1] += scale_factor * wx.w * wy.x * wz.y * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[3][0][2] += scale_factor * wx.w * wy.x * wz.z * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[3][0][3] += scale_factor * wx.w * wy.x * wz.w * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[3][1][0] += scale_factor * wx.w * wy.y * wz.x * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[3][1][1] += scale_factor * wx.w * wy.y * wz.y * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[3][1][2] += scale_factor * wx.w * wy.y * wz.z * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[3][1][3] += scale_factor * wx.w * wy.y * wz.w * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[3][2][0] += scale_factor * wx.w * wy.z * wz.x * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[3][2][1] += scale_factor * wx.w * wy.z * wz.y * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[3][2][2] += scale_factor * wx.w * wy.z * wz.z * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[3][2][3] += scale_factor * wx.w * wy.z * wz.w * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[3][3][0] += scale_factor * wx.w * wy.w * wz.x * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[3][3][1] += scale_factor * wx.w * wy.w * wz.y * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[3][3][2] += scale_factor * wx.w * wy.w * wz.z * line[lid*PTS_PER_CELL_X+pt_x];
-		  p_res[3][3][3] += scale_factor * wx.w * wy.w * wz.w * line[lid*PTS_PER_CELL_X+pt_x];
-
-		  #endif
-		}
-	    }
-	}
-      // Store the registers results in local memory
-      for (pt_z=0;pt_z<L_STENCIL;pt_z++)
-	for (pt_y=0;pt_y<L_STENCIL;pt_y++)
-	  for (pt_x=0;pt_x<L_STENCIL;pt_x++) {
-	    result[GHOSTS_OUT_X+b_id*WG+lid-SHIFT_STENCIL+pt_x][pt_y][pt_z] += p_res[pt_x][pt_y][pt_z];
-	    barrier(CLK_LOCAL_MEM_FENCE);
-	  }
-    }
-
-  // Write result in output array
-  for (k=0;k<L_STENCIL;k++)
-    for (j=0;j<L_STENCIL;j++)
-      for (i=lid;i<NB_OUT_X;i+=WG)
-	scal_out[i + (GHOSTS_OUT_Y+iy_c-SHIFT_STENCIL+j)*NB_OUT_X +
-		 (GHOSTS_OUT_Z+iz_c-SHIFT_STENCIL+k)*NB_OUT_X*NB_OUT_Y] = result[i][j][k];
-
-}
diff --git a/hysop/old/gpu.old/cl_src/kernels/multiphase_baroclinic_rhs.cl b/hysop/old/gpu.old/cl_src/kernels/multiphase_baroclinic_rhs.cl
deleted file mode 100644
index d9938aa9994f0c5e1c37b8d56fe82b45db3405e0..0000000000000000000000000000000000000000
--- a/hysop/old/gpu.old/cl_src/kernels/multiphase_baroclinic_rhs.cl
+++ /dev/null
@@ -1,321 +0,0 @@
-/** Computes the right hand side of the baroclinic term.
- * The pressure gradient is given in input at a coarse scale.
- * The density is given at a fine scale.
- * Result is computed ans returned at fine scale.
- */
-#define C_TILE_IDX(x,y) x+GHOSTS_C_X+(y+GHOSTS_C_Y)*C_TILE_WIDTH
-#if FD_ORDER == FD_C_2
-#define GRAD_GH 1
-#endif
-#if FD_ORDER == FD_C_4
-#define GRAD_GH 2
-#endif
-
-float compute_density(float x);
-float compute_density(float x){
-  return __USER_DENSITY_FUNCTION_FROM_GIVEN_INPUT__;
-}
-
-float interpolate(__local float* loc_gradp_zm, __local float* loc_gradp_zp,
-		  float *h, int lidx, int lidy,
-		  int cellx, int celly, int cellz);
-
-float interpolate(__local float* loc_zm,
-		  __local float* loc_zp,
-		  float *h,
-		  int lidx,
-		  int lidy,
-		  int cellx,
-		  int celly,
-		  int cellz) {
-  float res = 0.0;
-  res += (1.0 - h[cellz]) * (1.0 - h[cellx]) * (1.0 - h[celly]) * loc_zm[C_TILE_IDX(lidx,lidy)];
-  res += (1.0 - h[cellz]) * (h[cellx]) * (1.0 - h[celly]) * loc_zm[C_TILE_IDX(lidx+1,lidy)];
-  res += (1.0 - h[cellz]) * (1.0 - h[cellx]) * (h[celly]) * loc_zm[C_TILE_IDX(lidx,lidy+1)];
-  res += (1.0 - h[cellz]) * (h[cellx]) * (h[celly]) * loc_zm[C_TILE_IDX(lidx+1,lidy+1)];
-  res += (h[cellz]) * (1.0 - h[cellx]) * (1.0 - h[celly]) * loc_zp[C_TILE_IDX(lidx,lidy)];
-  res += (h[cellz]) * (h[cellx]) * (1.0 - h[celly]) * loc_zp[C_TILE_IDX(lidx+1,lidy)];
-  res += (h[cellz]) * (1.0 - h[cellx]) * (h[celly]) * loc_zp[C_TILE_IDX(lidx,lidy+1)];
-  res += (h[cellz]) * (h[cellx]) * (h[celly]) * loc_zp[C_TILE_IDX(lidx+1,lidy+1)];
-  return res;
-}
-
-void fill_loc_rho_cache(__local float *loc_rho,
-			__global const float* rho,
-#if CUT_DIR_Y == 1
-			__global const float* rho_ghostsY,
-#endif
-			int lidx,
-			int lidy,
-			int gidx,
-			int gidy,
-			int idz);
-void fill_loc_rho_cache(__local float *loc_rho,
-			__global const float* rho,
-#if CUT_DIR_Y == 1
-			__global const float* rho_ghostsY,
-#endif
-			int lidx,
-			int lidy,
-			int gidx,
-			int gidy,
-			int idz) {
-  int celly, cellx;
-  if (gidx > 0 && gidx < ((int)get_num_groups(0))-1 && gidy > 0 && gidy < ((int)get_num_groups(1))-1) {
-    for (celly=lidy; celly<F_TILE_SIZE+2*GRAD_GH; celly+=get_local_size(1)) {
-      for (cellx=lidx; cellx<F_TILE_SIZE+2*GRAD_GH; cellx+=get_local_size(0)) {
-	loc_rho[cellx + (celly)*(F_TILE_SIZE+2*GRAD_GH)] =
-	  compute_density(rho[cellx-GRAD_GH + gidx*F_TILE_SIZE +
-			      (celly-GRAD_GH + gidy*F_TILE_SIZE)*NB_F_X +
-			      idz*NB_F_X*NB_F_Y]);
-      }
-    }
-  } else {
-    for (celly=lidy; celly<F_TILE_SIZE+2*GRAD_GH; celly+=get_local_size(1)) {
-      for (cellx=lidx; cellx<F_TILE_SIZE+2*GRAD_GH; cellx+=get_local_size(0)) {
-#if CUT_DIR_Y == 1
-	if (celly-GRAD_GH + gidy*F_TILE_SIZE >= NB_F_Y)
-	  loc_rho[cellx + (celly)*(F_TILE_SIZE+2*GRAD_GH)] =
-	    compute_density(rho_ghostsY[(cellx-GRAD_GH + gidx*F_TILE_SIZE+NB_F_X)%NB_F_X +
-					(celly-GRAD_GH + gidy*F_TILE_SIZE - NB_F_Y)*NB_F_X +
-					idz*NB_F_X*2*GRAD_GH]);
-	else if (celly-GRAD_GH + gidy*F_TILE_SIZE < 0)
-	  loc_rho[cellx + (celly)*(F_TILE_SIZE+2*GRAD_GH)] =
-	    compute_density(rho_ghostsY[(cellx-GRAD_GH + gidx*F_TILE_SIZE+NB_F_X)%NB_F_X +
-					(2*GRAD_GH + (celly-GRAD_GH + gidy*F_TILE_SIZE))*NB_F_X +
-					idz*NB_F_X*2*GRAD_GH]);
-	else
-	  loc_rho[cellx + (celly)*(F_TILE_SIZE+2*GRAD_GH)] =
-	    compute_density(rho[(cellx-GRAD_GH + gidx*F_TILE_SIZE+NB_F_X)%NB_F_X +
-				(celly-GRAD_GH + gidy*F_TILE_SIZE)*NB_F_X +
-				idz*NB_F_X*NB_F_Y]);
-#else
-	loc_rho[cellx + (celly)*(F_TILE_SIZE+2*GRAD_GH)] =
-	  compute_density(rho[(cellx-GRAD_GH + gidx*F_TILE_SIZE+NB_F_X)%NB_F_X +
-			      ((celly-GRAD_GH + gidy*F_TILE_SIZE+NB_F_Y)%NB_F_Y)*NB_F_X +
-			      idz*NB_F_X*NB_F_Y]);
-#endif
-      }
-    }
-  }
-}
-
-__kernel void baroclinic_rhs(__global float* rhs_x,
-			     __global float* rhs_y,
-			     __global float* rhs_z,
-			     __global const float* rho,
-#if CUT_DIR_Y == 1
-			     __global const float* rho_ghostsY,
-#endif
-#if CUT_DIR_Z == 1
-			     __global const float* rho_ghostsZ,
-#endif
-			     __global const float* gradp,
-			     float4 dx_coarse,
-			     float4 dx_fine)
-{
-  /* Space index refers to the coarse grid comute points */
-  int lidx = get_local_id(0);
-  int lidy = get_local_id(1);
-  int gidx = get_group_id(0);
-  int gidy = get_group_id(1);
-  int rhs_idx = lidx*N_PER_CELL + gidx*F_TILE_SIZE + (lidy*N_PER_CELL + gidy*F_TILE_SIZE)*NB_F_X;
-  float h[N_PER_CELL];
-  int i, cellx, celly, cellz;
-  int idz, c_idz;
-  float p_gradp, gradrho_x, gradrho_y, gradrho_z;
-  float rho_zm[N_PER_CELL][N_PER_CELL];
-#if FD_ORDER == FD_C_4
-  float rho_zmm[N_PER_CELL][N_PER_CELL];
-  float rho_zp[N_PER_CELL][N_PER_CELL];
-  float rho_zpp;
-#endif
-
-  __local float loc_rho[(F_TILE_SIZE+2*GRAD_GH)*(F_TILE_SIZE+2*GRAD_GH)];
-  __local float loc_gradp_zm[C_TILE_WIDTH*C_TILE_HEIGHT];
-  __local float loc_gradp_zp[C_TILE_WIDTH*C_TILE_HEIGHT];
-
-
-  // Compute distances from fine grid points to coarse left point cell.
-  for (i=0; i<N_PER_CELL; i++)
-    h[i] = i * 1.0 / (1.0 * N_PER_CELL);
-
-  idz = 0; 			/* Fine grid Z indice */
-  c_idz=GHOSTS_C_Z;
-  // Fill gradp z cache for first iteration
-  for (celly=lidy; celly<C_TILE_HEIGHT; celly+=get_local_size(1)) {
-    for (cellx=lidx; cellx<C_TILE_WIDTH; cellx+=get_local_size(0)) {
-      loc_gradp_zm[cellx + celly*(C_TILE_WIDTH)] =
-	gradp[cellx + gidx*C_TILE_SIZE + (celly + gidy*C_TILE_SIZE)*NB_C_X + c_idz*NB_C_X*NB_C_Y];
-    }
-  }
-
-  for (celly=0; celly<N_PER_CELL; celly++) {
-    for (cellx=0; cellx<N_PER_CELL; cellx++) {
-#if FD_ORDER == FD_C_4
-#if CUT_DIR_Z == 1
-      rho_zm[cellx][celly] = compute_density(rho_ghostsZ[rhs_idx + cellx + celly*NB_F_X + 3*NB_F_X*NB_F_Y]);
-      rho_zmm[cellx][celly] = compute_density(rho_ghostsZ[rhs_idx + cellx + celly*NB_F_X + 2*NB_F_X*NB_F_Y]);
-      rho_zp[cellx][celly] = compute_density(rho[rhs_idx + cellx + celly*NB_F_X + NB_F_X*NB_F_Y]);
-#else
-      rho_zm[cellx][celly] = compute_density(rho[rhs_idx + cellx + celly*NB_F_X + (NB_F_Z-1)*NB_F_X*NB_F_Y]);
-      rho_zmm[cellx][celly] = compute_density(rho[rhs_idx + cellx + celly*NB_F_X + (NB_F_Z-2)*NB_F_X*NB_F_Y]);
-      rho_zp[cellx][celly] = compute_density(rho[rhs_idx + cellx + celly*NB_F_X + NB_F_X*NB_F_Y]);
-#endif
-#else
-#if CUT_DIR_Z == 1
-      rho_zm[cellx][celly] = compute_density(rho_ghostsZ[rhs_idx + cellx + celly*NB_F_X + NB_F_X*NB_F_Y]);
-#else
-      rho_zm[cellx][celly] = compute_density(rho[rhs_idx + cellx + celly*NB_F_X + (NB_F_Z-1)*NB_F_X*NB_F_Y]));
-#endif
-#endif
-    }
-  }
-
-  for (c_idz=GHOSTS_C_Z; c_idz<NB_C_Z-GHOSTS_C_Z; c_idz++) {
-
-    if((c_idz-GHOSTS_C_Z)%2 == 0)
-      for (celly=lidy; celly<C_TILE_HEIGHT; celly+=get_local_size(1)) {
-	for (cellx=lidx; cellx<C_TILE_WIDTH; cellx+=get_local_size(0)) {
-	  loc_gradp_zp[cellx + celly*(C_TILE_WIDTH)] =
-	    gradp[cellx + gidx*C_TILE_SIZE + (celly + gidy*C_TILE_SIZE)*NB_C_X + (c_idz+1)*NB_C_X*NB_C_Y];
-	}
-      }
-    else
-      for (celly=lidy; celly<C_TILE_HEIGHT; celly+=get_local_size(1)) {
-	for (cellx=lidx; cellx<C_TILE_WIDTH; cellx+=get_local_size(0)) {
-	  loc_gradp_zm[cellx + celly*(C_TILE_WIDTH)] =
-	    gradp[cellx + gidx*C_TILE_SIZE + (celly + gidy*C_TILE_SIZE)*NB_C_X + (c_idz+1)*NB_C_X*NB_C_Y];
-	}
-      }
-    barrier(CLK_LOCAL_MEM_FENCE);
-
-    for (cellz=0; cellz<N_PER_CELL; cellz++) {
-      //fill rho cache
-#if CUT_DIR_Y == 1
-      fill_loc_rho_cache(loc_rho, rho, rho_ghostsY, lidx, lidy, gidx, gidy, idz);
-#else
-      fill_loc_rho_cache(loc_rho, rho, lidx, lidy, gidx, gidy, idz);
-#endif
-
-      barrier(CLK_LOCAL_MEM_FENCE);
-
-      for (celly=0; celly<N_PER_CELL; celly++) {
-	for (cellx=0; cellx<N_PER_CELL; cellx++) {
-	  if((c_idz-GHOSTS_C_Z)%2 == 0)
-	    p_gradp = interpolate(loc_gradp_zm, loc_gradp_zp, h, lidx, lidy, cellx, celly, cellz);
-	  else
-	    p_gradp = interpolate(loc_gradp_zp, loc_gradp_zm, h, lidx, lidy, cellx, celly, cellz);
-
-	  ///// TEMP WRITE GRADP TO RHS
-	  //rhs_x[rhs_idx + cellx + celly*NB_F_X + idz*NB_F_X*NB_F_Y] = p_gradp;
-	  ///// END TEMP WRITE GRADP TO RHS
-
-#if FD_ORDER == FD_C_2
-	  gradrho_x = loc_rho[GRAD_GH+lidx*N_PER_CELL+cellx+1 +
-			      (GRAD_GH+lidy*N_PER_CELL+celly)*(F_TILE_SIZE+2*GRAD_GH)];
-	  gradrho_x -= loc_rho[GRAD_GH+lidx*N_PER_CELL+cellx-1 +
-			       (GRAD_GH+lidy*N_PER_CELL+celly)*(F_TILE_SIZE+2*GRAD_GH)];
-	  gradrho_x /= (2.0*dx_fine.x);
-
-	  gradrho_y = loc_rho[GRAD_GH+lidx*N_PER_CELL+cellx +
-			      (GRAD_GH+lidy*N_PER_CELL+celly+1)*(F_TILE_SIZE+2*GRAD_GH)];
-	  gradrho_y -= loc_rho[GRAD_GH+lidx*N_PER_CELL+cellx +
-			       (GRAD_GH+lidy*N_PER_CELL+celly-1)*(F_TILE_SIZE+2*GRAD_GH)];
-	  gradrho_y /= (2.0*dx_fine.y);
-
-#if CUT_DIR_Z == 1
-	  if (idz==NB_F_Z-1)
-	    gradrho_z = compute_density(rho_ghostsZ[rhs_idx + cellx + celly*NB_F_X]);
-	  else
-	    gradrho_z = compute_density(rho[rhs_idx + cellx + celly*NB_F_X + ((idz+1)%NB_F_Z)*NB_F_X*NB_F_Y]);
-#else
- 	  gradrho_z = compute_density(rho[rhs_idx + cellx + celly*NB_F_X + ((idz+1)%NB_F_Z)*NB_F_X*NB_F_Y]);
-#endif
-	  gradrho_z -= rho_zm[cellx][celly];
-	  gradrho_z /= (2.0*dx_fine.z);
-
-#endif
-#if FD_ORDER == FD_C_4
-	  gradrho_x = loc_rho[GRAD_GH+lidx*N_PER_CELL+cellx+1 +
-			      (GRAD_GH+lidy*N_PER_CELL+celly)*(F_TILE_SIZE+2*GRAD_GH)];
-	  gradrho_x -= loc_rho[GRAD_GH+lidx*N_PER_CELL+cellx-1 +
-			      (GRAD_GH+lidy*N_PER_CELL+celly)*(F_TILE_SIZE+2*GRAD_GH)];
-	  gradrho_x *= 8.0;
-	  gradrho_x += loc_rho[GRAD_GH+lidx*N_PER_CELL+cellx-2 +
-			      (GRAD_GH+lidy*N_PER_CELL+celly)*(F_TILE_SIZE+2*GRAD_GH)];
-	  gradrho_x -= loc_rho[GRAD_GH+lidx*N_PER_CELL+cellx+2 +
-			      (GRAD_GH+lidy*N_PER_CELL+celly)*(F_TILE_SIZE+2*GRAD_GH)];
-	  gradrho_x /= (12.0*dx_fine.x);
-
-	  gradrho_y = loc_rho[GRAD_GH+lidx*N_PER_CELL+cellx +
-			      (GRAD_GH+lidy*N_PER_CELL+celly+1)*(F_TILE_SIZE+2*GRAD_GH)];
-	  gradrho_y -= loc_rho[GRAD_GH+lidx*N_PER_CELL+cellx +
-			      (GRAD_GH+lidy*N_PER_CELL+celly-1)*(F_TILE_SIZE+2*GRAD_GH)];
-	  gradrho_y *= 8.0;
-	  gradrho_y += loc_rho[GRAD_GH+lidx*N_PER_CELL+cellx +
-			      (GRAD_GH+lidy*N_PER_CELL+celly-2)*(F_TILE_SIZE+2*GRAD_GH)];
-	  gradrho_y -= loc_rho[GRAD_GH+lidx*N_PER_CELL+cellx +
-			      (GRAD_GH+lidy*N_PER_CELL+celly+2)*(F_TILE_SIZE+2*GRAD_GH)];
-	  gradrho_y /= (12.0*dx_fine.y);
-
-#if CUT_DIR_Z == 1
-	  if (idz==NB_F_Z-1)
-	    rho_zpp = compute_density(rho_ghostsZ[rhs_idx + cellx + celly*NB_F_X + NB_F_X*NB_F_Y]);
-	  else if (idz==NB_F_Z-2)
-	    rho_zpp = compute_density(rho_ghostsZ[rhs_idx + cellx + celly*NB_F_X]);
-	  else
-	    rho_zpp = compute_density(rho[rhs_idx + cellx + celly*NB_F_X + ((idz+2)%NB_F_Z)*NB_F_X*NB_F_Y]);
-#else
-	  rho_zpp = compute_density(rho[rhs_idx + cellx + celly*NB_F_X + ((idz+2)%NB_F_Z)*NB_F_X*NB_F_Y]);
-#endif
-	  gradrho_z = rho_zp[cellx][celly];
-	  gradrho_z -= rho_zm[cellx][celly];
-	  gradrho_z *= 8.0;
-	  gradrho_z += rho_zmm[cellx][celly];
-	  gradrho_z -= rho_zpp;
-	  gradrho_z /= (12.0*dx_fine.z);
-#endif
-
-	  ///// TEMP WRITE GRADrho_X TO RHS
-	  //rhs_x[rhs_idx + cellx + celly*NB_F_X + idz*NB_F_X*NB_F_Y] = gradrho_x;
-	  ///// END TEMP WRITE GRADrho TO RHS
-	  ///// TEMP WRITE GRADrho_Y TO RHS
-	  //rhs_x[rhs_idx + cellx + celly*NB_F_X + idz*NB_F_X*NB_F_Y] = gradrho_y;
-	  ///// END TEMP WRITE GRADrho TO RHS
-	  ///// TEMP WRITE GRADrho_Z TO RHS
-	  //rhs_x[rhs_idx + cellx + celly*NB_F_X + idz*NB_F_X*NB_F_Y] = gradrho_z;
-	  ///// END TEMP WRITE GRADrho TO RHS
-
-	  // Using gradp X component as gradp and assuming this kernel run first to initialise output
-#if GRADP_COMP == 0
-	  rhs_x[rhs_idx + cellx + celly*NB_F_X + idz*NB_F_X*NB_F_Y] = 0.0;
-	  rhs_y[rhs_idx + cellx + celly*NB_F_X + idz*NB_F_X*NB_F_Y] = -gradrho_z*p_gradp;
-	  rhs_z[rhs_idx + cellx + celly*NB_F_X + idz*NB_F_X*NB_F_Y] = gradrho_y*p_gradp;
-#endif
-	  // Using gradp Y component as gradp
-#if GRADP_COMP == 1
-	  rhs_x[rhs_idx + cellx + celly*NB_F_X + idz*NB_F_X*NB_F_Y] += gradrho_z*p_gradp;
-	  rhs_z[rhs_idx + cellx + celly*NB_F_X + idz*NB_F_X*NB_F_Y] -= gradrho_x*p_gradp;
-#endif
-	  // Using gradp Z component as gradp
-#if GRADP_COMP == 2
-	  rhs_x[rhs_idx + cellx + celly*NB_F_X + idz*NB_F_X*NB_F_Y] -= gradrho_y*p_gradp;
-	  rhs_y[rhs_idx + cellx + celly*NB_F_X + idz*NB_F_X*NB_F_Y] += gradrho_x*p_gradp;
-#endif
-
-
-	  // For next iteration we swap values in cache.
-#if FD_ORDER == FD_C_4
-	  rho_zp[cellx][celly] = rho_zpp;
-	  rho_zmm[cellx][celly] = rho_zm[cellx][celly];
-#endif
-	  rho_zm[cellx][celly] = loc_rho[GRAD_GH+lidx*N_PER_CELL+cellx +
-					 (GRAD_GH+lidy*N_PER_CELL+celly)*(F_TILE_SIZE+2*GRAD_GH)];
-	}
-      }
-      idz++;
-    }
-    barrier(CLK_LOCAL_MEM_FENCE);
-  }
-}
diff --git a/hysop/old/gpu.old/cl_src/kernels/remeshing.cl b/hysop/old/gpu.old/cl_src/kernels/remeshing.cl
deleted file mode 100644
index 809c5ad32567ca2b91cc51773fffb480c05de721..0000000000000000000000000000000000000000
--- a/hysop/old/gpu.old/cl_src/kernels/remeshing.cl
+++ /dev/null
@@ -1,83 +0,0 @@
-/**
- * @file remeshing.cl
- * Remeshing kernel.
- */
-
-/**
- * Performs remeshing of the particles' scalar.
- * A work-group is handling a 1D problem. Thus, gidY and gidZ are constants among work-items of a work-group.
- * Each work-item computes <code>NB_I/WI_NB</code> particles positions. To avoid concurrent witings, in case of strong velocity gradients, work-items computes contiguous particles.
- * Particle are computed through OpenCL vector types of lenght 2, 4 or 8.
- * Scalar results are stored in a local buffer as a cache and then copied to global memory buffer.
- *
- * @param ppos Particle position
- * @param pscal Particle scalar
- * @param gscal Grid scalar
- * @param min_position Domain lower coordinate
- * @param dx Space step
- *
- * @remark <code>NB_I</code>, <code>NB_II</code>, <code>NB_III</code> : points number in directions from 1st varying index to last.
- * @remark <code>WI_NB</code> corresponds to the work-item number.
- * @remark <code>__N__</code> is expanded at compilation time by vector width.
- * @remark <code>__NN__</code> is expanded at compilation time by a sequence of integer for each vector component.
- * @remark <code>__RCOMP_I</code> flag is for instruction expansion for the different remeshed components.
- * @remark <code>__RCOMP_P</code> flag is for function parameter expansion for the different remeshed components.
- * @remark <code>__ID__</code> is replaced by the remeshed component id in an expansion.
- * @see hysop.gpu.tools.parse_file
- */
-__kernel void remeshing_kernel(__global const float* ppos,
-			       __RCOMP_P__global const float* pscal__ID__,
-			       __RCOMP_P__global float* gscal__ID__,
-			       __constant struct AdvectionMeshInfo* mesh)
-{
-  uint gidX = get_global_id(0);	/* OpenCL work-itme global index (X) */
-  uint gidY; /* OpenCL work-itme global index (Y) */
-  uint gidZ; /* OpenCL work-itme global index (Z) */
-  //  float invdx = 1.0/dx;         /* Space step inverse */
-  uint i;			/* Particle index in 1D problem */
-  float__N__ p;			/* Particle position */
-  __RCOMP_I float__N__ s__ID__; /* Particle scalar */
-  uint line_index; /* Current 1D problem index */
-
-  __RCOMP_I__local float gscal_loc__ID__[NB_I]; /* Local buffer for result */
-
-#ifdef NB_Z
-  for(gidZ=get_global_id(2); gidZ<NB_III; gidZ+=get_global_size(2)) {
-#else
-  gidZ=get_global_id(2); {
-#endif
-  for(gidY=get_global_id(1); gidY<NB_II; gidY+=get_global_size(1)) {
-  line_index = gidY*NB_I+ gidZ*NB_I*NB_II;
-
-  for(i=gidX*__N__; i<NB_I; i+=(WI_NB*__N__))
-    {
-      /* Initialize result buffer */
-      __RCOMP_Igscal_loc__ID__[i+__NN__] = 0.0;
-    }
-
-  /* Synchronize work-group */
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  for(i=gidX*PART_NB_PER_WI; i<(gidX + 1)*PART_NB_PER_WI; i+=__N__)
-    {
-      /* Read particle position */
-      p = vload__N__((i + line_index)/__N__, ppos);
-      /* Read particle scalar */
-      __RCOMP_Is__ID__ = vload__N__((i + line_index)/__N__, pscal__ID__);
-      /* Remesh particle */
-      remesh(i, __RCOMP_Ps__ID__, p, __RCOMP_Pgscal_loc__ID__, mesh);
-    }
-
-  /* Synchronize work-group */
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  for(i=gidX*__N__; i<NB_I; i+=(WI_NB*__N__))
-    {
-      /* Store result */
-      __RCOMP_Ivstore__N__((float__N__)(gscal_loc__ID__[noBC_id(i+__NN__)],
-			       ),(i + line_index)/__N__, gscal__ID__);
-  }
-  barrier(CLK_LOCAL_MEM_FENCE);
-}
-}
-}
diff --git a/hysop/old/gpu.old/cl_src/kernels/remeshing_noVec.cl b/hysop/old/gpu.old/cl_src/kernels/remeshing_noVec.cl
deleted file mode 100644
index 15db5730cedc2cf8b347c831ed6dfe3b79a48985..0000000000000000000000000000000000000000
--- a/hysop/old/gpu.old/cl_src/kernels/remeshing_noVec.cl
+++ /dev/null
@@ -1,81 +0,0 @@
-/**
- * @file remeshing.cl
- * Remeshing kernel.
- */
-
-/**
- * Performs remeshing of the particles' scalar.
- * A work-group is handling a 1D problem. Thus, gidY and gidZ are constants among work-items of a work-group.
- * Each work-item computes <code>NB_I/WI_NB</code> particles positions. To avoid concurrent witings, in case of strong velocity gradients, work-items computes contiguous particles.
- * Particle are computed through OpenCL vector types of lenght 2, 4 or 8.
- * Scalar results are stored in a local buffer as a cache and then copied to global memory buffer.
- *
- * @param ppos Particle position
- * @param pscal Particle scalar
- * @param gscal Grid scalar
- * @param min_position Domain lower coordinate
- * @param dx Space step
- *
- * @remark <code>NB_I</code>, <code>NB_II</code>, <code>NB_III</code> : points number in directions from 1st varying index to last.
- * @remark <code>WI_NB</code> corresponds to the work-item number.
- * @remark <code>__N__</code> is expanded at compilation time by vector width.
- * @remark <code>__NN__</code> is expanded at compilation time by a sequence of integer for each vector component.
- * @remark <code>__RCOMP_I</code> flag is for instruction expansion for the different remeshed components.
- * @remark <code>__RCOMP_P</code> flag is for function parameter expansion for the different remeshed components.
- * @remark <code>__ID__</code> is replaced by the remeshed component id in an expansion.
- * @see hysop.gpu.tools.parse_file
- */
-__kernel void remeshing_kernel(__global const float* ppos,
-			       __RCOMP_P__global const float* pscal__ID__,
-			       __RCOMP_P__global float* gscal__ID__,
-			       __constant struct AdvectionMeshInfo* mesh)
-{
-  uint gidX = get_global_id(0);	/* OpenCL work-itme global index (X) */
-  uint gidY; /* OpenCL work-itme global index (Y) */
-  uint gidZ; /* OpenCL work-itme global index (Z) */
-  uint i;			/* Particle index in 1D problem */
-  float p;			/* Particle position */
-  __RCOMP_I float s__ID__;      /* Particle scalar */
-  uint line_index; /* Current 1D problem index */
-
-  __RCOMP_I__local float gscal_loc__ID__[NB_I]; /* Local buffer for result */
-
-#ifdef NB_Z
-  for(gidZ=get_global_id(2); gidZ<NB_III; gidZ+=get_global_size(2)) {
-#else
-  gidZ=get_global_id(2); {
-#endif
-  for(gidY=get_global_id(1); gidY<NB_II; gidY+=get_global_size(1)) {
-  line_index = gidY*NB_I+ gidZ*NB_I*NB_II;
-
-  for(i=gidX; i<NB_I; i+=WI_NB)
-    {
-      /* Initialize result buffer */
-      __RCOMP_Igscal_loc__ID__[i] = 0.0;
-    }
-
-  /* Synchronize work-group */
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  for(i=gidX*PART_NB_PER_WI; i<(gidX + 1)*PART_NB_PER_WI; i+=1)
-    {
-      /* Read particle position */
-      p = ppos[i + line_index];
-      /* Read particle scalar */
-      __RCOMP_Is__ID__ = pscal__ID__[i + line_index];
-      /* Remesh particle */
-      remesh(i, __RCOMP_Ps__ID__, p, __RCOMP_Pgscal_loc__ID__, mesh);
-    }
-
-  /* Synchronize work-group */
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  for(i=gidX; i<NB_I; i+=WI_NB)
-    {
-      /* Store result */
-      __RCOMP_Igscal__ID__[i + line_index] = gscal_loc__ID__[noBC_id(i)];
-  }
-  barrier(CLK_LOCAL_MEM_FENCE);
-}
-}
-}
diff --git a/hysop/old/gpu.old/cl_src/kernels/rendering.cl b/hysop/old/gpu.old/cl_src/kernels/rendering.cl
deleted file mode 100644
index 567c44d9abcb4a9a2f7a52ef9d1cda57effc7f81..0000000000000000000000000000000000000000
--- a/hysop/old/gpu.old/cl_src/kernels/rendering.cl
+++ /dev/null
@@ -1,57 +0,0 @@
-/**
- * @file rendering.cl
- * Rendering kernels.
- */
-
-/**
- * Colorize regarding scalar values.
- *
- * @param scalar Scalar values used
- * @param color Color data array that contains RGBA values for each grid point
- */
-__kernel void colorize(__global const float* scalar,
-		       __global float* color
-)
-{
-  __private uint ind;
-  __private float c;
-  __private int ix, iy;
-  ix = get_global_id(0);
-  iy = get_global_id(1);
-  ind = ix + iy*NB_X;
-
-  //plain colors
-  /* c = (scalar[ind] > 0.5f ? 1.0: 0.0); */
-  /* color[4*ind + 0] = c; //Red */
-  /* color[4*ind + 1] = 0.0; //Green */
-  /* color[4*ind + 2] = 0.0; //Blue */
-  /* color[4*ind + 3] = 1.0; //Alpha */
-
-  //shaded colors
-  c = scalar[ind];
-  color[4*ind + 0] = 2.0*c; //Red
-  color[4*ind + 1] = 2.0*c-0.5; //Green
-  color[4*ind + 2] = 2.0*c-1.0; //Blue
-  color[4*ind + 3] = 1.0; //Alpha
-}
-
-
-/**
- * Compute grid point coordinates from OpenCL index space.
- *
- * @param pos Coordinates ax XY values for each grid point.
- * @param minPos Domain origin.
- * @param size Mesh size.
- */
-__kernel void initPointCoordinates(__global float* pos, float4 minPos, float4 size)
-{
-  __private uint ind;
-  __private int ix, iy;
-  ix = get_global_id(0);
-  iy = get_global_id(1);
-  ind = ix + iy*NB_X;
-
-  pos[2*ind + 0] = ix*size.x;
-  pos[2*ind + 1] = iy*size.y;
-}
-
diff --git a/hysop/old/gpu.old/cl_src/kernels/transpose_xy.cl b/hysop/old/gpu.old/cl_src/kernels/transpose_xy.cl
deleted file mode 100644
index a8701738f17f67659f27c616cf6ad75c1f29bcec..0000000000000000000000000000000000000000
--- a/hysop/old/gpu.old/cl_src/kernels/transpose_xy.cl
+++ /dev/null
@@ -1,99 +0,0 @@
-/**
- * @file transpose_xy.cl
- * Transposition in XY plane, coalesced, diagonal coordinates, vectorized version.
- */
-
-/**
- * Performs a transposition in xy plane.
- * Optimizations used are:
- *   - Coalesced reads and writes by means of local memory buffer (tile),
- *   - Local memory padding to avoid banck conflicts (optional),
- *   - Work groups are mapped to diagonal coordinates in global memory,
- *   - Reads and writes are performed by OpenCL vector types.
- *
- * A work group handle transposition for a tile. Transposition is done when reading data in tile.
- * Work-group layout: \code
- * ________________________
- * |0,0 | 1,0 | ...
- * |N,0 | 0,1 | 1,2 | ...
- * | .     .  | 0,2 | ...
- * | .     .
- * | .     .
- * |
- * \endcode
- *
- * @param in Input data
- * @param out Output data
- *
- * @remark <code>NB_I</code>, <code>NB_II</code>, <code>NB_III</code> : points number in directions from 1st varying index to last. Output layout is <code>NB_I</code>, <code>NB_II</code>, <code>NB_III</code>.
- * @remark <code>PADDING_XY</code> : local memory padding width.
- * @remark <code>__N__</code> is expanded at compilation time by vector width.
- * @remark <code>__NN__</code> is expanded at compilation time by a sequence of integer for each vector component.
- * @see hysop.gpu.tools.parse_file
- */
-__kernel void transpose_xy(__global const float* in,
-        __global float* out)
-{
-    float__N__ temp;			/* Temporary variable */
-    uint group_id_x;			/* Work-group coordinate in global space index X */
-    uint group_id_y;			/* Work-group coordinate in global space index Y */
-    uint lid_x = get_local_id(0);
-    uint lid_y = get_local_id(1);
-
-    uint xIndex, yIndex, zIndex;
-    uint index_in, index_out;
-    uint gidI, gidII, i;
-
-    __local float tile[TILE_DIM_XY][TILE_DIM_XY+PADDING_XY]; /* Tile with padding */
-
-#ifdef NB_III
-    for(zIndex=get_global_id(2); zIndex<NB_III; zIndex+=get_global_size(2))
-#else
-        zIndex=get_global_id(2);
-#endif
-    {
-        for(gidI=get_group_id(0); gidI<NB_GROUPS_I; gidI+=get_num_groups(0)) {
-            for(gidII=get_group_id(1); gidII<NB_GROUPS_II; gidII+=get_num_groups(1)) {
-
-                /* Use of diagonal coordinates */
-#if NB_II == NB_I
-                group_id_x = (gidI + gidII) % NB_GROUPS_I;
-                group_id_y = gidI;
-#else
-                uint bid = gidI + gidII * NB_GROUPS_I;
-                group_id_y = bid%NB_GROUPS_II;
-                group_id_x = ((bid/NB_GROUPS_II) + group_id_y)%NB_GROUPS_I;
-#endif
-
-                /* Global input index for work-item */
-                xIndex = group_id_x * TILE_DIM_XY + lid_x*__N__;
-                yIndex = group_id_y * TILE_DIM_XY + lid_y;
-                //zIndex = get_global_id(2);
-                index_in = xIndex + yIndex * NB_II + zIndex * NB_II * NB_I;
-
-                /* Global output index */
-                xIndex = group_id_y * TILE_DIM_XY + lid_x*__N__;
-                yIndex = group_id_x * TILE_DIM_XY + lid_y;
-                index_out = xIndex + yIndex * NB_I + zIndex * NB_I * NB_II;
-
-
-                for(i=0; i<TILE_DIM_XY; i+=BLOCK_ROWS_XY) {
-                    /* Fill the tile */
-                    temp = vload__N__((index_in + i * NB_II)/__N__, in);
-                    tile[lid_y + i][lid_x*__N__+__NN__] = temp.s__NN__;
-                }
-
-                /* Synchronize work-group */
-                barrier(CLK_LOCAL_MEM_FENCE);
-
-                for(i=0; i<TILE_DIM_XY; i+=BLOCK_ROWS_XY) {
-                    /* Write transposed data */
-                    temp = (float__N__)(tile[lid_x*__N__+__NN__][lid_y + i],
-                            );
-                    vstore__N__(temp, (index_out + i*NB_I)/__N__, out);
-                }
-                barrier(CLK_LOCAL_MEM_FENCE);
-            }
-        }
-    }
-}
diff --git a/hysop/old/gpu.old/cl_src/kernels/transpose_xy_noVec.cl b/hysop/old/gpu.old/cl_src/kernels/transpose_xy_noVec.cl
deleted file mode 100644
index 083d86eb2153e7224eab88c8c1f07ac021aa4477..0000000000000000000000000000000000000000
--- a/hysop/old/gpu.old/cl_src/kernels/transpose_xy_noVec.cl
+++ /dev/null
@@ -1,95 +0,0 @@
-/**
- * @file transpose_xy.cl
- * Transposition in XY plane, coalesced, diagonal coordinates, vectorized version.
- */
-
-/**
- * Performs a transposition in xy plane.
- * Optimizations used are:
- *   - Coalesced reads and writes by means of local memory buffer (tile),
- *   - Local memory padding to avoir banck conflicts (optional),
- *   - Work groups are mapped to diagonal coordinates in global memory,
- *   - Reads and writes are performed by OpenCL vector types.
- *
- * A work group handle transposition for a tile. Transposition is done when reading data in tile.
- * Work-group layout: \code
- * ________________________
- * |0,0 | 1,0 | ...
- * |N,0 | 0,1 | 1,2 | ...
- * | .     .  | 0,2 | ...
- * | .     .
- * | .     .
- * |
- * \endcode
- *
- * @param in Input data
- * @param out Output data
- *
- * @remark <code>NB_I</code>, <code>NB_II</code>, <code>NB_III</code> : points number in directions from 1st varying index to last. Output layout is <code>NB_I</code>, <code>NB_II</code>, <code>NB_III</code>.
- * @remark <code>PADDING_XY</code> : local memory padding width.
- * @remark <code>__N__</code> is expanded at compilation time by vector width.
- * @remark <code>__NN__</code> is expanded at compilation time by a sequence of integer for each vector component.
- * @see hysop.gpu.tools.parse_file
- */
-__kernel void transpose_xy(__global const float* in,
-			   __global float* out)
-{
-  uint group_id_x;			/* Work-group coordinate in global space index X */
-  uint group_id_y;			/* Work-group coordinate in global space index Y */
-  uint lid_x = get_local_id(0);
-  uint lid_y = get_local_id(1);
-
-  uint xIndex, yIndex, zIndex;
-  uint index_in, index_out;
-  uint gidI, gidII, i;
-
-  __local float tile[TILE_DIM_XY][TILE_DIM_XY+PADDING_XY]; /* Tile with padding */
-
-#ifdef NB_Z
-  for(zIndex=get_global_id(2); zIndex<NB_III; zIndex+=get_global_size(2))
-#else
-  zIndex=get_global_id(2);
-#endif
-  {
-    for(gidI=get_group_id(0); gidI<NB_GROUPS_I; gidI+=get_num_groups(0)) {
-      for(gidII=get_group_id(1); gidII<NB_GROUPS_II; gidII+=get_num_groups(1)) {
-
-	/* Use of diagonal coordinates */
-#if NB_II == NB_I
-	group_id_x = (gidI + gidII) % NB_GROUPS_I;
-	group_id_y = gidI;
-#else
-	uint bid = gidI + gidII * NB_GROUPS_I;
-	group_id_y = bid%NB_GROUPS_II;
-	group_id_x = ((bid/NB_GROUPS_II) + group_id_y)%NB_GROUPS_I;
-#endif
-
-	/* Global input index for work-item */
-	xIndex = group_id_x * TILE_DIM_XY + lid_x;
-	yIndex = group_id_y * TILE_DIM_XY + lid_y;
-	index_in = xIndex + yIndex * NB_II + zIndex * NB_II * NB_I;
-
-	/* Global output index */
-	xIndex = group_id_y * TILE_DIM_XY + lid_x;
-	yIndex = group_id_x * TILE_DIM_XY + lid_y;
-	index_out = xIndex + yIndex * NB_I + zIndex * NB_I * NB_II;
-
-	for(i=0; i<TILE_DIM_XY; i+=BLOCK_ROWS_XY) {
-	  /* Fill the tile */
-	  tile[lid_y + i][lid_x] = in[index_in + i * NB_II];
-	}
-
-	/* Synchronize work-group */
-	barrier(CLK_LOCAL_MEM_FENCE);
-
-	for(i=0; i<TILE_DIM_XY; i+=BLOCK_ROWS_XY) {
-	  /* Write transposed data */
-	  out[index_out + i*NB_I] = tile[lid_x][lid_y + i];
-	}
-
-	/* Synchronize work-group */
-	barrier(CLK_LOCAL_MEM_FENCE);
-      }
-    }
-  }
-}
diff --git a/hysop/old/gpu.old/cl_src/kernels/transpose_xz.cl b/hysop/old/gpu.old/cl_src/kernels/transpose_xz.cl
deleted file mode 100644
index 7d646eca97e3c18eecfe7b1f0f7ec25fc43b7ca1..0000000000000000000000000000000000000000
--- a/hysop/old/gpu.old/cl_src/kernels/transpose_xz.cl
+++ /dev/null
@@ -1,90 +0,0 @@
-/**
- * @file transpose_xz.cl
- * Transposition in XZ plane, coalesced, diagonal coordinates, 3D tiles.
- */
-
-/**
- * Perfoms a transposition in XZ plane. As data have to be contiguously read an write in global memory, we use a 3D tile.
- * Optimizations used are:
- *   - Coalesced reads and writes by means of local memory buffer (tile),
- *   - Local memory padding to avoir banck conflicts (optional),
- *   - Work groups are mapped to diagonal coordinates in global memory,
- *   - Reads and writes are performed by OpenCL vector types.
- *
- *
- * @param in Input data
- * @param out Output data
- *
- * @remark <code>NB_I</code>, <code>NB_II</code>, <code>NB_III</code> : points number in directions from 1st varying index to last. Output layout is <code>NB_I</code>, <code>NB_II</code>, <code>NB_III</code>.
- * @remark <code>PADDING_XZ</code> : local memory padding width.
- * @remark <code>__N__</code> is expanded at compilation time by vector width.
- * @remark <code>__NN__</code> is expanded at compilation time by a sequence of integer for each vector component.
- * @see hysop.gpu.tools.parse_file
- * @see transpose_xy.cl
- */
-__kernel void transpose_xz(__global const float* in,
-			   __global float* out)
-{
-  float__N__ temp;			/* Temporary variable */
-  uint group_id_x;			/* Work-group coordinate in global space index X */
-  uint group_id_z;			/* Work-group coordinate in global space index Y */
-  uint lid_x = get_local_id(0);
-  uint lid_y = get_local_id(1);
-  uint lid_z = get_local_id(2);
-
-  uint xIndex, yIndex, zIndex;
-  uint index_in, index_out, i, j;
-  uint gidI, gidII, gidIII;
-
-  __local float tile[TILE_DIM_XZ][TILE_DIM_XZ][TILE_DIM_XZ+PADDING_XZ]; /* Tile with padding */
-
-
-  for(gidI=get_group_id(0); gidI<NB_GROUPS_I; gidI+=get_num_groups(0)) {
-    for(gidII=get_group_id(1); gidII<NB_GROUPS_II; gidII+=get_num_groups(1)) {
-      for(gidIII=get_group_id(2); gidIII<NB_GROUPS_III; gidIII+=get_num_groups(2)) {
-
-	/* Use of diagonal coordinates */
-#if NB_III == NB_I
-	group_id_x = (gidI + gidIII) % NB_GROUPS_I;
-	group_id_z = gidI;
-#else
-	uint bid = gidI + gidIII * NB_GROUPS_I;
-	group_id_z = bid%NB_GROUPS_III;
-	group_id_x = ((bid/NB_GROUPS_III) + group_id_z)%NB_GROUPS_I;
-#endif
-
-	/* Global input index for work-item */
-	xIndex = group_id_x * TILE_DIM_XZ + lid_x*__N__;
-	yIndex = gidII * TILE_DIM_XZ + lid_y;
-	zIndex = group_id_z * TILE_DIM_XZ + lid_z;
-	index_in = xIndex + yIndex * NB_III + zIndex * NB_III * NB_II;
-
-	/* Global output index */
-	xIndex = group_id_z * TILE_DIM_XZ + lid_x*__N__;
-	zIndex = group_id_x * TILE_DIM_XZ + lid_z;
-	index_out = xIndex + yIndex * NB_I + zIndex * NB_I * NB_II;
-
-
-	for(j=0; j<TILE_DIM_XZ; j+=BLOCK_DEPH_XZ) {
-	  for(i=0; i<TILE_DIM_XZ; i+=BLOCK_ROWS_XZ) {
-	    /* Fill the tile */
-	    temp = vload__N__((index_in + i*NB_III + j*NB_III*NB_II)/__N__, in);
-	    tile[lid_z + j][lid_y + i][lid_x*__N__+__NN__] = temp.s__NN__;
-	}
-
-	}
-	/* Synchronize work-group */
-	barrier(CLK_LOCAL_MEM_FENCE);
-
-	for(j=0; j<TILE_DIM_XZ; j+=BLOCK_DEPH_XZ) {
-	  for(i=0; i<TILE_DIM_XZ; i+=BLOCK_ROWS_XZ) {
-	    /* Write transposed data */
-	    temp = (float__N__)(tile[lid_x*__N__+__NN__][lid_y+i][lid_z + j],
-				);
-	    vstore__N__(temp, (index_out + i*NB_I + j*NB_I*NB_II)/__N__, out);
-	  }
-	}
-      }
-    }
-  }
-}
diff --git a/hysop/old/gpu.old/cl_src/kernels/transpose_xz_noVec.cl b/hysop/old/gpu.old/cl_src/kernels/transpose_xz_noVec.cl
deleted file mode 100644
index 475bc9aaebe9d5c4a2ebf2c335e08c2a6413733e..0000000000000000000000000000000000000000
--- a/hysop/old/gpu.old/cl_src/kernels/transpose_xz_noVec.cl
+++ /dev/null
@@ -1,84 +0,0 @@
-/**
- * @file transpose_xz.cl
- * Transposition in XZ plane, coalesced, diagonal coordinates, 3D tiles.
- */
-
-/**
- * Perfoms a transposition in XZ plane. As data have to be contiguously read an write in global memory, we use a 3D tile.
- * Optimizations used are:
- *   - Coalesced reads and writes by means of local memory buffer (tile),
- *   - Local memory padding to avoir banck conflicts (optional),
- *   - Work groups are mapped to diagonal coordinates in global memory,
- *   - Reads and writes are performed by OpenCL vector types.
- *
- *
- * @param in Input data
- * @param out Output data
- *
- * @remark <code>NB_I</code>, <code>NB_II</code>, <code>NB_III</code> : points number in directions from 1st varying index to last. Output layout is <code>NB_I</code>, <code>NB_II</code>, <code>NB_III</code>.
- * @remark <code>PADDING_XZ</code> : local memory padding width.
- * @remark <code>__N__</code> is expanded at compilation time by vector width.
- * @remark <code>__NN__</code> is expanded at compilation time by a sequence of integer for each vector component.
- * @see hysop.gpu.tools.parse_file
- * @see transpose_xy.cl
- */
-__kernel void transpose_xz(__global const float* in,
-			   __global float* out)
-{
-  uint group_id_x;			/* Work-group coordinate in global space index X */
-  uint group_id_z;			/* Work-group coordinate in global space index Y */
-  uint lid_x = get_local_id(0);
-  uint lid_y = get_local_id(1);
-  uint lid_z = get_local_id(2);
-
-  uint xIndex, yIndex, zIndex;
-  uint index_in, index_out, i, j;
-  uint gidI, gidII, gidIII;
-
-  __local float tile[TILE_DIM_XZ][TILE_DIM_XZ][TILE_DIM_XZ+PADDING_XZ]; /* Tile with padding */
-
-  for(gidI=get_group_id(0); gidI<NB_GROUPS_I; gidI+=get_num_groups(0)) {
-    for(gidII=get_group_id(1); gidII<NB_GROUPS_II; gidII+=get_num_groups(1)) {
-      for(gidIII=get_group_id(2); gidIII<NB_GROUPS_III; gidIII+=get_num_groups(2)) {
-
-	/* Use of diagonal coordinates */
-#if NB_III == NB_I
-	group_id_x = (gidI + gidIII) % NB_GROUPS_I;
-	group_id_z = gidI;
-#else
-	uint bid = gidI + gidIII * NB_GROUPS_I;
-	group_id_z = bid%NB_GROUPS_III;
-	group_id_x = ((bid/NB_GROUPS_III) + group_id_z)%NB_GROUPS_I;
-#endif
-
-	/* Global input index for work-item */
-	xIndex = group_id_x * TILE_DIM_XZ + lid_x;
-	yIndex = gidII * TILE_DIM_XZ + lid_y;
-	zIndex = group_id_z * TILE_DIM_XZ + lid_z;
-	index_in = xIndex + yIndex * NB_III + zIndex * NB_III * NB_II;
-
-	/* Global output index */
-	xIndex = group_id_z * TILE_DIM_XZ + lid_x;
-	zIndex = group_id_x * TILE_DIM_XZ + lid_z;
-	index_out = xIndex + yIndex * NB_I + zIndex * NB_I * NB_II;
-
-	for(j=0; j<TILE_DIM_XZ; j+=BLOCK_DEPH_XZ) {
-	  for(i=0; i<TILE_DIM_XZ; i+=BLOCK_ROWS_XZ) {
-	    /* Fill the tile */
-	    tile[lid_z + j][lid_y + i][lid_x] = in[index_in + i*NB_III + j*NB_III*NB_II];
-	  }
-	}
-	/* Synchronize work-group */
-	barrier(CLK_LOCAL_MEM_FENCE);
-
-	for(j=0; j<TILE_DIM_XZ; j+=BLOCK_DEPH_XZ) {
-	  for(i=0; i<TILE_DIM_XZ; i+=BLOCK_ROWS_XZ) {
-	    /* Write transposed data */
-	    out[index_out + i*NB_I + j*NB_I*NB_II] = tile[lid_x][lid_y+i][lid_z + j];
-	  }
-	}
-	barrier(CLK_LOCAL_MEM_FENCE);
-      }
-    }
-  }
-}
diff --git a/hysop/old/gpu.old/cl_src/kernels/transpose_xz_slice.cl b/hysop/old/gpu.old/cl_src/kernels/transpose_xz_slice.cl
deleted file mode 100644
index ec394f6cbd3690fe71ea7c3d97749077869c4f34..0000000000000000000000000000000000000000
--- a/hysop/old/gpu.old/cl_src/kernels/transpose_xz_slice.cl
+++ /dev/null
@@ -1,82 +0,0 @@
-/**
- * @file transpose_xz.cl
- * Transposition in XZ plane, coalesced, diagonal coordinates, 3D tiles.
- */
-
-/**
- * Perfoms a transposition in XZ plane. As data have to be contiguously read an write in global memory, we use a 3D tile.
- * Optimizations used are:
- *   - Coalesced reads and writes by means of local memory buffer (tile),
- *   - Local memory padding to avoir banck conflicts (optional),
- *   - Work groups are mapped to diagonal coordinates in global memory,
- *   - Reads and writes are performed by OpenCL vector types.
- *
- *
- * @param in Input data
- * @param out Output data
- *
- * @remark <code>NB_I</code>, <code>NB_II</code>, <code>NB_III</code> : points number in directions from 1st varying index to last. Output layout is <code>NB_I</code>, <code>NB_II</code>, <code>NB_III</code>.
- * @remark <code>PADDING_XZ</code> : local memory padding width.
- * @remark <code>__N__</code> is expanded at compilation time by vector width.
- * @remark <code>__NN__</code> is expanded at compilation time by a sequence of integer for each vector component.
- * @see hysop.gpu.tools.parse_file
- * @see transpose_xy.cl
- */
-__kernel void transpose_xz(__global const float* in,
-			   __global float* out)
-{
-  float__N__ temp;			/* Temporary variable */
-  uint group_id_x;			/* Work-group coordinate in global space index X */
-  uint group_id_z;			/* Work-group coordinate in global space index Y */
-  uint lid_x = get_local_id(0);
-  uint lid_z = get_local_id(2);
-
-  uint xIndex, yIndex, zIndex;
-  uint index_in, index_out;
-  uint gidI, gidIII, j;
-
-  __local float tile[TILE_DIM_XZ][TILE_DIM_XZ+PADDING_XZ]; /* Tile with padding */
-
-  for(yIndex=get_global_id(1); yIndex<NB_II; yIndex+=get_global_size(1)) {
-    for(gidI=get_group_id(0); gidI<NB_GROUPS_I; gidI+=get_num_groups(0)) {
-      for(gidIII=get_group_id(2); gidIII<NB_GROUPS_III; gidIII+=get_num_groups(2)) {
-
-	/* Use of diagonal coordinates */
-#if NB_III == NB_I
-	group_id_x = (gidI + gidIII) % NB_GROUPS_I;
-	group_id_z = gidI;
-#else
-	uint bid = gidI + gidIII * NB_GROUPS_I;
-	group_id_z = bid%NB_GROUPS_III;
-	group_id_x = ((bid/NB_GROUPS_III) + group_id_z)%NB_GROUPS_I;
-#endif
-
-	/* Global input index for work-item */
-	xIndex = group_id_x * TILE_DIM_XZ + lid_x*__N__;
-	zIndex = group_id_z * TILE_DIM_XZ + lid_z;
-	index_in = xIndex + yIndex * NB_III + zIndex * NB_III * NB_II;
-
-	/* Global output index */
-	xIndex = group_id_z * TILE_DIM_XZ + lid_x*__N__;
-	zIndex = group_id_x * TILE_DIM_XZ + lid_z;
-	index_out = xIndex + yIndex * NB_I + zIndex * NB_I * NB_II;
-
-	for(j=0; j<TILE_DIM_XZ; j+=BLOCK_DEPH_XZ) {
-	  /* Fill the tile */
-	  temp = vload__N__((index_in + j*NB_III*NB_II)/__N__, in);
-	  tile[lid_z + j][lid_x*__N__+__NN__] = temp.s__NN__;
-
-	}
-	/* Synchronize work-group */
-	barrier(CLK_LOCAL_MEM_FENCE);
-
-	for(j=0; j<TILE_DIM_XZ; j+=BLOCK_DEPH_XZ) {
-	  /* Write transposed data */
-	  temp = (float__N__)(tile[lid_x*__N__+__NN__][lid_z + j],
-			      );
-	  vstore__N__(temp, (index_out + j*NB_I*NB_II)/__N__, out);
-	}
-      }
-    }
-  }
-}
diff --git a/hysop/old/gpu.old/cl_src/kernels/transpose_xz_slice_noVec.cl b/hysop/old/gpu.old/cl_src/kernels/transpose_xz_slice_noVec.cl
deleted file mode 100644
index d97cb925e5d5b0defdb8025e8f11565f8d34048c..0000000000000000000000000000000000000000
--- a/hysop/old/gpu.old/cl_src/kernels/transpose_xz_slice_noVec.cl
+++ /dev/null
@@ -1,79 +0,0 @@
-/**
- * @file transpose_xz.cl
- * Transposition in XZ plane, coalesced, diagonal coordinates, 3D tiles.
- */
-
-/**
- * Perfoms a transposition in XZ plane. As data have to be contiguously read an write in global memory, we use a 3D tile.
- * Optimizations used are:
- *   - Coalesced reads and writes by means of local memory buffer (tile),
- *   - Local memory padding to avoir banck conflicts (optional),
- *   - Work groups are mapped to diagonal coordinates in global memory,
- *   - Reads and writes are performed by OpenCL vector types.
- *
- *
- * @param in Input data
- * @param out Output data
- *
- * @remark <code>NB_I</code>, <code>NB_II</code>, <code>NB_III</code> : points number in directions from 1st varying index to last. Output layout is <code>NB_I</code>, <code>NB_II</code>, <code>NB_III</code>.
- * @remark <code>PADDING_XZ</code> : local memory padding width.
- * @remark <code>__N__</code> is expanded at compilation time by vector width.
- * @remark <code>__NN__</code> is expanded at compilation time by a sequence of integer for each vector component.
- * @see hysop.gpu.tools.parse_file
- * @see transpose_xy.cl
- */
-__kernel void transpose_xz(__global const float* in,
-			   __global float* out)
-{
-  uint group_id_x;			/* Work-group coordinate in global space index X */
-  uint group_id_z;			/* Work-group coordinate in global space index Y */
-  uint lid_x = get_local_id(0);
-  uint lid_z = get_local_id(2);
-
-  /* Global input index for work-item */
-  uint xIndex, yIndex, zIndex;
-  uint index_in, index_out;
-  uint gidI, gidIII, j;
-
-  __local float tile[TILE_DIM_XZ][TILE_DIM_XZ+PADDING_XZ]; /* Tile with padding */
-
-  for(yIndex=get_global_id(1); yIndex<NB_II; yIndex+=get_global_size(1)) {
-    for(gidI=get_group_id(0); gidI<NB_GROUPS_I; gidI+=get_num_groups(0)) {
-      for(gidIII=get_group_id(2); gidIII<NB_GROUPS_III; gidIII+=get_num_groups(2)) {
-
-	/* Use of diagonal coordinates */
-#if NB_III == NB_I
-	group_id_x = (gidI + gidIII) % NB_GROUPS_I;
-	group_id_z = gidI;
-#else
-	uint bid = gidI + gidIII * NB_GROUPS_I;
-	group_id_z = bid%NB_GROUPS_III;
-	group_id_x = ((bid/NB_GROUPS_III) + group_id_z)%NB_GROUPS_I;
-#endif
-
-	xIndex = group_id_x * TILE_DIM_XZ + lid_x;
-	zIndex = group_id_z * TILE_DIM_XZ + lid_z;
-	index_in = xIndex + yIndex * NB_III + zIndex * NB_III * NB_II;
-
-	/* Global output index */
-	xIndex = group_id_z * TILE_DIM_XZ + lid_x;
-	zIndex = group_id_x * TILE_DIM_XZ + lid_z;
-	index_out = xIndex + yIndex * NB_I + zIndex * NB_I * NB_II;
-
-	for(j=0; j<TILE_DIM_XZ; j+=BLOCK_DEPH_XZ) {
-	  /* Fill the tile */
-	  tile[lid_z + j][lid_x] = in[index_in + j*NB_III*NB_II];
-	}
-	/* Synchronize work-group */
-	barrier(CLK_LOCAL_MEM_FENCE);
-
-	for(j=0; j<TILE_DIM_XZ; j+=BLOCK_DEPH_XZ) {
-	  /* Write transposed data */
-	  out[index_out + j*NB_I*NB_II] = tile[lid_x][lid_z + j];
-	  tile[lid_x][lid_z + j] = 0.0;
-	}
-	barrier(CLK_LOCAL_MEM_FENCE);
-      }
-    }
-  }
-}
diff --git a/hysop/old/gpu.old/cl_src/remeshing/basic.cl b/hysop/old/gpu.old/cl_src/remeshing/basic.cl
deleted file mode 100644
index e2fd02f4d55a0b76891dc21d5ae4be546bb00cbf..0000000000000000000000000000000000000000
--- a/hysop/old/gpu.old/cl_src/remeshing/basic.cl
+++ /dev/null
@@ -1,106 +0,0 @@
-/**
- * @file remeshing/basic.cl
- * Remeshing function, vectorized version.
- */
-
-void remesh(uint i, __RCOMP_P float__N__ s__ID__, float__N__ p, __RCOMP_P__local float* gscal_loc__ID__, __constant struct AdvectionMeshInfo* mesh);
-
-
-/**
- * Remesh particles in local buffer.
- *
- * Remeshing formula is given a compiling time.
- * Use of builtin OpenCL functions fma and mix. Computations through OpenCL vector types.
- *
- * @param i Particle index
- * @param dx Space step
- * @param invdx 1/dx
- * @param s Particle scalar
- * @param p Particle position
- * @param gscal_loc Local buffer for result
- *
- * @remark <code>NB_I</code>, <code>NB_II</code>, <code>NB_III</code> : points number in directions from 1st varying index to last.
- * @remark <code>FORMULA</code> : remeshing formula flag {<code>M4PRIME</code>, <code>M6PRIME</code>, <code>M8PRIME</code>, <code>L6STAR</code>}
- * @remark <code>__N__</code> is expanded at compilation time by vector width.
- * @remark <code>__NN__</code> is expanded at compilation time by a sequence of integer for each vector component.
- * @remark <code>__RCOMP_I</code> flag is for instruction expansion for the different remeshed components.
- * @remark <code>__RCOMP_P</code> flag is for function parameter expansion for the different remeshed components.
- * @remark <code>__ID__</code> is replaced by the remeshed component id in an expansion.
- * @remark <code>REMESH</code> is a function-like macro expanding to the proper remeshing formula (i.e.: <code>REMESH(alpha)</code> -> <code>alpha_l2_1</code>)
- * @see hysop.gpu.tools.parse_file
- * @see hysop.gpu.cl_src.common
- */
-void remesh(uint i,
-	    __RCOMP_P float__N__ s__ID__,
-	    float__N__ p,
-	    __RCOMP_P__local float* gscal_loc__ID__,
-	    __constant struct AdvectionMeshInfo* mesh){
-  float__N__ y;			/* Normalized distance to nearest left grid point */
-  int__N__ ind;			/* Integer coordinate */
-  uint__N__ index;		/* Remeshing index */
-  float w__NN__;
-
-  p = p - mesh->min_position;
-
-  ind = convert_int__N___rtn(p * mesh->invdx);
-  y = (p - convert_float__N__(ind) * mesh->dx.x) * mesh->invdx;
-
-  index = convert_uint__N__((ind - REMESH_SHIFT + NB_I) % NB_I);
-
-  w__NN__ = REMESH(alpha)(y.s__NN__);
-  __RCOMP_Igscal_loc__ID__[noBC_id(index.s__NN__)] += (w__NN__ * s__ID__.s__NN__);
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  index = (index + 1) % NB_I;
-  w__NN__ = REMESH(beta)(y.s__NN__);
-  __RCOMP_Igscal_loc__ID__[noBC_id(index.s__NN__)] += (w__NN__ * s__ID__.s__NN__);
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  index = (index + 1) % NB_I;
-  w__NN__ = REMESH(gamma)(y.s__NN__);
-  __RCOMP_Igscal_loc__ID__[noBC_id(index.s__NN__)] += (w__NN__ * s__ID__.s__NN__);
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  index = (index + 1) % NB_I;
-  w__NN__ = REMESH(delta)(y.s__NN__);
-  __RCOMP_Igscal_loc__ID__[noBC_id(index.s__NN__)] += (w__NN__ * s__ID__.s__NN__);
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-#if REMESH_SHIFT > 1
-  index = (index + 1) % NB_I;
-  w__NN__ = REMESH(eta)(y.s__NN__);
-  __RCOMP_Igscal_loc__ID__[noBC_id(index.s__NN__)] += (w__NN__ * s__ID__.s__NN__);
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  index = (index + 1) % NB_I;
-  w__NN__ = REMESH(zeta)(y.s__NN__);
-  __RCOMP_Igscal_loc__ID__[noBC_id(index.s__NN__)] += (w__NN__ * s__ID__.s__NN__);
-  barrier(CLK_LOCAL_MEM_FENCE);
-#endif
-
-#if REMESH_SHIFT > 2
-  index = (index + 1) % NB_I;
-  w__NN__ = REMESH(theta)(y.s__NN__);
-  __RCOMP_Igscal_loc__ID__[noBC_id(index.s__NN__)] += (w__NN__ * s__ID__.s__NN__);
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  index = (index + 1) % NB_I;
-  w__NN__ = REMESH(iota)(y.s__NN__);
-  __RCOMP_Igscal_loc__ID__[noBC_id(index.s__NN__)] += (w__NN__ * s__ID__.s__NN__);
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-#endif
-
-#if REMESH_SHIFT > 3
-  index = (index + 1) % NB_I;
-  w__NN__ = REMESH(kappa)(y.s__NN__);
-  __RCOMP_Igscal_loc__ID__[noBC_id(index.s__NN__)] += (w__NN__ * s__ID__.s__NN__);
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  index = (index + 1) % NB_I;
-  w__NN__ = REMESH(mu)(y.s__NN__);
-  __RCOMP_Igscal_loc__ID__[noBC_id(index.s__NN__)] += (w__NN__ * s__ID__.s__NN__);
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-#endif
-}
diff --git a/hysop/old/gpu.old/cl_src/remeshing/basic_noVec.cl b/hysop/old/gpu.old/cl_src/remeshing/basic_noVec.cl
deleted file mode 100644
index a2b75e98926a6a246f1e54af288fd4ab48a7143e..0000000000000000000000000000000000000000
--- a/hysop/old/gpu.old/cl_src/remeshing/basic_noVec.cl
+++ /dev/null
@@ -1,112 +0,0 @@
-/**
- * @file remeshing/basic_noVec.cl
- * Remeshing function, vectorized version.
- */
-
-void remesh(uint i, __RCOMP_P float s__ID__, float p, __RCOMP_P__local float* gscal_loc__ID__, __constant struct AdvectionMeshInfo* mesh);
-
-
-/**
- * Remesh particles in local buffer.
- *
- * Remeshing formula is given a compiling time.
- * Use of builtin OpenCL functions fma and mix. Computations through OpenCL vector types.
- *
- * @param i Particle index
- * @param dx Space step
- * @param invdx 1/dx
- * @param s Particle scalar
- * @param p Particle position
- * @param gscal_loc Local buffer for result
- *
- * @remark <code>NB_I</code>, <code>NB_II</code>, <code>NB_III</code> : points number in directions from 1st varying index to last.
- * @remark <code>__N__</code> is expanded at compilation time by vector width.
- * @remark <code>__NN__</code> is expanded at compilation time by a sequence of integer for each vector component.
- * @remark <code>FORMULA</code> : remeshing formula flag {<code>M4PRIME</code>, <code>M6PRIME</code>, <code>M8PRIME</code>, <code>L6STAR</code>}
- * @remark <code>__RCOMP_I</code> flag is for instruction expansion for the different remeshed components.
- * @remark <code>__RCOMP_P</code> flag is for function parameter expansion for the different remeshed components.
- * @remark <code>__ID__</code> is replaced by the remeshed component id in an expansion.
- * @remark <code>REMESH</code> is a function-like macro expanding to the proper remeshing formula (i.e.: <code>REMESH(alpha)</code> -> <code>alpha_l2_1</code>)
- * @see hysop.gpu.tools.parse_file
- * @see hysop.gpu.cl_src.common
- */
-void remesh(uint i,
-	    __RCOMP_P float s__ID__,
-	    float p,
-	    __RCOMP_P__local float* gscal_loc__ID__,
-	    __constant struct AdvectionMeshInfo* mesh){
-  float y;			/* Normalized distance to nearest left grid point */
-  int ind;			/* Integer coordinate */
-  uint index;		/* Remeshing index */
-  float w;
-
-  p = p - mesh->min_position;
-
-  ind = convert_int_rtn(p * mesh->invdx);
-  y = (p - convert_float(ind) * mesh->dx.x) * mesh->invdx;
-
-  index = convert_uint((ind - REMESH_SHIFT + NB_I) % NB_I);
-
-  w = REMESH(alpha)(y);
-  __RCOMP_Igscal_loc__ID__[noBC_id(index)] += (w * s__ID__);
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  index = (index + 1) % NB_I;
-  w = REMESH(beta)(y);
-  __RCOMP_Igscal_loc__ID__[noBC_id(index)] += (w * s__ID__);
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  index = (index + 1) % NB_I;
-  w = REMESH(gamma)(y);
-  __RCOMP_Igscal_loc__ID__[noBC_id(index)] += (w * s__ID__);
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  index = (index + 1) % NB_I;
-  w = REMESH(delta)(y);
-  __RCOMP_Igscal_loc__ID__[noBC_id(index)] += (w * s__ID__);
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-#if REMESH_SHIFT > 1
-  index = (index + 1) % NB_I;
-  w = REMESH(eta)(y);
-  __RCOMP_Igscal_loc__ID__[noBC_id(index)] += (w * s__ID__);
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  index = (index + 1) % NB_I;
-  w = REMESH(zeta)(y);
-  __RCOMP_Igscal_loc__ID__[noBC_id(index)] += (w * s__ID__);
-  barrier(CLK_LOCAL_MEM_FENCE);
-#endif
-
-#if REMESH_SHIFT > 2
-  index = (index + 1) % NB_I;
-  w = REMESH(theta)(y);
-  __RCOMP_Igscal_loc__ID__[noBC_id(index)] += (w * s__ID__);
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  index = (index + 1) % NB_I;
-  w = REMESH(iota)(y);
-  __RCOMP_Igscal_loc__ID__[noBC_id(index)] += (w * s__ID__);
-  barrier(CLK_LOCAL_MEM_FENCE);
-#endif
-
-#if REMESH_SHIFT > 3
-  index = (index + 1) % NB_I;
-  w = REMESH(kappa)(y);
-  __RCOMP_Igscal_loc__ID__[noBC_id(index)] += (w * s__ID__);
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  index = (index + 1) % NB_I;
-  w = REMESH(mu)(y);
-  __RCOMP_Igscal_loc__ID__[noBC_id(index)] += (w * s__ID__);
-  barrier(CLK_LOCAL_MEM_FENCE);
-#endif
-}
-
-
-/* Flop number
-   - distance to grid point : 5flop
-   - contributions : 2*Stencil*Nbcomponents
-   - poids (horner) : (d*fma+1)*Stencil (d=degré, +1 for the coefficient)
-
-*/
diff --git a/hysop/old/gpu.old/cl_src/remeshing/comm_basic_noVec.cl b/hysop/old/gpu.old/cl_src/remeshing/comm_basic_noVec.cl
deleted file mode 100644
index e1e886cfcb88dbaad5a237ad9c210a7d8f945568..0000000000000000000000000000000000000000
--- a/hysop/old/gpu.old/cl_src/remeshing/comm_basic_noVec.cl
+++ /dev/null
@@ -1,124 +0,0 @@
-/**
- * @file remeshing/comm_basic_noVec.cl
- * Remeshing function, vectorized version.
- */
-
-void remesh(uint i, __RCOMP_P float s__ID__, float p, __RCOMP_P__local float* gscal_loc__ID__, __constant struct AdvectionMeshInfo* mesh);
-
-
-/**
- * Remesh particles in local buffer.
- *
- * Remeshing formula is given a compiling time.
- * Use of builtin OpenCL functions fma and mix. Computations through OpenCL vector types.
- *
- * @param i Particle index
- * @param dx Space step
- * @param invdx 1/dx
- * @param s Particle scalar
- * @param p Particle position
- * @param gscal_loc Local buffer for result
- *
- * @remark <code>NB_I</code>, <code>NB_II</code>, <code>NB_III</code> : points number in directions from 1st varying index to last.
- * @remark T_NB_I: global points number in the 1st direction (mpi cutted direction)
- * @remark START_INDEX Global staring index for computational points
- * @remark <code>__N__</code> is expanded at compilation time by vector width.
- * @remark <code>__NN__</code> is expanded at compilation time by a sequence of integer for each vector component.
- * @remark <code>FORMULA</code> : remeshing formula flag {<code>M4PRIME</code>, <code>M6PRIME</code>, <code>M8PRIME</code>, <code>L6STAR</code>}
- * @remark <code>__RCOMP_I</code> flag is for instruction expansion for the different remeshed components.
- * @remark <code>__RCOMP_P</code> flag is for function parameter expansion for the different remeshed components.
- * @remark <code>__ID__</code> is replaced by the remeshed component id in an expansion.
- * @remark <code>REMESH</code> is a function-like macro expanding to the proper remeshing formula (i.e.: <code>REMESH(alpha)</code> -> <code>alpha_l2_1</code>)
- * @see hysop.gpu.tools.parse_file
- * @see hysop.gpu.cl_src.common
- */
-void remesh(uint i,
-	    __RCOMP_P float s__ID__,
-	    float p,
-	    __RCOMP_P__local float* gscal_loc__ID__,
-	    __constant struct AdvectionMeshInfo* mesh){
-  float y;			/* Normalized distance to nearest left grid point */
-  int ind;			/* Integer coordinate */
-  int index;		/* Remeshing index */
-  float w;
-
-  ind = convert_int_rtn(p * mesh->invdx);
-  y = (p - convert_float(ind) * mesh->dx.x) * mesh->invdx;
-
-  index = ((ind - REMESH_SHIFT + T_NB_I) % T_NB_I) - START_INDEX;
-
-  if (index>=0 && index < NB_I){
-    w = REMESH(alpha)(y);
-    __RCOMP_Igscal_loc__ID__[noBC_id(index)] += (w * s__ID__);
-  }
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  index = index + 1;
-  if (index>=0 && index < NB_I){
-    w = REMESH(beta)(y);
-    __RCOMP_Igscal_loc__ID__[noBC_id(index)] += (w * s__ID__);
-  }
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  index = index + 1;
-  if (index>=0 && index < NB_I){
-    w = REMESH(gamma)(y);
-    __RCOMP_Igscal_loc__ID__[noBC_id(index)] += (w * s__ID__);
-  }
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  index = index + 1;
-  if (index>=0 && index < NB_I){
-    w = REMESH(delta)(y);
-    __RCOMP_Igscal_loc__ID__[noBC_id(index)] += (w * s__ID__);
-  }
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-#if REMESH_SHIFT > 1
-  index = index + 1;
-  if (index>=0 && index < NB_I){
-    w = REMESH(eta)(y);
-    __RCOMP_Igscal_loc__ID__[noBC_id(index)] += (w * s__ID__);
-  }
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  index = index + 1;
-  if (index>=0 && index < NB_I){
-    w = REMESH(zeta)(y);
-    __RCOMP_Igscal_loc__ID__[noBC_id(index)] += (w * s__ID__);
-  }
-  barrier(CLK_LOCAL_MEM_FENCE);
-#endif
-
-#if REMESH_SHIFT > 2
-  index = index + 1;
-  if (index>=0 && index < NB_I){
-    w = REMESH(theta)(y);
-    __RCOMP_Igscal_loc__ID__[noBC_id(index)] += (w * s__ID__);
-  }
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  index = index + 1;
-  if (index>=0 && index < NB_I){
-    w = REMESH(iota)(y);
-    __RCOMP_Igscal_loc__ID__[noBC_id(index)] += (w * s__ID__);
-  }
-  barrier(CLK_LOCAL_MEM_FENCE);
-#endif
-
-#if REMESH_SHIFT > 3
-  index = index + 1;
-  if (index>=0 && index < NB_I){
-    w = REMESH(kappa)(y);
-    __RCOMP_Igscal_loc__ID__[noBC_id(index)] += (w * s__ID__);
-  }
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  index = index + 1;
-  if (index>=0 && index < NB_I){
-    w = REMESH(mu)(y);
-    __RCOMP_Igscal_loc__ID__[noBC_id(index)] += (w * s__ID__);
-  }
-  barrier(CLK_LOCAL_MEM_FENCE);
-#endif
-}
diff --git a/hysop/old/gpu.old/cl_src/remeshing/private.cl b/hysop/old/gpu.old/cl_src/remeshing/private.cl
deleted file mode 100644
index 18943652bf3885a13dc77e77babc7280c28c319c..0000000000000000000000000000000000000000
--- a/hysop/old/gpu.old/cl_src/remeshing/private.cl
+++ /dev/null
@@ -1,116 +0,0 @@
-/**
- * @file private.cl
- * Remeshing function, vectorized, private variable.
- */
-
-void remesh(uint i, __RCOMP_P float__N__ s__ID__, float__N__ p, __RCOMP_P__local float* gscal_loc__ID__, __constant struct AdvectionMeshInfo* mesh);
-
-
-/**
- * Remesh particles in local buffer.
- *
- * Remeshing formula is given a compiling time.
- * Use of builtin OpenCL functions fma and mix. Computations through OpenCL vector types.
- * Use of a private temporary variable for remeshing weights.
- *
- * @param i Particle index
- * @param dx Space step
- * @param invdx 1/dx
- * @param s Particle scalar
- * @param p Particle position
- * @param gscal_loc Local buffer for result
- *
- * @remark <code>NB_I</code>, <code>NB_II</code>, <code>NB_III</code> : points number in directions from 1st varying index to last.
- * @remark <code>__N__</code> is expanded at compilation time by vector width.
- * @remark <code>__NN__</code> is expanded at compilation time by a sequence of integer for each vector component.
- * @remark <code>FORMULA</code> : remeshing formula flag {<code>M4PRIME</code>, <code>M6PRIME</code>, <code>M8PRIME</code>, <code>L6STAR</code>}
- * @remark <code>__RCOMP_I</code> flag is for instruction expansion for the different remeshed components.
- * @remark <code>__RCOMP_P</code> flag is for function parameter expansion for the different remeshed components.
- * @remark <code>__ID__</code> is replaced by the remeshed component id in an expansion.
- * @remark <code>REMESH</code> is a function-like macro expanding to the proper remeshing formula (i.e.: <code>REMESH(alpha)</code> -> <code>alpha_l2_1</code>)
- * @see hysop.gpu.tools.parse_file
- * @see hysop.gpu.cl_src.common
- */
-void remesh(uint i,
-	    __RCOMP_P float__N__ s__ID__,
-	    float__N__ p,
-	    __RCOMP_P__local float* gscal_loc__ID__,
-	    __constant struct AdvectionMeshInfo* mesh){
-  float__N__ y,			   /* Normalized distance to nearest left grid point */
-     w;
-  __RCOMP_I float__N__ temp__ID__; /* Temporary remeshing weights */
-  int__N__ ind;		   	   /* Integer coordinate */
-  uint__N__ index;		   /* Remeshing index */
-
-  p = p - mesh->min_position;
-
-  ind = convert_int__N___rtn(p * mesh->invdx);
-  y = (p - convert_float__N__(ind) * mesh->dx.x) * mesh->invdx;
-
-  index = convert_uint__N__((ind - REMESH_SHIFT + NB_I) % NB_I);
-
-  w = REMESH(alpha)(y);
-  __RCOMP_Itemp__ID__ = w * s__ID__;
-  __RCOMP_Igscal_loc__ID__[noBC_id(index.s__NN__)] += temp__ID__.s__NN__;
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  index = (index + 1) % NB_I;
-  w = REMESH(beta)(y);
-  __RCOMP_Itemp__ID__ = w * s__ID__;
-  __RCOMP_Igscal_loc__ID__[noBC_id(index.s__NN__)] += temp__ID__.s__NN__;
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  index = (index + 1) % NB_I;
-  w = REMESH(gamma)(y);
-  __RCOMP_Itemp__ID__ = w * s__ID__;
-  __RCOMP_Igscal_loc__ID__[noBC_id(index.s__NN__)] += temp__ID__.s__NN__;
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  index = (index + 1) % NB_I;
-  w = REMESH(delta)(y);
-  __RCOMP_Itemp__ID__ = w * s__ID__;
-  __RCOMP_Igscal_loc__ID__[noBC_id(index.s__NN__)] += temp__ID__.s__NN__;
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-#if REMESH_SHIFT > 1
-  index = (index + 1) % NB_I;
-  w = REMESH(eta)(y);
-  __RCOMP_Itemp__ID__ = w * s__ID__;
-  __RCOMP_Igscal_loc__ID__[noBC_id(index.s__NN__)] += temp__ID__.s__NN__;
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  index = (index + 1) % NB_I;
-  w = REMESH(zeta)(y);
-  __RCOMP_Itemp__ID__ = w * s__ID__;
-  __RCOMP_Igscal_loc__ID__[noBC_id(index.s__NN__)] += temp__ID__.s__NN__;
-  barrier(CLK_LOCAL_MEM_FENCE);
-#endif
-
-#if REMESH_SHIFT > 2
-  index = (index + 1) % NB_I;
-  w = REMESH(theta)(y);
-  __RCOMP_Itemp__ID__ = w * s__ID__;
-  __RCOMP_Igscal_loc__ID__[noBC_id(index.s__NN__)] += temp__ID__.s__NN__;
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  index = (index + 1) % NB_I;
-  w = REMESH(iota)(y);
-  __RCOMP_Itemp__ID__ = w * s__ID__;
-  __RCOMP_Igscal_loc__ID__[noBC_id(index.s__NN__)] += temp__ID__.s__NN__;
-  barrier(CLK_LOCAL_MEM_FENCE);
-#endif
-
-#if REMESH_SHIFT > 3
-  index = (index + 1) % NB_I;
-  w = REMESH(kappa)(y);
-  __RCOMP_Itemp__ID__ = w * s__ID__;
-  __RCOMP_Igscal_loc__ID__[noBC_id(index.s__NN__)] += temp__ID__.s__NN__;
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  index = (index + 1) % NB_I;
-  w = REMESH(mu)(y);
-  __RCOMP_Itemp__ID__ = w * s__ID__;
-  __RCOMP_Igscal_loc__ID__[noBC_id(index.s__NN__)] += temp__ID__.s__NN__;
-  barrier(CLK_LOCAL_MEM_FENCE);
-#endif
-}
diff --git a/hysop/old/gpu.old/cl_src/remeshing/private_noVec.cl b/hysop/old/gpu.old/cl_src/remeshing/private_noVec.cl
deleted file mode 100644
index 7bafbe37de8e4a92fa839336045d4566374f167c..0000000000000000000000000000000000000000
--- a/hysop/old/gpu.old/cl_src/remeshing/private_noVec.cl
+++ /dev/null
@@ -1,116 +0,0 @@
-/**
- * @file private.cl
- * Remeshing function, vectorized, private variable.
- */
-
-void remesh(uint i, __RCOMP_P float s__ID__, float p, __RCOMP_P__local float* gscal_loc__ID__, __constant struct AdvectionMeshInfo* mesh);
-
-
-/**
- * Remesh particles in local buffer.
- *
- * Remeshing formula is given a compiling time.
- * Use of builtin OpenCL functions fma and mix. Computations through OpenCL vector types.
- * Use of a private temporary variable for remeshing weights.
- *
- * @param i Particle index
- * @param dx Space step
- * @param invdx 1/dx
- * @param s Particle scalar
- * @param p Particle position
- * @param gscal_loc Local buffer for result
- *
- * @remark <code>NB_I</code>, <code>NB_II</code>, <code>NB_III</code> : points number in directions from 1st varying index to last.
- * @remark <code>__N__</code> is expanded at compilation time by vector width.
- * @remark <code>__NN__</code> is expanded at compilation time by a sequence of integer for each vector component.
- * @remark <code>FORMULA</code> : remeshing formula flag {<code>M4PRIME</code>, <code>M6PRIME</code>, <code>M8PRIME</code>, <code>L6STAR</code>}
- * @remark <code>__RCOMP_I</code> flag is for instruction expansion for the different remeshed components.
- * @remark <code>__RCOMP_P</code> flag is for function parameter expansion for the different remeshed components.
- * @remark <code>__ID__</code> is replaced by the remeshed component id in an expansion.
- * @remark <code>REMESH</code> is a function-like macro expanding to the proper remeshing formula (i.e.: <code>REMESH(alpha)</code> -> <code>alpha_l2_1</code>)
- * @see hysop.gpu.tools.parse_file
- * @see hysop.gpu.cl_src.common
- */
-void remesh(uint i,
-	    __RCOMP_P float s__ID__,
-	    float p,
-	    __RCOMP_P__local float* gscal_loc__ID__,
-	    __constant struct AdvectionMeshInfo* mesh){
-  float y,			/* Normalized distance to nearest left grid point */
-    w;			/* Temporary remeshing weights */
-  __RCOMP_I float temp__ID__;
-  int ind;			/* Integer coordinate */
-  uint index;		/* Remeshing index */
-
-  p = p - mesh->min_position;
-
-  ind = convert_int_rtn(p * mesh->invdx);
-  y = (p - convert_float(ind) * mesh->dx.x) * mesh->invdx;
-
-  index = convert_uint((ind - REMESH_SHIFT + NB_I) % NB_I);
-
-  w = REMESH(alpha)(y);
-  __RCOMP_Itemp__ID__ = w * s__ID__;
-  __RCOMP_Igscal_loc__ID__[noBC_id(index)] += temp__ID__;
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  index = (index + 1) % NB_I;
-  w = REMESH(beta)(y);
-  __RCOMP_Itemp__ID__ = w * s__ID__;
-  __RCOMP_Igscal_loc__ID__[noBC_id(index)] += temp__ID__;
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  index = (index + 1) % NB_I;
-  w = REMESH(gamma)(y);
-  __RCOMP_Itemp__ID__ = w * s__ID__;
-  __RCOMP_Igscal_loc__ID__[noBC_id(index)] += temp__ID__;
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  index = (index + 1) % NB_I;
-  w = REMESH(delta)(y);
-  __RCOMP_Itemp__ID__ = w * s__ID__;
-  __RCOMP_Igscal_loc__ID__[noBC_id(index)] += temp__ID__;
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-#if REMESH_SHIFT > 1
-  index = (index + 1) % NB_I;
-  w = REMESH(eta)(y);
-  __RCOMP_Itemp__ID__ = w * s__ID__;
-  __RCOMP_Igscal_loc__ID__[noBC_id(index)] += temp__ID__;
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  index = (index + 1) % NB_I;
-  w = REMESH(zeta)(y);
-  __RCOMP_Itemp__ID__ = w * s__ID__;
-  __RCOMP_Igscal_loc__ID__[noBC_id(index)] += temp__ID__;
-  barrier(CLK_LOCAL_MEM_FENCE);
-#endif
-
-#if REMESH_SHIFT > 2
-  index = (index + 1) % NB_I;
-  w = REMESH(theta)(y);
-  __RCOMP_Itemp__ID__ = w * s__ID__;
-  __RCOMP_Igscal_loc__ID__[noBC_id(index)] += temp__ID__;
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  index = (index + 1) % NB_I;
-  w = REMESH(iota)(y);
-  __RCOMP_Itemp__ID__ = w * s__ID__;
-  __RCOMP_Igscal_loc__ID__[noBC_id(index)] += temp__ID__;
-  barrier(CLK_LOCAL_MEM_FENCE);
-#endif
-
-#if REMESH_SHIFT > 3
-  index = (index + 1) % NB_I;
-  w = REMESH(kappa)(y);
-  __RCOMP_Itemp__ID__ = w * s__ID__;
-  __RCOMP_Igscal_loc__ID__[noBC_id(index)] += temp__ID__;
-  barrier(CLK_LOCAL_MEM_FENCE);
-
-  index = (index + 1) % NB_I;
-  w = REMESH(mu)(y);
-  __RCOMP_Itemp__ID__ = w * s__ID__;
-  __RCOMP_Igscal_loc__ID__[noBC_id(index)] += temp__ID__;
-  barrier(CLK_LOCAL_MEM_FENCE);
-  #endif
-}
diff --git a/hysop/old/gpu.old/cl_src/remeshing/weights.cl b/hysop/old/gpu.old/cl_src/remeshing/weights.cl
deleted file mode 100644
index d101fffed4fac3f0a97327d1c1186f3d0682d486..0000000000000000000000000000000000000000
--- a/hysop/old/gpu.old/cl_src/remeshing/weights.cl
+++ /dev/null
@@ -1,202 +0,0 @@
-/**
- * @file weights.cl
- * Remeshing formulas, vectorized version.
- * Polynomials under Horner form.
- */
-
-inline float__N__ alpha_l2_1(float__N__ y){
-  return ((y * (y * (-y + 2.0) - 1.0)) * 0.5);}
-inline float__N__ beta_l2_1(float__N__ y){
-  return ((y * y * (3.0 * y - 5.0) + 2.0) * 0.5);}
-inline float__N__ gamma_l2_1(float__N__ y){
-  return ((y * (y * (-3.0 * y + 4.0) + 1.0)) * 0.5);}
-inline float__N__ delta_l2_1(float__N__ y){
-  return ((y * y * (y - 1.0)) * 0.5);}
-
-
-inline float__N__ alpha_l2_2(float__N__ y){
-  return ((y * (y * (y * (y * (2.0 * y - 5.0) + 3.0) + 1.0) - 1.0)) * 0.5);}
-inline float__N__ beta_l2_2(float__N__ y){
-  return ((y * y * (y * (y * (-6.0 * y + 15.0) - 9.0) - 2.0) + 2.0) * 0.5);}
-inline float__N__ gamma_l2_2(float__N__ y){
-  return ((y * (y * (y * (y * (6.0 * y - 15.0) + 9.0) + 1.0) + 1.0)) * 0.5);}
-inline float__N__ delta_l2_2(float__N__ y){
-  return ((y * y * y * (y * (-2.0 * y + 5.0) - 3.0)) * 0.5);}
-
-
-inline float__N__ alpha_l2_3(float__N__ y){
-  return ((y * (y * (y * y * (y * (y * (-6.0 * y + 21.0) - 25.0) + 10.0) + 1.0) - 1.0)) * 0.5);}
-inline float__N__ beta_l2_3(float__N__ y){
-  return ((y * y * (y * y * (y * (y * (18.0 * y - 63.0) + 75.0) - 30.0) - 2.0) + 2.0) * 0.5);}
-inline float__N__ gamma_l2_3(float__N__ y){
-  return ((y * (y * (y * y * (y * (y * (-18.0 * y + 63.0) - 75.0) + 30.0) + 1.0) + 1.0)) * 0.5);}
-inline float__N__ delta_l2_3(float__N__ y){
-  return ((y * y * y * y * (y * (y * (6.0 * y - 21.0) + 25.0) - 10.0)) * 0.5);}
-
-
-inline float__N__ alpha_l2_4(float__N__ y){
-  return ((y * (y * (y * y * y * (y * (y * (y * (20.0 * y - 90.0) + 154.0) - 119.0) + 35.0) + 1.0) - 1.0)) * 0.5);}
-inline float__N__ beta_l2_4(float__N__ y){
-  return ((y * y * (y * y * y * (y * (y * (y * (-60.0 * y + 270.0) - 462.0) + 357.0) - 105.0) - 2.0) + 2.0) * 0.5);}
-inline float__N__ gamma_l2_4(float__N__ y){
-  return ((y * (y * (y * y * y * (y * (y * (y * (60.0 * y - 270.0) + 462.0) - 357.0) + 105.0) + 1.0) + 1.0)) * 0.5);}
-inline float__N__ delta_l2_4(float__N__ y){
-  return ((y * y * y * y * y * (y * (y * (y * (-20.0 * y + 90.0) - 154.0) + 119.0) - 35.0)) * 0.5);}
-
-
-inline float__N__ alpha_l4_2(float__N__ y){
-  return ((y * (y * (y * (y * (-5.0 * y + 13.0) - 9.0) - 1.0) + 2.0)) * 0.041666666666666664);}
-inline float__N__ beta_l4_2(float__N__ y){
-  return ((y * (y * (y * (y * (25.0 * y - 64.0) + 39.0) + 16.0) - 16.0)) * 0.041666666666666664);}
-inline float__N__ gamma_l4_2(float__N__ y){
-  return ((y * y * (y * (y * (-50.0 * y + 126.0) - 70.0) - 30.0) + 24.0) * 0.041666666666666664);}
-inline float__N__ delta_l4_2(float__N__ y){
-  return ((y * (y * (y * (y * (50.0 * y - 124.0) + 66.0) + 16.0) + 16.0)) * 0.041666666666666664);}
-inline float__N__ eta_l4_2(float__N__ y){
-  return ((y * (y * (y * (y * (-25.0 * y + 61.0) - 33.0) - 1.0) - 2.0)) * 0.041666666666666664);}
-inline float__N__ zeta_l4_2(float__N__ y){
-  return ((y * y * y * (y * (5.0 * y - 12.0) + 7.0)) * 0.041666666666666664);}
-
-
-inline float__N__ alpha_l4_3(float__N__ y){
-  return ((y * (y * (y * (y * (y * (y * (14.0 * y - 49.0) + 58.0) - 22.0) - 2.0) - 1.0) + 2.0)) * 0.041666666666666664);}
-inline float__N__ beta_l4_3(float__N__ y){
-  return ((y * (y * (y * (y * (y * (y * (-70.0 * y + 245.0) - 290.0) + 111.0) + 4.0) + 16.0) - 16.0)) * 0.041666666666666664);}
-inline float__N__ gamma_l4_3(float__N__ y){
-  return ((y * y * (y * y * (y * (y * (140.0 * y - 490.0) + 580.0) - 224.0) - 30.0) + 24.0) * 0.041666666666666664);}
-inline float__N__ delta_l4_3(float__N__ y){
-  return ((y * (y * (y * (y * (y * (y * (-140.0 * y + 490.0) - 580.0) + 226.0) - 4.0) + 16.0) + 16.0)) * 0.041666666666666664);}
-inline float__N__ eta_l4_3(float__N__ y){
-  return ((y * (y * (y * (y * (y * (y * (70.0 * y - 245.0) + 290.0) - 114.0) + 2.0) - 1.0) - 2.0)) * 0.041666666666666664);}
-inline float__N__ zeta_l4_3(float__N__ y){
-  return ((y * y * y * y * (y * (y * (-14.0 * y + 49.0) - 58.0) + 23.0)) * 0.041666666666666664);}
-
-
-inline float__N__ alpha_l4_4(float__N__ y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (-46.0 * y + 207.0) - 354.0) + 273.0) - 80.0) + 1.0) - 2.0) - 1.0) + 2.0)) * 0.041666666666666664);}
-inline float__N__ beta_l4_4(float__N__ y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (230.0 * y - 1035.0) + 1770.0) - 1365.0) + 400.0) - 4.0) + 4.0) + 16.0) - 16.0)) * 0.041666666666666664);}
-inline float__N__ gamma_l4_4(float__N__ y){
-  return ((y * y * (y * y * (y * (y * (y * (y * (-460.0 * y + 2070.0) - 3540.0) + 2730.0) - 800.0) + 6.0) - 30.0) + 24.0) * 0.041666666666666664);}
-inline float__N__ delta_l4_4(float__N__ y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (460.0 * y - 2070.0) + 3540.0) - 2730.0) + 800.0) - 4.0) - 4.0) + 16.0) + 16.0)) * 0.041666666666666664);}
-inline float__N__ eta_l4_4(float__N__ y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (-230.0 * y + 1035.0) - 1770.0) + 1365.0) - 400.0) + 1.0) + 2.0) - 1.0) - 2.0)) * 0.041666666666666664);}
-inline float__N__ zeta_l4_4(float__N__ y){
-  return ((y * y * y * y * y * (y * (y * (y * (46.0 * y - 207.0) + 354.0) - 273.0) + 80.0)) * 0.041666666666666664);}
-
-
-inline float__N__ alpha_M8p(float__N__ y){
-  return ((y*(y*(y*(y*(y*(y*(-10.0*y + 21.0) + 28.0) - 105.0) + 70.0) + 35.0) - 56.0) + 17.0) * 0.00029761904761904765);}
-inline float__N__ beta_M8p(float__N__ y){
-  return ((y*(y*(y*(y*(y*(y*(70.0*y - 175.0) - 140.0) + 770.0) - 560.0) - 350.0) + 504.0) - 102.0) * 0.00029761904761904765);}
-inline float__N__ gamma_M8p(float__N__ y){
-  return ((y*(y*(y*(y*(y*(y*(-210.0*y + 609.0) + 224.0) - 2135.0) + 910.0) + 2765.0) - 2520.0) + 255.0) * 0.00029761904761904765);}
-inline float__N__ delta_M8p(float__N__ y){
-  return ((y*y* (y*y* (y*y* (70.0*y - 231.0) + 588.0) - 980.0) + 604.0) * 0.001488095238095238);}
-inline float__N__ eta_M8p(float__N__ y){
-  return ((y*(y*(y*(y*(y*(y*(-70.0*y+ 259.0) - 84.0) - 427.0) - 182.0)+ 553.0) + 504.0)+ 51.0) * 0.001488095238095238);}
-inline float__N__ zeta_M8p(float__N__ y){
-  return ((y*(y*(y*(y*(y*(y*(210.0*y- 861.0) + 532.0) + 770.0) + 560.0) - 350.0) - 504.0) - 102.0) * 0.00029761904761904765);}
-inline float__N__ theta_M8p(float__N__ y){
-  return ((y* (y* (y* (y* (y* (y* (-70.0* y+ 315.0) -280.0) -105.0) -70.0) +35.0)+ 56.0) +17.0) * 0.00029761904761904765);}
-inline float__N__ iota_M8p(float__N__ y){
-  return ((y * y * y * y * y * (y * (10.0 * y - 49.0) + 56.0)) * 0.00029761904761904765);}
-
-
-inline float__N__ alpha_l6_3(float__N__ y){
-  return ((y * (y * (y * (y * (y * (y * (-89.0 * y + 312.0) - 370.0) + 140.0) + 15.0) + 4.0) - 12.0)) * 0.001388888888888889);}
-inline float__N__ beta_l6_3(float__N__ y){
-  return ((y * (y * (y * (y * (y * (y * (623.0 * y - 2183.0) + 2581.0) - 955.0) - 120.0) - 54.0) + 108.0)) * 0.001388888888888889);}
-inline float__N__ gamma_l6_3(float__N__ y){
-  return ((y * (y * (y * (y * (y * (y * (-1869.0 * y + 6546.0) - 7722.0) + 2850.0) + 195.0) + 540.0) - 540.0)) * 0.001388888888888889);}
-inline float__N__ delta_l6_3(float__N__ y){
-  return ((y * y * (y * y * (y * (y * (3115.0 * y - 10905.0) + 12845.0) - 4795.0) - 980.0) + 720.0) * 0.001388888888888889);}
-inline float__N__ eta_l6_3(float__N__ y){
-  return ((y * (y * (y * (y * (y * (y * (-3115.0 * y + 10900.0) - 12830.0) + 4880.0) - 195.0) + 540.0) + 540.0)) * 0.001388888888888889);}
-inline float__N__ zeta_l6_3(float__N__ y){
-  return ((y * (y * (y * (y * (y * (y * (1869.0 * y - 6537.0) + 7695.0) - 2985.0) + 120.0) - 54.0) - 108.0)) * 0.001388888888888889);}
-inline float__N__ theta_l6_3(float__N__ y){
-  return ((y * (y * (y * (y * (y * (y * (-623.0 * y + 2178.0) - 2566.0) + 1010.0) - 15.0) + 4.0) + 12.0)) * 0.001388888888888889);}
-inline float__N__ iota_l6_3(float__N__ y){
-  return ((y * y * y * y * (y * (y * (89.0 * y - 311.0) + 367.0) - 145.0)) * 0.001388888888888889);}
-
-
-inline float__N__ alpha_l6_4(float__N__ y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (290.0 * y - 1305.0) + 2231.0) - 1718.0) + 500.0) - 5.0) + 15.0) + 4.0) - 12.0)) * 0.001388888888888889);}
-inline float__N__ beta_l6_4(float__N__ y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (-2030.0 * y + 9135.0) - 15617.0) + 12027.0) - 3509.0) + 60.0) - 120.0) - 54.0) + 108.0)) * 0.001388888888888889);}
-inline float__N__ gamma_l6_4(float__N__ y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (6090.0 * y - 27405.0) + 46851.0) - 36084.0) + 10548.0) - 195.0) + 195.0) + 540.0) - 540.0)) * 0.001388888888888889);}
-inline float__N__ delta_l6_4(float__N__ y){
-  return ((y * y * (y * y * (y * (y * (y * (y * (-10150.0 * y + 45675.0) - 78085.0) + 60145.0) - 17605.0) + 280.0) - 980.0) + 720.0) * 0.001388888888888889);}
-inline float__N__ eta_l6_4(float__N__ y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (10150.0 * y - 45675.0) + 78085.0) - 60150.0) + 17620.0) - 195.0) - 195.0) + 540.0) + 540.0)) * 0.001388888888888889);}
-inline float__N__ zeta_l6_4(float__N__ y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (-6090.0 * y + 27405.0) - 46851.0) + 36093.0) - 10575.0) + 60.0) + 120.0) - 54.0) - 108.0)) * 0.001388888888888889);}
-inline float__N__ theta_l6_4(float__N__ y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (2030.0 * y - 9135.0) + 15617.0) - 12032.0) + 3524.0) - 5.0) - 15.0) + 4.0) + 12.0)) * 0.001388888888888889);}
-inline float__N__ iota_l6_4(float__N__ y){
-  return ((y * y * y * y * y * (y * (y * (y * (-290.0 * y + 1305.0) - 2231.0) + 1719.0) - 503.0)) * 0.001388888888888889);}
-
-
-inline float__N__ alpha_l6_5(float__N__ y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (-1006.0 * y + 5533.0) - 12285.0) + 13785.0) - 7829.0) + 1803.0) - 3.0) - 5.0) + 15.0) + 4.0) - 12.0)) * 0.001388888888888889);}
-inline float__N__ beta_l6_5(float__N__ y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (7042.0 * y - 38731.0) + 85995.0) - 96495.0) + 54803.0) - 12620.0) + 12.0) + 60.0) - 120.0) - 54.0) + 108.0)) * 0.001388888888888889);}
-inline float__N__ gamma_l6_5(float__N__ y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (-21126.0 * y + 116193.0) - 257985.0) + 289485.0) - 164409.0) + 37857.0) - 15.0) - 195.0) + 195.0) + 540.0) - 540.0)) * 0.001388888888888889);}
-inline float__N__ delta_l6_5(float__N__ y){
-  return ((y * y * (y * y * (y * y * (y * (y * (y * (y * (35210.0 * y - 193655.0) + 429975.0) - 482475.0) + 274015.0) - 63090.0) + 280.0) - 980.0) + 720.0) * 0.001388888888888889);}
-inline float__N__ eta_l6_5(float__N__ y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (-35210.0 * y + 193655.0) - 429975.0) + 482475.0) - 274015.0) + 63085.0) + 15.0) - 195.0) - 195.0) + 540.0) + 540.0)) * 0.001388888888888889);}
-inline float__N__ zeta_l6_5(float__N__ y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (21126.0 * y - 116193.0) + 257985.0) - 289485.0) + 164409.0) - 37848.0) - 12.0) + 60.0) + 120.0) - 54.0) - 108.0)) * 0.001388888888888889);}
-inline float__N__ theta_l6_5(float__N__ y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (-7042.0 * y + 38731.0) - 85995.0) + 96495.0) - 54803.0) + 12615.0) + 3.0) - 5.0) - 15.0) + 4.0) + 12.0)) * 0.001388888888888889);}
-inline float__N__ iota_l6_5(float__N__ y){
-  return ((y * y * y * y * y * y * (y * (y * (y * (y * (1006.0 * y - 5533.0) + 12285.0) - 13785.0) + 7829.0) - 1802.0)) * 0.001388888888888889);}
-
-
-inline float__N__ alpha_l6_6(float__N__ y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (3604.0 * y - 23426.0) + 63866.0) - 93577.0) + 77815.0) - 34869.0) + 6587.0) + 1.0) - 3.0) - 5.0) + 15.0) + 4.0) - 12.0)) * 0.001388888888888889);}
-inline float__N__ beta_l6_6(float__N__ y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (-25228.0 * y + 163982.0) - 447062.0) + 655039.0) - 544705.0) + 244083.0) - 46109.0) - 6.0) + 12.0) + 60.0) - 120.0) - 54.0) + 108.0)) * 0.001388888888888889);}
-inline float__N__ gamma_l6_6(float__N__ y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (75684.0 * y - 491946.0) + 1341186.0) - 1965117.0) + 1634115.0) - 732249.0) + 138327.0) + 15.0) - 15.0) - 195.0) + 195.0) + 540.0) - 540.0)) * 0.001388888888888889);}
-inline float__N__ delta_l6_6(float__N__ y){
-  return ((y * y * (y * y * (y * y * (y * (y * (y * (y * (y * (y * (-126140.0 * y + 819910.0) - 2235310.0) + 3275195.0) - 2723525.0) + 1220415.0) - 230545.0) - 20.0) + 280.0) - 980.0) + 720.0) * 0.001388888888888889);}
-inline float__N__ eta_l6_6(float__N__ y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (126140.0 * y - 819910.0) + 2235310.0) - 3275195.0) + 2723525.0) - 1220415.0) + 230545.0) + 15.0) + 15.0) - 195.0) - 195.0) + 540.0) + 540.0)) * 0.001388888888888889);}
-inline float__N__ zeta_l6_6(float__N__ y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (-75684.0 * y + 491946.0) - 1341186.0) + 1965117.0) - 1634115.0) + 732249.0) - 138327.0) - 6.0) - 12.0) + 60.0) + 120.0) - 54.0) - 108.0)) * 0.001388888888888889);}
-inline float__N__ theta_l6_6(float__N__ y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (25228.0 * y - 163982.0) + 447062.0) - 655039.0) + 544705.0) - 244083.0) + 46109.0) + 1.0) + 3.0) - 5.0) - 15.0) + 4.0) + 12.0)) * 0.001388888888888889);}
-inline float__N__ iota_l6_6(float__N__ y){
-  return ((y * y * y * y * y * y * y * (y * (y * (y * (y * (y * (-3604.0 * y + 23426.0) - 63866.0) + 93577.0) - 77815.0) + 34869.0) - 6587.0)) * 0.001388888888888889);}
-
-
-inline float__N__ alpha_l8_4(float__N__ y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (-3569.0 * y + 16061.0) - 27454.0) + 21126.0) - 6125.0) + 49.0) - 196.0) - 36.0) + 144.0)) * 2.48015873015873e-05);}
-inline float__N__ beta_l8_4(float__N__ y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (32121.0 * y - 144548.0) + 247074.0) - 190092.0) + 55125.0) - 672.0) + 2016.0) + 512.0) - 1536.0)) * 2.48015873015873e-05);}
-inline float__N__ gamma_l8_4(float__N__ y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (-128484.0 * y + 578188.0) - 988256.0) + 760312.0) - 221060.0) + 4732.0) - 9464.0) - 4032.0) + 8064.0)) * 2.48015873015873e-05);}
-inline float__N__ delta_l8_4(float__N__ y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (299796.0 * y - 1349096.0) + 2305856.0) - 1774136.0) + 517580.0) - 13664.0) + 13664.0) + 32256.0) - 32256.0)) * 2.48015873015873e-05);}
-inline float__N__ eta_l8_4(float__N__ y){
-  return ((y * y * (y * y * (y * (y * (y * (y * (-449694.0 * y + 2023630.0) - 3458700.0) + 2661540.0) - 778806.0) + 19110.0) - 57400.0) + 40320.0) * 2.48015873015873e-05);}
-inline float__N__ zeta_l8_4(float__N__ y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (449694.0 * y - 2023616.0) + 3458644.0) - 2662016.0) + 780430.0) - 13664.0) - 13664.0) + 32256.0) + 32256.0)) * 2.48015873015873e-05);}
-inline float__N__ theta_l8_4(float__N__ y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (-299796.0 * y + 1349068.0) - 2305744.0) + 1775032.0) - 520660.0) + 4732.0) + 9464.0) - 4032.0) - 8064.0)) * 2.48015873015873e-05);}
-inline float__N__ iota_l8_4(float__N__ y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (128484.0 * y - 578168.0) + 988176.0) - 760872.0) + 223020.0) - 672.0) - 2016.0) + 512.0) + 1536.0)) * 2.48015873015873e-05);}
-inline float__N__ kappa_l8_4(float__N__ y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (-32121.0 * y + 144541.0) - 247046.0) + 190246.0) - 55685.0) + 49.0) + 196.0) - 36.0) - 144.0)) * 2.48015873015873e-05);}
-inline float__N__ mu_l8_4(float__N__ y){
-  return ((y * y * y * y * y * (y * (y * (y * (3569.0 * y - 16060.0) + 27450.0) - 21140.0) + 6181.0)) * 2.48015873015873e-05);}
-
-
-
-#endif
diff --git a/hysop/old/gpu.old/cl_src/remeshing/weights_builtin.cl b/hysop/old/gpu.old/cl_src/remeshing/weights_builtin.cl
deleted file mode 100644
index cd1827937456989bf6eb51b36bf5025c5d4bbd55..0000000000000000000000000000000000000000
--- a/hysop/old/gpu.old/cl_src/remeshing/weights_builtin.cl
+++ /dev/null
@@ -1,199 +0,0 @@
-/**
- * @file weights_builtin.cl
- * Remeshing formulas, vectorized version, use of builtin OpenCL fma.
- * Polynomials under Horner form.
- */
-
-inline float__N__ alpha_l2_1(float__N__ y){
-  return (y*fma(y,fma(y,-1.0, 2.0), - 1.0) * 0.5);}
-inline float__N__ beta_l2_1(float__N__ y){
-  return (fma(y*y, fma(y, 3.0, -5.0), 2.0) * 0.5);}
-inline float__N__ gamma_l2_1(float__N__   y){
-  return ((y * fma(y , fma(-3.0, y, 4.0), 1.0)) * 0.5);}
-inline float__N__ delta_l2_1(float__N__ y){
-  return ((y * y * fma(1.0, y, - 1.0)) * 0.5);}
-
-
-inline float__N__ alpha_l2_2(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, 2.0, -5.0), 3.0), 1.0), -1.0)) * 0.5);}
-inline float__N__ beta_l2_2(float__N__ y){
-  return (fma(y * y, fma(y, fma(y, fma(y, -6.0, 15.0), -9.0), -2.0), 2.0) * 0.5);}
-inline float__N__ gamma_l2_2(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, 6.0, -15.0), 9.0), 1.0), 1.0)) * 0.5);}
-inline float__N__ delta_l2_2(float__N__ y){
-  return ((y * y * y * fma(y, fma(y, -2.0, 5.0), -3.0)) * 0.5);}
-
-
-inline float__N__ alpha_l2_3(float__N__ y){
-  return ((y * fma(y, fma(y * y, fma(y, fma(y, fma(y, -6.0, 21.0), -25.0), 10.0), 1.0), -1.0)) * 0.5);}
-inline float__N__ beta_l2_3(float__N__ y){
-  return (fma(y * y, fma(y * y, fma(y, fma(y, fma(y, 18.0, -63.0), 75.0), -30.0), -2.0), 2.0) * 0.5);}
-inline float__N__ gamma_l2_3(float__N__ y){
-  return ((y * fma(y, fma(y * y, fma(y, fma(y, fma(y, -18.0, 63.0), -75.0), 30.0), 1.0), 1.0)) * 0.5);}
-inline float__N__ delta_l2_3(float__N__ y){
-  return ((y * y * y * y * fma(y, fma(y, fma(y, 6.0, -21.0), 25.0), -10.0)) * 0.5);}
-
-
-inline float__N__ alpha_l2_4(float__N__ y){
-  return ((y * fma(y, fma(y * y * y, fma(y, fma(y, fma(y, fma(y, 20.0, -90.0), 154.0), -119.0), 35.0), 1.0), -1.0)) * 0.5);}
-inline float__N__ beta_l2_4(float__N__ y){
-  return (fma(y * y, fma(y * y * y, fma(y, fma(y, fma(y, fma(y, -60.0, 270.0), -462.0), 357.0), -105.0), -2.0), 2.0) * 0.5);}
-inline float__N__ gamma_l2_4(float__N__ y){
-  return ((y * fma(y, fma(y * y * y, fma(y, fma(y, fma(y, fma(y, 60.0, -270.0), 462.0), -357.0), 105.0), 1.0), 1.0)) * 0.5);}
-inline float__N__ delta_l2_4(float__N__ y){
-  return ((y * y * y * y * y * fma(y, fma(y, fma(y, fma(y, -20.0, 90.0), -154.0), 119.0), -35.0)) * 0.5);}
-
-
-inline float__N__ alpha_l4_2(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, -5.0, 13.0), -9.0), -1.0), 2.0)) * 0.041666666666666664);}
-inline float__N__ beta_l4_2(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, 25.0, -64.0), 39.0), 16.0), -16.0)) * 0.041666666666666664);}
-inline float__N__ gamma_l4_2(float__N__ y){
-  return (fma(y * y, fma(y, fma(y, fma(y, -50.0, 126.0), -70.0), -30.0), 24.0) * 0.041666666666666664);}
-inline float__N__ delta_l4_2(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, 50.0, -124.0), 66.0), 16.0), 16.0)) * 0.041666666666666664);}
-inline float__N__ eta_l4_2(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, -25.0, 61.0), -33.0), -1.0), -2.0)) * 0.041666666666666664);}
-inline float__N__ zeta_l4_2(float__N__ y){
-  return ((y * y * y * fma(y, fma(y, 5.0, -12.0), 7.0)) * 0.041666666666666664);}
-
-
-inline float__N__ alpha_l4_3(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, 14.0, -49.0), 58.0), -22.0), -2.0), -1.0), 2.0)) * 0.041666666666666664);}
-inline float__N__ beta_l4_3(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -70.0, 245.0), -290.0), 111.0), 4.0), 16.0), -16.0)) * 0.041666666666666664);}
-inline float__N__ gamma_l4_3(float__N__ y){
-  return (fma(y * y, fma(y * y, fma(y, fma(y, fma(y, 140.0, -490.0), 580.0), -224.0), -30.0), 24.0) * 0.041666666666666664);}
-inline float__N__ delta_l4_3(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -140.0, 490.0), -580.0), 226.0), -4.0), 16.0), 16.0)) * 0.041666666666666664);}
-inline float__N__ eta_l4_3(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, 70.0, -245.0), 290.0), -114.0), 2.0), -1.0), -2.0)) * 0.041666666666666664);}
-inline float__N__ zeta_l4_3(float__N__ y){
-  return ((y * y * y * y * fma(y, fma(y, fma(y, -14.0, 49.0), -58.0), 23.0)) * 0.041666666666666664);}
-
-
-inline float__N__ alpha_l4_4(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -46.0, 207.0), -354.0), 273.0), -80.0), 1.0), -2.0), -1.0), 2.0)) * 0.041666666666666664);}
-inline float__N__ beta_l4_4(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, 230.0, -1035.0), 1770.0), -1365.0), 400.0), -4.0), 4.0), 16.0), -16.0)) * 0.041666666666666664);}
-inline float__N__ gamma_l4_4(float__N__ y){
-  return (fma(y * y, fma(y * y, fma(y, fma(y, fma(y, fma(y, fma(y, -460.0, 2070.0), -3540.0), 2730.0), -800.0), 6.0), -30.0), 24.0) * 0.041666666666666664);}
-inline float__N__ delta_l4_4(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, 460.0, -2070.0), 3540.0), -2730.0), 800.0), -4.0), -4.0), 16.0), 16.0)) * 0.041666666666666664);}
-inline float__N__ eta_l4_4(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -230.0, 1035.0), -1770.0), 1365.0), -400.0), 1.0), 2.0), -1.0), -2.0)) * 0.041666666666666664);}
-inline float__N__ zeta_l4_4(float__N__ y){
-  return ((y * y * y * y * y * fma(y, fma(y, fma(y, fma(y, 46.0, -207.0), 354.0), -273.0), 80.0)) * 0.041666666666666664);}
-
-
-inline float__N__ alpha_M8p(float__N__ y){
-  return (fma(y,fma(y,fma(y,fma(y,fma(y,fma(y,fma(-10.0,y, + 21.0), + 28.0), - 105.0), + 70.0), + 35.0), - 56.0), + 17.0) * 0.00029761904761904765);}
-inline float__N__ beta_M8p(float__N__ y){
-  return (fma(y,fma(y,fma(y,fma(y,fma(y,fma(y,fma(70.0,y, - 175.0), - 140.0), + 770.0), - 560.0), - 350.0), + 504.0), - 102.0) * 0.00029761904761904765);}
-inline float__N__ gamma_M8p(float__N__ y){
-  return (fma(y,fma(y,fma(y,fma(y,fma(y,fma(y,fma(-210.0,y, + 609.0), + 224.0), - 2135.0), + 910.0), + 2765.0), - 2520.0), + 255.0) * 0.00029761904761904765);}
-inline float__N__ delta_M8p(float__N__ y){
-  return (fma(y*y, fma(y*y, fma(y*y, fma(70.0,y, - 231.0), + 588.0), - 980.0), + 604.0) * 0.001488095238095238);}
-inline float__N__ eta_M8p(float__N__ y){
-  return (fma(y,fma(y,fma(y,fma(y,fma(y,fma(y,fma(-70.0,y, 259.0), - 84.0), - 427.0), - 182.0), + 553.0), + 504.0), + 51.0) * 0.001488095238095238);}
-inline float__N__ zeta_M8p(float__N__ y){
-  return (fma(y,fma(y,fma(y,fma(y,fma(y,fma(y,fma(210.0,y,- 861.0), + 532.0), + 770.0), + 560.0), - 350.0), - 504.0), - 102.0) * 0.00029761904761904765);}
-inline float__N__ theta_M8p(float__N__ y){
-  return (fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(-70.0, y, 315.0), -280.0), -105.0), -70.0), 35.0), 56.0), 17.0) * 0.00029761904761904765);}
-inline float__N__ iota_M8p(float__N__ y){
-  return ((y * y * y * y * y * fma(y , fma(10.0 , y ,- 49.0) , 56.0)) * 0.00029761904761904765);}
-
-
-inline float__N__ alpha_l6_3(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -89.0, 312.0), -370.0), 140.0), 15.0), 4.0), -12.0)) * 0.001388888888888889);}
-inline float__N__ beta_l6_3(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, 623.0, -2183.0), 2581.0), -955.0), -120.0), -54.0), 108.0)) * 0.001388888888888889);}
-inline float__N__ gamma_l6_3(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -1869.0, 6546.0), -7722.0), 2850.0), 195.0), 540.0), -540.0)) * 0.001388888888888889);}
-inline float__N__ delta_l6_3(float__N__ y){
-  return (fma(y * y, fma(y * y, fma(y, fma(y, fma(y, 3115.0, -10905.0), 12845.0), -4795.0), -980.0), 720.0) * 0.001388888888888889);}
-inline float__N__ eta_l6_3(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -3115.0, 10900.0), -12830.0), 4880.0), -195.0), 540.0), 540.0)) * 0.001388888888888889);}
-inline float__N__ zeta_l6_3(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, 1869.0, -6537.0), 7695.0), -2985.0), 120.0), -54.0), -108.0)) * 0.001388888888888889);}
-inline float__N__ theta_l6_3(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -623.0, 2178.0), -2566.0), 1010.0), -15.0), 4.0), 12.0)) * 0.001388888888888889);}
-inline float__N__ iota_l6_3(float__N__ y){
-  return ((y * y * y * y * fma(y, fma(y, fma(y, 89.0, -311.0), 367.0), -145.0)) * 0.001388888888888889);}
-
-
-inline float__N__ alpha_l6_4(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, 290.0, -1305.0), 2231.0), -1718.0), 500.0), -5.0), 15.0), 4.0), -12.0)) * 0.001388888888888889);}
-inline float__N__ beta_l6_4(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -2030.0, 9135.0), -15617.0), 12027.0), -3509.0), 60.0), -120.0), -54.0), 108.0)) * 0.001388888888888889);}
-inline float__N__ gamma_l6_4(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, 6090.0, -27405.0), 46851.0), -36084.0), 10548.0), -195.0), 195.0), 540.0), -540.0)) * 0.001388888888888889);}
-inline float__N__ delta_l6_4(float__N__ y){
-  return (fma(y * y, fma(y * y, fma(y, fma(y, fma(y, fma(y, fma(y, -10150.0, 45675.0), -78085.0), 60145.0), -17605.0), 280.0), -980.0), 720.0) * 0.001388888888888889);}
-inline float__N__ eta_l6_4(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, 10150.0, -45675.0), 78085.0), -60150.0), 17620.0), -195.0), -195.0), 540.0), 540.0)) * 0.001388888888888889);}
-inline float__N__ zeta_l6_4(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -6090.0, 27405.0), -46851.0), 36093.0), -10575.0), 60.0), 120.0), -54.0), -108.0)) * 0.001388888888888889);}
-inline float__N__ theta_l6_4(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, 2030.0, -9135.0), 15617.0), -12032.0), 3524.0), -5.0), -15.0), 4.0), 12.0)) * 0.001388888888888889);}
-inline float__N__ iota_l6_4(float__N__ y){
-  return ((y * y * y * y * y * fma(y, fma(y, fma(y, fma(y, -290.0, 1305.0), -2231.0), 1719.0), -503.0)) * 0.001388888888888889);}
-
-
-inline float__N__ alpha_l6_5(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -1006.0, 5533.0), -12285.0), 13785.0), -7829.0), 1803.0), -3.0), -5.0), 15.0), 4.0), -12.0)) * 0.001388888888888889);}
-inline float__N__ beta_l6_5(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, 7042.0, -38731.0), 85995.0), -96495.0), 54803.0), -12620.0), 12.0), 60.0), -120.0), -54.0), 108.0)) * 0.001388888888888889);}
-inline float__N__ gamma_l6_5(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -21126.0, 116193.0), -257985.0), 289485.0), -164409.0), 37857.0), -15.0), -195.0), 195.0), 540.0), -540.0)) * 0.001388888888888889);}
-inline float__N__ delta_l6_5(float__N__ y){
-  return (fma(y * y, fma(y * y, fma(y * y, fma(y, fma(y, fma(y, fma(y, fma(y, 35210.0, -193655.0), 429975.0), -482475.0), 274015.0), -63090.0), 280.0), -980.0), 720.0) * 0.001388888888888889);}
-inline float__N__ eta_l6_5(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -35210.0, 193655.0), -429975.0), 482475.0), -274015.0), 63085.0), 15.0), -195.0), -195.0), 540.0), 540.0)) * 0.001388888888888889);}
-inline float__N__ zeta_l6_5(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, 21126.0, -116193.0), 257985.0), -289485.0), 164409.0), -37848.0), -12.0), 60.0), 120.0), -54.0), -108.0)) * 0.001388888888888889);}
-inline float__N__ theta_l6_5(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -7042.0, 38731.0), -85995.0), 96495.0), -54803.0), 12615.0), 3.0), -5.0), -15.0), 4.0), 12.0)) * 0.001388888888888889);}
-inline float__N__ iota_l6_5(float__N__ y){
-  return ((y * y * y * y * y * y * fma(y, fma(y, fma(y, fma(y, fma(y, 1006.0, -5533.0), 12285.0), -13785.0), 7829.0), -1802.0)) * 0.001388888888888889);}
-
-
-inline float__N__ alpha_l6_6(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, 3604.0, -23426.0), 63866.0), -93577.0), 77815.0), -34869.0), 6587.0), 1.0), -3.0), -5.0), 15.0), 4.0), -12.0)) * 0.001388888888888889);}
-inline float__N__ beta_l6_6(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -25228.0, 163982.0), -447062.0), 655039.0), -544705.0), 244083.0), -46109.0), -6.0), 12.0), 60.0), -120.0), -54.0), 108.0)) * 0.001388888888888889);}
-inline float__N__ gamma_l6_6(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, 75684.0, -491946.0), 1341186.0), -1965117.0), 1634115.0), -732249.0), 138327.0), 15.0), -15.0), -195.0), 195.0), 540.0), -540.0)) * 0.001388888888888889);}
-inline float__N__ delta_l6_6(float__N__ y){
-  return (fma(y * y, fma(y * y, fma(y * y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -126140.0, 819910.0), -2235310.0), 3275195.0), -2723525.0), 1220415.0), -230545.0), -20.0), 280.0), -980.0), 720.0) * 0.001388888888888889);}
-inline float__N__ eta_l6_6(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, 126140.0, -819910.0), 2235310.0), -3275195.0), 2723525.0), -1220415.0), 230545.0), 15.0), 15.0), -195.0), -195.0), 540.0), 540.0)) * 0.001388888888888889);}
-inline float__N__ zeta_l6_6(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -75684.0, 491946.0), -1341186.0), 1965117.0), -1634115.0), 732249.0), -138327.0), -6.0), -12.0), 60.0), 120.0), -54.0), -108.0)) * 0.001388888888888889);}
-inline float__N__ theta_l6_6(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, 25228.0, -163982.0), 447062.0), -655039.0), 544705.0), -244083.0), 46109.0), 1.0), 3.0), -5.0), -15.0), 4.0), 12.0)) * 0.001388888888888889);}
-inline float__N__ iota_l6_6(float__N__ y){
-  return ((y * y * y * y * y * y * y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -3604.0, 23426.0), -63866.0), 93577.0), -77815.0), 34869.0), -6587.0)) * 0.001388888888888889);}
-
-
-
-inline float__N__ alpha_l8_4(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -3569.0, 16061.0), -27454.0), 21126.0), -6125.0), 49.0), -196.0), -36.0), 144.0)) * 2.48015873015873e-05);}
-inline float__N__ beta_l8_4(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, 32121.0, -144548.0), 247074.0), -190092.0), 55125.0), -672.0), 2016.0), 512.0), -1536.0)) * 2.48015873015873e-05);}
-inline float__N__ gamma_l8_4(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -128484.0, 578188.0), -988256.0), 760312.0), -221060.0), 4732.0), -9464.0), -4032.0), 8064.0)) * 2.48015873015873e-05);}
-inline float__N__ delta_l8_4(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, 299796.0, -1349096.0), 2305856.0), -1774136.0), 517580.0), -13664.0), 13664.0), 32256.0), -32256.0)) * 2.48015873015873e-05);}
-inline float__N__ eta_l8_4(float__N__ y){
-  return (fma(y * y, fma(y * y, fma(y, fma(y, fma(y, fma(y, fma(y, -449694.0, 2023630.0), -3458700.0), 2661540.0), -778806.0), 19110.0), -57400.0), 40320.0) * 2.48015873015873e-05);}
-inline float__N__ zeta_l8_4(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, 449694.0, -2023616.0), 3458644.0), -2662016.0), 780430.0), -13664.0), -13664.0), 32256.0), 32256.0)) * 2.48015873015873e-05);}
-inline float__N__ theta_l8_4(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -299796.0, 1349068.0), -2305744.0), 1775032.0), -520660.0), 4732.0), 9464.0), -4032.0), -8064.0)) * 2.48015873015873e-05);}
-inline float__N__ iota_l8_4(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, 128484.0, -578168.0), 988176.0), -760872.0), 223020.0), -672.0), -2016.0), 512.0), 1536.0)) * 2.48015873015873e-05);}
-inline float__N__ kappa_l8_4(float__N__ y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -32121.0, 144541.0), -247046.0), 190246.0), -55685.0), 49.0), 196.0), -36.0), -144.0)) * 2.48015873015873e-05);}
-inline float__N__ mu_l8_4(float__N__ y){
-  return ((y * y * y * y * y * fma(y, fma(y, fma(y, fma(y, 3569.0, -16060.0), 27450.0), -21140.0), 6181.0)) * 2.48015873015873e-05);}
diff --git a/hysop/old/gpu.old/cl_src/remeshing/weights_noVec.cl b/hysop/old/gpu.old/cl_src/remeshing/weights_noVec.cl
deleted file mode 100644
index a46f89e31324df10b568c2ca10d0923bc428ecbc..0000000000000000000000000000000000000000
--- a/hysop/old/gpu.old/cl_src/remeshing/weights_noVec.cl
+++ /dev/null
@@ -1,198 +0,0 @@
-/**
- * @file weights_noVec.cl
- * Remeshing formulas, basic version.
- * Polynomials under Horner form.
- */
-
-inline float alpha_l2_1(float y){
-  return ((y * (y * (-y + 2.0) - 1.0)) * 0.5);}
-inline float beta_l2_1(float y){
-  return ((y * y * (3.0 * y - 5.0) + 2.0) * 0.5);}
-inline float gamma_l2_1(float y){
-  return ((y * (y * (-3.0 * y + 4.0) + 1.0)) * 0.5);}
-inline float delta_l2_1(float y){
-  return ((y * y * (y - 1.0)) * 0.5);}
-
-
-inline float alpha_l2_2(float y){
-  return ((y * (y * (y * (y * (2.0 * y - 5.0) + 3.0) + 1.0) - 1.0)) * 0.5);}
-inline float beta_l2_2(float y){
-  return ((y * y * (y * (y * (-6.0 * y + 15.0) - 9.0) - 2.0) + 2.0) * 0.5);}
-inline float gamma_l2_2(float y){
-  return ((y * (y * (y * (y * (6.0 * y - 15.0) + 9.0) + 1.0) + 1.0)) * 0.5);}
-inline float delta_l2_2(float y){
-  return ((y * y * y * (y * (-2.0 * y + 5.0) - 3.0)) * 0.5);}
-
-
-inline float alpha_l2_3(float y){
-  return ((y * (y * (y * y * (y * (y * (-6.0 * y + 21.0) - 25.0) + 10.0) + 1.0) - 1.0)) * 0.5);}
-inline float beta_l2_3(float y){
-  return ((y * y * (y * y * (y * (y * (18.0 * y - 63.0) + 75.0) - 30.0) - 2.0) + 2.0) * 0.5);}
-inline float gamma_l2_3(float y){
-  return ((y * (y * (y * y * (y * (y * (-18.0 * y + 63.0) - 75.0) + 30.0) + 1.0) + 1.0)) * 0.5);}
-inline float delta_l2_3(float y){
-  return ((y * y * y * y * (y * (y * (6.0 * y - 21.0) + 25.0) - 10.0)) * 0.5);}
-
-
-inline float alpha_l2_4(float y){
-  return ((y * (y * (y * y * y * (y * (y * (y * (20.0 * y - 90.0) + 154.0) - 119.0) + 35.0) + 1.0) - 1.0)) * 0.5);}
-inline float beta_l2_4(float y){
-  return ((y * y * (y * y * y * (y * (y * (y * (-60.0 * y + 270.0) - 462.0) + 357.0) - 105.0) - 2.0) + 2.0) * 0.5);}
-inline float gamma_l2_4(float y){
-  return ((y * (y * (y * y * y * (y * (y * (y * (60.0 * y - 270.0) + 462.0) - 357.0) + 105.0) + 1.0) + 1.0)) * 0.5);}
-inline float delta_l2_4(float y){
-  return ((y * y * y * y * y * (y * (y * (y * (-20.0 * y + 90.0) - 154.0) + 119.0) - 35.0)) * 0.5);}
-
-
-inline float alpha_l4_2(float y){
-  return ((y * (y * (y * (y * (-5.0 * y + 13.0) - 9.0) - 1.0) + 2.0)) * 0.041666666666666664);}
-inline float beta_l4_2(float y){
-  return ((y * (y * (y * (y * (25.0 * y - 64.0) + 39.0) + 16.0) - 16.0)) * 0.041666666666666664);}
-inline float gamma_l4_2(float y){
-  return ((y * y * (y * (y * (-50.0 * y + 126.0) - 70.0) - 30.0) + 24.0) * 0.041666666666666664);}
-inline float delta_l4_2(float y){
-  return ((y * (y * (y * (y * (50.0 * y - 124.0) + 66.0) + 16.0) + 16.0)) * 0.041666666666666664);}
-inline float eta_l4_2(float y){
-  return ((y * (y * (y * (y * (-25.0 * y + 61.0) - 33.0) - 1.0) - 2.0)) * 0.041666666666666664);}
-inline float zeta_l4_2(float y){
-  return ((y * y * y * (y * (5.0 * y - 12.0) + 7.0)) * 0.041666666666666664);}
-
-
-inline float alpha_l4_3(float y){
-  return ((y * (y * (y * (y * (y * (y * (14.0 * y - 49.0) + 58.0) - 22.0) - 2.0) - 1.0) + 2.0)) * 0.041666666666666664);}
-inline float beta_l4_3(float y){
-  return ((y * (y * (y * (y * (y * (y * (-70.0 * y + 245.0) - 290.0) + 111.0) + 4.0) + 16.0) - 16.0)) * 0.041666666666666664);}
-inline float gamma_l4_3(float y){
-  return ((y * y * (y * y * (y * (y * (140.0 * y - 490.0) + 580.0) - 224.0) - 30.0) + 24.0) * 0.041666666666666664);}
-inline float delta_l4_3(float y){
-  return ((y * (y * (y * (y * (y * (y * (-140.0 * y + 490.0) - 580.0) + 226.0) - 4.0) + 16.0) + 16.0)) * 0.041666666666666664);}
-inline float eta_l4_3(float y){
-  return ((y * (y * (y * (y * (y * (y * (70.0 * y - 245.0) + 290.0) - 114.0) + 2.0) - 1.0) - 2.0)) * 0.041666666666666664);}
-inline float zeta_l4_3(float y){
-  return ((y * y * y * y * (y * (y * (-14.0 * y + 49.0) - 58.0) + 23.0)) * 0.041666666666666664);}
-
-
-inline float alpha_l4_4(float y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (-46.0 * y + 207.0) - 354.0) + 273.0) - 80.0) + 1.0) - 2.0) - 1.0) + 2.0)) * 0.041666666666666664);}
-inline float beta_l4_4(float y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (230.0 * y - 1035.0) + 1770.0) - 1365.0) + 400.0) - 4.0) + 4.0) + 16.0) - 16.0)) * 0.041666666666666664);}
-inline float gamma_l4_4(float y){
-  return ((y * y * (y * y * (y * (y * (y * (y * (-460.0 * y + 2070.0) - 3540.0) + 2730.0) - 800.0) + 6.0) - 30.0) + 24.0) * 0.041666666666666664);}
-inline float delta_l4_4(float y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (460.0 * y - 2070.0) + 3540.0) - 2730.0) + 800.0) - 4.0) - 4.0) + 16.0) + 16.0)) * 0.041666666666666664);}
-inline float eta_l4_4(float y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (-230.0 * y + 1035.0) - 1770.0) + 1365.0) - 400.0) + 1.0) + 2.0) - 1.0) - 2.0)) * 0.041666666666666664);}
-inline float zeta_l4_4(float y){
-  return ((y * y * y * y * y * (y * (y * (y * (46.0 * y - 207.0) + 354.0) - 273.0) + 80.0)) * 0.041666666666666664);}
-
-
-inline float alpha_M8p(float y){
-  return ((y*(y*(y*(y*(y*(y*(-10.0*y + 21.0) + 28.0) - 105.0) + 70.0) + 35.0) - 56.0) + 17.0) * 0.00029761904761904765);}
-inline float beta_M8p(float y){
-  return ((y*(y*(y*(y*(y*(y*(70.0*y - 175.0) - 140.0) + 770.0) - 560.0) - 350.0) + 504.0) - 102.0) * 0.00029761904761904765);}
-inline float gamma_M8p(float y){
-  return ((y*(y*(y*(y*(y*(y*(-210.0*y + 609.0) + 224.0) - 2135.0) + 910.0) + 2765.0) - 2520.0) + 255.0) * 0.00029761904761904765);}
-inline float delta_M8p(float y){
-  return ((y*y* (y*y* (y*y* (70.0*y - 231.0) + 588.0) - 980.0) + 604.0) * 0.001488095238095238);}
-inline float eta_M8p(float y){
-  return ((y*(y*(y*(y*(y*(y*(-70.0*y+ 259.0) - 84.0) - 427.0) - 182.0)+ 553.0) + 504.0)+ 51.0) * 0.001488095238095238);}
-inline float zeta_M8p(float y){
-  return ((y*(y*(y*(y*(y*(y*(210.0*y- 861.0) + 532.0) + 770.0) + 560.0) - 350.0) - 504.0) - 102.0) * 0.00029761904761904765);}
-inline float theta_M8p(float y){
-  return ((y* (y* (y* (y* (y* (y* (-70.0* y+ 315.0) -280.0) -105.0) -70.0) +35.0)+ 56.0) +17.0) * 0.00029761904761904765);}
-inline float iota_M8p(float y){
-  return ((y * y * y * y * y * (y * (10.0 * y - 49.0) + 56.0)) * 0.00029761904761904765);}
-
-
-inline float alpha_l6_3(float y){
-  return ((y * (y * (y * (y * (y * (y * (-89.0 * y + 312.0) - 370.0) + 140.0) + 15.0) + 4.0) - 12.0)) * 0.001388888888888889);}
-inline float beta_l6_3(float y){
-  return ((y * (y * (y * (y * (y * (y * (623.0 * y - 2183.0) + 2581.0) - 955.0) - 120.0) - 54.0) + 108.0)) * 0.001388888888888889);}
-inline float gamma_l6_3(float y){
-  return ((y * (y * (y * (y * (y * (y * (-1869.0 * y + 6546.0) - 7722.0) + 2850.0) + 195.0) + 540.0) - 540.0)) * 0.001388888888888889);}
-inline float delta_l6_3(float y){
-  return ((y * y * (y * y * (y * (y * (3115.0 * y - 10905.0) + 12845.0) - 4795.0) - 980.0) + 720.0) * 0.001388888888888889);}
-inline float eta_l6_3(float y){
-  return ((y * (y * (y * (y * (y * (y * (-3115.0 * y + 10900.0) - 12830.0) + 4880.0) - 195.0) + 540.0) + 540.0)) * 0.001388888888888889);}
-inline float zeta_l6_3(float y){
-  return ((y * (y * (y * (y * (y * (y * (1869.0 * y - 6537.0) + 7695.0) - 2985.0) + 120.0) - 54.0) - 108.0)) * 0.001388888888888889);}
-inline float theta_l6_3(float y){
-  return ((y * (y * (y * (y * (y * (y * (-623.0 * y + 2178.0) - 2566.0) + 1010.0) - 15.0) + 4.0) + 12.0)) * 0.001388888888888889);}
-inline float iota_l6_3(float y){
-  return ((y * y * y * y * (y * (y * (89.0 * y - 311.0) + 367.0) - 145.0)) * 0.001388888888888889);}
-
-
-inline float alpha_l6_4(float y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (290.0 * y - 1305.0) + 2231.0) - 1718.0) + 500.0) - 5.0) + 15.0) + 4.0) - 12.0)) * 0.001388888888888889);}
-inline float beta_l6_4(float y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (-2030.0 * y + 9135.0) - 15617.0) + 12027.0) - 3509.0) + 60.0) - 120.0) - 54.0) + 108.0)) * 0.001388888888888889);}
-inline float gamma_l6_4(float y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (6090.0 * y - 27405.0) + 46851.0) - 36084.0) + 10548.0) - 195.0) + 195.0) + 540.0) - 540.0)) * 0.001388888888888889);}
-inline float delta_l6_4(float y){
-  return ((y * y * (y * y * (y * (y * (y * (y * (-10150.0 * y + 45675.0) - 78085.0) + 60145.0) - 17605.0) + 280.0) - 980.0) + 720.0) * 0.001388888888888889);}
-inline float eta_l6_4(float y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (10150.0 * y - 45675.0) + 78085.0) - 60150.0) + 17620.0) - 195.0) - 195.0) + 540.0) + 540.0)) * 0.001388888888888889);}
-inline float zeta_l6_4(float y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (-6090.0 * y + 27405.0) - 46851.0) + 36093.0) - 10575.0) + 60.0) + 120.0) - 54.0) - 108.0)) * 0.001388888888888889);}
-inline float theta_l6_4(float y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (2030.0 * y - 9135.0) + 15617.0) - 12032.0) + 3524.0) - 5.0) - 15.0) + 4.0) + 12.0)) * 0.001388888888888889);}
-inline float iota_l6_4(float y){
-  return ((y * y * y * y * y * (y * (y * (y * (-290.0 * y + 1305.0) - 2231.0) + 1719.0) - 503.0)) * 0.001388888888888889);}
-
-
-inline float alpha_l6_5(float y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (-1006.0 * y + 5533.0) - 12285.0) + 13785.0) - 7829.0) + 1803.0) - 3.0) - 5.0) + 15.0) + 4.0) - 12.0)) * 0.001388888888888889);}
-inline float beta_l6_5(float y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (7042.0 * y - 38731.0) + 85995.0) - 96495.0) + 54803.0) - 12620.0) + 12.0) + 60.0) - 120.0) - 54.0) + 108.0)) * 0.001388888888888889);}
-inline float gamma_l6_5(float y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (-21126.0 * y + 116193.0) - 257985.0) + 289485.0) - 164409.0) + 37857.0) - 15.0) - 195.0) + 195.0) + 540.0) - 540.0)) * 0.001388888888888889);}
-inline float delta_l6_5(float y){
-  return ((y * y * (y * y * (y * y * (y * (y * (y * (y * (35210.0 * y - 193655.0) + 429975.0) - 482475.0) + 274015.0) - 63090.0) + 280.0) - 980.0) + 720.0) * 0.001388888888888889);}
-inline float eta_l6_5(float y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (-35210.0 * y + 193655.0) - 429975.0) + 482475.0) - 274015.0) + 63085.0) + 15.0) - 195.0) - 195.0) + 540.0) + 540.0)) * 0.001388888888888889);}
-inline float zeta_l6_5(float y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (21126.0 * y - 116193.0) + 257985.0) - 289485.0) + 164409.0) - 37848.0) - 12.0) + 60.0) + 120.0) - 54.0) - 108.0)) * 0.001388888888888889);}
-inline float theta_l6_5(float y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (-7042.0 * y + 38731.0) - 85995.0) + 96495.0) - 54803.0) + 12615.0) + 3.0) - 5.0) - 15.0) + 4.0) + 12.0)) * 0.001388888888888889);}
-inline float iota_l6_5(float y){
-  return ((y * y * y * y * y * y * (y * (y * (y * (y * (1006.0 * y - 5533.0) + 12285.0) - 13785.0) + 7829.0) - 1802.0)) * 0.001388888888888889);}
-
-
-inline float alpha_l6_6(float y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (3604.0 * y - 23426.0) + 63866.0) - 93577.0) + 77815.0) - 34869.0) + 6587.0) + 1.0) - 3.0) - 5.0) + 15.0) + 4.0) - 12.0)) * 0.001388888888888889);}
-inline float beta_l6_6(float y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (-25228.0 * y + 163982.0) - 447062.0) + 655039.0) - 544705.0) + 244083.0) - 46109.0) - 6.0) + 12.0) + 60.0) - 120.0) - 54.0) + 108.0)) * 0.001388888888888889);}
-inline float gamma_l6_6(float y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (75684.0 * y - 491946.0) + 1341186.0) - 1965117.0) + 1634115.0) - 732249.0) + 138327.0) + 15.0) - 15.0) - 195.0) + 195.0) + 540.0) - 540.0)) * 0.001388888888888889);}
-inline float delta_l6_6(float y){
-  return ((y * y * (y * y * (y * y * (y * (y * (y * (y * (y * (y * (-126140.0 * y + 819910.0) - 2235310.0) + 3275195.0) - 2723525.0) + 1220415.0) - 230545.0) - 20.0) + 280.0) - 980.0) + 720.0) * 0.001388888888888889);}
-inline float eta_l6_6(float y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (126140.0 * y - 819910.0) + 2235310.0) - 3275195.0) + 2723525.0) - 1220415.0) + 230545.0) + 15.0) + 15.0) - 195.0) - 195.0) + 540.0) + 540.0)) * 0.001388888888888889);}
-inline float zeta_l6_6(float y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (-75684.0 * y + 491946.0) - 1341186.0) + 1965117.0) - 1634115.0) + 732249.0) - 138327.0) - 6.0) - 12.0) + 60.0) + 120.0) - 54.0) - 108.0)) * 0.001388888888888889);}
-inline float theta_l6_6(float y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (y * (25228.0 * y - 163982.0) + 447062.0) - 655039.0) + 544705.0) - 244083.0) + 46109.0) + 1.0) + 3.0) - 5.0) - 15.0) + 4.0) + 12.0)) * 0.001388888888888889);}
-inline float iota_l6_6(float y){
-  return ((y * y * y * y * y * y * y * (y * (y * (y * (y * (y * (-3604.0 * y + 23426.0) - 63866.0) + 93577.0) - 77815.0) + 34869.0) - 6587.0)) * 0.001388888888888889);}
-
-
-inline float alpha_l8_4(float y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (-3569.0 * y + 16061.0) - 27454.0) + 21126.0) - 6125.0) + 49.0) - 196.0) - 36.0) + 144.0)) * 2.48015873015873e-05);}
-inline float beta_l8_4(float y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (32121.0 * y - 144548.0) + 247074.0) - 190092.0) + 55125.0) - 672.0) + 2016.0) + 512.0) - 1536.0)) * 2.48015873015873e-05);}
-inline float gamma_l8_4(float y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (-128484.0 * y + 578188.0) - 988256.0) + 760312.0) - 221060.0) + 4732.0) - 9464.0) - 4032.0) + 8064.0)) * 2.48015873015873e-05);}
-inline float delta_l8_4(float y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (299796.0 * y - 1349096.0) + 2305856.0) - 1774136.0) + 517580.0) - 13664.0) + 13664.0) + 32256.0) - 32256.0)) * 2.48015873015873e-05);}
-inline float eta_l8_4(float y){
-  return ((y * y * (y * y * (y * (y * (y * (y * (-449694.0 * y + 2023630.0) - 3458700.0) + 2661540.0) - 778806.0) + 19110.0) - 57400.0) + 40320.0) * 2.48015873015873e-05);}
-inline float zeta_l8_4(float y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (449694.0 * y - 2023616.0) + 3458644.0) - 2662016.0) + 780430.0) - 13664.0) - 13664.0) + 32256.0) + 32256.0)) * 2.48015873015873e-05);}
-inline float theta_l8_4(float y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (-299796.0 * y + 1349068.0) - 2305744.0) + 1775032.0) - 520660.0) + 4732.0) + 9464.0) - 4032.0) - 8064.0)) * 2.48015873015873e-05);}
-inline float iota_l8_4(float y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (128484.0 * y - 578168.0) + 988176.0) - 760872.0) + 223020.0) - 672.0) - 2016.0) + 512.0) + 1536.0)) * 2.48015873015873e-05);}
-inline float kappa_l8_4(float y){
-  return ((y * (y * (y * (y * (y * (y * (y * (y * (-32121.0 * y + 144541.0) - 247046.0) + 190246.0) - 55685.0) + 49.0) + 196.0) - 36.0) - 144.0)) * 2.48015873015873e-05);}
-inline float mu_l8_4(float y){
-  return ((y * y * y * y * y * (y * (y * (y * (3569.0 * y - 16060.0) + 27450.0) - 21140.0) + 6181.0)) * 2.48015873015873e-05);}
diff --git a/hysop/old/gpu.old/cl_src/remeshing/weights_noVec_builtin.cl b/hysop/old/gpu.old/cl_src/remeshing/weights_noVec_builtin.cl
deleted file mode 100644
index 4c0e124803bf7a58c73fa796ccda0f0c18e919ba..0000000000000000000000000000000000000000
--- a/hysop/old/gpu.old/cl_src/remeshing/weights_noVec_builtin.cl
+++ /dev/null
@@ -1,198 +0,0 @@
-/**
- * @file weights_noVec_builtin.cl
- * Remeshing formulas, vectorized version, use of builtin OpenCL fma.
- * Polynomials under Horner form.
- */
-
-inline float alpha_l2_1(float y){
-  return (y*fma(y,fma(y,-1.0, 2.0), - 1.0) * 0.5);}
-inline float beta_l2_1(float y){
-  return (fma(y*y, fma(y, 3.0, -5.0), 2.0) * 0.5);}
-inline float gamma_l2_1(float   y){
-  return ((y * fma(y , fma(-3.0, y, 4.0), 1.0)) * 0.5);}
-inline float delta_l2_1(float y){
-  return ((y * y * fma(1.0, y, - 1.0)) * 0.5);}
-
-
-inline float alpha_l2_2(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, 2.0, -5.0), 3.0), 1.0), -1.0)) * 0.5);}
-inline float beta_l2_2(float y){
-  return (fma(y * y, fma(y, fma(y, fma(y, -6.0, 15.0), -9.0), -2.0), 2.0) * 0.5);}
-inline float gamma_l2_2(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, 6.0, -15.0), 9.0), 1.0), 1.0)) * 0.5);}
-inline float delta_l2_2(float y){
-  return ((y * y * y * fma(y, fma(y, -2.0, 5.0), -3.0)) * 0.5);}
-
-
-inline float alpha_l2_3(float y){
-  return ((y * fma(y, fma(y * y, fma(y, fma(y, fma(y, -6.0, 21.0), -25.0), 10.0), 1.0), -1.0)) * 0.5);}
-inline float beta_l2_3(float y){
-  return (fma(y * y, fma(y * y, fma(y, fma(y, fma(y, 18.0, -63.0), 75.0), -30.0), -2.0), 2.0) * 0.5);}
-inline float gamma_l2_3(float y){
-  return ((y * fma(y, fma(y * y, fma(y, fma(y, fma(y, -18.0, 63.0), -75.0), 30.0), 1.0), 1.0)) * 0.5);}
-inline float delta_l2_3(float y){
-  return ((y * y * y * y * fma(y, fma(y, fma(y, 6.0, -21.0), 25.0), -10.0)) * 0.5);}
-
-
-inline float alpha_l2_4(float y){
-  return ((y * fma(y, fma(y * y * y, fma(y, fma(y, fma(y, fma(y, 20.0, -90.0), 154.0), -119.0), 35.0), 1.0), -1.0)) * 0.5);}
-inline float beta_l2_4(float y){
-  return (fma(y * y, fma(y * y * y, fma(y, fma(y, fma(y, fma(y, -60.0, 270.0), -462.0), 357.0), -105.0), -2.0), 2.0) * 0.5);}
-inline float gamma_l2_4(float y){
-  return ((y * fma(y, fma(y * y * y, fma(y, fma(y, fma(y, fma(y, 60.0, -270.0), 462.0), -357.0), 105.0), 1.0), 1.0)) * 0.5);}
-inline float delta_l2_4(float y){
-  return ((y * y * y * y * y * fma(y, fma(y, fma(y, fma(y, -20.0, 90.0), -154.0), 119.0), -35.0)) * 0.5);}
-
-
-inline float alpha_l4_2(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, -5.0, 13.0), -9.0), -1.0), 2.0)) * 0.041666666666666664);}
-inline float beta_l4_2(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, 25.0, -64.0), 39.0), 16.0), -16.0)) * 0.041666666666666664);}
-inline float gamma_l4_2(float y){
-  return (fma(y * y, fma(y, fma(y, fma(y, -50.0, 126.0), -70.0), -30.0), 24.0) * 0.041666666666666664);}
-inline float delta_l4_2(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, 50.0, -124.0), 66.0), 16.0), 16.0)) * 0.041666666666666664);}
-inline float eta_l4_2(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, -25.0, 61.0), -33.0), -1.0), -2.0)) * 0.041666666666666664);}
-inline float zeta_l4_2(float y){
-  return ((y * y * y * fma(y, fma(y, 5.0, -12.0), 7.0)) * 0.041666666666666664);}
-
-
-inline float alpha_l4_3(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, 14.0, -49.0), 58.0), -22.0), -2.0), -1.0), 2.0)) * 0.041666666666666664);}
-inline float beta_l4_3(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -70.0, 245.0), -290.0), 111.0), 4.0), 16.0), -16.0)) * 0.041666666666666664);}
-inline float gamma_l4_3(float y){
-  return (fma(y * y, fma(y * y, fma(y, fma(y, fma(y, 140.0, -490.0), 580.0), -224.0), -30.0), 24.0) * 0.041666666666666664);}
-inline float delta_l4_3(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -140.0, 490.0), -580.0), 226.0), -4.0), 16.0), 16.0)) * 0.041666666666666664);}
-inline float eta_l4_3(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, 70.0, -245.0), 290.0), -114.0), 2.0), -1.0), -2.0)) * 0.041666666666666664);}
-inline float zeta_l4_3(float y){
-  return ((y * y * y * y * fma(y, fma(y, fma(y, -14.0, 49.0), -58.0), 23.0)) * 0.041666666666666664);}
-
-
-inline float alpha_l4_4(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -46.0, 207.0), -354.0), 273.0), -80.0), 1.0), -2.0), -1.0), 2.0)) * 0.041666666666666664);}
-inline float beta_l4_4(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, 230.0, -1035.0), 1770.0), -1365.0), 400.0), -4.0), 4.0), 16.0), -16.0)) * 0.041666666666666664);}
-inline float gamma_l4_4(float y){
-  return (fma(y * y, fma(y * y, fma(y, fma(y, fma(y, fma(y, fma(y, -460.0, 2070.0), -3540.0), 2730.0), -800.0), 6.0), -30.0), 24.0) * 0.041666666666666664);}
-inline float delta_l4_4(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, 460.0, -2070.0), 3540.0), -2730.0), 800.0), -4.0), -4.0), 16.0), 16.0)) * 0.041666666666666664);}
-inline float eta_l4_4(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -230.0, 1035.0), -1770.0), 1365.0), -400.0), 1.0), 2.0), -1.0), -2.0)) * 0.041666666666666664);}
-inline float zeta_l4_4(float y){
-  return ((y * y * y * y * y * fma(y, fma(y, fma(y, fma(y, 46.0, -207.0), 354.0), -273.0), 80.0)) * 0.041666666666666664);}
-
-
-inline float alpha_M8p(float y){
-  return (fma(y,fma(y,fma(y,fma(y,fma(y,fma(y,fma(-10.0,y, + 21.0), + 28.0), - 105.0), + 70.0), + 35.0), - 56.0), + 17.0) * 0.00029761904761904765);}
-inline float beta_M8p(float y){
-  return (fma(y,fma(y,fma(y,fma(y,fma(y,fma(y,fma(70.0,y, - 175.0), - 140.0), + 770.0), - 560.0), - 350.0), + 504.0), - 102.0) * 0.00029761904761904765);}
-inline float gamma_M8p(float y){
-  return (fma(y,fma(y,fma(y,fma(y,fma(y,fma(y,fma(-210.0,y, + 609.0), + 224.0), - 2135.0), + 910.0), + 2765.0), - 2520.0), + 255.0) * 0.00029761904761904765);}
-inline float delta_M8p(float y){
-  return (fma(y*y, fma(y*y, fma(y*y, fma(70.0,y, - 231.0), + 588.0), - 980.0), + 604.0) * 0.001488095238095238);}
-inline float eta_M8p(float y){
-  return (fma(y,fma(y,fma(y,fma(y,fma(y,fma(y,fma(-70.0,y, 259.0), - 84.0), - 427.0), - 182.0), + 553.0), + 504.0), + 51.0) * 0.001488095238095238);}
-inline float zeta_M8p(float y){
-  return (fma(y,fma(y,fma(y,fma(y,fma(y,fma(y,fma(210.0,y,- 861.0), + 532.0), + 770.0), + 560.0), - 350.0), - 504.0), - 102.0) * 0.00029761904761904765);}
-inline float theta_M8p(float y){
-  return (fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(-70.0, y, 315.0), -280.0), -105.0), -70.0), 35.0), 56.0), 17.0) * 0.00029761904761904765);}
-inline float iota_M8p(float y){
-  return ((y * y * y * y * y * fma(y , fma(10.0 , y ,- 49.0) , 56.0)) * 0.00029761904761904765);}
-
-
-inline float alpha_l6_3(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -89.0, 312.0), -370.0), 140.0), 15.0), 4.0), -12.0)) * 0.001388888888888889);}
-inline float beta_l6_3(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, 623.0, -2183.0), 2581.0), -955.0), -120.0), -54.0), 108.0)) * 0.001388888888888889);}
-inline float gamma_l6_3(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -1869.0, 6546.0), -7722.0), 2850.0), 195.0), 540.0), -540.0)) * 0.001388888888888889);}
-inline float delta_l6_3(float y){
-  return (fma(y * y, fma(y * y, fma(y, fma(y, fma(y, 3115.0, -10905.0), 12845.0), -4795.0), -980.0), 720.0) * 0.001388888888888889);}
-inline float eta_l6_3(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -3115.0, 10900.0), -12830.0), 4880.0), -195.0), 540.0), 540.0)) * 0.001388888888888889);}
-inline float zeta_l6_3(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, 1869.0, -6537.0), 7695.0), -2985.0), 120.0), -54.0), -108.0)) * 0.001388888888888889);}
-inline float theta_l6_3(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -623.0, 2178.0), -2566.0), 1010.0), -15.0), 4.0), 12.0)) * 0.001388888888888889);}
-inline float iota_l6_3(float y){
-  return ((y * y * y * y * fma(y, fma(y, fma(y, 89.0, -311.0), 367.0), -145.0)) * 0.001388888888888889);}
-
-
-inline float alpha_l6_4(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, 290.0, -1305.0), 2231.0), -1718.0), 500.0), -5.0), 15.0), 4.0), -12.0)) * 0.001388888888888889);}
-inline float beta_l6_4(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -2030.0, 9135.0), -15617.0), 12027.0), -3509.0), 60.0), -120.0), -54.0), 108.0)) * 0.001388888888888889);}
-inline float gamma_l6_4(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, 6090.0, -27405.0), 46851.0), -36084.0), 10548.0), -195.0), 195.0), 540.0), -540.0)) * 0.001388888888888889);}
-inline float delta_l6_4(float y){
-  return (fma(y * y, fma(y * y, fma(y, fma(y, fma(y, fma(y, fma(y, -10150.0, 45675.0), -78085.0), 60145.0), -17605.0), 280.0), -980.0), 720.0) * 0.001388888888888889);}
-inline float eta_l6_4(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, 10150.0, -45675.0), 78085.0), -60150.0), 17620.0), -195.0), -195.0), 540.0), 540.0)) * 0.001388888888888889);}
-inline float zeta_l6_4(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -6090.0, 27405.0), -46851.0), 36093.0), -10575.0), 60.0), 120.0), -54.0), -108.0)) * 0.001388888888888889);}
-inline float theta_l6_4(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, 2030.0, -9135.0), 15617.0), -12032.0), 3524.0), -5.0), -15.0), 4.0), 12.0)) * 0.001388888888888889);}
-inline float iota_l6_4(float y){
-  return ((y * y * y * y * y * fma(y, fma(y, fma(y, fma(y, -290.0, 1305.0), -2231.0), 1719.0), -503.0)) * 0.001388888888888889);}
-
-
-inline float alpha_l6_5(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -1006.0, 5533.0), -12285.0), 13785.0), -7829.0), 1803.0), -3.0), -5.0), 15.0), 4.0), -12.0)) * 0.001388888888888889);}
-inline float beta_l6_5(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, 7042.0, -38731.0), 85995.0), -96495.0), 54803.0), -12620.0), 12.0), 60.0), -120.0), -54.0), 108.0)) * 0.001388888888888889);}
-inline float gamma_l6_5(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -21126.0, 116193.0), -257985.0), 289485.0), -164409.0), 37857.0), -15.0), -195.0), 195.0), 540.0), -540.0)) * 0.001388888888888889);}
-inline float delta_l6_5(float y){
-  return (fma(y * y, fma(y * y, fma(y * y, fma(y, fma(y, fma(y, fma(y, fma(y, 35210.0, -193655.0), 429975.0), -482475.0), 274015.0), -63090.0), 280.0), -980.0), 720.0) * 0.001388888888888889);}
-inline float eta_l6_5(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -35210.0, 193655.0), -429975.0), 482475.0), -274015.0), 63085.0), 15.0), -195.0), -195.0), 540.0), 540.0)) * 0.001388888888888889);}
-inline float zeta_l6_5(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, 21126.0, -116193.0), 257985.0), -289485.0), 164409.0), -37848.0), -12.0), 60.0), 120.0), -54.0), -108.0)) * 0.001388888888888889);}
-inline float theta_l6_5(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -7042.0, 38731.0), -85995.0), 96495.0), -54803.0), 12615.0), 3.0), -5.0), -15.0), 4.0), 12.0)) * 0.001388888888888889);}
-inline float iota_l6_5(float y){
-  return ((y * y * y * y * y * y * fma(y, fma(y, fma(y, fma(y, fma(y, 1006.0, -5533.0), 12285.0), -13785.0), 7829.0), -1802.0)) * 0.001388888888888889);}
-
-
-inline float alpha_l6_6(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, 3604.0, -23426.0), 63866.0), -93577.0), 77815.0), -34869.0), 6587.0), 1.0), -3.0), -5.0), 15.0), 4.0), -12.0)) * 0.001388888888888889);}
-inline float beta_l6_6(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -25228.0, 163982.0), -447062.0), 655039.0), -544705.0), 244083.0), -46109.0), -6.0), 12.0), 60.0), -120.0), -54.0), 108.0)) * 0.001388888888888889);}
-inline float gamma_l6_6(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, 75684.0, -491946.0), 1341186.0), -1965117.0), 1634115.0), -732249.0), 138327.0), 15.0), -15.0), -195.0), 195.0), 540.0), -540.0)) * 0.001388888888888889);}
-inline float delta_l6_6(float y){
-  return (fma(y * y, fma(y * y, fma(y * y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -126140.0, 819910.0), -2235310.0), 3275195.0), -2723525.0), 1220415.0), -230545.0), -20.0), 280.0), -980.0), 720.0) * 0.001388888888888889);}
-inline float eta_l6_6(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, 126140.0, -819910.0), 2235310.0), -3275195.0), 2723525.0), -1220415.0), 230545.0), 15.0), 15.0), -195.0), -195.0), 540.0), 540.0)) * 0.001388888888888889);}
-inline float zeta_l6_6(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -75684.0, 491946.0), -1341186.0), 1965117.0), -1634115.0), 732249.0), -138327.0), -6.0), -12.0), 60.0), 120.0), -54.0), -108.0)) * 0.001388888888888889);}
-inline float theta_l6_6(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, 25228.0, -163982.0), 447062.0), -655039.0), 544705.0), -244083.0), 46109.0), 1.0), 3.0), -5.0), -15.0), 4.0), 12.0)) * 0.001388888888888889);}
-inline float iota_l6_6(float y){
-  return ((y * y * y * y * y * y * y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -3604.0, 23426.0), -63866.0), 93577.0), -77815.0), 34869.0), -6587.0)) * 0.001388888888888889);}
-
-
-inline float alpha_l8_4(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -3569.0, 16061.0), -27454.0), 21126.0), -6125.0), 49.0), -196.0), -36.0), 144.0)) * 2.48015873015873e-05);}
-inline float beta_l8_4(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, 32121.0, -144548.0), 247074.0), -190092.0), 55125.0), -672.0), 2016.0), 512.0), -1536.0)) * 2.48015873015873e-05);}
-inline float gamma_l8_4(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -128484.0, 578188.0), -988256.0), 760312.0), -221060.0), 4732.0), -9464.0), -4032.0), 8064.0)) * 2.48015873015873e-05);}
-inline float delta_l8_4(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, 299796.0, -1349096.0), 2305856.0), -1774136.0), 517580.0), -13664.0), 13664.0), 32256.0), -32256.0)) * 2.48015873015873e-05);}
-inline float eta_l8_4(float y){
-  return (fma(y * y, fma(y * y, fma(y, fma(y, fma(y, fma(y, fma(y, -449694.0, 2023630.0), -3458700.0), 2661540.0), -778806.0), 19110.0), -57400.0), 40320.0) * 2.48015873015873e-05);}
-inline float zeta_l8_4(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, 449694.0, -2023616.0), 3458644.0), -2662016.0), 780430.0), -13664.0), -13664.0), 32256.0), 32256.0)) * 2.48015873015873e-05);}
-inline float theta_l8_4(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -299796.0, 1349068.0), -2305744.0), 1775032.0), -520660.0), 4732.0), 9464.0), -4032.0), -8064.0)) * 2.48015873015873e-05);}
-inline float iota_l8_4(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, 128484.0, -578168.0), 988176.0), -760872.0), 223020.0), -672.0), -2016.0), 512.0), 1536.0)) * 2.48015873015873e-05);}
-inline float kappa_l8_4(float y){
-  return ((y * fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, fma(y, -32121.0, 144541.0), -247046.0), 190246.0), -55685.0), 49.0), 196.0), -36.0), -144.0)) * 2.48015873015873e-05);}
-inline float mu_l8_4(float y){
-  return ((y * y * y * y * y * fma(y, fma(y, fma(y, fma(y, 3569.0, -16060.0), 27450.0), -21140.0), 6181.0)) * 2.48015873015873e-05);}
diff --git a/hysop/old/gpu.old/config_cayman.py b/hysop/old/gpu.old/config_cayman.py
deleted file mode 100644
index af7389adc1bd44146983ad65d231915a905f44fc..0000000000000000000000000000000000000000
--- a/hysop/old/gpu.old/config_cayman.py
+++ /dev/null
@@ -1,176 +0,0 @@
-"""
-@file config_cayman.py
-
-OpenCL kernels configurations.
-"""
-from hysop.constants import np
-FLOAT_GPU, DOUBLE_GPU = np.float32, np.float64
-
-#build empty dictionaries
-kernels_config = {}
-kernels_config[2] = {FLOAT_GPU: {}, DOUBLE_GPU: {}}
-kernels_config[3] = {FLOAT_GPU: {}, DOUBLE_GPU: {}}
-
-# Copy kernel:
-def copy_space_index_2d(size, t_dim, b_rows, vec):
-    gwi = (int(size[0] / vec), int(b_rows * size[1] / t_dim), 1)
-    lwi = (t_dim / vec, b_rows, 1)
-    return gwi, lwi
-def copy_space_index_3d(size, t_dim, b_rows, vec):
-    gwi = (int(size[0] / vec), int(b_rows * size[1] / t_dim), int(size[2]))
-    lwi = (t_dim / vec, b_rows, 1)
-    return gwi, lwi
-# Configs : sources, tile size, block rows, vector size, index space function
-kernels_config[3][FLOAT_GPU]['copy'] = \
-    ('kernels/copy.cl', 16, 8, 4, copy_space_index_3d)
-kernels_config[3][DOUBLE_GPU]['copy'] = \
-    ('kernels/copy_locMem.cl', 32, 8, 1, copy_space_index_3d)
-kernels_config[2][FLOAT_GPU]['copy'] = \
-    ('kernels/copy.cl', 16, 8, 2, copy_space_index_2d)
-kernels_config[2][DOUBLE_GPU]['copy'] = \
-    ('kernels/copy.cl', 32, 2, 2, copy_space_index_2d)
-
-# Transpositions kernels:
-# XY transposition
-# Settings are taken from destination layout as current layout.
-# gwi is computed form input layout (appears as transposed layout)
-def xy_space_index_2d(size, t_dim, b_rows, vec):
-    gwi = (int(size[1] / vec), int(b_rows * size[0] / t_dim), 1)
-    lwi = (t_dim / vec, b_rows, 1)
-    return gwi, lwi
-def xy_space_index_3d(size, t_dim, b_rows, vec):
-    gwi = (int(size[1] / vec), int(b_rows * size[0] / t_dim), int(size[2]))
-    lwi = (t_dim / vec, b_rows, 1)
-    return gwi, lwi
-# Configs : sources, tile size, block rows, is padding, vector size,
-#              index space function
-kernels_config[3][FLOAT_GPU]['transpose_xy'] = \
-    ('kernels/transpose_xy.cl', 16, 8, True, 2, xy_space_index_3d)
-kernels_config[3][DOUBLE_GPU]['transpose_xy'] = \
-    ('kernels/transpose_xy.cl', 32, 4, True, 4, xy_space_index_3d)
-kernels_config[2][FLOAT_GPU]['transpose_xy'] = \
-    ('kernels/transpose_xy.cl', 32, 8, True, 4, xy_space_index_2d)
-kernels_config[2][DOUBLE_GPU]['transpose_xy'] = \
-    ('kernels/transpose_xy.cl', 32, 2, True, 4, xy_space_index_2d)
-
-# XZ transposition
-# Settings are taken from destination layout as current layout.
-# gwi is computed form input layout (appears as transposed layout)
-def xz_space_index_3d(size, t_dim, b_rows, b_deph, vec):
-    gwi = (int(size[2] / vec), int(b_rows * size[1] / t_dim), int(b_deph * size[0] / t_dim))
-    lwi = (t_dim / vec, b_rows, b_deph)
-    return gwi, lwi
-# Configs : sources, tile size, block rows, block depth, is padding,
-#              vector size, index space function
-kernels_config[3][FLOAT_GPU]['transpose_xz'] = \
-    ('kernels/transpose_xz.cl', 16, 4, 4, True, 1, xy_space_index_3d)
-kernels_config[3][DOUBLE_GPU]['transpose_xz'] = \
-    ('kernels/transpose_xz.cl', 8, 2, 2, False, 1, xy_space_index_3d)
-
-
-def computational_kernels_index_space(size, vec):
-    dim = len(size)
-    if dim == 3:
-        wi = 64
-    if dim == 2:
-        wi = 256
-    # Change work-item regarding problem size
-    if size[0] % wi > 0:
-        if dim == 3:
-            print "Warning : GPU best performances obtained for",
-            print "problem sizes multiples of 64"
-        else:
-            print "Warning : GPU best performances obtained for",
-            print "problem sizes multiples of 256"
-    while(size[0] % wi > 0):
-        wi = wi / 2
-    # Change work-item regarding vector_width
-    if wi * vec > size[0]:
-        if size[0] % vec > 0:
-            raise ValueError(
-                "Resolution ({0}) must be a multiple of {1}".format(
-                    size[0], vec))
-        wi = size[0] // vec
-    if dim == 3:
-        gwi = (int(wi), int(size[1]), int(size[2]))
-        lwi = (int(wi), 1, 1)
-    else:
-        gwi = (int(wi), int(size[1]))
-        lwi = (int(wi), 1)
-    return gwi, lwi
-
-# Advection kernel
-# Configs sources, is noBC, vector size, index space function
-kernels_config[3][FLOAT_GPU]['advec'] = \
-    (["common.cl",
-      "advection/velocity_cache.cl", "advection/builtin_RKN.cl",
-      "kernels/advection.cl"],
-     False, 4, computational_kernels_index_space)
-kernels_config[3][DOUBLE_GPU]['advec'] = \
-    (["common.cl",
-      "advection/velocity_cache.cl", "advection/builtin_RKN.cl",
-      "kernels/advection.cl"],
-     False, 2, computational_kernels_index_space)
-kernels_config[2][FLOAT_GPU]['advec'] = \
-    (["common.cl",
-      "advection/velocity_cache.cl", "advection/builtin_RKN.cl",
-      "kernels/advection.cl"],
-     False, 4, computational_kernels_index_space)
-kernels_config[2][DOUBLE_GPU]['advec'] = \
-    (["common.cl",
-      "advection/velocity_cache.cl", "advection/builtin_RKN_noVec.cl",
-      "kernels/advection_noVec.cl"],
-     False, 1, computational_kernels_index_space)
-
-# Remeshing kernel
-# Configs sources, is noBC, vector size, index space function
-kernels_config[3][FLOAT_GPU]['remesh'] = \
-    (["common.cl",
-      "remeshing/weights_builtin.cl", "remeshing/private.cl",
-      "kernels/remeshing.cl"],
-     False, 4, computational_kernels_index_space)
-kernels_config[3][DOUBLE_GPU]['remesh'] = \
-    (["common.cl",
-      "remeshing/weights_builtin.cl", "remeshing/private.cl",
-      "kernels/remeshing.cl"],
-     False, 4, computational_kernels_index_space)
-kernels_config[2][FLOAT_GPU]['remesh'] = \
-    (["common.cl",
-      "remeshing/weights_noVec_builtin.cl", "remeshing/basic.cl",
-      "kernels/remeshing.cl"],
-     True, 4, computational_kernels_index_space)
-kernels_config[2][DOUBLE_GPU]['remesh'] = \
-    (["common.cl",
-      "remeshing/weights_noVec_builtin.cl", "remeshing/basic.cl",
-      "kernels/remeshing.cl"],
-     True, 4, computational_kernels_index_space)
-
-# Advection and remeshing kernel
-# Configs sources, is noBC, vector size, index space function
-kernels_config[3][FLOAT_GPU]['advec_and_remesh'] = \
-    (["common.cl",
-      "remeshing/weights_builtin.cl", "remeshing/private.cl",
-      "advection/velocity_cache.cl","advection/builtin_RKN.cl",
-      "kernels/advection_and_remeshing.cl"],
-     False, 4, computational_kernels_index_space)
-kernels_config[3][DOUBLE_GPU]['advec_and_remesh'] = \
-    (["common.cl",
-      "remeshing/weights_builtin.cl", "remeshing/private.cl",
-      "advection/velocity_cache.cl", "advection/builtin_RKN.cl",
-      "kernels/advection_and_remeshing.cl"],
-     True, 4, computational_kernels_index_space)
-kernels_config[2][FLOAT_GPU]['advec_and_remesh'] = \
-    (["common.cl",
-      "remeshing/weights_noVec_builtin.cl", "remeshing/basic.cl",
-      "advection/velocity_cache.cl", "advection/builtin_RKN.cl",
-      "kernels/advection_and_remeshing.cl"],
-     True, 8, computational_kernels_index_space)
-kernels_config[2][DOUBLE_GPU]['advec_and_remesh'] = \
-    (["common.cl",
-      "remeshing/weights_noVec_builtin.cl", "remeshing/basic.cl",
-      "advection/velocity_cache.cl", "advection/builtin_RKN.cl",
-      "kernels/advection_and_remeshing.cl"],
-     True, 4, computational_kernels_index_space)
-
-
-
diff --git a/hysop/old/gpu.old/config_default.py b/hysop/old/gpu.old/config_default.py
deleted file mode 100644
index aaa3f6ebb5541a4f41291a17e04c93e8552eebf1..0000000000000000000000000000000000000000
--- a/hysop/old/gpu.old/config_default.py
+++ /dev/null
@@ -1,272 +0,0 @@
-"""
-@file config_default.py
-
-OpenCL kernels default configurations.
-"""
-from hysop.constants import np
-FLOAT_GPU, DOUBLE_GPU = np.float32, np.float64
-MAX_GWI = (256, 256, 256)
-
-#build empty dictionaries
-kernels_config = {}
-kernels_config[2] = {FLOAT_GPU: {}, DOUBLE_GPU: {}}
-kernels_config[3] = {FLOAT_GPU: {}, DOUBLE_GPU: {}}
-
-def _clamp_max(w, m):
-    while w > m:
-        w /= 2
-    return int(w)
-
-
-def check_max(t_gwi):
-    return tuple([_clamp_max(w, m) for w, m in zip(t_gwi, MAX_GWI)])
-
-## Copy kernel is replaced by copy function from OpenCL API
-# # Copy kernel:
-# def copy_space_index_2d(size, t_dim, b_rows, vec):
-#     gwi = check_max((size[0] / vec, b_rows * size[1] / t_dim, 1))
-#     lwi = (t_dim / vec, b_rows, 1)
-#     blocks_nb = ((size[0] / vec) / lwi[0],
-#                  (b_rows * size[1] / t_dim) / lwi[1], None)
-#     return gwi, lwi, blocks_nb
-# def copy_space_index_3d(size, t_dim, b_rows, vec):
-#     gwi = check_max((size[0] / vec, b_rows * size[1] / t_dim, size[2]))
-#     lwi = (t_dim / vec, b_rows, 1)
-#     blocks_nb = ((size[0] / vec) / lwi[0],
-#                  (b_rows * size[1] / t_dim) / lwi[1], None)
-#     return gwi, lwi, blocks_nb
-# # Configs : sources, tile size, block rows, vector size, index space function
-# kernels_config[3][FLOAT_GPU]['copy'] = \
-#     ('kernels/copy_noVec.cl', 32, 2, 1, copy_space_index_3d)
-# kernels_config[3][DOUBLE_GPU]['copy'] = \
-#     ('kernels/copy_noVec.cl', 32, 2, 1, copy_space_index_3d)
-# kernels_config[2][FLOAT_GPU]['copy'] = \
-#     ('kernels/copy_noVec.cl', 32, 2, 1, copy_space_index_2d)
-# kernels_config[2][DOUBLE_GPU]['copy'] = \
-#     ('kernels/copy_noVec.cl', 32, 2, 1, copy_space_index_2d)
-
-# Transpositions kernels:
-# XY transposition
-# Settings are taken from destination layout as current layout.
-# gwi is computed form input layout (appears as transposed layout)
-def xy_space_index_2d(size, t_dim, b_rows, vec):
-    gwi = check_max((size[1] / vec, b_rows * size[0] / t_dim, 1))
-    lwi = (t_dim / vec, b_rows, 1)
-    blocs_nb = ((size[1] / vec) / lwi[0],
-                (b_rows * size[0] / t_dim) / lwi[1], None)
-    return gwi, lwi, blocs_nb
-def xy_space_index_3d(size, t_dim, b_rows, vec):
-    gwi = check_max((size[1] / vec, b_rows * size[0] / t_dim, size[2]))
-    lwi = (t_dim / vec, b_rows, 1)
-    blocs_nb = ((size[1] / vec) / lwi[0],
-                (b_rows * size[0] / t_dim) / lwi[1], None)
-    return gwi, lwi, blocs_nb
-# Configs : sources, tile size, block rows, is padding, vector size,
-#              index space function
-kernels_config[3][FLOAT_GPU]['transpose_xy'] = \
-    ('kernels/transpose_xy_noVec.cl', 32, 2, True, 1, xy_space_index_3d)
-kernels_config[3][DOUBLE_GPU]['transpose_xy'] = \
-    ('kernels/transpose_xy_noVec.cl', 32, 2, True, 1, xy_space_index_3d)
-kernels_config[2][FLOAT_GPU]['transpose_xy'] = \
-    ('kernels/transpose_xy_noVec.cl', 32, 2, True, 1, xy_space_index_2d)
-kernels_config[2][DOUBLE_GPU]['transpose_xy'] = \
-    ('kernels/transpose_xy_noVec.cl', 32, 2, True, 1, xy_space_index_2d)
-
-# XZ transposition
-# Settings are taken from destination layout as current layout.
-# gwi is computed form input layout (appears as transposed layout)
-def xz_space_index_3d(size, t_dim, b_rows, b_deph, vec):
-    gwi = check_max((size[2] / vec, size[1], b_deph * size[0] / t_dim))
-    lwi = (t_dim / vec, 1, b_deph)
-    blocs_nb = (((size[2]) / vec) / lwi[0], None,
-                (b_deph * (size[0]) / t_dim) / lwi[2])
-    return gwi, lwi, blocs_nb
-# Configs : sources, tile size, block rows, is padding, vector size,
-#              index space function
-kernels_config[3][FLOAT_GPU]['transpose_xz'] = \
-    ('kernels/transpose_xz_slice_noVec.cl', 32, 1, 2, True, 1, xz_space_index_3d)
-kernels_config[3][DOUBLE_GPU]['transpose_xz'] = \
-    ('kernels/transpose_xz_slice_noVec.cl', 32, 1, 2, True, 1, xz_space_index_3d)
-
-def computational_kernels_index_space(wi, size, vec):
-    # Change work-item regarding vector_width
-    if wi * vec > size[0]:
-        if size[0] % vec > 0:
-            raise ValueError(
-                "Resolution ({0}) must be a multiple of {1}".format(
-                    size[0], vec))
-        wi = size[0] // vec
-
-    if len(size) == 3:
-        gwi = (int(wi),
-               _clamp_max(size[1], MAX_GWI[1]),
-               _clamp_max(size[2], MAX_GWI[2]))
-        lwi = (int(wi), 1, 1)
-    else:
-        gwi = (int(wi), _clamp_max(size[1], MAX_GWI[1]), 1)
-        lwi = (int(wi), 1, 1)
-    return gwi, lwi
-
-def advection_index_space_3d(size, vec):
-    wi = min(max(32, size[0] / 2), 128)
-    return computational_kernels_index_space(wi, size, vec)
-def advection_index_space_2d_SP(size, vec):
-    wi = min(max(32, size[0] / 2), 128)
-    return computational_kernels_index_space(wi, size, vec)
-def advection_index_space_2d_DP(size, vec):
-    wi = min(max(32, size[0] / 2), 128)
-    return computational_kernels_index_space(wi, size, vec)
-
-def remeshing_index_space_3d(size, vec):
-    wi = min(max(32, size[0] / 2), 128)
-    return computational_kernels_index_space(wi, size, vec)
-def remeshing_index_space_2d(size, vec):
-    wi = min(max(32, size[0] / 2), 128)
-    return computational_kernels_index_space(wi, size, vec)
-
-def advection_and_remeshing_index_space(size, vec):
-    wi = min(size[0] / 2, 128)
-    return computational_kernels_index_space(wi, size, vec)
-
-
-# Advection kernel
-# Configs sources, is noBC, vector size, index space function
-kernels_config[3][FLOAT_GPU]['advec'] = \
-    (["common.cl", "advection/velocity_cache_noVec.cl",
-      "advection/builtin_RKN_noVec.cl", "kernels/advection_noVec.cl"],
-     False, 1, advection_index_space_3d)
-kernels_config[3][DOUBLE_GPU]['advec'] = \
-    (["common.cl", "advection/velocity_cache_noVec.cl",
-      "advection/builtin_RKN_noVec.cl", "kernels/advection_noVec.cl"],
-     False, 1, advection_index_space_3d)
-kernels_config[2][FLOAT_GPU]['advec'] = \
-    (["common.cl", "advection/velocity_cache_noVec.cl",
-      "advection/builtin_RKN_noVec.cl", "kernels/advection_noVec.cl"],
-     False, 1, advection_index_space_2d_SP)
-kernels_config[2][DOUBLE_GPU]['advec'] = \
-    (["common.cl", "advection/velocity_cache_noVec.cl",
-      "advection/builtin_RKN_noVec.cl", "kernels/advection_noVec.cl"],
-     False, 1, advection_index_space_2d_DP)
-
-# Remeshing kernel
-# Configs sources, is noBC, vector size, index space function
-kernels_config[3][FLOAT_GPU]['remesh'] = \
-    (["common.cl", "remeshing/weights_noVec_builtin.cl",
-      "remeshing/basic_noVec.cl", "kernels/remeshing_noVec.cl"],
-     False, 1, remeshing_index_space_3d)
-kernels_config[3][DOUBLE_GPU]['remesh'] = \
-    (["common.cl", "remeshing/weights_noVec_builtin.cl",
-      "remeshing/basic_noVec.cl", "kernels/remeshing_noVec.cl"],
-     False, 1, remeshing_index_space_3d)
-kernels_config[2][FLOAT_GPU]['remesh'] = \
-    (["common.cl", "remeshing/weights_noVec_builtin.cl",
-      "remeshing/basic_noVec.cl", "kernels/remeshing_noVec.cl"],
-     False, 1, remeshing_index_space_2d)
-kernels_config[2][DOUBLE_GPU]['remesh'] = \
-    (["common.cl", "remeshing/weights_noVec_builtin.cl",
-      "remeshing/basic_noVec.cl", "kernels/remeshing_noVec.cl"],
-     False, 1, remeshing_index_space_2d)
-
-
-# Advection and remeshing kernel
-# Configs sources, is noBC, vector size, index space function
-kernels_config[3][FLOAT_GPU]['advec_and_remesh'] = \
-    (["common.cl",
-      "remeshing/weights_noVec_builtin.cl", "remeshing/private_noVec.cl",
-      "advection/velocity_cache_noVec.cl", "advection/builtin_RKN_noVec.cl",
-      "kernels/advection_and_remeshing_noVec.cl"],
-     False, 1, advection_and_remeshing_index_space)
-kernels_config[3][DOUBLE_GPU]['advec_and_remesh'] = \
-    (["common.cl",
-      "remeshing/weights_noVec_builtin.cl", "remeshing/private_noVec.cl",
-      "advection/velocity_cache_noVec.cl", "advection/builtin_RKN_noVec.cl",
-      "kernels/advection_and_remeshing_noVec.cl"],
-     False, 1, advection_and_remeshing_index_space)
-kernels_config[2][FLOAT_GPU]['advec_and_remesh'] = \
-    (["common.cl",
-      "remeshing/weights_noVec_builtin.cl", "remeshing/private_noVec.cl",
-      "advection/velocity_cache_noVec.cl", "advection/builtin_RKN_noVec.cl",
-      "kernels/advection_and_remeshing_noVec.cl"],
-     False, 1, advection_and_remeshing_index_space)
-kernels_config[2][DOUBLE_GPU]['advec_and_remesh'] = \
-    (["common.cl",
-      "remeshing/weights_noVec_builtin.cl", "remeshing/private_noVec.cl",
-      "advection/velocity_cache_noVec.cl", "advection/builtin_RKN_noVec.cl",
-      "kernels/advection_and_remeshing_noVec.cl"],
-     False, 1, advection_and_remeshing_index_space)
-
-
-def diffusion_space_index_3d(size, nb_part, tile):
-    gwi = check_max((size[0], size[1] / nb_part))
-    lwi = (tile, tile / nb_part)
-    blocs_nb = (size[0] / tile, size[1] / tile)
-    return gwi, lwi, blocs_nb
-
-
-kernels_config[3][FLOAT_GPU]['diffusion'] = \
-    (["common.cl", "kernels/diffusion.cl"],
-     16, 1, 1, diffusion_space_index_3d)
-
-
-kernels_config[3][DOUBLE_GPU]['advec_comm'] = \
-    (['common.cl', 'kernels/comm_advection_noVec.cl'],
-     False, 1, advection_index_space_3d)
-kernels_config[3][DOUBLE_GPU]['advec_MS_comm'] = \
-    (['common.cl', "remeshing/weights_noVec_builtin.cl",
-      'kernels/comm_MS_advection_noVec.cl'],
-     False, 1, advection_index_space_3d)
-kernels_config[3][DOUBLE_GPU]['remesh_comm'] = \
-    (['common.cl', 'remeshing/weights_noVec.cl',
-      'kernels/comm_remeshing_noVec.cl'],
-     False, 1, remeshing_index_space_3d)
-kernels_config[3][DOUBLE_GPU]['advec_and_remesh_comm'] = \
-    (['common.cl', 'remeshing/weights_noVec.cl',
-      'kernels/comm_advection_and_remeshing_noVec.cl'],
-     False, 1, advection_and_remeshing_index_space)
-kernels_config[3][DOUBLE_GPU]['advec_MS_and_remesh_comm'] = \
-    (['common.cl', 'remeshing/weights_noVec.cl',
-      'kernels/comm_advection_MS_and_remeshing_noVec.cl'],
-     False, 1, advection_and_remeshing_index_space)
-
-
-kernels_config[3][FLOAT_GPU]['advec_comm'] = \
-    (['common.cl', 'kernels/comm_advection_noVec.cl'],
-     False, 1, advection_index_space_3d)
-kernels_config[3][FLOAT_GPU]['advec_MS_comm'] = \
-    (['common.cl', "remeshing/weights_noVec_builtin.cl",
-      'kernels/comm_MS_advection_noVec.cl'],
-     False, 1, advection_index_space_3d)
-kernels_config[3][FLOAT_GPU]['remesh_comm'] = \
-    (['common.cl', 'remeshing/weights_noVec.cl',
-      'kernels/comm_remeshing_noVec.cl'],
-     False, 1, remeshing_index_space_3d)
-kernels_config[3][FLOAT_GPU]['advec_and_remesh_comm'] = \
-    (['common.cl', 'remeshing/weights_noVec.cl',
-      'kernels/comm_advection_and_remeshing_noVec.cl'],
-     False, 1, advection_and_remeshing_index_space)
-kernels_config[3][FLOAT_GPU]['advec_MS_and_remesh_comm'] = \
-    (['common.cl', 'remeshing/weights_noVec.cl',
-      'kernels/comm_advection_MS_and_remeshing_noVec.cl'],
-     False, 1, advection_and_remeshing_index_space)
-
-
-def fine_to_coarse_filter_index_space(size, stencil_width):
-    wg = size[0] / (2 * stencil_width)
-    return ((wg, size[1] / stencil_width, size[2] / stencil_width),
-            (wg, 1, 1))
-
-
-kernels_config[3][FLOAT_GPU]['fine_to_coarse_filter'] = \
-    (["common.cl", 'remeshing/weights_noVec.cl',
-      "kernels/fine_to_coarse_filter.cl"],
-     1, fine_to_coarse_filter_index_space)
-
-
-def multiphase_baroclinic_index_space(size, tile):
-    wg = (tile, tile, 1)
-    ws = (int(size[0]), int(size[1]), 1)
-    return ws, wg
-
-kernels_config[3][FLOAT_GPU]['multiphase_baroclinic'] = \
-    (["common.cl", "kernels/multiphase_baroclinic_rhs.cl"],
-     8, 1, multiphase_baroclinic_index_space)
diff --git a/hysop/old/gpu.old/config_k20m.py b/hysop/old/gpu.old/config_k20m.py
deleted file mode 100644
index 9203c92e60ac77cbcccf5b23913e11e69e5d7b7e..0000000000000000000000000000000000000000
--- a/hysop/old/gpu.old/config_k20m.py
+++ /dev/null
@@ -1,265 +0,0 @@
-"""
-@file config_k20m.py
-
-OpenCL kernels configurations.
-"""
-from hysop.constants import np
-FLOAT_GPU, DOUBLE_GPU = np.float32, np.float64
-MAX_GWI = (1024, 1024, 1024)
-
-
-def _clamp_max(w, m):
-    while w > m:
-        w /= 2
-    return int(w)
-
-
-def check_max(t_gwi):
-    return tuple([_clamp_max(w, m) for w, m in zip(t_gwi, MAX_GWI)])
-
-
-#build empty dictionaries
-kernels_config = {}
-kernels_config[2] = {FLOAT_GPU: {}, DOUBLE_GPU: {}}
-kernels_config[3] = {FLOAT_GPU: {}, DOUBLE_GPU: {}}
-
-## Copy kernel is replaced by copy function from OpenCL API
-# # Copy kernel:
-# def copy_space_index_2d(size, t_dim, b_rows, vec):
-#     gwi = (int(size[0] / vec), int(b_rows * size[1] / t_dim), 1)
-#     lwi = (t_dim / vec, b_rows, 1)
-#     return gwi, lwi
-# def copy_space_index_3d(size, t_dim, b_rows, vec):
-#     gwi = (int(size[0] / vec), int(b_rows * size[1] / t_dim), int(size[2]))
-#     lwi = (t_dim / vec, b_rows, 1)
-#     return gwi, lwi
-# # Configs : sources, tile size, block rows, vector size, index space function
-# kernels_config[3][FLOAT_GPU]['copy'] = \
-#     ('kernels/copy_noVec.cl', 32, 8, 1, copy_space_index_3d)
-# kernels_config[3][DOUBLE_GPU]['copy'] = \
-#     ('kernels/copy_noVec.cl', 16, 16, 1, copy_space_index_3d)
-# kernels_config[2][FLOAT_GPU]['copy'] = \
-#     ('kernels/copy_noVec.cl', 32, 8, 1, copy_space_index_2d)
-# kernels_config[2][DOUBLE_GPU]['copy'] = \
-#     ('kernels/copy_noVec.cl', 16, 16, 1, copy_space_index_2d)
-
-# Transpositions kernels:
-# XY transposition
-# Settings are taken from destination layout as current layout.
-# gwi is computed form input layout (appears as transposed layout)
-def xy_space_index_2d(size, t_dim, b_rows, vec):
-    gwi = check_max((size[1] / vec, b_rows * size[0] / t_dim, 1))
-    lwi = (t_dim / vec, b_rows, 1)
-    blocs_nb = ((size[1] / vec) / lwi[0],
-                (b_rows * size[0] / t_dim) / lwi[1], None)
-    return gwi, lwi, blocs_nb
-def xy_space_index_3d(size, t_dim, b_rows, vec):
-    gwi = check_max((size[1] / vec, b_rows * size[0] / t_dim, size[2]))
-    lwi = (t_dim / vec, b_rows, 1)
-    block_nb = ((size[1] / vec) / lwi[0],
-                (b_rows * size[0] / t_dim) / lwi[1], None)
-    return gwi, lwi, block_nb
-# Configs : sources, tile size, block rows, is padding, vector size,
-#              index space function
-kernels_config[3][FLOAT_GPU]['transpose_xy'] = \
-    ('kernels/transpose_xy_noVec.cl', 32, 4, True, 1, xy_space_index_3d)
-kernels_config[3][DOUBLE_GPU]['transpose_xy'] = \
-    ('kernels/transpose_xy_noVec.cl', 32, 16, True, 1, xy_space_index_3d)
-kernels_config[2][FLOAT_GPU]['transpose_xy'] = \
-    ('kernels/transpose_xy_noVec.cl', 32, 2, True, 1, xy_space_index_2d)
-kernels_config[2][DOUBLE_GPU]['transpose_xy'] = \
-    ('kernels/transpose_xy_noVec.cl', 32, 8, True, 1, xy_space_index_2d)
-
-# XZ transposition
-# Settings are taken from destination layout as current layout.
-# gwi is computed form input layout (appears as transposed layout)
-def xz_space_index_3d(size, t_dim, b_rows, b_deph, vec):
-    gwi = check_max((size[2] / vec, size[1], b_deph * size[0] / t_dim))
-    lwi = (t_dim / vec, 1, b_deph)
-    blocs_nb = ((size[2] / vec) / lwi[0], None,
-                (b_deph * size[0] / t_dim) / lwi[2])
-    return gwi, lwi, blocs_nb
-# Configs : sources, tile size, block rows, is padding, vector size,
-#              index space function
-kernels_config[3][FLOAT_GPU]['transpose_xz'] = \
-    ('kernels/transpose_xz_slice_noVec.cl', 32, 1, 8, True, 1, xz_space_index_3d)
-kernels_config[3][DOUBLE_GPU]['transpose_xz'] = \
-    ('kernels/transpose_xz_slice_noVec.cl', 32, 1, 8, True, 1, xz_space_index_3d)
-
-def computational_kernels_index_space(wi, size, vec):
-    # Change work-item regarding vector_width
-    if wi * vec > size[0]:
-        if size[0] % vec > 0:
-            raise ValueError(
-                "Resolution ({0}) must be a multiple of {1}".format(
-                    size[0], vec))
-        wi = size[0] // vec
-
-    if len(size) == 3:
-        gwi = (int(wi),
-               _clamp_max(size[1], MAX_GWI[1]),
-               _clamp_max(size[2], MAX_GWI[2]))
-        lwi = (int(wi), 1, 1)
-    else:
-        gwi = (int(wi), _clamp_max(size[1], MAX_GWI[1]))
-        lwi = (int(wi), 1)
-    return gwi, lwi
-
-def advection_index_space_3d(size, vec):
-    wi = min(size[0] / 4, 1024)
-    return computational_kernels_index_space(wi, size, vec)
-def advection_index_space_2d_SP(size, vec):
-    wi = min(size[0] / 8, 1024)
-    return computational_kernels_index_space(wi, size, vec)
-def advection_index_space_2d_DP(size, vec):
-    wi = min(size[0] / 4, 1024)
-    return computational_kernels_index_space(wi, size, vec)
-
-def remeshing_index_space_3d(size, vec):
-    wi = min(size[0] / 2, 1024)
-    return computational_kernels_index_space(wi, size, vec)
-def remeshing_index_space_2d(size, vec):
-    wi = min(size[0] / 4, 1024)
-    return computational_kernels_index_space(wi, size, vec)
-
-def advection_and_remeshing_index_space(size, vec):
-    wi = min(size[0] / 2, 1024)
-    return computational_kernels_index_space(wi, size, vec)
-
-
-# Advection kernel
-# Configs sources, is noBC, vector size, index space function
-kernels_config[3][FLOAT_GPU]['advec'] = \
-    (["common.cl",
-      "advection/velocity_cache_noVec.cl", "advection/builtin_RKN_noVec.cl",
-      "kernels/advection_noVec.cl"],
-     False, 1, advection_index_space_3d)
-kernels_config[3][DOUBLE_GPU]['advec'] = \
-    (["common.cl",
-      "advection/velocity_cache_noVec.cl", "advection/builtin_RKN_noVec.cl",
-      "kernels/advection_noVec.cl"],
-     False, 1, advection_index_space_3d)
-kernels_config[2][FLOAT_GPU]['advec'] = \
-    (["common.cl",
-      "advection/velocity_cache_noVec.cl", "advection/builtin_RKN_noVec.cl",
-      "kernels/advection_noVec.cl"],
-     False, 1, advection_index_space_2d_SP)
-kernels_config[2][DOUBLE_GPU]['advec'] = \
-    (["common.cl",
-      "advection/velocity_cache_noVec.cl", "advection/builtin_RKN_noVec.cl",
-      "kernels/advection_noVec.cl"],
-     False, 1, advection_index_space_2d_DP)
-
-# Remeshing kernel
-# Configs sources, is noBC, vector size, index space function
-kernels_config[3][FLOAT_GPU]['remesh'] = \
-    (["common.cl",
-      "remeshing/weights_noVec_builtin.cl", "remeshing/basic_noVec.cl",
-      "kernels/remeshing_noVec.cl"],
-     False, 1, remeshing_index_space_3d)
-kernels_config[3][DOUBLE_GPU]['remesh'] = \
-    (["common.cl",
-      "remeshing/weights_noVec_builtin.cl", "remeshing/basic_noVec.cl",
-      "kernels/remeshing_noVec.cl"],
-     False, 1, remeshing_index_space_3d)
-kernels_config[2][FLOAT_GPU]['remesh'] = \
-    (["common.cl",
-      "remeshing/weights_noVec_builtin.cl", "remeshing/basic.cl",
-      "kernels/remeshing.cl"],
-     True, 2, remeshing_index_space_2d)
-kernels_config[2][DOUBLE_GPU]['remesh'] = \
-    (["common.cl",
-      "remeshing/weights_noVec_builtin.cl", "remeshing/basic.cl",
-      "kernels/remeshing.cl"],
-     True, 2, remeshing_index_space_2d)
-
-
-# Advection and remeshing kernel
-# Configs sources, is noBC, vector size, index space function
-kernels_config[3][FLOAT_GPU]['advec_and_remesh'] = \
-    (["common.cl",
-      "remeshing/weights_noVec_builtin.cl", "remeshing/private_noVec.cl",
-      "advection/velocity_cache_noVec.cl", "advection/builtin_RKN_noVec.cl",
-      "kernels/advection_and_remeshing_noVec.cl"],
-     False, 1, advection_and_remeshing_index_space)
-kernels_config[3][DOUBLE_GPU]['advec_and_remesh'] = \
-    (["common.cl",
-      "remeshing/weights_noVec_builtin.cl", "remeshing/private_noVec.cl",
-      "advection/velocity_cache_noVec.cl", "advection/builtin_RKN_noVec.cl",
-      "kernels/advection_and_remeshing_noVec.cl"],
-     False, 1, advection_and_remeshing_index_space)
-kernels_config[2][FLOAT_GPU]['advec_and_remesh'] = \
-    (["common.cl",
-      "remeshing/weights_noVec_builtin.cl", "remeshing/private_noVec.cl",
-      "advection/velocity_cache_noVec.cl", "advection/builtin_RKN_noVec.cl",
-      "kernels/advection_and_remeshing_noVec.cl"],
-     False, 1, advection_and_remeshing_index_space)
-kernels_config[2][DOUBLE_GPU]['advec_and_remesh'] = \
-    (["common.cl",
-      "remeshing/weights_noVec_builtin.cl", "remeshing/private_noVec.cl",
-      "advection/velocity_cache_noVec.cl", "advection/builtin_RKN_noVec.cl",
-      "kernels/advection_and_remeshing_noVec.cl"],
-     False, 1, advection_and_remeshing_index_space)
-
-
-def diffusion_space_index_3d(size, nb_part, tile):
-    gwi = check_max((size[0], size[1] / nb_part))
-    lwi = (tile, tile / nb_part)
-    blocs_nb = (size[0] / tile, size[1] / tile)
-    return gwi, lwi, blocs_nb
-
-
-kernels_config[3][DOUBLE_GPU]['diffusion'] = \
-    (["common.cl", "kernels/diffusion.cl"],
-     16, 4, 1, diffusion_space_index_3d)
-
-
-kernels_config[3][DOUBLE_GPU]['advec_comm'] = \
-    (['common.cl', 'kernels/comm_advection_noVec.cl'],
-     False, 1, advection_index_space_3d)
-kernels_config[3][DOUBLE_GPU]['advec_MS_comm'] = \
-    (['common.cl', "remeshing/weights_noVec_builtin.cl",
-      'kernels/comm_MS_advection_noVec.cl'],
-     False, 1, advection_index_space_3d)
-kernels_config[3][DOUBLE_GPU]['remesh_comm'] = \
-    (['common.cl', 'remeshing/weights_noVec.cl',
-      'kernels/comm_remeshing_noVec.cl'],
-     False, 1, remeshing_index_space_3d)
-kernels_config[3][DOUBLE_GPU]['advec_and_remesh_comm'] = \
-    (['common.cl', 'remeshing/weights_noVec.cl',
-      'kernels/comm_advection_and_remeshing_noVec.cl'],
-     False, 1, advection_and_remeshing_index_space)
-kernels_config[3][DOUBLE_GPU]['advec_MS_and_remesh_comm'] = \
-    (['common.cl', 'remeshing/weights_noVec.cl',
-      'kernels/comm_advection_MS_and_remeshing_noVec.cl'],
-     False, 1, advection_and_remeshing_index_space)
-
-
-def fine_to_coarse_filter_index_space(size, stencil_width):
-    wg = size[0] / (2 * stencil_width)
-    return ((wg, size[1] / stencil_width, size[2] / stencil_width),
-            (wg, 1, 1))
-
-
-kernels_config[3][FLOAT_GPU]['fine_to_coarse_filter'] = \
-    (["common.cl", 'remeshing/weights_noVec.cl',
-      "kernels/fine_to_coarse_filter.cl"],
-     1, fine_to_coarse_filter_index_space)
-kernels_config[3][DOUBLE_GPU]['fine_to_coarse_filter'] = \
-    (["common.cl", 'remeshing/weights_noVec.cl',
-      "kernels/fine_to_coarse_filter.cl"],
-     1, fine_to_coarse_filter_index_space)
-
-
-
-def multiphase_baroclinic_index_space(size, tile):
-    wg = (tile, tile, 1)
-    ws = (int(size[0]), int(size[1]), 1)
-    return ws, wg
-
-kernels_config[3][FLOAT_GPU]['multiphase_baroclinic'] = \
-    (["common.cl", "kernels/multiphase_baroclinic_rhs.cl"],
-     8, 1, multiphase_baroclinic_index_space)
-kernels_config[3][DOUBLE_GPU]['multiphase_baroclinic'] = \
-    (["common.cl", "kernels/multiphase_baroclinic_rhs.cl"],
-     8, 1, multiphase_baroclinic_index_space)
diff --git a/hysop/old/gpu.old/directional/__init__.py b/hysop/old/gpu.old/directional/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/hysop/old/gpu.old/directional/directional_codegen.py b/hysop/old/gpu.old/directional/directional_codegen.py
deleted file mode 100644
index 0d6fc0831a49a5173eea8ff64b42bd1afd02e81d..0000000000000000000000000000000000000000
--- a/hysop/old/gpu.old/directional/directional_codegen.py
+++ /dev/null
@@ -1,11 +0,0 @@
-
-from hysop.backend.device.opencl.gpu_codegen import OpenClCodegenOperator
-
-class OpenClCodegenDirectionalOperator(OpenClCodegenOperator):
-
-    def __init__(self,direction,splitting_dim,**kargs):
-        super(OpenClCodegenDirectionalOperator,self).__init__(direction=direction,**kargs)
-        assert direction<splitting_dim
-        self.direction     = direction
-        self.splitting_dim = splitting_dim
-
diff --git a/hysop/old/gpu.old/gpu_codegen.py b/hysop/old/gpu.old/gpu_codegen.py
deleted file mode 100644
index 5d9e587a48312fface40e9fff4d326e220cd7eb5..0000000000000000000000000000000000000000
--- a/hysop/old/gpu.old/gpu_codegen.py
+++ /dev/null
@@ -1,9 +0,0 @@
-
-from hysop.backend.device.opencl.gpu_operator import GPUOperator
-from hysop.core.graph.computational_operator import ComputationalGraphOperator
-from hysop.backend.device.kernel_config import KernelConfig
-
-class OpenClCodegenOperator(ComputationalGraphOperator):
-
-    def __init__(self, **kargs):
-        super(OpenClCodegenOperator,self).__init__(**kargs)
diff --git a/hysop/old/gpu.old/gpu_diffusion.py b/hysop/old/gpu.old/gpu_diffusion.py
deleted file mode 100644
index ed83290901698aa8911763d162c8b1405957e6ab..0000000000000000000000000000000000000000
--- a/hysop/old/gpu.old/gpu_diffusion.py
+++ /dev/null
@@ -1,294 +0,0 @@
-"""
-@file gpu_diffusion.py
-
-Diffusion on GPU
-"""
-from hysop.constants import debug, np, DirectionLabels, hysop.core.mpi_REAL, ORDERMPI, \
-    HYSOP_REAL, ORDER
-from hysop.tools.numpywrappers import npw
-from hysop.operator.discrete.discrete import DiscreteOperator
-from hysop.operator.discrete.discrete import get_extra_args_from_method
-from hysop.backend.device.opencl import cl
-from hysop.backend.device.opencl.gpu_operator import GPUOperator
-from hysop.backend.device.opencl.opencl_kernel import OpenClKernelLauncher
-from hysop.backend.device.opencl.opencl_discrete import OpenClDiscreteField
-from hysop.tools.profiler import FProfiler
-from hysop.core.mpi import Wtime
-
-
-class GPUDiffusion(DiscreteOperator, GPUOperator):
-
-    @debug
-    def __init__(self, field, viscosity, **kwds):
-        super(GPUDiffusion, self).__init__(variables=[field], **kwds)
-        ## Discretization of the solution field
-        self.field = self.variables[0]
-        ## Viscosity.
-        self.viscosity = viscosity
-        self.input = [self.field]
-        self.output = [self.field]
-        self.direction = 0
-        self._cl_work_size = 0
-
-        GPUOperator.__init__(
-            self,
-            platform_id=get_extra_args_from_method(self, 'platform_id', None),
-            device_id=get_extra_args_from_method(self, 'device_id', None),
-            device_type=get_extra_args_from_method(self, 'device_type', None),
-            **kwds)
-
-        ## GPU allocation.
-        alloc = not isinstance(self.field, OpenClDiscreteField)
-        OpenClDiscreteField.fromField(self.cl_env, self.field,
-                                   self.gpu_precision, simple_layout=False)
-        if not self.field.gpu_allocated:
-            self.field.allocate()
-        if alloc:
-            self.size_global_alloc += self.field.mem_size
-
-        self.field_tmp = get_extra_args_from_method(self, 'field_tmp', None),
-
-        topo = self.field.topology
-        self._cutdir_list = np.where(topo.cutdir)[0].tolist()
-        self._comm = topo.comm
-        self._comm_size = self._comm.Get_size()
-        self._comm_rank = self._comm.Get_rank()
-        if self._comm_size > 1:
-            self._to_send = [None] * self.dim
-            self._to_recv_buf = [None] * self.dim
-            self._to_recv = [None] * self.dim
-            self._pitches_host = [None] * self.dim
-            self._pitches_buff = [None] * self.dim
-            self._region_size = [None] * self.dim
-            self._r_orig = [None] * self.dim
-            self._br_orig = [None] * self.dim
-            self.mpi_type_diff_l = {}
-            self.mpi_type_diff_r = {}
-            self.profiler += FProfiler('comm_diffusion')
-            for d in self._cutdir_list:
-                shape = list(self.field.data[0].shape)
-                shape_b = list(self.field.data[0].shape)
-                start_l = [0, ] * 3
-                start_r = [0, ] * 3
-                start_r[d] = 1
-                shape[d] = 2
-                shape_b[d] = 1
-                # _to_send[..., 0] contains [..., 0] data
-                # _to_send[..., 1] contains [..., Nz-1] data
-                # _to_recv[..., 0] contains [..., Nz] data (right ghosts)
-                # _to_recv[..., 1] contains [..., -1] data (left ghosts)
-                self._to_send[d] = npw.zeros(tuple(shape))
-                _to_recv = npw.zeros(tuple(shape))
-                self.mpi_type_diff_l[d] = hysop.core.mpi_REAL.Create_subarray(
-                    shape, shape_b, start_l, order=ORDERMPI)
-                self.mpi_type_diff_l[d].Commit()
-                self.mpi_type_diff_r[d] = hysop.core.mpi_REAL.Create_subarray(
-                    shape, shape_b, start_r, order=ORDERMPI)
-                self.mpi_type_diff_r[d].Commit()
-                self._to_recv_buf[d] = self.cl_env.global_allocation(_to_recv)
-                self._to_recv[d], evt = cl.enqueue_map_buffer(
-                    self.cl_env.queue,
-                    self._to_recv_buf[d],
-                    offset=0,
-                    shape=shape,
-                    dtype=HYSOP_REAL,
-                    flags=cl.map_flags.READ | cl.map_flags.WRITE,
-                    is_blocking=False,
-                    order=ORDER)
-                evt.wait()
-                self._pitches_host[d] = (int(self._to_send[d][:, 0, 0].nbytes),
-                                         int(self._to_send[d][:, :, 0].nbytes))
-                self._pitches_buff[d] = (int(self.field.data[0][:, 0, 0].nbytes),
-                                         int(self.field.data[0][:, :, 0].nbytes))
-                cl.enqueue_copy(
-                    self.cl_env.queue,
-                    self._to_recv_buf[d],
-                    self._to_recv[d],
-                    buffer_origin=(0, 0, 0),
-                    host_origin=(0, 0, 0),
-                    region=(self._to_recv[d][0, 0, 0].nbytes, )).wait()
-                self._cl_work_size += self._to_recv[d].nbytes
-
-                r_orig = [0, ] * self.dim
-                br_orig = [0, ] * self.dim
-                r_orig[d] = self.field.data[0].shape[d] - 1
-                br_orig[d] = 1
-                if d == 0:
-                    r_orig[d] *= self._to_send[d][0, 0, 0].nbytes
-                    br_orig[d] *= self._to_send[d][0, 0, 0].nbytes
-                self._r_orig[d] = tuple(r_orig)
-                self._br_orig[d] = tuple(br_orig)
-                l_sl = [slice(None), ] * 3
-                r_sl = [slice(None), ] * 3
-                l_sl[d] = slice(0, 1)
-                r_sl[d] = slice(1, 2)
-                l_sl = tuple(l_sl)
-                r_sl = tuple(r_sl)
-                self._region_size[d] = list(self.field.data[0].shape)
-                if d == 0:
-                    self._region_size[d][0] = self._to_send[d][0, 0, 0].nbytes
-                else:
-                    self._region_size[d][0] = self._to_send[d][:, 0, 0].nbytes
-                    self._region_size[d][d] = 1
-
-            self._compute = self._compute_diffusion_comm
-        else:
-            self._compute = self._compute_diffusion
-
-        self._mesh_size = npw.ones(4, dtype=self.gpu_precision)
-        self._mesh_size[:self.dim] = topo.mesh.space_step
-        shape = topo.mesh.local_resolution
-        resol = shape.copy()
-        self.resol_dir = npw.dim_ones((self.dim,))
-        self.resol_dir[:self.dim] = shape
-        self._append_size_constants(resol)
-
-        src, tile_size, nb_part_per_wi, vec, f_space = \
-            self._kernel_cfg['diffusion']
-
-        build_options = self._size_constants
-        build_options += " -D TILE_SIZE=" + str(tile_size)
-        build_options += " -D NB_PART=" + str(nb_part_per_wi)
-        build_options += " -D L_WIDTH=" + str(tile_size / nb_part_per_wi)
-        for d in xrange(self.dim):
-            build_options += " -D CUT_DIR" + DirectionLabels[d] + "="
-            build_options += str(1 if topo.shape[d] > 1 else 0)
-
-        gwi, lwi, blocs_nb = f_space(self.field.data[0].shape,
-                                     nb_part_per_wi, tile_size)
-        build_options += " -D NB_GROUPS_I={0}".format(blocs_nb[0])
-        build_options += " -D NB_GROUPS_II={0}".format(blocs_nb[1])
-        prg = self.cl_env.build_src(src, build_options, vec)
-        self.num_diffusion = OpenClKernelLauncher(
-            prg.diffusion, self.cl_env.queue, gwi, lwi)
-        self.copy = OpenClKernelLauncher(cl.enqueue_copy,
-                                   self.cl_env.queue)
-
-    def _compute_diffusion(self, simulation):
-        assert self.field_tmp is not None
-        wait_evt = self.field.events
-        d_evt = self.num_diffusion(
-            self.field.gpu_data[0],
-            self.field_tmp,
-            self.gpu_precision(self.viscosity * simulation.time_step),
-            self._mesh_size,
-            wait_for=wait_evt)
-        c_evt = self.copy.launch_sizes_in_args(
-            self.field.gpu_data[0], self.field_tmp, wait_for=[d_evt])
-        #c_evt = cl.enqueue_copy(self.cl_env.queue, self.field.gpu_data[0],
-        #                        self.field_tmp, wait_for=[d_evt])
-        self.field.events.append(c_evt)
-
-    def set_field_tmp(self, field_tmp):
-        self.field_tmp = field_tmp
-
-    def _compute_diffusion_comm(self, simulation):
-        assert self.field_tmp is not None
-        # Compute OpenCL transfer parameters
-        tc = Wtime()
-        topo = self.field.topology
-        first_cut_dir = topo.cutdir.tolist().index(True)
-        wait_evt = []
-        send_l = [None, ] * self.dim
-        send_r = [None, ] * self.dim
-        recv_l = [None, ] * self.dim
-        recv_r = [None, ] * self.dim
-        e_l = [None, ] * self.dim
-        e_r = [None, ] * self.dim
-        for d in self._cutdir_list:
-            wait_events = self.field.events
-            e_l[d] = cl.enqueue_copy(self.cl_env.queue, self._to_send[d],
-                                     self.field.gpu_data[0],
-                                     host_origin=(0, 0, 0),
-                                     buffer_origin=(0, 0, 0),
-                                     host_pitches=self._pitches_host[d],
-                                     buffer_pitches=self._pitches_buff[d],
-                                     region=tuple(self._region_size[d]),
-                                     wait_for=wait_events,
-                                     is_blocking=False)
-            e_r[d] = cl.enqueue_copy(self.cl_env.queue, self._to_send[d],
-                                     self.field.gpu_data[0],
-                                     host_origin=self._br_orig[d],
-                                     buffer_origin=self._r_orig[d],
-                                     host_pitches=self._pitches_host[d],
-                                     buffer_pitches=self._pitches_buff[d],
-                                     region=tuple(self._region_size[d]),
-                                     wait_for=wait_events,
-                                     is_blocking=False)
-
-        for d in self._cutdir_list:
-            # MPI send
-            R_rk = topo.neighbours[1, d - first_cut_dir]
-            L_rk = topo.neighbours[0, d - first_cut_dir]
-            recv_r[d] = self._comm.Irecv(
-                [self._to_recv[d], 1, self.mpi_type_diff_l[d]],
-                source=R_rk, tag=123 + R_rk + 19 * d)
-            recv_l[d] = self._comm.Irecv(
-                [self._to_recv[d], 1, self.mpi_type_diff_r[d]],
-                source=L_rk, tag=456 + L_rk + 17 * d)
-        for d in self._cutdir_list:
-            R_rk = topo.neighbours[1, d - first_cut_dir]
-            L_rk = topo.neighbours[0, d - first_cut_dir]
-            e_l[d].wait()
-            e_r[d].wait()
-            send_l[d] = self._comm.Issend(
-                [self._to_send[d], 1, self.mpi_type_diff_l[d]],
-                dest=L_rk, tag=123 + self._comm_rank + 19 * d)
-            send_r[d] = self._comm.Issend(
-                [self._to_send[d], 1, self.mpi_type_diff_r[d]],
-                dest=R_rk, tag=456 + self._comm_rank + 17 * d)
-
-        for d in self._cutdir_list:
-            # _to_recv[..., 0] contains [..., Nz] data (right ghosts)
-            # _to_recv[..., 1] contains [..., -1] data (left ghosts)
-            send_r[d].Wait()
-            send_l[d].Wait()
-            recv_r[d].Wait()
-            recv_l[d].Wait()
-            wait_evt.append(cl.enqueue_copy(self.cl_env.queue,
-                                            self._to_recv_buf[d],
-                                            self._to_recv[d],
-                                            is_blocking=False))
-        self.profiler['comm_diffusion'] += Wtime() - tc
-
-        if len(self._cutdir_list) == 1:
-            d_evt = self.num_diffusion(
-                self.field.gpu_data[0],
-                self._to_recv_buf[self._cutdir_list[0]],
-                self.field_tmp,
-                self.gpu_precision(self.viscosity * simulation.time_step),
-                self._mesh_size,
-                wait_for=wait_evt)
-        if len(self._cutdir_list) == 2:
-            d_evt = self.num_diffusion(
-                self.field.gpu_data[0],
-                self._to_recv_buf[self._cutdir_list[0]],
-                self._to_recv_buf[self._cutdir_list[1]],
-                self.field_tmp,
-                self.gpu_precision(self.viscosity * simulation.time_step),
-                self._mesh_size,
-                wait_for=wait_evt)
-        if len(self._cutdir_list) == 3:
-            d_evt = self.num_diffusion(
-                self.field.gpu_data[0],
-                self._to_recv_buf[self._cutdir_list[0]],
-                self._to_recv_buf[self._cutdir_list[1]],
-                self._to_recv_buf[self._cutdir_list[2]],
-                self.field_tmp,
-                self.gpu_precision(self.viscosity * simulation.time_step),
-                self._mesh_size,
-                wait_for=wait_evt)
-        #c_evt = cl.enqueue_copy(self.cl_env.queue, self.field.gpu_data[0],
-        #                        self.field_tmp, wait_for=[d_evt])
-        c_evt = self.copy.launch_sizes_in_args(
-            self.field.gpu_data[0], self.field_tmp, wait_for=[d_evt])
-        self.field.events.append(c_evt)
-
-    def apply(self, simulation):
-        self._compute(simulation)
-
-    def get_profiling_info(self):
-        for k in [self.num_diffusion, self.copy]:
-            if k is not None:
-                for p in k.profile:
-                    self.profiler += p
diff --git a/hysop/old/gpu.old/gpu_discrete.py b/hysop/old/gpu.old/gpu_discrete.py
deleted file mode 100644
index 51b4e43352c4f3b7fef6a9689358d805480b8ede..0000000000000000000000000000000000000000
--- a/hysop/old/gpu.old/gpu_discrete.py
+++ /dev/null
@@ -1,445 +0,0 @@
-"""Discrete field defined on device (GPU)
-"""
-from hysop import __VERBOSE__
-from hysop.constants import ORDER, np,\
-    debug, HYSOP_REAL, DirectionLabels
-from hysop.fields.discrete_field import DiscreteField
-from hysop.backend.device.opencl import cl, CL_PROFILE
-from hysop.backend.device.opencl.opencl_kernel import OpenClKernelLauncher, KernelListLauncher
-from hysop.tools.profiler import FProfiler
-
-fromLayoutMgrFunc_3D_seq = [
-    lambda a, shape: a.reshape(shape, order=ORDER)[...],
-    lambda a, shape: a.reshape(shape, order=ORDER).swapaxes(0, 1)[...],
-    lambda a, shape: a.reshape(
-        shape, order=ORDER).swapaxes(0, 2).swapaxes(0, 1)[...]
-    ]
-shapeFunc_3D_seq = [
-    lambda shape: (shape[0], shape[1], shape[2]),
-    lambda shape: (shape[1], shape[0], shape[2]),
-    lambda shape: (shape[2], shape[0], shape[1]),
-    ]
-toLayoutMgrFunc_3D_seq = [
-    lambda a: a.ravel(order=ORDER)[...],
-    lambda a: a.swapaxes(0, 1).ravel(order=ORDER)[...],
-    lambda a: a.swapaxes(0, 1).swapaxes(0, 2).ravel(order=ORDER)[...]
-    ]
-fromLayoutMgrFunc_3D = [
-    lambda a, shape: a.reshape(shape, order=ORDER)[...],
-    lambda a, shape: a.reshape(shape, order=ORDER).swapaxes(0, 1)[...],
-    lambda a, shape: a.reshape(shape, order=ORDER).swapaxes(0, 2)[...]
-    ]
-shapeFunc_3D = [
-    lambda shape: (shape[0], shape[1], shape[2]),
-    lambda shape: (shape[1], shape[0], shape[2]),
-    lambda shape: (shape[2], shape[1], shape[0]),
-    ]
-toLayoutMgrFunc_3D = [
-    lambda a: a.ravel(order=ORDER)[...],
-    lambda a: a.swapaxes(0, 1).ravel(order=ORDER)[...],
-    lambda a: a.swapaxes(0, 2).ravel(order=ORDER)[...]
-    ]
-
-fromLayoutMgrFunc_2D = [
-    lambda a, shape: a.reshape(shape, order=ORDER)[...],
-    lambda a, shape: a.reshape(shape, order=ORDER).swapaxes(0, 1)[...]
-    ]
-shapeFunc_2D = [
-    lambda shape: (shape[0], shape[1]),
-    lambda shape: (shape[1], shape[0])
-    ]
-toLayoutMgrFunc_2D = [
-    lambda a: a.ravel(order=ORDER)[...],
-    lambda a: a.swapaxes(0, 1).ravel(order=ORDER)[...]
-    ]
-
-
-fromLayoutMgrFunc_1D = [
-    lambda a, shape: a.reshape(shape, order=ORDER)[...],
-    ]
-shapeFunc_1D = [
-    lambda shape: shape
-    ]
-toLayoutMgrFunc_1D = [
-    lambda a: a.ravel(order=ORDER)[...]
-    ]
-
-
-class OpenClDiscreteField(DiscreteField):
-    """GPU Discrete vector field implementation.
-    Allocates OpenCL device memory for the field.
-    """
-    def __init__(self, cl_env, topology=None, is_vector=False, name="?",
-                 precision=HYSOP_REAL, layout=True, simple_layout=False):
-        """GPU Discrete vector field implementation.
-        Allocates OpenCL device memory for the field.
-
-        Parameters
-        ----------
-
-        queue : OpenCL queue
-        topology : :class:`~hysop.topology.topology.CartesianTopology`, optional
-            mpi topology and local meshes info
-        precision : np type, optional
-            Floating point precision,
-            default=:data:`~hysop.constants.HYSOP_REAL`
-        is_vector: boolean, optional
-            true if parent field is a vector field, default=False
-        name : string, optional
-            Field name
-        layour : boolean, optional
-            indicates if components are arranged in memory, default=True
-            i.e. all components are considered in the same way.
-        simple_layout : boolean, optional
-            indicates if in the Z direction, layout is ZYX (simple) or ZXY.
-        """
-        # init base class
-        super(OpenClDiscreteField, self).__init__(topology, is_vector, name)
-        # OpenCL environment
-        self.cl_env = cl_env
-        # Precision for the field
-        self.precision = precision
-        # Memory used
-        self.mem_size = 0
-        ## Initialization OpenCL kernel as OpenClKernelLauncher
-        self.init_kernel = None
-        self._isReleased = False
-        ## OpenCL Buffer pointer
-        self.gpu_data = [None] * self.nb_components
-        # True if device allocations have been done,
-        # (self.allocate call)
-        self.gpu_allocated = False
-        ## OpenCL Events list modifying this field
-        self.events = []
-
-        # Get the ids of processes involved in the field discretisation.
-        # Default = all, otherwise, get info from input topology if given.
-        if topology is None:
-            from hysop.core.mpi import main_rank
-            self._rank = main_rank
-        else:
-            self._rank = topology.rank
-
-        # Data layout is direction dependant
-        self.layout = layout
-        # Layout for the Z direction
-        self.simple_layout = simple_layout
-        
-        ## Layout and shape managers
-        dim = self.domain.dim
-        if dim == 3:
-            if self.simple_layout:
-                self._shapeFunc         = shapeFunc_3D
-                self._fromLayoutMgrFunc = fromLayoutMgrFunc_3D
-                self._toLayoutMgrFunc   = toLayoutMgrFunc_3D
-            else:
-                self._shapeFunc         = shapeFunc_3D_seq
-                self._fromLayoutMgrFunc = fromLayoutMgrFunc_3D_seq
-                self._toLayoutMgrFunc   = toLayoutMgrFunc_3D_seq
-        elif dim == 2:
-            self._shapeFunc         = shapeFunc_2D
-            self._fromLayoutMgrFunc = fromLayoutMgrFunc_2D
-            self._toLayoutMgrFunc   = toLayoutMgrFunc_2D
-        elif dim == 1:
-            self._shapeFunc         = shapeFunc_1D
-            self._fromLayoutMgrFunc = fromLayoutMgrFunc_1D
-            self._toLayoutMgrFunc   = toLayoutMgrFunc_1D
-        else:
-            msg = 'Shape functions not implemented yet for dimension {}!'.format(dim)
-            raise NotImplementedError(msg)
-
-        self.profiler += FProfiler("Transfer_to_host")
-        self.profiler += FProfiler("Transfer_to_device")
-        
-        # Transfer size counter (to device)
-        self.to_dev_size = 0.
-        # Transfer size counter (to host)
-        self.to_host_size = 0.
-
-        # Temporary cpu buffer to change data layout between cpu ang gpu
-        self.host_data_pinned = [None, ] * self.nb_components
-
-    def allocate(self):
-        """Device memory allocations no batch."""
-        if not self.gpu_allocated:
-            evt = [None, ] * self.nb_components
-            for d in xrange(self.nb_components):
-                # convert data to required precision
-                self.data[d] = np.asarray(self.data[d],
-                                          dtype=self.precision, order=ORDER)
-                # create on-device buffer
-                self.gpu_data[d] = self.cl_env.global_allocation(self.data[d])
-                # update memory counter
-                self.mem_size += self.gpu_data[d].size
-                self.host_data_pinned[d], evt[d] = cl.enqueue_map_buffer(
-                    self.cl_env.queue,
-                    self.gpu_data[d],
-                    offset=0, shape=(int(np.prod(self.data[0].shape)), ),
-                    flags=cl.map_flags.READ | cl.map_flags.WRITE,
-                    dtype=HYSOP_REAL, is_blocking=False, order=ORDER)
-            for d in xrange(self.nb_components):
-                evt[d].wait()
-            self.gpu_allocated = True
-            if __VERBOSE__:
-                print self.name, self.mem_size, "Bytes (",
-                print self.mem_size / (1024 ** 2), "MB)"
-            return True
-        else:
-            return False
-
-    @classmethod
-    def fromField(cls, cl_env, vfield, precision=HYSOP_REAL,
-                  layout=True, simple_layout=False):
-        """
-        Contructor from a discrete vector field.
-        Mutates the given VectorField to a GPUVectorField.
-        @param cls : Class of the class method (GPUVectorField)
-        @param queue : OpenCL queue
-        @param vfield : VectorField
-        @param precision : Floating point precision
-        @param layout : Boolean indicating if components are arranged in memory
-        @param simple_layout : Boolean indicating if in the Z direction,
-        """
-        if not isinstance(vfield, OpenClDiscreteField):
-            vfield.__class__ = cls
-            OpenClDiscreteField.__init__(
-                vfield, cl_env,
-                vfield.topology, vfield.nb_components > 1, vfield.name,
-                precision, layout, simple_layout)
-
-    def setInitializationKernel(self, kernel):
-        """
-        Set the initialization kernel
-        @param kernel : OpenClKernelLauncher to use for initialize field.
-        """
-        self.init_kernel = kernel
-
-    @debug
-    def dump(self, filename):
-        """
-        @remark Synchronized OpenCL calls (waiting for event(s) completion)
-        """
-        self.to_host()
-        self.wait()
-        DiscreteField.dump(self, filename)
-
-    @debug
-    def load(self, filename, fieldname=None):
-        """
-        @remark Synchronized OpenCL calls (waiting for event(s) completion)
-        """
-        DiscreteField.load(self, filename, fieldname)
-        self.to_device()
-
-    @debug
-    def initialize(self, formula=None, vectorize_formula=False, time=0.,
-                   *args):
-        """
-        GPU data initialization.
-        Performs the initialization from different ways if device not already
-        contains up-to-date data:
-          - with an OpenCL kernel,
-          - with a python formula (as VectorField) and the copy data to device.
-        @param formula : Formula to use.
-        @param args : formula extra parameters
-        @remark Synchronized OpenCL calls (waiting for event(s) completion)
-        """
-        t = self.precision(time)
-        if __VERBOSE__:
-            print "{" + str(self._rank) + "}", "Initialize", self.name
-        isGPUKernel = isinstance(formula, OpenClKernelLauncher) \
-            or isinstance(formula, KernelListLauncher)
-        if not isGPUKernel and self.init_kernel is None:
-            DiscreteField.initialize(self, formula, False, time, *args)
-            for d in xrange(self.nb_components):
-                self.data[d] = np.asarray(
-                    self.data[d],
-                    dtype=self.precision, order=ORDER)
-            self.to_device()
-        else:
-            if isGPUKernel:
-                self.init_kernel = formula
-            coord_min = np.ones(4, dtype=self.precision)
-            mesh_size = np.ones(4, dtype=self.precision)
-            coord_min[:self.dimension] = np.asarray(
-                self.topology.mesh.origin,
-                dtype=self.precision)
-            mesh_size[:self.dimension] = np.asarray(
-                self.topology.mesh.space_step,
-                dtype=self.precision)
-            if self.nb_components == 2:
-                evt = self.init_kernel(self.gpu_data[0],
-                                       self.gpu_data[1],
-                                       coord_min, mesh_size, t,
-                                       *args,
-                                       wait_for=self.events)
-            elif self.nb_components == 3:
-                evt = self.init_kernel(self.gpu_data[0],
-                                       self.gpu_data[1],
-                                       self.gpu_data[2],
-                                       coord_min, mesh_size, t,
-                                       *args,
-                                       wait_for=self.events)
-            else:
-                evt = self.init_kernel(self.gpu_data[0],
-                                       coord_min, mesh_size, t,
-                                       *args,
-                                       wait_for=self.events)
-            self.events.append(evt)
-
-    def finalize(self):
-        if not self._isReleased:
-            if __VERBOSE__:
-                print "deallocate :", self.name,
-                print " (" + str(self.mem_size / (1024. ** 2)) + " MBytes)"
-            self.wait()
-            for d in xrange(self.nb_components):
-                self.host_data_pinned[d].base.release(self.cl_env.queue)
-                self.cl_env.global_deallocation(self.gpu_data[d])
-            self._isReleased = True
-
-    def get_profiling_info(self):
-        if self.init_kernel is not None:
-            for p in self.init_kernel.profile:
-                self.profiler += p
-
-    def to_device(self, component=None, layoutDir=None):
-        """
-        Host to device method.
-        @param component : Component to consider (Default : all components)
-        @param layoutDir : layout to use
-        If the field have a layout per component, layoutDir is unused. Other
-        fields can be transfered with a given layout.
-
-        Performs a direct OpenCL copy from numpy arrays
-        to OpenCL Buffers.\n
-        Arrange memory on device so that vector components are
-        contiguous in the direction of the component, if layout flag is True.\n
-        Example : A 3D vector field F(x,y,z) is made up of 3
-        OpenCL Buffers Fx, Fy, Fz. The memory layout is :
-        - Fx : x-major ordering. On device,
-        Fx[i + j*WIDTH + k*WIDTH*HEIGHT] access to Fx(i,j,k)
-        - Fy : y-major ordering. On device,
-        Fy[i + j*WIDTH + k*WIDTH*HEIGHT] access to Fy(j,i,k)
-        - Fz : z-major ordering. On device,
-        Fz[i + j*WIDTH + k*WIDTH*HEIGHT] access to Fz(k,i,j)
-        """
-        if component is None:
-            range_components = xrange(self.nb_components)
-            evt = [None] * self.nb_components
-        else:
-            range_components = [component]
-            evt = [None]
-        self.wait()
-        mem_transfered = 0
-        for d_id, d in enumerate(range_components):
-            if self.layout:
-                layoutDir = d
-            if layoutDir is None:
-                layoutDir = 0
-            if __VERBOSE__:
-                print "{" + str(self._rank) + "}", "host->device :", \
-                    self.name, DirectionLabels[d], layoutDir
-            self.host_data_pinned[d][...] = \
-                self._toLayoutMgrFunc[layoutDir](self.data[d])
-            evt[d_id] = cl.enqueue_copy(
-                self.cl_env.queue, self.gpu_data[d], self.host_data_pinned[d],
-                is_blocking=False)
-            mem_transfered += self.gpu_data[d].size
-        for e in evt:
-            self.events.append(e)
-        time = 0.
-        self.to_dev_size += mem_transfered / (1024. ** 3)
-        if CL_PROFILE:
-            for e in evt:
-                if e is not None:
-                    e.wait()
-                    time += (e.profile.end - e.profile.start) * 1e-9
-                    self.profiler['Transfer_to_device'] += time
-
-            if __VERBOSE__ and time!=0:
-                print self.mem_size/(1024**2), "MBytes transfered at ",
-                print "{0:.3f} GBytes/sec".format(
-                    mem_transfered / (time * 1024 ** 3))
-
-    def to_host(self, component=None, layoutDir=None):
-        """
-        Device to host method.
-        @param component : Component to consider (Default : all components)
-        @param layoutDir : layout to use
-        If the field have a layout per component, layoutDir is unused. Other
-        fields can be transfered with a given layout.
-
-        Performs a direct OpenCL copy from OpenCL Buffers
-        to numpy arrays.\n
-        As memory layout, if set, is arranged on device, not only a
-        copy is performed but also transpositions to have numpy
-        arrays consistent to each other.
-        """
-        self.wait()
-        if component is None:
-            range_components = xrange(self.nb_components)
-            evt = [None] * self.nb_components
-        else:
-            range_components = [component]
-            evt = [None]
-
-        mem_transfered = 0
-        for d_id, d in enumerate(range_components):
-            if self.layout:
-                layoutDir = d
-            if layoutDir is None:
-                layoutDir = 0
-            if __VERBOSE__:
-                print "{" + str(self._rank) + "}", "device->host :", \
-                    self.name, DirectionLabels[d], layoutDir
-            evt[d_id] = cl.enqueue_copy(self.cl_env.queue,
-                                        self.host_data_pinned[d],
-                                        self.gpu_data[d],
-                                        wait_for=self.events,
-                                        is_blocking=False)
-            mem_transfered += self.gpu_data[d].size
-        for d_id, d in enumerate(range_components):
-            shape = self._shapeFunc[layoutDir](self.data[d].shape)
-            evt[d_id].wait()
-            self.data[d][...] = self._fromLayoutMgrFunc[layoutDir](
-                self.host_data_pinned[d], shape)
-        for e in evt:
-            self.events.append(e)
-        time = 0.
-        self.to_host_size += mem_transfered / (1024. ** 3)
-        if CL_PROFILE:
-            for e in evt:
-                if e is not None:
-                    e.wait()
-                    time += (e.profile.end - e.profile.start) * 1e-9
-            self.profiler['Transfer_to_host'] += time
-            if __VERBOSE__ and time!=0:
-                print self.mem_size/(1024**2), "MBytes transfered at ",
-                print "{0:.3f} GBytes/sec".format(
-                    mem_transfered / (time * 1024 ** 3))
-
-    def wait(self):
-        """
-        Waiting for all events completion in the field list.
-        Resets the events list.
-        """
-        if __VERBOSE__:
-            print "{" + str(self._rank) + "}", "Wait events :", self.name
-        for e in self.events:
-            e.wait()
-        self.events = []
-
-    def clean_events(self):
-        """
-        Waiting for all events completion in the field list.
-        Resets the events list.
-        """
-        if __VERBOSE__:
-            print "{" + str(self._rank) + "}", "Clean events :", \
-                self.name, len(self.events)
-        c = cl.command_execution_status.COMPLETE
-        for e in self.events:
-            e.wait()
-        self.events = [e for e in self.events
-                       if e.command_execution_status != c]
diff --git a/hysop/old/gpu.old/gpu_kernel.py b/hysop/old/gpu.old/gpu_kernel.py
deleted file mode 100644
index 7107623728ff2193154637f5bd6c6d8fafed219f..0000000000000000000000000000000000000000
--- a/hysop/old/gpu.old/gpu_kernel.py
+++ /dev/null
@@ -1,176 +0,0 @@
-"""
-"""
-from hysop.constants import debug, DirectionLabels
-from hysop import __VERBOSE__, __DEBUG__
-from hysop.backend.device.opencl import cl, CL_PROFILE
-from hysop.tools.profiler import FProfiler
-
-
-class KernelListLauncher(object):
-    """OpenCL kernel list launcher.
-
-    Manage launching of OpenCL kernels as a list.
-    """
-    @debug
-    def __init__(self, kernel, queue, gsize, lsize=None):
-        """Create a kernel list launcher.
-
-        Parameters
-        ----------
-
-        kernel : list
-        queue : OpenCL command queue
-        gsize : int
-            OpenCL global size index.
-        lsize : int, optional
-            OpenCL local size index.
-        """
-        ## OpenCL Kernel list
-        self.kernel = kernel
-        #print [k.function_name for k in self.kernel]
-        ## OpenCL command queue
-        self.queue = queue
-        ## OpenCL global size index.
-        self.global_size = gsize
-        ## OpenCL local size index.
-        self.local_size = lsize
-        if CL_PROFILE:
-            if len(self.kernel) == 1:
-                try:
-                    self.profile = [FProfiler("OpenCL_" + k.function_name)
-                                    for k in self.kernel]
-                except AttributeError:
-                    self.profile = [FProfiler("OpenCL_" + k.__name__)
-                                    for k in self.kernel]
-            else:
-                self.profile = [
-                    FProfiler("OpenCL_" + k.function_name + DirectionLabels[d])
-                    for d, k in enumerate(self.kernel)]
-        else:
-            self.profile = []
-
-    @debug
-    def __call__(self, d, *args, **kwargs):
-        """
-        Launch a kernel.
-
-        OpenCL global size and local sizes are not given in
-        args. Class member are used.
-
-        @param d : kernel index in kernel list.
-        @param args : kernel arguments.
-        @return OpenCL Event
-        """
-        return KernelListLauncher.launch_sizes_in_args(
-            self, d, self.global_size[d], self.local_size[d], *args, **kwargs)
-
-    def _kernel_name(self,d):
-        try:
-            kname = self.kernel[d].function_name
-        except AttributeError:
-            kname = self.kernel[d].__name__
-        return kname
-
-
-    @debug
-    def launch_sizes_in_args(self, d, *args, **kwargs):
-        """
-        Launch a kernel.
-
-        Opencl global and local sizes are given in args.
-
-        @param d : kernel index in kernel list.
-        @param args : kernel arguments.
-        @return OpenCL Event.
-        """
-        if __VERBOSE__:
-            try:
-                print '  ',self.kernel[d].function_name, d, args[0], args[1]
-                # print "\targs: ",args[2:]
-                # print "\tkwargs: ", kwargs
-            except AttributeError:
-                print '  ', self.kernel[d].__name__
-        evt = self.kernel[d](self.queue, *args, **kwargs)
-        if CL_PROFILE:
-            evt.wait()
-            self.profile[d] += (evt.profile.end - evt.profile.start) * 1e-9
-        if __VERBOSE__:
-            try:
-                evt.wait()
-                status = evt.command_execution_status 
-                if status < 0:
-                    msg='\n/!\ Execution error {} while waiting kernel {} to finish /!\\\n'.format(status,self._kernel_name(d))
-                    raise RuntimeError(msg)
-            except Exception, e:
-                msg='\n/!\ Unknown execution error while waiting kernel {} to finish /!\\\n'.format(self._kernel_name(d))
-                print msg
-                raise e
-
-
-        return evt
-
-    def function_name(self, d=None):
-        """Prints OpenCL Kernels function names informations"""
-        if d is not None:
-            kernel = self.kernel[d]
-            try:
-                name = kernel.function_name
-            except:
-                name = kernel.__name__
-            return name
-        else:
-            return [self.function_name(d)
-                    for d in xrange(len(self.kernel))]
-
-
-class OpenClKernelLauncher(KernelListLauncher):
-    """
-    OpenCL kernel launcher.
-
-    Manage launching of one OpenCL kernel as a KernelListLauncher
-    with a list of one kernel.
-    """
-    @debug
-    def __init__(self, kernel, queue, gsize=None, lsize=None):
-        """
-        Create a OpenClKernelLauncher.
-
-        Create a KernelListLauncher with a list of one kernel.
-
-        @param kernel : kernel.
-        @param queue : OpenCL command queue.
-        @param gsize : OpenCL global size index.
-        @param lsize : OpenCL local size index.
-        """
-        KernelListLauncher.__init__(self, [kernel], queue, [gsize], [lsize])
-
-    @debug
-    def launch_sizes_in_args(self, *args, **kwargs):
-        """
-        Launch the kernel.
-
-        Opencl global and local sizes are given in args.
-
-        @param args : kernel arguments.
-        @return OpenCL Event.
-        """
-        return KernelListLauncher.launch_sizes_in_args(
-            self, 0, *args, **kwargs)
-
-    @debug
-    def __call__(self, *args, **kwargs):
-        """
-        Launch the kernel.
-
-        OpenCL global size and local sizes are not given in args.
-        Class member are used.
-
-        @param args : kernel arguments.
-        @return OpenCL Event
-        """
-        return KernelListLauncher.__call__(self, 0, *args, **kwargs)
-
-    def function_name(self):
-        """Prints OpenCL Kernel function name informations"""
-        res = KernelListLauncher.function_name(self, 0)
-        return res
diff --git a/hysop/old/gpu.old/gpu_multiphase_baroclinic_rhs.py b/hysop/old/gpu.old/gpu_multiphase_baroclinic_rhs.py
deleted file mode 100644
index 6e5dbc0e03565f96b346e5259e4c37af6d1f4b2c..0000000000000000000000000000000000000000
--- a/hysop/old/gpu.old/gpu_multiphase_baroclinic_rhs.py
+++ /dev/null
@@ -1,422 +0,0 @@
-"""
-@file multiphase_baroclinic_rhs.py
-
-Multiscale baroclinic term on GPU
-"""
-from hysop.constants import debug, np, DirectionLabels, hysop.core.mpi_REAL, ORDERMPI, \
-    HYSOP_REAL, ORDER
-from hysop.tools.numpywrappers import npw
-from hysop.operator.discrete.discrete import DiscreteOperator
-from hysop.operator.discrete.discrete import get_extra_args_from_method
-from hysop.backend.device.opencl import cl
-from hysop.backend.device.opencl.gpu_operator import GPUOperator
-from hysop.backend.device.opencl.opencl_kernel import KernelListLauncher
-from hysop.backend.device.opencl.opencl_discrete import OpenClDiscreteField
-from hysop.tools.profiler import FProfiler
-from hysop.core.mpi import Wtime
-from hysop.methods import SpaceDiscretization
-from hysop.numerics.finite_differences import FDC4
-
-
-class BaroclinicRHS(DiscreteOperator, GPUOperator):
-    """
-    This operator computes the right hand side of the baroclinic term:
-    \f{eqnarray*}
-    \frac{\partial \omega}{\partial t} &=& -\frac{\nabla \rho}{\rho} \times \left(-\frac{\nabla P}{\rho}\right)
-    \f}
-    This operator works only in a multiscale context and from an input variable containing the
-    \f{eqnarray*}
-    \left(-\frac{\nabla P}{\rho}\right)
-    \f} term. This term needs to be interpolated at fine scale. The implementation
-    of such an interpolation is shared memory consuming since each component is to be interpolated.
-    We introduce a splitting of the whole term from the different compontents of the pressure gradient term:
-    \f{eqnarray*}
-    -A\times B =
-  \begin{pmatrix}
-    0 \\
-    -A_Z B_X\\
-    A_Y B_X
-  \end{pmatrix}+
-  \begin{pmatrix}
-     A_Z B_Y \\
-    0 \\
-    -A_X B_Y
-  \end{pmatrix}+
-  \begin{pmatrix}
-    -A_Y B_Z  \\
-     A_X B_Z \\
-    0
-  \end{pmatrix}
-    \f}
-    Finally, the density field may came from a levelset function.
-    By default we assume that the density itself is given in input.
-    In some cases, user can give a custom function to compute the
-    density from the levelset points by points.
-    """
-    @debug
-    def __init__(self, rhs, rho, gradp, **kwds):
-        super(BaroclinicRHS, self).__init__(
-            variables=[rhs, rho, gradp], **kwds)
-        self.rhs = rhs
-        self.rho = rho
-        self.gradp = gradp
-        self.input = [self.rho, self.gradp]
-        self.output = [self.rhs, ]
-        self.direction = 0
-        self._cl_work_size = 0
-
-        GPUOperator.__init__(
-            self,
-            platform_id=get_extra_args_from_method(self, 'platform_id', None),
-            device_id=get_extra_args_from_method(self, 'device_id', None),
-            device_type=get_extra_args_from_method(self, 'device_type', None),
-            **kwds)
-
-        # GPU allocation.
-        for field in self.variables:
-            alloc = not isinstance(field, OpenClDiscreteField)
-            OpenClDiscreteField.fromField(self.cl_env, field,
-                                       self.gpu_precision, layout=False)
-            if not field.gpu_allocated:
-                field.allocate()
-            if alloc:
-                self.size_global_alloc += field.mem_size
-
-        topo_coarse = self.gradp.topology
-        topo_fine = self.rho.topology
-        self._cutdir_list = np.where(topo_coarse.cutdir)[0].tolist()
-        self._comm = topo_coarse.comm
-        self._comm_size = self._comm.Get_size()
-        self._comm_rank = self._comm.Get_rank()
-        if self._comm_size > 1:
-            self._to_send = [None] * self.dim
-            self._to_recv_buf = [None] * self.dim
-            self._to_recv = [None] * self.dim
-            self._pitches_host = [None] * self.dim
-            self._pitches_buff = [None] * self.dim
-            self._region_size = [None] * self.dim
-            self._r_orig = [None] * self.dim
-            self._br_orig = [None] * self.dim
-            self.mpi_type_diff_l = {}
-            self.mpi_type_diff_r = {}
-            self.profiler += FProfiler('comm_baroclinic_rhs')
-            if 0 in self._cutdir_list:
-                raise ValueError("Not yet implemented with comm in X dir")
-            for d in self._cutdir_list:
-                if self.method[SpaceDiscretization] == FDC4:
-                    gh = 2
-                else:
-                    gh = 1
-                shape = list(self.rho.data[0].shape)
-                shape_b = list(self.rho.data[0].shape)
-                start_l = [0, ] * 3
-                start_r = [0, ] * 3
-                start_r[d] = gh
-                shape[d] = 2 * gh
-                shape_b[d] = gh
-                # _to_send[..., 0] contains [..., 0] data
-                # _to_send[..., 1] contains [..., Nz-1] data
-                # _to_recv[..., 0] contains [..., Nz] data (right ghosts)
-                # _to_recv[..., 1] contains [..., -1] data (left ghosts)
-                self._to_send[d] = npw.zeros(tuple(shape))
-                _to_recv = npw.zeros(tuple(shape))
-                self.mpi_type_diff_l[d] = hysop.core.mpi_REAL.Create_subarray(
-                    shape, shape_b, start_l, order=ORDERMPI)
-                self.mpi_type_diff_l[d].Commit()
-                self.mpi_type_diff_r[d] = hysop.core.mpi_REAL.Create_subarray(
-                    shape, shape_b, start_r, order=ORDERMPI)
-                self.mpi_type_diff_r[d].Commit()
-                self._to_recv_buf[d] = self.cl_env.global_allocation(_to_recv)
-                self._to_recv[d], evt = cl.enqueue_map_buffer(
-                    self.cl_env.queue,
-                    self._to_recv_buf[d],
-                    offset=0,
-                    shape=shape,
-                    dtype=HYSOP_REAL,
-                    flags=cl.map_flags.READ | cl.map_flags.WRITE,
-                    is_blocking=False,
-                    order=ORDER)
-                evt.wait()
-                self._pitches_host[d] = (int(self._to_send[d][:, 0, 0].nbytes),
-                                         int(self._to_send[d][:, :, 0].nbytes))
-                self._pitches_buff[d] = (int(self.rho.data[0][:, 0, 0].nbytes),
-                                         int(self.rho.data[0][:, :, 0].nbytes))
-                cl.enqueue_copy(
-                    self.cl_env.queue,
-                    self._to_recv_buf[d],
-                    self._to_recv[d],
-                    buffer_origin=(0, 0, 0),
-                    host_origin=(0, 0, 0),
-                    region=(self._to_recv[d][0, 0, 0].nbytes, )).wait()
-                self._cl_work_size += self._to_recv[d].nbytes
-
-                r_orig = [0, ] * self.dim
-                br_orig = [0, ] * self.dim
-                r_orig[d] = self.rho.data[0].shape[d] - gh
-                br_orig[d] = gh
-                if d == 0:
-                    r_orig[d] *= self._to_send[d][0, 0, 0].nbytes
-                    br_orig[d] *= self._to_send[d][0, 0, 0].nbytes
-                self._r_orig[d] = tuple(r_orig)
-                self._br_orig[d] = tuple(br_orig)
-                l_sl = [slice(None), ] * 3
-                r_sl = [slice(None), ] * 3
-                l_sl[d] = slice(0, gh)
-                r_sl[d] = slice(gh, 2 * gh)
-                l_sl = tuple(l_sl)
-                r_sl = tuple(r_sl)
-                self._region_size[d] = list(self.rho.data[0].shape)
-                if d == 0:
-                    self._region_size[d][0] = self._to_send[d][0, 0, 0].nbytes
-                else:
-                    self._region_size[d][0] = self._to_send[d][:, 0, 0].nbytes
-                    self._region_size[d][d] = gh
-            self._compute = self._compute_baroclinic_rhs_comm
-            if len(self._cutdir_list) == 1:
-                self._call_kernel = self._call_kernel_one_ghost
-            if len(self._cutdir_list) == 2:
-                self._call_kernel = self._call_kernel_two_ghost
-        else:
-            self._compute = self._compute_baroclinic_rhs
-
-        self._coarse_mesh_size = npw.ones(4, dtype=self.gpu_precision)
-        self._fine_mesh_size = npw.ones(4, dtype=self.gpu_precision)
-        self._coarse_mesh_size[:self.dim] = \
-            self._reorderVect(topo_coarse.mesh.space_step)
-        self._fine_mesh_size[:self.dim] = \
-            self._reorderVect(topo_fine.mesh.space_step)
-
-        shape_coarse = topo_coarse.mesh.local_resolution
-        resol_coarse = shape_coarse.copy()
-        self.resol_coarse = npw.dim_ones((self.dim,))
-        self.resol_coarse[:self.dim] = self._reorderVect(shape_coarse)
-        self._append_size_constants(resol_coarse, prefix='NB_C')
-        compute_coarse = shape_coarse - 2 * topo_coarse.ghosts()
-        shape_fine = topo_fine.mesh.local_resolution
-        resol_fine = shape_fine.copy()
-        self.resol_fine = npw.dim_ones((self.dim,))
-        self.resol_fine[:self.dim] = self._reorderVect(shape_fine)
-        self._append_size_constants(resol_fine, prefix='NB_F')
-        compute_fine = shape_fine
-        pts_per_cell = compute_fine / compute_coarse
-        assert np.all(pts_per_cell == pts_per_cell[0]), \
-            "Resolutions ratio must be the same in all directions"
-        pts_per_cell = pts_per_cell[0]
-        if pts_per_cell == 1:
-            raise ValueError('Not yet implemented for single scale')
-
-        src, tile_size_c, vec, f_space = \
-            self._kernel_cfg['multiphase_baroclinic']
-        tile_size_f = tile_size_c * pts_per_cell
-
-        self._append_size_constants(topo_coarse.ghosts(), prefix='GHOSTS_C')
-        build_options = self._size_constants
-        build_options += " -D C_TILE_SIZE=" + str(tile_size_c)
-        build_options += " -D C_TILE_WIDTH=" + str(
-            tile_size_c + 2 * topo_coarse.ghosts()[0])
-        build_options += " -D C_TILE_HEIGHT=" + str(
-            tile_size_c + 2 * topo_coarse.ghosts()[1])
-        build_options += " -D F_TILE_SIZE=" + str(tile_size_f)
-        build_options += " -D N_PER_CELL=" + str(pts_per_cell)
-        for d in xrange(self.dim):
-            build_options += " -D CUT_DIR" + DirectionLabels[d] + "="
-            build_options += str(1 if topo_coarse.shape[d] > 1 else 0)
-        build_options += " -D FD_ORDER=" + \
-            str(self.method[SpaceDiscretization].__name__)
-        build_options += " -D GRADP_COMP=__GRADP_COMPONENT__"
-        macros = {'__USER_DENSITY_FUNCTION_FROM_GIVEN_INPUT__':
-                  get_extra_args_from_method(self, 'density_func', 'x')}
-
-        gwi, lwi = f_space(compute_coarse, tile_size_c)
-        clkernel_baroclinic = [None, ] * self.dim
-        for i in xrange(self.dim):
-            prg = self.cl_env.build_src(
-                src, build_options.replace('__GRADP_COMPONENT__', str(i)),
-                vec, macros=macros)
-            clkernel_baroclinic[i] = prg.baroclinic_rhs
-        self.num_baroclinic = KernelListLauncher(
-            clkernel_baroclinic, self.cl_env.queue,
-            [gwi, ] * self.dim, [lwi, ] * self.dim)
-
-    def _compute_baroclinic_rhs(self, simulation):
-        """Launch kernels without communication"""
-        wait_evt = self.rhs.events + self.gradp.events + self.rho.events
-        evt_x = self.num_baroclinic(
-            0,
-            self.rhs.gpu_data[0],
-            self.rhs.gpu_data[1],
-            self.rhs.gpu_data[2],
-            self.rho.gpu_data[0],
-            self.gradp.gpu_data[0],
-            self._coarse_mesh_size,
-            self._fine_mesh_size,
-            wait_for=wait_evt)
-        evt_y = self.num_baroclinic(
-            1,
-            self.rhs.gpu_data[0],
-            self.rhs.gpu_data[1],
-            self.rhs.gpu_data[2],
-            self.rho.gpu_data[0],
-            self.gradp.gpu_data[1],
-            self._coarse_mesh_size,
-            self._fine_mesh_size,
-            wait_for=wait_evt + [evt_x])
-        evt_z = self.num_baroclinic(
-            2,
-            self.rhs.gpu_data[0],
-            self.rhs.gpu_data[1],
-            self.rhs.gpu_data[2],
-            self.rho.gpu_data[0],
-            self.gradp.gpu_data[2],
-            self._coarse_mesh_size,
-            self._fine_mesh_size,
-            wait_for=wait_evt + [evt_y])
-        self.rhs.events.append(evt_z)
-
-    def _compute_baroclinic_rhs_comm(self, simulation):
-        """Compute operator with communications"""
-        tc = Wtime()
-        topo = self.rho.topology
-        first_cut_dir = topo.cutdir.tolist().index(True)
-        wait_evt = []
-        send_l = [None, ] * self.dim
-        send_r = [None, ] * self.dim
-        recv_l = [None, ] * self.dim
-        recv_r = [None, ] * self.dim
-        e_l = [None, ] * self.dim
-        e_r = [None, ] * self.dim
-        for d in self._cutdir_list:
-            wait_events = self.rho.events
-            e_l[d] = cl.enqueue_copy(self.cl_env.queue, self._to_send[d],
-                                     self.rho.gpu_data[0],
-                                     host_origin=(0, 0, 0),
-                                     buffer_origin=(0, 0, 0),
-                                     host_pitches=self._pitches_host[d],
-                                     buffer_pitches=self._pitches_buff[d],
-                                     region=tuple(self._region_size[d]),
-                                     wait_for=wait_events,
-                                     is_blocking=False)
-            e_r[d] = cl.enqueue_copy(self.cl_env.queue, self._to_send[d],
-                                     self.rho.gpu_data[0],
-                                     host_origin=self._br_orig[d],
-                                     buffer_origin=self._r_orig[d],
-                                     host_pitches=self._pitches_host[d],
-                                     buffer_pitches=self._pitches_buff[d],
-                                     region=tuple(self._region_size[d]),
-                                     wait_for=wait_events,
-                                     is_blocking=False)
-
-        for d in self._cutdir_list:
-            # MPI send
-            R_rk = topo.neighbours[1, d - first_cut_dir]
-            L_rk = topo.neighbours[0, d - first_cut_dir]
-            recv_r[d] = self._comm.Irecv(
-                [self._to_recv[d], 1, self.mpi_type_diff_l[d]],
-                source=R_rk, tag=123 + R_rk + 19 * d)
-            recv_l[d] = self._comm.Irecv(
-                [self._to_recv[d], 1, self.mpi_type_diff_r[d]],
-                source=L_rk, tag=456 + L_rk + 17 * d)
-        for d in self._cutdir_list:
-            R_rk = topo.neighbours[1, d - first_cut_dir]
-            L_rk = topo.neighbours[0, d - first_cut_dir]
-            e_l[d].wait()
-            e_r[d].wait()
-            send_l[d] = self._comm.Issend(
-                [self._to_send[d], 1, self.mpi_type_diff_l[d]],
-                dest=L_rk, tag=123 + self._comm_rank + 19 * d)
-            send_r[d] = self._comm.Issend(
-                [self._to_send[d], 1, self.mpi_type_diff_r[d]],
-                dest=R_rk, tag=456 + self._comm_rank + 17 * d)
-
-        for d in self._cutdir_list:
-            # _to_recv[..., 0] contains [..., Nz] data (right ghosts)
-            # _to_recv[..., 1] contains [..., -1] data (left ghosts)
-            send_r[d].Wait()
-            send_l[d].Wait()
-            recv_r[d].Wait()
-            recv_l[d].Wait()
-            wait_evt.append(cl.enqueue_copy(self.cl_env.queue,
-                                            self._to_recv_buf[d],
-                                            self._to_recv[d],
-                                            is_blocking=False))
-        self.profiler['comm_baroclinic_rhs_comm'] += Wtime() - tc
-        self.rhs.events.append(self._call_kernel(wait_evt))
-
-    def _call_kernel_one_ghost(self, wait_evt):
-        """Launch kernels with one directions of communication (Z)"""
-        evt_x = self.num_baroclinic(
-            0,
-            self.rhs.gpu_data[0],
-            self.rhs.gpu_data[1],
-            self.rhs.gpu_data[2],
-            self.rho.gpu_data[0],
-            self._to_recv_buf[self._cutdir_list[0]],
-            self.gradp.gpu_data[0],
-            self._coarse_mesh_size,
-            self._fine_mesh_size,
-            wait_for=wait_evt)
-        evt_y = self.num_baroclinic(
-            1,
-            self.rhs.gpu_data[0],
-            self.rhs.gpu_data[1],
-            self.rhs.gpu_data[2],
-            self.rho.gpu_data[0],
-            self._to_recv_buf[self._cutdir_list[0]],
-            self.gradp.gpu_data[1],
-            self._coarse_mesh_size,
-            self._fine_mesh_size,
-            wait_for=wait_evt + [evt_x])
-        return self.num_baroclinic(
-            2,
-            self.rhs.gpu_data[0],
-            self.rhs.gpu_data[1],
-            self.rhs.gpu_data[2],
-            self.rho.gpu_data[0],
-            self._to_recv_buf[self._cutdir_list[0]],
-            self.gradp.gpu_data[2],
-            self._coarse_mesh_size,
-            self._fine_mesh_size,
-            wait_for=wait_evt + [evt_y])
-
-    def _call_kernel_two_ghost(self, wait_evt):
-        """Launch kernels with two directions of communication (Y and Z)"""
-        evt_x = self.num_baroclinic(
-            0,
-            self.rhs.gpu_data[0],
-            self.rhs.gpu_data[1],
-            self.rhs.gpu_data[2],
-            self.rho.gpu_data[0],
-            self._to_recv_buf[self._cutdir_list[0]],
-            self._to_recv_buf[self._cutdir_list[1]],
-            self.gradp.gpu_data[0],
-            self._coarse_mesh_size,
-            self._fine_mesh_size,
-            wait_for=wait_evt)
-        evt_y = self.num_baroclinic(
-            1,
-            self.rhs.gpu_data[0],
-            self.rhs.gpu_data[1],
-            self.rhs.gpu_data[2],
-            self.rho.gpu_data[0],
-            self._to_recv_buf[self._cutdir_list[0]],
-            self._to_recv_buf[self._cutdir_list[1]],
-            self.gradp.gpu_data[1],
-            self._coarse_mesh_size,
-            self._fine_mesh_size,
-            wait_for=wait_evt + [evt_x])
-        return self.num_baroclinic(
-            2,
-            self.rhs.gpu_data[0],
-            self.rhs.gpu_data[1],
-            self.rhs.gpu_data[2],
-            self.rho.gpu_data[0],
-            self._to_recv_buf[self._cutdir_list[0]],
-            self._to_recv_buf[self._cutdir_list[1]],
-            self.gradp.gpu_data[2],
-            self._coarse_mesh_size,
-            self._fine_mesh_size,
-            wait_for=wait_evt + [evt_y])
-
-
-    def apply(self, simulation):
-        self._compute(simulation)
diff --git a/hysop/old/gpu.old/gpu_multiresolution_filter.py b/hysop/old/gpu.old/gpu_multiresolution_filter.py
deleted file mode 100644
index 6fdd83eb799f8dbbb9c3afa6fe2ef20d49469955..0000000000000000000000000000000000000000
--- a/hysop/old/gpu.old/gpu_multiresolution_filter.py
+++ /dev/null
@@ -1,324 +0,0 @@
-"""
-@file gpu/multiresolution_filter.py
-Filter values from a fine grid to a coarse grid.
-GPU version.
-"""
-from hysop.constants import debug, np
-from hysop.tools.numpywrappers import npw
-from hysop.operator.discrete.multiresolution_filter import FilterFineToCoarse
-from hysop.backend.device.opencl.gpu_operator import GPUOperator
-from hysop.operator.discrete.discrete import get_extra_args_from_method
-from hysop.backend.device.opencl.opencl_discrete import OpenClDiscreteField
-from hysop.backend.device.opencl.opencl_kernel import OpenClKernelLauncher
-from hysop.methods import Remesh
-
-
-class GPUFilterFineToCoarse(FilterFineToCoarse, GPUOperator):
-    """
-    Discretized operator for filtering from fine to coarse grid on GPU.
-    """
-    @debug
-    def __init__(self, field_in, field_out, **kwds):
-        super(GPUFilterFineToCoarse, self).__init__(
-            field_in, field_out, **kwds)
-        assert len(self.field_in) == 1 and len(self.field_out) == 1, \
-            "This operator is implemented only for single field"
-        self.direction = 0
-        self._cl_work_size = 0
-        gh_in = self.field_in[0].topology.ghosts()
-        resol_in = self._mesh_in.resolution - 2 * gh_in
-        resol_out = self._mesh_out.resolution - 2 * self.gh_out
-        pts_per_cell = resol_in / resol_out
-
-        GPUOperator.__init__(
-            self,
-            platform_id=get_extra_args_from_method(self, 'platform_id', None),
-            device_id=get_extra_args_from_method(self, 'device_id', None),
-            device_type=get_extra_args_from_method(self, 'device_type', None),
-            **kwds)
-
-        #GPU allocations
-        alloc = not isinstance(self.field_in[0], OpenClDiscreteField)
-        OpenClDiscreteField.fromField(self.cl_env, self.field_in[0],
-                                   self.gpu_precision, layout=False)
-        if not self.field_in[0].gpu_allocated:
-            self.field_in[0].allocate()
-        if alloc:
-            self.size_global_alloc += self.field_in[0].mem_size
-
-        alloc = not isinstance(self.field_out[0], OpenClDiscreteField)
-        OpenClDiscreteField.fromField(self.cl_env, self.field_out[0],
-                                   self.gpu_precision, layout=False)
-        if not self.field_out[0].gpu_allocated:
-            self.field_out[0].allocate()
-        if alloc:
-            self.size_global_alloc += self.field_out[0].mem_size
-
-        topo_in = self.field_in[0].topology
-        topo_out = self.field_out[0].topology
-
-        self._mesh_size_in = npw.ones(4, dtype=self.gpu_precision)
-        self._mesh_size_in[:self.dim] = \
-            self._reorderVect(topo_in.mesh.space_step)
-        self._mesh_size_out = npw.ones(4, dtype=self.gpu_precision)
-        self._mesh_size_out[:self.dim] = \
-            self._reorderVect(topo_out.mesh.space_step)
-        self._domain_origin = npw.ones(4, dtype=self.gpu_precision)
-        self._domain_origin[:self.dim] = self.field_in[0].domain.origin
-        shape_in = topo_in.mesh.local_resolution
-        shape_out = topo_out.mesh.local_resolution
-        resol_in = shape_in.copy()
-        resol_out = shape_out.copy()
-        self.resol_in = npw.dim_ones((self.dim,))
-        self.resol_in[:self.dim] = self._reorderVect(shape_in)
-        self.resol_out = npw.dim_ones((self.dim,))
-        self.resol_out[:self.dim] = self._reorderVect(shape_out)
-        self._append_size_constants(resol_in, prefix='NB_IN')
-        self._append_size_constants(resol_out, prefix='NB_OUT')
-        self._append_size_constants(self.gh_out, prefix='GHOSTS_OUT')
-        self._append_size_constants(pts_per_cell, prefix='PTS_PER_CELL')
-
-        # multi-gpu ghosts buffers for communication
-        if self._comm_size == 1:
-            self._exchange_ghosts = self._exchange_ghosts_local
-        else:
-            self._exchange_ghosts = self._exchange_ghosts_mpi
-            self._gh_from_l = [None] * self.dim
-            self._gh_from_r = [None] * self.dim
-            self._gh_to_l = [None] * self.dim
-            self._gh_to_r = [None] * self.dim
-            # self._mpi_to_l = [None] * self.dim
-            # self._mpi_to_r = [None] * self.dim
-            for d in self._cutdir_list:
-                shape = list(self.field_out[0].data[0].shape)
-                shape[d] = self.gh_out[d]
-                self._gh_from_l[d] = npw.zeros(tuple(shape))
-                self._gh_from_r[d] = npw.zeros(tuple(shape))
-                self._gh_to_l[d] = npw.zeros(tuple(shape))
-                self._gh_to_r[d] = npw.zeros(tuple(shape))
-
-        # # Ghosts temp arrays for the second version of ghosts exchange
-        # self.gh_x = npw.zeros((4 * self.gh_out[0], shape_out[1], shape_out[2]))
-        # self.gh_y = npw.zeros((shape_out[0], 4 * self.gh_out[1], shape_out[2]))
-        # self.gh_z = npw.zeros((shape_out[0], shape_out[1], 4 * self.gh_out[2]))
-        # print self.gh_x.shape, self.gh_y.shape, self.gh_z.shape
-        # self._pitches_host_x = (int(self.gh_x[:, 0, 0].nbytes),
-        #                         int(self.gh_x[:, :, 0].nbytes))
-        # self._pitches_host_y = (int(self.gh_y[:, 0, 0].nbytes),
-        #                         int(self.gh_y[:, :, 0].nbytes))
-        # self._pitches_host_z = (int(self.gh_z[:, 0, 0].nbytes),
-        #                         int(self.gh_z[:, :, 0].nbytes))
-        # self._pitches_buff = (int(self.field_out[0].data[0][:, 0, 0].nbytes),
-        #                       int(self.field_out[0].data[0][:, :, 0].nbytes))
-
-        src, vec, f_space = \
-            self._kernel_cfg['fine_to_coarse_filter']
-        build_options = self._size_constants
-        self._rmsh = self.method[Remesh]()
-        gwi, lwi = f_space(self.field_out[0].data[0].shape -
-                           2 * topo_out.ghosts(), len(self._rmsh.weights))
-        build_options += " -D L_STENCIL=" + str(len(self._rmsh.weights))
-        build_options += " -D SHIFT_STENCIL=" + str(self._rmsh.shift)
-        build_options += " -D WG=" + str(lwi[0])
-        build_options += " -D FORMULA=" + self.method[Remesh].__name__.upper()
-        prg = self.cl_env.build_src(src, build_options, vec)
-        self.fine_to_coarse = OpenClKernelLauncher(
-            prg.coarse_to_fine_filter, self.cl_env.queue, gwi, lwi)
-        self.initialize = OpenClKernelLauncher(
-            prg.initialize_output, self.cl_env.queue,
-            self.field_out[0].data[0].shape, None)
-        self._evts = [None, ] * self.field_in[0].dimension
-
-    def apply(self, simulation=None):
-        #evts = []
-        self.field_in[0].toHost()
-        self.field_in[0].wait()
-        for d in xrange(self.field_in[0].nb_components):
-            self._evts[d] = []
-            self._evts[d].append(
-                self.initialize(self.field_out[0].gpu_data[d],
-                                wait_for=self.field_out[0].events))
-        for iy in xrange(len(self._rmsh.weights)):
-            for iz in xrange(len(self._rmsh.weights)):
-                for d in xrange(self.field_in[0].nb_components):
-                    evt = self.fine_to_coarse(self.field_in[0].gpu_data[d],
-                                              self.field_out[0].gpu_data[d],
-                                          self.scale_factor,
-                                          self._mesh_size_in,
-                                          self._mesh_size_out,
-                                          self._domain_origin,
-                                          np.int32(iy), np.int32(iz),
-                                              wait_for=self._evts[d])
-                self._evts[d].append(evt)
-        # Ghosts values must be exchanged either on process or through mpi
-        # communications. Values must be moved to host.
-        # We developp 2 versions:
-        #  - copy of the entire field data
-        #  - rect-copy of only needed data
-        # The first one is running much faster than the second because of
-        # the use of the mapping of device buffer in host pinned memory.
-        # The second version is kept in comments (for sequential case)
-        self.field_out[0].toHost()
-        self.field_out[0].wait()
-        self._exchange_ghosts()
-        self.field_out[0].toDevice()
-
-        # # Get ghosts values and in-domain layer
-        # # X-direction
-        # s_gh = self.gh_out[0]
-        # get_gh_xl = cl.enqueue_copy(
-        #     self.cl_env.queue,
-        #     self.gh_x, self.field_out[0].gpu_data[0],
-        #     host_origin=(0, 0, 0),
-        #     buffer_origin=(0, 0, 0),
-        #     host_pitches=self._pitches_host_x,
-        #     buffer_pitches=self._pitches_buff,
-        #     region=(self.gh_x[:2 * s_gh, 0, 0].nbytes,
-        #             self.gh_x.shape[1],
-        #             self.gh_x.shape[2]),
-        #     wait_for=evts)
-        # get_gh_xr = cl.enqueue_copy(
-        #     self.cl_env.queue,
-        #     self.gh_x, self.field_out[0].gpu_data[0],
-        #     host_origin=(self.gh_x[:2 * s_gh, 0, 0].nbytes, 0, 0),
-        #     buffer_origin=(self.field_out[0].data[0][:, 0, 0].nbytes -
-        #                    self.gh_x[:2 * s_gh, 0, 0].nbytes, 0, 0),
-        #     host_pitches=self._pitches_host_x,
-        #     buffer_pitches=self._pitches_buff,
-        #     region=(self.gh_x[:2 * s_gh, 0, 0].nbytes,
-        #             self.gh_x.shape[1],
-        #             self.gh_x.shape[2]),
-        #     wait_for=evts)
-        # get_gh_xl.wait()
-        # get_gh_xr.wait()
-        # # Add ghosts contributions in domain layer
-        # self.gh_x[2 * s_gh:3 * s_gh, :, :] += \
-        #     self.gh_x[0 * s_gh:1 * s_gh, :, :]
-        # self.gh_x[1 * s_gh:2 * s_gh, :, :] += \
-        #     self.gh_x[3 * s_gh:4 * s_gh, :, :]
-        # set_gh_xl = cl.enqueue_copy(
-        #     self.cl_env.queue,
-        #     self.field_out[0].gpu_data[0], self.gh_x,
-        #     host_origin=(self.gh_x[:1 * s_gh, 0, 0].nbytes, 0, 0),
-        #     buffer_origin=(self.gh_x[:1 * s_gh, 0, 0].nbytes, 0, 0),
-        #     host_pitches=self._pitches_host_x,
-        #     buffer_pitches=self._pitches_buff,
-        #     region=(self.gh_x[:1 * s_gh, 0, 0].nbytes,
-        #             self.gh_x.shape[1],
-        #             self.gh_x.shape[2]),
-        #     wait_for=evts)
-        # set_gh_xr = cl.enqueue_copy(
-        #     self.cl_env.queue,
-        #     self.field_out[0].gpu_data[0], self.gh_x,
-        #     host_origin=(self.gh_x[:2 * s_gh, 0, 0].nbytes, 0, 0),
-        #     buffer_origin=(self.field_out[0].data[0][:, 0, 0].nbytes -
-        #                    self.gh_x[:2 * s_gh, 0, 0].nbytes, 0, 0),
-        #     host_pitches=self._pitches_host_x,
-        #     buffer_pitches=self._pitches_buff,
-        #     region=(self.gh_x[:1 * s_gh, 0, 0].nbytes,
-        #             self.gh_x.shape[1],
-        #             self.gh_x.shape[2]),
-        #     wait_for=evts)
-        # set_gh_xl.wait()
-        # set_gh_xr.wait()
-
-        # # Y-direction
-        # s_gh = self.gh_out[1]
-        # get_gh_yl = cl.enqueue_copy(
-        #     self.cl_env.queue,
-        #     self.gh_y, self.field_out[0].gpu_data[0],
-        #     host_origin=(0, 0, 0),
-        #     buffer_origin=(0, 0, 0),
-        #     host_pitches=self._pitches_host_y,
-        #     buffer_pitches=self._pitches_buff,
-        #     region=(self.gh_y[:, 0, 0].nbytes, 2 * s_gh, self.gh_y.shape[2]),
-        #     wait_for=evts)
-        # get_gh_yr = cl.enqueue_copy(
-        #     self.cl_env.queue,
-        #     self.gh_y, self.field_out[0].gpu_data[0],
-        #     host_origin=(0, 2 * s_gh, 0),
-        #     buffer_origin=(0, self.field_out[0].data[0].shape[1] - 2 * s_gh, 0),
-        #     host_pitches=self._pitches_host_y,
-        #     buffer_pitches=self._pitches_buff,
-        #     region=(self.gh_y[:, 0, 0].nbytes, 2 * s_gh, self.gh_y.shape[2]),
-        #     wait_for=evts)
-        # get_gh_yl.wait()
-        # get_gh_yr.wait()
-        # # Add ghosts contributions in domain layer
-        # self.gh_y[:, 2 * s_gh:3 * s_gh, :] += \
-        #     self.gh_y[:, 0 * s_gh:1 * s_gh, :]
-        # self.gh_y[:, 1 * s_gh:2 * s_gh, :] += \
-        #     self.gh_y[:, 3 * s_gh:4 * s_gh, :]
-        # set_gh_yl = cl.enqueue_copy(
-        #     self.cl_env.queue,
-        #     self.field_out[0].gpu_data[0], self.gh_y,
-        #     host_origin=(0, 1 * s_gh, 0),
-        #     buffer_origin=(0, 1 * s_gh, 0),
-        #     host_pitches=self._pitches_host_y,
-        #     buffer_pitches=self._pitches_buff,
-        #     region=(self.gh_y[:, 0, 0].nbytes, 1 * s_gh, self.gh_y.shape[2]),
-        #     wait_for=evts)
-        # set_gh_yr = cl.enqueue_copy(
-        #     self.cl_env.queue,
-        #     self.field_out[0].gpu_data[0], self.gh_y,
-        #     host_origin=(0, 2 * s_gh, 0),
-        #     buffer_origin=(0, self.field_out[0].data[0].shape[1] - 2 * s_gh, 0),
-        #     host_pitches=self._pitches_host_y,
-        #     buffer_pitches=self._pitches_buff,
-        #     region=(self.gh_y[:, 0, 0].nbytes, 1 * s_gh, self.gh_y.shape[2]),
-        #     wait_for=evts)
-        # set_gh_yl.wait()
-        # set_gh_yr.wait()
-
-        # # Z-direction
-        # s_gh = self.gh_out[2]
-        # get_gh_zl = cl.enqueue_copy(
-        #     self.cl_env.queue,
-        #     self.gh_z, self.field_out[0].gpu_data[0],
-        #     host_origin=(0, 0, 0),
-        #     buffer_origin=(0, 0, 0),
-        #     host_pitches=self._pitches_host_z,
-        #     buffer_pitches=self._pitches_buff,
-        #     region=(self.gh_z[:, 0, 0].nbytes, self.gh_z.shape[1], 2 * s_gh),
-        #     wait_for=evts)
-        # get_gh_zr = cl.enqueue_copy(
-        #     self.cl_env.queue,
-        #     self.gh_z, self.field_out[0].gpu_data[0],
-        #     host_origin=(0, 0, 2 * s_gh),
-        #     buffer_origin=(0, 0, self.field_out[0].data[0].shape[2] - 2 * s_gh),
-        #     host_pitches=self._pitches_host_z,
-        #     buffer_pitches=self._pitches_buff,
-        #     region=(self.gh_z[:, 0, 0].nbytes, self.gh_z.shape[1], 2 * s_gh),
-        #     wait_for=evts)
-        # get_gh_zl.wait()
-        # get_gh_zr.wait()
-        # # Add ghosts contributions in domain layer
-        # self.gh_z[:, :, 2 * s_gh:3 * s_gh] += \
-        #     self.gh_z[:, :, 0 * s_gh:1 * s_gh]
-        # self.gh_z[:, :, 1 * s_gh:2 * s_gh] += \
-        #     self.gh_z[:, :, 3 * s_gh:4 * s_gh]
-        # set_gh_zl = cl.enqueue_copy(
-        #     self.cl_env.queue,
-        #     self.field_out[0].gpu_data[0], self.gh_z,
-        #     host_origin=(0, 0, 1 * s_gh),
-        #     buffer_origin=(0, 0, 1 * s_gh),
-        #     host_pitches=self._pitches_host_z,
-        #     buffer_pitches=self._pitches_buff,
-        #     region=(self.gh_z[:, 0, 0].nbytes, self.gh_z.shape[1], 1 * s_gh),
-        #     wait_for=evts)
-        # set_gh_zr = cl.enqueue_copy(
-        #     self.cl_env.queue,
-        #     self.field_out[0].gpu_data[0], self.gh_z,
-        #     host_origin=(0, 0, 2 * s_gh),
-        #     buffer_origin=(0, 0, self.field_out[0].data[0].shape[2] - 2 * s_gh),
-        #     host_pitches=self._pitches_host_z,
-        #     buffer_pitches=self._pitches_buff,
-        #     region=(self.gh_z[:, 0, 0].nbytes, self.gh_z.shape[1], 1 * s_gh),
-        #     wait_for=evts)
-        # set_gh_zl.wait()
-        # set_gh_zr.wait()
-
-    def get_profiling_info(self):
-        for k in (self.fine_to_coarse, self.initialize):
-            for p in k.profile:
-                self.profiler += p
diff --git a/hysop/old/gpu.old/gpu_operator.py b/hysop/old/gpu.old/gpu_operator.py
deleted file mode 100644
index a2a91b24d34f09de8fe238d0eba4745fbf0cefcd..0000000000000000000000000000000000000000
--- a/hysop/old/gpu.old/gpu_operator.py
+++ /dev/null
@@ -1,137 +0,0 @@
-"""Abstract class providing a common interface to all
-discrete operators working on GPU.
-
-* :class:`~hysop.gpu_operator.GPUOperator` is an abstract class
-    used to provide a common interface to all discrete operators
-    working on GPU.
-    See for example :class:`~hysop.gpu.gpu_diffusion.GPUDiffusion` or
-    :class:`~hysop.gpu.gpu_particle_advection.GPUParticleAdvection`.
-
-"""
-from abc import ABCMeta
-from hysop.constants import HYSOP_REAL, DirectionLabels
-from hysop.backend.device.opencl.opencl_tools import get_opencl_environment
-from hysop.methods import ExtraArgs
-from hysop.operator.discrete.discrete import get_extra_args_from_method
-from hysop.backend.device.kernel_config import KernelConfig
-from hysop.methods import Precision
-
-class GPUOperator(object):
-    """Abstract class for discrete operators working on GPU.
-    """
-    __metaclass__ = ABCMeta
-
-    def __init__(self, platform_id=None, device_id=None, device_type=None,
-                 direction=None, gpu_precision=HYSOP_REAL, **kwds):
-        """
-        Create the common attributes of all GPU discrete operators.
-        All in this interface is independant of a discrete operator.
-
-        Parameters
-        ----------
-        direction : int
-            leading direction of work (for example advection dir)
-        platform_id : int, optional
-            OpenCL id, default = 0.
-        device_id : int, optional
-            OpenCL id, default = 0.
-        device_type : string, optional.
-            OpenCL selected device, default = 'gpu'.
-        gpu_precision : numpy.dtype, optional
-            floating precision for gpu, default=HYSOP_REAL
-        """
-        
-        super(GPUOperator, self).__init__(**kwds)
-
-        ## real type precision on GPU
-        self.gpu_precision = gpu_precision
-        if 'method' in kwds:
-            method = kwds['method']
-            assert (method is not None)
-            
-            if Precision in method:
-                self.gpu_precision = method[Precision]
-            
-            if KernelConfig in method:
-                config = method[KernelConfig]
-                platform_id = config.platform_id
-                device_id   = config.device_id
-                device_type = config.device_type
-            elif ExtraArgs in method:
-                platform_id = get_extra_args_from_method(self,'platform_id',platform_id)
-                device_id   = get_extra_args_from_method(self,'device_id',device_id)
-                device_type = get_extra_args_from_method(self,'device_type',device_type)
-
-        # Initialize opencl environment
-        comm_ref = self.variables[0].topology.comm
-
-        self.cl_env = get_opencl_environment(
-            platform_id=platform_id, device_id=device_id,
-            device_type=device_type, precision=self.gpu_precision,
-            comm=comm_ref)
-        
-        # In case of directional splitting, define a function
-        # to get the appropriate vectors for the current provided direction.
-        # Only define _reorder_vect if direction is provided to avoid bugs.
-        dim = self.domain.dim
-        if (direction is not None):
-            if (direction<0) or (direction >= dim):
-                msg = "Bad direction '{}' for dimension {}.".format(direction,dim)
-                raise ValueError(msg)
-
-            if   dim == 1:
-                _reorder_vect = lambda v: (v[0],)
-            elif dim == 2:
-                if direction == 0:
-                    _reorder_vect = lambda v: (v[0], v[1])
-                elif direction == 1:
-                    _reorder_vect = lambda v: (v[1], v[0])
-            elif dim == 3: 
-                if direction == 0:
-                    _reorder_vect = lambda v: (v[0], v[1], v[2])
-                elif direction == 1:
-                    _reorder_vect = lambda v: (v[1], v[0], v[2])
-                elif direction == 2:
-                    _reorder_vect = lambda v: (v[2], v[0], v[1])
-            else:
-                msg = 'Dimension {} not implemented yet!'.format(dim)
-                raise NotImplementedError(msg)
-            
-            self._reorder_vect = _reorder_vect
-    
-        self.dim = dim
-        self.direction = direction
-
-        # Size constants for local mesh size
-        self._size_constants = ''
-    
-        # Try to load kernel configurations
-        try:
-            self._kernel_cfg = \
-                self.cl_env.kernels_config[self.dim][self.gpu_precision]
-        except:
-            self._kernel_cfg = None
-
-        # Global memory allocated on gpu by this operator
-        self.size_global_alloc = 0
-        # Local memory allocated on gpu by this operator
-        self.size_local_alloc = 0
-        
-        self._num_locMem = None
-
-    def _append_size_constants(self, values, prefix='NB', suffix=None):
-        """Append to the string containing the constants for building kernels.
-
-        Parameters
-        ----------
-        values : list
-        prefix : string, optional
-        suffix : list of strings, optional
-           directions, default = `hysop.constants.DirectionLabels`.
-        """
-        if suffix is None:
-            suffix = DirectionLabels
-        assert len(values) <= len(suffix), str(values) + str(suffix)
-        for v, s in zip(values, suffix):
-            self._size_constants += " -D " + prefix + s + "=" + str(v)
-
diff --git a/hysop/old/gpu.old/gpu_particle_advection.py b/hysop/old/gpu.old/gpu_particle_advection.py
deleted file mode 100644
index de66dd241727433b2e07aba1be18dd366524654a..0000000000000000000000000000000000000000
--- a/hysop/old/gpu.old/gpu_particle_advection.py
+++ /dev/null
@@ -1,781 +0,0 @@
-"""Discrete advection for GPU
-"""
-from abc import abstractmethod, ABCMeta
-from hysop import __VERBOSE__
-from hysop.constants import np, debug, DirectionLabels, HYSOP_REAL
-from hysop.methods import TimeIntegrator, Remesh, \
-    Support, Splitting, MultiScale, Precision
-from hysop.numerics.odesolvers import Euler
-from hysop.operator.discrete.particle_advection import ParticleAdvection
-from hysop.operator.discrete.discrete import get_extra_args_from_method
-from hysop.backend.device.opencl import cl
-from hysop.backend.device.opencl.opencl_kernel import OpenClKernelLauncher
-import hysop.default_methods as default
-from hysop.tools.numpywrappers import npw
-from hysop.backend.device.opencl.opencl_discrete import OpenClDiscreteField
-from hysop.backend.device.opencl.gpu_operator import GPUOperator
-from hysop.tools.profiler import profile
-from hysop.numerics.update_ghosts import UpdateGhostsFull
-from hysop.tools.misc import WorkSpaceTools
-
-
-class GPUParticleAdvection(ParticleAdvection, GPUOperator):
-    """Particle advection solver on GPU
-
-    """
-    __metaclass__ = ABCMeta
-
-    @debug
-    def __init__(self, **kwds):
-        """Particular advection of field(s) in a given direction,
-        on GPU.
-
-        OpenCL kernels are build once per dimension in order to handle
-        directional splitting with resolution non uniform in directions.
-
-        Parameters
-        ----------
-        kwds : base classes parameters
-
-        Note
-        ----
-        * warning : this operator is derived from ParticleAdvection AND
-        GPUOperator. kwds must then handle argument of both classes.
-        """
-        # Set default method if unknown
-        if 'method' not in kwds:
-            kwds['method'] = default.ADVECTION
-            kwds['method'][Support] = 'gpu_2k'
-
-        # init base class (i.e. ParticleAdvection)
-        super(GPUParticleAdvection, self).__init__(**kwds)
-
-        # mpi comm is set to advected field topology communicator.
-        self.fields_topo = self.fields_on_grid[0].topology
-        self._comm = self.fields_topo.comm
-        self._comm_size = self._comm.Get_size()
-        self._comm_rank = self._comm.Get_rank()
-        assert self._comm == self.velocity.topology.comm
-
-        # init second base class (i.e GPUOperator)
-        if Precision in self.method:
-            gpu_precision = self.method[Precision]
-        else:
-            gpu_precision = None
-        GPUOperator.__init__(
-            self, topology=self.velocity.topology,
-            direction=self.direction,
-            platform_id=get_extra_args_from_method(self, 'platform_id', None),
-            device_id=get_extra_args_from_method(self, 'device_id', None),
-            device_type=get_extra_args_from_method(self, 'device_type', None),
-            gpu_precision=gpu_precision)
-
-        # Choose between one kernel for all operations (default) ...
-        self._use_2_kernels = False
-        if self.method[Support].find('gpu_2k') >= 0:
-            # ... or two different kernels for advection and remesh
-            self._use_2_kernels = True
-
-        self._synchronize = None
-        self._isMultiScale = False
-        if MultiScale in self.method and self.method[MultiScale] is not None:
-            self._isMultiScale = True
-            self._synchronize = UpdateGhostsFull(
-                self.velocity.topology, self.velocity.nb_components)
-
-        # Compute resolutions for kernels for each direction.
-        # Resolution of the local mesh but reoganized regarding
-        # splitting direction:
-        # direction X : XYZ
-        # direction Y : YXZ
-        # direction Z : ZYX in parallel, ZXY in sequentiel.
-        self.resol_dir = npw.dim_ones((self._dim,))
-        self.v_resol_dir = npw.dim_ones((self._dim,))
-        shape = self.fields_topo.mesh.local_resolution
-        v_shape = self.velocity.topology.mesh.local_resolution
-        # Local mesh resolution
-        self.resol_dir[:self._dim] = self._reorderVect(shape)
-        self.v_resol_dir[:self._dim] = self._reorderVect(v_shape)
-        self._init_size_constants()
-
-        # Init mesh info
-        self._cl_mesh_info = None
-        self._init_cl_mesh_info()
-
-        # user defined opencl sources
-        self.prg = None
-        # get extra (opencl kernels) files from method, if any
-        self._collect_usr_cl_src()
-
-        # Set copy kernel
-        self.copy = self._collect_kernels_cl_src_copy()
-
-        # Set transposition kernels
-        self.transpose_xy, self.transpose_xy_r = None, None
-        self.transpose_xz, self.transpose_xz_r = None, None
-        self._collect_kernels_cl_src_transpositions_xy()
-        if self._dim == 3:
-            self._collect_kernels_cl_src_transpositions_xz()
-
-        # Set advection and remesh kernels
-        self.num_advec, self.num_remesh = None, None
-        self.num_advec_and_remesh = None
-        if self._use_2_kernels:
-            self._collect_kernels_cl_src_2k()
-            self._compute = self._compute_2k
-        else:
-            self._collect_kernels_cl_src_1k()
-            if self._isMultiScale:
-                self._compute = self._compute_1k_multiechelle
-            else:
-                if self.method[TimeIntegrator] is Euler:
-                    self._compute = self._compute_1k_euler_simpleechelle
-                else:
-                    self._compute = self._compute_1k_simpleechelle
-
-        self._buffer_allocations()
-        if self.direction == 0:
-            self._buffer_initialisations()
-
-        # List of executions
-        self.exec_list = None
-        self._build_exec_list()
-
-        # Particle initialisation OpenCL events for each field:
-        self._init_events = {self.fields_on_grid[0]: []}
-
-    def _init_size_constants(self):
-        """Fill size_constants attribute in
-        (_size_constant belongs to gpu_operator)
-        """
-        v_shape = self.velocity.topology.mesh.local_resolution
-        f_shape = self.fields_topo.mesh.local_resolution
-        self._append_size_constants(f_shape)
-        self._append_size_constants(v_shape, prefix='V_NB')
-        self._append_size_constants(
-            [self.velocity.topology.ghosts()[self.direction]],
-            prefix='V_GHOSTS_NB', suffix=[''])
-        enum = ['I', 'II', 'III']
-        self._append_size_constants(
-            self._reorderVect(['NB' + d for d in DirectionLabels[:self._dim]]),
-            prefix='NB_', suffix=enum[:self._dim])
-        self._append_size_constants(
-            self._reorderVect(['V_NB' + d for d in DirectionLabels[:self._dim]]),
-            prefix='V_NB_', suffix=enum[:self._dim])
-
-    def _init_cl_mesh_info(self):
-        """Collect mesh info from fields and velocity,
-        set and send opencl buffer (self._cl_mesh_info) to device.
-        """
-        # Space step for fields
-        mesh_size = npw.ones(4, dtype=self.gpu_precision)
-        mesh_size[:self._dim] = self._reorderVect(
-            self.fields_topo.mesh.space_step)
-
-        # Space step for velocity
-        self._v_mesh_size = npw.ones(4, dtype=self.gpu_precision)
-        self._v_mesh_size[:self._dim] = self._reorderVect(
-            self.velocity.topology.mesh.space_step)
-
-        mesh_info = npw.ones((12, ), dtype=self.gpu_precision)
-        mesh_info[:4] = mesh_size
-        mesh_info[4:8] = self._v_mesh_size
-        # Coordinate of the local origin in advection dir.
-        mesh_info[8] = self.fields_topo.mesh.origin[self.direction]
-        mesh_info[9] = 1. / mesh_size[0]
-        mesh_info[10] = 1. / self._v_mesh_size[0]
-        assert mesh_size.dtype == self.gpu_precision
-        # returns an opencl buffer
-        self._cl_mesh_info = cl.Buffer(self.cl_env.ctx, cl.mem_flags.READ_ONLY,
-                                       size=mesh_info.nbytes)
-        cl.enqueue_write_buffer(self.cl_env.queue,
-                                self._cl_mesh_info, mesh_info).wait()
-
-    def _set_work_arrays(self, rwork=None, iwork=None):
-
-        topo = self.fields_on_grid[0].topology
-        # Find number and shape of required work arrays
-        # For GPU version, no need of numerics works
-        # Shape of reference comes from fields, not from velocity
-        rwork_length = np.sum([f.nb_components for f in self.fields_on_grid])
-        if self.method[Support].find('gpu_2k') >= 0:
-            rwork_length += 1  # work array for positions
-
-        # check and/or allocate work arrays according to properties above
-        subshape = tuple(topo.mesh.local_resolution)
-        self._rwork = WorkSpaceTools.check_work_array(rwork_length, subshape,
-                                                      rwork, HYSOP_REAL)
-
-    @abstractmethod
-    def global_memory_usage(self, v_shape, shape):
-        """Returns an estimation of memory usage
-
-        Parameters
-        ----------
-        v_shape, shape : tuples
-             shapes of the velocity and advected fields
-        """
-        pass
-
-    def _build_exec_list(self):
-        """Prepare GPU kernels sequence
-        """
-        # Build execution list regarding splitting:
-        # Splitting Strang 2nd order:
-        #   3D: X(dt/2), Y(dt/2), Z(dt), Y(dt/2), X(dt/2)
-        #   2D: X(dt/2), Y(dt), X(dt/2)
-        if self.method[Splitting] == 'o2':
-            if self._dim == 2:
-                self.exec_list = [
-                    [self._init_copy, self._compute],  # X(dt/2)
-                    [self._init_transpose_xy, self._compute],  # Y(dt)
-                    [self._init_transpose_xy, self._compute]  # X(dt/2)
-                ]
-            elif self._dim == 3:
-                self.exec_list = [
-                    [self._init_copy, self._compute],  # X(dt/2)
-                    [self._init_transpose_xy, self._compute],  # Y(dt/2)
-                    [self._init_transpose_xz, self._compute],  # Z(dt)
-                    [self._init_transpose_xz, self._compute],  # Y(dt/2)
-                    [self._init_transpose_xy, self._compute]  # X(dt/2)
-                ]
-
-        # Splitting Strang 2nd order (fullHalf):
-        #   X(dt/2), Y(dt/2), Z(dt/2), Z(dt/2), Y(dt/2), X(dt/2)
-        elif self.method[Splitting] == 'o2_FullHalf':
-            if self._dim == 2:
-                self.exec_list = [
-                    [self._init_copy, self._compute],  # X(dt/2)
-                    [self._init_transpose_xy, self._compute],  # Y(dt)
-                    [self._init_copy, self._compute],  # Y(dt)
-                    [self._init_transpose_xy, self._compute]  # X(dt/2)
-                ]
-            elif self._dim == 3:
-                self.exec_list = [
-                    [self._init_copy, self._compute],  # X(dt/2)
-                    [self._init_transpose_xy, self._compute],  # Y(dt/2)
-                    [self._init_transpose_xz, self._compute],  # Z(dt/2)
-                    [self._init_copy, self._compute],  # Z(dt/2)
-                    [self._init_transpose_xz, self._compute],  # Y(dt/2)
-                    [self._init_transpose_xy, self._compute]  # X(dt/2)
-                ]
-        elif self.method[Splitting] == 'x_only':
-            self.exec_list = [
-                [self._init_copy, self._compute],  # X(dt)
-                #[self._init_copy, self._init_copy_r],  # X(dt)
-                ]
-        else:
-            raise ValueError('Splitting type not yet implemeted on GPU: ' +
-                             self.method[Splitting])
-
-    def global_memory_usage(self, v_shape, shape):
-        if self._use_2_kernels:
-            r = (self.velocity.nb_components * v_shape.prod() +
-                 (2 * self.fields_on_grid[0].nb_components + 1) * shape.prod())
-        else:
-            r = (self.velocity.nb_components * v_shape.prod() +
-                 2 * self.fields_on_grid[0].nb_components * shape.prod())
-        return r * self.cl_env.prec_size
-
-    def _configure_numerical_methods(self):
-        pass
-
-    def _buffer_allocations(self):
-        """Allocate OpenCL buffers for velocity and advected field.
-        """
-        # Velocity.
-        alloc = not isinstance(self.velocity, OpenClDiscreteField)
-        OpenClDiscreteField.fromField(self.cl_env, self.velocity,
-                                   self.gpu_precision, simple_layout=False)
-        if alloc:
-            self.size_global_alloc += self.velocity.mem_size
-
-        # Transported field.
-        alloc = not isinstance(self.fields_on_grid[0], OpenClDiscreteField)
-        OpenClDiscreteField.fromField(self.cl_env,
-                                   self.fields_on_grid[0],
-                                   self.gpu_precision,
-                                   layout=False)
-        if alloc:
-            self.size_global_alloc += self.fields_on_grid[0].mem_size
-
-        # Fields on particles
-        start = 0
-        for f in self.fields_on_grid:
-            for i in xrange(start, start + f.nb_components):
-                if isinstance(self._rwork[i], np.ndarray):
-                    self._rwork[i] = \
-                        self.cl_env.global_allocation(self._rwork[i])
-            self.fields_on_part[f] = self._rwork[start: start + f.nb_components]
-            start += f.nb_components
-
-        if self._use_2_kernels:
-            # Particles position
-            if isinstance(self._rwork[start], np.ndarray):
-                self._rwork[start] = \
-                    self.cl_env.global_allocation(self._rwork[start])
-            self.part_position = self._rwork[start:start + 1]
-
-        self._work = self.fields_on_part.values()
-
-    def _buffer_initialisations(self):
-        """
-        OpenCL buffer initializations from user OpenCL kernels.
-        Looking for kernels named <code>init<FieldName></code>.
-        """
-        for gpudf in self.variables:
-            match = 'init' + '_'.join(gpudf.name.split('_')[:-1])
-            # Looking for initKernel
-            if self.prg is not None:
-                for k in self.prg.all_kernels():
-                    k_name = k.get_info(cl.kernel_info.FUNCTION_NAME)
-                    if match.find(k_name) >= 0:
-                        if __VERBOSE__:
-                            print gpudf.name, '-> OpenCL Kernel', k_name
-                        if gpudf == self.velocity:
-                            workItemNumber, gwi, lwi = \
-                                self.cl_env.get_work_items(self.v_resol_dir)
-                        else:
-                            workItemNumber, gwi, lwi = \
-                                self.cl_env.get_work_items(self.resol_dir)
-                        gpudf.setInitializationKernel(OpenClKernelLauncher(
-                            cl.Kernel(self.prg, k_name), self.cl_env.queue,
-                            gwi, lwi))
-
-    def _collect_kernels_cl_src_copy(self):
-        """
-        Compile OpenCL sources for copy kernel.
-        """
-        # # copy settings
-        # src, t_dim, b_rows, vec, f_space = self._kernel_cfg['copy']
-        # while t_dim > self.resol_dir[0] or (self.resol_dir[0] % t_dim) > 0:
-        #     t_dim /= 2
-        # gwi, lwi = f_space(self.resol_dir, t_dim, b_rows, vec)
-
-        # # Build code
-        # build_options += " -D TILE_DIM_COPY={0}".format(t_dim)
-        # build_options += " -D BLOCK_ROWS_COPY={0}".format(b_rows)
-        # build_options += self._size_constants
-        # prg = self.cl_env.build_src(
-        #     src,
-        #     build_options,
-        #     vec)
-        # self.copy = OpenClKernelLauncher(prg.copy,
-        #                            self.cl_env.queue, gwi, lwi)
-        return OpenClKernelLauncher(cl.enqueue_copy, self.cl_env.queue)
-
-    def _collect_kernels_cl_src_transpositions_xy(self):
-        """Compile OpenCL sources for transpositions kernel.
-
-        Notes
-        -----
-
-        * Transpositions kernels are launched at initialization.
-
-        This routine sets transpose_xy and transpose_xy_r.
-        """
-        resol = self.fields_topo.mesh.local_resolution
-
-        # XY transposition settings
-        is_XY_needed = self.direction == 1 or self.direction == 0
-        if is_XY_needed:
-            resol_tmp = resol.copy()
-            if self.direction == 1:  # (XY -> YX)
-                resol_tmp[0] = resol[1]
-                resol_tmp[1] = resol[0]
-                ocl_cte = " -D NB_I=NB_Y -D NB_II=NB_X -D NB_III=NB_Z"
-            else:
-                #  self.direction == 0:  # (YX -> XY) only for sequential
-                ocl_cte = " -D NB_I=NB_X -D NB_II=NB_Y -D NB_III=NB_Z"
-
-            self.transpose_xy = self._make_transpose_xy(resol_tmp, ocl_cte)
-
-    def _make_transpose_xy(self, resolution, ocl_cte):
-        """Perform xy transposition
-
-        Parameters
-        ----------
-        resolution : numpy array
-            required shape for?
-        ocl_cte: string
-            compilation options
-        """
-        build_options = self._size_constants
-        src, t_dim, b_rows, is_padding, vec, f_space = \
-            self._kernel_cfg['transpose_xy']
-        while t_dim > resolution[0] or t_dim > resolution[1] or \
-                (resolution[0] % t_dim) > 0 or (resolution[1] % t_dim) > 0:
-            t_dim /= 2
-        gwi, lwi, blocs_nb = f_space(resolution, t_dim, b_rows, vec)
-
-        if is_padding:
-            build_options += " -D PADDING_XY=1"
-        else:
-            build_options += " -D PADDING_XY=0"
-        build_options += " -D TILE_DIM_XY={0}".format(t_dim)
-        build_options += " -D BLOCK_ROWS_XY={0}".format(b_rows)
-        build_options += " -D NB_GROUPS_I={0}".format(blocs_nb[0])
-        build_options += " -D NB_GROUPS_II={0}".format(blocs_nb[1])
-        build_options += ocl_cte
-        prg = self.cl_env.build_src(src, build_options, vec)
-        return OpenClKernelLauncher(prg.transpose_xy, self.cl_env.queue, gwi, lwi)
-
-    def _collect_kernels_cl_src_transpositions_xz(self):
-        resol = self.fields_topo.mesh.local_resolution
-        resol_tmp = npw.zeros_like(resol)
-
-        is_XZ_needed = self.direction == 2 or self.direction == 1
-        # XZ transposition settings
-        if is_XZ_needed:
-            resol_tmp[...] = resol[...]
-            if self.direction == 1:  # ZXY -> YXZ (only for seqential)
-                resol_tmp[0] = resol[1]
-                resol_tmp[1] = resol[0]
-                resol_tmp[2] = resol[2]
-                ocl_cte = " -D NB_I=NB_Y -D NB_II=NB_X -D NB_III=NB_Z"
-            elif self.direction == 2:
-                # YXZ -> ZXY
-                resol_tmp[0] = resol[2]
-                resol_tmp[1] = resol[0]
-                resol_tmp[2] = resol[1]
-                ocl_cte = " -D NB_I=NB_Z -D NB_II=NB_X -D NB_III=NB_Y"
-                # else:  # XYZ -> ZYX
-                #     resol_tmp[0] = resol[2]
-                #     resol_tmp[1] = resol[1]
-                #     resol_tmp[2] = resol[0]
-                #     ocl_cte = " -D NB_I=NB_Z -D NB_II=NB_Y -D NB_III=NB_X"
-            self.transpose_xz = self._make_transpose_xz(resol_tmp, ocl_cte)
-
-        # is_XZ_r_needed = self.direction == 2 and self._comm_size > 1
-        # if is_XZ_r_needed:
-        #     # Reversed XZ transposition settings (ZYX -> XYZ)
-        #     resol_tmp[...] = resol[...]
-        #     ocl_cte = " -D NB_I=NB_X -D NB_II=NB_Y -D NB_III=NB_Z"
-        #     self.transpose_xz_r = self._make_transpose_xz(resol_tmp, ocl_cte)
-
-    def _make_transpose_xz(self, resol_tmp, ocl_cte):
-
-        build_options = self._size_constants
-        src, t_dim, b_rows, b_deph, is_padding, vec, f_space = \
-            self._kernel_cfg['transpose_xz']
-
-        while t_dim > resol_tmp[0] or t_dim > resol_tmp[2] or \
-                (resol_tmp[0] % t_dim) > 0 or (resol_tmp[2] % t_dim) > 0:
-            t_dim /= 2
-        gwi, lwi, blocs_nb = f_space(resol_tmp, t_dim, b_rows, b_deph, vec)
-        if is_padding:
-            build_options += " -D PADDING_XZ=1"
-        else:
-            build_options += " -D PADDING_XZ=0"
-        build_options += " -D TILE_DIM_XZ={0}".format(t_dim)
-        build_options += " -D BLOCK_ROWS_XZ={0}".format(b_rows)
-        build_options += " -D BLOCK_DEPH_XZ={0}".format(b_deph)
-        build_options += " -D NB_GROUPS_I={0}".format(blocs_nb[0])
-        build_options += " -D NB_GROUPS_III={0}".format(blocs_nb[2])
-        build_options += ocl_cte
-        prg = self.cl_env.build_src(
-            src,
-            build_options,
-            vec)
-        return OpenClKernelLauncher(prg.transpose_xz, self.cl_env.queue, gwi, lwi)
-
-    def _collect_usr_cl_src(self):
-        """Build user sources.
-
-        """
-        # get extra (opencl kernels) files from method, if any
-        user_src = get_extra_args_from_method(self, 'user_src', None)
-        if user_src is not None:
-            build_options = self._size_constants
-            workItemNb, gwi, lwi = self.cl_env.get_work_items(self.resol_dir)
-            v_workItemNb, gwi, lwi = self.cl_env.get_work_items(self.v_resol_dir)
-            build_options += " -D WI_NB=" + str(workItemNb)
-            build_options += " -D V_WI_NB=" + str(v_workItemNb)
-            self.prg = self.cl_env.build_src(user_src, build_options, 1)
-
-
-    def _collect_kernels_cl_src_1k(self):
-        """
-        Compile OpenCL sources for advection and remeshing kernel.
-        """
-        build_options = self._size_constants
-        src, is_noBC, vec, f_space = self._kernel_cfg['advec_and_remesh']
-        gwi, lwi = f_space(self.resol_dir, vec)
-        WINb = lwi[0]
-        build_options += " -D FORMULA=" + self.method[Remesh].__name__.upper()
-        if self._isMultiScale:
-            build_options += " -D MS_FORMULA="
-            build_options += self.method[MultiScale].__name__.upper()
-        if is_noBC:
-            build_options += " -D WITH_NOBC=1"
-        build_options += " -D WI_NB=" + str(WINb)
-        build_options += " -D PART_NB_PER_WI="
-        build_options += str(self.resol_dir[0] / WINb)
-        # Build code
-        src = [s.replace('RKN', self.method[TimeIntegrator].__name__.lower())
-               for s in src]
-        # Euler integrator
-        if self.method[TimeIntegrator] is Euler:
-            if not self._isMultiScale:
-                src = [s for s in src if s.find(Euler.__name__.lower()) < 0]
-                src[-1] = src[-1].replace('advection', 'advection_euler')
-        prg = self.cl_env.build_src(
-            src, build_options, vec,
-            nb_remesh_components=self.fields_on_grid[0].nb_components)
-
-        self.num_advec_and_remesh = OpenClKernelLauncher(
-            prg.advection_and_remeshing, self.cl_env.queue, gwi, lwi)
-
-    def _collect_kernels_cl_src_2k(self):
-        """
-        Compile OpenCL sources for advection and remeshing kernel.
-        """
-        # Advection
-        build_options = self._size_constants
-        src, is_noBC, vec, f_space = self._kernel_cfg['advec']
-        gwi, lwi = f_space(self.resol_dir, vec)
-        WINb = lwi[0]
-        if self._isMultiScale:
-            build_options += " -D MS_FORMULA="
-            build_options += self.method[MultiScale].__name__.upper()
-            self._compute_advec = self._compute_advec_multiechelle
-        else:
-            self._compute_advec = self._compute_advec_simpleechelle
-
-        if is_noBC:
-            build_options += " -D WITH_NOBC=1"
-        build_options += " -D WI_NB=" + str(WINb)
-        build_options += " -D PART_NB_PER_WI="
-        build_options += str(self.resol_dir[0] / WINb)
-        # Build code
-        src = [s.replace('RKN', self.method[TimeIntegrator].__name__.lower())
-               for s in src]
-        # Adding remeshing weights for the multiscale advection
-        if self._isMultiScale:
-            src.insert(1, self._kernel_cfg['remesh'][0][1])
-        # Euler integrator
-        if self.method[TimeIntegrator] is Euler:
-            if not self._isMultiScale:
-                src = [s for s in src if s.find(Euler.__name__.lower()) < 0]
-                src[-1] = src[-1].replace('advection', 'advection_euler')
-                self._compute_advec = self._compute_advec_euler_simpleechelle
-        prg = self.cl_env.build_src(
-            src,
-            build_options,
-            vec,
-            nb_remesh_components=self.fields_on_grid[0].nb_components)
-
-        self.num_advec = OpenClKernelLauncher(
-            prg.advection_kernel, self.cl_env.queue, gwi, lwi)
-
-        # remeshing
-        build_options = self._size_constants
-        src, is_noBC, vec, f_space = self._kernel_cfg['remesh']
-        gwi, lwi = f_space(self.resol_dir, vec)
-        WINb = lwi[0]
-
-        build_options += " -D FORMULA=" + self.method[Remesh].__name__.upper()
-        if is_noBC:
-            build_options += " -D WITH_NOBC=1"
-        build_options += " -D WI_NB=" + str(WINb)
-        build_options += " -D PART_NB_PER_WI="
-        build_options += str(self.resol_dir[0] / WINb)
-        # Build code
-        prg = self.cl_env.build_src(
-            src, build_options, vec,
-            nb_remesh_components=self.fields_on_grid[0].nb_components)
-        self.num_remesh = OpenClKernelLauncher(
-            prg.remeshing_kernel, self.cl_env.queue, gwi, lwi)
-
-    @debug
-    @profile
-    def apply(self, simulation, dt_coeff, split_id, old_dir):
-        """
-        Apply operator along specified splitting direction.
-        @param t : Current time
-        @param dt : Time step
-        @param d : Splitting direction
-        @param split_id : Splitting step id
-        """
-        # If first direction of advection, wait for work gpu fields
-        # It avoid wait_for lists to increase indefinitely
-        # In practice, all events are terminated so wait() resets events list
-        if split_id == 0:
-            for v in self.fields_on_grid + [self.velocity]:
-                v.clean_events()
-        for exe in self.exec_list[split_id]:
-            exe(simulation, dt_coeff, split_id, old_dir)
-
-    def _init_copy(self, simulation, dt_coeff, split_id, old_dir):
-        wait_evt = self.fields_on_grid[0].events
-        for g, p in zip(self.fields_on_grid[0].gpu_data,
-                        self.fields_on_part[self.fields_on_grid[0]]):
-            evt = self.copy.launch_sizes_in_args(p, g, wait_for=wait_evt)
-            #evt = self.copy(g, p, wait_for=wait_evt)
-            self._init_events[self.fields_on_grid[0]].append(evt)
-
-    # def _init_copy_r(self, simulation, dt_coeff, split_id, old_dir):
-    #     wait_evt = self.fields_on_grid[0].events
-    #     for g, p in zip(self.fields_on_grid[0].gpu_data,
-    #                  self.fields_on_part[self.fields_on_grid[0]]):
-    #         evt = self.copy.launch_sizes_in_args(g, p, wait_for=wait_evt)
-    #         #evt = self.copy(p, g, wait_for=wait_evt)
-    #         self._init_events[self.fields_on_grid[0]].append(evt)
-
-    def _init_transpose_xy(self, simulation, dt_coeff, split_id, old_dir):
-        wait_evt = self.fields_on_grid[0].events
-        for g, p in zip(self.fields_on_grid[0].gpu_data,
-                        self.fields_on_part[self.fields_on_grid[0]]):
-            evt = self.transpose_xy(g, p, wait_for=wait_evt)
-            self._init_events[self.fields_on_grid[0]].append(evt)
-
-    # def _init_transpose_xy_r(self, simulation, dt_coeff, split_id, old_dir):
-    #     wait_evt = self.fields_on_grid[0].events
-    #     for g, p in zip(self.fields_on_grid[0].gpu_data,
-    #                     self.fields_on_part[self.fields_on_grid[0]]):
-    #         evt = self.transpose_xy_r(p, g, wait_for=wait_evt)
-    #         self._init_events[self.fields_on_grid[0]].append(evt)
-
-    def _init_transpose_xz(self, simulation, dt_coeff, split_id, old_dir):
-        wait_evt = self.fields_on_grid[0].events
-        for g, p in zip(self.fields_on_grid[0].gpu_data,
-                        self.fields_on_part[self.fields_on_grid[0]]):
-            evt = self.transpose_xz(g, p, wait_for=wait_evt)
-            self._init_events[self.fields_on_grid[0]].append(evt)
-
-    # def _init_transpose_xz_r(self, simulation, dt_coeff, split_id, old_dir):
-    #     wait_evt = self.fields_on_grid[0].events
-    #     for g, p in zip(self.fields_on_grid[0].gpu_data,
-    #                     self.fields_on_part[self.fields_on_grid[0]]):
-    #         evt = self.transpose_xz_r(p, g, wait_for=wait_evt)
-    #         self._init_events[self.fields_on_grid[0]].append(evt)
-
-    def _compute_advec_euler_simpleechelle(self, simulation, dt_coeff, split_id, old_dir):
-        dt = simulation.time_step * dt_coeff
-        wait_evts = self.velocity.events + \
-            self._init_events[self.fields_on_grid[0]]
-        # Advection
-        evt = self.num_advec(
-            self.velocity.gpu_data[self.direction],
-            self.part_position[0],
-            self.gpu_precision(dt),
-            self._cl_mesh_info,
-            wait_for=wait_evts)
-        self._init_events[self.fields_on_grid[0]].append(evt)
-
-    def _compute_advec_simpleechelle(self, simulation, dt_coeff, split_id, old_dir):
-        dt = simulation.time_step * dt_coeff
-        wait_evts = self.velocity.events + \
-            self._init_events[self.fields_on_grid[0]]
-        # Advection
-        evt = self.num_advec(
-            self.velocity.gpu_data[self.direction],
-            self.part_position[0],
-            self.gpu_precision(dt),
-            self._cl_mesh_info,
-            wait_for=wait_evts)
-        self._init_events[self.fields_on_grid[0]].append(evt)
-
-    def _compute_advec_multiechelle(self, simulation, dt_coeff, split_id, old_dir):
-        dt = simulation.time_step * dt_coeff
-        wait_evts = self.velocity.events + \
-            self._init_events[self.fields_on_grid[0]]
-        # Advection
-        evt = self.num_advec(
-            self.velocity.gpu_data[self.direction],
-            self.part_position[0],
-            self.gpu_precision(dt),
-            self.gpu_precision(1. / self._v_mesh_size[1]),
-            self.gpu_precision(1. / self._v_mesh_size[2]),
-            self._cl_mesh_info,
-            wait_for=wait_evts)
-        self._init_events[self.fields_on_grid[0]].append(evt)
-
-    def _compute_2k(self, simulation, dt_coeff, split_id, old_dir):
-        self._compute_advec(simulation, dt_coeff, split_id, old_dir)
-        wait_evts = self._init_events[self.fields_on_grid[0]] + \
-            self.fields_on_grid[0].events
-        nbc = self.fields_on_grid[0].nb_components
-        evt = self.num_remesh(*tuple(
-            [self.part_position[0], ] +
-            [self.fields_on_part[self.fields_on_grid[0]][i]
-             for i in xrange(nbc)] +
-            [self.fields_on_grid[0].gpu_data[i] for i in xrange(nbc)] +
-            [self._cl_mesh_info, ]),
-                              wait_for=wait_evts)
-        self.fields_on_grid[0].events.append(evt)
-        self._init_events[self.fields_on_grid[0]] = []
-
-    def _compute_1k_multiechelle(self, simulation, dt_coeff, split_id, old_dir):
-        if split_id==0 and self._synchronize is not None:
-            self._synchronize(self.velocity.data)
-            self.velocity.toDevice()
-        dt = simulation.time_step * dt_coeff
-        wait_evts = self.velocity.events + \
-            self._init_events[self.fields_on_grid[0]] + \
-            self.fields_on_grid[0].events
-        nbc = self.fields_on_grid[0].nb_components
-        evt = self.num_advec_and_remesh(*tuple(
-            [self.velocity.gpu_data[self.direction], ] +
-            [self.fields_on_part[self.fields_on_grid[0]][i]
-             for i in xrange(nbc)] +
-            [self.fields_on_grid[0].gpu_data[i] for i in xrange(nbc)] +
-            [self.gpu_precision(dt),
-             self.gpu_precision(1. / self._v_mesh_size[1]),
-             self.gpu_precision(1. / self._v_mesh_size[2]),
-             self._cl_mesh_info]),
-                                        wait_for=wait_evts)
-        self.fields_on_grid[0].events.append(evt)
-        self._init_events[self.fields_on_grid[0]] = []
-
-    def _compute_1k_simpleechelle(self, simulation, dt_coeff, split_id, old_dir):
-        dt = simulation.time_step * dt_coeff
-        wait_evts = self.velocity.events + \
-            self._init_events[self.fields_on_grid[0]] + \
-            self.fields_on_grid[0].events
-        nbc = self.fields_on_grid[0].nb_components
-        evt = self.num_advec_and_remesh(*tuple(
-            [self.velocity.gpu_data[self.direction], ] +
-            [self.fields_on_part[self.fields_on_grid[0]][i]
-             for i in xrange(nbc)] +
-            [self.fields_on_grid[0].gpu_data[i] for i in xrange(nbc)] +
-            [self.gpu_precision(dt), self._cl_mesh_info]),
-                                        wait_for=wait_evts)
-        self.fields_on_grid[0].events.append(evt)
-        self._init_events[self.fields_on_grid[0]] = []
-
-
-    def _compute_1k_euler_simpleechelle(self, simulation, dt_coeff, split_id, old_dir):
-        dt = simulation.time_step * dt_coeff
-        wait_evts = self.velocity.events + \
-            self._init_events[self.fields_on_grid[0]] + \
-            self.fields_on_grid[0].events
-        nbc = self.fields_on_grid[0].nb_components
-        evt = self.num_advec_and_remesh(*tuple(
-            [self.velocity.gpu_data[self.direction], ] +
-            [self.fields_on_part[self.fields_on_grid[0]][i]
-             for i in xrange(nbc)] +
-            [self.fields_on_grid[0].gpu_data[i] for i in xrange(nbc)] +
-            [self.gpu_precision(dt), self._cl_mesh_info]),
-                                        wait_for=wait_evts)
-        self.fields_on_grid[0].events.append(evt)
-        self._init_events[self.fields_on_grid[0]] = []
-
-    def get_profiling_info(self):
-        for k in [self.copy, self.transpose_xy, self.transpose_xy_r,
-                  self.transpose_xz, self.transpose_xz_r,
-                  self.num_advec_and_remesh,
-                  self.num_advec, self.num_remesh]:
-            if k is not None:
-                for p in k.profile:
-                    self.profiler += p
-
-    @debug
-    def finalize(self):
-        """
-        Cleaning, if required.
-        """
-        pass
-        # for w in self._rwork:
-        #     self.cl_env.global_deallocation(w)
-        # self.cl_env.global_deallocation(self._cl_mesh_info)
diff --git a/hysop/old/gpu.old/gpu_particle_advection_dir.py b/hysop/old/gpu.old/gpu_particle_advection_dir.py
deleted file mode 100644
index 2b1e548fcd6d47e1f48c212a6f8569114a436f9f..0000000000000000000000000000000000000000
--- a/hysop/old/gpu.old/gpu_particle_advection_dir.py
+++ /dev/null
@@ -1,788 +0,0 @@
-"""Discrete advection for GPU
-"""
-from hysop import __VERBOSE__, __DEBUG__, __PROFILE__
-from hysop.constants import HYSOP_REAL, debug
-from hysop.tools.profiler import profile
-from hysop.tools.misc import WorkSpaceTools
-
-from hysop.operator.discrete.particle_advection_dir import ParticleAdvectionDir
-from hysop.numerics.update_ghosts import UpdateGhostsFull
-
-from hysop.backend.device.opencl import cl
-from hysop.backend.device.opencl.opencl_discrete import OpenClDiscreteField
-from hysop.backend.device.opencl.gpu_operator import GPUOperator
-from hysop.backend.device.opencl.opencl_kernel   import OpenClKernelLauncher
-from hysop.backend.device.kernel_autotuner import AutotunerConfig
-
-from hysop.backend.device.codegen.structs.mesh_info import MeshInfoStruct, TranspositionState, MeshDirection
-
-from hysop.methods import TimeIntegrator, Remesh, ExtraArgs, \
-        DeviceSupport, Splitting, MultiScale, Interpolation, Precision, \
-        StretchingFormulation, DirectionalSplitting, Backend, ExplicitRungeKutta
-
-from hysop.numerics.odesolvers    import Euler, RK2, RK3, RK4
-from hysop.numerics.interpolation.interpolation import Linear
-
-from hysop.numerics.remeshing import L2_1, L2_2, L2_3, L2_4
-from hysop.numerics.remeshing import L4_2, L4_3, L4_4
-from hysop.numerics.remeshing import L6_3, L6_4, L6_5, L6_6
-from hysop.numerics.remeshing import L8_4
-
-from hysop.constants import callback_profiler
-
-from hysop.constants import np
-
-class InstanceOf(object):
-    def __init__(self, cls):
-        self.cls = cls
-    def match_instance(self, obj):
-        return isinstance(obj,self.cls)
-
-
-class GPUParticleAdvectionDir(ParticleAdvectionDir, GPUOperator):
-    """
-    Interface for directionally splitted particle advection solvers on GPU
-    """
-    
-    from abc import ABCMeta, abstractmethod
-    __metaclass__ = ABCMeta
-
-    _default_method = {
-            TimeIntegrator: RK2, 
-            Interpolation:  Linear,
-            Backend:        Backend.OPENCL,
-            DeviceSupport:  DeviceSupport.DEVICE_GPU,
-            Splitting:      DirectionalSplitting.STRANG_FIRST_ORDER,
-            Remesh:         L2_1, 
-            MultiScale:     L2_1,
-            Precision:      HYSOP_REAL,
-            ExtraArgs:  {
-                'user_src':None,
-                'build_options':'',
-                'platform_id':None,
-                'device_id':None,
-                'device_type':None,
-                'stretching':None,
-                'diffusion':None,
-                'split_kernels':True,
-                'use_builtin_copy':True,
-                'autotuner_config':AutotunerConfig()
-            }
-        }
-    
-    _valid_method_values = {
-        TimeIntegrator: [Euler,RK2,RK3,RK4,InstanceOf(ExplicitRungeKutta)], 
-        Interpolation:  [Linear],
-        Remesh:         [L2_1,L2_2,L2_3,L2_4,L4_2,L4_3,L4_4,L6_3,L6_4,L6_5,L6_6,L8_4], 
-        DeviceSupport:  [DeviceSupport.DEVICE_GPU], 
-        Backend:        [Backend.OPENCL],
-        Splitting:      [DirectionalSplitting.STRANG_FIRST_ORDER, 
-                         DirectionalSplitting.STRANG_SECOND_ORDER],
-        Precision:      [np.float32,np.float64]
-    }
-    
-    @staticmethod
-    def check_input_method(user_method={}):
-        default_method      = GPUParticleAdvectionDir._default_method
-        valid_method_values = GPUParticleAdvectionDir._valid_method_values
-
-        for k in user_method.keys():
-            if k not in default_method.keys():
-                msg = "Unknown method key '{}', valid keys are:\n\t{}"
-                raise ValueError(msg.format(k, default_method.keys()))
-            if k==ExtraArgs:
-                default_extra_args = default_method[ExtraArgs]
-                user_extra_args    = user_method[ExtraArgs]
-                vke = default_extra_args.keys()
-                for ke in user_extra_args.keys():
-                    if ke not in vke:
-                        msg="WARNING: Unknown extra arg '{}', ' \
-                            + 'valid values are:\n\t{}".format(ke,vke)
-                        print msg
-            elif (valid_method_values[k] is not None):
-                valid_vals = [v for v in valid_method_values[k]     if not isinstance(v,InstanceOf)]
-                valid_cls  = [v.cls for v in valid_method_values[k] if     isinstance(v,InstanceOf)]
-                if (user_method[k] not in valid_vals \
-                        and user_method[k].__class__ not in valid_cls):
-                    pass
-                    msg = "Unknown method value '{}', valid values for key '{}' are:\n\t{}"
-                    raise ValueError(msg.format(user_method[k],k,valid_method_values[k]))
-        
-        method = default_method.copy()
-        method.update(user_method)
-        
-        method[ExtraArgs] = default_method[ExtraArgs].copy()
-        if ExtraArgs in user_method.keys():
-            method[ExtraArgs].update(user_method[ExtraArgs])
-        
-        if __DEBUG__:
-            print "Checked input method for ParticleAdvection:"
-            for k,v in method.iteritems():
-                print "\t{}: {}".format(k,v)
-            print
-
-        return method
-
-    @debug
-    def __init__(self, **kwds):
-        """Particular advection of field(s) in a given direction,
-        on GPU.
-
-        OpenCL kernels are build once per dimension in order to handle
-        directional splitting with resolution non uniform in directions.
-
-        Parameters
-        ----------
-        kwds : 
-            Base classes parameters
-
-        Note
-        ----
-        * warning : this operator is derived from ParticleAdvectionDir AND
-        GPUOperator. kwds must then handle argument of both classes.
-        """
-
-        # Set or overwrite default method by user specified one
-        if 'method' in kwds:
-            method = self.check_input_method(kwds['method'])
-        else:
-            method = self.check_input_method()
-        kwds['method'] = method
-        
-        # Kernel configuration
-        extra_args = method[ExtraArgs]
-        self._split_kernels    = extra_args.pop('split_kernels')
-        self._user_src         = extra_args.pop('user_src')
-        self._build_options    = extra_args.pop('build_options')
-        self._use_builtin_copy = extra_args.pop('use_builtin_copy')
-        self._autotuner_config = extra_args.pop('autotuner_config')
-    
-        stretching = extra_args.pop('stretching')
-        if (stretching is not None):
-            self._has_stretching = True
-            self._stretching = stretching
-        else:
-            self._has_stretching = False
-
-        diffusion  = extra_args.pop('diffusion')
-        if (diffusion is not None):
-            self._has_diffusion = True
-            self._diffusion = diffusion
-        else:
-            self._has_diffusion = False
-
-        super(GPUParticleAdvectionDir, self).__init__(**kwds)
-        self.fields_on_grid = self.advected_fields
-        
-        self._synchronize = None
-        if self._is_multi_scale:
-            self._synchronize = UpdateGhostsFull(
-                self.velocity_topo, self.velocity.nb_components)
-        
-        # Initialize base classes
-        GPUOperator.__init__(self,
-            platform_id = extra_args.pop('platform_id'),
-            device_id   = extra_args.pop('device_id'),
-            device_type = extra_args.pop('device_type'),
-            **kwds)
-
-        if extra_args:
-             raise ValueError('Some extra arguments were not used:\n\t{}\n'.format(extra_args))
-        
-        
-    def _check(self):
-        super(GPUParticleAdvectionDir,self)._check()
-        cls = self.__class__.__name__
-        if (self._split_kernels)  and (not self.supports_kernel_splitting()):
-            msg='Kernel splitting has not been implemented in {} yet !'.format(cls)
-            raise NotImplementedError(msg)
-        if (self._user_src is not None) and (not self.supports_user_kernels()):
-            msg='{} does not support user provided kernels.'.format(cls)
-            raise NotImplementedError(msg)
-        if (self._has_stretching)  and (not self.supports_stretching()):
-            msg='Stretching has not been implemented in {} yet !'.format(cls)
-            raise NotImplementedError(msg)
-        if (self._has_diffusion) and (not self.supports_diffusion()):
-            msg='Diffusion has not been implemented in {} yet !'.format(cls)
-            raise NotImplementedError(msg)
-
-    def _initialize(self):
-        super(GPUParticleAdvectionDir,self)._initialize()
-        self._configure()
-        self._initialize_cl_env()
-    
-    def _setup(self):
-        self._device_buffer_allocations()
-        
-        self._collect_copy_kernels()
-        self._collect_transposition_kernels()
-        self._collect_advection_remesh_kernels()
-        self._collect_user_kernels()
-        self._collect_extra_kernels()
-
-        self._device_buffer_initializations()
-        
-        self._build_exec_list()
-        self._initialize_events()
-        
-    
-    def _configure(self):
-        """
-        Compute resolutions for kernels for each direction.
-         Resolution of the local mesh but reoganized regarding
-         splitting direction (see hysop.gpu.gpu_operator.GPUOperator._reorder_vect).
-           direction X : XYZ
-           direction Y : YXZ
-           direction Z : ZYX in parallel, ZXY in sequential.
-        """
-
-        direction=self.direction
-        
-        mesh_dir = MeshDirection[direction]
-        if direction==MeshDirection.X:
-            mesh_state = TranspositionState.XYZ
-        elif direction==MeshDirection.Y:
-            mesh_state = TranspositionState.YXZ
-        elif direction==MeshDirection.Z:
-            mesh_state = TranspositionState.ZYX if (self._is_distributed) else TranspositionState.ZXY
-        else:
-            raise ValueError()
-
-        f_resol     = self.fields_topo.mesh.local_resolution
-        v_resol     = self.velocity_topo.mesh.local_resolution
-        f_resol_dir = self._reorder_vect(f_resol)
-        v_resol_dir = self._reorder_vect(v_resol)
-       
-        self.mesh_dir    = mesh_dir
-        self.mesh_state  = mesh_state
-
-        self.f_resol     = f_resol
-        self.v_resol     = v_resol
-        self.f_resol_dir = f_resol_dir
-        self.v_resol_dir = v_resol_dir
-    
-    def _collect_copy_kernels(self):
-        self.copy = OpenClKernelLauncher(cl.enqueue_copy,
-                                   self.cl_env.queue)
-    
-    def _collect_transposition_kernels(self):
-        """
-        Compile OpenCL sources for transposition kernels.
-           2D: XY -> YX -> XY
-           3D: 
-            *sequential: XYZ -> YXZ -> ZXY -> YXZ -> XYZ
-            *parallel:   XYZ -> YXZ -> ZYX -> YXZ -> XYZ
-        Kernels are only collected if the dimension and direction match.
-        """
-
-        dim       = self.dim
-        direction = self.direction
-        
-        smethod = self.method[Splitting]
-
-        is_XY_needed, is_YX_needed = False, False
-        is_XZ_needed, is_ZX_needed = False, False
-        
-        if dim==1:
-            pass
-        elif dim==2:
-            if smethod==DirectionalSplitting.STRANG_FIRST_ORDER:
-                is_XY_needed = (dim>1) and (direction == MeshDirection.X)
-                is_YX_needed = (dim>1) and (direction == MeshDirection.Y)
-            elif smethod==DirectionalSplitting.STRANG_SECOND_ORDEVR:
-                is_XY_needed = (dim>1) and (direction == MeshDirection.X)
-                is_YX_needed = (dim>1) and (direction == MeshDirection.Y)
-        elif dim==3:
-            if smethod==DirectionalSplitting.STRANG_FIRST_ORDER:
-                is_XY_needed = (dim>1) and (direction == MeshDirection.X)
-                is_YX_needed = (dim>1) and (direction == MeshDirection.Z)
-                is_XZ_needed = (dim>2) and (direction == MeshDirection.Y) \
-                        and (not self._is_distributed)
-                is_ZX_needed = (dim>2) and (direction == MeshDirection.Z) \
-                        and (not self._is_distributed)
-            elif smethod==DirectionalSplitting.STRANG_SECOND_ORDER:
-                is_XY_needed = (dim>1) and (direction == MeshDirection.X)
-                is_YX_needed = (dim>1) and (direction == MeshDirection.Y)
-
-                is_XZ_needed = (dim>2) and (direction == MeshDirection.Y) \
-                        and (not self._is_distributed)
-                is_ZX_needed = (dim>2) and (direction == MeshDirection.Z) \
-                        and (not self._is_distributed)
-        else:
-            msg = 'Dimension {} not implemented yet!'.format(dim)
-            raise NotImplementedError(msg)
-        
-        
-        transpose_xy, transpose_yx = None, None
-        transpose_zx, transpose_xz = None, None
-
-        if is_XY_needed:
-            transpose_xy = self._collect_transposition_kernel_xy()
-        if is_YX_needed:
-            transpose_yx = self._collect_transposition_kernel_yx()
-        if is_XZ_needed:
-            transpose_xz = self._collect_transposition_kernel_xz()
-        if is_ZX_needed:
-            transpose_zx = self._collect_transposition_kernel_zx()
-
-        self._build_tranpose_funcs(transpose_xy, transpose_yx, transpose_xz, transpose_zx)
-    
-    def _collect_advection_remesh_kernels(self):
-        self._advec, self._remesh = None, None
-        self._advec_and_remesh    = None
-
-        if self._split_kernels:
-            self._collect_advec_kernel()
-            self._collect_remesh_kernel()
-        else:
-            self._collect_advec_remesh_kernel()
-        
-        # point _do_compute_impl function to the right implementation
-        if self._is_distributed:
-            if self._split_kernels:
-                if self._is_multi_scale:
-                    impl = self._do_compute_2k_multiscale_comm
-                else:
-                    impl = self._do_compute_2k_monoscale_comm
-            else:
-                if self._is_multi_scale:
-                    impl = self._do_compute_1k_multiscale_comm
-                else:
-                    impl = self._do_compute_1k_monoscale_comm
-        else:
-            if self._split_kernels:
-                if self._is_multi_scale:
-                    impl = self._do_compute_2k_multiscale
-                else:
-                    impl = self._do_compute_2k_monoscale
-            else:
-                if self._is_multi_scale:
-                    impl = self._do_compute_1k_multiscale
-                else:
-                    impl = self._do_compute_1k_monoscale
-        self._do_compute_impl = impl
-
-
-    def _do_sync(self,simulation,**kargs):
-        if self._is_multi_scale:
-            if __VERBOSE__:
-                print '_do_sync'
-            self._synchronize(self.velocity.data)
-            self.velocity.to_device()
-
-    def _do_compute(self,simulation,dt_coeff,**kargs):
-        if __VERBOSE__:
-            print '_do_compute'
-        dt = simulation.time_step * dt_coeff
-        self._do_compute_impl(dt=dt,**kargs)
-
-    
-    def _device_buffer_allocations(self):
-        """Allocate OpenCL buffers for velocity and advected field.
-        """
-        if (self._rwork is None) or (self._iwork is None):
-            raise RuntimeError('_set_work_arrays has not been implemented properly.')
-        
-        # Velocity
-        OpenClDiscreteField.fromField(self.cl_env, self.velocity,
-                                   self.gpu_precision, simple_layout=False)
-        if self.velocity.allocate():
-            self.size_global_alloc += self.velocity.mem_size
-        
-        # Fields on grids
-        for fg in self.fields_on_grid:
-            OpenClDiscreteField.fromField(self.cl_env,
-                                       fg,
-                                       self.gpu_precision,
-                                       layout=False)
-            if fg.allocate():
-                self.size_global_alloc += fg.mem_size
-
-        # Fields on particles
-        self.fields_on_part = {}
-        start = 0
-        for fg in self.fields_on_grid:
-            for i in xrange(start, start + fg.nb_components):
-                if type(self._rwork[i]) is np.ndarray:
-                    self._rwork[i] = \
-                        self.cl_env.global_allocation(self._rwork[i])
-            self.fields_on_part[fg] = self._rwork[start: start + fg.nb_components]
-            start += fg.nb_components
-
-        # Particles positions (only used if advection and remesh kernels are splitted)
-        if self._split_kernels:
-            if type(self._rwork[start]) is np.ndarray:
-                self._rwork[start] = \
-                    self.cl_env.global_allocation(self._rwork[start])
-            self._particle_position = self._rwork[start]
-            start += 1
-            
-        if start != len(self._rwork):
-            raise RuntimeError('GPU allocation error!')
-    
-    def _device_buffer_initializations(self):
-        """
-        OpenCL buffer initializations from user OpenCL kernels.
-        Looking for kernels named init<FieldName>.
-        """
-        if (self._user_prg is None) or (self.direction != MeshDirection.X):
-            return
-        
-        f_resolution = self.f_resolution
-        v_resolution = self.v_resolution
-
-        for gpudf in self.variables:
-            #Looking for init_{field_name} kernel
-            match = 'init' + '_'.join(gpudf.name.split('_')[:-1])
-            for k in self.prg.all_kernels():
-                k_name = k.get_info(cl.kernel_info.FUNCTION_NAME)
-                if match.find(k_name) >= 0:
-                    if __VERBOSE__:
-                        print gpudf.name, '-> OpenCL Kernel', k_name
-                    if gpudf == self.velocity:
-                        workItemNumber, gwi, lwi = \
-                            self.cl_env.get_work_items(v_resolution)
-                    else:
-                        workItemNumber, gwi, lwi = \
-                            self.cl_env.get_work_items(f_resolution)
-                    
-                    init_field_kernel = cl.Kernel(self.prg, k_name)
-                    gpudf.setInitializationKernel(
-                        OpenClKernelLauncher(init_field_kernel,self.cl_env.queue,gwi,lwi)
-                    )
-    
-   
-    
-    def _build_exec_list(self):
-        """Prepare GPU kernels sequence
-         Build execution list regarding splitting:
-            
-            **Splitting Strang 1st order**
-               1D: X(dt)
-               2D: X(dt), Y(dt)
-               3D: X(dt), Y(dt), Z(dt)
-             
-            **Splitting Strang 2nd order**
-               1D: X(dt)
-               2D: X(dt/2), Y(dt), X(dt/2)
-               3D: X(dt/2), Y(dt/2), Z(dt), Y(dt/2), X(dt/2)
-
-        Parameter list is passed to kernel callers but can be completed
-        or overrided during this operator apply call.
-        """
-        
-        dim = self.dim
-        smethod = self.method[Splitting]
-                
-        dim_msg    = 'Unsupported dimension {} for splitting method {}.'.format(dim,smethod)
-        method_msg = 'Splitting method {} not implemeted yet on GPU.'.format(smethod)
-
-        param_list,exec_list = None,None
-
-        if smethod == DirectionalSplitting.STRANG_FIRST_ORDER:
-            if self.dim == 1:
-                param_list = [
-                    {'dir':MeshDirection.X, 'dt_coeff':1.0},
-                ]
-                exec_list = [
-                    [self._do_sync, self._do_copy, self._do_compute], # X(dt)
-                ]
-            elif dim == 2:
-                param_list = [
-                    {'dir':MeshDirection.X, 'dt_coeff':1.0},
-                    {'dir':MeshDirection.Y, 'dt_coeff':1.0},
-                ]
-                exec_list = [
-                    [self._do_sync, self._do_copy, self._do_compute, self._do_transpose_xy_g2p], # X(dt)
-                    [                              self._do_compute, self._do_transpose_yx_g2g], # Y(dt)
-                ]
-            elif dim == 3:
-                param_list = [
-                    {'dir':MeshDirection.X, 'dt_coeff':1.0},
-                    {'dir':MeshDirection.Y, 'dt_coeff':1.0},
-                    {'dir':MeshDirection.Z, 'dt_coeff':1.0},
-                ]
-                exec_list = [
-                    [self._do_sync, self._do_copy, self._do_compute, self._do_transpose_xy_g2p], # X(dt)
-                    [                              self._do_compute, self._do_transpose_xz_g2p], # Y(dt)
-                    [                              self._do_compute, self._do_transpose_zx_g2p,  \
-                                                                     self._do_transpose_yx_p2g] # Z(dt)
-                ]
-            else:
-                raise ValueError(dim_msg)
-        elif smethod == DirectionalSplitting.STRANG_SECOND_ORDER:
-            if self.dim == 1:
-                param_list = [
-                    {'dir':MeshDirection.X, 'dt_coeff':1.0},
-                ]
-                exec_list = [
-                    [self._do_sync, self._do_copy, self._do_compute], # X(dt)
-                ]
-            elif dim == 2:
-                param_list = [
-                    {'dir':MeshDirection.X, 'dt_coeff':0.5},
-                    {'dir':MeshDirection.Y, 'dt_coeff':1.0},
-                    {'dir':MeshDirection.X, 'dt_coeff':0.5},
-                ]
-                exec_list = [
-                    [self._do_sync, self._do_copy, self._do_compute, self._do_transpose_xy_g2p], # X(dt/2)
-                    [                              self._do_compute, self._do_transpose_yx_g2p], # Y(dt)
-                    [                              self._do_compute]                             # X(dt/2)
-                ]
-            elif dim == 3:
-                param_list = [
-                    {'dir':MeshDirection.X, 'dt_coeff':0.5},
-                    {'dir':MeshDirection.Y, 'dt_coeff':0.5},
-                    {'dir':MeshDirection.Z, 'dt_coeff':1.0},
-                    {'dir':MeshDirection.Y, 'dt_coeff':0.5},
-                    {'dir':MeshDirection.X, 'dt_coeff':0.5},
-                ]
-                exec_list = [
-                    [self._do_sync, self._do_copy, self._do_compute, self._do_transpose_xy_g2p], # X(dt/2)
-                    [                              self._do_compute, self._do_transpose_xz_g2p], # Y(dt/2)
-                    [                              self._do_compute, self._do_transpose_zx_g2p], # Z(dt)
-                    [                              self._do_compute, self._do_transpose_yx_g2p], # Y(dt/2)
-                    [                              self._do_compute]                             # X(dt/2)
-                ]
-            else:
-                raise ValueError(dim_msg)
-        else:
-            raise ValueError(method_msg)
-
-        if (len(exec_list)!=len(param_list)):
-            raise ValueError('param_list and exec_list length mismatch!')
-        
-        self._param_list = param_list
-        self._exec_list  = exec_list
-
-    def _initialize_events(self):
-        # Particle initialisation OpenCL events for each field
-        pass
-
-
-    def step_directions(self):
-        dirs = []
-        for p in self._param_list:
-            dirs.append(p['dir'])
-        return dirs
-    
-## 
-## Discrete operator interface
-##
-    def get_work_properties(self):
-        # Shape of reference comes from fields, not from velocity
-        res = self.f_resol
-        
-        iw,rw = 0,0
-        for f in self.fields_on_grid:
-            rw += f.nb_components
-        if self._split_kernels:
-            rw += 1  # work array for particles positions
-
-        work = {}
-        work['rwork'] = np.asarray((res,)*rw)
-        work['iwork'] = np.asarray((res,)*iw)
-        return work
-
-    def setup(self, rwork, iwork):
-        self._rwork = rwork
-        self._iwork = iwork
-        self._setup()
-    
-    @debug
-    @profile
-    def apply(self, simulation, step_id, **extra_params):
-        """
-        Apply operator along specified splitting direction.
-        @param simulation : Simulation to which his operator applies
-        @param step_id    : Splitting step id
-        @param extra_params: Extra kernel parameters that complete or override 
-            step parameters param_list[step_id].
-        """
-
-        if (simulation is None):
-            raise ValueError('simulation is None.')
-        if (step_id<0) or (step_id>=len(self._exec_list)): 
-            raise ValueError('step_id is out of bounds.')
-
-        # in first step, synchronize previous work on gpu fields and clean events
-        if step_id == 0:
-            for v in self.variables:
-                v.clean_events()
-        
-        self._pre_apply()
-    
-        import copy
-        parameters = copy.deepcopy(self._param_list[step_id])
-        parameters.update(extra_params)
-       
-        my_dir  = self.direction
-        req_dir = parameters['dir']
-        if my_dir != req_dir:
-            msg = 'Discrete splitting operator called for wrong direction, \
-splitting step id {} requires direction {} but currently applied operator \
-direction is {}!'.format(step_id,MeshDirection[req_dir],MeshDirection[my_dir])
-            raise RuntimeError(msg)
-
-        exec_list = self._exec_list[step_id]
-        for exe in exec_list:
-            exe(simulation=simulation, **parameters)
-    
-    def get_profiling_info(self):
-        for exe in [self.copy, 
-                  self.transpose_xy, self.transpose_xz,
-                  self.transpose_xz, self.transpose_zx,
-                  self._advec_and_remesh,
-                  self._advec, self._remesh]:
-            if exe is not None:
-                if isinstance(exe,dict):
-                    exe = exe.values()
-                if not isinstance(exe,list):
-                    exe = [exe]
-
-                for k in exe:
-                    for p in k.profile:
-                        self.profiler += p
-                
-    
-    @debug
-    def finalize(self):
-         for work in self._rwork:
-             self.cl_env.global_deallocation(work)
-         for work in self._iwork:
-             self.cl_env.global_deallocation(work)
-
-
-##  
-## OpenCL kernel interface
-##
-    @abstractmethod
-    def _initialize_cl_env(self):
-        pass
-    
-    def _collect_transposition_kernel_xy(self):
-        raise NotImplementedError()
-    def _collect_transposition_kernel_yx(self):
-        raise NotImplementedError()
-    def _collect_transposition_kernel_xz(self):
-        raise NotImplementedError()
-    def _collect_transposition_kernel_zx(self):
-        raise NotImplementedError()
-
-    def _collect_advec_remesh_kernel():
-        raise NotImplementedError()
-    def _collect_advec_kernel():
-        raise NotImplementedError()
-    def _collect_remesh_kernel():
-        raise NotImplementedError()
-
-    def _collect_user_kernels():
-        if self.supports_user_kernels():
-            raise NotImplementedError()
-        else:
-            self._user_prg = None
-
-    def _collect_extra_kernels(self):
-        if self._has_stretching or self._has_diffusion:
-            raise NotImplementedError()
-        else: 
-            pass
-    
-    def _do_copy(self,**kargs):
-        raise NotImplementedError()
-    
-    def _do_compute_1k_monoscale(self, dt):
-        raise NotImplementedError()
-    def _do_compute_1k_multiscale(self, dt):
-        raise NotImplementedError()
-    def _do_compute_2k_monoscale(self, dt):
-        raise NotImplementedError()
-    def _do_compute_2k_multiscale(self, dt):
-        raise NotImplementedError()
-
-    def _do_compute_1k_monoscale_comm(self, dt):
-        raise NotImplementedError()
-    def _do_compute_1k_multiscale_comm(self, dt):
-        raise NotImplementedError()
-    def _do_compute_2k_monoscale_comm(self, dt):
-        raise NotImplementedError()
-    def _do_compute_2k_multiscale_comm(self, dt):
-        raise NotImplementedError()
-
-    def _pre_apply(self):
-        pass
-    
-    @staticmethod
-    def supports_kernel_splitting():
-        return False
-    @staticmethod
-    def supports_user_kernels():
-        return False
-    @staticmethod
-    def supports_stretching():
-        return False
-    @staticmethod
-    def supports_diffusion():
-        return False
-    
-    
-    def _exec_transpose(self,kernel,mode):
-        velocity = self.velocity
-        all_evts = []
-        evts = []
-        for Vi in velocity.gpu_data:
-            evt = kernel(Vi,Vi,wait_for=velocity.events)
-            evts.append(evt)
-        velocity.events += evts
-        all_evts+=evts
-
-        for (fg,fp) in self.fields_on_part.iteritems():
-            evts = []
-            for (g,p) in zip(fg.gpu_data,fp):
-                if mode=='g2g':
-                    evt = kernel(g,g,wait_for=fg.events)
-                elif mode=='g2p':
-                    evt = kernel(g,p,wait_for=fg.events)
-                elif mode=='p2g':
-                    evt = kernel(p,g,wait_for=fg.events)
-                else:
-                    raise ValueError('Unknown mode \'{}\'.'.format(mode))
-                evts.append(evt)
-            fg.events += evts
-            all_evts  += evts
-        return all_evts
-
-    def _build_tranpose_funcs(self,Txy,Tyx,Txz,Tzx):
-        
-        def build_none(fname):
-            def transposition_kernel(**kargs):
-                msg='The transposition kernel {} has not been collected.'
-                raise RuntimeError(msg.format(fname))
-            return transposition_kernel
-        
-        def build_func(fprefix,fname,src2dst,kernel):
-            def transposition_kernel(**kargs):
-                if __VERBOSE__:
-                    print fname
-                callback_profiler.tic(fprefix)
-                evts = self._exec_transpose(kernel,src2dst)
-                callback_profiler.tac(fprefix,evt=evts)
-                return evts
-            return transposition_kernel
-            
-        axes    = ['xy','yx','xz','zx']
-        kernels = [Txy,Tyx,Txz,Tzx]
-        for axe,kernel in zip(axes,kernels):
-            fprefix = 'do_transpose_{}'.format(axe)
-            for src2dst in ['g2g','g2p','p2g']:
-                fname = '_{}_{}'.format(fprefix,src2dst)
-                if kernel is None:
-                    func = build_none(fname)
-                else:
-                    func = build_func(fprefix,fname,src2dst,kernel)
-                    callback_profiler.register_tasks(fprefix)
-                setattr(self,fname,func)
-       
-   
-    
-    
diff --git a/hysop/old/gpu.old/gpu_stretching.py b/hysop/old/gpu.old/gpu_stretching.py
deleted file mode 100644
index 66b77d0cf47abe65cbb6b7f02c4d5139ee367bbe..0000000000000000000000000000000000000000
--- a/hysop/old/gpu.old/gpu_stretching.py
+++ /dev/null
@@ -1,233 +0,0 @@
-"""
-@file gpu_stretching.py
-
-Stretching on GPU
-"""
-from hysop import __VERBOSE__, __DEBUG__
-from hysop.constants import debug, np, DirectionLabels, hysop.core.mpi_REAL, ORDERMPI, \
-    HYSOP_REAL, ORDER
-from hysop.tools.numpywrappers import npw
-from hysop.tools.io_utils import IO
-from hysop.operator.discrete.discrete import DiscreteOperator, get_extra_args_from_method
-
-from hysop.core.mpi import Wtime
-from hysop.backend.device.opencl import cl
-from hysop.backend.device.opencl.gpu_operator import GPUOperator
-from hysop.backend.device.opencl.opencl_kernel   import OpenClKernelLauncher
-from hysop.backend.device.opencl.opencl_discrete import OpenClDiscreteField
-from hysop.tools.profiler   import FProfiler
-
-from hysop.methods                 import TimeIntegrator, SpaceDiscretization, Formulation, Support
-from hysop.numerics.odesolvers          import Euler, RK2, RK3, RK4
-from hysop.numerics.finite_differences  import FDC2, FDC4
-from hysop.operator.discrete.stretching import Conservative, GradUW
-
-from hysop.backend.device.codegen.kernels.stretching import CachedStretchingKernel
-from hysop.backend.device.opencl.opencl_tools import KernelAutotuner
-
-class GPUStretching(DiscreteOperator, GPUOperator):
-
-    _supported_time_integrators        = [Euler]
-    _supported_stretching_formulations = [GradUW]
-    _supported_space_discretizations   = [FDC2, FDC4]
-
-    @debug
-    def __init__(self, velocity, vorticity, **kwds):
-        if (velocity  is None): raise ValueError('Set velocity  is None!')
-        if (vorticity is None): raise ValueError('Set vorticity is None!')
-        variables = [velocity, vorticity]
-
-        if 'method' not in kwds:
-            import hysop.default_methods as default
-            kwds['method'] = default.GPU_STRETCHING
-        self._check_method(kwds['method'])
-
-        super(GPUStretching, self).__init__(variables=variables, **kwds)
-        self.input  = [velocity, vorticity]
-        self.output = [vorticity]
-        
-        GPUOperator.__init__(self,
-            platform_id = get_extra_args_from_method(self, 'platform_id', None),
-            device_id   = get_extra_args_from_method(self, 'device_id',   None),
-            device_type = get_extra_args_from_method(self, 'device_type', None),
-            **kwds)
-        
-        # Build options
-        build_opts = []
-        build_opts += get_extra_args_from_method(self, 'build_opts', [])
-        self.build_opts = filter(None, set(build_opts))
-        
-        # Kernel autotuner and caching options
-        self.autotuner_runs    = get_extra_args_from_method(self, 'autotuner_runs', 10)
-        self.force_renew_cache = get_extra_args_from_method(self, 'force_renew_cache', False)
-
-        # discrete fields
-        self.vorticity = vorticity
-        self.velocity  = velocity
-        
-        # order of spatial scheme
-        self.order = 2 if self.method[SpaceDiscretization] is FDC2 else 4
-        
-        # Worksize handling
-        self._cl_work_size = 0
-
-        ## GPU allocations
-        for field in variables:
-            OpenClDiscreteField.fromField(self.cl_env, field,
-                                    self.gpu_precision, simple_layout=False)
-            if field.allocate():
-                self.size_global_alloc += field.mem_size
-
-        topo = self.velocity.topology
-        self._cutdir_list = np.where(topo.cutdir)[0].tolist()
-        self._comm        = topo.comm
-        self._comm_size   = self._comm.Get_size()
-        self._comm_rank   = self._comm.Get_rank()
-        if self._comm_size > 1:
-            self._compute = None
-            raise RuntimeError('GPU Stretching does not support mutiple processes for now!')
-        else:
-            self._compute = self._compute_stretching
-        
-        # generate source and compile kernels
-        self._gen_cl_src()
-
-    def _check_method(self, method):
-        if Support not in method.keys() or method[Support].lower().find('gpu')<0:
-            msg = 'GPU Stretching operator must be called with explicit GPU Support!'
-            raise ValueError(msg)
-
-        if TimeIntegrator not in method.keys():
-            msg = 'No time integrator specified for the GPU Stretching Operator!'
-            raise ValueError(msg)
-        elif method[TimeIntegrator] not in self._supported_time_integrators:
-            msg =  'Asked for a \'{}\' time integrator '.format(method[TimeIntegrator])
-            msg += 'but only {} time integrators have been implemented in the GPU Stretching Operator!'.format(self._supported_time_integrators)
-            raise NotImplementedError(msg)
-        
-        if SpaceDiscretization not in method.keys():
-            msg = 'No space discretization specified for the GPU Stretching Operator!'
-            raise ValueError(msg)
-        elif method[SpaceDiscretization] not in self._supported_space_discretizations:
-            msg =  'Asked for a \'{}\' space discretization '.format(method[SpaceDiscretization])
-            msg += 'but only {} space discretizations have been implemented in the GPU Stretching Operator!'.format(self._supported_space_discretizations)
-            raise NotImplementedError(msg)
-
-        if Formulation not in method.keys():
-            msg = 'No formulation specified for the GPU Stretching Operator!'
-            raise ValueError(msg)
-        elif method[Formulation] not in self._supported_stretching_formulations:
-            msg =  'Asked for a \'{}\' stretching formulation '.format(method[Formulation])
-            msg += 'but only {} formulations have been implemented in the GPU Stretching Operator!'.format(self._supported_stretching_formulations)
-            raise NotImplementedError(msg)
-
-    def _gen_cl_src(self):
-        cl_env   = self.cl_env
-        context  = cl_env.ctx
-        device   = cl_env.device
-        platform = cl_env.platform
-        typegen  = cl_env.typegen
-
-        topo     = self.velocity.topology
-        work_dim = self.dim
-        mesh     = topo.mesh
-        gwi      = mesh.local_resolution
-        
-        build_opts        = self.build_opts
-        autotuner_runs    = self.autotuner_runs
-        force_renew_cache = self.force_renew_cache
-        
-        dt = typegen.make_floatn([0.01],1)
-        
-        from hysop.backend.device.codegen.structs.mesh_info import MeshInfoStruct
-        mesh_info = MeshInfoStruct.build_instance_from_mesh(typegen, mesh)
-        mesh_info_buffer = cl.Buffer(context, cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR,
-                hostbuf=mesh_info)
-
-        
-        kernel_args    = [dt]                                               \
-                        + self.vorticity.gpu_data                           \
-                        + self.velocity.gpu_data                            \
-                        + [mesh_info_buffer]                                \
-                        + [None] #local memory buffer
-        
-        name = CachedStretchingKernel.codegen_name(typegen.fbtype, work_dim)
-        autotuner = KernelAutotuner(name,work_dim,typegen.fbtype,build_opts,
-                nruns=autotuner_runs,force_renew_cache=force_renew_cache)
-        autotuner.add_filter('3d_shape', autotuner.min_workitems_per_direction)
-
-        (gwi, lwi, stats) = autotuner.bench(context, platform, device, gwi, kernel_args, 
-                kernel_generator=self._gen_and_build_kernel, dump_src=False, verbose=False,
-                min_local_size=4, min_load_factor=4)
-
-        (kernel, kernel_args, cached_bytes) = self._gen_and_build_kernel(lwi, gwi, kernel_args, 
-                dump_src=True, verbose=True)
-
-
-        self.kernel_args = kernel_args
-        self.size_local_alloc += cached_bytes
-        
-        kernels = {}
-        kernels['stretching'] = OpenClKernelLauncher(kernel, cl_env.queue, gwi, lwi)
-        self.kernels = kernels
-
-    def _gen_and_build_kernel(self, local_size, global_size, kernel_args, 
-            dump_src=False, build_opts=None, verbose=False, **kargs):
-        cl_env  = self.cl_env
-        context = cl_env.ctx
-        device  = cl_env.device
-        typegen = cl_env.typegen
-
-        work_dim      = self.dim 
-        symbolic_mode = __VERBOSE__ or __DEBUG__
-        order         = self.order
-        known_vars    = { 'local_size':  local_size }
-
-        codegen = CachedStretchingKernel(typegen=typegen,
-                work_dim=work_dim,
-                order=order,
-                device=device, 
-                context=context,
-                known_vars=known_vars,
-                symbolic_mode=symbolic_mode)
-        
-        src = codegen.__str__()
-        if dump_src:
-            dump_folder=IO.default_path()+'/generated_kernels'
-            codegen.to_file(dump_folder, codegen.name+'.cl')
-            if __VERBOSE__ or __DEBUG__:
-                print '{} source dumped to {}/{}.'.format(codegen.name, dump_folder, codegen.name+'.cl')
-        
-        cached_bytes = codegen.cache_alloc_bytes(local_size)
-        local_mem       = cl.LocalMemory(cached_bytes)
-        kernel_args[-1] = local_mem
-
-        build_opts = [] if (build_opts is None) else build_opts
-
-        prg        = cl_env.build_raw_src(src, build_opts, force_verbose=verbose)
-        kernel     = prg.all_kernels()[0]
-
-        return (kernel, kernel_args, cached_bytes)
-
-    def _compute_stretching(self, simulation):
-        dt = self.cl_env.typegen.make_floatn(simulation.time_step,1)
-        
-        kernel = self.kernels['stretching']
-        kernel_args = self.kernel_args
-        kernel_args[0] = dt
-
-        input_events   = [evt for input in self.input for evt in input.events]
-        stretching_evt = kernel(*kernel_args, wait_for=input_events)
-        output_events  = [stretching_evt]
-        
-        for var in self.variables:
-            var.events += output_events
-
-    def apply(self, simulation):
-        self._compute(simulation)
-
-    def get_profiling_info(self):
-        for k in self.kernels.values():
-            if k is not None:
-                for p in k.profile:
-                    self.profiler += p
diff --git a/hysop/old/gpu.old/gpu_transfer.py b/hysop/old/gpu.old/gpu_transfer.py
deleted file mode 100644
index 2b889765f008460497e10b259593a20ddf97a6a5..0000000000000000000000000000000000000000
--- a/hysop/old/gpu.old/gpu_transfer.py
+++ /dev/null
@@ -1,144 +0,0 @@
-from hysop.operator.computational import Computational
-from hysop.methods import Support
-from hysop.operator.continuous import opsetup, opapply
-from hysop.numerics.update_ghosts import UpdateGhostsFull
-
-class DataTransfer(Computational):
-    """Operator for moving data between CPU and GPU."""
-
-    def __init__(self, source, target, component=None,
-                 run_till=None, freq=1, **kwds):
-        """
-        @param way: HostToDevice or DeviceToHost flag setting
-        the data copy direction.
-        """
-        super(DataTransfer, self).__init__(**kwds)
-
-        self.input = self.variables
-        self.output = self.variables
-
-        ## Transfer frequency in iteration number
-        self.freq = freq
-
-        # Object (may be an operator or a topology) which handles the
-        # fields to be transfered
-        self._source = source
-        # Object (may an operator or a topology) which handles the fields
-        # to be filled in from source.
-        self._target = target
-
-        self.component = component
-        if component is None:
-            # All components are considered
-            self._range_components = lambda v: xrange(v.nb_components)
-        else:
-            # Only the given component is considered
-            assert self.component >= 0, 'component value must be positive.'
-            self._range_components = lambda v: (self.component)
-
-        # Which operator must wait for this one before
-        # any computation
-        # Exp : run_till = op1 means that op1 will
-        # wait for the end of this operator before
-        # op1 starts its apply.
-        if run_till is None:
-            run_till = []
-
-        assert isinstance(run_till, list)
-        self._run_till = run_till
-
-        from hysop.topology.cartesian_topology import CartesianTopology
-        if not isinstance(self._target, CartesianTopology):
-            # target operator must wait for
-            # the end of this operator to apply.
-            self._run_till.append(self._target)
-
-        self._transfer = None
-        self._is_discretized = True
-
-    @opsetup
-    def setup(self, rwork=None, iwork=None):
-        for op in self._run_till:
-            op.wait_for(self)
-        topo = self.variables.values()[0]
-        self._d_var = [v.discrete_fields[topo] for v in self.variables]
-
-        from hysop.topology.cartesian_topology import CartesianTopology
-        source_is_topo = isinstance(self._source, CartesianTopology)
-        target_is_topo = isinstance(self._target, CartesianTopology)
-
-        source_is_gpu = False
-        try:
-            if self._source.method[Support].find('gpu') >= 0:
-                source_is_gpu = True
-        except:
-            pass
-        target_is_gpu = False
-        try:
-            if self._target.method[Support].find('gpu') >= 0:
-                target_is_gpu = True
-        except:
-            pass
-
-        ## Current transfer function
-        if source_is_gpu and not target_is_gpu:
-            self._transfer = self._apply_toHost
-        elif target_is_gpu and not source_is_gpu:
-            self._transfer = self._apply_toDevice
-        else:
-            if source_is_gpu and target_is_gpu:
-                raise RuntimeError(
-                    "One of source or target must be a GPU operator.")
-            if not source_is_gpu and not target_is_gpu:
-                if source_is_topo:
-                    print self.name, "Assume this is a toHost transfer"
-                    self._transfer = self._apply_toHost
-                elif target_is_topo:
-                    print self.name, "Assume this is a toDevice transfer"
-                    self._transfer = self._apply_toDevice
-                else:
-                    raise RuntimeError(
-                        "One of source or target must be a GPU operator " +
-                        self.name + ".")
-        # Function to synchronize ghosts before send data to device.
-        self._ghosts_synchro = None
-        # This function is needed only in a toDevice transfer and if the target operator needs ghosts
-        if self._transfer == self._apply_toDevice:
-            d_target = self._target.discrete_op
-            # Test for advection operator
-            if self._target._is_discretized and d_target is None:
-                d_target = self._target.advec_dir[0].discrete_op
-            if  d_target._synchronize is not None and d_target._synchronize:
-                self._ghosts_synchro = UpdateGhostsFull(
-                    self._d_var[0].topology, 
-                    self._d_var[0].nb_components)
-                
-    @opapply
-    def apply(self, simulation=None):
-        ite = simulation.current_iteration
-        if ite % self.freq == 0:
-            self._transfer()
-
-    def _apply_toHost(self):
-        for df in self._d_var:
-            for c in self._range_components(df):
-                df.toHost(component=c)
-
-    def _apply_toDevice(self):
-        if self._ghosts_synchro is not None:
-            for df in self._d_var:
-                # Ghosts Synchronization before sending
-                self._ghosts_synchro(df.data)
-        for df in self._d_var:
-            for c in self._range_components(df):
-                df.toDevice(component=c)
-
-    def wait(self):
-        for df in self._d_var:
-            df.wait()
-
-    def finalize(self):
-        pass
-
-    def computation_time(self):
-        pass
diff --git a/hysop/old/gpu.old/kernel_autotuner.py b/hysop/old/gpu.old/kernel_autotuner.py
deleted file mode 100644
index 95bd0340994b64271113a207806ca596ca42d652..0000000000000000000000000000000000000000
--- a/hysop/old/gpu.old/kernel_autotuner.py
+++ /dev/null
@@ -1,598 +0,0 @@
-
-from hysop.constants import __DEBUG__, __VERBOSE__, __KERNEL_DEBUG__, \
-    DEFAULT_AUTOTUNER_FLAG, DEFAULT_AUTOTUNER_PRUNE_THRESHOLD
-from hysop.tools.io_utils import IO
-from hysop.tools.misc import Utils
-
-from hysop.constants import np, AutotunerFlags
-from hysop.backend.device.opencl import cl, KERNEL_DUMP_FOLDER
-
-import os, itertools, hashlib, gzip, pickle
-
-class KernelGenerationError(RuntimeError):
-    pass
-    
-class OpenClKernelStatistics(object):
-    """Execution statistics from kernel events.
-    """
-    def __init__(self,nruns=0,events=None):
-        if events is not None:
-            p0 = events[0].profile
-            t0 = p0.end - p0.start
-            total = 0
-            maxi = t0 
-            mini = t0
-            for evt in events:
-                dt = evt.profile.end - evt.profile.start
-                total += dt
-                if dt<mini: 
-                    mini = dt
-                if dt>maxi:
-                    maxi = dt
-            
-            self.tot = total
-            self.min = mini
-            self.max = maxi
-            self.mean = total/len(events)
-        else:
-            self.tot  = None
-            self.min  = None
-            self.max  = None
-            self.mean = None
-        self.nruns = nruns
-
-    def __str__(self):
-        mini  = self.min   * 1e-6
-        maxi  = self.max   * 1e-6
-        total = self.tot   * 1e-6
-        mean  = self.mean  * 1e-6
-        return 'min={:.2f}ms, max={:.2f}ms, mean={:.2f}ms (nruns={})'.format(mini,maxi,mean,self.nruns)
-
-    @staticmethod
-    def cmp(lhs,rhs):
-        if lhs.mean==rhs.mean:
-            if lhs.max==rhs.max:
-                if lhs.min==rhs.min:
-                    return 0
-                elif lhs.min>rhs.min:
-                    return 1
-                else:
-                    return -1
-            elif lhs.max > rhs.max:
-                return 1
-            else:
-                return -1
-        elif lhs.mean>rhs.mean:
-            return 1
-        else:
-            return -1
-    def __lt__(self, other):
-        return self.cmp(self, other) < 0
-    def __gt__(self, other):
-        return self.cmp(self, other) > 0
-    def __eq__(self, other):
-        return self.cmp(self, other) == 0
-    def __le__(self, other):
-        return self.cmp(self, other) <= 0
-    def __ge__(self, other):
-        return self.cmp(self, other) >= 0
-    def __ne__(self, other):
-        return self.cmp(self, other) != 0
-
-class AutotunerConfig(object):
-
-    _default_initial_runs = {
-        AutotunerFlags.ESTIMATE: 1,
-        AutotunerFlags.MEASURE: 2,
-        AutotunerFlags.PATIENT: 4,
-        AutotunerFlags.EXHAUSTIVE: 8
-    } 
-
-    def __init__(self, 
-            autotuner_flag=DEFAULT_AUTOTUNER_FLAG,
-            prune_threshold=DEFAULT_AUTOTUNER_PRUNE_THRESHOLD, 
-            verbose=__VERBOSE__,
-            debug=__KERNEL_DEBUG__,
-            dump_folder=KERNEL_DUMP_FOLDER,
-            override_cache=False,
-            nruns=None):
-
-        self.autotuner_flag  = autotuner_flag
-        self.prune_threshold = prune_threshold
-        self.verbose = verbose
-        self.debug   = debug
-        self.override_cache  = override_cache
-        if (nruns is None):
-            self.nruns = AutotunerConfig._default_initial_runs[autotuner_flag]
-        else:
-            if (nruns<1):
-                raise ValueError('nruns<1.')
-            self.nruns = nruns
-
-class KernelAutotuner(object):
-    cache_dir      = IO.cache_path() + '/autotune'
-    config_file    = cache_dir+'/configs.pklz'
-    if not os.path.exists(cache_dir):
-        os.makedirs(cache_dir)
-    
-    """OpenCl kernel work group size autotuner.
-    """
-    def __init__(self,name,work_dim,build_opts,autotuner_config,local_work_dim=None):
-        """Initialize a KernelAutotuner.
-        
-        Parameters
-        ----------
-        work_dim: int
-            Work dimension used in targetted OpenCL kernels.
-        local_work_dim: int
-            Work dimension used in workgroup sizes.
-        """
-
-        if not isinstance(autotuner_config, AutotunerConfig):
-            raise ValueError('autotuner_config is not an AutotunerConfig.')
-        
-        self.name           = name
-        self.work_dim       = work_dim
-        self.local_work_dim = work_dim if (local_work_dim is None) else local_work_dim
-        self.build_opts = build_opts
-        self.autotuner_config = autotuner_config
-        
-        self.enable_variable_workload = False
-        self.workloads = [(1,1,1)]
-
-        self.extra_parameters = None
-
-        self._init_and_load_cache()
-        self._load_default_filters()
-    
-    @staticmethod 
-    def _hash_func():
-        return hashlib.new('sha256')
-
-    @staticmethod
-    def _load_configs():
-        configs = {}
-        if os.path.isfile(KernelAutotuner.config_file):
-            with gzip.open(KernelAutotuner.config_file, 'rb') as f:
-                configs.update(pickle.loads(f.read()))
-        return configs
-
-    @staticmethod
-    def _make_config_key(work_dim, typegen, build_opts):
-        concat_unique_list = lambda L: '['+'_'.join([str(val) for val in frozenset(L)])+']'
-        hasher = KernelAutotuner._hash_func()
-        hasher.update('{}_{}_{}'.format(work_dim, concat_unique_list(build_opts),
-             typegen.__repr__()))
-        return hasher.hexdigest() 
-    
-    def _update_configs(self):
-        configs = KernelAutotuner._load_configs()
-        if configs.keys() != self.configs.keys():
-            configs.update(self.configs)
-            with gzip.open(KernelAutotuner.config_file, 'wb') as f:
-                pickle.dump(configs,f)
-        self.configs = configs
-
-    def _init_and_load_cache(self):
-        cache_file = '{}/{}.pklz'.format(KernelAutotuner.cache_dir,self.name.replace(' ','_'))
-        if os.path.isfile(cache_file):
-            with gzip.open(cache_file, 'rb') as f:
-                self.results = pickle.loads(f.read())
-        else:
-            self.results = {}
-        self.configs    = KernelAutotuner._load_configs()
-        self.cache_file = cache_file
-
-    def _dump_cache(self):
-        with gzip.open(self.cache_file, 'wb') as f:
-            pickle.dump(self.results,f)
-
-
-    def enable_variable_workitem_workload(self, max_workitem_workload):
-        max_workitem_workload = np.asarray(max_workitem_workload)
-        assert (max_workitem_workload>0).all()
-        
-        def _compute_pows(max_workload):
-            pows = [1]
-            workload = 1
-            while(workload<max_workload):
-                workload <<= 1
-                pows.append(workload)
-            return pows
-
-        self.enable_variable_workload = True
-        if np.isscalar(max_workitem_workload):
-            workloads = _compute_pows(max_workitem_workload)
-            workloads = itertools.product(workloads, repeat=3)
-        else:
-            workloads = [_compute_pows(mww) for mww in max_workitem_workload]
-            workloads = itertools.product(*workloads)
-        self.workloads = [w for w in workloads]
-   
-    def register_extra_parameter(self,name, values):
-        if self.extra_parameters is None:
-            self.extra_parameters = {}
-        self.extra_parameters[name] = values
-
-    def add_filter(self,fname, f):
-        self.filters[fname] = f
-        return self
-
-    def get_candidates(self,**kargs):
-        return np.asarray([c for c in self._get_wi_candidates(**kargs)])
-    
-    def get_workloads(self,global_size):
-        def F(wl):
-            return (wl[0]<=global_size[0]) and (wl[1]<=global_size[1]) and (wl[2]<=global_size[2])
-        candidates = itertools.ifilter(F, self.workloads)
-        return np.asarray([c for c in candidates])
-
-    def get_extra_parameters(self):
-        if self.extra_parameters is None:
-            return [None]
-        else:
-            return itertools.product(*self.extra_parameters.values())
-
-    def bench(self,typegen,work_size,kernel_args,
-            kernel_generator    = None,
-            get_max_global_size = None,
-            **kargs):
-
-        assert 'global_size' not in kargs, \
-                'global_size has been replaced by work_size, due to the variable work per workitem option.'
-
-        if np.isscalar(work_size):
-            work_size = [work_size]
-        else:
-            work_size = list(work_size)
-        work_size += [1]*(3-len(work_size))
-        work_size = np.asarray(work_size)
-        ws = tuple(work_size,)
-
-        if not isinstance(kernel_args, list):
-            msg='kernel_args should be a list.'
-            raise ValueError(msg)
-        
-        if (get_max_global_size is None):
-            get_max_global_size = lambda work_size, work_load, **kargs: \
-                    (work_size+workload-1)/workload
-        
-        platform = typegen.platform
-        device   = typegen.device
-        ctx      = typegen.context
-
-        verbose = self.autotuner_config.verbose
-        if verbose:
-            print '== Kernel {} Autotuning =='.format(self.name)
-            print 'platform: {}'.format(platform.name)
-            print 'device: {}'.format(device.name)
-            print 'ctx: {}'.format(ctx)
-            print 'work_size: {}'.format(work_size)
-            print 'fbtype: {}'.format(typegen.fbtype)
-            print 'build_opts: {}'.format(self.build_opts)
-        
-        config = KernelAutotuner._make_config_key(self.work_dim, typegen, self.build_opts)
-        if config not in self.configs.keys():
-            self.configs[config] = {'work_dim':self.work_dim, 
-                                    'build_opts':self.build_opts, 
-                                    'typegen':typegen.__repr__()}
-            self._update_configs()
-        if config not in self.results.keys():
-            self.results[config] = {}
-        results = self.results[config]
-        
-        best_workload     = None
-        best_global_size  = None
-        best_local_size   = None
-        best_stats        = None
-        best_extra_params = None
-                
-        dump_cache = False
-
-        separator = '_'*100
-        indent = lambda i: '  '*i
-       
-        for extra_parameters in self.get_extra_parameters():
-            if self.extra_parameters is None:
-                extra_parameters = {}
-            else:
-                extra_parameters = dict(zip(self.extra_parameters.keys(), extra_parameters))
-
-            params_hash = hashlib.sha256(str(hash(frozenset(sorted(extra_parameters.items()))))).hexdigest()
-            params_hash = params_hash[:8]
-                
-            best_params_workload    = None
-            best_params_global_size = None
-            best_params_local_size  = None
-            best_params_stats       = None
-                
-            if verbose:
-                print separator
-                print '::Current tuning parameters:: {}'.format(extra_parameters)
-            
-            workloads = self.get_workloads(work_size) 
-            for workload in workloads:
-                workload=np.asarray(workload)
-                max_global_size = get_max_global_size(work_size, workload, **extra_parameters)
-            
-                best_workload_global_size = None
-                best_workload_local_size  = None
-                best_workload_stats       = None
-                best_workload_candidate   = None
-                best_workload_ids         = None
-                
-                if verbose:
-                    print separator
-                    print indent(1)+'::Current workload {}::'.format(workload)
-                    print indent(2)+'-> global_size is set to {}'.format(max_global_size)
-                
-                candidates = self.get_candidates(ctx=ctx,device=device,
-                        max_global_size=max_global_size,**kargs)
-                unpruned_candidates = np.zeros_like(candidates)
-                nruns=self.autotuner_config.nruns
-
-                step=0
-                all_pruned=False
-                while step==0 or candidates.shape[0]>1:
-                    stats = []
-                    pruned_count = 0
-                    unpruned_count = 0
-
-                    if verbose and candidates.shape[0]>0:
-                        msg='\n'+indent(2)+'Step {} :: running {} candidates over {} runs:'
-                        print msg.format(step,candidates.shape[0],nruns)
-                    
-                    for local_work_size in candidates:
-                        local_work_size  = np.asarray(local_work_size)
-                        lwi = tuple(local_work_size)
-                        
-                        try:
-                            (_kernel,_kernel_args,src_hash, global_size) = kernel_generator(
-                                    ctx=ctx,device=device, 
-                                    work_size=work_size,work_load=workload,local_work_size=lwi,
-                                    build_opts=self.build_opts, 
-                                    kernel_args=kernel_args,
-                                    extra_parameters=extra_parameters,
-                                    **kargs)
-                        except KernelGenerationError:
-                            pruned_count += 1
-                            continue
-                
-                        global_size = np.asarray(global_size) 
-                        gwi         = tuple(global_size)
-                        
-                        update=False
-                        pms = params_hash
-                        
-                        if (not self.autotuner_config.override_cache)    \
-                            and src_hash in results.keys()               \
-                            and pms in results[src_hash].keys()          \
-                            and ws  in results[src_hash][pms].keys()     \
-                            and gwi in results[src_hash][pms][ws].keys() \
-                            and lwi in results[src_hash][pms][ws][gwi].keys():
-                            
-                            stat = self.results[config][src_hash][pms][ws][gwi][lwi]
-                            if stat.nruns >= nruns:
-                                if self.autotuner_config.verbose:
-                                    print indent(3)+'{} {} => {} (cached)'.format(gwi, lwi, stat)
-                                unpruned_candidates[unpruned_count,:] = local_work_size
-                                unpruned_count+=1
-                                stats.append(stat)
-                                continue
-                            else:
-                                update=True
-                               
-                        if (best_stats is not None):
-                            current_best_stats = best_stats
-                        elif (best_params_stats is not None):
-                            current_best_stats = best_params_stats
-                        elif (best_workload_stats is not None):
-                            current_best_stats = best_workload_stats
-                        else:
-                            current_best_stats = None
-
-                        (stat,pruned) = self._bench_one(ctx,device,gwi,lwi,_kernel,_kernel_args,nruns,
-                                current_best_stats)
-
-                        if not pruned:
-                            unpruned_candidates[unpruned_count,:] = local_work_size
-                            unpruned_count+=1
-                            status='update' if update else 'new'
-                            stats.append(stat)
-                        else:
-                            pruned_count+=1
-                            status='pruned'
-
-                        if verbose:
-                            print indent(3)+'{} {} => {} ({})'.format(gwi, lwi, stat, status)
-
-                        if not pruned:
-                            if src_hash not in results.keys():
-                                results[src_hash] = {}
-                            if pms not in results[src_hash].keys():
-                                results[src_hash][pms] = {}
-                            if ws not in results[src_hash][pms].keys():
-                                results[src_hash][pms][ws] = {}
-                            if gwi not in results[src_hash][pms][ws].keys():
-                                results[src_hash][pms][ws][gwi] = {}
-                            results[src_hash][pms][ws][gwi][lwi] = stat
-                            dump_cache = True
-                    
-                    all_pruned = (pruned_count==candidates.shape[0])
-                    if unpruned_count+pruned_count!=candidates.shape[0]:
-                        raise RuntimeError()
-                    if all_pruned:
-                        break
-                    
-                    keep  = max(1,unpruned_count//2)
-                    best_workload_ids=Utils.argsort(stats)[:keep]
-                    candidates=unpruned_candidates[best_workload_ids,:]
-
-                    nruns *= 2
-                    step += 1
-                   
-                if all_pruned:
-                    if verbose:
-                        print separator
-                        print indent(1)+' Workload {} winner for kernel {}:'.format(workload, self.name)
-                        print indent(2)+'no winner (all candidates were pruned)'
-                    continue
-
-                if (candidates.shape[0]!=1 or len(best_workload_ids)!=1):
-                    raise RuntimeError()
-            
-                if dump_cache:
-                    self.results[config] = results
-                    self._dump_cache()
-                    dump_cache=False
-                
-                best_workload_id        = best_workload_ids[0]
-                best_workload_stats     = stats[best_workload_id]
-                best_workload_candidate = candidates[0]
-                
-                best_workload_local_size  = best_workload_candidate
-                (_,_,_,best_workload_global_size) = kernel_generator(
-                                    ctx=ctx,device=device, 
-                                    work_size=work_size,work_load=workload,
-                                    local_work_size=best_workload_local_size,
-                                    build_opts=self.build_opts, 
-                                    kernel_args=kernel_args,
-                                    extra_parameters=extra_parameters,
-                                    **kargs)
-                
-                if verbose:
-                    print separator
-                    print indent(1)+' Workload {} winner for kernel {}:'.format(workload, self.name)
-                    print indent(2)+'{} {} => {}'.format(best_workload_global_size, 
-                            best_workload_local_size, best_workload_stats)
-
-                if (best_params_stats is None) or (best_workload_stats<best_workload_stats):
-                    best_params_workload    = workload
-                    best_params_stats       = best_workload_stats
-                    best_params_global_size = best_workload_global_size
-                    best_params_local_size  = best_workload_local_size
-                
-            if verbose:
-                print separator
-                print ' Current parameters winner for kernel {}:'.format(self.name)
-                if (best_params_stats is None):
-                    print indent(1)+'no winner (all candidates were pruned)'
-                else:
-                    print indent(1)+'{} {} => {}'.format(best_params_global_size, 
-                            best_params_local_size, best_params_stats)
-
-            if (best_params_stats is not None) and \
-                    ((best_stats is None) or (best_params_stats<best_stats)):
-                best_workload     = best_params_workload
-                best_stats        = best_params_stats
-                best_global_size  = best_params_global_size
-                best_local_size   = best_params_local_size
-                best_extra_params = extra_parameters
-        
-        if verbose:
-            print separator
-            print ' BEST OVERALL RESULT for kernel {}:'.format(self.name)
-            print ' => Extra params: {}'.format(best_extra_params)
-            print ' => WL={} G={} L={} => {}'.format(best_workload, best_global_size, best_local_size, 
-                    best_stats)
-            print separator
-            print
-
-        return (best_global_size, best_local_size, best_stats, best_workload, best_extra_params)
-
-    
-    def _bench_one(self,ctx,device,global_size,local_work_size,kernel,kernel_args,nruns,
-                best_stat):
-        
-        assert(nruns>=1)
-        profiling_enable=cl.command_queue_properties.PROFILING_ENABLE
-        kernel.set_args(*kernel_args)
-
-
-        evts = []
-        with cl.CommandQueue(ctx,device,profiling_enable) as queue:
-            evt = cl.enqueue_nd_range_kernel(queue, kernel, global_size, local_work_size)
-            evts.append(evt)
-        stats = OpenClKernelStatistics(events=evts, nruns=1)
-        if (best_stat is None):
-            pruned = False
-        else:
-            pruned = (stats.min > self.autotuner_config.prune_threshold*best_stat.max)
-        
-        if not pruned and nruns>1: 
-            with cl.CommandQueue(ctx,device,profiling_enable) as queue:
-                for i in xrange(nruns-1):
-                    evt = cl.enqueue_nd_range_kernel(queue, kernel, global_size, local_work_size)
-                    evts.append(evt)
-            stats = OpenClKernelStatistics(events=evts,nruns=nruns)
-        return (stats, pruned)
-
-    def _get_wi_candidates(self,ctx,device,max_global_size,**kargs):
-        pows = []
-        size = 1
-        
-        max_size = device.max_work_group_size
-        while(size<=max_size):
-            pows.append(size)
-            size <<= 1
-        pows = np.asarray(pows)
-        
-        product = []
-        for i in xrange(self.work_dim):
-            good = (pows<=max_global_size[i])
-            product.append(pows[good])
-        for i in xrange(3-self.work_dim):
-            product.append([1])
-        candidates = itertools.product(*product)
-
-        for fname,f in self.filters.iteritems():
-            F = f(ctx=ctx,device=device,max_global_size=max_global_size,**kargs)
-            candidates = itertools.ifilter(F, candidates)
-        return candidates
-
-    def _load_default_filters(self):
-        self.filters = {}
-        self.add_filter('dim_reqs',self._dim_filter)
-        self.add_filter('minmax_wi',self._minmax_workitems_filter)
-
-    #default filters
-    def _dim_filter(self,device,**kargs):
-            work_dim   = self.local_work_dim
-            max_wi_dim = device.max_work_item_dimensions
-            return lambda local_work_size: (work_dim<=max_wi_dim) and  \
-                (work_dim==3 
-                or (work_dim==2 and local_work_size[2]==1)
-                or (work_dim==1 and local_work_size[1]==1 and local_work_size[2]==1))
-    def _minmax_workitems_filter(self, device, min_load_factor=None, **kargs):
-        def filter(local_work_size, **kargs):
-            max_wg_size = device.max_work_group_size
-            wi=1
-            for i in xrange(3):
-                wi*=local_work_size[i]
-            if min_load_factor is None:
-                return (wi<=max_wg_size)
-            else:
-                return (wi>=max_wg_size/min_load_factor) and (wi<=max_wg_size)
-        return filter
-    
-    #user available filters
-    def ordering_filter(self, **kargs):
-        return lambda local_work_size: (local_work_size[2]<=local_work_size[1]) \
-                and (local_work_size[1]<=local_work_size[0])
-    def min_workitems_per_direction(self, min_local_size, **kargs):
-        if np.isscalar(min_local_size):
-            min_local_size = [min_local_size]
-        else:
-            min_local_size = list(min_local_size)
-        min_local_size = np.asarray(min_local_size)
-        wd = self.work_dim
-        return lambda local_work_size,**kargs: (local_work_size[:wd]>=min_local_size).all()
-    def max_workitems_per_direction(self, max_local_size, **kargs):
-        if np.isscalar(max_local_size):
-            max_local_size = [max_local_size]
-        else:
-            max_local_size = list(max_local_size)
-        max_local_size = np.asarray(max_local_size)
-        wd = self.work_dim
-        return lambda local_work_size,**kargs: (local_work_size[:wd]<=max_local_size).all()
-            
diff --git a/hysop/old/gpu.old/kernel_benchmark.py b/hysop/old/gpu.old/kernel_benchmark.py
deleted file mode 100644
index c7d739937404a17067800e800475738f1e65f266..0000000000000000000000000000000000000000
--- a/hysop/old/gpu.old/kernel_benchmark.py
+++ /dev/null
@@ -1,441 +0,0 @@
-"""
-@file kernel_benchmark.py
-
-Package for benchmarking OpenCL kernels.
-"""
-from hysop.backend.device.opencl import cl
-from hysop.constants import np, HYSOP_REAL
-import pickle
-
-
-class BenchmarkSuite(object):
-    """Benchark suite management"""
-
-    def __init__(self, sizes, name,
-                 kernels, configs, versions, setupFunction,
-                 test=False, true_res=None, arg_to_test=0,
-                 inputs=None, file_name="Benchmarks_data",
-                 precision=HYSOP_REAL, nb_run=20):
-        """
-        Creates a benchmak suite, that consists in a list of Benchmark.
-
-        @param sizes : list of different problem sizes to benchmark.
-        @param name : name of the kernel to benchmark.
-        @param kernels : list of tuples containing kernel versions
-        (kernel sources file, OpenCL kernel name ).
-        @param configs : dictionary of configurations.
-        keys are kernel OpenCL name,
-        values are tuples containing (kernel parameters vectorization
-        and identifier in last position).
-        @param versions : kernel versions to bench (used as dictionaries keys)
-        @param setupFunction : Function that returns building options and
-        kernel arguments (assuming arrays are numpy arrays) depending on a
-        given config, size and input dictionary.
-        @param test : by default no results tests are performed
-        @param true_res : function to compute true results
-        @param arg_to_test : index of kernel arguments that contains result
-        @param inputs : input data
-        @param file_name : name of file to store results in
-        @param precision : Floating point precision for kernels
-        @param nb_run : Launch number to compute an average
-
-        On creation, data are loaded from a serialized version of timings
-        in the file represented by file_name parameter.
-        If no such file, a new database is created.
-        """
-        self.pickle_file_name = file_name
-        if precision == HYSOP_REAL:
-            self.pickle_file_name += '_DP'
-        else:
-            self.pickle_file_name += '_SP'
-        self.sizes = sizes
-        self.versions = versions
-        self.kernels_files = kernels
-        self.configs = configs
-        if inputs is None:
-            self.inputs = {}
-        else:
-            self.inputs = inputs
-        self.test = test
-        self.compute_true_res = true_res
-        self.arg_to_test = arg_to_test
-        self.kernel_name = name
-        self.precision = precision
-        self.nb_run = nb_run
-        self.setupFunction = setupFunction
-
-        if not self.test:
-            try:
-                print 'Loading form pickled file ...',
-                self.timings = pickle.load(
-                    open(self.pickle_file_name + '.pickle', 'r'))
-                print 'Done.'
-            except IOError:
-                print 'No such file : ', self.pickle_file_name + '.pickle'
-                print 'start new database'
-                self.timings = {}
-        else:
-            assert not true_res is None
-            assert arg_to_test >= 2
-            self.timings = {}
-        self.complete_timings()
-
-    def complete_timings(self):
-        """
-        Manage dictionary structure of timings.
-
-        Add all new keys in dictionaries.
-        """
-        if self.kernel_name not in self.timings.keys():
-            self.timings[self.kernel_name] = {}
-        for v in self.versions:
-            if not v in self.timings[self.kernel_name].keys():
-                self.timings[self.kernel_name][v] = {}
-            for c in self.configs[v]:
-                if not c[-1] in self.timings[self.kernel_name][v].keys():
-                    self.timings[self.kernel_name][v][c[-1]] = {}
-
-    def launch(self):
-        """
-        Performs the benchmark for all kernel versions and all configs.
-
-        If test flag is set to True, results is compared to the true result
-        and timings are not saved.
-        Else, timings are added to timings dictionary and then serialized in
-        a file. A text version is also writed.
-        """
-        if self.test:
-            self.true_res = {}
-            self.compute_true_res(self.sizes, self.true_res, self.inputs)
-        for v in self.versions:
-            conf_list = self.configs[v]
-            for conf in conf_list:
-                b = Benchmark(
-                    self.kernels_files[v][0], self.kernels_files[v][1],
-                    self.sizes, conf, self.setupFunction,
-                    inputs=self.inputs,
-                    precision=self.precision, nb_run=self.nb_run)
-                print self.kernel_name, v, conf[-1]
-                if self.test:
-                    b.test(self.true_res, self.arg_to_test)
-                else:
-                    b.launch()
-                    [self.timings[self.kernel_name][v][conf[-1]].__setitem__(
-                        t[0], t[1]) for t in b.timings.items()]
-                    pickle.dump(
-                        self.timings, open(
-                            self.pickle_file_name + '.pickle', 'w'), 0)
-                    self.write_file()
-
-    def write_file(self):
-        """
-        Write a text version of database.
-
-        Two outputs are created :
-        @li full : kernels versions and configs are given in columns and sizes
-        in rows.
-        @li hist : all data is given in rows to enable gnuplot to plot
-        histograms.
-        """
-        f = open(self.pickle_file_name + '_full.dat', 'w')
-        #build size set
-        sizes_set = set()
-        config_set = set()
-        cols_lists = {}
-        for k in self.timings.keys():
-            for v in self.timings[k].keys():
-                cols_lists[v] = []
-                for c in self.timings[k][v].keys():
-                    for s in self.timings[k][v][c]:
-                        sizes_set.add(s)
-                        config_set.add(c)
-        f.write("size dim ")
-        i = 0
-        for k in sorted(self.timings.keys()):
-            for v in sorted(self.timings[k].keys()):
-                for c in sorted(self.timings[k][v].keys()):
-                    f.write(v + '_' + c + ' ')
-                    cols_lists[v].append(i)
-                    i += 1
-        f.write("\n")
-        for s in sorted(sizes_set):
-            f.write(str(s[0]) + " " + str(len(s)) + " ")
-            for k in sorted(self.timings.keys()):
-                for v in sorted(self.timings[k].keys()):
-                    for c in sorted(self.timings[k][v].keys()):
-                        try:
-                            f.write(str(self.timings[k][v][c][s]) + " ")
-                        except KeyError as ke:
-                            if ke.message is s:
-                                f.write("- ")
-                            else:
-                                raise ke
-            f.write("\n")
-        for k in sorted(self.timings.keys()):
-            for v in sorted(self.timings[k].keys()):
-                f.write('#' + v + '=')
-                for i in cols_lists[v]:
-                    f.write(str(i) + ' ')
-                f.write('\n')
-        f.close()
-        f = open(self.pickle_file_name + '_hist.dat', 'w')
-        f.write("#kernel_nb=" + str(len(self.timings.keys())) + "\n")
-        f.write("#kernel_names=")
-        for k in sorted(self.timings.keys()):
-            f.write(k + " ")
-        f.write("\n")
-        f.write("#version_nb=")
-        for k in sorted(self.timings.keys()):
-            f.write(str(len(self.timings[k].keys())) + " ")
-        f.write("\n")
-        f.write("#config_nb=" + str(len(config_set)) + "\n")
-        for i, s in enumerate(sorted(sizes_set)):
-            f.write("#Block_{0}_{1}={2}\n".format(s[0], len(s), i))
-        for s in sorted(sizes_set):
-            for c in sorted(config_set):
-                for k in sorted(self.timings.keys()):
-                    for v in sorted(self.timings[k].keys()):
-                        f.write(str(s[0]) + " " + str(len(s)) + " ")
-                        f.write(k + " ")
-                        f.write(v + " ")
-                        f.write(c + " ")
-                        #print c
-                        # Compute work-item number from configuration string:
-                        # If config, start with 'wi=N', work-item number is set to N
-                        # Else, it assume a configuration matching 'A[xB]+[_fn]?'
-                        # It replace 'x' by '*' and divide by n. String is evaluated as python instruction
-                        if c[0:3] == 'wi=':
-                            cse = c.split('=')[1].split('_')[0]
-                        else:
-                            cs = c.replace(
-                                'Nx', str(s[0])).replace('x', '*').split('_')
-                            cse = cs[0] + '/' + cs[1][1] if len(cs) == 2 else cs[0]
-                        #print cse
-                        f.write(str(eval(cse)) + ' ')
-                        try:
-                            f.write(str(self.timings[k][v][c][s]) + "\n")
-                        except:
-                            f.write('-\n')
-            f.write("\n")
-        f.close()
-
-
-def find_min(filename, kernel=None, version=None, config=None, size=None):
-    d = pickle.load(open(filename, 'r'))
-    cc = {}
-    kl = d.keys() if kernel is None else [kernel] \
-        if isinstance(kernel, str) else kernel
-    for k in kl:
-        vl = d[k].keys() if version is None else [version] \
-            if isinstance(version, str) else version
-        for v in vl:
-            cl = d[k][v].keys() if config is None else [config] \
-                if isinstance(config, str) else config
-            for c in cl:
-                sl = d[k][v][c].keys() if size is None else [size] \
-                    if isinstance(size, tuple) else size
-                for s in sl:
-                    cc[k + '_' + v + '_' + c] = d[k][v][c][s]
-    print cc[min(cc, key=cc.get)], min(cc, key=cc.get)
-
-
-class Benchmark(object):
-    """Benchmark management"""
-
-    def __init__(self, cl_env, kernel_file, kernel_name, sizes,
-                 config, setupFunction,
-                 nb_run=20, inputs=None):
-        """
-        Creates a benchmark for a given source kernel_file, kernel for
-        different problem sizes.
-
-        @param kernel_file : kernels source file
-        @param kernel_name : name of the kernel to benchmark as a string
-        @param sizes : list of different problem sizes to launch kernel
-        @param config : list of kernel parameters
-        @param setupFunction : Function that returns building options and
-        kernel arguments (assuming arrays are numpy arrays) depending on a
-        given config, size and input dictionary.
-        @param nb_run : number of launches to average time (default = 20)
-        @param inputs : input data
-        @param precision : Floating point precision for kernels
-        """
-        self.cl_env = cl_env
-        self.platform = self.cl_env.platform
-        self.device = self.cl_env.device
-        self.ctx = self.cl_env.ctx
-        self.queue = self.cl_env.queue
-        ## OpenCL Source kernel_file
-        self.kernel_file = kernel_file
-        ## Kernel name
-        self.kernel = kernel_name
-        ## Compiled programs
-        self.prg = {}
-        ## Kernel timings
-        self.timings = {}
-        ## Kernel arguments
-        self.kernel_args = {}
-        ## Run number
-        self.nb_run = nb_run
-        ## Problems sizes
-        self.sizes = sizes
-        ## Problems inputs
-        self.inputs = inputs
-        ## Function to test size
-        self.setupFunction = setupFunction
-
-        self.setup = {}
-        if self.kernel_file is not None:
-            for size in self.sizes:
-                self.setup[size] = self.setupFunction(config, size, self.inputs)
-                if self.setup[size] is not None:
-                    toDelete = False
-                    #print np.prod(self.setup[size][1][1]), "WI (",self.device.max_work_group_size ," max )"
-                    if np.prod(self.setup[size][1][1]) > self.device.max_work_group_size:
-                        toDelete = True
-                    global_mem_used=0
-                    for arg in self.setup[size][1]:
-                        if isinstance(arg, np.ndarray) and \
-                                len(arg.shape) > 1:
-                            #print "Alloc : ", arg.nbytes, "Bytes (", self.device.max_mem_alloc_size, "max)"
-                            if arg.nbytes > self.device.max_mem_alloc_size:
-                                toDelete = True
-                            global_mem_used += arg.nbytes
-                            #print "Total Alloc : ", global_mem_used, "Bytes (", self.device.global_mem_size, "max)"
-                    if global_mem_used > self.device.global_mem_size:
-                        toDelete = True
-                        #print "Local Alloc : ", self.setup[size][2], "Bytes (", self.device.local_mem_size, "max)"
-                    if self.setup[size][2] > self.device.local_mem_size:
-                        toDelete = True
-                    if toDelete:
-                        self.setup[size] = None
-        if self.kernel_file is not None:
-            print kernel_file
-            for size in self.sizes:
-                if self.setup[size] is not None:
-                    print self.setup[size][0]
-                    self.prg[size] = self.cl_env.build_src(
-                        kernel_file, **self.setup[size][0])
-
-    def test(self, true_res, ind_res):
-        """
-        Testing result validity against a given 'true_res'.
-
-        @param true_res : expected result
-        @param ind_res : kernel argument index containig result
-        """
-        print "Testing : "
-        for size in self.sizes:
-            if self.setup[size] is not None:
-                kernel = eval('self.prg[size].' + self.kernel)
-                kernelArgs = self.setup[size][1]
-                clkernelArgs = [None]*len(self.setup[size][1])
-                res = np.empty_like(kernelArgs[ind_res])
-                mem_used = 0
-                for i in xrange(len(kernelArgs)):
-                    if isinstance(kernelArgs[i], np.ndarray):
-                        print "buffer", kernelArgs[i].shape
-                        buff = cl.Buffer(
-                            self.ctx, cl.mem_flags.READ_WRITE,
-                            size=kernelArgs[i].nbytes)
-                        cl.enqueue_copy(self.queue, buff, kernelArgs[i])
-                        mem_used += kernelArgs[i].nbytes
-                        clkernelArgs[i] = buff
-                    else:
-                        clkernelArgs[i] = kernelArgs[i]
-                print "Memory used : {0:.5f} GiB ({1:.2f}%)".format(
-                    mem_used / (1024. ** 3),
-                    100. * mem_used / (self.device.global_mem_size * 1.),
-                    mem_used / (1024. ** 3))
-                self.queue.finish()
-                print size, clkernelArgs[0:2],
-                kernel(self.queue, *tuple(clkernelArgs))
-                self.queue.finish()
-                cl.enqueue_copy(self.queue, res, clkernelArgs[ind_res])
-                self.queue.finish()
-                for i in xrange(len(kernelArgs)):
-                    if isinstance(clkernelArgs[i], cl.Buffer):
-                        print "Released", clkernelArgs[i]
-                        clkernelArgs[i].release()
-                try:
-                    # if len(res.shape) == 3:
-                    #     res = res[:size[0], :size[1], :size[2]]
-                    # else:
-                    #     res = res[:size[0], :size[1]]
-                    if np.float64 == HYSOP_REAL:
-                        exp = 15
-                    else:
-                        exp = 6
-                    print res.shape, size, true_res[size].shape
-                    np.testing.assert_array_almost_equal(
-                        res, true_res[size], decimal=exp)
-                    print 'Ok'
-                except AssertionError as ae:
-                    print 'Fail'
-                    print res.shape, true_res[size].shape, res - true_res[size]
-                    i = 0
-                    nb_bad_values = \
-                        res[np.where(np.abs(res - true_res[size]) >=
-                            eval('1e-' + str(exp - i)))].shape[0]
-                    print nb_bad_values,
-                    print "bad elements ~ 1e-{0}".format(exp - i)
-                    while nb_bad_values > 0:
-                        i += 1
-                        nb_bad_values = \
-                            res[np.where(np.abs(res - true_res[size]) >=
-                                         eval('1e-' + str(exp - i)))].shape[0]
-                        print nb_bad_values,
-                        print "bad elements ~ 1e-{0}".format(exp - i)
-                        if i == 4:
-                            raise ae
-
-    def launch(self, d=True):
-        """
-        Perform kernel benchmark.
-
-        Kernels are run nb_run times plus one. The first run is not
-        taken in timing average.
-        """
-        if d:
-            print "\nRunning : "
-        for size in self.sizes:
-            if d:
-                print size,
-            if self.setup[size] is not None:
-                kernel = eval('self.prg[size].' + self.kernel)
-                kernelArgs = self.setup[size][1]
-                mem_used = 0
-                for i in xrange(len(kernelArgs)):
-                    if isinstance(kernelArgs[i], np.ndarray):
-                        buff = cl.Buffer(
-                            self.ctx, cl.mem_flags.READ_WRITE,
-                            size=kernelArgs[i].nbytes)
-                        cl.enqueue_copy(self.queue, buff, kernelArgs[i])
-                        mem_used += kernelArgs[i].nbytes
-                        kernelArgs[i] = buff
-                if d:
-                    print "Memory used : {0:.5f} GiB ({1:.2f}%)".format(
-                        mem_used / (1024. ** 3),
-                        100. * mem_used / (self.device.global_mem_size * 1.)),
-                self.queue.finish()
-                if d:
-                    print kernelArgs[0:2],
-                evt = kernel(self.queue, *tuple(kernelArgs))
-                self.queue.finish()
-                evts = []
-                for i in xrange(self.nb_run):
-                    evt = kernel(self.queue, *tuple(kernelArgs))
-                    evts.append(evt)
-                self.queue.finish()
-                time = 0.
-                for evt in evts:
-                    time += (evt.profile.end - evt.profile.start) * 1e-9
-                self.timings[size] = time / self.nb_run
-                self.kernel_args[size] = kernelArgs[0:2]
-                if d:
-                    print self.timings[size]
-                for i in xrange(len(kernelArgs)):
-                    if isinstance(kernelArgs[i], cl.Buffer):
-                        kernelArgs[i].release()
-            else:
-                print "Incompatible sizes"
diff --git a/hysop/old/gpu.old/multi_gpu_particle_advection.py b/hysop/old/gpu.old/multi_gpu_particle_advection.py
deleted file mode 100644
index dc80317eb37fc2a386efbfbad428402b7408683f..0000000000000000000000000000000000000000
--- a/hysop/old/gpu.old/multi_gpu_particle_advection.py
+++ /dev/null
@@ -1,1030 +0,0 @@
-"""
-@file multi_gpu_particle_advection.py
-
-Discrete advection representation for Multi-GPU architecture.
-"""
-from abc import ABCMeta
-from hysop.constants import np, debug, HYSOP_INTEGER, HYSOP_REAL, ORDER,\
-    hysop.core.mpi_REAL, SIZEOF_HYSOP_REAL
-from hysop.backend.device.opencl.gpu_particle_advection import GPUParticleAdvection
-from hysop.operator.discrete.discrete import get_extra_args_from_method
-from hysop.methods import TimeIntegrator, MultiScale, Remesh
-from hysop.numerics.odesolvers import RK2
-from hysop.numerics.remeshing import Linear as Linear_rmsh
-from hysop.backend.device.opencl.opencl_kernel import OpenClKernelLauncher
-from hysop.tools.profiler import FProfiler
-from hysop.backend.device.opencl import cl, CL_PROFILE
-from hysop.core.mpi import Wtime
-from hysop.tools.numpywrappers import npw
-
-
-class MultiGPUParticleAdvection(GPUParticleAdvection):
-    """
-    Particle advection operator representation on multi-GPU.
-
-    """
-    __metaclass__ = ABCMeta
-
-    @debug
-    def __init__(self, **kwds):
-        """
-        Create a Advection operator.
-        Work on a given field (scalar or vector) at a given velocity to compute
-        advected values.
-        OpenCL kernels are build once per dimension in order to handle
-        directional splitting with resolution non uniform in directions.
-
-        @param max_velocity : maximum velocity estimation for computing communications buffer sizes.
-        The estimation may be global or by components.
-        @param max_dt : maximum time step estimation.
-        @remark : Buffer widths are computed from max_velocity, max_dt and the mesh sizes:
-          - velocity buffers : |max_velocity * max_dt / v_dx| + 1
-          - scalar buffers   : |max_velocity * max_dt / s_dx| + 1 + remesh_stencil/2
-        @remark : by default, velocity data are supposed to be on the host. If not, user
-        should set the arttribute velocity_only_on_device to True.
-        """
-        # fields_topo = kwds['fields_on_grid'][0].topology
-        # direction = kwds['direction']
-        # self._cut_dir = fields_topo.cutdir
-        # self._is_cut_dir = self._cut_dir[direction]
-
-        super(MultiGPUParticleAdvection, self).__init__(**kwds)
-        max_velocity = get_extra_args_from_method(self, 'max_velocity', None)
-        max_dt       = get_extra_args_from_method(self, 'max_dt', None)
-        max_cfl      = get_extra_args_from_method(self, 'max_cfl', None)
-        
-        self._velocity_only_on_device = get_extra_args_from_method(
-            self, 'velocity_only_on_device', False)
-        
-        if self._velocity_only_on_device:
-            self._get_velocity_buffers = self._get_velocity_buffers_from_device
-        else:
-            self._get_velocity_buffers = self._get_velocity_buffers_from_host
-
-        msg = "The Multi-GPU works only with the RK2 TimeIntegrator"
-        assert self.method[TimeIntegrator] == RK2, msg
-        assert self._comm_size > 1, 'Parallel only'
-        assert self.dim == 3, 'A 2D multi-GPU version is not yet available'
-
-        msg = "Either max_dt and _max_velocity or max_cfl must be given to advection "
-        msg += "for computing communication buffer sizes."
-        assert (max_dt is not None and max_velocity is not None) or max_cfl is not None
-
-        assert self.fields_topo.cutdir[self.direction]
-        assert self.fields_topo.shape[self.direction] > 1
-
-        ## Neighbours process in the current direction
-        first_cut_dir = self.fields_topo.cutdir.tolist().index(True)
-        msh = self.fields_topo.mesh
-        v_msh = self.velocity_topo.mesh
-        # Global start index (lowest computed point, excluding ghosts)
-        self._start_index = HYSOP_INTEGER(
-            msh.start()[self.direction])
-        # Velocity local start index (lowest computed point, excluding ghosts)
-        self._v_start_index = HYSOP_INTEGER(
-            v_msh.start()[self.direction])
-        # Global end  index (highest computed point, excluding ghosts)
-        self._stop_index = HYSOP_INTEGER(
-            self._start_index + msh.resolution[self.direction] - 1
-            - 2 * self.fields_topo.ghosts()[self.direction])
-        # Velocity global end  index (highest computed point, excluding ghosts)
-        self._v_stop_index = HYSOP_INTEGER(
-            self._v_start_index + v_msh.resolution[self.direction] - 1
-            - 2 * self.velocity_topo.ghosts()[self.direction])
-        if self.fields_topo.cutdir[self.direction]:
-            self._L_rk = self.fields_topo.neighbours[
-                0, self.direction - first_cut_dir]
-            self._R_rk = self.fields_topo.neighbours[
-                1, self.direction - first_cut_dir]
-
-        # Global resolution
-        self.t_nb = \
-            msh.discretization.resolution[self.direction] - 1
-        # Velocity global resolution
-        self.v_t_nb = \
-            v_msh.discretization.resolution[self.direction] - 1
-        i_s = [0] * self.dim
-        v_i_s = [0] * self.dim
-        i_s[self.direction] = self.fields_topo.ghosts()[self.direction]
-        self._start_coord = msh.coords[self.direction][tuple(i_s)]
-        v_i_s[self.direction] = self.velocity_topo.ghosts()[self.direction]
-        self._v_start_coord = v_msh.coords[self.direction][tuple(v_i_s)]
-        i_s[self.direction] = -1 - self.fields_topo.ghosts()[self.direction]
-        self._stop_coord = msh.coords[self.direction][tuple(i_s)]
-        v_i_s[self.direction] = -1 - \
-            self.velocity_topo.ghosts()[self.direction]
-        self._v_stop_coord = v_msh.coords[self.direction][tuple(v_i_s)]
-        # mesh step
-        self._space_step = msh.space_step
-        self._v_space_step = v_msh.space_step
-
-        # Maximum cfl for velocity and scalar
-        if max_cfl is not None:
-            scale_factor = self._v_space_step[self.direction]/self._space_step[self.direction]
-            try:
-                self.max_cfl_s = int(max_cfl[self.direction] * scale_factor) + 1
-                self.max_cfl_v = int(max_cfl[self.direction]) + 1
-            except TypeError:
-                self.max_cfl_s = int(max_cfl * scale_factor) + 1
-                self.max_cfl_v = int(max_cfl) + 1
-        else:
-            try:
-                self.max_cfl_s = int(max_velocity[self.direction] * max_dt /
-                                     self._space_step[self.direction]) + 1
-                self.max_cfl_v = int(max_velocity[self.direction] * max_dt /
-                                     self._v_space_step[self.direction]) + 1
-            except TypeError:
-                self.max_cfl_s = int(max_velocity * max_dt /
-                                     self._space_step[self.direction]) + 1
-                self.max_cfl_v = int(max_velocity * max_dt /
-                                     self._v_space_step[self.direction]) + 1
-
-        # Slice
-        self._sl_dim = slice(self.dim, 2 * self.dim)
-        self._cl_work_size = 0
-
-        #Advection variables
-        self._v_buff_width = self.max_cfl_v
-        _v_r_buff = npw.zeros((self._v_buff_width,
-                               self.v_resol_dir[1],
-                               self.v_resol_dir[2]))
-        _v_l_buff = npw.zeros_like(_v_r_buff)
-        self._v_r_buff_loc = npw.zeros_like(_v_r_buff)
-        self._v_l_buff_loc = npw.zeros_like(_v_r_buff)
-        self._cl_v_r_buff = self.cl_env.global_allocation(_v_l_buff)
-        self._cl_v_l_buff = self.cl_env.global_allocation(_v_r_buff)
-        cl.enqueue_copy(self.cl_env.queue,
-                        self._cl_v_r_buff, _v_r_buff).wait()
-        cl.enqueue_copy(self.cl_env.queue,
-                        self._cl_v_l_buff, _v_l_buff).wait()
-        self._cl_work_size += 2 * _v_l_buff.nbytes
-        self._v_r_buff, evt = cl.enqueue_map_buffer(
-            self.cl_env.queue,
-            self._cl_v_r_buff,
-            offset=0,
-            shape=_v_r_buff.shape,
-            dtype=HYSOP_REAL,
-            flags=cl.map_flags.READ | cl.map_flags.WRITE,
-            is_blocking=False,
-            order=ORDER)
-        evt.wait()
-        self._v_l_buff, evt = cl.enqueue_map_buffer(
-            self.cl_env.queue,
-            self._cl_v_l_buff,
-            offset=0,
-            shape=_v_l_buff.shape,
-            dtype=HYSOP_REAL,
-            flags=cl.map_flags.READ | cl.map_flags.WRITE,
-            is_blocking=False,
-            order=ORDER)
-        evt.wait()
-        self._v_l_buff_flat = self._v_l_buff.ravel(order='F')
-        self._v_r_buff_flat = self._v_r_buff.ravel(order='F')
-        self._v_l_buff_loc_flat = self._v_l_buff_loc.ravel(order='F')
-        self._v_r_buff_loc_flat = self._v_r_buff_loc.ravel(order='F')
-
-        self._v_buff_size = self._v_buff_width * \
-            self.v_resol_dir[1] * self.v_resol_dir[2]
-        self._v_pitches_host = (int(_v_l_buff[:, 0, 0].nbytes),
-                                int(_v_l_buff[:, :, 0].nbytes))
-        self._v_buffer_region = (
-            int(self._v_buff_width * SIZEOF_HYSOP_REAL),
-            int(self.v_resol_dir[1]),
-            int(self.v_resol_dir[2]))
-        self._v_block_size = 1024 * 1024  # 1MByte
-        while self._v_l_buff.nbytes % self._v_block_size != 0:
-            self._v_block_size /= 2
-        w = "WARNING: block size for pipelined GPU-to-GPU transfer is small, "
-        if self._v_block_size < 256 * 1024:
-            self._v_block_size = self._v_l_buff.nbytes / 4
-            print w + "use blocks of {0} MB (4 blocks velocity)".format(
-                self._v_block_size / (1024. * 1024.))
-        self._v_n_blocks = self._v_l_buff.nbytes / self._v_block_size
-        self._v_elem_block = np.prod(self._v_l_buff.shape) / self._v_n_blocks
-        self._l_recv_v = [None, ] * self._v_n_blocks
-        self._r_recv_v = [None, ] * self._v_n_blocks
-        self._send_to_l_v = [None, ] * self._v_n_blocks
-        self._send_to_r_v = [None, ] * self._v_n_blocks
-        self._evt_l_v = [None, ] * self._v_n_blocks
-        self._evt_r_v = [None, ] * self._v_n_blocks
-        self._v_block_slice = [None, ] * self._v_n_blocks
-        for b in xrange(self._v_n_blocks):
-            self._v_block_slice[b] = slice(
-                b * self._v_elem_block, (b + 1) * self._v_elem_block)
-
-        ## Python remeshing formula for the multiscale interpolation
-        self._py_ms_formula = self.method[MultiScale]
-        if self._isMultiScale:
-            if self._py_ms_formula is not Linear_rmsh:
-                raise ValueError('Not yet implemented' +
-                                 str(self.method[MultiScale]))
-        ## Python remeshing formula
-        self._py_remesh = self.method[Remesh]()
-        ## Number of weights
-        self._nb_w = len(self._py_remesh.weights)
-        self._s_buff_width = self.max_cfl_s + self._nb_w / 2
-        _s_l_buff = npw.zeros(
-            (self._s_buff_width * self.resol_dir[1] * self.resol_dir[2], ))
-        _s_r_buff = npw.zeros(
-            (self._s_buff_width * self.resol_dir[1] * self.resol_dir[2], ))
-        self._s_froml_buff_max = npw.zeros((self._s_buff_width,
-                                            self.resol_dir[1],
-                                            self.resol_dir[2]))
-        self._s_fromr_buff_max = npw.zeros_like(self._s_froml_buff_max)
-        self._cl_s_r_buff = self.cl_env.global_allocation(_s_l_buff)
-        self._cl_s_l_buff = self.cl_env.global_allocation(_s_r_buff)
-        cl.enqueue_copy(self.cl_env.queue,
-                        self._cl_s_r_buff, _s_r_buff).wait()
-        cl.enqueue_copy(self.cl_env.queue,
-                        self._cl_s_l_buff, _s_l_buff).wait()
-        self._cl_work_size += 2 * self._s_froml_buff_max.nbytes
-        self._s_l_buff, evt = cl.enqueue_map_buffer(
-            self.cl_env.queue,
-            self._cl_s_l_buff,
-            offset=0,
-            shape=_s_l_buff.shape,
-            dtype=HYSOP_REAL,
-            flags=cl.map_flags.READ | cl.map_flags.WRITE,
-            is_blocking=False,
-            order=ORDER)
-        evt.wait()
-        self._s_r_buff, evt = cl.enqueue_map_buffer(
-            self.cl_env.queue,
-            self._cl_s_r_buff,
-            offset=0,
-            shape=_s_r_buff.shape,
-            dtype=HYSOP_REAL,
-            flags=cl.map_flags.READ | cl.map_flags.WRITE,
-            is_blocking=False,
-            order=ORDER)
-        evt.wait()
-        self._s_froml_buff_flat = self._s_froml_buff_max.ravel(order='F')
-        self._s_fromr_buff_flat = self._s_fromr_buff_max.ravel(order='F')
-        # attributes declarations, values are recomputed at each time
-        self._s_buff_width_loc_p, self._s_buff_width_loc_m = 0, 0
-        self._s_buff_width_from_l, self._s_buff_width_from_r = 0, 0
-        self._s_froml_buff, self._s_locl_buff = None, None
-        self._s_fromr_buff, self._s_locr_buff = None, None
-        self._s_buffer_region_on_l, self._s_buffer_region_on_r = None, None
-        self._origin_locl, self._origin_locr = None, None
-        self._s_block_size_to_r, self._s_block_size_to_l = None, None
-        self._s_block_size_from_r, self._s_block_size_from_l = None, None
-        self._s_n_blocks_to_r, self._s_n_blocks_to_l = None, None
-        self._s_n_blocks_from_r, self._s_n_blocks_from_l = None, None
-        self._s_elem_block_to_r, self._s_elem_block_to_l = None, None
-        self._s_elem_block_from_r, self._s_elem_block_from_l = None, None
-        self._s_block_slice_to_r, self._s_block_slice_to_l = None, None
-        self._s_block_slice_from_r, self._s_block_slice_from_l = None, None
-        self._r_recv, self._l_recv = None, None
-        self._evt_get_l, self._evt_get_r = None, None
-        self._l_send, self._r_send = None, None
-
-        self._queue_comm_m = self.cl_env.create_other_queue()
-        self._queue_comm_p = self.cl_env.create_other_queue()
-
-        self.profiler += FProfiler('comm_gpu_advec_set')
-        self.profiler += FProfiler('comm_cpu_advec_get')
-        self.profiler += FProfiler('comm_cpu_advec')
-        self.profiler += FProfiler('comm_gpu_remesh_get')
-        self.profiler += FProfiler('comm_gpu_remesh_get_loc')
-        self.profiler += FProfiler('comm_gpu_remesh_set_loc')
-        self.profiler += FProfiler('comm_cpu_remesh')
-        self.profiler += FProfiler('comm_calc_remesh')
-
-        # Collect sources for communication
-        self._compute = self._compute_1c_comm
-        if self._is2kernel:
-            self._collect_kernels_cl_src_2k_comm()
-            self._num_comm_l = self._num_2k_comm_l
-            self._num_comm_r = self._num_2k_comm_r
-            self._num_comm = self._num_2k_comm
-        else:
-            self._collect_kernels_cl_src_1k_comm()
-            if self._isMultiScale:
-                self._num_comm_l = self._num_1k_ms_comm_l
-                self._num_comm_r = self._num_1k_ms_comm_r
-                self._num_comm = self._num_1k_ms_comm
-            else:
-                self._num_comm_l = self._num_1k_comm_l
-                self._num_comm_r = self._num_1k_comm_r
-                self._num_comm = self._num_1k_comm
-
-        if self.direction == 2:
-            # Device is in ZXY layout
-            self._pitches_dev = (
-                int(self.fields_on_grid[0].data[0][0, 0, :].nbytes),
-                int(self.fields_on_grid[0].data[0][:, 0, :].nbytes))
-            self._v_pitches_dev = (
-                int(self.velocity.data[0][0, 0, :].nbytes),
-                int(self.velocity.data[0][:, 0, :].nbytes))
-        elif self.direction == 1:
-            # Device is in YXZ layout
-            self._pitches_dev = (
-                int(self.fields_on_grid[0].data[0][0, :, 0].nbytes),
-                int(self.fields_on_grid[0].data[0][:, :, 0].nbytes))
-            self._v_pitches_dev = (
-                int(self.velocity.data[0][0, :, 0].nbytes),
-                int(self.velocity.data[0][:, :, 0].nbytes))
-        elif self.direction == 0:
-            # Device is in XYZ layout
-            self._pitches_dev = (
-                int(self.fields_on_grid[0].data[0][:, 0, 0].nbytes),
-                int(self.fields_on_grid[0].data[0][:, :, 0].nbytes))
-            self._v_pitches_dev = (
-                int(self.velocity.data[0][:, 0, 0].nbytes),
-                int(self.velocity.data[0][:, :, 0].nbytes))
-        # Beanching the proper _compute function
-        if self.fields_on_grid[0].nb_components > 1:
-            raise ValueError("Not yet implemented")
-
-        self._build_exec_list()
-
-    def _collect_kernels_cl_src_2k(self):
-        pass
-
-    def _collect_kernels_cl_src_1k(self):
-        pass
-
-    def _collect_kernels_cl_src_1k_comm(self):
-        """
-        Compile OpenCL sources for advection and remeshing kernel when
-        communications needed.
-        """
-        build_options = self.build_options + self._size_constants
-        if self._isMultiScale:
-            src, is_noBC, vec, f_space = \
-                self._kernel_cfg['advec_MS_and_remesh_comm']
-        else:
-            src, is_noBC, vec, f_space = \
-                self._kernel_cfg['advec_and_remesh_comm']
-        gwi, lwi = f_space(self.resol_dir, vec)
-        WINb = lwi[0]
-
-        build_options += " -D WI_NB=" + str(WINb)
-        if self._isMultiScale:
-            build_options += " -D MS_FORMULA="
-            build_options += self.method[MultiScale].__name__.upper()
-        build_options += " -D V_START_INDEX=" + str(self._v_start_index)
-        build_options += " -D V_STOP_INDEX=" + str(self._v_stop_index)
-        build_options += " -D START_INDEX=" + str(self._start_index)
-        build_options += " -D STOP_INDEX=" + str(self._stop_index)
-        build_options += " -D V_BUFF_WIDTH=" + str(self._v_buff_width)
-        build_options += " -D FORMULA=" + self.method[Remesh].__name__.upper()
-        build_options += " -D PART_NB_PER_WI="
-        build_options += str(self.resol_dir[0] / WINb)
-        build_options += " -D BUFF_WIDTH=" + str(self._s_buff_width)
-        prg = self.cl_env.build_src(
-            src, build_options, 1)
-        self.num_advec_and_remesh_comm_l = OpenClKernelLauncher(
-            prg.buff_advec_and_remesh_l, self.cl_env.queue,
-            (gwi[1], gwi[2]), (32, 1))
-        self.num_advec_and_remesh_comm_r = OpenClKernelLauncher(
-            prg.buff_advec_and_remesh_r, self.cl_env.queue,
-            (gwi[1], gwi[2]), (32, 1))
-        self.num_advec_and_remesh = OpenClKernelLauncher(
-            prg.buff_advec_and_remesh, self.cl_env.queue,
-            gwi, lwi)
-
-    def _collect_kernels_cl_src_2k_comm(self):
-        """
-        Compile OpenCL sources for advection and remeshing kernel when
-        communications needed.
-        """
-        build_options = self.build_options + self._size_constants
-        if self._isMultiScale:
-            src, is_noBC, vec, f_space = self._kernel_cfg['advec_MS_comm']
-        else:
-            src, is_noBC, vec, f_space = self._kernel_cfg['advec_comm']
-        gwi, lwi = f_space(self.resol_dir, vec)
-        WINb = lwi[0]
-
-        build_options += " -D WI_NB=" + str(WINb)
-        if self._isMultiScale:
-            build_options += " -D MS_FORMULA="
-            build_options += self.method[MultiScale].__name__.upper()
-        build_options += " -D V_START_INDEX=" + str(self._v_start_index)
-        build_options += " -D V_STOP_INDEX=" + str(self._v_stop_index)
-        build_options += " -D START_INDEX=" + str(self._start_index)
-        build_options += " -D STOP_INDEX=" + str(self._stop_index)
-        build_options += " -D V_BUFF_WIDTH=" + str(self._v_buff_width)
-        prg = self.cl_env.build_src(
-            src, build_options, 1)
-        self.num_advec = OpenClKernelLauncher(
-            prg.buff_advec, self.cl_env.queue,
-            gwi, lwi)
-
-        ## remeshing
-        build_options = self.build_options + self._size_constants
-        src, is_noBC, vec, f_space = self._kernel_cfg['remesh_comm']
-        gwi, lwi = f_space(self.resol_dir, vec)
-        WINb = lwi[0]
-
-        build_options += " -D FORMULA=" + self.method[Remesh].__name__.upper()
-        build_options += " -D WI_NB=" + str(WINb)
-        build_options += " -D PART_NB_PER_WI="
-        build_options += str(self.resol_dir[0] / WINb)
-        #Build code
-        build_options += " -D START_INDEX=" + str(self._start_index)
-        build_options += " -D STOP_INDEX=" + str(self._stop_index)
-        build_options += " -D BUFF_WIDTH=" + str(self._s_buff_width)
-        prg = self.cl_env.build_src(
-            src, build_options, 1)
-        self.num_remesh_comm_l = OpenClKernelLauncher(
-            prg.buff_remesh_l, self._queue_comm_m,
-            (gwi[1], gwi[2]), (32, 1))
-        self.num_remesh_comm_r = OpenClKernelLauncher(
-            prg.buff_remesh_r, self._queue_comm_p,
-            (gwi[1], gwi[2]), (32, 1))
-        self.num_remesh = OpenClKernelLauncher(
-            prg.remesh, self.cl_env.queue,
-            gwi, lwi)
-
-    def _recompute_scal_buffers(self, max_velo_m, max_velo_p, dt):
-        dx = self._space_step[self.direction]
-        max_cfl_p_s = int(max_velo_p * dt / dx) + 1
-        max_cfl_m_s = int(max_velo_m * dt / dx) + 1
-        self._s_buff_width_loc_p = max_cfl_p_s + self._nb_w / 2
-        self._s_buff_width_loc_m = max_cfl_m_s + self._nb_w / 2
-        assert self._s_froml_buff_max.shape[0] >= self._s_buff_width_loc_p, \
-            "Multi-GPU Comm R-Buffer too small: {0} >= {1}".format(
-                self._s_froml_buff_max.shape[0] >= self._s_buff_width_loc_p)
-        assert self._s_froml_buff_max.shape[0] >= self._s_buff_width_loc_m, \
-            "Multi-GPU Comm L-Buffer too small: {0} >= {1}".format(
-                self._s_froml_buff_max.shape[0] >= self._s_buff_width_loc_m)
-        self._s_buff_width_from_l = self._comm.sendrecv(
-            sendobj=self._s_buff_width_loc_p, dest=self._R_rk,
-            sendtag=1 + 7 * self._R_rk,
-            source=self._L_rk,
-            recvtag=1 + 7 * self._comm_rank)
-        self._s_buff_width_from_r = self._comm.sendrecv(
-            sendobj=self._s_buff_width_loc_m, dest=self._L_rk,
-            sendtag=10000 + 9 * self._L_rk,
-            source=self._R_rk,
-            recvtag=10000 + 9 * self._comm_rank)
-
-        s = self._s_buff_width_from_l * \
-            self.resol_dir[1] * self.resol_dir[2]
-        self._s_froml_buff = self._s_froml_buff_flat[:s].reshape(
-            (self._s_buff_width_from_l,
-             self.resol_dir[1],
-             self.resol_dir[2]), order=ORDER)
-        self._s_locl_buff = \
-            self.fields_on_grid[0].host_data_pinned[0].reshape(
-                self.resol_dir, order=ORDER)[:self._s_buff_width_from_l, :, :]
-        s = self._s_buff_width_from_r * \
-            self.resol_dir[1] * self.resol_dir[2]
-        self._s_fromr_buff = self._s_fromr_buff_flat[:s].reshape(
-            (self._s_buff_width_from_r,
-             self.resol_dir[1],
-             self.resol_dir[2]), order=ORDER)
-        self._s_locr_buff = \
-            self.fields_on_grid[0].host_data_pinned[0].reshape(
-                self.resol_dir, order=ORDER)[-self._s_buff_width_from_r:, :, :]
-
-        self._s_buffer_region_on_l = (
-            int(SIZEOF_HYSOP_REAL * self._s_buff_width_from_l),
-            int(self.resol_dir[1]),
-            int(self.resol_dir[2]))
-        self._origin_locl = (0, 0, 0)
-        self._s_buffer_region_on_r = (
-            int(SIZEOF_HYSOP_REAL * self._s_buff_width_from_r),
-            int(self.resol_dir[1]),
-            int(self.resol_dir[2]))
-        self._origin_locr = (
-            int((self.resol_dir[0] - self._s_buff_width_from_r)
-                * SIZEOF_HYSOP_REAL), 0, 0)
-
-        # Recompute blocks number and block size
-        self._s_block_size_to_r, self._s_n_blocks_to_r, \
-            self._s_elem_block_to_r, self._s_block_slice_to_r = \
-            self._compute_block_number_and_size(
-                SIZEOF_HYSOP_REAL * self._s_buff_width_loc_p *
-                self.resol_dir[1] * self.resol_dir[2])
-        self._s_block_size_to_l, self._s_n_blocks_to_l, \
-            self._s_elem_block_to_l, self._s_block_slice_to_l = \
-            self._compute_block_number_and_size(
-                SIZEOF_HYSOP_REAL * self._s_buff_width_loc_m *
-                self.resol_dir[1] * self.resol_dir[2])
-        self._s_block_size_from_r, self._s_n_blocks_from_r, \
-            self._s_elem_block_from_r, self._s_block_slice_from_r = \
-            self._compute_block_number_and_size(self._s_fromr_buff.nbytes)
-        self._s_block_size_from_l, self._s_n_blocks_from_l, \
-            self._s_elem_block_from_l, self._s_block_slice_from_l = \
-            _block_number_and_size(self._s_froml_buff.nbytes)
-        # print "[" + str(self._comm_rank) + \
-        #     "] Multi-GPU comm: send to L=({0} MB, {1} bloc, {2}, {3}),".format(
-        #         self._s_block_size_to_l * self._s_n_blocks_to_l /
-        #         (1024. * 1024),
-        #         self._s_n_blocks_to_l,
-        #         self._s_buff_width_loc_m, self._s_froml_buff_max.shape[0]) + \
-        #     "  R=({0} MB, {1} bloc, {2}, {3})".format(
-        #         self._s_block_size_to_r * self._s_n_blocks_to_r /
-        #         (1024. * 1024),
-        #         self._s_n_blocks_to_r,
-        #         self._s_buff_width_loc_p, self._s_froml_buff_max.shape[0]) + \
-        #     "; recv from L=({0} MB, {1} bloc),".format(
-        #         self._s_block_size_from_l * self._s_n_blocks_from_l /
-        #         (1024. * 1024),
-        #        self._s_n_blocks_from_l) + \
-        #     "  R=({0} MB, {1} bloc)".format(
-        #         self._s_block_size_from_r * self._s_n_blocks_from_r /
-        #         (1024. * 1024),
-        #         self._s_n_blocks_from_r)
-
-        # Events lists
-        self._r_recv = [None, ] * self._s_n_blocks_from_r
-        self._l_recv = [None, ] * self._s_n_blocks_from_l
-        self._evt_get_l = [None, ] * self._s_n_blocks_to_l
-        self._evt_get_r = [None, ] * self._s_n_blocks_to_r
-        self._l_send = [None, ] * self._s_n_blocks_to_l
-        self._r_send = [None, ] * self._s_n_blocks_to_r
-
-    def _compute_block_number_and_size(self, buff_size):
-        block = 1024 * 1024  # 1MByte
-        while buff_size % block != 0:
-            block /= 2
-        if block < 256 * 1024:
-            block = buff_size / 4
-        n_b = buff_size / block
-        n_elem = block / SIZEOF_HYSOP_REAL
-        slices = [None, ] * n_b
-        for b in xrange(n_b):
-            slices[b] = slice(b * n_elem, (b + 1) * n_elem)
-        return int(block), int(n_b), n_elem, slices
-
-    def _get_velocity_buffers_from_host(self, ghosts):
-        if self.direction == 0:
-            velo_sl = (slice(self.v_resol_dir[0] - self._v_buff_width - ghosts,
-                             self.v_resol_dir[0] - ghosts),
-                       slice(None), slice(None),)
-            self._v_r_buff_loc[...] = self.velocity.data[0][velo_sl]
-            velo_sl = (slice(0 + ghosts, self._v_buff_width + ghosts),
-                       slice(None), slice(None))
-            self._v_l_buff_loc[...] = self.velocity.data[0][velo_sl]
-        if self.direction == 1:
-            velo_sl = (slice(None),
-                       slice(self.v_resol_dir[0] - self._v_buff_width - ghosts,
-                             self.v_resol_dir[0] - ghosts),
-                       slice(None))
-            self._v_r_buff_loc[...] = \
-                self.velocity.data[1][velo_sl].swapaxes(0, 1)
-            velo_sl = (slice(None),
-                       slice(0 + ghosts, self._v_buff_width + ghosts),
-                       slice(None))
-            self._v_l_buff_loc[...] = \
-                self.velocity.data[1][velo_sl].swapaxes(0, 1)
-        if self.direction == 2:
-            velo_sl = (slice(None), slice(None),
-                       slice(self.v_resol_dir[0] - self._v_buff_width - ghosts,
-                             self.v_resol_dir[0] - ghosts))
-            self._v_r_buff_loc[...] = \
-                self.velocity.data[2][velo_sl].swapaxes(0, 1).swapaxes(0, 2)
-            velo_sl = (slice(None), slice(None),
-                       slice(0 + ghosts, self._v_buff_width + ghosts))
-            self._v_l_buff_loc[...] = \
-                self.velocity.data[2][velo_sl].swapaxes(0, 1).swapaxes(0, 2)
-
-    def _get_velocity_buffers_from_device(self, ghosts):
-        _evt_l_v = cl.enqueue_copy(
-            self._queue_comm_m,
-            self._v_l_buff_loc,
-            self.velocity.gpu_data[self.direction],
-            host_origin=(0, 0, 0),
-            host_pitches=self._v_pitches_host,
-            buffer_origin=(int(SIZEOF_HYSOP_REAL * ghosts), 0, 0),
-            buffer_pitches=self._v_pitches_dev,
-            region=self._v_buffer_region,
-            is_blocking=False)
-        _evt_r_v = cl.enqueue_copy(
-            self._queue_comm_m,
-            self._v_r_buff_loc,
-            self.velocity.gpu_data[self.direction],
-            host_origin=(0, 0, 0),
-            host_pitches=self._v_pitches_host,
-            buffer_origin=(int(SIZEOF_HYSOP_REAL * (
-                self.v_resol_dir[0] - self._v_buff_width - ghosts)), 0, 0),
-            buffer_pitches=self._v_pitches_dev,
-            region=self._v_buffer_region,
-            is_blocking=False)
-        _evt_l_v.wait()
-        _evt_r_v.wait()
-
-    def _exchange_velocity_buffers(self, dt):
-        ctime = Wtime()
-        ghosts = self.velocity_topo.ghosts()[self.direction]
-        if self.direction == 0:
-            max_velo_p = np.max(self.velocity.data[0][-ghosts - 1:, :, :])
-            max_velo_m = np.max(self.velocity.data[0][:ghosts + 1, :, :])
-        if self.direction == 1:
-            max_velo_p = np.max(self.velocity.data[1][:, -ghosts - 1:, :])
-            max_velo_m = np.max(self.velocity.data[1][:, :ghosts + 1, :])
-        if self.direction == 2:
-            max_velo_p = np.max(self.velocity.data[2][:, :, -ghosts - 1:])
-            max_velo_m = np.max(self.velocity.data[2][:, :, :ghosts + 1])
-        self._recompute_scal_buffers(max_velo_m, max_velo_p, dt)
-        self._get_velocity_buffers(ghosts)
-        self.profiler['comm_cpu_advec_get'] += Wtime() - ctime
-
-        ctime = Wtime()
-        for b in xrange(self._v_n_blocks):
-            self._l_recv_v[b] = self._comm.Irecv(
-                [self._v_l_buff_flat[self._v_block_slice[b]],
-                 self._v_elem_block, hysop.core.mpi_REAL],
-                source=self._L_rk, tag=17 + 19 * self._L_rk + 59 * b)
-            self._r_recv_v[b] = self._comm.Irecv(
-                [self._v_r_buff_flat[self._v_block_slice[b]],
-                 self._v_elem_block, hysop.core.mpi_REAL],
-                source=self._R_rk, tag=29 + 23 * self._R_rk + 57 * b)
-        for b in xrange(self._v_n_blocks):
-            self._send_to_r_v[b] = self._comm.Issend(
-                [self._v_r_buff_loc_flat[self._v_block_slice[b]],
-                 self._v_elem_block, hysop.core.mpi_REAL],
-                dest=self._R_rk, tag=17 + 19 * self._comm_rank + 59 * b)
-            self._send_to_l_v[b] = self._comm.Issend(
-                [self._v_l_buff_loc_flat[self._v_block_slice[b]],
-                 self._v_elem_block, hysop.core.mpi_REAL],
-                dest=self._L_rk, tag=29 + 23 * self._comm_rank + 57 * b)
-        if CL_PROFILE:
-            for b in xrange(self._v_n_blocks):
-                self._l_recv_v[b].Wait()
-                self._r_recv_v[b].Wait()
-        self.profiler['comm_cpu_advec'] += Wtime() - ctime
-
-    def _todevice_velocity_buffers(self):
-        for b in xrange(self._v_n_blocks):
-            self._l_recv_v[b].Wait()
-            self._evt_l_v[b] = cl.enqueue_copy(
-                self._queue_comm_m,
-                self._cl_v_l_buff, self._v_l_buff,
-                host_origin=(b * self._v_block_size, 0, 0),
-                host_pitches=(self._v_l_buff.nbytes, 0),
-                buffer_origin=(b * self._v_block_size, 0, 0),
-                buffer_pitches=(self._v_l_buff.nbytes, 0),
-                region=(self._v_block_size, 1, 1),
-                is_blocking=False)
-        for b in xrange(self._v_n_blocks):
-            self._r_recv_v[b].Wait()
-            self._evt_r_v[b] = cl.enqueue_copy(
-                self._queue_comm_p,
-                self._cl_v_r_buff, self._v_r_buff,
-                host_origin=(b * self._v_block_size, 0, 0),
-                host_pitches=(self._v_r_buff.nbytes, 0),
-                buffer_origin=(b * self._v_block_size, 0, 0),
-                buffer_pitches=(self._v_r_buff.nbytes, 0),
-                region=(self._v_block_size, 1, 1),
-                is_blocking=False)
-
-        if CL_PROFILE:
-            advec_gpu_time = 0.
-            for evt in self._evt_l_v + self._evt_r_v:
-                evt.wait()
-                advec_gpu_time += (evt.profile.end - evt.profile.start) * 1e-9
-            self.profiler['comm_gpu_advec_set'] += advec_gpu_time
-
-    def _init_copy(self, simulation, dt_coeff, split_id, old_dir):
-        self._exchange_velocity_buffers(simulation.time_step * dt_coeff)
-        wait_evt = self.fields_on_grid[0].events
-        for g, p in zip(self.fields_on_grid[0].gpu_data,
-                        self.fields_on_part[self.fields_on_grid[0]]):
-            evt = self.copy.launch_sizes_in_args(p, g, wait_for=wait_evt)
-            #evt = self.copy(g, p, wait_for=wait_evt)
-            self._init_events[self.fields_on_grid[0]].append(evt)
-
-    def _init_transpose_xy(self, simulation, dt_coeff, split_id, old_dir):
-        self._exchange_velocity_buffers(simulation.time_step * dt_coeff)
-        wait_evt = self.fields_on_grid[0].events
-        for g, p in zip(self.fields_on_grid[0].gpu_data,
-                        self.fields_on_part[self.fields_on_grid[0]]):
-            evt = self.transpose_xy(g, p, wait_for=wait_evt)
-            self._init_events[self.fields_on_grid[0]].append(evt)
-
-    def _init_transpose_xz(self, simulation, dt_coeff, split_id, old_dir):
-        self._exchange_velocity_buffers(simulation.time_step * dt_coeff)
-        wait_evt = self.fields_on_grid[0].events
-        for g, p in zip(self.fields_on_grid[0].gpu_data,
-                        self.fields_on_part[self.fields_on_grid[0]]):
-            evt = self.transpose_xz(g, p, wait_for=wait_evt)
-            self._init_events[self.fields_on_grid[0]].append(evt)
-
-    def _compute_advec_comm(self, simulation, dt_coeff, split_id, old_dir):
-        dt = simulation.time_step * dt_coeff
-
-        self._todevice_velocity_buffers()
-        wait_evts = self.velocity.events + self._evt_l_v + self._evt_r_v + \
-            self._init_events[self.fields_on_grid[0]]
-        if self._isMultiScale:
-            evt = self.num_advec(
-                self.velocity.gpu_data[self.direction],
-                self.part_position[0],
-                self._cl_v_l_buff,
-                self._cl_v_r_buff,
-                self.gpu_precision(dt),
-                self.gpu_precision(1. / self._v_mesh_size[1]),
-                self.gpu_precision(1. / self._v_mesh_size[2]),
-                self._cl_mesh_info,
-                wait_for=wait_evts)
-        else:
-            evt = self.num_advec(
-                self.velocity.gpu_data[self.direction],
-                self.part_position[0],
-                self._cl_v_l_buff,
-                self._cl_v_r_buff,
-                self.gpu_precision(dt),
-                self._cl_mesh_info,
-                wait_for=wait_evts)
-        self._init_events[self.fields_on_grid[0]].append(evt)
-
-    def _num_2k_comm_l(self, wait_list, dt):
-        return self.num_remesh_comm_l(
-            self.part_position[0],
-            self.fields_on_part[self.fields_on_grid[0]][0],
-            self._cl_s_l_buff,
-            HYSOP_INTEGER(self._s_buff_width_loc_m),
-            self._cl_mesh_info,
-            wait_for=wait_list)
-
-    def _num_2k_comm_r(self, wait_list, dt):
-        return self.num_remesh_comm_r(
-            self.part_position[0],
-            self.fields_on_part[self.fields_on_grid[0]][0],
-            self._cl_s_r_buff,
-            HYSOP_INTEGER(self._s_buff_width_loc_p),
-            self._cl_mesh_info,
-            wait_for=wait_list)
-
-    def _num_2k_comm(self, wait_list, dt):
-        return self.num_remesh(
-            self.part_position[0],
-            self.fields_on_part[self.fields_on_grid[0]][0],
-            self.fields_on_grid[0].gpu_data[0],
-            self._cl_mesh_info,
-            wait_for=wait_list)
-
-    def _num_1k_ms_comm_l(self, wait_list, dt):
-        return self.num_advec_and_remesh_comm_l(
-            self.velocity.gpu_data[self.direction],
-            self._cl_v_l_buff,
-            self.fields_on_part[self.fields_on_grid[0]][0],
-            self._cl_s_l_buff,
-            HYSOP_INTEGER(self._s_buff_width_loc_m),
-            self.gpu_precision(dt),
-            self.gpu_precision(1. / self._v_mesh_size[1]),
-            self.gpu_precision(1. / self._v_mesh_size[2]),
-            self._cl_mesh_info,
-            wait_for=wait_list)
-
-    def _num_1k_ms_comm_r(self, wait_list, dt):
-        return self.num_advec_and_remesh_comm_r(
-            self.velocity.gpu_data[self.direction],
-            self._cl_v_r_buff,
-            self.fields_on_part[self.fields_on_grid[0]][0],
-            self._cl_s_r_buff,
-            HYSOP_INTEGER(self._s_buff_width_loc_p),
-            self.gpu_precision(dt),
-            self.gpu_precision(1. / self._v_mesh_size[1]),
-            self.gpu_precision(1. / self._v_mesh_size[2]),
-            self._cl_mesh_info,
-            wait_for=wait_list)
-
-    def _num_1k_ms_comm(self, wait_list, dt):
-        return self.num_advec_and_remesh(
-            self.velocity.gpu_data[self.direction],
-            self._cl_v_l_buff,
-            self._cl_v_r_buff,
-            self.fields_on_part[self.fields_on_grid[0]][0],
-            self.fields_on_grid[0].gpu_data[0],
-            self.gpu_precision(dt),
-            self.gpu_precision(1. / self._v_mesh_size[1]),
-            self.gpu_precision(1. / self._v_mesh_size[2]),
-            self._cl_mesh_info,
-            wait_for=wait_list)
-
-    def _num_1k_comm_l(self, wait_list, dt):
-        return self.num_advec_and_remesh_comm_l(
-            self.velocity.gpu_data[self.direction],
-            self._cl_v_l_buff,
-            self.fields_on_part[self.fields_on_grid[0]][0],
-            self._cl_s_l_buff,
-            HYSOP_INTEGER(self._s_buff_width_loc_m),
-            self.gpu_precision(dt),
-            self._cl_mesh_info,
-            wait_for=wait_list)
-
-    def _num_1k_comm_r(self, wait_list, dt):
-        return self.num_advec_and_remesh_comm_r(
-            self.velocity.gpu_data[self.direction],
-            self._cl_v_r_buff,
-            self.fields_on_part[self.fields_on_grid[0]][0],
-            self._cl_s_r_buff,
-            HYSOP_INTEGER(self._s_buff_width_loc_p),
-            self.gpu_precision(dt),
-            self._cl_mesh_info,
-            wait_for=wait_list)
-
-    def _num_1k_comm(self, wait_list, dt):
-        return self.num_advec_and_remesh(
-            self.velocity.gpu_data[self.direction],
-            self._cl_v_l_buff,
-            self._cl_v_r_buff,
-            self.fields_on_part[self.fields_on_grid[0]][0],
-            self.fields_on_grid[0].gpu_data[0],
-            self.gpu_precision(dt),
-            self._cl_mesh_info,
-            wait_for=wait_list)
-
-    def _compute_1c_comm(self, simulation, dt_coeff, split_id, old_dir):
-        dt = simulation.time_step * dt_coeff
-        if self._is2kernel:
-            self._compute_advec_comm(simulation, dt_coeff, split_id, old_dir)
-        else:
-            self._todevice_velocity_buffers()
-        wait_evts = self.velocity.events + \
-            self._init_events[self.fields_on_grid[0]] + \
-            self.fields_on_grid[0].events
-        if not self._is2kernel:
-            wait_evts += self._evt_l_v + self._evt_r_v
-
-        # Prepare the MPI receptions
-        for b in xrange(self._s_n_blocks_from_l):
-            self._l_recv[b] = self._comm.Irecv(
-                [self._s_froml_buff_flat[self._s_block_slice_from_l[b]],
-                 self._s_elem_block_from_l, hysop.core.mpi_REAL],
-                source=self._L_rk, tag=888 + self._L_rk + 19 * b)
-        for b in xrange(self._s_n_blocks_from_r):
-            self._r_recv[b] = self._comm.Irecv(
-                [self._s_fromr_buff_flat[self._s_block_slice_from_r[b]],
-                 self._s_elem_block_from_r, hysop.core.mpi_REAL],
-                source=self._R_rk, tag=333 + self._R_rk + 17 * b)
-
-        # Fill and get the left buffer
-        evt_comm_l = self._num_comm_l(wait_evts, dt)
-        s = int(self._s_buff_width_loc_m *
-                self.resol_dir[1] * self.resol_dir[2])
-        for b in xrange(self._s_n_blocks_to_l):
-            self._evt_get_l[b] = cl.enqueue_copy(
-                self._queue_comm_m,
-                self._s_l_buff, self._cl_s_l_buff,
-                host_origin=(b * self._s_block_size_to_l, 0, 0),
-                host_pitches=(s * SIZEOF_HYSOP_REAL, 0),
-                buffer_origin=(b * self._s_block_size_to_l, 0, 0),
-                buffer_pitches=(s * SIZEOF_HYSOP_REAL, 0),
-                region=(self._s_block_size_to_l, 1, 1),
-                is_blocking=False,
-                wait_for=[evt_comm_l])
-
-        # Send the left buffer
-        ctime = Wtime()
-        for b in xrange(self._s_n_blocks_to_l):
-            self._evt_get_l[b].wait()
-            self._l_send[b] = self._comm.Issend(
-                [self._s_l_buff[self._s_block_slice_to_l[b]],
-                 self._s_elem_block_to_l, hysop.core.mpi_REAL],
-                dest=self._L_rk, tag=333 + self._comm_rank + 17 * b)
-        ctime_send_l = Wtime() - ctime
-
-        # Fill and get the right buffer
-        evt_comm_r = self._num_comm_r(wait_evts, dt)
-        s = int(self._s_buff_width_loc_p *
-                self.resol_dir[1] * self.resol_dir[2])
-        for b in xrange(self._s_n_blocks_to_r):
-            self._evt_get_r[b] = cl.enqueue_copy(
-                self._queue_comm_p,
-                self._s_r_buff, self._cl_s_r_buff,
-                host_origin=(b * self._s_block_size_to_r, 0, 0),
-                host_pitches=(s * SIZEOF_HYSOP_REAL, 0),
-                buffer_origin=(b * self._s_block_size_to_r, 0, 0),
-                buffer_pitches=(s * SIZEOF_HYSOP_REAL, 0),
-                region=(self._s_block_size_to_r, 1, 1),
-                is_blocking=False,
-                wait_for=[evt_comm_r])
-        # Send the right buffer
-        ctime = Wtime()
-        for b in xrange(self._s_n_blocks_to_r):
-            self._evt_get_r[b].wait()
-            self._r_send[b] = self._comm.Issend(
-                [self._s_r_buff[self._s_block_slice_to_r[b]],
-                 self._s_elem_block_to_r, hysop.core.mpi_REAL],
-                dest=self._R_rk, tag=888 + self._comm_rank + 19 * b)
-        ctime_send_r = Wtime() - ctime
-
-        # remesh in-domain particles and get left-right layer
-        evt = self._num_comm(wait_evts, dt)
-        evt_get_locl = cl.enqueue_copy(
-            self.cl_env.queue,
-            self.fields_on_grid[0].host_data_pinned[0],
-            self.fields_on_grid[0].gpu_data[0],
-            host_origin=self._origin_locl,
-            buffer_origin=self._origin_locl,
-            buffer_pitches=self._pitches_dev,
-            host_pitches=self._pitches_dev,
-            region=self._s_buffer_region_on_l,
-            is_blocking=False,
-            wait_for=[evt])
-        evt_get_locr = cl.enqueue_copy(
-            self.cl_env.queue,
-            self.fields_on_grid[0].host_data_pinned[0],
-            self.fields_on_grid[0].gpu_data[0],
-            host_origin=self._origin_locr,
-            buffer_origin=self._origin_locr,
-            buffer_pitches=self._pitches_dev,
-            host_pitches=self._pitches_dev,
-            region=self._s_buffer_region_on_r,
-            is_blocking=False,
-            wait_for=[evt])
-
-        ctime = Wtime()
-        # Wait MPI transfer of data from left, add them to local
-        # data and send back to device
-        for b in xrange(self._s_n_blocks_to_r):
-            self._r_send[b].Wait()
-        for b in xrange(self._s_n_blocks_from_l):
-            self._l_recv[b].Wait()
-        evt_get_locl.wait()
-        ctime_wait_l = Wtime() - ctime
-
-        calctime = Wtime()
-        self._s_locl_buff += self._s_froml_buff
-        self.profiler['comm_calc_remesh'] += Wtime() - calctime
-        evt_set_locl = cl.enqueue_copy(
-            self.cl_env.queue,
-            self.fields_on_grid[0].gpu_data[0],
-            self.fields_on_grid[0].host_data_pinned[0],
-            host_origin=self._origin_locl,
-            buffer_origin=self._origin_locl,
-            buffer_pitches=self._pitches_dev,
-            host_pitches=self._pitches_dev,
-            region=self._s_buffer_region_on_l,
-            is_blocking=False)
-
-        # Wait MPI transfer of data from right, add them to local
-        # data and send back to device
-        ctime = Wtime()
-        for b in xrange(self._s_n_blocks_to_l):
-            self._l_send[b].Wait()
-        for b in xrange(self._s_n_blocks_from_r):
-            self._r_recv[b].Wait()
-        evt_get_locr.wait()
-        ctime_wait_r = Wtime() - ctime
-        calctime = Wtime()
-        self._s_locr_buff += self._s_fromr_buff
-        self.profiler['comm_calc_remesh'] += Wtime() - calctime
-        evt_set_locr = cl.enqueue_copy(
-            self.cl_env.queue,
-            self.fields_on_grid[0].gpu_data[0],
-            self.fields_on_grid[0].host_data_pinned[0],
-            host_origin=self._origin_locr,
-            buffer_origin=self._origin_locr,
-            buffer_pitches=self._pitches_dev,
-            host_pitches=self._pitches_dev,
-            region=self._s_buffer_region_on_r,
-            is_blocking=False)
-
-        if CL_PROFILE:
-            evt_set_locl.wait()
-            evt_set_locr.wait()
-
-        self.fields_on_grid[0].events.append(evt_set_locr)
-        self.fields_on_grid[0].events.append(evt_set_locl)
-        self.profiler['comm_cpu_remesh'] += ctime_wait_r + ctime_wait_l + \
-            ctime_send_r + ctime_send_l
-
-        if CL_PROFILE:
-            rmsh_gpu_time = 0.
-            for evt in self._evt_get_l + self._evt_get_r:
-                evt.wait()
-                rmsh_gpu_time += (evt.profile.end - evt.profile.start) * 1e-9
-            self.profiler['comm_gpu_remesh_get'] += rmsh_gpu_time
-            rmsh_gpu_time = 0.
-            for evt in [evt_get_locr, evt_get_locl]:
-                evt.wait()
-                rmsh_gpu_time += (evt.profile.end - evt.profile.start) * 1e-9
-            self.profiler['comm_gpu_remesh_get_loc'] += rmsh_gpu_time
-            rmsh_gpu_time = 0.
-            for evt in [evt_set_locl, evt_set_locr]:
-                evt.wait()
-                rmsh_gpu_time += (evt.profile.end - evt.profile.start) * 1e-9
-            self.profiler['comm_gpu_remesh_set_loc'] += rmsh_gpu_time
-
-    @debug
-    def finalize(self):
-        """
-        Cleaning, if required.
-        """
-        super(MultiGPUParticleAdvection, self).finalize()
-        self._s_l_buff.base.release(self.cl_env.queue)
-        self._s_r_buff.base.release(self.cl_env.queue)
-        self._v_r_buff.base.release(self.cl_env.queue)
-        self._v_l_buff.base.release(self.cl_env.queue)
-
-    def get_profiling_info(self):
-        super(MultiGPUParticleAdvection, self).get_profiling_info()
-        if self._is2kernel:
-            for k in (self.num_remesh_comm_l,
-                      self.num_remesh_comm_r):
-                for p in k.profile:
-                    self.profiler += p
-        else:
-            for k in (self.num_advec_and_remesh_comm_l,
-                      self.num_advec_and_remesh_comm_r):
-                for p in k.profile:
-                    self.profiler += p
diff --git a/hysop/old/gpu.old/static_gpu_particle_advection_dir.py b/hysop/old/gpu.old/static_gpu_particle_advection_dir.py
deleted file mode 100644
index 28faac124cc76b051bbb7acbb4db5bff610190e1..0000000000000000000000000000000000000000
--- a/hysop/old/gpu.old/static_gpu_particle_advection_dir.py
+++ /dev/null
@@ -1,666 +0,0 @@
-
-from hysop import __VERBOSE__
-
-from hysop.backend.device.opencl import cl
-from hysop.backend.device.opencl.opencl_kernel   import OpenClKernelLauncher
-from hysop.backend.device.opencl.gpu_particle_advection_dir import GPUParticleAdvectionDir, MeshDirection
-
-from hysop.methods import TimeIntegrator, Remesh, ExtraArgs, \
-    Support, Splitting, MultiScale, Interpolation, Precision,\
-    StretchingFormulation, BoundaryCondition
-
-from hysop.numerics.odesolvers    import Euler, RK2, RK3, RK4
-from hysop.numerics.interpolation.interpolation import Linear
-
-from hysop.numerics.remeshing import L2_1, L2_2, L2_3, L2_4
-from hysop.numerics.remeshing import L4_2, L4_3, L4_4
-from hysop.numerics.remeshing import L6_3, L6_4, L6_5, L6_6
-from hysop.numerics.remeshing import L8_4
-
-from hysop.constants import np, DirectionLabels 
-from hysop.tools.numpywrappers import npw
-
-from hysop.backend.device.codegen.structs.mesh_info import MeshInfoStruct
-from hysop.backend.device.codegen.kernels.copy_kernel import CopyKernel
-
-from hysop.backend.device.codegen.kernels.directional_advection  import DirectionalAdvectionKernel
-from hysop.backend.device.codegen.kernels.directional_stretching import DirectionalStretchingKernel
-
-from hysop.constants import callback_profiler
-
-class StaticGPUParticleAdvectionDir(GPUParticleAdvectionDir):
-    ##  
-    ## This implementation is based on GPUOperator._kernel_cfg
-    ## and kernels located in hysop/gpu/cl_src.
-    ##
-    ## This class only overrides the particles advection kernel backend methods 
-    ## (mostly kernel parsing and building, as well as kernel calls used in _exec_list).
-    ##
-    ## See hysop.gpu.CodegenGPUParticleAdvectionDir for the runtime generated kernels implementation.
-    ##
-    def __init__(self,**kargs): 
-        super(self.__class__, self).__init__(**kargs)
-        
-        # Additional method and configuration checks
-        self._check()
-
-        # Initialize and configure everything
-        self._initialize()
-
-
-    def _check(self):
-        super(StaticGPUParticleAdvectionDir,self)._check()
-        cls = self.__class__.__name__
-        dim = self.dim
-    
-        if dim not in [2,3]:
-            msg='{} only supports 2D and 3D domains.'.format(cls)
-            raise NotImplementedError(msg)
-
-        _kernel_cfg = self._kernel_cfg
-        if _kernel_cfg is None:
-            msg='{} requires a kernel configuration (_kernel_cfg) to be present !'\
-                    .format(cls)
-            raise ValueError(msg)
-
-        required_kernel_configs = ['advec','remesh','advec_and_remesh']
-        if dim>=2: 
-            required_kernel_configs.append('transpose_xy')
-        if dim>=3: 
-            required_kernel_configs.append('transpose_xz')
-
-        for rkc in required_kernel_configs:
-            msg = "{} requires kernel configuration '{}' but it is not present.".format(cls, rkc)
-            if rkc not in _kernel_cfg.keys():
-               raise KeyError(msg)
-
-        if self._has_stretching:
-            stretching = self._stretching
-            if not isinstance(stretching,dict):
-                msg='Stretching value should be a dict containing a formulation and an order.'
-                raise ValueError(msg)
-            reqkeys =  ['formulation','order']
-            for k in reqkeys:
-                if k not in stretching.keys():
-                    msg ="Missing key '{}' in streching dictionnary.".format(k)
-                    raise ValueError(msg)
-            formulation = stretching['formulation']
-            order = stretching['order']
-            if order<0:
-                msg='Stretching order < 0'
-                raise ValueError(msg)
-            if not isinstance(formulation,StretchingFormulation):
-                msg='Stretching formulation is not one of {}.'.format(StretchingFormulation.svalues())
-                raise ValueError(msg)
-
-    def _initialize_cl_env(self):
-        self._initialize_cl_build_options()
-        self._initialize_cl_size_constants()
-        self._initialize_cl_mesh_info()
-        
-        required_components = set()
-        for fg in self.fields_on_grid:
-            required_components.add(fg.nb_components)
-        self.required_components = required_components
-
-    def _initialize_cl_build_options(self):
-        self._build_options += ""
-
-    def _initialize_cl_size_constants(self):
-        """
-        Compile time constants for kernels.
-        """
-        self._append_size_constants(self.f_resol)
-        self._append_size_constants(self.v_resol, prefix='V_NB')
-        
-        self._append_size_constants(
-            [self.velocity_topo.ghosts()[self.direction]],
-            prefix='V_GHOSTS_NB', suffix=[''])
-
-        self._append_size_constants([self._is_multi_scale*1],
-                prefix='ADVEC_IS_MULTISCALE', suffix=[''])
-    
-    def _initialize_cl_mesh_info(self):
-
-        # Coordinates of the local origin
-        coord_min = npw.ones(4, dtype=self.gpu_precision)
-        coord_min[:self.dim] = self.fields_topo.mesh.origin
-
-        # Space step for fields
-        mesh_size = npw.ones(4, dtype=self.gpu_precision)
-        mesh_size[:self.dim] = self._reorder_vect(
-            self.fields_topo.mesh.space_step)
-
-        # Space step for velocity
-        v_mesh_size = npw.ones(4, dtype=self.gpu_precision)
-        v_mesh_size[:self.dim] = self._reorder_vect(
-            self.velocity_topo.mesh.space_step)
-
-        mesh_info = npw.ones((12, ))
-        mesh_info[:4]  = mesh_size
-        mesh_info[4:8] = v_mesh_size
-        mesh_info[8]   = coord_min[self.direction]
-        mesh_info[9]   = 1. / mesh_size[0]
-        mesh_info[10]  = 1. / v_mesh_size[0]
-
-        cl_mesh_info   = cl.Buffer(self.cl_env.ctx, cl.mem_flags.READ_ONLY,
-                                       size=mesh_info.nbytes)
-        cl.enqueue_write_buffer(self.cl_env.queue,
-                                cl_mesh_info, mesh_info).wait()
-    
-        self._mesh_info    = mesh_info
-        self._cl_mesh_info = cl_mesh_info
-            
-        
-        #TODO remove
-        fields_mesh = self.fields_topo.mesh
-        (fields_mesh_info,fields_mesh_info_var) = MeshInfoStruct.create_from_mesh('field_mesh_info',
-                self.cl_env, self.fields_topo.mesh, self.mesh_dir, self.mesh_state)
-
-        self._fields_mesh_info      = fields_mesh_info
-        self._fields_mesh_info_var  = fields_mesh_info_var
-        
-        velocity_mesh = self.velocity_topo.mesh
-        (velocity_mesh_info,velocity_mesh_info_var) = MeshInfoStruct.create_from_mesh('velocity_mesh_info',
-                self.cl_env, self.velocity_topo.mesh, self.mesh_dir, self.mesh_state)
-
-        self._velocity_mesh_info      = velocity_mesh_info
-        self._velocity_mesh_info_var  = velocity_mesh_info_var
-            
-    def _collect_transposition_kernel_xy(self):
-        # Only collected for direction Y (XYZ -> YXZ)
-        resolution = self.f_resol_dir
-        defines = ' -D NB_I=NB_Y -D NB_II=NB_X -D NB_III=NB_Z'
-        return self._build_transpose_kernel_xy(resolution, defines)
-    
-    def _collect_transposition_kernel_xz(self):
-        # Only collected for direction Z (YXZ -> ZXY)
-        resolution = self.f_resol_dir
-        defines = ' -D NB_I=NB_Z -D NB_II=NB_X -D NB_III=NB_Y'
-        return self._build_transpose_kernel_xz(resolution, defines)
-    
-    def _collect_transposition_kernel_zx(self):
-        # Only collected for direction Y (ZXY -> YXZ) in sequential
-        resolution = self.f_resol_dir
-        defines = ' -D NB_I=NB_Y -D NB_II=NB_X -D NB_III=NB_Z'
-        return self._build_transpose_kernel_xz(resolution, defines)
-
-    def _collect_transposition_kernel_yx(self):
-        # Only collected for direction X (YXZ -> XYZ)
-        resolution = self.f_resol_dir
-        defines = ' -D NB_I=NB_X -D NB_II=NB_Y -D NB_III=NB_Z'
-        return self._build_transpose_kernel_xy(resolution, defines)
-
-    def _build_transpose_kernel_xy(self, resolution, defines):
-        build_options = self._build_options + self._size_constants
-
-        src, t_dim, b_rows, has_padding, vec, f_space = \
-            self._kernel_cfg['transpose_xy']
-
-        while t_dim > resolution[0] or t_dim > resolution[1] or \
-                (resolution[0] % t_dim) > 0 or (resolution[1] % t_dim) > 0:
-            t_dim /= 2
-        gwi, lwi, blocs_nb = f_space(resolution, t_dim, b_rows, vec)
-
-        if has_padding:
-            build_options += " -D PADDING_XY=1"
-        else:
-            build_options += " -D PADDING_XY=0"
-        build_options += " -D TILE_DIM_XY={0}".format(t_dim)
-        build_options += " -D BLOCK_ROWS_XY={0}".format(b_rows)
-        build_options += " -D NB_GROUPS_I={0}".format(blocs_nb[0])
-        build_options += " -D NB_GROUPS_II={0}".format(blocs_nb[1])
-        build_options += defines
-
-        prg = self.cl_env.build_src(src, build_options, vec)
-        return OpenClKernelLauncher(prg.transpose_xy, self.cl_env.queue, gwi, lwi)
-    
-    def _build_transpose_kernel_xz(self, resolution, defines):
-        build_options = self._build_options + self._size_constants
-
-        src, t_dim, b_rows, b_deph, is_padding, vec, f_space = \
-            self._kernel_cfg['transpose_xz']
-
-        while t_dim > resolution[0] or t_dim > resolution[2] or \
-                (resolution[0] % t_dim) > 0 or (resolution[2] % t_dim) > 0:
-            t_dim /= 2
-        gwi, lwi, blocs_nb = f_space(resolution, t_dim, b_rows, b_deph, vec)
-        if is_padding:
-            build_options += " -D PADDING_XZ=1"
-        else:
-            build_options += " -D PADDING_XZ=0"
-        build_options += " -D TILE_DIM_XZ={0}".format(t_dim)
-        build_options += " -D BLOCK_ROWS_XZ={0}".format(b_rows)
-        build_options += " -D BLOCK_DEPH_XZ={0}".format(b_deph)
-        build_options += " -D NB_GROUPS_I={0}".format(blocs_nb[0])
-        build_options += " -D NB_GROUPS_III={0}".format(blocs_nb[2])
-        build_options += defines
-
-        prg = self.cl_env.build_src(src,build_options,vec)
-        return OpenClKernelLauncher(prg.transpose_xz, self.cl_env.queue, gwi, lwi)
-    
-    # def _collect_advec_kernel(self):
-        # """
-        # Compile OpenCL sources for advection and remeshing kernel.
-        # """
-        # Advection
-        # build_options = self._build_options + self._size_constants
-        # src, is_noBC, vec, f_space = self._kernel_cfg['advec']
-        # gwi, lwi = f_space(self.f_resol_dir, vec)
-        # WINb = lwi[0]
-
-        # if self._is_multi_scale:
-            # build_options += " -D MS_FORMULA="
-            # build_options += self.method[MultiScale].__name__.upper()
-
-        # if is_noBC:
-            # build_options += " -D WITH_NOBC=1"
-        # build_options += " -D WI_NB=" + str(WINb)
-        # build_options += " -D PART_NB_PER_WI="
-        # build_options += str(self.f_resol_dir[0] / WINb)
-        
-        # Build code
-        # src = [s.replace('RKN', self.method[TimeIntegrator].__name__.lower())
-               # for s in src]
-        
-        # Adding remeshing weights for the multiscale advection
-        # if self._is_multi_scale:
-            # src.insert(1, self._kernel_cfg['remesh'][0][1])
-        
-        # Euler integrator
-        # if self.method[TimeIntegrator] is Euler:
-            # if not self._is_multi_scale:
-                # src = [s for s in src if s.find(Euler.__name__.lower()) < 0]
-                # src[-1] = src[-1].replace('advection', 'advection_euler')
-
-        # prg = self.cl_env.build_src(
-            # src,
-            # build_options,
-            # vec,
-            # nb_remesh_components=self.velocity.nb_components)
-
-        # callback_profiler.register_tasks('advection')
-        # self._advec = OpenClKernelLauncher(
-            # prg.advection_kernel, self.cl_env.queue, gwi, lwi)
-
-    def _collect_remesh_kernel(self):
-        # remeshing
-        build_options = self._build_options + self._size_constants
-        src, is_noBC, vec, f_space = self._kernel_cfg['remesh']
-        gwi, lwi = f_space(self.f_resol_dir, vec)
-        WINb = lwi[0]
-
-        build_options += " -D FORMULA=" + self.method[Remesh].__name__.upper()
-        if is_noBC:
-            build_options += " -D WITH_NOBC=1"
-        build_options += " -D WI_NB=" + str(WINb)
-        build_options += " -D PART_NB_PER_WI="
-        build_options += str(self.f_resol_dir[0] / WINb)
-            
-        enum = ['I', 'II', 'III']
-        comp  = self._reorder_vect(['NB' + d for d in DirectionLabels[:self.dim]])
-        vcomp = self._reorder_vect(['V_NB' + d for d in DirectionLabels[:self.dim]])
-        for i,suffix in enumerate(enum[:self.dim]):
-            build_options += ' -D NB_{}={}'.format(suffix,comp[i])
-        for i,suffix in enumerate(enum[:self.dim]):
-            build_options += ' -D V_NB_{}={}'.format(suffix,vcomp[i])
-
-        self._remesh = {}
-        cnames = []
-        for rc in self.required_components:
-            prg = self.cl_env.build_src(
-                src, build_options, vec,
-                nb_remesh_components=rc)
-            self._remesh[rc] = OpenClKernelLauncher(
-                prg.remeshing_kernel, self.cl_env.queue, gwi, lwi)
-            
-            cname = 'remesh' if len(self.required_components)==1 \
-                    else 'remesh_{}'.format(rc)
-            cnames.append(cname)
-            callback_profiler.register_tasks(cname)
-       
-        if len(cnames)>1:
-            callback_profiler.register_group('remesh',cnames)
-
-    def _collect_advec_remesh_kernel(self):
-        """
-        Compile OpenCL sources for advection and remeshing kernel.
-        """
-        build_options = self._build_options + self._size_constants
-
-        src, is_noBC, vec, f_space = self._kernel_cfg['advec_and_remesh']
-        gwi, lwi = f_space(self.f_resol_dir, vec)
-
-        WINb = lwi[0]
-        build_options += " -D FORMULA=" + self.method[Remesh].__name__.upper()
-
-        if self._is_multi_scale:
-            build_options += " -D MS_FORMULA="
-            build_options += self.method[MultiScale].__name__.upper()
-        if is_noBC:
-            build_options += " -D WITH_NOBC=1"
-
-        build_options += " -D WI_NB=" + str(WINb)
-        build_options += " -D PART_NB_PER_WI=" + str(self.f_resol_dir[0] / WINb)
-        
-        # Build code
-        src = [s.replace('RKN', self.method[TimeIntegrator].__name__.lower())
-               for s in src]
-        
-        # Euler integrator
-        if self.method[TimeIntegrator] is Euler:
-            if not self._is_multiScale:
-                src = [s for s in src if s.find(Euler.__name__.lower()) < 0]
-                src[-1] = src[-1].replace('advection', 'advection_euler')
-        
-        self._advec_and_remesh = {}
-        cnames = []
-        for rc in self.required_components:
-            prg = self.cl_env.build_src(
-                src, build_options, vec,
-                nb_remesh_components=rc)
-            self._advec_and_remesh[rc] = OpenClKernelLauncher(
-                prg.advection_and_remeshing, self.cl_env.queue, gwi, lwi)
-            
-            cname = 'advec_remesh' if len(self.required_components)==1 \
-                    else 'advec_remesh_{}'.format(rc)
-            callback_profiler.register_tasks(cname)
-            cnames.append(cname)
-        
-        if len(cnames)>1:
-            callback_profiler.register_group('remesh',cnames)
-
-       
-    def _collect_user_kernels(self):
-        """
-        Build user kernel sources.
-        """
-
-        user_src = self._user_src
-
-        if user_src is not None:
-            build_options = self._build_options + self._size_constants
-
-            f_resol_dir = self.f_resol_dir 
-            v_resol_dir = self.v_resol_dir
-            
-            f_work_items, gwi, lwi = self.cl_env.get_work_items(f_resol_dir)
-            v_work_items, gwi, lwi = self.cl_env.get_work_items(v_resol_dir)
-
-            build_options += " -D WI_NB="   + str(f_work_items)
-            build_options += " -D V_WI_NB=" + str(v_work_items)
-
-            self._user_prg = self.cl_env.build_src(usr_src, build_options, 1)
-        else:
-            self._user_prg = None
-
-    def _collect_extra_kernels(self):
-        if self._has_stretching:
-            self._collect_stretching_kernels()
-        if self._has_diffusion:
-            self._collect_diffusion_kernels()
-    
-    
-    def _collect_copy_kernels(self):
-        
-        if self._use_builtin_copy:
-            super(StaticGPUParticleAdvectionDir,self)._collect_copy_kernels()
-        else:
-            tg = self.cl_env.typegen
-            (fg,fp) = self.fields_on_part.items()[0]
-            src = fp[0]
-            dst = fg.gpu_data[0]
-            vtype = tg.fbtype
-            size = fg.mem_size // (3*tg.FLT_BYTES[vtype])
-
-            (kernel_launcher, kernel_args, kernel_args_mapping, cached_bytes) = \
-                    CopyKernel.autotune(self.cl_env,
-                            src, dst, vtype, size, 
-                            restrict=True, 
-                            build_opts=self._build_options,
-                            autotuner_config=self._autotuner_config)
-            self.copy = kernel_launcher
-    
-    def _collect_advec_kernel(self):
-
-        ftype              = self.cl_env.typegen.fbtype
-        compute_resolution = self.fields_topo.mesh.compute_resolution
-        boundary           = BoundaryCondition.PERIODIC
-        rk_scheme          = self.method[TimeIntegrator]
-
-        velocity    = self.velocity
-        position    = self._particle_position
-
-        field_mesh_info = self._fields_mesh_info_var
-        velo_mesh_info  = self._velocity_mesh_info_var
-
-        max_dt=0.01
-        max_velocity=1
-        velocity_dx = velo_mesh_info.value['dx'][0]
-
-        (kernel_launcher, kernel_args, kernel_args_mapping, 
-                total_work, per_work_statistic, cached_bytes) = \
-                DirectionalAdvectionKernel.autotune(self.cl_env, ftype,
-                        self.dim,self.direction,
-                        velocity, position,
-                        compute_resolution, field_mesh_info,
-                        boundary, rk_scheme,
-                        max_dt, max_velocity, velocity_dx,
-                        build_opts=self._build_options,
-                        autotuner_config=self._autotuner_config)
-    
-        callback_profiler.register_tasks('advection')
-                # ftype=ftype,
-                # total_work=total_work, 
-                # per_work_statistic=per_work_statistic)
-
-        def do_advec(dt,**kargs):
-            callback_profiler.tic('advection')
-            kernel_args[kernel_args_mapping['dt']] = self.cl_env.precision(dt)
-            evt = kernel_launcher(*kernel_args)
-            callback_profiler.tac('advection',evt=evt)
-            return evt
-
-        self._do_advec = do_advec
-        
-
-    def _collect_stretching_kernels(self):
-
-        self.vorticity = self.fields_on_grid[0]
-
-        ftype = self.cl_env.typegen.fbtype
-        compute_resolution = self.fields_topo.mesh.compute_resolution
-        boundary=self._stretching['boundary']
-        order=self._stretching['order']
-        formulation=self._stretching['formulation']
-        rk_scheme=self._stretching['rk_scheme']
-
-        mesh_info = self._fields_mesh_info_var
-        dt=0.1
-
-        (kernel_launcher, kernel_args, kernel_args_mapping, 
-                total_work, per_work_statistic, cached_bytes) = \
-                DirectionalStretchingKernel.autotune(self.cl_env, ftype,
-                        self.dim,self.direction,
-                        compute_resolution,
-                        boundary,order,
-                        formulation, rk_scheme,
-                        self.velocity, self.vorticity, 
-                        mesh_info, dt,
-                        build_opts=self._build_options,
-                        autotuner_config=self._autotuner_config)
-
-        callback_profiler.register_tasks('stretching', 
-                ftype=ftype,
-                total_work=total_work, 
-                per_work_statistic=per_work_statistic)
-
-        def do_stretch(dt,**kargs):
-            callback_profiler.tic('stretching')
-            kernel_args[kernel_args_mapping['dt']] = self.cl_env.precision(dt)
-            evt = kernel_launcher(*kernel_args)
-            callback_profiler.tac('stretching',evt=evt)
-            return evt
-
-        self._do_stretch = do_stretch
-
-    def _collect_diffusion_kernels(self):
-        self._diffuse = None
-        #callback_profiler.register_tasks('diffusion')
-
-
-    def _exec_kernel(self,kernel):
-        for (fg,fp) in self.fields_on_part.iteritems():
-            evts = []
-            for (g,p) in zip(fg.gpu_data,fp):
-                if kernel is self.copy and self._use_builtin_copy:
-                    evt = kernel.launch_sizes_in_args(g,p,wait_for=fg.events)
-                else:
-                    evt = kernel(p,g,wait_for=fg.events)
-                evts.append(evt)
-            fg.events += evts
-        return evts
-        
-    def _do_copy(self,**kargs): 
-        if __VERBOSE__:
-            print '_do_copy'
-        callback_profiler.tic('cpu2gpu')
-        evts = self._exec_kernel(self.copy)
-        callback_profiler.tac('cpu2gpu',evts=evts)
-    
-    def _do_compute_1k_monoscale(self, dt, **kargs):
-
-        velocity  = self.velocity
-        for (fg,fp) in self.fields_on_part.iteritems():
-            nbc = fg.nb_components
-            wait_evts = velocity.events + fg.events
-
-            args = tuple(
-                  [velocity.gpu_data[self.direction]] 
-                + [fp[i]          for i in xrange(nbc)] 
-                + [fg.gpu_data[i] for i in xrange(nbc)] 
-                + [self.gpu_precision(dt)]
-                + [self._cl_mesh_info]
-            )
-            
-            cname = 'advec_remesh' if len(self.required_components)==1 \
-                    else 'advec_remesh_{}'.format(nbc)
-            callback_profiler.tic(cname)
-            evt = self._advec_and_remesh[nbc](*args, wait_for=wait_evts)
-            callback_profiler.tac(cname)
-
-            fg.events.append(evt)
-            velocity.events.append(evt)
-    
-    def _do_compute_2k_monoscale(self, dt, **kargs):
-
-        velocity  = self.velocity
-        nbc = velocity.nb_components
-
-        # Advection
-        advec_evt = self._do_advec(dt=dt,wait_for=velocity.events)
-        velocity.events.append(advec_evt)
-        
-        # Remesh
-        for (fg,fp) in self.fields_on_part.iteritems():
-            nbc = fg.nb_components
-            cname = 'remesh' if len(self.required_components)==1 \
-                    else 'remesh_{}'.format(nbc)
-            callback_profiler.tic(cname)
-            args = tuple(
-                  [self._particle_position]
-                + [fp[i]          for i in xrange(nbc)]
-                + [fg.gpu_data[i] for i in xrange(nbc)]
-                + [self._cl_mesh_info]
-            )
-            remesh_evt = self._remesh[nbc](*args, wait_for=[advec_evt])
-            fg.events.append(remesh_evt)
-            callback_profiler.tac(cname)
-
-        if self._has_stretching:
-            evt=self._do_stretch(dt)
-    
-    
-    def _do_compute_1k_multiscale(self, dt, **kargs):
-
-        velocity  = self.velocity
-        
-        v_mesh_size            = npw.ones(4, dtype=self.gpu_precision)
-        v_mesh_size[:self.dim] = self._reorder_vect(self.velocity_topo.mesh.space_step)
-
-        for (fg,fp) in self.fields_on_part.iteritems():
-            nbc = fg.nb_components
-            wait_evts = velocity.events + fg.events
-        
-            args = tuple(
-                  [velocity.gpu_data[self.direction]] 
-                + [fp[i]          for i in xrange(nbc)] 
-                + [fg.gpu_data[i] for i in xrange(nbc)] 
-                + [self.gpu_precision(dt)]
-                + [self.gpu_precision(1.0 / v_mesh_size[1])]
-                + [self.gpu_precision(1.0 / v_mesh_size[2])]
-                + [self._cl_mesh_info]
-            )
-
-            evt = self._advec_and_remesh[nbc](*args, wait_for=wait_evts)
-            fg.events.append(evt)
-    
-    def _do_compute_2k_multiscale(self, dt, **kargs):
-
-        velocity  = self.velocity
-        nbc = velocity.nb_components
-        
-        v_mesh_size            = npw.ones(4, dtype=self.gpu_precision)
-        v_mesh_size[:self.dim] = self._reorder_vect(self.velocity_topo.mesh.space_step)
-
-        # Advection
-        args = tuple([
-            velocity.gpu_data[self.direction],
-            self._particle_position,
-            self.gpu_precision(dt),
-            self.gpu_precision(1.0 / v_mesh_size[1]),
-            self.gpu_precision(1.0 / v_mesh_size[2]),
-            self._cl_mesh_info
-        ])
-        advec_evt = self._advec(*args,wait_for=velocity.events)
-        velocity.events.append(advec_evt)
-        
-        for (fg,fp) in self.fields_on_part.iteritems():
-            # Remesh
-            nbc = fg.nb_components
-            args = tuple(
-                  [self._particle_position]
-                + [fp[i]          for i in xrange(nbc)]
-                + [fg.gpu_data[i] for i in xrange(nbc)]
-                + [self._cl_mesh_info]
-            )
-            remesh_evt = self._remesh[nbc](*args, wait_for=[advec_evt])
-            fg.events.append(remesh_evt)
-
-    def _pre_apply(self):
-        super(StaticGPUParticleAdvectionDir,self)._pre_apply()
-        if 'cpu2gpu' not in callback_profiler.tasks:
-            membytes=0
-            for fg in self.fields_on_part.keys():
-                 membytes += fg.mem_size
-            callback_profiler.register_tasks('cpu2gpu',membytes=membytes)
-            
-            #callback_profiler.register_group('transpose',
-                #['transpose_xy','transpose_yx','transpose_zx','transpose_xz'])
-            
-
-    @staticmethod
-    def supports_multiscale():
-        return True
-    @staticmethod
-    def supports_kernel_splitting():
-        return True
-    @staticmethod
-    def supports_user_kernels():
-        return True
-    @staticmethod
-    def supports_stretching():
-        return True
-    @staticmethod
-    def supports_mpi():
-        return False
diff --git a/hysop/old/gpu.old/tests/__init__.py b/hysop/old/gpu.old/tests/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/hysop/old/gpu.old/tests/test_advection_nullVelocity.py b/hysop/old/gpu.old/tests/test_advection_nullVelocity.py
deleted file mode 100644
index ef2f8db58cfb6f31abe1b46f88ee7c74d8828344..0000000000000000000000000000000000000000
--- a/hysop/old/gpu.old/tests/test_advection_nullVelocity.py
+++ /dev/null
@@ -1,992 +0,0 @@
-"""
-@file hysop.gpu.tests.test_advection_nullVelocity
-Testing advection kernels with a null velocity. Basic functionnal test.
-"""
-from hysop.domain.box import Box
-from hysop.fields.continuous_field import Field
-from hysop.operator.advection import Advection
-from hysop.constants import np, HYSOP_REAL
-from hysop.problem.simulation import Simulation
-from hysop.methods import TimeIntegrator, Interpolation, Remesh, \
-    Support, Splitting, Precision
-from hysop.numerics.integrators.runge_kutta2 import RK2
-from hysop.numerics.interpolation.interpolation import Linear
-from hysop.numerics.remeshing import L2_1, L4_2, L6_3, M8Prime
-from hysop.tools.parameters import Discretization
-from hysop.tools.numpywrappers import npw
-
-
-def setup_2D():
-    box = Box(length=[1., 1.], origin=[0., 0.])
-    scal = Field(domain=box, name='Scalar')
-    velo = Field(domain=box, name='Velocity', is_vector=True)
-    return scal, velo
-
-
-def setup_3D():
-    box = Box(length=[1., 1., 1.], origin=[0., 0., 0.])
-    scal = Field(domain=box, name='Scalar')
-    velo = Field(domain=box, name='Velocity', is_vector=True)
-    return scal, velo
-
-
-def assertion_2D(scal, velo, advec):
-    advec.discretize()
-    advec.setup()
-
-    scal_d = scal.discrete_fields.values()[0]
-    velo_d = velo.discrete_fields.values()[0]
-    scal_d.data[0][...] = npw.asrealarray(np.random.random(scal_d.data[0].shape))
-    velo_d.data[0][...] = npw.zeros_like(scal_d.data[0])
-    velo_d.data[1][...] = npw.zeros_like(scal_d.data[0])
-    scal_init = scal_d.data[0].copy()
-    scal_d.toDevice()
-    velo_d.toDevice()
-    scal_d.wait()
-    velo_d.wait()
-
-    advec.apply(Simulation(start=0., end=0.01, nb_iter=1))
-
-    scal_d.toHost()
-    scal_d.wait()
-    advec.finalize()
-    return np.allclose(scal_init, scal_d.data[0])
-
-
-def assertion_2D_withPython(scal, velo, advec, advec_py):
-    advec.discretize()
-    advec_py.discretize()
-    advec.setup()
-    advec_py.setup()
-
-    scal_d = scal.discrete_fields.values()[0]
-    velo_d = velo.discrete_fields.values()[0]
-    scal_d.data[0][...] = npw.asrealarray(
-        np.random.random(scal_d.data[0].shape))
-    velo_d.data[0][...] = npw.zeros(scal_d.data[0].shape)
-    velo_d.data[1][...] = npw.zeros(scal_d.data[0].shape)
-    scal_d.toDevice()
-    velo_d.toDevice()
-    scal_d.wait()
-    velo_d.wait()
-
-    advec_py.apply(Simulation(start=0., end=0.01, nb_iter=1))
-    advec.apply(Simulation(start=0., end=0.01, nb_iter=1))
-
-    py_res = scal_d.data[0].copy()
-    scal_d.toHost()
-    scal_d.wait()
-
-    advec.finalize()
-    print py_res, scal_d.data[0]
-    print py_res - scal_d.data[0]
-    return np.allclose(py_res, scal_d.data[0])
-
-
-def assertion_3D(scal, velo, advec):
-    advec.discretize()
-    advec.setup()
-
-    scal_d = scal.discrete_fields.values()[0]
-    velo_d = velo.discrete_fields.values()[0]
-    scal_d.data[0][...] = npw.asrealarray(np.random.random(scal_d.data[0].shape))
-    velo_d.data[0][...] = npw.zeros_like(scal_d.data[0])
-    velo_d.data[1][...] = npw.zeros_like(scal_d.data[0])
-    velo_d.data[2][...] = npw.zeros_like(scal_d.data[0])
-    scal_init = scal_d.data[0].copy()
-    scal_d.toDevice()
-    velo_d.toDevice()
-    scal_d.wait()
-    velo_d.wait()
-
-    advec.apply(Simulation(start=0., end=0.01, nb_iter=1))
-
-    scal_d.toHost()
-    scal_d.wait()
-
-    advec.finalize()
-    return np.allclose(scal_init, scal_d.data[0])
-
-
-def assertion_3D_withPython(scal, velo, advec, advec_py):
-    advec.discretize()
-    advec_py.discretize()
-    advec.setup()
-    advec_py.setup()
-
-    scal_d = scal.discrete_fields.values()[0]
-    velo_d = velo.discrete_fields.values()[0]
-    scal_d.data[0][...] = npw.asrealarray(np.random.random(scal_d.data[0].shape))
-    velo_d.data[0][...] = npw.zeros_like(scal_d.data[0])
-    velo_d.data[1][...] = npw.zeros_like(scal_d.data[0])
-    velo_d.data[2][...] = npw.zeros_like(scal_d.data[0])
-    scal_d.toDevice()
-    velo_d.toDevice()
-    scal_d.wait()
-    velo_d.wait()
-
-    advec_py.apply(Simulation(start=0., end=0.01, nb_iter=1))
-    advec.apply(Simulation(start=0., end=0.01, nb_iter=1))
-
-    py_res = scal_d.data[0].copy()
-    scal_d.toHost()
-    scal_d.wait()
-
-    advec.finalize()
-    return np.allclose(py_res, scal_d.data[0])
-
-
-d2d = Discretization([33,33])
-d3d = Discretization([17, 17, 17])
-
-# M6 tests
-def test_2D_m6_1k():
-    """
-    Testing M6 remeshing formula in 2D, 1 kernel,
-    o2 splitting.
-    """
-    scal, velo = setup_2D()
-
-    advec = Advection(velo, scal,discretization=d2d,
-                      method={TimeIntegrator: RK2,
-                              Interpolation: Linear,
-                              Remesh: L4_2,
-                              Support: 'gpu_1k',
-                              Splitting: 'o2',
-                              Precision: HYSOP_REAL}
-                      )
-    advec_py = Advection(velo, scal,discretization=d2d,
-                         method={TimeIntegrator: RK2,
-                                 Interpolation: Linear,
-                                 Remesh: L4_2,
-                                 Support: '',
-                                 Splitting: 'o2'},
-                         )
-    assert assertion_2D_withPython(scal, velo, advec, advec_py)
-    #assert assertion_2D(scal, velo, advec)
-
-
-def test_2D_m6_2k():
-    """
-    Testing M6 remeshing formula in 2D, 2 kernel,
-    o2 splitting.
-    """
-    scal, velo = setup_2D()
-
-    advec = Advection(velo, scal,discretization=d2d,
-                      method={TimeIntegrator: RK2,
-                              Interpolation: Linear,
-                              Remesh: L4_2,
-                              Support: 'gpu_2k',
-                              Splitting: 'o2',
-                              Precision: HYSOP_REAL}
-                      )
-    assert assertion_2D(scal, velo, advec)
-
-
-def test_2D_m6_1k_sFH():
-    """
-    Testing M6 remeshing formula in 2D, 1 kernel,
-    o2_FullHalf splitting.
-    """
-    scal, velo = setup_2D()
-
-    advec = Advection(velo, scal,discretization=d2d,
-                      method={TimeIntegrator: RK2,
-                              Interpolation: Linear,
-                              Remesh: L4_2,
-                              Support: 'gpu_1k',
-                              Splitting: 'o2_FullHalf',
-                              Precision: HYSOP_REAL}
-                      )
-    assert assertion_2D(scal, velo, advec)
-
-
-def test_2D_m6_2k_sFH():
-    """
-    Testing M6 remeshing formula in 2D, 2 kernel,
-    o2_FullHalf splitting.
-    """
-    scal, velo = setup_2D()
-
-    advec = Advection(velo, scal,discretization=d2d,
-                      method={TimeIntegrator: RK2,
-                              Interpolation: Linear,
-                              Remesh: L4_2,
-                              Support: 'gpu_2k',
-                              Splitting: 'o2_FullHalf',
-                              Precision: HYSOP_REAL}
-                      )
-    assert assertion_2D(scal, velo, advec)
-
-
-def test_3D_m6_1k():
-    """
-    Testing M6 remeshing formula in 3D, 1 kernel,
-    o2 splitting.
-    """
-    scal, velo = setup_3D()
-
-    advec = Advection(velo, scal,discretization=d3d,
-                      method={TimeIntegrator: RK2,
-                              Interpolation: Linear,
-                              Remesh: L4_2,
-                              Support: 'gpu_1k',
-                              Splitting: 'o2',
-                              Precision: HYSOP_REAL}
-                      )
-    assert assertion_3D(scal, velo, advec)
-
-
-def test_3D_m6_2k():
-    """
-    Testing M6 remeshing formula in 3D, 2 kernel,
-    o2 splitting.
-    """
-    scal, velo = setup_3D()
-
-    advec = Advection(velo, scal,discretization=d3d,
-                      method={TimeIntegrator: RK2,
-                              Interpolation: Linear,
-                              Remesh: L4_2,
-                              Support: 'gpu_2k',
-                              Splitting: 'o2',
-                              Precision: HYSOP_REAL}
-                        )
-    assert assertion_3D(scal, velo, advec)
-
-
-def test_3D_m6_1k_sFH():
-    """
-    Testing M6 remeshing formula in 3D, 1 kernel,
-    o2_FullHalf splitting.
-    """
-    scal, velo = setup_3D()
-
-    advec = Advection(velo, scal,discretization=d3d,
-                      method={TimeIntegrator: RK2,
-                              Interpolation: Linear,
-                              Remesh: L4_2,
-                              Support: 'gpu_1k',
-                              Splitting: 'o2_FullHalf',
-                              Precision: HYSOP_REAL}
-                      )
-    assert assertion_3D(scal, velo, advec)
-
-
-def test_3D_m6_2k_sFH():
-    """
-    Testing M6 remeshing formula in 3D, 2 kernel,
-    o2_FullHalf splitting.
-    """
-    scal, velo = setup_3D()
-
-    advec = Advection(velo, scal,discretization=d3d,
-                      method={TimeIntegrator: RK2,
-                              Interpolation: Linear,
-                              Remesh: L4_2,
-                              Support: 'gpu_2k',
-                              Splitting: 'o2_FullHalf',
-                              Precision: HYSOP_REAL},
-                      )
-    assert assertion_3D(scal, velo, advec)
-
-
-# M4 testing
-def test_2D_m4_1k():
-    """
-    Testing M4 remeshing formula in 2D, 1 kernel,
-    o2 splitting.
-    """
-    scal, velo = setup_2D()
-
-    advec = Advection(velo, scal,discretization=d2d,
-                      method={TimeIntegrator: RK2,
-                              Interpolation: Linear,
-                              Remesh: L2_1,
-                              Support: 'gpu_1k',
-                              Splitting: 'o2',
-                              Precision: HYSOP_REAL}
-                        )
-    advec_py = Advection(velo, scal,discretization=d2d,
-                         method={TimeIntegrator: RK2,
-                                 Interpolation: Linear,
-                                 Remesh: L2_1,
-                                 Support: '',
-                                 Splitting: 'o2'}
-                         )
-    assert assertion_2D_withPython(scal, velo, advec, advec_py)
-
-
-def test_2D_m4_2k():
-    """
-    Testing M4 remeshing formula in 2D, 2 kernel,
-    o2 splitting.
-    """
-    scal, velo = setup_2D()
-
-    advec = Advection(velo, scal,discretization=d2d,
-                      method={TimeIntegrator: RK2,
-                              Interpolation: Linear,
-                              Remesh: L2_1,
-                              Support: 'gpu_2k',
-                              Splitting: 'o2',
-                              Precision: HYSOP_REAL
-                              }
-                        )
-    advec_py = Advection(velo, scal,discretization=d2d,
-                         method={TimeIntegrator: RK2,
-                                 Interpolation: Linear,
-                                 Remesh: L2_1,
-                                 Support: '',
-                                 Splitting: 'o2'},
-                         )
-    assert assertion_2D_withPython(scal, velo, advec, advec_py)
-
-
-def test_2D_m4_1k_sFH():
-    """
-    Testing M4 remeshing formula in 2D, 1 kernel,
-    o2_FullHalf splitting.
-    """
-    scal, velo = setup_2D()
-
-    advec = Advection(velo, scal,discretization=d2d,
-                      method={TimeIntegrator: RK2,
-                              Interpolation: Linear,
-                              Remesh: L2_1,
-                              Support: 'gpu_1k',
-                              Splitting: 'o2_FullHalf',
-                              Precision: HYSOP_REAL},
-                        )
-    advec_py = Advection(velo, scal,discretization=d2d,
-                         method={TimeIntegrator: RK2,
-                                 Interpolation: Linear,
-                                 Remesh: L2_1,
-                                 Support: '',
-                                 Splitting: 'o2_FullHalf'}
-                         )
-    assert assertion_2D_withPython(scal, velo, advec, advec_py)
-
-
-def test_2D_m4_2k_sFH():
-    """
-    Testing M4 remeshing formula in 2D, 2 kernel,
-    o2_FullHalf splitting.
-    """
-    scal, velo = setup_2D()
-
-    advec = Advection(velo, scal,discretization=d2d,
-                      method={TimeIntegrator: RK2,
-                              Interpolation: Linear,
-                              Remesh: L2_1,
-                              Support: 'gpu_2k',
-                              Splitting: 'o2_FullHalf'}
-                      )
-    advec_py = Advection(velo, scal,discretization=d2d,
-                         method={TimeIntegrator: RK2,
-                                 Interpolation: Linear,
-                                 Remesh: L2_1,
-                                 Support: '',
-                                 Splitting: 'o2_FullHalf'}
-                         )
-    assert assertion_2D_withPython(scal, velo, advec, advec_py)
-
-
-def test_3D_m4_1k():
-    """
-    Testing M4 remeshing formula in 3D, 1 kernel,
-    o2 splitting.
-    """
-    scal, velo = setup_3D()
-
-    advec = Advection(velo, scal,discretization=d3d,
-                      method={TimeIntegrator: RK2,
-                              Interpolation: Linear,
-                              Remesh: L2_1,
-                              Support: 'gpu_1k',
-                              Splitting: 'o2'}
-                        )
-    advec_py = Advection(velo, scal,discretization=d3d,
-                         method={TimeIntegrator: RK2,
-                                 Interpolation: Linear,
-                                 Remesh: L2_1,
-                                 Support: '',
-                                 Splitting: 'o2'},
-                         )
-    assert assertion_3D_withPython(scal, velo, advec, advec_py)
-
-
-def test_3D_m4_2k():
-    """
-    Testing M4 remeshing formula in 3D, 2 kernel,
-    o2 splitting.
-    """
-    scal, velo = setup_3D()
-
-    advec = Advection(velo, scal,discretization=d3d,
-                      method={TimeIntegrator: RK2,
-                              Interpolation: Linear,
-                              Remesh: L2_1,
-                              Support: 'gpu_2k',
-                              Splitting: 'o2'}
-                            )
-    advec_py = Advection(velo, scal,discretization=d3d,
-                         method={TimeIntegrator: RK2,
-                                 Interpolation: Linear,
-                                 Remesh: L2_1,
-                                 Support: '',
-                                 Splitting: 'o2'},
-                         )
-    assert assertion_3D_withPython(scal, velo, advec, advec_py)
-
-
-def test_3D_m4_1k_sFH():
-    """
-    Testing M4 remeshing formula in 3D, 1 kernel,
-    o2_FullHalf splitting.
-    """
-    scal, velo = setup_3D()
-
-    advec = Advection(velo, scal,discretization=d3d,
-                      method={TimeIntegrator: RK2,
-                              Interpolation: Linear,
-                              Remesh: L2_1,
-                              Support: 'gpu_1k',
-                              Splitting: 'o2_FullHalf'})
-    advec_py = Advection(velo, scal,discretization=d3d,
-                         method={TimeIntegrator: RK2,
-                                 Interpolation: Linear,
-                                 Remesh: L2_1,
-                                 Support: '',
-                                 Splitting: 'o2_FullHalf'}
-                         )
-    assert assertion_3D_withPython(scal, velo, advec, advec_py)
-
-
-def test_3D_m4_2k_sFH():
-    """
-    Testing M4 remeshing formula in 3D, 2 kernel,
-    o2_FullHalf splitting.
-    """
-    scal, velo = setup_3D()
-
-    advec = Advection(velo, scal,discretization=d3d,
-                      method={TimeIntegrator: RK2,
-                              Interpolation: Linear,
-                              Remesh: L2_1,
-                              Support: 'gpu_2k',
-                              Splitting: 'o2_FullHalf'}
-                      )
-    advec_py = Advection(velo, scal,discretization=d3d,
-                         method={TimeIntegrator: RK2,
-                                 Interpolation: Linear,
-                                 Remesh: L2_1,
-                                 Support: '',
-                                 Splitting: 'o2_FullHalf'}
-                         )
-    assert assertion_3D_withPython(scal, velo, advec, advec_py)
-
-
-# M8 testing
-def test_2D_m8_1k():
-    """
-    Testing M8 remeshing formula in 2D, 1 kernel,
-    o2 splitting.
-    """
-    scal, velo = setup_2D()
-
-    advec = Advection(velo, scal,discretization=d2d,
-                      method={TimeIntegrator: RK2,
-                              Interpolation: Linear,
-                              Remesh: M8Prime,
-                              Support: 'gpu_1k',
-                              Splitting: 'o2'}
-                      )
-    advec_py = Advection(velo, scal,discretization=d2d,
-                         method={TimeIntegrator: RK2,
-                                 Interpolation: Linear,
-                                 Remesh: M8Prime,
-                                 Support: '',
-                                 Splitting: 'o2'},
-                         )
-    assert assertion_2D_withPython(scal, velo, advec, advec_py)
-
-
-def test_2D_m8_2k():
-    """
-    Testing M8 remeshing formula in 2D, 2 kernel,
-    o2 splitting.
-    """
-    scal, velo = setup_2D()
-
-    advec = Advection(velo, scal,discretization=d2d,
-                      method={TimeIntegrator: RK2,
-                              Interpolation: Linear,
-                              Remesh: M8Prime,
-                              Support: 'gpu_2k',
-                              Splitting: 'o2'}
-                      )
-    advec_py = Advection(velo, scal,discretization=d2d,
-                         method={TimeIntegrator: RK2,
-                                 Interpolation: Linear,
-                                 Remesh: M8Prime,
-                                 Support: '',
-                                 Splitting: 'o2'},
-                         )
-    assert assertion_2D_withPython(scal, velo, advec, advec_py)
-
-
-def test_2D_m8_1k_sFH():
-    """
-    Testing M8 remeshing formula in 2D, 1 kernel,
-    o2_FullHalf splitting.
-    """
-    scal, velo = setup_2D()
-
-    advec = Advection(velo, scal, discretization=d2d,
-                      method={TimeIntegrator: RK2,
-                              Interpolation: Linear,
-                              Remesh: M8Prime,
-                              Support: 'gpu_1k',
-                              Splitting: 'o2_FullHalf'}
-                      )
-    advec_py = Advection(velo, scal,discretization=d2d,
-                         method={TimeIntegrator: RK2,
-                                 Interpolation: Linear,
-                                 Remesh: M8Prime,
-                                 Support: '',
-                                 Splitting: 'o2_FullHalf'}
-                         )
-    assert assertion_2D_withPython(scal, velo, advec, advec_py)
-
-
-def test_2D_m8_2k_sFH():
-    """
-    Testing M8 remeshing formula in 2D, 2 kernel,
-    o2_FullHalf splitting.
-    """
-    scal, velo = setup_2D()
-
-    advec = Advection(velo, scal,discretization=d2d,
-                      method={TimeIntegrator: RK2,
-                              Interpolation: Linear,
-                              Remesh: M8Prime,
-                              Support: 'gpu_2k',
-                              Splitting: 'o2_FullHalf'}
-                      )
-    advec_py = Advection(velo, scal,discretization=d2d,
-                         method={TimeIntegrator: RK2,
-                                 Interpolation: Linear,
-                                 Remesh: M8Prime,
-                                 Support: '',
-                                 Splitting: 'o2_FullHalf'}
-                         )
-    assert assertion_2D_withPython(scal, velo, advec, advec_py)
-
-
-def test_3D_m8_1k():
-    """
-    Testing M8 remeshing formula in 3D, 1 kernel,
-    o2 splitting.
-    """
-    scal, velo = setup_3D()
-
-    advec = Advection(velo, scal,discretization=d3d,
-                      method={TimeIntegrator: RK2,
-                              Interpolation: Linear,
-                              Remesh: M8Prime,
-                              Support: 'gpu_1k',
-                              Splitting: 'o2'}
-                      )
-    advec_py = Advection(velo, scal,discretization=d3d,
-                         method={TimeIntegrator: RK2,
-                                 Interpolation: Linear,
-                                 Remesh: M8Prime,
-                                 Support: '',
-                                 Splitting: 'o2'},
-                         )
-    assert assertion_3D_withPython(scal, velo, advec, advec_py)
-
-
-def test_3D_m8_2k():
-    """
-    Testing M8 remeshing formula in 3D, 2 kernel,
-    o2 splitting.
-    """
-    scal, velo = setup_3D()
-
-    advec = Advection(velo, scal,discretization=d3d,
-                      method={TimeIntegrator: RK2,
-                              Interpolation: Linear,
-                              Remesh: M8Prime,
-                              Support: 'gpu_2k',
-                              Splitting: 'o2'}
-                      )
-    advec_py = Advection(velo, scal,discretization=d3d,
-                         method={TimeIntegrator: RK2,
-                                 Interpolation: Linear,
-                                 Remesh: M8Prime,
-                                 Support: '',
-                                 Splitting: 'o2'},
-                         )
-    assert assertion_3D_withPython(scal, velo, advec, advec_py)
-
-
-def test_3D_m8_1k_sFH():
-    """
-    Testing M8 remeshing formula in 3D, 1 kernel,
-    o2_FullHalf splitting.
-    """
-    scal, velo = setup_3D()
-
-    advec = Advection(velo, scal,discretization=d3d,
-                      method={TimeIntegrator: RK2,
-                              Interpolation: Linear,
-                              Remesh: M8Prime,
-                              Support: 'gpu_1k',
-                              Splitting: 'o2_FullHalf'}
-                      )
-    advec_py = Advection(velo, scal,discretization=d3d,
-                         method={TimeIntegrator: RK2,
-                                 Interpolation: Linear,
-                                 Remesh: M8Prime,
-                                 Support: '',
-                                 Splitting: 'o2_FullHalf'}
-                         )
-    assert assertion_3D_withPython(scal, velo, advec, advec_py)
-
-
-def test_3D_m8_2k_sFH():
-    """
-    Testing M8 remeshing formula in 3D, 2 kernel,
-    o2_FullHalf splitting.
-    """
-    scal, velo = setup_3D()
-
-    advec = Advection(velo, scal,discretization=d3d,
-                      method={TimeIntegrator: RK2,
-                              Interpolation: Linear,
-                              Remesh: M8Prime,
-                              Support: 'gpu_2k',
-                              Splitting: 'o2_FullHalf'}
-                      )
-    advec_py = Advection(velo, scal,discretization=d3d,
-                         method={TimeIntegrator: RK2,
-                                 Interpolation: Linear,
-                                 Remesh: M8Prime,
-                                 Support: '',
-                                 Splitting: 'o2_FullHalf'}
-                         )
-    assert assertion_3D_withPython(scal, velo, advec, advec_py)
-
-
-def test_2D_l6_2k():
-    """
-    Testing Lambda6star remeshing formula in 2D, 2 kernel,
-    o2 splitting.
-    """
-    scal, velo = setup_2D()
-
-    advec = Advection(velo, scal,discretization=d2d,
-                      method={TimeIntegrator: RK2,
-                              Interpolation: Linear,
-                              Remesh: L6_3,
-                              Support: 'gpu_1k',
-                              Splitting: 'o2'}
-                      )
-    assert assertion_2D(scal, velo, advec)
-
-
-def test_2D_l6_1k_sFH():
-    """
-    Testing Lambda6star remeshing formula in 2D, 1 kernel,
-    o2_FullHalf splitting.
-    """
-    scal, velo = setup_2D()
-
-    advec = Advection(velo, scal,discretization=d2d,
-                      method={TimeIntegrator: RK2,
-                              Interpolation: Linear,
-                              Remesh: L6_3,
-                              Support: 'gpu_1k',
-                              Splitting: 'o2_FullHalf'}
-                      )
-    assert assertion_2D(scal, velo, advec)
-
-
-def test_2D_l6_2k_sFH():
-    """
-    Testing Lambda6star remeshing formula in 2D, 2 kernel,
-    o2_FullHalf splitting.
-    """
-    scal, velo = setup_2D()
-
-    advec = Advection(velo, scal,discretization=d2d,
-                      method={TimeIntegrator: RK2,
-                              Interpolation: Linear,
-                              Remesh: L6_3,
-                              Support: 'gpu_2k',
-                              Splitting: 'o2_FullHalf'}
-                      )
-    assert assertion_2D(scal, velo, advec)
-
-
-def test_3D_l6_1k():
-    """
-    Testing Lambda6star remeshing formula in 3D, 1 kernel,
-    o2 splitting.
-    """
-    scal, velo = setup_3D()
-
-    advec = Advection(velo, scal,discretization=d3d,
-                      method={TimeIntegrator: RK2,
-                              Interpolation: Linear,
-                              Remesh: L6_3,
-                              Support: 'gpu_1k',
-                              Splitting: 'o2'}
-                      )
-    assert assertion_3D(scal, velo, advec)
-
-
-def test_3D_l6_2k():
-    """
-    Testing Lambda6star remeshing formula in 3D, 2 kernel,
-    o2 splitting.
-    """
-    scal, velo = setup_3D()
-
-    advec = Advection(velo, scal,discretization=d3d,
-                      method={TimeIntegrator: RK2,
-                              Interpolation: Linear,
-                              Remesh: L6_3,
-                              Support: 'gpu_2k',
-                              Splitting: 'o2'}
-                      )
-    assert assertion_3D(scal, velo, advec)
-
-
-def test_3D_l6_1k_sFH():
-    """
-    Testing Lambda6star remeshing formula in 3D, 1 kernel,
-    o2_FullHalf splitting.
-    """
-    scal, velo = setup_3D()
-
-    advec = Advection(velo, scal,discretization=d3d,
-                      method={TimeIntegrator: RK2,
-                              Interpolation: Linear,
-                              Remesh: L6_3,
-                              Support: 'gpu_1k',
-                              Splitting: 'o2_FullHalf'}
-                      )
-    assert assertion_3D(scal, velo, advec)
-
-
-def test_3D_l6_2k_sFH():
-    """
-    Testing Lambda6star remeshing formula in 3D, 2 kernel,
-    o2_FullHalf splitting.
-    """
-    scal, velo = setup_3D()
-
-    advec = Advection(velo, scal,discretization=d3d,
-                      method={TimeIntegrator: RK2,
-                              Interpolation: Linear,
-                              Remesh: L6_3,
-                              Support: 'gpu_2k',
-                              Splitting: 'o2_FullHalf'}
-                      )
-    assert assertion_3D(scal, velo, advec)
-
-
-def test_rectangular_domain2D():
-    box = Box(length=[1., 1.], origin=[0., 0.])
-    scal = Field(domain=box, name='Scalar')
-    velo = Field(domain=box, name='Velocity', is_vector=True)
-    advec = Advection(velo, scal, discretization=Discretization([65, 33]),
-                      method={TimeIntegrator: RK2,
-                              Interpolation: Linear,
-                              Remesh: L4_2,
-                              Support: 'gpu_1k',
-                              Splitting: 'o2'}
-                      )
-    advec_py = Advection(velo, scal, discretization=Discretization([65, 33]),
-                         method={TimeIntegrator: RK2,
-                                 Interpolation: Linear,
-                                 Remesh: L4_2,
-                                 Support: '',
-                                 Splitting: 'o2'},
-                         )
-    advec.discretize()
-    advec_py.discretize()
-    advec.setup()
-    advec_py.setup()
-
-    scal_d = scal.discrete_fields.values()[0]
-    velo_d = velo.discrete_fields.values()[0]
-    scal_d.data[0][...] = npw.asrealarray(np.random.random(scal_d.data[0].shape))
-    velo_d.data[0][...] = npw.zeros_like(scal_d.data[0])
-    velo_d.data[1][...] = npw.zeros_like(scal_d.data[0])
-    scal_init = scal_d.data[0].copy()
-    scal_d.toDevice()
-    velo_d.toDevice()
-
-    advec.apply(Simulation(start=0., end=0.01, nb_iter=1))
-    advec_py.apply(Simulation(start=0., end=0.01, nb_iter=1))
-    scal_py_res = scal_d.data[0].copy()
-
-    scal_d.toHost()
-
-    advec.finalize()
-    assert np.allclose(scal_init, scal_d.data[0])
-    assert np.allclose(scal_init, scal_py_res)
-
-
-def test_rectangular_domain3D():
-    box = Box(length=[1., 1., 1.], origin=[0., 0., 0.])
-    scal = Field(domain=box, name='Scalar')
-    velo = Field(domain=box, name='Velocity', is_vector=True)
-
-    advec = Advection(velo, scal, discretization=Discretization([65, 33, 17]),
-                      method={TimeIntegrator: RK2,
-                              Interpolation: Linear,
-                              Remesh: L4_2,
-                              Support: 'gpu_1k',
-                              Splitting: 'o2'}
-                      )
-    advec_py = Advection(velo, scal, discretization=Discretization([65, 33, 17]),
-                         method={TimeIntegrator: RK2,
-                                 Interpolation: Linear,
-                                 Remesh: L4_2,
-                                 Support: '',
-                                 Splitting: 'o2'},
-                         )
-    advec.discretize()
-    advec_py.discretize()
-    advec.setup()
-    advec_py.setup()
-
-    scal_d = scal.discrete_fields.values()[0]
-    velo_d = velo.discrete_fields.values()[0]
-    scal_d.data[0][...] = npw.asrealarray(np.random.random(scal_d.data[0].shape))
-    velo_d.data[0][...] = npw.zeros_like(scal_d.data[0])
-    velo_d.data[1][...] = npw.zeros_like(scal_d.data[0])
-    velo_d.data[2][...] = npw.zeros_like(scal_d.data[0])
-    scal_init = scal_d.data[0].copy()
-    scal_d.toDevice()
-    velo_d.toDevice()
-
-    advec.apply(Simulation(start=0., end=0.01, nb_iter=1))
-    advec_py.apply(Simulation(start=0., end=0.01, nb_iter=1))
-    scal_py_res = scal_d.data[0].copy()
-
-    scal_d.toHost()
-
-    advec.finalize()
-    assert np.allclose(scal_init, scal_d.data[0])
-    assert np.allclose(scal_init, scal_py_res)
-
-
-def test_2D_vector():
-    box = Box(length=[1., 1.], origin=[0., 0.])
-    scal = Field(domain=box, name='Scalar', is_vector=True)
-    velo = Field(domain=box, name='Velocity', is_vector=True)
-
-    advec = Advection(velo, scal, discretization=Discretization([129, 129]),
-                      method={TimeIntegrator: RK2,
-                              Interpolation: Linear,
-                              Remesh: L4_2,
-                              Support: 'gpu_1k',
-                              Splitting: 'o2'}
-                      )
-    advec_py = Advection(velo, scal, discretization=Discretization([129, 129]),
-                         method={TimeIntegrator: RK2,
-                                 Interpolation: Linear,
-                                 Remesh: L4_2,
-                                 Support: '',
-                                 Splitting: 'o2'},
-                         )
-    advec.discretize()
-    advec_py.discretize()
-    advec.setup()
-    advec_py.setup()
-
-    scal_d = scal.discrete_fields.values()[0]
-    velo_d = velo.discrete_fields.values()[0]
-    scal_d.data[0][...] = npw.asrealarray(np.random.random(scal_d.data[0].shape))
-    scal_d.data[1][...] = npw.asrealarray(np.random.random(scal_d.data[0].shape))
-    velo_d.data[0][...] = npw.zeros_like(scal_d.data[0])
-    velo_d.data[1][...] = npw.zeros_like(scal_d.data[0])
-    scal_init_X = scal_d.data[0].copy()
-    scal_init_Y = scal_d.data[1].copy()
-    scal_d.toDevice()
-    velo_d.toDevice()
-
-    advec.apply(Simulation(start=0., end=0.01, nb_iter=1))
-    advec_py.apply(Simulation(start=0., end=0.01, nb_iter=1))
-    scal_py_res_X = scal_d.data[0].copy()
-    scal_py_res_Y = scal_d.data[1].copy()
-
-    scal_d.toHost()
-
-    advec.finalize()
-    assert np.allclose(scal_init_X, scal_d.data[0])
-    assert np.allclose(scal_init_Y, scal_d.data[1])
-    assert np.allclose(scal_init_X, scal_py_res_X)
-    assert np.allclose(scal_init_Y, scal_py_res_Y)
-
-
-def test_3D_vector():
-    box = Box(length=[1., 1., 1.], origin=[0., 0., 0.])
-    scal = Field(domain=box, name='Scalar', is_vector=True)
-    velo = Field(domain=box, name='Velocity', is_vector=True)
-
-    advec = Advection(velo, scal,discretization=d3d,
-                      method={TimeIntegrator: RK2,
-                              Interpolation: Linear,
-                              Remesh: L4_2,
-                              Support: 'gpu_1k',
-                              Splitting: 'o2'}
-                        )
-    advec_py = Advection(velo, scal,discretization=d3d,
-                         method={TimeIntegrator: RK2,
-                                 Interpolation: Linear,
-                                 Remesh: L4_2,
-                                 Support: '',
-                                 Splitting: 'o2'},
-                         )
-    advec.discretize()
-    advec_py.discretize()
-    advec.setup()
-    advec_py.setup()
-
-    scal_d = scal.discrete_fields.values()[0]
-    velo_d = velo.discrete_fields.values()[0]
-    scal_d.data[0][...] = npw.asrealarray(np.random.random(scal_d.data[0].shape))
-    scal_d.data[1][...] = npw.asrealarray(np.random.random(scal_d.data[0].shape))
-    scal_d.data[2][...] = npw.asrealarray(np.random.random(scal_d.data[0].shape))
-    velo_d.data[0][...] = npw.zeros_like(scal_d.data[0])
-    velo_d.data[1][...] = npw.zeros_like(scal_d.data[0])
-    velo_d.data[2][...] = npw.zeros_like(scal_d.data[0])
-    scal_init_X = scal_d.data[0].copy()
-    scal_init_Y = scal_d.data[1].copy()
-    scal_init_Z = scal_d.data[2].copy()
-    scal_d.toDevice()
-    velo_d.toDevice()
-
-    advec.apply(Simulation(start=0., end=0.01, nb_iter=1))
-    advec_py.apply(Simulation(start=0., end=0.01, nb_iter=1))
-    scal_py_res_X = scal_d.data[0].copy()
-    scal_py_res_Y = scal_d.data[1].copy()
-    scal_py_res_Z = scal_d.data[2].copy()
-
-    scal_d.toHost()
-
-    advec.finalize()
-    assert np.allclose(scal_init_X, scal_d.data[0])
-    assert np.allclose(scal_init_Y, scal_d.data[1])
-    assert np.allclose(scal_init_Z, scal_d.data[2])
-    assert np.allclose(scal_init_X, scal_py_res_X)
-    assert np.allclose(scal_init_Y, scal_py_res_Y)
-    assert np.allclose(scal_init_Z, scal_py_res_Z)
diff --git a/hysop/old/gpu.old/tests/test_copy.py b/hysop/old/gpu.old/tests/test_copy.py
deleted file mode 100644
index 754551f211b9bbb5c220a5122a2e312f90fab499..0000000000000000000000000000000000000000
--- a/hysop/old/gpu.old/tests/test_copy.py
+++ /dev/null
@@ -1,229 +0,0 @@
-"""
-@file hysop.gpu.tests.test_copy
-Testing copy kernels.
-"""
-from hysop.backend.device.opencl import cl
-from hysop.constants import np
-from hysop.backend.device.opencl.opencl_tools import get_opencl_environment
-from hysop.backend.device.opencl.opencl_kernel import OpenClKernelLauncher
-from hysop.tools.numpywrappers import npw
-
-
-def test_copy2D():
-    resolution = (256, 256)
-    cl_env = get_opencl_environment()
-    vec = 2
-    src_copy = 'kernels/copy.cl'
-    build_options = ""
-    build_options += " -D NB_I=256 -D NB_II=256"
-    build_options += " -D TILE_DIM_COPY=16"
-    build_options += " -D BLOCK_ROWS_COPY=8"
-    gwi = (int(resolution[0] / 2),
-           int(resolution[1] / 2))
-    lwi = (8, 8)
-    prg = cl_env.build_src(src_copy, build_options, vec)
-    copy = OpenClKernelLauncher(prg.copy, cl_env.queue, gwi, lwi)
-
-    data_in = npw.asrealarray(np.random.random(resolution))
-    data_out = npw.empty_like(data_in)
-    data_gpu_in = cl.Buffer(cl_env.ctx,
-                            cl.mem_flags.READ_WRITE,
-                            size=data_in.nbytes)
-    data_gpu_out = cl.Buffer(cl_env.ctx,
-                             cl.mem_flags.READ_WRITE,
-                             size=data_out.nbytes)
-    cl.enqueue_copy(cl_env.queue, data_gpu_in, data_in)
-    cl_env.queue.finish()
-
-    copy(data_gpu_in, data_gpu_out)
-    cl_env.queue.finish()
-    cl.enqueue_copy(cl_env.queue, data_out, data_gpu_out)
-    cl_env.queue.finish()
-    assert np.allclose(data_out, data_in)
-
-    data_gpu_in.release()
-    data_gpu_out.release()
-
-
-def test_copy2D_rect():
-    resolution = (256, 512)
-    resolutionT = (512, 256)
-    cl_env = get_opencl_environment()
-    vec = 2
-    src_copy = 'kernels/copy.cl'
-    build_options = ""
-    build_options += " -D NB_I=256 -D NB_II=512"
-    build_options += " -D TILE_DIM_COPY=16"
-    build_options += " -D BLOCK_ROWS_COPY=8"
-    gwi = (int(resolution[0] / 2),
-           int(resolution[1] / 2))
-    lwi = (8, 8)
-    prg = cl_env.build_src(src_copy, build_options, vec)
-    copy_x = OpenClKernelLauncher(prg.copy, cl_env.queue, gwi, lwi)
-
-    build_options = ""
-    build_options += " -D NB_I=512 -D NB_II=256"
-    build_options += " -D TILE_DIM_COPY=16"
-    build_options += " -D BLOCK_ROWS_COPY=8"
-    gwi = (int(resolution[1] / 2),
-           int(resolution[0] / 2))
-    lwi = (8, 8)
-    prg = cl_env.build_src(src_copy, build_options, vec)
-    copy_y = OpenClKernelLauncher(prg.copy, cl_env.queue, gwi, lwi)
-
-    data_in = npw.asrealarray(np.random.random(resolution))
-    data_out = npw.empty_like(data_in)
-    data_gpu_in = cl.Buffer(cl_env.ctx,
-                            cl.mem_flags.READ_WRITE,
-                            size=data_in.nbytes)
-    data_gpu_out = cl.Buffer(cl_env.ctx,
-                             cl.mem_flags.READ_WRITE,
-                             size=data_out.nbytes)
-    cl.enqueue_copy(cl_env.queue, data_gpu_in, data_in)
-    cl.enqueue_copy(cl_env.queue, data_gpu_out, data_out)
-    cl_env.queue.finish()
-
-    copy_x(data_gpu_in, data_gpu_out)
-    cl_env.queue.finish()
-    cl.enqueue_copy(cl_env.queue, data_out, data_gpu_out)
-    cl_env.queue.finish()
-    assert np.allclose(data_out, data_in)
-
-    data_in = npw.asrealarray(np.random.random(resolutionT))
-    data_out = npw.empty_like(data_in)
-    cl.enqueue_copy(cl_env.queue, data_gpu_in, data_in)
-    cl.enqueue_copy(cl_env.queue, data_gpu_out, data_out)
-    cl_env.queue.finish()
-
-    copy_y(data_gpu_in, data_gpu_out)
-    cl_env.queue.finish()
-    cl.enqueue_copy(cl_env.queue, data_out, data_gpu_out)
-    cl_env.queue.finish()
-    assert np.allclose(data_out, data_in)
-
-    data_gpu_in.release()
-    data_gpu_out.release()
-
-
-def test_copy3D():
-    resolution = (64, 64, 64)
-    cl_env = get_opencl_environment()
-    vec = 4
-    src_copy = 'kernels/copy.cl'
-    build_options = ""
-    build_options += " -D NB_I=64 -D NB_II=64 -D NB_III=64"
-    build_options += " -D TILE_DIM_COPY=16"
-    build_options += " -D BLOCK_ROWS_COPY=8"
-    gwi = (int(resolution[0] / 4),
-           int(resolution[1] / 2),
-           int(resolution[2]))
-    lwi = (4, 8, 1)
-
-    # Build code
-    prg = cl_env.build_src(src_copy, build_options, vec)
-    init_copy = OpenClKernelLauncher(prg.copy, cl_env.queue, gwi, lwi)
-
-    data_in = npw.asrealarray(np.random.random(resolution))
-    data_out = npw.empty_like(data_in)
-    data_gpu_in = cl.Buffer(cl_env.ctx,
-                            cl.mem_flags.READ_WRITE,
-                            size=data_in.nbytes)
-    data_gpu_out = cl.Buffer(cl_env.ctx,
-                             cl.mem_flags.READ_WRITE,
-                             size=data_out.nbytes)
-    cl.enqueue_copy(cl_env.queue, data_gpu_in, data_in)
-
-    cl_env.queue.finish()
-    init_copy(data_gpu_in, data_gpu_out)
-    cl_env.queue.finish()
-    cl.enqueue_copy(cl_env.queue, data_out, data_gpu_out)
-    cl_env.queue.finish()
-    assert np.allclose(data_out, data_in)
-
-    data_gpu_in.release()
-    data_gpu_out.release()
-
-
-def test_copy3D_rect():
-    resolution_x = (16, 32, 64)
-    resolution_y = (32, 16, 64)
-    resolution_z = (64, 16, 32)
-    cl_env = get_opencl_environment()
-    vec = 4
-    src_copy = 'kernels/copy.cl'
-
-    build_options = ""
-    build_options += " -D NB_I=16 -D NB_II=32 -D NB_III=64"
-    build_options += " -D TILE_DIM_COPY=16"
-    build_options += " -D BLOCK_ROWS_COPY=8"
-    gwi = (int(resolution_x[0] / 4),
-           int(resolution_x[1] / 2),
-           int(resolution_x[2]))
-    lwi = (4, 8, 1)
-    prg = cl_env.build_src(src_copy, build_options, vec)
-    init_copy_x = OpenClKernelLauncher(prg.copy, cl_env.queue, gwi, lwi)
-
-    build_options = ""
-    build_options += " -D NB_I=32 -D NB_II=16 -D NB_III=64"
-    build_options += " -D TILE_DIM_COPY=16"
-    build_options += " -D BLOCK_ROWS_COPY=8"
-    gwi = (int(resolution_x[1] / 4),
-           int(resolution_x[0] / 2),
-           int(resolution_x[2]))
-    lwi = (4, 8, 1)
-    prg = cl_env.build_src(src_copy, build_options, vec)
-    init_copy_y = OpenClKernelLauncher(prg.copy, cl_env.queue, gwi, lwi)
-
-    build_options = ""
-    build_options += " -D NB_I=64 -D NB_II=16 -D NB_III=32"
-    build_options += " -D TILE_DIM_COPY=16"
-    build_options += " -D BLOCK_ROWS_COPY=8"
-    gwi = (int(resolution_x[2] / 4),
-           int(resolution_x[0] / 2),
-           int(resolution_x[1]))
-    lwi = (4, 8, 1)
-    prg = cl_env.build_src(src_copy, build_options, vec)
-    init_copy_z = OpenClKernelLauncher(prg.copy, cl_env.queue, gwi, lwi)
-
-    data_in = npw.asrealarray(np.random.random(resolution_x))
-    data_out = np.empty_like(data_in)
-    data_gpu_in = cl.Buffer(cl_env.ctx,
-                            cl.mem_flags.READ_WRITE,
-                            size=data_in.nbytes)
-    data_gpu_out = cl.Buffer(cl_env.ctx,
-                             cl.mem_flags.READ_WRITE,
-                             size=data_out.nbytes)
-    cl.enqueue_copy(cl_env.queue, data_gpu_in, data_in)
-    cl.enqueue_copy(cl_env.queue, data_gpu_out, data_out)
-
-    cl_env.queue.finish()
-    init_copy_x(data_gpu_in, data_gpu_out)
-    cl_env.queue.finish()
-    cl.enqueue_copy(cl_env.queue, data_out, data_gpu_out)
-    cl_env.queue.finish()
-    assert np.allclose(data_out, data_in)
-
-    data_in = npw.asrealarray(np.random.random(resolution_y))
-    data_out = npw.empty_like(data_in)
-    cl.enqueue_copy(cl_env.queue, data_gpu_in, data_in)
-    cl.enqueue_copy(cl_env.queue, data_gpu_out, data_out)
-    cl_env.queue.finish()
-    init_copy_y(data_gpu_in, data_gpu_out)
-    cl_env.queue.finish()
-    cl.enqueue_copy(cl_env.queue, data_out, data_gpu_out)
-    cl_env.queue.finish()
-    assert np.allclose(data_out, data_in)
-
-    data_in = npw.asrealarray(np.random.random(resolution_z))
-    data_out = npw.empty_like(data_in)
-    cl.enqueue_copy(cl_env.queue, data_gpu_in, data_in)
-    cl.enqueue_copy(cl_env.queue, data_gpu_out, data_out)
-    cl_env.queue.finish()
-    init_copy_z(data_gpu_in, data_gpu_out)
-    cl_env.queue.finish()
-    cl.enqueue_copy(cl_env.queue, data_out, data_gpu_out)
-    cl_env.queue.finish()
-    assert np.allclose(data_out, data_in)
-
-    data_gpu_in.release()
-    data_gpu_out.release()
diff --git a/hysop/old/gpu.old/tests/test_gpu_advection_null_velocity.py b/hysop/old/gpu.old/tests/test_gpu_advection_null_velocity.py
deleted file mode 100644
index d904e3c19f6b74fe23f85917b3958e1f22c8d3c5..0000000000000000000000000000000000000000
--- a/hysop/old/gpu.old/tests/test_gpu_advection_null_velocity.py
+++ /dev/null
@@ -1,436 +0,0 @@
-"""Testing advection kernels with null velocity.
-"""
-from hysop.domain.box import Box
-from hysop.fields.continuous_field import Field
-from hysop.operator.advection import Advection
-from hysop.constants import np, HYSOP_REAL
-from hysop.problem.simulation import Simulation
-from hysop.methods import TimeIntegrator, Interpolation, Remesh, \
-    Support, Splitting, Precision
-from hysop.numerics.odesolvers import RK2
-from hysop.numerics.interpolation.interpolation import Linear
-from hysop.numerics.remeshing import L2_1, L4_2, L6_3, M8Prime
-from hysop.tools.parameters import Discretization
-from hysop.tools.numpywrappers import npw
-from hysop.problem.simulation import O2, O2FULLHALF
-
-
-d2d = Discretization([33, 33])
-d3d = Discretization([17, 17, 17])
-m1k = {TimeIntegrator: RK2, Interpolation: Linear,
-       Remesh: L4_2, Support: 'gpu_1k', Splitting: O2,
-       Precision: HYSOP_REAL}
-m2k = {TimeIntegrator: RK2, Interpolation: Linear,
-       Remesh: L4_2, Support: 'gpu_2k', Splitting: O2,
-       Precision: HYSOP_REAL}
-
-
-def run_advection(discr, vector_field, method=None):
-    """Create advection operator, ref operator
-    and fields, run scales and python advection,
-    compare results.
-
-    Parameters
-    ----------
-    dicr : :class:`~hysop.tools.parameters.Discretization`
-        chosen discretization for operator --> set domain dimension
-    vector_field: bool
-        True to advect a vector field, else scalar field.
-    method : dictionnary
-        Set scales remeshing type.
-    """
-    dimension = len(discr.resolution)
-    box = Box(length=[1., ] * dimension, origin=[0., ] * dimension)
-    scal = Field(domain=box, name='Scalar', is_vector=vector_field)
-    scal_ref = Field(domain=box, name='Scalar_ref', is_vector=vector_field)
-    velo = Field(domain=box, name='Velocity',
-                 formula=lambda x, y, z, t: (0., 0., 0.), is_vector=True,
-                 vectorize_formula=True)
-    # build gpu advection
-    if method is None:
-        method = m1k
-
-    advec = Advection(velocity=velo,
-                      advected_fields=scal,
-                      discretization=discr,
-                      method=method)
-    advec.discretize()
-    advec.setup()
-    
-    # Get and randomize discrete fields
-    topo = advec.advected_fields_topology()
-    scal_d = scal.randomize(topo)
-    assert (velo.norm(topo) == 0).all()
-    ic = topo.mesh.compute_index
-    # copy data for reference
-    scal_ref.copy(scal, topo)
-    scal_ref_d = scal_ref.discretize(topo)
-    topo_velo = advec.velocity_topology()
-    velo_d = velo.discretize(topo_velo)
-    # transfer data to gpu
-    scal_d.toDevice()
-    velo_d.toDevice()
-    scal_d.wait()
-    velo_d.wait()
-
-    advec.apply(Simulation(start=0., end=0.1, nb_iter=1))
-    # Get data back
-    scal_d.toHost()
-    scal_d.wait()
-    advec.finalize()
-    for d in xrange(len(scal_d.data)):
-        assert np.allclose(scal_ref_d.data[d][ic], scal_d.data[d][ic])
-
-
-def test_2D_m6_1k():
-    """Test M6 remeshing formula in 2D, 1 kernel,
-    o2 splitting.
-    """
-    run_advection(d2d, False, m1k)
-
-
-# def test_2D_m6_2k():
-#     """Test M6 remeshing formula in 2D, 2 kernels,
-#     o2 splitting.
-#     """
-#     run_advection(d2d, False, m2k)
-
-
-# def test_2D_m6_1k_sFH():
-#     """Test M6 remeshing formula in 2D, 1 kernel,
-#     o2 full-half splitting.
-#     """
-#     meth = {TimeIntegrator: RK2, Interpolation: Linear,
-#             Remesh: L4_2, Support: 'gpu_1k', Splitting: O2FULLHALF,
-#             Precision: HYSOP_REAL}
-#     run_advection(d2d, False, meth)
-
-
-# def test_2D_m6_2k_sFH():
-#     """Test M6 remeshing formula in 2D, 2 kernels,
-#     o2 full-half splitting.
-#     """
-#     meth = {TimeIntegrator: RK2, Interpolation: Linear,
-#             Remesh: L4_2, Support: 'gpu_2k', Splitting: O2FULLHALF,
-#             Precision: HYSOP_REAL}
-#     run_advection(d2d, False, meth)
-
-
-# def test_3D_m6_1k():
-#     """Test M6 remeshing formula in 3D, 1 kernel,
-#     o2 splitting.
-#     """
-#     meth = {TimeIntegrator: RK2, Interpolation: Linear,
-#             Remesh: L4_2, Support: 'gpu_1k', Splitting: O2,
-#             Precision: HYSOP_REAL}
-#     run_advection(d3d, False, meth)
-
-
-# def test_3D_m6_2k():
-#     """Test M6 remeshing formula in 3D, 2 kernels,
-#     o2 splitting.
-#     """
-#     meth = {TimeIntegrator: RK2, Interpolation: Linear,
-#             Remesh: L4_2, Support: 'gpu_2k', Splitting: O2,
-#             Precision: HYSOP_REAL}
-#     run_advection(d3d, False, meth)
-
-
-# def test_3D_m6_1k_sFH():
-#     """Test M6 remeshing formula in 3D, 1 kernel,
-#     o2 fh splitting.
-#     """
-#     meth = {TimeIntegrator: RK2, Interpolation: Linear,
-#             Remesh: L4_2, Support: 'gpu_1k', Splitting: O2FULLHALF,
-#             Precision: HYSOP_REAL}
-#     run_advection(d3d, False, meth)
-
-
-# def test_3D_m6_2k_sFH():
-#     """Test M6 remeshing formula in 3D, 2 kernels,
-#     o2 fh splitting.
-#     """
-#     meth = {TimeIntegrator: RK2, Interpolation: Linear,
-#             Remesh: L4_2, Support: 'gpu_2k', Splitting: O2FULLHALF,
-#             Precision: HYSOP_REAL}
-#     run_advection(d3d, False, meth)
-
-
-# def test_2D_m4_1k():
-#     """Test M4 remeshing formula in 2D, 1 kernel,
-#     o2 splitting.
-#     """
-#     meth = {TimeIntegrator: RK2, Interpolation: Linear,
-#             Remesh: L2_1, Support: 'gpu_1k', Splitting: O2,
-#             Precision: HYSOP_REAL}
-#     run_advection(d2d, False, meth)
-
-
-# def test_2D_m4_2k():
-#     """Test M4 remeshing formula in 2D, 2 kernels,
-#     o2 splitting.
-#     """
-#     meth = {TimeIntegrator: RK2, Interpolation: Linear,
-#             Remesh: L2_1, Support: 'gpu_2k', Splitting: O2,
-#             Precision: HYSOP_REAL}
-#     run_advection(d2d, False, meth)
-
-
-# def test_2D_m4_1k_sFH():
-#     """Test M4 remeshing formula in 2D, 1 kernel,
-#     o2 full-half splitting.
-#     """
-#     meth = {TimeIntegrator: RK2, Interpolation: Linear,
-#             Remesh: L2_1, Support: 'gpu_1k', Splitting: O2FULLHALF,
-#             Precision: HYSOP_REAL}
-#     run_advection(d2d, False, meth)
-
-
-# def test_2D_m4_2k_sFH():
-#     """Test M4 remeshing formula in 2D, 2 kernels,
-#     o2 full-half splitting.
-#     """
-#     meth = {TimeIntegrator: RK2, Interpolation: Linear,
-#             Remesh: L2_1, Support: 'gpu_2k', Splitting: O2FULLHALF,
-#             Precision: HYSOP_REAL}
-#     run_advection(d2d, False, meth)
-
-
-# def test_3D_m4_1k():
-#     """Test M4 remeshing formula in 3D, 1 kernel,
-#     o2 splitting.
-#     """
-#     meth = {TimeIntegrator: RK2, Interpolation: Linear,
-#             Remesh: L2_1, Support: 'gpu_1k', Splitting: O2,
-#             Precision: HYSOP_REAL}
-#     run_advection(d3d, False, meth)
-
-
-# def test_3D_m4_2k():
-#     """Test M4 remeshing formula in 3D, 2 kernels,
-#     o2 splitting.
-#     """
-#     meth = {TimeIntegrator: RK2, Interpolation: Linear,
-#             Remesh: L2_1, Support: 'gpu_2k', Splitting: O2,
-#             Precision: HYSOP_REAL}
-#     run_advection(d3d, False, meth)
-
-
-# def test_3D_m4_1k_sFH():
-#     """Test M4 remeshing formula in 3D, 1 kernel,
-#     o2 fh splitting.
-#     """
-#     meth = {TimeIntegrator: RK2, Interpolation: Linear,
-#             Remesh: L2_1, Support: 'gpu_1k', Splitting: O2FULLHALF,
-#             Precision: HYSOP_REAL}
-#     run_advection(d3d, False, meth)
-
-
-# def test_3D_m4_2k_sFH():
-#     """Test M4 remeshing formula in 3D, 2 kernels,
-#     o2 fh splitting.
-#     """
-#     meth = {TimeIntegrator: RK2, Interpolation: Linear,
-#             Remesh: L2_1, Support: 'gpu_2k', Splitting: O2FULLHALF,
-#             Precision: HYSOP_REAL}
-#     run_advection(d3d, False, meth)
-
-
-# def test_2D_m8_1k():
-#     """Test M8 remeshing formula in 2D, 1 kernel,
-#     o2 splitting.
-#     """
-#     meth = {TimeIntegrator: RK2, Interpolation: Linear,
-#             Remesh: M8Prime, Support: 'gpu_1k', Splitting: O2,
-#             Precision: HYSOP_REAL}
-#     run_advection(d2d, False, meth)
-
-
-# def test_2D_m8_2k():
-#     """Test M8 remeshing formula in 2D, 2 kernels,
-#     o2 splitting.
-#     """
-#     meth = {TimeIntegrator: RK2, Interpolation: Linear,
-#             Remesh: M8Prime, Support: 'gpu_2k', Splitting: O2,
-#             Precision: HYSOP_REAL}
-#     run_advection(d2d, False, meth)
-
-
-# def test_2D_m8_1k_sFH():
-#     """Test M8 remeshing formula in 2D, 1 kernel,
-#     o2 full-half splitting.
-#     """
-#     meth = {TimeIntegrator: RK2, Interpolation: Linear,
-#             Remesh: M8Prime, Support: 'gpu_1k', Splitting: O2FULLHALF,
-#             Precision: HYSOP_REAL}
-#     run_advection(d2d, False, meth)
-
-
-# def test_2D_m8_2k_sFH():
-#     """Test M8 remeshing formula in 2D, 2 kernels,
-#     o2 full-half splitting.
-#     """
-#     meth = {TimeIntegrator: RK2, Interpolation: Linear,
-#             Remesh: M8Prime, Support: 'gpu_2k', Splitting: O2FULLHALF,
-#             Precision: HYSOP_REAL}
-#     run_advection(d2d, False, meth)
-
-
-# def test_3D_m8_1k():
-#     """Test M8 remeshing formula in 3D, 1 kernel,
-#     o2 splitting.
-#     """
-#     meth = {TimeIntegrator: RK2, Interpolation: Linear,
-#             Remesh: M8Prime, Support: 'gpu_1k', Splitting: O2,
-#             Precision: HYSOP_REAL}
-#     run_advection(d3d, False, meth)
-
-
-# def test_3D_m8_2k():
-#     """Test M8 remeshing formula in 3D, 2 kernels,
-#     o2 splitting.
-#     """
-#     meth = {TimeIntegrator: RK2, Interpolation: Linear,
-#             Remesh: M8Prime, Support: 'gpu_2k', Splitting: O2,
-#             Precision: HYSOP_REAL}
-#     run_advection(d3d, False, meth)
-
-
-# def test_3D_m8_1k_sFH():
-#     """Test M8 remeshing formula in 3D, 1 kernel,
-#     o2 fh splitting.
-#     """
-#     meth = {TimeIntegrator: RK2, Interpolation: Linear,
-#             Remesh: M8Prime, Support: 'gpu_1k', Splitting: O2FULLHALF,
-#             Precision: HYSOP_REAL}
-#     run_advection(d3d, False, meth)
-
-
-# def test_3D_m8_2k_sFH():
-#     """Test M8 remeshing formula in 3D, 2 kernels,
-#     o2 fh splitting.
-#     """
-#     meth = {TimeIntegrator: RK2, Interpolation: Linear,
-#             Remesh: M8Prime, Support: 'gpu_2k', Splitting: O2FULLHALF,
-#             Precision: HYSOP_REAL}
-#     run_advection(d3d, False, meth)
-
-
-# def test_2D_l6_1k():
-#     """Test L6 remeshing formula in 2D, 1 kernel,
-#     o2 splitting.
-#     """
-#     meth = {TimeIntegrator: RK2, Interpolation: Linear,
-#             Remesh: L6_3, Support: 'gpu_1k', Splitting: O2,
-#             Precision: HYSOP_REAL}
-#     run_advection(d2d, False, meth)
-
-
-# def test_2D_l6_2k():
-#     """Test L6 remeshing formula in 2D, 2 kernels,
-#     o2 splitting.
-#     """
-#     meth = {TimeIntegrator: RK2, Interpolation: Linear,
-#             Remesh: L6_3, Support: 'gpu_2k', Splitting: O2,
-#             Precision: HYSOP_REAL}
-#     run_advection(d2d, False, meth)
-
-
-# def test_2D_l6_1k_sFH():
-#     """Test L6 remeshing formula in 2D, 1 kernel,
-#     o2 full-half splitting.
-#     """
-#     meth = {TimeIntegrator: RK2, Interpolation: Linear,
-#             Remesh: L6_3, Support: 'gpu_1k', Splitting: O2FULLHALF,
-#             Precision: HYSOP_REAL}
-#     run_advection(d2d, False, meth)
-
-
-# def test_2D_l6_2k_sFH():
-#     """Test L6 remeshing formula in 2D, 2 kernels,
-#     o2 full-half splitting.
-#     """
-#     meth = {TimeIntegrator: RK2, Interpolation: Linear,
-#             Remesh: L6_3, Support: 'gpu_2k', Splitting: O2FULLHALF,
-#             Precision: HYSOP_REAL}
-#     run_advection(d2d, False, meth)
-
-
-# def test_3D_l6_1k():
-#     """Test L6 remeshing formula in 3D, 1 kernel,
-#     o2 splitting.
-#     """
-#     meth = {TimeIntegrator: RK2, Interpolation: Linear,
-#             Remesh: L6_3, Support: 'gpu_1k', Splitting: O2,
-#             Precision: HYSOP_REAL}
-#     run_advection(d3d, False, meth)
-
-
-# def test_3D_l6_2k():
-#     """Test L6 remeshing formula in 3D, 2 kernels,
-#     o2 splitting.
-#     """
-#     meth = {TimeIntegrator: RK2, Interpolation: Linear,
-#             Remesh: L6_3, Support: 'gpu_2k', Splitting: O2,
-#             Precision: HYSOP_REAL}
-#     run_advection(d3d, False, meth)
-
-
-# def test_3D_l6_1k_sFH():
-#     """Test L6 remeshing formula in 3D, 1 kernel,
-#     o2 fh splitting.
-#     """
-#     meth = {TimeIntegrator: RK2, Interpolation: Linear,
-#             Remesh: L6_3, Support: 'gpu_1k', Splitting: O2FULLHALF,
-#             Precision: HYSOP_REAL}
-#     run_advection(d3d, False, meth)
-
-
-# def test_3D_l6_2k_sFH():
-#     """Test L6 remeshing formula in 3D, 2 kernels,
-#     o2 fh splitting.
-#     """
-#     meth = {TimeIntegrator: RK2, Interpolation: Linear,
-#             Remesh: L6_3, Support: 'gpu_2k', Splitting: O2FULLHALF,
-#             Precision: HYSOP_REAL}
-#     run_advection(d3d, False, meth)
-
-
-# def test_rectangular_domain2D():
-#     """Test remeshing formula in 2D, with different resolutions in each dir,
-#     o2 splitting.
-#     """
-#     meth = {TimeIntegrator: RK2, Interpolation: Linear,
-#             Remesh: L4_2, Support: 'gpu_1k', Splitting: O2,
-#             Precision: HYSOP_REAL}
-#     run_advection(Discretization([65, 33]), False, meth)
-
-
-# def test_rectangular_domain3D():
-#     """Test remeshing formula in 3D, with different resolutions in each dir,
-#     o2 splitting.
-#     """
-#     meth = {TimeIntegrator: RK2, Interpolation: Linear,
-#             Remesh: L4_2, Support: 'gpu_1k', Splitting: O2,
-#             Precision: HYSOP_REAL}
-#     run_advection(Discretization([65, 33, 17]), False, meth)
-
-
-# def test_vector_2D():
-#     """Test remeshing formula in 2D, advection of a vector field,
-#     o2 splitting.
-#     """
-#     meth = {TimeIntegrator: RK2, Interpolation: Linear,
-#             Remesh: L4_2, Support: 'gpu_1k', Splitting: O2,
-#             Precision: HYSOP_REAL}
-#     run_advection(Discretization([129, 129]), True, meth)
-
-
-# def test_vector_3D():
-#     """Test remeshing formula in 3D, advection of a vector field,
-#     o2 splitting.
-#     """
-#     meth = {TimeIntegrator: RK2, Interpolation: Linear,
-#             Remesh: L4_2, Support: 'gpu_1k', Splitting: O2,
-#             Precision: HYSOP_REAL}
-#     run_advection(d3d, False, meth)
diff --git a/hysop/old/gpu.old/tests/test_gpu_advection_random_velocity.py b/hysop/old/gpu.old/tests/test_gpu_advection_random_velocity.py
deleted file mode 100644
index 5b6375fd009d76b02d06fee4b46350c280f7ab84..0000000000000000000000000000000000000000
--- a/hysop/old/gpu.old/tests/test_gpu_advection_random_velocity.py
+++ /dev/null
@@ -1,874 +0,0 @@
-"""
-@file hysop.gpu.tests.test_advection_randomVelocity
-Testing advection kernels with a random velocity field.
-"""
-from hysop.domain.box import Box
-from hysop.fields.continuous_field import Field
-from hysop.operator.advection import Advection
-from hysop.constants import np
-from hysop.problem.simulation import Simulation
-from hysop.methods import TimeIntegrator, Interpolation, Remesh, \
-    Support, Splitting
-from hysop.numerics.integrators.runge_kutta2 import RK2
-from hysop.numerics.interpolation.interpolation import Linear
-from hysop.numerics.remeshing import L2_1, L4_2, M8Prime
-from hysop.tools.parameters import Discretization
-from hysop.tools.numpywrappers import npw
-
-
-def setup_2D():
-    box = Box(length=[1., 1.], origin=[0., 0.])
-    scal = Field(domain=box, name='Scalar')
-    velo = Field(domain=box, name='Velocity', is_vector=True)
-    return scal, velo
-
-
-def setup_3D():
-    box = Box(length=[1., 1., 1.], origin=[0., 0., 0.])
-    scal = Field(domain=box, name='Scalar')
-    velo = Field(domain=box, name='Velocity', is_vector=True)
-    return scal, velo
-
-
-def assertion_2D_withPython(scal, velo, advec, advec_py):
-    advec.discretize()
-    advec_py.discretize()
-    advec.setup()
-    advec_py.setup()
-
-    scal_d = scal.discrete_fields.values()[0]
-    velo_d = velo.discrete_fields.values()[0]
-    scal_d.data[0][...] = npw.asrealarray(
-        np.random.random(scal_d.data[0].shape))
-    velo_d.data[0][...] = np.asarray(
-        np.random.random(scal_d.data[0].shape)) / (2. * scal_d.resolution[0])
-    velo_d.data[1][...] = np.asarray(
-        np.random.random(scal_d.data[0].shape)) / (2. * scal_d.resolution[1])
-    scal_d.toDevice()
-    velo_d.toDevice()
-
-    advec_py.apply(Simulation(start=0., end=0.1, nb_iter=1))
-    advec.apply(Simulation(start=0., end=0.1, nb_iter=1))
-
-    py_res = scal_d.data[0].copy()
-    scal_d.toHost()
-
-    advec.finalize()
-    err_m = np.max(np.abs(py_res - scal_d.data[0]))
-    assert np.allclose(py_res, scal_d.data[0], rtol=1e-04, atol=1e-07), str(err_m)
-
-
-def assertion_3D_withPython(scal, velo, advec, advec_py):
-    advec.discretize()
-    advec_py.discretize()
-    advec.setup()
-    advec_py.setup()
-
-    scal_d = scal.discrete_fields.values()[0]
-    velo_d = velo.discrete_fields.values()[0]
-    scal_d.data[0][...] = npw.asrealarray(
-        np.random.random(scal_d.data[0].shape))
-    velo_d.data[0][...] = npw.zeros_like(
-        scal_d.data[0]) / (2. * scal_d.resolution[0])
-    velo_d.data[1][...] = npw.zeros_like(
-        scal_d.data[0]) / (2. * scal_d.resolution[1])
-    velo_d.data[2][...] = npw.zeros_like(
-        scal_d.data[0]) / (2. * scal_d.resolution[2])
-    scal_d.toDevice()
-    velo_d.toDevice()
-
-    advec_py.apply(Simulation(start=0., end=0.1, nb_iter=1))
-    advec.apply(Simulation(start=0., end=0.1, nb_iter=1))
-
-    py_res = scal_d.data[0].copy()
-    scal_d.toHost()
-
-    advec.finalize()
-    err_m = np.max(np.abs(py_res - scal_d.data[0]))
-    assert np.allclose(py_res, scal_d.data[0], rtol=1e-04, atol=1e-07), str(err_m)
-
-d2d = Discretization([33, 33])
-
-# M6 testing
-def test_2D_m6_1k():
-    """
-    Testing M6 remeshing formula in 2D, 1 kernel, simple precision,
-    o2 splitting.
-    """
-    scal, velo = setup_2D()
-
-    advec = Advection(velo, scal, discretization=d2d,
-                      method={TimeIntegrator: RK2,
-                              Interpolation: Linear,
-                              Remesh: L4_2,
-                              Support: 'gpu_2k',
-                              Splitting: 'o2'}
-                      )
-    advec_py = Advection(velo, scal, discretization=d2d,
-                         method={TimeIntegrator: RK2,
-                                 Interpolation: Linear,
-                                 Remesh: L4_2,
-                                 Support: '',
-                                 Splitting: 'o2'},
-                         )
-    assertion_2D_withPython(scal, velo, advec, advec_py)
-
-
-def test_2D_m6_2k():
-    """
-    Testing M6 remeshing formula in 2D, 2 kernel, simple precision,
-    o2 splitting.
-    """
-    scal, velo = setup_2D()
-
-    advec = Advection(velo, scal, discretization=d2d,
-                      method={TimeIntegrator: RK2,
-                              Interpolation: Linear,
-                              Remesh: L4_2,
-                              Support: 'gpu_2k',
-                              Splitting: 'o2'}
-                      )
-    advec_py = Advection(velo, scal, discretization=d2d,
-                         method={TimeIntegrator: RK2,
-                                 Interpolation: Linear,
-                                 Remesh: L4_2,
-                                 Support: '',
-                                 Splitting: 'o2'},
-                         )
-    assertion_2D_withPython(scal, velo, advec, advec_py)
-
-
-def test_2D_m6_1k_sFH():
-    """
-    Testing M6 remeshing formula in 2D, 1 kernel, simple precision,
-    o2_FullHalf splitting.
-    """
-    scal, velo = setup_2D()
-
-    advec = Advection(velo, scal, discretization=d2d,
-                      method={TimeIntegrator: RK2,
-                              Interpolation: Linear,
-                              Remesh: L4_2,
-                              Support: 'gpu_2k',
-                              Splitting: 'o2_FullHalf'}
-                      )
-    advec_py = Advection(velo, scal, discretization=d2d,
-                         method={TimeIntegrator: RK2,
-                                 Interpolation: Linear,
-                                 Remesh: L4_2,
-                                 Support: '',
-                                 Splitting: 'o2_FullHalf'}
-                         )
-    assertion_2D_withPython(scal, velo, advec, advec_py)
-
-
-def test_2D_m6_2k_sFH():
-    """
-    Testing M6 remeshing formula in 2D, 2 kernel, simple precision,
-    o2_FullHalf splitting.
-    """
-    scal, velo = setup_2D()
-
-    advec = Advection(velo, scal, discretization=d2d,
-                      method={TimeIntegrator: RK2,
-                              Interpolation: Linear,
-                              Remesh: L4_2,
-                              Support: 'gpu_2k',
-                              Splitting: 'o2_FullHalf'}
-                      )
-    advec_py = Advection(velo, scal, discretization=d2d,
-                         method={TimeIntegrator: RK2,
-                                 Interpolation: Linear,
-                                 Remesh: L4_2,
-                                 Support: '',
-                                 Splitting: 'o2_FullHalf'}
-                         )
-    assertion_2D_withPython(scal, velo, advec, advec_py)
-
-
-def test_3D_m6_1k():
-    """
-    Testing M6 remeshing formula in 3D, 1 kernel, simple precision,
-    o2 splitting.
-    """
-    scal, velo = setup_3D()
-
-    advec = Advection(velo, scal, discretization=d3d,
-                      method={TimeIntegrator: RK2,
-                              Interpolation: Linear,
-                              Remesh: L4_2,
-                              Support: 'gpu_1k',
-                              Splitting: 'o2'}
-                      )
-    advec_py = Advection(velo, scal, discretization=d3d,
-                         method={TimeIntegrator: RK2,
-                                 Interpolation: Linear,
-                                 Remesh: L4_2,
-                                 Support: '',
-                                 Splitting: 'o2'},
-                         )
-    assertion_3D_withPython(scal, velo, advec, advec_py)
-
-
-def test_3D_m6_2k():
-    """
-    Testing M6 remeshing formula in 3D, 2 kernel, simple precision,
-    o2 splitting.
-    """
-    scal, velo = setup_3D()
-
-    advec = Advection(velo, scal, discretization=d3d,
-                      method={TimeIntegrator: RK2,
-                              Interpolation: Linear,
-                              Remesh: L4_2,
-                              Support: 'gpu_2k',
-                              Splitting: 'o2'}
-                      )
-    advec_py = Advection(velo, scal, discretization=d3d,
-                         method={TimeIntegrator: RK2,
-                                 Interpolation: Linear,
-                                 Remesh: L4_2,
-                                 Support: '',
-                                 Splitting: 'o2'}
-                         )
-    assertion_3D_withPython(scal, velo, advec, advec_py)
-
-
-def test_3D_m6_1k_sFH():
-    """
-    Testing M6 remeshing formula in 3D, 1 kernel, simple precision,
-    o2_FullHalf splitting.
-    """
-    scal, velo = setup_3D()
-
-    advec = Advection(velo, scal, discretization=d3d,
-                      method={TimeIntegrator: RK2,
-                              Interpolation: Linear,
-                              Remesh: L4_2,
-                              Support: 'gpu_1k',
-                              Splitting: 'o2_FullHalf'}
-                      )
-    advec_py = Advection(velo, scal, discretization=d3d,
-                         method={TimeIntegrator: RK2,
-                                 Interpolation: Linear,
-                                 Remesh: L4_2,
-                                 Support: '',
-                                 Splitting: 'o2_FullHalf'}
-                         )
-    assertion_3D_withPython(scal, velo, advec, advec_py)
-
-
-def test_3D_m6_2k_sFH():
-    """
-    Testing M6 remeshing formula in 3D, 2 kernel, simple precision,
-    o2_FullHalf splitting.
-    """
-    scal, velo = setup_3D()
-
-    advec = Advection(velo, scal, discretization=d3d,
-                      method={TimeIntegrator: RK2,
-                              Interpolation: Linear,
-                              Remesh: L4_2,
-                              Support: 'gpu_2k',
-                              Splitting: 'o2_FullHalf'}
-                      )
-    advec_py = Advection(velo, scal, discretization=d3d,
-                         method={TimeIntegrator: RK2,
-                                 Interpolation: Linear,
-                                 Remesh: L4_2,
-                                 Support: '',
-                                 Splitting: 'o2_FullHalf'}
-                         )
-    assertion_3D_withPython(scal, velo, advec, advec_py)
-
-
-# M4 testing
-def test_2D_m4_1k():
-    """
-    Testing M4 remeshing formula in 2D, 1 kernel, simple precision,
-    o2 splitting.
-    """
-    scal, velo = setup_2D()
-
-    advec = Advection(velo, scal, discretization=d2d,
-                      method={TimeIntegrator: RK2,
-                              Interpolation: Linear,
-                              Remesh: L2_1,
-                              Support: 'gpu_1k',
-                              Splitting: 'o2'}
-                      )
-    advec_py = Advection(velo, scal, discretization=d2d,
-                         method={TimeIntegrator: RK2,
-                                 Interpolation: Linear,
-                                 Remesh: L2_1,
-                                 Support: '',
-                                 Splitting: 'o2'},
-                         )
-    assertion_2D_withPython(scal, velo, advec, advec_py)
-
-
-def test_2D_m4_2k():
-    """
-    Testing M4 remeshing formula in 2D, 2 kernel, simple precision,
-    o2 splitting.
-    """
-    scal, velo = setup_2D()
-
-    advec = Advection(velo, scal, discretization=d2d,
-                      method={TimeIntegrator: RK2,
-                              Interpolation: Linear,
-                              Remesh: L2_1,
-                              Support: 'gpu_2k',
-                              Splitting: 'o2'}
-                      )
-    advec_py = Advection(velo, scal, discretization=d2d,
-                         method={TimeIntegrator: RK2,
-                                 Interpolation: Linear,
-                                 Remesh: L2_1,
-                                 Support: '',
-                                 Splitting: 'o2'},
-                         )
-    assertion_2D_withPython(scal, velo, advec, advec_py)
-
-
-def test_2D_m4_1k_sFH():
-    """
-    Testing M4 remeshing formula in 2D, 1 kernel, simple precision,
-    o2_FullHalf splitting.
-    """
-    scal, velo = setup_2D()
-
-    advec = Advection(velo, scal, discretization=d2d,
-                      method={TimeIntegrator: RK2,
-                              Interpolation: Linear,
-                              Remesh: L2_1,
-                              Support: 'gpu_1k',
-                              Splitting: 'o2_FullHalf'}
-                      )
-    advec_py = Advection(velo, scal, discretization=d2d,
-                         method={TimeIntegrator: RK2,
-                                 Interpolation: Linear,
-                                 Remesh: L2_1,
-                                 Support: '',
-                                 Splitting: 'o2_FullHalf'}
-                         )
-    assertion_2D_withPython(scal, velo, advec, advec_py)
-
-
-def test_2D_m4_2k_sFH():
-    """
-    Testing M4 remeshing formula in 2D, 2 kernel, simple precision,
-    o2_FullHalf splitting.
-    """
-    scal, velo = setup_2D()
-
-    advec = Advection(velo, scal, discretization=d2d,
-                      method={TimeIntegrator: RK2,
-                              Interpolation: Linear,
-                              Remesh: L2_1,
-                              Support: 'gpu_2k',
-                              Splitting: 'o2_FullHalf'}
-                      )
-    advec_py = Advection(velo, scal, discretization=d2d,
-                         method={TimeIntegrator: RK2,
-                                 Interpolation: Linear,
-                                 Remesh: L2_1,
-                                 Support: '',
-                                 Splitting: 'o2_FullHalf'}
-                         )
-    assertion_2D_withPython(scal, velo, advec, advec_py)
-
-
-def test_3D_m4_1k():
-    """
-    Testing M4 remeshing formula in 3D, 1 kernel, simple precision,
-    o2 splitting.
-    """
-    scal, velo = setup_3D()
-
-    advec = Advection(velo, scal, discretization=d3d,
-                      method={TimeIntegrator: RK2,
-                              Interpolation: Linear,
-                              Remesh: L2_1,
-                              Support: 'gpu_1k',
-                              Splitting: 'o2'}
-                      )
-    advec_py = Advection(velo, scal, discretization=d3d,
-                         method={TimeIntegrator: RK2,
-                                 Interpolation: Linear,
-                                 Remesh: L2_1,
-                                 Support: '',
-                                 Splitting: 'o2'},
-                         )
-    assertion_3D_withPython(scal, velo, advec, advec_py)
-
-
-def test_3D_m4_2k():
-    """
-    Testing M4 remeshing formula in 3D, 2 kernel, simple precision,
-    o2 splitting.
-    """
-    scal, velo = setup_3D()
-
-    advec = Advection(velo, scal, discretization=d3d,
-                      method={TimeIntegrator: RK2,
-                              Interpolation: Linear,
-                              Remesh: L2_1,
-                              Support: 'gpu_2k',
-                              Splitting: 'o2'}
-                      )
-    advec_py = Advection(velo, scal, discretization=d3d,
-                         method={TimeIntegrator: RK2,
-                                 Interpolation: Linear,
-                                 Remesh: L2_1,
-                                 Support: '',
-                                 Splitting: 'o2'},
-                         )
-    assertion_3D_withPython(scal, velo, advec, advec_py)
-
-
-def test_3D_m4_1k_sFH():
-    """
-    Testing M4 remeshing formula in 3D, 1 kernel, simple precision,
-    o2_FullHalf splitting.
-    """
-    scal, velo = setup_3D()
-
-    advec = Advection(velo, scal, discretization=d3d,
-                      method={TimeIntegrator: RK2,
-                              Interpolation: Linear,
-                              Remesh: L2_1,
-                              Support: 'gpu_1k',
-                              Splitting: 'o2_FullHalf'}
-                      )
-    advec_py = Advection(velo, scal, discretization=d3d,
-                         method={TimeIntegrator: RK2,
-                                 Interpolation: Linear,
-                                 Remesh: L2_1,
-                                 Support: '',
-                                 Splitting: 'o2_FullHalf'}
-                         )
-    assertion_3D_withPython(scal, velo, advec, advec_py)
-
-
-def test_3D_m4_2k_sFH():
-    """
-    Testing M4 remeshing formula in 3D, 2 kernel, simple precision,
-    o2_FullHalf splitting.
-    """
-    scal, velo = setup_3D()
-
-    advec = Advection(velo, scal, discretization=d3d,
-                      method={TimeIntegrator: RK2,
-                              Interpolation: Linear,
-                              Remesh: L2_1,
-                              Support: 'gpu_2k',
-                              Splitting: 'o2_FullHalf'}
-                      )
-    advec_py = Advection(velo, scal, discretization=d3d,
-                         method={TimeIntegrator: RK2,
-                                 Interpolation: Linear,
-                                 Remesh: L2_1,
-                                 Support: '',
-                                 Splitting: 'o2_FullHalf'}
-                         )
-    assertion_3D_withPython(scal, velo, advec, advec_py)
-
-
-# M8 testing
-def test_2D_m8_1k():
-    """
-    Testing M8 remeshing formula in 2D, 1 kernel, simple precision,
-    o2 splitting.
-    """
-    scal, velo = setup_2D()
-
-    advec = Advection(velo, scal, discretization=d2d,
-                      method={TimeIntegrator: RK2,
-                              Interpolation: Linear,
-                              Remesh: M8Prime,
-                              Support: 'gpu_1k',
-                              Splitting: 'o2'}
-                      )
-    advec_py = Advection(velo, scal, discretization=d2d,
-                         method={TimeIntegrator: RK2,
-                                 Interpolation: Linear,
-                                 Remesh: M8Prime,
-                                 Support: '',
-                                 Splitting: 'o2'},
-                         )
-    assertion_2D_withPython(scal, velo, advec, advec_py)
-
-
-def test_2D_m8_2k():
-    """
-    Testing M8 remeshing formula in 2D, 2 kernel, simple precision,
-    o2 splitting.
-    """
-    scal, velo = setup_2D()
-
-    advec = Advection(velo, scal, discretization=d2d,
-                      method={TimeIntegrator: RK2,
-                              Interpolation: Linear,
-                              Remesh: M8Prime,
-                              Support: 'gpu_2k',
-                              Splitting: 'o2'}
-                      )
-    advec_py = Advection(velo, scal, discretization=d2d,
-                         method={TimeIntegrator: RK2,
-                                 Interpolation: Linear,
-                                 Remesh: M8Prime,
-                                 Support: '',
-                                 Splitting: 'o2'},
-                         )
-    assertion_2D_withPython(scal, velo, advec, advec_py)
-
-
-def test_2D_m8_1k_sFH():
-    """
-    Testing M8 remeshing formula in 2D, 1 kernel, simple precision,
-    o2_FullHalf splitting.
-    """
-    scal, velo = setup_2D()
-
-    advec = Advection(velo, scal, discretization=d2d,
-                      method={TimeIntegrator: RK2,
-                              Interpolation: Linear,
-                              Remesh: M8Prime,
-                              Support: 'gpu_1k',
-                              Splitting: 'o2_FullHalf'}
-                      )
-    advec_py = Advection(velo, scal, discretization=d2d,
-                         method={TimeIntegrator: RK2,
-                                 Interpolation: Linear,
-                                 Remesh: M8Prime,
-                                 Support: '',
-                                 Splitting: 'o2_FullHalf'}
-                         )
-    assertion_2D_withPython(scal, velo, advec, advec_py)
-
-
-def test_2D_m8_2k_sFH():
-    """
-    Testing M8 remeshing formula in 2D, 2 kernel, simple precision,
-    o2_FullHalf splitting.
-    """
-    scal, velo = setup_2D()
-
-    advec = Advection(velo, scal, discretization=d2d,
-                      method={TimeIntegrator: RK2,
-                              Interpolation: Linear,
-                              Remesh: M8Prime,
-                              Support: 'gpu_2k',
-                              Splitting: 'o2_FullHalf'}
-                      )
-    advec_py = Advection(velo, scal, discretization=d2d,
-                         method={TimeIntegrator: RK2,
-                                 Interpolation: Linear,
-                                 Remesh: M8Prime,
-                                 Support: '',
-                                 Splitting: 'o2_FullHalf'}
-                         )
-    assertion_2D_withPython(scal, velo, advec, advec_py)
-
-d3d = Discretization([17, 17, 17])
-
-def test_3D_m8_1k():
-    """
-    Testing M8 remeshing formula in 3D, 1 kernel, simple precision,
-    o2 splitting.
-    """
-    scal, velo = setup_3D()
-
-    advec = Advection(velo, scal, discretization=d3d,
-                      method={TimeIntegrator: RK2,
-                              Interpolation: Linear,
-                              Remesh: M8Prime,
-                              Support: 'gpu_1k',
-                              Splitting: 'o2'}
-                      )
-    advec_py = Advection(velo, scal, discretization=d3d,
-                         method={TimeIntegrator: RK2,
-                                 Interpolation: Linear,
-                                 Remesh: M8Prime,
-                                 Support: '',
-                                 Splitting: 'o2'},
-                         )
-    assertion_3D_withPython(scal, velo, advec, advec_py)
-
-
-def test_3D_m8_2k():
-    """
-    Testing M8 remeshing formula in 3D, 2 kernel, simple precision,
-    o2 splitting.
-    """
-    scal, velo = setup_3D()
-
-    advec = Advection(velo, scal, discretization=d3d,
-                      method={TimeIntegrator: RK2,
-                              Interpolation: Linear,
-                              Remesh: M8Prime,
-                              Support: 'gpu_2k',
-                              Splitting: 'o2'}
-                      )
-    advec_py = Advection(velo, scal, discretization=d3d,
-                         method={TimeIntegrator: RK2,
-                                 Interpolation: Linear,
-                                 Remesh: M8Prime,
-                                 Support: '',
-                                 Splitting: 'o2'},
-                         )
-    assertion_3D_withPython(scal, velo, advec, advec_py)
-
-
-def test_3D_m8_1k_sFH():
-    """
-    Testing M8 remeshing formula in 3D, 1 kernel, simple precision,
-    o2_FullHalf splitting.
-    """
-    scal, velo = setup_3D()
-
-    advec = Advection(velo, scal, discretization=d3d,
-                      method={TimeIntegrator: RK2,
-                              Interpolation: Linear,
-                              Remesh: M8Prime,
-                              Support: 'gpu_1k',
-                              Splitting: 'o2_FullHalf'}
-                      )
-    advec_py = Advection(velo, scal, discretization=d3d,
-                         method={TimeIntegrator: RK2,
-                                 Interpolation: Linear,
-                                 Remesh: M8Prime,
-                                 Support: '',
-                                 Splitting: 'o2_FullHalf'}
-                         )
-    assertion_3D_withPython(scal, velo, advec, advec_py)
-
-
-def test_3D_m8_2k_sFH():
-    """
-    Testing M8 remeshing formula in 3D, 2 kernel, simple precision,
-    o2_FullHalf splitting.
-    """
-    scal, velo = setup_3D()
-
-    advec = Advection(velo, scal, discretization=d3d,
-                      method={TimeIntegrator: RK2,
-                              Interpolation: Linear,
-                              Remesh: M8Prime,
-                              Support: 'gpu_2k',
-                              Splitting: 'o2_FullHalf'}
-                      )
-    advec_py = Advection(velo, scal, discretization=d3d,
-                         method={TimeIntegrator: RK2,
-                                 Interpolation: Linear,
-                                 Remesh: M8Prime,
-                                 Support: '',
-                                 Splitting: 'o2_FullHalf'}
-                         )
-    assertion_3D_withPython(scal, velo, advec, advec_py)
-
-
-def test_rectangular_domain2D():
-    box = Box(length=[1., 1.], origin=[0., 0.])
-    scal = Field(domain=box, name='Scalar')
-    velo = Field(domain=box, name='Velocity', is_vector=True)
-
-    advec = Advection(velo, scal, discretization=Discretization([65, 33]),
-                      method={TimeIntegrator: RK2,
-                              Interpolation: Linear,
-                              Remesh: L4_2,
-                              Support: 'gpu_1k',
-                              Splitting: 'o2'}
-                      )
-    advec_py = Advection(velo, scal, discretization=Discretization([65, 33]),
-                         method={TimeIntegrator: RK2,
-                                 Interpolation: Linear,
-                                 Remesh: L4_2,
-                                 Support: '',
-                                 Splitting: 'o2'},
-                         )
-    advec.discretize()
-    advec_py.discretize()
-    advec.setup()
-    advec_py.setup()
-
-    scal_d = scal.discrete_fields.values()[0]
-    velo_d = velo.discrete_fields.values()[0]
-    scal_d.data[0][...] = npw.asrealarray(np.random.random(scal_d.data[0].shape))
-    velo_d.data[0][...] = np.asarray(
-        np.random.random(velo_d.data[0].shape)) / (2. * scal_d.resolution[0])
-    velo_d.data[1][...] = np.asarray(
-        np.random.random(velo_d.data[1].shape)) / (2. * scal_d.resolution[1])
-    scal_d.toDevice()
-    velo_d.toDevice()
-
-    advec_py.apply(Simulation(start=0., end=0.1, nb_iter=1))
-    advec.apply(Simulation(start=0., end=0.1, nb_iter=1))
-
-    py_res = scal_d.data[0].copy()
-    scal_d.toHost()
-
-    err_m = np.max(np.abs(py_res - scal_d.data[0]))
-    assert np.allclose(py_res, scal_d.data[0], rtol=1e-04, atol=1e-07), str(err_m)
-    advec.finalize()
-
-
-def test_rectangular_domain3D():
-    box = Box(length=[1., 1., 1.], origin=[0., 0., 0.])
-    scal = Field(domain=box, name='Scalar')
-    velo = Field(domain=box, name='Velocity', is_vector=True)
-
-    advec = Advection(velo, scal, discretization=Discretization([65, 33, 17]),
-                      method={TimeIntegrator: RK2,
-                              Interpolation: Linear,
-                              Remesh: L4_2,
-                              Support: 'gpu_1k',
-                              Splitting: 'o2'}
-                      )
-    advec_py = Advection(velo, scal, discretization=Discretization([65, 33, 17]),
-                         method={TimeIntegrator: RK2,
-                                 Interpolation: Linear,
-                                 Remesh: L4_2,
-                                 Support: '',
-                                 Splitting: 'o2'},
-                         )
-    advec.discretize()
-    advec_py.discretize()
-    advec.setup()
-    advec_py.setup()
-
-    scal_d = scal.discrete_fields.values()[0]
-    velo_d = velo.discrete_fields.values()[0]
-    scal_d.data[0][...] = npw.asrealarray(np.random.random(scal_d.data[0].shape))
-    velo_d.data[0][...] = npw.asrealarray(
-        np.random.random(velo_d.data[0].shape)) / (2. * scal_d.resolution[0])
-    velo_d.data[1][...] = npw.asrealarray(
-        np.random.random(velo_d.data[1].shape)) / (2. * scal_d.resolution[1])
-    velo_d.data[2][...] = npw.asrealarray(
-        np.random.random(velo_d.data[2].shape)) / (2. * scal_d.resolution[2])
-    scal_d.toDevice()
-    velo_d.toDevice()
-
-    advec_py.apply(Simulation(start=0., end=0.1, nb_iter=1))
-    advec.apply(Simulation(start=0., end=0.1, nb_iter=1))
-
-    py_res = scal_d.data[0].copy()
-    scal_d.toHost()
-
-    err_m = np.max(np.abs(py_res - scal_d.data[0]))
-    assert np.allclose(py_res, scal_d.data[0], rtol=1e-04, atol=1e-07), str(err_m) + " " + \
-        str(np.where(np.abs(py_res - scal_d.data[0]) > 1e-07+1e-04*np.abs(scal_d.data[0])))
-    advec.finalize()
-
-
-def test_vector_2D():
-    box = Box(length=[1., 1.], origin=[0., 0.])
-    scal = Field(domain=box, name='Scalar', is_vector=True)
-    velo = Field(domain=box, name='Velocity', is_vector=True)
-
-    advec = Advection(velo, scal, discretization=Discretization([129, 129]),
-                      method={TimeIntegrator: RK2,
-                              Interpolation: Linear,
-                              Remesh: L4_2,
-                              Support: 'gpu_1k',
-                              Splitting: 'o2'}
-                      )
-    advec_py = Advection(velo, scal, discretization=Discretization([129, 129]),
-                         method={TimeIntegrator: RK2,
-                                 Interpolation: Linear,
-                                 Remesh: L4_2,
-                                 Support: '',
-                                 Splitting: 'o2'},
-                         )
-    advec.discretize()
-    advec_py.discretize()
-    advec.setup()
-    advec_py.setup()
-
-    scal_d = scal.discrete_fields.values()[0]
-    velo_d = velo.discrete_fields.values()[0]
-    scal_d.data[0][...] = npw.asrealarray(np.random.random(scal_d.data[0].shape))
-    scal_d.data[1][...] = npw.asrealarray(np.random.random(scal_d.data[1].shape))
-    velo_d.data[0][...] = npw.asarray(
-        np.random.random(velo_d.data[0].shape)) / (2. * scal_d.resolution[0])
-    velo_d.data[1][...] = npw.asarray(
-        np.random.random(velo_d.data[1].shape)) / (2. * scal_d.resolution[1])
-    scal_d.toDevice()
-    velo_d.toDevice()
-
-    advec_py.apply(Simulation(start=0., end=0.1, nb_iter=1))
-    advec.apply(Simulation(start=0., end=0.1, nb_iter=1))
-
-    py_res_X = scal_d.data[0].copy()
-    py_res_Y = scal_d.data[1].copy()
-    scal_d.toHost()
-
-    err_m = np.max(np.abs(py_res_X - scal_d.data[0]))
-    assert np.allclose(py_res_X, scal_d.data[0], rtol=1e-04, atol=1e-07), str(err_m) + " " + \
-        str(np.where(np.abs(py_res_X - scal_d.data[0]) > 1e-07+1e-04*np.abs(scal_d.data[0])))
-    err_m = np.max(np.abs(py_res_Y - scal_d.data[1]))
-    assert np.allclose(py_res_Y, scal_d.data[1], rtol=1e-04, atol=1e-07), str(err_m) + " "+ \
-        str(np.where(np.abs(py_res_Y - scal_d.data[1]) > 1e-07+1e-04*np.abs(scal_d.data[1])))
-    advec.finalize()
-
-
-def test_vector_3D():
-    box = Box(length=[1., 1., 1.], origin=[0., 0., 0.])
-    scal = Field(domain=box, name='Scalar', is_vector=True)
-    velo = Field(domain=box, name='Velocity', is_vector=True)
-
-    advec = Advection(velo, scal, discretization=d3d,
-                      method={TimeIntegrator: RK2,
-                              Interpolation: Linear,
-                              Remesh: L4_2,
-                              Support: 'gpu_1k',
-                              Splitting: 'o2'}
-                      )
-    advec_py = Advection(velo, scal, discretization=d3d,
-                         method={TimeIntegrator: RK2,
-                                 Interpolation: Linear,
-                                 Remesh: L4_2,
-                                 Support: '',
-                                 Splitting: 'o2'},
-                         )
-    advec.discretize()
-    advec_py.discretize()
-    advec.setup()
-    advec_py.setup()
-
-    scal_d = scal.discrete_fields.values()[0]
-    velo_d = velo.discrete_fields.values()[0]
-    scal_d.data[0][...] = npw.asrealarray(
-        np.random.random(scal_d.data[0].shape))
-    scal_d.data[1][...] = npw.asrealarray(
-        np.random.random(scal_d.data[0].shape))
-    scal_d.data[2][...] = npw.asrealarray(
-        np.random.random(scal_d.data[0].shape))
-    velo_d.data[0][...] = npw.asrealarray(
-        np.random.random(velo_d.data[0].shape)) / (2. * scal_d.resolution[0])
-    velo_d.data[1][...] = npw.asrealarray(
-        np.random.random(velo_d.data[1].shape)) / (2. * scal_d.resolution[1])
-    velo_d.data[2][...] = npw.asrealarray(
-        np.random.random(velo_d.data[2].shape)) / (2. * scal_d.resolution[2])
-    scal_d.toDevice()
-    velo_d.toDevice()
-
-    advec_py.apply(Simulation(start=0., end=0.1, nb_iter=1))
-    advec.apply(Simulation(start=0., end=0.1, nb_iter=1))
-
-    py_res_X = scal_d.data[0].copy()
-    py_res_Y = scal_d.data[1].copy()
-    py_res_Z = scal_d.data[2].copy()
-    scal_d.toHost()
-
-    err_m = np.max(np.abs(py_res_X - scal_d.data[0]))
-    assert np.allclose(py_res_X, scal_d.data[0], rtol=1e-04, atol=1e-07), str(err_m) + " " + \
-        str(np.where(np.abs(py_res_X_- scal_d.data[0]) > 1e-07+1e-04*np.abs(scal_d.data[0])))
-    err_m = np.max(np.abs(py_res_Y - scal_d.data[0]))
-    assert np.allclose(py_res_Y, scal_d.data[1], rtol=1e-04, atol=1e-07), str(err_m) + " " + \
-        str(np.where(np.abs(py_res_Y_- scal_d.data[1]) > 1e-07+1e-04*np.abs(scal_d.data[1])))
-    err_m = np.max(np.abs(py_res_Z - scal_d.data[0]))
-    assert np.allclose(py_res_Z, scal_d.data[2], rtol=1e-04, atol=1e-07), str(err_m) + " " + \
-        str(np.where(np.abs(py_res_Z_- scal_d.data[2]) > 1e-07+1e-04*np.abs(scal_d.data[2])))
-    advec.finalize()
diff --git a/hysop/old/gpu.old/tests/test_gpu_multiresolution_filter.py b/hysop/old/gpu.old/tests/test_gpu_multiresolution_filter.py
deleted file mode 100755
index fe8dfea245df59282dc1e273951804caf4a16520..0000000000000000000000000000000000000000
--- a/hysop/old/gpu.old/tests/test_gpu_multiresolution_filter.py
+++ /dev/null
@@ -1,127 +0,0 @@
-from hysop.problem.simulation import Simulation
-from hysop.tools.parameters import Discretization, MPIParams
-from hysop.domain.box import Box
-from hysop.fields.continuous_field import Field
-from hysop.operator.multiresolution_filter import MultiresolutionFilter
-from hysop.tools.numpywrappers import npw
-import numpy as np
-from hysop.methods import Remesh, Support, ExtraArgs
-from hysop.methods import Rmsh_Linear, L2_1
-# In parallel we need to use as many threads as gpu
-from hysop.core.mpi import main_size, main_rank
-import pyopencl as cl
-n_gpu = len(cl.get_platforms()[0].get_devices(
-    device_type=cl.device_type.GPU))
-PROC_TASKS = [0, ] * main_size
-if main_rank < n_gpu:
-    PROC_TASKS[main_rank] = 1
-
-L = [1., 1., 1.]
-O = [0., 0., 0.]
-simu = Simulation(start=0., end=0.1, nb_iter=1)
-PY_COMPARE = True
-
-
-def func(res, x, y, z, t=0):
-    res[0][...] = np.cos(2. * np.pi * x) * \
-                  np.sin(2. * np.pi * y) * np.cos(4. * np.pi * z)
-    return res
-
-
-def test_filter_linear():
-    """This test compares the GPU linear filter with python implementation"""
-    box = Box(length=L, origin=O, proc_tasks=PROC_TASKS)
-    mpi_p = MPIParams(comm=box.task_comm, task_id=1)
-    f = Field(box, formula=func, is_vector=False, name='f1')
-    d_fine = Discretization([513, 513, 513])
-    d_coarse = Discretization([257, 257, 257], ghosts=[1, 1, 1])
-    op = MultiresolutionFilter(d_in=d_fine, d_out=d_coarse,
-                               variables={f: d_coarse},
-                               method={Remesh: Rmsh_Linear,
-                                       Support: 'gpu',
-                                       ExtraArgs: {'device_id': main_rank, }},
-                               mpi_params=mpi_p)
-    if box.is_on_task(1):
-        op.discretize()
-        op.setup()
-        topo_coarse = op.discrete_fields[f].topology
-        topo_fine = [t for t in f.discrete_fields.keys()
-                     if not t is topo_coarse][0]
-        f.initialize(topo=topo_fine)
-        f_out = f.discrete_fields[topo_coarse]
-        f_out.toDevice()
-        op.apply(simu)
-        f_out.toHost()
-        f_out.wait()
-        valid = [npw.zeros(f_out[0].shape), ]
-        valid = func(valid, *topo_coarse.mesh.coords)
-        assert np.allclose(valid[0][topo_coarse.mesh.compute_index],
-                           f_out[0][topo_coarse.mesh.compute_index],
-                           atol=1e-4, rtol=1e-3), \
-            np.max(np.abs(valid[0][topo_coarse.mesh.compute_index] -
-                          f_out[0][topo_coarse.mesh.compute_index]))
-        if PY_COMPARE:
-            f_py = Field(box, formula=func, name='fpy')
-            op_py = MultiresolutionFilter(d_in=d_fine, d_out=d_coarse,
-                                          variables={f_py: d_coarse},
-                                          method={Remesh: Rmsh_Linear, },
-                                          mpi_params=mpi_p)
-            op_py.discretize()
-            op_py.setup()
-            f_py.initialize(topo=topo_fine)
-            op_py.apply(simu)
-            valid = f_py.discrete_fields[topo_coarse]
-            assert np.allclose(valid[0][topo_coarse.mesh.compute_index],
-                               f_out[0][topo_coarse.mesh.compute_index]), \
-                np.max(np.abs(valid[0][topo_coarse.mesh.compute_index] -
-                              f_out[0][topo_coarse.mesh.compute_index]))
-
-
-def test_filter_L2_1():
-    """
-    This test compares the GPU L2_1 filter with the expected result
-    on the coarse grid and with python implementation.
-    """
-    box = Box(length=L, origin=O, proc_tasks=PROC_TASKS)
-    mpi_p = MPIParams(comm=box.task_comm, task_id=1)
-    f = Field(box, formula=func, name='f1')
-    d_fine = Discretization([513, 513, 513])
-    d_coarse = Discretization([257, 257, 257], ghosts=[2, 2, 2])
-    if box.is_on_task(1):
-        op = MultiresolutionFilter(d_in=d_fine, d_out=d_coarse,
-                                   variables={f: d_coarse},
-                                   method={Remesh: L2_1,
-                                           Support: 'gpu',
-                                           ExtraArgs: {'device_id': main_rank, }},
-                                   mpi_params=mpi_p)
-        op.discretize()
-        op.setup()
-        topo_coarse = op.discrete_fields[f].topology
-        topo_fine = [t for t in f.discrete_fields.keys()
-                     if not t is topo_coarse][0]
-        f.initialize(topo=topo_fine)
-        f_out = f.discrete_fields[topo_coarse]
-        f_out.toDevice()
-        op.apply(simu)
-        f_out.toHost()
-        f_out.wait()
-        valid = [npw.zeros(f_out[0].shape), ]
-        valid = func(valid, *topo_coarse.mesh.coords)
-        assert np.allclose(valid[0][topo_coarse.mesh.compute_index],
-                           f_out[0][topo_coarse.mesh.compute_index]), \
-            np.max(np.abs(valid[0][topo_coarse.mesh.compute_index] -
-                          f_out[0][topo_coarse.mesh.compute_index]))
-        if PY_COMPARE:
-            f_py = Field(box, formula=func, name='fpy')
-            op_py = MultiresolutionFilter(d_in=d_fine, d_out=d_coarse,
-                                          variables={f_py: d_coarse},
-                                          method={Remesh: L2_1, })
-            op_py.discretize()
-            op_py.setup()
-            f_py.initialize(topo=topo_fine)
-            op_py.apply(simu)
-            valid = f_py.discrete_fields[topo_coarse]
-            assert np.allclose(valid[0][topo_coarse.mesh.compute_index],
-                               f_out[0][topo_coarse.mesh.compute_index]), \
-                np.max(np.abs(valid[0][topo_coarse.mesh.compute_index] -
-                              f_out[0][topo_coarse.mesh.compute_index]))
diff --git a/hysop/old/gpu.old/tests/test_multiphase_baroclinic.py b/hysop/old/gpu.old/tests/test_multiphase_baroclinic.py
deleted file mode 100755
index a65665f72584e51d29af76e193b1e9d430dd5a75..0000000000000000000000000000000000000000
--- a/hysop/old/gpu.old/tests/test_multiphase_baroclinic.py
+++ /dev/null
@@ -1,133 +0,0 @@
-
-"""Testing baroclinic right hand side vector computing"""
-from hysop.problem.simulation import Simulation
-from hysop.tools.parameters import Discretization, MPIParams
-from hysop.domain.box import Box
-from hysop.fields.continuous_field import Field
-from hysop.operator.multiphase_baroclinic_rhs import MultiphaseBaroclinicRHS
-from hysop.numerics.finite_differences import FDC4, FDC2
-from hysop.tools.numpywrappers import npw
-import numpy as np
-from hysop.methods import Support, SpaceDiscretization, ExtraArgs
-from hysop.constants import HYSOP_REAL
-
-def test_baroclinic_rhs():
-    def func(res, x, y, z, t=0):
-        res[0][...] = np.cos(2. * np.pi * x) * \
-                      np.sin(2. * np.pi * y) * \
-                      np.cos(2. * np.pi * z)
-        return res
-
-    def grad_func(res, x, y, z, t=0):
-        res[0][...] = -2 * np.pi * np.sin(2. * np.pi * x) * \
-                      np.sin(2. * np.pi * y) * \
-                      np.cos(2. * np.pi * z)
-        res[1][...] = np.cos(2. * np.pi * x) * \
-                      2 * np.pi * np.cos(2. * np.pi * y)* \
-                      np.cos(2. * np.pi * z)
-        res[2][...] = np.cos(2. * np.pi * x) * \
-                      np.sin(2. * np.pi * y) * \
-                      -2 * np.pi * np.sin(2. * np.pi * z)
-        return res
-
-    def vfunc(res, x, y, z, t=0):
-        res[0][...] = np.sin(2. * np.pi * x)
-        res[1][...] = 2.5 * np.cos(2. * np.pi * y)
-        res[2][...] = -2. * np.sin(2. * np.pi * z)
-        return res
-
-    call_operator(func, grad_func, vfunc)
-
-
-def test_baroclinic_rhs_nonperiodic():
-    def func(res, x, y, z, t=0):
-        res[0][...] = x * y * z
-        return res
-
-    def grad_func(res, x, y, z, t=0):
-        """Values obtained for non periodic function with periodic FD scheme"""
-        res[2][...] = x * y
-        res[2][:,:,np.bitwise_or(z<2./256., z>253./256.)[0,0,:]] = x * y
-        res[2][:,:,np.bitwise_or(z<1./256., z>254./256.)[0,0,:]] = -127. * x * y
-        res[2][:,:,np.bitwise_or(z<2./256., z>253./256.)[0,0,:]] = 22.333333333333332 * x * y
-        res[2][:,:,np.bitwise_or(z<1./256., z>254./256.)[0,0,:]] = -148.33333333333334 * x * y
-
-        res[1][...] = x * z
-        res[1][:,np.bitwise_or(y<2./256., y>253./256.)[0,:,0],:] = x * z
-        res[1][:,np.bitwise_or(y<1./256., y>254./256.)[0,:,0],:] = -127. * x * z
-        res[1][:,np.bitwise_or(y<2./256., y>253./256.)[0,:,0],:] = 22.333333333333332 * x * z
-        res[1][:,np.bitwise_or(y<1./256., y>254./256.)[0,:,0],:] = -148.33333333333334 * x * z
-
-        res[0][...] = y * z
-        res[0][np.bitwise_or(x<2./256., x>253./256.)[:,0,0],:,:] = y * z
-        res[0][np.bitwise_or(x<1./256., x>254./256.)[:,0,0],:,:] = -127. * y * z
-        res[0][np.bitwise_or(x<2./256., x>253./256.)[:,0,0],:,:] = 22.333333333333332 * y * z
-        res[0][np.bitwise_or(x<1./256., x>254./256.)[:,0,0],:,:] = -148.33333333333334 * y * z
-        return res
-
-    def vfunc(res, x, y, z, t=0):
-        res[0][...] = np.sin(2. * np.pi * x)
-        res[1][...] = 2.5 * np.cos(2. * np.pi * y)
-        res[2][...] = -2. * np.sin(2. * np.pi * z)
-        return res
-
-    call_operator(func, grad_func, vfunc)
-
-
-def call_operator(func, grad_func, vfunc):
-    """Call the baroclinic rhs operator from given initialization functions"""
-    simu = Simulation(start=0.0, end=1.0, time_step=0.1, max_iter=1)
-    box = Box()
-    rhs = Field(box, is_vector=True, name='rhs')
-    gradp = Field(box, is_vector=True, formula=vfunc, name='gradp')
-    rho = Field(box, is_vector=False, formula=func, name='rho')
-    gradrho = Field(box, is_vector=True, formula=grad_func, name='gradrho')
-    gradp_fine = Field(box, is_vector=True, formula=vfunc, name='fine')
-    true_rhs = Field(box, is_vector=True, name='ref')
-    d_fine = Discretization([257, 257, 257])
-    d_coarse = Discretization([129, 129, 129], ghosts=[2, 2, 2])
-    op = MultiphaseBaroclinicRHS(rhs, rho, gradp,
-                                 variables={rhs: d_fine,
-                                            gradp: d_coarse,
-                                            rho: d_fine},
-                                 method={Support: 'gpu',
-                                         SpaceDiscretization: FDC4,
-                                         ExtraArgs: {'density_func': 'x', }})
-    op.discretize()
-    op.setup()
-    topo_coarse = op.discrete_fields[gradp].topology
-    topo_fine = op.discrete_fields[rho].topology
-    d_rhs = rhs.discrete_fields[topo_fine]
-    d_gradp = gradp.discrete_fields[topo_coarse]
-    d_rho = rho.discrete_fields[topo_fine]
-    rhs.initialize(topo=topo_fine)
-    gradp.initialize(topo=topo_coarse)
-    rho.initialize(topo=topo_fine)
-    op.apply(simu)
-    d_rhs.toHost()
-    d_rhs.wait()
-
-    gradrho.initialize(topo=topo_fine)
-    d_gradrho = gradrho.discrete_fields[topo_fine]
-    gradp_fine.initialize(topo=topo_fine)
-    d_gradp_fine = gradp_fine.discrete_fields[topo_fine]
-    true_rhs.initialize(topo=topo_fine)
-    d_true_rhs = true_rhs.discrete_fields[topo_fine]
-    d_true_rhs[0] = d_gradrho[2] * d_gradp_fine[1] - \
-        d_gradrho[1] * d_gradp_fine[2]
-    d_true_rhs[1] = d_gradrho[0] * d_gradp_fine[2] - \
-        d_gradrho[2] * d_gradp_fine[0]
-    d_true_rhs[2] = d_gradrho[1] * d_gradp_fine[0] - \
-        d_gradrho[0] * d_gradp_fine[1]
-
-    max_val = [np.max(np.abs(r)) for r in d_true_rhs]
-
-    print np.max(np.abs(d_true_rhs[0]-d_rhs[0]) / max_val[0])
-    print np.where((np.abs(d_true_rhs[0]-d_rhs[0]) / max_val[0]) > 0.4)
-    assert np.allclose(d_rhs[0] / max_val[0], d_true_rhs[0] / max_val[0],
-                       atol=1e-8 if HYSOP_REAL != np.float32 else 5e-4)
-    assert np.allclose(d_rhs[1] / max_val[1], d_true_rhs[1] / max_val[1],
-                       atol=1e-8 if HYSOP_REAL != np.float32 else 5e-4)
-    assert np.allclose(d_rhs[2] / max_val[2], d_true_rhs[2] / max_val[2],
-                       atol=1e-8 if HYSOP_REAL != np.float32 else 5e-4)
-
diff --git a/hysop/old/gpu.old/tests/test_opencl_environment.py b/hysop/old/gpu.old/tests/test_opencl_environment.py
deleted file mode 100644
index 995239ac2bbbd7174c255690120c396824805535..0000000000000000000000000000000000000000
--- a/hysop/old/gpu.old/tests/test_opencl_environment.py
+++ /dev/null
@@ -1,176 +0,0 @@
-"""Test hysop implementation of OpenCL basic functionnalities"""
-import numpy as np
-from hysop.backend.device.opencl.opencl_tools import get_opencl_environment, explore, OpenClEnvironment
-from hysop.constants import HYSOP_REAL
-FLOAT_GPU = np.float32
-from hysop.core.mpi import main_comm
-import pyopencl as cl
-
-
-def test_opencl_env_default():
-    """Test gpu tools. Just call get_opencl_environment and explore functions.
-    """
-    # Display devices info.
-    explore()
-    # Create default opencl env.
-    cl_env = get_opencl_environment()
-    assert isinstance(cl_env, OpenClEnvironment)
-    assert cl_env.device is not None
-    assert cl_env.ctx is not None
-    assert cl_env.queue is not None
-    assert cl_env.precision == HYSOP_REAL
-
-
-def test_opencl_env():
-    """Test gpu tools. Just call get_opencl_environment and explore functions.
-    """
-    # Display devices info.
-    explore()
-    # Create default opencl env.
-    comm = main_comm.Dup()
-    plt = cl.get_platforms()[-1]
-    device = plt.get_devices()[-1]
-    nb_platforms = len(cl.get_platforms())
-    nb_devices = len(plt.get_devices())
-    cl_env = get_opencl_environment(platform_id=nb_platforms - 1,
-                                    device_id=nb_devices - 1,
-                                    precision=FLOAT_GPU, comm=comm)
-    assert isinstance(cl_env, OpenClEnvironment)
-    assert cl_env.platform == plt
-    assert cl_env.device == device
-    assert cl_env.ctx is not None
-    assert cl_env.queue is not None
-    assert cl_env.precision == FLOAT_GPU
-
-
-def test_queue_unique_creation():
-    """
-    Testing that only one queue is created when multiples calls to get
-    an environment.
-    """
-    cl_env = get_opencl_environment()
-    cl_env_id = id(cl_env)
-    cl_envb = get_opencl_environment()
-    cl_envb_id = id(cl_envb)
-    assert cl_env_id == cl_envb_id
-
-
-def test_parse_src_expand_floatN():
-    """
-    """
-    import StringIO
-    cl_env = get_opencl_environment()
-    str_as_src = """
-    vstore__N__((float__N__)(gscal_loc[noBC_id(i+__NN__,nb_part)],
-    ), (i + gidY*WIDTH)/__N__, gscal);
-    """
-    parsed_str_as_src = """
-    vstore4((float4)(gscal_loc[noBC_id(i+0,nb_part)],""" + \
-        """gscal_loc[noBC_id(i+1,nb_part)],""" + \
-        """gscal_loc[noBC_id(i+2,nb_part)],gscal_loc[noBC_id(i+3,nb_part)]
-    ), (i + gidY*WIDTH)/4, gscal);
-    """
-    buf = StringIO.StringIO(str_as_src)
-    res = cl_env.parse_file(buf, n=4)
-    assert len(parsed_str_as_src) == len(res)
-    for s1, s2 in zip(parsed_str_as_src, res):
-        assert s1 == s2
-
-
-def test_parse_src_expand():
-    """
-    """
-    import StringIO
-    cl_env = get_opencl_environment()
-    str_as_src = """
-    gvelo_loc[noBC_id(i+__NN__,nb_part)] = v.s__NN__;
-    """
-    parsed_str_as_src = """
-    gvelo_loc[noBC_id(i+0,nb_part)] = v.s0;
-    gvelo_loc[noBC_id(i+1,nb_part)] = v.s1;
-    gvelo_loc[noBC_id(i+2,nb_part)] = v.s2;
-    gvelo_loc[noBC_id(i+3,nb_part)] = v.s3;
-    """
-    buf = StringIO.StringIO(str_as_src)
-    res = cl_env.parse_file(buf, n=4)
-    assert len(parsed_str_as_src) == len(res)
-    for s1, s2 in zip(parsed_str_as_src, res):
-        assert s1 == s2
-
-
-def test_parse_expand_remeshed_component():
-    """
-    """
-    import StringIO
-    cl_env = get_opencl_environment()
-    str_as_src = """
-    __kernel void advection_and_remeshing(__global const float* gvelo,
-                      __RCOMP_P__global const float* pscal__ID__,
-                      __RCOMP_P__global float* gscal__ID__,
-                      __local float* gvelo_loc,
-                      __RCOMP_P__local float* gscal_loc__ID__,
-                      float dt,float min_position, float dx)
-    {
-     __RCOMP_I gscal_loc__ID__[noBC_id(i)] = 0.0;
-      remesh(i, dx, invdx, s, p, __RCOMP_Pgscal_loc__ID__);
-      test(__RCOMP_Pgscal_loc__ID__, __RCOMP_Ppscal__ID__);
-     __RCOMP_I gscal__ID__[i + line_index] = gscal_loc__ID__[noBC_id(i)];
-     __RCOMP_I vstore__N__((float__N__)(gscal_loc__ID__[noBC_id(i+__NN__)],
-                   ), (i + line_index)/__N__, gscal__ID__);
-
-    """
-    parsed_str_as_src_2components = """
-    __kernel void advection_and_remeshing(__global const float* gvelo,
-                      """ + \
-        """__global const float* pscal0, __global const float* pscal1,
-                      __global float* gscal0, __global float* gscal1,
-                      __local float* gvelo_loc,
-                      __local float* gscal_loc0, __local float* gscal_loc1,
-                      float dt,float min_position, float dx)
-    {
-      gscal_loc0[noBC_id(i)] = 0.0; gscal_loc1[noBC_id(i)] = 0.0;
-      remesh(i, dx, invdx, s, p, gscal_loc0, gscal_loc1);
-      test(gscal_loc0, gscal_loc1, pscal0, pscal1);
-      gscal0[i + line_index] = gscal_loc0[noBC_id(i)]; """ + \
-        """gscal1[i + line_index] = gscal_loc1[noBC_id(i)];
-      vstore4((float4)(gscal_loc0[noBC_id(i+0)],""" + \
-        """gscal_loc0[noBC_id(i+1)],gscal_loc0[noBC_id(i+2)],""" + \
-        """gscal_loc0[noBC_id(i+3)]
-                   ), (i + line_index)/4, gscal0); """ + \
-        """vstore4((float4)(gscal_loc1[noBC_id(i+0)],""" + \
-        """gscal_loc1[noBC_id(i+1)],gscal_loc1[noBC_id(i+2)],""" + \
-        """gscal_loc1[noBC_id(i+3)]
-                   ), (i + line_index)/4, gscal1);
-
-    """
-    parsed_str_as_src_1components = """
-    __kernel void advection_and_remeshing(__global const float* gvelo,
-                      __global const float* pscal0,
-                      __global float* gscal0,
-                      __local float* gvelo_loc,
-                      __local float* gscal_loc0,
-                      float dt,float min_position, float dx)
-    {
-      gscal_loc0[noBC_id(i)] = 0.0;
-      remesh(i, dx, invdx, s, p, gscal_loc0);
-      test(gscal_loc0, pscal0);
-      gscal0[i + line_index] = gscal_loc0[noBC_id(i)];
-      vstore4((float4)(gscal_loc0[noBC_id(i+0)],""" + \
-        """gscal_loc0[noBC_id(i+1)],gscal_loc0[noBC_id(i+2)],""" + \
-        """gscal_loc0[noBC_id(i+3)]
-                   ), (i + line_index)/4, gscal0);
-
-    """
-    buf = StringIO.StringIO(str_as_src)
-    res = cl_env.parse_file(buf, n=4, nb_remesh_components=1)
-    print res
-    assert len(parsed_str_as_src_1components) == len(res)
-    for s1, s2 in zip(parsed_str_as_src_1components, res):
-        assert s1 == s2
-
-    buf = StringIO.StringIO(str_as_src)
-    res = cl_env.parse_file(buf, n=4, nb_remesh_components=2)
-    print res
-    assert len(parsed_str_as_src_2components) == len(res)
-    for s1, s2 in zip(parsed_str_as_src_2components, res):
-        assert s1 == s2
diff --git a/hysop/old/gpu.old/tests/test_transposition.py b/hysop/old/gpu.old/tests/test_transposition.py
deleted file mode 100644
index 34c7a39938265ede3e8caa5c5da8c2e56bc8b97d..0000000000000000000000000000000000000000
--- a/hysop/old/gpu.old/tests/test_transposition.py
+++ /dev/null
@@ -1,549 +0,0 @@
-"""
-@file hysop.gpu.tests.test_transposition
-Testing copy kernels.
-"""
-from hysop.backend.device.opencl import cl
-from hysop.constants import np
-from hysop.backend.device.opencl.opencl_tools import get_opencl_environment
-from hysop.backend.device.opencl.opencl_kernel import OpenClKernelLauncher
-from hysop.tools.numpywrappers import npw
-
-
-def _comparison(resolution, resolutionT,
-                transpose_f, transpose_b,
-                gwi, lwi, cl_env, axe=1):
-
-    data_in = npw.asrealarray(np.random.random(resolution))
-    data_out = npw.realempty(resolutionT)
-    data_out2 = npw.realempty(resolution)
-    data_gpu_in = cl.Buffer(cl_env.ctx,
-                            cl.mem_flags.READ_WRITE,
-                            size=data_in.nbytes)
-    data_gpu_out = cl.Buffer(cl_env.ctx,
-                             cl.mem_flags.READ_WRITE,
-                             size=data_out.nbytes)
-    data_gpu_out2 = cl.Buffer(cl_env.ctx,
-                              cl.mem_flags.READ_WRITE,
-                              size=data_out2.nbytes)
-    cl.enqueue_copy(cl_env.queue, data_gpu_in, data_in)
-    cl.enqueue_copy(cl_env.queue, data_gpu_out, data_out)
-    cl.enqueue_copy(cl_env.queue, data_gpu_out2, data_out2)
-    cl_env.queue.finish()
-
-    # gpu_out <- gpu_in.T
-    transpose_f(data_gpu_in, data_gpu_out)
-    cl_env.queue.finish()
-    cl.enqueue_copy(cl_env.queue, data_out, data_gpu_out)
-    cl_env.queue.finish()
-    assert np.allclose(data_out, data_in.swapaxes(0, axe))
-
-    # gpu_in <- gpu_out.T ( = gpu_in.T.T = gpu_in)
-    transpose_b(data_gpu_out, data_gpu_out2)
-    cl_env.queue.finish()
-    cl.enqueue_copy(cl_env.queue, data_out2, data_gpu_out2)
-    cl_env.queue.finish()
-    assert np.allclose(data_out2, data_in)
-
-    data_gpu_in.release()
-    data_gpu_out.release()
-    data_gpu_out2.release()
-
-
-def test_transposition_xy2D():
-    resolution = (256, 256)
-    cl_env = get_opencl_environment()
-    vec = 4
-    src_transpose_xy = 'kernels/transpose_xy.cl'
-    build_options = ""
-    build_options += " -D NB_I=256 -D NB_II=256"
-    build_options += " -D PADDING_XY=1"
-    build_options += " -D TILE_DIM_XY=32 -D BLOCK_ROWS_XY=8"
-    gwi = (int(resolution[0] / 4), int(resolution[1]) / 4)
-    lwi = (8, 8)
-    build_options += " -D NB_GROUPS_I=" + str((resolution[0] / 4) / lwi[0])
-    build_options += " -D NB_GROUPS_II=" + str((resolution[1] / 4) / lwi[1])
-
-    # Build code
-    prg = cl_env.build_src(src_transpose_xy, build_options, vec)
-    init_transpose_xy = OpenClKernelLauncher(
-        prg.transpose_xy, cl_env.queue, gwi, lwi)
-    _comparison(resolution, resolution,
-                init_transpose_xy, init_transpose_xy,
-                gwi, lwi, cl_env)
-
-
-def test_transposition_xy2D_noVec():
-    resolution = (256, 256)
-    cl_env = get_opencl_environment()
-    src_transpose_xy = 'kernels/transpose_xy_noVec.cl'
-    build_options = ""
-    build_options += " -D NB_I=256 -D NB_II=256"
-    build_options += " -D PADDING_XY=1"
-    build_options += " -D TILE_DIM_XY=32 -D BLOCK_ROWS_XY=8"
-    gwi = (int(resolution[0]), int(resolution[1]) / 4)
-    lwi = (32, 8)
-    build_options += " -D NB_GROUPS_I=" + str((resolution[0]) / lwi[0])
-    build_options += " -D NB_GROUPS_II=" + str((resolution[1] / 4) / lwi[1])
-
-    # Build code
-    prg = cl_env.build_src(src_transpose_xy, build_options)
-    init_transpose_xy = OpenClKernelLauncher(
-        prg.transpose_xy, cl_env.queue, gwi, lwi)
-    _comparison(resolution, resolution,
-                init_transpose_xy, init_transpose_xy,
-                gwi, lwi, cl_env)
-
-
-def test_transposition_xy2D_rect():
-    resolution = (512, 256)
-    resolutionT = (256, 512)
-    cl_env = get_opencl_environment()
-    vec = 4
-    src_transpose_xy = 'kernels/transpose_xy.cl'
-    build_options = ""
-    # Settings are taken from destination layout as current layout.
-    # gwi is computed form input layout (appears as transposed layout)
-    build_options += " -D NB_I=256 -D NB_II=512"
-    build_options += " -D TILE_DIM_XY=32 -D BLOCK_ROWS_XY=8 -D PADDING_XY=1"
-    gwi = (int(resolution[0] / 4),
-           int(resolution[1]) / 4)
-    lwi = (8, 8)
-    build_options += " -D NB_GROUPS_I=" + str((resolution[0] / 4) / lwi[0])
-    build_options += " -D NB_GROUPS_II=" + str((resolution[1] / 4) / lwi[1])
-    prg = cl_env.build_src(src_transpose_xy, build_options, vec)
-    init_transpose_xy_x = OpenClKernelLauncher(prg.transpose_xy,
-                                         cl_env.queue,
-                                         gwi, lwi)
-
-    build_options = ""
-    build_options += " -D NB_I=512 -D NB_II=256"
-    build_options += " -D TILE_DIM_XY=32 -D BLOCK_ROWS_XY=8 -D PADDING_XY=1"
-    gwi = (int(resolution[1] / 4),
-           int(resolution[0]) / 4)
-    lwi = (8, 8)
-    build_options += " -D NB_GROUPS_I=" + str((resolution[1] / 4) / lwi[0])
-    build_options += " -D NB_GROUPS_II=" + str((resolution[0] / 4) / lwi[1])
-    prg = cl_env.build_src(src_transpose_xy, build_options, vec)
-    init_transpose_xy_y = OpenClKernelLauncher(prg.transpose_xy,
-                                         cl_env.queue,
-                                         gwi, lwi)
-    _comparison(resolution, resolutionT,
-                init_transpose_xy_x, init_transpose_xy_y,
-                gwi, lwi, cl_env)
-
-
-def test_transposition_xy2D_noVec_rect():
-    resolution = (512, 256)
-    resolutionT = (256, 512)
-    cl_env = get_opencl_environment()
-    vec = 4
-    src_transpose_xy = 'kernels/transpose_xy_noVec.cl'
-    build_options = ""
-    # Settings are taken from destination layout as current layout.
-    # gwi is computed form input layout (appears as transposed layout)
-    build_options += " -D NB_I=256 -D NB_II=512"
-    build_options += " -D TILE_DIM_XY=32 -D BLOCK_ROWS_XY=8 -D PADDING_XY=1"
-    gwi = (int(resolution[0]),
-           int(resolution[1]) / 4)
-    lwi = (32, 8)
-    build_options += " -D NB_GROUPS_I=" + str((resolution[0]) / lwi[0])
-    build_options += " -D NB_GROUPS_II=" + str((resolution[1] / 4) / lwi[1])
-    prg = cl_env.build_src(src_transpose_xy, build_options, vec)
-    init_transpose_xy_x = OpenClKernelLauncher(prg.transpose_xy,
-                                         cl_env.queue,
-                                         gwi, lwi)
-
-    build_options = ""
-    build_options += " -D NB_I=512 -D NB_II=256"
-    build_options += " -D TILE_DIM_XY=32 -D BLOCK_ROWS_XY=8 -D PADDING_XY=1"
-    gwi = (int(resolution[1]),
-           int(resolution[0]) / 4)
-    lwi = (32, 8)
-    build_options += " -D NB_GROUPS_I=" + str((resolution[1]) / lwi[0])
-    build_options += " -D NB_GROUPS_II=" + str((resolution[0] / 4) / lwi[1])
-    prg = cl_env.build_src(src_transpose_xy, build_options, vec)
-    init_transpose_xy_y = OpenClKernelLauncher(prg.transpose_xy,
-                                         cl_env.queue,
-                                         gwi, lwi)
-    _comparison(resolution, resolutionT,
-                init_transpose_xy_x, init_transpose_xy_y,
-                gwi, lwi, cl_env)
-
-
-def test_transposition_xy3D():
-    resolution = (32, 32, 32)
-    cl_env = get_opencl_environment()
-    vec = 2
-    src_transpose_xy = 'kernels/transpose_xy.cl'
-    build_options = ""
-    build_options += " -D NB_I=32 -D NB_II=32 -D NB_III=32"
-    build_options += " -D TILE_DIM_XY=16 -D BLOCK_ROWS_XY=8 -D PADDING_XY=1"
-    gwi = (int(resolution[0] / 2),
-           int(resolution[1] / 2),
-           int(resolution[2]))
-    lwi = (8, 8, 1)
-    build_options += " -D NB_GROUPS_I=" + str((resolution[0] / 2) / lwi[0])
-    build_options += " -D NB_GROUPS_II=" + str((resolution[1] / 2) / lwi[1])
-    prg = cl_env.build_src(src_transpose_xy, build_options, vec)
-    init_transpose_xy = OpenClKernelLauncher(
-        prg.transpose_xy, cl_env.queue, gwi, lwi)
-    _comparison(resolution, resolution,
-                init_transpose_xy, init_transpose_xy,
-                gwi, lwi, cl_env)
-
-
-def test_transposition_xy3D_noVec():
-    resolution = (32, 32, 32)
-    cl_env = get_opencl_environment()
-    vec = 2
-    src_transpose_xy = 'kernels/transpose_xy_noVec.cl'
-    build_options = ""
-    build_options += " -D NB_I=32 -D NB_II=32 -D NB_III=32"
-    build_options += " -D TILE_DIM_XY=16 -D BLOCK_ROWS_XY=8 -D PADDING_XY=1"
-    gwi = (int(resolution[0]),
-           int(resolution[1] / 2),
-           int(resolution[2]))
-    lwi = (16, 8, 1)
-    build_options += " -D NB_GROUPS_I=" + str((resolution[0]) / lwi[0])
-    build_options += " -D NB_GROUPS_II=" + str((resolution[1] / 2) / lwi[1])
-    prg = cl_env.build_src(src_transpose_xy, build_options, vec)
-    init_transpose_xy = OpenClKernelLauncher(
-        prg.transpose_xy, cl_env.queue, gwi, lwi)
-    _comparison(resolution, resolution,
-                init_transpose_xy, init_transpose_xy,
-                gwi, lwi, cl_env)
-
-
-def test_transposition_xy3D_rect():
-    resolution = (32, 64, 32)
-    resolutionT = (64, 32, 32)
-    cl_env = get_opencl_environment()
-    vec = 2
-    src_transpose_xy = 'kernels/transpose_xy.cl'
-    build_options = ""
-    # Settings are taken from destination layout as current layout.
-    # gwi is computed form input layout (appears as transposed layout)
-    build_options += " -D NB_I=64 -D NB_II=32 -D NB_III=32"
-    build_options += " -D TILE_DIM_XY=16 -D BLOCK_ROWS_XY=8 -D PADDING_XY=1"
-    gwi = (int(resolution[0] / 2),
-           int(resolution[1] / 2),
-           int(resolution[2]))
-    lwi = (8, 8, 1)
-    build_options += " -D NB_GROUPS_I=" + str((resolution[0] / 2) / lwi[0])
-    build_options += " -D NB_GROUPS_II=" + str((resolution[1] / 2) / lwi[1])
-    prg = cl_env.build_src(src_transpose_xy, build_options, vec)
-    init_transpose_xy_x = OpenClKernelLauncher(
-        prg.transpose_xy, cl_env.queue, gwi, lwi)
-
-    build_options = ""
-    build_options += " -D NB_I=32 -D NB_II=64 -D NB_III=32"
-    build_options += " -D TILE_DIM_XY=16 -D BLOCK_ROWS_XY=8 -D PADDING_XY=1"
-    gwi = (int(resolution[1] / 2),
-           int(resolution[0] / 2),
-           int(resolution[2]))
-    lwi = (8, 8, 1)
-    build_options += " -D NB_GROUPS_I=" + str((resolution[1] / 2) / lwi[0])
-    build_options += " -D NB_GROUPS_II=" + str((resolution[0] / 2) / lwi[1])
-    prg = cl_env.build_src(src_transpose_xy, build_options, vec)
-    init_transpose_xy_y = OpenClKernelLauncher(
-        prg.transpose_xy, cl_env.queue, gwi, lwi)
-    _comparison(resolution, resolutionT,
-                init_transpose_xy_x, init_transpose_xy_y,
-                gwi, lwi, cl_env)
-
-
-def test_transposition_xy3D_noVec_rect():
-    resolution = (32, 64, 32)
-    resolutionT = (64, 32, 32)
-    cl_env = get_opencl_environment()
-    vec = 2
-    src_transpose_xy = 'kernels/transpose_xy_noVec.cl'
-    build_options = ""
-    # Settings are taken from destination layout as current layout.
-    # gwi is computed form input layout (appears as transposed layout)
-    build_options += " -D NB_I=64 -D NB_II=32 -D NB_III=32"
-    build_options += " -D TILE_DIM_XY=16 -D BLOCK_ROWS_XY=8 -D PADDING_XY=1"
-    gwi = (int(resolution[0]),
-           int(resolution[1] / 2),
-           int(resolution[2]))
-    lwi = (16, 8, 1)
-    build_options += " -D NB_GROUPS_I=" + str((resolution[0]) / lwi[0])
-    build_options += " -D NB_GROUPS_II=" + str((resolution[1] / 2) / lwi[1])
-    prg = cl_env.build_src(src_transpose_xy, build_options, vec)
-    init_transpose_xy_x = OpenClKernelLauncher(
-        prg.transpose_xy, cl_env.queue, gwi, lwi)
-
-    build_options = ""
-    build_options += " -D NB_I=32 -D NB_II=64 -D NB_III=32"
-    build_options += " -D TILE_DIM_XY=16 -D BLOCK_ROWS_XY=8 -D PADDING_XY=1"
-    gwi = (int(resolution[1]),
-           int(resolution[0] / 2),
-           int(resolution[2]))
-    lwi = (16, 8, 1)
-    build_options += " -D NB_GROUPS_I=" + str((resolution[1]) / lwi[0])
-    build_options += " -D NB_GROUPS_II=" + str((resolution[0] / 2) / lwi[1])
-    prg = cl_env.build_src(src_transpose_xy, build_options, vec)
-    init_transpose_xy_y = OpenClKernelLauncher(
-        prg.transpose_xy, cl_env.queue, gwi, lwi)
-    _comparison(resolution, resolutionT,
-                init_transpose_xy_x, init_transpose_xy_y,
-                gwi, lwi, cl_env)
-
-
-def test_transposition_xz3D():
-    resolution = (32, 32, 32)
-    cl_env = get_opencl_environment()
-    vec = 2
-    src_transpose_xz = 'kernels/transpose_xz.cl'
-    build_options = ""
-    build_options += " -D NB_I=32 -D NB_II=32 -D NB_III=32"
-    build_options += " -D TILE_DIM_XZ=16 -D BLOCK_ROWS_XZ=4"
-    build_options += " -D BLOCK_DEPH_XZ=4 -D PADDING_XZ=1"
-    gwi = (int((resolution[0] / 2)),
-           int(resolution[1] / 4),
-           int(resolution[2] / 4))
-    lwi = (8, 4, 4)
-    build_options += " -D NB_GROUPS_I=" + str((resolution[0] / 2) / lwi[0])
-    build_options += " -D NB_GROUPS_II=" + str((resolution[1] / 4) / lwi[1])
-    build_options += " -D NB_GROUPS_III=" + str((resolution[2] / 4) / lwi[2])
-    prg = cl_env.build_src(src_transpose_xz, build_options, vec)
-    init_transpose_xz = OpenClKernelLauncher(
-        prg.transpose_xz, cl_env.queue, gwi, lwi)
-    _comparison(resolution, resolution,
-                init_transpose_xz, init_transpose_xz,
-                gwi, lwi, cl_env, axe=2)
-
-
-def test_transposition_xz3D_noVec():
-    resolution = (32, 32, 32)
-    cl_env = get_opencl_environment()
-    vec = 1
-    src_transpose_xz = 'kernels/transpose_xz_noVec.cl'
-    build_options = ""
-    build_options += " -D NB_I=32 -D NB_II=32 -D NB_III=32"
-    build_options += " -D TILE_DIM_XZ=16 -D BLOCK_ROWS_XZ=4"
-    build_options += " -D BLOCK_DEPH_XZ=4 -D PADDING_XZ=1"
-    gwi = (int(resolution[0]),
-           int(resolution[1] / 4),
-           int(resolution[2] / 4))
-    lwi = (16, 4, 4)
-    build_options += " -D NB_GROUPS_I=" + str((resolution[0]) / lwi[0])
-    build_options += " -D NB_GROUPS_II=" + str((resolution[1] / 4) / lwi[1])
-    build_options += " -D NB_GROUPS_III=" + str((resolution[2] / 4) / lwi[2])
-    prg = cl_env.build_src(src_transpose_xz, build_options, vec)
-    init_transpose_xz = OpenClKernelLauncher(
-        prg.transpose_xz, cl_env.queue, gwi, lwi)
-    _comparison(resolution, resolution,
-                init_transpose_xz, init_transpose_xz,
-                gwi, lwi, cl_env, axe=2)
-
-
-def test_transposition_xz3D_rect():
-    resolution = (32, 32, 64)
-    resolutionT = (64, 32, 32)
-    cl_env = get_opencl_environment()
-    vec = 2
-    src_transpose_xz = 'kernels/transpose_xz.cl'
-    build_options = ""
-    # Settings are taken from destination layout as current layout.
-    # gwi is computed form input layout (appears as transposed layout)
-    build_options += " -D NB_I=64 -D NB_II=32 -D NB_III=32"
-    build_options += " -D TILE_DIM_XZ=16 -D BLOCK_ROWS_XZ=4"
-    build_options += " -D BLOCK_DEPH_XZ=4 -D PADDING_XZ=1"
-    gwi = (int((resolution[0] / 2)),
-           int(resolution[1] / 4),
-           int(resolution[2] / 4))
-    lwi = (8, 4, 4)
-    build_options += " -D NB_GROUPS_I=" + str((resolution[0] / 2) / lwi[0])
-    build_options += " -D NB_GROUPS_II=" + str((resolution[1] / 4) / lwi[1])
-    build_options += " -D NB_GROUPS_III=" + str((resolution[2] / 4) / lwi[2])
-    prg = cl_env.build_src(src_transpose_xz, build_options, vec)
-    init_transpose_xz_x = OpenClKernelLauncher(
-        prg.transpose_xz, cl_env.queue, gwi, lwi)
-
-    build_options = ""
-    build_options += " -D NB_I=32 -D NB_II=32 -D NB_III=64"
-    build_options += " -D TILE_DIM_XZ=16 -D BLOCK_ROWS_XZ=4"
-    build_options += " -D BLOCK_DEPH_XZ=4 -D PADDING_XZ=1"
-    gwi = (int(resolution[2] / 2),
-           int(resolution[1] / 4),
-           int(resolution[0] / 4))
-    lwi = (8, 4, 4)
-    build_options += " -D NB_GROUPS_I=" + str((resolution[2] / 2) / lwi[0])
-    build_options += " -D NB_GROUPS_II=" + str((resolution[1] / 4) / lwi[1])
-    build_options += " -D NB_GROUPS_III=" + str((resolution[0] / 4) / lwi[2])
-    prg = cl_env.build_src(src_transpose_xz, build_options, vec)
-    init_transpose_xz_z = OpenClKernelLauncher(
-        prg.transpose_xz, cl_env.queue, gwi, lwi)
-    _comparison(resolution, resolutionT,
-                init_transpose_xz_x, init_transpose_xz_z,
-                gwi, lwi, cl_env, axe=2)
-
-
-def test_transposition_xz3D_noVec_rect():
-    resolution = (32, 32, 64)
-    resolutionT = (64, 32, 32)
-    cl_env = get_opencl_environment()
-    vec = 1
-    src_transpose_xz = 'kernels/transpose_xz_noVec.cl'
-    build_options = ""
-    # Settings are taken from destination layout as current layout.
-    # gwi is computed form input layout (appears as transposed layout)
-    build_options += " -D NB_I=64 -D NB_II=32 -D NB_III=32"
-    build_options += " -D TILE_DIM_XZ=16 -D BLOCK_ROWS_XZ=4"
-    build_options += " -D BLOCK_DEPH_XZ=4 -D PADDING_XZ=1"
-    gwi = (int(resolution[0]),
-           int(resolution[1] / 4),
-           int(resolution[2] / 4))
-    lwi = (16, 4, 4)
-    build_options += " -D NB_GROUPS_I=" + str((resolution[0]) / lwi[0])
-    build_options += " -D NB_GROUPS_II=" + str((resolution[1] / 4) / lwi[1])
-    build_options += " -D NB_GROUPS_III=" + str((resolution[2] / 4) / lwi[2])
-    prg = cl_env.build_src(src_transpose_xz, build_options, vec)
-    init_transpose_xz_x = OpenClKernelLauncher(
-        prg.transpose_xz, cl_env.queue, gwi, lwi)
-
-    build_options = ""
-    build_options += " -D NB_I=32 -D NB_II=32 -D NB_III=64"
-    build_options += " -D TILE_DIM_XZ=16 -D BLOCK_ROWS_XZ=4"
-    build_options += " -D BLOCK_DEPH_XZ=4 -D PADDING_XZ=1"
-    gwi = (int(resolution[2]),
-           int(resolution[1] / 4),
-           int(resolution[0] / 4))
-    lwi = (16, 4, 4)
-    build_options += " -D NB_GROUPS_I=" + str((resolution[2]) / lwi[0])
-    build_options += " -D NB_GROUPS_II=" + str((resolution[1] / 4) / lwi[1])
-    build_options += " -D NB_GROUPS_III=" + str((resolution[0] / 4) / lwi[2])
-    prg = cl_env.build_src(src_transpose_xz, build_options, vec)
-    init_transpose_xz_z = OpenClKernelLauncher(
-        prg.transpose_xz, cl_env.queue, gwi, lwi)
-    _comparison(resolution, resolutionT,
-                init_transpose_xz_x, init_transpose_xz_z,
-                gwi, lwi, cl_env, axe=2)
-
-
-def test_transposition_xz3Dslice():
-    resolution = (32, 32, 32)
-    cl_env = get_opencl_environment()
-    vec = 2
-    src_transpose_xz = 'kernels/transpose_xz_slice.cl'
-    build_options = ""
-    build_options += " -D NB_I=32 -D NB_II=32 -D NB_III=32"
-    build_options += " -D TILE_DIM_XZ=16 -D BLOCK_ROWS_XZ=1"
-    build_options += " -D BLOCK_DEPH_XZ=4 -D PADDING_XZ=1"
-    gwi = (int(resolution[0] / 2),
-           int(resolution[1]),
-           int(resolution[2] / 4))
-    lwi = (8, 1, 4)
-    build_options += " -D NB_GROUPS_I=" + str((resolution[0] / 2) / lwi[0])
-    build_options += " -D NB_GROUPS_III=" + str((resolution[2] / 4) / lwi[2])
-
-    prg = cl_env.build_src(src_transpose_xz, build_options, vec)
-    init_transpose_xz = OpenClKernelLauncher(
-        prg.transpose_xz, cl_env.queue, gwi, lwi)
-    _comparison(resolution, resolution,
-                init_transpose_xz, init_transpose_xz,
-                gwi, lwi, cl_env, axe=2)
-
-def test_transposition_xz3Dslice_noVec():
-    resolution = (32, 32, 32)
-    cl_env = get_opencl_environment()
-    vec = 1
-    src_transpose_xz = 'kernels/transpose_xz_slice_noVec.cl'
-    build_options = ""
-    build_options += " -D NB_I=32 -D NB_II=32 -D NB_III=32"
-    build_options += " -D TILE_DIM_XZ=16 -D BLOCK_ROWS_XZ=1"
-    build_options += " -D BLOCK_DEPH_XZ=4 -D PADDING_XZ=1"
-    gwi = (int(resolution[0]),
-           int(resolution[1]),
-           int(resolution[2] / 4))
-    lwi = (16, 1, 4)
-    build_options += " -D NB_GROUPS_I=" + str(resolution[0] / lwi[0])
-    build_options += " -D NB_GROUPS_III=" + str((resolution[2] / 4) / lwi[2])
-
-    prg = cl_env.build_src(src_transpose_xz, build_options, vec)
-    init_transpose_xz = OpenClKernelLauncher(
-        prg.transpose_xz, cl_env.queue, gwi, lwi)
-    _comparison(resolution, resolution,
-                init_transpose_xz, init_transpose_xz,
-                gwi, lwi, cl_env, axe=2)
-
-
-def test_transposition_xz3Dslice_rect():
-    resolution = (32, 32, 64)
-    resolutionT = (64, 32, 32)
-    cl_env = get_opencl_environment()
-    vec = 2
-    src_transpose_xz = 'kernels/transpose_xz_slice.cl'
-    build_options = ""
-    # Settings are taken from destination layout as current layout.
-    # gwi is computed form input layout (appears as transposed layout)
-    build_options += " -D NB_I=64 -D NB_II=32 -D NB_III=32"
-    build_options += " -D TILE_DIM_XZ=16 -D BLOCK_ROWS_XZ=1"
-    build_options += " -D BLOCK_DEPH_XZ=4 -D PADDING_XZ=1"
-    gwi = (int(resolution[0] / 2),
-           int(resolution[1]),
-           int(resolution[2] / 4))
-    lwi = (8, 1, 4)
-    build_options += " -D NB_GROUPS_I=" + str((resolution[0] / 2) / lwi[0])
-    build_options += " -D NB_GROUPS_III=" + str((resolution[2] / 4) / lwi[2])
-    prg = cl_env.build_src(src_transpose_xz, build_options, vec)
-    init_transpose_xz_x = OpenClKernelLauncher(
-        prg.transpose_xz, cl_env.queue, gwi, lwi)
-
-    build_options = ""
-    build_options += " -D NB_I=32 -D NB_II=32 -D NB_III=64"
-    build_options += " -D TILE_DIM_XZ=16 -D BLOCK_ROWS_XZ=1"
-    build_options += " -D BLOCK_DEPH_XZ=4 -D PADDING_XZ=1"
-    gwi = (int(resolution[2] / 2),
-           int(resolution[1]),
-           int(resolution[0] / 4))
-    lwi = (8, 1, 4)
-    build_options += " -D NB_GROUPS_I=" + str((resolution[2] / 2) / lwi[0])
-    build_options += " -D NB_GROUPS_III=" + str((resolution[0] / 4) / lwi[2])
-    prg = cl_env.build_src(src_transpose_xz, build_options, vec)
-    init_transpose_xz_z = OpenClKernelLauncher(
-        prg.transpose_xz, cl_env.queue, gwi, lwi)
-    _comparison(resolution, resolutionT,
-                init_transpose_xz_x, init_transpose_xz_z,
-                gwi, lwi, cl_env, axe=2)
-
-def test_transposition_xz3Dslice_noVec_rect():
-    resolution = (32, 32, 64)
-    resolutionT = (64, 32, 32)
-    cl_env = get_opencl_environment()
-    vec = 1
-    src_transpose_xz = 'kernels/transpose_xz_slice_noVec.cl'
-    build_options = ""
-    # Settings are taken from destination layout as current layout.
-    # gwi is computed form input layout (appears as transposed layout)
-    build_options += " -D NB_I=64 -D NB_II=32 -D NB_III=32"
-    build_options += " -D TILE_DIM_XZ=16 -D BLOCK_ROWS_XZ=1"
-    build_options += " -D BLOCK_DEPH_XZ=4 -D PADDING_XZ=1"
-    gwi = (int(resolution[0]),
-           int(resolution[1]),
-           int(resolution[2] / 4))
-    lwi = (16, 1, 4)
-    build_options += " -D NB_GROUPS_I=" + str(resolution[0] / lwi[0])
-    build_options += " -D NB_GROUPS_III=" + str((resolution[2] / 4) / lwi[2])
-    prg = cl_env.build_src(src_transpose_xz, build_options, vec)
-    init_transpose_xz_x = OpenClKernelLauncher(
-        prg.transpose_xz, cl_env.queue, gwi, lwi)
-
-    build_options = ""
-    build_options += " -D NB_I=32 -D NB_II=32 -D NB_III=64"
-    build_options += " -D TILE_DIM_XZ=16 -D BLOCK_ROWS_XZ=1"
-    build_options += " -D BLOCK_DEPH_XZ=4 -D PADDING_XZ=1"
-    gwi = (int(resolution[2]),
-           int(resolution[1]),
-           int(resolution[0] / 4))
-    lwi = (16, 1, 4)
-    build_options += " -D NB_GROUPS_I=" + str(resolution[2] / lwi[0])
-    build_options += " -D NB_GROUPS_III=" + str((resolution[0] / 4) / lwi[2])
-    prg = cl_env.build_src(src_transpose_xz, build_options, vec)
-    init_transpose_xz_z = OpenClKernelLauncher(
-        prg.transpose_xz, cl_env.queue, gwi, lwi)
-    _comparison(resolution, resolutionT,
-                init_transpose_xz_x, init_transpose_xz_z,
-                gwi, lwi, cl_env, axe=2)
-
diff --git a/hysop/old/gpu.old/tools.py b/hysop/old/gpu.old/tools.py
deleted file mode 100644
index d03fe95bdc57c8f296f3045c4a420ada2072a3b8..0000000000000000000000000000000000000000
--- a/hysop/old/gpu.old/tools.py
+++ /dev/null
@@ -1,1013 +0,0 @@
-"""Classes and tools used to handle openCL interface.
-
-
-* :class:`~hysop.gpu.tools.OpenClEnvironment`:
-   object handling opencl platform, device ... info.
-* :func:`~hysop.gpu.tools.get_opengl_shared_environment`:
-   build or get an OpenCL environment with openGL properties.
-* :func:`~hysop.gpu.tools.get_opencl_environment`:
-   build or get an OpenCL environment.
-* :func:`~hysop.gpu.tools.explore`
-   explore system and display platform, devices, memory ... info.
-
-
-
-"""
-import os, re, itertools, hashlib, pickle, gzip
-
-# from __future__ import print_function
-
-from hysop import __VERBOSE__, __KERNEL_DEBUG__, \
-        __DEFAULT_PLATFORM_ID__, __DEFAULT_DEVICE_ID__
-from hysop.tools.io_utils import IO
-from hysop.constants import np, HYSOP_REAL
-from hysop.backend.device.opencl import cl, GPU_SRC, CL_PROFILE, KERNEL_DUMP_FOLDER
-
-from hysop.core.mpi import MPI
-
-
-FLOAT_GPU, DOUBLE_GPU = np.float32, np.float64
-
-__cl_env = None
-"""Global variable handling the OpenCL Environment instance """
-
-class KernelError(Exception):
-    """Custom exception for kernel errors.
-    """
-    def __init__(self, msg, err):
-        super(KernelError,self).__init__(msg)
-        self.msg = msg
-        self.err = err
-
-    def __str__(self):
-        return self.err + ': ' + self.msg
-
-
-class OpenClEnvironment(object):
-    """OpenCL environment informations and useful functions.
-    """
-
-    def __init__(self, platform_id, device_id, device_type,
-                 precision, gl_sharing=False, comm=None):
-        """Initialize an OpenCL environment
-
-        Parameters
-        ----------
-        platform_id : int
-            chosen platform id.
-        device_id : int
-            chosen device id.
-        device_type : string
-            chosen device type.
-        precision : int
-            required precision for real data.
-            Must be FLOAT_GPU or DOUBLE_GPU.
-        gl_sharing : bool, optional
-            True to build a context shared between OpenGL and OpenCL.
-            Default=False.
-        comm : mpi communicator, optional
-            Communicator which handles the OpenCL env.
-            Default = :data:`~hysop.core.mpi.main_comm`
-
-        """
-        self._platform_id = platform_id
-        self._device_id   = device_id
-        self._device_type = device_type
-        self._gl_sharing  = gl_sharing
-        # OpenCL platform
-        self.platform = self._get_platform(platform_id)
-        # OpenCL device
-        self.device = self._get_device(self.platform, device_id, device_type)
-        # Device available memory
-        self.available_mem = self.device.global_mem_size
-        # OpenCL context
-        self.ctx = self._get_context(self.device, gl_sharing)
-        # OpenCL queue
-        self.queue = self._get_queue(self.ctx)
-
-        # MPI sub-communicator for all processes attached to the same device
-        if comm is None:
-            from hysop.core.mpi import main_comm
-        else:
-            main_comm = comm
-        
-        # Splitting the mpi communicator by the device id is not enough:
-        # the id of the first gpu of each node is 0
-        # We build color from the processor name and the id
-        import hashlib
-        # The md5 sum of the proc name is tuncated to obtain an integer
-        # for fortran (32bit)
-        hash_name = hashlib.md5(MPI.Get_processor_name()).hexdigest()[-7:]
-        self.gpu_comm = main_comm.Split(
-            color=int(hash_name, 16) + device_id,
-            key=main_comm.Get_rank())
-
-        # Floating point codegeneration mode
-        from hysop.backend.device.opencl.opencl_types import OpenClTypeGen
-        _kargs = {'device':self.device, 'context':self.ctx, 'platform':self.platform }
-        if __KERNEL_DEBUG__:
-            _kargs['float_dump_mode'] = 'dec'
-        else:
-            _kargs['float_dump_mode'] = 'hex'
-
-        # Floating point precision
-        if precision is FLOAT_GPU:
-            _kargs['fbtype'] = 'float'
-        elif precision is DOUBLE_GPU:
-            _kargs['fbtype'] = 'double'
-        else:
-            raise ValueError('Unknown floating point precision {}!'.format(precision))
-
-        typegen = OpenClTypeGen(**_kargs)
-    
-        self.typegen   = typegen
-        self.prec_size = typegen.FLT_BYTES[typegen.fbtype]
-        self.precision = precision
-
-        self.macros = {}
-        self.default_build_opts = ""
-        if CL_PROFILE and self.device.vendor.find('NVIDIA') >= 0:
-            self.default_build_opts += " -cl-nv-verbose"
-        #self.default_build_opts += "-Werror" + self._get_precision_opts()
-        self.default_build_opts += self._get_precision_opts()
-
-        # Kernels configuration dictionary
-        if self.device.name == "Cayman":
-            from hysop.backend.device.opencl.config_cayman import kernels_config as kernel_cfg
-        elif self.device.name == "Tesla K20m" or \
-                self.device.name == "Tesla K20Xm":
-            from hysop.backend.device.opencl.config_k20m import kernels_config as kernel_cfg
-        else:
-            print("/!\\ Get a defautl kernels config for", self.device.name)
-            from hysop.backend.device.opencl.config_default import kernels_config as kernel_cfg
-        self.kernels_config = kernel_cfg
-        self._locMem_Buffers = {}
-
-    def modify(self, platform_id, device_id, device_type,
-               precision, gl_sharing=False):
-        """Modify OpenCL environment parameters.
-
-        Parameters
-        ----------
-        platform_id : int
-            chosen platform id.
-        device_id : int
-            chosen device id.
-        device_type : string
-            chosen device type.
-        precision : int
-            required precision for real data.
-            Must be FLOAT_GPU or DOUBLE_GPU.
-        gl_sharing : bool, optional
-            True to build a context shared between OpenGL and OpenCL.
-            Default=False.
-        """
-        platform_changed, device_changed = False, False
-        if not platform_id == self._platform_id:
-            print("platform changed")
-            self._platform_id = platform_id
-            self.platform = self._get_platform(platform_id)
-            platform_changed = True
-        if platform_changed or not (device_id is self._device_id and
-                                    device_type == self._device_type):
-            print("device changed")
-            self._device_id = device_id
-            self._device_type = device_type
-            self.device = self._get_device(self.platform,
-                                           device_id, device_type)
-            self.available_mem = self.device.global_mem_size
-            device_changed = True
-        if platform_changed or device_changed or \
-                (not self._gl_sharing and gl_sharing is not self._gl_sharing):
-            if self._gl_sharing and not gl_sharing:
-                print("Warning: Loosing Gl shared context.")
-            self._gl_sharing = gl_sharing
-            self.ctx = self._get_context(self.device, gl_sharing)
-            self.queue = self._get_queue(self.ctx)
-        if self.precision is not precision and precision is not None:
-            if self.precision is not None:
-                print("Warning, GPU precision is overrided from",)
-                print(self.precision, 'to', precision)
-            self.precision = precision
-            self.default_build_opts = ""
-            if CL_PROFILE and self.device.vendor.find('NVIDIA') >= 0:
-                self.default_build_opts += " -cl-nv-verbose"
-            self.default_build_opts += "-Werror" + self._get_precision_opts()
-        
-        ## update opencl typegen
-        # Floating point codegeneration mode
-        from hysop.backend.device.opencl.opencl_types import OpenClTypeGen
-        _kargs = {'device':self.device, 'context':self.ctx, 'platform':self.platform }
-        if __KERNEL_DEBUG__:
-            _kargs['float_dump_mode'] = 'dec'
-        else:
-            _kargs['float_dump_mode'] = 'hex'
-
-        # Floating point precision
-        if precision is FLOAT_GPU:
-            _kargs['fbtype'] = 'float'
-        elif precision is DOUBLE_GPU:
-            _kargs['fbtype'] = 'double'
-        else:
-            raise ValueError('Unknown floating point precision {}!'.format(precision))
-
-        typegen = OpenClTypeGen(**_kargs)
-    
-        self.typegen   = typegen
-        self.prec_size = typegen.FLT_BYTES[typegen.fbtype]
-
-    @staticmethod
-    def _get_platform(platform_id):
-        """Returns an OpenCL platform
-        :param platform_id : OpenCL platform id
-
-        """
-        try:
-            # OpenCL platform
-            platform = cl.get_platforms()[platform_id]
-        except IndexError:
-            plist = cl.get_platforms()
-            platform = plist[0]
-            print("  Incorrect platform_id :", platform_id, ".",)
-            print(" Only ", len(plist), " available.",)
-            print(" --> getting default platform ", platform.name)
-        if __VERBOSE__:
-            print("  Platform   ")
-            print("  - Name       :", platform.name)
-            print("  - Version    :", platform.version)
-        return platform
-
-    @staticmethod
-    def _get_device(platform, device_id, device_type):
-        """Returns an OpenCL device
-
-        Parameters
-        ----------
-        platform_id : int
-            chosen platform id.
-        device_id : int
-            chosen device id.
-        device_type : string
-            chosen device type.
-
-        Try to use given parameters and in case of fails, use pyopencl context
-        creation function.
-        """
-        display = False
-        try:
-            if device_type is not None:
-                device_type_id = cl.device_type.__getattribute__(
-                    cl.device_type, str(device_type.upper()))
-                device = platform.get_devices(device_type_id)[device_id]
-            else:
-                device = platform.get_devices()[device_id]
-        except cl.RuntimeError as e:
-            print("RuntimeError:", e)
-            device = cl.create_some_context().devices[0]
-            display = True
-        except AttributeError as e:
-            print("AttributeError:", e)
-            device = cl.create_some_context().devices[0]
-            display = True
-        except IndexError:
-            print("  Incorrect device_id :", device_id, ".",)
-            print(" Only ", len(platform.get_devices()), " available.",)
-            if device_type is not None:
-                device_type = str(device_type.upper())
-                print(" Getting first device of type " + device_type)
-            else:
-                print(" Getting first device of the platform")
-            device = platform.get_devices()[0]
-            display = True
-        if device_type is not None:
-            assert device_type.upper() == cl.device_type.to_string(device.type)
-        if display or __VERBOSE__:
-            print("  Device")
-            print("  - id                :", device_id)
-            print("  - Name                :",)
-            print(device.name)
-            print("  - Type                :",)
-            print(cl.device_type.to_string(device.type))
-            print ("  - C Version           :",)
-            print (device.opencl_c_version)
-            print ("  - Global mem size     :",)
-            print(device.global_mem_size / (1024 ** 3), "GB")
-        return device
-
-    def _get_context(self, device, gl_sharing):
-        """Returns OpenCL context
-
-        Parameters
-        ----------
-        device : OpenCL device
-            which handles the context.
-        gl_sharing : bool
-            True to build a context shared between OpenGL and OpenCL.
-            Default=False.
-
-        """
-        props = None
-        if gl_sharing:
-            from pyopencl.tools import get_gl_sharing_context_properties
-            import sys
-            if sys.platform == "darwin":
-                props = get_gl_sharing_context_properties()
-            else:
-                # Some OSs prefer clCreateContextFromType, some prefer
-                # clCreateContext. Try both.
-                props = \
-                    [(cl.context_properties.PLATFORM, self.platform)] \
-                    + get_gl_sharing_context_properties()
-            ctx = cl.Context(properties=props, devices=[device])
-        else:
-            ctx = cl.Context([device])
-        if __VERBOSE__:
-            print(" Context:")
-            if props is not None:
-                print("  - properties           :", props)
-        return ctx
-
-    @staticmethod
-    def _get_queue(ctx):
-        """Returns OpenCL queue from context
-
-        :param ctx : OpenCL context
-
-        """
-        props = None
-        if CL_PROFILE:
-            props = cl.command_queue_properties.PROFILING_ENABLE
-            queue = cl.CommandQueue(ctx, properties=props)
-        else:
-            queue = cl.CommandQueue(ctx)
-        if __VERBOSE__:
-            print(" Queue")
-            if props is not None:
-                print("  - properties           :", props)
-            print("===")
-        return queue
-
-    def create_other_queue(self):
-        """Create OpenCL queue from current context
-        """
-        return self._get_queue(self.ctx)
-
-    @staticmethod
-    def get_work_items(resolution, vector_width=1):
-        """Set the optimal work-item number and OpenCL space index.
-
-        Parameters
-        ----------
-        resolution : tuple
-            local mesh resolution
-        vector_width : int
-            OpenCL vector types width
-
-        Returns
-        -------
-        int : work-item number
-        tuple : global space index
-        tuple : local space index
-
-        Use 64 work-items in 3D and 256 in 2D.
-        \todo Use Both the number from device capability
-        The problem must be a multiple of and greater
-        than work-item number * vector_width
-        """
-        # Optimal work item number
-        if len(resolution) == 3:
-            workItemNumber = 64 if min(resolution) >= 64 \
-                else min(resolution)
-        else:
-            workItemNumber = 256 if min(resolution) >= 256 \
-                else min(resolution)
-        # Change work-item regarding problem size
-        if resolution[0] % workItemNumber > 0:
-            if len(resolution) == 3:
-                print("Warning : GPU best performances obtained for",)
-                print("problem sizes multiples of 64")
-            else:
-                print("Warning : GPU best performances obtained for",)
-                print("problem sizes multiples of 256")
-        while resolution[0] % workItemNumber > 0:
-            workItemNumber = workItemNumber / 2
-        # Change work-item regarding vector_width
-        if workItemNumber * vector_width > resolution[0]:
-            if resolution[0] % vector_width > 0:
-                raise ValueError(
-                    "Resolution ({0}) must be a multiple of {1}".format(
-                        resolution[0], vector_width))
-            workItemNumber = resolution[0] // vector_width
-        if len(resolution) == 3:
-            gwi = (int(workItemNumber),
-                   int(resolution[1]), int(resolution[2]))
-            lwi = (int(workItemNumber), 1, 1)
-        else:
-            gwi = (int(workItemNumber),
-                   int(resolution[1]))
-            lwi = (int(workItemNumber), 1)
-        return workItemNumber, gwi, lwi
-
-    def _get_precision_opts(self):
-        """Check if device is capable to work with given precision
-        and returns build options considering this precision
-        """
-        opts = ""
-        # Precision supported
-        fp32_rounding_flag = True
-        if self.precision is FLOAT_GPU:
-            opts += " -cl-single-precision-constant"
-            prec = "single"
-        else:
-            if self.device.double_fp_config <= 0:
-                raise ValueError("Double Precision is not supported by device")
-            prec = "double"
-        if __VERBOSE__:
-            print(" Precision capability  ",)
-            print("for " + prec + " Precision: ")
-        for v in ['DENORM', 'INF_NAN',
-                  'ROUND_TO_NEAREST', 'ROUND_TO_ZERO', 'ROUND_TO_INF',
-                  'FMA', 'CORRECTLY_ROUNDED_DIVIDE_SQRT', 'SOFT_FLOAT']:
-            try:
-                if eval('(self.device.' + prec + '_fp_config &' +
-                        ' cl.device_fp_config.' +
-                        v + ') == cl.device_fp_config.' + v):
-                    if __VERBOSE__:
-                        print(v)
-                else:
-                    if v is 'CORRECTLY_ROUNDED_DIVIDE_SQRT':
-                        fp32_rounding_flag = False
-            except AttributeError as ae:
-                if v is 'CORRECTLY_ROUNDED_DIVIDE_SQRT':
-                    fp32_rounding_flag = False
-                if __VERBOSE__:
-                    print(v, 'is not supported in OpenCL C 1.2.\n',)
-                    print('   Exception catched : ', ae)
-        if fp32_rounding_flag:
-            opts += " -cl-fp32-correctly-rounded-divide-sqrt"
-        return opts
-
-    def _create_cl_program(self, file_list, 
-                           vector_width=4,
-                           nb_remesh_components=1,
-                           options="",
-                           force_verbose=None, force_debug=None):
-        """Build OpenCL sources
-
-        Parameters
-        ----------
-        files : string or list of strings
-            user defined files names
-        vector_width : int, optional
-            OpenCL vector type width, default=4
-        nb_remesh_components : int, optional
-            number of remeshed components, default=1
-        options: string
-            additional OpenCL compile flags
-        force_verbose: bool, optional, default=None
-            force verbose mode
-        force_debug: bool, optional, default=None
-            force debug mode (kernel source dumping and preprocessing)
-        
-        Returns OpenCL kernel
-        Parse the sources to handle single and double precision.
-        """
-        VERBOSE = __VERBOSE__       if (force_verbose is None) else force_verbose
-        DEBUG   = __KERNEL_DEBUG__  if (force_debug is None)   else force_debug
-
-        gpu_src = ""
-        if cl.device_type.to_string(self.device.type) == 'GPU' and \
-                self.precision is DOUBLE_GPU:
-            gpu_src += '#pragma OPENCL EXTENSION cl_khr_fp64: enable \n'
-        
-        if isinstance(files, list):
-            file_list = files
-        else:
-            file_list = [files]
-
-        if VERBOSE:
-            print '=== Kernel sources compiling ==='
-            for sd in file_list:
-                print '  - ', sf
-
-        for sf in file_list:
-            # search and open cl file.
-            try:
-                f = open(sf, 'r')
-            except IOError as ioe:
-                if ioe.errno == 2:
-                    # path to cl files inside hysop.gpu package
-                    f = open(GPU_SRC + sf, 'r')
-                else:
-                    raise ioe
-            gpu_src += "".join(
-                self.parse_file(f, vector_width, nb_remesh_components))
-            f.close()
-        
-        # print gpu_src
-        if self.macros is not None:
-            for k in self.macros:
-                gpu_src = gpu_src.replace(k, str(self.macros[k]))
-        
-        if self.precision is FLOAT_GPU:
-            # Rexexp to add 'f' suffix to float constants
-            # Match 1.2, 1.234, 1.2e3, 1.2E-05
-            float_replace = re.compile(r'(?P<float>\d\.\d+((e|E)-?\d+)?)')
-            gpu_src = float_replace.sub(r'\g<float>f', gpu_src)
-        else:
-            gpu_src = gpu_src.replace('float', 'double')
-       
-        # Log final opencl generated code for debug purposes
-        if DEBUG:
-            kernel_name = (file_list[-1].split('/')[-1]).replace('.cl','_parsed')
-            def listformat(L):
-                if isinstance(L,str):
-                    L=L.replace('-D ','').split(' ')
-                L=list(L)
-                for empty in ['', ' ']:
-                    if empty in L:
-                        L.remove(empty)
-                return '\n\t\t'+'\n\t\t'.join(L)
-            dump_prefix = \
-'''
-/* 
-Dumped OpenCL Kernel '{}'
-    vector_width: {}
-    nb_remesh_components: {}
-    source_files: {}
-    default_build_opts: {}
-    additional options: {}
-*/
-'''.format(kernel_name,
-           vector_width, 
-           nb_remesh_components, 
-           listformat(file_list),
-           listformat(self.default_build_opts), 
-           listformat(options))
-   
-
-            dumped_src = dump_prefix + gpu_src
-    
-            dump_folder=IO.default_path()+'/'+KERNEL_DUMP_FOLDER
-            dump_file_prefix=dump_folder+'/'+kernel_name
-            tmp_dump_file=dump_file_prefix+'.c'
-            dump_file=dump_file_prefix+'.cl'
-            if not os.path.exists(dump_folder):
-                os.makedirs(dump_folder)
-            with open(tmp_dump_file, 'w+') as f:
-                f.write(dumped_src)
-
-            try:
-                #try to preprocess sources
-                import subprocess
-                opts = self.default_build_opts + options
-                opts = re.sub('-cl-([a-z0-9]+-?)+ ','',opts)
-                cmd = ['gcc',opts,'-E','-c',tmp_dump_file,'-o',dump_file_prefix+'_preprocessed.cl']
-                subprocess.check_call(' '.join(cmd), shell=True);  
-            finally:
-                os.rename(tmp_dump_file,dump_file)
-
-            if VERBOSE:
-                msg = 'OpenCL kernel {} source dumped to {}.'.format(kernel_name, dump_file)
-                print msg
-
-        # OpenCL program
-        prg = cl.Program(self.ctx, gpu_src)
-        return prg
-
-    def build_src(self, files, 
-                  options="", 
-                  vector_width=4,
-                  nb_remesh_components=1):
-        """Build OpenCL sources
-
-        Parameters
-        ----------
-        files : string or list of strings
-            user defined file names
-        options : string, optional
-            Compiler options, default=""
-        vector_width : int, optional
-            OpenCL vector type width, default=4
-        nb_remesh_components : int, optional
-            number of remeshed components, default=1
-        force_verbose: bool, optional
-            force verbose mode
-        force_debug: bool, optional
-            force debug mode (kernel dumping)
-
-        Returns OpenCL binaries
-
-        Parse the sources to handle single and double precision.
-        """
-
-        if isinstance(files, list):
-            file_list = files
-        else:
-            file_list = [files]
-
-        if __VERBOSE__:
-            print("=== Kernel sources compiling ===")
-            for sf in file_list:
-                print("   - ", sf)
-
-        # --- create kernel from cl files ---
-        prg = self._create_cl_program(files=file_list, 
-                                      options=options,
-                                      vector_width=vector_width,
-                                      nb_remesh_components=nb_remesh_components,
-                                      force_verbose=force_verbose,
-                                      force_debug=force_debug)
-        
-        # --- Build kernel ---
-        try:
-            build = prg.build(self.default_build_opts + options)
-        except Exception, e:
-            print("Build files : ")
-            for sf in file_list:
-                print("   - ", sf)
-            print("Build options : ", self.default_build_opts + options)
-            print("Vectorization : ", vector_width)
-            raise e
-
-        # display post-build info
-        if __VERBOSE__:
-            # print options
-            print "Build options : ",
-            print build.get_build_info(
-                self.device, cl.program_build_info.OPTIONS)
-            print "Compiler status : ",
-            print build.get_build_info(
-                self.device, cl.program_build_info.STATUS)
-            print "Compiler log : ",
-            print build.get_build_info(self.device,
-                             cl.program_build_info.LOG)
-            print "===\n"
-        return build
-    
-    def build_raw_src(self, src, options="", 
-            kernel_name=None,
-            force_verbose=None, force_debug=None):
-        """Build raw OpenCL sources
-
-        Parameters
-        ----------
-        src : string 
-            OpenCL source code
-        options : string
-            Compiler options to use for buildind
-        Returns OpenCL binaries
-        """
-        VERBOSE = __VERBOSE__      if (force_verbose is None) else force_verbose
-        DEBUG   = __KERNEL_DEBUG__ if (force_debug is None)   else force_debug
-
-        gpu_src = src
-        
-        src_hash = hashlib.sha1(gpu_src).hexdigest()
-        if (kernel_name is None):
-            kernel_name = src_hash
-        else:
-            kernel_name += '_{}'.format(src_hash[:4])
-
-        soptions = ' '.join(options)
-        if VERBOSE:
-            print "=== Kernel raw source compiling ==="
-        prg = cl.Program(self.ctx, gpu_src)
-            
-            
-        dump_folder=IO.default_path()+'/'+KERNEL_DUMP_FOLDER
-        if not os.path.exists(dump_folder):
-            os.makedirs(dump_folder)
-
-        # Build OpenCL program
-        try:
-            build = prg.build(self.default_build_opts + soptions)
-        except Exception, e:
-            # always dump source when build fails
-            dump_file=dump_folder+'/'+'{}_build_fail.cl'.format(kernel_name)
-            with open(dump_file, 'w+') as f:
-                f.write(gpu_src)
-            print "Build options : ", self.default_build_opts + soptions
-            print 'Build Failed: dumped source to {}.'.format(dump_file) 
-            raise e
-
-        if VERBOSE:
-            #print options
-            print "Build options : ",
-            print build.get_build_info(
-                self.device, cl.program_build_info.OPTIONS)
-            print "Compiler status : ",
-            print build.get_build_info(
-                self.device, cl.program_build_info.STATUS)
-            print "Compiler log : ",
-            print build.get_build_info(self.device,
-                                       cl.program_build_info.LOG)
-        if DEBUG:
-            # dump kernel source while in debug mode
-            dump_file=dump_folder+'/'+'{}_dump.cl'.format(kernel_name)
-            print 'Dumping kernel src at \'{}\'.'.format(dump_file)
-            with open(dump_file, 'w+') as f:
-                f.write(gpu_src)
-
-        if VERBOSE:
-            print "===\n"
-
-        return build
-
-    @staticmethod
-    def parse_file(f, n=8, nb_remesh_components=1):
-        """Parse a file containing OpenCL sources.
-
-        Parameters
-        ----------
-        f : string
-            file name
-        n : int, optional
-            vector width, default=8
-        nb_remesh_components : int
-            number of remeshed components
-
-        Returns
-        -------
-        string, the parsed sources.
-
-        Notes
-        -----
-        * __N__ is expanded as an integer corresponding to vector width.
-        * __NN__ instruction is duplicated to operate on each vector component:
-
-          * if line ends with ';', the whole instruciton is
-            duplicated.
-          * if line ends with ',' and contains
-            '(float__N__)(', the float element is duplicated
-
-        * Remeshing fields components are expanded as follows :
-          All code between '__RCOMPONENT_S__' and
-          '__RCOMPONENT_E__' flags are duplicated n times with n
-          the number of components to compute. In this duplicated code, the
-          flag '__ID__' is replaced by index of a range of lenght
-          the number of components. A flag '__RCOMPONENT_S__P__'
-          may be used and the duplicated elements are separated with ','
-          (for function parameters expanding).
-
-        Examples with a 4-width vector code::
-
-            float__N__ x;           ->  float4 x;
-
-            x.s__NN__ = 1.0f;       ->  x.s0 = 1.0f;
-                                        x.s1 = 1.0f;
-                                        x.s2 = 1.0f;
-                                        x.s3 = 1.0f;
-
-            x = (int__N__)(__NN__,  ->  x = (int4)(0,
-                            );                      1,
-                                                    2,
-                                                    3,
-                                                    );
-
-        Examples with a 2 components expansion code::
-
-            __RCOMP_P __global const float* var__ID__,
-            -> __global const float* var0,__global const float* var1,
-
-            __RCOMP_I var__ID__[i] = 0.0;
-            -> var0[i] = 0.0;var1[i] = 0.0;
-
-            aFunction(__RCOMP_P var__ID__, __RCOMP_P other__ID__);
-            -> aFunction(var0, var1, other0, other1);
-
-        """
-        src = ""
-        # replacement for floatN elements
-        vec_floatn = re.compile(r'\(float__N__\)\(')
-        vec_nn = re.compile('__NN__')
-        vec_n = re.compile('__N__')
-        for l in f.readlines():
-            # Expand floatN items
-            if vec_floatn.search(l) and vec_nn.search(l) and \
-                    l[-2] == ',':
-                sl = l.split("(float__N__)(")
-                l = sl[0] + "(float" + str(n) + ")("
-                el = sl[1].rsplit(',', 1)[0]
-                for i in xrange(n):
-                    l += vec_nn.sub(str(i), el) + ','
-                l = l[:-1] + '\n'
-            # Expand floatN elements access
-            elif vec_nn.search(l) and l[-2] == ';':
-                el = ""
-                for i in xrange(n):
-                    el += vec_nn.sub(str(i), l)
-                l = el
-            # Replace vector length
-            src += vec_n.sub(str(n), l)
-
-        # Replacement for remeshed components
-        re_instr = re.compile(r'__RCOMP_I([\w\s\.,()\[\]+*/=-]+;)')
-        # __RCOMP_I ...;
-
-        def repl_instruction(m):
-            return ''.join(
-                [m.group(1).replace('__ID__', str(i))
-                 for i in xrange(nb_remesh_components)])
-        # __RCOMP_P ..., ou __RCOMP_P ...)
-        re_param = re.compile(r'__RCOMP_P([\w\s\.\[\]+*/=-]+(?=,|\)))')
-
-        def repl_parameter(m):
-            return ', '.join(
-                [m.group(1).replace('__ID__', str(i))
-                 for i in xrange(nb_remesh_components)])
-
-        src = re_instr.sub(repl_instruction, src)
-        src = re_param.sub(repl_parameter, src)
-        return src
-
-    def global_allocation(self, array):
-        """Allocate and returns an opencl buffer
-
-        Parameters
-        ----------
-        array : numpy array
-            source buffer, on host
-        """
-        # create an opencl buffer from input array
-        clBuff = cl.Buffer(self.ctx,
-                           cl.mem_flags.ALLOC_HOST_PTR, size=array.nbytes)
-        # Touch the buffer on device to performs the allocation
-        # Transfers a single element in device (the precision does not matter)
-        e = np.zeros((1,), dtype=np.float64)
-        cl.enqueue_copy(self.queue, clBuff, e,
-                        buffer_origin=(0, 0, 0), host_origin=(0, 0, 0),
-                        region=(e.nbytes,)).wait()
-        # update memory counter
-        self.available_mem -= clBuff.size
-        return clBuff
-
-    def global_deallocation(self, cl_mem):
-        self.available_mem += cl_mem.size
-        cl_mem.release()
-
-    # def LocalMemAllocator(self, sizes_list, type_list=None):
-    #     """
-    #     Allocates spaces in device local memory.
-    #     @param sizes_list : list of sizes.
-    #     @param type_list : list of corresponding types
-    #     It returns a list of buffers of given size (one per size specified in
-    #     in the list) and the size of new buffers.
-    #     @remark : Buffers are stored and could be reused.
-    #     @remark : it assumes that all returned buffers are different
-    #     """
-    #     new_alloc = 0
-    #     if type_list is None:
-    #         type_list = [HYSOP_REAL] * len(sizes_list)
-    #     buff_list = []  # Returned list
-    #     keys_list = []
-    #     for s, t in zip(sizes_list, type_list):
-    #         keys_list.append(int(t(0).nbytes * s))
-
-    #     for size, key, t in zip(sizes_list, keys_list, type_list):
-    #         buff = None
-    #         try:
-    #             # List of existing buffers not already in the list
-    #             avail_buff = [b for b in self._locMem_Buffers[key]
-    #                           if b not in buff_list]
-    #             if len(avail_buff) > 0:
-    #                 # adding the first buffer
-    #                 buff = avail_buff[0]
-    #             else:
-    #                 # Allocate a new buffer
-    #                 buff = cl.LocalMemory(int(t(0).nbytes * size))
-    #                 new_alloc += buff.size
-    #                 self._locMem_Buffers[key].append(buff)
-    #         except KeyError:
-    #             # Allocate a fist buffer of given size
-    #             buff = cl.LocalMemory(int(t(0).nbytes * size))
-    #             new_alloc += buff.size
-    #             self._locMem_Buffers[key] = [buff]
-    #         buff_list.append(buff)
-    #     return buff_list, new_alloc
-
-
-def get_opengl_shared_environment(platform_id=None,
-                                  device_id=None,
-                                  device_type=None, precision=HYSOP_REAL,
-                                  comm=None):
-    """Build or get an OpenCL environment with openGL properties.
-
-    Parameters
-    ----------
-    platform_id : int
-        chosen platform id.
-    device_id : int
-       chosen device id.
-    device_type : string
-       chosen device type.
-    precision : int, optional
-       required precision for real data.
-       Default : HYSOP_REAL
-    comm : mpi communicator, optional
-        Communicator which handles the OpenCL env.
-        Default = hysop.core.mpi.main_comm
-
-    Returns
-    -------
-    :class:`~hysop.gpu.tools.OpenClEnvironment`
-        object handling OpenCL platform, device, context and queue
-
-    The context is obtained with gl-shared properties depending on the OS.
-    """
-    if platform_id is None:
-        platform_id = __DEFAULT_PLATFORM_ID__
-    if device_id is None:
-        device_id = __DEFAULT_DEVICE_ID__
-    global __cl_env
-    if __cl_env is None:
-        __cl_env = OpenClEnvironment(platform_id, device_id, device_type,
-                                     precision, gl_sharing=True, comm=comm)
-    else:
-        __cl_env.modify(platform_id, device_id, device_type,
-                        precision, gl_sharing=True)
-    return __cl_env
-
-
-def get_opencl_environment(platform_id=None,
-                           device_id=None,
-                           device_type=None, precision=HYSOP_REAL,
-                           comm=None):
-    """Build or get an OpenCL environment.
-
-    Parameters
-    ----------
-    platform_id : int
-        chosen platform id.
-    device_id : int
-       chosen device id.
-    device_type : string
-       chosen device type.
-    precision : int, optional
-       required precision for real data.
-       Default : HYSOP_REAL
-    comm : mpi communicator, optional
-        Communicator which handles the OpenCL env.
-        Default = hysop.core.mpi.main_comm
-
-    Returns
-    -------
-
-    :class:`~hysop.gpu.tools.OpenClEnvironment`
-        object handling OpenCL platform, device, context and queue
-
-    """
-    if platform_id is None:
-        platform_id = __DEFAULT_PLATFORM_ID__
-    if device_id is None:
-        device_id = __DEFAULT_DEVICE_ID__
-    global __cl_env
-    if __cl_env is None:
-        __cl_env = OpenClEnvironment(platform_id, device_id, device_type,
-                                     precision, comm=comm)
-    else:
-        __cl_env.modify(platform_id, device_id, device_type,
-                        precision)
-    return __cl_env
-
-
-def explore():
-    """Scan system and print OpenCL environment details"""
-    print("OpenCL exploration : ")
-    platforms = cl.get_platforms()
-    platforms_info = ["name", "version", "vendor", "profile", "extensions"]
-    devices_info = ["name",
-                    "version",
-                    "vendor",
-                    "profile",
-                    "extensions",
-                    "available",
-                    "type",
-                    "compiler_available",
-                    "double_fp_config",
-                    "single_fp_config",
-                    "global_mem_size",
-                    "global_mem_cache_type",
-                    "global_mem_cache_size",
-                    "global_mem_cacheline_size",
-                    "local_mem_size",
-                    "local_mem_type",
-                    "max_clock_frequency",
-                    "max_compute_units",
-                    "max_constant_buffer_size",
-                    "max_mem_alloc_size",
-                    "max_work_group_size",
-                    "max_work_item_dimensions",
-                    "max_work_item_sizes",
-                    "preferred_vector_width_double",
-                    "preferred_vector_width_float",
-                    "preferred_vector_width_int"]
-    for pltfm in platforms:
-        print("Platform:", pltfm.name)
-        for pltfm_info in platforms_info:
-            print("  |-", pltfm_info, ':', eval("pltfm." + pltfm_info))
-        devices = pltfm.get_devices()
-        for dvc in devices:
-            print("  |- Device:", dvc.name)
-            for dvc_info in devices_info:
-                print("    |-", dvc_info, ':', eval("dvc." + dvc_info))
diff --git a/hysop/old/gpu.old/visu/__init__.py b/hysop/old/gpu.old/visu/__init__.py
deleted file mode 100644
index 03685a701a45ec979a9fdc3ec3f6073e1f62c435..0000000000000000000000000000000000000000
--- a/hysop/old/gpu.old/visu/__init__.py
+++ /dev/null
@@ -1,2 +0,0 @@
-## @package hysop.gpu.visu
-# Visualisation tools on GPU
diff --git a/hysop/old/gpu.old/visu/gpu-mc.cl b/hysop/old/gpu.old/visu/gpu-mc.cl
deleted file mode 100644
index 3f3545fa5223be1b78d945f5e46052687d532631..0000000000000000000000000000000000000000
--- a/hysop/old/gpu.old/visu/gpu-mc.cl
+++ /dev/null
@@ -1,502 +0,0 @@
-#pragma OPENCL EXTENSION cl_khr_3d_image_writes : enable
-
-__constant sampler_t sampler = CLK_NORMALIZED_COORDS_FALSE | CLK_ADDRESS_CLAMP | CLK_FILTER_NEAREST;
-
-__constant int4 cubeOffsets[8] = {
-		{0, 0, 0, 0},
-		{1, 0, 0, 0},
-		{0, 0, 1, 0},
-		{1, 0, 1, 0},
-		{0, 1, 0, 0},
-		{1, 1, 0, 0},
-		{0, 1, 1, 0},
-		{1, 1, 1, 0},
-	};
-
-__kernel void constructHPLevel(
-		__read_only image3d_t readHistoPyramid,
-		__write_only image3d_t writeHistoPyramid
-	) {
-
-	int4 writePos = {get_global_id(0), get_global_id(1), get_global_id(2), 0};
-	int4 readPos = writePos*2;
-	int writeValue = read_imagei(readHistoPyramid, sampler, readPos).x + // 0
-		read_imagei(readHistoPyramid, sampler, readPos+cubeOffsets[1]).x + // 1
-		read_imagei(readHistoPyramid, sampler, readPos+cubeOffsets[2]).x + // 2
-		read_imagei(readHistoPyramid, sampler, readPos+cubeOffsets[3]).x + // 3
-		read_imagei(readHistoPyramid, sampler, readPos+cubeOffsets[4]).x + // 4
-		read_imagei(readHistoPyramid, sampler, readPos+cubeOffsets[5]).x + // 5
-		read_imagei(readHistoPyramid, sampler, readPos+cubeOffsets[6]).x + // 6
-		read_imagei(readHistoPyramid, sampler, readPos+cubeOffsets[7]).x; // 7
-
-	write_imagei(writeHistoPyramid, writePos, writeValue);
-}
-
-int4 scanHPLevel(int target, __read_only image3d_t hp, int4 current) {
-
-	int8 neighbors = {
-		read_imagei(hp, sampler, current).x,
-		read_imagei(hp, sampler, current + cubeOffsets[1]).x,
-		read_imagei(hp, sampler, current + cubeOffsets[2]).x,
-		read_imagei(hp, sampler, current + cubeOffsets[3]).x,
-		read_imagei(hp, sampler, current + cubeOffsets[4]).x,
-		read_imagei(hp, sampler, current + cubeOffsets[5]).x,
-		read_imagei(hp, sampler, current + cubeOffsets[6]).x,
-		read_imagei(hp, sampler, current + cubeOffsets[7]).x
-	};
-
-	int acc = current.s3 + neighbors.s0;
-	int8 cmp;
-	cmp.s0 = acc <= target;
-	acc += neighbors.s1;
-	cmp.s1 = acc <= target;
-	acc += neighbors.s2;
-	cmp.s2 = acc <= target;
-	acc += neighbors.s3;
-	cmp.s3 = acc <= target;
-	acc += neighbors.s4;
-	cmp.s4 = acc <= target;
-	acc += neighbors.s5;
-	cmp.s5 = acc <= target;
-	acc += neighbors.s6;
-	cmp.s6 = acc <= target;
-	cmp.s7 = 0;
-
-
-	current += cubeOffsets[(cmp.s0+cmp.s1+cmp.s2+cmp.s3+cmp.s4+cmp.s5+cmp.s6+cmp.s7)];
-	current.s0 = current.s0*2;
-	current.s1 = current.s1*2;
-	current.s2 = current.s2*2;
-	current.s3 = current.s3 +
-		cmp.s0*neighbors.s0 +
-		cmp.s1*neighbors.s1 +
-		cmp.s2*neighbors.s2 +
-		cmp.s3*neighbors.s3 +
-		cmp.s4*neighbors.s4 +
-		cmp.s5*neighbors.s5 +
-		cmp.s6*neighbors.s6 +
-		cmp.s7*neighbors.s7;
-	return current;
-
-}
-
-
-__constant char offsets3[72] = {
-			// 0
-			0,0,0,
-			1,0,0,
-			// 1
-			1,0,0,
-			1,0,1,
-			// 2
-			1,0,1,
-			0,0,1,
-			// 3
-			0,0,1,
-			0,0,0,
-			// 4
-			0,1,0,
-			1,1,0,
-			// 5
-			1,1,0,
-			1,1,1,
-			// 6
-			1,1,1,
-			0,1,1,
-			// 7
-			0,1,1,
-			0,1,0,
-			// 8
-			0,0,0,
-			0,1,0,
-			// 9
-			1,0,0,
-			1,1,0,
-			// 10
-			1,0,1,
-			1,1,1,
-			// 11
-			0,0,1,
-			0,1,1
-		};
-
-__constant char triTable[4096] =
-{-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-0, 8, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-0, 1, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, 8, 3, 9, 8, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, 2, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-0, 8, 3, 1, 2, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-9, 2, 10, 0, 2, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-2, 8, 3, 2, 10, 8, 10, 9, 8, -1, -1, -1, -1, -1, -1, -1,
-3, 11, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-0, 11, 2, 8, 11, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, 9, 0, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, 11, 2, 1, 9, 11, 9, 8, 11, -1, -1, -1, -1, -1, -1, -1,
-3, 10, 1, 11, 10, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-0, 10, 1, 0, 8, 10, 8, 11, 10, -1, -1, -1, -1, -1, -1, -1,
-3, 9, 0, 3, 11, 9, 11, 10, 9, -1, -1, -1, -1, -1, -1, -1,
-9, 8, 10, 10, 8, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-4, 7, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-4, 3, 0, 7, 3, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-0, 1, 9, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-4, 1, 9, 4, 7, 1, 7, 3, 1, -1, -1, -1, -1, -1, -1, -1,
-1, 2, 10, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-3, 4, 7, 3, 0, 4, 1, 2, 10, -1, -1, -1, -1, -1, -1, -1,
-9, 2, 10, 9, 0, 2, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1,
-2, 10, 9, 2, 9, 7, 2, 7, 3, 7, 9, 4, -1, -1, -1, -1,
-8, 4, 7, 3, 11, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-11, 4, 7, 11, 2, 4, 2, 0, 4, -1, -1, -1, -1, -1, -1, -1,
-9, 0, 1, 8, 4, 7, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1,
-4, 7, 11, 9, 4, 11, 9, 11, 2, 9, 2, 1, -1, -1, -1, -1,
-3, 10, 1, 3, 11, 10, 7, 8, 4, -1, -1, -1, -1, -1, -1, -1,
-1, 11, 10, 1, 4, 11, 1, 0, 4, 7, 11, 4, -1, -1, -1, -1,
-4, 7, 8, 9, 0, 11, 9, 11, 10, 11, 0, 3, -1, -1, -1, -1,
-4, 7, 11, 4, 11, 9, 9, 11, 10, -1, -1, -1, -1, -1, -1, -1,
-9, 5, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-9, 5, 4, 0, 8, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-0, 5, 4, 1, 5, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-8, 5, 4, 8, 3, 5, 3, 1, 5, -1, -1, -1, -1, -1, -1, -1,
-1, 2, 10, 9, 5, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-3, 0, 8, 1, 2, 10, 4, 9, 5, -1, -1, -1, -1, -1, -1, -1,
-5, 2, 10, 5, 4, 2, 4, 0, 2, -1, -1, -1, -1, -1, -1, -1,
-2, 10, 5, 3, 2, 5, 3, 5, 4, 3, 4, 8, -1, -1, -1, -1,
-9, 5, 4, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-0, 11, 2, 0, 8, 11, 4, 9, 5, -1, -1, -1, -1, -1, -1, -1,
-0, 5, 4, 0, 1, 5, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1,
-2, 1, 5, 2, 5, 8, 2, 8, 11, 4, 8, 5, -1, -1, -1, -1,
-10, 3, 11, 10, 1, 3, 9, 5, 4, -1, -1, -1, -1, -1, -1, -1,
-4, 9, 5, 0, 8, 1, 8, 10, 1, 8, 11, 10, -1, -1, -1, -1,
-5, 4, 0, 5, 0, 11, 5, 11, 10, 11, 0, 3, -1, -1, -1, -1,
-5, 4, 8, 5, 8, 10, 10, 8, 11, -1, -1, -1, -1, -1, -1, -1,
-9, 7, 8, 5, 7, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-9, 3, 0, 9, 5, 3, 5, 7, 3, -1, -1, -1, -1, -1, -1, -1,
-0, 7, 8, 0, 1, 7, 1, 5, 7, -1, -1, -1, -1, -1, -1, -1,
-1, 5, 3, 3, 5, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-9, 7, 8, 9, 5, 7, 10, 1, 2, -1, -1, -1, -1, -1, -1, -1,
-10, 1, 2, 9, 5, 0, 5, 3, 0, 5, 7, 3, -1, -1, -1, -1,
-8, 0, 2, 8, 2, 5, 8, 5, 7, 10, 5, 2, -1, -1, -1, -1,
-2, 10, 5, 2, 5, 3, 3, 5, 7, -1, -1, -1, -1, -1, -1, -1,
-7, 9, 5, 7, 8, 9, 3, 11, 2, -1, -1, -1, -1, -1, -1, -1,
-9, 5, 7, 9, 7, 2, 9, 2, 0, 2, 7, 11, -1, -1, -1, -1,
-2, 3, 11, 0, 1, 8, 1, 7, 8, 1, 5, 7, -1, -1, -1, -1,
-11, 2, 1, 11, 1, 7, 7, 1, 5, -1, -1, -1, -1, -1, -1, -1,
-9, 5, 8, 8, 5, 7, 10, 1, 3, 10, 3, 11, -1, -1, -1, -1,
-5, 7, 0, 5, 0, 9, 7, 11, 0, 1, 0, 10, 11, 10, 0, -1,
-11, 10, 0, 11, 0, 3, 10, 5, 0, 8, 0, 7, 5, 7, 0, -1,
-11, 10, 5, 7, 11, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-10, 6, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-0, 8, 3, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-9, 0, 1, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, 8, 3, 1, 9, 8, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1,
-1, 6, 5, 2, 6, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, 6, 5, 1, 2, 6, 3, 0, 8, -1, -1, -1, -1, -1, -1, -1,
-9, 6, 5, 9, 0, 6, 0, 2, 6, -1, -1, -1, -1, -1, -1, -1,
-5, 9, 8, 5, 8, 2, 5, 2, 6, 3, 2, 8, -1, -1, -1, -1,
-2, 3, 11, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-11, 0, 8, 11, 2, 0, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1,
-0, 1, 9, 2, 3, 11, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1,
-5, 10, 6, 1, 9, 2, 9, 11, 2, 9, 8, 11, -1, -1, -1, -1,
-6, 3, 11, 6, 5, 3, 5, 1, 3, -1, -1, -1, -1, -1, -1, -1,
-0, 8, 11, 0, 11, 5, 0, 5, 1, 5, 11, 6, -1, -1, -1, -1,
-3, 11, 6, 0, 3, 6, 0, 6, 5, 0, 5, 9, -1, -1, -1, -1,
-6, 5, 9, 6, 9, 11, 11, 9, 8, -1, -1, -1, -1, -1, -1, -1,
-5, 10, 6, 4, 7, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-4, 3, 0, 4, 7, 3, 6, 5, 10, -1, -1, -1, -1, -1, -1, -1,
-1, 9, 0, 5, 10, 6, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1,
-10, 6, 5, 1, 9, 7, 1, 7, 3, 7, 9, 4, -1, -1, -1, -1,
-6, 1, 2, 6, 5, 1, 4, 7, 8, -1, -1, -1, -1, -1, -1, -1,
-1, 2, 5, 5, 2, 6, 3, 0, 4, 3, 4, 7, -1, -1, -1, -1,
-8, 4, 7, 9, 0, 5, 0, 6, 5, 0, 2, 6, -1, -1, -1, -1,
-7, 3, 9, 7, 9, 4, 3, 2, 9, 5, 9, 6, 2, 6, 9, -1,
-3, 11, 2, 7, 8, 4, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1,
-5, 10, 6, 4, 7, 2, 4, 2, 0, 2, 7, 11, -1, -1, -1, -1,
-0, 1, 9, 4, 7, 8, 2, 3, 11, 5, 10, 6, -1, -1, -1, -1,
-9, 2, 1, 9, 11, 2, 9, 4, 11, 7, 11, 4, 5, 10, 6, -1,
-8, 4, 7, 3, 11, 5, 3, 5, 1, 5, 11, 6, -1, -1, -1, -1,
-5, 1, 11, 5, 11, 6, 1, 0, 11, 7, 11, 4, 0, 4, 11, -1,
-0, 5, 9, 0, 6, 5, 0, 3, 6, 11, 6, 3, 8, 4, 7, -1,
-6, 5, 9, 6, 9, 11, 4, 7, 9, 7, 11, 9, -1, -1, -1, -1,
-10, 4, 9, 6, 4, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-4, 10, 6, 4, 9, 10, 0, 8, 3, -1, -1, -1, -1, -1, -1, -1,
-10, 0, 1, 10, 6, 0, 6, 4, 0, -1, -1, -1, -1, -1, -1, -1,
-8, 3, 1, 8, 1, 6, 8, 6, 4, 6, 1, 10, -1, -1, -1, -1,
-1, 4, 9, 1, 2, 4, 2, 6, 4, -1, -1, -1, -1, -1, -1, -1,
-3, 0, 8, 1, 2, 9, 2, 4, 9, 2, 6, 4, -1, -1, -1, -1,
-0, 2, 4, 4, 2, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-8, 3, 2, 8, 2, 4, 4, 2, 6, -1, -1, -1, -1, -1, -1, -1,
-10, 4, 9, 10, 6, 4, 11, 2, 3, -1, -1, -1, -1, -1, -1, -1,
-0, 8, 2, 2, 8, 11, 4, 9, 10, 4, 10, 6, -1, -1, -1, -1,
-3, 11, 2, 0, 1, 6, 0, 6, 4, 6, 1, 10, -1, -1, -1, -1,
-6, 4, 1, 6, 1, 10, 4, 8, 1, 2, 1, 11, 8, 11, 1, -1,
-9, 6, 4, 9, 3, 6, 9, 1, 3, 11, 6, 3, -1, -1, -1, -1,
-8, 11, 1, 8, 1, 0, 11, 6, 1, 9, 1, 4, 6, 4, 1, -1,
-3, 11, 6, 3, 6, 0, 0, 6, 4, -1, -1, -1, -1, -1, -1, -1,
-6, 4, 8, 11, 6, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-7, 10, 6, 7, 8, 10, 8, 9, 10, -1, -1, -1, -1, -1, -1, -1,
-0, 7, 3, 0, 10, 7, 0, 9, 10, 6, 7, 10, -1, -1, -1, -1,
-10, 6, 7, 1, 10, 7, 1, 7, 8, 1, 8, 0, -1, -1, -1, -1,
-10, 6, 7, 10, 7, 1, 1, 7, 3, -1, -1, -1, -1, -1, -1, -1,
-1, 2, 6, 1, 6, 8, 1, 8, 9, 8, 6, 7, -1, -1, -1, -1,
-2, 6, 9, 2, 9, 1, 6, 7, 9, 0, 9, 3, 7, 3, 9, -1,
-7, 8, 0, 7, 0, 6, 6, 0, 2, -1, -1, -1, -1, -1, -1, -1,
-7, 3, 2, 6, 7, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-2, 3, 11, 10, 6, 8, 10, 8, 9, 8, 6, 7, -1, -1, -1, -1,
-2, 0, 7, 2, 7, 11, 0, 9, 7, 6, 7, 10, 9, 10, 7, -1,
-1, 8, 0, 1, 7, 8, 1, 10, 7, 6, 7, 10, 2, 3, 11, -1,
-11, 2, 1, 11, 1, 7, 10, 6, 1, 6, 7, 1, -1, -1, -1, -1,
-8, 9, 6, 8, 6, 7, 9, 1, 6, 11, 6, 3, 1, 3, 6, -1,
-0, 9, 1, 11, 6, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-7, 8, 0, 7, 0, 6, 3, 11, 0, 11, 6, 0, -1, -1, -1, -1,
-7, 11, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-7, 6, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-3, 0, 8, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-0, 1, 9, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-8, 1, 9, 8, 3, 1, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1,
-10, 1, 2, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, 2, 10, 3, 0, 8, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1,
-2, 9, 0, 2, 10, 9, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1,
-6, 11, 7, 2, 10, 3, 10, 8, 3, 10, 9, 8, -1, -1, -1, -1,
-7, 2, 3, 6, 2, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-7, 0, 8, 7, 6, 0, 6, 2, 0, -1, -1, -1, -1, -1, -1, -1,
-2, 7, 6, 2, 3, 7, 0, 1, 9, -1, -1, -1, -1, -1, -1, -1,
-1, 6, 2, 1, 8, 6, 1, 9, 8, 8, 7, 6, -1, -1, -1, -1,
-10, 7, 6, 10, 1, 7, 1, 3, 7, -1, -1, -1, -1, -1, -1, -1,
-10, 7, 6, 1, 7, 10, 1, 8, 7, 1, 0, 8, -1, -1, -1, -1,
-0, 3, 7, 0, 7, 10, 0, 10, 9, 6, 10, 7, -1, -1, -1, -1,
-7, 6, 10, 7, 10, 8, 8, 10, 9, -1, -1, -1, -1, -1, -1, -1,
-6, 8, 4, 11, 8, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-3, 6, 11, 3, 0, 6, 0, 4, 6, -1, -1, -1, -1, -1, -1, -1,
-8, 6, 11, 8, 4, 6, 9, 0, 1, -1, -1, -1, -1, -1, -1, -1,
-9, 4, 6, 9, 6, 3, 9, 3, 1, 11, 3, 6, -1, -1, -1, -1,
-6, 8, 4, 6, 11, 8, 2, 10, 1, -1, -1, -1, -1, -1, -1, -1,
-1, 2, 10, 3, 0, 11, 0, 6, 11, 0, 4, 6, -1, -1, -1, -1,
-4, 11, 8, 4, 6, 11, 0, 2, 9, 2, 10, 9, -1, -1, -1, -1,
-10, 9, 3, 10, 3, 2, 9, 4, 3, 11, 3, 6, 4, 6, 3, -1,
-8, 2, 3, 8, 4, 2, 4, 6, 2, -1, -1, -1, -1, -1, -1, -1,
-0, 4, 2, 4, 6, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, 9, 0, 2, 3, 4, 2, 4, 6, 4, 3, 8, -1, -1, -1, -1,
-1, 9, 4, 1, 4, 2, 2, 4, 6, -1, -1, -1, -1, -1, -1, -1,
-8, 1, 3, 8, 6, 1, 8, 4, 6, 6, 10, 1, -1, -1, -1, -1,
-10, 1, 0, 10, 0, 6, 6, 0, 4, -1, -1, -1, -1, -1, -1, -1,
-4, 6, 3, 4, 3, 8, 6, 10, 3, 0, 3, 9, 10, 9, 3, -1,
-10, 9, 4, 6, 10, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-4, 9, 5, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-0, 8, 3, 4, 9, 5, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1,
-5, 0, 1, 5, 4, 0, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1,
-11, 7, 6, 8, 3, 4, 3, 5, 4, 3, 1, 5, -1, -1, -1, -1,
-9, 5, 4, 10, 1, 2, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1,
-6, 11, 7, 1, 2, 10, 0, 8, 3, 4, 9, 5, -1, -1, -1, -1,
-7, 6, 11, 5, 4, 10, 4, 2, 10, 4, 0, 2, -1, -1, -1, -1,
-3, 4, 8, 3, 5, 4, 3, 2, 5, 10, 5, 2, 11, 7, 6, -1,
-7, 2, 3, 7, 6, 2, 5, 4, 9, -1, -1, -1, -1, -1, -1, -1,
-9, 5, 4, 0, 8, 6, 0, 6, 2, 6, 8, 7, -1, -1, -1, -1,
-3, 6, 2, 3, 7, 6, 1, 5, 0, 5, 4, 0, -1, -1, -1, -1,
-6, 2, 8, 6, 8, 7, 2, 1, 8, 4, 8, 5, 1, 5, 8, -1,
-9, 5, 4, 10, 1, 6, 1, 7, 6, 1, 3, 7, -1, -1, -1, -1,
-1, 6, 10, 1, 7, 6, 1, 0, 7, 8, 7, 0, 9, 5, 4, -1,
-4, 0, 10, 4, 10, 5, 0, 3, 10, 6, 10, 7, 3, 7, 10, -1,
-7, 6, 10, 7, 10, 8, 5, 4, 10, 4, 8, 10, -1, -1, -1, -1,
-6, 9, 5, 6, 11, 9, 11, 8, 9, -1, -1, -1, -1, -1, -1, -1,
-3, 6, 11, 0, 6, 3, 0, 5, 6, 0, 9, 5, -1, -1, -1, -1,
-0, 11, 8, 0, 5, 11, 0, 1, 5, 5, 6, 11, -1, -1, -1, -1,
-6, 11, 3, 6, 3, 5, 5, 3, 1, -1, -1, -1, -1, -1, -1, -1,
-1, 2, 10, 9, 5, 11, 9, 11, 8, 11, 5, 6, -1, -1, -1, -1,
-0, 11, 3, 0, 6, 11, 0, 9, 6, 5, 6, 9, 1, 2, 10, -1,
-11, 8, 5, 11, 5, 6, 8, 0, 5, 10, 5, 2, 0, 2, 5, -1,
-6, 11, 3, 6, 3, 5, 2, 10, 3, 10, 5, 3, -1, -1, -1, -1,
-5, 8, 9, 5, 2, 8, 5, 6, 2, 3, 8, 2, -1, -1, -1, -1,
-9, 5, 6, 9, 6, 0, 0, 6, 2, -1, -1, -1, -1, -1, -1, -1,
-1, 5, 8, 1, 8, 0, 5, 6, 8, 3, 8, 2, 6, 2, 8, -1,
-1, 5, 6, 2, 1, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, 3, 6, 1, 6, 10, 3, 8, 6, 5, 6, 9, 8, 9, 6, -1,
-10, 1, 0, 10, 0, 6, 9, 5, 0, 5, 6, 0, -1, -1, -1, -1,
-0, 3, 8, 5, 6, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-10, 5, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-11, 5, 10, 7, 5, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-11, 5, 10, 11, 7, 5, 8, 3, 0, -1, -1, -1, -1, -1, -1, -1,
-5, 11, 7, 5, 10, 11, 1, 9, 0, -1, -1, -1, -1, -1, -1, -1,
-10, 7, 5, 10, 11, 7, 9, 8, 1, 8, 3, 1, -1, -1, -1, -1,
-11, 1, 2, 11, 7, 1, 7, 5, 1, -1, -1, -1, -1, -1, -1, -1,
-0, 8, 3, 1, 2, 7, 1, 7, 5, 7, 2, 11, -1, -1, -1, -1,
-9, 7, 5, 9, 2, 7, 9, 0, 2, 2, 11, 7, -1, -1, -1, -1,
-7, 5, 2, 7, 2, 11, 5, 9, 2, 3, 2, 8, 9, 8, 2, -1,
-2, 5, 10, 2, 3, 5, 3, 7, 5, -1, -1, -1, -1, -1, -1, -1,
-8, 2, 0, 8, 5, 2, 8, 7, 5, 10, 2, 5, -1, -1, -1, -1,
-9, 0, 1, 5, 10, 3, 5, 3, 7, 3, 10, 2, -1, -1, -1, -1,
-9, 8, 2, 9, 2, 1, 8, 7, 2, 10, 2, 5, 7, 5, 2, -1,
-1, 3, 5, 3, 7, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-0, 8, 7, 0, 7, 1, 1, 7, 5, -1, -1, -1, -1, -1, -1, -1,
-9, 0, 3, 9, 3, 5, 5, 3, 7, -1, -1, -1, -1, -1, -1, -1,
-9, 8, 7, 5, 9, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-5, 8, 4, 5, 10, 8, 10, 11, 8, -1, -1, -1, -1, -1, -1, -1,
-5, 0, 4, 5, 11, 0, 5, 10, 11, 11, 3, 0, -1, -1, -1, -1,
-0, 1, 9, 8, 4, 10, 8, 10, 11, 10, 4, 5, -1, -1, -1, -1,
-10, 11, 4, 10, 4, 5, 11, 3, 4, 9, 4, 1, 3, 1, 4, -1,
-2, 5, 1, 2, 8, 5, 2, 11, 8, 4, 5, 8, -1, -1, -1, -1,
-0, 4, 11, 0, 11, 3, 4, 5, 11, 2, 11, 1, 5, 1, 11, -1,
-0, 2, 5, 0, 5, 9, 2, 11, 5, 4, 5, 8, 11, 8, 5, -1,
-9, 4, 5, 2, 11, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-2, 5, 10, 3, 5, 2, 3, 4, 5, 3, 8, 4, -1, -1, -1, -1,
-5, 10, 2, 5, 2, 4, 4, 2, 0, -1, -1, -1, -1, -1, -1, -1,
-3, 10, 2, 3, 5, 10, 3, 8, 5, 4, 5, 8, 0, 1, 9, -1,
-5, 10, 2, 5, 2, 4, 1, 9, 2, 9, 4, 2, -1, -1, -1, -1,
-8, 4, 5, 8, 5, 3, 3, 5, 1, -1, -1, -1, -1, -1, -1, -1,
-0, 4, 5, 1, 0, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-8, 4, 5, 8, 5, 3, 9, 0, 5, 0, 3, 5, -1, -1, -1, -1,
-9, 4, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-4, 11, 7, 4, 9, 11, 9, 10, 11, -1, -1, -1, -1, -1, -1, -1,
-0, 8, 3, 4, 9, 7, 9, 11, 7, 9, 10, 11, -1, -1, -1, -1,
-1, 10, 11, 1, 11, 4, 1, 4, 0, 7, 4, 11, -1, -1, -1, -1,
-3, 1, 4, 3, 4, 8, 1, 10, 4, 7, 4, 11, 10, 11, 4, -1,
-4, 11, 7, 9, 11, 4, 9, 2, 11, 9, 1, 2, -1, -1, -1, -1,
-9, 7, 4, 9, 11, 7, 9, 1, 11, 2, 11, 1, 0, 8, 3, -1,
-11, 7, 4, 11, 4, 2, 2, 4, 0, -1, -1, -1, -1, -1, -1, -1,
-11, 7, 4, 11, 4, 2, 8, 3, 4, 3, 2, 4, -1, -1, -1, -1,
-2, 9, 10, 2, 7, 9, 2, 3, 7, 7, 4, 9, -1, -1, -1, -1,
-9, 10, 7, 9, 7, 4, 10, 2, 7, 8, 7, 0, 2, 0, 7, -1,
-3, 7, 10, 3, 10, 2, 7, 4, 10, 1, 10, 0, 4, 0, 10, -1,
-1, 10, 2, 8, 7, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-4, 9, 1, 4, 1, 7, 7, 1, 3, -1, -1, -1, -1, -1, -1, -1,
-4, 9, 1, 4, 1, 7, 0, 8, 1, 8, 7, 1, -1, -1, -1, -1,
-4, 0, 3, 7, 4, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-4, 8, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-9, 10, 8, 10, 11, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-3, 0, 9, 3, 9, 11, 11, 9, 10, -1, -1, -1, -1, -1, -1, -1,
-0, 1, 10, 0, 10, 8, 8, 10, 11, -1, -1, -1, -1, -1, -1, -1,
-3, 1, 10, 11, 3, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, 2, 11, 1, 11, 9, 9, 11, 8, -1, -1, -1, -1, -1, -1, -1,
-3, 0, 9, 3, 9, 11, 1, 2, 9, 2, 11, 9, -1, -1, -1, -1,
-0, 2, 11, 8, 0, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-3, 2, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-2, 3, 8, 2, 8, 10, 10, 8, 9, -1, -1, -1, -1, -1, -1, -1,
-9, 10, 2, 0, 9, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-2, 3, 8, 2, 8, 10, 0, 1, 8, 1, 10, 8, -1, -1, -1, -1,
-1, 10, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, 3, 8, 9, 1, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-0, 9, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-0, 3, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
--1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1};
-
-#define SIZE **HP_SIZE**
-__kernel void traverseHP(
-        __read_only image3d_t hp0, // Largest HP
-		__read_only image3d_t hp1,
-		__read_only image3d_t hp2,
-		__read_only image3d_t hp3,
-		__read_only image3d_t hp4,
-		__read_only image3d_t hp5,
-        #if SIZE > 64
-		__read_only image3d_t hp6,
-        #endif
-        #if SIZE > 128
-		__read_only image3d_t hp7,
-        #endif
-        #if SIZE > 256
-		__read_only image3d_t hp8,
-        #endif
-        #if SIZE > 512
-		__read_only image3d_t hp9,
-        #endif
-        __global float * VBOBuffer,
-		__private int isolevel,
-		__private int sum
-        ) {
-
-	int target = get_global_id(0);
-	if(target >= sum)
-		target = 0;
-
-	int4 cubePosition = {0,0,0,0}; // x,y,z,sum
-    #if SIZE > 512
-    cubePosition = scanHPLevel(target, hp9, cubePosition);
-    #endif
-    #if SIZE > 256
-    cubePosition = scanHPLevel(target, hp8, cubePosition);
-    #endif
-    #if SIZE > 128
-    cubePosition = scanHPLevel(target, hp7, cubePosition);
-    #endif
-    #if SIZE > 64
-    cubePosition = scanHPLevel(target, hp6, cubePosition);
-    #endif
-    cubePosition = scanHPLevel(target, hp5, cubePosition);
-    cubePosition = scanHPLevel(target, hp4, cubePosition);
-    cubePosition = scanHPLevel(target, hp3, cubePosition);
-    cubePosition = scanHPLevel(target, hp2, cubePosition);
-    cubePosition = scanHPLevel(target, hp1, cubePosition);
-    cubePosition = scanHPLevel(target, hp0, cubePosition);
-	cubePosition.x = cubePosition.x / 2;
-	cubePosition.y = cubePosition.y / 2;
-	cubePosition.z = cubePosition.z / 2;
-
-    char vertexNr = 0;
-	const int4 cubeData = read_imagei(hp0, sampler, cubePosition);
-
-	// max 5 triangles
-	for(int i = (target-cubePosition.s3)*3; i < (target-cubePosition.s3+1)*3; i++) { // for each vertex in triangle
-		const uchar edge = triTable[cubeData.y*16 + i];
-		const int3 point0 = (int3)(cubePosition.x + offsets3[edge*6], cubePosition.y + offsets3[edge*6+1], cubePosition.z + offsets3[edge*6+2]);
-		const int3 point1 = (int3)(cubePosition.x + offsets3[edge*6+3], cubePosition.y + offsets3[edge*6+4], cubePosition.z + offsets3[edge*6+5]);
-
-        // Store vertex in VBO
-
-        const float3 forwardDifference0 = (float3)(
-                (float)(-read_imagei(hp0, sampler, (int4)(point0.x+1, point0.y, point0.z, 0)).z+read_imagei(hp0, sampler, (int4)(point0.x-1, point0.y, point0.z, 0)).z),
-                (float)(-read_imagei(hp0, sampler, (int4)(point0.x, point0.y+1, point0.z, 0)).z+read_imagei(hp0, sampler, (int4)(point0.x, point0.y-1, point0.z, 0)).z),
-                (float)(-read_imagei(hp0, sampler, (int4)(point0.x, point0.y, point0.z+1, 0)).z+read_imagei(hp0, sampler, (int4)(point0.x, point0.y, point0.z-1, 0)).z)
-            );
-        const float3 forwardDifference1 = (float3)(
-                (float)(-read_imagei(hp0, sampler, (int4)(point1.x+1, point1.y, point1.z, 0)).z+read_imagei(hp0, sampler, (int4)(point1.x-1, point1.y, point1.z, 0)).z),
-                (float)(-read_imagei(hp0, sampler, (int4)(point1.x, point1.y+1, point1.z, 0)).z+read_imagei(hp0, sampler, (int4)(point1.x, point1.y-1, point1.z, 0)).z),
-                (float)(-read_imagei(hp0, sampler, (int4)(point1.x, point1.y, point1.z+1, 0)).z+read_imagei(hp0, sampler, (int4)(point1.x, point1.y, point1.z-1, 0)).z)
-            );
-
-	    const int value0 = read_imagei(hp0, sampler, (int4)(point0.x, point0.y, point0.z, 0)).z;
-		const float diff = native_divide(
-			(float)(isolevel-value0),
-			(float)(read_imagei(hp0, sampler, (int4)(point1.x, point1.y, point1.z, 0)).z - value0));
-
-		//const float3 vertex = mix((float3)(point0.x, point0.y, point0.z), (float3)(point1.x, point1.y, point1.z), diff);
-
-		//const float3 normal = mix(forwardDifference0, forwardDifference1, diff);
-        const float3 vertex = (point0.x + (point0.x - point1.x) * diff,
-            point0.y + (point0.y - point1.y) * diff,
-            point0.z + (point0.z - point1.z) * diff);
-
-        const float3 normal =
-          (forwardDifference0.x + (forwardDifference0.x - forwardDifference1.x) *diff,
-           forwardDifference0.y + (forwardDifference0.y - forwardDifference1.y) *diff,
-           forwardDifference0.z + (forwardDifference0.z - forwardDifference1.z) *diff);
-
-        vstore3(vertex, target*6 + vertexNr*2, VBOBuffer);
-		vstore3(normal, target*6 + vertexNr*2 + 1, VBOBuffer);
-
-        ++vertexNr;
-    }
-}
-
-__constant uchar nrOfTriangles[256] = {0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 2, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 3, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 3, 2, 3, 3, 2, 3, 4, 4, 3, 3, 4, 4, 3, 4, 5, 5, 2, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 3, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 4, 2, 3, 3, 4, 3, 4, 2, 3, 3, 4, 4, 5, 4, 5, 3, 2, 3, 4, 4, 3, 4, 5, 3, 2, 4, 5, 5, 4, 5, 2, 4, 1, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 3, 2, 3, 3, 4, 3, 4, 4, 5, 3, 2, 4, 3, 4, 3, 5, 2, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 4, 3, 4, 4, 3, 4, 5, 5, 4, 4, 3, 5, 2, 5, 4, 2, 1, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 2, 3, 3, 2, 3, 4, 4, 5, 4, 5, 5, 2, 4, 3, 5, 4, 3, 2, 4, 1, 3, 4, 4, 5, 4, 5, 3, 4, 4, 5, 5, 2, 3, 4, 2, 1, 2, 3, 3, 2, 3, 4, 2, 1, 3, 2, 4, 1, 2, 1, 1, 0};
-
-__kernel void classifyCubes(
-		__write_only image3d_t histoPyramid,
-		__read_only image3d_t rawData,
-		__private int isolevel
-		) {
-    int4 pos = {get_global_id(0), get_global_id(1), get_global_id(2), 0};
-
-    // Find cube class nr
-	const uchar first = read_imagei(rawData, sampler, pos).x;
-    const uchar cubeindex =
-    ((first > isolevel)) |
-    ((read_imagei(rawData, sampler, pos + cubeOffsets[1]).x > isolevel) << 1) |
-    ((read_imagei(rawData, sampler, pos + cubeOffsets[3]).x > isolevel) << 2) |
-    ((read_imagei(rawData, sampler, pos + cubeOffsets[2]).x > isolevel) << 3) |
-    ((read_imagei(rawData, sampler, pos + cubeOffsets[4]).x > isolevel) << 4) |
-    ((read_imagei(rawData, sampler, pos + cubeOffsets[5]).x > isolevel) << 5) |
-    ((read_imagei(rawData, sampler, pos + cubeOffsets[7]).x > isolevel) << 6) |
-    ((read_imagei(rawData, sampler, pos + cubeOffsets[6]).x > isolevel) << 7);
-
-    // Store number of triangles
-	write_imageui(histoPyramid, pos, (uint4)(nrOfTriangles[cubeindex], cubeindex, first, 0));
-}
diff --git a/hysop/old/gpu.old/visu/marchingcube.py b/hysop/old/gpu.old/visu/marchingcube.py
deleted file mode 100644
index 36e89b0bb3a204352c112a2da585ddc867052bd5..0000000000000000000000000000000000000000
--- a/hysop/old/gpu.old/visu/marchingcube.py
+++ /dev/null
@@ -1,96 +0,0 @@
-"""
-@file marchingcube.py
-Marching cube algorithm to compute isosurface of particle field
-"""
-
-# Math functions
-from math import log
-
-# System call
-import ctypes
-
-from hysop.constant import HYSOP_REAL
-from hysop.backend.device.opencl import cl
-from hysop.backend.device.opencl.opencl_tools import get_opencl_environment
-
-
-class Marching_Cube(object):
-    """
-    Implement marching cube infrastructure for detecting
-    isosurface on GPU
-    """
-    def __init__(self, size):
-        """
-        Build necessary data structure for holding
-        hierarchical data
-        """
-        self._size_ = size
-        self.buffers = []
-        self.usr_src = "gpu-mc.cl"
-        self._cl_env = get_opencl_environment(0, 0, 'gpu', HYSOP_REAL)
-        self._create_cl_context_()
-
-    def _create_cl_context_(self):
-        """
-        Initialize buffer pyramid storing image particle count
-        """
-        buffer_size = self._size_
-        pitch_size = buffer_size * ctypes.c_ubyte
-        shap = (buffer_size, buffer_size, buffer_size)
-        pitc = (pitch_size, pitch_size)
-        img_format = cl.get_supported_image_formats(
-            self._cl_env.ctx, cl.mem_flags.READ_ONLY, cl.mem_object_type.IMAGE3D)[0]
-        img_flag = cl.mem_flags.READ_WRITE
-        self.buffers.append(cl.Image(self._cl_env.ctx, img_flag, img_format, shap))
-
-        buffer_size /= 8
-        shap = (buffer_size, buffer_size, buffer_size)
-        self.buffers.append(cl.Image(self._cl_env.ctx, img_flag, img_format, shap))
-
-        buffer_size /= 8
-        shap = (buffer_size, buffer_size, buffer_size)
-        pitch_size = buffer_size * ctypes.c_ubyte
-        self.buffers.append(cl.Image(self._cl_env.ctx, img_flag, img_format, shap))
-
-        buffer_size /= 8
-        shap = (buffer_size, buffer_size, buffer_size)
-        pitch_size = buffer_size * ctypes.c_ubyte
-        #self.buffers.append(cl.Image(self._cl_env.ctx, img_flag, img_format, shap))
-
-        buffer_size /= 8
-        pitch_size = buffer_size * ctypes.c_ubyte
-        shap = (buffer_size, buffer_size, buffer_size)
-        #self.buffers.append(cl.Image(self._cl_env.ctx, img_flag, img_format, shap))
-
-        for i in range(5,int(log(self._size_, 2))):
-            buffer_size /= 8
-            pitch_size = buffer_size * ctypes.c_ubyte
-            shap = (buffer_size, buffer_size, buffer_size)
-            #self.buffers.append(cl.Image(self._cl_env.ctx, img_flag, img_format, shap))
-
-        # Add cube index
-        buffer_size = self._size_ * self._size_ * self._size_
-        pitch_size = buffer_size * ctypes.c_ubyte
-        shap = (buffer_size, buffer_size, buffer_size)
-        #self.buffers.append(cl.Image(self._cl_env.ctx, img_flag, img_format, shap))
-
-
-        self.gpu_src = ""
-        ## Build code
-        #self.usr_src.
-        options = "-D HP_SIZE=" + str(self._size_)
-        self._cl_env.macros['**HP_SIZE**'] = self._size_
-        self.prg = self._cl_env.build_src(self.usr_src, options)
-        kernel_name = 'constructHPLevel' + self.field.name.split('_D')[0]
-        self.numMethod = OpenClKernelLauncher(eval('self.prg.' + kernel_name),
-                                        self.queue,
-                                        self.gwi,
-                                        self.lwi)
-        kernel_name = 'classifyCubes' + self.field.name.split('_D')[0]
-        kernel_name = 'traverseHP' + self.field.name.split('_D')[0]
-
-if __name__ == "__main__":
-
-    mc = Marching_Cube(256)
-
-    print mc.gpu_src
diff --git a/hysop/old/numerics.old/__init__.py b/hysop/old/numerics.old/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/hysop/old/numerics.old/differential_operations.py b/hysop/old/numerics.old/differential_operations.py
deleted file mode 100755
index 54c5946679e6d89993e7f0dd2bed341c1d6e57a2..0000000000000000000000000000000000000000
--- a/hysop/old/numerics.old/differential_operations.py
+++ /dev/null
@@ -1,1078 +0,0 @@
-# -*- coding: utf-8 -*-
-"""Library of functions used to perform classical vector calculus
-(diff operations like grad, curl ...) based on finite differences schemes.
-
-* :class:`~hysop.numerics.differential_operations.Curl`,
-* :class:`~hysop.numerics.differential_operations.DivRhoV`,
-* :class:`~hysop.numerics.differential_operations.DivWV`,
-* :class:`~hysop.numerics.differential_operations.Laplacian`,
-* :class:`~hysop.numerics.differential_operations.GradS`,
-* :class:`~hysop.numerics.differential_operations.GradV`,
-* :class:`~hysop.numerics.differential_operations.GradVxW`,
-* :class:`~hysop.numerics.differential_operations.DivAdvection`,
-* :class:`~hysop.numerics.differential_operations.Strain`,
-* :class:`~hysop.numerics.differential_operations.StrainCriteria`,
-* :class:`~hysop.numerics.differential_operations.MaxDiagGradV`,
-* :class:`~hysop.numerics.differential_operations.StretchLike`,
-* :class:`~hysop.numerics.differential_operations.DiagAndStretch`,
-* :class:`~hysop.numerics.differential_operations.StrainCriteria`,
-* :class:`~hysop.numerics.differential_operations.StrainAndStretch`,
-* :class:`~hysop.numerics.differential_operations.DifferentialOperation`,
- (abstract base class).
-
-Notes
------
-
-For coherence sake, all input/output variables used in differential operations
-are list of numpy arrays, even when only scalar fields are required.
-
-For example :
-
-* a = curl(b) in 3d, a and b vector fields and so lists of 3 arrays.
-* a = DivRhoV(c,d), d a vector field, a and c scalar fields. d is a list of
-3 arrays and a and c lists of 1 array.
-
-For practical examples of use, see test_differential_operations.py file.
-
-"""
-from hysop.constants import debug, XDIR, YDIR, ZDIR
-from abc import ABCMeta
-from hysop.numerics.finite_differences import FDC4, FDC2, FD2C2
-import numpy as np
-from hysop.tools.numpywrappers import npw
-from hysop.tools.misc import WorkSpaceTools, Utils
-
-
-class DifferentialOperation(object):
-    """Abstract base class for all operations
-    based on finite differences.
-    """
-    __metaclass__ = ABCMeta
-    _authorized_methods = []
-
-    # @debug
-    # def __new__(cls, *args, **kw):
-    #     return object.__new__(cls, *args, **kw)
-
-    @debug
-    def __init__(self, topo, indices=None, reduce_output_shape=False,
-                 method=None, work=None):
-        """
-        Parameters
-        ----------
-        topo : :class:`hysop.topology.topology.CartesianTopology`
-        indices : list of slices, optional
-            Represents the local mesh on which the operation
-            will be applied,
-            like compute_index in :class:`~hysop.domain.mesh.Mesh`.
-            See details in notes.
-        reduce_output_shape : boolean, optional
-            True to return the result in a reduced array. See notes below.
-        method : :class:`hysop.numerics.finite_differences.FiniteDifference`
-        , optional
-            the chosen FD scheme. If None, a default method is set, equal
-            to self._authorized_methods[0].
-        work : list of numpy arrays, optional
-            internal buffers
-
-        Notes
-        -----
-        * Two ways to compute outvar = operation(invar, ...)
-        ** 1 - either invar and outvar are arrays of the same shape, and
-        outvar[...] = op(invar[...]) will be computed. In that case
-        indices and reduce_output_shape are not required.
-        Indices will be set to topo.mesh.compute_index.
-        ** 2 - or outvar is a smaller array than invar, and
-        outvar[...] = op(invar(indices))
-        To use case 2, set reduce_output_shape = True and provide indices.
-        * See ClassName._authorized_methods[0] to find which default method
-        is set for each Operation.
-        * If work = None, some work arrays will be allocated internally.
-        Else, you must provide a list of lwk arrays of shape
-        topo.mesh.local_resolution,
-
-        """
-        self._dim = topo.domain.dim
-        # Set default method
-        if method is None:
-            method = self._authorized_methods[0]
-
-        self.method = method
-        if indices is None:
-            indices = topo.mesh.compute_index
-        self.in_indices = indices
-        # True if work has to be done for this set
-        # of indices on the current proc.
-        self._on_proc = Utils.is_on_proc(indices)
-        self.fd_scheme = self._init_fd_method(topo, reduce_output_shape)
-        self.output_indices = self.fd_scheme.output_indices
-        self._work = self._set_work_arrays(topo, reduce_output_shape, work)
-
-    def _set_work_arrays(self, topo, reduce_output_shape, rwork=None):
-        """Check and/or allocate internal work buffers.
-        """
-        wk_prop = self.get_work_properties(topo, self.in_indices)['rwork']
-        if wk_prop is None:
-            return []
-        if reduce_output_shape:
-            subshape = np.asarray([self.in_indices[i].stop -
-                                   self.in_indices[i].start
-                                   for i in xrange(self._dim)])
-        else:
-            subshape = topo.mesh.local_resolution
-        subshape = tuple(subshape)
-        return WorkSpaceTools.check_work_array(len(wk_prop), subshape, rwork)
-
-    @staticmethod
-    def get_work_properties(topo, indices=None):
-        """Default : no work vector
-        """
-        return DifferentialOperation._find_work(0, topo, indices)
-
-    @staticmethod
-    def _find_work(lwork, topo, indices=None):
-        """Internal function to find the shape of work vector
-        for a given topo and a set of indices
-        """
-        if lwork == 0:
-            return {'rwork': None, 'iwork': None}
-
-        if indices is not None:
-            if not Utils.is_on_proc(indices):
-                shape = (0, ) * topo.domain.dim
-            else:
-                shape = np.prod(tuple([indices[i].stop - indices[i].start
-                                       for i in xrange(len(indices))]))
-        else:
-            shape = np.prod(topo.mesh.local_resolution)
-        return {'rwork': [(shape, ), ] * lwork, 'iwork': None}
-
-    def _init_fd_method(self, topo, reduce_output_shape):
-        """Build the finite difference scheme
-        """
-        msg = 'FD scheme Not yet implemented for this operation.'
-        assert self.method.__mro__[0] in self._authorized_methods, msg
-        if not self._on_proc:
-            empty_set = [slice(0, 0), ] * self._dim
-            return self.method(topo.mesh.space_step,
-                               empty_set)
-
-        fd_scheme = self.method(topo.mesh.space_step,
-                                self.in_indices,
-                                reduce_output_shape)
-        msg = 'Ghost layer is too small for the chosen FD scheme.'
-        required_ghost_layer = fd_scheme.ghosts_layer_size
-        assert (topo.ghosts() >= required_ghost_layer).all(), msg
-        return fd_scheme
-
-
-class Curl(DifferentialOperation):
-    """
-    Computes nabla X V, V being a vector field.
-    """
-    _authorized_methods = [FDC2, FDC4]
-    _lwork = 1
-
-    def __init__(self, **kwds):
-        """Curl of a vector field
-        """
-        super(Curl, self).__init__(**kwds)
-        assert len(self.in_indices) > 1
-        # connect to fd function call
-        self.fcall = self._central
-        if len(self.in_indices) == 2:
-            # a 'fake' curl for 2D case
-            self.fcall = self._central_2d
-
-    @staticmethod
-    def get_work_properties(topo, indices=None):
-        return Curl._find_work(Curl._lwork, topo, indices)
-
-    def __call__(self, variable, result):
-        if self._on_proc:
-            return self.fcall(variable, result)
-        else:
-            return result
-
-    def _central(self, variable, result):
-        """ 3D Curl
-
-        Parameters
-        ----------
-        variable : list of numpy arrays
-            the input vector field
-        result : list of numpy arrays
-            in/out result
-        """
-        assert len(result) == len(variable)
-        #--  d/dy vz -- in result[XDIR]
-        self.fd_scheme.compute(variable[ZDIR], YDIR, result[XDIR])
-        # -- -d/dz vy -- in work
-        self.fd_scheme.compute(variable[YDIR], ZDIR, self._work[0])
-        # result_x = d/dy vz - d/dz vy
-        result[XDIR][self.output_indices] -= self._work[0][self.output_indices]
-
-        #--  d/dz vx -- in result[YDIR]
-        self.fd_scheme.compute(variable[XDIR], ZDIR, result[YDIR])
-        # -- -d/dx vz -- in work
-        self.fd_scheme.compute(variable[ZDIR], XDIR, self._work[0])
-        # result_y = d/dz vx - d/dx vz
-        result[YDIR][self.output_indices] -= self._work[0][self.output_indices]
-
-        #-- d/dx vy in result[ZDIR]
-        self.fd_scheme.compute(variable[YDIR], XDIR, result[ZDIR])
-        # result_z = d/dx vy - d/dy vx
-        self.fd_scheme.compute(variable[XDIR], YDIR, self._work[0])
-        result[ZDIR][self.output_indices] -= self._work[0][self.output_indices]
-
-        return result
-
-    def _central_2d(self, variable, result):
-        """ 2D Curl
-        Parameters
-        ----------
-        variable : list of numpy arrays
-            the input vector field
-        result : list of numpy arrays
-            in/out result
-        """
-        assert len(result) == 1
-        #-- d/dx vy in result[ZDIR]
-        self.fd_scheme.compute(variable[YDIR], XDIR, result[0])
-        # result_z = d/dx vy - d/dy vx
-        self.fd_scheme.compute(variable[XDIR], YDIR, self._work[0])
-        result[0][self.output_indices] -= self._work[0][self.output_indices]
-        return result
-
-
-class DivRhoV(DifferentialOperation):
-    """
-    Computes \f$ \nabla.(\rho V) \f$, \f$ \rho\f$ a scalar field and
-    V a vector field.
-    Works for any dimension.
-
-    Methods : FDC4
-    """
-
-    _authorized_methods = [FDC4]
-
-    def __init__(self, **kwds):
-        """Divergence of rho V, rho a scalar field, V a vector field
-
-        Parameters
-        ----------
-        fd_optim : bool, optional
-            if 'CAA', compute result += div(rhoV) else
-            result = div(rhoV), which is the default.
-        **kwds : parameters for base class
-
-        Default method : FDC4
-        """
-        self._in_gh = None
-        self._wk_indices = None
-        self._fd_work = None
-        super(DivRhoV, self).__init__(**kwds)
-
-    def _init_fd_method(self, topo, reduce_output_shape):
-        """Build the finite difference scheme
-        """
-        msg = 'FD scheme Not yet implemented for this operation.'
-        assert self.method.__mro__[0] in self._authorized_methods, msg
-        # for this operation, the fd scheme uses
-        # work array as input, which means that work
-        # array must include ghost points. This is done
-        # thanks to self.wk_indices
-        if not self._on_proc:
-            empty_set = [slice(0, 0), ] * self._dim
-            self._fd_work = self.method(topo.mesh.space_step,
-                                        empty_set)
-            self._in_gh = empty_set
-            self._wk_indices = empty_set
-            return self._fd_work
-
-        gh = self.method.ghosts_layer_size
-        ref = self.in_indices
-        self._in_gh = [slice(ref[i].start - gh, ref[i].stop + gh)
-                       for i in xrange(self._dim)]
-        for sl in self._in_gh:
-            assert sl.start >= 0
-            assert sl.stop >= 0
-        wk_shape = tuple([self._in_gh[i].stop - self._in_gh[i].start
-                          for i in xrange(self._dim)])
-        self._wk_indices = [slice(gh, wk_shape[i] - gh)
-                            for i in xrange(self._dim)]
-        if reduce_output_shape:
-            iout = [slice(0, wk_shape[i] - 2 * gh)
-                    for i in xrange(self._dim)]
-        else:
-            iout = None
-        fd_scheme = self.method(topo.mesh.space_step,
-                                self._wk_indices,
-                                output_indices=iout)
-        self._fd_work = self.method(topo.mesh.space_step,
-                                    self._wk_indices)
-        msg = 'Ghost layer is too small for the chosen FD scheme.'
-        required_ghost_layer = fd_scheme.ghosts_layer_size
-        assert (topo.ghosts() >= required_ghost_layer).all(), msg
-        return fd_scheme
-
-    def _set_work_arrays(self, topo, reduce_output_shape, rwork=None):
-        """Check and allocate internal work buffers.
-        """
-        wk_prop = self.get_work_properties(topo, self.in_indices)['rwork']
-        if wk_prop is None:
-            return []
-        subshape = np.asarray([self.in_indices[i].stop -
-                               self.in_indices[i].start
-                               for i in xrange(self._dim)])
-        if self._on_proc:
-            subshape += 2 * self.fd_scheme.ghosts_layer_size
-        subshape = tuple(subshape)
-
-        return WorkSpaceTools.check_work_array(len(wk_prop), subshape, rwork)
-
-    @staticmethod
-    def get_work_properties(topo, indices=None):
-        # fd scheme will be applied on work so it needs ghost points.
-        if indices is not None:
-            if not Utils.is_on_proc(indices):
-                shape = (0, ) * topo.domain.dim
-            else:
-                shape = np.asarray([indices[i].stop - indices[i].start
-                                    for i in xrange(len(indices))])
-                shape += 2 * topo.ghosts()
-        else:
-            shape = np.asarray(topo.mesh.local_resolution).copy()
-        shape = np.prod(shape)
-        return {'rwork': [(shape,), ] * 2, 'iwork': None}
-
-    def __call__(self, var1, scal, result):
-        """Apply operation
-
-        Parameters
-        ----------
-        var1 : list of numpy arrays
-            the vector field 'V'
-        scal : list of numpy array
-            the scalar field
-        result : list of numpy arrays
-            in/out buffer
-        Returns
-        -------
-        numpy array
-
-        """
-        assert scal is not result
-        assert len(result) == len(scal)
-        for i in xrange(len(var1)):
-            assert var1[i] is not result
-        if self._on_proc:
-            return self._central4(var1, scal, result)
-        else:
-            return result
-
-    def _central4(self, var1, scal, result):
-        """
-        Compute central finite difference scheme, order 4.
-        No work vector provided by user --> self._work will be
-        used. It must be created at init and thus memshape is required.
-        """
-
-        # _work[0:1] are used as temporary space
-        # for computation
-        # div computations are accumulated into result.
-        # Result does not need initialisation to zero.
-
-        # d/dx (scal * var1x), saved into result
-        np.multiply(scal[0][self._in_gh],
-                    var1[XDIR][self._in_gh], self._work[0])
-        result[0] = self.fd_scheme.compute(self._work[0], XDIR, result[0])
-        # other components (if any) saved into _work[0] and added into result
-        # d/dy (scal * var1y), saved in work and added into result
-        # d/dz(scal * var1z), saved in work and added into result (if 3D)
-        for cdir in xrange(1, len(var1)):
-            np.multiply(scal[0][self._in_gh],
-                        var1[cdir][self._in_gh], self._work[0])
-            self._work[1] = self._fd_work.compute(self._work[0], cdir,
-                                                  self._work[1])
-            result[0][self.output_indices] += self._work[1][self._wk_indices]
-
-        return result
-
-
-class DivWV(DivRhoV):
-    """
-    Computes nabla.(W.Vx, W.Vy, W.Vz), W and V some vector fields.
-
-    """
-
-    _authorized_methods = [FDC4]
-
-    def __init__(self, **kwds):
-        """Divergence of (W.Vx, W.Vy, W.Vz), W, V two vector fields
-
-        Parameters
-        ----------
-        **kwds : parameters for base class
-
-        Default method : FDC4
-        """
-        super(DivWV, self).__init__(**kwds)
-        msg = 'Implemented only in 3D.'
-        assert self._dim == 3, msg
-
-    def __call__(self, var1, var2, result):
-        """Apply operation
-
-        Parameters
-        ----------
-        var1 : list of numpy arrays
-            the vector field 'W'
-        var2 : list of numpy arrays
-            the vector field 'V'
-        result : list of numpy arrays
-            in/out buffers
-        Returns
-        -------
-        list of numpy arrays
-
-        """
-        assert len(result) == len(var2)
-        if self._on_proc:
-            for cdir in xrange(len(var2)):
-                result[cdir:cdir + 1] = self._central4(
-                    var1, var2[cdir:cdir + 1], result=result[cdir:cdir + 1])
-
-        return result
-
-
-class Laplacian(DifferentialOperation):
-    """Computes  the laplacian of a field.
-    """
-    _authorized_methods = [FD2C2]
-    _lwork = 1
-
-    @staticmethod
-    def get_work_properties(topo, indices=None):
-        return Laplacian._find_work(Laplacian._lwork, topo, indices)
-
-    def __call__(self, var, result):
-        """"Apply laplacian
-
-        Parameters
-        ----------
-        var : list of numpy arrays
-            the scalar field
-        result : list of numpy arrays
-            in/out buffers
-        Returns
-        -------
-        numpy array
-        """
-        assert len(var) == len(result)
-        if self._on_proc:
-            for d in xrange(len(var)):
-                self.fd_scheme.compute(var[d], 0, result[d])
-                for cdir in xrange(1, self._dim):
-                    self.fd_scheme.compute_and_add(var[d], cdir, result[d],
-                                                   self._work[0])
-        return result
-
-
-class GradS(DifferentialOperation):
-    """Gradient of a scalar field
-    """
-    _authorized_methods = [FDC4, FDC2]
-
-    def __call__(self, scal, result):
-        """Apply gradient, with central finite difference scheme.
-
-        Parameters
-        ----------
-        scal : list of numpy arrays
-            the input scalar field
-        result : list of numpy arrays
-            in/out result
-        """
-        assert len(result) == self._dim
-        if self._on_proc:
-            for cdir in xrange(self._dim):
-                #  d/dcdir (scal), saved in data[cdir]
-                self.fd_scheme.compute(scal[0], cdir, result[cdir])
-
-        return result
-
-
-class GradV(DifferentialOperation):
-    """Gradient of a vector field
-    """
-    _authorized_methods = [FDC4, FDC2]
-
-    def __call__(self, var1, result):
-        """Apply gradient
-
-        Parameters
-        ----------
-        var1 : list of numpy arrays
-            the input vector field
-        result : list of numpy arrays
-            in/out result
-        """
-        nbc = len(var1)
-        assert len(result) == nbc * self._dim
-        if self._on_proc:
-            for cdir in xrange(self._dim):
-                pos = nbc * cdir
-                for ddir in xrange(self._dim):
-                    #  d/dddir (v[cdir]), saved in result[pos]
-                    self.fd_scheme.compute(var1[cdir], ddir, result[pos])
-                    pos += 1
-
-        return result
-
-
-class GradVxW(DifferentialOperation):
-    """Computes [nabla(V)][W] with
-    V and W some vector fields.
-    """
-    _authorized_methods = [FDC4, FDC2]
-    _lwork = 2
-
-    @staticmethod
-    def get_work_properties(topo, indices=None):
-        return DifferentialOperation._find_work(GradVxW._lwork, topo, indices)
-
-    def __call__(self, var1, var2, result, diagnostics):
-        """Apply gradient, with central finite difference scheme.
-
-        Parameters
-        ----------
-        var1, var2 : list of numpy arrays
-           the input vector fields
-        result : list of numpy arrays
-            in/out result. Overwritten.
-        diagnostics : numpy array
-            some internal diagnostics (like max of div(v) ...).
-            In/out param, overwritten
-        """
-        assert len(result) == len(var1)
-        if self._on_proc:
-            diagnostics[:] = 0.
-            nbc = len(var1)
-            for comp in xrange(nbc):
-                result[comp][...] = 0.0
-                self._work[1][...] = 0.0
-                for cdir in xrange(self._dim):
-                    # self._work = d/dcdir (var1_comp)
-                    self.fd_scheme.compute(var1[comp], cdir, self._work[0])
-                    # some diagnostics ...
-                    if cdir == comp:
-                        tmp = np.max(abs(self._work[0][self.output_indices]))
-                        diagnostics[0] = max(diagnostics[0], tmp)
-                    np.add(abs(self._work[0]), self._work[1], self._work[1])
-
-                    # compute self._work = self._work.var2[cdir]
-                    np.multiply(self._work[0][self.output_indices],
-                                var2[cdir][self.in_indices],
-                                self._work[0][self.output_indices])
-                    # sum to obtain nabla(var_comp) . var2,
-                    # saved into result[comp]
-                    npw.add(result[comp][self.output_indices],
-                            self._work[0][self.output_indices],
-                            result[comp][self.output_indices])
-                diagnostics[1] = max(
-                    diagnostics[1], np.max(self._work[1][self.output_indices]))
-
-        return result, diagnostics
-
-
-class DivAdvection(DifferentialOperation):
-    """ Computes -nabla .(V . nabla V) with V a vector field.
-    """
-    _authorized_methods = [FDC4, FDC2]
-    _lwork = 3
-
-    @staticmethod
-    def get_work_properties(topo, indices=None):
-        return DifferentialOperation._find_work(DivAdvection._lwork,
-                                                topo, indices)
-
-    def __call__(self, var1, result):
-        """Apply divergence, with central finite difference scheme.
-
-        Parameters
-        ----------
-        var1 : list of numpy arrays
-            the input vector field
-        result : list of numpy array
-            in/out result, overwritten.
-        """
-        assert len(result) == 1
-        nbc = len(var1)
-        if not self._on_proc:
-            return result
-        assert nbc == 3, 'Only 3D case is implemented.'
-        # Compute diff(var[dir], dir), saved in work[dir]
-        for d in xrange(nbc):
-            self.fd_scheme.compute(var1[d], d, self._work[d])
-
-        # result = Vx,x * Vy,y
-        np.multiply(self._work[XDIR][self.output_indices],
-                    self._work[YDIR][self.output_indices],
-                    result[0][self.output_indices])
-        # wk[0] = Vx,x * Vz,z
-        np.multiply(self._work[XDIR][self.output_indices],
-                    self._work[ZDIR][self.output_indices],
-                    self._work[XDIR][self.output_indices])
-        # result = result + Vx,x * Vz,z
-        np.add(self._work[XDIR][self.output_indices],
-               result[0][self.output_indices],
-               result[0][self.output_indices])
-        # wk[1] = Vy,y * Vz,z
-        np.multiply(self._work[YDIR][self.output_indices],
-                    self._work[ZDIR][self.output_indices],
-                    self._work[YDIR][self.output_indices])
-        # result = result + Vy,y * Vz,z
-        np.add(self._work[YDIR][self.output_indices],
-               result[0][self.output_indices],
-               result[0][self.output_indices])
-
-        self.fd_scheme.compute(var1[XDIR], YDIR, self._work[0])
-        self.fd_scheme.compute(var1[YDIR], XDIR, self._work[1])
-        # wk[0] = Vx,y * Vy,x
-        np.multiply(self._work[0][self.output_indices],
-                    self._work[1][self.output_indices],
-                    self._work[0][self.output_indices])
-        # result = result - Vx,y * Vy,x
-        np.subtract(result[0], self._work[0], result[0])
-
-        self.fd_scheme.compute(var1[XDIR], ZDIR, self._work[0])
-        self.fd_scheme.compute(var1[ZDIR], XDIR, self._work[1])
-        # wk[0] = Vx,z * Vz,x
-        np.multiply(self._work[0], self._work[1], self._work[0])
-        # result = result - Vx,z * Vz,x
-        np.subtract(result[0][self.output_indices],
-                    self._work[0][self.output_indices],
-                    result[0][self.output_indices])
-        self.fd_scheme.compute(var1[YDIR], ZDIR, self._work[0])
-        self.fd_scheme.compute(var1[ZDIR], YDIR, self._work[1])
-        # wk[0] = Vy,z * Vz,y
-        np.multiply(self._work[0][self.output_indices],
-                    self._work[1][self.output_indices],
-                    self._work[0][self.output_indices])
-        # result = result - Vy,z * Vz,y
-        np.subtract(result[0][self.output_indices],
-                    self._work[0][self.output_indices],
-                    result[0][self.output_indices])
-
-        result[0][self.output_indices] *= 2.0
-        return result
-
-
-class Strain(DifferentialOperation):
-    """Compute 0.5(grad U + grad U^T), U a vector field
-    """
-    _authorized_methods = [FDC4, FDC2]
-
-    def __init__(self, **kwds):
-        super(Strain, self).__init__(**kwds)
-        if self._dim == 3:
-            self._i1 = [0, 0, 1]
-            self._i2 = [1, 2, 2]
-        elif self._dim == 2:
-            self._i1 = [0, ]
-            self._i2 = [1, ]
-
-    def __call__(self, var1, result):
-        """Compute strain
-
-        Parameters
-        ----------
-        var1 : list of numpy arrays
-            the input vector field
-        result : list of numpy arrays
-            in/out result
-
-        result = [diagonal components, extra-diag comp]
-
-        For example in 3D :
-
-        result = [Strain00, Strain11, Strain22, Strain01, Strain02, Strain12]
-        """
-        nbc = len(var1)
-        assert len(result) == self._dim * (self._dim + 1) / 2
-        if not self._on_proc:
-            return result
-
-        pos = nbc
-        # Extra diagonal terms. result[0] is used as temporary
-        # work array.
-        for i1, i2 in zip(self._i1, self._i2):
-            self.fd_scheme.compute(var1[i1], i2, result[pos])
-            self.fd_scheme.compute_and_add(var1[i2], i1, result[pos],
-                                           result[0])
-            np.multiply(result[pos], 0.5, result[pos])
-            pos += 1
-        # Then diagonal terms
-        for cdir in xrange(self._dim):
-            self.fd_scheme.compute(var1[cdir], cdir, result[cdir])
-
-        return result
-
-
-class StrainCriteria(DifferentialOperation):
-    """For Strain = 0.5(grad U + grad U^T), U a vector field
-    compute max(sum_j abs(Strain_i,j))
-
-    Warning : this is a local (mpi) max computation, since input array var1
-    is also a local array.
-    """
-    _authorized_methods = [FDC4, FDC2]
-
-    def __init__(self, **kwds):
-        super(StrainCriteria, self).__init__(**kwds)
-        if self._dim == 3:
-            self._i1 = [0, 0, 1]
-            self._i2 = [1, 2, 2]
-            self._pos = [0, 1, 2]
-            self._adds = [[0, 1], [0, 2], [1, 2]]
-        elif self._dim == 2:
-            self._i1 = [0, ]
-            self._i2 = [1, ]
-            self._pos = [0]
-            self._adds = [[0, ], [0, ]]
-
-    @staticmethod
-    def get_work_properties(topo, indices=None):
-        dime = topo.domain.dim
-        if dime == 2:
-            lwork = 2
-        elif dime == 3:
-            lwork = 4
-
-        return DifferentialOperation._find_work(lwork, topo, indices)
-
-    def __call__(self, var1, result):
-        """Compute criteria
-
-        Parameters
-        ----------
-        var1 : list of numpy arrays
-            the input vector field
-        result : numpy array
-            criteria values for each direction.
-
-        """
-        work = self._work
-        assert len(result) == self._dim
-        if not self._on_proc:
-            return result
-
-        # First, compute extra-diagonal terms
-        for i1, i2, ipos in zip(self._i1, self._i2, self._pos):
-            self.fd_scheme.compute(var1[i1], i2, work[ipos])
-            self.fd_scheme.compute_and_add(var1[i2], i1, work[ipos],
-                                           work[-1])
-            np.multiply(work[ipos], 0.5, work[ipos])
-            np.abs(work[ipos], work[ipos])
-
-        # Then add those terms to diagonal term and compute max
-        for idiag in xrange(self._dim):
-            self.fd_scheme.compute(var1[idiag], idiag, work[-1])
-            np.abs(work[-1], work[-1])
-            for i1 in self._adds[idiag]:
-                np.add(work[i1], work[-1], work[-1])
-            if self._on_proc:
-                result[idiag] = np.max(work[-1][self.output_indices])
-            else:
-                result[idiag] = 0.0
-        return result
-
-
-class MaxDiagGradV(DifferentialOperation):
-    """Maximum of each diagonal term (abs) of the Gradient of a vector field
-    """
-    _authorized_methods = [FDC4, FDC2]
-    _lwork = 1
-
-    @staticmethod
-    def get_work_properties(topo, indices=None):
-        return DifferentialOperation._find_work(MaxDiagGradV._lwork,
-                                                topo, indices)
-
-    def __call__(self, var1, result):
-        """Compute max
-
-        Parameters
-        ----------
-        var1 : list of numpy arrays
-            the input vector field
-        result : numpy array
-            in/out result
-        """
-        assert len(result) == self._dim
-        if not self._on_proc:
-            return result
-
-        for cdir in xrange(self._dim):
-            #  d/dcdir (var1[cdir]) saved in work
-            self._work[0] = self.fd_scheme.compute(var1[cdir],
-                                                   cdir, self._work[0])
-            np.abs(self._work[0], self._work[0])
-            if self._on_proc:
-                result[cdir] = np.max(self._work[0][self.output_indices])
-
-        return result
-
-
-class StretchLike(DifferentialOperation):
-    """Maximum of the sum of each 'line' (abs) of the Gradient of a vector field
-
-    Warning: local (mpi) maximum.
-    """
-    _authorized_methods = [FDC4, FDC2]
-    _lwork = 2
-
-    @staticmethod
-    def get_work_properties(topo, indices=None):
-        return DifferentialOperation._find_work(StretchLike._lwork,
-                                                topo, indices)
-
-    def __call__(self, var1, result):
-        """Compute max
-
-        Parameters
-        ----------
-        var1 : list of numpy arrays
-            the input vector field
-        result : numpy array
-            in/out result
-
-        result[i] = max(sum_j abs(diff(u_i, j)) for i = 0..dim
-        """
-        assert len(result) == self._dim
-        result[...] = 0.
-        if not self._on_proc:
-            return result
-        for cdir in xrange(self._dim):
-            self._work[1][...] = 0.0
-            for ddir in xrange(self._dim):
-                #  d/dddir (var1[cdir]) saved in work
-                self._work[0] = self.fd_scheme.compute(var1[cdir],
-                                                       ddir, self._work[0])
-                np.abs(self._work[0], self._work[0])
-                np.add(self._work[0], self._work[1], self._work[1])
-            if self._on_proc:
-                result[cdir] = np.max(self._work[1][self.output_indices])
-
-        return result
-
-
-class DiagAndStretch(DifferentialOperation):
-    """Maximum of the sum of each 'line' of abs of the Gradient of a vector field
-    and max of each abs of 'diagonal' block of the gradient.
-
-    Warning : this is a local (mpi) max computation, since input array var1
-    is also a local array.
-    """
-    _authorized_methods = [FDC4, FDC2]
-    _lwork = 2
-
-    @staticmethod
-    def get_work_properties(topo, indices=None):
-        return DifferentialOperation._find_work(DiagAndStretch._lwork,
-                                                topo, indices)
-
-    def __call__(self, var1, result):
-        """Compute max
-
-        Parameters
-        ----------
-        var1 : list of numpy arrays
-            the input vector field
-        result : numpy array
-            in/out result
-
-        result[i] = max(abs(diff(u_i,i))) for i = 0..dim
-        result[dim:...] = max(sum_j abs(diff(u_i, j)) for i = 0..dim
-        """
-        assert len(result) == 2 * self._dim
-        result[...] = 0.
-
-        if not self._on_proc:
-            return result
-
-        pos = 0
-        for cdir in xrange(self._dim):
-            self._work[1][...] = 0.0
-            for ddir in xrange(self._dim):
-                #  d/dcdir (var1[cdir]) saved in work
-                self._work[0] = self.fd_scheme.compute(var1[cdir],
-                                                       ddir, self._work[0])
-                np.abs(self._work[0], self._work[0])
-                if cdir == ddir and self._on_proc:
-                    result[pos] = np.max(self._work[0][self.output_indices])
-                    pos += 1
-                np.add(self._work[0], self._work[1], self._work[1])
-            if self._on_proc:
-                result[self._dim + cdir] = np.max(
-                    self._work[1][self.output_indices])
-
-        return result
-
-
-class StrainAndStretch(DifferentialOperation):
-    """For Strain = 0.5(grad U + grad U^T), U a vector field
-    compute max(sum_j abs(Strain_i,j)) and maximum of the sum
-    of each 'line' (abs) of the Gradient of U.
-
-    Warning : this is a local (mpi) max computation, since input array var1
-    is also a local array.
-
-    result = [ max(sum_j(|strain_0,j|), ...],
-               max(sum_j(|u0,j|), max(sum_j(|u1,j|)], ...]
-    """
-    _authorized_methods = [FDC4, FDC2]
-
-    def __init__(self, **kwds):
-        super(StrainAndStretch, self).__init__(**kwds)
-        if self._dim == 3:
-            self._fcall = self._call_3d
-        elif self._dim == 2:
-            self._fcall = self._call_2d
-
-    @staticmethod
-    def get_work_properties(topo, indices=None):
-        dime = topo.domain.dim
-        if dime == 2:
-            lwork = 4
-        elif dime == 3:
-            lwork = 6
-
-        return DifferentialOperation._find_work(lwork, topo, indices)
-
-    def __call__(self, var1, result):
-        """Compute criteria
-
-        Parameters
-        ----------
-        var1 : list of numpy arrays
-            the input vector field
-        result : numpy array
-            criteria values for each direction.
-
-        """
-        if self._on_proc:
-            return self._fcall(var1, result)
-        else:
-            return result
-
-    def _call_3d(self, var1, result):
-        work = self._work
-        assert len(result) == 2 * self._dim
-        # work[0] = abs(eps_x,x)
-        self.fd_scheme.compute(var1[XDIR], XDIR, work[0])
-        np.abs(work[0], work[0])
-
-        # work[1] = diff(u(x), i2)
-        # work[2] = abs(work[1])
-        # work[3] = diff(u(x), i3)
-        # work[4] = abs(work[3])
-        self.fd_scheme.compute(var1[XDIR], YDIR, work[1])
-        np.abs(work[1], work[2])
-        self.fd_scheme.compute(var1[XDIR], ZDIR, work[3])
-        np.abs(work[3], work[4])
-        # max(sum_j(abs(diff(u(x),j))))
-        np.add(work[2], work[4], work[2])
-        np.add(work[2], work[0], work[2])
-        result[3] = np.max(work[2][self.output_indices])
-        # free: p[2, 4, 5]
-        # work[5] = diff(u(y), x)
-        self.fd_scheme.compute(var1[YDIR], XDIR, work[5])
-        # ... used to compute work[1] = abs(eps_x,y)
-        np.add(work[1], work[5], work[1])
-        np.multiply(work[1], 0.5, work[1])
-        np.abs(work[1], work[1])
-        # work[4] = diff(u(z), x)
-        self.fd_scheme.compute(var1[ZDIR], XDIR, work[4])
-        # ... used to compute work[2] = abs(eps_x,z)
-        np.add(work[4], work[3], work[2])
-        np.multiply(work[2], 0.5, work[2])
-        np.abs(work[2], work[2])
-        # At this point,
-        # work[p[0, 1, 2] = abs([epsxx, epsxy, epsxz])
-        # compute max(epsxx + epsxy + epsxz)
-        np.add(work[1], work[0], work[0])
-        np.add(work[2], work[0], work[0])
-        result[0] = np.max(work[0][self.output_indices])
-        # work[4] = abs(diff(u(z), x))
-        # work[5] = abs(diff(u(y), x))
-        np.abs(work[4], work[4])
-        np.abs(work[5], work[5])
-        # free : p[0, 3]
-        # work[0] = abs(eps_y,y)
-        self.fd_scheme.compute(var1[YDIR], YDIR, work[0])
-        np.abs(work[0], work[0])
-        np.add(work[0], work[1], work[1])
-        np.add(work[0], work[5], work[5])
-        # work[0] = abs(diff(u(y), z))
-        self.fd_scheme.compute(var1[YDIR], ZDIR, work[3])
-        np.abs(work[3], work[0])
-        np.add(work[0], work[5], work[5])
-        result[4] = np.max(work[5][self.output_indices])
-        # free : p[0, 5]
-        # work[0] = diff(u(i1), i2)
-        # work[5] = abs(diff(u(i1), i2))
-        self.fd_scheme.compute(var1[ZDIR], YDIR, work[0])
-        np.abs(work[0], work[5])
-        # ... used to compute work[0] = abs(eps_yz)
-        np.add(work[0], work[3], work[0])
-        np.multiply(work[0], 0.5, work[0])
-        np.abs(work[0], work[0])
-        # compute max(epsyy + epsxy + epsyz)
-        np.add(work[1], work[0], work[1])
-        result[1] = np.max(work[1][self.output_indices])
-        np.add(work[2], work[0], work[2])
-        # work[0] = abs(diff(uzz)
-        self.fd_scheme.compute(var1[2], 2, work[0])
-        np.abs(work[0], work[0])
-        np.add(work[2], work[0], work[2])
-        result[2] = np.max(work[2][self.output_indices])
-        np.add(work[0], work[4], work[0])
-        np.add(work[0], work[5], work[0])
-        result[5] = np.max(work[0][self.output_indices])
-        # ouf!
-
-        return result
-
-    def _call_2d(self, var1, result):
-        work = self._work
-        assert len(result) == 2 * self._dim
-        # work[0] = abs(eps_x,x)
-        self.fd_scheme.compute(var1[XDIR], XDIR, work[0])
-        np.abs(work[0], work[0])
-        
-        # work[1] = diff(u(x), y)
-        # work[2] = abs(diff(u(x), y))
-        # work[3] = diff(u(y), x)
-        self.fd_scheme.compute(var1[XDIR], YDIR, work[1])
-        np.abs(work[1], work[2])
-        self.fd_scheme.compute(var1[YDIR], XDIR, work[3])
-        np.add(work[0], work[2], work[2])
-        result[2] = np.max(work[2][self.output_indices])
-        # work[1] = epsxy
-        np.add(work[1], work[3], work[1])
-        np.multiply(work[1], 0.5, work[1])
-        np.abs(work[1], work[1])
-        np.add(work[0], work[1], work[0])
-        result[0] = np.max(work[0][self.output_indices])
-        # work[0] = abs(eps_y,y)
-        self.fd_scheme.compute(var1[YDIR], YDIR, work[0])
-        np.abs(work[0], work[0])
-        # work[2] = abs(uy,x)
-        np.abs(work[3], work[2])
-        np.add(work[2], work[0], work[2])
-        result[3] = np.max(work[2][self.output_indices])
-        np.add(work[0], work[1], work[0])
-        result[0] = np.max(work[0][self.output_indices])
-        return result
diff --git a/hysop/old/numerics.old/extras_f/arnoldi.f95 b/hysop/old/numerics.old/extras_f/arnoldi.f95
deleted file mode 100644
index 0fef4f5fca58717d404424588688dff57cbf7d60..0000000000000000000000000000000000000000
--- a/hysop/old/numerics.old/extras_f/arnoldi.f95
+++ /dev/null
@@ -1,220 +0,0 @@
-!=========================================================================
-!Computation of global modes using a time stepping (snapshots) technique
-!=========================================================================
-!=========================================================================
-!Reads snapshots, constructs the Hessenberg matrix
-!and computes the eigen-values/eigen-functions
-!=========================================================================
-module arnoldi
-
-  use precision
-  use parameters
-
-  implicit none
-
-contains
-
-  !======================
-  subroutine arnoldi3d(Mu,ncli,nt,nfp,nmodes,Tps)
-    implicit none
-    integer, intent(in) :: ncli ! number of snapshot
-    integer, intent(in) :: nt  ! total number of points per snapshot
-    integer, intent(in) :: nmodes  ! number of desired modes
-    integer, intent(in) :: nfp ! number of desired eigen functions
-    real(wp), intent(in) :: Tps ! sampling time step
-    real(wp), dimension(:,:), intent(inout) :: Mu ! snapshots
-
-    real(wp), dimension(:,:), allocatable :: un  ! orthonomalized Krylov basis
-    real(wp), dimension(:,:), allocatable :: Hessenberg ! Hessenberg matrix
-    complex(wp), dimension(:), allocatable :: VP ! eigenvalues
-    complex(wp), dimension(:,:), allocatable :: FP, FP_J ! eigen functions
-    real(wp), dimension(:), allocatable :: reslog, res ! residuals
-    integer, dimension(:), allocatable :: t ! sorting array
-    real(wp), dimension(:), allocatable :: tab !
-    real(wp)  :: norm, prod, error !
-
-    integer :: i,j,nmax
-
-    allocate(un(nt,ncli),Hessenberg(ncli,ncli-1),VP(ncli-1),FP(ncli-1,ncli-1),FP_J(nt,ncli))
-
-    nmax=0
-    VP=(0.,0.); FP=(0.,0.); FP_J=(0.,0.)
-    Hessenberg=0.
-
-    !==================================
-    !   Arnoldi method
-    !==================================
-
-    norm = dot_product(Mu(:,1),Mu(:,1))
-    norm = sqrt(norm)
-    un(:,1) = Mu(:,1)/norm  ! first normalized vector u1
-
-    do j=2,ncli ! construct a normalized base u2... un
-       un(:,j)=Mu(:,j) ! w=Mu_j (We have Mu_j=U(:,j+1))
-       do i=1,j-1
-          Hessenberg(i,j-1)=dot_product(un(:,i),un(:,j))
-          un(:,j)=un(:,j)-un(:,i)*Hessenberg(i,j-1)
-       enddo
-
-       norm = dot_product(un(:,j),un(:,j))
-       Hessenberg(j,j-1) = sqrt(norm)
-
-       un(:,j) = un(:,j)/Hessenberg(j,j-1)! normalization
-
-    enddo
-
-    !  do i=1,nt
-    !    print *, 'Krylov basis:', un(i,:)
-    !  enddo
-
-    do i=1,ncli-1
-       print *, 'Hessenberg matrix:', Hessenberg(i,:)
-    enddo
-
-
-    !Check orthonormalization
-    !==================================
-
-    print *,'check ortho'
-
-    prod=0.
-    do i=1,ncli
-       do j=1,ncli
-          prod=dot_product(un(:,j),un(:,i))
-          if (abs(prod).gt.1e-14) then
-             print *,i,j,abs(prod)
-          endif
-       enddo
-    enddo
-
-
-    !Eigen-values and Eigen-functions related to Hessenberg matrix
-    ! +
-    !Eigen-values related to Jacobian matrix ==> spectra
-    !==============================================================
-
-    open(unit=10, file='spectrum.dat')
-    open(unit=11, file='spectrum_log.dat')
-
-    call spectrum(Hessenberg(1:ncli-1,:),ncli-1,VP,FP)
-
-    do i=1,ncli-1
-       write(10,*) dble(VP(i)), aimag(VP(i))
-       write(11,*) dble(log(VP(i)))/Tps, ATAN(aimag(VP(i)/DBLE(VP(i))))/Tps
-    enddo
-    close(10)
-    close(11)
-
-    !Eigen-functions related to Jacobian matrix
-    !==========================================
-    FP_J(1:nt,1:ncli-1)=matmul(un(1:nt,1:ncli-1),FP(1:ncli-1,1:ncli-1))
-    !  do i=1,nt
-    !    print *, 'FP_J', (FP_J(i,j),j=1,ncli-1)
-    !  enddo
-
-    !Residual calculation with respect to each mode
-    !==============================================
-
-    allocate(res(ncli-1),reslog(ncli-1))
-    error = Hessenberg(ncli,ncli-1)
-    print *,'last Hess',Hessenberg(ncli,ncli-1)
-
-    do i=1,ncli-1
-       res(i)   = abs(FP(ncli-1,i))*error
-       reslog(i)=-log10(res(i))
-       print *,'residual',reslog(i),res(i)
-    enddo
-
-
-    !Modes are sorted with respect to residual
-    !==========================================
-    allocate(t(ncli-1))
-
-    do i=1,ncli-1
-       t(i)=i
-    enddo
-
-    call sort(reslog,ncli-1,t)
-
-    open(unit=201,file='spectrum_res.dat')
-    write(201,*)'VARIABLES ="WR","WI","RES"'
-
-    do i=1,nmodes
-       write(201,100) dble(log(VP(t(i))))/Tps,&
-            ATAN(aimag(VP(t(i))/DBLE(VP(t(i)))))/Tps,&
-            res(t(i))
-    enddo
-    close(201)
-    !
-    !Write the associated eigen functions
-    !====================================
-    !  allocate(tab(nfp))
-    !
-    !  open(unit=107, file='table.dat')
-    !  open(unit=108, file='spectrum_sorted.dat')
-    !
-    !  do i=1,nfp
-    !!    call ecriture(FP_J(:,t(h)))
-    !    write(108,*) ATAN(aimag(VP(t(i))/DBLE(VP(t(i)))))/Tps,&
-    !                      dble(log(VP(t(i))))/Tps
-    !  enddo
-    !  close(108)
-    !
-100 format (5(2x,e19.13))
-
-  end subroutine arnoldi3d
-
-  !===================
-  !Spectrum subroutine
-  !===================
-  subroutine spectrum(A,n,VP,VR)
-    implicit none
-    integer              :: INFO
-    integer              :: n,LWORK
-    real(wp), dimension(n,n) :: A
-    real(wp), dimension(:), allocatable :: RWORK
-    complex(wp), dimension(1,n) :: VL
-    complex(wp), dimension(n,n) :: VR
-    complex(wp), dimension(:), allocatable :: WORK
-    complex(wp), dimension(n):: VP
-
-    LWORK=4*n
-    allocate(WORK(LWORK),RWORK(2*n))
-    call ZGEEV('N','V', n, A*(1.,0.), n, VP, VL, 1, VR, n,&
-         WORK, LWORK, RWORK, INFO )
-    print *, 'VP', VP
-
-  end subroutine spectrum
-
-  !==================
-  !Sorting subroutine
-  !==================
-  subroutine sort(t,n,ind)
-    implicit none
-    integer                  :: i, j, n, tp1
-    real(wp), dimension(1:n) :: t
-    real(wp)                 :: temp
-    integer, dimension(1:n)  :: ind
-
-    do i=1,n
-       do j=i+1,n
-          if ((t(i))<(t(j))) then
-
-             temp=t(i)
-             tp1=ind(i)
-
-             t(i)=t(j)
-             ind(i)=ind(j)
-
-             t(j)=temp
-             ind(j)=tp1
-
-          endif
-       enddo
-    enddo
-
-    return
-
-  end subroutine sort
-
-end module arnoldi
diff --git a/hysop/old/numerics.old/extras_f/arnoldi2py.pyf b/hysop/old/numerics.old/extras_f/arnoldi2py.pyf
deleted file mode 100644
index 7a2d95d5a161228a6d589f69cfe143f43619a8c2..0000000000000000000000000000000000000000
--- a/hysop/old/numerics.old/extras_f/arnoldi2py.pyf
+++ /dev/null
@@ -1,14 +0,0 @@
-!    -*- f90 -*-
-! Note: the context of this file is case sensitive.
-
-module arnoldi ! in arnoldi2py.f90
-    use precision
-    subroutine arnoldi3d(Mu,ncli,nt,nfp,nmodes,Tps) ! in arnoldi2py.f90:arnoldi2py
-        real(kind=wp) dimension(:,:),intent(inout) :: Mu
-        integer intent(in) :: ncli
-        integer intent(in) :: nt
-        integer intent(in) :: nfp
-        integer intent(in) :: nmodes
-        real(kind=wp) intent(in) :: Tps
-    end subroutine arnoldi3d
-end module arnoldi
diff --git a/hysop/old/numerics.old/fftw_f/fft2d.f90 b/hysop/old/numerics.old/fftw_f/fft2d.f90
deleted file mode 100755
index 8dbeefccde39be5b4d61f99dcd806391a227ab52..0000000000000000000000000000000000000000
--- a/hysop/old/numerics.old/fftw_f/fft2d.f90
+++ /dev/null
@@ -1,616 +0,0 @@
-!> Fast Fourier Transform routines (Fortran, based on fftw)
-!! to solve 2d Poisson and diffusion problems.
-!!
-!! This module provides :
-!! \li 1 - fftw routines for the "complex to complex" case :
-!! solves the problem for
-!! complex input/output. The names of these routines contain "c2c".
-!! \li 2 - fftw routines for the "real to complex" case :
-!!  solves the problem for
-!!  input fields which are real. The names of these routines contain "r2c".
-!! \li 3 - fftw routines for the "real to complex" case :
-!!  solves the problem for
-!! input fields which are real and using the "many" interface of the fftw.
-!! It means that transforms are applied to the 2 input fields at the same time.
-!! Names of these routines contain "many".
-!!
-!! Obviously, all the above cases should lead to the same results. By default
-!! case 2 must be chosen (if input is real). Case 1 and 3 are more or less
-!! dedicated to tests and validation.
-module fft2d
-
-  use, intrinsic :: iso_c_binding
-  use precision
-  use parameters
-  implicit none
-  include 'fftw3-mpi.f03'
-
-  private
-
-  public :: init_c2c_2d,init_r2c_2d, r2c_scalar_2d, c2c_2d,c2r_2d,c2r_scalar_2d,&
-       r2c_2d,cleanFFTW_2d, filter_poisson_2d, filter_curl_2d, getParamatersTopologyFFTW2d, &
-       filter_diffusion_2d, init_r2c_2dBIS
-
-
-  !> plan for fftw "c2c" forward or r2c transform
-  type(C_PTR) :: plan_forward1, plan_forward2
-  !> plan for fftw "c2c" backward or c2r transform
-  type(C_PTR) :: plan_backward1,plan_backward2
-  !> memory buffer for fftw
-  !! (input and output buffer will point to this location)
-  type(C_PTR) :: cbuffer1
-  !> second memory buffer for fftw (used for backward transform)
-  type(C_PTR) :: cbuffer2
-  !! Note Franck : check if local declarations of datain/out works and improve perfs.
-  !> Field (complex values) for fftw input
-  complex(C_DOUBLE_COMPLEX), pointer :: datain1(:,:),datain2(:,:)
-  !> Field (real values) for fftw input
-  real(C_DOUBLE), pointer :: rdatain1(:,:)
-  !> Field (complex values) for fftw (forward) output
-  complex(C_DOUBLE_COMPLEX), pointer :: dataout1(:,:)
-  !> Field (real values) for fftw output
-  real(C_DOUBLE), pointer :: rdatain2(:,:)
-  !> Field (complex values) for fftw (forward) output
-  complex(C_DOUBLE_COMPLEX), pointer :: dataout2(:,:)
-  !> GLOBAL number of points in each direction
-  integer(C_INTPTR_T),pointer :: fft_resolution(:)
-  !> LOCAL resolution
-  integer(c_INTPTR_T),dimension(2) :: local_resolution
-  !> Offset in the direction of distribution
-  integer(c_INTPTR_T),dimension(2) :: local_offset
-  !> wave numbers for fft in x direction
-  real(C_DOUBLE), pointer :: kx(:)
-  !> wave numbers for fft in y direction
-  real(C_DOUBLE), pointer :: ky(:)
-  !> log file for fftw
-  character(len=20),parameter :: filename ="hysopfftw.log"
-  !> normalization factor
-  real(C_DOUBLE) :: normFFT
-  !> true if all the allocation stuff for global variables has been done.
-  logical :: is2DUpToDate = .false.
-
-contains
-  !========================================================================
-  !   Complex to complex transforms
-  !========================================================================
-
-  !> Initialisation of the fftw context for complex
-  !! to complex transforms (forward and backward)
-  !! @param[in] resolution global domain resolution
-  subroutine init_c2c_2d(resolution,lengths)
-
-    !! global domain resolution
-    integer(kind=ip), dimension(2), intent(in) :: resolution
-    real(wp),dimension(2), intent(in) :: lengths
-
-    !! Size of the local memory required for fftw (cbuffer)
-    integer(C_INTPTR_T) :: alloc_local
-
-    if(is2DUpToDate) return
-
-    ! init fftw mpi context
-    call fftw_mpi_init()
-
-    if(rank==0) open(unit=21,file=filename,form="formatted")
-
-    ! set fft resolution
-    allocate(fft_resolution(2))
-    fft_resolution = resolution-1
-
-    ! compute "optimal" size (according to fftw) for local date
-    ! (warning : dimension reversal)
-    alloc_local = fftw_mpi_local_size_2d_transposed(fft_resolution(c_Y), &
-         fft_resolution(c_X),main_comm, local_resolution(c_Y), &
-         local_offset(c_Y), local_resolution(c_X),local_offset(c_X));
-
-    ! allocate local buffer (used to save datain/dataout1
-    ! ==> in-place transform!!)
-    cbuffer1 = fftw_alloc_complex(alloc_local)
-    ! link datain and dataout1 to cbuffer, setting the right dimensions
-    call c_f_pointer(cbuffer1, datain1, &
-         [fft_resolution(c_X),local_resolution(c_Y)])
-    call c_f_pointer(cbuffer1, dataout1, &
-         [fft_resolution(c_Y),local_resolution(c_X)])
-
-    ! second buffer used for backward transform. Used to copy dataout1
-    ! into dataout2 (input for backward transform and filter)
-    ! and to save (in-place) the transform of the second component
-    ! of the velocity
-    cbuffer2 = fftw_alloc_complex(alloc_local)
-    call c_f_pointer(cbuffer2, datain2,&
-         [fft_resolution(c_X),local_resolution(c_Y)])
-    call c_f_pointer(cbuffer2, dataout2, [fft_resolution(c_Y),local_resolution(c_X)])
-
-    !   create MPI plan for in-place forward/backward DFT (note dimension reversal)
-    plan_forward1 = fftw_mpi_plan_dft_2d(fft_resolution(c_Y), fft_resolution(c_X),datain1,dataout1,&
-         main_comm,FFTW_FORWARD,ior(FFTW_MEASURE,FFTW_MPI_TRANSPOSED_OUT))
-    plan_backward1 = fftw_mpi_plan_dft_2d(fft_resolution(c_Y),fft_resolution(c_X),dataout1,datain1,&
-         main_comm,FFTW_BACKWARD,ior(FFTW_MEASURE,FFTW_MPI_TRANSPOSED_IN))
-    plan_backward2 = fftw_mpi_plan_dft_2d(fft_resolution(c_Y),fft_resolution(c_X),dataout2,datain2,&
-         main_comm,FFTW_BACKWARD,ior(FFTW_MEASURE,FFTW_MPI_TRANSPOSED_IN))
-
-    call computeKxC(lengths(c_X))
-    call computeKy(lengths(c_Y))
-    normFFT =  ONE/(fft_resolution(c_X)*fft_resolution(c_Y))
-    !! call fft2d_diagnostics(alloc_local)
-
-    is2DUpToDate = .true.
-
-!!$    write(*,'(a,i5,a,16f10.4)') 'kx[',rank,'] ', kx
-!!$    write(*,'(a,i5,a,16f10.4)') 'ky[',rank,'] ', ky
-!!$
-  end subroutine init_c2c_2d
-
-  !> Execute fftw forward transform, according to the pre-defined plans.
-  subroutine c2c_2d(inputData,velocity_x,velocity_y)
-    complex(wp),dimension(:,:) :: velocity_x,velocity_y
-    complex(wp),dimension(:,:),intent(in) :: inputData
-
-    integer(C_INTPTR_T) :: i, j
-
-    do j = 1, local_resolution(c_Y)
-       do i = 1, fft_resolution(c_X)
-          datain1(i, j) = inputData(i,j)
-       end do
-    end do
-
-    ! compute transform (as many times as desired)
-    call fftw_mpi_execute_dft(plan_forward1, datain1, dataout1)
-
-!!$    do i = 1, fft_resolution(c_Y)
-!!$       write(*,'(a,i5,a,16f10.4)') 'out[',rank,'] ', dataout1(i,1:local_resolution(c_X))
-!!$    end do
-!!$
-    call filter_poisson_2d()
-
-    call fftw_mpi_execute_dft(plan_backward1, dataout1, datain1)
-    call fftw_mpi_execute_dft(plan_backward2,dataout2,datain2)
-    do j = 1, local_resolution(c_Y)
-       do i = 1, fft_resolution(c_X)
-          velocity_x(i,j) = datain1(i,j)*normFFT
-          velocity_y(i,j) = datain2(i,j)*normFFT
-       end do
-    end do
-
-!!$    do i = 1, fft_resolution(c_X)
-!!$       write(*,'(a,i5,a,16f10.4)') 'vxx[',rank,'] ', velocity_x(i,1:local_resolution(c_Y))
-!!$    end do
-!!$    write(*,'(a,i5,a)') '[',rank,'] ==============================='
-!!$    do i = 1, fft_resolution(c_X)
-!!$       write(*,'(a,i5,a,16f10.4)') 'vyy[',rank,'] ', velocity_y(i,1:local_resolution(c_Y))
-!!$    end do
-!!$    write(*,'(a,i5,a)') '[',rank,'] ==============================='
-!!$
-  end subroutine c2c_2d
-
-  !========================================================================
-  !  Real to complex transforms
-  !========================================================================
-
-  !> Initialisation of the fftw context for real to complex transforms (forward and backward)
-  !! @param[in] resolution global domain resolution
-  subroutine init_r2c_2d(resolution,lengths)
-
-    integer(kind=ip), dimension(2), intent(in) :: resolution
-    real(wp),dimension(2), intent(in) :: lengths
-    !! Size of the local memory required for fftw (cbuffer)
-    integer(C_INTPTR_T) :: alloc_local,halfLength
-
-    if(is2DUpToDate) return
-
-    ! init fftw mpi context
-    call fftw_mpi_init()
-
-    if(rank==0) open(unit=21,file=filename,form="formatted")
-
-    allocate(fft_resolution(2))
-    fft_resolution(:) = resolution(:)-1
-    halfLength = fft_resolution(c_X)/2+1
-    ! allocate local buffer (used to save datain/dataout1 ==> in-place transform!!)
-    alloc_local = fftw_mpi_local_size_2d_transposed(fft_resolution(c_Y),halfLength,main_comm,local_resolution(c_Y),&
-         local_offset(c_Y),local_resolution(c_X),local_offset(c_X));
-
-    ! allocate local buffer (used to save datain/dataout1 ==> in-place transform!!)
-    cbuffer1 = fftw_alloc_complex(alloc_local)
-
-    ! link rdatain1 and dataout1 to cbuffer, setting the right dimensions for each
-    call c_f_pointer(cbuffer1, rdatain1, [2*halfLength,local_resolution(c_Y)])
-    call c_f_pointer(cbuffer1, dataout1, [fft_resolution(c_Y),local_resolution(c_X)])
-
-    ! second buffer used for backward transform. Used to copy dataout1 into dataout2 (input for backward transform and filter)
-    ! and to save (in-place) the transform of the second component of the velocity
-    cbuffer2 = fftw_alloc_complex(alloc_local)
-
-    call c_f_pointer(cbuffer2, rdatain2, [2*halfLength,local_resolution(c_Y)])
-    call c_f_pointer(cbuffer2, dataout2, [fft_resolution(c_Y),local_resolution(c_X)])
-
-    !   create MPI plans for in-place forward/backward DFT (note dimension reversal)
-    plan_forward1 = fftw_mpi_plan_dft_r2c_2d(fft_resolution(c_Y), fft_resolution(c_X), rdatain1, dataout1, &
-         main_comm,ior(FFTW_MEASURE,FFTW_MPI_TRANSPOSED_OUT))
-    plan_backward1 = fftw_mpi_plan_dft_c2r_2d(fft_resolution(c_Y), fft_resolution(c_X), dataout1, rdatain1, &
-         main_comm,ior(FFTW_MEASURE,FFTW_MPI_TRANSPOSED_IN))
-    plan_backward2 = fftw_mpi_plan_dft_c2r_2d(fft_resolution(c_Y), fft_resolution(c_X), dataout2, rdatain2, &
-         main_comm,ior(FFTW_MEASURE,FFTW_MPI_TRANSPOSED_IN))
-
-    call computeKx(lengths(c_X))
-    call computeKy(lengths(c_Y))
-    normFFT = ONE/(fft_resolution(c_X)*fft_resolution(c_Y))
-    !! call fft2d_diagnostics(alloc_local)
-!!$
-!!$    write(*,'(a,i5,a,16f10.4)') 'kx[',rank,'] ', kx
-!!$    write(*,'(a,i5,a,16f10.4)') 'ky[',rank,'] ', ky
-!!$
-    is2DUpToDate = .true.
-
-  end subroutine init_r2c_2d
-
-
-  !> forward transform - The result is saved in local buffers
-  !! @param input data
-  subroutine r2c_scalar_2d(inputData, ghosts)
-
-    real(wp),dimension(:,:), intent(in) :: inputData
-    integer(kind=ip), dimension(2), intent(in) :: ghosts
-
-    integer(C_INTPTR_T) :: i, j, ig, jg
-    ! init
-    do j = 1, local_resolution(c_Y)
-       jg = j + ghosts(c_Y)
-       do i = 1, fft_resolution(c_X)
-          ig = i + ghosts(c_X)
-          rdatain1(i, j) = inputData(ig,jg)
-       end do
-    end do
-
-!!$    do i = 1, fft_resolution(c_X)
-!!$       write(*,'(a,i5,a,16f10.4)') 'rr[',rank,'] ', rdatain1(i,1:local_resolution(c_Y))
-!!$    end do
-!!$
-    ! compute transform (as many times as desired)
-    call fftw_mpi_execute_dft_r2c(plan_forward1, rdatain1, dataout1)
-
-!!$    do i = 1, fft_resolution(c_Y)
-!!$       write(*,'(a,i5,a,16f10.4)') 'aaaa[',rank,'] ', dataout1(i,1:local_resolution(c_X))
-!!$    end do
-
-  end subroutine r2c_scalar_2d
-
-  !> forward transform - The result is saved in local buffers
-  !! @param[in] input data
-  subroutine r2c_2d(input_x, input_y, ghosts)
-
-    real(wp),dimension(:,:), intent(in) :: input_x, input_y
-    integer(kind=ip), dimension(2), intent(in) :: ghosts
-
-    integer(C_INTPTR_T) :: i, j, ig, jg
-    ! init
-    do j = 1, local_resolution(c_Y)
-       jg = j + ghosts(c_Y)
-       do i = 1, fft_resolution(c_X)
-          ig = i + ghosts(c_X)
-          rdatain1(i, j) = input_x(ig,jg)
-          rdatain2(i, j) = input_y(ig,jg)    
-       end do
-    end do
-
-    ! compute transform (as many times as desired)
-    call fftw_mpi_execute_dft_r2c(plan_forward1, rdatain1, dataout1)
-    call fftw_mpi_execute_dft_r2c(plan_forward2, rdatain2, dataout2)
-
-  end subroutine r2c_2d
-
-  !> Backward transform
-  subroutine c2r_2d(velocity_x,velocity_y, ghosts)
-    real(wp),dimension(:,:),intent(inout) :: velocity_x,velocity_y
-    integer(kind=ip), dimension(2), intent(in) :: ghosts
-    integer(C_INTPTR_T) :: i, j, ig, jg
-
-    call fftw_mpi_execute_dft_c2r(plan_backward1,dataout1,rdatain1)
-    call fftw_mpi_execute_dft_c2r(plan_backward2,dataout2,rdatain2)
-    do j = 1, local_resolution(c_Y)
-       jg = j + ghosts(c_Y)
-       do i = 1, fft_resolution(c_X)
-          ig = i + ghosts(c_X)
-          velocity_x(ig,jg) = rdatain1(i,j)*normFFT
-          velocity_y(ig,jg) = rdatain2(i,j)*normFFT
-       end do
-    end do
-
-!!$
-!!$    do i = 1, fft_resolution(c_X)
-!!$       write(*,'(a,i5,a,16f10.4)') 'xx[',rank,'] ', velocity_x(i,1:local_resolution(c_Y))
-!!$    end do
-!!$    write(*,'(a,i5,a)') '[',rank,'] ==============================='
-!!$    do i = 1, fft_resolution(c_X)
-!!$       write(*,'(a,i5,a,16f10.4)') 'yy[',rank,'] ', velocity_y(i,1:local_resolution(c_Y))
-!!$    end do
-!!$    write(*,'(a,i5,a)') '[',rank,'] ==============================='
-
-
-  end subroutine c2r_2d
-
-  !> Backward transform for scalar field
-  subroutine c2r_scalar_2d(omega, ghosts)
-    real(wp),dimension(:,:),intent(inout) :: omega
-    integer(kind=ip), dimension(2), intent(in) :: ghosts
-    integer(C_INTPTR_T) :: i, j, ig, jg
-
-    call fftw_mpi_execute_dft_c2r(plan_backward1,dataout1,rdatain1)
-    do j = 1, local_resolution(c_Y)
-       jg = j + ghosts(c_Y)
-       do i = 1, fft_resolution(c_X)
-          ig = i + ghosts(c_X)
-          omega(ig,jg) = rdatain1(i,j)*normFFT
-       end do
-    end do
-
-!!$
-!!$    do i = 1, fft_resolution(c_X)
-!!$       write(*,'(a,i5,a,16f10.4)') 'xx[',rank,'] ', velocity_x(i,1:local_resolution(c_Y))
-!!$    end do
-!!$    write(*,'(a,i5,a)') '[',rank,'] ==============================='
-!!$    do i = 1, fft_resolution(c_X)
-!!$       write(*,'(a,i5,a,16f10.4)') 'yy[',rank,'] ', velocity_y(i,1:local_resolution(c_Y))
-!!$    end do
-!!$    write(*,'(a,i5,a)') '[',rank,'] ==============================='
-
-
-  end subroutine c2r_scalar_2d
-
-
-  !========================================================================
-  ! Common (r2c, c2c) subroutines
-  !========================================================================
-
-  !> Computation of frequencies coeff, over the distributed direction in the real/complex case
-  !> @param lengths size of the domain
-  subroutine computeKx(length)
-
-    real(wp),intent(in) :: length
-
-    !! Local loops indices
-    integer(C_INTPTR_T) :: i
-
-    !! Compute filter coefficients
-    allocate(kx(local_resolution(c_X)))
-
-    do i = local_offset(c_X)+1,local_offset(c_X)+local_resolution(c_X) !! global index
-       kx(i-local_offset(c_X)) =  2.*pi/length*(i-1)
-    end do
-
-  end subroutine computeKx
-
-  !> Computation of frequencies coeff, over the distributed direction in the complex/complex case
-  !> @param lengths size of the domain
-  subroutine computeKxC(length)
-
-    real(wp),intent(in) :: length
-
-    !! Local loops indices
-    integer(C_INTPTR_T) :: i
-
-    !! Compute filter coefficients
-    allocate(kx(local_resolution(c_X)))
-
-    !! x frequencies (distributed over proc)
-    !! If we deal with positive frequencies only
-    if(local_offset(c_X)+local_resolution(c_X) <= fft_resolution(c_X)/2+1 ) then
-       do i = 1,local_resolution(c_X)
-          kx(i) =  2.*pi/length*(local_offset(c_X)+i-1)
-       end do
-
-    else
-       !! else if we deal with negative frequencies only
-       if(local_offset(c_X)+1 > fft_resolution(c_X)/2+1 ) then
-          do i = 1,local_resolution(c_X)
-             kx(i) =  2.*pi/length*(local_offset(c_X)+i-1-fft_resolution(c_X))
-          end do
-          !! final case : start positive freq, end in negative ones
-       else
-          do i = local_offset(c_X)+1, fft_resolution(c_X)/2+1 !! global index
-             kx(i-local_offset(c_X)) =  2.*pi/length*(i-1)
-          end do
-          do i = fft_resolution(c_X)/2+2,local_resolution(c_X)+local_offset(c_X)
-             kx(i-local_offset(c_X)) =  2.*pi/length*(i-1-fft_resolution(c_X))
-          end do
-       end if
-    end if
-
-  end subroutine computeKxC
-
-  !> Computation of frequencies coeff, over non-distributed direction(s)
-  !> @param lengths size of the domain
-  subroutine computeKy(length)
-    real(wp), intent(in) :: length
-
-    !! Local loops indices
-    integer(C_INTPTR_T) :: i
-    allocate(ky(fft_resolution(c_Y)))
-
-    do i = 1, fft_resolution(c_Y)/2+1
-       ky(i) = 2.*pi/length*(i-1)
-    end do
-    do i = fft_resolution(c_Y)/2+2,fft_resolution(c_Y)
-       ky(i) = 2.*pi/length*(i-fft_resolution(c_Y)-1)
-    end do
-
-  end subroutine computeKy
-
-  subroutine filter_poisson_2d()
-
-    integer(C_INTPTR_T) :: i, j
-    complex(C_DOUBLE_COMPLEX) :: coeff
-    if(local_offset(c_X)==0) then
-       if(local_offset(c_Y) == 0) then
-          dataout1(1,1) = 0.0
-          dataout2(1,1) = 0.0
-       else
-          coeff = Icmplx/(kx(1)**2+ky(1)**2)
-          dataout2(1,1) = -coeff*kx(1)*dataout1(1,1)
-          dataout1(1,1) = coeff*ky(1)*dataout1(1,1)
-       endif
-
-       do j = 2, fft_resolution(c_Y)
-          coeff = Icmplx/(kx(1)**2+ky(j)**2)
-          dataout2(j,1) = -coeff*kx(1)*dataout1(j,1)
-          dataout1(j,1) = coeff*ky(j)*dataout1(j,1)
-       end do
-       do i = 2,local_resolution(c_X)
-          do j = 1, fft_resolution(c_Y)
-             coeff = Icmplx/(kx(i)**2+ky(j)**2)
-             dataout2(j,i) = -coeff*kx(i)*dataout1(j,i)
-             dataout1(j,i) = coeff*ky(j)*dataout1(j,i)
-          end do
-       end do
-    else
-       do i = 1,local_resolution(c_X)
-          do j = 1, fft_resolution(c_Y)
-             coeff = Icmplx/(kx(i)**2+ky(j)**2)
-             dataout2(j,i) = -coeff*kx(i)*dataout1(j,i)
-             dataout1(j,i) = coeff*ky(j)*dataout1(j,i)
-          end do
-       end do
-    end if
-
-!!$    do i = 1,local_resolution(c_X)
-!!$       write(*,'(a,i5,a,16f10.4)') 'xx[',rank,'] ', dataout1(1:fft_resolution(c_Y),i)
-!!$    end do
-!!$
-!!$    write(*,'(a,i5,a)') '[',rank,'] ==============================='
-!!$    do i = 1,local_resolution(c_X)
-!!$       write(*,'(a,i5,a,16f10.4)') 'yy[',rank,'] ', dataout2(1:fft_resolution(c_Y),i)
-!!$    end do
-!!$    write(*,'(a,i5,a)') '[',rank,'] ==============================='
-
-  end subroutine filter_poisson_2d
-
-  subroutine filter_diffusion_2d(nudt)
-
-    real(C_DOUBLE), intent(in) :: nudt
-    integer(C_INTPTR_T) :: i, j
-    complex(C_DOUBLE_COMPLEX) :: coeff
-
-    do i = 1,local_resolution(c_X)
-       do j = 1, fft_resolution(c_Y)
-          coeff = ONE/(ONE + nudt * (kx(i)**2+ky(j)**2))
-          dataout1(j,i) = coeff*dataout1(j,i)
-       end do
-    end do
-
-  end subroutine filter_diffusion_2d
-
-  !> Clean fftw context (free memory, plans ...)
-  subroutine cleanFFTW_2d()
-    call fftw_destroy_plan(plan_forward1)
-    call fftw_destroy_plan(plan_backward1)
-    !call fftw_destroy_plan(plan_forward2)
-    !call fftw_destroy_plan(plan_backward2)
-    call fftw_free(cbuffer1)
-    call fftw_free(cbuffer2)
-    call fftw_mpi_cleanup()
-    deallocate(fft_resolution)
-    if(rank==0) close(21)
-  end subroutine cleanFFTW_2d
-
-  !> Solve curl problem in the Fourier space :
-  !! \f{eqnarray*} \omega &=& \nabla \times v
-  subroutine filter_curl_2d()
-
-    integer(C_INTPTR_T) :: i,j,k
-    complex(C_DOUBLE_COMPLEX) :: coeff
-
-    !! mind the transpose -> index inversion between y and z
-    do j = 1,local_resolution(c_Y)
-       do k = 1, fft_resolution(c_Z)
-          do i = 1, local_resolution(c_X)
-             coeff = Icmplx
-             dataout1(j,i) = coeff*(kx(i)*dataout2(j,i) - ky(j)*dataout1(j,i))
-          end do
-       end do
-    end do
-
-  end subroutine filter_curl_2d
-
-  subroutine fft2d_diagnostics(nbelem)
-    integer(C_INTPTR_T), intent(in) :: nbelem
-    complex(C_DOUBLE_COMPLEX) :: memoryAllocated
-    memoryAllocated = real(nbelem*sizeof(memoryAllocated),wp)*1e-6
-    write(*,'(a,i5,a,i12,f10.2)') '[',rank,'] size of each buffer (elements / memory in MB):', &
-         nbelem, memoryAllocated
-    write(*,'(a,i5,a,2i12)') '[',rank,'] size of kx,y,z vectors (number of elements):', &
-         size(kx), size(ky)
-    write(*,'(a,i5,a,4i5)') '[',rank,'] local resolution and offset :', local_resolution, local_offset
-    memoryAllocated = 2*memoryAllocated + real(sizeof(kx) + sizeof(ky), wp)*1e-6
-    write(*,'(a,i5,a,f10.2)') '[',rank,'] Total memory used for fftw buffers (MB):', memoryAllocated
-
-  end subroutine fft2d_diagnostics
-
-  !> Get local size of input and output field on fftw topology
-  !! @param datashape local shape of the input field for the fftw process
-  !! @param offset index of the first component of the local field (in each dir) in the global set of indices
-  subroutine getParamatersTopologyFFTW2d(datashape,offset)
-    integer(kind=ip), intent(out),dimension(2) :: datashape
-    integer(kind=ip), intent(out),dimension(2) :: offset
-    integer(C_INTPTR_T) :: offsetx = 0
-    datashape = (/fft_resolution(c_X), local_resolution(c_Y)/)
-    offset = (/ offsetx, local_offset(c_Y)/)
-
-  end subroutine getParamatersTopologyFFTW2d
-  !> Initialisation of the fftw context for real to complex transforms (forward and backward)
-  !! @param[in] resolution global domain resolution
-  subroutine init_r2c_2dBIS(resolution,lengths)
-
-    integer(kind=ip), dimension(2), intent(in) :: resolution
-    real(wp),dimension(2), intent(in) :: lengths
-    !! Size of the local memory required for fftw (cbuffer)
-    integer(C_INTPTR_T) :: alloc_local,halfLength,howmany
-    integer(C_INTPTR_T), dimension(2) :: n
-
-    !> Field (real values) for fftw input
-    real(C_DOUBLE), pointer :: rdatain1Many(:,:,:)
-
-    ! init fftw mpi context
-    call fftw_mpi_init()
-    howmany = 1
-    if(rank==0) open(unit=21,file=filename,form="formatted")
-
-    allocate(fft_resolution(2))
-    fft_resolution(:) = resolution(:)-1
-    halfLength = fft_resolution(c_X)/2+1
-    n(1) = fft_resolution(2)
-    n(2) = halfLength
-    ! allocate local buffer (used to save datain/dataout1 ==> in-place transform!!)
-    alloc_local = fftw_mpi_local_size_many_transposed(2,n,howmany,FFTW_MPI_DEFAULT_BLOCK,&
-         FFTW_MPI_DEFAULT_BLOCK,main_comm,local_resolution(c_Y),&
-         local_offset(c_Y),local_resolution(c_X),local_offset(c_X));
-
-    ! allocate local buffer (used to save datain/dataout1 ==> in-place transform!!)
-    cbuffer1 = fftw_alloc_complex(alloc_local)
-
-    ! link rdatain1 and dataout1 to cbuffer, setting the right dimensions for each
-    call c_f_pointer(cbuffer1, rdatain1Many, [howmany,2*halfLength,local_resolution(c_Y)])
-    call c_f_pointer(cbuffer1, dataout1, [fft_resolution(c_Y),local_resolution(c_X)])
-
-    ! second buffer used for backward transform. Used to copy dataout1 into dataout2 (input for backward transform and filter)
-    ! and to save (in-place) the transform of the second component of the velocity
-    cbuffer2 = fftw_alloc_complex(alloc_local)
-
-    call c_f_pointer(cbuffer2, rdatain1Many, [howmany,2*halfLength,local_resolution(c_Y)])
-    call c_f_pointer(cbuffer2, dataout2, [fft_resolution(c_Y),local_resolution(c_X)])
-
-    !   create MPI plans for in-place forward/backward DFT (note dimension reversal)
-    plan_forward1 = fftw_mpi_plan_dft_r2c_2d(fft_resolution(c_Y), fft_resolution(c_X), rdatain1Many, dataout1, &
-         main_comm,ior(FFTW_MEASURE,FFTW_MPI_TRANSPOSED_OUT))
-    plan_backward1 = fftw_mpi_plan_dft_c2r_2d(fft_resolution(c_Y), fft_resolution(c_X), dataout1, rdatain1, &
-         main_comm,ior(FFTW_MEASURE,FFTW_MPI_TRANSPOSED_IN))
-    plan_backward2 = fftw_mpi_plan_dft_c2r_2d(fft_resolution(c_Y), fft_resolution(c_X), dataout2, rdatain2, &
-         main_comm,ior(FFTW_MEASURE,FFTW_MPI_TRANSPOSED_IN))
-
-    call computeKx(lengths(c_X))
-    call computeKy(lengths(c_Y))
-    normFFT = ONE/(fft_resolution(c_X)*fft_resolution(c_Y))
-    !! call fft2d_diagnostics(alloc_local)
-
-  end subroutine init_r2c_2dBIS
-
-end module fft2d
diff --git a/hysop/old/numerics.old/fftw_f/fft3d.f90 b/hysop/old/numerics.old/fftw_f/fft3d.f90
deleted file mode 100755
index 23580aca7229ed2f6a37fec5f35624d46e81a0d3..0000000000000000000000000000000000000000
--- a/hysop/old/numerics.old/fftw_f/fft3d.f90
+++ /dev/null
@@ -1,1106 +0,0 @@
-!> Fast Fourier Transform routines (Fortran, based on fftw) to solve 3d Poisson and diffusion problems.
-!!
-!! This module provides :
-!! \li 1 - fftw routines for the "complex to complex" case : solves the problem for
-!! complex input/output. The names of these routines contain "c2c".
-!! \li 2 - fftw routines for the "real to complex" case : solves the problem for
-!!  input fields which are real. The names of these routines contain "r2c".
-!! \li 3 - fftw routines for the "real to complex" case : solves the problem for
-!! input fields which are real and using the "many" interface of the fftw.
-!! It means that transforms are applied to the 3 input fields at the same time.
-!! Names of these routines contain "many".
-!!
-!! Obviously, all the above cases should lead to the same results. By default
-!! case 2 must be chosen (if input is real). Case 1 and 3 are more or less
-!! dedicated to tests and validation.
-module fft3d
-
-  use, intrinsic :: iso_c_binding
-  use mpi
-  use precision
-  use parameters
-
-  implicit none
-  include 'fftw3-mpi.f03'
-
-  private
-
-  public :: init_r2c_3d,init_c2c_3d,init_r2c_scalar_3d, r2c_3d,r2c_scalar_3d,c2c_3d,c2r_3d,c2r_scalar_3d,cleanFFTW_3d,&
-       getParamatersTopologyFFTW3d,filter_poisson_3d,filter_curl_diffusion_3d, &
-       init_r2c_3d_many, r2c_3d_many, c2r_3d_many, filter_diffusion_3d_many,&
-       filter_poisson_3d_many, filter_diffusion_3d, filter_curl_3d, filter_projection_om_3d,&
-       filter_multires_om_3d, filter_pressure_3d, r2c_3d_scal, filter_spectrum_3d
-
-  !> plan for fftw "c2c" forward or r2c transform
-  type(C_PTR) :: plan_forward1, plan_forward2, plan_forward3
-  !> plan for fftw "c2c" backward or c2r transform
-  type(C_PTR) :: plan_backward1, plan_backward2, plan_backward3
-  !> memory buffer for fftw (input and output buffer will point to this location)
-  type(C_PTR) :: cbuffer1
-  !> second memory buffer for fftw
-  type(C_PTR) :: cbuffer2
-  !> third memory buffer for fftw
-  type(C_PTR) :: cbuffer3
-  !! Note Franck : check if local declarations of datain/out works and improve perfs.
-  !> Field (complex values) for fftw input
-  complex(C_DOUBLE_COMPLEX), pointer :: datain1(:,:,:)=>NULL(), datain2(:,:,:)=>NULL(), datain3(:,:,:)=>NULL()
-  !> Field (real values) for fftw input (these are only pointers to the cbuffers)
-  real(C_DOUBLE), pointer :: rdatain1(:,:,:)=>NULL() ,rdatain2(:,:,:)=>NULL() ,rdatain3(:,:,:)=>NULL()
-  !> Field (real values) for fftw input in the fftw-many case
-  real(C_DOUBLE), pointer :: rdatain_many(:,:,:,:)=>NULL()
-  !> Field (complex values) for fftw (forward) output
-  complex(C_DOUBLE_COMPLEX), pointer :: dataout1(:,:,:)=>NULL() ,dataout2(:,:,:)=>NULL() ,dataout3(:,:,:)=>NULL()
-  !> Field (complex values) for fftw (forward) output in the fftw-many case
-  complex(C_DOUBLE_COMPLEX), pointer :: dataout_many(:,:,:,:)=>NULL()
-  !> GLOBAL number of points in each direction on which fft is applied (--> corresponds to "real" resolution - 1)
-  integer(C_INTPTR_T),pointer :: fft_resolution(:)=>NULL()
-  !> LOCAL number of points for fft
-  integer(c_INTPTR_T),dimension(3) :: local_resolution
-  !> Offset in the direction of distribution
-  integer(c_INTPTR_T),dimension(3) :: local_offset
-  !> wave numbers for fft in x direction
-  real(C_DOUBLE), pointer :: kx(:)
-  !> wave numbers for fft in y direction
-  real(C_DOUBLE), pointer :: ky(:)
-  !> wave numbers for fft in z direction
-  real(C_DOUBLE), pointer :: kz(:)
-  !> log file for fftw
-  character(len=20),parameter :: filename ="hysopfftw.log"
-  !> normalization factor
-  real(C_DOUBLE) :: normFFT
-  !> true if we use fftw-many routines
-  logical :: manycase
-  !> true if all the allocation stuff for global variables has been done.
-  logical :: is3DUpToDate = .false.
-
-contains
-  !========================================================================
-  !   Complex to complex transforms
-  !========================================================================
-
-  !> Initialisation of the fftw context for complex to complex transforms (forward and backward)
-  !! @param[in] resolution global domain resolution
-  !! @param[in] lengths width of each side of the domain
-  subroutine init_c2c_3d(resolution,lengths)
-
-    integer(kind=ip), dimension(3), intent(in) :: resolution
-    real(wp),dimension(3), intent(in) :: lengths
-
-    !! Size of the local memory required for fftw (cbuffer)
-    integer(C_INTPTR_T) :: alloc_local
-
-    if(is3DUpToDate) return
-
-    ! init fftw mpi context
-    call fftw_mpi_init()
-
-    if(rank==0) open(unit=21,file=filename,form="formatted")
-
-    ! set fft resolution
-    allocate(fft_resolution(3))
-    fft_resolution(:) = resolution(:)-1
-
-    ! compute "optimal" size (according to fftw) for local data (warning : dimension reversal)
-    alloc_local = fftw_mpi_local_size_3d_transposed(fft_resolution(c_Z),fft_resolution(c_Y),fft_resolution(c_X),main_comm,&
-         local_resolution(c_Z),local_offset(c_Z),local_resolution(c_Y),local_offset(c_Y));
-
-    ! Set a default value for c_X components.
-    local_offset(c_X) = 0
-    local_resolution(c_X) = fft_resolution(c_X)
-
-    ! allocate local buffer (used to save datain/dataout ==> in-place transform!!)
-    cbuffer1 = fftw_alloc_complex(alloc_local)
-    ! link datain and dataout to cbuffer, setting the right dimensions for each
-    call c_f_pointer(cbuffer1, datain1, [fft_resolution(c_X),fft_resolution(c_Y),local_resolution(c_Z)])
-    call c_f_pointer(cbuffer1, dataout1, [fft_resolution(c_X),fft_resolution(c_Z),local_resolution(c_Y)])
-
-    ! second buffer used for backward transform. Used to copy dataout into dataout2 (input for backward transform and filter)
-    ! and to save (in-place) the transform of the second component of the velocity
-    cbuffer2 = fftw_alloc_complex(alloc_local)
-    call c_f_pointer(cbuffer2, datain2, [fft_resolution(c_X),fft_resolution(c_Y),local_resolution(c_Z)])
-    call c_f_pointer(cbuffer2, dataout2, [fft_resolution(c_X),fft_resolution(c_Z),local_resolution(c_Y)])
-
-    ! second buffer used for backward transform. Used to copy dataout into dataout2 (input for backward transform and filter)
-    ! and to save (in-place) the transform of the second component of the velocity
-    cbuffer3 = fftw_alloc_complex(alloc_local)
-    call c_f_pointer(cbuffer3, datain3, [fft_resolution(c_X),fft_resolution(c_Y),local_resolution(c_Z)])
-    call c_f_pointer(cbuffer3, dataout3, [fft_resolution(c_X),fft_resolution(c_Z),local_resolution(c_Y)])
-
-    !   create MPI plan for in-place forward/backward DFT (note dimension reversal)
-    plan_forward1 = fftw_mpi_plan_dft_3d(fft_resolution(c_Z), fft_resolution(c_Y), fft_resolution(c_X),datain1,dataout1,&
-         main_comm,FFTW_FORWARD,ior(FFTW_MEASURE,FFTW_MPI_TRANSPOSED_OUT))
-    plan_backward1 = fftw_mpi_plan_dft_3d(fft_resolution(c_Z),fft_resolution(c_Y),fft_resolution(c_X),dataout1,datain1,&
-         main_comm,FFTW_BACKWARD,ior(FFTW_MEASURE,FFTW_MPI_TRANSPOSED_IN))
-    plan_forward2 = fftw_mpi_plan_dft_3d(fft_resolution(c_Z), fft_resolution(c_Y), fft_resolution(c_X),datain2,dataout2,&
-         main_comm,FFTW_FORWARD,ior(FFTW_MEASURE,FFTW_MPI_TRANSPOSED_OUT))
-    plan_backward2 = fftw_mpi_plan_dft_3d(fft_resolution(c_Z),fft_resolution(c_Y),fft_resolution(c_X),dataout2,datain2,&
-         main_comm,FFTW_BACKWARD,ior(FFTW_MEASURE,FFTW_MPI_TRANSPOSED_IN))
-    plan_forward3 = fftw_mpi_plan_dft_3d(fft_resolution(c_Z), fft_resolution(c_Y), fft_resolution(c_X),datain3,dataout3,&
-         main_comm,FFTW_FORWARD,ior(FFTW_MEASURE,FFTW_MPI_TRANSPOSED_OUT))
-    plan_backward3 = fftw_mpi_plan_dft_3d(fft_resolution(c_Z),fft_resolution(c_Y),fft_resolution(c_X),dataout3,datain3,&
-         main_comm,FFTW_BACKWARD,ior(FFTW_MEASURE,FFTW_MPI_TRANSPOSED_IN))
-
-    call computeKx(lengths(c_X))
-    call computeKy(lengths(c_Y))
-    call computeKz(lengths(c_Z))
-
-    !! call fft3d_diagnostics(alloc_local)
-
-    normFFT = one/(fft_resolution(c_X)*fft_resolution(c_Y)*fft_resolution(c_Z))
-    manycase = .false.
-
-    is3DUpToDate = .true.
-
-  end subroutine init_c2c_3d
-
-  !> Solve poisson problem for complex input and output vector fields
-  !!  @param[in] omega_x 3d scalar field, x-component of the input vector field
-  !!  @param[in] omega_y 3d scalar field, y-component of the input vector field
-  !!  @param[in] omega_z 3d scalar field, z-component of the input vector field
-  !!  @param[in] velocity_x 3d scalar field, x-component of the output vector field
-  !!  @param[in] velocity_y 3d scalar field, y-component of the output vector field
-  !!  @param[in] velocity_z 3d scalar field, z-component of the output vector field
-  subroutine c2c_3d(omega_x,omega_y,omega_z,velocity_x,velocity_y,velocity_z)
-    complex(wp),dimension(:,:,:) :: velocity_x,velocity_y,velocity_z
-    complex(wp),dimension(:,:,:),intent(in) :: omega_x,omega_y,omega_z
-
-    integer(C_INTPTR_T) :: i,j,k
-    ! Copy input data into the fftw buffer
-    do k = 1, local_resolution(c_Z)
-       do j = 1, fft_resolution(c_Y)
-          do i = 1, fft_resolution(c_X)
-             datain1(i,j,k) = omega_x(i,j,k)
-             datain2(i,j,k) = omega_y(i,j,k)
-             datain3(i,j,k) = omega_z(i,j,k)
-          end do
-       end do
-    end do
-    ! compute transform (as many times as desired)
-    call fftw_mpi_execute_dft(plan_forward1, datain1, dataout1)
-    call fftw_mpi_execute_dft(plan_forward2, datain2, dataout2)
-    call fftw_mpi_execute_dft(plan_forward3, datain3, dataout3)
-
-    ! apply poisson filter
-    call filter_poisson_3d()
-
-    ! inverse transform to retrieve velocity
-    call fftw_mpi_execute_dft(plan_backward1, dataout1,datain1)
-    call fftw_mpi_execute_dft(plan_backward2,dataout2,datain2)
-    call fftw_mpi_execute_dft(plan_backward3,dataout3,datain3)
-    do k =1, local_resolution(c_Z)
-       do j = 1, fft_resolution(c_Y)
-          do i = 1, fft_resolution(c_X)
-             velocity_x(i,j,k) = datain1(i,j,k)*normFFT
-             velocity_y(i,j,k) = datain2(i,j,k)*normFFT
-             velocity_z(i,j,k) = datain3(i,j,k)*normFFT
-          end do
-       end do
-    end do
-
-  end subroutine c2c_3d
-
-  !========================================================================
-  !  Real to complex transforms
-  !========================================================================
-
-  !> Initialisation of the fftw context for real to complex transforms (forward and backward)
-  !! @param[in] resolution global domain resolution
-  !! @param[in] lengths width of each side of the domain
-  subroutine init_r2c_3d(resolution,lengths)
-
-    integer(kind=ip), dimension(3), intent(in) :: resolution
-    real(wp),dimension(3), intent(in) :: lengths
-    !! Size of the local memory required for fftw (cbuffer)
-    integer(C_INTPTR_T) :: alloc_local,halfLength
-
-    if(is3DUpToDate) return
-
-    ! init fftw mpi context
-    call fftw_mpi_init()
-
-    if(rank==0) open(unit=21,file=filename,form="formatted")
-    allocate(fft_resolution(3))
-    fft_resolution(:) = resolution(:)-1
-    halfLength = fft_resolution(c_X)/2+1
-
-    ! compute "optimal" size (according to fftw) for local data (warning : dimension reversal)
-    alloc_local = fftw_mpi_local_size_3d_transposed(fft_resolution(c_Z),fft_resolution(c_Y),halfLength,&
-         main_comm,local_resolution(c_Z),local_offset(c_Z),local_resolution(c_Y),local_offset(c_Y));
-
-    ! init c_X part. This is required to compute kx with the same function in 2d and 3d cases.
-    local_offset(c_X) = 0
-    local_resolution(c_X) = halfLength
-
-    ! allocate local buffer (used to save datain/dataout ==> in-place transform!!)
-    cbuffer1 = fftw_alloc_complex(alloc_local)
-    cbuffer2 = fftw_alloc_complex(alloc_local)
-    cbuffer3 = fftw_alloc_complex(alloc_local)
-
-    ! link rdatain and dataout to cbuffer, setting the right dimensions for each
-    call c_f_pointer(cbuffer1, rdatain1, [2*halfLength,fft_resolution(c_Y),local_resolution(c_Z)])
-    call c_f_pointer(cbuffer1, dataout1, [halfLength, fft_resolution(c_Z), local_resolution(c_Y)])
-    call c_f_pointer(cbuffer2, rdatain2, [2*halfLength,fft_resolution(c_Y),local_resolution(c_Z)])
-    call c_f_pointer(cbuffer2, dataout2, [halfLength, fft_resolution(c_Z), local_resolution(c_Y)])
-    call c_f_pointer(cbuffer3, rdatain3, [2*halfLength,fft_resolution(c_Y),local_resolution(c_Z)])
-    call c_f_pointer(cbuffer3, dataout3, [halfLength, fft_resolution(c_Z), local_resolution(c_Y)])
-
-    rdatain1 = 0.0
-    rdatain2 = 0.0
-    rdatain3 = 0.0
-
-    !   create MPI plans for in-place forward/backward DFT (note dimension reversal)
-   plan_forward1 = fftw_mpi_plan_dft_r2c_3d(fft_resolution(c_Z),fft_resolution(c_Y), fft_resolution(c_X), rdatain1, dataout1, &
-         main_comm,ior(FFTW_MEASURE,FFTW_MPI_TRANSPOSED_OUT))
-    plan_backward1 = fftw_mpi_plan_dft_c2r_3d(fft_resolution(c_Z),fft_resolution(c_Y), fft_resolution(c_X), dataout1, rdatain1, &
-         main_comm,ior(FFTW_MEASURE,FFTW_MPI_TRANSPOSED_IN))
-    plan_forward2 = fftw_mpi_plan_dft_r2c_3d(fft_resolution(c_Z),fft_resolution(c_Y), fft_resolution(c_X), rdatain2, dataout2, &
-         main_comm,ior(FFTW_MEASURE,FFTW_MPI_TRANSPOSED_OUT))
-    plan_backward2 = fftw_mpi_plan_dft_c2r_3d(fft_resolution(c_Z),fft_resolution(c_Y), fft_resolution(c_X), dataout2, rdatain2, &
-         main_comm,ior(FFTW_MEASURE,FFTW_MPI_TRANSPOSED_IN))
-    plan_forward3 = fftw_mpi_plan_dft_r2c_3d(fft_resolution(c_Z),fft_resolution(c_Y), fft_resolution(c_X), rdatain3, dataout3, &
-         main_comm,ior(FFTW_MEASURE,FFTW_MPI_TRANSPOSED_OUT))
-    plan_backward3 = fftw_mpi_plan_dft_c2r_3d(fft_resolution(c_Z),fft_resolution(c_Y), fft_resolution(c_X), dataout3, rdatain3, &
-         main_comm,ior(FFTW_MEASURE,FFTW_MPI_TRANSPOSED_IN))
-
-    call computeKx(lengths(c_X))
-    call computeKy(lengths(c_Y))
-    call computeKz(lengths(c_Z))
-
-    normFFT = one/(fft_resolution(c_X)*fft_resolution(c_Y)*fft_resolution(c_Z))
-    !! call fft3d_diagnostics(alloc_local)
-    manycase = .false.
-    is3DUpToDate = .true.
-
-  end subroutine init_r2c_3d
-
-  !> Initialisation of the fftw context for real to complex transforms (forward and backward)
-  !! @param[in] resolution global domain resolution
-  !! @param[in] lengths width of each side of the domain
-  subroutine init_r2c_scalar_3d(resolution,lengths)
-
-    integer(kind=ip), dimension(3), intent(in) :: resolution
-    real(wp),dimension(3), intent(in) :: lengths
-    !! Size of the local memory required for fftw (cbuffer)
-    integer(C_INTPTR_T) :: alloc_local,halfLength
-
-    if(is3DUpToDate) return
-
-    ! init fftw mpi context
-    call fftw_mpi_init()
-
-    if(rank==0) open(unit=21,file=filename,form="formatted")
-    allocate(fft_resolution(3))
-    fft_resolution(:) = resolution(:)-1
-    halfLength = fft_resolution(c_X)/2+1
-
-    ! compute "optimal" size (according to fftw) for local data (warning : dimension reversal)
-    alloc_local = fftw_mpi_local_size_3d_transposed(fft_resolution(c_Z),fft_resolution(c_Y),halfLength,&
-         main_comm,local_resolution(c_Z),local_offset(c_Z),local_resolution(c_Y),local_offset(c_Y));
-
-    ! init c_X part. This is required to compute kx with the same function in 2d and 3d cases.
-    local_offset(c_X) = 0
-    local_resolution(c_X) = halfLength
-
-    ! allocate local buffer (used to save datain/dataout ==> in-place transform!!)
-    cbuffer1 = fftw_alloc_complex(alloc_local)
-
-    ! link rdatain and dataout to cbuffer, setting the right dimensions for each
-    call c_f_pointer(cbuffer1, rdatain1, [2*halfLength,fft_resolution(c_Y),local_resolution(c_Z)])
-    call c_f_pointer(cbuffer1, dataout1, [halfLength, fft_resolution(c_Z), local_resolution(c_Y)])
-
-    rdatain1 = 0.0
-
-    !   create MPI plans for in-place forward/backward DFT (note dimension reversal)
-    plan_forward1 = fftw_mpi_plan_dft_r2c_3d(fft_resolution(c_Z),fft_resolution(c_Y), fft_resolution(c_X), rdatain1, dataout1, &
-         main_comm,ior(FFTW_MEASURE,FFTW_MPI_TRANSPOSED_OUT))
-    plan_backward1 = fftw_mpi_plan_dft_c2r_3d(fft_resolution(c_Z),fft_resolution(c_Y), fft_resolution(c_X), dataout1, rdatain1, &
-         main_comm,ior(FFTW_MEASURE,FFTW_MPI_TRANSPOSED_IN))
-    
-    call computeKx(lengths(c_X))
-    call computeKy(lengths(c_Y))
-    call computeKz(lengths(c_Z))
-
-    normFFT = one/(fft_resolution(c_X)*fft_resolution(c_Y)*fft_resolution(c_Z))
-    !! call fft3d_diagnostics(alloc_local)
-    manycase = .false.
-    is3DUpToDate = .true.
-
-  end subroutine init_r2c_scalar_3d
-  
-  !> forward transform - The result is saved in local buffers
-  !!  @param[in] omega_x 3d scalar field, x-component of the input vector field
-  !!  @param[in] omega_y 3d scalar field, y-component of the input vector field
-  !!  @param[in] omega_z 3d scalar field, z-component of the input vector field
-  !!  @param[in] ghosts, number of points in the ghost layer of input fields.
-  subroutine r2c_3d(omega_x,omega_y,omega_z, ghosts)
-
-    real(wp),dimension(:,:,:),intent(in) :: omega_x,omega_y,omega_z
-    integer(kind=ip), dimension(3), intent(in) :: ghosts
-    !real(wp) :: start
-    integer(C_INTPTR_T) :: i,j,k, ig, jg, kg
-
-    ! ig, jg, kg are used to take into account
-    ! ghost points in input fields
-
-    ! init
-    do k =1, local_resolution(c_Z)
-       kg = k + ghosts(c_Z)
-       do j = 1, fft_resolution(c_Y)
-          jg = j + ghosts(c_Y)
-          do i = 1, fft_resolution(c_X)
-             ig = i + ghosts(c_X)
-             rdatain1(i,j,k) = omega_x(ig,jg,kg)
-             rdatain2(i,j,k) = omega_y(ig,jg,kg)
-             rdatain3(i,j,k) = omega_z(ig,jg,kg)
-          end do
-       end do
-    end do
-
-    ! compute transforms for each component
-    !start = MPI_WTIME()
-    call fftw_mpi_execute_dft_r2c(plan_forward1, rdatain1, dataout1)
-    call fftw_mpi_execute_dft_r2c(plan_forward2, rdatain2, dataout2)
-    call fftw_mpi_execute_dft_r2c(plan_forward3, rdatain3, dataout3)
-    !!print *, "r2c time = ", MPI_WTIME() - start
-
-  end subroutine r2c_3d
-
-  !> Backward fftw transform
-  !!  @param[in,out] velocity_x 3d scalar field, x-component of the output vector field
-  !!  @param[in,out] velocity_y 3d scalar field, y-component of the output vector field
-  !!  @param[in,out] velocity_z 3d scalar field, z-component of the output vector field
-  !!  @param[in] ghosts, number of points in the ghost layer of in/out velocity field.
-  subroutine c2r_3d(velocity_x,velocity_y,velocity_z, ghosts)
-    real(wp),dimension(:,:,:),intent(inout) :: velocity_x,velocity_y,velocity_z
-    integer(kind=ip), dimension(3), intent(in) :: ghosts
-    real(wp) :: start
-    integer(C_INTPTR_T) :: i,j,k, ig, jg, kg
-    start = MPI_WTIME()
-    call fftw_mpi_execute_dft_c2r(plan_backward1,dataout1,rdatain1)
-    call fftw_mpi_execute_dft_c2r(plan_backward2,dataout2,rdatain2)
-    call fftw_mpi_execute_dft_c2r(plan_backward3,dataout3,rdatain3)
-!!    print *, "c2r time : ", MPI_WTIME() -start
-    ! copy back to velocity and normalisation
-    do k =1, local_resolution(c_Z)
-       kg = k + ghosts(c_Z)
-       do j = 1, fft_resolution(c_Y)
-          jg = j + ghosts(c_Y)
-          do i = 1, fft_resolution(c_X)
-             ig = i + ghosts(c_X)
-             velocity_x(ig,jg,kg) = rdatain1(i,j,k)*normFFT
-             velocity_y(ig,jg,kg) = rdatain2(i,j,k)*normFFT
-             velocity_z(ig,jg,kg) = rdatain3(i,j,k)*normFFT
-          end do
-       end do
-    end do
-
-  end subroutine c2r_3d
-
-  !> forward transform - The result is saved in a local buffer
-  !!  @param[in] omega 3d scalar field, x-component of the input vector field
-  !!  @param[in] ghosts, number of points in the ghost layer of input field.
-  subroutine r2c_scalar_3d(scalar, ghosts)
-
-    real(wp),dimension(:,:,:),intent(in) :: scalar
-    integer(kind=ip), dimension(3), intent(in) :: ghosts
-    real(wp) :: start
-    integer(C_INTPTR_T) :: i,j,k, ig, jg, kg
-
-    ! ig, jg, kg are used to take into account
-    ! ghost points in input fields
-
-    ! init
-    do k =1, local_resolution(c_Z)
-       kg = k + ghosts(c_Z)
-       do j = 1, fft_resolution(c_Y)
-          jg = j + ghosts(c_Y)
-          do i = 1, fft_resolution(c_X)
-             ig = i + ghosts(c_X)
-             rdatain1(i,j,k) = scalar(ig,jg,kg)
-          end do
-       end do
-    end do
-
-    ! compute transforms for each component
-    start = MPI_WTIME()
-    call fftw_mpi_execute_dft_r2c(plan_forward1, rdatain1, dataout1)
-    !!print *, "r2c time = ", MPI_WTIME() - start
-
-  end subroutine r2c_scalar_3d
-
-  !> Backward fftw transform
-  !!  @param[in,out] scalar 3d scalar field
-  !!  @param[in] ghosts, number of points in the ghost layer of in/out scalar field.
-  subroutine c2r_scalar_3d(scalar, ghosts)
-    real(wp),dimension(:,:,:),intent(inout) :: scalar
-    integer(kind=ip), dimension(3), intent(in) :: ghosts
-    real(wp) :: start
-    integer(C_INTPTR_T) :: i,j,k, ig, jg, kg
-    start = MPI_WTIME()
-    call fftw_mpi_execute_dft_c2r(plan_backward1,dataout1,rdatain1)
-!!    print *, "c2r time : ", MPI_WTIME() -start
-    ! copy back to velocity and normalisation
-    do k =1, local_resolution(c_Z)
-       kg = k + ghosts(c_Z)
-       do j = 1, fft_resolution(c_Y)
-          jg = j + ghosts(c_Y)
-          do i = 1, fft_resolution(c_X)
-             ig = i + ghosts(c_X)
-             scalar(ig,jg,kg) = rdatain1(i,j,k)*normFFT
-          end do
-       end do
-    end do
-
-  end subroutine c2r_scalar_3d
-
-  !========================================================================
-  !  Real to complex transforms based on "many" fftw routines
-  !========================================================================
-
-  !> Initialisation of the fftw context for real to complex transforms (forward and backward)
-  !! @param[in] resolution global domain resolution
-  !! @param[in] lengths width of each side of the domain
-  subroutine init_r2c_3d_many(resolution,lengths)
-
-    integer(kind=ip), dimension(3), intent(in) :: resolution
-    real(wp),dimension(3), intent(in) :: lengths
-    !! Size of the local memory required for fftw (cbuffer)
-    integer(C_INTPTR_T) :: alloc_local,halfLength,howmany, blocksize
-    integer(C_INTPTR_T),dimension(3) :: n
-
-    ! init fftw mpi context
-    call fftw_mpi_init()
-    blocksize = FFTW_MPI_DEFAULT_BLOCK
-    if(rank==0) open(unit=21,file=filename,form="formatted")
-    allocate(fft_resolution(3))
-    fft_resolution(:) = resolution(:)-1
-    halfLength = fft_resolution(c_X)/2+1
-    n(1) = fft_resolution(c_Z)
-    n(2) = fft_resolution(c_Y)
-    n(3) = halfLength
-    howmany = 3
-    ! compute "optimal" size (according to fftw) for local data (warning : dimension reversal)
-    alloc_local = fftw_mpi_local_size_many_transposed(3,n,howmany,blocksize,blocksize,&
-         main_comm,local_resolution(c_Z),local_offset(c_Z),local_resolution(c_Y),local_offset(c_Y));
-
-    ! init c_X part. This is required to compute kx with the same function in 2d and 3d cases.
-    local_offset(c_X) = 0
-    local_resolution(c_X) = halfLength
-
-    ! allocate local buffer (used to save datain/dataout ==> in-place transform!!)
-    cbuffer1 = fftw_alloc_complex(alloc_local)
-
-    ! link rdatain and dataout to cbuffer, setting the right dimensions for each
-    call c_f_pointer(cbuffer1, rdatain_many, [howmany,2*halfLength,fft_resolution(c_Y),local_resolution(c_Z)])
-    call c_f_pointer(cbuffer1, dataout_many, [howmany,halfLength, fft_resolution(c_Z), local_resolution(c_Y)])
-
-    !   create MPI plans for in-place forward/backward DFT (note dimension reversal)
-    n(3) = fft_resolution(c_X)
-
-    plan_forward1 = fftw_mpi_plan_many_dft_r2c(3,n,howmany,blocksize,blocksize, rdatain_many, dataout_many, &
-         main_comm,ior(FFTW_MEASURE,FFTW_MPI_TRANSPOSED_OUT))
-    plan_backward1 = fftw_mpi_plan_many_dft_c2r(3,n,howmany,blocksize,blocksize, dataout_many, rdatain_many, &
-         main_comm,ior(FFTW_MEASURE,FFTW_MPI_TRANSPOSED_IN))
-    call computeKx(lengths(c_X))
-    call computeKy(lengths(c_Y))
-    call computeKz(lengths(c_Z))
-
-    normFFT = one/(fft_resolution(c_X)*fft_resolution(c_Y)*fft_resolution(c_Z))
-    !! call fft3d_diagnostics(alloc_local,1)
-    manycase = .true.
-
-    is3DUpToDate = .true.
-
-  end subroutine init_r2c_3d_many
-
-  !> forward transform - The result is saved in local buffers
-  !!  @param[in] omega_x 3d scalar field, x-component of the input vector field
-  !!  @param[in] omega_y 3d scalar field, y-component of the input vector field
-  !!  @param[in] omega_z 3d scalar field, z-component of the input vector field
-  !! @param input data
-  subroutine r2c_3d_many(omega_x,omega_y,omega_z)
-
-    real(wp),dimension(:,:,:),intent(in) :: omega_x,omega_y,omega_z
-    real(wp) :: start
-    integer(C_INTPTR_T) :: i,j,k
-
-    ! init
-    do k =1, local_resolution(c_Z)
-       do j = 1, fft_resolution(c_Y)
-          do i = 1, fft_resolution(c_X)
-             rdatain_many(1,i,j,k) = omega_x(i,j,k)
-             rdatain_many(2,i,j,k) = omega_y(i,j,k)
-             rdatain_many(3,i,j,k) = omega_z(i,j,k)
-          end do
-       end do
-    end do
-
-    ! compute transform (as many times as desired)
-    start = MPI_WTIME()
-    call fftw_mpi_execute_dft_r2c(plan_forward1, rdatain_many, dataout_many)
-!!    print *, "r2c time = ", MPI_WTIME() - start
-
-  end subroutine r2c_3d_many
-
-  !> Backward fftw transform
-  !!  @param[in,out] velocity_x 3d scalar field, x-component of the output vector field
-  !!  @param[in,out] velocity_y 3d scalar field, y-component of the output vector field
-  !!  @param[in,out] velocity_z 3d scalar field, z-component of the output vector field
-  subroutine c2r_3d_many(velocity_x,velocity_y,velocity_z)
-    real(wp),dimension(:,:,:),intent(inout) :: velocity_x,velocity_y,velocity_z
-    real(wp) :: start
-    integer(C_INTPTR_T) :: i,j,k
-
-    start = MPI_WTIME()
-    call fftw_mpi_execute_dft_c2r(plan_backward1,dataout_many,rdatain_many)
-!!    print *, "c2r time : ", MPI_WTIME() -start
-    do k =1, local_resolution(c_Z)
-       do j = 1, fft_resolution(c_Y)
-          do i = 1, fft_resolution(c_X)
-             velocity_x(i,j,k) = rdatain_many(1,i,j,k)*normFFT
-             velocity_y(i,j,k) = rdatain_many(2,i,j,k)*normFFT
-             velocity_z(i,j,k) = rdatain_many(2,i,j,k)*normFFT
-          end do
-       end do
-    end do
-
-  end subroutine c2r_3d_many
-
-  !========================================================================
-  ! Common (r2c, c2c) subroutines
-  !========================================================================
-
-  !> Computation of frequencies coeff, over the distributed direction in the real/complex case
-  !> @param lengths size of the domain
-  subroutine computeKx(length)
-
-    real(wp),intent(in) :: length
-
-    !! Local loops indices
-    integer(C_INTPTR_T) :: i
-
-    !! Compute filter coefficients
-    allocate(kx(local_resolution(c_X)))
-    do i = 1, fft_resolution(c_X)/2+1
-       kx(i) = 2.*pi/length*(i-1)
-    end do
-    !! write(*,'(a,i5,a,i5,i5)') '[',rank,'] kx size', size(kx),i
-    do i = fft_resolution(c_X)/2+2,local_resolution(c_X)
-       kx(i) = 2.*pi/length*(i-fft_resolution(c_X)-1)
-    end do
-  end subroutine computeKx
-
-  !> Computation of frequencies coeff, over distributed direction(s)
-  !> @param lengths size of the domain
-  subroutine computeKy(length)
-    real(C_DOUBLE), intent(in) :: length
-
-    !! Local loops indices
-    integer(C_INTPTR_T) :: i
-    allocate(ky(local_resolution(c_Y)))
-
-    !! y frequencies (distributed over proc)
-    !! If we deal with positive frequencies only
-    if(local_offset(c_Y)+local_resolution(c_Y) <= fft_resolution(c_Y)/2+1 ) then
-       do i = 1,local_resolution(c_Y)
-          ky(i) =  2.*pi/length*(local_offset(c_Y)+i-1)
-       end do
-    else
-       !! else if we deal with negative frequencies only
-       if(local_offset(c_Y)+1 > fft_resolution(c_Y)/2+1 ) then
-          do i = 1,local_resolution(c_Y)
-             ky(i) =  2.*pi/length*(local_offset(c_Y)+i-1-fft_resolution(c_Y))
-          end do
-          !! final case : start positive freq, end in negative ones
-       else
-          do i = local_offset(c_Y)+1, fft_resolution(c_Y)/2+1 !! global index
-             ky(i-local_offset(c_Y)) =  2.*pi/length*(i-1)
-          end do
-          do i = fft_resolution(c_Y)/2+2,local_resolution(c_Y)+local_offset(c_Y)
-             ky(i-local_offset(c_Y)) =  2.*pi/length*(i-1-fft_resolution(c_Y))
-          end do
-       end if
-    end if
-
-  end subroutine computeKy
-
-  !> Computation of frequencies coeff, over non-distributed direction(s)
-  !> @param length size of the domain
-  subroutine computeKz(length)
-    real(wp),intent(in) :: length
-
-    !! Local loops indices
-    integer(C_INTPTR_T) :: i
-    allocate(kz(fft_resolution(c_Z)))
-    do i = 1, fft_resolution(c_Z)/2+1
-       kz(i) = 2.*pi/length*(i-1)
-    end do
-    do i = fft_resolution(c_Z)/2+2,fft_resolution(c_Z)
-       kz(i) = 2.*pi/length*(i-fft_resolution(c_Z)-1)
-    end do
-
-  end subroutine computeKz
-
-  !> Solve Poisson problem in the Fourier space :
-  !! \f{eqnarray*} \Delta \psi &=& - \omega \\ v = \nabla\times\psi \f}
-  subroutine filter_poisson_3d()
-
-    integer(C_INTPTR_T) :: i,j,k
-    complex(C_DOUBLE_COMPLEX) :: coeff
-    complex(C_DOUBLE_COMPLEX) :: buffer1,buffer2
-
-    ! Set first coeff (check for "all freq = 0" case)
-    if(local_offset(c_Y) == 0) then
-       dataout1(1,1,1) = 0.0
-       dataout2(1,1,1) = 0.0
-       dataout3(1,1,1) = 0.0
-    else
-       coeff = Icmplx/(ky(1)**2)
-       buffer1 = dataout1(1,1,1)
-       dataout1(1,1,1) = coeff*ky(1)*dataout3(1,1,1)
-       dataout2(1,1,1) = 0.0
-       dataout3(1,1,1) = -coeff*ky(1)*buffer1
-    endif
-
-    ! !! mind the transpose -> index inversion between y and z
-    do i = 2, local_resolution(c_X)
-       coeff = Icmplx/(kx(i)**2+ky(1)**2)
-       buffer1 = dataout1(i,1,1)
-       buffer2 = dataout2(i,1,1)
-       dataout1(i,1,1) = coeff*ky(1)*dataout3(i,1,1)
-       dataout2(i,1,1) = -coeff*kx(i)*dataout3(i,1,1)
-       dataout3(i,1,1) = coeff*(kx(i)*buffer2-ky(1)*buffer1)
-    end do
-
-    ! !! mind the transpose -> index inversion between y and z
-    do k = 2, fft_resolution(c_Z)
-       do i = 1, local_resolution(c_X)
-          coeff = Icmplx/(kx(i)**2+ky(1)**2+kz(k)**2)
-          buffer1 = dataout1(i,k,1)
-          buffer2 = dataout2(i,k,1)
-          dataout1(i,k,1) = coeff*(ky(1)*dataout3(i,k,1)-kz(k)*dataout2(i,k,1))
-          dataout2(i,k,1) = coeff*(kz(k)*buffer1-kx(i)*dataout3(i,k,1))
-          dataout3(i,k,1) = coeff*(kx(i)*buffer2-ky(1)*buffer1)
-       end do
-    end do
-
-    ! !! mind the transpose -> index inversion between y and z
-    do j = 2,local_resolution(c_Y)
-       do k = 1, fft_resolution(c_Z)
-          do i = 1, local_resolution(c_X)
-             coeff = Icmplx/(kx(i)**2+ky(j)**2+kz(k)**2)
-             buffer1 = dataout1(i,k,j)
-             buffer2 = dataout2(i,k,j)
-             dataout1(i,k,j) = coeff*(ky(j)*dataout3(i,k,j)-kz(k)*dataout2(i,k,j))
-             dataout2(i,k,j) = coeff*(kz(k)*buffer1-kx(i)*dataout3(i,k,j))
-             dataout3(i,k,j) = coeff*(kx(i)*buffer2-ky(j)*buffer1)
-          end do
-       end do
-    end do
-
-  end subroutine filter_poisson_3d
-
-  !> Solve diffusion problem in the Fourier space :
-  !! \f{eqnarray*} \omega &=& \nabla \times v \\ \frac{\partial \omega}{\partial t} &=& \nu \Delta \omega \f}
-  !! @param[in] nudt \f$ \nu\times dt\f$, diffusion coefficient times current time step
-  subroutine filter_curl_diffusion_3d(nudt)
-
-    real(C_DOUBLE), intent(in) :: nudt
-    integer(C_INTPTR_T) :: i,j,k
-    complex(C_DOUBLE_COMPLEX) :: coeff
-    complex(C_DOUBLE_COMPLEX) :: buffer1,buffer2
-
-    !! mind the transpose -> index inversion between y and z
-    do j = 1,local_resolution(c_Y)
-       do k = 1, fft_resolution(c_Z)
-          do i = 1, local_resolution(c_X)
-             coeff = Icmplx/(one + nudt * (kx(i)**2+ky(j)**2+kz(k)**2))
-             buffer1 = dataout1(i,k,j)
-             buffer2 = dataout2(i,k,j)
-             dataout1(i,k,j) = coeff*(ky(j)*dataout3(i,k,j)-kz(k)*dataout2(i,k,j))
-             dataout2(i,k,j) = coeff*(kz(k)*buffer1-kx(i)*dataout3(i,k,j))
-             dataout3(i,k,j) = coeff*(kx(i)*buffer2-ky(j)*buffer1)
-          end do
-       end do
-    end do
-
-  end subroutine filter_curl_diffusion_3d
-
-  !> Solve diffusion problem in the Fourier space :
-  !! \f{eqnarray*} \frac{\partial \omega}{\partial t} &=& \nu \Delta \omega \f}
-  !! @param[in] nudt \f$ \nu\times dt\f$, diffusion coefficient times current time step
-  subroutine filter_diffusion_3d(nudt)
-
-    real(C_DOUBLE), intent(in) :: nudt
-    integer(C_INTPTR_T) :: i,j,k
-    complex(C_DOUBLE_COMPLEX) :: coeff
-
-    !! mind the transpose -> index inversion between y and z
-    do j = 1,local_resolution(c_Y)
-       do k = 1, fft_resolution(c_Z)
-          do i = 1, local_resolution(c_X)
-             coeff = one/(one + nudt * (kx(i)**2+ky(j)**2+kz(k)**2))
-             dataout1(i,k,j) = coeff*dataout1(i,k,j)
-             dataout2(i,k,j) = coeff*dataout2(i,k,j)
-             dataout3(i,k,j) = coeff*dataout3(i,k,j)
-          end do
-       end do
-    end do
-
-  end subroutine filter_diffusion_3d
-
-  !> Solve curl problem in the Fourier space :
-  !! \f{eqnarray*} \omega &=& \nabla \times v
-  subroutine filter_curl_3d()
-
-    integer(C_INTPTR_T) :: i,j,k
-    complex(C_DOUBLE_COMPLEX) :: coeff
-    complex(C_DOUBLE_COMPLEX) :: buffer1,buffer2
-
-    !! mind the transpose -> index inversion between y and z
-    do j = 1,local_resolution(c_Y)
-       do k = 1, fft_resolution(c_Z)
-          do i = 1, local_resolution(c_X)
-             coeff = Icmplx
-             buffer1 = dataout1(i,k,j)
-             buffer2 = dataout2(i,k,j)
-             dataout1(i,k,j) = coeff*(ky(j)*dataout3(i,k,j)-kz(k)*dataout2(i,k,j))
-             dataout2(i,k,j) = coeff*(kz(k)*buffer1-kx(i)*dataout3(i,k,j))
-             dataout3(i,k,j) = coeff*(kx(i)*buffer2-ky(j)*buffer1)
-          end do
-       end do
-    end do
-
-  end subroutine filter_curl_3d
-
-  !> Perform solenoidal projection to ensure divergence free vorticity field
-  !! \f{eqnarray*} \omega ' &=& \omega - \nabla\pi \f}
-  subroutine filter_projection_om_3d()
-
-    integer(C_INTPTR_T) :: i,j,k
-    complex(C_DOUBLE_COMPLEX) :: coeff
-    complex(C_DOUBLE_COMPLEX) :: buffer1,buffer2,buffer3
-
-    ! Set first coeff (check for "all freq = 0" case)
-    if(local_offset(c_Y) == 0) then
-       dataout1(1,1,1) = dataout1(1,1,1)
-       dataout2(1,1,1) = dataout2(1,1,1)
-       dataout3(1,1,1) = dataout3(1,1,1)
-    else
-       coeff = one/(ky(1)**2)
-       buffer2 = dataout2(1,1,1)
-       dataout1(1,1,1) = dataout1(1,1,1)
-       dataout2(1,1,1) = dataout2(1,1,1) - coeff*ky(1)*(ky(1)*buffer2)
-       dataout3(1,1,1) = dataout3(1,1,1)
-    endif
-
-    ! !! mind the transpose -> index inversion between y and z
-    do i = 2, local_resolution(c_X)
-       coeff = one/(kx(i)**2+ky(1)**2+kz(1)**2)
-       buffer1 = dataout1(i,1,1)
-       buffer2 = dataout2(i,1,1)
-       buffer3 = dataout3(i,1,1)
-       dataout1(i,1,1) = dataout1(i,1,1) - coeff*kx(i)*(kx(i)*buffer1+ky(1)*buffer2+kz(1)*buffer3)
-       dataout2(i,1,1) = dataout2(i,1,1) - coeff*ky(1)*(kx(i)*buffer1+ky(1)*buffer2+kz(1)*buffer3)
-       dataout3(i,1,1) = dataout3(i,1,1) - coeff*kz(1)*(kx(i)*buffer1+ky(1)*buffer2+kz(1)*buffer3)
-    end do
-
-    ! !! mind the transpose -> index inversion between y and z
-    do k = 2, fft_resolution(c_Z)
-       do i = 1, local_resolution(c_X)
-          coeff = one/(kx(i)**2+ky(1)**2+kz(k)**2)
-          buffer1 = dataout1(i,k,1)
-          buffer2 = dataout2(i,k,1)
-          buffer3 = dataout3(i,k,1)
-          dataout1(i,k,1) = dataout1(i,k,1) - coeff*kx(i)*(kx(i)*buffer1+ky(1)*buffer2+kz(k)*buffer3)
-          dataout2(i,k,1) = dataout2(i,k,1) - coeff*ky(1)*(kx(i)*buffer1+ky(1)*buffer2+kz(k)*buffer3)
-          dataout3(i,k,1) = dataout3(i,k,1) - coeff*kz(k)*(kx(i)*buffer1+ky(1)*buffer2+kz(k)*buffer3)
-       end do
-    end do
-
-    ! !! mind the transpose -> index inversion between y and z
-    do j = 2,local_resolution(c_Y)
-       do k = 1, fft_resolution(c_Z)
-          do i = 1, local_resolution(c_X)
-             coeff = one/(kx(i)**2+ky(j)**2+kz(k)**2)
-             buffer1 = dataout1(i,k,j)
-             buffer2 = dataout2(i,k,j)
-             buffer3 = dataout3(i,k,j)
-             dataout1(i,k,j) = dataout1(i,k,j) - coeff*kx(i)*(kx(i)*buffer1+ky(j)*buffer2+kz(k)*buffer3)
-             dataout2(i,k,j) = dataout2(i,k,j) - coeff*ky(j)*(kx(i)*buffer1+ky(j)*buffer2+kz(k)*buffer3)
-             dataout3(i,k,j) = dataout3(i,k,j) - coeff*kz(k)*(kx(i)*buffer1+ky(j)*buffer2+kz(k)*buffer3)
-          end do
-       end do
-    end do
-
-  end subroutine filter_projection_om_3d
-
-  !> Projects vorticity values from fine to coarse grid :
-  !> the smallest modes of vorticity are nullified
-  !! @param[in] dxf, dyf, dzf: grid filter size = domainLength/(CoarseRes-1)
-  subroutine filter_multires_om_3d(dxf, dyf, dzf)
-
-    real(C_DOUBLE), intent(in) :: dxf, dyf, dzf
-    integer(C_INTPTR_T) :: i,j,k
-    real(C_DOUBLE) :: kxc, kyc, kzc
-
-    kxc = pi / dxf
-    kyc = pi / dyf
-    kzc = pi / dzf
-
-    !! mind the transpose -> index inversion between y and z
-    do j = 2,local_resolution(c_Y)
-       do k = 2, fft_resolution(c_Z)
-          do i = 2, local_resolution(c_X)
-             if ((abs(kx(i))>kxc) .and. (abs(ky(j))>kyc) .and. (abs(kz(k))>kzc)) then
-                dataout1(i,k,j) = 0.
-                dataout2(i,k,j) = 0.
-                dataout3(i,k,j) = 0.
-             endif
-          end do
-       end do
-    end do
-
-  end subroutine filter_multires_om_3d
-
-  !> Solve the Poisson problem allowing to recover
-  !! pressure from velocity in the Fourier space
-  subroutine filter_pressure_3d()
-    integer(C_INTPTR_T) :: i,j,k
-    complex(C_DOUBLE_COMPLEX) :: coeff
-
-    ! Set first coeff (check for "all freq = 0" case)
-    if(local_offset(c_Y) == 0) then
-       dataout1(1,1,1) = 0.0
-    else
-       coeff = -one/(ky(1)**2)
-       dataout1(1,1,1) = coeff*dataout1(1,1,1)
-    endif
-
-    ! !! mind the transpose -> index inversion between y and z
-    do i = 2, local_resolution(c_X)
-       coeff = -one/(kx(i)**2+ky(1)**2)
-       dataout1(i,1,1) = coeff*dataout1(i,1,1)
-    end do
-
-    ! !! mind the transpose -> index inversion between y and z
-    do k = 2, fft_resolution(c_Z)
-       do i = 1, local_resolution(c_X)
-          coeff = -one/(kx(i)**2+ky(1)**2+kz(k)**2)
-          dataout1(i,k,1) = coeff*dataout1(i,k,1)
-       end do
-    end do
-
-    ! !! mind the transpose -> index inversion between y and z
-    do j = 2,local_resolution(c_Y)
-       do k = 1, fft_resolution(c_Z)
-          do i = 1, local_resolution(c_X)
-             coeff = -one/(kx(i)**2+ky(j)**2+kz(k)**2)
-             dataout1(i,k,j) = coeff*dataout1(i,k,j)
-          end do
-       end do
-    end do
-  end subroutine filter_pressure_3d
-
-  !> Solve Poisson problem in the Fourier space :
-  !! \f{eqnarray*} \Delta \psi &=& - \omega \\ v &=& \nabla\times\psi \f}
-  subroutine filter_poisson_3d_many()
-
-    integer(C_INTPTR_T) :: i,j,k
-    complex(C_DOUBLE_COMPLEX) :: coeff
-    complex(C_DOUBLE_COMPLEX) :: buffer1,buffer2
-
-    ! Set first coeff (check for "all freq = 0" case)
-    if(local_offset(c_Y) == 0) then
-       dataout_many(:,1,1,1) = 0.0
-    else
-       coeff = Icmplx/(ky(1)**2)
-       buffer1 = dataout_many(1,1,1,1)
-       dataout_many(1,1,1,1) = coeff*ky(1)*dataout_many(3,1,1,1)
-       dataout_many(2,1,1,1) = 0.0
-       dataout_many(3,1,1,1) = -coeff*ky(1)*buffer1
-    endif
-
-    ! !! mind the transpose -> index inversion between y and z
-    do i = 2, local_resolution(c_X)
-       coeff = Icmplx/(kx(i)**2+ky(1)**2)
-       buffer1 = dataout_many(1,i,1,1)
-       buffer2 = dataout_many(2,i,1,1)
-       dataout_many(1,i,1,1) = coeff*ky(1)*dataout_many(3,i,1,1)
-       dataout_many(2,i,1,1) = -coeff*kx(i)*dataout_many(3,i,1,1)
-       dataout_many(3,i,1,1) = coeff*(kx(i)*buffer2-ky(1)*buffer1)
-    end do
-
-    ! !! mind the transpose -> index inversion between y and z
-    do k = 2, fft_resolution(c_Z)
-       do i = 1, local_resolution(c_X)
-          coeff = Icmplx/(kx(i)**2+ky(1)**2+kz(k)**2)
-          buffer1 = dataout_many(1,i,k,1)
-          buffer2 = dataout_many(2,i,k,1)
-          dataout_many(1,i,k,1) = coeff*(ky(1)*dataout_many(3,i,k,1)-kz(k)*dataout_many(2,i,k,1))
-          dataout_many(2,i,k,1) = coeff*(kz(k)*buffer1-kx(i)*dataout_many(3,i,k,1))
-          dataout_many(3,i,k,1) = coeff*(kx(i)*buffer2-ky(1)*buffer1)
-       end do
-    end do
-
-    ! !! mind the transpose -> index inversion between y and z
-    do j = 2,local_resolution(c_Y)
-       do k = 1, fft_resolution(c_Z)
-          do i = 1, local_resolution(c_X)
-             coeff = Icmplx/(kx(i)**2+ky(j)**2+kz(k)**2)
-             buffer1 = dataout_many(1,i,k,j)
-             buffer2 = dataout_many(2,i,k,j)
-             dataout_many(1,i,k,j) = coeff*(ky(j)*dataout_many(3,i,k,j)-kz(k)*dataout_many(2,i,k,j))
-             dataout_many(2,i,k,j) = coeff*(kz(k)*buffer1-kx(i)*dataout_many(3,i,k,j))
-             dataout_many(3,i,k,j) = coeff*(kx(i)*buffer2-ky(j)*buffer1)
-          end do
-       end do
-    end do
-
-  end subroutine filter_poisson_3d_many
-
-  !> Solve diffusion problem in the Fourier space :
-  !! \f{eqnarray*} \omega &=& \nabla \times v \\ \frac{\partial \omega}{\partial t} &=& \nu \Delta \omega \f}
-  !! @param[in] nudt \f$ \nu\times dt\f$, diffusion coefficient times current time step
-  subroutine filter_diffusion_3d_many(nudt)
-
-    real(C_DOUBLE), intent(in) :: nudt
-    integer(C_INTPTR_T) :: i,j,k
-    complex(C_DOUBLE_COMPLEX) :: coeff
-    complex(C_DOUBLE_COMPLEX) :: buffer1,buffer2
-
-    !! mind the transpose -> index inversion between y and z
-    do j = 1,local_resolution(c_Y)
-       do k = 1, fft_resolution(c_Z)
-          do i = 1, local_resolution(c_X)
-             coeff = Icmplx/(one + nudt * kx(i)**2+ky(j)**2+kz(k)**2)
-             buffer1 = dataout_many(1,i,k,j)
-             buffer2 = dataout_many(2,i,k,j)
-             dataout_many(1,i,k,j) = coeff*(ky(j)*dataout_many(3,i,k,j)-kz(k)*dataout_many(2,i,k,j))
-             dataout_many(2,i,k,j) = coeff*(kz(k)*buffer1-kx(i)*dataout_many(3,i,k,j))
-             dataout_many(3,i,k,j) = coeff*(kx(i)*buffer2-ky(j)*buffer1)
-          end do
-       end do
-    end do
-
-  end subroutine filter_diffusion_3d_many
-
-  !> Clean fftw context (free memory, plans ...)
-  subroutine cleanFFTW_3d()
-    call fftw_destroy_plan(plan_forward1)
-    call fftw_destroy_plan(plan_backward1)
-    if(.not.manycase) then
-       call fftw_destroy_plan(plan_forward2)
-       call fftw_destroy_plan(plan_backward2)
-       call fftw_destroy_plan(plan_forward3)
-       call fftw_destroy_plan(plan_backward3)
-       call fftw_free(cbuffer2)
-       call fftw_free(cbuffer3)
-    endif
-    call fftw_free(cbuffer1)
-    call fftw_mpi_cleanup()
-    deallocate(fft_resolution)
-    deallocate(kx,ky,kz)
-    if(rank==0) close(21)
-  end subroutine cleanFFTW_3d
-
-  !> some information about memory alloc, arrays sizes and so on
-  subroutine fft3d_diagnostics(nbelem,howmany)
-    integer(C_INTPTR_T), intent(in) :: nbelem
-    ! number of buffers used for fftw
-    integer, optional,intent(in) :: howmany
-    complex(C_DOUBLE_COMPLEX) :: memoryAllocated
-
-    integer :: nbFields
-    if(present(howmany)) then
-       nbFields = howmany
-    else
-       nbFields = 3
-    end if
-    memoryAllocated = real(nbelem*sizeof(memoryAllocated),wp)*1e-6
-    write(*,'(a,i5,a,i12,f10.2)') '[',rank,'] size of each buffer (elements / memory in MB):', &
-         nbelem, memoryAllocated
-    write(*,'(a,i5,a,3i12)') '[',rank,'] size of kx,y,z vectors (number of elements):', &
-         size(kx), size(ky),size(kz)
-    write(*,'(a,i5,a,6i5)') '[',rank,'] local resolution and offset :', local_resolution, local_offset
-    memoryAllocated = nbFields*memoryAllocated + real(sizeof(kx) + sizeof(ky) + sizeof(kz), wp)*1e-6
-    write(*,'(a,i5,a,f10.2)') '[',rank,'] Total memory used for fftw buffers (MB):', memoryAllocated
-
-  end subroutine fft3d_diagnostics
-
-  !> Get local size of input and output field on fftw topology
-  !! @param datashape local shape of the input field for the fftw process
-  !! @param offset index of the first component of the local field (in each dir) in the global set of indices
-  subroutine getParamatersTopologyFFTW3d(datashape,offset)
-    integer(kind=ip), intent(out),dimension(3) :: datashape
-    integer(kind=ip), intent(out),dimension(3) :: offset
-    integer(C_INTPTR_T) :: i_zero = 0
-    datashape = (/fft_resolution(c_X), fft_resolution(c_Y), local_resolution(c_Z)/)
-    offset = (/i_zero, i_zero, local_offset(c_Z)/)
-  end subroutine getParamatersTopologyFFTW3d
-
-  !> forward transform - The result is saved in local buffers
-  !!  @param[in] field 3d scalar field, x-component of the input vector field
-  !!  @param[in] ghosts, number of points in the ghost layer of input fields.
-  subroutine r2c_3d_scal(field, ghosts)
-
-    real(wp),dimension(:,:,:),intent(in) :: field
-    integer(kind=ip), dimension(3), intent(in) :: ghosts
-    !real(8) :: start
-    integer(C_INTPTR_T) :: i,j,k, ig, jg, kg
-
-    ! ig, jg, kg are used to take into account
-    ! ghost points in input fields
-    ! init
-    do k =1, local_resolution(c_Z)
-       kg = k + ghosts(c_Z)
-       do j = 1, fft_resolution(c_Y)
-          jg = j + ghosts(c_Y)
-          do i = 1, fft_resolution(c_X)
-             ig = i + ghosts(c_X)
-             rdatain1(i,j,k) = field(ig,jg,kg)
-          end do
-       end do
-    end do
-
-    ! compute transforms for each component
-    !start = MPI_WTIME()
-    call fftw_mpi_execute_dft_r2c(plan_forward1, rdatain1, dataout1)
-    !!print *, "r2c time = ", MPI_WTIME() - start
-
-  end subroutine r2c_3d_scal
-
-  !> Compute spectrum of the given data
-  subroutine filter_spectrum_3d(spectrum,wavelengths,length)
-
-    real(wp),dimension(:),intent(inout) :: spectrum
-    real(wp),dimension(:),intent(inout) :: wavelengths
-    real(wp),intent(in) :: length
-    integer(C_INTPTR_T) :: i,j,k
-    real(wp) :: c
-    real(wp) :: kk,dk,kc,eps
-    integer(C_INTPTR_T) :: indk
-    spectrum = 0
-    dk = 2.0_wp*pi/length
-    kc = pi*fft_resolution(c_X)/length
-    eps=kc/1000000.0_wp
-
-    !! mind the transpose -> index inversion between y and z
-    c = one/real(fft_resolution(c_Z)*fft_resolution(c_Y)*fft_resolution(c_X),wp)
-    c = c * c
-    do j = 1,local_resolution(c_Y)
-       do k = 1, fft_resolution(c_Z)
-          do i = 1, local_resolution(c_X)
-             kk=sqrt(kx(i)**2+ky(j)**2+kz(k)**2)
-             if ((kk.gt.eps).and.(kk.le.kc)) then
-                indk=1+int(kk/dk+0.5_wp)
-                spectrum(indk) = spectrum(indk) + real(dataout1(i,j,k)*conjg(dataout1(i,j,k)), wp) * c
-             end if
-          end do
-       end do
-    end do
-    wavelengths(:) = kx
-
-  end subroutine filter_spectrum_3d
-
-end module fft3d
diff --git a/hysop/old/numerics.old/fftw_f/fftw2py.f90 b/hysop/old/numerics.old/fftw_f/fftw2py.f90
deleted file mode 100755
index 09517a4ca3034a9800a76e99c94ff9656963e2a9..0000000000000000000000000000000000000000
--- a/hysop/old/numerics.old/fftw_f/fftw2py.f90
+++ /dev/null
@@ -1,330 +0,0 @@
-!> @file fftw2py.f90
-!! Fortran to python interface file.
-
-!> Interface to mpi-fftw (fortran) utilities
-module fftw2py
-
-  use precision
-  use parameters
-  !> 2d case
-  use fft2d
-  !> 3d case
-  use fft3d
-  use, intrinsic :: iso_c_binding
-  use mpi, only : mpi_comm_dup, mpi_wtime
-  implicit none
-
-contains
-  
-  !> Initialisation of fftw context : create plans and memory buffers
-  !! @param[in] resolution global resolution of the discrete domain
-  !! @param[in] lengths width of each side of the domain
-  !! @param[in] comm MPI communicator
-  !! @param[out] datashape local dimension of the input/output field
-  !! @param[out] offset absolute index of the first component of the local field
-  subroutine init_fftw_solver(resolution,lengths,comm,datashape,offset,dim,fftw_type_real)
-
-    integer, intent(in) :: dim
-    integer(kind=ip), dimension(dim),intent(in) :: resolution
-    real(wp),dimension(dim), intent(in) :: lengths
-    integer(kind=ip), dimension(dim), intent(out) :: datashape
-    integer(kind=ip), dimension(dim), intent(out) :: offset
-    integer, intent(in)                 :: comm
-    logical, optional :: fftw_type_real
-    !f2py optional :: dim=len(resolution)
-    !f2py intent(hide) dim
-    !f2py logical optional, intent(in) :: fftw_type_real = 1
-
-    integer :: ierr
-
-    ! Duplicate comm into client_data::main_comm (used later in fft2d and fft3d)
-    call mpi_comm_dup(comm, main_comm, ierr)
-
-    if(fftw_type_real) then
-       if(dim == 2) then
-          !print*, "Init fftw/poisson solver for a 2d problem"
-          call init_r2c_2d(resolution,lengths)
-       else
-          !print*, "Init fftw/poisson solver for a 3d problem"
-          call init_r2c_3d(resolution,lengths)
-       end if
-    else
-       if(dim == 2) then
-          !print*, "Init fftw/poisson solver for a 2d problem"
-          call init_c2c_2d(resolution,lengths)
-       else
-          !print*, "Init fftw/poisson solver for a 3d problem"
-          call init_c2c_3d(resolution,lengths)
-       end if
-    end if
-
-    if(dim==2) then
-       call getParamatersTopologyFFTW2d(datashape,offset)
-    else
-       call getParamatersTopologyFFTW3d(datashape,offset)
-    end if
-  end subroutine init_fftw_solver
-
-
-    !> Initialisation of fftw context : create plans and memory buffers
-  !! @param[in] resolution global resolution of the discrete domain
-  !! @param[in] lengths width of each side of the domain
-  !! @param[in] comm MPI communicator
-  !! @param[out] datashape local dimension of the input/output field
-  !! @param[out] offset absolute index of the first component of the local field
-  subroutine init_fftw_solver_scalar(resolution,lengths,comm,datashape,offset,dim,fftw_type_real)
-
-    integer, intent(in) :: dim
-    integer(kind=ip), dimension(dim),intent(in) :: resolution
-    real(wp),dimension(dim), intent(in) :: lengths
-    integer(kind=ip), dimension(dim), intent(out) :: datashape
-    integer(kind=ip), dimension(dim), intent(out) :: offset
-    integer, intent(in)                 :: comm
-    logical, optional :: fftw_type_real
-    !f2py optional :: dim=len(resolution)
-    !f2py intent(hide) dim
-    !f2py logical optional, intent(in) :: fftw_type_real = 1
-
-    integer :: ierr
-
-    ! Duplicate comm into client_data::main_comm (used later in fft2d and fft3d)
-    call mpi_comm_dup(comm, main_comm, ierr)
-    
-    !print*, "Init fftw/poisson solver for a 3d problem"
-    call init_r2c_scalar_3d(resolution,lengths)
-    
-    call getParamatersTopologyFFTW3d(datashape,offset)
-    
-  end subroutine init_fftw_solver_scalar
-
-  !> Free memory allocated for fftw-related objects (plans and buffers)
-  subroutine clean_fftw_solver(dim)
-
-    integer, intent(in) :: dim
-    if(dim == 2) then
-       call cleanFFTW_2d()
-    else
-       call cleanFFTW_3d()
-    end if
-  end subroutine clean_fftw_solver
-
-  !> Solve
-  !! \f[ \nabla (\nabla \times velocity) = - \omega \f]
-  !! velocity being a 2D vector field and omega a 2D scalar field.
-  subroutine solve_poisson_2d(omega,velocity_x,velocity_y, ghosts_vort, ghosts_velo)
-    real(wp),dimension(:,:),intent(in):: omega
-    real(wp),dimension(size(omega,1),size(omega,2)),intent(out) :: velocity_x,velocity_y
-    integer(kind=ip), dimension(2), intent(in) :: ghosts_vort, ghosts_velo
-    !f2py intent(in,out) :: velocity_x,velocity_y
-    
-    call r2c_scalar_2d(omega, ghosts_vort)
-
-    call filter_poisson_2d()
-
-    call c2r_2d(velocity_x,velocity_y, ghosts_velo)
-    !!print *, "fortran resolution time : ", MPI_WTime() - start
-
-  end subroutine solve_poisson_2d
-
-  !> Solve
-  !! \f{eqnarray*} \frac{\partial \omega}{\partial t} &=& \nu \Delta \omega \f}
-  !! omega being a 2D scalar field.
-  subroutine solve_diffusion_2d(nudt, omega, ghosts_vort)
-    real(wp), intent(in) :: nudt
-    real(wp),dimension(:,:),intent(inout):: omega
-    integer(kind=ip), dimension(2), intent(in) :: ghosts_vort
-    !f2py intent(in,out) :: omega
-
-    call r2c_scalar_2d(omega, ghosts_vort)
-
-    call filter_diffusion_2d(nudt)
-
-    call c2r_scalar_2d(omega, ghosts_vort)
-
-  end subroutine solve_diffusion_2d
-
-  !> Solve
-  !! \f{eqnarray*} \Delta \psi &=& - \omega \\ velocity = \nabla\times\psi \f}
-  !! velocity and omega being 3D vector fields.
-  subroutine solve_poisson_3d(omega_x,omega_y,omega_z,velocity_x,velocity_y,velocity_z, ghosts_vort, ghosts_velo)
-    real(wp),dimension(:,:,:),intent(in):: omega_x,omega_y,omega_z
-    real(wp),dimension(size(omega_x,1),size(omega_y,2),size(omega_z,3)),intent(out) :: velocity_x,velocity_y,velocity_z
-    integer(kind=ip), dimension(3), intent(in) :: ghosts_vort, ghosts_velo
-    real(wp) :: start
-    !f2py intent(in,out) :: velocity_x,velocity_y,velocity_z
-    start = MPI_WTime()
-    call r2c_3d(omega_x,omega_y,omega_z, ghosts_vort)
-
-    call filter_poisson_3d()
-
-    call c2r_3d(velocity_x,velocity_y,velocity_z, ghosts_velo)
-    !!print *, "fortran resolution time : ", MPI_WTime() - start
-
-  end subroutine solve_poisson_3d
-
-  !> Solve
-  !! \f{eqnarray*} \Delta \psi &=& - \omega \\ velocity = \nabla\times\psi \f}
-  !! velocity being a 2D complex vector field and omega a 2D complex scalar field.
-  subroutine solve_poisson_2d_c(omega,velocity_x,velocity_y)
-    complex(wp),dimension(:,:),intent(in):: omega
-    complex(wp),dimension(size(omega,1),size(omega,2)),intent(out) :: velocity_x,velocity_y
-    !f2py intent(in,out) :: velocity_x,velocity_y
-
-    call c2c_2d(omega,velocity_x,velocity_y)
-
-  end subroutine solve_poisson_2d_c
-
-  !> Solve
-  !!  \f{eqnarray*} \Delta \psi &=& - \omega \\ velocity = \nabla\times\psi \f}
-  !! velocity and omega being 3D complex vector fields.
-  subroutine solve_poisson_3d_c(omega_x,omega_y,omega_z,velocity_x,velocity_y,velocity_Z)
-    complex(wp),dimension(:,:,:),intent(in):: omega_x,omega_y,omega_z
-    complex(wp),dimension(size(omega_x,1),size(omega_y,2),size(omega_z,3)),intent(out) :: velocity_x,velocity_y,velocity_z
-    !f2py intent(in,out) :: velocity_x,velocity_y,velocity_z
-
-    call c2c_3d(omega_x,omega_y,omega_z,velocity_x,velocity_y,velocity_z)
-
-  end subroutine solve_poisson_3d_c
-
-  !> Solve
-  !! \f{eqnarray*} \omega &=& \nabla \times v \\ \frac{\partial \omega}{\partial t} &=& \nu \Delta \omega \f}
-  !! velocity and omega being 3D vector fields.
-  subroutine solve_curl_diffusion_3d(nudt,velocity_x,velocity_y,velocity_z,omega_x,omega_y,omega_z, ghosts_velo, ghosts_vort)
-    real(wp), intent(in) :: nudt
-    real(wp),dimension(:,:,:),intent(in):: velocity_x,velocity_y,velocity_z
-    real(wp),dimension(size(velocity_x,1),size(velocity_y,2),size(velocity_z,3)),intent(out) :: omega_x,omega_y,omega_z
-    integer(kind=ip), dimension(3), intent(in) :: ghosts_vort, ghosts_velo
-    !f2py intent(in,out) :: omega_x,omega_y,omega_z
-
-    call r2c_3d(velocity_x,velocity_y,velocity_z, ghosts_velo)
-
-    call filter_curl_diffusion_3d(nudt)
-
-    call c2r_3d(omega_x,omega_y,omega_z, ghosts_vort)
-
-  end subroutine solve_curl_diffusion_3d
-
-  !> Solve
-  !! \f{eqnarray*} \frac{\partial \omega}{\partial t} &=& \nu \Delta \omega \f}
-  !! omega being 3D vector field.
-  subroutine solve_diffusion_3d(nudt,omega_x,omega_y,omega_z, ghosts)
-    real(wp), intent(in) :: nudt
-    real(wp),dimension(:,:,:),intent(inout):: omega_x,omega_y,omega_z
-    integer(kind=ip), dimension(3), intent(in) :: ghosts
-    !f2py intent(in,out) :: omega_x,omega_y,omega_z
-
-    call r2c_3d(omega_x,omega_y,omega_z, ghosts)
-
-    call filter_diffusion_3d(nudt)
-
-    call c2r_3d(omega_x,omega_y,omega_z, ghosts)
-
-  end subroutine solve_diffusion_3d
-
-  !> Perform solenoidal projection to ensure divergence free vorticity field
-  !! \f{eqnarray*} \omega ' &=& \omega - \nabla\pi \f}
-  !! omega being a 3D vector field.
-  subroutine projection_om_3d(omega_x,omega_y,omega_z, ghosts)
-    real(wp),dimension(:,:,:),intent(inout):: omega_x,omega_y,omega_z
-   integer(kind=ip), dimension(3), intent(in) :: ghosts
-    !f2py intent(in,out) :: omega_x,omega_y,omega_z
-
-    call r2c_3d(omega_x,omega_y,omega_z, ghosts)
-
-    call filter_projection_om_3d()
-
-    call c2r_3d(omega_x,omega_y,omega_z, ghosts)
-
-  end subroutine projection_om_3d
-
-  !> Projects vorticity values from fine to coarse grid :
-  !! @param[in] dxf, dyf, dzf: grid filter size = domainLength/(CoarseRes-1)
-  !! in the following, omega is the 3D vorticity vector field.
-  subroutine multires_om_3d(dxf, dyf, dzf, omega_x,omega_y,omega_z, ghosts)
-    real(wp), intent(in) :: dxf, dyf, dzf
-    real(wp),dimension(:,:,:),intent(inout):: omega_x,omega_y,omega_z
-    integer(kind=ip), dimension(3), intent(in) :: ghosts
-
-    !f2py intent(in,out) :: omega_x,omega_y,omega_z
-
-    call r2c_3d(omega_x,omega_y,omega_z, ghosts)
-
-    call filter_multires_om_3d(dxf, dyf, dzf)
-
-    call c2r_3d(omega_x,omega_y,omega_z, ghosts)
-
-  end subroutine multires_om_3d
-
-  !> Compute the pressure from the velocity field, solving a Poisson equation.
-  !! \f{eqnarray*} \Delta p ' &=& rhs \f}
-  !! with rhs depending on the first derivatives of the velocity field
-  !! @param[in, out] pressure
-  !! in the following, pressure is used as inout parameter. It must contains the rhs of poisson equation.
-  subroutine pressure_3d(pressure, ghosts)
-    integer(kind=ip), dimension(3), intent(in) :: ghosts
-    real(wp),dimension(:,:,:),intent(inout):: pressure
-    !f2py intent(in,out) :: pressure
-
-    call r2c_scalar_3d(pressure, ghosts)
-
-    call filter_pressure_3d()
-
-    call c2r_scalar_3d(pressure, ghosts)
-
-  end subroutine pressure_3d
-
-  !> Solve
-  !! \f{eqnarray*} \omega &=& \nabla \times v
-  !! velocity and omega being 3D vector fields.
-  subroutine solve_curl_3d(velocity_x,velocity_y,velocity_z,omega_x,omega_y,omega_z, ghosts_velo, ghosts_vort)
-    real(wp),dimension(:,:,:),intent(in):: velocity_x,velocity_y,velocity_z
-    real(wp),dimension(size(velocity_x,1),size(velocity_y,2),size(velocity_z,3)),intent(out) :: omega_x,omega_y,omega_z
-    integer(kind=ip), dimension(3), intent(in) :: ghosts_velo, ghosts_vort
-    !f2py intent(in,out) :: omega_x,omega_y,omega_z
-
-    call r2c_3d(velocity_x,velocity_y,velocity_z, ghosts_velo)
-
-    call filter_curl_3d()
-
-    call c2r_3d(omega_x,omega_y,omega_z, ghosts_vort)
-
-  end subroutine solve_curl_3d
-
-
-  !> Solve
-  !! \f{eqnarray*} \omega &=& \nabla \times v
-  !! velocity and omega being 2D vector and scalar fields.
-  subroutine solve_curl_2d(velocity_x,velocity_y, omega_z, ghosts_velo, ghosts_vort)
-    real(wp), dimension(:,:), intent(in):: velocity_x,velocity_y
-    real(wp), dimension(size(velocity_x,1), size(velocity_x,2)), intent(out) :: omega_z
-    integer(kind=ip), dimension(2), intent(in) :: ghosts_velo, ghosts_vort
-    !f2py intent(in,out) :: omega_z
-
-    call r2c_2d(velocity_x,velocity_y, ghosts_velo)
-
-    call filter_curl_2d()
-
-    call c2r_scalar_2d(omega_z, ghosts_vort)
-
-  end subroutine solve_curl_2d
-
-  !> Compute spectrum of a scalar field
-  !! @param[in] field
-  !! @param[out] spectrum
-  subroutine spectrum_3d(field, spectrum, wavelengths, ghosts, length)
-    real(wp),dimension(:,:,:),intent(in):: field
-    integer(kind=ip), dimension(3), intent(in) :: ghosts
-    real(wp),dimension(:), intent(inout) :: spectrum
-    real(wp),dimension(:), intent(inout) :: wavelengths
-    real(wp),intent(in) :: length
-    !f2py intent(in) :: field
-    !f2py intent(inout) :: spectrum
-    !f2py intent(inout) :: wavelengths
-
-    call r2c_3d_scal(field, ghosts)
-
-    call filter_spectrum_3d(spectrum, wavelengths, length)
-
-  end subroutine spectrum_3d
-
-end module fftw2py
diff --git a/hysop/old/numerics.old/fftw_f/fftw2py.pyf b/hysop/old/numerics.old/fftw_f/fftw2py.pyf
deleted file mode 100644
index 74ac41ee1c12458b18ea26880d940ab6f0a52d9f..0000000000000000000000000000000000000000
--- a/hysop/old/numerics.old/fftw_f/fftw2py.pyf
+++ /dev/null
@@ -1,131 +0,0 @@
-!    -*- f90 -*-
-! Note: the context of this file is case sensitive.
-
-module fftw2py ! in fftw2py.f90
-  use fft2d
-  use fft3d
-  use precision
-  use parameters
-  use mpi
-  subroutine init_fftw_solver(resolution,lengths,comm,datashape,offset,dim,fftw_type_real) ! in fftw2py.f90:fftw2py
-    integer(kind=ip), dimension(dim),intent(in) :: resolution
-    real(kind=wp) dimension(dim),intent(in),depend(dim) :: lengths
-    integer intent(in) :: comm
-    integer(kind=ip) dimension(dim),intent(out),depend(dim) :: datashape
-    integer(kind=ip) dimension(dim),intent(out),depend(dim) :: offset
-    integer, optional,intent(hide), depend(resolution) :: dim=len(resolution)
-    logical, optional,intent(in) :: fftw_type_real=1
-  end subroutine init_fftw_solver
-  subroutine init_fftw_solver_scalar(resolution,lengths,comm,datashape,offset,dim,fftw_type_real) ! in fftw2py.f90:fftw2py
-    integer(kind=ip) dimension(dim),intent(in) :: resolution
-    real(kind=wp) dimension(dim),intent(in),depend(dim) :: lengths
-    integer intent(in) :: comm
-    integer(ip) dimension(dim),intent(out),depend(dim) :: datashape
-    integer(ip) dimension(dim),intent(out),depend(dim) :: offset
-    integer, optional,intent(hide), ,depend(resolution) :: dim=len(resolution)
-    logical, optional,intent(in) :: fftw_type_real=1
-  end subroutine init_fftw_solver_scalar
-  subroutine clean_fftw_solver(dim) ! in fftw2py.f90:fftw2py
-    integer intent(in) :: dim
-  end subroutine clean_fftw_solver
-  subroutine solve_poisson_2d(omega,velocity_x,velocity_y,ghosts_vort,ghosts_velo) ! in fftw2py.f90:fftw2py
-    real(kind=wp) dimension(:,:),intent(in) :: omega
-    real(kind=wp) dimension(size(omega,1),size(omega,2)),intent(in,out),depend(omega,omega) :: velocity_x
-    real(kind=wp) dimension(size(omega,1),size(omega,2)),intent(in,out),depend(omega,omega) :: velocity_y
-    integer(kind=ip) dimension(2),intent(in) :: ghosts_vort
-    integer(kind=ip) dimension(2),intent(in) :: ghosts_velo
-  end subroutine solve_poisson_2d
-  subroutine solve_diffusion_2d(nudt,omega,ghosts_vort) ! in fftw2py.f90:fftw2py
-    real(kind=wp) intent(in) :: nudt
-    real(kind=wp) dimension(:,:),intent(in,out) :: omega
-    integer(kind=ip) dimension(2),intent(in) :: ghosts_vort
-  end subroutine solve_diffusion_2d
-  subroutine solve_poisson_3d(omega_x,omega_y,omega_z,velocity_x,velocity_y,velocity_z,ghosts_vort,ghosts_velo) ! in fftw2py.f90:fftw2py
-    real(kind=wp) dimension(:,:,:),intent(in) :: omega_x
-    real(kind=wp) dimension(:,:,:),intent(in) :: omega_y
-    real(kind=wp) dimension(:,:,:),intent(in) :: omega_z
-    real(kind=wp) dimension(size(omega_x,1),size(omega_y,2),size(omega_z,3)),intent(in,out),depend(omega_x,omega_y,omega_z) :: velocity_x
-    real(kind=wp) dimension(size(omega_x,1),size(omega_y,2),size(omega_z,3)),intent(in,out),depend(omega_x,omega_y,omega_z) :: velocity_y
-    real(kind=wp) dimension(size(omega_x,1),size(omega_y,2),size(omega_z,3)),intent(in,out),depend(omega_x,omega_y,omega_z) :: velocity_z
-    integer(kind=ip) dimension(3),intent(in) :: ghosts_vort
-    integer(kind=ip) dimension(3),intent(in) :: ghosts_velo
-  end subroutine solve_poisson_3d
-  subroutine solve_poisson_2d_c(omega,velocity_x,velocity_y) ! in fftw2py.f90:fftw2py
-    complex(kind=wp) dimension(:,:),intent(in) :: omega
-    complex(kind=wp) dimension(size(omega,1),size(omega,2)),intent(in,out),depend(omega,omega) :: velocity_x
-    complex(kind=wp) dimension(size(omega,1),size(omega,2)),intent(in,out),depend(omega,omega) :: velocity_y
-  end subroutine solve_poisson_2d_c
-  subroutine solve_poisson_3d_c(omega_x,omega_y,omega_z,velocity_x,velocity_y,velocity_z) ! in fftw2py.f90:fftw2py
-    complex(kind=wp) dimension(:,:,:),intent(in) :: omega_x
-    complex(kind=wp) dimension(:,:,:),intent(in) :: omega_y
-    complex(kind=wp) dimension(:,:,:),intent(in) :: omega_z
-    complex(kind=wp) dimension(size(omega_x,1),size(omega_y,2),size(omega_z,3)),intent(in,out),depend(omega_x,omega_y,omega_z) :: velocity_x
-    complex(kind=wp) dimension(size(omega_x,1),size(omega_y,2),size(omega_z,3)),intent(in,out),depend(omega_x,omega_y,omega_z) :: velocity_y
-    complex(kind=wp) dimension(size(omega_x,1),size(omega_y,2),size(omega_z,3)),intent(in,out),depend(omega_x,omega_y,omega_z) :: velocity_z
-  end subroutine solve_poisson_3d_c
-  subroutine solve_curl_diffusion_3d(nudt,velocity_x,velocity_y,velocity_z,omega_x,omega_y,omega_z,ghosts_velo,ghosts_vort) ! in fftw2py.f90:fftw2py
-    real(kind=wp) intent(in) :: nudt
-    real(kind=wp) dimension(:,:,:),intent(in) :: velocity_x
-    real(kind=wp) dimension(:,:,:),intent(in) :: velocity_y
-    real(kind=wp) dimension(:,:,:),intent(in) :: velocity_z
-    real(kind=wp) dimension(size(velocity_x,1),size(velocity_y,2),size(velocity_z,3)),intent(in,out),depend(velocity_x,velocity_y,velocity_z) :: omega_x
-    real(kind=wp) dimension(size(velocity_x,1),size(velocity_y,2),size(velocity_z,3)),intent(in,out),depend(velocity_x,velocity_y,velocity_z) :: omega_y
-    real(kind=wp) dimension(size(velocity_x,1),size(velocity_y,2),size(velocity_z,3)),intent(in,out),depend(velocity_x,velocity_y,velocity_z) :: omega_z
-    integer(kind=ip) dimension(3),intent(in) :: ghosts_velo
-    integer(kind=ip) dimension(3),intent(in) :: ghosts_vort
-  end subroutine solve_curl_diffusion_3d
-  subroutine solve_diffusion_3d(nudt,omega_x,omega_y,omega_z,ghosts) ! in fftw2py.f90:fftw2py
-    real(kind=wp) intent(in) :: nudt
-    real(kind=wp) dimension(:,:,:),intent(in,out) :: omega_x
-    real(kind=wp) dimension(:,:,:),intent(in,out) :: omega_y
-    real(kind=wp) dimension(:,:,:),intent(in,out) :: omega_z
-    integer(kind=ip) dimension(3),intent(in) :: ghosts
-  end subroutine solve_diffusion_3d
-  subroutine projection_om_3d(omega_x,omega_y,omega_z,ghosts) ! in fftw2py.f90:fftw2py
-    real(kind=wp) dimension(:,:,:),intent(in,out) :: omega_x
-    real(kind=wp) dimension(:,:,:),intent(in,out) :: omega_y
-    real(kind=wp) dimension(:,:,:),intent(in,out) :: omega_z
-    integer(kind=ip) dimension(3),intent(in) :: ghosts
-  end subroutine projection_om_3d
-  subroutine multires_om_3d(dxf,dyf,dzf,omega_x,omega_y,omega_z,ghosts) ! in fftw2py.f90:fftw2py
-    real(kind=wp) intent(in) :: dxf
-    real(kind=wp) intent(in) :: dyf
-    real(kind=wp) intent(in) :: dzf
-    real(kind=wp) dimension(:,:,:),intent(in,out) :: omega_x
-    real(kind=wp) dimension(:,:,:),intent(in,out) :: omega_y
-    real(kind=wp) dimension(:,:,:),intent(in,out) :: omega_z
-    integer(kind=ip) dimension(3),intent(in) :: ghosts
-  end subroutine multires_om_3d
-  subroutine pressure_3d(pressure,ghosts) ! in fftw2py.f90:fftw2py
-    real(kind=wp) dimension(:,:,:),intent(in,out) :: pressure
-    integer(kind=ip) dimension(3),intent(in) :: ghosts
-  end subroutine pressure_3d
-  subroutine solve_curl_3d(velocity_x,velocity_y,velocity_z,omega_x,omega_y,omega_z,ghosts_velo,ghosts_vort) ! in fftw2py.f90:fftw2py
-    real(kind=wp) dimension(:,:,:),intent(in) :: velocity_x
-    real(kind=wp) dimension(:,:,:),intent(in) :: velocity_y
-    real(kind=wp) dimension(:,:,:),intent(in) :: velocity_z
-    real(kind=wp) dimension(size(velocity_x,1),size(velocity_y,2),size(velocity_z,3)),intent(in,out),depend(velocity_x,velocity_y,velocity_z) :: omega_x
-    real(kind=wp) dimension(size(velocity_x,1),size(velocity_y,2),size(velocity_z,3)),intent(in,out),depend(velocity_x,velocity_y,velocity_z) :: omega_y
-    real(kind=wp) dimension(size(velocity_x,1),size(velocity_y,2),size(velocity_z,3)),intent(in,out),depend(velocity_x,velocity_y,velocity_z) :: omega_z
-    integer(kind=ip) dimension(3),intent(in) :: ghosts_velo
-    integer(kind=ip) dimension(3),intent(in) :: ghosts_vort
-  end subroutine solve_curl_3d
-  subroutine solve_curl_2d(velocity_x,velocity_y,omega_z,ghosts_velo,ghosts_vort) ! in fftw2py.f90:fftw2py
-    real(kind=wp) dimension(:,:),intent(in) :: velocity_x
-    real(kind=wp) dimension(:,:),intent(in) :: velocity_y
-    real(kind=wp) dimension(size(velocity_x,1),size(velocity_x,2)),intent(in,out),depend(velocity_x,velocity_x) :: omega_z
-    integer(kind=ip) dimension(2),intent(in) :: ghosts_velo
-    integer(kind=ip) dimension(2),intent(in) :: ghosts_vort
-  end subroutine solve_curl_2d
-  subroutine spectrum_3d(field,spectrum,wavelengths,ghosts,length) ! in fftw2py.f90:fftw2py
-    real(kind=wp) dimension(:,:,:),intent(in) :: field
-    real(kind=wp) dimension(:),intent(in,out) :: spectrum
-    real(kind=wp) dimension(:),intent(in,out) :: wavelengths
-    integer(kind=ip) dimension(3),intent(in) :: ghosts
-    real(kind=wp) intent(in) :: length
-  end subroutine spectrum_3d
-
-end module fftw2py
-
-! This file was auto-generated with f2py (version:2).
-! See http://cens.ioc.ee/projects/f2py2e/
diff --git a/hysop/old/numerics.old/finite_differences.py b/hysop/old/numerics.old/finite_differences.py
deleted file mode 100644
index 18580128968a35466e89da9cdc08116c01098b4a..0000000000000000000000000000000000000000
--- a/hysop/old/numerics.old/finite_differences.py
+++ /dev/null
@@ -1,562 +0,0 @@
-"""Finite difference schemes
-
-.. currentmodule hysop.numerics.finite_differences
-
-* :class:`~FDC2` : first derivative, 2nd order centered scheme
-* :class:`~FDC4`: first derivative, 4th order centered scheme
-* :class:`~FD2C2`: second derivative, 2nd order centered scheme
-* :class `~FDC`: generic centered scheme
-* :class:`~FiniteDifference`, abstract base class.
-
-
-Usage:
-
-.. code::
-
-    # indices and space step from a predefined mesh
-    indices = topo.mesh.compute_index
-    step = topo.mesh.space_step
-    # init scheme
-    scheme = FDC4(step, indices)
-    # result = some array of the same shape as field.
-    # compute dfield/d direction in result
-    result = scheme(field, direction, result)
-    # or (with more assert and checks on inputs)
-    scheme.compute(field, direction, result)
-    # or result += dfield/d direction
-    scheme.compute_and_add(field, direction, result)
-
-
-Note
-----
-* fd are computed only on points described by indices :
-  result[ind] = diff(field[ind]),
-  which means that result and field must have the same shape.
-* You can also use a 'reduced' output result to minimize memory.
-  Use 'reduce_output_shape=True' in scheme init. In that case,
-  result must be of a shape corresponding to indices 'shape'.
-
-  Example:
-
-.. code::
-
-  scheme = FDC4(step, indices, reduce_output_shape=True)
-  # if indices is slice(4,10) in each direction
-  result = npw.zeros((6,6,6))
-  ...
-
-"""
-from abc import ABCMeta, abstractmethod
-from hysop.constants import debug
-from hysop.numerics.stencil import Stencil, StencilGenerator
-from hysop.numerics.stencil import CenteredStencil, CenteredStencilGenerator
-from hysop.tools.numpywrappers import npw
-import numpy as np
-
-
-class FiniteDifference(object):
-    """Describe and apply a finite difference scheme to compute
-    1st or second derivative of a variable saved in a numpy array.
-
-    Usage :
-
-    >> step = topo.mesh_space_step
-    >> scheme = FDC4(step, topo.mesh.compute_index)
-
-    For a given numpy array (obviously discretized on the topo
-    used to compute indices), to compute result as the derivative
-    of tab according to dir:
-
-    >> scheme.compute(tab, dir, result)
-
-    or result += the derivative of tab according to dir:
-
-    >> scheme.compute_and_add(tab, dir, result)
-    
-    >>> from hysop import Box
-    >>> domain = Box()
-
-    Notes FP :
-    Compute method is much more performant than compute and add
-    and needs less memory. But in some cases (when fd results need
-    to be accumulate into a field) compute_and_add may be useful
-    to limit memory usage, if required.
-    See Global_tests/testPerfAndMemForFD_and_div.py for perf results.
-    """
-
-    __metaclass__ = ABCMeta
-
-    @debug
-    def __new__(cls, *args, **kw):
-        return object.__new__(cls, *args, **kw)
-
-    def __init__(self, step, indices, output_indices=None):
-        """
-
-        Parameters
-        ----------
-        step : list or array of int
-            resolution of the mesh
-        indices : list of slices
-            Represents the local mesh on which finite-differences
-            will be applied, like compute_index
-            in :class:`~hysop.domain.mesh.Mesh`.
-        output_indices : list of slices, optional
-            set which indices of result will be modified. By default,
-            output_indices = indices
-
-        Attributes
-        ----------
-        ghosts_layer_size : int, static
-            minimal number of points required inside the ghost layer
-            for this scheme.
-
-        """
-
-        step = np.asarray(step)
-        #  dim of the field on which scheme will be applied
-        # (i.e dim of the domain)
-        self._dim = step.size
-        self._m1 = None
-        self._a1 = None
-        self._m2 = None
-        self._a2 = None
-        self._coeff = None
-        # List of slices representing the mesh on which fd scheme is applied
-        self.indices = indices
-        if output_indices is None or output_indices is False:
-            # This is the default case. It means that
-            # result shape is unknown. It will
-            # be the same as input tab shape during call
-            # and input/output indices are the same:
-            # we compute result[indices] = scheme(tab[indices])
-            self.output_indices = self.indices
-        elif isinstance(output_indices, list):
-            # In that case, result shape remains unknown but
-            # the position where result is written is not the
-            # same as input indices.
-            # We compute:
-            # result[output_indices] = scheme(tab[indices])
-            # Warning : result shape/output_indices compliance
-            # is not checked!
-            self.output_indices = output_indices
-
-        elif output_indices is True:
-            # Here, output shape is fixed to the minimal required
-            # shape, computed with indices.
-            # So, we compute:
-            # result[...] = scheme(tab[indices])
-            result_shape = tuple([indices[i].stop - indices[i].start
-                                  for i in xrange(len(indices))])
-            self.output_indices = [slice(0, result_shape[i])
-                                   for i in xrange(len(indices))]
-        else:
-            raise ValueError('Invalid value for indices_out!')
-
-        self._step = step
-        self._compute_indices(step)
-
-    @abstractmethod
-    def _compute_indices(self, step):
-        """Internal index lists and fd coeff computation
-
-        """
-
-    @abstractmethod
-    def __call__(self, tab, cdir, result):
-        """Apply FD scheme.
-        """
-
-    def compute(self, tab, cdir, result):
-        """Apply FD scheme. Result is overwritten.
-
-        Parameters
-        ----------
-        tab : numpy array
-            input field
-        cdir : int
-            direction of differentiation
-        result : numpy array
-            in/out, derivative of tab
-        """
-        assert result is not tab
-        assert result.__class__ is np.ndarray
-        assert tab.__class__ is np.ndarray
-        result = self.__call__(tab, cdir, result)
-        return result
-
-    @abstractmethod
-    def compute_and_add(self, tab, cdir, result, work):
-        """Apply FD scheme and add the result inplace.
-
-        Parameters
-        ----------
-        tab : numpy array
-            input field
-        cdir : int
-            direction of differentiation
-        result : numpy array
-            in/out, abs(derivative of tab) + result
-        work : numpy array
-            internal workspace. Must be 1D and of the same
-            size as result.
-        """
-
-    @abstractmethod
-    def compute_and_add_abs(self, tab, cdir, result, work):
-        """Apply FD scheme and add np.abs of the derivative inplace.
-
-        Parameters
-        ----------
-        tab : numpy array
-            input field
-        cdir : int
-            direction of differentiation
-        result : numpy array
-            in/out, derivative of tab + result
-        work : numpy array
-            internal workspace. Must be 1D and of the same
-            size as result.
-        """
-
-class FDkCp(FiniteDifference):
-    """
-    k-th derivative, centered scheme, p-th order.
-
-    """
-    
-    def __init__(self,k,p,*args,**kargs):
-        super(FDkCp,self).__init__(*args,**kargs)
-        
-        csg     = CenteredStencilGenerator()
-        stencil = csg.generate_exact_stencil(order=p,derivative=k)
-        
-        lghosts = stencil.L
-        rghosts = stencil.R
-        
-        self.stencil = stencil
-        self.right_ghost_layer_size = rghosts
-        self.left_ghosts_layer_size = lghosts
-        self.ghosts_layer_size      = np.maximum(lghosts, rghosts)
-    
-    def __call__(self, tab, cdir, result):
-        stencil = self.stencil
-        svars = {stencil.dx:self._step[cdir]}
-         
-        self.output_indices = [idx for idx in self.output_indices]
-        result[self.output_indices] = 0
-        for (offset,coeff) in self.stencil.iteritems(svars):
-            result[self.output_indices] += coeff*tab[self._indices(offset)]
-        return result
-    
-    def _compute_indices(self, step):
-        pass
-    
-    def compute_and_add(self):
-        pass
-    def compute_and_add_abs(self):
-        pass
-
-
-
-class FDC2(FiniteDifference):
-    """
-    1st derivative, centered scheme, 2nd order.
-
-    """
-
-    ghosts_layer_size = 1
-
-    def _compute_indices(self, step):
-
-        self._coeff = npw.asarray(1. / (2. * step))
-        self._m1 = []
-        self._a1 = []
-        for dim in xrange(self._dim):
-            self._m1.append(list(self.indices))
-            self._m1[dim][dim] = slice(self.indices[dim].start - 1,
-                                       self.indices[dim].stop - 1,
-                                       self.indices[dim].step)
-            self._a1.append(list(self.indices))
-            self._a1[dim][dim] = slice(self.indices[dim].start + 1,
-                                       self.indices[dim].stop + 1,
-                                       self.indices[dim].step)
-
-    def __call__(self, tab, cdir, result):
-        result[self.output_indices] = tab[self._a1[cdir]]
-        result[self.output_indices] -= tab[self._m1[cdir]]
-        result[self.output_indices] *= self._coeff[cdir]
-        return result
-
-    def compute_and_add(self, tab, cdir, result, work):
-        """Apply FD scheme and add the derivative inplace.
-
-        Parameters
-        ----------
-        tab : numpy array
-            input field
-        cdir : int
-            direction of differentiation
-        result : numpy array
-            in/out, derivative of tab + result
-        work : numpy array
-            internal workspace. Must be 1D and of the same
-            size as result.
-
-        """
-        assert result.__class__ is np.ndarray
-        assert tab.__class__ is np.ndarray
-        assert work.size == result.size
-        wk = work.reshape(result.shape)
-        np.subtract(tab[self._a1[cdir]], tab[self._m1[cdir]],
-                    wk[self.output_indices])
-        np.multiply(wk[self.output_indices], self._coeff[cdir],
-                    wk[self.output_indices])
-        np.add(wk[self.output_indices], result[self.output_indices],
-               result[self.output_indices])
-        return result
-
-    def compute_and_add_abs(self, tab, cdir, result, work):
-        """Apply FD scheme and add np.abs of the derivative inplace.
-
-        Parameters
-        ----------
-        tab : numpy array
-            input field
-        cdir : int
-            direction of differentiation
-        result : numpy array
-            in/out, derivative of tab + result
-        work : numpy array
-            internal workspace. Must be 1D and of the same
-            size as result.
-
-        """
-        assert result.__class__ is np.ndarray
-        assert tab.__class__ is np.ndarray
-        assert work.__class__ is np.ndarray
-        assert work is not result
-        wk = work.reshape(result.shape)
-        np.subtract(tab[self._a1[cdir]], tab[self._m1[cdir]],
-                    wk[self.output_indices])
-        np.multiply(wk[self.output_indices], self._coeff[cdir],
-                    wk[self.output_indices])
-        np.abs(wk[self.output_indices], wk[self.output_indices])
-        np.add(wk[self.output_indices], result[self.output_indices],
-               result[self.output_indices])
-        return result
-
-
-class FD2C2(FiniteDifference):
-    """
-    Second derivative, centered scheme, 2nd order.
-    """
-
-    ghosts_layer_size = 1
-
-    def _compute_indices(self, step):
-
-        self._m1 = []
-        self._a1 = []
-        self._coeff = npw.asarray(1. / (step * step))
-        for dim in xrange(self._dim):
-            self._m1.append(list(self.indices))
-            self._m1[dim][dim] = slice(self.indices[dim].start - 1,
-                                       self.indices[dim].stop - 1,
-                                       self.indices[dim].step)
-            self._a1.append(list(self.indices))
-            self._a1[dim][dim] = slice(self.indices[dim].start + 1,
-                                       self.indices[dim].stop + 1,
-                                       self.indices[dim].step)
-
-    def __call__(self, tab, cdir, result):
-        result[self.output_indices] = tab[self.indices]
-        result[self.output_indices] *= -2
-        result[self.output_indices] += tab[self._a1[cdir]]
-        result[self.output_indices] += tab[self._m1[cdir]]
-        result[self.output_indices] *= self._coeff[cdir]
-        return result
-
-    def compute_and_add(self, tab, cdir, result, work):
-        """Apply FD scheme and add the result inplace.
-
-        Parameters
-        ----------
-        tab : numpy array
-            input field
-        cdir : int
-            direction of differentiation
-        result : numpy array
-            in/out, derivative of tab + result
-        work : numpy array
-            internal workspace. Must be 1D and of the same
-            size as result.
-
-        """
-        assert result.__class__ is np.ndarray
-        assert tab.__class__ is np.ndarray
-        wk = work.reshape(result.shape)
-        np.multiply(tab[self.indices], -2., wk[self.output_indices])
-        np.add(tab[self._a1[cdir]], wk[self.output_indices],
-               wk[self.output_indices])
-        np.add(tab[self._m1[cdir]], wk[self.output_indices],
-               wk[self.output_indices])
-        np.multiply(wk[self.output_indices], self._coeff[cdir],
-                    wk[self.output_indices])
-        np.add(wk[self.output_indices], result[self.output_indices],
-               result[self.output_indices])
-        return result
-
-    def compute_and_add_abs(self, tab, cdir, result, work):
-        """Apply FD scheme and add np.abs of the derivative inplace.
-
-        Parameters
-        ----------
-        tab : numpy array
-            input field
-        cdir : int
-            direction of differentiation
-        result : numpy array
-            in/out, derivative of tab + result
-        work : numpy array
-            internal workspace. Must be 1D and of the same
-            size as result.
-
-        """
-        assert result.__class__ is np.ndarray
-        assert tab.__class__ is np.ndarray
-        wk = work.reshape(result.shape)
-        np.multiply(tab[self.indices], -2., wk[self.output_indices])
-        np.add(tab[self._a1[cdir]], wk[self.output_indices],
-               wk[self.output_indices])
-        np.add(tab[self._m1[cdir]], wk[self.output_indices],
-               wk[self.output_indices])
-        np.multiply(wk[self.output_indices], self._coeff[cdir],
-                    wk[self.output_indices])
-        np.abs(wk[self.output_indices], wk[self.output_indices])
-        np.add(wk[self.output_indices], result[self.output_indices],
-               result[self.output_indices])
-
-        return result
-
-
-class FDC4(FiniteDifference):
-    """
-    1st derivative, centered scheme, 4th order.
-    """
-
-    ghosts_layer_size = 2
-
-    def _compute_indices(self, step):
-        self._m1 = []
-        self._m2 = []
-        self._a1 = []
-        self._a2 = []
-        # FD scheme coefficients
-        self._coeff = npw.asarray(1. / (12. * step))
-        for dim in xrange(self._dim):
-            self._m1.append(list(self.indices))
-            self._m1[dim][dim] = slice(self.indices[dim].start - 1,
-                                       self.indices[dim].stop - 1,
-                                       self.indices[dim].step)
-            self._m2.append(list(self.indices))
-            self._m2[dim][dim] = slice(self.indices[dim].start - 2,
-                                       self.indices[dim].stop - 2,
-                                       self.indices[dim].step)
-            self._a1.append(list(self.indices))
-            self._a1[dim][dim] = slice(self.indices[dim].start + 1,
-                                       self.indices[dim].stop + 1,
-                                       self.indices[dim].step)
-            self._a2.append(list(self.indices))
-            self._a2[dim][dim] = slice(self.indices[dim].start + 2,
-                                       self.indices[dim].stop + 2,
-                                       self.indices[dim].step)
-
-    def __call__(self, tab, cdir, result):
-        result[self.output_indices] = tab[self._a1[cdir]]
-        result[self.output_indices] -= tab[self._m1[cdir]]
-        result[self.output_indices] *= 8
-        result[self.output_indices] += tab[self._m2[cdir]]
-        result[self.output_indices] -= tab[self._a2[cdir]]
-        result[self.output_indices] *= self._coeff[cdir]
-        return result
-
-    def compute_and_add(self, tab, cdir, result, work):
-        """Apply FD scheme and add the result inplace.
-
-        Parameters
-        ----------
-        tab : numpy array
-            input field
-        cdir : int
-            direction of differentiation
-        result : numpy array
-            in/out, derivative of tab + result
-        work : numpy array
-            internal workspace. Must be 1D and of the same
-            size as result.
-
-        """
-        assert result.__class__ is np.ndarray
-        assert tab.__class__ is np.ndarray
-        assert work is not result
-        assert work.size == result.size
-        wk = work.reshape(result.shape)
-        np.subtract(tab[self._a1[cdir]], tab[self._m1[cdir]],
-                    wk[self.output_indices])
-        np.multiply(wk[self.output_indices], 8.,
-                    wk[self.output_indices])
-        np.add(wk[self.output_indices], tab[self._m2[cdir]],
-               wk[self.output_indices])
-        np.subtract(wk[self.output_indices], tab[self._a2[cdir]],
-                    wk[self.output_indices])
-        np.multiply(wk[self.output_indices], self._coeff[cdir],
-                    wk[self.output_indices])
-        np.add(wk[self.output_indices], result[self.output_indices],
-               result[self.output_indices])
-        return result
-
-    def compute_and_add_abs(self, tab, cdir, result, work):
-        """Apply FD scheme and add np.abs of the derivative inplace.
-
-        Parameters
-        ----------
-        tab : numpy array
-            input field
-        cdir : int
-            direction of differentiation
-        result : numpy array
-            in/out, derivative of tab + result
-        work : numpy array
-            internal workspace. Must be 1D and of the same
-            size as result.
-
-        """
-        assert result.__class__ is np.ndarray
-        assert tab.__class__ is np.ndarray
-        assert work is not result
-        assert work.size == result.size
-        wk = work.reshape(result.shape)
-        np.subtract(tab[self._a1[cdir]], tab[self._m1[cdir]],
-                    wk[self.output_indices])
-        np.multiply(wk[self.output_indices], 8.,
-                    wk[self.output_indices])
-        np.add(wk[self.output_indices], tab[self._m2[cdir]],
-               wk[self.output_indices])
-        np.subtract(wk[self.output_indices], tab[self._a2[cdir]],
-                    wk[self.output_indices])
-        np.multiply(wk[self.output_indices], self._coeff[cdir],
-                    wk[self.output_indices])
-        np.abs(wk[self.output_indices], wk[self.output_indices])
-        np.add(wk[self.output_indices], result[self.output_indices],
-               result[self.output_indices])
-        return result
-
-if __name__ == '__main__':
-    N = np.asarray((16,16,16))
-    A = np.random.rand(*N)
-    dx      = 1.0/(N-1)
-    indices = np.ndindex(*N)
-
-    diff = FDkCp(2,2,step=dx,indices=indices)
-    diff(A,0,A)
diff --git a/hysop/old/numerics.old/interpolation.py b/hysop/old/numerics.old/interpolation.py
deleted file mode 100644
index 89adda2607b23fa9361a6d6cb3503b378ff2fc00..0000000000000000000000000000000000000000
--- a/hysop/old/numerics.old/interpolation.py
+++ /dev/null
@@ -1,524 +0,0 @@
-"""Interpolation methods
-
-.. currentmodule hysop.numerics
-
-* :class:`~interpolation.Linear`
-
-"""
-from hysop.constants import HYSOP_INTEGER, EPS
-from hysop.tools.misc import WorkSpaceTools
-import numpy as np
-from hysop.tools.numpywrappers import npw
-from abc import ABCMeta
-try:
-    from hysop.f2hysop import interpolation as finterpol
-except:
-    finterpol = None
-
-
-class Interpolation(object):
-    """Common interface to all 'interpolation-like' numerical methods
-    (interpolation, remesh)
-    """
-    __metaclass__ = ABCMeta
-    ghosts_layer_size = 0
-
-    def __init__(self, topo_source, direction,
-                 topo_target=None, rwork=None, iwork=None):
-        """"Common interface to all 'interpolation-like' numerical methods
-        (interpolation, remesh)
-
-        Parameters
-        -----------
-        topo_source, topo_target : :class:`~hysop.topology.topology.CartesianTopology`
-            mpi process and data distribution for source and target
-            (see Notes for details)
-        direction : int
-            direction of interpolation
-        rwork, iwork : list of numpy arrays
-            internal work spaces for real (work) and integer (iwork) data.
-
-        Attributes
-        ----------
-        ghosts_layer_size : int, static
-            minimal number of points required inside the ghost layer
-            for this scheme.
-
-        Notes
-        -----
-        * 'source' concerned all data related to the grid on which the field
-        to interpolate is known, and 'target' is used for data related
-        to the grid on which the field values must be computed using
-        interpolation.
-        * topo_target is required if and only if source and target grid
-        have different resolution.
-        """
-        self.direction = direction
-        self.topo_source = topo_source
-        if topo_target is None:
-            topo_target = topo_source
-        self.topo_target = topo_target
-        # Check ghost layer size (min required in the interpolation direction)
-        msg = 'Ghost layer is too small for the chosen scheme.'
-        required_ghost_layer = self.ghosts_layer_size
-        assert topo_source.ghosts()[direction] >= required_ghost_layer, msg
-
-        # work arrays
-        self._rwork, self._iwork = self._set_work_arrays(
-            rwork=rwork, iwork=iwork)
-
-        # indices of grid points that will be remeshed:
-        # - at most the resolution in remeshed direction
-        #  --> update during call,
-        # (for example, it may depend on the number of particles)
-        # - all grid points except ghosts in other directions
-        ic = self.topo_source.mesh.compute_index
-        # grid indices in 'source' grid
-        self._icoords = np.ix_(
-            *(np.arange(ic[i].start, ic[i].stop)
-              for i in xrange(topo_source.domain.dim)))
-        self._inv_dx = 1. / self.topo_source.mesh.space_step
-
-        #source_res = topo_source.mesh.local_resolution
-        #target_res = topo_target.mesh.local_resolution
-        #msg = 'Interpolation error : only implemented in 3D.'
-        #assert topo_source.domain.dim == 3, msg
-        #assert topo_target.domain.dim == 3, msg
-        #if (source_res == target_res).all():
-        #    msg = 'Interpolation error : the two grids have the same '
-        #    msg += 'resolution, interpolation is useless.'
-        #shape_cfc = (source_res[0], target_res[2], source_res[1])
-        #shape_ccf = (source_res[0], source_res[1], target_res[2])
-        #shape_ccc = (source_res[0], source_res[2], source_res[1])
-        #shape_ccf2 = (source_res[0], source_res[2], target_res[2])
-        #self._rwork = [npw.zeros(shape_cfc), npw.zeros(shape_ccf),
-        #               npw.zeros(shape_ccc), npw.zeros(shape_ccf2)]
-        self.hsource = topo_source.mesh.space_step
-        self.htarget = topo_target.mesh.space_step
-        self.topo_target.build_subcomms()
-        self.sub_comms = self.topo_target.sub_comms
-        self.neighbours = self.topo_target.sub_neighbours
-        if finterpol is not None:
-            finterpol.initialize_interpolation_kernel('L4_4')
-
-        # indices of points on which interpolation occurs.
-        # This corresponds to the mesh of topo_target
-        # but is updated at each call, depending on the
-        # size of 'positions' argument and of the direction of interpolation.
-        self._target_indices = list(self.topo_target.mesh.compute_index)
-
-    def reset_target_indices(self, new_indices):
-        """Reset list of indices of points on which interpolation occurs.
-        Only useful when 'positions' argument shape is not equal
-        to topo_target.mesh shape, e.g. when a threshold is used
-        to initialize particles.
-
-        Parameters
-        ----------
-        new_indices : a list of slices
-            a slice per direction to define the range of
-            indices of interest. Like topo.mesh.compute_index attribute.
-        """
-        self._target_indices = list(new_indices)
-
-    def _set_work_arrays(self, rwork=None, iwork=None):
-        """Check and/or allocate internal work buffers.
-
-        Parameters
-        ----------
-        rwork, iwork : list of numpy arrays
-            internal work spaces for real (rwork) and integer (iwork) data.
-
-        """
-        wk_prop = self.get_work_properties(self.topo_target)
-        noghosts_shape = (np.prod(self.topo_target.mesh.compute_resolution),)
-        if wk_prop['rwork'] is None:
-            result_r = []
-        else:
-            result_r = WorkSpaceTools.check_work_array(len(wk_prop['rwork']),
-                                                       noghosts_shape, rwork)
-        if wk_prop['iwork'] is None:
-            result_i = []
-        else:
-            result_i = WorkSpaceTools.check_work_array(len(wk_prop['iwork']),
-                                                       noghosts_shape, iwork,
-                                                       data_type=HYSOP_INTEGER)
-        return result_r, result_i
-
-    @staticmethod
-    def get_work_properties(topo):
-        """Get properties of internal work arrays.
-
-        Returns
-        -------
-        dictionnary
-           keys = 'rwork' and 'iwork', values = list of shapes of internal
-           arrays required by this method (real arrays for rwork, integer
-           arrays for iwork).
-
-        Parameters
-        ----------
-        topo : :class:`hysop.topology.topology.CartesianTopology`
-            topology on which the method will be applied.
-        """
-        # default is None, None.
-        # This function must be reimplemented in derived classes
-        # (but we cannot combine abstract and static in python2.7 ...)
-        return {'rwork': None, 'iwork': None}
-
-    def _assert_target_in_source(self, target_coords):
-        """Check if target grid is strictly included
-        into source grid
-
-        This is required to be sure that 'left' and 'right'
-        points exist for each point on target grid.
-        This obviously depends on the interpolation order.
-        """
-        source_coords = self.topo_source.mesh.coords[self.direction]
-        assert np.min(target_coords) >= source_coords.flat[0]
-        assert np.max(target_coords) < source_coords.flat[-1]
-        return True
-
-    def _compute_iy(self, positions, work):
-        """Compute distance between points to be interpolated and their
-        closest point on the left.
-
-        Parameters
-        ----------
-        positions : numpy array
-            points to be interpolated. positions shape == topo.mesh.local_resolution
-            except in direction of interpolation where any number of points
-            is allowed, with a max of points equal to the number of grid
-            points in this direction (excluding ghosts).
-        work : numpy array.
-            Used as local buffer. work must be of the same shape
-            as positions and contains iy in return,
-            i_y = (xp - xg)/dx
-
-        Returns
-        -------
-        left_points_indices : list of numpy arrays or slices
-            indices in source grid of neighbours (left) points for
-            each point in positions.
-
-        Usage : field[left_points_indices] will return the array of
-        'left' points (in current direction)
-        while field[target_indices] = current points
-
-        Remark
-        ------
-        No memory allocation in this function --> everything is taken
-        from rwork or iwork
-        """
-        # local origin and space step in current direction
-        x0 = self.topo_source.mesh.origin[self.direction]
-        #i_y = work
-        # memory optim: check if input work 'belongs' to rwork
-        #assert npw.arrays_share_data(i_y, work)
-        # -- set indices of closest grid points, on the left --
-        # Initialized with source grid coordinates
-        left_points_indices = list(self._icoords)
-        # = [self._icoords[i] for i in xrange(dimension)]
-        # pick memory from self._iwork
-        iwork_dir = WorkSpaceTools.check_work_array(
-            1, work.shape, self._iwork, data_type=HYSOP_INTEGER)
-        # we need an array (filled later) for current direction
-        left_points_indices[self.direction] = iwork_dir[0]
-        # check memory consumption
-        assert npw.arrays_share_data(left_points_indices[self.direction],
-                                     self._iwork[0])
-        # -- compute distance between particles positions and
-        # closest grid point, on the left --
-        work[...] = (positions[self._target_indices] - x0) *\
-            self._inv_dx[self.direction]
-        #self._assert_particles_roundoff(work, self._target_indices)
-        # update interpolation point indices (in self._iwork)
-        left_points_indices[self.direction][...] = \
-            npw.asintegerarray(np.floor(work[...]))
-        work[...] -= left_points_indices[self.direction][...]
-        nmax = self.topo_source.mesh.local_resolution[self.direction]
-        # Ensure all points belongs to local domain
-        # (including ghosts)
-        assert left_points_indices[self.direction].max() < nmax
-        assert left_points_indices[self.direction].min() >= 0
-        return left_points_indices
-
-    @staticmethod
-    def _assert_particles_roundoff(work, ic):
-        """trick to avoid case where particle is very close to
-        grid point and 'floor' result in wrong grid
-        point because of numerical eps.
-        For example, if xp - xi == -1e-15, floor(xp)=xi-1
-        which is not what is expected.
-
-        Done in 'assertion' --> only in debug mode.
-        """
-        work[1][ic] = np.ceil(work[0][ic])
-        ilist = np.where(work[1][ic] - work[0][ic] < 10. * EPS)
-        work[0][ic][ilist] = work[1][ic][ilist]
-        return True
-
-    def _xy(self, vin, vout):
-        """Interpolate a field along X and Y-axis
-
-        Parameters
-        ----------
-        vin : list of numpy arrays
-            field on the source grid
-        vout : list of numpy arrays
-            field on the target/source grid
-        Notes
-        -----
-        * vout and vin must have the same resolution along
-          the first direction.
-        """
-        # Permutation to prepare first interpolation
-        for i in xrange(3):
-            self._rwork[2][:, i, :] = vin[:, :, i]
-        # Interpolation along last dir = Y
-        # in : vpermut(nc_x, nc_z, nc_y)
-        # out : vm(nc_x, nf_y, nc_z)
-        finterpol.interpol_lastdir_permut(self._rwork[2], self.hsource[:2],
-                                          self._rwork[3], self.htarget[:2],
-                                          self.neighbours[:, YDIR],
-                                          self.sub_comms[YDIR])
-        # Interpolation along X
-        # in :  vm(nc_x, nf_y, nc_z)
-        # out : vout(nf_x, nf_y, nc_z)
-        finterpol.interpol_firstdir_no_com(self._rwork[3],
-                                           self.hsource[:2],
-                                           vout, self.htarget[:2])
-
-    def _yz(self, vin, vout):
-        """Interpolate a field along Y and Z-axis
-
-        Parameters
-        ----------
-        vin : list of numpy arrays
-            field on the source grid
-        vout : list of numpy arrays
-            field on the target/source grid
-        Notes
-        -----
-        * vout and vin must have the same resolution along
-          the first direction.
-        """
-        # Interpolation along Z + permutation between Y and Z
-        # in : vin(nc_x, nc_y, nc_z)
-        # out : vtmp(nc_x, nf_z, nc_y)
-        finterpol.interpol_lastdir_permut(vin, self.hsource[1:],
-                                          self._rwork[0], self.htarget[1:],
-                                          self.neighbours[:, ZDIR],
-                                          self.sub_comms[ZDIR])
-        # Interpolation along Y
-        # (=third direction thanks to previous permutation)
-        # + permutation between Y and Z
-        # in : vtmp(nc_x, nf_z, nc_y)
-        # out : vout(nc_x, nf_y, nf_z)
-        finterpol.interpol_lastdir_permut(self._rwork[0], self.hsource[XDIR],
-                                          vout, self.htarget[XDIR],
-                                          self.neighbours[YDIR, :],
-                                          self.sub_comms[YDIR])
-
-    def _xz_permut(self, vin, vout):
-        """Interpolate a field along X and Z-axis, result order = Y,X,Z
-
-        Parameters
-        ----------
-        vin : list of numpy arrays
-            field on the source grid
-        vout : list of numpy arrays
-            field on the target grid
-
-        Notes
-        -----
-        * vout and vin must have the same resolution along
-          the first direction.
-        """
-        # Interpolation along Z
-        # Interpolation along Z
-        # in : vin(nc_x, nc_y, nc_z)
-        # out : vtmp(nc_x, nc_y, nf_z)
-        finterpol.interpol_lastdir(vin, self.hsource[::2],
-                                   self._rwork[1], self.htarget[::2],
-                                   self.neighbours[:, ZDIR],
-                                   self.sub_comms[ZDIR])
-        # Interpolation along X
-        # in : vtmp(nc_x, nc_y, nf_z)
-        # out : vout(nc_y, nf_x, nf_z)
-        finterpol.interpol_firstdir_permut_no_com(self._rwork[1],
-                                                  self.hsource[::2],
-                                                  vout, self.htarget[::2])
-
-    def _xy_permut(self, vin, vout):
-        """Interpolate a field along X and Y-axis, result order = Z,X,Y
-
-        Parameters
-        ----------
-        vin : list of numpy arrays
-            field on the source grid
-        vout : list of numpy arrays
-            field on the target grid
-
-        Notes
-        -----
-        * vout and vin must have the same resolution along
-          the first direction.
-        """
-        # Permutation to prepare first interpolation
-        for i in xrange(3):
-            self._rwork[2][:, i, :] = vin[:, :, i]
-        # Interpolation along last dir = Y
-        # in : vpermut(nc_x, nc_z, nc_y)
-        # out : vm(nc_x, nc_z, nf_y)
-        finterpol.interpol_lastdir(self._rwork[2], self.hsource[:2],
-                                   self._rwork[3], self.htarget[:2],
-                                   self.neighbours[:, YDIR],
-                                   self.sub_comms[YDIR])
-        # Interpolation along X
-        # in :  vm(nc_x, nc_z, nf_y)
-        # out : vout(nc_z, nf_x, nf_y)
-        finterpol.interpol_firstdir_permut_no_com(self._rwork[3],
-                                                  self.hsource[:2],
-                                                  vout, self.htarget[:2])
-
-    def _2d_3d_vect(self, vin, vout):
-        """Interpolate each component of a vector along a transverse direction
-        """
-        # For Vx, interpolation along Y and Z
-        self._yz(vin[XDIR], vtmp[XDIR])
-        # For Vy, interpolation along Z (with communications)
-        # then along X (no communication required)
-        self._xz_permut(vin[YDIR], vtmp[YDIR])
-        # For Vz, interpolation along Y (with communications)
-        # then along X (no communication required)
-        self._xy_permut(vin[ZDIR], vtmp[ZDIR])
-
-        finterpol.interpol_firstdir_no_com(vtmp[XDIR], self.hsource[XDIR],
-                                           vout[XDIR], self.htarget[XDIR])
-        finterpol.interpol_firstdir(vtmp[YDIR], self.hsource[YDIR],
-                                    vout[YDIR], self.htarget[YDIR],
-                                    self.neighbours[:, YDIR],
-                                    self.sub_comms[YDIR])
-        finterpol.interpol_firstdir(vtmp[ZDIR], self.hsource[ZDIR],
-                                    vout[ZDIR], self.htarget[ZDIR],
-                                    self.neighbours[:, ZDIR],
-                                    self.sub_comms[ZDIR])
-
-    def _3d(self, vin, vout):
-        """3D interpolation of a field to a targetr grid - no transpositions.
-        """
-        # rwork = vtmp(nf_x, nc_y, nc_z)
-        finterpol.interpol_firstdir_no_com(vin, self.hsource[XDIR],
-                                           self._rwork[0], self.htarget[XDIR])
-        self._yz(self._rwork[0], vout)
-
-
-class Linear(Interpolation):
-    """Linear interpolation of a field"""
-
-    ghosts_layer_size = 1
-
-    def __init__(self, interpolated_field, **kwds):
-        """
-        Parameters
-        ----------
-        interpolated_field : numpy array
-            data to be interpolated
-        kwds : base class parameters
-
-        Notes
-        -----
-        * topo argument corresponds to the topology of field interpolated_field
-        * it would be probably better to pass interpolated_field
-          during __call__ but interpolation is used as rhs in odesolvers and
-          call function must fit with rhs arg list.
-
-        """
-        self.interpolated_field = interpolated_field
-        super(Linear, self).__init__(**kwds)
-
-    @staticmethod
-    def get_work_properties(topo):
-        """Get properties of internal work arrays.
-
-        Returns
-        -------
-        dictionnary
-           keys = 'rwork' and 'iwork', values = list of shapes of internal
-           arrays required by this method (real arrays for rwork, integer
-           arrays for iwork).
-
-        Parameters
-        ----------
-        topo : :class:`hysop.topology.topology.CartesianTopology`
-            topology on which the method will be applied.
-        """
-        noghosts_shape = (np.prod(topo.mesh.compute_resolution),)
-        return {'rwork': [noghosts_shape, ], 'iwork': [noghosts_shape, ]}
-
-    def __call__(self, t, positions, result):
-        """interpolate values at nodes positions using linear interpolation.
-
-        Parameters
-        ----------
-        t : double
-            current time.
-        positions : list of numpy arrays
-            coordinates of points where to interpolate
-        result : list of numpy arrays
-
-        Returns
-        -------
-        result
-
-        Notes
-        -----
-        * since interpolation function is likely to be used
-        as right-hand side of an ode solver, the list of arguments must fit
-        with the integrator requirements, which explains the 't' argument
-        unused below but required, or the positions and result as lists.
-        """
-        assert isinstance(positions, list)
-        assert isinstance(result, list)
-        assert result[0].shape == positions[0].shape
-        assert (self.interpolated_field.shape ==
-                self.topo_source.mesh.local_resolution).all()
-        #assert self._assert_target_in_source(positions)
-        # linear interpolation:
-        # input = interpolated_field (field values at points xi),
-        #         pos, a vector of p points
-        # output = res(pos), interpolation of interpolated_field at points pos
-        # intf =interpolated_field
-        # res(pos_p) = (pos_p - xi)/h . intf_i+1 + (x_i+1 - pos_p)/h . intf_i
-        #          = iy.intf_i+1 + (1 - iy).intf_i
-        # where iy = (pos_p - x0)/h - i, i = floor((pos_p - x0)/h)
-
-        # number of points to interpolate
-        nb_points = positions[0].shape[self.direction]
-        # indices of points where we need interpolation
-        self._target_indices[self.direction] = slice(0, nb_points)
-        # pick buffer memory from self._rwork
-        # wk[0] shares memory with rwork[0] but data are rearranged
-        # according to positions[0].shape.
-        print nb_points, self.direction
-        wk = WorkSpaceTools.check_work_array(
-            1, positions[0][self._target_indices].shape, self._rwork)
-        # memory optim: check if input work 'belongs' to rwork
-        assert npw.arrays_share_data(self._rwork[0], wk[0])
-        left_points_indices = self._compute_iy(positions[0], wk[0])
-        assert npw.arrays_share_data(left_points_indices[self.direction],
-                                     self._iwork[0])
-        print self.interpolated_field[left_points_indices].shape
-        #refsize = positions[0].size
-        #refshape = positions[0].shape
-        #wk.append(result[0][])
-        #res = WorkSpaceTools.check_work_array(1, refshape, result)
-        result[0][self._target_indices] = \
-            self.interpolated_field[left_points_indices] * (1. - wk[0][...])
-        # compute 'right points' indices
-        left_points_indices[self.direction] += 1
-
-        result[0][self._target_indices] += \
-            self.interpolated_field[left_points_indices] * wk[0][...]
-        return result
diff --git a/hysop/old/numerics.old/method.py b/hysop/old/numerics.old/method.py
deleted file mode 100644
index 7125c1a081d4fe2efe4abbebd39ca01d65f69675..0000000000000000000000000000000000000000
--- a/hysop/old/numerics.old/method.py
+++ /dev/null
@@ -1,33 +0,0 @@
-"""
-@file method.py
-
-Abstract interface to numerical methods.
-"""
-from abc import ABCMeta, abstractmethod
-from hysop.constants import debug
-
-class NumericalMethod(object):
-    """ Abstract description of numerical method. """
-
-    __metaclass__ = ABCMeta
-
-    @debug
-    def __new__(cls, *args, **kw):
-        return object.__new__(cls, *args, **kw)
-
-    @staticmethod
-    def get_work_lengths(nb_components=None, domain_dim=None):
-        """
-        Compute the number of required work arrays for this method.
-        @param nb_components : number of components of the
-        @param domain_dim : dimension of the domain
-        fields on which this method operates.
-        @return length of list of work arrays of reals.
-        @return length of list of work arrays of int.
-        """
-        return 0, 0
-
-    @debug
-    @abstractmethod
-    def __call__(self):
-        """Call the method"""
diff --git a/hysop/old/numerics.old/odesolvers.py b/hysop/old/numerics.old/odesolvers.py
deleted file mode 100755
index 79b1a9f4264d70c00c307e95ecb2ad4d9e3f4ed7..0000000000000000000000000000000000000000
--- a/hysop/old/numerics.old/odesolvers.py
+++ /dev/null
@@ -1,544 +0,0 @@
-"""ODE integrators
-
-.. currentmodule hysop.numerics
-
-* :class:`~odesolvers.Euler`,
-* :class:`~odesolvers.RK2`,
-* :class:`~odesolvers.RK3`,
-* :class:`~odesolvers.RK4`,
-* :class:`~odesolvers.ODESolver` (abstract base class).
-
-See :ref:`odesolvers`
-
-"""
-
-from abc import ABCMeta, abstractmethod
-from hysop.constants import WITH_GUESS
-from hysop.numerics.update_ghosts import UpdateGhosts
-from hysop.tools.misc import WorkSpaceTools
-from hysop.numerics.odesolvers.runge_kutta import ExplicitRungeKutta
-
-from hysop.tools.numpywrappers import npw
-import numpy as np
-
-
-class ODESolver(object):
-    """Abstract base class for ODE solvers.
-
-    To solve:
-
-    dy(t)/dt = f(t,y)
-
-    """
-    __metaclass__ = ABCMeta
-
-    def __init__(self, nb_components, topo, f=None, indices=None,
-                 rwork=None, optim=None, wk_shape=None, need_synchro=False):
-        """
-        Parameters
-        ----------
-        nb_components : int
-            number of components of the right-hand side.
-        topo : :class:`~hysop.topology.topology.CartesianTopology`
-            mpi process and data distribution
-        f : python function, optional.
-            right hand side of the equation to solve.
-            If None, f = 0, else f must have the following signature:
-            res = f(t, y, f_work)
-            - y, work and res must be lists of numpy arrays
-        indices : list of slices, optional
-            Represents the local mesh on which the operation
-            will be applied,
-            like compute_index in :class:`~hysop.domain.mesh.Mesh`.
-            See details in notes.
-        reduce_output_shape : boolean, optional
-            True to return the result in a reduced array. See notes below.
-        rwork : list of numpy arrays, optional
-            temporary work space.
-        optim : int, optional
-            the level of optimization (memory management).
-            Default = None.
-        wk_shape: tuple, optional
-            shape of the internal work vector. Default = input topo
-            resolution.
-        need_synchro: boolean, optional
-            Set to true if y parameter of the rhs needs an update
-            of its ghost points before each call. This might be
-            the case if rhs is an interpolation. Default=False
-
-        Notes
-        -----
-
-        about optim parameter - Two levels are available:
-        * default 'basic' level : at each, the rhs is evaluated and then the
-        system is solved. This is the default behavior
-        * optim = WITH_GUESS : in that case, work must contain a first
-        evaluation of f(t,y) before each call.
-        * wk_shape might be usefull when the advected field shape
-        in advection dir is lower than topo shape (for instance when
-        pushing particles with a threshhold parameter).
-
-        """
-        # RHS.
-        self.f = f
-        if f is None:
-            self.f = lambda t, y, work: [npw.zeros_like(y[i])
-                                         for i in xrange(nb_components)]
-        if optim is None:
-            self._fcall = self._basic
-        elif optim is WITH_GUESS:
-            self._fcall = self._core
-
-        self._nb_components = nb_components
-
-        if indices is None:
-            indices = topo.mesh.compute_index
-        self.in_indices = indices
-        # True if work has to be done for this set
-        # of indices on the current proc.
-        #self._on_proc = Utils.is_on_proc(indices)
-        #self.output_indices = None
-
-        # work arrays
-        self.rwork = self._set_work_arrays(topo, wk_shape, rwork=rwork)
-        if need_synchro:
-            self._synchronize = UpdateGhosts(topo, self._nb_components)
-            self.compute_rhs = self._update_rhs_with_synchro
-        else:
-            self.compute_rhs = self.f
-
-    def _set_work_arrays(self, topo, wk_shape=False, rwork=None):
-        """Check and allocate internal work buffers.
-        """
-        wk_prop = self.get_work_properties(self._nb_components, topo)['rwork']
-        if wk_prop is None:
-            return []
-        if wk_shape:
-            subsize = wk_shape
-        else:
-            subsize = topo.mesh.local_resolution
-
-        return WorkSpaceTools.check_work_array(len(wk_prop), subsize, rwork)
-
-    @staticmethod
-    def get_work_properties(nb_components, topo):
-        """Returns the number of required internal work arrays for this method.
-
-        Parameters
-        ----------
-        nb_components: integer,
-            number of components of the right-hand-side
-        topo : :class:`hysop.topology.topology.CartesianTopology`
-            topology on which the integrator will be applied.
-        """
-
-    # def update_rhs_local(self, t, y, result):
-    #     """Return rhs value, no synchronization between mpi domains.
-    #     """
-    #     return self.f(t, y, result)
-
-    def _update_rhs_with_synchro(self, t, y, result):
-        """"Return rhs value, but first
-        synchronize y values between mpi domains
-        (i.e. ghost points update)
-        """
-        self._synchronize(y)
-        return self.f(t, y, result)
-
-    def __call__(self, t, y, dt, result):
-        """Apply integrator
-
-        Parameters
-        ----------
-        t : double
-            current time.
-        y : list of numpy arrays
-            vector field at time t.
-        dt : double
-            time step
-        result : list of numpy arrays
-            vector field at t + dt
-        Returns
-        -------
-        result
-
-        """
-        return self._fcall(t, y, dt, result)
-
-    def _basic(self, t, y, dt, result):
-        """Apply integrator, with basic (i.e. no) memory
-        optimisation.
-
-        Parameters
-        ----------
-        t : double
-            current time.
-        y : list of numpy arrays
-            vector field at time t.
-        dt : double
-            time step
-        result : list of numpy arrays
-            vector field at t + dt
-        Returns
-        -------
-        result
-
-        self.rwork is used as temp. array to compute the rhs.
-        result provided as input arg.
-        result may be equal to y.
-        """
-        self.rwork[:self._nb_components] = \
-            self.compute_rhs(t, y, self.rwork[:self._nb_components])
-        return self._core(t, y, dt, result)
-
-    @abstractmethod
-    def _core(self, t, y, dt, result):
-        """internal function to integrate the system.
-
-        Must not be called directly. All work, arrays ... must be properly
-        allocated before any called to this function.
-
-        Parameters
-        ----------
-        t : double
-            current time.
-        y : list of numpy arrays
-            vector field at time t.
-        dt : double
-            time step
-        result : list of numpy arrays
-            vector field at t + dt
-        Returns
-        -------
-        result
-
-        """
-
-
-class Euler(ODESolver):
-    """Solve ode with forward Euler Method.
-
-    """
-
-    @staticmethod
-    def get_work_properties(nb_components, topo):
-        """Returns the number of required internal work arrays for this method.
-
-        Parameters
-        ----------
-        nb_components: integer,
-            number of components of the right-hand-side
-        topo : :class:`hysop.topology.topology.CartesianTopology`
-            topology on which the integrator will be applied.
-
-        """
-        shape = (np.prod(topo.mesh.local_resolution),)
-        return {'rwork': [shape, ] * nb_components, 'iwork': None}
-
-    def _core(self, t, y, dt, result):
-        """Euler method computational core
-
-        Must not be called directly. All work, arrays ... must be properly
-        allocated before any called to this function.
-
-        Parameters
-        ----------
-        t : double
-            current time.
-        y : list of numpy arrays
-            vector field at time t.
-        dt : double
-            time step
-        result : list of numpy arrays
-            vector field at t + dt
-        Returns
-        -------
-        result
-
-        Highest level of optimization : result and self.rwork
-        must be provided and work must contain a first evaluation of
-        f(t,y)
-        optim = WITH_GUESS
-        """
-        assert len(result) == self._nb_components
-        # result = f(t, y)
-        # result = y + dt * work0
-
-        # result = y + work0
-        for i in xrange(self._nb_components):
-            np.add(y[i], self.rwork[i] * dt, result[i])
-        return result
-
-    @staticmethod
-    def stability_coeff():
-        """Returns a stability coefficient (used in stretching)
-        """
-        return 2.0
-
-
-class RK2(ODESolver):
-    """Solve ode with forward Runge-Kutta 2 method.
-    """
-
-    @staticmethod
-    def get_work_properties(nb_components, topo):
-        """Returns the number of required internal work arrays for this method.
-
-        Parameters
-        ----------
-        nb_components: integer,
-            number of components of the right-hand-side
-        topo : :class:`hysop.topology.topology.CartesianTopology`
-            topology on which the integrator will be applied.
-
-        """
-        shape = (np.prod(topo.mesh.local_resolution), )
-        return {'rwork': [shape, ] * 2 * nb_components, 'iwork': None}
-
-    def _core(self, t, y, dt, result):
-        """RK2 computational core
-
-        Must not be called directly. All work, arrays ... must be properly
-        allocated before any called to this function.
-
-        Parameters
-        ----------
-        t : double
-            current time.
-        y : list of numpy arrays
-            vector field at time t.
-        dt : double
-            time step
-        result : list of numpy arrays
-            vector field at t + dt
-        Returns
-        -------
-        result
-
-        self.rwork[:nb_components] must contain a first evaluation of
-        f(t, y)
-        optim = WITH_GUESS
-
-        Note : since result may be equal to y, it can not
-        be used as a temporary workspace.
-        """
-        i_y = self.in_indices
-        for i in xrange(self._nb_components):
-            for j in xrange(len(self.rwork)):
-                assert not npw.arrays_share_data(y[i], self.rwork[j])
-        assert len(result) == self._nb_components
-        work0 = self.rwork[:self._nb_components]
-        yn = self.rwork[self._nb_components:2 * self._nb_components]
-
-        # k1 = f(t,y) = work0
-        # k2 = f(t + dt/2, y + dt/2 * k1)
-        # result = y + dt * k2
-
-        # yn
-        #print ic, y[0].shape
-        for i in xrange(self._nb_components):
-            np.add(y[i][i_y], work0[i] * 0.5 * dt, yn[i])
-            #np.add(y[i][ic], work0[i] * 0.5 * dt, yn[i])
-
-        # k2 in work0
-        work0 = self.compute_rhs(t + 0.5 * dt, yn, work0)
-        # *= dt
-        for i in xrange(self._nb_components):
-            #np.multiply(work0[i], dt, work0[i])
-            work0[i] *= dt
-            # result = y + work0
-            np.add(work0[i], y[i][i_y], result[i][i_y])
-            #np.add(work0[i], y[i][ic], result[i])
-
-        return result
-
-    @staticmethod
-    def stability_coeff():
-        """Returns a stability coefficient (used in stretching)
-        """
-        return 2.0
-
-
-class RK3(ODESolver):
-    """ODESolver implementation for solving an equation system with RK3 method.
-    """
-
-    @staticmethod
-    def get_work_properties(nb_components, topo):
-        """Returns the number of required internal work arrays for this method.
-
-        Parameters
-        ----------
-        nb_components: integer,
-            number of components of the right-hand-side
-        topo : :class:`hysop.topology.topology.CartesianTopology`
-            topology on which the integrator will be applied.
-
-        """
-        shape = (np.prod(topo.mesh.local_resolution),)
-        return {'rwork': [shape, ] * 3 * nb_components, 'iwork': None}
-
-    def _core(self, t, y, dt, result):
-        """RK3 computational core
-
-        Must not be called directly. All work, arrays ... must be properly
-        allocated before any called to this function.
-
-        Parameters
-        ----------
-        t : double
-            current time.
-        y : list of numpy arrays
-            vector field at time t.
-        dt : double
-            time step
-        result : list of numpy arrays
-            vector field at t + dt
-        Returns
-        -------
-        result
-
-        Highest level of optimization : work and result
-        must be provided and
-        work[:nb_components] must contain a first evaluation of
-        f(t,y,f_work)
-        optim = WITH_GUESS
-
-        Note : since result may be equal to y, it can not
-        be used as a temporary workspace.
-        """
-        for i in xrange(self._nb_components):
-            cond = [y[i] is self.rwork[j] for j in xrange(len(self.rwork))]
-            assert cond.count(True) is 0
-        assert len(result) == self._nb_components
-
-        work0 = self.rwork[:self._nb_components]
-        yn = self.rwork[self._nb_components:2 * self._nb_components]
-        kn = self.rwork[2 * self._nb_components:3 * self._nb_components]
-
-        # k1 = f(t,y) = work0
-        # k2 = f(t+dt/3, y + dt/3 *k1))
-        # k3 = f(t + 2/3 *dt , y + 2/3 dt * k2))
-        # result = y + 0.25 * dt * (k1 + 3 * k3)
-        # yn
-        for i in xrange(self._nb_components):
-            np.add(y[i], work0[i] * dt / 3, yn[i])
-
-        # k2 in kn
-        kn = self.compute_rhs(t + dt / 3, yn, kn)
-        # yn
-        for i in xrange(self._nb_components):
-            np.add(y[i], 2 * dt / 3 * kn[i], yn[i])
-
-        # k3 in kn
-        kn = self.compute_rhs(t + 2 * dt / 3, yn, kn)
-        # k1 + 3 * k3 in work0
-        for i in xrange(self._nb_components):
-            np.add(work0[i], 3 * kn[i], work0[i])
-            # *= dt / 4
-            work0[i] *= 0.25 * dt
-            # result = y + work0
-            np.add(work0[i], y[i], result[i])
-
-        return result
-
-    @staticmethod
-    def stability_coeff():
-        """Returns a stability coefficient (used in stretching)
-        """
-        return 2.5127
-
-
-class RK4(ODESolver):
-    """
-    ODESolver implementation for solving an equation system with RK4 method.
-    """
-    @staticmethod
-    def get_work_properties(nb_components, topo):
-        """Returns the number of required internal work arrays for this method.
-
-        nb_components: integer,
-            number of components of the right-hand-side
-        topo : :class:`hysop.topology.topology.CartesianTopology`
-            topology on which the integrator will be applied.
-        """
-        shape = (np.prod(topo.mesh.local_resolution),)
-        return {'rwork': [shape, ] * 3 * nb_components, 'iwork': None}
-
-    _one_over_six = 1. / 6.
-
-    def _core(self, t, y, dt, result):
-        """RK4 computational core
-
-        Must not be called directly. All work, arrays ... must be properly
-        allocated before any called to this function.
-
-        Parameters
-        ----------
-        t : double
-            current time.
-        y : list of numpy arrays
-            vector field at time t.
-        dt : double
-            time step
-        result : list of numpy arrays
-            vector field at t + dt
-        Returns
-        -------
-        result
-
-        """
-        for i in xrange(self._nb_components):
-            cond = [y[i] is self.rwork[j] for j in xrange(len(self.rwork))]
-            assert cond.count(True) is 0
-        assert len(result) == self._nb_components
-
-        work0 = self.rwork[:self._nb_components]
-        yn = self.rwork[self._nb_components:2 * self._nb_components]
-        kn = self.rwork[2 * self._nb_components:3 * self._nb_components]
-
-        # k1 = f(t,y)
-        # k2 = f(t + dt/2, y + dt/2 * k1)
-        # k3 = f(t + dt/2, y + dt/2 * k2)
-        # k4 = f(t + dt, y + dt * k3)
-        # result = y + dt/6( k1 + 2 * k2 + 2 * k3 + k4)
-
-        # yn
-        for i in xrange(self._nb_components):
-            np.add(y[i], work0[i] * dt / 2, yn[i])
-
-        # k2 in kn
-        kn = self.compute_rhs(t + dt / 2, yn, kn)
-
-        # k1 + 2 * k2 in work0
-        for i in xrange(self._nb_components):
-            np.add(work0[i], 2 * kn[i], work0[i])
-            # yn
-            np.add(y[i], dt / 2 * kn[i], yn[i])
-
-        # k3 in kn
-        kn = self.compute_rhs(t + dt / 2, yn, kn)
-
-        # k1 + 2 * k2 + 2 * k3 in work0
-        for i in xrange(self._nb_components):
-            np.add(work0[i], 2 * kn[i], work0[i])
-            # yn
-            np.add(y[i], dt * kn[i], yn[i])
-
-        # K4 in kn
-        kn = self.compute_rhs(t + dt, yn, kn)
-
-        # k1 + 2 * k2 + 2 * k3 + k4
-        for i in xrange(self._nb_components):
-            np.add(work0[i], kn[i], work0[i])
-            work0[i] *= self._one_over_six * dt
-            # result = y + work0
-            np.add(work0[i], y[i], result[i])
-
-        return result
-
-    @staticmethod
-    def stability_coeff():
-        """Returns a stability coefficient (used in stretching)
-        """
-        return 2.7853
diff --git a/hysop/old/numerics.old/remeshing.py b/hysop/old/numerics.old/remeshing.py
deleted file mode 100644
index 6bc4888678b20e7635131a73654b76fc3ef58c20..0000000000000000000000000000000000000000
--- a/hysop/old/numerics.old/remeshing.py
+++ /dev/null
@@ -1,664 +0,0 @@
-"""Remeshing methods
-
-.. currentmodule:: hysop.numerics.remeshing
-
-* :class:`~Remeshing` : remeshing method
-
-Available formulas are :
-* :class:`~Linear`
-* :class:`~L2_1`
-* :class:`~L2_2`
-* :class:`~L2_3`
-* :class:`~L2_4`
-* :class:`~L4_2`
-* :class:`~L4_3`
-* :class:`~L4_4`
-* :class:`~M8Prime`
-* :class:`~L6_3`
-* :class:`~L6_4`
-* :class:`~L6_5`
-* :class:`~L6_6`
-* :class:`~L8_4`
-* :class:`RemeshFormula`, abstract base class for formulas
-
-"""
-import numpy as np
-from hysop.tools.numpywrappers import npw
-from hysop.tools.misc import WorkSpaceTools
-from hysop.numerics.interpolation.interpolation import Interpolation
-from hysop.core.mpi import Wtime
-
-
-class Remeshing(Interpolation):
-    """Remeshing schemes"""
-
-    def __init__(self, kernel, **kwds):
-        """Create a remeshing numeric method based on given formula.
-
-        Parameters
-        -----------
-        kernel : :class:`~hysop.numerics.remeshing.RemeshFormula`
-            remeshing formula.
-        kwds : base class parameters
-
-        Notes
-        -----
-        * topo arg must correspond to the topology of the remeshed field
-        i.e. topology of the targeted grid.
-
-        Availables formulas :
-          - 'l2_1' : Labmda2,1 : (=M'4) 4 point formula, C1 regularity
-          - 'l2_2' : Labmda2,2 : 4 point formula, C2 regularity
-          - 'l4_2' : Labmda4,2 : (=M'6) 6 point formula, C2 regularity
-          - 'l4_3' : Labmda4,3 : 6 point formula, C3 regularity
-          - 'l4_4' : Labmda4,4 : 6 point formula, C4 regularity
-          - 'l6_3' : Labmda6,3 : 8 point formula, C3 regularity
-          - 'l6_4' : Labmda6,4 : 8 point formula, C4 regularity
-          - 'l6_5' : Labmda6,5 : 8 point formula, C5 regularity
-          - 'l6_6' : Labmda6,6 : 8 point formula, C6 regularity
-          - 'l8_4' : Labmda8,4 : 10 point formula, C4 regularity
-          - 'm8prime' : M8prime formula
-        """
-        self._kernel = kernel()
-        self.ghosts_layer_size = self._kernel.ghosts_layer_size
-        super(Remeshing, self).__init__(**kwds)
-        self.shift = self._kernel.shift
-        self.weights = self._kernel.weights
-
-    @staticmethod
-    def get_work_properties(topo):
-        """Get properties of internal work arrays.
-
-        Returns
-        -------
-        dictionnary
-           keys = 'rwork' and 'iwork', values = list of shapes of internal
-           arrays required by this method (real arrays for rwork, integer
-           arrays for iwork).
-
-        Parameters
-        ----------
-        topo : :class:`hysop.topology.topology.CartesianTopology`
-            topology on which the method will be applied.
-        """
-        noghosts_shape = (np.prod(topo.mesh.compute_resolution),)
-        return {'rwork': [noghosts_shape, ] * 2, 'iwork': [noghosts_shape, ]}
-
-    def __call__(self, ppos, pscal, result):
-        """Apply remeshing scheme : remesh particles at
-        position p_pos with scalar p_scal along
-        direction d.
-
-        Parameters
-        ----------
-        ppos : numpy array
-            particle positions
-        pscal : list of numpy arrays
-             field(s) value on particles
-        result : list of numpy arrays
-
-        Returns
-        -------
-        result, the scalar field(s) remeshed on the grid.
-        """
-        assert np.asarray([pscal[i].shape == ppos.shape
-                           for i in xrange(len(pscal))]).all()
-        assert len(result) == len(pscal)
-        assert np.asarray([result[i].shape ==
-                           tuple(self.topo_target.mesh.local_resolution)
-                           for i in xrange(len(result))]).all()
-        # number of particles
-        nb_points = ppos.shape[self.direction]
-        self._target_indices[self.direction] = slice(0, nb_points)
-        # pick buffer memory from self._rwork
-        # wk[0] --> iy, wk[1] --> used in weights accumulation below
-        wk = WorkSpaceTools.check_work_array(
-            2, ppos[self._target_indices].shape, self._rwork)
-        assert npw.arrays_share_data(self._rwork[0], wk[0])
-        left_points_indices = self._compute_iy(ppos, wk[0])
-        assert npw.arrays_share_data(left_points_indices[self.direction],
-                                     self._iwork[0])
-        # shift iwork, so that iwork[direction][p] is the index
-        # of the first grid point in the remesh stencil of particle p
-        left_points_indices[self.direction][...] -= self.shift
-        for i in xrange(len(result)):
-            result[i][...] = 0.0
-        # Now, accumulate particles contributions into result
-        # for each remeshed field
-        for i in xrange(len(pscal)):
-            for k, _ in enumerate(self.weights):
-                # compute w_k(iy)
-                wk[1][...] = self._kernel(k, wk[0], wk[1])
-                wk[1][...] *= pscal[i][self._target_indices]
-                result[i][left_points_indices] += wk[1][...]
-                # shift to next grid point
-                left_points_indices[self.direction] += 1
-            left_points_indices[self.direction] -= len(self.weights)
-
-        return result
-
-
-class RemeshFormula(object):
-    """Abstract class for remeshing formulas"""
-    def __init__(self):
-        self.shift = 0
-        self.weights = None
-
-    def __call__(self, w, x, res):
-        """Compute remeshing weights.
-
-        Parameters
-        ----------
-        w : int
-            index (position in weight list) of the required weight
-        x : numpy array
-            relative position of the particle
-        res : numpy array
-            remeshed values
-        return res.
-
-        res = sum_i=0..n-1  w_i.x^(n-i), n = len(weight[w])
-        """
-        res[...] = self.weights[w][0]
-        for c in self.weights[w][1:]:
-            res[...] *= x[...]
-            res[...] += c
-        return res
-
-
-class Linear(RemeshFormula):
-    """Linear kernel."""
-
-    ghosts_layer_size = 1
-
-    def __init__(self):
-        super(Linear, self).__init__()
-        self.shift = 0
-        self.weights = [
-            npw.asrealarray([-1, 1]),
-            npw.asrealarray([1, 0]),
-            ]
-
-
-class L2_1(RemeshFormula):
-    """L2_1 kernel."""
-
-    ghosts_layer_size = 2
-
-    def __init__(self):
-        super(L2_1, self).__init__()
-        self.shift = 1
-        self.weights = [
-            npw.asrealarray([-1, 2, -1, 0]) / 2.,
-            npw.asrealarray([3, -5, 0, 2]) / 2.,
-            npw.asrealarray([-3, 4, 1, 0]) / 2.,
-            npw.asrealarray([1, -1, 0, 0]) / 2.,
-            ]
-
-
-class L2_2(RemeshFormula):
-    """L2_2 kernel."""
-
-    ghosts_layer_size = 3
-
-    def __init__(self):
-        super(L2_2, self).__init__()
-        self.shift = 1
-        self.weights = [
-            npw.asrealarray([2, -5, 3, 1, -1, 0]) / 2.,
-            npw.asrealarray([-6, 15, -9, -2, 0, 2]) / 2.,
-            npw.asrealarray([6, -15, 9, 1, 1, 0]) / 2.,
-            npw.asrealarray([-2, 5, -3, 0, 0, 0]) / 2.,
-            ]
-
-
-class L2_3(RemeshFormula):
-    """L2_3 kernel."""
-
-    ghosts_layer_size = 4
-
-    def __init__(self):
-        super(L2_3, self).__init__()
-        self.shift = 1
-        self.weights = [
-            npw.asrealarray([-6, 21, -25, 10, 0, 1, -1, 0]) / 2.,
-            npw.asrealarray([18, -63, 75, -30, 0, -2, 0, 2]) / 2.,
-            npw.asrealarray([-18, 63, -75, 30, 0, 1, 1, 0]) / 2.,
-            npw.asrealarray([6, -21, 25, -10, 0, 0, 0, 0]) / 2.,
-            ]
-
-
-class L2_4(RemeshFormula):
-    """L2_4 kernel."""
-
-    ghosts_layer_size = 5
-
-    def __init__(self):
-        super(L2_4, self).__init__()
-        self.shift = 1
-        self.weights = [
-            npw.asrealarray([20, -90, 154, -119, 35, 0, 0, 1, -1, 0]) / 2.,
-            npw.asrealarray([-60, 270, -462, 357, -105, 0, 0, -2, 0, 2]) / 2.,
-            npw.asrealarray([60, -270, 462, -357, 105, 0, 0, 1, 1, 0]) / 2.,
-            npw.asrealarray([-20, 90, -154, 119, -35, 0, 0, 0, 0, 0]) / 2.,
-            ]
-
-
-class L4_2(RemeshFormula):
-    """L4_2 kernel."""
-
-    ghosts_layer_size = 3
-
-    def __init__(self):
-        super(L4_2, self).__init__()
-        self.shift = 2
-        self.weights = [
-            npw.asrealarray([-5, 13, -9, -1, 2, 0]) / 24.,
-            npw.asrealarray([25, -64, 39, 16, -16, 0]) / 24.,
-            npw.asrealarray([-50, 126, -70, -30, 0, 24]) / 24.,
-            npw.asrealarray([50, -124, 66, 16, 16, 0]) / 24.,
-            npw.asrealarray([-25, 61, -33, -1, -2, 0]) / 24.,
-            npw.asrealarray([5, -12, 7, 0, 0, 0]) / 24.,
-            ]
-
-
-class L4_3(RemeshFormula):
-    """L4_3 kernel."""
-
-    ghosts_layer_size = 4
-
-    def __init__(self):
-        super(L4_3, self).__init__()
-        self.shift = 2
-        self.weights = [
-            npw.asrealarray([14, -49, 58, -22, -2, -1, 2, 0]) / 24.,
-            npw.asrealarray([-70, 245, -290, 111, 4, 16, -16, 0]) / 24.,
-            npw.asrealarray([140, -490, 580, -224, 0, -30, 0, 24]) / 24.,
-            npw.asrealarray([-140, 490, -580, 226, -4, 16, 16, 0]) / 24.,
-            npw.asrealarray([70, -245, 290, -114, 2, -1, -2, 0]) / 24.,
-            npw.asrealarray([-14, 49, -58, 23, 0, 0, 0, 0]) / 24.,
-            ]
-
-
-class L4_4(RemeshFormula):
-    """L4_4 kernel."""
-
-    ghosts_layer_size = 5
-
-    def __init__(self):
-        super(L4_4, self).__init__()
-        self.shift = 2
-        self.weights = [
-            npw.asrealarray([-46, 207, -354, 273, -80, 1, -2, -1, 2, 0]) / 24.,
-            npw.asrealarray([230, -1035, 1770, -1365,
-                             400, -4, 4, 16, -16, 0]) / 24.,
-            npw.asrealarray([-460, 2070, -3540, 2730, -800,
-                             6, 0, -30, 0, 24]) / 24.,
-            npw.asrealarray([460, -2070, 3540, -2730, 800,
-                             -4, -4, 16, 16, 0]) / 24.,
-            npw.asrealarray([-230, 1035, -1770, 1365, -400,
-                             1, 2, -1, -2, 0]) / 24.,
-            npw.asrealarray([46, -207, 354, -273, 80, 0, 0, 0, 0, 0]) / 24.,
-            ]
-
-
-class M8Prime(RemeshFormula):
-    """M8Prime kernel."""
-
-    ghosts_layer_size = 4
-
-    def __init__(self):
-        super(M8Prime, self).__init__()
-        self.shift = 3
-        self.weights = [
-            npw.asrealarray([-10, 21, 28, -105, 70, 35, -56, 17]) / 3360.,
-            npw.asrealarray([70, -175, -140, 770, -560, -350,
-                             504, -102]) / 3360.,
-            npw.asrealarray([-210, 609, 224, -2135, 910, 2765,
-                             -2520, 255]) / 3360.,
-            npw.asrealarray([350, -1155, 0, 2940, 0, -4900, 0,
-                             3020]) / 3360.,
-            npw.asrealarray([-350, 1295, -420, -2135, -910, 2765,
-                             2520, 255]) / 3360.,
-            npw.asrealarray([210, -861, 532, 770, 560, -350, -504,
-                             -102]) / 3360.,
-            npw.asrealarray([-70, 315, -280, -105, -70, 35, 56, 17]) / 3360.,
-            npw.asrealarray([10, -49, 56, 0, 0, 0, 0, 0]) / 3360.,
-            ]
-
-
-class L6_3(RemeshFormula):
-    """L6_3 kernel."""
-
-    ghosts_layer_size = 4
-
-    def __init__(self):
-        super(L6_3, self).__init__()
-        self.shift = 3
-        self.weights = [
-            npw.asrealarray([-89, 312, -370, 140, 15, 4, -12, 0]) / 720.,
-            npw.asrealarray([623, -2183, 2581, -955, -120, -54, 108,
-                             0]) / 720.,
-            npw.asrealarray([-1869, 6546, -7722, 2850, 195, 540, -540,
-                             0]) / 720.,
-            npw.asrealarray([3115, -10905, 12845, -4795, 0, -980, 0,
-                             720]) / 720.,
-            npw.asrealarray([-3115, 10900, -12830, 4880, -195, 540,
-                             540, 0]) / 720.,
-            npw.asrealarray([1869, -6537, 7695, -2985, 120, -54, -108,
-                             0]) / 720.,
-            npw.asrealarray([-623, 2178, -2566, 1010, -15, 4, 12, 0]) / 720.,
-            npw.asrealarray([89, -311, 367, -145, 0, 0, 0, 0]) / 720.,
-            ]
-
-
-class L6_4(RemeshFormula):
-    """L6_4 kernel."""
-
-    ghosts_layer_size = 5
-
-    def __init__(self):
-        super(L6_4, self).__init__()
-        self.shift = 3
-        self.weights = [
-            npw.asrealarray([290, -1305, 2231, -1718, 500, -5, 15, 4,
-                             -12, 0]) / 720.,
-            npw.asrealarray([-2030, 9135, -15617, 12027, -3509, 60,
-                             -120, -54, 108, 0]) / 720.,
-            npw.asrealarray([6090, -27405, 46851, -36084, 10548, -195,
-                             195, 540, -540, 0]) / 720.,
-            npw.asrealarray([-10150, 45675, -78085, 60145, -17605, 280,
-                             0, -980, 0, 720]) / 720.,
-            npw.asrealarray([10150, -45675, 78085, -60150, 17620, -195,
-                             -195, 540, 540, 0]) / 720.,
-            npw.asrealarray([-6090, 27405, -46851, 36093, -10575, 60, 120,
-                             -54, -108, 0]) / 720.,
-            npw.asrealarray([2030, -9135, 15617, -12032, 3524, -5, -15, 4,
-                             12, 0]) / 720.,
-            npw.asrealarray([-290, 1305, -2231, 1719, -503, 0, 0, 0, 0,
-                             0]) / 720.,
-            ]
-
-
-class L6_5(RemeshFormula):
-    """L6_5 kernel."""
-
-    ghosts_layer_size = 6
-
-    def __init__(self):
-        super(L6_5, self).__init__()
-        self.shift = 3
-        self.weights = [
-            npw.asrealarray([-1006, 5533, -12285, 13785, -7829, 1803,
-                             -3, -5, 15, 4, -12, 0]) / 720.,
-            npw.asrealarray([7042, -38731, 85995, -96495, 54803, -12620,
-                             12, 60, -120, -54, 108, 0]) / 720.,
-            npw.asrealarray([-21126, 116193, -257985, 289485, -164409,
-                             37857, -15, -195, 195, 540, -540, 0]) / 720.,
-            npw.asrealarray([35210, -193655, 429975, -482475, 274015,
-                             -63090, 0, 280, 0, -980, 0, 720]) / 720.,
-            npw.asrealarray([-35210, 193655, -429975, 482475, -274015,
-                             63085, 15, -195, -195, 540, 540, 0]) / 720.,
-            npw.asrealarray([21126, -116193, 257985, -289485, 164409,
-                             -37848, -12, 60, 120, -54, -108, 0]) / 720.,
-            npw.asrealarray([-7042, 38731, -85995, 96495, -54803, 12615,
-                             3, -5, -15, 4, 12, 0]) / 720.,
-            npw.asrealarray([1006, -5533, 12285, -13785, 7829, -1802,
-                             0, 0, 0, 0, 0, 0]) / 720.,
-            ]
-
-
-class L6_6(RemeshFormula):
-    """L6_6 kernel."""
-
-    ghosts_layer_size = 7
-
-    def __init__(self):
-        super(L6_6, self).__init__()
-        self.shift = 3
-        self.weights = [
-            npw.asrealarray([3604, -23426, 63866, -93577, 77815, -34869,
-                             6587, 1, -3, -5, 15, 4, -12, 0]) / 720.,
-            npw.asrealarray([-25228, 163982, -447062, 655039, -544705,
-                             244083, -46109, -6, 12, 60, -120, -54, 108,
-                             0]) / 720.,
-            npw.asrealarray([75684, -491946, 1341186, -1965117, 1634115,
-                             -732249, 138327, 15, -15, -195, 195, 540, -540,
-                             0]) / 720.,
-            npw.asrealarray([-126140, 819910, -2235310, 3275195, -2723525,
-                             1220415, -230545, -20, 0, 280, 0, -980, 0,
-                             720]) / 720.,
-            npw.asrealarray([126140, -819910, 2235310, -3275195, 2723525,
-                             -1220415, 230545, 15, 15, -195, -195, 540, 540,
-                             0]) / 720.,
-            npw.asrealarray([-75684, 491946, -1341186, 1965117, -1634115,
-                             732249, -138327, -6, -12, 60, 120, -54, -108,
-                             0]) / 720.,
-            npw.asrealarray([25228, -163982, 447062, -655039, 544705, -244083,
-                             46109, 1, 3, -5, -15, 4, 12, 0]) / 720.,
-            npw.asrealarray([-3604, 23426, -63866, 93577, -77815, 34869, -6587,
-                             0, 0, 0, 0, 0, 0, 0]) / 720.,
-            ]
-
-
-class L8_4(RemeshFormula):
-    """L8_4 kernel."""
-
-    ghosts_layer_size = 5
-
-    def __init__(self):
-        super(L8_4, self).__init__()
-        self.shift = 4
-        self.weights = [
-            npw.asrealarray([-3569, 16061, -27454, 21126, -6125, 49, -196,
-                             -36, 144, 0]) / 40320.,
-            npw.asrealarray([32121, -144548, 247074, -190092, 55125, -672,
-                             2016, 512, -1536, 0]) / 40320.,
-            npw.asrealarray([-128484, 578188, -988256, 760312, -221060, 4732,
-                             -9464, -4032, 8064, 0]) / 40320.,
-            npw.asrealarray([299796, -1349096, 2305856, -1774136, 517580,
-                             -13664, 13664, 32256, -32256, 0]) / 40320.,
-            npw.asrealarray([-449694, 2023630, -3458700, 2661540, -778806,
-                             19110, 0, -57400, 0, 40320]) / 40320.,
-            npw.asrealarray([449694, -2023616, 3458644, -2662016, 780430,
-                             -13664, -13664, 32256, 32256, 0]) / 40320.,
-            npw.asrealarray([-299796, 1349068, -2305744, 1775032, -520660,
-                             4732, 9464, -4032, -8064, 0]) / 40320.,
-            npw.asrealarray([128484, -578168, 988176, -760872, 223020, -672,
-                             -2016, 512, 1536, 0]) / 40320.,
-            npw.asrealarray([-32121, 144541, -247046, 190246, -55685, 49, 196,
-                             -36, -144, 0]) / 40320.,
-            npw.asrealarray([3569, -16060, 27450, -21140, 6181, 0, 0, 0,
-                             0, 0]) / 40320.,
-            ]
-
-
-def polynomial_optimisation():
-    """Testing different python implementation of a polynomial expression.
-    Use polynomial of degree 10 :
-    10*x^10+9*x^9+8*x^8+7*x^7+6*x^6+5*x^5+4*x^4+3*x^3+2*x^2+x
-    """
-    def test_form(func, r, a, s, *args):
-        tt = 0.
-        for _ in xrange(10):
-            r[...] = 0.
-            t = Wtime()
-            r[...] = func(a, *args)
-            tt += (Wtime() - t)
-        print tt, s
-
-    nb = 128
-    a = npw.asrealarray(np.random.random((nb, nb, nb)))
-    r = np.zeros_like(a)
-    temp = np.zeros_like(a)
-    lambda_p = lambda x: 1. + 2. * x + 3. * x ** 2 + 4. * x ** 3 + \
-        5. * x ** 4 + 6. * x ** 5 + 7. * x ** 6 + \
-        8. * x ** 7 + 9. * x ** 8 + 10. * x ** 9 + \
-        11. * x ** 10
-    lambda_h = lambda x: (x * (x * (x * (x * (x * (x * (x * (
-        x * (11. * x + 10.) + 9.) + 8.) + 7.) + 6.) + 5.) + 4.) + 3.) + 2.) + 1.
-
-    coeffs = coeffs = npw.asrealarray(np.arange(11, 0, -1))
-
-    def func_h(x, r):
-        r[...] = coeffs[0]
-        for c in coeffs[1:]:
-            r[...] *= x
-            r[...] += c
-
-    def func_p(x, r, tmp):
-        r[...] = 1.
-        tmp[...] = x
-        tmp[...] *= 2.
-        r[...] += tmp
-        tmp[...] = x ** 2
-        tmp[...] *= 3.
-        r[...] += tmp
-        tmp[...] = x ** 3
-        tmp[...] *= 4.
-        r[...] += tmp
-        tmp[...] = x ** 4
-        tmp[...] *= 5.
-        r[...] += tmp
-        tmp[...] = x ** 5
-        tmp[...] *= 6.
-        r[...] += tmp
-        tmp[...] = x ** 6
-        tmp[...] *= 7.
-        r[...] += tmp
-        tmp[...] = x ** 7
-        tmp[...] *= 8.
-        r[...] += tmp
-        tmp[...] = x ** 8
-        tmp[...] *= 9.
-        r[...] += tmp
-        tmp[...] = x ** 9
-        tmp[...] *= 10.
-        r[...] += tmp
-        tmp[...] = x ** 10
-        tmp[...] *= 11.
-        r[...] += tmp
-
-    def func_p_bis(x, r, tmp):
-        r[...] = 1.
-        tmp[...] = x
-        tmp[...] *= 2.
-        r[...] += tmp
-        tmp[...] = x
-        tmp[...] *= x
-        tmp[...] *= 3.
-        r[...] += tmp
-        tmp[...] = x
-        tmp[...] *= x
-        tmp[...] *= x
-        tmp[...] *= 4.
-        r[...] += tmp
-        tmp[...] = x
-        tmp[...] *= x
-        tmp[...] *= x
-        tmp[...] *= x
-        tmp[...] *= 5.
-        r[...] += tmp
-        tmp[...] = x
-        tmp[...] *= x
-        tmp[...] *= x
-        tmp[...] *= x
-        tmp[...] *= x
-        tmp[...] *= 6.
-        r[...] += tmp
-        tmp[...] = x
-        tmp[...] *= x
-        tmp[...] *= x
-        tmp[...] *= x
-        tmp[...] *= x
-        tmp[...] *= x
-        tmp[...] *= 7.
-        r[...] += tmp
-        tmp[...] = x
-        tmp[...] *= x
-        tmp[...] *= x
-        tmp[...] *= x
-        tmp[...] *= x
-        tmp[...] *= x
-        tmp[...] *= x
-        tmp[...] *= 8.
-        r[...] += tmp
-        tmp[...] = x
-        tmp[...] *= x
-        tmp[...] *= x
-        tmp[...] *= x
-        tmp[...] *= x
-        tmp[...] *= x
-        tmp[...] *= x
-        tmp[...] *= x
-        tmp[...] *= 9.
-        r[...] += tmp
-        tmp[...] = x
-        tmp[...] *= x
-        tmp[...] *= x
-        tmp[...] *= x
-        tmp[...] *= x
-        tmp[...] *= x
-        tmp[...] *= x
-        tmp[...] *= x
-        tmp[...] *= x
-        tmp[...] *= 10.
-        r[...] += tmp
-        tmp[...] = x
-        tmp[...] *= x
-        tmp[...] *= x
-        tmp[...] *= x
-        tmp[...] *= x
-        tmp[...] *= x
-        tmp[...] *= x
-        tmp[...] *= x
-        tmp[...] *= x
-        tmp[...] *= x
-        tmp[...] *= 11.
-        r[...] += tmp
-
-    from numpy.polynomial.polynomial import polyval
-
-    def np_polyval(x, r):
-        r[...] = polyval(x, coeffs[::-1])
-
-    assert lambda_h(1.) == 66.
-    assert lambda_p(1.) == 66.
-    single_val = npw.ones((1, ))
-    single_val_r = np.zeros_like(single_val)
-    single_val_tmp = np.zeros_like(single_val)
-    func_p(single_val, single_val_r, single_val_tmp)
-    assert single_val_r[0] == 66.
-    single_val_r[0] = 0.
-    func_p_bis(single_val, single_val_r, single_val_tmp)
-    assert single_val_r[0] == 66.
-    single_val_r[0] = 0.
-    func_h(single_val, single_val_r)
-    assert single_val_r[0] == 66.
-    single_val_r[0] = 0.
-    np_polyval(single_val, single_val_r)
-    assert single_val_r[0] == 66.
-
-    test_form(lambda_p, r, a, "Lambda base canonique")
-    test_form(lambda_h, r, a, "Lambda Horner")
-    test_form(func_p, r, a, "Function base canonique", r, temp)
-    test_form(func_p_bis, r, a, "Function base canonique (bis)", r, temp)
-    test_form(func_h, r, a, "Function Horner", r)
-    test_form(np_polyval, r, a, "Numpy polyval", r)
-
-    res_test = np.empty_like(a)
-    res_test_coeff = np.empty_like(a)
-    w_test = lambda y: (-12. + (4. + (15. + (-5. + (-3. + (1. + (6587. + (-34869. + \
-        (77815. + (-93577. + (63866. + (-23426. + 3604. * y) * y) * y) * y) * y) * y) \
-        * y) * y) * y) * y) * y) * y) * y / 720.
-    res_test[...] = w_test(a)
-    w_test_coeffs = npw.asrealarray([3604, -23426, 63866, -93577,
-                                     77815, -34869, 6587,
-                                     1, -3, -5, 15, 4, -12, 0]) / 720.
-    res_test_coeff[...] = w_test_coeffs[0]
-    for c in w_test_coeffs[1:]:
-        res_test_coeff[...] *= a
-        res_test_coeff[...] += c
-
-    print np.max(res_test - res_test_coeff)
-    assert np.allclose(res_test, res_test_coeff)
diff --git a/hysop/old/numerics.old/tests/__init__.py b/hysop/old/numerics.old/tests/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/hysop/old/numerics.old/tests/test_differential_operations.py b/hysop/old/numerics.old/tests/test_differential_operations.py
deleted file mode 100755
index af64ca0798c785481a4439524a2efd7b35127b03..0000000000000000000000000000000000000000
--- a/hysop/old/numerics.old/tests/test_differential_operations.py
+++ /dev/null
@@ -1,1015 +0,0 @@
-# -*- coding: utf-8 -*-
-from hysop import Field, Box
-import numpy as np
-import hysop.numerics.differential_operations as diffop
-from hysop.tools.numpywrappers import npw
-from hysop.tools.parameters import Discretization
-from hysop.domain.subsets import SubBox
-import math as m
-pi = m.pi
-cos = np.cos
-sin = np.sin
-
-
-def compute_vel(res, x, y, z, t):
-    res[0][...] = sin(x) * cos(y) * cos(z)
-    res[1][...] = - cos(x) * sin(y) * cos(z)
-    res[2][...] = 0.
-    return res
-
-
-def compute_vort(res, x, y, z, t):
-    res[0][...] = - cos(x) * sin(y) * sin(z)
-    res[1][...] = - sin(x) * cos(y) * sin(z)
-    res[2][...] = 2. * sin(x) * sin(y) * cos(z)
-    return res
-
-
-def compute_vel2(res, x, y, t):
-    res[0][...] = sin(x) * cos(y)
-    res[1][...] = - cos(x) * sin(y)
-    return res
-
-
-def compute_vort2(res, x, y, t):
-    res[0][...] = 2. * sin(x) * sin(y)
-    return res
-
-
-def analyticalDivRhoV(res, x, y, z, t):
-    res[0][...] = cos(y) * cos(z) * sin(y) * sin(z)
-    return res
-
-
-def analyticalDivRhoV2D(res, x, y, t):
-    res[0][...] = 0.
-    return res
-
-
-def analyticalDivWV(res, x, y, z, t):
-    res[0][...] = - cos(y) * cos(z) * sin(y) * sin(z)
-    res[1][...] = cos(x) * cos(z) * sin(z) * sin(x)
-    res[2][...] = 0.
-    return res
-
-
-def analyticalGradVxW(res, x, y, z, t):
-    res[0][...] = - sin(y) * cos(y) * sin(z) * cos(z)
-    res[1][...] = sin(x) * cos(x) * sin(z) * cos(z)
-    res[2][...] = 0.
-    return res
-
-
-def analyticalDivStressTensor(res, x, y, z, t):
-    res[0][...] = - 3. * sin(x) * cos(y) * cos(z)
-    res[1][...] = 3. * cos(x) * sin(y) * cos(z)
-    res[2][...] = 0.
-    return res
-
-def analyticalDivAdvection(res, x, y, z, t):
-    res[0][...] = - cos(z) * cos(z) * (cos(x) * cos(x) - sin(x) * sin(x)) - \
-        cos(z) * cos(z) * (cos(y) * cos(y) - sin(y) * sin(y))
-    return res
-
-
-Nx = 88
-Ny = 50
-Nz = 64
-g = 2
-discr3D = Discretization([Nx + 1, Ny + 1, Nz + 1], [g, g, g])
-discr2D = Discretization([Nx + 1, Ny + 1], [g, g])
-ldom = npw.asrealarray([2., pi, 3.5])
-xdom = [0., -0.3, 0.4]
-
-
-def init(discr, vform, wform):
-    dim = len(discr.resolution)
-    dom = Box(dimension=dim, length=ldom[:dim],
-              origin=xdom[:dim])
-    topo = dom.create_topology(discr)
-    work = []
-    shape = tuple(topo.mesh.local_resolution)
-    subsize = np.prod(shape)
-    for _ in xrange(6):
-        work.append(npw.zeros(subsize,))
-    velo = Field(domain=dom, formula=vform,
-                 name='Velocity', is_vector=True)
-    vorti = Field(domain=dom, formula=wform,
-                  name='Vorticity', is_vector=dim == 3)
-    vd = velo.discretize(topo)
-    wd = vorti.discretize(topo)
-    velo.initialize(topo=topo)
-    vorti.initialize(topo=topo)
-    return vd, wd, topo, work
-
-
-# Init 2D and 3D
-v3d, w3d, topo3, work3 = init(discr3D, compute_vel, compute_vort)
-v2d, w2d, topo2, work2 = init(discr2D, compute_vel2, compute_vort2)
-
-
-def build_op(op_class, len_result, topo, work=None, reduced=False,
-             reduce_output=False):
-    if reduced:
-        # computations will be done on a subset of the domain
-        sl = topo.domain.length * 0.5
-        orig = topo.domain.origin + 0.1 * topo.domain.length
-        subbox = SubBox(parent=topo.domain, origin=orig, length=sl)
-        indices = subbox.discretize(topo)[0]
-        if reduce_output:
-            outputshape = subbox.mesh[topo].resolution
-        else:
-            outputshape = np.asarray(topo.mesh.local_resolution).copy()
-    else:
-        indices = None
-        if reduce_output:
-            outputshape = np.asarray(topo.mesh.local_resolution).copy()
-            gh = topo.ghosts()
-            outputshape -= 2 * gh
-            outputshape = tuple(outputshape)
-        else:
-            outputshape = tuple(topo.mesh.local_resolution)
-
-    if work is not None:
-        wk_prop = op_class.get_work_properties(topo, indices)
-        if wk_prop['rwork'] is not None:
-            lwork = len(wk_prop['rwork'])
-        else:
-            lwork = 0
-        op = op_class(topo=topo, indices=indices, work=work[:lwork],
-                      reduce_output_shape=reduce_output)
-    else:
-        op = op_class(topo=topo, indices=indices,
-                      reduce_output_shape=reduce_output)
-    result = [npw.zeros(outputshape) for _ in xrange(len_result)]
-    return op, result
-
-
-def assert_curl(wdim, topo, velo, vorti, work=None, reduced=False,
-                reduce_output=False):
-    op, result = build_op(diffop.Curl, wdim, topo, work, reduced,
-                          reduce_output)
-    ic_out = op.output_indices
-    ic_in = op.in_indices
-    result = op(velo.data, result)
-    atol = np.max(topo.mesh.space_step ** 2)
-    for i in xrange(wdim):
-        assert np.allclose(vorti.data[i][ic_in], result[i][ic_out], atol=atol)
-
-
-def test_curl():
-    # 2d
-    assert_curl(1, topo2, v2d, w2d)
-    # 2d work
-    assert_curl(1, topo2, v2d, w2d, work2)
-    # 2d reduced
-    assert_curl(1, topo2, v2d, w2d, reduced=True)
-    # 2d reduced and work
-    assert_curl(1, topo2, v2d, w2d, work2, reduced=True)
-    # 2d reduce output
-    assert_curl(1, topo2, v2d, w2d, reduce_output=True)
-    # 2d reduce output and work
-    assert_curl(1, topo2, v2d, w2d, work2, reduce_output=True)
-    # 2d reduce output, on subset, with work
-    assert_curl(1, topo2, v2d, w2d, work2, reduced=True, reduce_output=True)
-    # 3d
-    assert_curl(3, topo3, v3d, w3d)
-    # 3d work
-    assert_curl(3, topo3, v3d, w3d, work3)
-    # 3d reduced
-    assert_curl(3, topo3, v3d, w3d, reduced=True)
-    # 3d reduced and work
-    assert_curl(3, topo3, v3d, w3d, work3, reduced=True)
-    # 3d reduce output
-    assert_curl(3, topo3, v3d, w3d, reduce_output=True)
-    # 3d reduce output and work
-    assert_curl(3, topo3, v3d, w3d, work3, reduce_output=True)
-    # 3d, work, reduce output, on subset
-    assert_curl(3, topo3, v3d, w3d, work3, reduced=True, reduce_output=True)
-
-
-def assert_div_rho_v(topo, velo, scal, formula, work=None, reduced=False,
-                     reduce_output=False):
-    ref = Field(domain=topo.domain, formula=formula,
-                name='Analytical', is_vector=False)
-    rd = ref.discretize(topo)
-    ref.initialize(topo=topo)
-    op, result = build_op(diffop.DivRhoV, 1, topo, work, reduced,
-                          reduce_output)
-    ic_out = op.output_indices
-    ic_in = op.in_indices
-    tol = np.max(topo.mesh.space_step ** 4)
-    result = op(velo.data, [scal], result)
-    assert np.allclose(rd[0][ic_in], result[0][ic_out], atol=tol)
-
-
-def test_div_rho_v():
-    # 2d
-    assert_div_rho_v(topo2, v2d, w2d[0], analyticalDivRhoV2D)
-    # 2d work
-    assert_div_rho_v(topo2, v2d, w2d[0], analyticalDivRhoV2D, work2)
-    # 2d reduced
-    assert_div_rho_v(topo2, v2d, w2d[0], analyticalDivRhoV2D, reduced=True)
-    # 2d reduced and work
-    assert_div_rho_v(topo2, v2d, w2d[0], analyticalDivRhoV2D, work2,
-                     reduced=True)
-    # 2d reduce output
-    assert_div_rho_v(topo2, v2d, w2d[0], analyticalDivRhoV2D,
-                     reduce_output=False)
-    # 2d reduce output and work
-    assert_div_rho_v(topo2, v2d, w2d[0], analyticalDivRhoV2D, work2,
-                     reduce_output=False)
-    # 2d reduce output, on subset, with work
-    assert_div_rho_v(topo2, v2d, w2d[0], analyticalDivRhoV2D, work2,
-                     reduce_output=True, reduced=True)
-    # 3d
-    assert_div_rho_v(topo3, v3d, w3d[0], analyticalDivRhoV)
-    # 3d work
-    assert_div_rho_v(topo3, v3d, w3d[0], analyticalDivRhoV, work3)
-    # 3d reduced
-    assert_div_rho_v(topo3, v3d, w3d[0], analyticalDivRhoV, reduced=True)
-    # 3d reduced and work
-    assert_div_rho_v(topo3, v3d, w3d[0], analyticalDivRhoV, work3,
-                     reduced=True)
-    # 3d reduce output
-    assert_div_rho_v(topo3, v3d, w3d[0], analyticalDivRhoV,
-                     reduce_output=False)
-    # 3d reduce output and work
-    assert_div_rho_v(topo3, v3d, w3d[0], analyticalDivRhoV, work3,
-                     reduce_output=False)
-    # 3d, work, reduce output, on subset
-    assert_div_rho_v(topo3, v3d, w3d[0], analyticalDivRhoV, work3,
-                     reduce_output=False, reduced=True)
-
-
-def assert_div_w_v(wdim, topo, velo, vorti, formula, work=None, reduced=False,
-                   reduce_output=False):
-    # Reference field
-    ref = Field(domain=topo.domain, formula=formula,
-                name='Analytical', is_vector=True)
-    rd = ref.discretize(topo)
-    ref.initialize(topo=topo)
-    op, result = build_op(diffop.DivWV, wdim, topo, work, reduced,
-                          reduce_output)
-    result = op(vorti.data, velo.data, result)
-    ic_out = op.output_indices
-    ic_in = op.in_indices
-
-    # Numerical VS analytical
-    tol = np.max(topo.mesh.space_step ** 2)
-    for i in xrange(wdim):
-        assert np.allclose(rd[i][ic_in], result[i][ic_out], atol=tol)
-
-
-def test_div_w_v():
-    # 3d
-    assert_div_w_v(3, topo3, v3d, w3d, analyticalDivWV)
-    # 3d work
-    assert_div_w_v(3, topo3, v3d, w3d, analyticalDivWV, work3)
-    # 3d reduced
-    assert_div_w_v(3, topo3, v3d, w3d, analyticalDivWV, reduced=True)
-    # 3d reduced and work
-    assert_div_w_v(3, topo3, v3d, w3d, analyticalDivWV, work3,
-                   reduced=True)
-    # 3d reduce output
-    assert_div_w_v(3, topo3, v3d, w3d, analyticalDivWV, reduce_output=True)
-    # 3d reduce output and work
-    assert_div_w_v(3, topo3, v3d, w3d, analyticalDivWV, work3,
-                   reduce_output=True)
-    # 3d, work, reduce output, on subset
-    assert_div_w_v(3, topo3, v3d, w3d, analyticalDivWV, work3,
-                   reduce_output=True, reduced=True)
-
-
-def assert_laplacian(wdim, topo, vorti, formula, work=None, reduced=False,
-                     reduce_output=False):
-    ref = Field(domain=topo.domain, formula=formula,
-                name='Analytical', is_vector=True)
-    rd = ref.discretize(topo)
-    ref.initialize(topo=topo)
-    op, result = build_op(diffop.Laplacian, wdim, topo, work, reduced,
-                          reduce_output)
-    ic_out = op.output_indices
-    ic_in = op.in_indices
-    result = op(vorti.data, result)
-    tol = np.max(topo.mesh.space_step ** 2)
-    for i in xrange(wdim):
-        assert np.allclose(rd.data[i][ic_in], result[i][ic_out], rtol=tol)
-
-
-def laplacian_func(res, x, y, z, t):
-    res[0][...] = 3 * cos(x) * sin(y) * sin(z)
-    res[1][...] = 3 * sin(x) * cos(y) * sin(z)
-    res[2][...] = -6. * sin(x) * sin(y) * cos(z)
-    return res
-
-
-def laplacian_func_2d(res, x, y, t):
-    res[0][...] = -4. * sin(x) * sin(y)
-    return res
-
-
-def test_laplacian():
-    # 2d
-    assert_laplacian(1, topo2, w2d, laplacian_func_2d)
-    # 2d work
-    assert_laplacian(1, topo2, w2d, laplacian_func_2d, work2)
-    # 2d reduced
-    assert_laplacian(1, topo2, w2d, laplacian_func_2d, reduced=True)
-    # 2d reduced and work
-    assert_laplacian(1, topo2, w2d, laplacian_func_2d, work2,
-                     reduced=True)
-    # 2d reduce output
-    assert_laplacian(1, topo2, w2d, laplacian_func_2d, reduce_output=True)
-    # 2d reduce output and work
-    assert_laplacian(1, topo2, w2d, laplacian_func_2d, work2,
-                     reduce_output=True)
-    # 2d, work, reduce output, on subset
-    assert_laplacian(1, topo2, w2d, laplacian_func_2d, work2,
-                     reduce_output=True, reduced=True)
-
-    # 3d
-    assert_laplacian(3, topo3, w3d, laplacian_func)
-    # 3d work
-    assert_laplacian(3, topo3, w3d, laplacian_func, work3)
-
-    # 3d reduced
-    assert_laplacian(3, topo3, w3d, laplacian_func, reduced=True)
-    # 3d reduced and work
-    assert_laplacian(3, topo3, w3d, laplacian_func, work3,
-                     reduced=True)
-    # 3d reduce output
-    assert_laplacian(3, topo3, w3d, laplacian_func, reduce_output=True)
-    # 3d reduce output and work
-    assert_laplacian(3, topo3, w3d, laplacian_func, work3,
-                     reduce_output=True)
-    # 3d, work, reduce output, on subset
-    assert_laplacian(3, topo3, w3d, laplacian_func, work3,
-                     reduce_output=True, reduced=True)
-
-
-def assert_grad_s(topo, scal, formula, work=None, reduced=False,
-                  reduce_output=False):
-    # Reference field
-    ref = Field(domain=topo.domain, formula=formula,
-                name='Analytical', is_vector=True)
-    rd = ref.discretize(topo)
-    ref.initialize(topo=topo)
-    op, result = build_op(diffop.GradS, topo.domain.dim,
-                          topo, work, reduced, reduce_output)
-    ic_out = op.output_indices
-    ic_in = op.in_indices
-    result = op([scal], result)
-    # Numerical VS analytical
-    tol = np.max(topo.mesh.space_step ** 4)
-    for i in xrange(topo.domain.dim):
-        assert np.allclose(rd[i][ic_in], result[i][ic_out], atol=tol)
-
-
-def grad_s_func_2d(res, x, y, t):
-    res[0][...] = cos(x) * cos(y)
-    res[1][...] = -sin(x) * sin(y)
-    return res
-
-
-def grad_s_func_3d(res, x, y, z, t):
-    res[0][...] = cos(x) * cos(y) * cos(z)
-    res[1][...] = -sin(x) * sin(y) * cos(z)
-    res[2][...] = -sin(x) * cos(y) * sin(z)
-    return res
-
-
-def test_grad_s():
-    # 2d
-    assert_grad_s(topo2, v2d[0], grad_s_func_2d)
-    # 2d work
-    assert_grad_s(topo2, v2d[0], grad_s_func_2d, work2)
-    # 2d reduced
-    assert_grad_s(topo2, v2d[0], grad_s_func_2d, reduced=True)
-    # 2d reduced and work
-    assert_grad_s(topo2, v2d[0], grad_s_func_2d, work2,
-                  reduced=True)
-    # 2d reduce output
-    assert_grad_s(topo2, v2d[0], grad_s_func_2d,
-                  reduce_output=True)
-    # 2d reduce output and work
-    assert_grad_s(topo2, v2d[0], grad_s_func_2d, work2,
-                  reduce_output=True)
-    # 2d reduce output, on subset, with work
-    assert_grad_s(topo2, v2d[0], grad_s_func_2d, work2,
-                  reduce_output=True, reduced=True)
-    # 3d
-    assert_grad_s(topo3, v3d[0], grad_s_func_3d)
-    # 3d work
-    assert_grad_s(topo3, v3d[0], grad_s_func_3d, work3)
-    # 3d reduced
-    assert_grad_s(topo3, v3d[0], grad_s_func_3d, reduced=True)
-    # 3d reduced and work
-    assert_grad_s(topo3, v3d[0], grad_s_func_3d, work3,
-                  reduced=True)
-    # 3d reduce output
-    assert_grad_s(topo3, v3d[0], grad_s_func_3d, reduce_output=True)
-    # 3d reduce output and work
-    assert_grad_s(topo3, v3d[0], grad_s_func_3d, work3,
-                  reduce_output=True)
-    # 3d, work, reduce output, on subset
-    assert_grad_s(topo3, v3d[0], grad_s_func_3d, work3,
-                  reduce_output=True, reduced=True)
-
-
-def assert_grad_v(topo, velo, formula, work=None, reduced=False,
-                  reduce_output=False):
-    # Reference field
-    resdim = topo.domain.dim ** 2
-    ref = Field(domain=topo.domain, formula=formula,
-                name='Analytical', nb_components=resdim)
-    rd = ref.discretize(topo)
-    ref.initialize(topo=topo)
-    op, result = build_op(diffop.GradV, resdim, topo, work, reduced,
-                          reduce_output)
-    result = op(velo.data, result)
-    ic_out = op.output_indices
-    ic_in = op.in_indices
-    # Numerical VS analytical
-    tol = np.max(topo.mesh.space_step ** 4)
-    for i in xrange(resdim):
-        assert np.allclose(rd[i][ic_in], result[i][ic_out], atol=tol)
-
-
-def grad_v_func_2d(res, x, y, t):
-    res[0][...] = cos(x) * cos(y)
-    res[1][...] = -sin(x) * sin(y)
-    res[2][...] = sin(x) * sin(y)
-    res[3][...] = -cos(x) * cos(y)
-    return res
-
-
-def grad_v_func_3d(res, x, y, z, t):
-    res[0][...] = cos(x) * cos(y) * cos(z)
-    res[1][...] = -sin(x) * sin(y) * cos(z)
-    res[2][...] = -sin(x) * cos(y) * sin(z)
-    res[3][...] = sin(x) * sin(y) * cos(z)
-    res[4][...] = - cos(x) * cos(y) * cos(z) 
-    res[5][...] = cos(x) * sin(y) * sin(z)
-    res[6][...] = 0.
-    res[7][...] = 0.
-    res[8][...] = 0.
-    return res
-
-
-def test_grad_v():
-    # 2d
-    assert_grad_v(topo2, v2d, grad_v_func_2d)
-    # 2d work
-    assert_grad_v(topo2, v2d, grad_v_func_2d, work2)
-    # 2d reduced
-    assert_grad_v(topo2, v2d, grad_v_func_2d, reduced=True)
-    # 2d reduced and work
-    assert_grad_v(topo2, v2d, grad_v_func_2d, work2,
-                  reduced=True)
-    # 2d reduce output
-    assert_grad_v(topo2, v2d, grad_v_func_2d,
-                  reduce_output=True)
-    # 2d reduce output and work
-    assert_grad_v(topo2, v2d, grad_v_func_2d, work2,
-                  reduce_output=True)
-    # 2d reduce output, on subset, with work
-    assert_grad_v(topo2, v2d, grad_v_func_2d, work2,
-                  reduce_output=True, reduced=True)
-    # 3d
-    assert_grad_v(topo3, v3d, grad_v_func_3d)
-    # 3d work
-    assert_grad_v(topo3, v3d, grad_v_func_3d, work3)
-    # 3d reduced
-    assert_grad_v(topo3, v3d, grad_v_func_3d, reduced=True)
-    # 3d reduced and work
-    assert_grad_v(topo3, v3d, grad_v_func_3d, work3,
-                  reduced=True)
-    # 3d reduce output
-    assert_grad_v(topo3, v3d, grad_v_func_3d, reduce_output=True)
-    # 3d reduce output and work
-    assert_grad_v(topo3, v3d, grad_v_func_3d, work3,
-                  reduce_output=True)
-    # 3d, work, reduce output, on subset
-    assert_grad_v(topo3, v3d, grad_v_func_3d, work3,
-                  reduce_output=True, reduced=True)
-
-
-def assert_grad_vxw(topo, velo, vorti, work=None, reduced=False,
-                    reduce_output=False):
-    # Reference field
-    ref = Field(domain=topo.domain, formula=analyticalGradVxW,
-                name='Analytical', is_vector=True)
-    rd = ref.discretize(topo)
-    ref.initialize(topo=topo)
-    op, result = build_op(diffop.GradVxW, velo.nb_components, topo,
-                          work, reduced, reduce_output)
-    diag = npw.zeros(2)
-    result, diag = op(velo.data, vorti.data, result, diag)
-    # Numerical VS analytical
-    ic_out = op.output_indices
-    ic_in = op.in_indices
-    tol = np.max(topo3.mesh.space_step ** 2)
-    for i in xrange(topo.domain.dim):
-        assert np.allclose(rd[i][ic_in], result[i][ic_out], atol=tol)
-
-
-def test_grad_vxw():
-    # 3d
-    assert_grad_vxw(topo3, v3d, w3d)
-    # 3d work
-    assert_grad_vxw(topo3, v3d, w3d, work3)
-    # 3d reduced
-    assert_grad_vxw(topo3, v3d, w3d, reduced=True)
-    # 3d reduced and work
-    assert_grad_vxw(topo3, v3d, w3d, work3,
-                    reduced=True)
-    # 3d reduce output
-    assert_grad_vxw(topo3, v3d, w3d, reduce_output=True)
-    # 3d reduce output and work
-    assert_grad_vxw(topo3, v3d, w3d, work3,
-                    reduce_output=True)
-    # 3d, work, reduce output, on subset
-    assert_grad_vxw(topo3, v3d, w3d, work3,
-                    reduce_output=True, reduced=True)
-
-
-# test for RHS of Pressure-Poisson equation
-def assert_div_advection(topo, velo, work=None, reduced=False,
-                         reduce_output=False):
-    # Reference scalar field
-    ref = Field(domain=topo.domain, formula=analyticalDivAdvection,
-                name='Analytical')
-    rd = ref.discretize(topo)
-    ref.initialize(topo=topo)
-    op, result = build_op(diffop.DivAdvection, 1, topo, work, reduced,
-                          reduce_output)
-    result = op(velo.data, result)
-    ic_out = op.output_indices
-    ic_in = op.in_indices
-
-    # Numerical VS analytical
-    tol = np.max(topo.mesh.space_step ** 4)
-    assert np.allclose(rd[0][ic_in], result[0][ic_out], rtol=tol)
-
-
-def test_div_advection():
-    # 3d
-    assert_div_advection(topo3, v3d)
-    # 3d work
-    assert_div_advection(topo3, v3d, work3)
-    # 3d reduced
-    assert_div_advection(topo3, v3d, reduced=True)
-    # 3d reduced and work
-    assert_div_advection(topo3, v3d, work3,
-                         reduced=True)
-    # 3d reduce output
-    assert_div_advection(topo3, v3d, reduce_output=True)
-    # 3d reduce output and work
-    assert_div_advection(topo3, v3d, work3,
-                         reduce_output=True)
-    # 3d, work, reduce output, on subset
-    assert_div_advection(topo3, v3d, work3,
-                         reduce_output=True, reduced=True)
-
-
-def assert_strain(topo, velo, formula, work=None, reduced=False,
-                  reduce_output=False):
-    # Reference scalar field
-    resdim = topo.domain.dim * (topo.domain.dim + 1) / 2
-    ref = Field(domain=topo.domain, formula=formula,
-                name='Analytical', nb_components=resdim)
-    rd = ref.discretize(topo)
-    ref.initialize(topo=topo)
-    op, result = build_op(diffop.Strain, resdim, topo, work, reduced,
-                          reduce_output)
-    result = op(velo.data, result)
-
-    ic_out = op.output_indices
-    ic_in = op.in_indices
-    # Numerical VS analytical
-    tol = np.max(topo.mesh.space_step ** 4)
-    for d in xrange(resdim):
-        assert np.allclose(rd[d][ic_in], result[d][ic_out], atol=tol)
-
-
-def strain_f_2d(res, x, y, t):
-    res[0][...] = cos(x) * cos(y)
-    res[1][...] = - cos(x) * cos(y)
-    res[2][...] = 0.
-    return res
-
-
-def strain_f_3d(res, x, y, z, t):
-    res[0][...] = cos(x) * cos(y) * cos(z)
-    res[1][...] = - cos(x) * cos(y) * cos(z)
-    res[2][...] = 0.
-    res[3][...] = 0.
-    res[4][...] = -0.5 * sin(x) * cos(y) * sin(z)
-    res[5][...] = 0.5 * cos(x) * sin(y) * sin(z)
-    return res
-
-
-def test_strain():
-    # 2d
-    assert_strain(topo2, v2d, strain_f_2d)
-    # 2d work
-    assert_strain(topo2, v2d, strain_f_2d, work2)
-    # 2d reduced
-    assert_strain(topo2, v2d, strain_f_2d, reduced=True)
-    # 2d reduced and work
-    assert_strain(topo2, v2d, strain_f_2d, work2,
-                  reduced=True)
-    # 2d reduce output
-    assert_strain(topo2, v2d, strain_f_2d,
-                  reduce_output=True)
-    # 2d reduce output and work
-    assert_strain(topo2, v2d, strain_f_2d, work2,
-                  reduce_output=True)
-    # 2d reduce output, on subset, with work
-    assert_strain(topo2, v2d, strain_f_2d, work2,
-                  reduce_output=True, reduced=True)
-    # 3d
-    assert_strain(topo3, v3d, strain_f_3d)
-    # 3d work
-    assert_strain(topo3, v3d, strain_f_3d, work3)
-    # 3d reduced
-    assert_strain(topo3, v3d, strain_f_3d, reduced=True)
-    # 3d reduced and work
-    assert_strain(topo3, v3d, strain_f_3d, work3,
-                  reduced=True)
-    # 3d reduce output
-    assert_strain(topo3, v3d, strain_f_3d, reduce_output=True)
-    # 3d reduce output and work
-    assert_strain(topo3, v3d, strain_f_3d, work3,
-                  reduce_output=True)
-    # 3d, work, reduce output, on subset
-    assert_strain(topo3, v3d, strain_f_3d, work3,
-                  reduce_output=True, reduced=True)
-
-
-def compute_max(field, ic):
-    """Compute max of sum_i (abs(field_i[ic])) for i in len(field)
-    """
-    temp = npw.zeros_like(field[0])
-    for d in xrange(len(field)):
-        temp[ic] += np.abs(field[d][ic])
-    return np.max(temp)
-
-
-def assert_strain_criteria(topo, velo, formula, work=None, reduced=False,
-                           reduce_output=False):
-    # Reference scalar field
-    resdim = topo.domain.dim * (topo.domain.dim + 1) / 2
-    ref = Field(domain=topo.domain, formula=formula,
-                name='Analytical', nb_components=resdim)
-    rd = ref.discretize(topo)
-    ref.initialize(topo=topo)
-    op, result = build_op(diffop.StrainCriteria, 0, topo, work, reduced,
-                          reduce_output)
-    result = npw.zeros(topo.domain.dim)
-    result = op(velo.data, result)
-    ic_in = op.in_indices
-
-    dd = topo.domain.dim
-    if dd == 3:
-        ll = [None, ] * dd
-        ll[0] = [rd[0], rd[3], rd[4]]
-        ll[1] = [rd[1], rd[3], rd[5]]
-        ll[2] = [rd[2], rd[4], rd[5]]
-    elif dd == 2:
-        ll = [None, ] * dd
-        ll[0] = [rd[0], rd[2]]
-        ll[1] = [rd[1], rd[2]]
-
-    for d in xrange(dd):
-        refmax = compute_max(ll[d], ic_in)
-        assert np.allclose(refmax, result[d])
-
-
-def test_strain_criteria():
-    # 2d
-    assert_strain_criteria(topo2, v2d, strain_f_2d)
-    # 2d work
-    assert_strain_criteria(topo2, v2d, strain_f_2d, work2)
-    # 2d reduced
-    assert_strain_criteria(topo2, v2d, strain_f_2d, reduced=True)
-    # 2d reduced and work
-    assert_strain_criteria(topo2, v2d, strain_f_2d, work2,
-                           reduced=True)
-    # 2d reduce output
-    assert_strain_criteria(topo2, v2d, strain_f_2d,
-                           reduce_output=True)
-    # 2d reduce output and work
-    assert_strain_criteria(topo2, v2d, strain_f_2d, work2,
-                           reduce_output=True)
-    # 2d reduce output, on subset, with work
-    assert_strain_criteria(topo2, v2d, strain_f_2d, work2,
-                           reduce_output=True, reduced=True)
-    # 3d
-    assert_strain_criteria(topo3, v3d, strain_f_3d)
-    # 3d work
-    assert_strain_criteria(topo3, v3d, strain_f_3d, work3)
-    # 3d reduced
-    assert_strain_criteria(topo3, v3d, strain_f_3d, reduced=True)
-    # 3d reduced and work
-    assert_strain_criteria(topo3, v3d, strain_f_3d, work3,
-                           reduced=True)
-    # 3d reduce output
-    assert_strain_criteria(topo3, v3d, strain_f_3d, reduce_output=True)
-    # 3d reduce output and work
-    assert_strain_criteria(topo3, v3d, strain_f_3d, work3,
-                           reduce_output=True)
-    # 3d, work, reduce output, on subset
-    assert_strain_criteria(topo3, v3d, strain_f_3d, work3,
-                           reduce_output=True, reduced=True)
-
-
-def assert_max_diag_grad(topo, velo, formula, work=None, reduced=False,
-                         reduce_output=False):
-    # Reference field
-    ref = Field(domain=topo.domain, formula=formula,
-                name='Analytical', is_vector=True)
-    rd = ref.discretize(topo)
-    ref.initialize(topo=topo)
-    op, result = build_op(diffop.MaxDiagGradV, 0, topo, work, reduced,
-                          reduce_output)
-    result = npw.zeros(topo.domain.dim)
-    result = op(velo.data, result)
-    ic_in = op.in_indices
-    for d in xrange(topo.domain.dim):
-        refmax = compute_max([rd[d]], ic_in)
-        assert np.allclose(refmax, result[d])
-
-
-def diag_grad_func_2d(res, x, y, t):
-    res[0][...] = cos(x) * cos(y)
-    res[1][...] = -cos(x) * cos(y)
-    return res
-
-
-def diag_grad_func_3d(res, x, y, z, t):
-    res[0][...] = cos(x) * cos(y) * cos(z)
-    res[1][...] = - cos(x) * cos(y) * cos(z)
-    res[2][...] = 0.
-    return res
-
-
-def test_max_diag_grad():
-    # 2d
-    assert_max_diag_grad(topo2, v2d, diag_grad_func_2d)
-    # 2d work
-    assert_max_diag_grad(topo2, v2d, diag_grad_func_2d, work2)
-    # 2d reduced
-    assert_max_diag_grad(topo2, v2d, diag_grad_func_2d, reduced=True)
-    # 2d reduced and work
-    assert_max_diag_grad(topo2, v2d, diag_grad_func_2d, work2,
-                         reduced=True)
-    # 2d reduce output
-    assert_max_diag_grad(topo2, v2d, diag_grad_func_2d,
-                         reduce_output=True)
-    # 2d reduce output and work
-    assert_max_diag_grad(topo2, v2d, diag_grad_func_2d, work2,
-                         reduce_output=True)
-    # 2d reduce output, on subset, with work
-    assert_max_diag_grad(topo2, v2d, diag_grad_func_2d, work2,
-                         reduce_output=True, reduced=True)
-    # 3d
-    assert_max_diag_grad(topo3, v3d, diag_grad_func_3d)
-    # 3d work
-    assert_max_diag_grad(topo3, v3d, diag_grad_func_3d, work3)
-    # 3d reduced
-    assert_max_diag_grad(topo3, v3d, diag_grad_func_3d, reduced=True)
-    # 3d reduced and work
-    assert_max_diag_grad(topo3, v3d, diag_grad_func_3d, work3,
-                         reduced=True)
-    # 3d reduce output
-    assert_max_diag_grad(topo3, v3d, diag_grad_func_3d, reduce_output=True)
-    # 3d reduce output and work
-    assert_max_diag_grad(topo3, v3d, diag_grad_func_3d, work3,
-                         reduce_output=True)
-    # 3d, work, reduce output, on subset
-    assert_max_diag_grad(topo3, v3d, diag_grad_func_3d, work3,
-                         reduce_output=True, reduced=True)
-
-
-def assert_stretch_like(topo, velo, formula, work=None, reduced=False,
-                        reduce_output=False):
-    # Reference scalar field
-    resdim = topo.domain.dim ** 2
-    ref = Field(domain=topo.domain, formula=formula,
-                name='Analytical', nb_components=resdim)
-    rd = ref.discretize(topo)
-    ref.initialize(topo=topo)
-    op, result = build_op(diffop.StretchLike, 0, topo, work, reduced,
-                          reduce_output)
-    result = npw.zeros(topo.domain.dim)
-    result = op(velo.data, result)
-    ic_in = op.in_indices
-
-    dd = topo.domain.dim
-    for d in xrange(dd):
-        pos = d * dd
-        refmax = compute_max(rd[pos: pos + dd], ic_in)
-        assert np.allclose(refmax, result[d])
-
-
-def test_stretch_like():
-    # 2d
-    assert_stretch_like(topo2, v2d, grad_v_func_2d)
-    # 2d work
-    assert_stretch_like(topo2, v2d, grad_v_func_2d, work2)
-    # 2d reduced
-    assert_stretch_like(topo2, v2d, grad_v_func_2d, reduced=True)
-    # 2d reduced and work
-    assert_stretch_like(topo2, v2d, grad_v_func_2d, work2,
-                        reduced=True)
-    # 2d reduce output
-    assert_stretch_like(topo2, v2d, grad_v_func_2d,
-                        reduce_output=True)
-    # 2d reduce output and work
-    assert_stretch_like(topo2, v2d, grad_v_func_2d, work2,
-                        reduce_output=True)
-    # 2d reduce output, on subset, with work
-    assert_stretch_like(topo2, v2d, grad_v_func_2d, work2,
-                        reduce_output=True, reduced=True)
-    # 3d
-    assert_stretch_like(topo3, v3d, grad_v_func_3d)
-    # 3d work
-    assert_stretch_like(topo3, v3d, grad_v_func_3d, work3)
-    # 3d reduced
-    assert_stretch_like(topo3, v3d, grad_v_func_3d, reduced=True)
-    # 3d reduced and work
-    assert_stretch_like(topo3, v3d, grad_v_func_3d, work3,
-                        reduced=True)
-    # 3d reduce output
-    assert_stretch_like(topo3, v3d, grad_v_func_3d, reduce_output=True)
-    # 3d reduce output and work
-    assert_stretch_like(topo3, v3d, grad_v_func_3d, work3,
-                        reduce_output=True)
-    # 3d, work, reduce output, on subset
-    assert_stretch_like(topo3, v3d, grad_v_func_3d, work3,
-                        reduce_output=True, reduced=True)
-
-
-def assert_diag_and_stretch(topo, velo, formula, work=None, reduced=False,
-                            reduce_output=False):
-    # Reference scalar field
-    resdim = topo.domain.dim ** 2
-    ref = Field(domain=topo.domain, formula=formula,
-                name='Analytical', nb_components=resdim)
-    rd = ref.discretize(topo)
-    ref.initialize(topo=topo)
-    op, result = build_op(diffop.DiagAndStretch, 0, topo, work, reduced,
-                          reduce_output)
-    result = npw.zeros(2 * topo.domain.dim)
-    result = op(velo.data, result)
-    ic_in = op.in_indices
-
-    dd = topo.domain.dim
-    ll = [rd[i * (dd + 1)] for i in xrange(dd)]
-
-    for d in xrange(dd):
-        refmax = compute_max([ll[d]], ic_in)
-        assert np.allclose(refmax, result[d])
-        pos = d * dd
-        refmax = compute_max(rd[pos: pos + dd], ic_in)
-        assert np.allclose(refmax, result[d + dd])
-
-
-def test_diag_and_stretch():
-    # 2d
-    assert_diag_and_stretch(topo2, v2d, grad_v_func_2d)
-    # 2d work
-    assert_diag_and_stretch(topo2, v2d, grad_v_func_2d, work2)
-    # 2d reduced
-    assert_diag_and_stretch(topo2, v2d, grad_v_func_2d, reduced=True)
-    # 2d reduced and work
-    assert_diag_and_stretch(topo2, v2d, grad_v_func_2d, work2,
-                            reduced=True)
-    # 2d reduce output
-    assert_diag_and_stretch(topo2, v2d, grad_v_func_2d,
-                            reduce_output=True)
-    # 2d reduce output and work
-    assert_diag_and_stretch(topo2, v2d, grad_v_func_2d, work2,
-                            reduce_output=True)
-    # 2d reduce output, on subset, with work
-    assert_diag_and_stretch(topo2, v2d, grad_v_func_2d, work2,
-                            reduce_output=True, reduced=True)
-    # 3d
-    assert_diag_and_stretch(topo3, v3d, grad_v_func_3d)
-    # # 3d work
-    assert_diag_and_stretch(topo3, v3d, grad_v_func_3d, work3)
-    # 3d reduced
-    assert_diag_and_stretch(topo3, v3d, grad_v_func_3d, reduced=True)
-    # 3d reduced and work
-    assert_diag_and_stretch(topo3, v3d, grad_v_func_3d, work3,
-                            reduced=True)
-    # 3d reduce output
-    assert_diag_and_stretch(topo3, v3d, grad_v_func_3d, reduce_output=True)
-    # 3d reduce output and work
-    assert_diag_and_stretch(topo3, v3d, grad_v_func_3d, work3,
-                            reduce_output=True)
-    # 3d, work, reduce output, on subset
-    assert_diag_and_stretch(topo3, v3d, grad_v_func_3d, work3,
-                            reduce_output=True, reduced=True)
-
-
-def assert_strain_stretch_criteria(topo, velo, formula1, formula2, work=None,
-                                   reduced=False, reduce_output=False):
-    # Reference scalar field
-    resdim = topo.domain.dim * (topo.domain.dim + 1) / 2
-    graddim = topo.domain.dim ** 2
-    refstrain = Field(domain=topo.domain, formula=formula2,
-                      name='Strain', nb_components=resdim)
-    refgrad = Field(domain=topo.domain, formula=formula1,
-                    name='Gradl', nb_components=graddim)
-
-    rd = refstrain.discretize(topo)
-    gd = refgrad.discretize(topo)
-    refstrain.initialize(topo)
-    refgrad.initialize(topo=topo)
-    op, result = build_op(diffop.StrainAndStretch, 0, topo, work, reduced,
-                          reduce_output)
-    result = npw.zeros(2 * topo.domain.dim)
-    result = op(velo.data, result)
-    ic_in = op.in_indices
-
-    dd = topo.domain.dim
-    if dd == 3:
-        ll = [None, ] * dd
-        ll[0] = [rd[0], rd[3], rd[4]]
-        ll[1] = [rd[1], rd[3], rd[5]]
-        ll[2] = [rd[2], rd[4], rd[5]]
-    elif dd == 2:
-        ll = [None, ] * dd
-        ll[0] = [rd[0], rd[2]]
-        ll[1] = [rd[1], rd[2]]
-
-    if dd == 3:
-        for d in xrange(dd):
-            refmaxstr = compute_max(ll[d], ic_in)
-            assert np.allclose(refmaxstr, result[d])
-            pos = d * dd
-            refmaxgrad = compute_max(gd[pos: pos + dd], ic_in)
-            assert np.allclose(refmaxgrad, result[d + dd])
-
-
-def test_strain_stretch():
-    # 2d
-    assert_strain_stretch_criteria(topo2, v2d, grad_v_func_2d, strain_f_2d)
-    # 2d work
-    assert_strain_stretch_criteria(topo2, v2d, grad_v_func_2d, strain_f_2d,
-                                   work2)
-    # 2d reduced
-    assert_strain_stretch_criteria(topo2, v2d, grad_v_func_2d, strain_f_2d,
-                                   reduced=True)
-    # 2d reduced and work
-    assert_strain_stretch_criteria(topo2, v2d, grad_v_func_2d, strain_f_2d,
-                                   work2, reduced=True)
-    # 2d reduce output
-    assert_strain_stretch_criteria(topo2, v2d, grad_v_func_2d, strain_f_2d,
-                                   reduce_output=True)
-    # 2d reduce output and work
-    assert_strain_stretch_criteria(topo2, v2d, grad_v_func_2d, strain_f_2d,
-                                   work2, reduce_output=True)
-    # 2d reduce output, on subset, with work
-    assert_strain_stretch_criteria(topo2, v2d, grad_v_func_2d, strain_f_2d,
-                                   work2, reduce_output=True, reduced=True)
-    # 3d
-    assert_strain_stretch_criteria(topo3, v3d, grad_v_func_3d, strain_f_3d)
-    # 3d work
-    assert_strain_stretch_criteria(topo3, v3d, grad_v_func_3d, strain_f_3d,
-                                   work3)
-    # 3d reduced
-    assert_strain_stretch_criteria(topo3, v3d, grad_v_func_3d, strain_f_3d,
-                                   reduced=True)
-    # 3d reduced and work
-    assert_strain_stretch_criteria(topo3, v3d, grad_v_func_3d, strain_f_3d,
-                                   work3, reduced=True)
-    # 3d reduce output
-    assert_strain_stretch_criteria(topo3, v3d, grad_v_func_3d, strain_f_3d,
-                                   reduce_output=True)
-    # 3d reduce output and work
-    assert_strain_stretch_criteria(topo3, v3d, grad_v_func_3d, strain_f_3d,
-                                   work3, reduce_output=True)
-    # 3d, work, reduce output, on subset
-    assert_strain_stretch_criteria(topo3, v3d, grad_v_func_3d, strain_f_3d,
-                                   work3, reduce_output=True, reduced=True)
-
-if __name__ == "__main__":
-    test_curl()
-    test_div_rho_v()
-    test_div_w_v()
-    test_laplacian()
-    test_grad_s()
-    test_grad_v()
-    test_grad_vxw()
-    test_div_advection()
-    test_strain()
-    test_strain_criteria()
-    test_max_diag_grad()
-    test_stretch_like()
-    test_diag_and_stretch()
-    test_strain_criteria()
-    test_strain_stretch()
diff --git a/hysop/old/numerics.old/tests/test_finite_differences.py b/hysop/old/numerics.old/tests/test_finite_differences.py
deleted file mode 100644
index 768aec69e4a674bcffc28755726bb06c7962b058..0000000000000000000000000000000000000000
--- a/hysop/old/numerics.old/tests/test_finite_differences.py
+++ /dev/null
@@ -1,190 +0,0 @@
-"""FD schemes tests
-"""
-from hysop.numerics.finite_differences import FDC2, FDC4, FD2C2
-from hysop.tools.numpywrappers import npw
-from hysop import Box, Discretization, Field
-import numpy as np
-from hysop.domain.subsets import SubBox
-
-
-Nx = Ny = Nz = 128
-g = 2
-d2d = Discretization([Nx, Ny], [g, g])
-d3d = Discretization([Nx, Ny, Nz], [g, g, g])
-
-
-def f2d(res, x, y, t):
-    res[0][...] = np.cos(x) * y ** 2 + x * y ** 3
-    return res
-
-
-def ref_f2d(res, x, y, t):
-    res[0][...] = -np.sin(x) * y ** 2 + y ** 3
-    return res
-
-
-def ref2_f2d(res, x, y, t):
-    res[0][...] = 2 * np.cos(x) + 6 * x * y
-    return res
-
-
-def f3d(res, x, y, z, t):
-    res[0][...] = np.cos(x) * y ** 2 + x * y ** 3
-    return res
-
-
-def ref_f3d(res, x, y, z, t):
-    res[0][...] = -np.sin(x) * y ** 2 + y ** 3
-    return res
-
-
-def ref2_f3d(res, x, y, z, t):
-    res[0][...] = 2 * np.cos(x) + 6 * x * y
-    return res
-
-
-lengthes = [1., 1., 1.]
-
-
-def init(discr, formulas):
-    dimension = len(discr.resolution)
-    box = Box(length=lengthes[:dimension])
-    f1 = Field(name='input', domain=box, formula=formulas[0])
-    ref = Field(name='ref', domain=box, formula=formulas[1])
-    topo = box.create_topology(discr)
-    f1d = f1.discretize(topo).data[0]
-    refd = ref.discretize(topo).data[0]
-    f1.initialize(topo=topo)
-    ref.initialize(topo=topo)
-    ref2 = Field(name='ref', domain=box, formula=formulas[2])
-    ref2d = ref2.discretize(topo).data[0]
-    ref2.initialize(topo=topo)
-    return topo, f1d, refd, ref2d
-
-
-def run_schemes(schemes, f1d, result, ind, hh, iout=None):
-
-    work = npw.zeros(result.size)
-    if iout is None:
-        iout = ind
-    for sc in schemes:
-        ref = schemes[sc][0]
-        order = schemes[sc][2]
-        direction = schemes[sc][1]
-        result = sc(f1d, direction, result)
-        assert np.allclose(result[iout], ref[ind], atol=hh[0] ** order)
-        result[...] = 1.
-        result = sc.compute_and_add(f1d, direction, result, work)
-        assert np.allclose(result[iout], 1. + ref[ind], atol=hh[0] ** order)
-        result[...] = 1.
-        result = sc.compute_and_add_abs(f1d, direction, result, work)
-        assert np.allclose(result[iout], 1. + np.abs(ref[ind]),
-                           atol=hh[0] ** order)
-
-
-def check_fd_schemes(discr, formulas):
-    topo, f1d, refd, ref2d = init(discr, formulas)
-    ind = topo.mesh.compute_index
-    hh = topo.mesh.space_step
-    sc = {
-        FDC2(hh, ind): [refd, 0, 2],
-        FDC4(hh, ind): [refd, 0, 4],
-        FD2C2(hh, ind): [ref2d, 1, 2]}
-    shape = topo.mesh.local_resolution
-    result = npw.zeros(shape)
-    run_schemes(sc, f1d, result, ind, hh)
-
-
-def check_fd_schemes_reduce_input(discr, formulas):
-    topo, f1d, refd, ref2d = init(discr, formulas)
-    sl = topo.domain.length * 0.5
-    orig = topo.domain.origin + 0.1 * topo.domain.length
-    subbox = SubBox(parent=topo.domain, origin=orig, length=sl)
-    ind = subbox.discretize(topo)[0]
-    hh = topo.mesh.space_step
-    sc = {
-        FDC2(hh, ind): [refd, 0, 2],
-        FDC4(hh, ind): [refd, 0, 4],
-        FD2C2(hh, ind): [ref2d, 1, 2]}
-    shape = topo.mesh.local_resolution
-    result = npw.zeros(shape)
-    run_schemes(sc, f1d, result, ind, hh)
-
-
-def check_fd_schemes_reduce_output(discr, formulas):
-    topo, f1d, refd, ref2d = init(discr, formulas)
-    ind = topo.mesh.compute_index
-    hh = topo.mesh.space_step
-    sc = {
-        FDC2(hh, ind, output_indices=True): [refd, 0, 2],
-        FDC4(hh, ind, output_indices=True): [refd, 0, 4],
-        FD2C2(hh, ind, output_indices=True): [ref2d, 1, 2]
-        }
-    shape = np.asarray(topo.mesh.local_resolution).copy()
-    shape -= 2 * g
-    shape = tuple(shape)
-    result = npw.zeros(shape)
-    iout = [slice(0, shape[i]) for i in xrange(len(shape))]
-    run_schemes(sc, f1d, result, ind, hh, iout)
-
-
-def check_fd_schemes_reduce_all(discr, formulas):
-    topo, f1d, refd, ref2d = init(discr, formulas)
-    hh = topo.mesh.space_step
-    sl = topo.domain.length * 0.5
-    orig = topo.domain.origin + 0.1 * topo.domain.length
-    subbox = SubBox(parent=topo.domain, origin=orig, length=sl)
-    ind = subbox.discretize(topo)[0]
-    sc = {
-        FDC2(hh, ind, output_indices=True): [refd, 0, 2],
-        FDC4(hh, ind, output_indices=True): [refd, 0, 4],
-        FD2C2(hh, ind, output_indices=True): [ref2d, 1, 2]
-        }
-    shape = subbox.mesh[topo].resolution
-    result = npw.zeros(shape)
-    iout = [slice(0, shape[i]) for i in xrange(len(shape))]
-    run_schemes(sc, f1d, result, ind, hh, iout)
-
-
-def check_fd_schemes_setiout(discr, formulas):
-    topo, f1d, refd, ref2d = init(discr, formulas)
-    hh = topo.mesh.space_step
-    sl = topo.domain.length * 0.5
-    orig = topo.domain.origin + 0.1 * topo.domain.length
-    subbox = SubBox(parent=topo.domain, origin=orig, length=sl)
-    ind = subbox.discretize(topo)[0]
-    shape = np.asarray(subbox.mesh[topo].resolution).copy()
-    shape += 6
-    shape = tuple(shape)
-    result = npw.zeros(shape)
-    iout = [slice(3, shape[i] - 3) for i in xrange(len(shape))]
-    sc = {
-        FDC2(hh, ind, output_indices=iout): [refd, 0, 2],
-        FDC4(hh, ind, output_indices=iout): [refd, 0, 4],
-        FD2C2(hh, ind, output_indices=iout): [ref2d, 1, 2]
-        }
-    run_schemes(sc, f1d, result, ind, hh, iout)
-
-
-def test_fd_2d():
-    check_fd_schemes(d2d, [f2d, ref_f2d, ref2_f2d])
-
-
-def test_fd_3d():
-    check_fd_schemes(d3d, [f3d, ref_f3d, ref2_f3d])
-
-
-def test_fd_3d_reduce_input():
-    check_fd_schemes_reduce_input(d3d, [f3d, ref_f3d, ref2_f3d])
-
-
-def test_fd_3d_reduce_output():
-    check_fd_schemes_reduce_output(d3d, [f3d, ref_f3d, ref2_f3d])
-
-
-def test_fd_3d_reduce_all():
-    check_fd_schemes_reduce_all(d3d, [f3d, ref_f3d, ref2_f3d])
-
-
-def test_fd_3d_setiout():
-    check_fd_schemes_setiout(d3d, [f3d, ref_f3d, ref2_f3d])
diff --git a/hysop/old/numerics.old/tests/test_interpolation.py b/hysop/old/numerics.old/tests/test_interpolation.py
deleted file mode 100644
index ca29a74eb13a9d4310c6191a7761d2f1481a3ad6..0000000000000000000000000000000000000000
--- a/hysop/old/numerics.old/tests/test_interpolation.py
+++ /dev/null
@@ -1,123 +0,0 @@
-"""Test interpolation schemes
-"""
-from hysop.numerics.interpolation.interpolation import Linear
-from hysop.tools.numpywrappers import npw
-from hysop import Box, Discretization
-import numpy as np
-cos = np.cos
-
-disc = [None, ] * 3
-g = 1
-nx = 33
-ny = 69
-nz = 129
-disc[0] = Discretization([nx, ], [g, ])
-disc[1] = Discretization([nx, ny], [g, g])
-disc[2] = Discretization([nx, ny, nz], [g, g, g])
-doms = [None, ] * 3
-topos = [None, ] * 3
-rworks = [None, ] * 3
-iworks = [None, ] * 3
-for case in xrange(3):
-    dim = case + 1
-    doms[case] = Box(origin=[0.] * dim, length=[4. * np.pi, ] * dim)
-    topos[case] = doms[case].create_topology(disc[case])
-    refsize = (np.prod(topos[case].mesh.compute_resolution) + 324, )
-    iworks[case] = [npw.int_zeros(refsize)]
-    rworks[case] = [npw.zeros(refsize)]
-dx = topos[2].mesh.space_step
-
-
-def run_interp(dimension, velo=0., with_work=False):
-    """Interpolate a field and compare with reference
-    (cosine function), for a given domain dimension
-    """
-    topo = topos[dimension - 1]
-    rwork, iwork = None, None
-    if with_work:
-        rwork = rworks[dimension - 1]
-        iwork = iworks[dimension - 1]
-    ind = topo.mesh.compute_index
-    field = npw.zeros(topo.mesh.local_resolution)
-    for direction in xrange(topo.domain.dim):
-        # lref = doms[dimension - 1].length[direction]
-        h = topo.mesh.space_step[direction]
-        x = topo.mesh.coords[direction]
-        field[...] = np.cos(x)
-        # select a set of points which are advected randomly and interpolated
-        # --> 1 point over two from the original mesh
-        shape = list(topo.mesh.local_resolution)
-        coords = topo.mesh.coords[direction]
-        ic = [slice(0, coords.shape[i]) for i in xrange(topo.domain.dim)]
-        ic[direction] = slice(0, topo.mesh.local_resolution[direction] - 3, 2)
-        shape[direction] = topo.mesh.coords[direction][ic].shape[direction]
-        positions = npw.random(shape) * velo
-        positions[...] += topo.mesh.coords[direction][ic]
-        interp = Linear(field, direction=direction, topo_source=topo,
-                        rwork=rwork, iwork=iwork)
-        result = [npw.zeros_like(positions)]
-        result = interp(0., [positions], result)
-        reference = npw.zeros_like(result[0])
-        reference[...] = np.cos(positions)
-        tol = 0.5 * h ** 2
-        assert np.allclose(result[0][ind], reference[ind], rtol=tol)
-
-
-def test_linear_interpolation_1d_velocity_null():
-    """Linear interpolation on 1d domain"""
-    run_interp(1)
-
-
-def test_linear_interpolation_1d():
-    """Linear interpolation on 1d domain"""
-    run_interp(1, 0.5 * dx[0])
-
-
-def test_linear_interpolation_2d_velocity_null():
-    """Linear interpolation on 2d domain"""
-    run_interp(2)
-
-
-def test_linear_interpolation_2d():
-    """Linear interpolation on 2d domain"""
-    run_interp(2, 0.5 * dx[1])
-
-
-def test_linear_interpolation_3d_velocity_null():
-    """Linear interpolation on 3d domain"""
-    run_interp(3)
-
-
-def test_linear_interpolation_3d():
-    """Linear interpolation on 3d domain"""
-    run_interp(3, 0.5 * dx[2])
-
-
-def test_linear_interpolation_1d_wk_velocity_null():
-    """Linear interpolation on 1d domain, external work arrays"""
-    run_interp(1, 0., True)
-
-
-def test_linear_interpolation_1d_wk():
-    """Linear interpolation on 1d domain, external work arrays"""
-    run_interp(1, 0.5 * dx[0], True)
-
-
-def test_linear_interpolation_2d_wk_velocity_null():
-    """Linear interpolation on 2d domain, external work arrays"""
-    run_interp(2, 0., True)
-
-
-def test_linear_interpolation_2d_wk():
-    """Linear interpolation on 2d domain, external work arrays"""
-    run_interp(2, 0.5 * dx[1], True)
-
-
-def test_linear_interpolation_3d_wk_velocity_null():
-    """Linear interpolation on 3d domain, external work arrays"""
-    run_interp(3, 0., True)
-
-
-def test_linear_interpolation_3d_wk():
-    """Linear interpolation on 3d domain, external work arrays"""
-    run_interp(3, 0.5 * dx[2], True)
diff --git a/hysop/old/numerics.old/tests/test_odesolvers.py b/hysop/old/numerics.old/tests/test_odesolvers.py
deleted file mode 100755
index 7e3cd42cf5dd30a4d3f41b703206268c2ef0924a..0000000000000000000000000000000000000000
--- a/hysop/old/numerics.old/tests/test_odesolvers.py
+++ /dev/null
@@ -1,204 +0,0 @@
-"""Tests for time integrators (RK ...)
-"""
-# -*- coding: utf-8 -*-
-from hysop.numerics.odesolvers import Euler, RK2, RK3, RK4
-from hysop.tools.numpywrappers import npw
-import math
-import numpy as np
-from hysop.tools.parameters import Discretization
-from hysop import Box
-from hysop.tools.misc import WorkSpaceTools
-
-
-pi = math.pi
-sin = np.sin
-cos = np.cos
-
-# Grid resolution for tests
-nx = ny = nz = 64
-# Initial time
-start = 0.
-# Final time
-end = 0.5
-
-d1 = Discretization([nx + 1])
-box = Box(length=[2.0 * pi], origin=[0.])
-topo = box.create_topology(dim=1, discretization=d1)
-local_res = topo.mesh.local_resolution
-d3d = Discretization([nx + 1, ny + 1, nz + 1])
-box3 = Box(length=[2.0 * pi, ] * 3, origin=[0., ] * 3)
-topo3d = box3.create_topology(discretization=d3d)
-local_res_3d = topo3d.mesh.local_resolution
-
-# A set of tests and reference functions
-
-def rhs(t, y, sol):
-    sol[0][...] = -y[0]
-    return sol
-
-
-def rhs3d(t, y, work):
-    work[0][...] = y[1]
-    work[1][...] = -y[0]
-    work[2][...] = -4 * y[0] * y[1]
-    return work
-
-
-def integrate(integ, nb_steps):
-    """Execute odesolver 'integ' nb_steps times.
-
-    * integ must be an instance of an odesolver (i.e. Euler, RK2 ...).
-    * the system must be 1D.
-    """
-    t = start
-    time_points = np.linspace(start, end, nb_steps)
-    dtt = time_points[1] - time_points[0]
-    y = [npw.ones(local_res[0]) * math.exp(-start)]
-    res = [npw.zeros(local_res[0])]
-    # work = None
-    i = 1
-    ref = npw.zeros((nb_steps, local_res[0]))
-    ref[0, :] = y[0][:]
-    while i < nb_steps:
-        res = integ(t, y, dtt, res)
-        y[0][...] = res[0]
-        ref[i, :] = y[0][:]
-        t += dtt
-        i += 1
-    err = 0.0
-    for i in xrange(local_res[0]):
-        err = max(err, (np.abs(ref[:, i] - np.exp(-time_points))).max())
-    return dtt, err
-
-
-def integrate_3d(integ, nb_steps):
-    """Execute odesolver 'integ' nb_steps times.
-
-    * integ must be an instance of an odesolver (i.e. Euler, RK2 ...).
-    * 3D system
-    """
-    t = start
-    time_points = np.linspace(start, end, nb_steps)
-    dtt = time_points[1] - time_points[0]
-    # y0
-    y = [npw.zeros(local_res_3d), npw.ones(local_res_3d),
-         npw.ones(local_res_3d)]
-    res = [npw.zeros(local_res_3d), npw.zeros(local_res_3d),
-           npw.zeros(local_res_3d)]
-    # work = None
-    i = 1
-    xref = local_res_3d[0] // 3
-    yref = local_res_3d[1] // 2
-    zref = local_res_3d[2] // 5
-    ref = npw.zeros((nb_steps, 4))
-    ref[0, 1] = y[0][xref, yref, zref]
-    ref[0, 2] = y[1][xref, yref, zref]
-    ref[0, 3] = y[2][xref, yref, zref]
-    ref[:, 0] = time_points
-    while i < nb_steps:
-        res = integ(t, y, dtt, res)
-        for d in xrange(3):
-            y[d][...] = res[d]
-        ref[i, 1] = y[0][xref, yref, zref]
-        ref[i, 2] = y[1][xref, yref, zref]
-        ref[i, 3] = y[2][xref, yref, zref]
-        t += dtt
-        i += 1
-    err = np.zeros(3)
-    err[0] = (np.abs(ref[:, 1] - np.sin(time_points))).max()
-    err[1] = (np.abs(ref[:, 2] - np.cos(time_points))).max()
-    err[2] = (np.abs(ref[:, 3] - np.cos(2 * time_points))).max()
-    return dtt, err
-
-
-def run_integ(integrator, order):
-    """apply 'integrator' (== Euler, RK3 ...)
-    and check error according to input order. 1D case
-    """
-    nb_steps = 100
-    wk_prop = integrator.get_work_properties(1, topo)['rwork']
-    work = []
-    for shape in wk_prop:
-        work.append(npw.zeros(shape))
-    dt, err = integrate(integrator(nb_components=1, topo=topo,
-                                   f=rhs, rwork=work), nb_steps)
-    assert err < dt ** order
-
-
-def run_integ_3d(integrator, order):
-    """apply 'integrator' (== Euler, RK3 ...)
-    and check error according to input order. 3D case.
-
-    """
-    nb_steps = 50
-    wk_prop = integrator.get_work_properties(3, topo3d)['rwork']
-    work = []
-    for shape in wk_prop:
-        work.append(npw.zeros(shape))
-    dt, err = integrate_3d(integrator(nb_components=3, topo=topo3d,
-                                      f=rhs3d, rwork=work),
-                           nb_steps)
-    for e in err:
-        assert e < dt ** order
-
-
-def run_integ_3d_no_work(integrator, order):
-    """apply 'integrator' (== Euler, RK3 ...)
-    and check error according to input order. 3D case.
-    """
-    nb_steps = 50
-    dt, err = integrate_3d(integrator(nb_components=3, topo=topo3d,
-                                      f=rhs3d), nb_steps)
-    for e in err:
-        assert e < dt ** order
-
-
-def test_euler_1d():
-    run_integ(Euler, 1)
-
-
-def test_rk2_1d():
-    run_integ(RK2, 2)
-
-
-def test_rk3_1d():
-    run_integ(RK3, 3)
-
-
-def test_rk4_1d():
-    run_integ(RK4, 4)
-
-
-def test_euler_3d():
-    run_integ_3d(Euler, 1)
-    run_integ_3d_no_work(Euler, 1)
-
-
-def test_rk2_3d():
-    run_integ_3d(RK2, 2)
-    run_integ_3d_no_work(RK2, 2)
-
-
-def test_rk3_3d():
-    run_integ_3d(RK3, 3)
-    run_integ_3d_no_work(RK3, 3)
-
-
-def test_rk4_3d():
-    run_integ_3d(RK4, 4)
-    run_integ_3d_no_work(RK4, 4)
-
-
-def test_user_defined_common_workspace():
-    """One work space for all integrators"""
-    integrators = {Euler: 1, RK2: 2, RK3: 3, RK4: 4}
-    nb_steps = 50
-    wk_prop = {}
-    for integ in integrators:
-        wk_prop[integ] = integ.get_work_properties(3, topo3d)['rwork']
-    work = WorkSpaceTools.allocate_common_workspaces(wk_prop.values())
-    for integ in integrators:
-        dt, err = integrate_3d(integ(nb_components=3, topo=topo3d,
-                                     f=rhs3d, rwork=work), nb_steps)
-        for e in err:
-            assert e < dt ** integrators[integ]
diff --git a/hysop/old/numerics.old/tests/test_remesh.py b/hysop/old/numerics.old/tests/test_remesh.py
deleted file mode 100644
index 41446c5a8c88deade3c4840dc21ab3eb78413d2e..0000000000000000000000000000000000000000
--- a/hysop/old/numerics.old/tests/test_remesh.py
+++ /dev/null
@@ -1,196 +0,0 @@
-"""Test remesh schemes
-"""
-from hysop.numerics.remeshing import Linear, L2_1, Remeshing
-from hysop.tools.numpywrappers import npw
-from hysop.constants import EPS
-from hysop import Box, Discretization
-import numpy as np
-cos = np.cos
-
-disc = [None, ] * 3
-g = 2
-nx = 65
-ny = 49
-nz = 57
-disc[0] = Discretization([nx, ], [g, ])
-disc[1] = Discretization([nx, ny], [g, g])
-disc[2] = Discretization([nx, ny, nz], [g, g, g])
-doms = [None, ] * 3
-topos = [None, ] * 3
-rworks = [None, ] * 3
-iworks = [None, ] * 3
-for case in xrange(3):
-    dim = case + 1
-    doms[case] = Box(origin=[0.] * dim, length=[4. * np.pi, ] * dim)
-    topos[case] = doms[case].create_topology(disc[case])
-    refsize = (np.prod(topos[case].mesh.local_resolution) + 324, )
-    iworks[case] = [npw.int_zeros(refsize)]
-    rworks[case] = [npw.zeros(refsize), npw.zeros(refsize)]
-dx = [0., 0. , 0.] # topos[2].mesh.space_step
-
-
-def run_remesh(kernel, dimension, velo=0., with_work=False):
-    """Interpolate a field and compare with reference
-    (cosine function), for a given domain dimension
-    """
-    topo = topos[dimension - 1]
-    rwork, iwork = None, None
-    if with_work:
-        rwork = rworks[dimension - 1]
-        iwork = iworks[dimension - 1]
-    ind = topo.mesh.compute_index
-    lenfield = 2
-    fieldp = [None, ] * lenfield
-    result = [None, ] * lenfield
-    reference = [None, ] * lenfield
-    for direction in xrange(topo.domain.dim):
-        h = topo.mesh.space_step[direction]
-        x = topo.mesh.coords[direction]
-        reference = [npw.zeros(topo.mesh.local_resolution)
-                     for _ in xrange(lenfield)]
-        reference[0][...] = np.cos(x)
-        if lenfield > 1:
-            reference[1][...] = np.sin(x)
-        shape = topo.mesh.local_resolution.copy()
-        shape[direction] -= 2 * g
-        # note : this should work for any number of points
-        # in direction, as soon as work array is large enough
-        # + EPS to be sure that particles are (almost) on grid
-        # but on the right of the grid point when velo=0
-        positions = npw.random(shape) * velo
-        positions[...] += 10 * EPS
-        positions[...] += topo.mesh.compute_coords[direction]
-        for i in xrange(lenfield):
-            fieldp[i] = npw.zeros_like(positions)
-            result[i] = npw.zeros_like(reference[i])
-        fieldp[0][...] = np.cos(positions)
-        if lenfield > 1:
-            fieldp[1][...] = np.sin(positions)
-        remesh = Remeshing(kernel, topo_source=topo, direction=direction,
-                           rwork=rwork, iwork=iwork)
-        result = remesh(positions, fieldp, result)
-        tol = 0.5 * h ** 2
-        #plt.plot(x.flat[2:-2], result[ind], 'o-', x.flat[2:-2], reference[ind], '+-')
-        #plt.show()
-        for i in xrange(lenfield):
-            assert np.allclose(result[i][ind], reference[i][ind], rtol=tol)
-
-
-def test_linear_remesh_1d_velocity_null():
-    """Linear remesh on 1d domain"""
-    run_remesh(Linear, 1)
-
-
-def test_linear_remesh_1d():
-    """Linear remesh on 1d domain"""
-    run_remesh(Linear, 1, 0.3 * dx[0])
-
-
-def test_linear_remesh_2d_velocity_null():
-    """Linear remesh on 2d domain"""
-    run_remesh(Linear, 2)
-
-
-def test_linear_remesh_2d():
-    """Linear remesh on 2d domain"""
-    run_remesh(Linear, 2, 0.3 * dx[1])
-
-
-def test_linear_remesh_3d_velocity_null():
-    """Linear remesh on 3d domain"""
-    run_remesh(Linear, 3)
-
-
-def test_linear_remesh_3d():
-    """Linear remesh on 3d domain"""
-    run_remesh(Linear, 3, 0.3 * dx[2])
-
-
-def test_linear_remesh_1d_wk_velocity_null():
-    """Linear remesh on 1d domain, external work arrays"""
-    run_remesh(Linear, 1, 0., True)
-
-
-def test_linear_remesh_1d_wk():
-    """Linear remesh on 1d domain, external work arrays"""
-    run_remesh(Linear, 1, 0.3 * dx[0], True)
-
-
-def test_linear_remesh_2d_wk_velocity_null():
-    """Linear remesh on 2d domain, external work arrays"""
-    run_remesh(Linear, 2, 0., True)
-
-
-def test_linear_remesh_2d_wk():
-    """Linear remesh on 2d domain, external work arrays"""
-    run_remesh(Linear, 2, 0.3 * dx[1], True)
-
-
-def test_linear_remesh_3d_wk_velocity_null():
-    """Linear remesh on 3d domain, external work arrays"""
-    run_remesh(Linear, 3, 0., True)
-
-
-def test_linear_remesh_3d_wk():
-    """Linear remesh on 3d domain, external work arrays"""
-    run_remesh(Linear, 3, 0.3 * dx[2], True)
-
-
-def test_l21_remesh_1d_velocity_null():
-    """L2_1 remesh on 1d domain"""
-    run_remesh(L2_1, 1)
-
-
-def test_l21_remesh_1d():
-    """L2_1 remesh on 1d domain"""
-    run_remesh(L2_1, 1, 0.3 * dx[0])
-
-
-def test_l21_remesh_2d_velocity_null():
-    """L2_1 remesh on 2d domain"""
-    run_remesh(L2_1, 2)
-
-
-def test_l21_remesh_2d():
-    """L2_1 remesh on 2d domain"""
-    run_remesh(L2_1, 2, 0.3 * dx[1])
-
-
-def test_l21_remesh_3d_velocity_null():
-    """L2_1 remesh on 3d domain"""
-    run_remesh(L2_1, 3)
-
-
-def test_l21_remesh_3d():
-    """L2_1 remesh on 3d domain"""
-    run_remesh(L2_1, 3, 0.3 * dx[2])
-
-
-def test_l21_remesh_1d_wk_velocity_null():
-    """L2_1 remesh on 1d domain, external work arrays"""
-    run_remesh(L2_1, 1, 0., True)
-
-
-def test_l21_remesh_1d_wk():
-    """L2_1 remesh on 1d domain, external work arrays"""
-    run_remesh(L2_1, 1, 0.3 * dx[0], True)
-
-
-def test_l21_remesh_2d_wk_velocity_null():
-    """L2_1 remesh on 2d domain, external work arrays"""
-    run_remesh(L2_1, 2, 0., True)
-
-
-def test_l21_remesh_2d_wk():
-    """L2_1 remesh on 2d domain, external work arrays"""
-    run_remesh(L2_1, 2, 0.3 * dx[1], True)
-
-
-def test_l21_remesh_3d_wk_velocity_null():
-    """L2_1 remesh on 3d domain, external work arrays"""
-    run_remesh(L2_1, 3, 0., True)
-
-
-def test_l21_remesh_3d_wk():
-    """L2_1 remesh on 3d domain, external work arrays"""
-    run_remesh(L2_1, 3, 0.3 * dx[2], True)
diff --git a/hysop/old/numerics.old/tests/test_update_ghosts.py b/hysop/old/numerics.old/tests/test_update_ghosts.py
deleted file mode 100755
index 49645bb5369fd316c4451546c3e9e5b9e0f06e84..0000000000000000000000000000000000000000
--- a/hysop/old/numerics.old/tests/test_update_ghosts.py
+++ /dev/null
@@ -1,266 +0,0 @@
-"""Test ghost points synchronization
-"""
-from hysop.numerics.update_ghosts import UpdateGhosts, UpdateGhostsFull
-from hysop.domain.box import Box
-from hysop.fields.continuous_field import Field
-from hysop.tools.parameters import Discretization
-from hysop.core.mpi import main_rank
-from hysop.tools.numpywrappers import npw
-import numpy as np
-
-
-def get_gh_slices(topo):
-    """Return indices of ghost points"""
-    tab = Field(topo.domain, name='temp')
-    tabd = tab.discretize(topo)[0]
-    tabd[...] = 0.
-    tabd[topo.mesh.compute_index] = 1.
-    return np.where(tabd == 0.)
-
-
-def get_ghosts_indices(topo):
-    """Return indices of points inside the ghost layers
-    for a given topology
-
-    gh_s[d] : for points at the beginning of the domain
-    in direction d.
-    gh_e[d] : for points at the end of the domain
-    in direction d.
-
-    Corner points are not included.
-    """
-    gh_s = []
-    gh_e = []
-    ghosts = topo.ghosts()
-    sl_start = [slice(0, ghosts[d]) for d in xrange(topo.domain.dim)]
-    sl_end = [slice(-ghosts[d], None, None)
-              for d in xrange(topo.domain.dim)]
-    for j in xrange(topo.domain.dim):
-        gh_s.append(list(topo.mesh.compute_index))
-        gh_e.append(list(topo.mesh.compute_index))
-        gh_s[j][j] = sl_start[j]
-        gh_e[j][j] = sl_end[j]
-    return gh_s, gh_e
-
-
-def get_corners_indices(topo):
-    """Return indices of points inside the ghost layers
-    for a given topology
-
-    gh_s[d] : for points at the beginning of the domain
-    in direction d.
-    gh_e[d] : for points at the end of the domain
-    in direction d.
-
-    Corner points are included.
-    """
-    gh_s = []
-    gh_e = []
-    ghosts = topo.ghosts()
-    sl_start = [slice(0, ghosts[d]) for d in xrange(topo.domain.dim)]
-    sl_end = [slice(-ghosts[d], None, None)
-              for d in xrange(topo.domain.dim)]
-    for j in xrange(topo.domain.dim):
-        gh_s.append(list(topo.mesh.local_index))
-        gh_e.append(list(topo.mesh.local_index))
-        gh_s[j][j] = sl_start[j]
-        gh_e[j][j] = sl_end[j]
-
-    start, end = get_ghosts_indices(topo)
-    tab = np.zeros((topo.mesh.local_resolution), dtype=np.int16)
-    tab[topo.mesh.compute_index] = 1
-    for d in xrange(topo.domain.dim):
-        tab[start[d]] = 1
-        tab[end[d]] = 1
-    corners = np.where(tab == 0)
-
-    corners = [list(topo.mesh.local_index), ] * (2 ** (topo.domain.dim))
-
-    dimension = topo.domain.dim
-    corners_size = 2 ** dimension
-    corners = [None, ] * corners_size
-    val = npw.int_zeros(corners_size)
-    if dimension == 1:
-        return [], 0.
-    elif dimension == 2:
-        corners[0] = [sl_start[0], sl_start[1]]
-        corners[1] = [sl_start[0], sl_end[1]]
-        corners[2] = [sl_end[0], sl_start[1]]
-        corners[3] = [sl_end[0], sl_end[1]]
-        val[0] = topo.comm.Get_cart_rank(
-            [topo.proc_coords[0] - 1, topo.proc_coords[1] - 1])
-        val[1] = topo.comm.Get_cart_rank(
-            [topo.proc_coords[0] - 1, topo.proc_coords[1] + 1])
-        val[2] = topo.comm.Get_cart_rank(
-            [topo.proc_coords[0] + 1, topo.proc_coords[1] - 1])
-        val[3] = topo.comm.Get_cart_rank(
-            [topo.proc_coords[0] + 1, topo.proc_coords[1] + 1])
-    elif dimension == 3:
-        corners[0] = [sl_start[0], sl_start[1], sl_start[2]]
-        corners[1] = [sl_start[0], sl_start[1], sl_end[2]]
-        corners[2] = [sl_start[0], sl_end[1], sl_start[2]]
-        corners[3] = [sl_start[0], sl_end[1], sl_end[2]]
-        corners[4] = [sl_end[0], sl_start[1], sl_start[2]]
-        corners[5] = [sl_end[0], sl_start[1], sl_end[2]]
-        corners[6] = [sl_end[0], sl_end[1], sl_start[2]]
-        corners[7] = [sl_end[0], sl_end[1], sl_end[2]]
-        val[0] = topo.comm.Get_cart_rank(
-            [topo.proc_coords[0] - 1,
-             topo.proc_coords[1] - 1,
-             topo.proc_coords[2] - 1])
-        val[1] = topo.comm.Get_cart_rank(
-            [topo.proc_coords[0] - 1,
-             topo.proc_coords[1] - 1,
-             topo.proc_coords[2] + 1])
-        val[2] = topo.comm.Get_cart_rank(
-            [topo.proc_coords[0] - 1,
-             topo.proc_coords[1] + 1,
-             topo.proc_coords[2] - 1])
-        val[3] = topo.comm.Get_cart_rank(
-            [topo.proc_coords[0] - 1,
-             topo.proc_coords[1] + 1,
-             topo.proc_coords[2] + 1])
-        val[4] = topo.comm.Get_cart_rank(
-            [topo.proc_coords[0] + 1,
-             topo.proc_coords[1] - 1,
-             topo.proc_coords[2] - 1])
-        val[5] = topo.comm.Get_cart_rank(
-            [topo.proc_coords[0] + 1,
-             topo.proc_coords[1] - 1,
-             topo.proc_coords[2] + 1])
-        val[6] = topo.comm.Get_cart_rank(
-            [topo.proc_coords[0] + 1,
-             topo.proc_coords[1] + 1,
-             topo.proc_coords[2] - 1])
-        val[7] = topo.comm.Get_cart_rank(
-            [topo.proc_coords[0] + 1,
-             topo.proc_coords[1] + 1,
-             topo.proc_coords[2] + 1])
-
-#    print "youou ", corners
-    return corners, val
-
-
-def run_synchro(ghosts, cutdir=None, full=False):
-    """Build domain, fields and topology
-    for a given ghost layer size and
-    distributed directions.
-    Then call synchronization and check.
-    """
-    dimension = len(ghosts)
-    dom = Box(length=[1., ] * dimension)
-    f = Field(dom, is_vector=True, name='f')
-    discr = Discretization([33, ] * dimension, ghosts=ghosts)
-    topo = dom.create_topology(discr, cutdir=cutdir)
-    df = f.discretize(topo)
-    # initialize + 0 in ghost layer
-    nbc = f.nb_components
-    for i in xrange(nbc):
-        df[i][...] = main_rank
-        #df[i][topo.mesh.compute_index] = topo.rank
-
-    for i in xrange(df.nb_components):
-        assert np.allclose(df[i], main_rank)
-
-    if full:
-        sync = UpdateGhostsFull(topo, nbc)
-
-    else:
-        sync = UpdateGhosts(topo, nbc)
-
-    start, end = get_ghosts_indices(topo)
-    corners, val = get_corners_indices(topo)
-
-    sync.apply(df.data)
-    nghb = topo.neighbours_tmp
-    for i in xrange(df.nb_components):
-        assert np.allclose(df[i][topo.mesh.compute_index], main_rank)
-        for d in xrange(topo.domain.dim):
-            assert np.allclose(df[i][start[d]], nghb[d][0])
-            assert np.allclose(df[i][end[d]], nghb[d][1])
-        if full:
-            for d in xrange(len(corners)):
-                assert np.allclose(df[i][corners[d]], val[d])
-
-
-def test_1_1_simple():
-    """ 1D domain, 1D topo, simple update
-    """
-    run_synchro([1, ])
-
-
-def test_2_1_simple():
-    """ 2D domain, 1D topo, simple update
-    """
-    run_synchro([1, 1], [True, False])
-
-
-def test_2_2_simple():
-    """ 2D domain, 2D topo, simple update
-    """
-    run_synchro([1, 1])
-
-
-def test_3_1_simple():
-    """ 3D domain, 1D topo, simple update
-    """
-    run_synchro([1, 1, 1], [True, False, False])
-
-
-def test_3_2_simple():
-    """ 3D domain, 1D topo, simple update
-    """
-    run_synchro([1, 1, 1], [True, False, True])
-
-
-def test_3_3_simple():
-    """ 3D domain, 1D topo, simple update
-    """
-    run_synchro([1, 1, 1])
-
-
-def test_3_3_bis():
-    """ 3D domain, 3D topo, different ghost size in each dir
-    """
-    run_synchro([1, 3, 2])
-
-
-def test_1_1_full():
-    """ 1D domain, 1D topo, corners update
-    """
-    run_synchro([1, ], full=True)
-
-
-def test_2_1_full():
-    """ 2D domain, 1D topo, corners update
-    """
-    run_synchro([1, 1], [True, False], full=True)
-
-
-def test_2_2_full():
-    """ 2D domain, 2D topo, corners update
-    """
-    run_synchro([1, 1], full=True)
-
-
-def test_3_1_full():
-    """ 3D domain, 1D topo, corners update
-    """
-    run_synchro([1, 1, 1], [True, False, False], full=True)
-
-
-def test_3_2_full():
-    """ 3D domain, 2D topo, corners update
-    """
-    run_synchro([1, 1, 1], [True, False, True], full=True)
-
-
-def test_3_3_full():
-    """ 3D domain, 3D topo, corners update
-    """
-    run_synchro([1, 1, 1], full=True)
-
-def test_3_2_bis_full():
-    """ 3D domain, 2D topo, corners update, different ghost size in each dir
-    """
-    run_synchro([1, 3, 2], [True, False, True], full=True)
diff --git a/hysop/old/numerics.old/update_ghosts.py b/hysop/old/numerics.old/update_ghosts.py
deleted file mode 100644
index d403fa4440b72146d437a808faa6fc9362da9d92..0000000000000000000000000000000000000000
--- a/hysop/old/numerics.old/update_ghosts.py
+++ /dev/null
@@ -1,343 +0,0 @@
-"""Update ghost points of a list of numpy arrays
-for a given topology.
-
-Setup for send/recv process of ghosts points for a list
-of numpy arrays, for a given topology.
-
-.. currentmodule:: hysop.numerics.update_ghosts
-
-* :class:`~UpdateGhosts` : udpate ghosts layers in a 'classical' way
-* :class:`~UpdateGhostsFull` : update also points in the corners of the grid,
-i.e. at the intersection of ghosts points lines.
-
-Usage :
-
-.. code::
-
-    # init
-    up = UpdateGhosts(topo, 2)
-    ...
-    # update
-    up([field1, field2])
-
-"""
-from hysop.constants import debug, PERIODIC, hysop.core.mpi_REAL
-from hysop.tools.numpywrappers import npw
-import numpy as np
-
-
-class UpdateGhosts(object):
-    """Ghost points synchronization for a list of numpy arrays
-    """
-
-    @debug
-    def __init__(self, topo, nb_elements):
-        """
-        Parameters
-        ----------
-        topo : :class:`hysop.topology.topology.CartesianTopology`
-            data and mpi process distribution
-        nb_elements : int
-            number of arrays that will be update
-            at each call.
-
-        Notes
-        ------
-        * nb_elements and topo.mesh.local_resolution will be used to
-          allocate memory for local buffers used for send-recv process.
-        """
-        # The mpi topology and mesh distribution
-        self.topology = topo
-        # Ghost layer
-        self.ghosts = self.topology.mesh.discretization.ghosts
-        # Indices of points to be filled from previous neighbour
-        # Each component is a slice and corresponds to a direction in
-        # the topology grid.
-        self._g_fromprevious = []
-        # Indices of points to be filled from next neighbour.
-        self._g_fromnext = []
-        # Indices of points to be sent to next neighbour.
-        self._g_tonext = []
-        # Indices of points to be sent to previous neighbour.
-        self._g_toprevious = []
-        # Buffers that contains ghosts points to be sent
-        self._sendbuffer = None
-        # List of sizes of buffer to be sent/recv in each direction
-        self._buffer_size = []
-        # Buffers for reception
-        self._recvbuffer = None
-        # domain dimension
-        self._dim = self.topology.domain.dim
-
-        self._setup_slices()
-
-        # shape of numpy arrays to be updated.
-        self.memshape = tuple(self.topology.mesh.local_resolution)
-        # length of memory required to save one numpy array
-        self._memoryblocksize = np.prod(self.memshape)
-        # Number of numpy arrays that will be updated
-        self.nb_elements = nb_elements
-
-        # list of functions, _apply[d] corresponds to the synchronization
-        # used in direction d, either
-        #  - local (non-distributed) --> apply BC
-        #  - distributed --> mpi comm
-        self._apply = [None, ] * self._dim
-        # distributed (mpi) directions
-        self.exchange_dir = []
-        if self.topology.size > 1:
-            self.exchange_dir = [d for d in xrange(self._dim)
-                                 if self.topology.cutdir[d]]
-        for d in self.exchange_dir:
-            self._apply[d] = self._apply_in_dir
-        for d in self.topology.bc_dirs:
-            self._apply[d] = self._apply_bc_in_dir
-
-        if self.topology.size > 1:  # else no need to set buffers ...
-            # Size computation below assumes that what we send in one
-            # dir has the same size as what we receive from process in the
-            # same dir ...
-            # A temporary array used to calculate slices sizes
-            temp = np.zeros(self.memshape, dtype=np.int8)
-
-            for d in self.exchange_dir:
-                buffsize = 0
-                buffsize += temp[self._g_tonext[d]].size * self.nb_elements
-                self._buffer_size.append(buffsize)
-            self._recvbuffer = npw.zeros(max(self._buffer_size))
-            self._sendbuffer = npw.zeros(max(self._buffer_size))
-
-    def _setup_slices(self):
-        """Compute slices used to describe what to send
-        and receive in ghosts points.
-        """
-        # all points
-        defslice = [slice(None, None, None)] * self._dim
-        # no points
-        nogh_slice = [slice(0)] * self._dim
-        for d in xrange(self._dim):
-            if self.ghosts[d] > 0:
-                # get ghost points indices in current direction ...
-                self._g_fromprevious.append(list(defslice))
-                self._g_fromprevious[d][d] = slice(self.ghosts[d])
-                self._g_fromnext.append(list(defslice))
-                self._g_fromnext[d][d] = slice(-self.ghosts[d], None, None)
-                self._g_toprevious.append(list(defslice))
-                self._g_toprevious[d][d] = slice(self.ghosts[d],
-                                                 2 * self.ghosts[d], None)
-                self._g_tonext.append(list(defslice))
-                self._g_tonext[d][d] = slice(-2 * self.ghosts[d],
-                                             -self.ghosts[d])
-
-                other_dim = self._other_dim(d)
-                # ... and only compute points indices in other directions
-                # i.e. 'corner' points are not taken into account.
-                for d2 in other_dim:
-                    self._g_fromprevious[d][d2] = slice(self.ghosts[d2],
-                                                        -self.ghosts[d2])
-                    self._g_fromnext[d][d2] = slice(self.ghosts[d2],
-                                                    -self.ghosts[d2])
-                    self._g_toprevious[d][d2] = slice(self.ghosts[d2],
-                                                      -self.ghosts[d2])
-                    self._g_tonext[d][d2] = slice(self.ghosts[d2],
-                                                  -self.ghosts[d2])
-            else:
-                # empty lists of indices
-                self._g_fromprevious.append(list(nogh_slice))
-                self._g_fromnext.append(list(nogh_slice))
-                self._g_toprevious.append(list(nogh_slice))
-                self._g_tonext.append(list(nogh_slice))
-
-        self._g_fromprevious = self._immutable(self._g_fromprevious)
-        self._immutable(self._g_fromnext)
-        self._immutable(self._g_toprevious)
-        self._immutable(self._g_tonext)
-
-    @staticmethod
-    def _immutable(var):
-        """Convert list of lists to immutable tuples
-        """
-        assert isinstance(var, list)
-        for i in xrange(len(var)):
-            var[i] = tuple(var[i])
-        return tuple(var)
-
-    def _other_dim(self, d):
-        """utility to get all directions except d
-        """
-        return [x for x in xrange(self._dim) if x != d]
-
-    def __call__(self, variables):
-        return self.apply(variables)
-
-    def apply_bc(self, variables):
-        """Apply boundary conditions for non-distributed directions (only).
-
-        Parameters
-        ----------
-        variables : a list of ndarrays
-
-        Note that in distributed directions, BC are automatically set
-        during ghosts update (apply).
-        """
-        assert (self.topology.domain.boundaries == PERIODIC).all(),\
-            'Only implemented for periodic boundary conditions.'
-        assert isinstance(variables, list)
-        dirs = self.topology.bc_dirs
-        for d in dirs:
-            self._apply_bc_in_dir(variables, d, 0)
-
-    def _apply_bc_in_dir(self, variables, d, i):
-        """Apply periodic boundary condition in direction d."""
-        assert (self.topology.domain.boundaries == PERIODIC).all(),\
-            'Only implemented for periodic boundary conditions.'
-        for v in variables:
-            assert v.shape == self.memshape
-            v[self._g_fromprevious[d]] = v[self._g_tonext[d]]
-            v[self._g_fromnext[d]] = v[self._g_toprevious[d]]
-        return i
-    # @debug
-    # def apply(self, variables):
-    #     """Update ghosts points values:
-    #     mpi communications and boundary conditions.
-
-    #     Parameters
-    #     ----------
-    #     variables : list of numpy arrays
-    #         arrays that must be synchronized.
-    #     """
-    #     assert isinstance(variables, list)
-    #     # Apply boundary conditions for distributed directions ...
-    #     i = 0
-    #     for d in self.exchange_dir:
-    #         self._apply_in_dir(variables, d, i)
-    #         # update position in buffers list
-    #         i += 1
-    #     #  ... and for non-distributed directions.
-    #     self.apply_bc(variables)
-
-    @debug
-    def apply(self, variables):
-        """Update ghosts points values:
-        mpi communications and boundary conditions.
-
-        Parameters
-        ----------
-        variables : list of numpy arrays
-            arrays that must be synchronized.
-        """
-        assert isinstance(variables, list)
-        # Apply boundary conditions all directions ...
-        i = 0
-        for d in xrange(self._dim):
-            i = self._apply[d](variables, d, i)
-
-    def _apply_in_dir(self, variables, d, i):
-        """Communicate ghosts values in direction d for neighbour
-        in direction i of the topology
-
-        Parameters
-        ----------
-        variables : list of numpy arrays
-            arrays to be synchronized
-        d : int
-            direction of synchronization
-        i : int
-            counter, current position in buffers.
-        """
-        comm = self.topology.comm
-        rank = self.topology.rank
-        neighbours = self.topology.neighbours_tmp[d]
-        # 1 - Fill in buffers
-        # Loop through all variables that are distributed
-        pos = 0
-        nextpos = 0
-        for v in variables:
-            assert v.shape == self.memshape
-            nextpos += v[self._g_tonext[d]].size
-            self._sendbuffer[pos:nextpos] = v[self._g_tonext[d]].flat
-            pos = nextpos
-
-        # 2 - Send to next receive from previous
-        dest_rk = neighbours[1]
-        from_rk = neighbours[0]
-        sendbuffer = self._sendbuffer[:self._buffer_size[i]]
-        recvbuffer = self._recvbuffer[:self._buffer_size[i]]
-
-        comm.Sendrecv([sendbuffer, hysop.core.mpi_REAL],
-                      dest=dest_rk, sendtag=rank,
-                      recvbuf=recvbuffer,
-                      source=from_rk, recvtag=from_rk)
-
-        # 3 - fill variables with recvbuffer and update sendbuffer
-        # for next send
-        pos = 0
-        nextpos = 0
-        for v in variables:
-            nextpos += v[self._g_fromprevious[d]].size
-            v[self._g_fromprevious[d]].flat = \
-                self._recvbuffer[pos:nextpos]
-            self._sendbuffer[pos:nextpos] = \
-                v[self._g_toprevious[d]].flat
-            pos = nextpos
-
-        # 4 -Send to previous and receive from next
-        dest_rk = neighbours[0]
-        from_rk = neighbours[1]
-        comm.Sendrecv([sendbuffer, hysop.core.mpi_REAL],
-                      dest=dest_rk, sendtag=rank,
-                      recvbuf=recvbuffer,
-                      source=from_rk, recvtag=from_rk)
-        # 5 - fill variables with recvbuffer
-        pos = 0
-        nextpos = 0
-        for v in variables:
-            nextpos += v[self._g_fromprevious[d]].size
-            v[self._g_fromnext[d]].flat = \
-                self._recvbuffer[pos:nextpos]
-            pos = nextpos
-
-        # update position in buffers list
-        return i + 1
-
-
-class UpdateGhostsFull(UpdateGhosts):
-    """Ghost points synchronization for a list of numpy arrays
-
-    Notes
-    -----
-    * This version differs from UpdateGhosts
-    by computing also ghosts values
-    in edges and corners of the domain.
-    The directions are computed in reversed order.
-
-    """
-
-    def _other_dim(self, d):
-        # Slices for other directions corresponding to directions not
-        # yet exchanged : x > d. For directions x < d, the slices is a
-        # full slice that includes ghosts. This assumes that directions
-        # x < d have already been computed (by communications or local
-        # exchanges)
-        #                if d == 0:
-        return [x for x in xrange(self._dim) if x > d]
-
-    # @debug
-    # def apply(self, variables):
-    #     """Apply either mpi communications
-    #     or local boundary conditions to fill ghosts.
-    #     Loop over directions and switch among local BC or mpi comm.
-    #     """
-    #     assert isinstance(variables, list)
-    #     local_bc_dir = self.topology.bc_dirs
-    #     assert len(self.exchange_dir) + len(local_bc_dir) == self._dim
-
-    #     i = 0
-    #     for d in xrange(self._dim):
-    #         if d in local_bc_dir:
-    #             self._apply_bc_in_dir(variables, d)
-    #         elif d in self.exchange_dir:
-    #             self._apply_in_dir(variables, d, i)
-    #             # update position in buffers list
-    #             i += 1
-    #             # End of loop through send/recv directions.
diff --git a/hysop/old/numerics.old/utils.py b/hysop/old/numerics.old/utils.py
deleted file mode 100644
index 727a05348327ee775a1f3dfce57d6fc8099a8e3e..0000000000000000000000000000000000000000
--- a/hysop/old/numerics.old/utils.py
+++ /dev/null
@@ -1,77 +0,0 @@
-"""Tools (static methods) to manage slices or arrays.
-
-.. currentmodule hysop.numerics.utils
-
-
-"""
-from hysop.constants import XDIR, YDIR, ZDIR
-from hysop.tools.numpywrappers import npw
-import numpy as np
-
-
-class Utils(object):
-
-    i1 = [YDIR, ZDIR, XDIR]
-    i2 = [ZDIR, XDIR, YDIR]
-    gen_indices = zip(i1, i2)
-
-    @staticmethod
-    def sum_cross_product(x, y, sl, work):
-        """
-        Parameters
-        ----------
-        x : a tuple of arrays
-            represents grid coordinates (like coords in mesh)
-        y : list of numpy arrays
-            represents a discrete field
-        sl : list of slices
-            mesh points indices (like topo.mesh.compute_index)
-        work: numpy array
-            temporary buffer
-
-        Sum on a volume (defined by sl) of cross products of
-        x with y at each grid point. Result in work.
-        """
-        current_dir = 0
-        dim = len(y)
-        assert work.size == y[0][sl].size
-        res = npw.zeros(dim)
-        for (i, j) in Utils.gen_indices:
-            np.multiply(x[i], y[j][sl], work)
-            res[current_dir] = npw.real_sum(work)
-            np.multiply(x[j], y[i][sl], work)
-            res[current_dir] -= npw.real_sum(work)
-            current_dir += 1
-        return res
-
-    @staticmethod
-    def sum_cross_product_2(x, y, ind, work):
-        current_dir = 0
-        res = npw.zeros(3)
-        ilist = np.where(ind)
-        nb = len(ilist[0])
-        for (i, j) in Utils.gen_indices:
-            work.flat[:nb] = x[i].flat[ilist[i]] * y[j][ind]\
-                - x[j].flat[ilist[j]] * y[i][ind]
-            res[current_dir] = npw.real_sum(work.flat[:nb])
-            current_dir += 1
-        return res
-
-    @staticmethod
-    def sum_cross_product_3(x, y, ind):
-        """
-        Integrate over the control box using python loops.
-        ---> wrong way, seems to be really slower although
-        it costs less in memory.
-        Used only for tests (timing).
-        """
-        ilist = np.where(ind)
-        res = npw.zeros(3)
-        for(ix, iy, iz) in zip(ilist[0], ilist[YDIR], ilist[ZDIR]):
-            res[XDIR] += x[YDIR][0, iy, 0] * y[ZDIR][ix, iy, iz]\
-                - x[ZDIR][0, 0, iz] * y[YDIR][ix, iy, iz]
-            res[YDIR] += x[ZDIR][0, 0, iz] * y[XDIR][ix, iy, iz]\
-                - x[XDIR][ix, 0, 0] * y[ZDIR][ix, iy, iz]
-            res[ZDIR] += x[XDIR][ix, 0, 0] * y[YDIR][ix, iy, iz]\
-                - x[YDIR][0, iy, 0] * y[XDIR][ix, iy, iz]
-        return res
diff --git a/hysop/old/operator.old/__init__.py b/hysop/old/operator.old/__init__.py
deleted file mode 100644
index 5fd789f846149728d69238261e3c35fc637070f3..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/__init__.py
+++ /dev/null
@@ -1,3 +0,0 @@
-"""Package for operators (discrete and continuous) description
-
-"""
diff --git a/hysop/old/operator.old/adapt_timestep.py b/hysop/old/operator.old/adapt_timestep.py
deleted file mode 100755
index 91308f8a1ae961a5097145e2fd0805c95fdee0c2..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/adapt_timestep.py
+++ /dev/null
@@ -1,186 +0,0 @@
-# -*- coding: utf-8 -*-
-"""Update time-step, depending on the flow field.
-
-See :ref:`adaptive_time_step` for details.
-
-"""
-from hysop.constants import debug
-from hysop.methods import TimeIntegrator, SpaceDiscretization
-from hysop.operator.discrete.adapt_timestep import AdaptiveTimeStepD
-from hysop.operator.continuous import opsetup
-from hysop.operator.computational import Computational
-import hysop.default_methods as default
-from hysop.core.mpi import main_comm, MPI
-from hysop.numerics.differential_operations import MaxDiagGradV, \
-    DiagAndStretch, StrainCriteria, StretchLike, StrainAndStretch
-
-
-class AdaptiveTimeStep(Computational):
-    """The adaptative Time Step is computed according
-    to the following expression :
-
-    dt_adapt = min (dt_advection, dt_stretching, dt_cfl)
-
-    """
-
-    authorized_criteria = ['vort', 'gradU', 'stretch', 'cfl', 'strain']
-
-    @debug
-    def __init__(self, velocity, vorticity, simulation,
-                 lcfl=0.125, cfl=0.5, time_range=None,
-                 maxdt=9999., criteria=None, **kwds):
-        """
-        Parameters
-        ----------
-        velocity, vorticity : :class:`~hysop.fields.continuous_field.DiscreteFields`
-            fields used to update the time steps. Read only.
-        simulation : :class:`hysop.fields.problem.simulation.Simulation`
-            time loop parameters description
-        lcfl : double, optional
-            the lagrangian CFL coefficient used for advection stability
-        cfl : double, optional
-            CFL coefficient
-        criteria : list of strings, optional
-            name of the criteria used to compute the new time step.
-            See notes below.
-        time_range : list of two integers, optional
-            use to define a 'window' in which the current operator is applied.
-            time_range = [start, end], outside this range, the operator
-            has no effect. Start/end are iteration numbers.
-            Default = [2, end of simu]
-        maxdt : double, optional
-            maximum value allowed for time step. Default = 9999.
-        kwds : arguments passed to base class.
-
-        Notes
-        -----
-       * This operator has no effect on input variables.
-        * Authorized criteria are:
-            * criteria for the advection scheme, one of :
-                1. 'vort' (default)
-                2. 'gradU'
-                3. 'strain'
-            * criteria for the stretching scheme :
-                4. 'stretch'
-            * cfl-like :
-                5. 'cfl'
-        * Computes a 'diagnostics' vector :
-          diagnostics = (time, time_step, c1, c2, ...)
-          ci = time step computed from the criterium number i in the list
-          above.
-        * dt_adapt = min(diagnostics, dtmax)
-        """
-        assert 'variables' not in kwds, 'variables parameter is useless.'
-        super(AdaptiveTimeStep, self).__init__(variables=[velocity, vorticity],
-                                               **kwds)
-        if self.method is None:
-            self.method = default.ADAPT_TIME_STEP
-        assert SpaceDiscretization in self.method.keys()
-        assert TimeIntegrator in self.method.keys()
-        # Definition of criterion for dt_advec computation
-        if criteria is None:
-            criteria = ['vort']
-        msg = 'criteria arg must be a list.'
-        assert isinstance(criteria, list), msg
-        msg = 'Unknow criteria : '
-        for cr in criteria:
-            assert cr in self.authorized_criteria, msg + cr
-        msg = 'criteria list is empty!'
-        assert len(criteria) >= 1, msg
-        unknowns = [k for k in criteria if k not in self.authorized_criteria]
-        msg = 'Unknown criteria ' + str(unknowns)
-        assert len(unknowns) == 0, msg
-
-        # list of criteria used to compute dt
-        self.criteria = criteria
-
-        # velocity variable (vector)
-        self.velocity = velocity
-        # vorticity variable (vector)
-        self.vorticity = vorticity
-        # simulation
-        self.simulation = simulation
-
-        self.input = self.variables
-        self.output = []
-        self.time_range = time_range
-        self.lcfl, self.cfl, self.maxdt = lcfl, cfl, maxdt
-        self._intercomms = {}
-        self._set_inter_comm()
-
-    def _set_inter_comm(self):
-        """Create intercommunicators, if required (i.e. if there are several
-        tasks defined in the domain).
-        """
-        task_is_source = self.mpi_params.task_id == self.domain.current_task()
-        tasks_list = self.domain.tasks_list()
-        others = (v for v in tasks_list if v != self.mpi_params.task_id)
-        if task_is_source:
-            remote_leader = set([tasks_list.index(i) for i in others])
-        else:
-            remote_leader = set([tasks_list.index(self.mpi_params.task_id)])
-
-        for rk in remote_leader:
-            self._intercomms[rk] = self.domain.task_comm.Create_intercomm(
-                0, main_comm, rk)
-
-    def get_work_properties(self):
-        super(AdaptiveTimeStep, self).get_work_properties()
-        diffop = None
-        topo = self.discrete_fields[self.velocity].topology
-        if 'gradU' in self.criteria:
-            diffop = MaxDiagGradV
-        if 'stretch' in self.criteria:
-            diffop = StretchLike
-        if 'strain' in self.criteria:
-            diffop = StrainCriteria
-        if 'stretch' in self.criteria and 'gradU' in self.criteria:
-            diffop = DiagAndStretch
-        if 'stretch' in self.criteria and 'strain' in self.criteria:
-            diffop = StrainAndStretch
-        if diffop is not None:
-            return diffop.get_work_properties(topo)
-        else:
-            return {'rwork': None, 'iwork': None}
-
-    def discretize(self):
-        nb_ghosts = self.method[SpaceDiscretization].ghosts_layer_size
-        super(AdaptiveTimeStep, self)._standard_discretize(nb_ghosts)
-
-    @debug
-    @opsetup
-    def setup(self, rwork=None, iwork=None):
-        self.discrete_op =\
-            AdaptiveTimeStepD(self.discrete_fields[self.velocity],
-                              self.discrete_fields[self.vorticity],
-                              self.simulation, method=self.method,
-                              time_range=self.time_range,
-                              criteria=self.criteria,
-                              lcfl=self.lcfl,
-                              cfl=self.cfl,
-                              maxdt=self.maxdt,
-                              rwork=rwork, iwork=iwork)
-        # Output setup
-        self._set_io('dt_adapt', (1, 7))
-        self.discrete_op.set_writer(self._writer)
-        self._is_uptodate = True
-
-    def wait(self):
-        task_is_source = self.mpi_params.task_id == self.domain.current_task()
-        rank = self.mpi_params.rank
-        dt = self.simulation.time_step
-        for rk in self._intercomms:
-            if task_is_source:
-                # Local 0 broadcast current_indices to remote comm
-                if rank == 0:
-                    self._intercomms[rk].bcast(dt, root=MPI.ROOT)
-                else:
-                    self._intercomms[rk].bcast(dt, root=MPI.PROC_NULL)
-            else:
-                dt = self._intercomms[rk].bcast(dt, root=0)
-                self.simulation.update_time_step(dt)
-
-    def diagnostics(self):
-        """Get the list of computed dt (for each criteria)
-        """
-        return self.discrete_op.diagnostics
diff --git a/hysop/old/operator.old/advection.py b/hysop/old/operator.old/advection.py
deleted file mode 100644
index 9e34270165de98a38977c1fae0f43372057193f6..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/advection.py
+++ /dev/null
@@ -1,580 +0,0 @@
-"""Advection of a field.
-
-See :ref:`advection` in user guide.
-"""
-from __future__ import print_function
-from abc import ABCMeta, abstractmethod
-from hysop.constants import debug, DirectionLabels, ZDIR
-from hysop import __SCALES_ENABLED__, Field, __GPU_ENABLED__
-from hysop.operator.computational import Computational
-from hysop.methods import TimeIntegrator, Interpolation,\
-    Remesh, Support, Splitting, MultiScale
-from hysop.numerics.remeshing import Remeshing, L2_1
-from hysop.operator.continuous import opsetup, opapply
-import hysop.default_methods as default
-from hysop.tools.parameters import Discretization
-from hysop.topology.cartesian_topology import CartesianTopology
-from hysop.tools.numpywrappers import npw
-from hysop.problem.simulation import O2, SplittingParameters
-from hysop.operator.discrete.particle_advection import ParticleAdvection
-import numpy as np
-if __GPU_ENABLED__:
-    from hysop.backend.device.opencl.gpu_particle_advection import GPUParticleAdvection
-    from hysop.backend.device.opencl.multi_gpu_particle_advection import MultiGPUParticleAdvection
-if __SCALES_ENABLED__:
-    from hysop.f2hysop import scales2py as scales
-
-
-class AdvectionBase(Computational):
-    """Abstract interface to advection operators
-    """
-    __metaclass__ = ABCMeta
-
-    @debug
-    def __init__(self, velocity, advected_fields=None,
-                 discretization_fields=None, **kwds):
-        """
-        Parameters
-        ----------
-        velocity : :class:`hysop.field.continuous.Field`
-            the velocity field
-        advected_field: a list of :class:`hysop.field.continuous.Field`,
-         optional
-            fields to be advected
-        discretization_fields : :class:`hysop.topology.topology.CartesianTopology`
-         or :class:`tools.parameters.Discretization`
-            Defined the data/mpi distribution for advected_fields.
-            Default = same as velocity
-        kwds : base class arguments.
-
-        Notes
-        -----
-        * one single discretization for all advected fields
-        * velocity discretization may be different
-        * to limit advection to one or two directions, set
-          directions=[0, 2] (advection in x and z dirs).
-
-        """
-        # --- check inputs for fields (advected and velocity) ---
-        msg = 'discretization argument is required in Advection.'
-        assert 'discretization' in kwds, msg
-        msg = 'Wrong input for advection operator:'
-        msg += ' "variables" attribute is not allowed. Check manual.'
-        assert 'variables' not in kwds, msg
-        # advected_fields :
-        # == field or [field1, field2, ...] ==> same discretization for all,
-        #  equal to velocity discretization if discretization_fields is None
-        self.advected_fields = []
-        if advected_fields is not None:
-            if not isinstance(advected_fields, list):
-                assert isinstance(advected_fields, Field)
-                advected_fields = [advected_fields]
-            self.advected_fields = advected_fields
-        # velocity field
-        if discretization_fields is not None:
-            dfields = discretization_fields
-        else:
-            dfields = kwds['discretization']
-        variables = {velocity: kwds['discretization']}
-        for field in advected_fields:
-            variables[field] = dfields
-        kwds.pop('discretization')
-        super(AdvectionBase, self).__init__(variables=variables, **kwds)
-        # velocity field
-        self.velocity = velocity
-        assert velocity.nb_components == self.domain.dim
-
-        # --- Set default method, if required ---
-        if self.method is None:
-            self.method = default.ADVECTION
-
-        # --- Set name ---
-        vars_str = "_("
-        for vv in self.advected_fields:
-            vars_str += vv.name + ","
-        self.name += vars_str[0:-1] + ')'
-
-        self.config = {}
-        self.input = [var for var in self.variables]
-        self.output = [var for var in self.variables
-                       if var is not self.velocity]
-
-    def get_work_properties(self):
-        return {'rwork': None, 'iwork': None}
-
-    @opsetup
-    def setup(self, rwork=None, iwork=None):
-        if self._is_uptodate:
-            return
-        # select discretization of the advected fields
-        advected_discrete_fields = [self.discrete_fields[v]
-                                    for v in self.variables
-                                    if v is not self.velocity]
-        toporef = advected_discrete_fields[0].topology
-        msg = 'All advected fields must have the same topology.'
-        for f in advected_discrete_fields:
-            assert f.topology == toporef, msg
-
-        # call specific setup (see derived class)
-        self._setup(advected_discrete_fields, rwork, iwork)
-        self._is_uptodate = True
-
-    def advected_fields_topology(self):
-        """Returns the topology used for advected fields
-        """
-        msg = 'Operator must be discretized'
-        assert self._is_discretized, msg
-        return self.discrete_fields[self.advected_fields[0]].topology
-
-    def velocity_topology(self):
-        """Returns the topology used for velocity
-        """
-        msg = 'Operator must be discretized'
-        assert self._is_discretized, msg
-        return self.discrete_fields[self.velocity].topology
-
-    @abstractmethod
-    def _setup(self, advected_discrete_fields, rwork=None, iwork=None):
-        """Setup specific to advection implementation (see derived class)
-        """
-
-
-class Advection(AdvectionBase):
-    """Python or GPU advection
-    """
-    @debug
-    def __init__(self, directions=None, **kwds):
-        """
-        Parameters
-        ----------
-        directions : list of int, optional
-            direction(s) in which advection must be performed. Default = all.
-        kwds : base class arguments.
-
-        """
-        super(Advection, self).__init__(**kwds)
-        self.extra_args = {} ### TO BE REVIEWED ###
-        if directions is None:
-            directions = [i for i in xrange(self.domain.dim)]
-        assert isinstance(directions, list)
-        self.directions = directions
-        assert TimeIntegrator in self.method.keys()
-        assert Interpolation in self.method.keys()
-        assert Remesh in self.method.keys()
-        if Splitting not in self.method.keys():
-            self.method[Splitting] = O2
-        self.discrete_op = [None] * self.domain.dim
-
-        # Fields on particles
-        self.particle_fields = None
-
-        # Positions of the particles
-        self.particle_positions = None
-
-        # number of components of the rhs (time integration)
-        self._rhs_size = 1
-
-        # check if advection is done on gpu
-        if Support in self.method:
-            self._gpu_advection = self.method[Support].find('gpu') >= 0
-        else:
-            self._gpu_advection = False
-        self.splitting = None
-        self._old_dir = 0
-
-    def discretize(self):
-        """Discretization (create topologies and discretize fields)
-        """
-        if self._is_discretized:
-            return
-        cutdir = [False] * self.domain.dim
-        cutdir[-1] = True
-        # call standard discretization (from computational base class)
-        self._standard_discretize(cutdir=cutdir)
-        # check if velocity and fields are defined with the same resolution
-        if self._single_topo:
-            self.method[MultiScale] = None
-        else:
-            # set a default value for interpolation method
-            if MultiScale not in self.method or\
-               self.method[MultiScale] is None:
-                self.method[MultiScale] = L2_1
-            mscale = self.method[MultiScale]
-            min_ghosts = mscale.ghosts_layer_size
-            # get ghost layer size used for velocity field
-            ghosts_v = self.variables[self.velocity].ghosts()
-            # and check if it fits with interpolation method
-            msg = 'Ghost layer required for velocity. Size min = '
-            msg += str(min_ghosts) + " (" + str(ghosts_v) + " given)"
-            assert (ghosts_v >= min_ghosts).all(), msg
-        self._is_discretized = True
-
-    def get_work_properties(self):
-        assert self._is_discretized
-        # Shape of reference comes from fields, not from velocity
-        advected_discrete_fields = [self.discrete_fields[v]
-                                    for v in self.variables
-                                    if v is not self.velocity]
-        topo = advected_discrete_fields[0].topology
-        # Find number and shape of required work arrays
-        if not self._gpu_advection:
-            # -- pure python advection --
-            #  work array shape depends on the time integrator
-            #  interpolation scheme and remeshing scheme
-            ti_work = self.method[TimeIntegrator].get_work_properties(
-                self._rhs_size, topo)
-            ti_rwork_length = len(ti_work['rwork'])
-            iw_prop = self.method[Interpolation].get_work_properties(topo)
-            rw_prop = Remeshing.get_work_properties(topo)
-            interp_iwork_length = len(iw_prop['iwork'])
-            interp_rwork_length = len(iw_prop['rwork'])
-            remesh_iwork_length = len(rw_prop['iwork'])
-            remesh_rwork_length = len(rw_prop['rwork'])
-            iwork_length = max(interp_iwork_length, remesh_iwork_length)
-            rwork_length = max(ti_rwork_length + interp_rwork_length,
-                               remesh_rwork_length)
-
-        else:
-            # -- GPU advection --
-            # no work array
-            iwork_length, rwork_length = 0, 0
-
-        # buffers for fields on particles
-        rwork_length += np.sum([f.nb_components for f in self.advected_fields])
-        if not self._gpu_advection or \
-           self.method[Support].find('gpu_2k') >= 0:
-            rwork_length += 1  # work array for positions
-        memsize = np.prod(topo.mesh.local_resolution)
-        return {'rwork': [(memsize,)] * rwork_length,
-                'iwork': [(memsize,)] * iwork_length}
-
-    def _setup(self, advected_discrete_fields, rwork=None, iwork=None):
-        for i in self.directions:
-            ref_shape = advected_discrete_fields[0].topology.shape[i]
-            if self._gpu_advection:
-                if ref_shape == 1:
-                    DA = GPUParticleAdvection
-                else:
-                    DA = MultiGPUParticleAdvection
-            else:
-                DA = ParticleAdvection
-
-            self.discrete_op[i] = DA(
-                velocity=self.discrete_fields[self.velocity],
-                fields_on_grid=advected_discrete_fields,
-                direction=i, method=self.method,
-                rwork=rwork, iwork=iwork)
-                #**self.extra_args)
-
-            if i == 0:
-                # work arrays can be shared between directions.
-                rwork = self.discrete_op[i]._rwork
-                iwork = self.discrete_op[i]._iwork
-
-        # set splitting parameters (depends on method)
-        self.splitting = self.method[Splitting](self.domain.dim)
-        assert isinstance(self.splitting, SplittingParameters)
-
-        # configure gpu
-        if self._gpu_advection:
-            self._configure_gpu()
-
-    def _configure_gpu(self):
-        """Setup for gpu related things (Memory ...)
-        """
-        splitting_nbSteps = len(self.splitting)
-        for d in xrange(self.domain.dim):
-            dOp = self.discrete_op[d]
-            assert len(dOp.exec_list) == splitting_nbSteps, \
-                "Discrete operator execution " + \
-                "list and splitting steps sizes must be equal " + \
-                str(len(dOp.exec_list)) + " != " + \
-                str(splitting_nbSteps)
-        s = ""
-        device_id = self.discrete_op[0].cl_env._device_id
-        gpu_comm = self.discrete_op[0].cl_env.gpu_comm
-        gpu_rank = gpu_comm.Get_rank()
-        if gpu_rank == 0:
-            s += "=== OpenCL buffers allocated"
-            s += " on Device:{0} ===\n".format(device_id)
-            s += "Global memory used:\n"
-        total_gmem = 0
-        for d in xrange(self.domain.dim):
-            g_mem_d = 0
-            # allocate all variables in advec_dir
-            for df in self.discrete_op[d].variables:
-                if not df.gpu_allocated:
-                    df.allocate()
-                    g_mem_df = gpu_comm.allreduce(df.mem_size)
-                    g_mem_d += g_mem_df
-            if gpu_rank == 0:
-                s += " Advection" + DirectionLabels[d] + ": {0:9d}".format(g_mem_d)
-                s += "Bytes ({0:5d} MB)\n".format(g_mem_d / (1024 ** 2))
-            total_gmem += g_mem_d
-        if gpu_rank == 0:
-            s += " Total      : {0:9d}".format(total_gmem)
-            s += "Bytes ({0:5d} MB)\n".format(total_gmem / (1024 ** 2))
-            s += "Local memory used:\n"
-        total_lmem = 0
-        for d in xrange(self.domain.dim):
-            l_mem_d = gpu_comm.allreduce(
-                self.discrete_op[d].size_local_alloc)
-            if gpu_rank == 0:
-                s += " Advection" + DirectionLabels[d] + ": {0:9d}".format(l_mem_d)
-                s += "Bytes ({0:5d} MB)\n".format(l_mem_d / (1024 ** 2))
-            total_lmem += l_mem_d
-        if gpu_rank == 0:
-            s += " Total      : {0:9d}".format(total_lmem) + "Bytes"
-            print(s)
-
-    @debug
-    @opapply
-    def apply(self, simulation=None):
-        """Redefinition of apply for advection --> dimensional splitting.
-
-        Parameters
-        ----------
-        simulation : `:class::~hysop.problem.simulation.Simulation`
-
-        """
-        assert simulation is not None
-        for split_id, split in enumerate(self.splitting()):
-            simulation.set_split(split_id, split)
-            self.discrete_op[simulation.current_dir].apply(simulation)
-            #, split[1], split_id, self._old_dir)
-            simulation.next_split()
-
-    @debug
-    def finalize(self):
-        """Memory cleaning.
-        """
-        for i in self.directions:
-            self.discrete_op[i].finalize()
-
-    def get_profiling_info(self):
-        if self._is_uptodate:
-            for d in self.directions:
-                self.profiler += self.discrete_op[d].profiler
-
-    def __str__(self):
-        """
-        Common printings for operators
-        """
-        short_name = str(self.__class__).rpartition('.')[-1][0:-2]
-        for i in self.directions:
-            if self.discrete_op[i] is not None:
-                s = str(self.discrete_op[i])
-            else:
-                s = short_name + " operator. Not discretised."
-        return s + "\n"
-
-
-class ScalesAdvection(AdvectionBase):
-    """Advection based on fortran (scales) interface
-    """
-    @debug
-    def __init__(self, cutdir=None, **kwds):
-        """
-        Parameters
-        ----------
-        cutdir : list of bool
-            cutdir[d] = True to distribute data in direction d.
-
-        kwds : base class arguments.
-
-        Notes
-        -----
-        * In scales cutdir[0] must be False, i.e. data are not
-        distributed in the first dir, which corresponds to contiguous
-        memory layout (fortran).
-        * Scales assumes that all subdomains have the same
-          local resolution.
-        * No ghosts points allowed in scales (at the moment)
-
-        """
-        super(ScalesAdvection, self).__init__(**kwds)
-        assert Remesh in self.method.keys()
-        msg = 'Scales is not available for your configuration.'
-        assert __SCALES_ENABLED__, msg
-        msg = 'Scales Advection is only implemented in 3D.'
-        assert self.domain.dim == 3, msg
-        # Default splitting = Strang
-        if Splitting not in self.method.keys():
-            self.method[Splitting] = 'strang'
-        if cutdir is None:
-            cutdir = [False, ] * self.domain.dim
-            cutdir[-1] = True
-        self.cutdir = cutdir
-
-    def scales_parameters(self):
-        """
-        Return the name of the particular method used in scales
-        and the type of splitting.
-        """
-        order = None
-        for o in ['p_O2', 'p_O4', 'p_L2',
-                  'p_M4', 'p_M6', 'p_M8',
-                  'p_44', 'p_64', 'p_66', 'p_84']:
-            if self.method[Remesh].find(o) >= 0:
-                order = o
-        if order is None:
-            print('Unknown advection method, turn to default (p_M6).')
-            order = 'p_M6'
-
-        # - Extract splitting form self.method (default strang) -
-        splitting = 'strang'
-        for s in ['classic', 'strang', 'particle']:
-            if self.method[Splitting].find(s) >= 0:
-                splitting = s
-
-        return order, splitting
-
-    def discretize(self):
-        """
-        Discretization (create topologies and discretize fields)
-        Available methods :
-        - 'scales' : SCALES fortran routines (3d only, list of vector
-        and/or scalar)
-        - 'gpu' : OpenCL kernels (2d and 3d, single field, scalar or vector)
-        - other : Pure python (2d and 3d, list of vector and/or scalar)
-        """
-        if self._is_discretized:
-            return
-        # Check if topos need to be created
-        build_topos = self._check_variables()
-        order, splitting = self.scales_parameters()
-
-        # Scales, single resolution
-        if self._single_topo:
-            if build_topos:
-                # In that case, self._discretization must be
-                # a Discretization object, used for all fields.
-                # We use it to initialize scales solver
-                topo = self._create_scales_topo(self._discretization,
-                                                order, splitting)
-                for v in self.variables:
-                    self.variables[v] = topo
-            else:
-                # In that case, self._discretization must be
-                # a CartesianTopology object, used for all fields.
-                # We use it to initialize scales solver
-                assert isinstance(self._discretization, CartesianTopology)
-                topo = self._discretization
-                msg = 'input topology is not compliant with scales.'
-                #assert topo.dimension == 1, msg
-                msg = 'Ghosts points not yet implemented for scales operators.'
-                assert (topo.mesh.discretization.ghosts == 0).all(), msg
-
-                nbcells = topo.mesh.discretization.resolution - 1
-                topodims = topo.shape
-                scalesres, global_start = \
-                    scales.init_advection_solver(nbcells,
-                                                 self.domain.length,
-                                                 npw.asintegerarray(topodims),
-                                                 self.mpi_params.comm.py2f(),
-                                                 order=order,
-                                                 dim_split=splitting)
-
-                assert (topo.shape == topodims).all()
-                assert (topo.mesh.local_resolution == scalesres).all()
-                assert (topo.mesh.start() == global_start).all()
-
-            msg = 'Scales Advection not yet implemented with ghosts points.'
-            assert (topo.ghosts() == 0).all(), msg
-
-        # Scales, multi-resolution
-        else:
-            if build_topos[self.velocity]:
-                # Resolution used for velocity
-                v_resol = self.variables[self.velocity].resolution - 1
-
-            else:
-                topo = self.variables[self.velocity]
-                v_resol = topo.mesh.discretization.resolution
-
-            vbuild = [v for v in self.variables if build_topos[v]]
-            for v in vbuild:
-                self.variables[v] = self._create_scales_topo(
-                    self.variables[v], order, splitting)
-
-            topo = self.variables.values()[0]
-            self._check_scales_topo(topo, order, splitting)
-
-            # Init multiscale in scales
-            scales.init_multiscale(v_resol[0], v_resol[1], v_resol[2],
-                                   self.method[MultiScale])
-
-        # All topos are built, we can discretize fields.
-        self._discretize_vars()
-
-        advected_discrete_fields = [self.discrete_fields[f]
-                                    for f in self.advected_fields]
-        toporef = advected_discrete_fields[0].topology
-        msg = 'All advected fields must have the same topology.'
-        for f in advected_discrete_fields:
-            assert f.topology == toporef, msg
-
-        if self._single_topo:
-            self.method[MultiScale] = None
-
-    def _create_scales_topo(self, d3d, order, splitting):
-        """set specific MPI layout for scales.
-        """
-        comm = self.mpi_params.comm
-        topodims = [1, 1, comm.Get_size()]
-        msg = 'Wrong type for parameter discretization (at init).'
-        msg += str(self._discretization)
-        assert isinstance(d3d, Discretization), msg
-        nbcells = d3d.resolution - 1
-        scalesres, global_start = \
-            scales.init_advection_solver(nbcells,
-                                         self.domain.length,
-                                         npw.asintegerarray(topodims),
-                                         comm.py2f(),
-                                         order=order,
-                                         dim_split=splitting)
-        # Create the topo (plane, cut through ZDIR)
-        return self.domain.create_plane_topology_from_mesh(
-            global_start=global_start, localres=scalesres,
-            discretization=d3d, cdir=ZDIR)
-
-    def _check_scales_topo(self, toporef, order, splitting):
-        """Check if input topo fits with scales requirements
-        """
-        # In that case, self._discretization must be
-        # a CartesianTopology object, used for all fields.
-        # We use it to initialize scales solver
-        comm = self.mpi_params.comm
-        #topodims = [1, 1, comm.Get_size()]
-        nbcells = toporef.mesh.discretization.resolution - 1
-
-        scalesres, global_start = \
-            scales.init_advection_solver(nbcells, self.domain.length,
-                                         npw.asintegerarray(toporef.shape),
-                                         comm.py2f(),
-                                         order=order, dim_split=splitting)
-        for v in self.variables:
-            topo = self.variables[v]
-            assert isinstance(topo, CartesianTopology), str(topo)
-            #assert (topo.shape == topodims).all(), \
-            #    str(topo.shape) + ' != ' + str(topodims)
-            assert not self._single_topo or \
-                (topo.mesh.local_resolution == scalesres).all(), \
-                str(topo.mesh.local_resolution) + ' != ' + str(scalesres)
-            assert not self._single_topo or \
-                (topo.mesh.start() == global_start).all(), \
-                str(topo.mesh.start()) + ' != ' + str(global_start)
-
-    def _setup(self, advected_discrete_fields, rwork=None, iwork=None):
-        # Check resolutions to set multiscale case, if required.
-        if not self._single_topo and MultiScale not in self.method:
-            self.method[MultiScale] = L2_1
-        # - Create the discrete_op from the
-        # list of discrete fields -
-        from hysop.operator.discrete.scales_advection import \
-            ScalesAdvection as SD
-        self.discrete_op = SD(
-            self.discrete_fields[self.velocity],
-            advected_discrete_fields, method=self.method,
-            rwork=rwork, iwork=iwork,
-            **self.config)
diff --git a/hysop/old/operator.old/advection_dir.py b/hysop/old/operator.old/advection_dir.py
deleted file mode 100644
index 668dcfce57409ecf6db82933b18b6fb51faf0ca6..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/advection_dir.py
+++ /dev/null
@@ -1,155 +0,0 @@
-"""Advection of a field in a given direction
-
-See :ref:`advection` in user guide.
-
-"""
-from hysop.constants import debug, DirectionLabels
-from hysop.methods import Support, TimeIntegrator, Interpolation
-from hysop.numerics.remeshing import Remeshing
-from hysop.operator.computational import Computational
-import numpy as np
-from hysop.operator.continuous import opsetup, opapply
-from hysop.operator.advection import Advection
-
-
-class AdvectionDir(Computational):
-    """Advection of a scalar or vector field in a given direction,
-    assuming incompressible flow.
-    """
-
-    @debug
-    def __init__(self, parent, direction, **kwds):
-        """"
-        Parameters
-        ----------
-        parent : :class:`hysop.operator.advection.Advection`
-            main advection operator
-        direction: int
-            direction of advection
-        kwds : base class arguments.
-        """
-        assert isinstance(parent, Advection)
-        self.parent = parent
-        # advected_fields :
-        self.advected_fields = parent.advected_fields
-        # velocity field
-        self.velocity = parent.velocity
-        self._kwds = kwds
-        super(AdvectionDir, self).__init__(variables=parent.variables, **kwds)
-
-        self.method = parent.method
-        self.input = parent.input
-        self.output = parent.output
-        self.name = parent.name + DirectionLabels[direction]
-
-        # direction to advect
-        self.direction = direction
-
-        # Fields on particles
-        self.particle_fields = None
-
-        # Positions of the particles
-        self.particle_positions = None
-
-        # number of components of the rhs (time integration)
-        self._rhs_size = 1
-
-        # check if advection is done on gpu
-        self._gpu_advection = self.method[Support].find('gpu') >= 0
-
-    @debug
-    def discretize(self):
-        if self._is_discretized:
-            return
-
-        # everything is done in parent ...
-        self.variables = self.parent.variables
-        self.discrete_fields = self.parent.discrete_fields
-        self._is_discretized = True
-
-    @opsetup
-    def setup(self, rwork=None, iwork=None):
-        # select discretization of the advected fields
-        advected_discrete_fields = [self.discrete_fields[v]
-                                    for v in self.variables
-                                    if v is not self.velocity]
-        # GPU advection ...
-        if self._gpu_advection:
-            topo_shape = advected_discrete_fields[0].topology.shape
-            if topo_shape[self.direction] == 1:
-                from hysop.backend.device.opencl.gpu_particle_advection \
-                    import GPUParticleAdvection as advec
-            else:
-                from hysop.backend.device.opencl.multi_gpu_particle_advection \
-                    import MultiGPUParticleAdvection as advec
-        else:
-            # pure-python advection
-            from hysop.operator.discrete.particle_advection \
-                import ParticleAdvection as advec
-
-        self.discrete_op = advec(
-            velocity=self.discrete_fields[self.velocity],
-            fields_on_grid=advected_discrete_fields,
-            direction=self.direction,
-            rwork=rwork, iwork=iwork,
-            **self._kwds)
-
-        self._is_uptodate = True
-
-    def get_work_properties(self):
-        super(AdvectionDir, self).get_work_properties()
-        # Shape of reference comes from fields, not from velocity
-        advected_discrete_fields = [self.discrete_fields[v]
-                                    for v in self.variables
-                                    if v is not self.velocity]
-        topo = advected_discrete_fields[0].topology
-        # Find number and shape of required work arrays
-        if not self._gpu_advection:
-            # -- pure python advection --
-            #  work array shape depends on the time integrator
-            #  interpolation scheme and remeshing scheme
-            ti_work = self.method[TimeIntegrator].get_work_properties(
-                self._rhs_size, topo)
-            ti_rwork_length = len(ti_work['rwork'])
-            iw_prop = self.method[Interpolation].get_work_properties(topo)
-            rw_prop = Remeshing.get_work_properties(topo)
-            interp_iwork_length = len(iw_prop['iwork'])
-            interp_rwork_length = len(iw_prop['rwork'])
-            remesh_iwork_length = len(rw_prop['iwork'])
-            remesh_rwork_length = len(rw_prop['rwork'])
-            iwork_length = max(interp_iwork_length, remesh_iwork_length)
-            rwork_length = max(ti_rwork_length + interp_rwork_length,
-                               remesh_rwork_length)
-        else:
-            # -- GPU advection --
-            # no work array
-            iwork_length, rwork_length = 0, 0
-
-        # buffers for fields on particles
-        rwork_length += np.sum([f.nb_components for f in self.advected_fields])
-        if not self._gpu_advection or \
-           self.method[Support].find('gpu_2k') >= 0:
-            rwork_length += 1  # work array for positions
-        memsize = np.prod(topo.mesh.local_resolution)
-        return {'rwork': [memshape] * rwork_length,
-                'iwork': [memshape] * iwork_length}
-
-    @debug
-    @opapply
-    def apply(self, simulation=None, dt_coeff=1.0, split_id=0, old_dir=0):
-        """
-
-        Parameters
-        ----------
-
-        simulation : `:class::~hysop.problem.simulation.Simulation`
-        dt_coeff : double
-        split_id : int, optional
-        old_dir : int, optional
-        """
-        if not self._single_topo and not self._gpu_advection:
-            raise ValueError("Multiscale advection is not yet supported "
-                             "in pure Python, use Scales or GPU.")
-
-        self.discrete_op.apply(simulation,
-                               dt_coeff, split_id, old_dir)
diff --git a/hysop/old/operator.old/analytic.py b/hysop/old/operator.old/analytic.py
deleted file mode 100644
index c3523900674525a503073861bd07c4fbf1b6267a..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/analytic.py
+++ /dev/null
@@ -1,70 +0,0 @@
-"""Initialize fields on a grid, with a user-defined function
-"""
-from hysop.constants import debug
-from hysop.operator.continuous import opsetup, opapply
-from hysop.operator.computational import Computational
-from hysop.methods import Support
-
-
-class Analytic(Computational):
-    """
-    Applies an analytic formula, given by user, on its fields.
-    """
-
-    @debug
-    def __init__(self, formula=None, vectorize_formula=False, **kwds):
-        """ Apply a user-defined formula onto a list of fields.
-
-        Parameters
-        ----------
-        formula : python function
-            the formula to be applied
-        vectorize_formula : boolean, optional
-            true if formula must be vectorized (numpy), default = false.
-
-        Notes
-        -----
-        see :ref:`analytical_operator` for details on
-        the authorized signature for input formula or check
-        test_analytic.py
-        """
-        super(Analytic, self).__init__(**kwds)
-        isGPU = False
-        if 'method' in kwds.keys() and Support in kwds['method'].keys():
-            isGPU = kwds['method'][Support].find('gpu') >= 0
-        if formula is not None:
-            # A formula applied to all variables of this operator
-            self.formula = formula
-            for v in self.variables:
-                v.set_formula(formula, vectorize_formula)
-        elif not isGPU:
-            vref = self.variables.keys()[0]
-            assert vref.formula is not None
-            self.formula = vref.formula
-            # Only one formula allowed per operator
-            for v in self.variables:
-                assert v.formula is self.formula
-
-        self.output = self.variables
-
-    def discretize(self):
-        super(Analytic, self)._standard_discretize()
-
-    @opsetup
-    def setup(self, rwork=None, iwork=None):
-        self._is_uptodate = True
-
-    @debug
-    @opapply
-    def apply(self, simulation=None):
-        assert simulation is not None, \
-            "Missing simulation value for computation."
-        for v in self.variables:
-            topo = self.discrete_fields[v].topology
-            v.initialize(time=simulation.time, topo=topo)
-
-    def get_profiling_info(self):
-        pass
-
-    def get_work_properties(self):
-        return {'rwork': None, 'iwork': None}
diff --git a/hysop/old/operator.old/baroclinic.py b/hysop/old/operator.old/baroclinic.py
deleted file mode 100644
index 5eae4221aa53aef2f5922d87c162c593bd4038a5..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/baroclinic.py
+++ /dev/null
@@ -1,82 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-@file operator/multiphase.py
-
-MultiPhase Rot Grad P
-"""
-from hysop.operator.computational import Computational
-from hysop.operator.discrete.baroclinic import Baroclinic as BD
-from hysop.methods import SpaceDiscretization
-from hysop.numerics.finite_differences import FDC4
-from hysop.constants import debug
-import hysop.default_methods as default
-from hysop.operator.continuous import opsetup
-
-
-class Baroclinic(Computational):
-    """
-    Pressure operator representation
-    """
-
-    @debug
-    def __init__(self, velocity, vorticity, density, viscosity, **kwds):
-        """
-        Constructor.
-        Create a Pressure operator from given velocity variables.
-
-        @param velocity field
-        @param vorticity field
-        @param viscosity constant
-        @param density field
-        @param resolutions : grid resolution of velocity, vorticity, density
-        @param method : solving method
-        (default = finite differences, 4th order, in space)
-        @param topo : a predefined topology to discretize
-         velocity/vorticity/density
-        @param ghosts : number of ghosts points. Default depends on the method.
-        Autom. computed if not set.
-        """
-        assert 'variables' not in kwds, 'variables parameter is useless.'
-        super(Baroclinic, self).__init__(variables=[velocity,
-                                                    vorticity, density],
-                                         **kwds)
-        if self.method is None:
-            self.method = default.BAROCLINIC
-        self.velocity = velocity
-        self.vorticity = vorticity
-        self.density = density
-        self.viscosity = viscosity
-        self.input = [self.velocity, self.vorticity, self.density]
-        self.output = [self.vorticity]
-        assert SpaceDiscretization in self.method.keys()
-
-    def discretize(self):
-        if self.method[SpaceDiscretization] is FDC4:
-            nbGhosts = 2
-        else:
-            raise ValueError("Unknown method for space discretization of the\
-                baroclinic operator.")
-
-        super(Baroclinic, self)._standard_discretize(nbGhosts)
-
-    @debug
-    @opsetup
-    def setup(self, rwork=None, iwork=None):
-        """
-        Baroclinic operator discretization method.
-        Create a discrete Baroclinic operator from given specifications.
-        """
-        self.discrete_op = \
-            BD(self.discrete_fields[self.velocity],
-               self.discrete_fields[self.vorticity],
-               self.discrete_fields[self.density],
-               self.viscosity,
-               method=self.method)
-
-        self._is_uptodate = True
-
-    def initialize_velocity(self):
-        self.discrete_op.initialize_velocity()
-
-    def get_work_properties(self):
-        return {'rwork': None, 'iwork': None}
diff --git a/hysop/old/operator.old/baroclinic_from_rhs.py b/hysop/old/operator.old/baroclinic_from_rhs.py
deleted file mode 100644
index 42aba93d9a63f0dfb81f10a4cbaf091fe6c4392c..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/baroclinic_from_rhs.py
+++ /dev/null
@@ -1,56 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-@file operator/baroclinic_from_rhs.py
-
-MultiPhase baroclinic term
-"""
-from hysop.operator.computational import Computational
-from hysop.operator.discrete.baroclinic_from_rhs import BaroclinicFromRHS as BD
-from hysop.methods import SpaceDiscretization
-from hysop.numerics.finite_differences import FDC4
-from hysop.constants import debug
-import hysop.default_methods as default
-from hysop.operator.continuous import opsetup
-
-
-class BaroclinicFromRHS(Computational):
-    """
-    Pressure operator representation
-    """
-
-    @debug
-    def __init__(self, vorticity, rhs, **kwds):
-        """
-        Constructor.
-        Create a BaroclinicFromRHS operator on a given vorticity and the rhs.
-
-        @param vorticity field
-        @param rhs field
-        """
-        assert 'variables' not in kwds, 'variables parameter is useless.'
-        super(BaroclinicFromRHS, self).__init__(variables=[vorticity, rhs],
-                                                **kwds)
-        if self.method is None:
-            self.method = default.BAROCLINIC
-        self.vorticity = vorticity
-        self.rhs = rhs
-        self.input = [self.vorticity, self.rhs]
-        self.output = [self.vorticity]
-        assert SpaceDiscretization in self.method.keys()
-
-    def discretize(self):
-        super(BaroclinicFromRHS, self)._standard_discretize()
-
-    @debug
-    @opsetup
-    def setup(self, rwork=None, iwork=None):
-        """
-        Baroclinic operator discretization method.
-        Create a discrete Baroclinic operator from given specifications.
-        """
-        self.discrete_op = \
-            BD(self.discrete_fields[self.vorticity],
-               self.discrete_fields[self.rhs],
-               method=self.method)
-
-        self._is_uptodate = True
diff --git a/hysop/old/operator.old/computational.py b/hysop/old/operator.old/computational.py
deleted file mode 100755
index 783d062654b0a2b4ae54dae9a5727715c51a1853..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/computational.py
+++ /dev/null
@@ -1,377 +0,0 @@
-""" Abstract interface for computational operators
-
-"""
-from abc import ABCMeta, abstractmethod
-from hysop.constants import debug
-from hysop.operator.continuous import OperatorBase, opapply
-from hysop.topology.cartesian_topology import CartesianTopology
-from hysop.tools.parameters import Discretization
-from hysop.tools.numpywrappers import npw
-from hysop import __FFTW_ENABLED__
-
-
-class Computational(OperatorBase):
-    """
-    Abstract base class for computational operators.
-
-    An operator is composed of :
-    - a set of continuous variables (at least one)
-    - a method which defined how it would be discretized/processed
-    - a discrete operator : object build using the method
-    and the discretized variables.
-
-    To each variable a 'resolution' is associated, used
-    to create a topology and a discrete field.
-    See details in 'real' operators (derived classes) description.
-    """
-    __metaclass__ = ABCMeta
-
-    @debug
-    def __new__(cls, *args, **kw):
-        return object.__new__(cls, *args, **kw)
-
-    @debug
-    @abstractmethod
-    def __init__(self, discretization=None, method=None, **kwds):
-        """
-
-        Parameters
-        ----------
-        discretization : :class:`hysop.topology.topology.CartesianTopology`
-         or :class:`tools.parameters.Discretization`
-            Defined the data mpi distribution. See Notes below.
-        method : :class:`hysop.method`
-            specific parameters for the operator
-            (space discretisation ...). See methods.py for authorized values.
-        **kwds : arguments passed to base class.
-
-        Notes:
-        ------
-        """
-        # Base class init
-        super(Computational, self).__init__(**kwds)
-
-        # Set mpi related stuff
-        if self.variables:
-            self._set_domain_and_tasks()
-
-        # A dictionnary of parameters, to define numerical methods used
-        # used to discretize the operator.
-        # When method is None, each operator must provide a default
-        # behavior.
-        self.method = method
-
-        # The discretization of this operator.
-        self.discrete_op = None
-
-        # A dictionnary of discrete_fields associated with this operator
-        # key = continuous variable \n
-        # Example : discrete_variable = discrete_fields[velocity]
-        # returns the discrete fields that corresponds to the continuous
-        # variable velocity.
-        self.discrete_fields = {}
-    
-        if not self._vars_from_list:
-            msg = 'discretization parameter is useless when variables are set'
-            msg += ' from a dict.'
-            if (discretization is not None):
-                raise ValueError(msg)
-
-        self._discretization = discretization
-        # Remark FP: discretization may be None in two cases:
-        # - not required when variables is set from a dict
-        # - the current task does not need to discretize this operator.
-
-        # False if fields have different discretizations.
-        # Set during discretize call.
-        self._single_topo = True
-
-        # If true, ready for setup ...
-        # Turn to true after self._discretize_vars call.
-        self._is_discretized = False
-
-    @abstractmethod
-    def get_work_properties(self):
-        """Get properties of internal work arrays. Must be call after discretize
-        but before setup.
-
-        Returns
-        -------
-        dictionnary
-           keys = 'rwork' and 'iwork', values = list of shapes of internal
-           arrays required by this operator (real arrays for rwork, integer
-           arrays for iwork).
-
-        Example
-        -------
-
-        >> works_prop = op.get_work_properties()
-        >> print works_prop
-        {'rwork': [(12, 12), (45, 12, 33)], 'iwork': None}
-
-        means that the operator requires two real arrays of
-        shape (12,12) and (45, 12, 33) and no integer arrays
-
-        """
-        msg = 'The operator must be discretized '
-        msg += 'before any call to this function.'
-        assert self._is_discretized, msg
-
-    def discretize(self):
-        """
-        For each variable, check if a proper topology has been defined,
-        if not, build one according to 'discretization' parameters set
-        during initialization of the class.
-        Then, discretize each variable on this topology.
-        """
-        self._standard_discretize()
-
-    def _discretize_vars(self):
-        """
-        Discretize all variables of the current operator.
-        """
-        for v in self.variables:
-            msg = 'Missing topology to discretize ' + v.name
-            msg += ' in operator ' + self.name
-            assert isinstance(self.variables[v], CartesianTopology), msg
-
-            self.discrete_fields[v] = v.discretize(self.variables[v])
-        self._is_discretized = True
-
-    def _check_variables(self):
-        """
-        Check variables and discretization parameters
-        Set single_topo: if true all fields are discretized with the
-        same topo
-
-        Returns
-        -------
-        build_topos : a dict (key == field), if build_topos[v] is true,
-        a topology must be built for v. In that case, the discretization has
-        been saved in self.variables[v] during init. In the other case
-        self.variables[v] is the required topology.
-
-        Remark : since operators belong to one and only one task, this function
-        must not be called by all tasks. So it can not be called at init.
-        """
-        if self._vars_from_list:
-            # In that case, self._single_topo is True
-            # but we need to check if discretization param
-            # was a topology or a Discretization.
-            msg = 'required parameter discretization has not been'
-            msg += ' set during operator construction.'
-            assert self._discretization is not None
-            # Fill variables dictionnary
-            for v in self.variables:
-                self.variables[v] = self._discretization
-            self._single_topo = True
-            if isinstance(self._discretization, CartesianTopology):
-                # No need to build topologies
-                build_topos = False
-            elif isinstance(self._discretization, Discretization):
-                build_topos = True
-            else:
-                msg = 'Wrong type for parameter discretization in'
-                msg += ' operator construction.'
-                raise ValueError(msg)
-        else:
-            msg = 'discretization parameter in operator construction is '
-            msg += 'useless when variables are set from a dict.'
-            assert self._discretization is None, msg
-            self._single_topo = False
-            build_topos = {}
-            for v in self.variables:
-                disc = self.variables[v]
-                if isinstance(disc, CartesianTopology):
-                    build_topos[v] = False
-                elif isinstance(disc, Discretization):
-                    build_topos[v] = True
-                else:
-                    msg = 'Wrong type for values in variables dictionnary '
-                    msg += '(parameter in operator construction).'
-                    raise ValueError(msg)
-
-            ref = self.variables.values()[0]
-            self._single_topo = True
-            for disc in self.variables.values():
-                self._single_topo = ref == disc and self._single_topo
-
-            if self._single_topo:
-                build_topos = build_topos.values()[0]
-                self._discretization = self.variables.values()[0]
-
-        return build_topos
-
-    def _standard_discretize(self, min_ghosts=0, cutdir=None):
-        """
-        This functions provides a standard way to discretize the operator,
-        but some operators may need a specific discretization process.
-        """
-        if self._is_discretized:
-            return
-
-        build_topos = self._check_variables()
-        if self._single_topo:
-            # One topo for all fields ...
-            if build_topos:
-                topo = self._build_topo(self._discretization, min_ghosts,
-                                        cutdir)
-                for v in self.variables:
-                    self.variables[v] = topo
-            else:
-                # Topo is already built, just check its ghosts and cutdir
-                msg = 'The proposed ghost layer is not large enough.'
-                ghosts = self.variables.values()[0].mesh.discretization.ghosts
-                assert (ghosts >= min_ghosts).all(), msg
-                if cutdir is not None:
-                    assert (self.variables.values()[0].cutdir == cutdir).all()
-        else:
-            # ... or one topo for each field.
-            for v in self.variables:
-                if build_topos[v]:
-                    self.variables[v] = self._build_topo(self.variables[v],
-                                                         min_ghosts, cutdir)
-                    build_topos[v] = False
-                else:
-                    assert (self.variables[v].ghosts() >= min_ghosts).all()
-
-        # All topos are built, we can discretize fields.
-        self._discretize_vars()
-
-    def _build_topo(self, discretization, min_ghosts, cutdir=None):
-        """Build a mpi topology and its mesh from a given
-        discretization.
-        """
-        # Reset ghosts if necessary
-        ghosts = discretization.ghosts
-        ghosts[ghosts < min_ghosts] = min_ghosts
-        # build a topology from the given discretization
-        return self.domain.create_topology(discretization=discretization,
-                                           cutdir=cutdir)
-
-    def _fftw_discretize(self):
-        """
-        fftw specific way to discretize variables for a given
-        'reference' resolution.
-        We assume that in fft case, only one topology must be used
-        for all variables.
-        """
-        if self._is_discretized:
-            return
-        
-        assert __FFTW_ENABLED__
-        from hysop.f2hysop import fftw2py
-
-        build_topos = self._check_variables()
-        assert self._single_topo, 'All fields must use the same topology.'
-        # Get local mesh parameters from fftw
-        comm = self.mpi_params.comm
-        if build_topos:
-            # In that case, self._discretization must be
-            # a Discretization object, used for all fields.
-            # We use it to initialize scales solver
-            msg = 'Wrong type for parameter discretization (at init).'
-            assert isinstance(self._discretization, Discretization), msg
-            resolution = npw.asintegerarray(self._discretization.resolution)
-            localres, global_start = fftw2py.init_fftw_solver(
-                resolution, self.domain.length, comm=comm.py2f())
-            # Create the topo (plane, cut through ZDIR)
-            topo = self.domain.create_plane_topology_from_mesh(
-                global_start=global_start, localres=localres,
-                discretization=self._discretization)
-            for v in self.variables:
-                self.variables[v] = topo
-        else:
-            # In that case, self._discretization must be
-            # a CartesianTopology object, used for all fields.
-            # We use it to initialize fftw solver
-            assert isinstance(self._discretization, CartesianTopology)
-            topo = self._discretization
-            msg = 'input topology is not compliant with fftw.'
-            assert topo.dimension == 1, msg
-
-            from hysop.constants import ORDER
-            if ORDER == 'C':
-                assert topo.shape[0] == self.mpi_params.comm.Get_size(), msg
-            else:
-                assert topo.shape[-1] == self.mpi_params.comm.Get_size(), msg
-
-            resolution = npw.asintegerarray(topo.mesh.discretization.resolution)
-
-            localres, global_start = fftw2py.init_fftw_solver(
-                resolution, self.domain.length, comm=comm.py2f())
-
-        assert (topo.mesh.local_resolution == localres).all()
-        assert (topo.mesh.start() == global_start).all()
-        msg = 'Ghosts points not yet implemented for fftw-type operators.'
-        assert (topo.ghosts() == 0).all(), msg
-
-        # All topos are built, we can discretize fields.
-        self._discretize_vars()
-
-    @abstractmethod
-    def setup(self, rwork=None, iwork=None):
-        """
-        Last step of initialization. After this, the operator must be
-        ready for apply call.
-
-        Main step : setup for discrete operators.
-        """
-        assert self._is_discretized
-        super(Computational, self).setup()
-
-    @debug
-    def finalize(self):
-        """
-        Memory cleaning.
-        """
-        if self.discrete_op is not None:
-            self.discrete_op.finalize()
-
-    @debug
-    @opapply
-    def apply(self, simulation=None):
-        """Apply this operator to its variables.
-
-        Parameters
-        ----------
-        simulation : `:class::~hysop.problem.simulation.Simulation`
-
-        """
-        if self.discrete_op is not None:
-            self.discrete_op.apply(simulation)
-
-    def computation_time(self):
-        """ Time monitoring."""
-        if self.discrete_op is not None:
-            self.discrete_op.computation_time()
-            self.time_info = self.discrete_op.time_info
-        else:
-            from hysop.core.mpi import main_rank
-            short_name = str(self.__class__).rpartition('.')[-1][0:-2]
-            s = '[' + str(main_rank) + '] ' + short_name
-            s += " : operator not discretized --> no computation, time = 0."
-            print s
-
-    def update_ghosts(self):
-        """
-        Update ghost points values, if any.
-        """
-        assert self._is_discretized
-        self.discrete_op.update_ghosts()
-
-    def __str__(self):
-        """
-        Common printings for operators
-        """
-        short_name = str(self.__class__).rpartition('.')[-1][0:-2]
-        if self.discrete_op is not None:
-            s = str(self.discrete_op)
-        else:
-            s = short_name + " operator. Not discretised."
-        return s + "\n"
-
-    def get_profiling_info(self):
-        """Update profiler"""
-        if self.discrete_op is not None:
-            self.profiler += self.discrete_op.profiler
diff --git a/hysop/old/operator.old/continuous.py b/hysop/old/operator.old/continuous.py
deleted file mode 100755
index 40d7701270a8651b773ddd9b981b25069df65b0f..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/continuous.py
+++ /dev/null
@@ -1,356 +0,0 @@
-"""Common interface for all continuous operators.
-
-"""
-from abc import ABCMeta, abstractmethod
-from hysop.constants import debug
-from hysop.tools.profiler import Profiler
-from hysop.tools.io_utils import IOParams, IO
-from hysop.tools.parameters import MPIParams
-import hysop.tools.io_utils as io
-import inspect
-from hysop.tools.profiler import ftime
-
-
-class Operator(object):
-    """Abstract interface to continuous operators.
-    """
-    __metaclass__ = ABCMeta
-
-    @debug
-    def __new__(cls, *args, **kw):
-        return object.__new__(cls, *args, **kw)
-
-    @debug
-    @abstractmethod
-    def __init__(self, variables=None, mpi_params=None,
-                 io_params=None, **kwds):
-        """
-        Parameters
-        ----------
-        variables : list or dictionnary. See Notes for details.
-        mpi_params : :class:`hysop.tools.parameters.MPIParams`
-            mpi config for the operator (comm, task ...)
-        io_params : :class:`hysop.tools.io_utils.IOParams`
-            file i/o config (filename, format ...)
-
-        Notes
-        -----
-        Variables arg may be either a list or a dictionnary, depending
-        on the type of operator.
-        The elements of the list or the keys of the dict
-        are :class:`hysop.fields.continuous_field.Fields`.
-
-        The values of the dict can be either
-        :class:`hysop.topology.topology.CartesianTopology`
-        or :class:`hysop.tools.parameters.Discretization`::
-
-        op = Analytic(variables = [velo, vorti], ...)
-
-        or::
-
-        op = Analytic(variables = {velo: topo, vorti: discr3D}, ...)
-
-        Attributes
-        ----------
-        variables : dict or list
-            :class:`~hysop.fields.continuous_field.Continuous`
-            with their discretisation
-        domain : :class:`~hysop.domain.domain.Domain`,
-        the geometry on which this operator applies
-
-        """
-        super(OperatorBase,self).__init__(**kwds)
-
-        # 1 ---- Variables setup ----
-        # List of hysop.continuous.Fields involved in the operator.
-        if isinstance(variables, list):
-            self.variables = {}
-            for v in variables:
-                self.variables[v] = None
-            self._vars_from_list = True
-            # Details on descretization process must be provided
-            # in derived class (extra args like resolution, topo ...)
-        elif isinstance(variables, dict):
-            self._vars_from_list = False
-            self.variables = variables
-        elif variables is not None:
-            # Note that some operators may not have variables (redistribute for
-            # example).
-            msg = 'Wrong type for variables arg.'
-            msg += 'It must be a list or a dictionnary.'
-            raise AttributeError(msg)
-        else:
-            # this last case corresponds with redistribute operators
-            # that may have variables implicitely defined from input
-            # source and target operators
-            #(see hysop.operator.redistribute.Redistribute for details).
-            self.variables = {}
-
-        # Domain of definition.
-        # Must be the same for all variables
-        # Set in derived class.
-        self.domain = None
-        """Physical domain of definition for the operator """
-        # mpi context
-        self.mpi_params = mpi_params
-        # tools for profiling
-        self.profiler = None
-
-        # Remark : domain, mpi_params and profiler will be set properly in
-        # _set_domain_and_tasks, called in derived class, since it may
-        # require some specific initialization (check domain ...)
-
-        # Input variables.
-        self.input = []
-        # Output variables.
-        self.output = []
-        # bool to check if the setup function has been called for
-        # this operator
-        self._is_uptodate = False
-
-        self.name = self.__class__.__name__
-        # List of operators that must be waited for.
-        self._wait_list = []
-        # time monitoring
-        self.time_info = None
-        # Dictionnary of optional parameters for output
-        self.io_params = io_params
-        # Object that deals with output file writing.
-        # Optional.
-        self._writer = None
-        self.ontask = False
-
-    def _set_domain_and_tasks(self):
-        """
-        Initialize the mpi context, depending on local variables, domain
-        and so on.
-        """
-        # When this function is called, the operator must at least
-        # have one variable.
-        assert len(self.variables) > 0
-        if isinstance(self.variables, list):
-            self.domain = self.variables[0].domain
-        elif isinstance(self.variables, dict):
-            self.domain = self.variables.keys()[0].domain
-
-        # Check if all variables have the same domain
-        for v in self.variables:
-            assert v.domain is self.domain, 'All variables of the operator\
-            must be defined on the same domain.'
-        # Set/check mpi context
-        if self.mpi_params is None:
-            self.mpi_params = MPIParams(comm=self.domain.task_comm,
-                                   task_id=self.domain.current_task())
-
-        # Set profiler
-        self.profiler = Profiler(self, self.domain.task_comm)
-
-    @staticmethod
-    def _error_():
-        """internal error message
-        """
-        raise RuntimeError("This operator is not defined for the current task")
-
-    def wait_for(self, op):
-        """MPI synchronisation
-
-        :param op:  :class:`~hysop.operator.continuous.Continuous`
-            Add an operator into 'wait' list of the present object.
-            It means that before any apply of this operator, all
-            (mpi) operations in op must be fulfilled, which implies
-            a call to op.wait().
-
-        """
-        self._wait_list.append(op)
-
-    def wait_list(self):
-        """Get MPI running comm. list
-
-        Returns
-        -------
-        python list of all operators that must be fulfilled
-        before any attempt to apply the present operator.
-        """
-        return self._wait_list
-
-    def wait(self):
-        """
-        MPI wait for synchronisation: when this function is called,
-        the programm wait for the fulfillment of all the running
-        operations of this operator (mpi requests for example).
-        This is a blocking call.
-        """
-        pass
-
-    def test_requests(self):
-        """Checks for unfinished mpi communications.
-
-        Returns
-        -------
-        bool : MPI send/recv test for synchronisation, when this function is
-        called, the programm checks if this operator handles some uncomplete
-        mpi requests (if so return true, else false).
-        This is a non-blocking call.
-        """
-        pass
-
-    @abstractmethod
-    def setup(self, rwork=None, iwork=None):
-        """
-        Last step of initialization. After this, the operator must be
-        ready to apply.
-        In derived classes, called through @opsetup decorator.
-        """
-        if not self.domain.current_task() == self.mpi_params.task_id:
-            self.ontask = False
-            self._error_()
-
-    @abstractmethod
-    @debug
-    def apply(self, simulation=None):
-        """
-        Apply this operator to its variables.
-
-        Parameters
-        ----------
-        simulation : hysop.problem.simulation.Simulation
-            describes the simulation parameters
-            (time, time step, iteration number ...)
-
-        In derived classes, called through @opapply decorator.
-        """
-        for op in self.wait_list():
-            op.wait()
-
-    def finalize(self):
-        """
-        Memory cleaning.
-        """
-        # wait for all remaining communications, if any
-        self.wait()
-
-    @abstractmethod
-    def computation_time(self):
-        """
-        Time monitoring.
-        """
-
-    def is_up(self):
-        """Returns True if ready to be applied
-        (--> setup function has been called succesfully)
-        """
-        return self._is_uptodate
-
-    def _set_io(self, filename, buffshape):
-        """
-        Parameters
-        -----------
-        filename : string
-            name of the output file used by this operator
-        buffshape : tuple
-            shape of the numpy buffer used to save data to
-            be printed out into filename. Must be 2D.
-            Example : shape (2,4)
-
-        Notes
-        -----
-        This function is private and must not be called by
-        external object. It is usually called by operator
-        during construction (__init__).
-
-        """
-        iopar = self.io_params
-        # if iopar is not None (i.e. set in operator init)
-        # and True or set as an IOParams , then
-        # build a writer
-        if iopar:
-            if isinstance(iopar, bool):
-                # default values for iop
-                self.io_params = IOParams(filename, fileformat=IO.ASCII)
-            else:
-                msg = 'Error, wrong file format for operator output.'
-                assert self.io_params.fileformat is IO.ASCII, msg
-            self._writer = io.Writer(io_params=self.io_params,
-                                     mpi_params=self.mpi_params,
-                                     buffshape=buffshape)
-
-    def task_id(self):
-        """
-        Returns the id of the task on which this operator works.
-        """
-        return self.mpi_params.task_id
-
-
-def opsetup(f):
-    """
-    Setup decorator: what must be done by all operators
-    at setup.
-    Usage : add @opsetup before setup class method
-    """
-
-    def decorator(*args, **kwargs):
-        """Decorate 'setup' method
-        """
-        # Job before setup of the function ...
-        # nothing for the moment
-        name = inspect.getmro(args[0].setup.im_class)
-        # call the setup function
-        retval = f(*args, **kwargs)
-        # Warning : we cannot call super(...) since
-        # it leads to infinite cycling when setup
-        # is not defined in the class but in its
-        # base class and when this base class is derived
-        # from Computational ...
-        # So we directly call Computational.setup()
-        # It's ugly but it seems to work.
-        # Job after setup of the function ...
-        name[-3].setup(args[0])
-        #super(args[0].__class__, args[0]).setup()
-        return retval
-
-    return decorator
-
-
-def opapply(f):
-    """
-    What must be done by all operators
-    before apply.
-    Usage : add @opapply before apply class method
-    """
-    def decorator(*args, **kwargs):
-        """decorate 'apply' method"""
-        # get 'continuous' base class and run its apply function
-        # --> call wait function of ops in wait_list
-        name = inspect.getmro(args[0].apply.im_class)
-        t0 = ftime()
-        name[-2].apply(args[0])
-        #t0 = ftime()
-        res = f(*args, **kwargs)
-        args[0].profiler[f.func_name] += ftime() - t0
-        return res
-
-    return decorator
-
-
-class Tools(object):
-    """
-    Static class with utilities related to operators
-    """
-
-    @staticmethod
-    def check_device(op):
-        """
-        Returns true if op operates on a GPU
-        """
-        from hysop.methods import Support
-
-        try:
-            is_device = \
-                op.method[Support].find('gpu') >= 0
-        except KeyError:  # op.method is a dict not containing Support in keys
-            is_device = False
-        except IndexError:  # op.method is a string
-            is_device = False
-        except TypeError:  # op.method is None
-            is_device = False
-        return is_device
diff --git a/hysop/old/operator.old/curlAndDiffusion.py b/hysop/old/operator.old/curlAndDiffusion.py
deleted file mode 100644
index 92a5ec8581580b28afeec9a2c9d5dc51b2e76bc2..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/curlAndDiffusion.py
+++ /dev/null
@@ -1,79 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-@file diffusion.py
-
-Operator for diffusion problem.
-
-"""
-from hysop.operator.continuous import OperatorBase
-try:
-    from hysop.f2hysop import fftw2py
-except ImportError:
-    from hysop.fakef2py import fftw2py
-from hysop.operator.discrete.diffusion_fft import DiffusionFFT
-from hysop.constants import debug
-from hysop.operator.continuous import opsetup
-
-
-class CurlDiffusion(OperatorBase):
-    """
-    Diffusion operator
-    \f{eqnarray*}
-    \omega = Op(\omega)
-    \f} with :
-    \f{eqnarray*}
-    \frac{\partial \omega}{\partial t} &=& \nu\Delta\omega
-    \f}
-    """
-
-    @debug
-    def __init__(self, velocity, vorticity, **kwds):
-        """
-        Constructor.
-        Create a Diffusion operator using FFT.
-
-        @param velocity ContinuousVectorField : velocity variable.
-        @param vorticity ContinuousVectorField : vorticity variable.
-        @param viscosity : viscosity of the considered medium.
-        """
-        super(CurlDiffusion, self).__init__(variables=[velocity, vorticity], **kwds)
-        self.velocity = velocity
-        self.vorticity = vorticity
-        raise ValueError("This operator is obsolete and must be reviewed.\
-                          Do not use it.")
-
-    @debug
-    @opsetup
-    def setup(self):
-        """
-        Diffusion operator discretization method.
-        Create a discrete Diffusion operator from given specifications.
-        """
-        if self._comm is None:
-            from hysop.core.mpi import main_comm as comm
-        else:
-            comm = self._comm
-
-        localres, localoffset = fftw2py.init_fftw_solver(
-            self.resolutions[self.vorticity],
-            self.domain.length, comm=comm.py2f())
-
-        topodims = self.resolutions[self.vorticity] / localres
-        print ('topodims DIFFUSION', topodims)
-        # variables discretization
-
-        for v in self.variables:
-            topo = self.domain.getOrCreateTopology(self.domain.dim,
-                                                   self.resolutions[v],
-                                                   topodims,
-                                                   comm=comm)
-            vd = v.discretize(topo)
-            self.discrete_fields[v] = vd
-
-        self.discrete_op =\
-            DiffusionFFT(self.discrete_fields[self.velocity],
-                         self.discrete_fields[self.vorticity],
-                         self.method, **self.config)
-
-        self.discrete_op.setup()
-        self._is_uptodate = True
diff --git a/hysop/old/operator.old/custom.py b/hysop/old/operator.old/custom.py
deleted file mode 100644
index 271e49a3d8fc8382d7c2ad77dde874b035fcf4a9..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/custom.py
+++ /dev/null
@@ -1,65 +0,0 @@
-"""Interface to set a user-defined operator
-"""
-from hysop.operator.computational import Computational
-from hysop.operator.discrete.custom import Custom as CO
-from hysop.operator.continuous import opsetup
-
-
-class Custom(Computational):
-    """User-defined generic operator
-    """
-    def __init__(self, function, in_fields, out_fields=None,
-                 diagnostics_shape=None, **kwds):
-        """
-
-        Parameters
-        ----------
-        in_fields: list of :class:`~hysop.fields.discrete_field.DiscreteField`
-             input fields args. for function, see notes below
-        out_fields: list of :class:`~hysop.fields.discrete_field.DiscreteField`
-             output fields args for function, see notes below
-        function: python function
-             a user defined function, called by this op.apply method.
-        diagnostics_shape: tuple, optional
-             shape of the data expected to be written into file.
-
-        Notes
-        -----
-        A function is used to set the behavior of the current operator,
-        during apply call.
-        This function must look like::
-
-            def some_func(simulation, in_fields, out_fields, diagnostics=None):
-                # do things ...
-
-        and compute out_fields values and optionnaly some diagnostics.
-        See :ref:`custom`
-        """
-        super(Custom, self).__init__(**kwds)
-        self.function = function
-        self.input = in_fields
-        if out_fields is not None:
-            self.output = out_fields
-        self._diagnostics_shape = diagnostics_shape
-
-    def discretize(self):
-        super(Custom, self)._standard_discretize()
-
-    @opsetup
-    def setup(self, rwork=None, iwork=None):
-        if not self._is_uptodate:
-            self.discrete_op = CO(
-                in_fields=[self.discrete_fields[f] for f in self.input],
-                out_fields=[self.discrete_fields[f] for f in self.output],
-                function=self.function,
-                variables=self.discrete_fields.values())
-            if self._diagnostics_shape is not None:
-                assert isinstance(self._diagnostics_shape, tuple)
-                assert len(self._diagnostics_shape) == 2
-                self._set_io(self.function.__name__, self._diagnostics_shape)
-                self.discrete_op.set_writer(self._writer)
-
-            self._is_uptodate = True
-
-    def get_work_properties(self):
-        return {'rwork': None, 'iwork': None}
diff --git a/hysop/old/operator.old/density.py b/hysop/old/operator.old/density.py
deleted file mode 100644
index 560855b44e785dceb64a05caa847697d97dc6661..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/density.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-@file operator/density.py
-
-"""
-from hysop.operator.computational import Computational
-from hysop.operator.discrete.density import DensityVisco_d
-from hysop.operator.continuous import opsetup
-from hysop.constants import debug
-
-
-class DensityVisco(Computational):
-    """
-    Density and Viscosity reconstruction
-    """
-
-    @debug
-    def __init__(self, density, viscosity, **kwds):
-        """
-        @param density : scalar field
-        @param viscosity : scalar field
-        """
-        super(DensityVisco, self).__init__(variables=[density, viscosity],
-                                           **kwds)
-        self.density = density
-        self.viscosity = viscosity
-        self.input = [self.density, self.viscosity]
-        self.output = [self.density, self.viscosity]
-
-    def discretize(self):
-        super(DensityVisco, self)._standard_discretize()
-
-    @debug
-    @opsetup
-    def setup(self, rwork=None, iwork=None):
-        """
-        Density and Viscosity reconstruction operator discretization method.
-        Create a discrete operator from given specifications.
-        """
-
-        self.discrete_op = \
-            DensityVisco_d(density=self.discrete_fields[self.density],
-                           viscosity=self.discrete_fields[self.viscosity],
-                           method=self.method)
-        self._is_uptodate = True
-
-    def get_work_properties(self):
-        return {'rwork': None, 'iwork': None}
-    
diff --git a/hysop/old/operator.old/differential.py b/hysop/old/operator.old/differential.py
deleted file mode 100644
index 36277c9cf9a352c7e543ca5af64a114f17002413..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/differential.py
+++ /dev/null
@@ -1,164 +0,0 @@
-"""Differential operators
-
-.. currentmodule hysop.operator.differential
-
-* :class:`~Curl`,
-* :class:`~Grad`,
-* :class:`~DivAdvection`,
-* :class:`~Differential` (abstract base class).
-
-
-"""
-from hysop.constants import debug
-from hysop.operator.computational import Computational
-from hysop.operator.discrete.differential import CurlFFT, CurlFD,\
-    GradFD, DivAdvectionFD
-from hysop.methods import SpaceDiscretization
-from hysop.operator.continuous import opsetup
-from hysop.numerics.finite_differences import FiniteDifference
-import hysop.default_methods as default
-from abc import ABCMeta, abstractmethod
-from hysop.numerics.differential_operations import Curl as NumCurl
-from hysop.numerics.differential_operations import DivAdvection as NumDA
-from hysop import __FFTW_ENABLED__
-
-
-class Differential(Computational):
-    """Abstract base class for differential operators
-    """
-    __metaclass__ = ABCMeta
-
-    # @debug
-    # def __new__(cls, *args, **kw):
-    #     return object.__new__(cls, *args, **kw)
-
-    @debug
-    def __init__(self, invar, outvar, **kwds):
-        """
-        Parameters
-        ----------
-        invar, outvar : :class:`~hysop.fields.continuous_field.Field`
-           input/output scalar or vector fields
-            such that outvar = op(invar).
-        **kwds : base class parameters
-
-       """
-        assert 'variables' not in kwds, 'variables parameter is useless.'
-        super(Differential, self).__init__(variables=[invar, outvar], **kwds)
-        if self.method is None:
-            self.method = default.DIFFERENTIAL
-
-        # input variable
-        self.invar = invar
-        # result of the operator
-        self.outvar = outvar
-
-        # Remark : at the time, all variables must have the same topology.
-        # This is implicitely checked with the assert on kwds['variables']:
-        # the only construction allowed is :
-        # (invar= ..., outvar=..., discretization=...)
-        self.output = [outvar]
-        self.input = [invar]
-        # Discrete operator type. Set in derived class.
-        self._discrete_op_class = self._init_space_discr_method()
-
-    @abstractmethod
-    def _init_space_discr_method(self):
-        """Select the proper discretisation for the operator
-        """
-
-    def discretize(self):
-        """Build topologies for all variables.
-
-        At the time, two space-discretization methods : based on
-        FFT or on Finite Differences.
-        """
-        space_d = self.method[SpaceDiscretization]
-        if space_d is 'fftw':
-            super(Differential, self)._fftw_discretize()
-
-        elif space_d.mro()[1] is FiniteDifference:
-            number_of_ghosts = space_d.ghosts_layer_size
-            super(Differential, self)._standard_discretize(number_of_ghosts)
-        else:
-            raise ValueError("Unknown method for space discretization of the\
-                differential operator.")
-
-        msg = 'Operator not yet implemented for multiple resolutions.'
-        assert self._single_topo, msg
-
-    @opsetup
-    def setup(self, rwork=None, iwork=None):
-        """
-        Last step of initialization. After this, the operator must be
-        ready for apply call.
-
-        Main step : setup for discrete operators.
-        """
-        self.discrete_op = self._discrete_op_class(
-            invar=self.discrete_fields[self.invar],
-            outvar=self.discrete_fields[self.outvar],
-            method=self.method, rwork=rwork)
-        self._is_uptodate = True
-
-
-class Curl(Differential):
-    """Computes outVar = nabla X inVar
-    """
-
-    def _init_space_discr_method(self):
-        if self.method[SpaceDiscretization] is 'fftw' and __FFTW_ENABLED__:
-            op_class = CurlFFT
-        elif not isinstance(self.method[SpaceDiscretization], str):
-            if self.method[SpaceDiscretization].mro()[1] is FiniteDifference:
-                op_class = CurlFD
-        else:
-            raise ValueError("The required Space Discretization is\
-                not available for Curl.")
-        return op_class
-
-    def get_work_properties(self):
-        super(Curl, self).get_work_properties()
-        res = {'rwork': None, 'iwork': None}
-        # Only FD methods need internal work space
-        if self.method[SpaceDiscretization].mro()[1] is FiniteDifference:
-            toporef = self.variables[self.invar]
-            res = NumCurl.get_work_properties(toporef)
-        return res
-
-
-class Grad(Differential):
-    """Computes outVar = nabla(inVa)
-    """
-
-    def _init_space_discr_method(self):
-        if self.method[SpaceDiscretization].mro()[1] is FiniteDifference:
-            op_class = GradFD
-        else:
-            raise ValueError("The required Space Discretization is\
-                not available for Grad.")
-        return op_class
-
-    def get_work_properties(self):
-        return {'rwork': None, 'iwork': None}
-
-
-class DivAdvection(Differential):
-    """Computes outVar = -nabla .(invar . nabla(nvar))
-    """
-    def _init_space_discr_method(self):
-        if self.method[SpaceDiscretization].mro()[1] is FiniteDifference:
-            op_class = DivAdvectionFD
-        else:
-            raise ValueError("The required Space Discretization is\
-                not available for DivAdvection.")
-        return op_class
-
-    def get_work_properties(self):
-        super(DivAdvection, self).get_work_properties()
-        res = {'rwork': None, 'iwork': None}
-        # Only FD methods need internal work space
-        if self.method[SpaceDiscretization].mro()[1] is FiniteDifference:
-            toporef = self.variables[self.invar]
-            res = NumDA.get_work_properties(toporef)
-        return res
diff --git a/hysop/old/operator.old/diffusion.py b/hysop/old/operator.old/diffusion.py
deleted file mode 100644
index 97dbee6a7b3d666bab5b7215642f99059e7d3f29..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/diffusion.py
+++ /dev/null
@@ -1,149 +0,0 @@
-# -*- coding: utf-8 -*-
-"""Operator for diffusion problem.
-
-See :ref:`diffusion` in HySoP user guide.
-
-
-"""
-from hysop.operator.computational import Computational
-from hysop.operator.discrete.diffusion_fft import DiffusionFFT,\
-    CurlAndDiffusionFFT
-from hysop.constants import debug
-from hysop.operator.continuous import opsetup
-from hysop.methods import SpaceDiscretization, GhostUpdate
-from hysop import __FFTW_ENABLED__, __GPU_ENABLED__
-if __GPU_ENABLED__:
-    from hysop.backend.device.opencl.gpu_diffusion import GPUDiffusion
-
-
-class Diffusion(Computational):
-    """Diffusion of a field.
-    """
-
-    _authorized_methods = ['fftw', 'on_gpu']
-
-    @debug
-    def __init__(self, viscosity, vorticity, **kwds):
-        """Diffusion operator.
-        See :ref:`diffusion` in HySoP user guide.
-
-        Parameters
-        ----------
-        viscosity : double
-             constant viscosity value
-        vorticity : :class:`~hysop.fields.continuous_field.Field`
-             vorticity field, in/out parameter.
-        kwds : base class parameters.
-        """
-        assert 'variables' not in kwds, 'variables parameter is useless.'
-        super(Diffusion, self).__init__(variables=[vorticity], **kwds)
-        if 'method' not in kwds:
-            import hysop.default_methods as default
-            self.method = default.DIFFUSION
-        else:
-            self.method = kwds.pop('method')
-        
-        msg = 'Diffusion : unknown method for space discretization'
-        assert self.method[SpaceDiscretization] in self._authorized_methods,\
-               msg
-
-        msg = 'Diffusion : on_gpu resolution is not available on your system.'
-        if self.method[SpaceDiscretization] is 'on_gpu':
-            assert __GPU_ENABLED__, msg
-
-        # input/output field, solution of the problem
-        self.vorticity = vorticity
-        # viscosity
-        self.viscosity = viscosity
-        # kwds required for gpu discrete operator
-        self.kwds = kwds.copy()
-        if 'discretization' in self.kwds:
-            self.kwds.pop('discretization')
-        self.input = [self.vorticity]
-        self.output = [self.vorticity]
-
-    def discretize(self):
-        if self.method[SpaceDiscretization] is 'fftw' and __FFTW_ENABLED__:
-            super(Diffusion, self)._fftw_discretize()
-        elif self.method[SpaceDiscretization] is 'on_gpu':
-            super(Diffusion, self)._standard_discretize()
-        else:
-            raise AttributeError("Method not yet implemented.")
-
-    @debug
-    @opsetup
-    def setup(self, rwork=None, iwork=None):
-        if self.method[SpaceDiscretization] is 'fftw':
-            dop = DiffusionFFT
-        elif self.method[SpaceDiscretization] is 'on_gpu':
-            dop = GPUDiffusion
-
-        self.discrete_op = dop(
-            viscosity=self.viscosity,
-            vorticity=self.discrete_fields[self.vorticity],
-            method=self.method, **self.kwds)
-        self._is_uptodate = True
-
-    def get_work_properties(self):
-        return {'rwork': None, 'iwork': None}
-
-
-class CurlAndDiffusion(Computational):
-    """Solve curl of velocity and its diffusion in one shot
-    (in Fourier domain).
-    """
-
-    _authorized_methods = ['fftw']
-
-    @debug
-    def __init__(self, viscosity, velocity, vorticity, **kwds):
-        """Diffusion operator.
-        See :ref:`diffusion` in HySoP user guide.
-
-        Parameters
-        ----------
-        viscosity : double
-             constant viscosity value
-        velocity : :class:`~hysop.fields.continuous_field.Field
-             vorticity field, in/out parameter.
-        vorticity : :class:`~hysop.fields.continuous_field.Field
-             vorticity field, in/out parameter.
-        kwds : base class parameters.
-
-        Notes:
-        * vorticity parameter is optional since the field
-        can be provided in the usual way for operators using
-        variables parameter::
-
-            op = Diffusion(variables={vorticity: topo}, ...)
-
-        """
-        self.viscosity = viscosity
-        self.velocity = velocity
-        self.vorticity = vorticity
-        msg = 'fftw required for CurlAndDiffusion. '
-        msg += 'Try to recompile with WITH_FFTW=ON'
-        assert __FFTW_ENABLED__, msg
-        msg = 'CurlAndDiffusion : unknown method for space discretization'
-        self.method = {SpaceDiscretization: 'fftw', GhostUpdate: True}
-        self.input = [self.velocity]
-        self.output = [self.vorticity]
-        assert 'variables' not in kwds, 'variables parameter is useless.'
-        super(CurlAndDiffusion, self).__init__(
-            variables=[velocity, vorticity], **kwds)
-
-    def discretize(self):
-        super(CurlAndDiffusion, self)._fftw_discretize()
-
-    @debug
-    @opsetup
-    def setup(self, rwork=None, iwork=None):
-        self.discrete_op = CurlAndDiffusionFFT(
-            viscosity=self.viscosity,
-            velocity=self.discrete_fields[self.velocity],
-            vorticity=self.discrete_fields[self.vorticity],
-            method=self.method)
-        self._is_uptodate = True
-
-    def get_work_properties(self):
-        return {'rwork': None, 'iwork': None}
diff --git a/hysop/old/operator.old/discrete/__init__.py b/hysop/old/operator.old/discrete/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/hysop/old/operator.old/discrete/absorption_bc.py b/hysop/old/operator.old/discrete/absorption_bc.py
deleted file mode 100755
index c28f35a97bc4379886e8efd7844ba776513030cf..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/discrete/absorption_bc.py
+++ /dev/null
@@ -1,150 +0,0 @@
-# -*- coding: utf-8 -*-
-"""Operator to kill the vorticity at the outlet boundary
-(i.e. removal of the periodic BC in the flow direction
-by vorticity absorption in order to set the far field
-velocity to u_inf at the inlet)
-"""
-
-from hysop.constants import debug, np
-from hysop.operator.discrete.discrete import DiscreteOperator
-from hysop.fields.variable_parameter import VariableParameter
-from hysop.tools.profiler import profile
-from hysop.tools.numpywrappers import npw
-from hysop.constants import YDIR, ZDIR
-from hysop.tools.misc import WorkSpaceTools
-
-
-class AbsorptionBC(DiscreteOperator):
-    """
-    The periodic boundary condition is modified at the outlet
-    in the flow direction in order to discard
-    in the downstream region the eddies coming
-    periodically from the outlet.
-    The vorticity absorption conserves div(omega)=0.
-    The far field velocity is set to u_inf at the inlet.
-    """
-
-    @debug
-    def __init__(self, velocity, vorticity, req_flowrate,
-                 absorption_box, filter_func=None, **kwds):
-        """
-        Parameters
-        ----------
-        velocity, vorticity : :class:`~hysop.fields.discrete_field.DiscreteField`
-        req_flowrate : double
-            required value for the flow rate
-        absorption_box : :class:`~hysop.subsets.SubBox`
-            a box representing the area where filter is applied.
-        filter_func: list of python functions, optional
-            functions used to compute the filter and its differential.
-        **kwds : extra parameters for base class
-
-
-        Notes
-        -----
-        * if set, filter_func[0] and filter_func[1] must be python function
-        returning a numpy array. For example to apply a sine inside
-        the absorption area use :
-
-        .. code::
-
-            def func(x):
-                return np.sin(x)
-
-        """
-        assert 'variables' not in kwds, 'variables parameter is useless.'
-        # velocity discrete field
-        self.velocity = velocity
-        # vorticity discrete field
-        self.vorticity = vorticity
-        self.absorption_box = absorption_box
-        super(AbsorptionBC, self).__init__(
-            variables=[velocity, vorticity], **kwds)
-        # If 2D problem, vorticity must be a scalar
-        if self._dim == 2:
-            assert self.vorticity.nb_components == 1
-        assert (self._dim > 2),\
-            "Wrong problem dimension: only 3D cases are implemented."
-        topo = self.vorticity.topology
-        xcoords = topo.mesh.coords[0]
-        self.input = self.variables
-        self.output = [self.vorticity]
-        # Expected value for the flow rate through self.surfRef
-        msg = 'Wrong type or length for input req_flowrate.'
-        if not isinstance(req_flowrate, VariableParameter):
-            self.req_flowrate = VariableParameter(
-                data=np.asarray(req_flowrate))
-            assert np.asarray(req_flowrate).size == self._dim, msg
-        else:
-            self.req_flowrate = req_flowrate
-
-        t_dir = [1, 2]
-        dsurf = npw.prod(self.absorption_box.real_length[topo][t_dir])
-        self._inv_ds = 1. / dsurf
-        if self.absorption_box.on_proc[topo]:
-            ind = self.absorption_box.ind[topo][0]
-
-            if filter_func is None:
-                self.absorption_filter = None
-                self.diff_filter = None
-                self._set_default_filter(xcoords[ind[0]])
-            else:
-                assert isinstance(filter_func, list)
-                self.absorption_filter = filter_func[0](xcoords[ind[0]])
-                self.diff_filter = filter_func[1](xcoords[ind[0]])
-
-    def _set_default_filter(self, x):
-        """Default values for the filter in the absorption box
-        """
-        xb = x[0]
-        xe = x[-1]
-        xc = xb + (xe - xb) / 2.0
-        eps = 10.
-        form = np.tanh(eps * (x - xc))
-        self.absorption_filter = form - np.tanh(eps * (xe - xc))
-        coeff = 1.0 / (np.tanh(eps * (xb - xc)) - np.tanh(eps * (xe - xc)))
-        self.absorption_filter[...] *= coeff
-        self.diff_filter = eps * (1.0 - form ** 2)
-        self.diff_filter *= coeff
-
-    def _set_work_arrays(self, rwork=None, iwork=None):
-
-        # Reference shape comes from the box at the end of the domain
-        subshape = tuple(
-            self.absorption_box.mesh[self.vorticity.topology].resolution)
-        self._rwork = WorkSpaceTools.check_work_array(1, subshape, rwork)
-
-    @debug
-    @profile
-    def apply(self, simulation=None):
-        if not self.absorption_box.on_proc[self.vorticity.topology]:
-            return
-        # the required flowrate value is updated (depending on time)
-        self.req_flowrate.update(simulation)
-        # \warning : the flow rate value is divided by area of input surf.
-        req_flowrate_val = self.req_flowrate.data * self._inv_ds
-
-        ind = self.absorption_box.ind[self.vorticity.topology][0]
-        # 1 - filter * periodic vorticity:
-        for d in xrange(self.vorticity.nb_components):
-            np.multiply(self.vorticity[d][ind], self.absorption_filter,
-                        self.vorticity[d][ind])
-
-        # 2 - nabla filter X periodic velocity + nabla(1-filter) X uinf
-        # uinf - vz
-        np.subtract(req_flowrate_val, self.velocity[ZDIR][ind],
-                    self._rwork[0])
-        # wk = filter' * (uinf - vz )
-        np.multiply(self.diff_filter, self._rwork[0], self._rwork[0])
-        # add the result into vorticity
-        np.add(self.vorticity.data[YDIR][ind], self._rwork[0],
-               self.vorticity.data[YDIR][ind])
-        # and now, w_z ...
-        # vy - uinf
-        np.subtract(self.velocity[YDIR][ind], req_flowrate_val,
-                    self._rwork[0])
-        # wk = filter' * (uinf - vy )
-        np.multiply(self.diff_filter, self._rwork[0], self._rwork[0])
-        # add the result into vorticity
-        np.add(self.vorticity.data[ZDIR][ind], self._rwork[0],
-               self.vorticity.data[ZDIR][ind])
diff --git a/hysop/old/operator.old/discrete/adapt_timestep.py b/hysop/old/operator.old/discrete/adapt_timestep.py
deleted file mode 100755
index 2ccbf90a7c716e13c74907e6b1b3913fc04e77af..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/discrete/adapt_timestep.py
+++ /dev/null
@@ -1,273 +0,0 @@
-# -*- coding: utf-8 -*-
-"""Discrete operator to Compute the time step according to the flow fields.
-"""
-
-from hysop.constants import debug
-from hysop.methods import TimeIntegrator, SpaceDiscretization
-from hysop.operator.discrete.discrete import DiscreteOperator
-from hysop.numerics.differential_operations import MaxDiagGradV, \
-    DiagAndStretch, StrainCriteria, StretchLike, StrainAndStretch
-from hysop.tools.numpywrappers import npw
-from hysop.numerics.update_ghosts import UpdateGhosts
-from hysop.core.mpi import MPI
-from hysop.constants import np, hysop.core.mpi_REAL
-from hysop.tools.profiler import profile
-from hysop.problem.simulation import Simulation
-from hysop.tools.misc import WorkSpaceTools
-
-
-class AdaptiveTimeStepD(DiscreteOperator):
-    """
-    The adaptative Time Step is computed as:
-
-    dt_adapt = min (dt_advection, dt_stretching, dt_cfl)
-    """
-
-    authorized_criteria = ['vort', 'gradU', 'stretch', 'cfl', 'deform']
-
-    @debug
-    def __init__(self, velocity, vorticity, simulation,
-                 lcfl=0.125, cfl=0.5, criteria=None,
-                 time_range=None, maxdt=9999., **kwds):
-        """
-        Parameters
-        ----------
-        velocity, vorticity : :class:`~hysop.fields.discrete_field.DiscreteFields`
-            discrete fields used to update the time steps. Read only.
-        simulation : :class:`~hysop.problem.simulation.Simulation`
-        lcfl : double, optional
-            the lagrangian CFL coefficient used for advection stability
-        cfl : double, optional
-            CFL coefficient
-        criteria : list of strings, optional
-            name of the criteria used to compute the new time step.
-            See notes below.
-        time_range : list of two integers, optional
-            use to define a 'window' in which the current operator is applied.
-            time_range = [start, end], outside this range, the operator
-            has no effect. Start/end are iteration numbers.
-            Default = [2, end of simu]
-        maxdt : double, optional
-            maximum value allowed for time step. Default = 9999.
-        kwds : arguments passed to base class.
-
-        Notes
-        -----
-        * This operator has no effect on input variables.
-        * Authorized criteria are:
-            * criteria for the advection scheme, one of :
-                1. 'vort' (default)
-                2. 'gradU'
-                3. 'strain'
-            * criteria for the stretching scheme :
-                4. 'stretch'
-            * cfl-like :
-                5. 'cfl'
-        * Computes a 'diagnostics' vector :
-          diagnostics = (time, time_step, c1, c2, ...)
-          ci = time step computed from the criterium number i in the list
-          above.
-        """
-        # velocity discrete field
-        self.velocity = velocity
-        # vorticity discrete field
-        self.vorticity = vorticity
-        # adaptative time step variable
-        assert isinstance(simulation, Simulation)
-        self.simulation = simulation
-        # Build the user required function list
-        self._used_functions = {}
-        # local buffer used to compute max values.
-        self._result = None
-        # position in result (only needed when stretch criterium is used)
-        self._pos = 0
-        # name of (optional) differential operator required to compute
-        # the criteria.
-        self._diffop_name = self._init_differential_operator(criteria)
-        assert 'variables' not in kwds, 'variables parameter is useless.'
-        super(AdaptiveTimeStepD, self).__init__(
-            variables=[velocity, vorticity], **kwds)
-
-        self.input = self.variables
-        self.output = []
-        # Courant Fredrich Levy coefficient
-        self.cfl = cfl
-        # Lagrangian CFL coefficient
-        self.lcfl = lcfl
-        # Max. timestep
-        self.maxdt = maxdt
-
-        # Time range
-        if time_range is None:
-            time_range = [2, np.infty]
-        self.time_range = time_range
-
-        # local buffer :
-        # [time, dt, d1, d2, d3, d4, d5]
-        # for d1...d5 see computation details in apply.
-        self.diagnostics = npw.zeros((7))
-        # list of indices of diagnostics of interest.
-        self._diag_ind = []
-        # True if ghost points synchro is required
-        self._needs_synchro = False
-        # The (optional) differential operation used to
-        # compute the criterium
-        self._diff_op = None
-        # Coefficient used to compute streching stability criterium
-        self.coeff_stretch = self.method[TimeIntegrator].stability_coeff()
-        # Init functions ...
-        if self._diffop_name is not None:
-            self._diff_op = self._diffop_name(
-                topo=self.velocity.topology,
-                method=self.method[SpaceDiscretization],
-                work=self._rwork)
-            self._needs_synchro = True
-            # Ghost synchronisation operator.
-            if 'vort' in criteria and len(criteria) == 1:
-                self._synchronize = UpdateGhosts(self.vorticity.topology,
-                                                 self.vorticity.nb_components)
-                self._synchr_buff = self.vorticity.data
-            elif 'vort' in criteria:
-                self._synchronize = UpdateGhosts(
-                    self.velocity.topology,
-                    self.vorticity.nb_components + self.velocity.nb_components)
-                self._synchr_buff = self.velocity.data + self.vorticity.data
-
-            else:
-                self._synchronize = UpdateGhosts(
-                    self.velocity.topology,
-                    self.velocity.nb_components)
-                self._synchr_buff = self.velocity.data
-
-    def _set_work_arrays(self, rwork=None, iwork=None):
-        """Allocate local work spaces (if needed)
-        """
-        if self._diffop_name is not None:
-            topo = self.velocity.topology
-            wkp = self._diffop_name.get_work_properties(topo)
-            lwork = len(wkp['rwork'])
-            subshape = wkp['rwork'][0]
-            self._rwork = WorkSpaceTools.check_work_array(lwork,
-                                                          subshape, rwork)
-
-    def _init_differential_operator(self, criteria):
-        """Select the required differential operator (if any)
-        depending on the chosen criteria
-
-        Notes:
-        * required to check/allocate internal work arrays
-        * required to connect the functions needed to compute the criteria
-        * MUST be called before parent class init
-        """
-        # Dictionnary of the available diagnostics functions
-        # all_functions[name] =
-        # (Index in self.diagnostics, function)
-        all_functions = {
-            'vort': (2, self._compute_vort),
-            'gradU': (3, self._compute_grad_u),
-            'strain': (4, self._compute_strain),
-            'stretch': (5, self._compute_stretch),
-            'cfl': (6, self._compute_cfl)
-            }
-
-        # select functions of interest, from input criteria list.
-        self._used_functions = {all_functions[k][0]: all_functions[k][1]
-                                for k in criteria if k in all_functions}
-        diffop = None
-        dimension = self.velocity.domain.dim
-        if 'gradU' in criteria:
-            diffop = MaxDiagGradV
-            self._result = npw.zeros(dimension)
-        if 'stretch' in criteria:
-            diffop = StretchLike
-            self._result = npw.zeros(dimension)
-            self._pos = 0
-        if 'strain' in criteria:
-            diffop = StrainCriteria
-            self._result = npw.zeros(dimension)
-        if 'stretch' in criteria and 'gradU' in criteria:
-            diffop = DiagAndStretch
-            self._result = npw.zeros(2 * dimension)
-            self._pos = dimension
-        if 'stretch' in criteria and 'strain' in criteria:
-            diffop = StrainAndStretch
-            self._result = npw.zeros(2 * dimension)
-            self._pos = dimension
-        return diffop
-
-    def _compute_grad_u(self):
-        """Compute criterium ...
-        """
-        self._result = self._diff_op(self.velocity.data, self._result)
-        return self.lcfl / max(self._result[:self._dim])
-
-    def _compute_stretch(self):
-        """Compute criterium ...
-        """
-        self._result = self._diff_op(self.velocity.data, self._result)
-        return self.coeff_stretch / max(
-            self._result[self._pos:self._pos + self._dim])
-
-    def _compute_cfl(self):
-        """maxima of velocity : needed for CFL based time step
-        """
-        coef = self.cfl * self.velocity.topology.mesh.space_step[0]
-        return coef / np.max([np.max(np.abs(v_c))
-                              for v_c in self.velocity.data])
-
-    def _compute_vort(self):
-        """maxima of vorticity :
-        needed for advection stability condition
-        """
-        return self.lcfl / np.max([np.max(np.abs(w_c))
-                                   for w_c in self.vorticity.data])
-
-    def _compute_strain(self):
-        """maxima of strain tensor:
-        needed for advection stability condition
-        """
-        self._result = self._diff_op(self.velocity.data, self._result)
-        return self.lcfl / np.max(self._result[:self._dim])
-
-    @debug
-    @profile
-    def apply(self, simulation=None):
-        if simulation is not None:
-            assert self.simulation is simulation
-        ind = self.velocity.topology.mesh.compute_index
-        # current time and iteration number
-        time = self.simulation.time
-        iteration = self.simulation.current_iteration
-        # upper value allowed for iterations
-        nmax = min(self.simulation.max_iter, self.time_range[1])
-        buff = npw.zeros(7)
-        buff[0] = time
-        if iteration >= self.time_range[0] and iteration <= nmax:
-            # Apply each function listed in used_functions
-            if self._needs_synchro:
-                # Synchronize ghost points of velocity
-                self._synchronize(self._synchr_buff)
-
-            for func in self._used_functions:
-                # func: (Index in self.diagnostics, function, is gradU needed)
-                buff[func] = self._used_functions[func]()
-
-            # mpi reduction
-            self.velocity.topology.comm.Allreduce(
-                sendbuf=[buff, 7, hysop.core.mpi_REAL],
-                recvbuf=[self.diagnostics, 7, hysop.core.mpi_REAL],
-                op=MPI.MAX)
-            ind = self._used_functions.keys()
-            time_step = np.min(list(self.diagnostics[ind]) +
-                               [self.maxdt])
-            self.diagnostics[0] = time
-            self.diagnostics[1] = time_step
-
-            if self._writer is not None and self._writer.do_write(iteration):
-                self._writer.buffer[0, :] = self.diagnostics
-                self._writer.write()
-
-            # Update simulation time step with the new dt
-            self.simulation.update_time_step(time_step)
-            # Warning this update is done only for the current MPI task!
-            # See wait function in base class.
diff --git a/hysop/old/operator.old/discrete/baroclinic.py b/hysop/old/operator.old/discrete/baroclinic.py
deleted file mode 100644
index 5e14e8e3beffa23d223d3b50badaf794acdbfa68..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/discrete/baroclinic.py
+++ /dev/null
@@ -1,177 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-@file operator/discrete/baroclinic.py
-Discrete MultiPhase Rot Grad P
-"""
-from hysop.operator.discrete.discrete import DiscreteOperator
-import hysop.numerics.differential_operations as diff_op
-from hysop.constants import debug, XDIR, YDIR, ZDIR, np
-from hysop.methods import SpaceDiscretization
-from hysop.numerics.update_ghosts import UpdateGhosts
-from hysop.tools.profiler import ftime
-from hysop.tools.numpywrappers import npw
-
-
-class Baroclinic(DiscreteOperator):
-    """
-    TODO : describe this operator ...
-    """
-    @debug
-    def __init__(self, velocity, vorticity, density, viscosity,
-                 formula=None, **kwds):
-        """
-        Constructor.
-        Create the baroclinic term -GradRho/rho x GradP/rho
-        in N.S equation
-        @param velocity : discretization of the velocity field
-        @param vorticity : discretization of the vorticity field
-        @param density : discretization of a scalar field
-        @param viscosity
-        @param formula : formula to initialize u^(n-1)
-        Note : this should be the formula used to initialize
-        the velocity field
-        """
-        assert 'variables' not in kwds, 'variables parameter is useless.'
-        if 'method' not in kwds:
-            import hysop.default_methods as default
-            kwds['method'] = default.BAROCLINIC
-
-        super(Baroclinic, self).__init__(variables=[velocity, vorticity,
-                                                    density], **kwds)
-        self.velocity = velocity
-        self.vorticity = vorticity
-        self.density = density
-        self.viscosity = viscosity
-        self.input = [self.velocity, self.vorticity, self.density]
-        self.output = [self.vorticity]
-
-        # prepare ghost points synchro for velocity (vector)
-        # and density (scalar) fields
-        self._synchronizeVel = UpdateGhosts(self.velocity.topology,
-                                            self.velocity.nb_components)
-        self._synchronizeRho = UpdateGhosts(self.density.topology,
-                                            self.density.nb_components)
-
-        self._result = [npw.zeros_like(d) for d in self.velocity.data]
-        self._tempGrad = [npw.zeros_like(d) for d in self.velocity.data]
-        self._baroclinicTerm = [npw.zeros_like(d) for d in self.velocity.data]
-
-        self._laplacian = diff_op.Laplacian(
-            self.velocity.topology,
-            indices=self.velocity.topology.mesh.compute_index)
-        self._gradOp = diff_op.GradS(
-            self.velocity.topology,
-            indices=self.velocity.topology.mesh.compute_index,
-            method=self.method[SpaceDiscretization])
-
-        # Gravity vector
-        self._gravity = npw.asrealarray([0., 0., -9.81])
-
-        # Time stem of the previous iteration
-        self._old_dt = None
-
-    def initialize_velocity(self):
-        """Initialize the temporary array 'result' with the velocity"""
-        topo = self.velocity.topology
-        compute_index = topo.mesh.compute_index
-        for d in xrange(self.velocity.dimension):
-            self._result[d][compute_index] = -self.velocity[d][compute_index]
-
-    @debug
-    def apply(self, simulation=None):
-        """Computes the baroclinic term: BT = -grad(P)/rho
-        BT = grad(rho)/rho x (du/dt + (u . grad)u - nu laplacien(u) - g)
-        then solves
-        dw/dt = -BT
-        """
-        if simulation is None:
-            raise ValueError("Missing simulation value for computation.")
-
-        dt = simulation.time_step
-        if self._old_dt is None:
-            self._old_dt = dt
-        # Synchronize ghost points of velocity and density
-        self._synchronizeVel(self.velocity.data)
-        self._synchronizeRho(self.density.data)
-
-        topo = self.velocity.topology
-        compute_index = topo.mesh.compute_index
-
-        # result = du/dt = (u^(n)-u^(n-1))/dt
-        # result has been initialized with -u^(n-1)
-        # and _old_dt equals to the previous dt
-        for d in xrange(self.velocity.dimension):
-            self._result[d][compute_index] += self.velocity[d][compute_index]
-            self._result[d][compute_index] /= self._old_dt
-
-        # result = result + (u . grad)u
-        # (u. grad)u = (u.du/dx + v.du/dy + w.du/dz ;
-        #               u.dv/dx + v.dv/dy + w.dv/dz ;
-        #               u.dw/dx + v.dw/dy + w.dw/dz)
-        # Add (u. grad)u components directly in result
-        self._tempGrad = self._gradOp(
-            self.velocity[XDIR:XDIR + 1], self._tempGrad)
-        # result[X] = result[X] + ((u. grad)u)[X]
-        #           = result[X] + u.du/dx + v.du/dy + w.du/dz
-        for d in xrange(self.velocity.dimension):
-            self._result[XDIR][compute_index] += \
-                self.velocity[d][compute_index] * self._tempGrad[d][compute_index]
-
-        self._tempGrad = self._gradOp(
-            self.velocity[YDIR:YDIR + 1], self._tempGrad)
-        # result[Y] = result[Y] + ((u. grad)u)[Y]
-        #           = result[Y] + u.dv/dx + v.dv/dy + w.dv/dz
-        for d in xrange(self.velocity.dimension):
-            self._result[YDIR][compute_index] += \
-                self.velocity[d][compute_index] * self._tempGrad[d][compute_index]
-
-        self._tempGrad = self._gradOp(
-            self.velocity[ZDIR:ZDIR + 1], self._tempGrad)
-        # result[Z] = result[Z] + ((u. grad)u)[Z]
-        #           = result[Z] + u.dw/dx + v.dw/dy + w.dw/dz
-        for d in xrange(self.velocity.dimension):
-            self._result[ZDIR][compute_index] += \
-                self.velocity[d][compute_index] * self._tempGrad[d][compute_index]
-
-        # result = result - nu*\Laplacian u (-g) = gradP/rho
-        for d in xrange(self.velocity.dimension):
-            self._tempGrad[d:d + 1] = self._laplacian(
-                self.velocity[d:d + 1], self._tempGrad[d:d + 1])
-        for d in xrange(self.velocity.dimension):
-            self._tempGrad[d][compute_index] *= self.viscosity
-            self._result[d][compute_index] -= self._tempGrad[d][compute_index]
-
-        # gravity term : result = result - g
-        for d in xrange(self.velocity.dimension):
-            self._result[2][compute_index] -= self._gravity[d]
-
-        # baroclinicTerm = -(gradRho/rho) x (gradP/rho)
-        self._tempGrad = self._gradOp(self.density[0:1], self._tempGrad)
-        for d in xrange(self.velocity.dimension):
-            self._tempGrad[d][compute_index] = \
-                self._tempGrad[d][compute_index] / self.density[0][compute_index]
-
-        self._baroclinicTerm[0][compute_index] = \
-            - self._tempGrad[1][compute_index] * self._result[2][compute_index]
-        self._baroclinicTerm[0][compute_index] += \
-            self._tempGrad[2][compute_index] * self._result[1][compute_index]
-        self._baroclinicTerm[1][compute_index] = \
-            - self._tempGrad[2][compute_index] * self._result[0][compute_index]
-        self._baroclinicTerm[1][compute_index] += \
-            self._tempGrad[0][compute_index] * self._result[2][compute_index]
-        self._baroclinicTerm[2][compute_index] = \
-            - self._tempGrad[0][compute_index] * self._result[1][compute_index]
-        self._baroclinicTerm[2][compute_index] += \
-            self._tempGrad[1][compute_index] * self._result[0][compute_index]
-
-        # vorti(n+1) = vorti(n) + dt * baroclinicTerm
-        for d in xrange(self.vorticity.dimension):
-            self._baroclinicTerm[d][compute_index] *= dt
-            self.vorticity[d][compute_index] += self._baroclinicTerm[d][compute_index]
-
-
-        # reinitialise for next iteration
-        # velo(n-1) update
-        for d in xrange(self.velocity.dimension):
-            self._result[d][compute_index] = -self.velocity.data[d][compute_index]
-        self._old_dt = dt
diff --git a/hysop/old/operator.old/discrete/baroclinic_from_rhs.py b/hysop/old/operator.old/discrete/baroclinic_from_rhs.py
deleted file mode 100644
index 6d0e69627846cfe55bdac20c2d5572e3fbe8b1d0..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/discrete/baroclinic_from_rhs.py
+++ /dev/null
@@ -1,61 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-@file operator/discrete/baroclinic_from_rhs.py
-Discrete MultiPhase Rot Grad P
-"""
-from hysop.operator.discrete.discrete import DiscreteOperator
-import hysop.numerics.differential_operations as diff_op
-from hysop.constants import debug, XDIR, YDIR, ZDIR, np
-from hysop.methods import SpaceDiscretization
-from hysop.numerics.update_ghosts import UpdateGhosts
-from hysop.tools.profiler import ftime
-from hysop.tools.numpywrappers import npw
-
-
-class BaroclinicFromRHS(DiscreteOperator):
-    """
-    TODO : describe this operator ...
-    """
-    @debug
-    def __init__(self, vorticity, rhs, **kwds):
-        """
-        Constructor.
-        Create the baroclinic term in the N.S. equations with a given
-        -GradRho/rho x GradP/rho term as the rhs field.
-        @param vorticity : discretization of the vorticity field
-        @param rhs : right hand side of the term
-        """
-        assert 'variables' not in kwds, 'variables parameter is useless.'
-        if 'method' not in kwds:
-            import hysop.default_methods as default
-            kwds['method'] = default.BAROCLINIC
-
-        super(BaroclinicFromRHS, self).__init__(
-            variables=[vorticity, rhs], **kwds)
-        self.vorticity = vorticity
-        self.rhs = rhs
-        self.input = [self.vorticity, self.rhs]
-        self.output = [self.vorticity]
-
-        # prepare ghost points synchro for velocity (vector)
-        # and density (scalar) fields
-        self._synchronizeRHS = UpdateGhosts(self.rhs.topology,
-                                            self.rhs.nb_components)
-
-    @debug
-    def apply(self, simulation=None):
-        """Solves dw/dt = -RHS
-        """
-        if simulation is None:
-            raise ValueError("Missing simulation value for computation.")
-
-        dt = simulation.time_step
-        # Synchronize ghost points of velocity and density
-        #self._synchronizeRHS(self.rhs.data)
-
-        topo = self.vorticity.topology
-        compute_index = topo.mesh.compute_index
-
-        # vorti(n+1) = vorti(n) + dt * baroclinicTerm
-        for d in xrange(self.vorticity.dimension):
-            self.vorticity[d][compute_index] += self.rhs[d][compute_index] * dt
diff --git a/hysop/old/operator.old/discrete/custom.py b/hysop/old/operator.old/discrete/custom.py
deleted file mode 100644
index b08a904d735a2daf50df504b955165a6a0e43d97..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/discrete/custom.py
+++ /dev/null
@@ -1,55 +0,0 @@
-"""User-defined discrete operator"""
-
-from hysop.operator.discrete.discrete import DiscreteOperator
-
-
-class Custom(DiscreteOperator):
-    """User-defined operator: action defined by an external function.
-    """
-    def __init__(self, function, in_fields, out_fields=None, **kwds):
-        """
-
-        Parameters
-        ----------
-        in_fields: list of :class:`~hysop.fields.discrete_field.DiscreteField`
-             input fields args. for function, see notes below
-        out_fields: list of :class:`~hysop.fields.discrete_field.DiscreteField`
-             output fields args for function, see notes below
-        function: python function
-             a user defined function, called by this op.apply method.
-
-        Notes
-        -----
-        A function is used to set the behavior of the current operator,
-        during apply call.
-        This function must look like::
-
-            def some_func(simulation, in_fields, out_fields=None, diag=None):
-                # do things ...
-
-        and compute out_fields values.
-        """
-        # callback for apply function
-        self.function = function
-        super(Custom, self).__init__(**kwds)
-        # in/out fields must obviously belong to variables
-        self.input = in_fields
-        if out_fields is not None:
-            self.input += out_fields
-            self.output = out_fields
-        msg = 'Custom: all in/out fields must belong to op variables.'
-        assert set(self.input).intersection(set(self.variables)) == \
-            set(self.input), msg
-        self._in_fields = in_fields
-        self._out_fields = out_fields
-
-    def apply(self, simulation=None):
-        if self._writer is not None:
-            diagnostics = self._writer.buffer
-        else:
-            diagnostics = None
-        self.function(simulation, self._in_fields, self._out_fields,
-                      diagnostics)
-        ite = simulation.current_iteration
-        if self._writer is not None and self._writer.do_write(ite):
-            self._writer.write()
diff --git a/hysop/old/operator.old/discrete/density.py b/hysop/old/operator.old/discrete/density.py
deleted file mode 100644
index d72df5ca7e9e7ca7d98f99c2d4dc8402af3704ff..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/discrete/density.py
+++ /dev/null
@@ -1,59 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-@file operator/discrete/density.py
-Discrete MultiPhase Rot Grad P
-"""
-from hysop.operator.discrete.discrete import DiscreteOperator
-from hysop.constants import np, debug
-from hysop.tools.profiler import profile
-
-
-class DensityVisco_d(DiscreteOperator):
-    """
-    To be documented ...
-    """
-    @debug
-    def __init__(self, density, viscosity,
-                 densityVal=None, viscoVal=None, **kwds):
-        """
-        @param operator.
-        """
-        if 'variables' in kwds:
-            super(DensityVisco_d, self).__init__(**kwds)
-            self.density = self.variables[0]
-            self.viscosity = self.variables[1]
-        else:
-            super(DensityVisco_d, self).__init__(variables=[density,
-                                                            viscosity],
-                                                 **kwds)
-            self.density = density
-            self.viscosity = viscosity
-
-        self.densityVal = densityVal
-        self.viscoVal = viscoVal
-        self.input = [self.density, self.viscosity]
-        self.output = [self.density, self.viscosity]
-
-        # Note FP : what must be done if densityVal or viscoVal is None???
-
-    @debug
-    @profile
-    def apply(self, simulation=None):
-        assert simulation is not None, \
-            "Missing simulation value for computation."
-
-        compute_index = self.density.topology.mesh.compute_index
-
-        # Density reconstruction
-        if self.density[0][compute_index].all() <= np.absolute(
-                self.densityVal[1] - self.densityVal[0]) / 2.0:
-            self.density[0][compute_index] = self.densityVal[1]
-        else:
-            self.density[0][compute_index] = self.densityVal[0]
-
-        # Viscosity reconstruction :
-        # nu = nu1 + (nu2 - nu1) * (density - rho1)/(rho2 - rho1)
-        self.viscosity.data[0] = self.viscoVal[0] + \
-            (self.viscoVal[1] - self.viscoVal[0]) * \
-            ((self.density.data[0] - self.densityVal[0]) /
-             (self.densityVal[1] - self.densityVal[0]))
diff --git a/hysop/old/operator.old/discrete/differential.py b/hysop/old/operator.old/discrete/differential.py
deleted file mode 100644
index 950a2bd1de1167ab8570d47b706c5406ce835a40..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/discrete/differential.py
+++ /dev/null
@@ -1,183 +0,0 @@
-# -*- coding: utf-8 -*-
-"""Discretization of the differential operators (curl, grad ...)
-
-..currentmodule hysop.operator.discrete.differential
-* :class:`~CurlFFT`,
-* :class:`~CurlFD`,
-* :class:`~GradFD`,
-* :class:`~DivAdvectionFD`,
-* :class:`~Differential` (abstract base class).
-
-"""
-from hysop.constants import debug
-from hysop.operator.discrete.discrete import DiscreteOperator
-from hysop.numerics.differential_operations import Curl, GradV,\
-    DivAdvection
-from abc import ABCMeta, abstractmethod
-from hysop.numerics.update_ghosts import UpdateGhosts
-from hysop.methods import SpaceDiscretization
-import hysop.default_methods as default
-from hysop.tools.profiler import profile
-from hysop.tools.misc import WorkSpaceTools
-from hysop import __FFTW_ENABLED__
-if __FFTW_ENABLED__:
-    from hysop.f2hysop import fftw2py
-
-
-class Differential(DiscreteOperator):
-    """Abstract base class for discrete differential operators
-    """
-    __metaclass__ = ABCMeta
-
-    # @debug
-    # def __new__(cls, *args, **kw):
-    #     return object.__new__(cls, *args, **kw)
-
-    @debug
-    def __init__(self, invar, outvar, **kwds):
-        """
-        Parameters
-        ----------
-        invar, outvar : :class:`~hysop.fields.discrete_field.DiscreteField`
-           input/output scalar or vector fields
-            such that outvar = op(invar).
-        **kwds : base class parameters
-
-        """
-        self.invar = invar
-        self.outvar = outvar
-        if 'method' not in kwds:
-            kwds['method'] = default.DIFFERENTIAL
-        assert 'variables' not in kwds, 'variables parameter is useless.'
-        super(Differential, self).__init__(variables=[invar, outvar],
-                                           **kwds)
-        self.input = [self.invar]
-        self.output = [self.outvar]
-        self._synchronize = None
-        # connexion to a numerical method
-        self._function = None
-
-    @abstractmethod
-    def apply(self, simulation=None):
-        """
-        Abstract interface
-        """
-
-
-class CurlFFT(Differential):
-    """Computes the curl of a discrete field, using Fourier fftw
-    """
-
-    def __init__(self, **kwds):
-        assert __FFTW_ENABLED__, "Run hysop with fftw to use this class."
-        super(CurlFFT, self).__init__(**kwds)
-        if self.domain.dim == 3:
-            self._apply = self._apply_3d
-        elif self.domain.dim == 2:
-            self._apply = self._apply_2d
-
-    def apply(self, simulation=None):
-        self._apply()
-
-    @debug
-    @profile
-    def _apply_3d(self):
-        ghosts_in = self.invar.topology.ghosts()
-        ghosts_out = self.outvar.topology.ghosts()
-        self.outvar.data[0], self.outvar.data[1], self.outvar.data[2] = \
-            fftw2py.solve_curl_3d(self.invar.data[0], self.invar.data[1],
-                                  self.invar.data[2], self.outvar.data[0],
-                                  self.outvar.data[1], self.outvar.data[2],
-                                  ghosts_in, ghosts_out)
-
-    def _apply_2d(self):
-        ghosts_in = self.invar.topology.ghosts()
-        ghosts_out = self.outvar.topology.ghosts()
-        self.outvar.data[0] = \
-            fftw2py.solve_curl_2d(self.invar.data[0], self.invar.data[1],
-                                  self.outvar.data[0],
-                                  ghosts_in, ghosts_out)
-
-    def finalize(self):
-        """Clean memory (fftw plans and so on)
-        """
-        fftw2py.clean_fftw_solver(self.outvar.dimension)
-
-
-class CurlFD(Differential):
-    """Computes the curl of a discrete field, using finite differences.
-    """
-
-    def __init__(self, **kwds):
-
-        super(CurlFD, self).__init__(**kwds)
-
-        # prepare ghost points synchro for velocity
-        self._synchronize = UpdateGhosts(self.invar.topology,
-                                         self.invar.nb_components)
-        self._function = Curl(topo=self.invar.topology, work=self._rwork,
-                              method=self.method[SpaceDiscretization])
-
-    def _set_work_arrays(self, rwork=None, iwork=None):
-        work_prop = Curl.get_work_properties(self.invar.topology)
-        lwork = len(work_prop['rwork'])
-        memshape = work_prop['rwork'][0]
-        self._rwork = WorkSpaceTools.check_work_array(lwork, memshape,
-                                                      rwork)
-
-    @debug
-    @profile
-    def apply(self, simulation=None):
-        self._synchronize(self.invar.data)
-        self.outvar.data = self._function(self.invar.data, self.outvar.data)
-
-
-class GradFD(Differential):
-    """Computes the grad of a discrete field, using finite differences.
-    """
-
-    def __init__(self, **kwds):
-
-        super(GradFD, self).__init__(**kwds)
-        # prepare ghost points synchro for velocity
-        self._synchronize = UpdateGhosts(self.invar.topology,
-                                         self.invar.nb_components)
-        dim = self.domain.dim
-        assert self.outvar.nb_components == dim * self.invar.nb_components
-        self._function = GradV(topo=self.invar.topology,
-                               method=self.method[SpaceDiscretization])
-
-    @debug
-    @profile
-    def apply(self, simulation=None):
-        self._synchronize(self.invar.data)
-        self.outvar.data = self._function(self.invar.data, self.outvar.data)
-
-
-class DivAdvectionFD(Differential):
-    """Computes  outVar = -nabla .(invar . nabla(nvar))
-    """
-
-    def __init__(self, **kwds):
-
-        super(DivAdvectionFD, self).__init__(**kwds)
-        # prepare ghost points synchro for velocity
-        self._synchronize = UpdateGhosts(self.invar.topology,
-                                         self.invar.nb_components)
-        assert self.outvar.nb_components == 1
-        self._function = DivAdvection(topo=self.invar.topology,
-                                      method=self.method[SpaceDiscretization],
-                                      work=self._rwork)
-
-    def _set_work_arrays(self, rwork=None, iwork=None):
-        work_prop = DivAdvection.get_work_properties(self.invar.topology)
-        lwork = len(work_prop['rwork'])
-        memshape = work_prop['rwork'][0]
-        self._rwork = WorkSpaceTools.check_work_array(lwork, memshape,
-                                                      rwork)
-
-    @debug
-    @profile
-    def apply(self, simulation=None):
-        self._synchronize(self.invar.data)
-        self.outvar.data = self._function(self.invar.data, self.outvar.data)
diff --git a/hysop/old/operator.old/discrete/diffusion_fft.py b/hysop/old/operator.old/discrete/diffusion_fft.py
deleted file mode 100644
index ce5495db4b119dce9874df8cbe71e534a94440fc..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/discrete/diffusion_fft.py
+++ /dev/null
@@ -1,134 +0,0 @@
-# -*- coding: utf-8 -*-
-"""Discrete Diffusion operator using FFTW (fortran)
-
-See :ref:`diffusion` in HySoP user guide.
-
-"""
-try:
-    from hysop.f2hysop import fftw2py
-except ImportError:
-    msg = 'fftw package not available for your hysop install.'
-    msg += 'Try to recompile with WITH_FFTW=ON'
-    raise ImportError(msg)
-
-from hysop.operator.discrete.discrete import DiscreteOperator
-from hysop.constants import debug
-from hysop.tools.profiler import profile
-
-
-class DiffusionFFT(DiscreteOperator):
-    """Discretized Poisson operator based on FFTW.
-    See details in hysop.operator.diffusion.
-
-    """
-    @debug
-    def __init__(self, viscosity, vorticity, **kwds):
-        """Discrete diffusion operator, based on fftw solver.
-
-        Parameters
-        ----------
-        viscosity : double
-             constant viscosity value
-        vorticity : :class:`~hysop.fields.discrete_field.DiscreteField`
-             vorticity field, in/out parameter
-        kwds : base class arguments
-        """
-        # Discretization of the solution field
-        self.vorticity = vorticity
-        # Viscosity.
-        self.viscosity = viscosity
-        dim = self.vorticity.dimension
-        if dim == 3:
-            self._apply = self.apply_3d
-        elif dim == 2:
-            self._apply = self.apply_2d
-        else:
-            raise AttributeError(dim + "D case not yet implemented.")
-        # Base class initialisation
-        assert 'variables' not in kwds, 'variables parameter is useless.'
-        super(DiffusionFFT, self).__init__(variables=[vorticity],
-                                           **kwds)
-        self.input = [self.vorticity]
-        self.output = [self.vorticity]
-
-    @debug
-    @profile
-    def apply(self, simulation=None):
-        assert simulation is not None, \
-            "Missing dt value for diffusion computation."
-        dt = simulation.time_step
-        ghosts = self.vorticity.topology.ghosts()
-        self._apply(dt, ghosts)
-
-    def apply_2d(self, dt, ghosts):
-        """2d implementation of apply function"""
-        self.vorticity.data = fftw2py.solve_diffusion_2d(
-            self.viscosity * dt, self.vorticity.data, ghosts)
-
-    def apply_3d(self, dt, ghosts):
-        """3d implementation of apply function"""
-        self.vorticity.data[0], self.vorticity.data[1],\
-            self.vorticity.data[2] = \
-            fftw2py.solve_diffusion_3d(self.viscosity * dt,
-                                       self.vorticity.data[0],
-                                       self.vorticity.data[1],
-                                       self.vorticity.data[2],
-                                       ghosts)
-
-    def finalize(self):
-        """
-        Clean memory (fftw plans and so on)
-        """
-        pass
-        # TODO : fix bug that occurs when several finalize
-        # of fft operators are called.
-        # fftw2py.clean_fftw_solver(self.vorticity.dimension)
-
-
-class CurlAndDiffusionFFT(DiscreteOperator):
-    """Discretized Curl/Diffusion operator based on FFTW.
-    See details in hysop.operator.diffusion.
-
-    """
-    @debug
-    def __init__(self, viscosity, velocity, vorticity, **kwds):
-        """Solve diffusion problem in Fourier
-        domain (velocity curl + diffusion in one shot)
-
-        Parameters
-        ----------
-        viscosity : double
-             constant viscosity value
-        velocity : :class:`~hysop.fields.discrete_field.DiscreteField`
-             velocity field, input parameter
-        vorticity : :class:`~hysop.fields.discrete_field.DiscreteField`
-             velocity field, output parameter
-        kwds : base class arguments (vorticity, viscosity ...)
-        """
-        self.velocity = velocity
-        self.vorticity = vorticity
-        self.viscosity = viscosity
-        assert 'variables' not in kwds, 'variables parameter is useless.'
-        super(CurlAndDiffusionFFT, self).__init__(
-            variables=[velocity, vorticity], **kwds)
-        self.input = [self.velocity]
-        self.output = [self.vorticity]
-        msge = 'CurlAndDiffusion: only implemented for 3d domain.'
-        assert self.velocity.dimension == 3, msge
-
-    @debug
-    @profile
-    def apply(self, simulation=None):
-        dt = simulation.time_step
-        gh_velo = self.velocity.topology.ghosts()
-        gh_vorti = self.vorticity.topology.ghosts()
-        self.vorticity.data[0], self.vorticity.data[1],\
-            self.vorticity.data[2] = \
-            fftw2py.solve_curl_diffusion_3d(self.viscosity * dt,
-                                            self.velocity.data[0],
-                                            self.velocity.data[1],
-                                            self.velocity.data[2],
-                                            self.vorticity.data[0],
-                                            self.vorticity.data[1],
-                                            self.vorticity.data[2],
-                                            gh_velo, gh_vorti)
diff --git a/hysop/old/operator.old/discrete/discrete.py b/hysop/old/operator.old/discrete/discrete.py
deleted file mode 100755
index 244717927e2b41a1f022fa96da0217f4012642d5..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/discrete/discrete.py
+++ /dev/null
@@ -1,185 +0,0 @@
-"""Abstract interface for discrete operators.
-"""
-from abc import ABCMeta, abstractmethod
-from hysop.constants import debug
-from hysop.methods import GhostUpdate
-from hysop.tools.profiler import Profiler
-
-
-class DiscreteOperator(object):
-    """Common interface to all discrete
-    operators.
-
-    """
-
-    __metaclass__ = ABCMeta
-
-    @debug
-    def __new__(cls, *args, **kw):
-        return object.__new__(cls, *args, **kw)
-
-    @debug
-    @abstractmethod
-    def __init__(self, variables, rwork=None, iwork=None, method=None):
-        """
-        Parameters
-        -----------
-        variables : list of :class:`hysop.fields.discrete_field.DiscreteField`
-            the fields on which this operator works.
-        rwork : list of numpy real arrays, optional
-            internal work arrays. See notes below.
-        iwork : list of numpy integer arrays, optional
-            internal work arrays. See notes below
-        method : dictionnary, optional
-            internal solver parameters (discretisation ...).
-            If None, use default method from hysop.default_method.py.
-
-        Attributes
-        ----------
-
-        variables : list of discrete fields on which the operator works.
-        domain : physical domain
-        input : fields used as input (i.e. read-only)
-        output : fields used as in/out (i.e. modified during apply call)
-
-        """
-        if isinstance(variables, list):
-            # variables
-            self.variables = variables
-        else:
-            self.variables = [variables]
-
-        self.domain = self.variables[0].domain
-        self._dim = self.domain.dim
-
-        # Input variables
-        self.input = []
-        # Output variables
-        self.output = []
-        # Operator numerical method.
-        if method is None:
-            method = {}
-        self.method = method
-        if GhostUpdate not in method:
-            method[GhostUpdate] = True
-        # Operator name
-        self.name = self.__class__.__name__
-        # Object to store computational times of lower level functions
-        self.profiler = Profiler(self, self.domain.task_comm)
-
-        # Allocate or check work arrays.
-        # Their shapes, number ... strongly depends
-        # on the type of discrete operator.
-        # A _set_work_arrays function must be implemented
-        # in all derived classes where work are required.
-        self._rwork = None
-        self._iwork = None
-        self._set_work_arrays(rwork, iwork)
-
-        # Function to synchronize ghosts if needed
-        self._synchronize = None
-
-        # Object that deals with output file writing.
-        # Optional.
-        self._writer = None
-        # Check topologies consistency
-        if self.variables is not None:
-            toporef = self.variables[0].topology
-            for v in self.variables:
-                assert v.topology.is_consistent_with(toporef)
-
-    def _set_work_arrays(self, rwork=None, iwork=None):
-        """
-        To set the internal work arrays used by this operator.
-        Parameters
-        ----------
-        rwork : list of numpy real arrays
-            real buffers for internal work
-        iwork : list of numpy integer arrays
-            integer buffers for internal work
-
-        """
-        pass
-
-    def set_writer(self, writer):
-        """
-        Assign a writer to the current operator
-        """
-        self._writer = writer
-
-    @debug
-    @abstractmethod
-    def apply(self, simulation=None):
-        """Execute the operator for the current simulation state
-
-        Parameters
-        ----------
-        simulation : :class:`~hysop.problem.simulation.Simulation`
-        """
-
-    @debug
-    def finalize(self):
-        """
-        Cleaning, if required.
-        """
-        pass
-
-    def __str__(self):
-        """Common printings for discrete operators."""
-        short_name = str(self.__class__).rpartition('.')[-1][0:-2]
-        s = short_name + " discrete operator. \n"
-        if self.input is not None:
-            s += "Input fields : \n"
-            for f in self.input:
-                s += str(f) + "\n"
-        if self.output is not None:
-            s += "Output fields : \n"
-            for f in self.output:
-                s += str(f) + "\n"
-        return s
-
-    def update_ghosts(self):
-        """
-        Update ghost points values, if any.
-        This function must be implemented in the discrete
-        operator if it may be useful to ask for ghost
-        points update without a call to apply.
-        For example for monitoring purpose before
-        operator apply.
-        """
-        pass
-
-    def get_profiling_info(self):
-        """Get the manual profiler informations into the default profiler"""
-        pass
-
-
-def get_extra_args_from_method(op, key, default_value):
-    """Returns the given extra arguments dictionary from method attributes.
-
-    Parameters
-    -----------
-    op : operator
-        extract method attribute from
-    key : string
-        key to extract
-    default_value :
-        default value when ExtraArgs is not in op.method or
-        key is not in op.method[ExtraArgs]
-
-    Usage
-    -----
-
-    .. code::
-
-        method = {ExtraArgs: {'device_id': 2, user_src: ['./ker.cl']}
-        op = SomeOp(..., method=method)
-        val = get_extra_args_from_method(op, device_id, 6)
-        # set val to 2. If device_id or ExtraArgs does not exist, set val to 8.
-
-    """
-    from hysop.methods import ExtraArgs
-    try:
-        return op.method[ExtraArgs][key]
-    except KeyError:
-        return default_value
diff --git a/hysop/old/operator.old/discrete/drag_and_lift.py b/hysop/old/operator.old/discrete/drag_and_lift.py
deleted file mode 100644
index 9beb469758b59d24738a4a7e5a7e011c94105a59..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/discrete/drag_and_lift.py
+++ /dev/null
@@ -1,870 +0,0 @@
-# -*- coding: utf-8 -*-
-"""Discrete operators to compute drag and lift forces
-"""
-from hysop.numerics.update_ghosts import UpdateGhosts
-from hysop.operator.discrete.discrete import DiscreteOperator
-from hysop.tools.numpywrappers import npw
-from abc import ABCMeta, abstractmethod
-from hysop.numerics.utils import Utils
-from hysop.constants import XDIR, YDIR, ZDIR
-from hysop.domain.control_box import ControlBox
-from hysop.domain.subsets import Subset
-from hysop.numerics.differential_operations import Laplacian
-from hysop.numerics.finite_differences import FDC2
-import numpy as np
-from hysop.tools.misc import WorkSpaceTools
-
-
-class Forces(DiscreteOperator):
-    """
-    Compute drag and lift using Noca's formula.
-    See Noca99 or Plouhmans, 2002, Journal of Computational Physics
-    The present class implements formula (52) of Plouhmans2002.
-    Integral inside the obstacle is not taken into account.
-    """
-    __metaclass__ = ABCMeta
-
-    def __init__(self, obstacles=None, normalization=1., **kwds):
-        """
-        Parameters
-        ----------
-        obstacles : list of :class:`~hysop.domain.subsets.Subset`
-            list of bodies inside the flow
-        normalization : double, optional
-            a normalization coefficient applied to the force, default = 1.
-        kwds : arguments passed to base class.
-
-        Attributes
-        ----------
-        force : numpy array
-            drag and lift forces
-
-        """
-        # true if the operator needs to work on the current process.
-        # Updated in derived class.
-        self._on_proc = True
-        # deal with obstacles, volume of control ...
-
-        self._indices = self._init_indices(obstacles)
-
-        super(Forces, self).__init__(**kwds)
-        # topology is common to all variables
-        self.input = self.variables
-        self._topology = self.input[0].topology
-        # elem. vol
-        self._dvol = npw.prod(self._topology.mesh.space_step)
-
-        msg = 'Force computation undefined for domain of dimension 1.'
-        assert self._dim > 1, msg
-
-        # Local buffers, used for time-derivative computation
-        self._previous = npw.zeros(self._dim)
-        self._buffer = npw.zeros(self._dim)
-        # The force we want to compute (lift(s) and drag)
-        self.force = npw.zeros(self._dim)
-
-        # list of np arrays to be synchronized
-        self._datalist = []
-        for v in self.input:
-            self._datalist += v.data
-        nbc = len(self._datalist)
-        # Ghost points synchronizer
-        self._synchronize = UpdateGhosts(self._topology, nbc)
-
-        # Normalizing coefficient for forces
-        # (based on the physics of the flow)
-        self._normalization = normalization
-
-        # Which formula must be used to compute the forces.
-        # Must be set in derived class.
-        self._formula = lambda dt: 0
-
-        # Set how reduction will be performed
-        # Default = reduction over all process.
-        # \todo : add param to choose this option
-        self.mpi_sum = self._mpi_allsum
-        # A 'reduced' communicator used to mpi-reduce the force.
-        # Set in derived class
-        self._subcomm = None
-
-    @abstractmethod
-    def _init_indices(self, obstacles):
-        """
-        Parameters
-        -----------
-        obstacles : a list of :class:`hysop.domain.subsets.Subset`
-
-        Returns
-        -------
-        a list of np arrays
-            points indices (like result from np.where)
-
-        Discretize obstacles, volume of control ... and
-        compute a list of points representing these sets.
-        What is inside indices depends on the chosen method.
-        See derived class 'init_indices' function for details.
-        """
-
-    def _mpi_allsum(self):
-        """
-        Performs MPI reduction (sum result value over all process)
-        All process get the result of the sum.
-        """
-        self.force = self._topology.comm.allreduce(self.force)
-
-    def _mpi_sum(self, root=0):
-        """
-        Performs MPI reduction (sum result value over all process)
-        Result send only to 'root' process.
-
-        : param root : int
-            number of the process which collect the result.
-
-        """
-        self.force = self._topology.comm.reduce(self.force, root=root)
-
-    def apply(self, simulation=None):
-        """Compute forces
-
-        :param simulation: :class:`~hysop.problem.simulation.Simulation`
-
-        """
-        assert simulation is not None,\
-            "Simulation parameter is required for Forces apply."
-        # Synchro of ghost points is required for fd schemes
-        self._synchronize(self._datalist)
-        # Compute forces locally
-        dt = simulation.time_step
-        if not self._on_proc:
-            self._buffer[...] = 0.0
-            self.force[...] = 0.0
-            self._previous[...] = 0.0
-        else:
-            self._formula(dt)
-        # Reduce results over MPI processes
-        self.mpi_sum()
-        # normalization of the forces --> cD, cL, cZ
-        self.force *= self._normalization
-        # Print results, if required
-        ite = simulation.current_iteration
-        if self._writer is not None and self._writer.do_write(ite):
-            self._writer.buffer[0, 0] = simulation.time
-            self._writer.buffer[0, 1:] = self.force
-            self._writer.write()
-
-
-class MomentumForces(Forces):
-    """
-    Compute drag and lift using Noca's formula.
-    See Noca99 or Plouhmans, 2002, Journal of Computational Physics
-    The present class implements formula (52) of Plouhmans2002.
-    Integral inside the obstacle is not taken into account.
-    """
-    def __init__(self, velocity, penalisation_coeff, **kwds):
-        """
-        Parameters
-        -----------
-        velocity : :class:`hysop.field.discrete.DiscreteField`
-            the velocity field
-        penalisation_coeff : list of double
-            coeff used to penalise velocity before force computation
-        kwds : arguments passed to drag_and_lift.Forces base class.
-
-        See :ref:`forces`.
-
-        """
-        assert 'variables' not in kwds, 'variables parameter is useless.'
-        # discrete velocity field
-        self.velocity = velocity
-        msg = 'penalisation coeff must be a list of values.'
-        assert isinstance(penalisation_coeff, list), msg
-        # penalisation coefficient(s)
-        self._coeff = penalisation_coeff
-
-        super(MomentumForces, self).__init__(variables=[velocity], **kwds)
-
-        # formula used to compute drag and lift
-        self._formula = self._momentum
-
-        # Check ghost points
-        assert (self._topology.ghosts() >= 1).all()
-
-    def _set_work_arrays(self, rwork=None, iwork=None):
-        """Set or check rwork.
-
-        rwork will be required only in formulations where
-        an integral over the volume of the control box is computed.
-        """
-        # !!! Velocity must be set before a call to this function
-        # and so before base class initialization !!!
-
-        # The only required buffer if for integral on the volume
-        # of control.
-        if not self._on_proc:
-            self._rwork = [None]
-            return
-
-        size = 0
-        for ind in self._indices:
-            size = np.maximum(self.velocity.data[0][ind].size, size)
-        shape = (size,)
-        self._rwork = WorkSpaceTools.check_work_array(1, shape, rwork)
-
-    def _init_indices(self, obstacles):
-        msg = 'obstacles arg must be a list.'
-        assert isinstance(obstacles, list), msg
-        # only one obstacle allowed for the moment
-        assert len(obstacles) == 1
-        obst = obstacles[0]
-        toporef = self.velocity.topology
-        obst.discretize(toporef)
-        self._on_proc = obst.on_proc[toporef]
-        # mpi communicator
-        #self._subcomm = obstacles[0].subcomm[self._topology]
-        # Return the list of indices of points inside the obstacle
-        return obst.ind[toporef]
-
-    def _momentum(self, dt):
-        """update force value for the current time step
-        """
-        # -- Integration over the obstacle --
-        # the force has to be set to 0 before computation
-        self.force[...] = 0.0
-        for d in xrange(self._dim):
-            # buff is initialized to component d of
-            # the velocity, inside the obstacle and to
-            # zero elsewhere.
-            # For each area of the considered obstacle:
-            for i in xrange(len(self._indices)):
-                ind = self._indices[i]
-                subshape = self.velocity.data[d][ind].shape
-                lbuff = np.prod(subshape)
-                buff = self._rwork[0][:lbuff].reshape(subshape)
-                coeff = self._coeff[i] / (1. + self._coeff[i] * dt)
-                buff[...] = coeff * self.velocity.data[d][ind]
-                self.force[d] += npw.real_sum(buff)
-
-        self.force *= self._dvol
-
-
-class NocaForces(Forces):
-    """
-    Compute drag and lift using Noca's formula.
-    See Noca99 or Plouhmans, 2002, Journal of Computational Physics
-    The present class implements formula (52) of Plouhmans2002.
-    Integral inside the obstacle is not taken into account.
-    """
-
-    __metaclass__ = ABCMeta
-
-    def __init__(self, velocity, vorticity, nu, volume_of_control,
-                 surfdir=None, **kwds):
-        """
-        Parameters
-        -----------
-        velocity : :class:`hysop.field.discrete.DiscreteField`
-            the velocity field
-        vorticity : :class:`hysop.field.discrete.DiscreteField`
-            vorticity field inside the domain
-        nu : double
-            viscosity
-        volume_of_control : :class:`~hysop.domain.subset.controlBox.ControlBox`
-            a subset of the domain, on which forces will be computed,
-            useful to reduce computational cost
-        surfdir : python list, optional
-            indices of the surfaces on which forces are computed,
-            0, 1 = bottom/top faces in xdir, 2,3 in ydir ...
-            Default = only surfaces normal to x direction.
-        kwds : arguments passed to drag_and_lift.Forces base class.
-
-        """
-        # A volume of control, in which forces are computed
-        self._voc = volume_of_control
-        # discrete velocity field
-        self.velocity = velocity
-        # discrete vorticity field
-        self.vorticity = vorticity
-
-        assert 'variables' not in kwds, 'variables parameter is useless.'
-        super(NocaForces, self).__init__(variables=[velocity, vorticity],
-                                         **kwds)
-        # Coef in the Noca formula
-        self._coeff = 1. / (self._dim - 1)
-        # viscosity
-        self.nu = nu
-
-        # connect to noca's formula
-        self._formula = self._noca
-        # Set mpi comm
-        self._subcomm = self._voc.subcomm[self._topology]
-
-        if surfdir is None:
-            surfdir = [0, 1]
-        # Faces where integrals on surfaces are computed
-        # order : [xmin, xmax, ymin, ymax, ...]
-        # So if you need computation in x and z directions,
-        # surfdir = [0, 1, 4, 5]
-        self._surfdir = surfdir
-        # buffers used to compute d/dt velocity
-        self._previous_velo = [None] * self._dim * 2 * self._dim
-        self._surf_buffer = None
-        self._init_surf_buffers()
-
-    def _init_indices(self, obstacles):
-        """
-        Compute a list of indices corresponding to points inside
-        the volume of control minus those inside the obstacles
-
-        Parameters
-        ----------
-        obstacles: list of :class:`~hysop.domain.subsets.Subset`
-            obstacles in the flow
-
-        Returns
-        -------
-        list of indices of points inside those obstacles
-
-        """
-        assert isinstance(self._voc, ControlBox)
-        toporef = self.velocity.topology
-        self._on_proc = self._voc.on_proc[toporef]
-        # no obstacle in the box, just for test purpose.
-        if obstacles is None or len(obstacles) == 0:
-            return self._voc.ind[toporef]
-        else:
-            msg = 'obstacles arg must be a list.'
-            assert isinstance(obstacles, list), msg
-            for obs in obstacles:
-                obs.discretize(toporef)
-            # return indices of points inside the box, excluding points
-            # inside the obstacles.
-            return Subset.subtract_list_of_sets([self._voc], obstacles,
-                                                toporef)
-
-    def _update_surf_buffers(self):
-        """
-        Set local buffers values (self._previous_velo)
-        previous_velo is used to compute dv/dt on the surfaces, so we need
-        to save velocity at the current time step for computation at the next
-        time step.
-        """
-        pass
-
-    def _init_surf_buffers(self):
-        """Allocate memory for local buffers (used to compute
-        time derivative of the velocity on the surfaces of
-        the volume of control)
-        """
-        # This Noca's formulation uses only 'gamma_common'
-        # which require a buffer/direction of integration
-        # of size shape(v) on the surface and unlocked.
-        # Surface in the same direction can use the same buffer.
-        toporef = self.velocity.topology
-        subsize = 0
-        for s_id in self._surfdir:
-            surf = self._voc.surf[s_id]
-            ind = surf.mesh[toporef].ind4integ
-            subsize = np.maximum(self.velocity.data[0][ind].size, subsize)
-        self._surf_buffer = npw.zeros(2 * subsize)
-
-    def _compute_gamma_common(self, surf, res):
-        """
-        Computation of the part common to the 3 Noca's
-        formulations for the local integral
-        on a surface of the control box.
-
-        Parameters
-        ----------
-        surf : :class:`~hysop.domain.subset.boxes.SubBox`
-            The surface on which the integral is computed
-        s_id : int
-            index of the surface in buffer list
-        res : np array
-            in, out parameter
-
-        Returns
-        -------
-        res
-            value of the integral on surf
-
-        Notes
-        -----
-        * res input will be completely erased and recomputed
-        * this function uses self._previous_velo[s_id * self._dim] buffer
-        which must be of shape equal to the resolution of the input surface.
-        * finite differences are used to compute Laplacian and other
-          derivatives.
-
-        """
-        res[...] = 0.0
-        if not surf.on_proc[self._topology]:
-            return res
-        # Get indices for integration on the surface
-        ind = surf.mesh[self._topology].ind4integ
-        # i_n : normal dir
-        # i_t : other dirs
-        i_n = surf.n_dir
-        i_t = surf.t_dir
-        # coordinates of points in the surface (for integration)
-        coords = surf.mesh[self._topology].coords4int
-        # list of array for discrete velocity field
-        vd = self.velocity.data
-        normal = surf.normal
-
-        # --- First part of the integral ---
-        #  int(0.5 (uu)n - (nu)u)
-        # i_n component
-        subshape = vd[i_n][ind].shape
-        subsize = vd[i_n][ind].size
-        buff = self._surf_buffer[:subsize].reshape(subshape)
-        for j in i_t:
-            np.multiply(vd[j][ind], vd[j][ind], buff)
-            res[i_n] += npw.real_sum(buff)
-        np.multiply(vd[i_n][ind], vd[i_n][ind], buff)
-        res[i_n] -= npw.real_sum(buff)
-        res[i_n] *= 0.5 * normal
-
-        # other components
-        for j in i_t:
-            np.multiply(vd[i_n][ind], vd[j][ind], buff)
-            res[j] = - normal * npw.real_sum(buff)
-
-        # --- Second part of integral on surface ---
-        #  1/(dim - 1) * int( (nw)(x X u) - (nu)(x X w))
-        x0 = coords[i_n].flat[0]
-        buff2 = self._surf_buffer[subsize:2 * subsize].reshape(subshape)
-        # Indices used for cross-product
-        j1 = [YDIR, ZDIR, XDIR]
-        j2 = [ZDIR, XDIR, YDIR]
-        wd = self.vorticity.data
-        for j in i_t:
-            np.multiply(vd[j2[j]][ind], wd[j1[j]][ind], buff)
-            np.multiply(vd[j1[j]][ind], wd[j2[j]][ind], buff2)
-            np.subtract(buff, buff2, buff)
-            res[j] += x0 * normal * self._coeff * npw.real_sum(buff)
-            np.multiply(coords[j], buff, buff)
-            res[i_n] -= self._coeff * normal * npw.real_sum(buff)
-
-        # Last part
-        # Update fd schemes in order to compute laplacian and other derivatives
-        # only on the surface (i.e. for list of indices in sl)
-
-        # function to compute the laplacian
-        # of a scalar field. Default fd scheme.
-        laplacian = Laplacian(topo=self._topology, indices=ind,
-                              reduce_output_shape=True)
-        for j in i_t:
-            [buff] = laplacian(vd[j:j + 1], [buff])
-            res[j] -= self._coeff * self.nu * normal * x0 * npw.real_sum(buff)
-            np.multiply(coords[j], buff, buff)
-            res[i_n] += self._coeff * self.nu * normal * npw.real_sum(buff)
-        # function used to compute first derivative of
-        # a scalar field in a given direction.
-        # Default = FDC2. Todo : set this as an input method value.
-        iout = laplacian.output_indices
-        fd_scheme = FDC2(self._topology.mesh.space_step, ind, iout)
-        fd_scheme.compute(vd[i_n], i_n, buff)
-        res[i_n] += 2.0 * normal * self.nu * npw.real_sum(buff)
-        for j in i_t:
-            fd_scheme.compute(vd[i_n], j, buff)
-            res[j] += normal * self.nu * npw.real_sum(buff)
-            fd_scheme.compute(vd[j], i_n, buff)
-            res[j] += normal * self.nu * npw.real_sum(buff)
-
-        return res
-
-    @abstractmethod
-    def _noca(self, dt):
-        """Computes local values of the forces
-
-        Parameters
-        ----------
-        dt : double
-            current time step
-
-        Returns
-        -------
-        array of double
-            the local (i.e. current mpi process) forces
-        """
-
-
-class NocaI(NocaForces):
-    """Noca, "Impulse Equation" from Noca99
-    """
-    def _set_work_arrays(self, rwork=None, iwork=None):
-        """Set or check rwork.
-
-        rwork will be required only in formulations where
-        an integral over the volume of the control box is computed,
-        Noca I and Noca II.
-        """
-        # !!! Velocity must be set before a call to this function
-        # and so before base class initialization !!!
-
-        # The only required buffer if for integral on the volume
-        # of control.
-        toporef = self.velocity.topology
-        v_ind = self._voc.mesh[toporef].ind4integ
-        shape_v = self.velocity.data[0][v_ind].shape
-        self._rwork = WorkSpaceTools.check_work_array(1, shape_v, rwork)
-
-    def _noca(self, dt):
-        """
-        Computes local values of the forces using formula 2.1
-        ("Impulse Equation") from :cite:`Noca-1999`
-        :parameter dt: double
-            current time step
-
-        Returns
-        -------
-        np array
-            local (i.e. current mpi process) forces
-        """
-        # -- Integration over the volume of control --
-        # -1/(N-1) . d/dt int(x ^ w)
-        mesh = self._voc.mesh[self._topology]
-        coords = mesh.coords4int
-        ind = mesh.ind4integ
-        if self._dim == 2:
-            wz = self.vorticity.data[0]
-            np.multiply(coords[YDIR], wz[ind], self._rwork[-1])
-            self._buffer[0] = npw.real_sum(self._rwork[-1])
-            np.multiply(coords[XDIR], wz[ind], self._rwork[-1])
-            self._buffer[1] = -npw.real_sum(self._rwork[-1])
-        elif self._dim == 3:
-            self._rwork[-1][...] = 0.
-            self._buffer[...] = Utils.sum_cross_product(coords,
-                                                        self.vorticity.data,
-                                                        ind, self._rwork[-1])
-        self._buffer[...] *= self._dvol
-        self.force[...] = -1. / dt * self._coeff * (self._buffer -
-                                                    self._previous)
-        # Update previous for next time step ...
-        self._previous[...] = self._buffer[...]
-
-        # -- Integrals on surfaces --
-        # Only on surf. normal to dir in self._surfdir.
-        for s_id in self._surfdir:
-            s_x = self._voc.surf[s_id]
-            i_t = s_x.t_dir
-            # cell surface
-            dsurf = npw.prod(self._topology.mesh.space_step[i_t])
-            # The 'common' part (same in all Noca's formula)
-            self._buffer = self._compute_gamma_common(s_x, self._buffer)
-            self.force += self._buffer * dsurf
-
-
-class NocaII(NocaForces):
-    """Second formulation of Noca, "Momentum equation".
-    """
-
-    def _init_surf_buffers(self):
-        """Allocate memory for local buffers (used to compute
-        time derivative of the velocity on the surfaces of
-        the volume of control)
-        """
-        # Buffers are :
-        # - used in gamma momentum
-        # - used in gamma common
-        # - update with local velocity and locked till next gamma_momentum.
-        # For each surface, a buffer is required for each velocity component
-        # in tangential directions to the surface.
-        # For example, in 3D, with integration only in xdir, 2 * 2 buffers are
-        # required.
-
-        toporef = self.velocity.topology
-        subsize = 0
-        for s_id in self._surfdir:
-            surf = self._voc.surf[s_id]
-            ind = surf.mesh[toporef].ind4integ
-            subsize = np.maximum(self.velocity.data[0][ind].size, subsize)
-        self._surf_buffer = npw.zeros(2 * subsize)
-        # For each surface ...
-        for s_id in self._surfdir:
-            ind = self._voc.surf[s_id].mesh[self._topology].ind4integ
-            shape = self.velocity.data[0][ind].shape
-            i_t = self._voc.surf[s_id].t_dir
-            # for each tangential direction ...
-            for d in i_t:
-                pos = s_id * self._dim + d
-                self._previous_velo[pos] = npw.zeros(shape)
-
-    def _update_surf_buffers(self):
-        """Set local buffers values (self._previous_velo)
-        previous_velo is used to compute dv/dt on the surfaces, so we need
-        to save velocity at the current time step for computation at the next
-        time step.
-        After a call to this function, the buffer is "locked" until next call
-        to _noca.
-        """
-        # Done only for surfaces on which integration is performed
-        for s_id in self._surfdir:
-            surf = self._voc.surf[s_id]
-            if surf.on_proc[self._topology]:
-                i_t = self._voc.surf[s_id].t_dir
-                ind = self._voc.surf[s_id].mesh[self._topology].ind4integ
-                for d in i_t:
-                    pos = s_id * self._dim + d
-                    self._previous_velo[pos][...] = self.velocity.data[d][ind]
-                    npw.lock(self._previous_velo[pos])
-
-    def _noca(self, dt):
-        """
-        Computes local values of the forces using formula 2.5
-        ("Momentum Equation") from :cite:`Noca-1999`
-
-        :parameter dt: double
-            current time step
-
-        Returns
-        -------
-        np array
-            local (i.e. current mpi process) forces
-        """
-        # -- Integration over the volume of control --
-        # -d/dt int(v)
-        nbc = self.velocity.nb_components
-        self._buffer[...] = \
-            [self._voc.integrate_dfield_on_proc(self.velocity,
-                                                component=d)
-             for d in xrange(nbc)]
-        self.force[...] = -1. / dt * (self._buffer - self._previous)
-
-        # Update previous for next time step ...
-        self._previous[...] = self._buffer[...]
-
-        # -- Integrals on surfaces --
-        # Only on surf. normal to dir in self._surfdir.
-        for s_id in self._surfdir:
-            s_x = self._voc.surf[s_id]
-            if s_x.on_proc[self._topology]:
-                i_t = s_x.t_dir
-                # cell surface
-                dsurf = npw.prod(self._topology.mesh.space_step[i_t])
-                # First, part relative to "du/dt"
-                self._buffer = self._compute_gamma_momentum(dt, s_x, s_id,
-                                                            self._buffer)
-                self.force += self._buffer * dsurf
-                # Then the 'common' part (same in all Noca's formula)
-                self._buffer = self._compute_gamma_common(s_x, self._buffer)
-                self.force += self._buffer * dsurf
-        # Prepare next step
-        self._update_surf_buffers()
-
-    def _compute_gamma_momentum(self, dt, surf, s_id, res):
-        """
-        Partial computation of gamma in Noca's "momentum formulation",
-        on a surface of the control box.
-
-        It corresponds to the terms in the second line of gamma_mom
-        in formula 2.5 of :cite:`Noca-1999`.
-
-        Parameters
-        ----------
-        dt: double
-            time step
-        surf : :class:`~hysop.domain.subset.boxes.SubBox`
-            The surface on which the integral is computed
-        s_id : int
-            index of the surface in buffer list
-        res : np array
-            in, out parameter.
-
-
-        Returns
-        -------
-        res
-            value of the integral on surf
-
-        Notes
-        -----
-        * res input will be completely erased and recomputed
-        * this function uses self._previous_velo[s_id * self._dim + j] buffers,
-        j = tangential dirs to the surface.
-        which must be of shape equal to the resolution of the input surface.
-        """
-        res[...] = 0.
-
-        # i_n : normal dir
-        # i_t : other dirs
-        i_n = surf.n_dir
-        i_t = surf.t_dir
-        coords = surf.mesh[self._topology].coords4int
-        ind = surf.mesh[self._topology].ind4integ
-        x0 = coords[i_n].flatten()
-        # We want to compute:
-        # res = -1/(d - 1) * integrate_on_surf(
-        #         (x.du/dt)n - (x.n) du/dt)
-        # n : normal, d : dimension of the domain, u : velocity
-
-        # (x.du/dt)n - (x.n) du/dt
-        coeff = self._coeff * surf.normal * 1. / dt
-        for j in i_t:
-            # compute d(velocity_it)/dt on surf in buff
-            buff = self._previous_velo[s_id * self._dim + j]
-            npw.unlock(buff)
-            np.subtract(self.velocity.data[j][ind], buff, buff)
-            res[j] = coeff * x0 * npw.real_sum(buff)
-            np.multiply(coords[j], buff, buff)
-            res[i_n] -= coeff * npw.real_sum(buff)
-        return res
-
-
-class NocaIII(NocaForces):
-    """Third formulation of Noca, "Flux Equation"
-    """
-
-    def _init_surf_buffers(self):
-        """Allocate memory for local buffers (used to compute
-        time derivative of the velocity on the surfaces of
-        the volume of control)
-        """
-        # Buffers are :
-        # - used in gamma momentum and unlocked
-        # - used in gamma flux
-        # - update with local velocity and locked till next gamma_momentum.
-        # For each surface, a buffer is required for each velocity component
-        # For example, in 3D, with integration only in xdir, 2 * 3 buffers are
-        # required.
-        toporef = self.velocity.topology
-        subsize = 0
-        for s_id in self._surfdir:
-            surf = self._voc.surf[s_id]
-            ind = surf.mesh[toporef].ind4integ
-            subsize = np.maximum(self.velocity.data[0][ind].size, subsize)
-        self._surf_buffer = npw.zeros(2 * subsize)
-        for s_id in self._surfdir:
-            ind = self._voc.surf[s_id].mesh[self._topology].ind4integ
-            shape = self.velocity.data[0][ind].shape
-            i_n = self._voc.surf[s_id].n_dir
-            pos = s_id * self._dim + i_n
-            self._previous_velo[pos] = npw.zeros(shape)
-            i_t = self._voc.surf[s_id].t_dir
-            # for each tangential direction ...
-            for d in i_t:
-                pos = s_id * self._dim + d
-                self._previous_velo[pos] = npw.zeros(shape)
-
-    def _update_surf_buffers(self):
-        """Set local buffers values (self._previous_velo).
-        previous_velo is used to compute dv/dt on the surfaces, so we need
-        to save velocity at the current time step for computation at the next
-        time step.
-        After a call to this function, the buffer is "locked" until next call
-        to _noca.
-        """
-        for s_id in self._surfdir:
-            surf = self._voc.surf[s_id]
-            if surf.on_proc[self._topology]:
-                ind = self._voc.surf[s_id].mesh[self._topology].ind4integ
-                i_n = self._voc.surf[s_id].n_dir
-                pos = s_id * self._dim + i_n
-                # update v component normal to surf
-                self._previous_velo[pos][...] = self.velocity.data[i_n][ind]
-                # lock
-                npw.lock(self._previous_velo[pos])
-                i_t = self._voc.surf[s_id].t_dir
-                # for each tangential direction ...
-                for d in i_t:
-                    pos = s_id * self._dim + d
-                    # update v components tangent to the surface
-                    self._previous_velo[pos][...] = self.velocity.data[d][ind]
-                    # lock
-                    npw.lock(self._previous_velo[pos])
-
-    def _noca(self, dt):
-        """
-        Computes local values of the forces using formula 2.10
-        ("Flux Equation") from :cite:`Noca-1999`
-
-        :parameter dt: double
-            current time step
-
-        Returns
-        -------
-        np array
-            local (i.e. current mpi process) forces
-        """
-        self.force[...] = 0.
-        # -- Integrals on surfaces --
-        # Only on surf. normal to dir in self._surfdir.
-        for s_id in self._surfdir:
-            s_x = self._voc.surf[s_id]
-            if s_x.on_proc[self._topology]:
-                i_t = s_x.t_dir
-                # cell surface
-                dsurf = npw.prod(self._topology.mesh.space_step[i_t])
-                # First, part relative to "du/dt"
-                self._buffer = self._compute_gamma_flux(dt, s_x, s_id,
-                                                    self._buffer)
-                self.force += self._buffer * dsurf
-                self._buffer = self._compute_gamma_common(s_x, self._buffer)
-                self.force += self._buffer * dsurf
-        self._update_surf_buffers()
-
-    def _compute_gamma_flux(self, dt, surf, s_id, res):
-        """
-        Partial computation of gamma in Noca's "flux formulation",
-        on a surface of the control box.
-
-        It corresponds to the terms in the second line of gamma_flux
-        in formula 2.10 of :cite:`Noca-1999`.
-
-        Parameters
-        ----------
-        dt: double
-            time step
-        surf : :class:`~hysop.domain.subset.boxes.SubBox`
-            The surface on which the integral is computed
-        s_id : int
-            index of the surface in buffer list
-        res : np array
-            in, out parameter.
-
-        Returns
-        -------
-        res
-            value of the integral on surf
-
-        Notes
-        -----
-        * this function uses self._previous_velo[s_id * self._dim + j] buffers,
-        j = xrange(domain.dim)
-        which must be of shape equal to the resolution of the input surface.
-        """
-        res[...] = 0.
-        # i_n : normal dir
-        # i_t : other dirs
-        i_n = surf.n_dir
-        i_t = surf.t_dir
-        coords = surf.mesh[self._topology].coords4int
-        ind = surf.mesh[self._topology].ind4integ
-        x0 = coords[i_n].flat[0]
-        # We want to compute:
-        # res = -1/(d - 1) * integrate_on_surf(
-        #         (x.du/dt)n - (x.n) du/dt
-        #         + (d-1) * (du/dt.n).x)
-        # n : normal, d : dimension of the domain, u : velocity
-
-        # buff = d(velocity_in) /dt on surf
-        buff = self._previous_velo[s_id * self._dim + i_n]
-        npw.unlock(buff)
-        np.subtract(self.velocity.data[i_n][ind], buff, buff)
-        coeff = surf.normal * 1. / dt
-        # -(n.du/dt).x
-        res[i_n] = - coeff * x0 * npw.real_sum(buff)
-        for j in i_t:
-            res[j] = - coeff * npw.real_sum(coords[j] * buff)
-
-        # (x.du/dt)n - (x.n) du/dt
-        coeff = self._coeff * surf.normal * 1. / dt
-        for j in i_t:
-            # compute d(velocity_it)/dt on surf in buff
-            buff = self._previous_velo[s_id * self._dim + j]
-            npw.unlock(buff)
-            np.subtract(self.velocity.data[j][ind], buff, buff)
-            res[j] += coeff * x0 * npw.real_sum(buff)
-            np.multiply(coords[j], buff, buff)
-            res[i_n] -= coeff * npw.real_sum(buff)
-        return res
diff --git a/hysop/old/operator.old/discrete/energy_enstrophy.py b/hysop/old/operator.old/discrete/energy_enstrophy.py
deleted file mode 100644
index e29c4a5ef4c62d7ba7cbd830e818ad0782f04f31..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/discrete/energy_enstrophy.py
+++ /dev/null
@@ -1,118 +0,0 @@
-# -*- coding: utf-8 -*-
-"""Discrete operator to compute Energy and Enstrophy
-"""
-from hysop.constants import debug
-from hysop.tools.profiler import profile
-from hysop.tools.numpywrappers import npw
-from hysop.operator.discrete.discrete import DiscreteOperator
-from hysop.tools.misc import WorkSpaceTools
-
-
-class EnergyEnstrophy(DiscreteOperator):
-    """Discretization of the energy/enstrophy computation process.
-    """
-    def __init__(self, velocity, vorticity, is_normalized=True, **kwds):
-        """
-        Parameters
-        ----------
-        velocity : :class:`~hysop.operator.field.discrete.DiscreteField`
-            velocity discrete fields
-        vorticity : :class:`~hysop.operator.field.discrete.DiscreteField`
-            vorticity discrete fields
-        isNormalized : boolean
-            true if enstrophy and energy values have to be normalized
-            by the domain lengths.
-        """
-        # velocity field
-        self.velocity = velocity
-        # vorticity field
-        self.vorticity = vorticity
-
-        assert 'variables' not in kwds, 'variables parameter is useless.'
-        super(EnergyEnstrophy, self).__init__(variables=[velocity, vorticity],
-                                              **kwds)
-        # Coeffs for integration
-        self.coeff = {}
-        # Global energy
-        self.energy = 0.0
-        # Global enstrophy
-        self.enstrophy = 0.0
-        topo_w = self.vorticity.topology
-        topo_v = self.velocity.topology
-        space_step = topo_w.mesh.space_step
-        length = topo_w.domain.length
-        # remark topo_w.domain and topo_v.domain
-        # must be the same, no need to topo_v...length.
-        self.coeff['Enstrophy'] = npw.prod(space_step)
-        space_step = topo_v.mesh.space_step
-        self.coeff['Energy'] = 0.5 * npw.prod(space_step)
-        if is_normalized:
-            normalization = 1. / npw.prod(length)
-            self.coeff['Enstrophy'] *= normalization
-            self.coeff['Energy'] *= normalization
-
-    def _set_work_arrays(self, rwork=None, iwork=None):
-
-        v_ind = self.velocity.topology.mesh.compute_index
-        w_ind = self.vorticity.topology.mesh.compute_index
-        size_v = self.velocity.data[0][v_ind].size
-        size_w = self.vorticity.data[0][w_ind].size
-        size_work = max(size_w, size_v)
-        lwork = 1
-        self._rwork = WorkSpaceTools.check_work_array(lwork, size_work, rwork)
-
-    @debug
-    @profile
-    def apply(self, simulation=None):
-        assert simulation is not None, \
-            "Missing simulation value for computation."
-
-        # --- Kinetic energy computation ---
-        vd = self.velocity
-        # get the list of computation points (no ghosts)
-        nbc = vd.nb_components
-        ic = self.velocity.topology.mesh.compute_index
-        # Integrate (locally) velocity ** 2
-        local_energy = 0.
-        size_v = vd[0][ic].size
-        for i in xrange(nbc):
-            self._rwork[0][:size_v] = (vd[i][ic] ** 2).flat
-            local_energy += npw.real_sum(self._rwork[0][:size_v])
-
-        # --- Enstrophy computation ---
-        wd = self.vorticity
-        nbc = wd.nb_components
-        ic = self.vorticity.topology.mesh.compute_index
-        size_w = wd[0][ic].size
-        # Integrate (locally) vorticity ** 2
-        local_enstrophy = 0.
-        for i in xrange(nbc):
-            self._rwork[0][:size_w] = (wd[i][ic] ** 2).flat
-            local_enstrophy += npw.real_sum(self._rwork[0][:size_w])
-
-        # --- Reduce energy and enstrophy values overs all proc ---
-        # two ways : numpy or classical. Todo : check perf and comm
-        sendbuff = npw.zeros((2))
-        recvbuff = npw.zeros((2))
-        sendbuff[:] = [local_energy, local_enstrophy]
-        #
-        self.velocity.topology.comm.Allreduce(sendbuff, recvbuff)
-        # the other way :
-        #energy = self.velocity.topology.allreduce(local_energy,
-        #                                          hysop.core.mpi_REAL,
-        #                                          op=MPI.SUM)
-        #enstrophy = self.velocity.topology.allreduce(local_enstrophy,
-        #                                             hysop.core.mpi_REAL,
-        #                                             op=MPI.SUM)
-
-        # Update global values
-        self.energy = recvbuff[0] * self.coeff['Energy']
-        self.enstrophy = recvbuff[1] * self.coeff['Enstrophy']
-
-        # Print results, if required
-        ite = simulation.current_iteration
-        if self._writer is not None and self._writer.do_write(ite):
-            self._writer.buffer[0, 0] = simulation.time
-            self._writer.buffer[0, 1] = self.energy
-            self._writer.buffer[0, 2] = self.enstrophy
-            self._writer.write()
diff --git a/hysop/old/operator.old/discrete/forcing.py b/hysop/old/operator.old/discrete/forcing.py
deleted file mode 100644
index fe6699137e5b0179edeff13235e4b9e8fc72408c..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/discrete/forcing.py
+++ /dev/null
@@ -1,124 +0,0 @@
-# -*- coding: utf-8 -*-
-"""DOperator implementing the forcing term in the NS,
-    depending on the filtered field
-    (--> computation of base flow).
-    
-.. currentmodule:: hysop.operator.discrete.forcing
-
-"""
-from hysop.constants import debug
-from hysop.operator.discrete.discrete import DiscreteOperator
-from hysop.tools.profiler import profile
-
-class Forcing(DiscreteOperator):
-    """Discretized forcing operator.
-    i.e. solve (with an implicit Euler scheme)
-    \f{eqnarray*}
-    \frac{\partial \omega}{\partial t} &=& -\chi(\omega - \bar{\omega}))
-    \Leftrightarrow \omega^{n+1} &=&
-    \frac{\omega^n + \Delta t \chi \bar{\omega^{n+1}}}{1+\Delta t \chi}
-    \f}
-    """
-
-    @debug
-    def __init__(self, strength=None, **kwds):
-        """
-        Parameters
-        ----------
-        strength : strength of the forcing, chosen in the order
-        of the amplification rate related to the unstable flow.
-        **kwds : extra parameters for parent class.
-
-        """
-        super(Forcing, self).__init__(**kwds)
-        topo = self.variables[0].topology
-        for v in self.variables:
-            msg = 'Multiresolution not implemented for penalization.'
-            assert v.topology == topo, msg
-        
-        ## variable
-        self.var = self.variables[0]
-        ## forced variable
-        self.varFiltered = self.variables[1]
-        ## strength of the forcing
-        self.strength = strength
-
-    def _apply(self, dt):
-        nbc = self.var.nb_components
-        for d in xrange(nbc):
-            self.var[d][...] += self.varFiltered[d][...] * \
-                                    (dt * self.strength)
-            self.var[d][...] *= 1.0 / (1.0 + dt * self.strength)
-        print 'forcing: non conservative formulation'
-
-    def apply(self, simulation=None):
-        assert simulation is not None, \
-            "Simulation parameter is required."
-        dt = simulation.time_step
-        self._apply(dt)
-
-
-class ForcingConserv(Forcing):
-    """Discretized forcing operator.
-        i.e. solve (with an implicit Euler scheme and a CONSERVATIVE formulation)
-        \f{eqnarray*}
-        \frac{\partial \omega}{\partial t} &=& -\chi(\omega - \bar{\omega}))
-        \Leftrightarrow \omega^{n+1} &=&
-        \omega^n + \nabla \times (\frac{\Delta t \chi}{1+\Delta t \chi} (\bar{u^{n+1}}-u^n))
-        \f}
-        """
-
-    @debug
-    def __init__(self, vorticity, velocity, velocityFilt, curl, **kwds):
-        """
-        Parameters
-        ----------
-        velocity, vorticity, velocityFilt: :class:`~hysop.fields.continuous_field.Field`
-        curl : :class:`~hysop.operator.differential`
-        internal operator to compute the curl of the forced velocity
-        **kwds : extra parameters for parent class.
-            
-        Notes
-        -----
-        velocity and velocityFilt are not modified by this operator.
-        vorticity is an in-out parameter.
-        input and ouput variables of the curl are some local buffers.
-        """
-
-        assert 'variables' not in kwds, 'variables parameter is useless.'
-        super(ForcingConserv, self).__init__(variables=[vorticity,
-                                                        velocity,
-                                                        velocityFilt],
-                                             **kwds)
-        # Input vector fields
-        self.velocity = velocity
-        self.vorticity = vorticity
-        self.velocityFilt = velocityFilt
-        # warning : a buffer is added for invar variable in curl
-        topo = self.velocity.topology
-        msg = 'Multiresolution not implemented for vort forcing.'
-        assert self.vorticity.topology == topo, msg
-        self._curl = curl
-            
-    def _apply(self, dt):
-        # Vorticity forcing
-        # warning : the buff0 array ensures "invar" to be 0
-        # outside the obstacle for the curl evaluation
-        invar = self._curl.invar
-        nbc = invar.nb_components
-        for d in xrange(nbc):
-            invar.data[d][...] = 0.0
-        coeff = (dt * self.strength) / (1.0 + dt * self.strength)
-        for d in xrange(nbc):
-            invar.data[d][...] = \
-                (self.velocityFilt[d][...] -
-                 self.velocity[d][...]) * coeff
-        self._curl.apply()
-        for d in xrange(self.vorticity.nb_components):
-            self.vorticity[d][...] += self._curl.outvar[d][...]
-        print 'forcing: conservative formulation'
-
-
-
-
-
diff --git a/hysop/old/operator.old/discrete/low_pass_filt.py b/hysop/old/operator.old/discrete/low_pass_filt.py
deleted file mode 100644
index f6304b581ae140816464301c4430cdf81e978c48..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/discrete/low_pass_filt.py
+++ /dev/null
@@ -1,123 +0,0 @@
-# -*- coding: utf-8 -*-
-"""Discrete operator for vorticity low-pass filtering
-    (--> computation of base flow).
-.. currentmodule:: hysop.operator.discrete.low_pass_filt_vort
-
-"""
-from hysop.constants import debug
-from hysop.operator.discrete.discrete import DiscreteOperator
-from hysop.tools.profiler import profile
-
-class LowPassFilt(DiscreteOperator):
-    """Discretized vorticity filtering operator.
-    i.e. solve (with an explicit Euler scheme)
-    \f{eqnarray*}
-    \frac{\partial \bar{\omega}}{\partial t} &=& \Omega_c(\omega - \bar{\omega}))
-    \Leftrightarrow \bar{\omega^{n+1}} &=& \bar{\omega^{n}} +
-    \Delta t \Omega_c (\omega^n - \bar{\omega^n})
-    \Leftrightarrow \bar{\omega^{n+1}} &=& 
-    \bar{\omega^{n}} (1 - \Delta t \Omega_c) + 
-    \Delta t \Omega_c \omega^n
-    \f}
-    """
-
-    @debug
-    def __init__(self, cutFreq=None, **kwds):
-        """
-        Parameters
-        ----------
-        cutFreq : cutting circular frequency corresponding to the half of 
-        the eigenfrequency of the flow instability
-        **kwds : extra parameters for parent class.
-
-        """
-        super(LowPassFilt, self).__init__(**kwds)
-        topo = self.variables[0].topology
-        for v in self.variables:
-            msg = 'Multiresolution not implemented for penalization.'
-            assert v.topology == topo, msg
-        
-        ## variable
-        self.var = self.variables[0]
-        ## filtered variable
-        self.varFiltered = self.variables[1]
-        ## cutting circular frequency
-        self.cutFreq = cutFreq
-            
-    def _apply(self, dt):
-        nbc = self.varFiltered.nb_components
-        coeff = dt * self.cutFreq
-        for d in xrange(nbc):
-            self.varFiltered[d][...] *= (1.0 - coeff)
-            self.varFiltered[d][...] += self.var[d][...] * coeff
-        print 'filtering: non conservative formulation'
-
-    def apply(self, simulation=None):
-        assert simulation is not None, \
-            "Simulation parameter is required."
-        dt = simulation.time_step
-        self._apply(dt)
-
-
-class LowPassFiltConserv(LowPassFilt):
-    """Discretized vorticity filtering operator.
-    i.e. solve (with an explicit Euler scheme and a CONSERVATIVE formulation)
-    \f{eqnarray*}
-    \frac{\partial \bar{\omega}}{\partial t} &=& \Omega_c(\omega - \bar{\omega}))
-    \Leftrightarrow \bar{\omega^{n+1}} &=& \bar{\omega^{n}} +
-    \nabla \times (\Delta t \Omega_c (u^n - \bar{u^n}))
-    \f}
-    """
-    
-    @debug
-    def __init__(self, vorticityFilt, velocity, velocityFilt, curl, **kwds):
-        """
-        Parameters
-        ----------
-        vorticityFilt, vorticity, velocityFilt: :class:`~hysop.fields.continuous_field.Field`
-        curl : :class:`~hysop.operator.differential`
-        internal operator to compute the curl of the forced velocity
-        **kwds : extra parameters for parent class.
-            
-        Notes
-        -----
-        velocity and velocityFilt are not modified by this operator.
-        vorticityFilt is an in-out parameter.
-        input and ouput variables of the curl are some local buffers.
-        """
-
-        assert 'variables' not in kwds, 'variables parameter is useless.'
-        super(LowPassFiltConserv, self).__init__(variables=[vorticityFilt,
-                                                            velocity,
-                                                            velocityFilt],
-                                                 **kwds)
-        # Input vector fields
-        self.vorticityFilt = vorticityFilt
-        self.velocity = velocity
-        self.velocityFilt = velocityFilt
-        # warning : a buffer is added for invar variable in curl
-        topo = self.velocity.topology
-        msg = 'Multiresolution not implemented for vort forcing.'
-        assert self.vorticityFilt.topology == topo, msg
-        assert self.velocityFilt.topology == topo, msg
-        self._curl = curl
-
-    def _apply(self, dt):
-        # Vorticity filtering
-        # warning : the buff0 array ensures "invar" to be 0
-        # outside the obstacle for the curl evaluation
-        invar = self._curl.invar
-        nbc = invar.nb_components
-        for d in xrange(nbc):
-            invar.data[d][...] = 0.0
-        coeff = dt * self.cutFreq
-        for d in xrange(nbc):
-            invar.data[d][...] = \
-                (self.velocity[d][...] -
-                 self.velocityFilt[d][...]) * coeff
-        self._curl.apply()
-        for d in xrange(self.vorticityFilt.nb_components):
-            self.vorticityFilt[d][...] += self._curl.outvar[d][...]
-        print 'filtering: conservative formulation'
-
-
diff --git a/hysop/old/operator.old/discrete/monitoringPoints.py b/hysop/old/operator.old/discrete/monitoringPoints.py
deleted file mode 100644
index ca7f83bfdfa5a25e843a71abce76b218b5424b0e..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/discrete/monitoringPoints.py
+++ /dev/null
@@ -1,134 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-@file monitoringPoints.py
-Print time evolution of flow variables (velo, vorti)
-at a particular monitoring point in the wake
-"""
-from hysop.constants import debug
-from hysop.tools.profiler import profile
-from hysop.tools.numpywrappers import npw
-import scitools.filetable as ft
-import numpy as np
-from hysop.operator.discrete.discrete import DiscreteOperator
-
-
-class MonitoringPoints(DiscreteOperator):
-    """
-        Print time evolution of flow variables at a given position in the wake.
-    """
-    def __init__(self, velocity, vorticity, monitPt_coords, **kwds):
-        """
-        Constructor.
-        @param velocity field
-        @param vorticity field
-        @param monitPt_coords : list of coordinates corresponding
-            to the space location of the different monitoring points in the wake
-        """
-        ## velocity field
-        self.velocity = velocity
-        ## vorticity field
-        self.vorticity = vorticity
-        ## Monitoring point coordinates
-        self.monitPt_coords = monitPt_coords
-
-        assert 'variables' not in kwds, 'variables parameter is useless.'
-        super(MonitoringPoints, self).__init__(variables=[velocity, vorticity],
-                                       **kwds)
-        topo_v = self.velocity.topology
-        self.shape_v = self.velocity.data[0][topo_v.mesh.compute_index].shape
-        self.space_step = topo_v.mesh.space_step
-        self.length = topo_v.domain.length
-        self.origin = topo_v.domain.origin
-        self.coords = topo_v.mesh.coords
-        self.nb_iter = 0
-        ## Normalized flow values at monitoring position (velNorm, vortNorm)
-        self.velNorm = 0.0
-        self.vortNorm = 0.0
-
-        # Is current processor working ? (Is monitPt_coords(z) in z-coords ?)
-        self.is_rk_computing = False
-        s = self._dim - 1
-        if (self.monitPt_coords[s] in self.coords[s]):
-            self.is_rk_computing = True
-
-    def _set_work_arrays(self, rwork=None, iwork=None):
-
-        v_ind = self.velocity.topology.mesh.compute_index
-        w_ind = self.vorticity.topology.mesh.compute_index
-        shape_v = self.velocity.data[0][v_ind].shape
-        shape_w = self.velocity.data[0][w_ind].shape
-        # setup for rwork, iwork is useless.
-        if rwork is None:
-            # ---  Local allocation ---
-            if shape_v == shape_w:
-                self._rwork = [npw.zeros(shape_v)]
-            else:
-                self._rwork = [npw.zeros(shape_v), npw.zeros(shape_w)]
-        else:
-            assert isinstance(rwork, list), 'rwork must be a list.'
-            # --- External rwork ---
-            self._rwork = rwork
-            if shape_v == shape_w:
-                assert len(self._rwork) == 1
-                assert self._rwork[0].shape == shape_v
-            else:
-                assert len(self._rwork) == 2
-                assert self._rwork[0].shape == shape_v
-                assert self._rwork[1].shape == shape_w
-
-    def get_work_properties(self):
-
-        v_ind = self.velocity.topology.mesh.compute_index
-        w_ind = self.vorticity.topology.mesh.compute_index
-        shape_v = self.velocity.data[0][v_ind].shape
-        shape_w = self.velocity.data[0][w_ind].shape
-        if shape_v == shape_w:
-            return {'rwork': [shape_v], 'iwork': None}
-        else:
-            return {'rwork': [shape_v, shape_w], 'iwork': None}
-
-    @debug
-    @profile
-    def apply(self, simulation=None):
-        assert simulation is not None, \
-            "Missing simulation value for computation."
-
-        time = simulation.time
-        ite = simulation.current_iteration
-        filename = self._writer.filename  #+ '_ite' + format(ite)
-
-        if self.is_rk_computing :
-            self.nb_iter += 1
-            vd = self.velocity.data
-            vortd = self.vorticity.data
-            nbc = self.velocity.nb_components
-            tab = [self.monitPt_coords[0], self.monitPt_coords[1],
-                   self.monitPt_coords[2]]
-
-            ind = []
-            for d in xrange(nbc):
-                cond = np.where(abs(self.coords[d] - tab[d])
-                                < (self.space_step[d] * 0.5))
-                if cond[0].size > 0:
-                    ind.append(cond[d][0])
-                else:
-                    raise ValueError("Wrong set of coordinates.")
-
-            # Compute velocity and vorticity norm
-            # at the monitoring point
-            self.velNorm = np.sqrt(vd[0][ind[0],ind[1],ind[2]] ** 2 +
-                                   vd[1][ind[0],ind[1],ind[2]] ** 2 +
-                                   vd[2][ind[0],ind[1],ind[2]] ** 2)
-            self.vortNorm = np.sqrt(vortd[0][ind[0],ind[1],ind[2]] ** 2 +
-                                    vortd[1][ind[0],ind[1],ind[2]] ** 2 +
-                                    vortd[2][ind[0],ind[1],ind[2]] ** 2)
-
-
-            if self._writer is not None and self._writer.do_write(ite) :
-                self._writer.buffer[0, 0] = time
-                self._writer.buffer[0, 1] = self.velNorm
-                self._writer.buffer[0, 2] = self.vortNorm
-                self._writer.write()
-
-
-
diff --git a/hysop/old/operator.old/discrete/multiphase_gradp.py b/hysop/old/operator.old/discrete/multiphase_gradp.py
deleted file mode 100644
index f9f7318e08a51f14c2996d1fe75d5bf6d9bba46f..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/discrete/multiphase_gradp.py
+++ /dev/null
@@ -1,149 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-@file operator/discrete/multiphase_gradp.py
-Discrer operator of the pressure gradient in a multiphasic flow.
-"""
-from hysop.operator.discrete.discrete import DiscreteOperator
-import hysop.numerics.differential_operations as diff_op
-from hysop.constants import debug, XDIR, YDIR, ZDIR, np
-from hysop.methods import SpaceDiscretization
-from hysop.numerics.update_ghosts import UpdateGhosts
-from hysop.tools.numpywrappers import npw
-from hysop.operator.discrete.discrete import get_extra_args_from_method
-
-
-class GradP(DiscreteOperator):
-    """
-    TODO : describe this operator ...
-    """
-    @debug
-    def __init__(self, velocity, gradp, viscosity,
-                 formula=None, **kwds):
-        """
-        Constructor.
-        Compute the pressure gradient term - GradP/rho
-        in N.S equation from velocity.
-        @param velocity : discretization of the velocity field
-        @param gradp : discretization of the result
-        @param viscosity
-        @param formula : formula to initialize u^(n-1)
-        Note : this should be the formula used to initialize
-        the velocity field
-        """
-        assert 'variables' not in kwds, 'variables parameter is useless.'
-        if 'method' not in kwds:
-            import hysop.default_methods as default
-            kwds['method'] = default.MULTIPHASEGRADP
-
-        super(GradP, self).__init__(
-            variables=[velocity, gradp], **kwds)
-        self.velocity = velocity
-        self.gradp = gradp
-        self.viscosity = viscosity
-        self.input = [self.velocity, ]
-        self.output = [self.gradp, ]
-
-        # prepare ghost points synchro for velocity (vector)
-        # and density (scalar) fields
-        self._synchronizeVel = UpdateGhosts(self.velocity.topology,
-                                            self.velocity.nb_components)
-
-        self._result = [npw.zeros_like(d) for d in self.velocity.data]
-        self._tempGrad = [npw.zeros_like(d) for d in self.velocity.data]
-        self._baroclinicTerm = [npw.zeros_like(d) for d in self.velocity.data]
-
-        self._laplacian = diff_op.Laplacian(
-            self.velocity.topology,
-            indices=self.velocity.topology.mesh.compute_index)
-        self._gradOp = diff_op.GradS(
-            self.velocity.topology,
-            indices=self.velocity.topology.mesh.compute_index,
-            method=self.method[SpaceDiscretization])
-
-        # Gravity vector
-        self._gravity = npw.asrealarray(
-            get_extra_args_from_method(self, 'gravity', [0., 0., -9.81]))
-
-        # Time stem of the previous iteration
-        self._old_dt = None
-
-    def initialize_velocity(self):
-        """Initialize the temporary array 'result' with the velocity"""
-        topo = self.velocity.topology
-        compute_index = topo.mesh.compute_index
-        for d in xrange(self.velocity.dimension):
-            self._result[d][compute_index] = -self.velocity[d][compute_index]
-
-    @debug
-    def apply(self, simulation=None):
-        """Computes the pressure gradient term: -grad(P)/rho
-        -grad(P)/rho =  du/dt + (u . grad)u - nu laplacien(u) - g
-        """
-        if simulation is None:
-            raise ValueError("Missing simulation value for computation.")
-
-        dt = simulation.time_step
-        if self._old_dt is None:
-            self._old_dt = dt
-        # Synchronize ghost points of velocity and density
-        self._synchronizeVel(self.velocity.data)
-
-        topo = self.velocity.topology
-        compute_index = topo.mesh.compute_index
-
-        # result = du/dt = (u^(n)-u^(n-1))/dt
-        # result has been initialized with -u^(n-1)
-        # and _old_dt equals to the previous dt
-        for d in xrange(self.velocity.dimension):
-            self._result[d][compute_index] += self.velocity[d][compute_index]
-            self._result[d][compute_index] /= self._old_dt
-
-        # result = result + (u . grad)u
-        # (u. grad)u = (u.du/dx + v.du/dy + w.du/dz ;
-        #               u.dv/dx + v.dv/dy + w.dv/dz ;
-        #               u.dw/dx + v.dw/dy + w.dw/dz)
-        # Add (u. grad)u components directly in result
-        self._tempGrad = self._gradOp(
-            self.velocity[XDIR:XDIR + 1], self._tempGrad)
-        # result[X] = result[X] + ((u. grad)u)[X]
-        #           = result[X] + u.du/dx + v.du/dy + w.du/dz
-        for d in xrange(self.velocity.dimension):
-            self._result[XDIR][compute_index] += \
-                self.velocity[d][compute_index] * self._tempGrad[d][compute_index]
-
-        self._tempGrad = self._gradOp(
-            self.velocity[YDIR:YDIR + 1], self._tempGrad)
-        # result[Y] = result[Y] + ((u. grad)u)[Y]
-        #           = result[Y] + u.dv/dx + v.dv/dy + w.dv/dz
-        for d in xrange(self.velocity.dimension):
-            self._result[YDIR][compute_index] += \
-                self.velocity[d][compute_index] * self._tempGrad[d][compute_index]
-
-        self._tempGrad = self._gradOp(
-            self.velocity[ZDIR:ZDIR + 1], self._tempGrad)
-        # result[Z] = result[Z] + ((u. grad)u)[Z]
-        #           = result[Z] + u.dw/dx + v.dw/dy + w.dw/dz
-        for d in xrange(self.velocity.dimension):
-            self._result[ZDIR][compute_index] += \
-                self.velocity[d][compute_index] * self._tempGrad[d][compute_index]
-
-        # result = result - nu*\Laplacian u (-g) = gradP/rho
-        for d in xrange(self.velocity.dimension):
-            self._tempGrad[d:d + 1] = self._laplacian(
-                self.velocity[d:d + 1], self._tempGrad[d:d + 1])
-        for d in xrange(self.velocity.dimension):
-            self._tempGrad[d][compute_index] *= self.viscosity
-            self._result[d][compute_index] -= self._tempGrad[d][compute_index]
-
-        # gravity term : result = result - g
-        for d in xrange(self.velocity.dimension):
-            self._result[2][compute_index] -= self._gravity[d]
-
-        for d in xrange(self.velocity.dimension):
-            self.gradp[d][...] = self._result[d]
-
-        # reinitialise for next iteration
-        # velo(n-1) update
-        for d in xrange(self.velocity.dimension):
-            self._result[d][compute_index] = -self.velocity.data[d][compute_index]
-        self._old_dt = dt
diff --git a/hysop/old/operator.old/discrete/multiresolution_filter.py b/hysop/old/operator.old/discrete/multiresolution_filter.py
deleted file mode 100644
index eb0f0bda0a901775eadba57abc74f6fefdf755d4..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/discrete/multiresolution_filter.py
+++ /dev/null
@@ -1,253 +0,0 @@
-"""Filter values from a fine grid to a coarse grid.
-
-"""
-from hysop.constants import debug, np, hysop.core.mpi_REAL
-from hysop.operator.discrete.discrete import DiscreteOperator
-from hysop.tools.profiler import profile
-from hysop.tools.numpywrappers import npw
-from hysop.methods import Remesh
-from hysop.numerics.remeshing import RemeshFormula, Linear, L2_1
-
-
-class FilterFineToCoarse(DiscreteOperator):
-    """Discretized operator for filtering from fine to coarse grid.
-    """
-
-    authorized_methods = [Linear, L2_1]
-
-    def __init__(self, field_in, field_out, **kwds):
-        """
-        Parameters
-        ----------
-        field_in, field_out : lists of
-            :class:`~hysop.fields.discrete_field.DiscreteFields`
-        kwds : base class parameters
-
-        """
-        self.field_in, self.field_out = field_in, field_out
-        # Note : since discrete operators are usually
-        # supposed to work on one single topology,
-        # only field_in is passed to base class.
-
-        # mesh_out must be set before base class init,
-        # to set rwork properly
-
-        self._mesh_out = self.field_out[0].topology.mesh
-        self.gh_out = self.field_out[0].topology.ghosts()
-        super(FilterFineToCoarse, self).__init__(
-            variables=self.field_in, **kwds)
-        self.input = [self.field_in]
-        self.output = [self.field_out]
-        self._mesh_in = self.field_in[0].topology.mesh
-        gh_in = self.field_in[0].topology.ghosts()
-        self._rmsh_method = self._check_method()
-
-        resol_in = self._mesh_in.resolution - 2 * gh_in
-        resol_out = self._mesh_out.resolution - 2 * self.gh_out
-        pts_per_cell = resol_in / resol_out
-        assert np.all(pts_per_cell >= 1), \
-            "This operator is fine grid to coarse one"
-        self.scale_factor = np.prod(self._mesh_in.space_step) / \
-            np.prod(self._mesh_out.space_step)
-
-        # multi-gpu ghosts buffers for communication
-        self._cutdir_list = np.where(
-            self.field_in[0].topology.cutdir)[0].tolist()
-        self._comm = self.field_in[0].topology.comm
-        self._comm_size = self._comm.Get_size()
-        self._comm_rank = self._comm.Get_rank()
-        if self._comm_size == 1:
-            self._exchange_ghosts = self._exchange_ghosts_local
-        else:
-            self._exchange_ghosts = self._exchange_ghosts_mpi
-            self._gh_from_l = [None] * self._dim
-            self._gh_from_r = [None] * self._dim
-            self._gh_to_l = [None] * self._dim
-            self._gh_to_r = [None] * self._dim
-            for d in self._cutdir_list:
-                shape = list(self.field_out[0].data[0].shape)
-                shape[d] = self.gh_out[d]
-                self._gh_from_l[d] = npw.zeros(tuple(shape))
-                self._gh_from_r[d] = npw.zeros(tuple(shape))
-                self._gh_to_l[d] = npw.zeros(tuple(shape))
-                self._gh_to_r[d] = npw.zeros(tuple(shape))
-
-        in_coords = (self._mesh_in.coords - self._mesh_in.origin) / \
-            self._mesh_out.space_step
-        self.floor_coords = np.array([np.floor(c) for c in in_coords])
-        self.dist_coords = in_coords - self.floor_coords
-        self._work_weight = np.array(
-            [npw.zeros_like(c) for c in self.dist_coords])
-        # Slices to serialize concurrent access in coarse grid
-        # Several points in fine grid are laying in the same coarse cell
-        # The serialization avoid concurrent access.
-        self._sl = []
-        for ix in xrange(pts_per_cell[0]):
-            for iy in xrange(pts_per_cell[1]):
-                for iz in xrange(pts_per_cell[2]):
-                    self._sl.append((
-                        slice(ix + gh_in[0],
-                              resol_in[0] + ix + gh_in[0],
-                              pts_per_cell[0]),
-                        slice(iy + gh_in[1],
-                              resol_in[1] + iy + gh_in[1],
-                              pts_per_cell[1]),
-                        slice(iz + gh_in[2],
-                              resol_in[2] + iz + gh_in[2],
-                              pts_per_cell[2]),
-                    ))
-        assert len(self._sl) == np.prod(pts_per_cell)
-        # Slice in coarse grid to distribute values
-        self._sl_coarse = []
-        # Weights associated to offsets in coarse grid
-        self._w_coarse = []
-        for i_x in xrange(len(self._rmsh_method.weights)):
-            for i_y in xrange(len(self._rmsh_method.weights)):
-                for i_z in xrange(len(self._rmsh_method.weights)):
-                    self._sl_coarse.append((
-                        slice(self._mesh_out.compute_index[0].start -
-                              self._rmsh_method.shift + i_x,
-                              self._mesh_out.compute_index[0].stop -
-                              self._rmsh_method.shift + i_x,
-                              None),
-                        slice(self._mesh_out.compute_index[1].start -
-                              self._rmsh_method.shift + i_y,
-                              self._mesh_out.compute_index[1].stop -
-                              self._rmsh_method.shift + i_y,
-                              None),
-                        slice(self._mesh_out.compute_index[2].start -
-                              self._rmsh_method.shift + i_z,
-                              self._mesh_out.compute_index[2].stop -
-                              self._rmsh_method.shift + i_z,
-                              None)
-                    ))
-                    self._w_coarse.append((i_x, i_y, i_z))
-
-    def _set_work_arrays(self, rwork=None, iwork=None):
-        subshape = tuple(self._mesh_out.resolution - 2 * self.gh_out)
-        subsize = np.prod(subshape)
-        if rwork is None:
-            self._rwork = [npw.zeros(subshape)]
-        else:
-            self._rwork = []
-            assert isinstance(rwork, list), 'rwork must be a list.'
-            assert len(rwork) >= 1, 'Wrong length for work arrays list.'
-            for wk in rwork[:1]:
-                assert wk.size >= subsize
-                self._rwork.append(wk.ravel()[:subsize].reshape(subshape))
-
-    def _check_method(self):
-        """Check remeshing method parameters.
-        """
-        try:
-            assert issubclass(self.method[Remesh], RemeshFormula), \
-                "This operator works with a RemeshingFormula."
-            rmsh_method = self.method[Remesh]()
-        except KeyError:
-            rmsh_method = Linear()
-
-        msg = 'Ghost layer must be increased for the chosen scheme.'
-        assert (self.gh_out >=
-                self.method[Remesh].ghosts_layer_size).all(), msg
-        msg = 'Remesh scheme not yet implemented.'
-        assert self.method[Remesh] in self.authorized_methods, msg
-        return rmsh_method
-
-    @debug
-    @profile
-    def apply(self, simulation=None):
-        assert simulation is not None, \
-            "Missing simulation value for computation."
-        wk = self._rwork[0]
-        for v_in, v_out in zip(self.field_in, self.field_out):
-            for d in xrange(v_in.nb_components):
-                for sl_coarse, iw_fun in zip(self._sl_coarse, self._w_coarse):
-                    self._work_weight[0] = self._rmsh_method(
-                        iw_fun[0], self.dist_coords[0], self._work_weight[0])
-                    self._work_weight[1] = self._rmsh_method(
-                        iw_fun[1], self.dist_coords[1], self._work_weight[1])
-                    self._work_weight[2] = self._rmsh_method(
-                        iw_fun[2], self.dist_coords[2], self._work_weight[2])
-                    # Loop over fine grid points sharing the same coarse cell
-                    for sl in self._sl:
-                        wk[...] = v_in[d][sl]
-                        # Compute weights
-                        wk[...] *= self._work_weight[0][sl[0], :, :]
-                        wk[...] *= self._work_weight[1][:, sl[1], :]
-                        wk[...] *= self._work_weight[2][:, :, sl[2]]
-                        wk[...] *= self.scale_factor
-                        # Add contributions in data
-                        v_out[d][sl_coarse] += wk[...]
-        self._exchange_ghosts()
-
-    def _exchange_ghosts_local_d(self, d):
-        """Exchange ghosts values in periodic local array"""
-        s_gh = self.gh_out[d]
-        sl = [slice(None) for _ in xrange(self._dim)]
-        sl_gh = [slice(None) for _ in xrange(self._dim)]
-        sl[d] = slice(1 * s_gh, 2 * s_gh)
-        sl_gh[d] = slice(-1 * s_gh, None)
-        # Add ghost points values at the end of the domain in direction
-        # d into corresponding points at the beginning of the domain.
-        for v_out in self.field_out:
-            v_out.data[0][tuple(sl)] += v_out.data[0][tuple(sl_gh)]
-        # Add ghost points values at the beginning of the domain in direction
-        # d into corresponding points at the end of the domain.
-        sl[d] = slice(-2 * s_gh, -1 * s_gh)
-        sl_gh[d] = slice(0, 1 * s_gh)
-        for v_out in self.field_out:
-            v_out.data[0][tuple(sl)] += v_out.data[0][tuple(sl_gh)]
-
-    @profile
-    def _exchange_ghosts_local(self):
-        """Performs ghosts exchange locally in each direction"""
-        for d in xrange(self._dim):
-            self._exchange_ghosts_local_d(d)
-
-    def _exchange_ghosts_mpi_d(self, d):
-        """Exchange ghosts values in parallel"""
-        s_gh = self.gh_out[d]
-        sl_l = [slice(None) for _ in xrange(self._dim)]
-        sl_gh_l = [slice(None) for _ in xrange(self._dim)]
-        sl_r = [slice(None) for _ in xrange(self._dim)]
-        sl_gh_r = [slice(None) for _ in xrange(self._dim)]
-        sl_l[d] = slice(1 * s_gh, 2 * s_gh)
-        sl_gh_r[d] = slice(-1 * s_gh, None)
-        sl_r[d] = slice(-2 * s_gh, -1 * s_gh)
-        sl_gh_l[d] = slice(0, 1 * s_gh)
-        for v_out in self.field_out:
-            first_cut_dir = v_out.topology.cutdir.tolist().index(True)
-            self._gh_to_l[d][...] = v_out.data[0][tuple(sl_gh_l)]
-            self._gh_to_r[d][...] = v_out.data[0][tuple(sl_gh_r)]
-            r_rk = v_out.topology.neighbours[1, d - first_cut_dir]
-            l_rk = v_out.topology.neighbours[0, d - first_cut_dir]
-            recv_r = self._comm.Irecv(
-                [self._gh_from_r[d], self._gh_from_r[d].size,
-                 hysop.core.mpi_REAL],
-                source=r_rk, tag=1234 + r_rk + 19 * d)
-            recv_l = self._comm.Irecv(
-                [self._gh_from_l[d], self._gh_from_l[d].size,
-                 hysop.core.mpi_REAL],
-                source=l_rk, tag=4321 + l_rk + 17 * d)
-            send_l = self._comm.Issend(
-                [self._gh_to_l[d], self._gh_to_l[d].size, hysop.core.mpi_REAL],
-                dest=l_rk, tag=1234 + self._comm_rank + 19 * d)
-            send_r = self._comm.Issend(
-                [self._gh_to_r[d], self._gh_to_r[d].size, hysop.core.mpi_REAL],
-                dest=r_rk, tag=4321 + self._comm_rank + 17 * d)
-            send_r.wait()
-            recv_l.wait()
-            v_out.data[0][tuple(sl_l)] += self._gh_from_l[d]
-            send_l.wait()
-            recv_r.wait()
-            v_out.data[0][tuple(sl_r)] += self._gh_from_r[d]
-
-    @profile
-    def _exchange_ghosts_mpi(self):
-        """Performs ghosts exchange either locally or with mpi communications
-        in each direction"""
-        for d in xrange(self._dim):
-            if d in self._cutdir_list:
-                self._exchange_ghosts_mpi_d(d)
-            else:
-                self._exchange_ghosts_local_d(d)
diff --git a/hysop/old/operator.old/discrete/particle_advection.py b/hysop/old/operator.old/discrete/particle_advection.py
deleted file mode 100644
index a7e69a3620b8a035281affd820b01974252830d3..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/discrete/particle_advection.py
+++ /dev/null
@@ -1,305 +0,0 @@
-"""Advection solver, particular method, pure-python version
-
-"""
-from hysop.constants import debug, WITH_GUESS, HYSOP_REAL, HYSOP_INTEGER, EPS
-from hysop.methods import TimeIntegrator, Interpolation, Remesh
-from hysop.operator.discrete.discrete import DiscreteOperator
-from hysop.default_methods import ADVECTION
-import numpy as np
-from hysop.numerics.remeshing import Remeshing
-from hysop.tools.profiler import profile
-from hysop.tools.misc import WorkSpaceTools
-from hysop.tools.numpywrappers import npw
-
-
-class ParticleAdvection(DiscreteOperator):
-    """
-    Particular method for advection of a list of fields in a given direction.
-    """
-
-    @debug
-    def __init__(self, velocity, fields_on_grid, direction, **kwds):
-        """
-        Parameters
-        ----------
-        velocity : :class:`~hysop.fields.discrete_field.DiscreteField`
-            advection velocity (discretized)
-        fields_on_grid : list of :class:`~hysop.fields.discrete_field.DiscreteField`
-            discrete field(s) to be advected
-        direction : int
-            advection direction
-        kwds : extra parameters for base class
-
-        """
-        msg = 'Scales advection init : variables parameter is useless.'
-        msg += 'See user guide for details on the proper'
-        msg += ' way to build the operator.'
-        assert 'variables' not in kwds, msg
-        # Advection velocity
-        self.velocity = velocity
-
-        # set variables list ...
-        variables = [self.velocity]
-        if not isinstance(fields_on_grid, list):
-            self.fields_on_grid = [fields_on_grid]
-        else:
-            self.fields_on_grid = fields_on_grid
-        variables += self.fields_on_grid
-
-        if 'method' not in kwds:
-            kwds['method'] = ADVECTION
-        # number of components of the RHS (see time integrator)
-        self._rhs_size = 1
-        # dictionnary of internal buffers used
-        # to save fields values on particles
-        # --> link to _rwork
-        self.fields_on_part = None
-        # buffers for numerical methods
-        # --> just link to _rwork/_iwork components
-        self._rw_interp = None
-        self._iw_interp = None
-        self._rw_integ = None
-        self._rw_remesh = None
-        self._iw_remesh = None
-        # buffer used to save particles position
-        # --> link to _rwork
-        self.part_positions = None
-
-        super(ParticleAdvection, self).__init__(variables=variables, **kwds)
-
-        self.input = self.variables
-        self.output = [df for df in self.variables if df is not self.velocity]
-        self.direction = direction
-        # time integrator
-        self.num_advec = None
-        # remeshing method
-        self.num_remesh = None
-        # build and configure numerical methods
-        self._configure_numerical_methods()
-        # minimal value required for advected field
-        # on grid point to add a particle.
-        # If None, particles everywhere.
-        self._threshold = None
-        self._init_particles_on_grid = self._init_particles_everywhere
-        if self._threshold is not None:
-            self._init_particles_on_grid = self._init_particles_with_threshold
-
-    def _configure_numerical_methods(self):
-        """Function to set the numerical method for python operator and link them
-        to the proper working arrays.
-
-        Warning : must be called after _set_work_arrays
-        """
-
-        # ---  numerical interpolation operator ---
-        # discrete field that will be interpolated
-        vd = self.velocity.data[self.direction]
-        # first field topology as reference
-        topo_fields = self.fields_on_grid[0].topology
-        # velocity 'grid'
-        topo_velo = self.velocity.topology
-        # -- Interpolation scheme --
-        # to interpolate velocity from topo_velo
-        # to the grid on which particles are initialized.
-        num_interpolate = \
-            self.method[Interpolation](vd, direction=self.direction,
-                                       topo_source=topo_velo,
-                                       topo_target=topo_fields,
-                                       rwork=self._rw_interp,
-                                       iwork=self._iw_interp)
-
-        # -- time integration --
-        # advection is performed only on 'computational points',
-        # not on ghosts
-        ic = topo_fields.mesh.compute_index
-        self.num_advec = self.method[TimeIntegrator](
-            self._rhs_size, rwork=self._rw_integ,
-            f=num_interpolate,
-            topo=topo_fields,
-            optim=WITH_GUESS,
-            indices=ic)
-
-        # -- remesh --
-        self.num_remesh = Remeshing(self.method[Remesh],
-                                    topo_source=topo_fields,
-                                    direction=self.direction,
-                                    rwork=self._rw_remesh,
-                                    iwork=self._iw_remesh)
-
-    def _set_work_arrays(self, rwork=None, iwork=None):
-
-        # Find number and shape of required work arrays
-
-        # topologies for advected field(s) grid
-        topo_fields = self.fields_on_grid[0].topology
-        # -- work arrays properties for interpolation --
-        # work arrays shape depends on the targeted grid, i.e. topo_fields.
-        interp_wp = self.method[Interpolation].get_work_properties(topo_fields)
-        interp_iwork_length = len(interp_wp['iwork'])
-        interp_rwork_length = len(interp_wp['rwork'])
-
-        # -- work arrays properties for time-integrator --
-        # depends on topo_fields.
-        ti_wp = self.method[TimeIntegrator].get_work_properties(
-            self._rhs_size, topo_fields)
-        ti_rwork_length = len(ti_wp['rwork'])
-        # -- work arrays properties for remeshing scheme --
-        # depends on topo_fields.
-        remesh_wp = Remeshing.get_work_properties(topo_fields)
-        remesh_iwork_length = len(remesh_wp['iwork'])
-        remesh_rwork_length = len(remesh_wp['rwork'])
-
-        # -- Find out the number of required work arrays --
-        # Interpolation and odesolver work arrays must be different
-        # but after interpolation/advection, work arrays can be reused
-        # by remeshing. So the number of required work arrays is
-        # equal to
-        # max(nb works_for_time_integrator + nb work_for_interpolation,
-        #     nb works_for_remesh)
-        rwork_length = max(remesh_rwork_length,
-                           ti_rwork_length + interp_rwork_length)
-        iwork_length = max(interp_iwork_length, remesh_iwork_length)
-
-        # -- Finally, particles positions and fields on particles
-        # values will also be in work array.
-        ppos_index = rwork_length
-        # max number of components for advected fields
-        nbc = max([f.nb_components for f in self.fields_on_grid])
-        rwork_length += nbc
-        rwork_length += 1  # work array for positions
-
-        # -- rwork organisation --
-
-        # rw = [ | rw_interp | rw_time_int | part_pos | fields_on_part |]
-        #        |        rw_remesh        |      ...                  |]
-
-        #  work array shape depends on the time integrator
-        #  interpolation scheme and remeshing scheme
-
-        # check and/or allocate work arrays according to properties above
-        # reshape stuff is done inside numerical methods.
-        # Here we just allocate flat memory.
-        subshape_field = tuple(topo_fields.mesh.compute_resolution)
-        subsize_interp_ti = interp_wp['rwork'] + ti_wp['rwork']
-        subsize_remesh = remesh_wp['rwork']
-        # list of shapes for rwork arrays (!flat arrays!)
-        subsize_rw = max(subsize_interp_ti, subsize_remesh)
-        # list of shapes for iwork arrays (!flat arrays!)
-        subsize_iw = max(interp_wp['iwork'], remesh_wp['iwork'])
-        # one array for particles positions
-        # and arrays for fields on particles
-        for i in xrange(nbc + 1):
-            subsize_rw.append((np.prod(subshape_field), ))
-
-        # Build iwork and rwork lists
-        self._rwork = WorkSpaceTools.check_work_array(rwork_length, subsize_rw,
-                                                      rwork, HYSOP_REAL)
-        self._iwork = WorkSpaceTools.check_work_array(iwork_length, subsize_iw,
-                                                      iwork, HYSOP_INTEGER)
-
-        # -- set connections for remesh, ti and interp methods --
-        # interpolation
-        self._rw_interp = [self._rwork[i] for i in xrange(interp_rwork_length)]
-        self._iw_interp = [self._iwork[i] for i in xrange(interp_iwork_length)]
-        assert np.asarray([npw.arrays_share_data(self._rw_interp[i],
-                                                 self._rwork[i])
-                           for i in xrange(interp_rwork_length)]).all()
-
-        # time integration
-        self._rw_integ = [self._rwork[i]
-                          for i in xrange(interp_rwork_length,
-                                          interp_rwork_length +
-                                          ti_rwork_length)]
-        # remesh
-        self._rw_remesh = [self._rwork[i] for i in xrange(remesh_rwork_length)]
-        self._iw_remesh = [self._iwork[i] for i in xrange(remesh_iwork_length)]
-
-        # --- links to work for particles positions buffers ---
-        self._work_part_pos = ppos_index
-        self.part_positions = WorkSpaceTools.check_work_array(
-            1, subshape_field, [self._rwork[ppos_index]])
-        assert npw.arrays_share_data(self.part_positions[0],
-                                     self._rwork[ppos_index])
-
-        # --- links to buffers for fields on particles ---
-        ppos_index += 1
-        self.fields_on_part = WorkSpaceTools.check_work_array(
-            nbc, subshape_field,
-            [self._rwork[i] for i in xrange(ppos_index, ppos_index + nbc)])
-        assert np.asarray(
-            [npw.arrays_share_data(self.fields_on_part[i],
-                                   self._rwork[ppos_index + i])
-             for i in xrange(nbc)]).all()
-
-    @debug
-    @profile
-    def apply(self, simulation=None):
-            #, dt_coeff=1., split_id=0, old_dir=0):
-        """
-        Advection algorithm:
-        - initialize particles and fields with their values on the grid.
-          --> x_p, f_p
-        - compute particle positions in splitting direction,
-          --> solve dx_p / dt = v_p
-        - remesh fields from particles to grid
-          --> f_g
-        """
-        assert simulation is not None, \
-            'Simulation parameter is missing.'
-        t, dt = simulation.time, simulation.sub_step
-
-        # -- Initialize particles on the grid --
-        self._init_particles_on_grid()
-
-        # -- Advect particles --
-        # RHS of odesolver must be computed and save in odesolver._rwork[0]
-        # Notes :
-        # - ghost-synchro of the rhs (if needed) is done by the odesolver
-        # - interpolation of velocity field from grid to particles is done
-        # 'inside' the ode solver (rhs = interpolation scheme)
-        self.num_advec.rwork[0][...] = self.velocity.data[self.direction][...]
-        self.part_positions = self.num_advec(
-            t, self.part_positions, dt, result=self.part_positions)
-
-        # -- Remesh --
-        for fg in self.fields_on_grid:
-            # Initialize fields on particles with fields on grid values --
-            for d in xrange(fg.nb_components):
-                self.fields_on_part[d][...] = fg[d][...]
-            # Remesh all components of the field
-            fg = self.num_remesh(ppos=self.part_positions,
-                                 pscal=self.fields_on_part, result=fg)
-
-    def _init_particles_everywhere(self):
-        """Initialize particles on grid points
-        """
-        # indices of computation points (i.e. no ghosts)
-        topo = self.fields_on_grid[0].topology
-        ic = topo.mesh.compute_index
-        self.part_positions[0][...] = \
-            topo.mesh.coords[self.direction][ic[self.direction]] + 10 * EPS
-        # remark : we shift particles with EPS to avoid
-        # the case where, because of floating point precision
-        # xp (particle position) < xg (grid position),
-        # with xp - xg very small. If this happens,
-        # left point of xp will be xg - 1 rather than xg
-        # which is not what is expected.
-
-    def _init_particles_with_threshold(self):
-        """"Initialize particles on grid points where
-        advected field value is greater than a given threshold
-        """
-        raise AttributeError("Not yet implemented")
-        # first field is used as reference to check threshold
-        ref_field = self.fields_on_grid[0].data
-        topo = self.fields_on_grid[0].topology
-        ic = topo.mesh.compute_index
-        self._rwork[0][...] = 0.
-        for i in xrange(len(ref_field)):
-            self._rwork[0][...] += ref_field[i][ic] ** 2
-        self._rwork[0][...] = np.sqrt(self._rwork[0])
-        ipart = np.where(self._rwork < self._threshold)
-        part_shape = ref_field[0][ipart].shape
-        self.part_positions = WorkSpaceTools.check_work_array(
-            1, part_shape, self._rwork[self._work_part_pos])
-        self.part_positions[0][...] = topo.mesh.coords[self.direction][ipart]
diff --git a/hysop/old/operator.old/discrete/particle_advection_dir.py b/hysop/old/operator.old/discrete/particle_advection_dir.py
deleted file mode 100644
index 2f804cba77ddd9d4eaac40205b62e6b2bdeb546e..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/discrete/particle_advection_dir.py
+++ /dev/null
@@ -1,247 +0,0 @@
-"""
-@file particle_advection.py
-Base for directionally splitted advection solvers (pure-python and GPU version).
-"""
-from hysop.constants import debug, WITH_GUESS, HYSOP_REAL, HYSOP_INTEGER, SIZEOF_HYSOP_REAL, SIZEOF_HYSOP_INTEGER, ORDER
-from hysop.methods import TimeIntegrator, Interpolation, Remesh, Support
-from hysop.operator.discrete.discrete import DiscreteOperator
-from hysop.backend.device.codegen.structs.mesh_info import MeshDirection
-from hysop.tools.numpywrappers import npw
-import hysop.default_methods as default
-import numpy as np
-from hysop.numerics.remeshing import Remeshing
-from hysop.tools.profiler import profile
-
-class ParticleAdvectionDir(DiscreteOperator):
-    """
-    Interface for particular advection method of a list of fields in a given direction.
-    Base class for pure-python and GPU ParticleAdvectionDir implementations.
-    """
-    @debug
-    def __init__(self, velocity, advected_fields, direction, **kwds):
-        """
-        Constructor.
-        @param velocity:        discretization of the velocity field (all components)
-        @param advected_fields : list of discretized fields to be advected.
-        @param direction:      direction of advection
-        """
-
-        cls = self.__class__.__name__
-        if ORDER != 'F':
-            msg='{} has not been implemented for C-contiguous arrays!'.format(cls)
-            raise NotImplementedError(msg)
-        
-        # Check input values
-        if 'variables' in kwds.keys():
-            msg='variables parameter is should not be used in {}.'.format(cls)
-            raise ValueError(msg)
-        if not isinstance(advected_fields, list):
-            advected_fields = [advected_fields]
-        if not isinstance(direction, int) or (direction not in MeshDirection.entries()):
-            raise ValueError("Invalid direction value '{}'.".format(direction))
-        if ('iwork' in kwds) or ('rwork' in kwds):
-            msg='rwork or iwork parameters are useless in {}.'.format(cls)
-            raise ValueError(msg)
-
-        # Discrete fields and their topologies
-        self.velocity        = velocity
-        self.advected_fields = advected_fields
-
-        self.velocity_topo   = velocity.topology
-        self.fields_topo     = advected_fields[0].topology
-        self._is_multi_scale = self._check_topologies()
-
-        # MPI related variables
-        self._comm           = self.fields_topo.comm
-        self._comm_size      = self._comm.Get_size()
-        self._comm_rank      = self._comm.Get_rank()
-        self._is_distributed = (self._comm_size > 1)
-
-        # Variables
-        input_vars  = [velocity]+advected_fields
-        output_vars = advected_fields
-        variables   = input_vars
-
-        super(ParticleAdvectionDir, self).__init__(
-            rwork=None, iwork=None,
-            variables=variables,
-            **kwds)
-
-        self.input     = self.variables
-        self.output    = [df for df in self.variables if df is not self.velocity]
-        self.direction = direction
-    
-    def _check_topologies(self):
-        multiscale = False
-        topo_ref = self.fields_topo
-        for f in self.advected_fields:
-            if f.topology != topo_ref:
-                raise ValueError('One of the advected field has a different topology!')
-        multiscale = (self.velocity_topo != topo_ref)
-        return multiscale
-
-# ParticleAdvectionDir interface    
-    @staticmethod
-    def supports_multiscale():
-        return False
-    @staticmethod
-    def supports_mpi():
-        return False
-    
-    def _check(self):
-        cls = self.__class__.__name__
-        if (self._is_multi_scale) and (not self.supports_multiscale()):
-            msg='Multiscale advection in {} has not been implemented yet !'.format(cls)
-            raise NotImplementedError(msg)
-        if (self._is_distributed) and (not self.supports_mpi()):
-            msg='MPI multi-GPU advection has not been implemented in {} yet!'.format(cls)
-            raise NotImplementedError(msg)
-
-    def _initialize(self):
-        pass
-
-    def get_work_properties(self):
-        """ 
-            directional work properties should return the same shapes
-            to ease buffer sharing between all directions.
-        """
-        cls=self.__class__.__name__
-        raise NotImplementedError('{}.get_work_properties'.format(cls))
-    
-    def setup(self, rwork, iwork):
-        """ 
-            rwork and iwork cannot be None during setup
-    ,
-    because they are shared between all directions.
-        """
-        cls=self.__class__.__name__
-        raise NotImplementedError('{}.setup'.format(cls))
-
-    def apply(self,simulation,step_id,**extra_params):
-        """ 
-            First check if input step_id corresponds to this operator's direction.
-            And apply this operator with given step_id.
-        """
-        cls=self.__class__.__name__
-        raise NotImplementedError('{}.apply'.format(cls))
-    
-    def step_directions(self):
-        """ 
-            List of directions in order of execution.
-            
-            Ex: If configured splitting method is Strang splitting of order 2
-                which execution order is (X,Y,Z,Y,X) in 3d.
-                this function should return 
-                    [0]         in 1D
-                    [0,1,0]     in 2D
-                    [0,1,2,1,0] in 3D
-        """
-        cls=self.__class__.__name__
-        raise NotImplementedError('{}.step_directions'.format(cls))
-
-
-
-class PythonParticleAdvectionDir(ParticleAdvectionDir):
-    """
-    Pure python particle advection dir.
-    """
-    
-    def __init__(self, velocity, advected_fields, direction, **kwds):
-        super(self.__class__,self).__init__(velocity,advected_fields,direction,**kwds)
-        
-        #TODO check this implementation
-        raise NotImplementedError('Check this implementation.')
-
-        self._configure_numerical_methods()
-
-    def _configure_numerical_methods(self):
-        """
-        Function to set the numerical method for python operator and link them
-        to the proper working arrays.
-        """
-        # Use first field topology as reference
-        topo = self.advected_fields[0].topology
-
-        # --- Initialize time integrator for advection ---
-        w_interp, iw_interp =\
-            self.method[Interpolation].getWorkLengths(
-                domain_dim=self.domain.dim)
-        self._rw_interp = self._rwork[:w_interp]
-        self._iw_interp = self._iwork[:iw_interp]
-
-        vd = self.velocity.data[self.direction]
-        num_interpolate = \
-            self.method[Interpolation](vd, self.direction, topo,
-                                       work=self._rw_interp,
-                                       iwork=self._iw_interp)
-
-        w_rk = self.method[TimeIntegrator].getWorkLengths(nb_components=1)
-        self._rw_integ = self._rwork[w_interp:w_interp + w_rk]
-        self.num_advec = self.method[TimeIntegrator](1, work=self._rw_integ,
-                                                     f=num_interpolate,
-                                                     topo=topo,
-                                                     optim=WITH_GUESS)
-        # --- Initialize remesh ---
-        w_remesh, iw_remesh =\
-            Remeshing.getWorkLengths(
-                domain_dim=self.domain.dim)
-        self._rw_remesh = self._rwork[:w_remesh]
-        self._iw_remesh = self._iwork[:iw_remesh]
-
-        self.num_remesh = Remeshing(self.method[Remesh],
-                                    self.domain.dim,
-                                    topo, self.direction,
-                                    work=self._rw_remesh,
-                                    iwork=self._iw_remesh)
-
-        ## Particles positions
-        start = max(w_interp + w_rk, w_remesh)
-        self.part_position = [self._rwork[start]]
-
-        ## Fields on particles
-        self.fields_on_part = {}
-        start += 1
-        for f in self.advected_fields:
-            self.fields_on_part[f] = self._rwork[start: start + f.nb_components]
-            start += f.nb_components
-
-
-    @debug
-    @profile
-    def apply(self, simulation, step_id):
-        """
-        Advection algorithm:
-        - initialize particles and fields with their values on the grid.
-        - compute particle positions in splitting direction,
-        (time integrator), resolution of dx_p/dt = a_p.
-        - remesh fields from particles to grid
-        """
-        assert simulation is not None, \
-            'Simulation parameter is missing.'
-
-        t, dt = simulation.time, simulation.time_step * dt_coeff
-        # Initialize fields on particles with fields on grid values.
-        for fg in self.advected_fields:
-            for d in xrange(fg.nb_components):
-                self.fields_on_part[fg][d][...] = fg[d][...]
-
-        # Initialize particles on the grid
-        toporef = self.advected_fields[0].topology
-        self.part_position[0][...] = toporef.mesh.coords[self.direction]
-
-        # Advect particles
-        # RK use the first 2 (or 3) works and leave others to interpolation
-        # First work contains fist evaluation of ode right hand side.
-        self._rw_integ[0][...] = self.velocity.data[self.direction][...]
-        self.part_position = self.num_advec(
-            t, self.part_position, dt, result=self.part_position)
-
-        # Remesh particles
-        # It uses the last dim + 2 workspaces (same as interpolation)
-        for fg in self.advected_fields:
-            fp = self.fields_on_part[fg]
-            for d in xrange(fg.nb_components):
-                fg[d][...] = self.num_remesh(
-                    self.part_position, fp[d], result=fg[d])
-
-
diff --git a/hysop/old/operator.old/discrete/penalization.py b/hysop/old/operator.old/discrete/penalization.py
deleted file mode 100644
index 1746ade1e624b35f2f5dabeb62f64835dffbb5d7..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/discrete/penalization.py
+++ /dev/null
@@ -1,181 +0,0 @@
-# -*- coding: utf-8 -*-
-"""Discrete operators for penalization problem.
-.. currentmodule:: hysop.operator.discrete.penalization
-* :class:`Penalization` : standard penalisation
-* :class:`PenalizeVorticity`  : vorticity formulation
-
-"""
-from hysop.constants import debug
-from hysop.operator.discrete.discrete import DiscreteOperator
-from hysop.tools.profiler import profile
-from hysop.domain.subsets import Subset
-
-
-class Penalization(DiscreteOperator):
-    """Discretized penalisation operator.
-    """
-
-    @debug
-    def __init__(self, obstacles, coeff=None, **kwds):
-        """
-        Parameters
-        ----------
-        obstacles : dictionnary or list of `~hysop.domain.subsets.Subset`
-            sets of geometries on which penalization must be applied
-        coeff : double, optional
-            penalization factor applied to all geometries.
-
-        """
-        super(Penalization, self).__init__(**kwds)
-
-        topo = self.variables[0].topology
-        # indices of points of grid on which penalization is applied.
-        # It may be a single condition (one penal coeff for all subsets)
-        # or a list of conditions (one different coeff for each subset).
-        self._cond = None
-        if isinstance(obstacles, list):
-            msg = 'A penalization factor is required for the obstacles.'
-            assert coeff is not None, msg
-            self._coeff = coeff
-            self._cond = self._init_single_coeff(obstacles, topo)
-            self._apply = self._apply_single_coeff
-
-        elif isinstance(obstacles, dict):
-            # cond is a dictionnary, key = list of indices,
-            # value = penalization coeff
-            self._cond, self._coeff = self._init_multi_coeff(obstacles, topo)
-            self._apply = self._apply_multi_coeff
-
-        for v in self.variables:
-            msg = 'Multiresolution not implemented for penalization.'
-            assert v.topology == topo, msg
-
-        # list of numpy arrays to penalize
-        self._varlist = []
-        for v in self.variables:
-            for d in xrange(v.nb_components):
-                self._varlist.append(v[d])
-
-    def _init_single_coeff(self, obstacles, topo):
-        """
-        Compute a condition which represents the union
-        of all obstacles.
-        """
-        msg = 'Warning : you use a porous obstacle but apply the same'
-        msg += ' penalisation factor everywhere.'
-        for _ in [obs for obs in obstacles if obs.is_porous]:
-            print msg
-        assert isinstance(obstacles, list)
-        return Subset.union(obstacles, topo)
-
-    def _init_multi_coeff(self, obstacles, topo):
-        """
-        Compute a condition which represents the union
-        of all obstacles.
-        """
-        cond = []
-        coeff = []
-        for obs in obstacles:
-            if obs.is_porous:
-                assert isinstance(obstacles[obs], list)
-                current = obs.ind[topo]
-                nb_layers = len(current)
-                assert len(current) == nb_layers
-                for i in xrange(nb_layers):
-                    # append the list of indices
-                    cond.append(current[i])
-                    # and its corresponding coeff
-                    coeff.append(obstacles[obs][i])
-            else:
-                cond.append(obs.ind[topo][0])
-                coeff.append(obstacles[obs])
-        return cond, coeff
-
-    @debug
-    @profile
-    def _apply_single_coeff(self, dt):
-        coef = 1.0 / (1.0 + dt * self._coeff)
-        for v in self._varlist:
-            v[self._cond] *= coef
-
-    def _apply_multi_coeff(self, dt):
-        for i in xrange(len(self._cond)):
-            coef = 1.0 / (1.0 + dt * self._coeff[i])
-            cond = self._cond[i]
-            for v in self._varlist:
-                v[cond] *= coef
-
-    def apply(self, simulation=None):
-        assert simulation is not None, \
-            "Simulation parameter is required."
-        dt = simulation.time_step
-        self._apply(dt)
-
-
-class PenalizeVorticity(Penalization):
-    """
-    Discretized penalisation operator.
-    See details in hysop.operator.penalization
-    """
-
-    @debug
-    def __init__(self, vorticity, velocity, curl, **kwds):
-        """
-        Parameters
-        ----------
-        velocity, vorticity: :class:`~hysop.fields.continuous_field.Field`
-        curl : :class:`~hysop..operator.differential`
-            internal operator to compute the curl of the penalised velocity
-        **kwds : extra parameters for parent class.
-
-        Notes
-        -----
-        velocity is not modified by this operator.
-        vorticity is an in-out parameter.
-        input and ouput variables of the curl are some local buffers.
-        """
-        assert 'variables' not in kwds, 'variables parameter is useless.'
-        super(PenalizeVorticity, self).__init__(variables=[vorticity,
-                                                           velocity],
-                                                **kwds)
-        self.velocity = velocity
-        self.vorticity = vorticity
-        # warning : a buffer is added for invar variable in curl
-        topo = self.velocity.topology
-        msg = 'Multiresolution not implemented for penalization.'
-        assert self.vorticity.topology == topo, msg
-        self._curl = curl
-
-    def _apply_single_coeff(self, dt):
-        # Vorticity penalization
-        # warning : the buff0 array ensures "invar" to be 0
-        # outside the obstacle for the curl evaluation
-        invar = self._curl.invar
-        nbc = invar.nb_components
-        for d in xrange(nbc):
-            invar.data[d][...] = 0.0
-        coeff = -dt * self._coeff / (1.0 + dt * self._coeff)
-        for d in xrange(nbc):
-            invar.data[d][self._cond] = \
-                self.velocity[d][self._cond] * coeff
-        self._curl.apply()
-        for d in xrange(self.vorticity.nb_components):
-            self.vorticity[d][...] += self._curl.outvar[d][...]
-
-    def _apply_multi_coeff(self, dt):
-        invar = self._curl.invar
-        nbc = invar.nb_components
-
-        for d in xrange(nbc):
-            invar.data[d][...] = 0.0
-
-        for i in xrange(len(self._cond)):
-            coeff = -dt * self._coeff[i] / (1.0 + dt * self._coeff[i])
-            cond = self._cond[i]
-            for d in xrange(nbc):
-                invar.data[d][cond] = self.velocity[d][cond] * coeff
-
-        self._curl.apply()
-
-        for d in xrange(self.vorticity.nb_components):
-            self.vorticity[d][...] += self._curl.outvar[d][...]
diff --git a/hysop/old/operator.old/discrete/poisson_fft.py b/hysop/old/operator.old/discrete/poisson_fft.py
deleted file mode 100644
index f0d3f51e01fd6d2ccb3ffd254efe2e72e25af5a4..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/discrete/poisson_fft.py
+++ /dev/null
@@ -1,247 +0,0 @@
-# -*- coding: utf-8 -*-
-"""Discrete operator for Poisson problem (fftw based)
-
-.. currentmodule hysop.operator.discrete
-
-"""
-from hysop.tools.numpywrappers import npw
-
-from hysop.operator.discrete.discrete import DiscreteOperator
-from hysop.operator.discrete.reprojection import Reprojection
-from hysop.constants import debug
-from hysop.tools.profiler import profile
-try:
-    from hysop.f2hysop import fftw2py
-except ImportError:
-    msg = 'fftw package not available for your hysop install.'
-    msg += 'Try to recompile with WITH_FFTW=ON'
-    raise ImportError(msg)
-
-
-class PoissonFFT(DiscreteOperator):
-    """Discretized Poisson operator based on FFTW.
-    See details in hysop.operator.poisson
-    """
-
-    @debug
-    def __init__(self, output_field, input_field, projection=None,
-                 filter_size=None, correction=None, formulation=None, **kwds):
-        """Poisson operator, incompressible flow.
-
-        Parameters
-        ----------
-        output_field : :class:`~hysop.fields.discrete_field.DiscreteField
-            solution field
-        input_field  : :class:`~hysop.fields.discrete_field.DiscreteField
-            right-hand side
-        projection : double or tuple, optional
-             projection criterion, see notes below.
-        filter_size :
-        correction : :class:`~velocity_correction.VelocityCorrection_D
-        operator used to shift output_field according
-          to a given input (fixed) flowrate.
-          See hysop.operator.velocity_correction.
-        formulation :
-        kwds : base class parameters.
-        """
-        # Base class initialisation
-        assert 'variables' not in kwds, 'variables parameter is useless.'
-        super(PoissonFFT, self).__init__(variables=[output_field, input_field],
-                                         **kwds)
-        # Solution field
-        self.output_field = output_field
-        # RHS field
-        self.input_field = input_field
-        # Solenoidal projection of input_field ?
-        self.projection = projection
-        # Filter size array = domainLength/(CoarseRes-1)
-        self.filter_size = filter_size
-        # If 2D problem, input_field must be a scalar
-        self._dim = self.output_field.domain.dim
-        if self._dim == 2:
-            assert self.input_field.nb_components == 1
-        assert (self._dim >= 2),\
-            "Wrong problem dimension: only 2D and 3D cases are implemented."
-        self.correction = correction
-        self.formulation = formulation
-        self.input = [self.input_field]
-        self.output = [self.output_field]
-
-        # The function called during apply
-        self.solve = None
-        # a sub function ...
-        self._solve = None
-        self.do_projection = None
-        self._select_solve()
-
-    def _select_solve(self):
-        """
-        TODO : add pressure solver selection
-        f(output.nbComponents) + pb type 'pressure-poisson'
-        """
-        # Multiresolution?
-        multires = \
-            self.output_field.topology.mesh != self.input_field.topology.mesh
-        # connexion to the required apply function
-        if self._dim == 2:
-            self._solve = self._solve_2d
-        elif self._dim == 3:
-            # If there is a projection, input_field is also an output
-            if self.projection is not None:
-                self.output.append(self.input_field)
-                if multires:
-                    self._solve = self._solve_3d_proj_multires
-                else:
-                    self._solve = self._solve_3d_proj
-
-                if isinstance(self.projection, Reprojection):
-                    self.do_projection = self.do_projection_with_op
-                else:
-                    self.do_projection = self.do_projection_no_op
-
-            else:
-                if multires:
-                    self._solve = self._solve_3d_multires
-                elif self.formulation is not None:
-                    self._solve = self._solve_3d_scalar_fd
-                else:
-                    self._solve = self._solve_3d
-
-        # swtich to the proper solving method (with or without correction)
-        if self.correction is not None:
-            self.solve = self._solve_and_correct
-        else:
-            self.solve = self._solve
-
-    def do_projection_with_op(self, simu):
-        """Compute projection criterion and return
-        true if projection is needed.
-        """
-        self.projection.apply(simu)
-        ite = simu.current_iteration
-        return self.projection.do_projection(ite)
-
-    def do_projection_no_op(self, simu):
-        """return true if projection is needed.
-        """
-        ite = simu.current_iteration
-        return ite % self.projection == 0
-
-    def _solve_2d(self, simu=None):
-        """ Solve 2D poisson problem, no projection, no correction.
-        """
-        ghosts_v = self.output_field.topology.ghosts()
-        ghosts_w = self.input_field.topology.ghosts()
-        self.output_field.data[0], self.output_field.data[1] =\
-            fftw2py.solve_poisson_2d(self.input_field.data[0],
-                                     self.output_field.data[0],
-                                     self.output_field.data[1],
-                                     ghosts_w, ghosts_v)
-
-    def _project(self):
-        """apply projection onto input_field
-        """
-        ghosts_w = self.input_field.topology.ghosts()
-        self.input_field.data[0], self.input_field.data[1], \
-            self.input_field.data[2] = \
-            fftw2py.projection_om_3d(self.input_field.data[0],
-                                     self.input_field.data[1],
-                                     self.input_field.data[2], ghosts_w)
-
-    def _solve_3d_multires(self, simu=None):
-        """3D, multiresolution
-        """
-        # Projects input_field values from fine to coarse grid
-        # in frequencies space by nullifying the smallest modes
-        vortFilter = npw.copy(self.input_field.data)
-        vortFilter[0], vortFilter[1], vortFilter[2] = \
-            fftw2py.multires_om_3d(self.filter_size[0], self.filter_size[1],
-                                   self.filter_size[2],
-                                   self.input_field.data[0],
-                                   self.input_field.data[1],
-                                   self.input_field.data[2])
-
-        # Solves Poisson equation using filter input_field
-        ghosts_v = self.output_field.topology.ghosts()
-        ghosts_w = self.input_field.topology.ghosts()
-        self.output_field.data[0], self.output_field.data[1],\
-            self.output_field.data[2] = \
-            fftw2py.solve_poisson_3d(vortFilter[0], vortFilter[1],
-                                     vortFilter[2], self.output_field.data[0],
-                                     self.output_field.data[1],
-                                     self.output_field.data[2],
-                                     ghosts_w, ghosts_v)
-
-    def _solve_3d_proj_multires(self, simu):
-        """3D, multiresolution, with projection
-        """
-        if self.do_projection(simu):
-            self._project()
-        self._solve_3d_multires()
-
-    def _solve_3d_proj(self, simu):
-        """3D, with projection
-        """
-        if self.do_projection(simu):
-            self._project()
-        self._solve_3d()
-
-    def _solve_3d(self,simu=None):
-        """Basic solve
-        """
-        # Solves Poisson equation using usual input_field
-        ghosts_v = self.output_field.topology.ghosts()
-        ghosts_w = self.input_field.topology.ghosts()
-        self.output_field.data[0], self.output_field.data[1],\
-            self.output_field.data[2] =\
-            fftw2py.solve_poisson_3d(self.input_field.data[0],
-                                     self.input_field.data[1],
-                                     self.input_field.data[2],
-                                     self.output_field.data[0],
-                                     self.output_field.data[1],
-                                     self.output_field.data[2],
-                                     ghosts_w, ghosts_v)
-
-    def _solve_and_correct(self, simu):
-        """Solve Poisson problem and apply correction on velocity.
-        """
-        self._solve(simu.current_iteration)
-        self.correction.apply(simu)
-
-    def _solve_3d_scalar_fd(self, simu=None):
-        """solve poisson-pressure like problem
-        input = 3D vector field
-        output = 3D scalar field
-        """
-        # Compute rhs = f(input) inplace
-        # --> output == rhs
-        # Call fftw filter
-        # !!! pressure3d use the same arg for input and output
-        # ---> input_field will be overwritten
-        ghosts = self.output_field.topology.ghosts()
-        self.output_field.data[0] = fftw2py.pressure_3d(
-            self.input_field.data[0], ghosts)
-
-    def _solve_3d_scalar(self, simu=None):
-        """solve poisson-pressure like problem
-        input = 3D vector field
-        output = 3D scalar field
-        """
-        # # Call fftw filter
-        # self._output_field.data[0] = fftw2py.solve_poisson_3d_pressure(
-        #     self._input_field.data[0],
-        #     self._input_field.data[1],
-        #     self._input_field.data[2])
-        pass
-
-    @debug
-    @profile
-    def apply(self, simulation=None):
-        self.solve(simulation)
-
-    def finalize(self):
-        """
-        Clean memory (fftw plans and so on)
-        """
-        pass
-        #fftw2py.clean_fftw_solver(self.output_field.dimension)
diff --git a/hysop/old/operator.old/discrete/profiles.py b/hysop/old/operator.old/discrete/profiles.py
deleted file mode 100644
index f34e4cd6ebb612577652fbe44e879d7e156a59ff..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/discrete/profiles.py
+++ /dev/null
@@ -1,180 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-@file profiles.py
-Compute and print velo/vorti profiles
-"""
-from hysop.constants import debug
-from hysop.tools.profiler import profile
-from hysop.tools.numpywrappers import npw
-import scitools.filetable as ft
-import numpy as np
-from hysop.operator.discrete.discrete import DiscreteOperator
-
-
-class Profiles(DiscreteOperator):
-    """
-    Compute and print velo/vorti profiles at a given position.
-    """
-    def __init__(self, velocity, vorticity, prof_coords, 
-                 direction, beginMeanComput, **kwds):
-        """
-        Constructor.
-        @param velocity : discretization of the velocity field
-        @param vorticity : discretization of the vorticity field
-        @param direction : profile direction (0, 1 or 2)
-        @param beginMeanComput : time at which the computation of mean profile must begin
-        @param prof_coords : X and Y coordinates of the profile 
-        warning : the Z-coordinate is supposed to be 0 for each profile !
-        """
-        ## velocity field
-        self.velocity = velocity
-        ## vorticity field
-        self.vorticity = vorticity
-        ## X and Y coordinates of the profile
-        self.prof_coords =  prof_coords
-        ## profile direction (0, 1 or 2)
-        self.direction = direction
-        ## time at which the computation of mean profile must begin
-        self.beginMeanComput = beginMeanComput
-
-        assert 'variables' not in kwds, 'variables parameter is useless.'
-        super(Profiles, self).__init__(variables=[velocity, vorticity],
-                                       **kwds)
-        topo_v = self.velocity.topology
-        self.shape_v = self.velocity.data[0][topo_v.mesh.compute_index].shape
-        self.space_step = topo_v.mesh.space_step
-        self.length = topo_v.domain.length
-        self.origin = topo_v.domain.origin
-        self.coords = topo_v.mesh.coords
-        self.nb_iter = 0
-        ## Mean quantities (meanVelNorm, meanVortNorm, meanVelX, 
-        ## meanVelY, meanVelY, meanVortX, meanVortY, meanVortZ)
-        self.mean_qtities = None
-        if direction==0:
-            self.mean_qtities = [npw.zeros(self.shape_v[0]) for d in xrange(8)]
-        elif direction==1:
-            self.mean_qtities = [npw.zeros(self.shape_v[1]) for d in xrange(8)]
-        else:
-            raise ValueError("Only profiles in the X or Y direction.")
-
-        # Is current processor working ? (Is 0 in z-coords ?)
-        self.is_rk_computing = False
-        if (0.0 in self.coords[2]):
-            self.is_rk_computing = True
-
-    def _set_work_arrays(self, rwork=None, iwork=None):
-
-        v_ind = self.velocity.topology.mesh.compute_index
-        w_ind = self.vorticity.topology.mesh.compute_index
-        shape_v = self.velocity.data[0][v_ind].shape
-        shape_w = self.velocity.data[0][w_ind].shape
-        # setup for rwork, iwork is useless.
-        if rwork is None:
-            # ---  Local allocation ---
-            if shape_v == shape_w:
-                self._rwork = [npw.zeros(shape_v)]
-            else:
-                self._rwork = [npw.zeros(shape_v), npw.zeros(shape_w)]
-        else:
-            assert isinstance(rwork, list), 'rwork must be a list.'
-            # --- External rwork ---
-            self._rwork = rwork
-            if shape_v == shape_w:
-                assert len(self._rwork) == 1
-                assert self._rwork[0].shape == shape_v
-            else:
-                assert len(self._rwork) == 2
-                assert self._rwork[0].shape == shape_v
-                assert self._rwork[1].shape == shape_w
-
-    @debug
-    @profile
-    def apply(self, simulation=None):
-        assert simulation is not None, \
-            "Missing simulation value for computation."
-
-        time = simulation.time
-        ite = simulation.current_iteration
-        filename = self._writer.filename  + '_ite' + format(ite)
-
-        if time >= self.beginMeanComput and self.is_rk_computing :
-            self.nb_iter += 1
-            vd = self.velocity.data
-            vortd = self.vorticity.data
-            nbc = self.velocity.nb_components
-            tab = [self.prof_coords[0],  self.prof_coords[1], 0.0]
-
-            ind = []
-            for d in xrange(nbc):
-                cond = np.where(abs(self.coords[d] - tab[d])
-                                < (self.space_step[d] * 0.5))
-                if cond[0].size > 0:
-                    ind.append(cond[d][0])
-                else:
-                    raise ValueError("Wrong set of coordinates.")
-
-            if self.direction==0 :
-                for i in xrange (self.shape_v[0]):
-                    self.mean_qtities[0][i] += np.sqrt(vd[0][i,ind[1],ind[2]] ** 2 +
-                                                       vd[1][i,ind[1],ind[2]] ** 2 +
-                                                       vd[2][i,ind[1],ind[2]] ** 2)
-                    self.mean_qtities[1][i] += np.sqrt(vortd[0][i,ind[1],ind[2]] ** 2 +
-                                                       vortd[1][i,ind[1],ind[2]] ** 2 +
-                                                       vortd[2][i,ind[1],ind[2]] ** 2)
-                    self.mean_qtities[2][i] += vd[0][i,ind[1],ind[2]]
-                    self.mean_qtities[3][i] += vd[1][i,ind[1],ind[2]]
-                    self.mean_qtities[4][i] += vd[2][i,ind[1],ind[2]]
-                    self.mean_qtities[5][i] += vortd[0][i,ind[1],ind[2]]
-                    self.mean_qtities[6][i] += vortd[1][i,ind[1],ind[2]]
-                    self.mean_qtities[7][i] += vortd[2][i,ind[1],ind[2]]
-
-            elif self.direction==1 :
-                for j in xrange (self.shape_v[1]):
-                    self.mean_qtities[0][j] += np.sqrt(vd[0][ind[0],j,ind[2]] ** 2 +
-                                                       vd[1][ind[0],j,ind[2]] ** 2 +
-                                                       vd[2][ind[0],j,ind[2]] ** 2)
-                    self.mean_qtities[1][j] += np.sqrt(vortd[0][ind[0],j,ind[2]] ** 2 +
-                                                       vortd[1][ind[0],j,ind[2]] ** 2 +
-                                                       vortd[2][ind[0],j,ind[2]] ** 2)
-                    self.mean_qtities[2][j] += vd[0][ind[0],j,ind[2]]
-                    self.mean_qtities[3][j] += vd[1][ind[0],j,ind[2]]
-                    self.mean_qtities[4][j] += vd[2][ind[0],j,ind[2]]
-                    self.mean_qtities[5][j] += vortd[0][ind[0],j,ind[2]]
-                    self.mean_qtities[6][j] += vortd[1][ind[0],j,ind[2]]
-                    self.mean_qtities[7][j] += vortd[2][ind[0],j,ind[2]]
-
-            else:
-                raise ValueError("Only profiles in the X or Y direction.")
-
-            if self._writer is not None and self._writer.do_write(ite) :
-                f = open(filename, 'w')
-                if self.direction==0 :
-                    for i in xrange (self.shape_v[0]):
-                        self._writer.buffer[0, 0] = self.coords[0][i,0,0]
-                        self._writer.buffer[0, 1] = self.mean_qtities[0][i] / self.nb_iter
-                        self._writer.buffer[0, 2] = self.mean_qtities[1][i] / self.nb_iter
-                        self._writer.buffer[0, 3] = self.mean_qtities[2][i] / self.nb_iter
-                        self._writer.buffer[0, 4] = self.mean_qtities[3][i] / self.nb_iter
-                        self._writer.buffer[0, 5] = self.mean_qtities[4][i] / self.nb_iter
-                        self._writer.buffer[0, 6] = self.mean_qtities[5][i] / self.nb_iter
-                        self._writer.buffer[0, 7] = self.mean_qtities[6][i] / self.nb_iter
-                        self._writer.buffer[0, 8] = self.mean_qtities[7][i] / self.nb_iter
-                        ft.write(f, self._writer.buffer)
-                elif self.direction==1 :
-                    for j in xrange (self.shape_v[1]):
-                        self._writer.buffer[0, 0] = self.coords[1][0,j,0]
-                        self._writer.buffer[0, 1] = self.mean_qtities[0][j] / self.nb_iter
-                        self._writer.buffer[0, 2] = self.mean_qtities[1][j] / self.nb_iter
-                        self._writer.buffer[0, 3] = self.mean_qtities[2][j] / self.nb_iter
-                        self._writer.buffer[0, 4] = self.mean_qtities[3][j] / self.nb_iter
-                        self._writer.buffer[0, 5] = self.mean_qtities[4][j] / self.nb_iter
-                        self._writer.buffer[0, 6] = self.mean_qtities[5][j] / self.nb_iter
-                        self._writer.buffer[0, 7] = self.mean_qtities[6][j] / self.nb_iter
-                        self._writer.buffer[0, 8] = self.mean_qtities[7][j] / self.nb_iter
-                        ft.write(f, self._writer.buffer)
-                else :
-                    raise ValueError("Only profiles in the X or Y direction.")
-                f.close()
-
-
-
diff --git a/hysop/old/operator.old/discrete/reprojection.py b/hysop/old/operator.old/discrete/reprojection.py
deleted file mode 100644
index 100c1b1985d0d0c86cfdd5847463a26c9f4320b4..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/discrete/reprojection.py
+++ /dev/null
@@ -1,129 +0,0 @@
-# -*- coding: utf-8 -*-
-"""Compute reprojection criterion and divergence maximum
-"""
-import numpy as np
-from hysop.constants import debug
-from hysop.methods import SpaceDiscretization
-from hysop.operator.discrete.discrete import DiscreteOperator
-from hysop.numerics.finite_differences import FDC4
-from hysop.numerics.differential_operations import GradV
-from hysop.tools.numpywrappers import npw
-from hysop.numerics.update_ghosts import UpdateGhosts
-from hysop.core.mpi import MPI
-from hysop.tools.profiler import profile
-
-
-class Reprojection(DiscreteOperator):
-    """Update the reprojection frequency, according to the current
-    value of the vorticity field.
-    """
-    def __init__(self, vorticity, threshold, frequency, **kwds):
-        """
-
-        Parameters
-        ----------
-        vorticity : :class:`~hysop.fields.discrete_field.DiscreteField
-            vorticity field
-        threshold : double
-            update frequency when criterion is greater than this threshold
-        frequency : double
-            frequency of execution of the reprojection
-        """
-        # ensure that space discretisation method is set
-        if 'method' in kwds and kwds['method'] is None:
-            kwds['method'] = {SpaceDiscretization: FDC4}
-
-        # vorticity field
-        self.vorticity = vorticity
-        assert 'variables' not in kwds, 'variables parameter is useless.'
-        super(Reprojection, self).__init__(variables=[vorticity], **kwds)
-        # Frequency for reprojection
-        self.frequency = frequency
-        # The initial value will be used as default during
-        # simulation
-        self._default_frequency = frequency
-        # constant threshold defining the reprojection criterion :
-        # if the latter is greater than this constant, then a reprojection
-        # is needed
-        self.threshold = threshold
-        self._counter = 0
-        # Numerical methods for space discretization
-        assert SpaceDiscretization in self.method
-        self.method = self.method[SpaceDiscretization]
-        self.input = [vorticity]
-        self.output = []
-        topo = self.vorticity.topology
-        # prepare ghost points synchro for vorticity
-        self._synchronize = UpdateGhosts(topo, self.vorticity.nb_components)
-        # grad function
-        self._function = GradV(topo=topo, method=self.method)
-
-    def _set_work_arrays(self, rwork=None, iwork=None):
-
-        memshape = self.vorticity.data[0].shape
-        worklength = self.vorticity.nb_components ** 2
-
-        # setup for rwork, iwork is useless.
-        if rwork is None:
-            # ---  Local allocation ---
-            self._rwork = []
-            for _ in xrange(worklength):
-                self._rwork.append(npw.zeros(memshape))
-        else:
-            assert isinstance(rwork, list), 'rwork must be a list.'
-            # --- External rwork ---
-            self._rwork = rwork
-            msg = 'Bad shape/length external work. Use get_work_properties'
-            msg += ' function to find the right properties for work arrays.'
-            assert len(self._rwork) == worklength, msg
-            for wk in self._rwork:
-                assert wk.shape == memshape
-
-    @debug
-    @profile
-    def apply(self, simulation=None):
-        assert simulation is not None, \
-            'Simulation parameter is missing.'
-        ite = simulation.current_iteration
-
-        # Reset reprojection frequency to default
-        self.frequency = self._default_frequency
-
-        # Synchronize ghost points of vorticity
-        self._synchronize(self.vorticity.data)
-        # gradU computation
-        self._rwork = self._function(self.vorticity.data, self._rwork)
-        nb_components = self.vorticity.nb_components
-        # maxima of vorticity divergence (abs)
-        d1 = np.max(abs(sum([(self._rwork[(nb_components + 1) * i])
-                             for i in xrange(nb_components)])))
-        # maxima of partial derivatives of vorticity
-        d2 = 0.0
-        for grad_n in self._rwork:
-            d2 = max(d2, np.max(abs(grad_n)))
-
-        # computation of the reprojection criterion and mpi-reduction
-        criterion = d1 / d2
-        criterion = self.vorticity.topology.comm.allreduce(
-            criterion, op=MPI.MAX)
-        # is reprojection of vorticity needed for the next time step ?
-        if criterion > self.threshold:
-            self.frequency = 1
-
-        # update counter
-        if self.do_projection(ite):
-            self._counter += 1
-
-        # Print results, if required
-        # Remark : writer buffer is (pointer) connected to diagnostics
-        if self._writer is not None and self._writer.do_write(ite):
-            self._writer.buffer[0, 0] = simulation.time
-            self._writer.buffer[0, 1] = d1
-            self._writer.buffer[0, 2] = d2
-            self._writer.buffer[0, 3] = self._counter
-            self._writer.write()
-
-    def do_projection(self, ite):
-        """True if projection must be done
-        """
-        return ite % self.frequency == 0
diff --git a/hysop/old/operator.old/discrete/residual.py b/hysop/old/operator.old/discrete/residual.py
deleted file mode 100644
index c79cbf84ffe54519925e1622cc80cb68d8f25a62..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/discrete/residual.py
+++ /dev/null
@@ -1,117 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-@file residual.py
-Compute and print the time evolution of the residual
-"""
-from hysop.constants import debug
-from hysop.tools.profiler import profile
-from hysop.tools.numpywrappers import npw
-import scitools.filetable as ft
-import numpy as np
-from hysop.operator.discrete.discrete import DiscreteOperator
-
-
-class Residual(DiscreteOperator):
-    """
-        Compute and print the residual as a function of time
-    """
-    def __init__(self, vorticity, **kwds):
-        """
-        Constructor.
-        @param vorticity field
-        """
-        ## vorticity field
-        self.vorticity = vorticity
-
-        assert 'variables' not in kwds, 'variables parameter is useless.'
-        super(Residual, self).__init__(variables=[vorticity], **kwds)
-        topo_w = self.vorticity.topology
-        self.shape_w = self.vorticity.data[0][topo_w.mesh.compute_index].shape
-        self.space_step = topo_w.mesh.space_step
-        self.length = topo_w.domain.length
-        self.origin = topo_w.domain.origin
-        self.coords = topo_w.mesh.coords
-        self.nb_iter = 0
-        ## Global residual
-        self.residual = 0.0
-        # Time stem of the previous iteration
-        self._old_dt = None
-        # Define array to store vorticity field at previous iteration
-        self._vortPrev = [npw.zeros_like(d) for d in self.vorticity.data]
-
-    def _set_work_arrays(self, rwork=None, iwork=None):
-
-        w_ind = self.vorticity.topology.mesh.compute_index
-        shape_w = self.vorticity.data[0][w_ind].shape
-        # setup for rwork, iwork is useless.
-        if rwork is None:
-            # ---  Local allocation ---
-            self._rwork = [npw.zeros(shape_w)]
-        else:
-            assert isinstance(rwork, list), 'rwork must be a list.'
-            # --- External rwork ---
-            self._rwork = rwork
-            assert len(self._rwork) == 1
-            assert self._rwork[0].shape == shape_w
-
-    def get_work_properties(self):
-
-        w_ind = self.vorticity.topology.mesh.compute_index
-        shape_w = self.vorticity.data[0][w_ind].shape
-        return {'rwork': [shape_w], 'iwork': None}
-
-    def initialize_vortPrev(self):
-        
-        w_ind = self.vorticity.topology.mesh.compute_index
-        for d in xrange(self.vorticity.dimension):
-            self._vortPrev[d][w_ind] = self.vorticity[d][w_ind]
-
-    @debug
-    @profile
-    def apply(self, simulation=None):
-        assert simulation is not None, \
-            "Missing simulation value for computation."
-
-        time = simulation.time
-        ite = simulation.current_iteration
-        dt = simulation.time_step
-        if self._old_dt is None:
-            self._old_dt = dt
-        
-        filename = self._writer.filename
-        
-        # Compute on local proc (w^(n+1)-w^n) ** 2
-        local_res = 0.
-        # get the list of computation points (no ghosts)
-        wd = self.vorticity
-        nbc = wd.nb_components
-        w_ind = self.vorticity.topology.mesh.compute_index
-        for i in xrange(nbc):
-            self._rwork[0][...] = (wd[i][w_ind] -
-                                   self._vortPrev[i][w_ind]) ** 2
-            local_res += npw.real_sum(self._rwork[0])
-
-        # --- Reduce local_res over all proc ---
-        sendbuff = npw.zeros((1))
-        recvbuff = npw.zeros((1))
-        sendbuff[:] = [local_res]
-        #
-        self.vorticity.topology.comm.Allreduce(sendbuff, recvbuff)
-        
-        # Update global residual
-        self.residual = np.sqrt(recvbuff[0])
-
-        # Print results, if required
-        if self._writer is not None and self._writer.do_write(ite) :
-            self._writer.buffer[0, 0] = time
-            self._writer.buffer[0, 1] = ite
-            self._writer.buffer[0, 2] = self.residual
-            self._writer.write()
-
-        # update vort(n-1) for next iteration
-        for i in xrange(nbc):
-            self._vortPrev[i][w_ind] = self.vorticity.data[i][w_ind]
-        self._old_dt = dt
-
-
-
diff --git a/hysop/old/operator.old/discrete/scales_advection.py b/hysop/old/operator.old/discrete/scales_advection.py
deleted file mode 100644
index 1bb23d04b3c94f2f615c4df053c02efd0e6ac290..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/discrete/scales_advection.py
+++ /dev/null
@@ -1,100 +0,0 @@
-# -*- coding: utf-8 -*-
-"""Interface to scales solver (fortran)
-
-"""
-try:
-    from hysop.f2hysop import scales2py
-except ImportError:
-    msgE = 'scales package not available for your hysop install.'
-    msgE += 'Try to recompile with WITH_SCALES=ON'
-    raise ImportError(msgE)
-from hysop.operator.discrete.discrete import DiscreteOperator
-from hysop.methods import MultiScale
-from hysop.constants import debug
-import math
-ceil = math.ceil
-
-
-class ScalesAdvection(DiscreteOperator):
-    """Advection process, based on scales library.
-    """
-
-    _scales_remesh_kernels = ['p_M4', 'p_M8', 'p_44', 'p_64',
-                              'p_66', 'p_84', 'p_M6']
-    """Kernels available in scales"""
-    _scales_remesh_corrected_kernels = ['p_O2', 'p_O4', 'p_L2']
-    """Corrected kernels available in scales"""
-
-    @debug
-    def __init__(self, velocity, advected_fields, **kwds):
-        """
-        Parameters
-        ----------
-        velocity : :class:`~hysop.fields.discrete_field.DiscreteField`
-            advection velocity (discretized)
-        advected_fields : list of :class:`~hysop.fields.discrete_field.DiscreteField`
-            discrete field(s) to be advected
-        direction : int
-            advection direction
-        kwds : extra parameters for base class
-        """
-        msg = 'Scales advection init : variables parameter is useless.'
-        msg += 'See user guide for details on the proper'
-        msg += ' way to build the operator.'
-        assert 'variables' not in kwds, msg
-        # Advection velocity
-        self.velocity = velocity
-        variables = [self.velocity]
-        if isinstance(advected_fields, list):
-            self.advected_fields = advected_fields
-        else:
-            self.advected_fields = [advected_fields]
-        variables += self.advected_fields
-
-        msg = 'Scales advection init : method parameter is compulsory.'
-        assert 'method' in kwds, msg
-
-        super(ScalesAdvection, self).__init__(variables=variables, **kwds)
-
-        self.input = self.variables
-        self.output = self.advected_fields
-
-        # Connection to the proper scales functions.
-        self._scales_func = []
-        is_multi_scale = self.method[MultiScale] is not None
-        for adF in self.advected_fields:
-            if adF.nb_components == 3:
-                if is_multi_scale:
-                    # 3D interpolation of the velocity before advection
-                    self._scales_func.append(
-                        scales2py.solve_advection_inter_basic_vect)
-                    # Other interpolation only 2D interpolation first and
-                    # 1D interpolations before advections in each direction
-                    # (slower than basic): solve_advection_inter
-                else:
-                    self._scales_func.append(scales2py.solve_advection_vect)
-            else:
-                if is_multi_scale:
-                    self._scales_func.append(
-                        scales2py.solve_advection_inter_basic)
-                else:
-                    self._scales_func.append(scales2py.solve_advection)
-
-    @debug
-    def apply(self, simulation=None):
-        assert simulation is not None, \
-            "Missing simulation value for computation."
-
-        dt = simulation.time_step
-        # Call scales advection
-        for adF, fun in zip(self.advected_fields, self._scales_func):
-            adF = fun(dt, self.velocity.data[0],
-                      self.velocity.data[1],
-                      self.velocity.data[2],
-                      *adF)
-
-    def finalize(self):
-        """
-        \todo check memory deallocation in scales???
-        """
-        DiscreteOperator.finalize(self)
diff --git a/hysop/old/operator.old/discrete/spectrum.py b/hysop/old/operator.old/discrete/spectrum.py
deleted file mode 100755
index a448d6dc7728fa85d299b5aa1bd41375afc85fa5..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/discrete/spectrum.py
+++ /dev/null
@@ -1,73 +0,0 @@
-# -*- coding: utf-8 -*-
-"""Discrete Spectrum operator using FFTW
-"""
-from hysop.operator.discrete.discrete import DiscreteOperator
-from hysop.constants import debug, np, hysop.core.mpi_REAL
-from hysop.tools.profiler import profile
-from hysop.tools.numpywrappers import npw
-from hysop.core.mpi import MPI
-
-try:
-    from hysop.f2hysop import fftw2py
-except ImportError:
-    msgE = 'fftw package not available for your hysop install.'
-    msgE += 'Try to recompile with WITH_FFTW=ON'
-    raise ImportError(msgE)
-
-
-class FFTSpectrum(DiscreteOperator):
-    """Discretized Spectrum operator based on FFTW.
-    """
-    @debug
-    def __init__(self, field, **kwds):
-        """
-
-        Parameters
-        ----------
-        field: :class:`~hysop.fields.discrete_field.DiscreteField`
-            the input field for which spectrum will be computed
-        """
-        # Discretization of the input field
-        self.field = field
-        msg = 'Spectrum error: implemented only for 3D problems.'
-        assert self.field.dimension == 3, msg
-        if self.field.nb_components > 1:
-            raise AttributeError("Vector case not yet implemented.")
-        # Base class initialisation
-        assert 'variables' not in kwds, 'variables parameter is useless.'
-        super(FFTSpectrum, self).__init__(variables=[field],
-                                          **kwds)
-        self.input = [self.field]
-        l = np.min(self.field.topology.mesh.discretization.resolution)
-        self._tmp = npw.zeros(((l - 1) / 2, ))
-        self._kx = npw.zeros(((l - 1) / 2, ))
-        self.res = npw.zeros(((l - 1) / 2, ))
-
-    @debug
-    @profile
-    def apply(self, simulation=None):
-        assert simulation is not None, \
-            "Missing simulation parameter."
-        ite = simulation.current_iteration
-        ghosts = self.field.topology.ghosts()
-        fftw2py.spectrum_3d(self.field.data[0],
-                            self._tmp, self._kx,
-                            ghosts, np.min(self.domain.length))
-        if self.field.topology.size == 1:
-            self.res[...] = self._tmp
-        else:
-            self.field.topology.comm.Reduce(
-                [self._tmp, self.res.shape[0], hysop.core.mpi_REAL],
-                [self.res, self.res.shape[0], hysop.core.mpi_REAL],
-                op=MPI.SUM, root=0)
-
-        if self._writer is not None and self._writer.do_write(ite):
-            nbc = self.res.shape[0]
-            self._writer.buffer[0, 0:nbc] = self._kx
-            self._writer.buffer[0, nbc:] = self.res
-            self._writer.write()
-
-    def finalize(self):
-        """Clean memory (fftw plans and so on)
-        """
-        pass
diff --git a/hysop/old/operator.old/discrete/stretching.py b/hysop/old/operator.old/discrete/stretching.py
deleted file mode 100755
index e7b30d21c70426fb67d4d977a282143269d7d20e..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/discrete/stretching.py
+++ /dev/null
@@ -1,304 +0,0 @@
-# -*- coding: utf-8 -*-
-"""Discretization of the stretching operator.
-
-Formulations:
-
-* :class:`~hysop.operator.discrete.stretching.Conservative`
-* :class:`~hysop.operator.discrete.stretching.GradUW`
-
-"""
-
-from hysop.constants import debug, WITH_GUESS
-from hysop.methods import TimeIntegrator, SpaceDiscretization
-from hysop.operator.discrete.discrete import DiscreteOperator
-from hysop.numerics.differential_operations import GradVxW, DivWV
-from hysop.tools.numpywrappers import npw
-from hysop.numerics.update_ghosts import UpdateGhosts
-from hysop.tools.profiler import profile
-from hysop.tools.misc import WorkSpaceTools
-from abc import ABCMeta, abstractmethod
-import math
-import numpy as np
-ceil = math.ceil
-
-
-class Stretching(DiscreteOperator):
-    """Abstract interface to stretching discrete operators.
-    Check user guide to see the available formulations.
-    """
-
-    __metaclass__ = ABCMeta
-
-    @debug
-    def __init__(self, velocity, vorticity, formulation,
-                 rhs, extra_ghosts_fields=None, **kwds):
-        """Abstract interface for stretching operator
-
-        Parameters
-        ----------
-        velocity, vorticity : :class:`~hysop.fields.discrete_field.DiscreteField`
-        formulation : one of the discrete stretching classes.
-        rhs : python function
-            right-hand side for the time integrator
-        extra_ghosts_fields : int, optional
-            number of additional numpy arrays to be synchronized (mpi),
-            default = None, i.e. synchro of vorticity and velocity components.
-        **kwds : extra parameters for base class
-
-        """
-        # velocity discrete field
-        self.velocity = velocity
-        # vorticity discrete field
-        self.vorticity = vorticity
-        # Formulation for stretching (divWV or GradVxW)
-        self.formulation = formulation
-
-        if 'method' not in kwds:
-            import hysop.default_methods as default
-            kwds['method'] = default.STRETCHING
-        # Work vector used by time-integrator
-        self._ti_work = None
-        # Work vector used by numerical diff operator.
-        self._str_work = None
-        # all works vectors are set in base class init,
-        # thanks to _set_work_arrays function.
-        super(Stretching, self).__init__(variables=[self.velocity,
-                                                    self.vorticity], **kwds)
-
-        self.input = self.variables
-        self.output = [self.vorticity]
-        # \todo multiresolution case
-        assert self.velocity.topology.mesh == self.vorticity.topology.mesh,\
-            'Multiresolution case not yet implemented.'
-
-        # Number of components of the operator (result)
-        self.nb_components = 3  # Stretching only in 3D and for vector fields.
-
-        # prepare ghost points synchro for velocity and vorticity
-        nb_ghosts_fields = self.velocity.nb_components + \
-            self.vorticity.nb_components
-        if extra_ghosts_fields is not None:
-            nb_ghosts_fields += extra_ghosts_fields
-        self._synchronize = UpdateGhosts(self.velocity.topology,
-                                         nb_ghosts_fields)
-
-        # A function to compute the gradient of the velocity.
-        # Work vector is provided in input.
-        self.strFunc = \
-            self.formulation(topo=self.velocity.topology,
-                             work=self._str_work,
-                             method=self.method[SpaceDiscretization])
-        # Time integrator
-        self.timeIntegrator = \
-            self.method[TimeIntegrator](self.nb_components,
-                                        self.velocity.topology,
-                                        rwork=self._ti_work,
-                                        f=rhs,
-                                        optim=WITH_GUESS)
-        # In self._integrate ti_work is used as in/out parameters in the rhs,
-        # of the time-integrator so it needs a reshape.
-        # Warning : this is done without any memory realloc.
-        for i in xrange(len(self._ti_work)):
-            self._ti_work[i] = self.timeIntegrator.rwork[i]
-
-    def _set_work_arrays(self, rwork=None, iwork=None):
-
-        ti = self.method[TimeIntegrator]
-        topo = self.velocity.topology
-        nbc = self.velocity.nb_components
-        # properties of working arrays required for time-integrator
-        wk_prop = ti.get_work_properties(nbc, topo)['rwork']
-        lenti = len(wk_prop)
-        # properties of working arrays required for differential operations,
-        wk_prop += self.formulation.get_work_properties(topo)['rwork']
-        subshape = np.prod(topo.mesh.local_resolution)
-        self._rwork = WorkSpaceTools.check_work_array(len(wk_prop),
-                                                      subshape, rwork)
-        self._ti_work = self._rwork[:lenti]
-        self._str_work = self._rwork[lenti:]
-
-    @profile
-    def update_ghosts(self):
-        """Ghost points synchronization
-        """
-        self._synchronize(self.velocity.data + self.vorticity.data)
-
-    def apply(self, simulation=None):
-        """Solve stretching equation and update vorticity
-        """
-        assert simulation is not None, \
-            "Missing simulation value for computation."
-
-        # time step
-        dt = simulation.time_step
-        # current time
-        t = simulation.time
-
-        # Synchronize ghost points of velocity
-        self._synchronize(self.velocity.data + self.vorticity.data)
-        self._compute(dt, t)
-
-    @abstractmethod
-    def _compute(self, dt, t):
-        """Abstract interface to local stretching resolution
-        """
-
-    @profile
-    def _integrate(self, dt, t):
-        """Apply time integrator
-        """
-        # Init workspace with a first evaluation of the
-        # rhs of the integrator
-        self._ti_work[:self.nb_components] = \
-            self.timeIntegrator.f(t, self.vorticity.data,
-                                  self._ti_work[:self.nb_components])
-        # perform integration and save result in-place
-        self.vorticity.data = self.timeIntegrator(t, self.vorticity.data, dt,
-                                                  result=self.vorticity.data)
-
-
-class Conservative(Stretching):
-    """Conservative formulation
-    """
-
-    @profile
-    def __init__(self, **kwds):
-
-        # Right-hand side for time integration
-        def rhs(t, y, result):
-            """rhs used in time integrator
-            """
-            return self.strFunc(y, self.velocity.data, result)
-
-        super(Conservative, self).__init__(
-            formulation=DivWV, rhs=rhs, **kwds)
-
-    @profile
-    def _compute(self, dt, t):
-        # No subcycling for this formulation
-        self._integrate(dt, t)
-
-
-class GradUW(Stretching):
-    """GradUW formulation
-    """
-
-    def __init__(self, **kwds):
-
-        def rhs(t, y, result):
-            """rhs used in time integrator
-            """
-            result, self.diagnostics =\
-                self.strFunc(self.velocity.data, y, result, self.diagnostics)
-            return result
-
-        super(GradUW, self).__init__(formulation=GradVxW, rhs=rhs, **kwds)
-
-        # stability constant
-        # Depends on time integration method
-        self.cststretch = self.method[TimeIntegrator].stability_coeff()
-        # a vector to save diagnostics computed from GradVxW (max div ...)
-        self.diagnostics = npw.zeros(2)
-        self.diagnostics[1] = self.cststretch
-
-    @debug
-    @profile
-    def _compute(self, dt, t):
-        # Compute the number of required subcycles
-        ndt, subdt = self._check_stability(dt)
-        assert sum(subdt) == dt
-
-        for i in xrange(ndt):
-            self._integrate(subdt[i], t)
-
-    def _check_stability(self, dt):
-        """Computes a stability condition depending on some
-        diagnostics (from GradVxW)
-
-        :param dt: current time step
-
-        Returns
-        --------
-        nb_cylces : int
-            the number of required subcycles and
-        subdt : array of float
-            the subcycles time-step.
-        """
-        dt_stab = min(dt, self.cststretch / self.diagnostics[1])
-        nb_cycles = int(ceil(dt / dt_stab))
-        subdt = npw.zeros((nb_cycles))
-        subdt[:] = dt_stab
-        subdt[-1] = dt - (nb_cycles - 1) * dt_stab
-        return nb_cycles, subdt
-
-
-class StretchingLinearized(Stretching):
-    """Conservative formulation of the linearized stretching.
-    """
-
-    def __init__(self, vorticity_BF, usual_op, **kwds):
-        # vorticity of the base flow (steady solution)
-        self.vorticity_BF = vorticity_BF
-        # prepare ghost points synchro for vorticity_BF
-        self._synchronize_vort_BF = \
-            UpdateGhosts(self.vorticity_BF.topology,
-                         self.vorticity_BF.nb_components)
-        self.usual_op = usual_op
-        # boolean used  to switch between two rhs forms:
-        # either rhs(t, y) = div(y:u) (if true)
-        # or rhs(t,y) = div(y:w_bf) (if false)
-        self._divwu = True
-
-        def rhs(t, y, result):
-            """rhs used in time integrator
-            """
-            if self._divwu:
-                result = self.strFunc(y, self.velocity.data, result)
-            else:
-                result = self.strFunc(y, self.vorticity_BF.data, result)
-            return result
-
-        super(StretchingLinearized, self).__init__(
-            formulation=DivWV, rhs=rhs,
-            extra_ghosts_fields=self.vorticity_BF.nb_components,
-            **kwds)
-
-    def _compute(self, dt, t):
-        # - Call time integrator (1st term over 3) -
-        # Init workspace with a first evaluation of the div(wb:u') term in the
-        # rhs of the integrator
-        self._divwu = True
-        self._ti_work[:self.nb_components] = \
-            self.timeIntegrator.f(t, self.vorticity_BF.data,
-                                  self._ti_work[:self.nb_components])
-        # perform integration and save result in-place
-        self.vorticity.data = self.timeIntegrator(t, self.vorticity.data, dt,
-                                                  result=self.vorticity.data)
-        # - Call time integrator (2nd term over 3) -
-        # Init workspace with a first evaluation of the div(u':wb) term in the
-        # rhs of the integrator
-        self._divwu = False
-        self._ti_work[:self.nb_components] = \
-            self.timeIntegrator.f(t, self.velocity.data,
-                                  self._ti_work[:self.nb_components])
-        # perform integration and save result in-place
-        self.vorticity.data = self.timeIntegrator(t, self.vorticity.data, dt,
-                                                  result=self.vorticity.data)
-
-    def apply(self, simulation=None):
-        assert simulation is not None, \
-            "Missing simulation value for computation."
-
-        # time step
-        dt = simulation.time_step
-        # current time
-        t = simulation.time
-
-        # Synchronize ghost points
-        self._synchronize(self.velocity.data + self.vorticity.data +
-                          self.vorticity_BF.data)
-        # Compute the 2 first "stretching" terms (div(wb:u') and div(u':wb))
-        # and update vorticity for each of them
-        self._compute(dt, t)
-        # Compute the 3rd stretching term (div(w':ub)) and update vorticity
-        self.usual_op.apply(simulation)
diff --git a/hysop/old/operator.old/discrete/velocity_correction.py b/hysop/old/operator.old/discrete/velocity_correction.py
deleted file mode 100755
index 927eebc2b683253a8683b28a440e772883bada8c..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/discrete/velocity_correction.py
+++ /dev/null
@@ -1,171 +0,0 @@
-# -*- coding: utf-8 -*-
-"""Operator used to shift velocity value
- to fit with a required input flowrate.
-
-Check details in :ref:`velocity_correction` in HySoP user guide.
-
-
-"""
-from hysop.constants import debug
-from hysop.operator.discrete.discrete import DiscreteOperator
-from hysop.fields.variable_parameter import VariableParameter
-from hysop.tools.profiler import profile
-from hysop.tools.numpywrappers import npw
-from hysop.constants import XDIR, YDIR, ZDIR
-
-
-class VelocityCorrection_D(DiscreteOperator):
-    """The velocity field is corrected after solving the
-    Poisson equation. For more details about calculations,
-    see :ref:`velocity_correction` in HySoP user guide.
-    """
-
-    @debug
-    def __init__(self, velocity, vorticity, req_flowrate, cb, **kwds):
-        """Update velocity field (solution of Poisson equation)
-        in order to prescribe proper mean flow and ensure
-        the desired inlet flowrate.
-
-        Parameters
-        ----------
-        velocity : :class:`~hysop.fields.discrete_field.DiscreteField`
-            in/out velocity vector field.
-        vorticity : :class:`~hysop.fields.discrete_field.DiscreteField`
-            input vorticity vector field.
-        req_flowrate : a
-         :class:`~hysop.fields.variable_parameter.VariableParameter`
-            the desired inlet flowrate value
-        cb : :class:`~hysop.domain.control_box.ControlBox`
-            volume of control used to compute a reference for correction.
-        kwds : base class parameters
-        """
-        assert 'variables' not in kwds, 'variables parameter is useless.'
-        super(VelocityCorrection_D, self).__init__(
-            variables=[velocity, vorticity], **kwds)
-        # velocity discrete field
-        self.velocity = velocity
-        # vorticity discrete field
-        self.vorticity = vorticity
-        # If 2D problem, vorticity must be a scalar
-        if self._dim == 2:
-            assert self.vorticity.nb_components == 1
-            self._update_velocity_components = self._update_velocity_2d
-        elif self._dim == 3:
-            self._update_velocity_components = self._update_velocity_3d
-
-        assert (self._dim >= 2),\
-            "Wrong problem dimension: only 2D and 3D cases are implemented."
-
-        self.input = self.variables
-        self.output = [self.velocity]
-        # A reference topology
-        self.topo = self.velocity.topology
-        # Volume of control
-        self.cb = cb
-        self.cb.discretize(self.topo)
-        # A reference surface, i.e. input surface for flow in x direction
-        self._in_surf = cb.surf[XDIR]
-
-        sdirs = self._in_surf.t_dir
-        # Compute 1./ds and 1./dv ...
-        cb_length = self.cb.real_length[self.topo]
-        self._inv_ds = 1. / npw.prod(cb_length[sdirs])
-        self._inv_dvol = 1. / npw.prod(cb_length)
-        # Expected value for the flow rate through self.surfRef
-        self.req_flowrate = req_flowrate
-        assert isinstance(self.req_flowrate, VariableParameter),\
-            "the required flowrate must be a VariableParameter object."
-        # The correction that must be applied on each
-        # component of the velocity.
-        self.velocity_shift = npw.zeros(self._dim)
-        nbf = self.velocity.nb_components + self.vorticity.nb_components
-        # temp buffer, used to save flow rates and mean
-        # values of vorticity
-        self.rates = npw.zeros(nbf)
-        self.req_flowrate_val = None
-
-        spaceStep = self.topo.mesh.space_step
-        lengths = self.topo.domain.length
-        self.coeff_mean = npw.prod(spaceStep) / npw.prod(lengths)
-        x0 = self._in_surf.real_orig[self.topo][XDIR]
-        # Compute X - X0, x0 being the coordinate of the 'entry'
-        # surface for the flow.
-        self.x_coord = self.topo.mesh.coords[XDIR] - x0
-
-    def compute_correction(self):
-        """Compute the required correction for the current state
-        but do not apply it onto velocity.
-        """
-        # Computation of the flowrates evaluated from
-        # current (ie non corrected) velocity
-        nbf = self.velocity.nb_components + self.vorticity.nb_components
-        localrates = npw.zeros((nbf))
-        for i in xrange(self.velocity.nb_components):
-            localrates[i] = self._in_surf.integrate_dfield_on_proc(
-                self.velocity, component=i)
-        start = self.velocity.nb_components
-        # Integrate vorticity over the whole domain
-        for i in xrange(self.vorticity.nb_components):
-            localrates[start + i] = self.cb.integrate_dfield_on_proc(
-                self.vorticity, component=i)
-
-        # MPI reduction for rates
-        # rates = [flowrate[X], flowrate[Y], flowrate[Z],
-        #          vort_mean[X], ..., vort_mean[Z]]
-        # or (in 2D) = [flowrate[X], flowrate[Y], vort_mean]
-        self.rates[...] = 0.0
-        self.velocity.topology.comm.Allreduce(localrates, self.rates)
-
-        self.rates[:start] *= self._inv_ds
-        self.rates[start:] *= self._inv_dvol
-
-        # Set velocity_shift == [Vx_shift, vort_mean[Y], vort_mean[Z]]
-        # or (in 2D) velocity_shift == [Vx_shift, vort_mean]
-        # Velocity shift for main dir component
-        self.velocity_shift[XDIR] = self.req_flowrate_val[XDIR]\
-            - self.rates[XDIR]
-        # Shifts in other directions depend on x coord
-        # and will be computed during apply.
-
-    def _update_velocity_2d(self, vort_mean):
-        """update velocity value, 2d case
-        """
-        self.velocity[YDIR][...] += self.req_flowrate_val[YDIR] +\
-            vort_mean[XDIR] * self.x_coord - self.rates[YDIR]
-
-    def _update_velocity_3d(self, vort_mean):
-        """update velocity value, 3d case
-        """
-        self.velocity[YDIR][...] += self.req_flowrate_val[YDIR] + \
-            vort_mean[ZDIR] * self.x_coord - self.rates[YDIR]
-        self.velocity[ZDIR][...] += self.req_flowrate_val[ZDIR] - \
-            vort_mean[YDIR] * self.x_coord - self.rates[ZDIR]
-
-    @debug
-    @profile
-    def apply(self, simulation=None):
-        # the required flowrate value is updated (depending on time)
-        self.req_flowrate.update(simulation)
-
-        # warning : the flow rate value is divided by surf.
-        self.req_flowrate_val = self.req_flowrate.data * self._inv_ds
-        # Computation of the required velocity shift
-        # for the current state
-        self.compute_correction()
-        compute_index = self.topo.mesh.compute_index
-
-        # Apply shift to velocity (x component)
-        self.velocity[XDIR][compute_index] += self.velocity_shift[XDIR]
-        start = self.velocity.nb_components
-        # reminder : self.rates =[vx_shift, flowrates[Y], flowrate[Z],
-        #                         vort_mean[X], vort_mean[Y], vort_mean[Z]]
-        # or (in 2D) [vx_shift, flowrates[Y], vort_mean]
-        vort_mean = self.rates[start:]
-        ite = simulation.current_iteration
-        if self._writer is not None and self._writer.do_write(ite):
-            self._writer.buffer[0, 0] = simulation.time
-            self._writer.buffer[0, 1] = ite
-            self._writer.buffer[0, 2:] = vort_mean[...]
-            self._writer.write()
-
-        self._update_velocity_components(vort_mean)
diff --git a/hysop/old/operator.old/drag_and_lift.py b/hysop/old/operator.old/drag_and_lift.py
deleted file mode 100755
index 4de8058afdbd01c2113e96e3d0df30528705059b..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/drag_and_lift.py
+++ /dev/null
@@ -1,270 +0,0 @@
-# -*- coding: utf-8 -*-
-"""Methods to compute drag and lift for a flow around a body.
-
-.. currentmodule:: hysop.operator
-
-* :class:`~drag_and_lift.MomentumForces` : Momentum Formula
-* :class:`~drag_and_lift.NocaForces` : Noca formulation
- (formulation = 1, 2 or 3)
-
-See :ref:`forces`.
-
-
-"""
-from hysop.operator.computational import Computational
-from hysop.operator.continuous import opsetup
-from abc import ABCMeta, abstractmethod
-from hysop.domain.control_box import ControlBox
-import numpy as np
-
-
-class Forces(Computational):
-    """Abstract interface to classes dedicated to drag/lift computation
-    for a flow around a predefined obstacle.
-
-
-    """
-
-    __metaclass__ = ABCMeta
-
-    def __init__(self, obstacles, normalization=1., **kwds):
-        """
-
-        Parameters
-        ----------
-        obstacles : list of :class:`hysop.domain.obstacles`
-            list of bodies inside the flow
-        normalization : double, optional
-            a normalization coefficient applied to the force, default = 1.
-        kwds : arguments passed to base class.
-
-
-        """
-        super(Forces, self).__init__(**kwds)
-        self.input = self.variables
-        # List of hysop.domain.subsets, obstacles to the flow
-        self.obstacles = obstacles
-        # Normalizing coefficient for forces
-        # (based on the physics of the flow)
-        self.normalization = normalization
-        # Minimal length of ghost layer.
-        # This obviously depends on the formulation used for the force.
-        self._min_ghosts = 0
-
-    def discretize(self):
-        super(Forces, self)._standard_discretize(self._min_ghosts)
-        # all variables must have the same resolution
-        assert self._single_topo, 'multi-resolution case not allowed.'
-
-    @abstractmethod
-    @opsetup
-    def setup(self, rwork=None, iwork=None):
-        pass
-
-    def drag(self):
-        """
-
-        Returns
-        -------
-        HYSOP_REAL
-            the last computed value for drag force
-        """
-        return self.discrete_op.force[0]
-
-    def lift(self):
-        """
-        Returns
-        -------
-
-        np.array
-            the last computed values for lift forces
-        """
-        return self.discrete_op.force[1:]
-
-    def forces(self):
-        """
-        Returns
-        -------
-
-        np.array
-            the last computed values for forces (drag, lifts)
-        """
-        return self.discrete_op.force
-
-
-class MomentumForces(Forces):
-    """
-    Computation of forces (drag and lift) around an obstacle using
-    Momentum (Heloise) formula
-
-    """
-
-    def __init__(self, velocity, penalisation_coeff, **kwds):
-        """
-        Parameters
-        -----------
-        velocity : :class:`hysop.field.continuous.Field`
-            the velocity field
-        penalisation_coeff : double
-            coeff used to penalise velocity before force computation
-        kwds : arguments passed to drag_and_lift.Forces base class.
-
-
-        See :ref:`forces`.
-
-        """
-        assert 'variables' not in kwds, 'variables parameter is useless.'
-        self.velocity = velocity
-        super(MomentumForces, self).__init__(variables=[velocity], **kwds)
-        # Penalisation coefficient value
-        msg = 'penalisation coeff must be a list of values.'
-        assert isinstance(penalisation_coeff, list), msg
-        self._coeff = penalisation_coeff
-        self._min_ghosts = 1
-
-    @opsetup
-    def setup(self, rwork=None, iwork=None):
-        if not self._is_uptodate:
-            from hysop.operator.discrete.drag_and_lift \
-                import MomentumForces as DiscreteForce
-            # build the discrete operator
-            self.discrete_op = DiscreteForce(
-                velocity=self.discrete_fields[self.velocity],
-                penalisation_coeff=self._coeff,
-                obstacles=self.obstacles,
-                normalization=self.normalization)
-
-            # output setup
-            self._set_io('drag_and_lift', (1, 4))
-            self.discrete_op.set_writer(self._writer)
-            self._is_uptodate = True
-
-    def get_work_properties(self):
-        return {'rwork': None, 'iwork': None}
-
-
-class NocaForces(Forces):
-    """
-    Computation of forces (drag and lift) around an obstacle using
-    Noca's formula
-    (See Noca99 or Plouhmans, 2002, Journal of Computational Physics)
-    """
-
-    def __init__(self, velocity, vorticity, nu, formulation=1,
-                 volume_of_control=None, surfdir=None, **kwds):
-        """
-        Parameters
-        -----------
-        velocity : :class:`hysop.field.continuous.Field`
-            the velocity field
-        vorticity : :class:`hysop.field.continuous.Field`
-            vorticity field inside the domain
-        nu : double
-            viscosity
-        formulation : int, optional
-            Noca formulation (1, 2 or 3, corresponds to equations
-            I, II and III in Noca's paper)
-        volume_of_control: :class:`~hysop.domain.subset.controlBox.ControlBox`,
-        optional
-            an optional subset of the domain, on which forces will be computed,
-            useful to reduce computational cost
-        surfdir : python list, optional
-            indices of the surfaces on which forces are computed,
-            see example below.
-            Default = only surfaces normal to x direction.
-        kwds : arguments passed to drag_and_lift.Forces base class.
-
-        Attributes
-        ----------
-        nu : double
-            viscosity
-
-
-        Examples
-        --------
-
-        >> op = NocaForces(velocity=v, vorticity=w, nu=0.3, surfdir=[0, 1],
-                           obstacles=[])
-
-        Compute the integral of surface in Noca's formula only for
-        surfaces normal to x and y axis.
-
-        """
-        assert 'variables' not in kwds, 'variables parameter is useless.'
-        super(NocaForces, self).__init__(variables=[velocity, vorticity],
-                                         **kwds)
-
-        self.velocity = velocity
-        self.vorticity = vorticity
-        # viscosity
-        self.nu = nu
-
-        # setup for finite differences
-        if self.method is None:
-            import hysop.default_methods as default
-            self.method = default.FORCES
-        from hysop.methods import SpaceDiscretization
-        from hysop.numerics.finite_differences import FDC4, FDC2
-        assert SpaceDiscretization in self.method.keys()
-        if SpaceDiscretization is FDC2:
-            self._min_ghosts = 1
-        elif SpaceDiscretization is FDC4:
-            self._min_ghosts = 2
-
-        if surfdir is None:
-            surfdir = [0, 1]
-        # Directions where integrals on surfaces are computed
-        self._surfdir = surfdir
-
-        # The volume of control, in which forces are computed
-        if volume_of_control is None:
-            lr = self.domain.length * 0.9
-            xr = self.domain.origin + 0.04 * self.domain.length
-            volume_of_control = ControlBox(parent=self.domain,
-                                           origin=xr, length=lr)
-        self.voc = volume_of_control
-
-        if formulation == 1:
-            from hysop.operator.discrete.drag_and_lift import NocaI
-            self.formulation = NocaI
-        elif formulation == 2:
-            from hysop.operator.discrete.drag_and_lift import NocaII
-            self.formulation = NocaII
-        elif formulation == 3:
-            from hysop.operator.discrete.drag_and_lift import NocaIII
-            self.formulation = NocaIII
-        else:
-            raise ValueError("Unknown formulation for Noca formula")
-
-    def get_work_properties(self):
-        super(NocaForces, self).get_work_properties()
-        shape_v = [None, ] * (self.domain.dim + 1)
-        slist = self.voc.surf
-        toporef = self.discrete_fields[self.velocity].topology
-        for i in xrange(self.domain.dim):
-            v_ind = slist[2 * i].mesh[toporef].ind4integ
-            shape_v[i] = self.velocity.data[i][v_ind].shape
-        v_ind = self.voc.mesh[toporef].ind4integ
-        shape_v[-1] = self.velocity.data[0][v_ind].shape
-        memsize = np.prod(shape_v)
-        # setup for rwork; iwork is useless.
-        return {'rwork': (memsize,), 'iwork': None}
-
-    @opsetup
-    def setup(self, rwork=None, iwork=None):
-        if not self._is_uptodate:
-            topo = self.discrete_fields[self.velocity].topology
-            self.voc.discretize(topo)
-            self.discrete_op = self.formulation(
-                velocity=self.discrete_fields[self.velocity],
-                vorticity=self.discrete_fields[self.vorticity],
-                nu=self.nu,
-                volume_of_control=self.voc,
-                surfdir=self._surfdir,
-                obstacles=self.obstacles,
-                normalization=self.normalization)
-
-            # output setup
-            self._set_io('drag_and_lift', (1, 4))
-            self.discrete_op.set_writer(self._writer)
-            self._is_uptodate = True
diff --git a/hysop/old/operator.old/energy_enstrophy.py b/hysop/old/operator.old/energy_enstrophy.py
deleted file mode 100644
index 6f3d0cd9cb627ff6fbac10642d3f6058dc7bc865..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/energy_enstrophy.py
+++ /dev/null
@@ -1,81 +0,0 @@
-# -*- coding: utf-8 -*-
-"""Compute Energy and Enstrophy from velocity and vorticity
-
-
-See :ref:`energy_enstrophy`.
-"""
-from hysop.operator.discrete.energy_enstrophy import EnergyEnstrophy as DEE
-from hysop.operator.computational import Computational
-from hysop.operator.continuous import opsetup
-
-
-class EnergyEnstrophy(Computational):
-    """
-    Computes enstrophy and kinetic energy
-    """
-
-    def __init__(self, velocity, vorticity, is_normalized=True, **kwds):
-        """
-
-        Parameters
-        ----------
-        velocity, vorticity : :class:`~hysop.operator.field.continuous.Field`
-            velocity and vorticity fields
-        isNormalized : boolean
-            true if enstrophy and energy values have to be normalized
-            by the domain lengths.
-
-        Notes
-        -----
-        By default, values at each time step are appended
-        to energy_enstrophy file. See :ref:`operators_io`
-        for more details.
-        """
-        assert 'variables' not in kwds, 'variables parameter is useless.'
-        super(EnergyEnstrophy, self).__init__(variables=[velocity, vorticity],
-                                              **kwds)
-        # velocity field
-        self.velocity = velocity
-        # vorticity field
-        self.vorticity = vorticity
-        # are the energy end enstrophy values normalized by domain lengths ?
-        self.is_normalized = is_normalized
-        # self._buffer_1 = 0.
-        # self._buffer_2 = 0.
-        self.input = [velocity, vorticity]
-        self.output = []
-
-    def get_work_properties(self):
-        super(EnergyEnstrophy, self).get_work_properties()
-        vd = self.discrete_fields[self.velocity]
-        wd = self.discrete_fields[self.vorticity]
-        v_ind = vd.topology.mesh.compute_index
-        w_ind = wd.topology.mesh.compute_index
-        size_v = vd[0][v_ind].size
-        size_w = wd[0][w_ind].size
-        size_work = max(size_v, size_w)
-        # work array is a flat array, shared between energy and enstrophy
-        return {'rwork': [size_work], 'iwork': None}
-
-    @opsetup
-    def setup(self, rwork=None, iwork=None):
-        if not self._is_uptodate:
-
-            self.discrete_op = DEE(self.discrete_fields[self.velocity],
-                                   self.discrete_fields[self.vorticity],
-                                   self.is_normalized,
-                                   rwork=rwork)
-            # Output setup
-            self._set_io('energy_enstrophy', (1, 3))
-            self.discrete_op.set_writer(self._writer)
-            self._is_uptodate = True
-
-    def energy(self):
-        """Return last computed value of the energy
-        """
-        return self.discrete_op.energy
-
-    def enstrophy(self):
-        """Return last computed value of the enstrophy
-        """
-        return self.discrete_op.enstrophy
diff --git a/hysop/old/operator.old/forcing.py b/hysop/old/operator.old/forcing.py
deleted file mode 100644
index 27310829cf051fd637a647bcffbd32503cc0d13d..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/forcing.py
+++ /dev/null
@@ -1,155 +0,0 @@
-# -*- coding: utf-8 -*-
-"""Operator implementing the forcing term in the NS, 
-   depending on the filtered field
-   (--> computation of base flow).
-
-.. currentmodule:: hysop.operator.forcing
-
-"""
-
-from hysop.operator.computational import Computational
-from hysop.operator.discrete.forcing import Forcing as DForcing
-from hysop.operator.discrete.forcing import ForcingConserv as DForcingCsv
-from hysop.constants import debug
-from hysop.operator.continuous import opsetup
-import hysop.default_methods as default
-from hysop.methods import SpaceDiscretization
-from hysop.numerics.finite_differences import FDC4,\
-    FDC2
-from hysop.operator.differential import Curl
-from hysop.fields.continuous_field import Field
-
-
-class Forcing(Computational):
-    """
-    Integrate a forcing term in the right hand side 
-    of the NS equations, depending on the filtered field.
-    i.e. solve
-    \f{eqnarray*}
-    \frac{\partial \omega}{\partial t} &=& -\chi(\omega - \bar{\omega}))
-    \f}
-    The strength of the forcing is chosen in the order
-    of the amplification rate related to the unstable flow.
-
-    """
-
-    @debug
-    def __init__(self, strength=None, **kwds):
-        """
-        Parameters
-        ----------
-        @param strength : strength of the filter
-        **kwds : extra parameters for parent class
-
-        """
-        super(Forcing, self).__init__(**kwds)
-
-        ## strength of the filter
-        self.strength = strength
-        
-        self.input = self.output = self.variables
-#        self.output = [self.variables[0]]
-
-    def discretize(self):
-        super(Forcing, self)._standard_discretize()
-        # all variables must have the same resolution
-        assert self._single_topo, 'multi-resolution case not allowed.'
-
-    @debug
-    def setup(self, rwork=None, iwork=None):
-        self.discrete_op = DForcing(
-            variables=self.discrete_fields.values(),
-            strength=self.strength,
-            rwork=rwork, iwork=iwork)
-
-        self._is_uptodate = True
-
-
-class ForcingConserv(Forcing):
-    """
-    Integrate a forcing term in the right hand side
-    of the NS equations, depending on the filtered vorticity.
-    i.e. solve
-    \f{eqnarray*}
-    \frac{\partial \omega}{\partial t} &=& -\chi(\omega - \bar{\omega}))
-    \f}
-    The equation is solved using a CONSERVATIVE formulation.
-    The strength of the forcing is chosen in the order
-    of the amplification rate related to the unstable flow.
-        
-    """
-    @debug
-    def __init__(self, velocity, vorticity, velocityFilt, **kwds):
-        """
-        Parameters
-        ----------
-        @param[in] velocity, vorticity, velocityFilt fields
-        @param[in, out] forced vorticity field
-        @param strength : strength of the filter
-        **kwds : extra parameters for parent class
-            
-        Notes
-        -----
-        velocity and velocityFilt are not modified by this operator.
-        vorticity is an in-out parameter.
-        """
-        assert 'variables' not in kwds, 'variables parameter is useless.'
-        super(ForcingConserv, self).__init__(
-            variables=[velocity, vorticity, velocityFilt], **kwds)
-        ## velocity variable
-        self.velocity = velocity
-        ## vorticity variable
-        self.vorticity = vorticity
-        ## filtered velocity variable
-        self.velocityFilt = velocityFilt
-        # A method is required to set how the curl will be computed.
-        if self.method is None:
-            self.method = default.DIFFERENTIAL
-        # operator to compute buffer = curl(penalised velocity)
-        self._curl = None
-
-        self.input = [self.velocity, self.vorticity, self.velocityFilt]
-        self.output = [self.vorticity]
-
-    def discretize(self):
-    
-        if self.method[SpaceDiscretization] is FDC4:
-            # Finite differences method
-            # Minimal number of ghost points
-            nb_ghosts = 2
-        elif self.method[SpaceDiscretization] is FDC2:
-            nb_ghosts = 1
-        else:
-            raise ValueError("Unknown method for space discretization of the\
-                differential operator in penalization.")
-        super(ForcingConserv, self)._standard_discretize(nb_ghosts)
-        # all variables must have the same resolution
-        assert self._single_topo, 'multi-resolution case not allowed.'
-        topo = self.variables[self.velocity]
-        invar = Field(domain=self.velocity.domain,
-                      name='curl_in', is_vector=True)
-        dimension = self.domain.dim
-        outvar = Field(domain=self.velocity.domain,
-                       name='curl_out',
-                       is_vector=dimension == 3)
-        self._curl = Curl(invar=invar, outvar=outvar,
-                          discretization=topo, method=self.method)
-        self._curl.discretize()
-
-    def get_work_properties(self):
-        return self._curl.get_work_properties()
-
-    @debug
-    @opsetup
-    def setup(self, rwork=None, iwork=None):
-        self._curl.setup(rwork, iwork)
-        self.discrete_op = DForcingCsv(
-            vorticity=self.discrete_fields[self.vorticity],
-            velocity=self.discrete_fields[self.velocity],
-            velocityFilt=self.discrete_fields[self.velocityFilt],
-            curl=self._curl.discrete_op,
-            strength=self.strength,
-            rwork=rwork, iwork=iwork)
-        self._is_uptodate = True
-
-
diff --git a/hysop/old/operator.old/low_pass_filt.py b/hysop/old/operator.old/low_pass_filt.py
deleted file mode 100644
index 2a02d7db1a4f1242daba98b67f7ed6091ebc446d..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/low_pass_filt.py
+++ /dev/null
@@ -1,153 +0,0 @@
-# -*- coding: utf-8 -*-
-"""Operator for any field low-pass filtering
-   (--> computation of base flow).
-
-.. currentmodule:: hysop.operator.low_pass_filt
-
-"""
-
-from hysop.operator.computational import Computational
-from hysop.operator.discrete.low_pass_filt import LowPassFilt as DFilt
-from hysop.operator.discrete.low_pass_filt import LowPassFiltConserv as DFiltCsv
-from hysop.constants import debug
-from hysop.operator.continuous import opsetup
-import hysop.default_methods as default
-from hysop.methods import SpaceDiscretization
-from hysop.numerics.finite_differences import FDC4,\
-    FDC2
-from hysop.operator.differential import Curl
-from hysop.fields.continuous_field import Field
-
-
-class LowPassFilt(Computational):
-    """
-    Provides a filtered field by low-pass filtering
-    the flow with the frequency of the instability divided by 2.
-    i.e. solve
-    \f{eqnarray*}
-    \frac{\partial \bar{\omega}}{\partial t} &=& \Omega_c(\omega - \bar{\omega}))
-    \f}
-
-    """
-
-    @debug
-    def __init__(self, cutFreq=None, **kwds):
-        """
-        Parameters
-        ----------
-        @param cutFreq : cutting circular frequency corresponding to the half of
-        the eigenfrequency of the flow instability
-        **kwds : extra parameters for parent class
-
-        """
-        super(LowPassFilt, self).__init__(**kwds)
-
-        ## cutting circular frequency
-        self.cutFreq = cutFreq
-        
-        self.input = self.output = self.variables
-
-    def discretize(self):
-        super(LowPassFilt, self)._standard_discretize()
-        # all variables must have the same resolution
-        assert self._single_topo, 'multi-resolution case not allowed.'
-
-    @debug
-    def setup(self, rwork=None, iwork=None):
-        self.discrete_op = DFilt(
-            variables=self.discrete_fields.values(),
-            cutFreq=self.cutFreq,
-            rwork=rwork, iwork=iwork)
-
-        self._is_uptodate = True
-
-
-class LowPassFiltConserv(LowPassFilt):
-    """
-        Provides a filtered field by low-pass filtering
-        the flow with the frequency of the instability divided by 2.
-        i.e. solve
-        \f{eqnarray*}
-        \frac{\partial \bar{\omega}}{\partial t} &=& \Omega_c(\omega - \bar{\omega}))
-        \f}
-        The equation is solved using a CONSERVATIVE formulation.
-        
-        """
-    
-    @debug
-    def __init__(self, velocity, vorticityFilt, velocityFilt, **kwds):
-        """
-        Parameters
-        ----------
-        @param[in] velocity, vorticityFilt, velocityFilt fields
-        @param[in, out] vorticityFilt field (i.e. filtered vorticity field)
-        @param strength : strength of the filter
-        **kwds : extra parameters for parent class
-            
-        Notes
-        -----
-        velocity and velocityFilt are not modified by this operator.
-        vorticityFilt is an in-out parameter.
-        """
-        assert 'variables' not in kwds, 'variables parameter is useless.'
-        super(LowPassFiltConserv, self).__init__(
-            variables=[velocity, vorticityFilt, velocityFilt], **kwds)
-
-        ## velocity variable
-        self.velocity = velocity
-        ## filtered vorticity variable
-        self.vorticityFilt = vorticityFilt
-        ## filtered velocity variable
-        self.velocityFilt = velocityFilt
-        # A method is required to set how the curl will be computed.
-        if self.method is None:
-            self.method = default.DIFFERENTIAL
-        # operator to compute buffer = curl(penalised velocity)
-        self._curl = None
-
-        self.input = [self.velocity, self.vorticityFilt, self.velocityFilt]
-        self.output = [self.vorticityFilt]
-
-    def discretize(self):
-    
-        if self.method[SpaceDiscretization] is FDC4:
-            # Finite differences method
-            # Minimal number of ghost points
-            nb_ghosts = 2
-        elif self.method[SpaceDiscretization] is FDC2:
-            nb_ghosts = 1
-        else:
-            raise ValueError("Unknown method for space discretization of the\
-                differential operator in penalization.")
-        super(LowPassFiltConserv, self)._standard_discretize(nb_ghosts)
-        # all variables must have the same resolution
-        assert self._single_topo, 'multi-resolution case not allowed.'
-        topo = self.variables[self.velocity]
-        invar = Field(domain=self.velocity.domain,
-                      name='curl_in', is_vector=True)
-        dimension = self.domain.dim
-        outvar = Field(domain=self.velocity.domain,
-                       name='curl_out',
-                       is_vector=dimension == 3)
-        self._curl = Curl(invar=invar, outvar=outvar,
-                          discretization=topo, method=self.method)
-        self._curl.discretize()
-
-    def get_work_properties(self):
-        return self._curl.get_work_properties()
-    
-    @debug
-    @opsetup
-    def setup(self, rwork=None, iwork=None):
-        self._curl.setup(rwork, iwork)
-        self.discrete_op = DFiltCsv(
-            vorticityFilt=self.discrete_fields[self.vorticityFilt],
-            velocity=self.discrete_fields[self.velocity],
-            velocityFilt=self.discrete_fields[self.velocityFilt],
-            curl=self._curl.discrete_op,
-            cutFreq=self.cutFreq,
-            rwork=rwork, iwork=iwork)
-        self._is_uptodate = True
-
-
-
diff --git a/hysop/old/operator.old/monitoringPoints.py b/hysop/old/operator.old/monitoringPoints.py
deleted file mode 100644
index 73237c7dea0cf4b954445ca843a17a22172b00b1..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/monitoringPoints.py
+++ /dev/null
@@ -1,68 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-@file monitoringPoints.py
-Print time evolution of flow variables (velo, vorti)
-at a particular monitoring point in the wake
-"""
-from hysop.operator.discrete.monitoringPoints import MonitoringPoints as MonitD
-from hysop.operator.computational import Computational
-from hysop.operator.continuous import opsetup
-
-
-class MonitoringPoints(Computational):
-    """
-    Compute and print velo/vorti profiles
-    """
-
-    def __init__(self, velocity, vorticity, monitPt_coords, **kwds):
-        """
-        Constructor.
-        @param velocity field
-        @param vorticity field
-        @param monitPts_coords : coordinates corresponding
-            to the space location of the monitoring point in the wake
-
-        Default file name = 'monit.dat'
-        See hysop.tools.io_utils.Writer for details
-        """
-        assert 'variables' not in kwds, 'variables parameter is useless.'
-        super(MonitoringPoints, self).__init__(variables=
-                                               [velocity, vorticity],
-                                               **kwds)
-        ## velocity field
-        self.velocity = velocity
-        ## vorticity field
-        self.vorticity = vorticity
-        ## coordinates of the monitoring point
-        self.monitPt_coords = monitPt_coords
-        self.input = [velocity, vorticity]
-        self.output = []
-
-    def get_work_properties(self):
-        if not self._is_discretized:
-            msg = 'The operator must be discretized '
-            msg += 'before any call to this function.'
-            raise RuntimeError(msg)
-        vd = self.discrete_fields[self.velocity]
-        wd = self.discrete_fields[self.vorticity]
-        v_ind = vd.topology.mesh.compute_index
-        w_ind = wd.topology.mesh.compute_index
-        shape_v = vd[0][v_ind].shape
-        shape_w = wd[0][w_ind].shape
-        if shape_v == shape_w:
-            return {'rwork': [shape_v], 'iwork': None}
-        else:
-            return {'rwork': [shape_v, shape_w], 'iwork': None}
-
-    @opsetup
-    def setup(self, rwork=None, iwork=None):
-        if not self._is_uptodate:
-
-            self.discrete_op = MonitD(self.discrete_fields[self.velocity],
-                                      self.discrete_fields[self.vorticity],
-                                      self.monitPt_coords, rwork=rwork)
-            # Output setup
-            self._set_io('monit', (1, 3))
-            self.discrete_op.set_writer(self._writer)
-            self._is_uptodate = True
-
diff --git a/hysop/old/operator.old/multiphase_baroclinic_rhs.py b/hysop/old/operator.old/multiphase_baroclinic_rhs.py
deleted file mode 100644
index caa301a9030a8c071226ba0e46f90d8f69bdf68f..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/multiphase_baroclinic_rhs.py
+++ /dev/null
@@ -1,81 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-@file operator/multiphase_baroclinic_rhs.py
-
-Computation of the baroclinic term in a multiphasic flow:
-\f{eqnarray*}
-\frac{\partial\vec{\omega}{\partial t} = -\frac{\nabla \rho}{\rho}\times\left(-\frac{\nabla P}{\rho}\right)
-\f} with finite differences
-"""
-from hysop.operator.computational import Computational
-from hysop.methods import SpaceDiscretization, Support
-from hysop.constants import debug, np
-import hysop.default_methods as default
-from hysop.operator.continuous import opsetup
-from hysop.backend.device.opencl.gpu_multiphase_baroclinic_rhs import BaroclinicRHS
-
-
-class MultiphaseBaroclinicRHS(Computational):
-    """
-    Baroclinic operator representation
-    """
-
-    @debug
-    def __init__(self, rhs, rho, gradp, **kwds):
-        """
-        Constructor.
-        Create a Baroclinic operator from given variables.
-
-        @param rhs fiel
-        @param rho field
-        @param gradp field
-        @param method : solving method
-        (default = finite differences, 4th order, in space)
-        @param ghosts : number of ghosts points. Default depends on the method.
-        Autom. computed if not set.
-        """
-        super(MultiphaseBaroclinicRHS, self).__init__(**kwds)
-        if self.method is None:
-            self.method = default.MULTIPHASEBAROCLINIC
-        self.rhs = rhs
-        self.rho = rho
-        self.gradp = gradp
-        self.input = [self.rho, self.gradp]
-        self.output = [self.rhs, ]
-        assert SpaceDiscretization in self.method.keys()
-        msg = "This operator is implemented for GPU only"
-        assert Support in self.method.keys(), msg
-        assert self.method[Support] == 'gpu', msg
-
-    def discretize(self):
-        build_topos = self._check_variables()
-        assert not self._single_topo, \
-            "This operator must have different topologies"
-        for v in self.variables:
-            if build_topos[v]:
-                topo = self.domain.create_topology(
-                    discretization=self.variables[v], dim=2)
-                self.variables[v] = topo
-                build_topos[v] = False
-        msg = "Need review for ghosts"
-        assert np.all(self.variables[self.rhs].ghosts() == 0), msg
-        assert np.all(self.variables[self.gradp].ghosts() > 0), msg
-        assert np.all(self.variables[self.rho].ghosts() == 0), msg
-
-        # All topos are built, we can discretize fields.
-        self._discretize_vars()
-        self._is_discretized = True
-
-    @debug
-    @opsetup
-    def setup(self, rwork=None, iwork=None):
-        """
-        Baroclinic operator discretization method.
-        Create a discrete Baroclinic operator from given specifications.
-        """
-        self.discrete_op = \
-            BaroclinicRHS(self.discrete_fields[self.rhs],
-                          self.discrete_fields[self.rho],
-                          self.discrete_fields[self.gradp],
-                          method=self.method)
-        self._is_uptodate = True
diff --git a/hysop/old/operator.old/multiphase_gradp.py b/hysop/old/operator.old/multiphase_gradp.py
deleted file mode 100644
index 03a5900f8f168a8f5d0ead64b7a19637b1474b60..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/multiphase_gradp.py
+++ /dev/null
@@ -1,80 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-@file operator/multiphase_gradp.py
-
-Computation of the pressure gradient in a multiphasic flow:
-\f{eqnarray*}
--\frac{\nabla P}{\rho} = \frac{\partial\boldsymbol{u}}{\partial t} + (\boldsymbol{u}\cdot\nabla)\boldsymbol{u}  - \nu\Delta\boldsymbol{u}
-\f} with finite differences
-"""
-from hysop.operator.computational import Computational
-from hysop.methods import SpaceDiscretization
-from hysop.numerics.finite_differences import FDC4
-from hysop.constants import debug, np
-import hysop.default_methods as default
-from hysop.operator.continuous import opsetup
-from hysop.operator.discrete.multiphase_gradp import GradP
-
-
-class MultiphaseGradP(Computational):
-    """
-    Pressure operator representation
-    """
-
-    @debug
-    def __init__(self, velocity, gradp, viscosity, **kwds):
-        """
-        Constructor.
-        Create a Pressure operator from given velocity variables.
-
-        @param velocity field
-        @param gradp result
-        @param viscosity constant
-        @param resolutions : grid resolution of velocity and gradp
-        @param method : solving method
-        (default = finite differences, 4th order, in space)
-        @param topo : a predefined topology to discretize
-         velocity/gradp
-        @param ghosts : number of ghosts points. Default depends on the method.
-        Autom. computed if not set.
-        """
-        assert 'variables' not in kwds, 'variables parameter is useless.'
-        super(MultiphaseGradP, self).__init__(
-            variables=[velocity, gradp], **kwds)
-        if self.method is None:
-            self.method = default.MULTIPHASEGRADP
-        self.velocity = velocity
-        self.gradp = gradp
-        self.viscosity = viscosity
-        self.input = [self.velocity, ]
-        self.output = [self.gradp, ]
-        assert SpaceDiscretization in self.method.keys()
-
-    def discretize(self):
-        if self.method[SpaceDiscretization] is FDC4:
-            nbGhosts = 2
-        else:
-            raise ValueError("Unknown method for space discretization of the\
-                multiphase gradp operator.")
-
-        super(MultiphaseGradP, self)._standard_discretize(nbGhosts)
-
-    @debug
-    @opsetup
-    def setup(self, rwork=None, iwork=None):
-        """
-        Baroclinic operator discretization method.
-        Create a discrete Baroclinic operator from given specifications.
-        """
-        self.discrete_op = \
-            GradP(self.discrete_fields[self.velocity],
-                  self.discrete_fields[self.gradp],
-                  self.viscosity,
-                  method=self.method)
-        self._is_uptodate = True
-
-    def initialize_velocity(self):
-        self.discrete_op.initialize_velocity()
-
-    def get_work_properties(self):
-        return {'rwork': None, 'iwork': None}
diff --git a/hysop/old/operator.old/multiresolution_filter.py b/hysop/old/operator.old/multiresolution_filter.py
deleted file mode 100644
index a2b6f8e8bac85b5eb0879fc602d74ca637867153..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/multiresolution_filter.py
+++ /dev/null
@@ -1,59 +0,0 @@
-"""Filter values between grids of different resolution.
-
-"""
-from hysop.constants import debug
-from hysop.operator.continuous import opsetup
-from hysop.operator.computational import Computational
-import hysop.default_methods as default
-from hysop.methods import Support
-
-
-class MultiresolutionFilter(Computational):
-    """Interpolation from fine grid to coarse grid
-
-    """
-
-    @debug
-    def __init__(self, d_in, d_out, **kwds):
-        """
-        Parameters
-        ----------
-        d_in, d_out : :class:`hysop.topology.topology.CartesianTopology`
-            or :class:`tools.parameters.Discretization`
-            data distribution for source (in) and target (out) grids.
-        kwds : base class parameters
-
-        """
-        if 'method' not in kwds:
-            kwds['method'] = default.MULTIRESOLUTION_FILTER
-        super(MultiresolutionFilter, self).__init__(**kwds)
-        self.d_in, self.d_out = d_in, d_out
-        self.input = self.variables
-        self.output = self.variables
-        self._df_in = []
-        self._df_out = []
-
-    def discretize(self):
-        super(MultiresolutionFilter, self)._standard_discretize()
-        topo_in = self._build_topo(self.d_in, 0)
-        topo_out = self._build_topo(self.d_out, 0)
-        for v in self.variables:
-            self._df_in.append(v.discretize(topo_in))
-            self._df_out.append(v.discretize(topo_out))
-
-    @opsetup
-    def setup(self, rwork=None, iwork=None):
-        if Support in self.method.keys() and \
-           self.method[Support].find('gpu') >= 0:
-            from hysop.backend.device.opencl.gpu_multiresolution_filter \
-                import GPUFilterFineToCoarse as discreteFilter
-        else:
-            from hysop.operator.discrete.multiresolution_filter \
-                import FilterFineToCoarse as discreteFilter
-        self.discrete_op = discreteFilter(
-            field_in=self._df_in, field_out=self._df_out,
-            method=self.method, rwork=rwork, iwork=iwork)
-        self._is_uptodate = True
-
-    def get_work_properties(self):
-        return {'rwork': None, 'iwork': None}
diff --git a/hysop/old/operator.old/penalization.py b/hysop/old/operator.old/penalization.py
deleted file mode 100644
index 3ac8547097cfb3ad290755a0442887baeac45db3..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/penalization.py
+++ /dev/null
@@ -1,184 +0,0 @@
-# -*- coding: utf-8 -*-
-"""Operators for penalization problem.
-
-.. currentmodule:: hysop.operator.penalization
-
-* :class:`Penalization` : standard penalisation
-* :class:`PenalizeVorticity`  : vorticity formulation
-
-See details in :ref:`penalisation` section of HySoP user guide.
-
-"""
-
-from hysop.operator.computational import Computational
-from hysop.operator.discrete.penalization import Penalization as DPenalV
-from hysop.operator.discrete.penalization import PenalizeVorticity as DPenalW
-from hysop.constants import debug
-from hysop.operator.continuous import opsetup
-from hysop.domain.subsets import Subset
-import hysop.default_methods as default
-from hysop.methods import SpaceDiscretization
-from hysop.numerics.finite_differences import FDC4,\
-    FDC2
-from hysop.operator.differential import Curl
-from hysop.fields.continuous_field import Field
-
-
-class Penalization(Computational):
-    """
-    Solves
-    \f{eqnarray*}
-    v = Op(\v)
-    \f} with :
-    \f{eqnarray*}
-    \frac{\partial v}{\partial t} &=& \lambda\chi_s(v_D - v)
-    \f}
-
-    """
-
-    @debug
-    def __init__(self, obstacles, coeff=None, **kwds):
-        """
-        Parameters
-        ----------
-        obstacles : dict or list of :class:`~hysop.domain.subsets.Subset`
-            sets of geometries on which penalization must be applied
-        coeff : double, optional
-            penalization factor applied to all geometries.
-        **kwds : extra parameters for parent class
-
-        Notes
-        -----
-        Set::
-
-        obstacles = {obs1: coeff1, obs2: coeff2, ...}
-        coeff = None
-
-        to apply a different coefficient on each subset.
-        Set::
-
-        obstacles = [obs1, obs2, ...]
-        coeff = some_value
-
-        to apply the same penalization on all subsets.
-        obs1, ob2 ... must be some :class:`~hysop.domain.subsets.Subset`
-        and some_value must be either a real scalar or a function of the
-        coordinates like::
-
-            def coeff(*args):
-                return 3 * args[0]
-
-        with args[0,1,...] = x,y,...
-
-        Warning : coeff as a function is not yet implemented!!
-        """
-        super(Penalization, self).__init__(**kwds)
-
-        # The list of subset on which penalization must be applied
-        self.obstacles = obstacles
-
-        # Penalization functions or coef
-        self.coeff = coeff
-        self.input = self.output = self.variables
-
-    def discretize(self):
-        super(Penalization, self)._standard_discretize()
-        # all variables must have the same resolution
-        assert self._single_topo, 'multi-resolution case not allowed.'
-        topo = self.variables.values()[0]
-        for obs in self.obstacles:
-            assert isinstance(obs, Subset)
-            obs.discretize(topo)
-
-    @debug
-    @opsetup
-    def setup(self, rwork=None, iwork=None):
-        self.discrete_op = DPenalV(
-            variables=self.discrete_fields.values(), obstacles=self.obstacles,
-            coeff=self.coeff, rwork=rwork, iwork=iwork)
-
-        self._is_uptodate = True
-
-    def get_work_properties(self):
-        return {'rwork': None, 'iwork': None}
-
-
-class PenalizeVorticity(Penalization):
-    """
-    Solve
-    \f{eqnarray*}
-    \frac{\partial w}{\partial t} &=& \lambda\chi_s\nabla\times(v_D - v)
-    \f}
-    using penalization.
-    """
-
-    @debug
-    def __init__(self, velocity, vorticity, **kwds):
-        """
-        Parameters
-        ----------
-        velocity, vorticity: :class:`~hysop.fields.continuous_field.Field`
-        **kwds : extra parameters for parent class.
-
-        Notes
-        -----
-        velocity is not modified by this operator.
-        vorticity is an in-out parameter.
-        """
-        assert 'variables' not in kwds, 'variables parameter is useless.'
-        super(PenalizeVorticity, self).__init__(
-            variables=[velocity, vorticity], **kwds)
-        # velocity field
-        self.velocity = velocity
-        # vorticity field
-        self.vorticity = vorticity
-        # A method is required to set how the curl will be computed.
-        if self.method is None:
-            self.method = default.DIFFERENTIAL
-        # operator to compute buffer = curl(penalised velocity)
-        self._curl = None
-        self.input = self.variables
-        self.output = self.vorticity
-
-    def discretize(self):
-
-        if self.method[SpaceDiscretization] is FDC4:
-            # Finite differences method
-            # Minimal number of ghost points
-            nb_ghosts = 2
-        elif self.method[SpaceDiscretization] is FDC2:
-            nb_ghosts = 1
-        else:
-            raise ValueError("Unknown method for space discretization of the\
-                differential operator in penalization.")
-        super(PenalizeVorticity, self)._standard_discretize(nb_ghosts)
-        # all variables must have the same resolution
-        assert self._single_topo, 'multi-resolution case not allowed.'
-        topo = self.variables[self.velocity]
-        for obs in self.obstacles:
-            assert isinstance(obs, Subset)
-            obs.discretize(topo)
-        invar = Field(domain=self.velocity.domain,
-                      name='curl_in', is_vector=True)
-        dimension = self.domain.dim
-        outvar = Field(domain=self.velocity.domain,
-                       name='curl_out',
-                       is_vector=dimension == 3)
-        self._curl = Curl(invar=invar, outvar=outvar,
-                          discretization=topo, method=self.method)
-        self._curl.discretize()
-
-    def get_work_properties(self):
-        return self._curl.get_work_properties()
-
-    @debug
-    @opsetup
-    def setup(self, rwork=None, iwork=None):
-        self._curl.setup(rwork, iwork)
-        self.discrete_op = DPenalW(
-            vorticity=self.discrete_fields[self.vorticity],
-            velocity=self.discrete_fields[self.velocity],
-            curl=self._curl.discrete_op,
-            obstacles=self.obstacles,
-            coeff=self.coeff, rwork=rwork, iwork=iwork)
-        self._is_uptodate = True
diff --git a/hysop/old/operator.old/poisson.py b/hysop/old/operator.old/poisson.py
deleted file mode 100644
index c2034b95b57964f5f1e2dfcb387b26328c8b9011..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/poisson.py
+++ /dev/null
@@ -1,146 +0,0 @@
-# -*- coding: utf-8 -*-
-"""Operator to solve Poisson problem.
-
-See :ref:`poisson` in HySoP user guide.
-"""
-from hysop.operator.computational import Computational
-from hysop.operator.discrete.poisson_fft import PoissonFFT
-from hysop.constants import debug
-from hysop.operator.velocity_correction import VelocityCorrection
-from hysop.operator.reprojection import Reprojection
-from hysop.methods import SpaceDiscretization, Formulation
-from hysop.operator.continuous import opsetup
-from hysop import __FFTW_ENABLED__
-
-
-class Poisson(Computational):
-    """Solve Poisson problem (in: vorticity, out velocity)
-    for incompressible flow.
-    """
-
-    _authorized_methods = ['fftw']
-
-    @debug
-    def __init__(self, output_field, input_field, flowrate=None,
-                 projection=None, **kwds):
-        """Poisson operator, incompressible flow.
-
-        Parameters
-        ----------
-        output_field : :class:`~hysop.fields.continuous_field.Field
-            solution field
-        input_field  : :class:`~hysop.fields.continuous_field.Field`
-            right-hand side
-        flowrate: :class:`~hysop.fields.variable_parameter.VariableParameter`
-           or double, optional
-            flow rate value through input surface (normal to xdir),
-            used to calibrate solution, default=0.
-        projection : double or tuple, optional
-             projection criterion, see notes below.
-        kwds : base class parameters.
-
-
-        Notes:
-        * projection might be:
-           * None: no projection
-           * the value of the frequency of reprojection (constant)
-           * a tuple (frequency, threshold). In that case, a criterion
-           depending on the input_field will be computed at each time step,
-           and if criterion > threshold., then frequency projection is active.
-        * About method parameter:
-           - SpaceDiscretization == fftw
-           - Formulation = 'velocity' or 'pressure'
-           velocity : laplacian(phi) = -w and v = nabla X psi, in = vorticity, out = velo
-           pressure : laplacian(p) = -nabla.(u.nabla u, in = velo, out = pressure
-        """
-        # Warning : for fftw all variables must have
-        # the same resolution.
-        assert 'variables' not in kwds, 'variables parameter is useless.'
-        super(Poisson, self).__init__(variables=[output_field, input_field],
-                                      **kwds)
-        # solution of the problem
-        self.output_field = output_field
-        # -(right-hand side)
-        self.input_field = input_field
-        if self.method is None:
-            import hysop.default_methods as default
-            self.method = default.POISSON
-        if self.method[SpaceDiscretization] is 'fftw':
-            assert __FFTW_ENABLED__
-        msg = 'Poisson : unknown method for space discretization'
-        assert self.method[SpaceDiscretization] in self._authorized_methods,\
-            msg
-
-        # Set Poisson equation formulation :
-        # Velo Poisson eq or Pressure Poisson eq
-        self.formulation = None
-        if self.method[Formulation] is not 'velocity':
-            self.formulation = self.method[Formulation]
-
-        self.input = [self.input_field]
-        self.output = [self.output_field]
-        # Enable correction if required
-        if flowrate is not None:
-            self.withCorrection = True
-            self._flowrate = flowrate
-        else:
-            self.withCorrection = False
-        self.correction = None
-        self.projection = projection
-        self._config = kwds
-
-        if projection is not None:
-            self.output.append(self.input_field)
-
-    def discretize(self):
-        # Poisson solver based on fftw
-        if self.method[SpaceDiscretization] is 'fftw':
-            super(Poisson, self)._fftw_discretize()
-            # prepare correction and projection, if needed
-            if self.withCorrection:
-                toporef = self.discrete_fields[self.output_field].topology
-                if 'discretization' in self._config:
-                    self._config['discretization'] = toporef
-                self.correction = VelocityCorrection(
-                    self.output_field, self.input_field,
-                    req_flowrate=self._flowrate, **self._config)
-                self.correction.discretize()
-
-                if isinstance(self.projection, tuple):
-                    freq = self.projection[0]
-                    threshold = self.projection[1]
-                    self.projection = Reprojection(self.input_field,
-                                                   threshold, freq,
-                                                   **self._config)
-                    self.projection.discretize()
-
-    @debug
-    @opsetup
-    def setup(self, rwork=None, iwork=None):
-        # Activate correction, if required
-        if self.withCorrection:
-            self.correction.setup()
-            cd = self.correction.discrete_op
-        else:
-            cd = None
-
-        # Activate projection, if required
-        if isinstance(self.projection, Reprojection):
-            # Projection frequency is updated at each
-            # time step, and depends on the input_field
-            self.projection.setup(rwork=rwork)
-            projection_discr = self.projection.discrete_op
-        else:
-            projection_discr = self.projection
-
-        self.discrete_op = PoissonFFT(self.discrete_fields[self.output_field],
-                                      self.discrete_fields[self.input_field],
-                                      correction=cd,
-                                      rwork=rwork, iwork=iwork,
-                                      projection=projection_discr,
-                                      formulation=self.formulation)
-
-        self._is_uptodate = True
-
-    def get_work_properties(self):
-        return {'rwork': None, 'iwork': None}
diff --git a/hysop/old/operator.old/profiles.py b/hysop/old/operator.old/profiles.py
deleted file mode 100644
index abee5ceddea44b43dfb0cc4225c2c9613d40728a..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/profiles.py
+++ /dev/null
@@ -1,68 +0,0 @@
-"""Compute and print velo/vorti profiles
-"""
-from hysop.operator.discrete.profiles import Profiles as ProfD
-from hysop.operator.computational import Computational
-from hysop.operator.continuous import opsetup
-
-
-class Profiles(Computational):
-    """
-    Compute and print velo/vorti profiles
-    """
-
-    def __init__(self, velocity, vorticity, prof_coords,
-                 direction, beginMeanComput, **kwds):
-        """
-        Constructor.
-        @param velocity field
-        @param vorticity field
-        @param direction : profile direction (0, 1 or 2)
-        @param beginMeanComput : time at which the computation of mean profile must begin
-        @param prof_coords : X and Y coordinates of the profile 
-        warning : the Z-coordinate is supposed to be 0 for each profile !
-
-        Default file name = 'profile.dat'
-        See hysop.tools.io_utils.Writer for details
-        """
-        assert 'variables' not in kwds, 'variables parameter is useless.'
-        super(Profiles, self).__init__(variables=[velocity, vorticity],
-                                       **kwds)
-        # velocity field
-        self.velocity = velocity
-        # vorticity field
-        self.vorticity = vorticity
-        # X and Y coordinates of the profile
-        self.prof_coords = prof_coords
-        # profile direction (0, 1 or 2)
-        self.direction = direction
-        # time at which the computation of mean profile must begin
-        self.beginMeanComput = beginMeanComput
-        self.input = [velocity, vorticity]
-        self.output = []
-
-    def get_work_properties(self):
-        super(Profiles, self).get_work_properties()
-        vd = self.discrete_fields[self.velocity]
-        wd = self.discrete_fields[self.vorticity]
-        v_ind = vd.topology.mesh.compute_index
-        w_ind = wd.topology.mesh.compute_index
-        shape_v = vd[0][v_ind].shape
-        shape_w = wd[0][w_ind].shape
-        if shape_v == shape_w:
-            return {'rwork': [shape_v], 'iwork': None}
-        else:
-            return {'rwork': [shape_v, shape_w], 'iwork': None}
-
-    @opsetup
-    def setup(self, rwork=None, iwork=None):
-        if not self._is_uptodate:
-
-            self.discrete_op = ProfD(self.discrete_fields[self.velocity],
-                                     self.discrete_fields[self.vorticity],
-                                     self.prof_coords, self.direction,
-                                     self.beginMeanComput,
-                                     rwork=rwork)
-            # Output setup
-            self._set_io('profile', (1, 9))
-            self.discrete_op.set_writer(self._writer)
-            self._is_uptodate = True
diff --git a/hysop/old/operator.old/redistribute.py b/hysop/old/operator.old/redistribute.py
deleted file mode 100644
index 756ba337e3df18d782bc83a9ab46e54ab89c244b..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/redistribute.py
+++ /dev/null
@@ -1,650 +0,0 @@
-"""Setup for data transfer/redistribution between topologies or operators
-
-`.. currentmodule : hysop.operator.redistribute
-
-* :class:`~RedistributeIntra` for topologies/operators defined
-  inside the same mpi communicator
-* :class:`~RedistributeInter` for topologies/operators defined
-  on two different mpi communicator
-* :class:`~RedistributeOverlap` for topologies defined
-  inside the same mpi parent communicator and
-  with a different number of processes
-* :class:`~Redistribute` abstract base class
-
-"""
-
-from hysop.operator.continuous import OperatorBase
-from abc import ABCMeta, abstractmethod
-from hysop.topology.cartesian_topology import CartesianTopology
-from hysop.operator.computational import Computational
-from hysop.operator.continuous import opsetup, opapply
-from hysop.core.mpi.bridge import Bridge, BridgeOverlap, BridgeInter
-from hysop.constants import DirectionLabels, debug
-
-
-class Redistribute(OperatorBase):
-    """Abstract interface to redistribute operators
-    """
-
-    __metaclass__ = ABCMeta
-
-    def __init__(self, source, target, component=None,
-                 run_till=None, **kwds):
-        """
-        Parameters
-        ----------
-        source, target: :class:`~hysop.topology.topology.CartesianTopology` or
-         :class:`~hysop.operator.computational.Computational
-            topologies or operators that own the source mesh and targeted mesh
-        component: int
-            which component of the field must be distributed (default = all)
-        run_till: list of :class:`~hysop.operator.computational.Computational
-            operators that must wait for the completion of this redistribute
-            before any apply.
-
-        """
-        # Base class initialisation
-        super(Redistribute, self).__init__(**kwds)
-
-        # Object (may be an operator or a topology) which handles the
-        # fields to be transfered
-        self._source = source
-        # Object (may an operator or a topology) which handles the fields
-        # to be filled in from source.
-        self._target = target
-
-        self.component = component
-        if component is None:
-            # All components are considered
-            self._range_components = lambda v: xrange(v.nb_components)
-        else:
-            # Only the given component is considered
-            assert self.component >= 0, 'component value must be positive.'
-            self._range_components = lambda v: (self.component)
-
-        # Bridge between topology of source and topology of target
-        self.bridge = None
-        # True if some MPI operations are running for the current operator.
-        self._has_requests = False
-        # Which operator must wait for this one before
-        # any computation
-        # Exp : run_till = op1 means that op1 will
-        # wait for the end of this operator before
-        # op1 starts its apply.
-        if run_till is None:
-            run_till = []
-
-        assert isinstance(run_till, list)
-        self._run_till = run_till
-
-    @abstractmethod
-    def setup(self, rwork=None, iwork=None):
-        """
-        Check/set the list of variables to be distributed
-
-        What must be set at setup?
-        ---> the list of continuous variables to be distributed
-        ---> the bridge (one for all variables, which means
-        that all vars must have the same topology in source
-        and the same topology in target.
-        ---> the list of discrete variables for source and
-        for target.
-        """
-        assert self.domain is not None
-        for v in self.variables:
-            assert v.domain is self.domain
-        super(Redistribute, self).setup(rwork, iwork)
-
-    def _check_operator(self, op):
-        """ ensure op properties:
-           * check if op is really a computational operator
-           * discretize op
-           * check if all required variables (if any) belong to op
-
-        Parameters
-        ----------
-        op : :class:`~hysop.operator.computational.Computational
-                :param:  op : a computational operator
-
-        """
-        assert isinstance(op, Computational)
-        op.discretize()
-        msg = 'The variables to be distributed '
-        msg += 'do not belong to the input operator.'
-        if len(self.variables) > 0:
-            assert all(v in op.variables for v in self.variables), msg
-
-    def _set_variables(self):
-        """
-        Check/set the list of variables proceed by the current operator.
-        """
-        # Set list of variables.
-        # It depends on :
-        # - the type of source/target : CartesianTopology, Computational or None
-        # - the args variables : a list of variables or None
-        # Possible cases:
-        # - if source or target is None --> variables is required
-        # - if source and target are CartesianTopology --> variables is required
-        # - in all other cases, variables is optional.
-        # If variables are not set at init,
-        # they must be infered from source/target operators.
-        has_var = len(self.variables) > 0
-        vlist = (v for v in self.variables)
-
-        if self._source is None or self._target is None:
-            assert len(self.variables) > 0
-            self.variables = [v for v in vlist]
-        else:
-            source_is_topo = isinstance(self._source, CartesianTopology)
-            target_is_topo = isinstance(self._target, CartesianTopology)
-
-            # both source and target are topologies. Variables required.
-            if source_is_topo and target_is_topo:
-                msg = 'Redistribute, a list of variables is required at init.'
-                assert has_var, msg
-                self.variables = [v for v in vlist]
-
-            elif not source_is_topo and not target_is_topo:
-                # both source and target are operators
-                # --> intersection of their variables
-                vsource = self._source.variables
-                vtarget = self._target.variables
-                if not has_var:
-                    vlist = (v for v in vsource if v in vtarget)
-                self.variables = [v for v in vlist]
-
-            elif source_is_topo:
-                # source = topo, target = operator
-                vtarget = self._target.variables
-                if not has_var:
-                    vlist = (v for v in vtarget)
-                self.variables = [v for v in vlist]
-
-            else:
-                # source = operator, target = topo
-                vsource = self._source.variables
-                if not has_var:
-                    vlist = (v for v in vsource)
-                self.variables = [v for v in vlist]
-
-        assert len(self.variables) > 0
-
-        # Variables is converted to a dict to be coherent with
-        # computational operators ...
-        self.variables = {key: None for key in self.variables}
-
-        # All variables must have the same domain
-        self.domain = self.variables.keys()[0].domain
-        for v in self.variables:
-            assert v.domain is self.domain
-
-    def _set_topology(self, current):
-        """This function check if current is valid, fits with self.variables
-        and get its topology to set self._topology.
-
-        Parameters
-        ----------
-        current : :class:`~hysop.topology.topology.CartesianTopology` or
-         :class:`~hysop.core.mpi.operator.computational.Computational`
-
-        """
-        if isinstance(current, CartesianTopology):
-            result = current
-            for v in self.variables:
-                v.discretize(result)
-        elif isinstance(current, Computational):
-            self._check_operator(current)
-            vref = self.variables.keys()[0]
-            vcurrent = current.variables
-            result = vcurrent[vref]
-            # We ensure that all vars have
-            # the same topo in target/target.
-            for v in (v for v in self.variables if v is not vref):
-                assert vcurrent[v] is result
-        else:
-            msg = "the source/target is neither an operator or a topology."
-            raise AttributeError(msg)
-        assert result.task_id() == self.domain.current_task()
-        return result
-
-    def computation_time(self):
-        pass
-
-
-class RedistributeIntra(Redistribute):
-    """Data transfer between two operators/topologies.
-    Source and target must:
-    - be defined on the same communicator
-    - work on the same number of mpi process
-    - work with the same global resolution
-    """
-
-    def __init__(self, **kwds):
-        """Data transfer between two operators/topologies defined on the
-        same communicator
-
-        Source and target must:
-        * be defined on the same communicator
-        * work on the same number of mpi process
-        * work with the same global resolution
-        """
-
-        # Base class initialisation
-        super(RedistributeIntra, self).__init__(**kwds)
-
-        # Warning : comm from io_params will be used as
-        # reference for all mpi communication of this operator.
-        # --> rank computed in refcomm
-        # --> source and target must work inside refcomm
-        # If io_params is None, refcomm will COMM_WORLD.
-
-        # Dictionnary of discrete fields to be sent
-        self._vsource = {}
-        # Dictionnary of discrete fields to be overwritten
-        self._vtarget = {}
-
-        # dictionnary which maps rank with mpi derived type
-        # for send operations
-        self._send = {}
-        # dictionnay which maps rank with mpi derived type
-        # for send operations
-        self._receive = {}
-        # dictionnary which map rank/field name with a
-        # receive request
-        self._r_request = None
-        # dictionnary which map rank/field name with a
-        # send request
-        self._s_request = None
-
-        # Set list of variables and the domain.
-        self._set_variables()
-        # Set mpi related stuff
-        self._set_domain_and_tasks()
-
-    @opsetup
-    def setup(self, rwork=None, iwork=None):
-        # At setup, source and topo must be either
-        # a hysop.topology.topology.CartesianTopology or
-        # a computational operator.
-
-        msg = 'Redistribute error : undefined source of target.'
-        assert self._source is not None and self._target is not None, msg
-
-        t_source = self._set_topology(self._source)
-        t_target = self._set_topology(self._target)
-
-        source_res = t_source.mesh.discretization.resolution
-        target_res = t_target.mesh.discretization.resolution
-        msg = 'Redistribute error: source and target must '
-        msg += 'have the same global resolution.'
-        assert (source_res == target_res).all(), msg
-
-        # Set the dictionnaries of source/target variables
-        self._vsource = {v: v.discretize(t_source)
-                         for v in self.variables}
-        self._vtarget = {v: v.discretize(t_target)
-                         for v in self.variables}
-
-        # We can create the bridge
-        self.bridge = Bridge(t_source, t_target)
-
-        # Shape of reference is the shape of source/target mesh
-        self._send = self.bridge.send_types()
-        self._receive = self.bridge.recv_types()
-        self._set_synchro()
-        self._is_uptodate = True
-
-    def _set_synchro(self):
-        """
-        Set who must wait for who ...
-        """
-        # Check input operators
-        if isinstance(self._source, Computational):
-            #  redistribute must wait for source if a variable of redistribute
-            # is an output from source.
-            for v in self.variables:
-                vout = v in self._source.output or False
-            if vout:
-                self.wait_for(self._source)
-                # And source must wait for redistribute
-                # if a variable of red. is an output from source.
-                self._source.wait_for(self)
-
-        if isinstance(self._target, Computational):
-            # target operator must wait for
-            # the end of this operator to apply.
-            self._run_till.append(self._target)
-
-        # Add this operator into wait list of
-        # operators listed in run_till
-        for op in self._run_till:
-            op.wait_for(self)
-
-        self._is_uptodate = True
-
-    def add_run_till_op(self, op):
-        """Add an operator to the wait list"""
-        self._run_till.append(op)
-        op.wait_for(self)
-
-    @opapply
-    def apply(self, simulation=None):
-        # Try different way to send vars?
-        # - Buffered : copy all data into a buffer and send/recv
-        # - Standard : one send/recv per component
-        # --- Standard send/recv ---
-        br = self.bridge
-
-        # reset send/recv requests
-        self._r_request = {}
-        self._s_request = {}
-
-        basetag = self.mpi_params.rank + 1
-        # Comm used for send/receive operations
-        # It must contains all proc. of source topo and
-        # target topo.
-        refcomm = self.bridge.comm
-        # Loop over all required components of each variable
-        for v in self.variables:
-            for d in self._range_components(v):
-                v_name = v.name + DirectionLabels[d]
-
-                # Deal with local copies of data
-                if br.has_local_inter():
-                    vTo = self._vtarget[v].data[d]
-                    vFrom = self._vsource[v].data[d]
-                    vTo[br.local_target_ind()] = vFrom[br.local_source_ind()]
-
-                # Transfers to other mpi processes
-                for rk in self._receive:
-                    recvtag = basetag * 989 + (rk + 1) * 99 + (d + 1) * 88
-                    mpi_type = self._receive[rk]
-                    vTo = self._vtarget[v].data[d]
-                    self._r_request[v_name + str(rk)] = \
-                        refcomm.Irecv([vTo, 1, mpi_type],
-                                      source=rk, tag=recvtag)
-                    self._has_requests = True
-                for rk in self._send:
-                    sendtag = (rk + 1) * 989 + basetag * 99 + (d + 1) * 88
-                    mpi_type = self._send[rk]
-                    vFrom = self._vsource[v].data[d]
-                    self._s_request[v_name + str(rk)] = \
-                        refcomm.Issend([vFrom, 1, mpi_type],
-                                       dest=rk, tag=sendtag)
-                    self._has_requests = True
-
-    def wait(self):
-        if self._has_requests:
-            for rk in self._r_request:
-                self._r_request[rk].Wait()
-            for rk in self._s_request:
-                self._s_request[rk].Wait()
-        self._has_requests = False
-
-    def test_requests(self):
-        res = True
-        for rk in self._r_request.keys():
-            res = self._r_request[rk].Test()
-            if not res:
-                return res
-        for rk in self._s_request.keys():
-            res = self._s_request[rk].Test()
-            if not res:
-                return res
-
-    def test_single_request(self, rsend=None, rrecv=None):
-        """if neither rsend or rrecv is given return
-        True if all communication request are complete
-        else check for sending to rsend or receiving from rrecv.
-        Process ranks should be those in parent_comm.
-
-        Parameters
-        ----------
-        rsend : string
-            discrete variable name + DirectionLabels + rank of the process
-            to which a message has been sent
-            and for which we want to test message completion.
-        rrecv : string
-            discrete variable name + DirectionLabels + rank of the process
-            from which a message has been receive
-            and for which we want to test message completion.
-
-        """
-        if rsend is not None or rrecv is not None:
-            send_res = True
-            recv_res = True
-            if rsend is not None:
-                send_res = self._s_request[rsend].Test()
-            if rrecv is not None:
-                recv_res = self._r_request[rrecv].Test()
-            res = send_res and recv_res
-            return res
-        else:
-            return self.test_requests()
-
-
-class RedistributeInter(Redistribute):
-    """Operator to redistribute data from one communicator to another.
-    Source/target may be either a topology or a computational operator.
-    It implies mpi inter-communications.
-    """
-
-    @debug
-    def __init__(self, parent, source_id=None, target_id=None, **kwds):
-        """redistribute data from one communicator to another.
-        Source/target may be either a topology or a computational operator.
-        It implies mpi inter-communications.
-
-        Parameters
-        ----------
-        parent : MPI.COMM
-            mpi communicator that must owns all the
-            processes involved in source and target.
-        source_id, target_id : int
-            mpi task ids for the source/target.
-            Required if source/target is None
-            else infered from source/target.
-
-        See other required parameters in base class.
-        """
-        super(RedistributeInter, self).__init__(**kwds)
-
-        # parent communicator, that must contains all processes
-        # involved in source and target tasks.
-        self.parent = parent
-
-        # set source and targets ids.
-        # They must be known before setup.
-        # Either they can be infered from source and target
-        # or must be set in argument list, if either source
-        # or target is undefined on the current process.
-        if self._source is None:
-            assert source_id is not None
-
-        if self._target is None:
-            assert target_id is not None
-
-        self._source_id = source_id
-        self._target_id = target_id
-
-        # Set list of variables and domain.
-        self._set_variables()
-        # Set mpi related stuff
-        self._set_domain_and_tasks()
-
-        # Domain is set, we can check if we are on source or target
-        current_task = self.domain.current_task()
-        self._is_source = current_task == self._source_id
-        self._is_target = current_task == self._target_id
-        assert self._is_target or self._is_source
-        assert not (self._is_target and self._is_source)
-
-        nbprocs = len(self.domain.tasks_list())
-        msg = "Parent communicator size and number of procs "
-        msg += "in domain differ."
-        assert parent.Get_size() == nbprocs, msg
-
-        # the local topology. May be either source or target
-        # depending on the task of the current process.
-        self._topology = None
-
-        # dictionnary which maps rank with mpi derived type
-        # used for send/recv operations (send on source, recv on target ...)
-        self._transfer_types = None
-
-        # dictionnary which maps rank/field name with a
-        # send/recv request
-        self._requests = {}
-
-    @debug
-    @opsetup
-    def setup(self, rwork=None, iwork=None):
-        # First of all, we need to get the current topology:
-        if self._is_source:
-            assert self._source is not None
-            self._topology = self._set_topology(self._source)
-        elif self._is_target:
-            assert self._target is not None
-            self._topology = self._set_topology(self._target)
-
-        # Now we can build the bridge (intercomm)
-        self.bridge = BridgeInter(self._topology, self.parent,
-                                  self._source_id, self._target_id)
-
-        # And get mpi derived types
-        self._transfer_types = self.bridge.transfer_types()
-
-        self._set_synchro()
-        self._is_uptodate = True
-
-    def _set_synchro(self):
-        """
-        Set who must wait for who ...
-        """
-        if self._is_source and isinstance(self._source, Computational):
-            #  redistribute must wait for source if a variable of redistribute
-            # is an output from source.
-            for v in self.variables:
-                vout = v in self._source.output or False
-            if vout:
-                self.wait_for(self._source)
-                # And source must wait for redistribute
-                # if a variable of red. is an output from source.
-                self._source.wait_for(self)
-
-        if self._is_target and isinstance(self._target, Computational):
-            # target operator must wait for
-            # the end of this operator to apply.
-            self._run_till.append(self._target)
-
-        # Add this operator into wait list of
-        # operators listed in run_till
-        for op in self._run_till:
-            op.wait_for(self)
-
-    def add_run_till_op(self, op):
-        """Add an operator to the wait list"""
-        if self._is_target:
-            self._run_till.append(op)
-            op.wait_for(self)
-
-    @debug
-    @opapply
-    def apply(self, simulation=None):
-        # --- Standard send/recv ---
-        self._requests = {}
-
-        # basetag = self.mpi_params.rank + 1
-        # Comm used for send/receive operations
-        # It must contains all proc. of source topo and
-        # target topo.
-        refcomm = self.bridge.comm
-        # Map between rank and mpi types
-        # Loop over all required components of each variable
-        for v in self.variables:
-            rank = self._topology.comm.Get_rank()
-            for d in self._range_components(v):
-                v_name = v.name + DirectionLabels[d]
-                vtab = v.discrete_fields[self._topology].data[d]
-                for rk in self._transfer_types:
-                    if self._is_target:
-                        # Set reception
-                        self._requests[v_name + str(rk)] = \
-                            refcomm.Irecv([vtab[...], 1,
-                                           self._transfer_types[rk]],
-                                          source=rk, tag=rk)
-                    if self._is_source:
-                        self._requests[v_name + str(rk)] = \
-                            refcomm.Issend([vtab[...], 1,
-                                            self._transfer_types[rk]],
-                                           dest=rk, tag=rank)
-                    self._has_requests = True
-
-    def wait(self):
-        if self._has_requests:
-            for rk in self._requests:
-                self._requests[rk].Wait()
-        for v in self.variables:
-            for d in self._range_components(v):
-                vtab = v.discrete_fields[self._topology].data[d]
-        self._has_requests = False
-
-    def test_requests(self):
-        res = True
-        for rk in self._requests:
-            res = self._requests[rk].Test()
-            if not res:
-                return res
-
-
-class RedistributeOverlap(RedistributeIntra):
-    """A specific redistribute where source and target do not work with the same
-    group of mpi processes.
-    Requirements :
-    - work only on topologies, not on operators
-    - same global resolution for both topologies
-    - group from source topology and target topology MUST overlap.
-    """
-
-    @opsetup
-    def setup(self, rwork=None, iwork=None):
-        """
-        Check/set the list of variables to be distributed
-
-        What must be set at setup?
-        ---> the list of continuous variables to be distributed
-        ---> the bridge (one for all variables, which means
-        that all vars must have the same topology in source
-        and the same topology in target.
-        ---> the list of discrete variables for source and
-        for target.
-        """
-        if self._source is not None:
-            self._vsource = self._discrete_fields(self._source)
-        if self._target is not None:
-            self._vtarget = self._discrete_fields(self._target)
-
-        # We can create the bridge
-        self.bridge = BridgeOverlap(source=self._source, target=self._target,
-                                    comm_ref=self.mpi_params.comm)
-
-        # Build mpi derived types for send and receive operations.
-        # Shape of reference is the shape of source/target mesh
-        if self._source is not None:
-            self._send = self.bridge.send_types()
-        if self._target is not None:
-            self._receive = self.bridge.recv_types()
-
-        self._set_synchro()
-        self._is_uptodate = True
-
-    def _discrete_fields(self, topo):
-        """Return the dictionnary of discrete fields for topo
-        and the variables of this operator.
-
-        Parameters
-        ----------
-        topo : :class:`~hysop.topology.topology.CartesianTopology`
-        """
-        assert isinstance(topo, CartesianTopology)
-        return {v: v.discretize(topo) for v in self.variables}
diff --git a/hysop/old/operator.old/reprojection.py b/hysop/old/operator.old/reprojection.py
deleted file mode 100644
index 956f40ae6b07808582fee7e7ed2f95e3bcf45ef1..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/reprojection.py
+++ /dev/null
@@ -1,56 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-@file operator/reprojection.py
-Compute reprojection criterion and divergence maximum
-"""
-from hysop.operator.computational import Computational
-from hysop.operator.discrete.reprojection import Reprojection as RD
-from hysop.operator.continuous import opsetup
-
-
-class Reprojection(Computational):
-    """
-    Computes and prints reprojection criterion.
-    See the related PDF called "vorticity_solenoidal_projection.pdf"
-    in HySoPDoc for more details.
-    """
-    def __init__(self, vorticity, threshold, frequency, **kwds):
-        """
-        Constructor.
-        @param vorticity field
-        @param threshold : update frequency when criterion is greater than
-        this threshold
-        @param frequency : set frequency of execution of the reprojection
-        """
-        assert 'variables' not in kwds, 'variables parameter is useless.'
-        super(Reprojection, self).__init__(variables=[vorticity], **kwds)
-        # constant defining the reprojection criterion :
-        # if the latter is greater than this constant, then a reprojection
-        # is needed
-        self.threshold = threshold
-        ## Frequency for reprojection
-        self.frequency = frequency
-        ## vorticity field
-        self.vorticity = vorticity
-        self.input = [vorticity]
-        self.output = []
-
-    @opsetup
-    def setup(self, rwork=None, iwork=None):
-        if not self._is_uptodate:
-            self.discrete_op = RD(self.discrete_fields[self.vorticity],
-                                       self.threshold,
-                                       self.frequency, rwork=rwork,
-                                       method=self.method)
-        self._set_io('reprojection', (1, 4))
-        self.discrete_op.set_writer(self._writer)
-        self._is_uptodate = True
-
-    def do_projection(self, ite):
-        """
-        True if projection must be done
-        """
-        return self.discrete_op.do_projection(ite)
-
-    def get_work_properties(self):
-        return {'rwork': None, 'iwork': None}
diff --git a/hysop/old/operator.old/residual.py b/hysop/old/operator.old/residual.py
deleted file mode 100644
index 6ead744ede75fa500d7a1c447b809bf2a9451415..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/residual.py
+++ /dev/null
@@ -1,53 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-@file residual.py
-Compute and print the time evolution of the residual
-"""
-from hysop.operator.discrete.residual import Residual as ResD
-from hysop.operator.computational import Computational
-from hysop.operator.continuous import opsetup
-
-
-class Residual(Computational):
-    """
-    Compute and print the residual time evolution
-    """
-
-    def __init__(self, vorticity, **kwds):
-        """
-        Constructor.
-        @param vorticity field
-
-        Default file name = 'residual.dat'
-        See hysop.tools.io_utils.Writer for details
-        """
-        assert 'variables' not in kwds, 'variables parameter is useless.'
-        super(Residual, self).__init__(variables=[vorticity], **kwds)
-        ## vorticity field
-        self.vorticity = vorticity
-        self.input = [vorticity]
-        self.output = []
-
-    def get_work_properties(self):
-        if not self._is_discretized:
-            msg = 'The operator must be discretized '
-            msg += 'before any call to this function.'
-            raise RuntimeError(msg)
-        wd = self.discrete_fields[self.vorticity]
-        w_ind = wd.topology.mesh.compute_index
-        shape_w = wd[0][w_ind].shape
-        return {'rwork': [shape_w], 'iwork': None}
-
-    @opsetup
-    def setup(self, rwork=None, iwork=None):
-        if not self._is_uptodate:
-
-            self.discrete_op = ResD(self.discrete_fields[self.vorticity],
-                                    rwork=rwork)
-            # Initialization of w^(n-1) vorticity value
-            self.discrete_op.initialize_vortPrev()
-            # Output setup
-            self._set_io('residual', (1, 3))
-            self.discrete_op.set_writer(self._writer)
-            self._is_uptodate = True
-
diff --git a/hysop/old/operator.old/spectrum.py b/hysop/old/operator.old/spectrum.py
deleted file mode 100644
index 2e169cb62164915553ce8042193d001b9c003452..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/spectrum.py
+++ /dev/null
@@ -1,40 +0,0 @@
-"""Compute the spectrum of a field (fftw based)
-"""
-from hysop.operator.computational import Computational
-from hysop.operator.discrete.spectrum import FFTSpectrum
-from hysop.constants import debug
-from hysop.operator.continuous import opsetup
-
-
-class Spectrum(Computational):
-    """
-    Fourier spectrum computation of a scalar field.
-    """
-
-    def __init__(self, field, **kwds):
-        """
-
-        Parameters
-        ----------
-        field: :class:`~hysop.fields.continuous_field.Field`
-            the input field for which spectrum will be computed
-        """
-        super(Spectrum, self).__init__(variables=[field], **kwds)
-        self.field = field
-        self.input = [field]
-
-    def discretize(self):
-        super(Spectrum, self)._fftw_discretize()
-
-    @debug
-    @opsetup
-    def setup(self, rwork=None, iwork=None):
-        self.discrete_op = FFTSpectrum(self.discrete_fields[self.field],
-                                       method=self.method)
-        nbc = self.discrete_op.res.size
-        self._set_io('spectrum', (1, 2 * nbc))
-        self.discrete_op.set_writer(self._writer)
-        self._is_uptodate = True
-
-    def get_work_properties(self):
-        return {'rwork': None, 'iwork': None}
diff --git a/hysop/old/operator.old/stretching.py b/hysop/old/operator.old/stretching.py
deleted file mode 100755
index eb9e1e8a8e953dac2c6e3e5db02386f96b3ee15d..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/stretching.py
+++ /dev/null
@@ -1,164 +0,0 @@
-# -*- coding: utf-8 -*-
-"""Computation of the stretching term in Navier-Stokes.
-
-See also
---------
-
-* :ref:`stretching` in HySoP user guide.
-
-
-"""
-from hysop.constants import debug
-from hysop.methods import TimeIntegrator, Formulation, \
-    SpaceDiscretization
-from hysop.numerics.finite_differences import FDC4
-from hysop.operator.computational import Computational
-from hysop.operator.continuous import opsetup
-from hysop.operator.discrete.stretching import Conservative, GradUW
-from hysop.operator.discrete.stretching import StretchingLinearized as SLD
-from hysop.numerics.differential_operations import GradVxW, DivWV
-from hysop.numerics.odesolvers import Euler
-
-
-class Stretching(Computational):
-    """Abstract interface to stretching operator.
-    """
-
-    _authorized_methods = [FDC4]
-    _authorized_formulations = [Conservative, GradUW]
-
-    @debug
-    def __init__(self, velocity, vorticity, **kwds):
-        """
-        Parameters
-        -----------
-        velocity, vorticity : :class:`~hysop.fields.continuous_field.Field`
-        **kwds : extra parameters for base class
-
-        Notes
-        -----
-        * The default formulation is the 'Conservative' one.
-        * The default solving method is finite differences, 4th order, in space
-          and Runge-Kutta 3 in time.
-
-        """
-        assert 'variables' not in kwds, 'variables parameter is useless.'
-        super(Stretching, self).__init__(variables=[velocity, vorticity],
-                                         **kwds)
-        # velocity variable (vector)
-        self.velocity = velocity
-        # vorticity variable (vector)
-        self.vorticity = vorticity
-        # Numerical methods for time and space discretization
-
-        if self.method is None:
-            import hysop.default_methods as default
-            self.method = default.STRETCHING
-        assert Formulation in self.method.keys()
-        assert SpaceDiscretization in self.method.keys()
-        assert TimeIntegrator in self.method.keys()
-        msg = 'Stretching : unknown method for space discretization'
-        assert self.method[SpaceDiscretization] in self._authorized_methods,\
-            msg
-        # Formulation used for the stretching equation.
-        # Default = conservative form.
-        self.formulation = self.method[Formulation]
-        msg = 'Stretching error : unknown formulation.'
-        assert self.formulation in self._authorized_formulations, msg
-        self.input = [self.velocity, self.vorticity]
-        self.output = [self.vorticity]
-
-    def get_work_properties(self):
-        super(Stretching, self).get_work_properties()
-        # Get fields local shape.
-        vd = self.discrete_fields[self.velocity]
-        # Collect info from numerical methods
-        # --> time-integrator required work space
-        ti = self.method[TimeIntegrator]
-        topo = vd.topology
-        nbc = self.velocity.nb_components
-        res = ti.get_work_properties(nbc, topo)
-        # ---> differential operator work space
-        if self.formulation is GradUW:
-            dop = GradVxW
-
-        elif self.formulation is Conservative:
-            dop = DivWV
-        res['rwork'] += dop.get_work_properties(topo)['rwork']
-        return res
-
-    @debug
-    def discretize(self):
-        nbghosts = self.method[SpaceDiscretization].ghosts_layer_size
-        super(Stretching, self)._standard_discretize(nbghosts)
-
-    @debug
-    @opsetup
-    def setup(self, rwork=None, iwork=None):
-        self.discrete_op =\
-            self.formulation(velocity=self.discrete_fields[self.velocity],
-                             vorticity=self.discrete_fields[self.vorticity],
-                             method=self.method, rwork=rwork, iwork=iwork)
-        self._is_uptodate = True
-
-
-class StretchingLinearized(Stretching):
-    """Solve the linearized stretching equation
-
-    See details in :ref:`stretching`.
-
-    """
-    @debug
-    def __init__(self, velocity_BF, vorticity_BF, **kwds):
-        """
-        Parameters
-        -----------
-        velocity_BF, vorticity_BF : base flow fields :
-        class:`~hysop.fields.continuous_field.Field`
-        **kwds : extra parameters for base class
-
-        Notes
-        -----
-        * The default formulation is the 'Conservative' one.
-        * The default solving method is finite differences, 4th order,
-        in space and Runge-Kutta 3 in time.
-        """
-        super(StretchingLinearized, self).__init__(**kwds)
-        self.variables[velocity_BF] = None
-        self.variables[vorticity_BF] = None
-
-        # Base flow velocity variable (vector)
-        self.velocity_BF = velocity_BF
-        # Base flow vorticity variable (vector)
-        self.vorticity_BF = vorticity_BF
-        # Usual stretching operator to compute the
-        # first term of the linearized rhs: (w'.grad)ub
-        self.usual_stretch = Stretching(self.velocity_BF, self.vorticity,
-                                        discretization=self._discretization)
-
-        self.input.append(self.velocity_BF)
-        self.input.append(self.vorticity_BF)
-
-    @debug
-    def discretize(self):
-        nbghosts = self.method[SpaceDiscretization].ghosts_layer_size
-        self.usual_stretch.discretize()
-        super(StretchingLinearized, self)._standard_discretize(nbghosts)
-
-    @debug
-    @opsetup
-    def setup(self, rwork=None, iwork=None):
-        # Setup of the usual stretching operator
-        self.usual_stretch.setup()
-
-        # Setup of a second stretching operator (receiving 3 input variables)
-        # to compute the 2nd term of the linearized rhs: (wb.grad)u'
-        method_lin = self.method.copy()
-        method_lin[TimeIntegrator] = Euler
-        self.discrete_op =\
-            SLD(velocity=self.discrete_fields[self.velocity],
-                vorticity=self.discrete_fields[self.vorticity],
-                vorticity_BF=self.discrete_fields[self.vorticity_BF],
-                usual_op=self.usual_stretch.discrete_op,
-                method=method_lin, rwork=rwork, iwork=iwork)
-        self._is_uptodate = True
diff --git a/hysop/old/operator.old/tests/__init__.py b/hysop/old/operator.old/tests/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/hysop/old/operator.old/tests/ref_files/penal2d_multi.xmf b/hysop/old/operator.old/tests/ref_files/penal2d_multi.xmf
deleted file mode 100644
index 42737ced693dd21a2515b66c309ed2a1f85b50e7..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/tests/ref_files/penal2d_multi.xmf
+++ /dev/null
@@ -1,35 +0,0 @@
-<?xml version="1.0" ?>
-<!DOCTYPE Xdmf SYSTEM "Xdmf.dtd">
-<Xdmf Version="2.0">
- <Domain>
-  <Grid Name="CellTime" GridType="Collection" CollectionType="Temporal">
-   <Grid Name="Iteration 000" GridType="Uniform">
-    <Time Value="0.0" />
-    <Topology TopologyType="2DCORECTMesh" NumberOfElements="96  128 "/>
-    <Geometry GeometryType="ORIGIN_DXDY">
-     <DataItem Dimensions="2 " NumberType="Float" Precision="8" Format="XML">
-     -0.29999999999999999  0.10000000000000001
-     </DataItem>
-     <DataItem Dimensions="2 " NumberType="Float" Precision="8" Format="XML">
-     0.065449846949787352  0.049087385212340517
-     </DataItem>
-    </Geometry>
-    <Attribute Name="veloref_0_X" AttributeType="Scalar" Center="Node">
-     <DataItem Dimensions="96  128 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
-      penal2d_multi_00000.h5:/veloref_0_X
-     </DataItem>
-    </Attribute>
-    <Attribute Name="veloref_0_Y" AttributeType="Scalar" Center="Node">
-     <DataItem Dimensions="96  128 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
-      penal2d_multi_00000.h5:/veloref_0_Y
-     </DataItem>
-    </Attribute>
-    <Attribute Name="scalref_0_X" AttributeType="Scalar" Center="Node">
-     <DataItem Dimensions="96  128 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
-      penal2d_multi_00000.h5:/scalref_0_X
-     </DataItem>
-    </Attribute>
-   </Grid>
-  </Grid>
- </Domain>
-</Xdmf>
diff --git a/hysop/old/operator.old/tests/ref_files/penal2d_multi_00000.h5 b/hysop/old/operator.old/tests/ref_files/penal2d_multi_00000.h5
deleted file mode 100644
index d66edb34279d0969aa93941615f4309204490062..0000000000000000000000000000000000000000
Binary files a/hysop/old/operator.old/tests/ref_files/penal2d_multi_00000.h5 and /dev/null differ
diff --git a/hysop/old/operator.old/tests/ref_files/penal2d_porous.xmf b/hysop/old/operator.old/tests/ref_files/penal2d_porous.xmf
deleted file mode 100644
index 65f280851c06f1612cfbd4b6ba7360c4c7c8137d..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/tests/ref_files/penal2d_porous.xmf
+++ /dev/null
@@ -1,35 +0,0 @@
-<?xml version="1.0" ?>
-<!DOCTYPE Xdmf SYSTEM "Xdmf.dtd">
-<Xdmf Version="2.0">
- <Domain>
-  <Grid Name="CellTime" GridType="Collection" CollectionType="Temporal">
-   <Grid Name="Iteration 000" GridType="Uniform">
-    <Time Value="0.0" />
-    <Topology TopologyType="2DCORECTMesh" NumberOfElements="96  128 "/>
-    <Geometry GeometryType="ORIGIN_DXDY">
-     <DataItem Dimensions="2 " NumberType="Float" Precision="8" Format="XML">
-     -0.29999999999999999  0.10000000000000001
-     </DataItem>
-     <DataItem Dimensions="2 " NumberType="Float" Precision="8" Format="XML">
-     0.065449846949787352  0.049087385212340517
-     </DataItem>
-    </Geometry>
-    <Attribute Name="veloref_0_X" AttributeType="Scalar" Center="Node">
-     <DataItem Dimensions="96  128 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
-      penal2d_porous_00000.h5:/veloref_0_X
-     </DataItem>
-    </Attribute>
-    <Attribute Name="veloref_0_Y" AttributeType="Scalar" Center="Node">
-     <DataItem Dimensions="96  128 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
-      penal2d_porous_00000.h5:/veloref_0_Y
-     </DataItem>
-    </Attribute>
-    <Attribute Name="scalref_0_X" AttributeType="Scalar" Center="Node">
-     <DataItem Dimensions="96  128 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
-      penal2d_porous_00000.h5:/scalref_0_X
-     </DataItem>
-    </Attribute>
-   </Grid>
-  </Grid>
- </Domain>
-</Xdmf>
diff --git a/hysop/old/operator.old/tests/ref_files/penal2d_porous_00000.h5 b/hysop/old/operator.old/tests/ref_files/penal2d_porous_00000.h5
deleted file mode 100644
index ba45fcf2a67110bf3327f76bad2d880c4b96c8dd..0000000000000000000000000000000000000000
Binary files a/hysop/old/operator.old/tests/ref_files/penal2d_porous_00000.h5 and /dev/null differ
diff --git a/hysop/old/operator.old/tests/ref_files/penal2d_sphere.xmf b/hysop/old/operator.old/tests/ref_files/penal2d_sphere.xmf
deleted file mode 100644
index b8608c1b2b77c6a0c7d0c17355ea14435145a52b..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/tests/ref_files/penal2d_sphere.xmf
+++ /dev/null
@@ -1,35 +0,0 @@
-<?xml version="1.0" ?>
-<!DOCTYPE Xdmf SYSTEM "Xdmf.dtd">
-<Xdmf Version="2.0">
- <Domain>
-  <Grid Name="CellTime" GridType="Collection" CollectionType="Temporal">
-   <Grid Name="Iteration 000" GridType="Uniform">
-    <Time Value="0.0" />
-    <Topology TopologyType="2DCORECTMesh" NumberOfElements="96  128 "/>
-    <Geometry GeometryType="ORIGIN_DXDY">
-     <DataItem Dimensions="2 " NumberType="Float" Precision="8" Format="XML">
-     -0.29999999999999999  0.10000000000000001
-     </DataItem>
-     <DataItem Dimensions="2 " NumberType="Float" Precision="8" Format="XML">
-     0.065449846949787352  0.049087385212340517
-     </DataItem>
-    </Geometry>
-    <Attribute Name="veloref_0_X" AttributeType="Scalar" Center="Node">
-     <DataItem Dimensions="96  128 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
-      penal2d_sphere_00000.h5:/veloref_0_X
-     </DataItem>
-    </Attribute>
-    <Attribute Name="veloref_0_Y" AttributeType="Scalar" Center="Node">
-     <DataItem Dimensions="96  128 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
-      penal2d_sphere_00000.h5:/veloref_0_Y
-     </DataItem>
-    </Attribute>
-    <Attribute Name="scalref_0_X" AttributeType="Scalar" Center="Node">
-     <DataItem Dimensions="96  128 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
-      penal2d_sphere_00000.h5:/scalref_0_X
-     </DataItem>
-    </Attribute>
-   </Grid>
-  </Grid>
- </Domain>
-</Xdmf>
diff --git a/hysop/old/operator.old/tests/ref_files/penal2d_sphere_00000.h5 b/hysop/old/operator.old/tests/ref_files/penal2d_sphere_00000.h5
deleted file mode 100644
index a91749ccaf6babcd887a1450a0ebd45b3dc378ba..0000000000000000000000000000000000000000
Binary files a/hysop/old/operator.old/tests/ref_files/penal2d_sphere_00000.h5 and /dev/null differ
diff --git a/hysop/old/operator.old/tests/ref_files/penal3d_multi.xmf b/hysop/old/operator.old/tests/ref_files/penal3d_multi.xmf
deleted file mode 100644
index 2f7967b1d2a69cf9618fcf62059663d6f242f7af..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/tests/ref_files/penal3d_multi.xmf
+++ /dev/null
@@ -1,40 +0,0 @@
-<?xml version="1.0" ?>
-<!DOCTYPE Xdmf SYSTEM "Xdmf.dtd">
-<Xdmf Version="2.0">
- <Domain>
-  <Grid Name="CellTime" GridType="Collection" CollectionType="Temporal">
-   <Grid Name="Iteration 000" GridType="Uniform">
-    <Time Value="0.0" />
-    <Topology TopologyType="3DCORECTMesh" NumberOfElements="102  96  128 "/>
-    <Geometry GeometryType="ORIGIN_DXDYDZ">
-     <DataItem Dimensions="3 " NumberType="Float" Precision="8" Format="XML">
-     0.5  -0.29999999999999999  0.10000000000000001
-     </DataItem>
-     <DataItem Dimensions="3 " NumberType="Float" Precision="8" Format="XML">
-     0.061599855952741041  0.065449846949787352  0.049087385212340517
-     </DataItem>
-    </Geometry>
-    <Attribute Name="veloref_0_X" AttributeType="Scalar" Center="Node">
-     <DataItem Dimensions="102  96  128 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
-      penal3d_multi_00000.h5:/veloref_0_X
-     </DataItem>
-    </Attribute>
-    <Attribute Name="veloref_0_Y" AttributeType="Scalar" Center="Node">
-     <DataItem Dimensions="102  96  128 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
-      penal3d_multi_00000.h5:/veloref_0_Y
-     </DataItem>
-    </Attribute>
-    <Attribute Name="veloref_0_Z" AttributeType="Scalar" Center="Node">
-     <DataItem Dimensions="102  96  128 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
-      penal3d_multi_00000.h5:/veloref_0_Z
-     </DataItem>
-    </Attribute>
-    <Attribute Name="scalref_0_X" AttributeType="Scalar" Center="Node">
-     <DataItem Dimensions="102  96  128 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
-      penal3d_multi_00000.h5:/scalref_0_X
-     </DataItem>
-    </Attribute>
-   </Grid>
-  </Grid>
- </Domain>
-</Xdmf>
diff --git a/hysop/old/operator.old/tests/ref_files/penal3d_multi_00000.h5 b/hysop/old/operator.old/tests/ref_files/penal3d_multi_00000.h5
deleted file mode 100644
index 249a531ef8c70b64a191e846f9ff8fe24e533936..0000000000000000000000000000000000000000
Binary files a/hysop/old/operator.old/tests/ref_files/penal3d_multi_00000.h5 and /dev/null differ
diff --git a/hysop/old/operator.old/tests/ref_files/penal3d_porous.xmf b/hysop/old/operator.old/tests/ref_files/penal3d_porous.xmf
deleted file mode 100644
index bae3302e2e331bb8939024cbbe1a29a34c991897..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/tests/ref_files/penal3d_porous.xmf
+++ /dev/null
@@ -1,40 +0,0 @@
-<?xml version="1.0" ?>
-<!DOCTYPE Xdmf SYSTEM "Xdmf.dtd">
-<Xdmf Version="2.0">
- <Domain>
-  <Grid Name="CellTime" GridType="Collection" CollectionType="Temporal">
-   <Grid Name="Iteration 000" GridType="Uniform">
-    <Time Value="0.0" />
-    <Topology TopologyType="3DCORECTMesh" NumberOfElements="102  96  128 "/>
-    <Geometry GeometryType="ORIGIN_DXDYDZ">
-     <DataItem Dimensions="3 " NumberType="Float" Precision="8" Format="XML">
-     0.5  -0.29999999999999999  0.10000000000000001
-     </DataItem>
-     <DataItem Dimensions="3 " NumberType="Float" Precision="8" Format="XML">
-     0.061599855952741041  0.065449846949787352  0.049087385212340517
-     </DataItem>
-    </Geometry>
-    <Attribute Name="veloref_0_X" AttributeType="Scalar" Center="Node">
-     <DataItem Dimensions="102  96  128 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
-      penal3d_porous_00000.h5:/veloref_0_X
-     </DataItem>
-    </Attribute>
-    <Attribute Name="veloref_0_Y" AttributeType="Scalar" Center="Node">
-     <DataItem Dimensions="102  96  128 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
-      penal3d_porous_00000.h5:/veloref_0_Y
-     </DataItem>
-    </Attribute>
-    <Attribute Name="veloref_0_Z" AttributeType="Scalar" Center="Node">
-     <DataItem Dimensions="102  96  128 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
-      penal3d_porous_00000.h5:/veloref_0_Z
-     </DataItem>
-    </Attribute>
-    <Attribute Name="scalref_0_X" AttributeType="Scalar" Center="Node">
-     <DataItem Dimensions="102  96  128 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
-      penal3d_porous_00000.h5:/scalref_0_X
-     </DataItem>
-    </Attribute>
-   </Grid>
-  </Grid>
- </Domain>
-</Xdmf>
diff --git a/hysop/old/operator.old/tests/ref_files/penal3d_porous_00000.h5 b/hysop/old/operator.old/tests/ref_files/penal3d_porous_00000.h5
deleted file mode 100644
index 0fd9ac23ef919069b30d7710646afd5ecb2f9023..0000000000000000000000000000000000000000
Binary files a/hysop/old/operator.old/tests/ref_files/penal3d_porous_00000.h5 and /dev/null differ
diff --git a/hysop/old/operator.old/tests/ref_files/penal3d_porous_cyl.xmf b/hysop/old/operator.old/tests/ref_files/penal3d_porous_cyl.xmf
deleted file mode 100644
index c32d3ee56c94cf5eee2749760bc05cf0d93ce834..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/tests/ref_files/penal3d_porous_cyl.xmf
+++ /dev/null
@@ -1,40 +0,0 @@
-<?xml version="1.0" ?>
-<!DOCTYPE Xdmf SYSTEM "Xdmf.dtd">
-<Xdmf Version="2.0">
- <Domain>
-  <Grid Name="CellTime" GridType="Collection" CollectionType="Temporal">
-   <Grid Name="Iteration 000" GridType="Uniform">
-    <Time Value="0.0" />
-    <Topology TopologyType="3DCORECTMesh" NumberOfElements="102  96  128 "/>
-    <Geometry GeometryType="ORIGIN_DXDYDZ">
-     <DataItem Dimensions="3 " NumberType="Float" Precision="8" Format="XML">
-     0.5  -0.29999999999999999  0.10000000000000001
-     </DataItem>
-     <DataItem Dimensions="3 " NumberType="Float" Precision="8" Format="XML">
-     0.061599855952741041  0.065449846949787352  0.049087385212340517
-     </DataItem>
-    </Geometry>
-    <Attribute Name="veloref_0_X" AttributeType="Scalar" Center="Node">
-     <DataItem Dimensions="102  96  128 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
-      penal3d_porous_cyl_00000.h5:/veloref_0_X
-     </DataItem>
-    </Attribute>
-    <Attribute Name="veloref_0_Y" AttributeType="Scalar" Center="Node">
-     <DataItem Dimensions="102  96  128 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
-      penal3d_porous_cyl_00000.h5:/veloref_0_Y
-     </DataItem>
-    </Attribute>
-    <Attribute Name="veloref_0_Z" AttributeType="Scalar" Center="Node">
-     <DataItem Dimensions="102  96  128 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
-      penal3d_porous_cyl_00000.h5:/veloref_0_Z
-     </DataItem>
-    </Attribute>
-    <Attribute Name="scalref_0_X" AttributeType="Scalar" Center="Node">
-     <DataItem Dimensions="102  96  128 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
-      penal3d_porous_cyl_00000.h5:/scalref_0_X
-     </DataItem>
-    </Attribute>
-   </Grid>
-  </Grid>
- </Domain>
-</Xdmf>
diff --git a/hysop/old/operator.old/tests/ref_files/penal3d_porous_cyl_00000.h5 b/hysop/old/operator.old/tests/ref_files/penal3d_porous_cyl_00000.h5
deleted file mode 100644
index 871a8204aafd4d10c8eddb61b6f38640be0fbcb1..0000000000000000000000000000000000000000
Binary files a/hysop/old/operator.old/tests/ref_files/penal3d_porous_cyl_00000.h5 and /dev/null differ
diff --git a/hysop/old/operator.old/tests/ref_files/penal3d_sphere.xmf b/hysop/old/operator.old/tests/ref_files/penal3d_sphere.xmf
deleted file mode 100644
index 04892782d23c0882dabca9cf5a37ae8f16845716..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/tests/ref_files/penal3d_sphere.xmf
+++ /dev/null
@@ -1,40 +0,0 @@
-<?xml version="1.0" ?>
-<!DOCTYPE Xdmf SYSTEM "Xdmf.dtd">
-<Xdmf Version="2.0">
- <Domain>
-  <Grid Name="CellTime" GridType="Collection" CollectionType="Temporal">
-   <Grid Name="Iteration 000" GridType="Uniform">
-    <Time Value="0.0" />
-    <Topology TopologyType="3DCORECTMesh" NumberOfElements="102  96  128 "/>
-    <Geometry GeometryType="ORIGIN_DXDYDZ">
-     <DataItem Dimensions="3 " NumberType="Float" Precision="8" Format="XML">
-     0.5  -0.29999999999999999  0.10000000000000001
-     </DataItem>
-     <DataItem Dimensions="3 " NumberType="Float" Precision="8" Format="XML">
-     0.061599855952741041  0.065449846949787352  0.049087385212340517
-     </DataItem>
-    </Geometry>
-    <Attribute Name="veloref_0_X" AttributeType="Scalar" Center="Node">
-     <DataItem Dimensions="102  96  128 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
-      penal3d_sphere_00000.h5:/veloref_0_X
-     </DataItem>
-    </Attribute>
-    <Attribute Name="veloref_0_Y" AttributeType="Scalar" Center="Node">
-     <DataItem Dimensions="102  96  128 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
-      penal3d_sphere_00000.h5:/veloref_0_Y
-     </DataItem>
-    </Attribute>
-    <Attribute Name="veloref_0_Z" AttributeType="Scalar" Center="Node">
-     <DataItem Dimensions="102  96  128 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
-      penal3d_sphere_00000.h5:/veloref_0_Z
-     </DataItem>
-    </Attribute>
-    <Attribute Name="scalref_0_X" AttributeType="Scalar" Center="Node">
-     <DataItem Dimensions="102  96  128 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
-      penal3d_sphere_00000.h5:/scalref_0_X
-     </DataItem>
-    </Attribute>
-   </Grid>
-  </Grid>
- </Domain>
-</Xdmf>
diff --git a/hysop/old/operator.old/tests/ref_files/penal3d_sphere_00000.h5 b/hysop/old/operator.old/tests/ref_files/penal3d_sphere_00000.h5
deleted file mode 100644
index a04870cc22c740419699ee5b49480d509c0a0045..0000000000000000000000000000000000000000
Binary files a/hysop/old/operator.old/tests/ref_files/penal3d_sphere_00000.h5 and /dev/null differ
diff --git a/hysop/old/operator.old/tests/ref_files/penal_vort_2d_multi_sphere_00000.h5 b/hysop/old/operator.old/tests/ref_files/penal_vort_2d_multi_sphere_00000.h5
deleted file mode 100644
index b055c6734f9bea165cceae61ea2f92cf90334295..0000000000000000000000000000000000000000
Binary files a/hysop/old/operator.old/tests/ref_files/penal_vort_2d_multi_sphere_00000.h5 and /dev/null differ
diff --git a/hysop/old/operator.old/tests/ref_files/penal_vort_2d_sphere.xmf b/hysop/old/operator.old/tests/ref_files/penal_vort_2d_sphere.xmf
deleted file mode 100644
index d83c62f3ac5bd1139e7f1c04fe2a1597f1993337..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/tests/ref_files/penal_vort_2d_sphere.xmf
+++ /dev/null
@@ -1,35 +0,0 @@
-<?xml version="1.0" ?>
-<!DOCTYPE Xdmf SYSTEM "Xdmf.dtd">
-<Xdmf Version="2.0">
- <Domain>
-  <Grid Name="CellTime" GridType="Collection" CollectionType="Temporal">
-   <Grid Name="Iteration 000" GridType="Uniform">
-    <Time Value="0.0" />
-    <Topology TopologyType="2DCORECTMesh" NumberOfElements="96  128 "/>
-    <Geometry GeometryType="ORIGIN_DXDY">
-     <DataItem Dimensions="2 " NumberType="Float" Precision="8" Format="XML">
-     -0.29999999999999999  0.10000000000000001
-     </DataItem>
-     <DataItem Dimensions="2 " NumberType="Float" Precision="8" Format="XML">
-     0.065449846949787352  0.049087385212340517
-     </DataItem>
-    </Geometry>
-    <Attribute Name="veloref_0_X" AttributeType="Scalar" Center="Node">
-     <DataItem Dimensions="96  128 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
-      penal_vort_2d_sphere_00000.h5:/veloref_0_X
-     </DataItem>
-    </Attribute>
-    <Attribute Name="veloref_0_Y" AttributeType="Scalar" Center="Node">
-     <DataItem Dimensions="96  128 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
-      penal_vort_2d_sphere_00000.h5:/veloref_0_Y
-     </DataItem>
-    </Attribute>
-    <Attribute Name="vortiref_0_X" AttributeType="Scalar" Center="Node">
-     <DataItem Dimensions="96  128 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
-      penal_vort_2d_sphere_00000.h5:/vortiref_0_X
-     </DataItem>
-    </Attribute>
-   </Grid>
-  </Grid>
- </Domain>
-</Xdmf>
diff --git a/hysop/old/operator.old/tests/ref_files/penal_vort_2d_sphere_00000.h5 b/hysop/old/operator.old/tests/ref_files/penal_vort_2d_sphere_00000.h5
deleted file mode 100644
index cadc7f3af33b45fbe961ec8b78c1262c471ee3fc..0000000000000000000000000000000000000000
Binary files a/hysop/old/operator.old/tests/ref_files/penal_vort_2d_sphere_00000.h5 and /dev/null differ
diff --git a/hysop/old/operator.old/tests/ref_files/penal_vort_3d_multi_sphere_00000.h5 b/hysop/old/operator.old/tests/ref_files/penal_vort_3d_multi_sphere_00000.h5
deleted file mode 100644
index 9e6880cd9079c8b543bb775ad0934e1fd03c83bb..0000000000000000000000000000000000000000
Binary files a/hysop/old/operator.old/tests/ref_files/penal_vort_3d_multi_sphere_00000.h5 and /dev/null differ
diff --git a/hysop/old/operator.old/tests/ref_files/penal_vort_3d_sphere.xmf b/hysop/old/operator.old/tests/ref_files/penal_vort_3d_sphere.xmf
deleted file mode 100644
index 68ceca679e0ac00319b0fad514e0f7e7cbc0e0b6..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/tests/ref_files/penal_vort_3d_sphere.xmf
+++ /dev/null
@@ -1,50 +0,0 @@
-<?xml version="1.0" ?>
-<!DOCTYPE Xdmf SYSTEM "Xdmf.dtd">
-<Xdmf Version="2.0">
- <Domain>
-  <Grid Name="CellTime" GridType="Collection" CollectionType="Temporal">
-   <Grid Name="Iteration 000" GridType="Uniform">
-    <Time Value="0.0" />
-    <Topology TopologyType="3DCORECTMesh" NumberOfElements="102  96  128 "/>
-    <Geometry GeometryType="ORIGIN_DXDYDZ">
-     <DataItem Dimensions="3 " NumberType="Float" Precision="8" Format="XML">
-     0.5  -0.29999999999999999  0.10000000000000001
-     </DataItem>
-     <DataItem Dimensions="3 " NumberType="Float" Precision="8" Format="XML">
-     0.061599855952741041  0.065449846949787352  0.049087385212340517
-     </DataItem>
-    </Geometry>
-    <Attribute Name="veloref_0_X" AttributeType="Scalar" Center="Node">
-     <DataItem Dimensions="102  96  128 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
-      penal_vort_3d_sphere_00000.h5:/veloref_0_X
-     </DataItem>
-    </Attribute>
-    <Attribute Name="veloref_0_Y" AttributeType="Scalar" Center="Node">
-     <DataItem Dimensions="102  96  128 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
-      penal_vort_3d_sphere_00000.h5:/veloref_0_Y
-     </DataItem>
-    </Attribute>
-    <Attribute Name="veloref_0_Z" AttributeType="Scalar" Center="Node">
-     <DataItem Dimensions="102  96  128 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
-      penal_vort_3d_sphere_00000.h5:/veloref_0_Z
-     </DataItem>
-    </Attribute>
-    <Attribute Name="vortiref_0_Z" AttributeType="Scalar" Center="Node">
-     <DataItem Dimensions="102  96  128 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
-      penal_vort_3d_sphere_00000.h5:/vortiref_0_Z
-     </DataItem>
-    </Attribute>
-    <Attribute Name="vortiref_0_Y" AttributeType="Scalar" Center="Node">
-     <DataItem Dimensions="102  96  128 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
-      penal_vort_3d_sphere_00000.h5:/vortiref_0_Y
-     </DataItem>
-    </Attribute>
-    <Attribute Name="vortiref_0_X" AttributeType="Scalar" Center="Node">
-     <DataItem Dimensions="102  96  128 " NumberType="Float" Precision="8" Format="HDF" Compression="Raw">
-      penal_vort_3d_sphere_00000.h5:/vortiref_0_X
-     </DataItem>
-    </Attribute>
-   </Grid>
-  </Grid>
- </Domain>
-</Xdmf>
diff --git a/hysop/old/operator.old/tests/ref_files/penal_vort_3d_sphere_00000.h5 b/hysop/old/operator.old/tests/ref_files/penal_vort_3d_sphere_00000.h5
deleted file mode 100644
index edff96ef1e134e020f10eb66a9dcaafb775e17a9..0000000000000000000000000000000000000000
Binary files a/hysop/old/operator.old/tests/ref_files/penal_vort_3d_sphere_00000.h5 and /dev/null differ
diff --git a/hysop/old/operator.old/tests/test_absorption_bc.py b/hysop/old/operator.old/tests/test_absorption_bc.py
deleted file mode 100644
index 8f97dd6c244ee6f982e361783404a2fa5724e12f..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/tests/test_absorption_bc.py
+++ /dev/null
@@ -1,123 +0,0 @@
-"""Test 'boundaries absorption' operator
-"""
-from hysop.operator.absorption_bc import AbsorptionBC
-from hysop import Box, Discretization, Field, VariableParameter,\
-    Simulation
-import numpy as np
-from hysop.domain.subsets import SubBox
-from hysop.operator.analytic import Analytic
-
-
-d2d = Discretization([33, ] * 2)
-d3d = Discretization([33, ] * 3)
-
-uinf = 10.
-
-
-def init_test(dim, discr, ext_filter, filter_func=None):
-    """Create fields and operator
-    """
-    dom = Box([1., ] * dim)
-    #topo = dom.create_topology(discr)
-    v = Field(dom, name='velo', is_vector=True)
-    w = Field(dom, name='vorti', is_vector=True)
-    wref = Field(v.domain, name='wref', is_vector=True)
-
-    topo = dom.create_topology(discr)
-    # Build and apply absorption operator on w
-    flowrate = VariableParameter(data=uinf)
-    x_range = [0.9, 1.]
-    op = AbsorptionBC(v, w, flowrate, x_range,
-                      discretization=discr, filter_func=filter_func)
-    op.discretize()
-    op.setup()
-    wk_prop = op.get_work_properties()['rwork']
-    dop = op.discrete_op
-    assert np.prod(dop.absorption_box.mesh[topo].resolution) == wk_prop[0]
-
-    orig = [0.9, 0., 0.]
-    ll = [0.1, 1., 1.]
-    boxend = SubBox(origin=orig, length=ll, parent=dom)
-    ll = [0.9, 1., 1.]
-    boxend.discretize(topo)
-    v.randomize(topo)
-    vd = v.discretize(topo)
-    w.randomize(topo)
-    wref.copy(w, topo)
-    wdref = wref.discretize(topo)
-    wd = w.discretize(topo)
-
-    for d in xrange(dom.dimension):
-        assert np.allclose(wdref[d], wd[d])
-    op.apply()
-    # Apply filter on wref
-    if dop.absorption_box.on_proc[topo]:
-        ext_filter(wdref, vd, boxend, topo)
-    for d in xrange(dom.dimension):
-        assert np.allclose(wdref[d], wd[d])
-
-
-def apply_filter_default(res, velo, subbox, topo):
-    """Default values for the filter in the absorption box
-    """
-    xcend = subbox.mesh[topo].coords[0]
-    iend = subbox.ind[topo][0]
-    xb = xcend[0]
-    xe = xcend[-1]
-    xc = xb + (xe - xb) / 2.0
-    eps = 10.
-    form = np.tanh(eps * (xcend - xc))
-    absorption_filter = form - np.tanh(eps * (xe - xc))
-    coeff = 1.0 / (np.tanh(eps * (xb - xc)) - np.tanh(eps * (xe - xc)))
-    absorption_filter *= coeff
-    res[0][iend] *= absorption_filter
-    res[1][iend] *= absorption_filter
-    res[2][iend] *= absorption_filter
-    dfilter = eps * (1.0 - form ** 2)
-    dfilter *= coeff
-    res[1][iend] = res[1][iend] + (uinf - velo[2][iend]) * dfilter
-    res[2][iend] = res[2][iend] - (uinf - velo[1][iend]) * dfilter
-    return res
-
-
-def apply_user_filter(res, velo, subbox, topo):
-    """Linear filter
-    """
-    xcend = subbox.mesh[topo].coords[0]
-    iend = subbox.ind[topo][0]
-    xb = xcend[0]
-    xe = xcend[-1]
-    absorption_filter = xcend / (xb - xe) - 1. / (xb - xe)
-    res[0][iend] *= absorption_filter
-    res[1][iend] *= absorption_filter
-    res[2][iend] *= absorption_filter
-    dfilter = 1. / (xb - xe)
-    res[1][iend] = res[1][iend] + (uinf - velo[2][iend]) * dfilter
-    res[2][iend] = res[2][iend] - (uinf - velo[1][iend]) * dfilter
-    return res
-
-
-def test_default_filter_3d():
-    """Filter using default internal
-    filter
-    """
-    init_test(3, d3d, apply_filter_default)
-
-
-def user_filter(x):
-    """Linear filter, just for test"""
-    xb = x[0]
-    xe = x[-1]
-    return x / (xb - xe) - 1. / (xb - xe)
-
-
-def user_diff_filter(x):
-    """Linear filter, just for test"""
-    return 1. / (x[0] - x[-1])
-
-
-def test_user_filter_3d():
-    """Filter using external filter
-    """
-    filter_f = [user_filter, user_diff_filter]
-    init_test(3, d3d, apply_user_filter, filter_f)
diff --git a/hysop/old/operator.old/tests/test_adaptive_time_step.py b/hysop/old/operator.old/tests/test_adaptive_time_step.py
deleted file mode 100755
index 7c3820b5737d42679e77317e51d7417ed6dc69a1..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/tests/test_adaptive_time_step.py
+++ /dev/null
@@ -1,328 +0,0 @@
-# -*- coding: utf-8 -*-
-from hysop import Field, Box
-from hysop.operator.adapt_timestep import AdaptiveTimeStep
-from hysop.problem.simulation import Simulation
-from hysop.tools.parameters import Discretization
-from hysop.core.mpi import main_comm, main_size
-import numpy as np
-from hysop.tools.numpywrappers import npw
-from hysop.tools.misc import WorkSpaceTools
-from hysop.numerics.tests.test_differential_operations import compute_vel,\
-    compute_vort, diag_grad_func_3d, compute_max, grad_v_func_3d, strain_f_3d
-from math import pi
-from hysop.methods import TimeIntegrator
-
-sin = np.sin
-cos = np.cos
-ldom = npw.asrealarray([pi * 2., 4. * pi, 2. * pi])
-xdom = [0., 0., 0.]
-
-d3d = Discretization([65, 123, 33], [2, 2, 2])
-
-
-def init(criteria=None, work=False):
-    """build fields and simu"""
-    box = Box(length=ldom, origin=xdom)
-    velo = Field(domain=box, formula=compute_vel,
-                 name='Velocity', is_vector=True)
-    vorti = Field(domain=box, formula=compute_vort,
-                  name='Vorticity', is_vector=True)
-    simu = Simulation(nb_iter=4)
-    simu.initialize()
-    assert simu.current_iteration == 0
-    op = AdaptiveTimeStep(velo, vorti, simulation=simu,
-                          criteria=criteria,
-                          discretization=d3d, lcfl=0.125, cfl=0.5,
-                          time_range=[0, 4])
-    op.discretize()
-    rwork = None
-    if work is True:
-        wkp = op.get_work_properties()
-        lwork = len(wkp['rwork'])
-        subshape = wkp['rwork'][0]
-        rwork = WorkSpaceTools.check_work_array(lwork, subshape)
-    op.setup(rwork=rwork)
-    vorti.initialize(time=simu.time, topo=op.discrete_op.vorticity.topology)
-    velo.initialize(time=simu.time, topo=op.discrete_op.vorticity.topology)
-    op.apply()
-    op.wait()
-    return simu, op
-
-
-def compute_dt_ref(topo, simu, op):
-    """Compute some reference values for dt for
-    each criteria
-    """
-    ref = Field(domain=topo.domain, formula=grad_v_func_3d,
-                name='Analytical', nb_components=topo.domain.dim ** 2)
-    rd = ref.discretize(topo)
-    ref.initialize(time=simu.time, topo=topo)
-    ref2 = Field(domain=topo.domain, formula=strain_f_3d,
-                 name='Analytical',
-                 nb_components=topo.domain.dim *
-                 (topo.domain.dim + 1) / 2)
-    rd2 = ref2.discretize(topo)
-    ref2.initialize(time=simu.time, topo=topo)
-    dd = topo.domain.dim
-    ll = [rd[i * (dd + 1)] for i in xrange(dd)]
-    ic = topo.mesh.compute_index
-    refmax_adv = refmax_str = refmax_strain = 0.
-    ll2 = [None, ] * dd
-    ll2[0] = [rd2[0], rd2[3], rd2[4]]
-    ll2[1] = [rd2[1], rd2[3], rd2[5]]
-    ll2[2] = [rd2[2], rd2[4], rd2[5]]
-    for d in xrange(dd):
-        refmax_adv = max(compute_max([ll[d]], ic), refmax_adv)
-        refmax_str = max(compute_max(rd[d * dd: (d + 1) * dd], ic), refmax_str)
-        refmax_strain = max(compute_max(ll2[d], ic), refmax_strain)
-    dt = {}
-    dt['gradU'] = op.lcfl / refmax_adv
-    dt['stretch'] = op.method[TimeIntegrator].stability_coeff() / refmax_str
-    refmax = max([np.max(np.abs(v[ic]))
-                  for v in op.velocity.discretize(topo).data])
-    dt['cfl'] = op.cfl * topo.mesh.space_step[0] / refmax
-    dt['strain'] = op.lcfl / refmax_strain
-    refmax = max([np.max(np.abs(w[ic]))
-                  for w in op.vorticity.discretize(topo).data])
-    dt['vort'] = op.lcfl / refmax
-    return dt
-
-
-def test_adapt_vort():
-    """
-    Test 'vort' criteria (default config)
-    """
-    simu, op = init()
-    assert 'vort' in op.criteria
-    dt0 = simu.time_step
-    topo = op.discrete_op.vorticity.topology
-    wd = op.vorticity.discretize(topo)
-    ic = topo.mesh.compute_index
-    ref = max([np.max(np.abs(w[ic])) for w in wd.data])
-    assert np.allclose(op.diagnostics()[2], op.lcfl / ref)
-    assert np.allclose(simu.time_step, min(dt0, op.diagnostics()[2]))
-
-
-def test_adapt_fail():
-    """ Check wrong crit. value
-    """
-    fail = False
-    try:
-        init(['vorti', 'stretch'])
-    except:
-        fail = True
-    assert fail
-
-
-def test_adapt_gradu():
-    """
-    Test 'gradu' criteria
-    """
-    simu, op = init(['gradU'])
-    dt0 = simu.time_step
-    topo = op.discrete_op.velocity.topology
-    ref = Field(domain=topo.domain, formula=diag_grad_func_3d,
-                name='Analytical', is_vector=True)
-    rd = ref.discretize(topo)
-    ref.initialize(time=simu.time, topo=topo)
-    dd = topo.domain.dim
-    refmax = max([compute_max([rd[d]], topo.mesh.compute_index)
-                  for d in xrange(dd)])
-    assert np.allclose(op.diagnostics()[3], op.lcfl / refmax,
-                       rtol=topo.mesh.space_step.min())
-    assert np.allclose(simu.time_step, min(dt0, op.diagnostics()[3]))
-
-
-def test_adapt_strain():
-    """
-    Test 'strain' criteria
-    """
-    simu, op = init(['strain'])
-    dt0 = simu.time_step
-    topo = op.discrete_op.velocity.topology
-    resdim = topo.domain.dim * (topo.domain.dim + 1) / 2
-    ref = Field(domain=topo.domain, formula=strain_f_3d,
-                name='Analytical', nb_components=resdim)
-    rd = ref.discretize(topo)
-    ref.initialize(time=simu.time, topo=topo)
-    dd = topo.domain.dim
-    ll = [None, ] * dd
-    ll[0] = [rd[0], rd[3], rd[4]]
-    ll[1] = [rd[1], rd[3], rd[5]]
-    ll[2] = [rd[2], rd[4], rd[5]]
-    ic = topo.mesh.compute_index
-    refmax = 0.
-    for d in xrange(dd):
-        refmax = max(compute_max(ll[d], ic), refmax)
-    dt = op.lcfl / refmax
-    assert np.allclose(op.diagnostics()[4], dt,
-                       rtol=topo.mesh.space_step.min())
-    assert np.allclose(simu.time_step, min(dt0, op.diagnostics()[4]))
-
-
-def test_adapt_stretch():
-    """
-    Test 'stretch' criteria
-    """
-    simu, op = init(['stretch'])
-    dt0 = simu.time_step
-    topo = op.discrete_op.velocity.topology
-    ref = Field(domain=topo.domain, formula=grad_v_func_3d,
-                name='Analytical', nb_components=topo.domain.dim ** 2)
-    rd = ref.discretize(topo)
-    ref.initialize(time=simu.time, topo=topo)
-    dd = topo.domain.dim
-    refmax = 0.
-    ic = topo.mesh.compute_index
-    for d in xrange(dd):
-        pos = d * dd
-        refmax = max(compute_max(rd[pos: pos + dd], ic), refmax)
-    cs = op.method[TimeIntegrator].stability_coeff()
-    dt = cs / refmax
-    assert np.allclose(op.diagnostics()[5], dt,
-                       rtol=topo.mesh.space_step.min())
-    assert np.allclose(simu.time_step, min(dt0, op.diagnostics()[5]))
-
-
-def test_adapt_cfl():
-    """
-    Test 'stretch' criteria
-    """
-    simu, op = init(['cfl'])
-    dt0 = simu.time_step
-    topo = op.discrete_op.velocity.topology
-    dt0 = simu.time_step
-    vd = op.velocity.discretize(topo)
-    ic = topo.mesh.compute_index
-    ref = max([np.max(np.abs(v[ic])) for v in vd.data])
-    dt = op.cfl * topo.mesh.space_step[0] / ref
-    assert np.allclose(op.diagnostics()[6], dt)
-    assert np.allclose(simu.time_step, min(dt0, op.diagnostics()[6]))
-
-
-def test_adapt_multi_crit():
-    """
-    Test combination of 3 criteria ('grad_u', 'stretch', 'cfl')
-    """
-    criteria = ['gradU', 'stretch', 'cfl']
-    simu, op = init(criteria)
-    topo = op.discrete_op.velocity.topology
-    dtref = compute_dt_ref(topo, simu, op)
-    dt0 = simu.time_step
-
-    assert np.allclose(op.diagnostics()[3], dtref['gradU'],
-                       rtol=topo.mesh.space_step.min())
-    assert np.allclose(op.diagnostics()[5], dtref['stretch'],
-                       rtol=topo.mesh.space_step.min())
-    assert np.allclose(op.diagnostics()[6], dtref['cfl'],
-                       rtol=topo.mesh.space_step.min())
-    dtref_min = min([dtref[c] for c in criteria])
-    assert np.allclose(op.diagnostics()[0], simu.time)
-    assert np.allclose(op.diagnostics()[1], simu.time_step)
-    assert simu.time_step <= op.maxdt
-    assert np.allclose(simu.time_step, min(dt0, dtref_min))
-
-
-def test_adapt_all_crit():
-    """
-    Test combination of all criteria
-    """
-    criteria = ['gradU', 'stretch', 'cfl', 'strain', 'vort']
-    simu, op = init(criteria)
-    topo = op.discrete_op.velocity.topology
-    dtref = compute_dt_ref(topo, simu, op)
-    dt0 = simu.time_step
-
-    assert np.allclose(op.diagnostics()[0], simu.time)
-    assert np.allclose(op.diagnostics()[1], simu.time_step)
-    assert simu.time_step <= op.maxdt
-    assert np.allclose(op.diagnostics()[2], dtref['vort'],
-                       rtol=topo.mesh.space_step.min())
-    assert np.allclose(op.diagnostics()[3], dtref['gradU'],
-                       rtol=topo.mesh.space_step.min())
-    assert np.allclose(op.diagnostics()[4], dtref['strain'],
-                       rtol=topo.mesh.space_step.min())
-    assert np.allclose(op.diagnostics()[5], dtref['stretch'],
-                       rtol=topo.mesh.space_step.min())
-    assert np.allclose(op.diagnostics()[6], dtref['cfl'],
-                       rtol=topo.mesh.space_step.min())
-    dtref_min = min([dtref[c] for c in criteria])
-    assert np.allclose(op.diagnostics()[0], simu.time)
-    assert np.allclose(op.diagnostics()[1], simu.time_step)
-    assert simu.time_step <= op.maxdt
-    assert np.allclose(simu.time_step, min(dt0, dtref_min))
-
-
-def test_adapt_all_crit_with_work():
-    """
-    Test combination of all criteria
-    """
-    criteria = ['gradU', 'stretch', 'cfl', 'strain', 'vort']
-    simu, op = init(criteria, work=True)
-    topo = op.discrete_op.velocity.topology
-    dtref = compute_dt_ref(topo, simu, op)
-    dt0 = simu.time_step
-
-    assert np.allclose(op.diagnostics()[0], simu.time)
-    assert np.allclose(op.diagnostics()[1], simu.time_step)
-    assert simu.time_step <= op.maxdt
-    assert np.allclose(op.diagnostics()[2], dtref['vort'],
-                       rtol=topo.mesh.space_step.min())
-    assert np.allclose(op.diagnostics()[3], dtref['gradU'],
-                       rtol=topo.mesh.space_step.min())
-    assert np.allclose(op.diagnostics()[4], dtref['strain'],
-                       rtol=topo.mesh.space_step.min())
-    assert np.allclose(op.diagnostics()[5], dtref['stretch'],
-                       rtol=topo.mesh.space_step.min())
-    assert np.allclose(op.diagnostics()[6], dtref['cfl'],
-                       rtol=topo.mesh.space_step.min())
-    dtref_min = min([dtref[c] for c in criteria])
-    assert np.allclose(op.diagnostics()[0], simu.time)
-    assert np.allclose(op.diagnostics()[1], simu.time_step)
-    assert simu.time_step <= op.maxdt
-    assert np.allclose(simu.time_step, min(dt0, dtref_min))
-
-
-def test_adapt_multi_tasks():
-    """
-    Multi mpi tasks version
-    """
-    # MPI procs are distributed among two tasks
-    GPU = 4
-    CPU = 1
-    VISU = 12
-    proc_tasks = [CPU, ] * main_size
-
-    if main_size > 4:
-        proc_tasks[-1] = GPU
-        proc_tasks[2] = GPU
-        proc_tasks[1] = VISU
-
-    dom = Box(dimension=3, proc_tasks=proc_tasks)
-    velo = Field(domain=dom, formula=compute_vel,
-                 name='Velocity', is_vector=True)
-    vorti = Field(domain=dom, formula=compute_vort,
-                  name='Vorticity', is_vector=True)
-
-    from hysop.tools.parameters import MPIParams
-    cpu_task = MPIParams(comm=dom.task_comm, task_id=CPU)
-    simu = Simulation(nb_iter=4)
-    op = AdaptiveTimeStep(velo, vorti, simulation=simu, io_params=True,
-                          discretization=d3d, lcfl=0.125, cfl=0.5,
-                          mpi_params=cpu_task)
-    simu.initialize()
-    if dom.is_on_task(CPU):
-        op.discretize()
-        op.setup()
-        vorti.initialize()
-
-    while not simu.is_over:
-        if dom.is_on_task(CPU):
-            op.apply()
-        op.wait()
-        simu.advance()
-        refval = 0
-        if dom.is_on_task(CPU):
-            refval = simu.time_step
-        refval = main_comm.bcast(refval, root=0)
-        assert refval == simu.time_step
diff --git a/hysop/old/operator.old/tests/test_advec_scales.py b/hysop/old/operator.old/tests/test_advec_scales.py
deleted file mode 100755
index 56f8cf91b93ac3341c0c3bbe0e3df551c5138d1e..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/tests/test_advec_scales.py
+++ /dev/null
@@ -1,218 +0,0 @@
-"""Testing Scales advection operator.
-"""
-from __future__ import print_function
-import numpy as np
-from hysop.methods import TimeIntegrator, Interpolation,\
-    Remesh, Support, Splitting
-from hysop.methods import RK2, L2_1, L4_2, M8Prime, Linear
-from hysop.domain.box import Box
-from hysop.fields.continuous_field import Field
-from hysop.operator.advection import Advection, ScalesAdvection
-from hysop.problem.simulation import O2FULLHALF
-from hysop.problem.simulation import Simulation
-from hysop.testsenv import scales_failed
-from hysop.tools.parameters import Discretization
-d3d = Discretization([17, 17, 17])
-d3d_g = Discretization([17, 17, 17], [6, 6, 6])
-
-m1 = {TimeIntegrator: RK2, Interpolation: Linear, Remesh: L2_1,
-      Splitting: O2FULLHALF, Support: ''}
-m2 = {TimeIntegrator: RK2, Interpolation: Linear, Remesh: L4_2,
-      Splitting: O2FULLHALF, Support: ''}
-m3 = {TimeIntegrator: RK2, Interpolation: Linear, Remesh: M8Prime,
-      Splitting: O2FULLHALF, Support: ''}
-
-
-def run_advection(vector_field, method, mref=None, random_velocity=False):
-    """Create advection operator, ref operator
-    and fields, run scales and python advection,
-    compare results.
-
-    Parameters
-    ----------
-    vector_field: bool
-        True to advect a vector field, else scalar field.
-    method : dictionnary
-        Set scales remeshing type.
-    mref : dictionnary, optional
-        method for pure python advection. Default = m1
-    random_velocity: boolean
-        If True, randomize velocity values, else set it to 0.
-    """
-    box = Box(length=[1., 1., 1.], origin=[0., 0., 0.])
-    scal = Field(domain=box, name='Scalar', is_vector=vector_field)
-    scal_ref = Field(domain=box, name='Scalar_ref', is_vector=vector_field)
-    velo = Field(domain=box, name='Velocity',
-                 formula=lambda x, y, z, t: (0., 0., 0.), is_vector=True,
-                 vectorize_formula=True)
-    advec = ScalesAdvection(velocity=velo,
-                            advected_fields=scal,
-                            discretization=d3d,
-                            method=method)
-    if mref is None:
-        mref = m1
-    advec_py = Advection(velocity=velo,
-                         advected_fields=scal_ref, discretization=d3d_g,
-                         method=mref)
-    advec.discretize()
-    advec_py.discretize()
-    advec.setup()
-    advec_py.setup()
-    # Get and randomize discrete fields
-    topo = advec.advected_fields_topology()
-    topo_ref = advec_py.advected_fields_topology()
-
-    if random_velocity:
-        topo_velo = advec.velocity_topology()
-        vd = velo.randomize(topo_velo)
-        for d in xrange(velo.nb_components):
-            vd[d] /= 2 * topo_velo.mesh.local_resolution[d]
-    else:
-        assert (velo.norm(topo) == 0).all()
-    ic = topo.mesh.compute_index
-    icref = topo_ref.mesh.compute_index
-    print(icref)
-    print(ic)
-    scal_d = scal.randomize(topo)
-    scal_ref_d = scal.discretize(topo_ref)
-    for d in xrange(len(scal_d.data)):
-        scal_ref_d[d][icref] = scal_d[d][ic]
-        assert np.allclose(scal_ref_d.data[d][icref], scal_d.data[d][ic])
-    advec.apply(Simulation(start=0., end=0.1, nb_iter=1))
-    advec_py.apply(Simulation(start=0., end=0.1, nb_iter=1))
-    # compare values on grid (excluding ghosts)
-    # for d in xrange(len(scal_d.data)):
-    #     assert np.allclose(scal_ref_d.data[d][icref], scal_d.data[d][ic])
-
-
-def run_advection_2(vector_field, method):
-    """Create advection operator,
-    fields, run and check results.
-
-    Parameters
-    ----------
-    vector_field: bool
-        True to advect a vector field, else scalar field.
-    method : dictionnary
-        Set scales remeshing type.
-    """
-    box = Box(length=[1., 1., 1.], origin=[0., 0., 0.])
-    scal = Field(domain=box, name='Scalar', is_vector=vector_field)
-    scal_ref = Field(domain=box, name='Scalar_ref', is_vector=vector_field)
-    velo = Field(domain=box, name='Velocity',
-                 formula=lambda x, y, z, t: (0., 0., 0.), is_vector=True,
-                 vectorize_formula=True)
-    advec = ScalesAdvection(velocity=velo,
-                            advected_fields=scal, discretization=d3d,
-                            method=method)
-    advec.discretize()
-    advec.setup()
-    # Get and randomize discrete fields
-    topo = advec.advected_fields_topology()
-    assert (velo.norm(topo) == 0).all()
-    ic = topo.mesh.compute_index
-    scal_d = scal.randomize(topo)
-    scal_ref.copy(scal, topo)
-    scal_ref_d = scal.discretize(topo)
-    for d in xrange(len(scal_d.data)):
-        assert np.allclose(scal_ref_d.data[d][ic], scal_d.data[d][ic])
-    advec.apply(Simulation(start=0., end=0.1, nb_iter=1))
-    # compare values on grid (excluding ghosts)
-    for d in xrange(len(scal_d.data)):
-        assert np.allclose(scal_ref_d.data[d][ic], scal_d.data[d][ic])
-
-
-@scales_failed
-def test_null_velocity_m4():
-    """Scalar field advection, null velocity, M4prime remesh
-    """
-    method = {Remesh: 'p_M4'}
-    run_advection_2(False, method)
-
-
-@scales_failed
-def test_null_velocity_vec_m4():
-    """Vector field advection, null velocity, M4prime remesh
-    """
-    method = {Remesh: 'p_M4'}
-    run_advection_2(True, method)
-
-
-@scales_failed
-def test_null_velocity_m6():
-    """Scalar field advection, null velocity, M6prime remesh
-    """
-    method = {Remesh: 'p_M6'}
-    run_advection_2(False, method)
-
-
-@scales_failed
-def test_null_velocity_vec_m6():
-    """Vector field advection, null velocity, M6prime remesh
-    """
-    method = {Remesh: 'p_M6'}
-    run_advection_2(True, method)
-
-
-@scales_failed
-def test_null_velocity_m8():
-    """Scalar field advection, null velocity, M6prime remesh
-    """
-    method = {Remesh: 'p_M8'}
-    run_advection_2(False, method)
-
-
-@scales_failed
-def test_null_velocity_vec_m8():
-    """Vector field advection, null velocity, M6prime remesh
-    """
-    method = {Remesh: 'p_M8'}
-    run_advection_2(True, method)
-
-
-@scales_failed
-def test_random_velocity_m4():
-    """Scalar field advection, random velocity, M4prime remesh
-    """
-    method = {Remesh: 'p_M4'}
-    run_advection(False, method, random_velocity=True)
-
-
-@scales_failed
-def test_random_velocity_vec_m4():
-    """Vector field advection, random velocity, M4prime remesh
-    """
-    method = {Remesh: 'p_M4'}
-    run_advection(True, method, random_velocity=True)
-
-
-@scales_failed
-def test_random_velocity_m6():
-    """Scalar field advection, random velocity, M6prime remesh
-    """
-    method = {Remesh: 'p_M6'}
-    run_advection(False, method, m2, random_velocity=True)
-
-
-@scales_failed
-def test_random_velocity_vec_m6():
-    """Vector field advection, random velocity, M6prime remesh
-    """
-    method = {Remesh: 'p_M6'}
-    run_advection(True, method, m2, random_velocity=True)
-
-
-@scales_failed
-def test_random_velocity_m8():
-    """Scalar field advection, random velocity, M8prime remesh
-    """
-    method = {Remesh: 'p_M8'}
-    run_advection(False, method, m3, random_velocity=True)
-
-
-@scales_failed
-def test_random_velocity_vec_m8():
-    """Vector field advection, random velocity, M8prime remesh
-    """
-    method = {Remesh: 'p_M8'}
-    run_advection(True, method, m3, random_velocity=True)
diff --git a/hysop/old/operator.old/tests/test_advection.py b/hysop/old/operator.old/tests/test_advection.py
deleted file mode 100644
index 73d766adfa1b28226815ced88ac73677da1002d6..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/tests/test_advection.py
+++ /dev/null
@@ -1,199 +0,0 @@
-"""Tests for non-gpu Advection"""
-
-from hysop.tools.parameters import Discretization
-from hysop.domain.box import Box
-from hysop.fields.continuous_field import Field
-from hysop.problem.simulation import Simulation
-from hysop.operator.advection import Advection
-from hysop.tools.numpywrappers import npw
-import numpy as np
-
-
-def find_min_work(velo, fields, topo):
-    """Get minimal space required for
-    internal buffers in advection
-    """
-    op = Advection(velocity=velo,
-                   advected_fields=fields, discretization=topo)
-    op.discretize()
-    iwork = []
-    rwork = []
-    work_prop = op.get_work_properties()
-    if work_prop['rwork'] is not None:
-        for sh in work_prop['rwork']:
-            incr_sh = np.asarray(sh)
-            incr_sh[0] += 143
-            rwork.append(npw.zeros(incr_sh))
-
-    if work_prop['iwork'] is not None:
-        for sh in work_prop['iwork']:
-            incr_sh = np.asarray(sh)
-            incr_sh[0] += 1343
-            iwork.append(npw.int_zeros(sh))
-        return iwork, rwork
-
-g = 2
-d2d = Discretization([17, 17], [g, g])
-d3d = Discretization([17, 33, 29], [g, g, g])
-d2d_2 = Discretization([25, 17], [2, 2])
-d3d_2 = Discretization([25, 33, 29], [2, 2, 2])
-box_2d = Box(length=[2., 2.], origin=[-1., -1.])
-box_3d = Box(length=[2., 4., 1.], origin=[-1., -2., 0.])
-topo_2d = box_2d.create_topology(d2d, cutdir=[False, True])
-topo_3d = box_3d.create_topology(d3d, cutdir=[False, False, True])
-topo_2d_2 = box_2d.create_topology(d2d_2, cutdir=[False, True])
-topo_3d_2 = box_3d.create_topology(d3d_2, cutdir=[False, False, True])
-velo_2d = Field(domain=box_2d, name='Velocity2D',
-                formula=lambda x, y: (0., 0.), is_vector=True)
-velo_3d = Field(domain=box_3d, name='Velocity3D',
-                formula=lambda x, y: (0., 0., 0.), is_vector=True)
-scal_2d = Field(domain=box_2d, name='Scal2D')
-scal_3d = Field(domain=box_3d, name='Scal3D')
-scal_2d_2 = Field(domain=box_2d, name='Scal2D2')
-scal_3d_2 = Field(domain=box_3d, name='Scal3D2')
-global_iwork_2d, global_rwork_2d = find_min_work(velo_2d, scal_2d, topo_2d)
-global_iwork_3d, global_rwork_3d = find_min_work(velo_3d, scal_3d, topo_3d)
-simu = Simulation(start=0., end=0.1, nb_iter=1)
-
-
-def build_advection(velo, fields, discr, discr_fields=None,
-                    rwork=None, iwork=None):
-    """declare, discretize and setup advection operator
-
-    Parameters
-    ----------
-
-    velo, fields : velocity field and list of advected fields
-    discr : chosen discretization for velocity
-    discr_fields : chosen discretization for advected_fields
-       if None == discr
-    rwork, iwork : pre-allocated work arrays
-       if None, let operator do the allocation job.
-
-    """
-    # Build and discretize advection
-    op = Advection(velocity=velo,
-                   advected_fields=fields, discretization=discr,
-                   discretization_fields=discr_fields)
-    op.discretize()
-    # checks
-    assert velo in op.input
-    assert velo not in op.output
-    if isinstance(fields, list):
-        for f in fields:
-            assert f in op.input
-            assert f in op.output
-    else:
-        assert fields in op.input
-        assert fields in op.output
-    assert len(op.discrete_op) == velo.domain.dim
-    # iwork = None
-    # rwork = None
-    # if use_work:
-    #     work_prop = op.get_work_properties()
-    #     if work_prop['rwork'] is not None:
-    #         rwork = []
-    #         for sh in work_prop['rwork']:
-    #             rwork.append(npw.zeros(sh))
-
-    #     if work_prop['iwork'] is not None:
-    #         rwork = []
-    #         for sh in work_prop['iwork']:
-    #             iwork.append(npw.zeros(sh))
-
-    #     work_prop = op.get_work_properties()
-    op.setup(rwork, iwork)
-    dop = op.discrete_op[0]
-    rw0 = dop._rwork
-    iw0 = dop._iwork
-    for i in xrange(1, velo.domain.dim):
-        dop = op.discrete_op[i]
-        rw = dop._rwork
-        iw = dop._iwork
-        assert len(rw) == len(rw0)
-        assert len(iw) == len(iw0)
-        for j in xrange(len(rw)):
-            assert npw.arrays_share_data(rw[j], rw0[j])
-        for j in xrange(len(iw)):
-            assert npw.arrays_share_data(iw[j], iw0[j])
-
-    return op
-
-
-def test_build_2d_1():
-    """single resolution build, 2d
-    """
-    build_advection(velo_2d, scal_2d, d2d)
-
-
-def test_build_2d_2():
-    """single resolution build, from topo, 2d"""
-    build_advection(velo_2d, [scal_2d, scal_2d_2], topo_2d)
-
-
-def test_build_2d_3():
-    """multi resolution build, 2d
-    """
-    build_advection(velo_2d, [scal_2d, scal_2d_2], d2d_2, d2d)
-
-
-def test_build_2d_4():
-    """multi resolution build, from topo, 2d"""
-    build_advection(velo_2d, scal_2d, topo_2d_2, topo_2d)
-
-
-def test_build_2d_5():
-    """multi resolution build, from topo, 2d, with work"""
-    op = build_advection(velo_2d, scal_2d, topo_2d_2, topo_2d,
-                         rwork=global_rwork_2d, iwork=global_iwork_2d)
-    for i in xrange(velo_2d.domain.dim):
-        dop = op.discrete_op[i]
-        rw = dop._rwork
-        iw = dop._iwork
-        for j in xrange(len(rw)):
-            assert npw.arrays_share_data(rw[j], global_rwork_2d[j])
-        for j in xrange(len(iw)):
-            assert npw.arrays_share_data(iw[j], global_iwork_2d[j])
-
-
-def test_build_3d_1():
-    """single resolution build, 3d
-    """
-    build_advection(velo_3d, scal_3d, d3d)
-
-
-def test_build_3d_2():
-    """single resolution build, from topo, 3d"""
-    build_advection(velo_3d, [scal_3d, scal_3d_2], topo_3d)
-
-
-def test_build_3d_3():
-    """multi resolution build, 3d
-    """
-    build_advection(velo_3d, [scal_3d, scal_3d_2], d3d_2, d3d)
-
-
-def test_build_3d_4():
-    """multi resolution build, from topo, 3d"""
-    build_advection(velo_3d, scal_3d, topo_3d_2, topo_3d)
-
-
-def test_build_3d_5():
-    """multi resolution build, from topo, 3d, with work"""
-    op = build_advection(velo_3d, scal_3d, topo_3d_2, topo_3d,
-                         rwork=global_rwork_3d, iwork=global_iwork_3d)
-    for i in xrange(velo_3d.domain.dim):
-        dop = op.discrete_op[i]
-        rw = dop._rwork
-        iw = dop._iwork
-        for j in xrange(len(rw)):
-            assert npw.arrays_share_data(rw[j], global_rwork_3d[j])
-        for j in xrange(len(iw)):
-            assert npw.arrays_share_data(iw[j], global_iwork_3d[j])
-
-
-def test_apply_2d():
-    """advection of a field ... at null velocity, 2d.
-    """
-    op = build_advection(velo_2d, scal_2d, d2d)
-    op.apply(simu)
diff --git a/hysop/old/operator.old/tests/test_analytic.py b/hysop/old/operator.old/tests/test_analytic.py
deleted file mode 100755
index 8c4b1e4f4336a434f6f246e6340ff8145c02b16e..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/tests/test_analytic.py
+++ /dev/null
@@ -1,162 +0,0 @@
-"""Test initialization of fields with analytic formula
-"""
-from numpy import allclose
-from hysop.domain.box import Box
-from hysop.fields.continuous_field import Field
-from hysop.operator.analytic import Analytic
-from hysop.problem.simulation import Simulation
-from hysop.tools.parameters import Discretization
-from hysop.fields.tests.func_for_tests import func_scal_1, func_scal_2, \
-    func_vec_1, func_vec_2, func_vec_3, func_vec_4, func_vec_5, func_vec_6
-d3D = Discretization([33, 33, 33])
-d2D = Discretization([33, 33])
-L2D = [1., 1.]
-origin2D = [0., 0.]
-nbc = 4
-simu = Simulation(start=0., end=0.1, nb_iter=1)
-
-
-# Non-Vectorized and vectorized formulas for a scalar
-def test_analytical_op_1():
-    box = Box()
-    caf = Field(box, formula=func_scal_1, name='caf')
-    caf2 = Field(box, name='caf2', formula=func_scal_2, vectorize_formula=True)
-    op = Analytic(variables={caf: d3D})
-    op2 = Analytic(variables={caf2: d3D})
-    op.discretize()
-    op2.discretize()
-    op.setup()
-    op2.setup()
-    topo = op.discrete_fields[caf].topology
-    coords = topo.mesh.coords
-    ref = Field(box, name='ref')
-    refd = ref.discretize(topo)
-    cafd = caf.discrete_fields[topo]
-    cafd2 = caf2.discrete_fields[topo]
-    ids = id(cafd.data[0])
-    ids2 = id(cafd2.data[0])
-    op.apply(simu)
-    op2.apply(simu)
-    refd.data = func_scal_1(refd.data, *(coords + (simu.time,)))
-    assert allclose(cafd[0], refd.data[0])
-    assert id(cafd.data[0]) == ids
-    assert allclose(cafd2[0], refd.data[0])
-    assert id(cafd2.data[0]) == ids2
-
-
-# Non-Vectorized and vectorized formulas for a vector
-def test_analytical_op_3():
-    box = Box()
-    caf = Field(box, name='caf', formula=func_vec_1, is_vector=True)
-    caf2 = Field(box, name='caf', formula=func_vec_2,
-                 vectorize_formula=True, is_vector=True)
-    op = Analytic(variables={caf: d3D})
-    op2 = Analytic(variables={caf2: d3D})
-    op.discretize()
-    op2.discretize()
-    op.setup()
-    op2.setup()
-    topo = op.discrete_fields[caf].topology
-    coords = topo.mesh.coords
-    ref = Field(box, is_vector=True, name='ref')
-    refd = ref.discretize(topo)
-    cafd = caf.discrete_fields[topo]
-    cafd2 = caf2.discrete_fields[topo]
-    ids = [0, ] * 3
-    ids2 = [0, ] * 3
-    for i in xrange(3):
-        ids[i] = id(cafd.data[i])
-        ids2[i] = id(cafd2.data[i])
-    op.apply(simu)
-    op2.apply(simu)
-    refd.data = func_vec_1(refd.data, *(coords + (simu.time,)))
-    for i in xrange(caf.nb_components):
-        assert allclose(cafd[i], refd.data[i])
-        assert id(cafd.data[i]) == ids[i]
-        assert allclose(cafd2[i], refd.data[i])
-        assert id(cafd2.data[i]) == ids2[i]
-
-
-# Non-Vectorized and vectorized formulas for a vector with extra-args
-def test_analytical_op_4():
-    box = Box()
-    caf = Field(box, formula=func_vec_3, is_vector=True, name='caf')
-    caf2 = Field(box, formula=func_vec_4, vectorize_formula=True,
-                 name='caf2', is_vector=True)
-    op = Analytic(variables={caf: d3D})
-    op2 = Analytic(variables={caf2: d3D})
-    op.discretize()
-    op2.discretize()
-    op.setup()
-    op2.setup()
-    topo = op.discrete_fields[caf].topology
-    coords = topo.mesh.coords
-    ref = Field(box, name='ref', is_vector=True)
-    refd = ref.discretize(topo)
-    cafd = caf.discrete_fields[topo]
-    cafd2 = caf2.discrete_fields[topo]
-    ids = [0, ] * 3
-    ids2 = [0, ] * 3
-    for i in xrange(3):
-        ids[i] = id(cafd.data[i])
-        ids2[i] = id(cafd2.data[i])
-    theta = 3.
-    caf.set_formula_parameters(theta)
-    caf2.set_formula_parameters(theta)
-    op.apply(simu)
-    op2.apply(simu)
-    refd.data = func_vec_3(refd.data, *(coords + (simu.time, theta)))
-    for i in xrange(caf.nb_components):
-        assert allclose(cafd[i], refd.data[i])
-        assert id(cafd.data[i]) == ids[i]
-        assert allclose(cafd2[i], refd.data[i])
-        assert id(cafd2.data[i]) == ids2[i]
-
-
-# Non-Vectorized formula for a nbc components field with extra-args
-def test_analytical_op_5():
-    box = Box()
-    caf = Field(box, formula=func_vec_5, nb_components=nbc, name='caf')
-    op = Analytic(variables={caf: d3D})
-    op.discretize()
-    op.setup()
-    topo = op.discrete_fields[caf].topology
-    coords = topo.mesh.coords
-    ref = Field(box, nb_components=nbc, name='ref')
-    refd = ref.discretize(topo)
-    cafd = caf.discrete_fields[topo]
-    ids = [0, ] * nbc
-    for i in xrange(nbc):
-        ids[i] = id(cafd.data[i])
-    theta = 3.
-    caf.set_formula_parameters(theta)
-    op.apply(simu)
-    refd.data = func_vec_5(refd.data, *(coords + (simu.time, theta)))
-    for i in xrange(caf.nb_components):
-        assert allclose(cafd[i], refd.data[i])
-        assert id(cafd.data[i]) == ids[i]
-
-
-# Non-Vectorized formula for a nbc components field in 2D, with extra-args
-def test_analytical_op_6():
-    box = Box(dimension=2, length=L2D, origin=origin2D)
-    caf = Field(box, formula=func_vec_6, nb_components=nbc, name='caf')
-    op = Analytic(variables={caf: d2D})
-    op.discretize()
-    op.setup()
-    topo = op.discrete_fields[caf].topology
-    coords = topo.mesh.coords
-    ref = Field(box, nb_components=nbc, name='ref')
-    refd = ref.discretize(topo)
-    cafd = caf.discrete_fields[topo]
-    ids = [0, ] * nbc
-    for i in xrange(nbc):
-        ids[i] = id(cafd.data[i])
-    theta = 3.
-    caf.set_formula_parameters(theta)
-    op.apply(simu)
-    refd.data = func_vec_6(refd.data, *(coords + (simu.time, theta)))
-    for i in xrange(caf.nb_components):
-        assert allclose(cafd[i], refd.data[i])
-        assert id(cafd.data[i]) == ids[i]
-
diff --git a/hysop/old/operator.old/tests/test_custom.py b/hysop/old/operator.old/tests/test_custom.py
deleted file mode 100644
index 6f92c54ff6500b5c80c762ad72d6ef948ea10f1b..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/tests/test_custom.py
+++ /dev/null
@@ -1,84 +0,0 @@
-"""Test user-defined (custom) operator
-"""
-
-from hysop.operator.custom import Custom
-from hysop import Field, Discretization, Box, Simulation, IOParams, IO
-import numpy as np
-import os
-
-d2d = Discretization([33, 65], [2, 0])
-d3d = Discretization([33, 65, 33], [2, 0, 1])
-
-
-def func3d(sim, f_in, f_out, d=None):
-    """A test function, for a 3d domain,
-    no diagnostics
-    """
-    nbc = f_in[0].nb_components
-    for d in xrange(nbc):
-        f_out[0][d] = d * f_in[0][d] + + f_in[1][0] + np.cos(sim.time)
-
-
-def func3d_with_diag(sim, f_in, f_out, diagnostics):
-    """A test function, for a 3d domain,
-    no diagnostics
-    """
-    nbc = f_in[0].nb_components
-    for d in xrange(nbc):
-        f_out[0][d] = d * f_in[0][d] + f_in[1][0] + np.cos(sim.time)
-    diagnostics[0, 0] = sim.time
-    diagnostics[0, 1] = f_out[0].data[0].min()
-    diagnostics[0, 1] = f_out[0].data[0].max()
-
-
-def init_custom(dim, discr, func, do_write=False):
-    """Test build and apply, without
-    writer.
-    """
-    dom = Box(length=[1., ] * dim)
-    v1 = Field(name='v1', is_vector=True, domain=dom)
-    v2 = Field(name='v2', is_vector=False, domain=dom)
-    v3 = Field(name='v3', is_vector=True, domain=dom)
-    topo = dom.create_topology(discr)
-    v1.randomize(topo)
-    v2.randomize(topo)
-    if do_write:
-        iop = IOParams('/tmp/hysop_custom_test.dat', fileformat=IO.ASCII)
-        d_shape = (1, 3)
-    else:
-        iop = None
-        d_shape = None
-    op = Custom(in_fields=[v1, v2], out_fields=[v3],
-                variables=[v1, v2, v3], discretization=discr,
-                function=func, io_params=iop,
-                diagnostics_shape=d_shape)
-    op.discretize()
-    op.setup()
-    sim = Simulation(nb_iter=10)
-    sim.initialize()
-    nbc = v1.nb_components
-    vd1 = v1.discretize(topo)
-    vd2 = v2.discretize(topo)
-    vd3 = v3.discretize(topo)
-    while not sim.is_over:
-        op.apply(sim)
-        for d in xrange(nbc):
-            tmp1 = d * vd1[d] + + vd2[0] + np.cos(sim.time)
-            assert np.allclose(vd3[d], tmp1)
-        sim.advance()
-    if do_write:
-        assert os.path.isfile(iop.filename)
-
-
-def test_custom_op_1():
-    """Test build and apply, without
-    writer.
-    """
-    init_custom(3, d3d, func3d)
-
-
-def test_custom_op_2():
-    """Test build and apply, with
-    output in a file
-    """
-    init_custom(3, d3d, func3d_with_diag, True)
diff --git a/hysop/old/operator.old/tests/test_density.py b/hysop/old/operator.old/tests/test_density.py
deleted file mode 100755
index 55aee581bb0e565e41f38f89a1940c05ba8a36f8..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/tests/test_density.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# -*- coding: utf-8 -*-
-import hysop as pp
-from hysop.operator.density import DensityVisco
-from hysop.problem.simulation import Simulation
-from hysop.tools.parameters import Discretization
-from hysop import Field
-
-d3d = Discretization([129, 129, 129])
-
-
-def computeVisco(res, x, y, z, t):
-    res[0][...] = x + y + z * t
-    return res
-
-
-def test_density():
-    """
-    Todo : write proper tests.
-    Here we just check if discr/setup/apply process goes well.
-    """
-    box = pp.Box(length=[2., 1., 0.9], origin=[0.0, -1., -0.43])
-    density = Field(domain=box, name='density')
-    viscosity = Field(domain=box, formula=computeVisco, name='visco')
-    op = DensityVisco(density=density, viscosity=viscosity, discretization=d3d)
-    op.discretize()
-    op.setup()
-    topo = op.variables[viscosity]
-    viscosity.initialize(topo=topo)
-    #    simu = Simulation(nb_iter=2)
-    # op.apply(simu)  ## --> need to be reviewed
diff --git a/hysop/old/operator.old/tests/test_diff_poisson_3D.py b/hysop/old/operator.old/tests/test_diff_poisson_3D.py
deleted file mode 100755
index e406d2b4a6ef52f11a90bd2847047ee68e9fb1c2..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/tests/test_diff_poisson_3D.py
+++ /dev/null
@@ -1,74 +0,0 @@
-"""Diffusion - Poisson sequence
-"""
-import hysop as pp
-from hysop.operator.poisson import Poisson
-from hysop.operator.diffusion import Diffusion
-from math import sqrt, pi, exp
-from hysop.problem.simulation import Simulation
-from hysop import testsenv
-from hysop.tools.parameters import Discretization
-
-
-def computeVel(x, y, z):
-    vx = 0.
-    vy = 0.
-    vz = 0.
-    return vx, vy, vz
-
-
-def computeVort(x, y, z):
-    xc = 1. / 2.
-    yc = 1. / 2.
-    zc = 1. / 4.
-    R = 0.2
-    sigma = R / 2.
-    Gamma = 0.0075
-    dist = sqrt((x - xc) ** 2 + (y - yc) ** 2)
-    s2 = (z - zc) ** 2 + (dist - R) ** 2
-    wx = 0.
-    wy = 0.
-    wz = 0.
-    if (dist != 0.):
-        cosTheta = (x - xc) / dist
-        sinTheta = (y - yc) / dist
-        wTheta = Gamma / (pi * sigma ** 2) * \
-            exp(-(s2 / sigma ** 2))
-        wx = - wTheta * sinTheta
-        wy = wTheta * cosTheta
-        wz = 0.
-    return wx, wy, wz
-
-
-@testsenv.fftw_failed
-def test_diff_poisson():
-    """
-    """
-    # Parameters
-    nb = 33
-    boxLength = [1., 1., 1.]
-    boxMin = [0., 0., 0.]
-    d3D = Discretization([nb, nb, nb])
-
-    # Domain
-    box = pp.Box(length=boxLength, origin=boxMin)
-
-    # Fields
-    velo = pp.Field(domain=box, formula=computeVel,
-                    name='Velocity', is_vector=True)
-    vorti = pp.Field(domain=box, formula=computeVort,
-                     name='Vorticity', is_vector=True)
-
-    # FFT Diffusion operators and FFT Poisson solver
-    diffusion = Diffusion(vorticity=vorti, viscosity=0.002, discretization=d3D)
-    poisson = Poisson(velo, vorti, discretization=d3D)
-
-    diffusion.discretize()
-    poisson.discretize()
-
-    diffusion.setup()
-    poisson.setup()
-
-    simu = Simulation(start=0.0, end=10., time_step=0.002,
-                      max_iter=1000000)
-    diffusion.apply(simu)
-    poisson.apply(simu)
diff --git a/hysop/old/operator.old/tests/test_differential.py b/hysop/old/operator.old/tests/test_differential.py
deleted file mode 100755
index 47e8af47bd7b58efb7ad2abc3a99986afb564827..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/tests/test_differential.py
+++ /dev/null
@@ -1,266 +0,0 @@
-"""Tests for differential operators.
-"""
-
-import numpy as np
-from hysop.domain.box import Box
-from hysop.fields.continuous_field import Field
-from hysop.tools.numpywrappers import npw
-from hysop.methods import SpaceDiscretization
-from hysop.methods import FDC4, FDC2
-from hysop.operator.differential import Curl, Grad, DivAdvection
-from hysop.tools.parameters import Discretization
-from hysop import testsenv
-# Domain and topologies definitions
-import math
-
-nb = 65
-Lx = Ly = Lz = 2. * math.pi
-box1_3d = Box(length=[Lx, Ly, Lz], origin=[0., 0., 0.])
-box1_2d = Box(length=[Lx, Ly], origin=[0., 0.])
-
-d1_3d = Discretization([nb, nb, nb], [2, 2, 2])
-d1_2d = Discretization([nb, nb], [2, 2])
-topo1_3d = box1_3d.create_topology(d1_3d)
-topo1_2d = box1_2d.create_topology(d1_2d)
-
-cos = np.cos
-sin = np.sin
-
-Nx = 128
-Ny = 96
-Nz = 102
-g = 2
-ldef = npw.asrealarray([0.3, 0.4, 1.0])
-d3_3d = Discretization([Nx + 1, Ny + 1, Nz + 1], [g, g, g])
-d3_2d = Discretization([Nx + 1, Ny + 1], [g, g])
-xdom = npw.asrealarray([0.1, -0.3, 0.5])
-ldom = npw.asrealarray([math.pi * 2., ] * 3)
-xdef = npw.asrealarray(xdom + 0.2)
-box3_3d = Box(length=ldom, origin=xdom)
-box3_2d = Box(length=ldom[:2], origin=xdom[:2])
-topo3_3d = box3_3d.create_topology(d3_3d)
-topo3_2d = box3_2d.create_topology(d3_2d)
-
-
-
-def velocity_f(res, x, y, z, t):
-    res[0][...] = sin(x) * cos(y) * cos(z)
-    res[1][...] = - cos(x) * sin(y) * cos(z)
-    res[2][...] = 0.
-    return res
-
-
-def vorticity_f(res, x, y, z, t):
-    res[0][...] = - cos(x) * sin(y) * sin(z)
-    res[1][...] = - sin(x) * cos(y) * sin(z)
-    res[2][...] = 2. * sin(x) * sin(y) * cos(z)
-    return res
-
-
-def velocity_f2d(res, x, y, t):
-    res[0][...] = sin(x) * cos(y)
-    res[1][...] = - cos(x) * sin(y)
-    return res
-
-
-def vorticity_f2d(res, x, y, t):
-    res[0][...] = 2. * sin(x) * sin(y)
-    return res
-
-
-def grad_velo(res, x, y, z, t):
-    res[0][...] = cos(x) * cos(y) * cos(z)
-    res[1][...] = -sin(x) * sin(y) * cos(z)
-    res[2][...] = -sin(x) * cos(y) * sin(z)
-    res[3][...] = sin(x) * sin(y) * cos(z)
-    res[4][...] = - cos(x) * cos(y) * cos(z)
-    res[5][...] = cos(x) * sin(y) * sin(z)
-    res[6][...] = 0.0
-    res[7][...] = 0.0
-    res[8][...] = 0.0
-    return res
-
-
-def grad_velo_2d(res, x, y, t):
-    res[0][...] = cos(x) * cos(y)
-    res[1][...] = -sin(x) * sin(y)
-    res[2][...] = sin(x) * sin(y)
-    res[3][...] = -cos(x) * cos(y)
-    return res
-
-
-def check(op, ref_formula, topo, op_dim=3, order=4):
-    """Apply operator 'op' and compare its results with some references.
-    """
-    # Reference field
-    ref = Field(domain=topo.domain, formula=ref_formula, nb_components=op_dim,
-                name='reference')
-    ref_d = ref.discretize(topo)
-    ref.initialize(topo=topo)
-    velo = op.invar
-    result = op.outvar
-    velo.initialize(topo=topo)
-    op.apply()
-    res_d = result.discrete_fields[topo]
-
-    # Compare results with reference
-    ind = topo.mesh.compute_index
-    err = topo.mesh.space_step ** order
-    dim = topo.domain.dim
-    for i in xrange(result.nb_components):
-        print ('err = O(h**order) =', err[i % dim])
-        print (np.max(np.abs(res_d[i][ind] - ref_d[i][ind])))
-        assert np.allclose(res_d[i][ind], ref_d[i][ind],
-                           rtol=err[i % dim])
-    op.finalize()
-
-
-def call_op(class_name, ref_formula, topo, use_work=False,
-            op_dim=3, method=None, order=4, vform=velocity_f):
-    """init and set an operator of type 'class_name'
-        and call check function on this operator.
-    """
-
-    # Velocity and result fields
-    velo = Field(domain=topo.domain, formula=vform, is_vector=True,
-                 name='velo')
-    result = Field(domain=topo.domain, nb_components=op_dim, name='result')
-    # Differential operator
-    op = class_name(invar=velo, outvar=result, discretization=topo,
-                    method=method)
-
-    op.discretize()
-    work = None
-    if use_work:
-        work_prop = op.get_work_properties()['rwork']
-        work = []
-        if work_prop is not None:
-            for l in xrange(len(work_prop)):
-                shape = work_prop[l]
-                work.append(npw.zeros(shape))
-
-    op.setup(rwork=work)
-    check(op, ref_formula, topo, op_dim, order)
-
-
-def call_op_fft(class_name, ref_formula, dom, discr,
-                op_dim=3, method=None, order=4, vform=velocity_f):
-    """init and set an operator of type 'class_name'
-        and call check function on this operator. FFTW method.
-    """
-
-    # Velocity and result fields
-    velo = Field(domain=dom, formula=vform, is_vector=True,
-                 name='velo')
-    result = Field(domain=dom, nb_components=op_dim, name='result')
-    # Differential operator
-    op = class_name(invar=velo, outvar=result, discretization=discr,
-                    method=method)
-
-    op.discretize()
-    op.setup()
-    topo = op.discrete_fields[velo].topology
-    check(op, ref_formula, topo, op_dim, order)
-
-
-def test_curl_fd_1():
-    method = {SpaceDiscretization: FDC4}
-    call_op(Curl, vorticity_f, topo1_3d, method=method)
-
-
-def test_curl_fd_2():
-    method = {SpaceDiscretization: FDC2}
-    call_op(Curl, vorticity_f, topo1_3d, method=method, order=2)
-
-
-def test_curl_fd_1_2d():
-    method = {SpaceDiscretization: FDC4}
-    call_op(Curl, vorticity_f2d, topo1_2d, method=method,
-            op_dim=1, vform=velocity_f2d)
-
-
-def test_curl_fd_3():
-    method = {SpaceDiscretization: FDC4}
-    call_op(Curl, vorticity_f, topo3_3d, method=method)
-
-
-def test_curl_fd_3_2d():
-    method = {SpaceDiscretization: FDC4}
-    call_op(Curl, vorticity_f2d, topo3_2d, op_dim=1,
-            method=method, vform=velocity_f2d)
-
-
-def test_curl_fd_work():
-    method = {SpaceDiscretization: FDC4}
-    call_op(Curl, vorticity_f, topo3_3d, method=method, use_work=True)
-
-
-@testsenv.fftw_failed
-def test_curl_fft_1():
-    method = {SpaceDiscretization: 'fftw'}
-    d1_3d_nog = Discretization([nb, nb, nb])
-    call_op_fft(Curl, vorticity_f, box1_3d, d1_3d_nog, method=method, order=6)
-
-
-# def test_curl_fft_1_2d():
-#     method = {SpaceDiscretization: 'fftw'}
-#     d1_2d_nog = Discretization([nb, nb])
-#     call_op_fft(Curl, vorticity_f2d, box1_2d, d1_2d_nog, op_dim=1,
-#                 method=method, order=6, vform=velocity_f2d)
-
-#def test_curl_fft_ghosts():
-#    from hysop.methods import SpaceDiscretization
-#    from hysop.operator.differential import Curl
-#    method = {SpaceDiscretization: 'fftw'}
-#    call_op(Curl, vorticity_f, method=method, order=6, discretization=d3D)
-
-
-# def test_curl_fft_2():
-#     method = {SpaceDiscretization: 'fftw'}
-#     d2_3d_nog = Discretization([2 * nb, nb, nb])
-#     box2_3d = Box(length=[2. * Lx, Ly, Lz], origin=[0., 0., 0.])
-#     call_op_fft(Curl, vorticity_f, box2_3d, d2_3d_nog, method=method, order=6)
-
-
-# def test_curl_fft_2_2d():
-#     method = {SpaceDiscretization: 'fftw'}
-#     d2_2d_nog = Discretization([2 * nb, nb])
-#     box2_2d = Box(length=[2. * Lx, Ly], origin=[0., 0.])
-#     call_op_fft(Curl, vorticity_f, box2_2d, d2_2d_nog, method=method, order=6)
-
-
-def test_grad_1():
-    method = {SpaceDiscretization: FDC2}
-    call_op(Grad, grad_velo, topo1_3d, op_dim=9, method=method, order=2)
-
-
-def test_grad_2():
-    method = {SpaceDiscretization: FDC4}
-    call_op(Grad, grad_velo, topo1_3d, op_dim=9, method=method)
-
-
-def test_grad_3():
-    method = {SpaceDiscretization: FDC2}
-    call_op(Grad, grad_velo, topo3_3d, op_dim=9, method=method, order=2)
-
-
-def test_grad_3_work():
-    method = {SpaceDiscretization: FDC2}
-    call_op(Grad, grad_velo, topo3_3d, op_dim=9, method=method, order=2,
-            use_work=True)
-
-
-def test_grad_3_2d():
-    method = {SpaceDiscretization: FDC4}
-    call_op(Grad, grad_velo_2d, topo3_2d, op_dim=4, method=method,
-            vform=velocity_f2d)
-
-def divadvection_func(res, x, y, z, t):
-    res[0][...] = - cos(z) * cos(z) * (cos(x) * cos(x) - sin(x) * sin(x)) - \
-        cos(z) * cos(z) * (cos(y) * cos(y) - sin(y) * sin(y))
-    return res
-
-
-def test_div_advection():
-    method = {SpaceDiscretization: FDC4}
-    call_op(DivAdvection, divadvection_func, topo3_3d, op_dim=1, method=method)
diff --git a/hysop/old/operator.old/tests/test_diffusion.py b/hysop/old/operator.old/tests/test_diffusion.py
deleted file mode 100755
index f933be0979870f0cabfcbed7bba7caafe4008299..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/tests/test_diffusion.py
+++ /dev/null
@@ -1,94 +0,0 @@
-"""Tests for diffusion and curl_diffusion operators.
-"""
-# -*- coding: utf-8 -*-
-
-import hysop as pp
-from hysop.operator.diffusion import Diffusion, CurlAndDiffusion
-from hysop.problem.simulation import Simulation
-from hysop.tools.parameters import Discretization
-import numpy as np
-from hysop.tools.numpywrappers import npw
-import math
-from hysop import testsenv
-from hysop.fields.tests.func_for_tests import v_TG, w_TG
-pi = math.pi
-sin = np.sin
-cos = np.cos
-# Physical Domain description
-dim = 3
-LL = 2 * pi * npw.ones((dim))
-cc = 2 * pi / LL
-d3D = Discretization([33, 33, 33])
-d2D = Discretization([33, 33])
-
-
-def computeVort2D(res, x, y, t):
-    # todo ...
-    res[0][...] = 4 * pi ** 2 * (cos(x * cc[0]) * sin(y * cc[1])) * \
-        (1. / LL[0] ** 2 + 1. / LL[1] ** 2)
-    return res
-
-
-@testsenv.fftw_failed
-def test_diffusion_3d():
-    """Vector field diffusion, 3d domain"""
-    dom = pp.Box(length=LL)
-    vorticity = pp.Field(domain=dom, formula=w_TG,
-                         name='Vorticity', is_vector=True)
-    diff = Diffusion(viscosity=0.3, vorticity=vorticity, discretization=d3D)
-    diff.discretize()
-    diff.setup()
-    topo = diff.discrete_fields[vorticity].topology
-    simu = Simulation(end=0.1, time_step=0.01)
-    vorticity.initialize(topo=topo)
-    diff.apply(simu)
-    diff.finalize()
-
-
-@testsenv.fftw_failed
-def test_diffusion_2d():
-    """Vector field diffusion 2d domain"""
-    dom = pp.Box(length=LL[:2])
-    vorticity = pp.Field(domain=dom, formula=computeVort2D, name='Vorticity')
-    diff = Diffusion(viscosity=0.3, vorticity=vorticity, discretization=d2D)
-    diff.discretize()
-    diff.setup()
-    topo = diff.discrete_fields[vorticity].topology
-    simu = Simulation(end=0.1, time_step=0.01)
-    vorticity.initialize(topo=topo)
-    diff.apply(simu)
-    diff.finalize()
-
-
-@testsenv.fftw_failed
-def test_curl_and_diffusion_3d():
-    """Vector field diffusion, 3d domain"""
-    dom = pp.Box(length=LL)
-    vorticity_ref = pp.Field(domain=dom, name='Wref', formula=w_TG,
-                             is_vector=True)
-    vorticity = pp.Field(domain=dom, name='Vorticity', is_vector=True)
-    velocity = pp.Field(domain=dom, name='Velocity', formula=v_TG,
-                        is_vector=True)
-
-    diff_0 = Diffusion(viscosity=0.3, vorticity=vorticity_ref,
-                       discretization=d3D)
-    diff_0.discretize()
-    diff_0.setup()
-    topo = diff_0.discrete_fields[vorticity_ref].topology
-    diff = CurlAndDiffusion(viscosity=0.3, velocity=velocity,
-                            vorticity=vorticity, discretization=d3D)
-    diff.discretize()
-    diff.setup()
-
-    simu = Simulation(end=0.1, time_step=0.01)
-    vorticity_ref.initialize(topo=topo)
-    velocity.initialize(topo=topo)
-    simu.initialize()
-    wd = vorticity.discretize(topo).data
-    wd_ref = vorticity_ref.discretize(topo).data
-    diff_0.apply(simu)
-    diff.apply(simu)
-    for i in xrange(3):
-        assert np.allclose(wd[i], wd_ref[i])
-
-    diff_0.finalize()
diff --git a/hysop/old/operator.old/tests/test_drag_and_lift.py b/hysop/old/operator.old/tests/test_drag_and_lift.py
deleted file mode 100755
index 2795f4e986901a74cd26e8c8fd62df9059b31489..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/tests/test_drag_and_lift.py
+++ /dev/null
@@ -1,196 +0,0 @@
-# -*- coding: utf-8 -*-
-from hysop.domain.subsets import Sphere
-from hysop.operator.penalization import PenalizeVorticity
-from hysop.problem.simulation import Simulation
-from hysop.tools.parameters import Discretization
-from hysop.tools.io_utils import IOParams, IO
-from hysop.topology.cartesian_topology import CartesianTopology
-import numpy as np
-import os
-from hysop import Field, Box
-# from hysop.operator.hdf_io import HDF_Reader
-from hysop.operator.drag_and_lift import MomentumForces, NocaForces
-cos = np.cos
-sin = np.sin
-
-
-def v2d(res, x, y, t):
-    res[0][...] = 1.
-    res[1][...] = 1.
-    return res
-
-
-def s2d(res, x, y, t):
-    res[0][...] = 1.
-    return res
-
-
-def v3d(res, x, y, z, t):
-    res[0][...] = t * sin(x) * cos(y) * cos(z)
-    res[1][...] = - t * cos(x) * sin(y) * cos(z)
-    res[2][...] = 0.
-    return res
-
-
-def vorticity_f(res, x, y, z, t):
-    res[0][...] = - t * cos(x) * sin(y) * sin(z)
-    res[1][...] = - t * sin(x) * cos(y) * sin(z)
-    res[2][...] = 2 * t * sin(x) * sin(y) * cos(z)
-    return res
-
-
-def drag_vol(x, y, z, t):
-    res = np.zeros(3)
-    res[0] = -2 * cos(x) * sin(z) * (sin(y) - y * cos(y))\
-        - 2 * t * cos(x) * sin(y) * (sin(z) - z * cos(z))
-    res[1] = 2 * cos(y) * sin(z) * (sin(x) - x * cos(x)) +\
-        sin(x) * cos(y) * (sin(z) - z * cos(z))
-    res[2] = 2 * sin(y) * cos(z) * t * (sin(x) - x * cos(x))\
-        - sin(x) * cos(z) * (sin(y) - y * cos(y))
-    return res
-
-
-def s3d(res, x, y, z, t):
-    res[0][...] = 1.
-    return res
-
-
-Nx = 32
-Ny = 106
-Nz = 64
-g = 2
-
-
-ldef = [0.3, 0.4, 1.0]
-discr3D = Discretization([Nx + 1, Ny + 1, Nz + 1], [g, g, g])
-discr2D = Discretization([Nx + 1, Ny + 1], [g, g])
-xdom = np.asarray([0.1, -0.3, 0.5])
-import math
-ldom = np.asarray([math.pi * 2., ] * 3)
-xdef = xdom + 0.2
-xpos = ldom * 0.5
-xpos[-1] += 0.1
-working_dir = os.getcwd() + '/'
-xdom = np.asarray([0., 0., 0.])
-ldom = np.asarray([1., ] * 3)
-
-def init(discr, vform=v3d, wform=vorticity_f):
-    CartesianTopology.reset_counter()
-    dim = len(discr.resolution)
-    dom = Box(dimension=dim, length=ldom[:dim],
-              origin=xdom[:dim])
-    topo = dom.create_topology(discr)
-    velo = Field(domain=topo.domain, formula=vform,
-                 name='Velo', is_vector=True)
-    vorti = Field(domain=topo.domain, formula=wform, name='Vorti',
-                  is_vector=dim == 3)
-    rd = ldom[0] * 0.2
-    hsphere = Sphere(parent=topo.domain, origin=xpos, radius=rd)
-    hsphere.discretize(topo)
-    # penalisation
-    penal = PenalizeVorticity(velocity=velo, vorticity=vorti,
-                              discretization=topo,
-                              obstacles=[hsphere], coeff=1e8)
-    penal.discretize()
-    penal.setup()
-    velo.initialize(topo=topo)
-    vorti.initialize(topo=topo)
-    return topo, penal
-
-
-def test_momentum_forces_3d():
-    topo, op = init(discr3D)
-    # Forces
-    velo = op.velocity
-    obst = op.obstacles
-    dg = MomentumForces(velocity=velo, discretization=topo,
-                        obstacles=obst, penalisation_coeff=[1e8])
-    dg.discretize()
-    dg.setup()
-    simu = Simulation(nb_iter=3)
-    op.apply(simu)
-    dg.apply(simu)
-
-
-def build_noca(formulation):
-    """
-    Compute drag/lift in 3D, flow around a sphere
-    """
-    topo, op = init(discr3D)
-    # Velocity field
-    velo = op.velocity
-    vorti = op.vorticity
-    obst = op.obstacles
-    dg = NocaForces(velocity=velo, vorticity=vorti, discretization=topo,
-                    nu=0.3, obstacles=obst, surfdir=[2, 3],
-                    formulation=formulation)
-    dg.discretize()
-    dg.setup()
-    return dg, op
-
-
-def test_noca1():
-    dg, op = build_noca(1)
-    simu = Simulation(nb_iter=3)
-    op.apply(simu)
-    dg.apply(simu)
-
-
-def test_noca2():
-    dg, op = build_noca(2)
-    simu = Simulation(nb_iter=3)
-    op.apply(simu)
-    dg.apply(simu)
-
-
-def test_noca3():
-    dg, op = build_noca(3)
-    simu = Simulation(nb_iter=3)
-    op.apply(simu)
-    dg.apply(simu)
-
-
-def test_all_drags():
-    topo, op = init(discr3D)
-    velo = op.velocity
-    vorti = op.vorticity
-    obst = op.obstacles
-    dg = {}
-    dg['mom'] = MomentumForces(velocity=velo, discretization=topo,
-                               obstacles=obst, penalisation_coeff=[1e8],
-                               io_params=IOParams('Heloise',
-                                                  fileformat=IO.ASCII))
-    sdir = [2]
-    dg['noca_1'] = NocaForces(velocity=velo, vorticity=vorti,
-                              discretization=topo, surfdir=sdir,
-                              nu=0.3, obstacles=obst, formulation=1,
-                              io_params=IOParams('Noca1',
-                                                 fileformat=IO.ASCII))
-    dg['noca_2'] = NocaForces(velocity=velo, vorticity=vorti,
-                              discretization=topo, surfdir=sdir,
-                              nu=0.3, obstacles=obst, formulation=2,
-                              io_params=IOParams('Noca2',
-                                                 fileformat=IO.ASCII))
-    dg['noca_3'] = NocaForces(velocity=velo, vorticity=vorti,
-                              discretization=topo, surfdir=sdir,
-                              nu=0.3, obstacles=obst, formulation=3,
-                              io_params=IOParams('Noca3',
-                                                 fileformat=IO.ASCII))
-    for drag in dg.values():
-        drag.discretize()
-        drag.setup()
-
-    simu = Simulation(time_step=1e-4, end=1e-2)
-
-    op.apply(simu)
-    for drag in dg:
-        dg[drag].apply(simu)
-    simu.initialize()
-    while not simu.is_over:
-        velo.initialize(simu.time, topo)
-        vorti.initialize(simu.time, topo)
-        for drag in dg:
-            dg[drag].apply(simu)
-        op.apply(simu)
-        simu.advance()
-    #assert False
diff --git a/hysop/old/operator.old/tests/test_energy_enstrophy.py b/hysop/old/operator.old/tests/test_energy_enstrophy.py
deleted file mode 100755
index 6e624ce33b809fcff1837f46b055c68588a049a8..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/tests/test_energy_enstrophy.py
+++ /dev/null
@@ -1,84 +0,0 @@
-# -*- coding: utf-8 -*-
-import hysop as pp
-from hysop.operator.energy_enstrophy import EnergyEnstrophy
-from hysop.problem.simulation import Simulation
-from hysop.tools.parameters import Discretization
-from hysop import Field
-import numpy as np
-try:
-    from scipy.integrate import nquad
-
-except:
-    def nquad(func, coords_range):
-        coords = []
-        nbsteps = 1000
-        for x in coords_range:
-            coords.append(np.linspace(x[0], x[1], nbsteps))
-        coords = tuple(coords)
-        hstep = coords[0][1] - coords[0][0]
-        ll = [coords_range[i][1] - coords_range[i][0] for i in xrange(1, 3)]
-        return [np.sum(func(*coords)[:-1]) * hstep * np.prod(ll)]
-
-sin = np.sin
-cos = np.cos
-
-d3d = Discretization([129, 129, 129])
-
-
-def computeVel(res, x, y, z, t):
-    res[0][...] = x
-    res[1][...] = y
-    res[2][...] = z
-    return res
-
-
-def computeVort(res, x, y, z, t):
-    res[0][...] = x
-    res[1][...] = y
-    res[2][...] = z
-    return res
-
-
-def energy_ref(x, y, z):
-    return x ** 2
-
-
-def init():
-    box = pp.Box(length=[2., 1., 0.9], origin=[0.0, -1., -0.43])
-    velo = Field(domain=box, formula=computeVel,
-                 name='Velocity', is_vector=True)
-    vorti = Field(domain=box, formula=computeVort,
-                  name='Vorticity', is_vector=True)
-    return velo, vorti
-
-
-def test_energy_enstrophy():
-    """
-    Todo : write proper tests.
-    Here we just check if discr/setup/apply process goes well.
-    """
-    dim = 3
-    velo, vorti = init()
-    op = EnergyEnstrophy(velo, vorti, discretization=d3d)
-    op.discretize()
-    op.setup()
-    topo = op.variables[velo]
-    velo.initialize(topo=topo)
-    vorti.initialize(topo=topo)
-    simu = Simulation(nb_iter=2)
-    op.apply(simu)
-    intrange = []
-    box = topo.domain
-    invvol = 1. / np.prod(box.length)
-    for i in xrange(dim):
-        origin = box.origin[i]
-        end = origin + box.length[i]
-        intrange.append([origin, end])
-    intrange = 2 * intrange
-    eref = nquad(energy_ref, intrange[:dim])[0]
-    eref += nquad(energy_ref, intrange[1:dim + 1])[0]
-    eref += nquad(energy_ref, intrange[2:dim + 2])[0]
-    eref *= invvol
-    tol = (topo.mesh.space_step).max() ** 2
-    assert (op.energy() - eref * 0.5) < tol
-    assert (op.enstrophy() - eref) < tol
diff --git a/hysop/old/operator.old/tests/test_hdf5_io.py b/hysop/old/operator.old/tests/test_hdf5_io.py
deleted file mode 100755
index 927048c581083cafeaaf4c0d77102a5e11917d05..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/tests/test_hdf5_io.py
+++ /dev/null
@@ -1,399 +0,0 @@
-# -*- coding: utf-8 -*-
-"""Tests for reader/writer of fields in hdf5 format.
-"""
-
-from hysop import Box, Field
-import numpy as np
-import os
-from hysop.problem.simulation import Simulation
-from hysop.tools.parameters import Discretization
-from hysop.operator.hdf_io import HDF_Writer, HDF_Reader
-from hysop.tools.io_utils import IO, IOParams
-from hysop.core.mpi import main_rank, main_size
-from hysop.domain.subsets import SubBox
-from hysop.testsenv import postclean
-
-Lx = 2.
-nb = 65
-working_dir = os.getcwd() + '/test_hdf5/p' + str(main_size)
-#IO.set_default_path(os.getcwd() + '/test_hdf5/')
-if main_rank == 0:
-    print 'Set I/O default path to ', IO.default_path()
-
-cos = np.cos
-sin = np.sin
-
-
-def init1(dim):
-    # Domain (cubic)
-    dom = Box(length=[Lx] * dim, origin=[-1.] * dim)
-    # global resolution for the grid
-    resol = Discretization([nb] * dim, [2] * dim)
-    topo = dom.create_topology(discretization=resol)
-    return dom, topo
-
-
-def init2():
-    # Domain (not cubic)
-    dom = Box(length=[Lx, 2 * Lx, 3.9 * Lx], origin=[-1., 2., 3.9])
-    # global resolution for the grid
-    resol = Discretization([nb, 2 * nb, nb + 8], [2, 0, 1])
-    topo = dom.create_topology(discretization=resol)
-    return dom, topo
-
-
-def func3D(res, x, y, z, t):
-    res[0][...] = cos(t * x) + sin(y) + z
-    return res
-
-
-def vec3D(res, x, y, z, t):
-    res[0][...] = cos(t * x) + sin(y) + z + 0.2
-    res[1][...] = sin(t * x) + sin(y) + z + 0.3
-    res[2][...] = 3 * cos(2 * t * x) + sin(y) + y
-    return res
-
-
-def vort3D(res, x, y, z, t):
-    res[0][...] = 3 * cos(2 * t * x) + cos(y) + z
-    res[1][...] = sin(t * y) + x + 0.2
-    res[2][...] = 3 * cos(t) + sin(y) + z
-    return res
-
-
-@postclean(working_dir)
-def test_write_read_scalar_3D():
-    dom, topo = init1(3)
-    scal3D = Field(domain=dom, name='Scal3D')
-    scalRef = Field(domain=dom, formula=func3D, name='ScalRef3D')
-
-    filename = working_dir + '/testIO_scal'
-    iop = IOParams(filename, fileformat=IO.HDF5)
-    op = HDF_Writer(variables={scalRef: topo}, io_params=iop)
-    simu = Simulation(nb_iter=10)
-    op.discretize()
-    op.setup()
-    simu.initialize()
-
-    scalRef.initialize(simu.time, topo=topo)
-    op.apply(simu)
-
-    simu.advance()
-    simu.advance()
-    # Print scalRef for other iterations
-    op.apply(simu)
-    op.finalize()
-    fullpath = iop.filename
-    assert os.path.exists(fullpath + '.xmf')
-    assert os.path.exists(fullpath + '_00000.h5')
-    assert os.path.exists(fullpath + '_00002.h5')
-
-    # Reader
-    iop_read = IOParams(working_dir + '/testIO_scal_00002.h5',
-                        fileformat=IO.HDF5)
-    reader = HDF_Reader(variables=[scal3D], discretization=topo,
-                        io_params=iop_read,
-                        var_names={scal3D: 'ScalRef3D_' + str(topo.get_id())})
-    reader.discretize()
-    reader.setup()
-    sc3d = scal3D.discretize(topo)
-    scref = scalRef.discretize(topo)
-    ind = topo.mesh.compute_index
-    for d in xrange(scal3D.nb_components):
-        sc3d.data[d][...] = 0.0
-        assert not np.allclose(scref.data[d][ind], sc3d.data[d][ind])
-    reader.apply()
-    reader.finalize()
-
-    for d in xrange(scal3D.nb_components):
-        assert np.allclose(scref.data[d][ind], sc3d.data[d][ind])
-
-
-@postclean(working_dir)
-def test_write_read_scalar_3D_defaults():
-    dom, topo = init1(3)
-    scal3D = Field(domain=dom, name='Scal3D')
-    scalRef = Field(domain=dom, formula=func3D, name='ScalRef3D')
-
-    # Write a scalar field, using default configuration for output
-    # names and location
-    op = HDF_Writer(variables={scalRef: topo})
-    simu = Simulation(nb_iter=3)
-    op.discretize()
-    op.setup()
-    scal3D.discretize(topo=topo)
-    scalRef.initialize(simu.time, topo=topo)
-    simu.initialize()
-    while not simu.is_over:
-        op.apply(simu)
-        simu.advance()
-
-    op.finalize()
-    filename = scalRef.name
-    fullpath = os.path.join(IO.default_path(), filename)
-
-    assert os.path.exists(fullpath + '.xmf')
-    assert os.path.exists(fullpath + '_00000.h5')
-    assert os.path.exists(fullpath + '_00001.h5')
-
-    sc3d = scal3D.discretize(topo)
-    scref = scalRef.discretize(topo)
-    ind = topo.mesh.compute_index
-    for d in xrange(scal3D.nb_components):
-        sc3d.data[d][...] = scref.data[d][...]
-        scref.data[d][...] = 0.0
-        # reinit ScalRef
-
-    # Read a scalar field, using default configuration for output
-    # names and location, with a given iteration number.
-    reader = HDF_Reader(variables={scalRef: topo},
-                        restart=simu.current_iteration - 1)
-    reader.discretize()
-    reader.setup()
-    for d in xrange(scal3D.nb_components):
-        assert not np.allclose(scref.data[d][ind], sc3d.data[d][ind])
-    reader.apply()
-    reader.finalize()
-
-    for d in xrange(scal3D.nb_components):
-        assert np.allclose(scref.data[d][ind], sc3d.data[d][ind])
-
-@postclean(working_dir)
-def test_write_read_vectors_3D_defaults():
-    dom, topo = init2()
-    velo = Field(domain=dom, formula=vec3D, name='velo', is_vector=True)
-    vorti = Field(domain=dom, formula=vort3D, name='vorti', is_vector=True)
-
-    # Write a vector field, using default configuration for output
-    # names and location
-    op = HDF_Writer(variables={velo: topo, vorti: topo})
-    simu = Simulation(nb_iter=3)
-    op.discretize()
-    op.setup()
-    velo.initialize(simu.time, topo=topo)
-    vorti.initialize(simu.time, topo=topo)
-    simu.initialize()
-    while not simu.is_over:
-        op.apply(simu)
-        simu.advance()
-
-    op.finalize()
-    filename = ''
-    names = []
-    for var in op.input:
-        names.append(var.name)
-        names.sort()
-    for nn in names:
-        filename += nn + '_'
-    filename = filename[:-1]
-    fullpath = os.path.join(IO.default_path(), filename)
-
-    assert os.path.exists(fullpath + '.xmf')
-    assert os.path.exists(fullpath + '_00000.h5')
-    assert os.path.exists(fullpath + '_00001.h5')
-
-    v3d = velo.discretize(topo)
-    w3d = vorti.discretize(topo)
-    ind = topo.mesh.compute_index
-
-    # Copy current values of v3 and w3 into buff1 and buff2, for comparison
-    # after reader.apply, below.
-    buff1 = Field(domain=dom, name='buff1', is_vector=True)
-    buff2 = Field(domain=dom, name='buff2', is_vector=True)
-    b1 = buff1.discretize(topo=topo)
-    b2 = buff2.discretize(topo=topo)
-    for d in xrange(velo.nb_components):
-        b1.data[d][...] = v3d.data[d][...]
-        b2.data[d][...] = w3d.data[d][...]
-        # reset v3 and w3 to zero.
-        v3d.data[d][...] = 0.0
-        w3d.data[d][...] = 0.0
-
-    # Read vector fields, using default configuration for input
-    # names and location, with a given iteration number.
-    # If everything works fine, reader must read output from
-    # the writer above.
-    reader = HDF_Reader(variables={velo: topo, vorti: topo},
-                        io_params=IOParams(filename),
-                        restart=simu.current_iteration - 1)
-    reader.discretize()
-    reader.setup()
-    for d in xrange(v3d.nb_components):
-        assert not np.allclose(b1.data[d][ind], v3d.data[d][ind])
-        assert not np.allclose(b2.data[d][ind], w3d.data[d][ind])
-
-    reader.apply()
-    reader.finalize()
-    # Now, v3 and w3 (just read) must be equal to saved values in b1 and b2.
-    for d in xrange(v3d.nb_components):
-        assert np.allclose(b1.data[d][ind], v3d.data[d][ind])
-        assert np.allclose(b2.data[d][ind], w3d.data[d][ind])
-
-
-@postclean(working_dir)
-def test_write_read_vectors_3D():
-    dom, topo = init2()
-    velo = Field(domain=dom, formula=vec3D, name='velo', is_vector=True)
-    vorti = Field(domain=dom, formula=vort3D, name='vorti', is_vector=True)
-
-    # Write a vector field, using default for output location
-    # but with fixed names for datasets
-    filename = working_dir + '/testIO_vec'
-    iop = IOParams(filename, fileformat=IO.HDF5)
-    op = HDF_Writer(variables={velo: topo, vorti: topo},
-                    var_names={velo: 'io_1', vorti: 'io_2'}, io_params=iop)
-    simu = Simulation(nb_iter=3)
-    op.discretize()
-    op.setup()
-
-    velo.initialize(simu.time, topo=topo)
-    vorti.initialize(simu.time, topo=topo)
-    simu.initialize()
-    while not simu.is_over:
-        op.apply(simu)
-        simu.advance()
-
-    op.finalize()
-
-    # filename = ''
-    # for v in op.input:
-    #     filename += v.name
-    #     filename += '_'
-    fullpath = iop.filename
-    assert os.path.exists(fullpath + '.xmf')
-    assert os.path.exists(fullpath + '_00000.h5')
-    assert os.path.exists(fullpath + '_00001.h5')
-
-    v3d = velo.discretize(topo)
-    w3d = vorti.discretize(topo)
-    ind = topo.mesh.compute_index
-
-    buff1 = Field(domain=dom, name='buff1', is_vector=True)
-    buff2 = Field(domain=dom, name='buff2', is_vector=True)
-
-    # Read vector fields, fixed filename, fixed dataset names.
-    iop_read = IOParams(working_dir + '/testIO_vec_00001.h5',
-                        fileformat=IO.HDF5)
-    reader = HDF_Reader(variables={buff1: topo, buff2: topo},
-                        io_params=iop_read,
-                        var_names={buff1: 'io_2', buff2: 'io_1'})
-    reader.discretize()
-    reader.setup()
-    reader.apply()
-    reader.finalize()
-    b1 = buff1.discretize(topo)
-    b2 = buff2.discretize(topo)
-    for d in xrange(v3d.nb_components):
-        assert np.allclose(b2.data[d][ind], v3d.data[d][ind])
-        assert np.allclose(b1.data[d][ind], w3d.data[d][ind])
-
-
-@postclean(working_dir)
-def test_write_read_subset_1():
-    dom, topo = init2()
-    velo = Field(domain=dom, formula=vec3D, name='velo', is_vector=True)
-
-    # A subset of the current domain
-    from hysop.domain.subsets import SubBox
-    mybox = SubBox(origin=[-0.5, 2.3, 4.1], length=[Lx / 2, Lx / 3, Lx],
-                   parent=dom)
-    # Write a vector field, using default for output location
-    # but with fixed names for datasets
-    op = HDF_Writer(variables={velo: topo}, var_names={velo: 'io_1'},
-                    subset=mybox)
-    simu = Simulation(nb_iter=3)
-    op.discretize()
-    op.setup()
-    velo.initialize(simu.time, topo=topo)
-    simu.initialize()
-    while not simu.is_over:
-        op.apply(simu)
-        simu.advance()
-    op.finalize()
-
-    filename = ''
-    for v in op.input:
-        filename += v.name
-        filename += '_'
-    filename = filename[:-1]
-    fullpath = os.path.join(IO.default_path(), filename)
-
-    assert os.path.exists(fullpath + '.xmf')
-    assert os.path.exists(fullpath + '_00000.h5')
-    assert os.path.exists(fullpath + '_00001.h5')
-
-    v3d = velo.discretize(topo)
-    ind = topo.mesh.compute_index
-    indsubset = mybox.mesh[topo].compute_index
-
-    buff1 = Field(domain=dom, name='buff1', is_vector=True)
-
-    # Read vector fields, fixed filename, fixed dataset names.
-    iop = IOParams(filename + '_00000.h5', fileformat=IO.HDF5)
-    reader = HDF_Reader(variables={buff1: topo},
-                        io_params=iop,
-                        var_names={buff1: 'io_1'}, subset=mybox)
-    reader.discretize()
-    reader.setup()
-    reader.apply()
-    reader.finalize()
-    b1 = buff1.discretize(topo)
-    for d in xrange(v3d.nb_components):
-        assert not np.allclose(b1.data[d][ind], v3d.data[d][ind])
-        assert np.allclose(b1.data[d][indsubset], v3d.data[d][indsubset])
-
-
-@postclean(working_dir)
-def test_write_read_subset_2():
-    dom, topo = init2()
-    velo = Field(domain=dom, formula=vec3D, name='velo', is_vector=True)
-
-    # A subset of the current domain
-    # a plane ...
-    mybox = SubBox(origin=[-0.5, 2.3, 4.1], length=[Lx / 2, Lx / 3, 0.0],
-                   parent=dom)
-    # Write a vector field, using default for output location
-    # but with fixed names for datasets
-    op = HDF_Writer(variables={velo: topo}, var_names={velo: 'io_1'},
-                    subset=mybox)
-    simu = Simulation(nb_iter=3)
-    op.discretize()
-    op.setup()
-    velo.initialize(simu.time, topo=topo)
-    simu.initialize()
-    while not simu.is_over:
-        op.apply(simu)
-        simu.advance()
-    op.finalize()
-
-    filename = ''
-    for v in op.input:
-        filename += v.name
-        filename += '_'
-    filename = filename[:-1]
-    fullpath = os.path.join(IO.default_path(), filename)
-
-    assert os.path.exists(fullpath + '.xmf')
-    assert os.path.exists(fullpath + '_00000.h5')
-    assert os.path.exists(fullpath + '_00001.h5')
-
-    v3d = velo.discretize(topo)
-    ind = topo.mesh.compute_index
-    indsubset = mybox.mesh[topo].compute_index
-
-    buff1 = Field(domain=dom, name='buff1', is_vector=True)
-
-    # Read vector fields, fixed filename, fixed dataset names.
-    iop = IOParams(filename + '_00000.h5', fileformat=IO.HDF5)
-    reader = HDF_Reader(variables={buff1: topo},
-                        io_params=iop,
-                        var_names={buff1: 'io_1'}, subset=mybox)
-    reader.discretize()
-    reader.setup()
-    reader.apply()
-    reader.finalize()
-    b1 = buff1.discretize(topo)
-    for d in xrange(v3d.nb_components):
-        assert not np.allclose(b1.data[d][ind], v3d.data[d][ind])
-        assert np.allclose(b1.data[d][indsubset], v3d.data[d][indsubset])
-
diff --git a/hysop/old/operator.old/tests/test_multiphase_gradp.py b/hysop/old/operator.old/tests/test_multiphase_gradp.py
deleted file mode 100755
index 0e8eeadd27bf439d9fe77cc9b3648e9c20f994e4..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/tests/test_multiphase_gradp.py
+++ /dev/null
@@ -1,72 +0,0 @@
-"""Testing gradp operator"""
-from hysop.problem.simulation import Simulation
-from hysop.tools.parameters import Discretization
-from hysop.domain.box import Box
-from hysop.fields.continuous_field import Field
-from hysop.operator.multiphase_gradp import MultiphaseGradP
-from hysop.numerics.finite_differences import FDC4, FDC2
-from hysop.tools.numpywrappers import npw
-import numpy as np
-from hysop.methods import Support, SpaceDiscretization, ExtraArgs
-from hysop.constants import HYSOP_REAL
-pi, sin, cos = np.pi, np.sin, np.cos
-
-VISCOSITY = 1e-4
-
-
-def compute_true_res_formula():
-    """Tool to compute true res from sympy"""
-    import sympy as sp
-    from sympy.vector import CoordSysCartesian, gradient
-    R = CoordSysCartesian('R')
-    u = (sp.sin(2*sp.pi*R.x)*sp.cos(2*sp.pi*R.y)*sp.cos(2*sp.pi*R.z)) * R.i + \
-        (-sp.cos(2*sp.pi*R.x)*sp.sin(2*sp.pi*R.y)*sp.cos(2*sp.pi*R.z)) * R.j
-    n = sp.symbols('n')
-    res = lambda r, c: sp.simplify(u.dot(gradient(u.components[r], R)) -
-                                   n * sp.diff(u.components[r], c, c))
-    for r, c in zip((R.i, R.j), (R.x, R.y)):
-        print str(res(r, c)).replace('R.', '')
-
-
-def velo_func(res, x, y, z, t):
-    res[0][...] = sin(2. * pi * x) * \
-        cos(2. * pi * y) * cos(2. * pi * z)
-    res[1][...] = - cos(2. * pi * x) * \
-        sin(2. * pi * y) * cos(2. * pi * z)
-    res[2][...] = 0.
-    return res
-
-
-def res_func(res, x, y, z, t):
-    res[0][...] = pi*(4*pi*VISCOSITY*cos(2*y*pi) + cos(pi*(2*x - 2*z)) +
-                      cos(pi*(2*x + 2*z)))*sin(2*x*pi)*cos(2*z*pi)
-    res[1][...] = pi*(-4*pi*VISCOSITY*cos(2*x*pi) + cos(pi*(2*y - 2*z)) +
-                      cos(pi*(2*y + 2*z)))*sin(2*y*pi)*cos(2*z*pi)
-    res[2][...] = 9.81
-    return res
-
-
-def test_gradp():
-    """Testing gradp operator against analytical result"""
-    simu = Simulation(start=0.0, end=1.0, time_step=0.1, max_iter=1)
-    box = Box()
-    velo = Field(box, is_vector=True, formula=velo_func, name='v0')
-    res = Field(box, is_vector=True, name='res')
-    true_res = Field(box, is_vector=True, formula=res_func, name='vres')
-    d = Discretization([129,129,129])
-    op = MultiphaseGradP(velocity=velo, gradp=res, viscosity=VISCOSITY,
-                         discretization=d)
-    op.discretize()
-    op.setup()
-    topo = op.discrete_fields[velo].topology
-    compute_index = topo.mesh.compute_index
-    velo.initialize(topo=topo)
-    op.initialize_velocity()
-    res.initialize(topo=topo)
-    true_res.initialize(topo=topo)
-    op.apply(simu)
-    d_res = res.discrete_fields[topo]
-    d_true_res = true_res.discrete_fields[topo]
-    assert np.allclose(d_res[0][compute_index], d_true_res[0][compute_index], atol=8e-3)
-    assert np.allclose(d_res[1][compute_index], d_true_res[1][compute_index], atol=8e-3)
-    assert np.allclose(d_res[2][compute_index], d_true_res[2][compute_index])
diff --git a/hysop/old/operator.old/tests/test_multiresolution_filter.py b/hysop/old/operator.old/tests/test_multiresolution_filter.py
deleted file mode 100755
index 1a5c58ac0c5f38e0596cea6d10c6b58d52135db8..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/tests/test_multiresolution_filter.py
+++ /dev/null
@@ -1,221 +0,0 @@
-from hysop.problem.simulation import Simulation
-from hysop.tools.parameters import Discretization
-from hysop.domain.box import Box
-from hysop.fields.continuous_field import Field
-from hysop.operator.multiresolution_filter import MultiresolutionFilter
-from hysop.tools.numpywrappers import npw
-import numpy as np
-from hysop.methods import Remesh
-from hysop.methods import Rmsh_Linear, L2_1
-from hysop.core.mpi import main_size
-
-L = [1., 1., 1.]
-O = [0., 0., 0.]
-simu = Simulation(start=0., end=0.1, nb_iter=1)
-
-# Warning : values too large for tests
-n_middle = 513
-n_large = 1025
-
-def func_periodic_X(res, x, y, z, t=0):
-    res[0][...] = np.sin(2. * np.pi * x)
-    return res
-
-
-def func_periodic_Y(res, x, y, z, t=0):
-    res[0][...] = np.sin(2. * np.pi * y)
-    return res
-
-
-def func_periodic_Z(res, x, y, z, t=0):
-    res[0][...] = np.sin(2. * np.pi * z)
-    return res
-
-
-def func_periodic_XY(res, x, y, z, t=0):
-    res[0][...] = np.sin(2. * np.pi * x) * np.cos(2. * np.pi * y)
-    return res
-
-
-def filter(d_fine, d_coarse, func, method, atol=1e-8, rtol=1e-5):
-    box = Box(length=L, origin=O)
-    f = Field(box, formula=func, name='f0')
-    op = MultiresolutionFilter(d_in=d_fine, d_out=d_coarse,
-                               variables={f: d_coarse},
-                               method=method)
-    op.discretize()
-    op.setup()
-    topo_coarse = op.discrete_fields[f].topology
-    topo_fine = [t for t in f.discrete_fields.keys()
-                 if not t is topo_coarse][0]
-    f.initialize(topo=topo_fine)
-    # f_in = f.discrete_fields[topo_fine]
-    f_out = f.discrete_fields[topo_coarse]
-    op.apply(simu)
-    valid = [npw.zeros(f_out[0].shape), ]
-    valid = func(valid, *topo_coarse.mesh.coords)
-    e = np.max(np.abs(valid[0][topo_coarse.mesh.compute_index] -
-                      f_out[0][topo_coarse.mesh.compute_index]))
-    err = atol + rtol * np.max(np.abs(valid[0][topo_coarse.mesh.compute_index]))
-    return np.allclose(f_out[0][topo_coarse.mesh.compute_index],
-                       valid[0][topo_coarse.mesh.compute_index],
-                       atol=atol, rtol=rtol), e, err
-
-
-def test_linear_X():
-    b, e, err = filter(d_coarse=Discretization([n_middle, 5, 5],
-                                               ghosts=[1, 1, 1]),
-                       d_fine=Discretization([n_large, 5, 5]),
-                       method={Remesh: Rmsh_Linear, },
-                       func=func_periodic_X)
-    assert b, "max(|error|)=" + str(e) + " <= " + str(err)
-
-
-def test_linear_Y():
-    b, e, err = filter(d_coarse=Discretization([5, n_middle, 5],
-                                               ghosts=[1, 1, 1]),
-                       d_fine=Discretization([5, n_large, 5]),
-                       method={Remesh: Rmsh_Linear, },
-                       func=func_periodic_Y)
-    assert b, "max(|error|)=" + str(e) + " <= " + str(err)
-
-
-def test_linear_Z():
-    b, e, err = filter(d_coarse=Discretization([5, 5, n_middle],
-                                               ghosts=[1, 1, 1]),
-                       d_fine=Discretization([5, 5, n_large]),
-                       method={Remesh: Rmsh_Linear, },
-                       func=func_periodic_Z)
-    assert b, "max(|error|)=" + str(e) + " <= " + str(err)
-
-
-def test_linear_XY():
-    b, e, err = filter(d_coarse=Discretization([n_large, n_large, 5],
-                                               ghosts=[1, 1, 1]),
-                       d_fine=Discretization([2049, 2049, 5]),
-                       func=func_periodic_XY,
-                       method={Remesh: Rmsh_Linear, },
-                       atol=1e-3, rtol=1e-3)
-    assert b, "max(|error|)=" + str(e) + " <= " + str(err)
-
-
-def order_linear():
-    e_old = 1.
-    for i in (128, 256, 512, 1024, 2048):
-        b, e, err = filter(d_coarse=Discretization([i + 1, 5, 5],
-                                                   ghosts=[1, 1, 1]),
-                           d_fine=Discretization([2 * i + 1, 5, 5]),
-                           method={Remesh: Rmsh_Linear, },
-                           func=func_periodic_X)
-        if i > 128:
-            print i, e_old / e
-        e_old = e
-
-
-def test_L21_X():
-    b, e, err = filter(d_coarse=Discretization([n_middle, 5, 5],
-                                               ghosts=[2, 2, 2]),
-                       d_fine=Discretization([n_large, 5, 5]),
-                       method={Remesh: L2_1, },
-                       func=func_periodic_X)
-    print b, e, err
-    assert b, "max(|error|)=" + str(e) + " <= " + str(err)
-
-
-def test_L21_Y():
-    b, e, err = filter(d_coarse=Discretization([5, n_middle, 5],
-                                               ghosts=[2, 2, 2]),
-                       d_fine=Discretization([5, n_large, 5]),
-                       method={Remesh: L2_1, },
-                       func=func_periodic_X)
-    assert b, "max(|error|)=" + str(e) + " <= " + str(err)
-
-
-def test_L21_Z():
-    b, e, err = filter(d_coarse=Discretization([5, 5, n_middle],
-                                               ghosts=[2, 2, 2]),
-                       d_fine=Discretization([5, 5, n_large]),
-                       method={Remesh: L2_1, },
-                       func=func_periodic_X)
-    assert b, "max(|error|)=" + str(e) + " <= " + str(err)
-
-
-def test_L21_XY():
-    b, e, err = filter(d_coarse=Discretization([n_large, n_large, 5],
-                                               ghosts=[2, 2, 2]),
-                       d_fine=Discretization([2049, 2049, 5]),
-                       func=func_periodic_XY,
-                       method={Remesh: L2_1, },
-                       atol=1e-3, rtol=1e-3)
-    assert b, "max(|error|)=" + str(e) + " <= " + str(err)
-
-
-def order_L21():
-    e_old = 1.
-    for i in (128, 256, 512, 1024, 2048):
-        b, e, err = filter(d_coarse=Discretization([i + 1, 5, 5],
-                                                   ghosts=[1, 1, 1]),
-                           d_fine=Discretization([2 * i + 1, 5, 5]),
-                           method={Remesh: Rmsh_Linear, },
-                           func=func_periodic_X)
-        if i > 128:
-            print i, e_old / e
-        e_old = e
-
-
-def func(res, x, y, z, t=0):
-    res[0][...] = np.cos(2. * np.pi * x) * \
-                  np.sin(2. * np.pi * y) * np.cos(4. * np.pi * z)
-    return res
-
-
-def test_filter_linear():
-    """This test compares the GPU linear filter with python implementation"""
-    box = Box(length=L, origin=O)
-    f = Field(box, formula=func, name='f1')
-    d_fine = Discretization([n_middle, n_middle, n_middle])
-    d_coarse = Discretization([257, 257, 257], ghosts=[1, 1, 1])
-    op = MultiresolutionFilter(d_in=d_fine, d_out=d_coarse,
-                               variables={f: d_coarse},
-                               method={Remesh: Rmsh_Linear, })
-    op.discretize()
-    op.setup()
-    topo_coarse = op.discrete_fields[f].topology
-    topo_fine = [t for t in f.discrete_fields.keys()
-                 if t is not topo_coarse][0]
-    f.initialize(topo=topo_fine)
-    f_out = f.discrete_fields[topo_coarse]
-    op.apply(simu)
-    valid = [npw.zeros(f_out[0].shape), ]
-    valid = func(valid, *topo_coarse.mesh.coords)
-    assert np.allclose(valid[0][topo_coarse.mesh.compute_index],
-                       f_out[0][topo_coarse.mesh.compute_index],
-                       atol=1e-4, rtol=1e-3), \
-        np.max(np.abs(valid[0][topo_coarse.mesh.compute_index] -
-                      f_out[0][topo_coarse.mesh.compute_index]))
-
-
-def test_filter_l2_1():
-    """This test compares the GPU linear filter with python implementation"""
-    box = Box(length=L, origin=O)
-    f = Field(box, formula=func, name='f1')
-    d_fine = Discretization([n_middle, n_middle, n_middle])
-    d_coarse = Discretization([257, 257, 257], ghosts=[2, 2, 2])
-    op = MultiresolutionFilter(d_in=d_fine, d_out=d_coarse,
-                               variables={f: d_coarse},
-                               method={Remesh: L2_1, })
-    op.discretize()
-    op.setup()
-    topo_coarse = op.discrete_fields[f].topology
-    topo_fine = [t for t in f.discrete_fields.keys()
-                 if t is not topo_coarse][0]
-    f.initialize(topo=topo_fine)
-    f_out = f.discrete_fields[topo_coarse]
-    op.apply(simu)
-    valid = [npw.zeros(f_out[0].shape), ]
-    valid = func(valid, *topo_coarse.mesh.coords)
-    assert np.allclose(valid[0][topo_coarse.mesh.compute_index],
-                       f_out[0][topo_coarse.mesh.compute_index]), \
-        np.max(np.abs(valid[0][topo_coarse.mesh.compute_index] -
-                      f_out[0][topo_coarse.mesh.compute_index]))
-
diff --git a/hysop/old/operator.old/tests/test_operators.py b/hysop/old/operator.old/tests/test_operators.py
deleted file mode 100644
index e25b61b254a5c33b98473e93e1cefaea37c7a2bd..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/tests/test_operators.py
+++ /dev/null
@@ -1,40 +0,0 @@
-# -*- coding: utf-8 -*-
-"""tests for operators general interface
-"""
-
-from hysop.core.mpi.tests.utils import create_multitask_context, CPU, GPU, OTHER
-from hysop.tools.parameters import Discretization
-from hysop.operator.analytic import Analytic
-from hysop.core.mpi import main_size
-import hysop as pp
-
-r_ng = Discretization([33, ] * 3)
-
-
-def v3d(res, x, y, z, t):
-    res[0][...] = x + t
-    res[1][...] = y
-    res[2][...] = z
-    return res
-
-
-def test_tasks():
-    """
-    test proper tasks assignment
-    """
-    dom, topo = create_multitask_context(dim=3, discr=r_ng)
-    assert topo.task_id() == dom.current_task()
-    velo = pp.Field(domain=dom, name='velocity',
-                    is_vector=True, formula=v3d)
-    op = Analytic(variables={velo: r_ng})
-    op.discretize()
-    op.setup()
-
-    assert op.task_id() == dom.current_task()
-    if main_size == 8:
-        if dom.is_on_task(CPU):
-            assert op.variables[velo].size == 5
-        elif dom.is_on_task(GPU):
-            assert op.variables[velo].size == 2
-        elif dom.is_on_task(OTHER):
-            assert op.variables[velo].size == 1
diff --git a/hysop/old/operator.old/tests/test_particle_advection.py b/hysop/old/operator.old/tests/test_particle_advection.py
deleted file mode 100755
index f63bfc433aae1d647aec7a7cb6ec8f41c4178cd3..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/tests/test_particle_advection.py
+++ /dev/null
@@ -1,212 +0,0 @@
-"""Testing pure python particle advection with null velocity.
-"""
-from hysop.domain.box import Box
-from hysop.fields.continuous_field import Field
-from hysop.operator.advection import Advection
-from hysop.problem.simulation import Simulation
-from hysop.tools.parameters import Discretization
-from hysop.tools.numpywrappers import npw
-import numpy as np
-
-
-d2d = Discretization([17, 17])
-d3d = Discretization([17, 17, 17])
-
-
-
-def setup_2D():
-    box = Box(length=[2., 2.], origin=[-1., -1.])
-    scal = Field(domain=box, name='Scalar')
-    velo = Field(domain=box, name='Velocity',
-                 formula=lambda x, y: (0., 0.), is_vector=True)
-    return scal, velo
-
-
-def setup_vector_2D():
-    box = Box(length=[2., 2.], origin=[-1., -1.])
-    scal = Field(domain=box, name='Vector', is_vector=True)
-    velo = Field(domain=box, name='Velocity',
-                 formula=lambda x, y: (0., 0.), is_vector=True)
-    return scal, velo
-
-
-def setup_list_2D():
-    box = Box(length=[2., 2.], origin=[-1., -1.])
-    scal1 = Field(domain=box, name='Scalar1')
-    scal2 = Field(domain=box, name='Scalar2')
-    velo = Field(domain=box, name='Velocity',
-                 formula=lambda x, y: (0., 0.), is_vector=True)
-    return [scal1, scal2], velo
-
-
-def setup_3d():
-    """Build fields inside a 3d domain
-    """
-    box = Box(length=[2., 4., 1.], origin=[-1., -2., 0.])
-    scal = Field(domain=box, name='Scalar')
-    velo = Field(domain=box, name='Velocity',
-                 formula=lambda x, y, z: (0., 0., 0.), is_vector=True)
-    return scal, velo
-
-
-def setup_vector_3D():
-    box = Box(length=[2., 4., 1.], origin=[-1., -2., 0.])
-    scal = Field(domain=box, name='Scalar', is_vector=True)
-    velo = Field(domain=box, name='Velocity',
-                 formula=lambda x, y, z: (0., 0., 0.), is_vector=True)
-    return scal, velo
-
-
-def setup_list_3D():
-    box = Box(length=[2., 4., 1.], origin=[-1., -2., 0.])
-    scal1 = Field(domain=box, name='Scalar1')
-    scal2 = Field(domain=box, name='Scalar2')
-    velo = Field(domain=box, name='Velocity',
-                 formula=lambda x, y, z: (0., 0., 0.), is_vector=True)
-    return [scal1, scal2], velo
-
-
-def setup_dict_3D():
-    box = Box(length=[2., 4., 1.], origin=[-1., -2., 0.])
-    scal1 = Field(domain=box, name='Scalar1')
-    scal2 = Field(domain=box, name='Scalar2')
-    velo = Field(domain=box, name='Velocity',
-                 formula=lambda x, y, z: (0., 0., 0.), is_vector=True)
-    return {scal1: d3d, scal2: d3d}, velo
-
-
-
-def assertion(scal, advec):
-    advec.discretize()
-    advec.setup()
-    scal_d = scal.discrete_fields.values()[0]
-    scal_d.data[0][...] = npw.asrealarray(
-        np.random.random(scal_d.data[0].shape))
-    scal_init = npw.copy(scal_d.data[0])
-
-    advec.apply(Simulation(start=0., end=0.01, nb_iter=1))
-    return np.allclose(scal_init, scal_d.data[0])
-
-
-def assertion_vector2D(scal, advec):
-    advec.discretize()
-    advec.setup()
-
-    scal_d = scal.discrete_fields.values()[0]
-    scal_d.data[0][...] = npw.asrealarray(
-        np.random.random(scal_d.data[0].shape))
-    scal_d.data[1][...] = npw.asrealarray(
-        np.random.random(scal_d.data[1].shape))
-    scal1_init = npw.copy(scal_d.data[0])
-    scal2_init = npw.copy(scal_d.data[1])
-
-    advec.apply(Simulation(start=0., end=0.01, nb_iter=1))
-    print (np.max(np.abs((scal1_init - scal_d.data[0]))))
-    print (np.max(np.abs((scal2_init - scal_d.data[1]))))
-    return np.allclose(scal1_init, scal_d.data[0]) and \
-        np.allclose(scal2_init, scal_d.data[1])
-
-
-def assertion_vector3D(scal, advec):
-    advec.discretize()
-    advec.setup()
-
-    scal_d = scal.discrete_fields.values()[0]
-    scal_d.data[0][...] = npw.asrealarray(
-        np.random.random(scal_d.data[0].shape))
-    scal_d.data[1][...] = npw.asrealarray(np.random.random(
-        scal_d.data[1].shape))
-    scal_d.data[2][...] = npw.asrealarray(
-        np.random.random(scal_d.data[2].shape))
-    scal1_init = npw.copy(scal_d.data[0])
-    scal2_init = npw.copy(scal_d.data[1])
-    scal3_init = npw.copy(scal_d.data[2])
-
-    advec.apply(Simulation(start=0., end=0.01, nb_iter=1))
-    return np.allclose(scal1_init, scal_d.data[0]) and \
-        np.allclose(scal2_init, scal_d.data[1]) and \
-        np.allclose(scal3_init, scal_d.data[2])
-
-
-def assertion_list(scal, advec):
-    advec.discretize()
-    advec.setup()
-
-    scal1_d = scal[0].discrete_fields.values()[0]
-    scal2_d = scal[1].discrete_fields.values()[0]
-    scal1_d.data[0][...] = npw.asrealarray(
-        np.random.random(scal1_d.data[0].shape))
-    scal2_d.data[0][...] = npw.asrealarray(
-        np.random.random(scal2_d.data[0].shape))
-    scal1_init = npw.copy(scal1_d.data[0])
-    scal2_init = npw.copy(scal2_d.data[0])
-
-    advec.apply(Simulation(start=0., end=0.01, nb_iter=1))
-    print (scal1_init, scal1_d.data[0])
-    print (scal2_init, scal2_d.data[0])
-    return np.allclose(scal1_init, scal1_d.data[0]) and \
-        np.allclose(scal2_init, scal2_d.data[0])
-
-
-def test_nullVelocity_2D():
-    """2D case, advection with null velocity, single resolution.
-    """
-    scal, velo = setup_2D()
-
-    advec = Advection(velo, scal, discretization=d2d)
-
-    assert assertion(scal, advec)
-
-
-def test_nullVelocity_vector_2D():
-    """
-    """
-    scal, velo = setup_vector_2D()
-
-    advec = Advection(velo, scal, discretization=d2d)
-    assert assertion_vector2D(scal, advec)
-
-
-def test_nullVelocity_list_2D():
-    """
-    """
-    scal, velo = setup_list_2D()
-
-    advec = Advection(velo, scal, discretization=d2d)
-    assert assertion_list(scal, advec)
-
-
-def test_nullVelocity_3D():
-    """
-    """
-    scal, velo = setup_3d()
-
-    advec = Advection(velo, scal, discretization=d3d)
-    assert assertion(scal, advec)
-
-
-def test_nullVelocity_vector_3D():
-    """
-    """
-    scal, velo = setup_vector_3D()
-    advec = Advection(velo, scal, discretization=d3d)
-
-    assert assertion_vector3D(scal, advec)
-
-
-def test_nullVelocity_list_3D():
-    """
-    """
-    scal, velo = setup_list_3D()
-
-    advec = Advection(velo, scal, discretization=d3d)
-
-    assert assertion_list(scal, advec)
-
-
-def test_nullVelocity_dict_3D():
-    scal, velo = setup_dict_3D()
-
-    advec = Advection(velocity=velo, variables=scal, discretization=d3d)
-
-    assert assertion_list(scal.keys(), advec)
diff --git a/hysop/old/operator.old/tests/test_penalization.py b/hysop/old/operator.old/tests/test_penalization.py
deleted file mode 100755
index a66a409115580e1786fcd0830048616868dc58f7..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/tests/test_penalization.py
+++ /dev/null
@@ -1,370 +0,0 @@
-# -*- coding: utf-8 -*-
-from hysop.domain.subsets import HemiSphere, Sphere, Cylinder
-from hysop.domain.porous import Porous
-from hysop.operator.penalization import Penalization, PenalizeVorticity
-from hysop.problem.simulation import Simulation
-from hysop.tools.parameters import Discretization
-from hysop.tools.io_utils import IOParams
-from hysop.topology.cartesian_topology import CartesianTopology
-from hysop.tools.numpywrappers import npw
-import numpy as np
-import os
-from hysop import Field, Box
-from hysop.operator.hdf_io import HDF_Reader
-from hysop.domain.subsets import SubBox
-
-
-def v2d(res, x, y, t):
-    res[0][...] = 1.
-    res[1][...] = 1.
-    return res
-
-
-def s2d(res, x, y, t):
-    res[0][...] = 1.
-    return res
-
-
-def v3d(res, x, y, z, t):
-    res[0][...] = 1.
-    res[1][...] = 1.
-    res[2][...] = 1.
-    return res
-
-
-def s3d(res, x, y, z, t):
-    res[0][...] = 1.
-    return res
-
-
-def v2dw(res, x, y, t):
-    res[0][...] = np.cos(x) + np.sin(y)
-    res[1][...] = np.sin(x) + np.cos(y)
-    return res
-
-
-def v3dw(res, x, y, z, t):
-    res[0][...] = np.cos(x) + np.sin(y) + np.cos(z)
-    res[1][...] = np.sin(x) + np.cos(y)
-    res[2][...] = np.cos(z) + np.sin(y) + np.cos(x)
-    return res
-
-
-Nx = 128
-Ny = 96
-Nz = 102
-g = 2
-
-
-ldef = npw.asrealarray([0.3, 0.4, 1.0])
-discr3D = Discretization([Nx + 1, Ny + 1, Nz + 1], [g - 1, g - 2, g])
-discr2D = Discretization([Nx + 1, Ny + 1], [g - 1, g])
-xdom = npw.asrealarray([0.1, -0.3, 0.5])
-import math
-ldom = npw.asrealarray([math.pi * 2., ] * 3)
-xdef = npw.asrealarray(xdom + 0.2)
-xpos = npw.asrealarray(ldom * 0.5)
-xpos[-1] += 0.1
-working_dir = os.getcwd() + '/'
-
-
-def init(discr, fileref):
-    CartesianTopology.reset_counter()
-    dim = len(discr.resolution)
-    dom = Box(dimension=dim, length=ldom[:dim],
-              origin=xdom[:dim])
-    topo = dom.create_topology(discr)
-    scalref = Field(domain=topo.domain, name='scalref')
-    #    scalRef.hdf_load(topo, iop, restart=0)
-    veloref = Field(domain=topo.domain, name='veloref', is_vector=True)
-    # Read a reference file
-    iop = IOParams(working_dir + fileref)
-    reader = HDF_Reader(variables={scalref: topo, veloref: topo},
-                        io_params=iop, restart=0)
-    reader.discretize()
-    reader.setup()
-    reader.apply()
-    reader.finalize()
-    sdref = scalref.discretize(topo)
-    vdref = veloref.discretize(topo)
-    return topo, sdref, vdref
-
-
-def check_penal(penal, sref, vref, scal, velo):
-    penal.discretize()
-    penal.setup()
-    topo = penal.variables[scal]
-    scal.initialize(topo=topo)
-    velo.initialize(topo=topo)
-    vd = velo.discretize(topo)
-    sd = scal.discretize(topo)
-    simu = Simulation(nb_iter=3)
-    penal.apply(simu)
-    ind = topo.mesh.compute_index
-    assert np.allclose(sd.data[0][ind], sref.data[0][ind])
-    for d in xrange(vd.nb_components):
-        assert np.allclose(vd.data[d][ind], vref.data[d][ind])
-
-
-def test_penal_2d():
-    """
-    Penalization in 2D, obstacles = semi-cylinder (disk indeed ...)
-    and a plate, fields=scalar and vector.
-    """
-    topo, sref, vref = init(discr2D, 'penal2d_sphere')
-    # Obstacles
-    rd = ldom[0] * 0.3
-    # Fields to penalize
-    scal = Field(domain=topo.domain, formula=s2d, name='Scalar')
-    velo = Field(domain=topo.domain, formula=v2d, name='Velo', is_vector=True)
-    hsphere = HemiSphere(parent=topo.domain, origin=xpos[:2], radius=rd)
-    penal = Penalization(variables=[scal, velo], discretization=topo,
-                         obstacles=[hsphere], coeff=1e6)
-    hsphere.discretize(topo)
-    check_penal(penal, sref, vref, scal, velo)
-
-
-def test_penal_2d_multi():
-    """
-    Penalization in 2D, for several different obstacles
-    """
-    topo, sref, vref = init(discr2D, 'penal2d_multi')
-    # Obstacles
-    rd = ldom[0] * 0.1
-    # Fields to penalize
-    scal = Field(domain=topo.domain, formula=s2d, name='Scalar')
-    velo = Field(domain=topo.domain, formula=v2d, name='Velo', is_vector=True)
-    hsphere = Sphere(parent=topo.domain, origin=xpos[:2], radius=rd)
-    newpos = list(xpos)
-    newpos[1] += 1.
-    hsphere2 = HemiSphere(parent=topo.domain, origin=newpos[:2],
-                          radius=rd + 0.3)
-    ll = topo.domain.length.copy()
-    ll[1] = 0.
-    downplane = SubBox(parent=topo.domain, origin=topo.domain.origin,
-                       length=ll)
-    penal = Penalization(variables=[scal, velo], discretization=topo,
-                         obstacles=[hsphere, downplane, hsphere2], coeff=1e6)
-    check_penal(penal, sref, vref, scal, velo)
-
-
-def test_penal_3d():
-    """
-    Penalization in 3D, obstacles = semi-cylinder (disk indeed ...)
-    and a plate, fields=scalar and vector.
-    """
-    topo, sref, vref = init(discr3D, 'penal3d_sphere')
-    # Obstacles
-    rd = ldom[0] * 0.3
-    # Fields to penalize
-    scal = Field(domain=topo.domain, formula=s3d, name='Scalar')
-    velo = Field(domain=topo.domain, formula=v3d, name='Velo', is_vector=True)
-    hsphere = HemiSphere(parent=topo.domain, origin=xpos, radius=rd)
-    penal = Penalization(variables=[scal, velo], discretization=topo,
-                         obstacles=[hsphere], coeff=1e6)
-    check_penal(penal, sref, vref, scal, velo)
-
-
-def test_penal_3d_multi():
-    """
-    Penalization in 3D, for several different obstacles
-    """
-    topo, sref, vref = init(discr3D, 'penal3d_multi')
-    # Obstacles
-    rd = ldom[0] * 0.1
-    # Fields to penalize
-    scal = Field(domain=topo.domain, formula=s3d, name='Scalar')
-    velo = Field(domain=topo.domain, formula=v3d, name='Velo', is_vector=True)
-    hsphere = Sphere(parent=topo.domain, origin=xpos, radius=rd)
-    newpos = list(xpos)
-    newpos[1] += 1.
-    hsphere2 = HemiSphere(parent=topo.domain, origin=newpos,
-                          radius=rd + 0.3)
-    ll = topo.domain.length.copy()
-    ll[1] = 0.
-    downplane = SubBox(parent=topo.domain, origin=topo.domain.origin,
-                       length=ll)
-    penal = Penalization(variables=[scal, velo], discretization=topo,
-                         obstacles=[hsphere, hsphere2, downplane], coeff=1e6)
-    check_penal(penal, sref, vref, scal, velo)
-
-
-def test_penal_3d_porous():
-    """
-    Penalization in 3D, with porous obstacles
-    """
-    topo, sref, vref = init(discr3D, 'penal3d_porous')
-    # Fields to penalize
-    scal = Field(domain=topo.domain, formula=s3d, name='Scalar')
-    velo = Field(domain=topo.domain, formula=v3d, name='Velo', is_vector=True)
-    newpos = list(xpos)
-    newpos[1] += 1.
-    psphere = Porous(parent=topo.domain, origin=newpos,
-                     source=Sphere, layers=[0.5, 0.7, 0.3])
-    ll = topo.domain.length.copy()
-    ll[1] = 0.
-    downplane = SubBox(parent=topo.domain, origin=topo.domain.origin,
-                       length=ll)
-    penal = Penalization(variables=[scal, velo], discretization=topo,
-                         obstacles={psphere: [1e6, 1e2, 1e1], downplane: 1e7})
-    check_penal(penal, sref, vref, scal, velo)
-
-
-def test_penal_3d_porous_cyl():
-    """
-    Penalization in 3D, with porous obstacles
-    """
-    topo, sref, vref = init(discr3D, 'penal3d_porous_cyl')
-    # Fields to penalize
-    scal = Field(domain=topo.domain, formula=s3d, name='Scalar')
-    velo = Field(domain=topo.domain, formula=v3d, name='Velo', is_vector=True)
-    newpos = list(xpos)
-    newpos[1] += 1.
-    pcyl = Porous(parent=topo.domain, origin=newpos,
-                  source=Cylinder, layers=[0.5, 0.7, 0.3])
-    ll = topo.domain.length.copy()
-    ll[1] = 0.
-    downplane = SubBox(parent=topo.domain, origin=topo.domain.origin,
-                       length=ll)
-    penal = Penalization(variables=[scal, velo], discretization=topo,
-                         obstacles={pcyl: [1e6, 0.1, 1e6], downplane: 1e7})
-    check_penal(penal, sref, vref, scal, velo)
-
-
-def test_penal_2d_porous():
-    """
-    Penalization in 2D, with porous obstacles
-    """
-    topo, sref, vref = init(discr2D, 'penal2d_porous')
-    # Fields to penalize
-    scal = Field(domain=topo.domain, formula=s2d, name='Scalar')
-    velo = Field(domain=topo.domain, formula=v2d, name='Velo', is_vector=True)
-    newpos = list(xpos)
-    newpos[1] += 1.
-    psphere = Porous(parent=topo.domain, origin=newpos[:2],
-                     source=Sphere, layers=[0.5, 0.7, 0.3])
-    ll = topo.domain.length.copy()
-    ll[1] = 0.
-    downplane = SubBox(parent=topo.domain, origin=topo.domain.origin,
-                       length=ll)
-    penal = Penalization(variables=[scal, velo], discretization=topo,
-                         obstacles={psphere: [1e6, 1e2, 1e1], downplane: 1e7})
-    check_penal(penal, sref, vref, scal, velo)
-
-
-def init_vort(discr, fileref):
-    CartesianTopology.reset_counter()
-    dim = len(discr.resolution)
-    dom = Box(dimension=dim, length=ldom[:dim],
-              origin=xdom[:dim])
-    topo = dom.create_topology(discr)
-    wref = Field(domain=topo.domain, name='vortiref', is_vector=dim == 3)
-    #    scalRef.hdf_load(topo, iop, restart=0)
-    veloref = Field(domain=topo.domain, name='veloref', is_vector=True)
-    # Read a reference file
-    iop = IOParams(working_dir + fileref)
-    reader = HDF_Reader(variables={wref: topo, veloref: topo},
-                        io_params=iop, restart=0)
-    reader.discretize()
-    reader.setup()
-    reader.apply()
-    reader.finalize()
-    wdref = wref.discretize(topo)
-    vdref = veloref.discretize(topo)
-    return topo, wdref, vdref
-
-
-def check_penal_vort(penal, wref, vref, vorti, velo):
-    penal.discretize()
-    penal.setup()
-    topo = penal.variables[vorti]
-    vorti.initialize(topo=topo)
-    velo.initialize(topo=topo)
-    vd = velo.discretize(topo)
-    wd = vorti.discretize(topo)
-    ind = topo.mesh.compute_index
-
-    simu = Simulation(nb_iter=200)
-    penal.apply(simu)
-    for d in xrange(vd.nb_components):
-        assert np.allclose(vd.data[d][ind], vref.data[d][ind])
-    for d in xrange(wd.nb_components):
-        assert np.allclose(wd.data[d][ind], wref.data[d][ind])
-
-
-def test_penal_vort_2d():
-    """
-    Penalization + Curl in 2D, obstacles = semi-cylinder (disk indeed ...)
-    and a plate, fields=scalar and vector.
-    """
-    d2d = Discretization([Nx + 1, Ny + 1], [g, g])
-    topo, wref, vref = init_vort(d2d, 'penal_vort_2d_sphere')
-    # Obstacles
-    rd = ldom[0] * 0.3
-    # Fields to penalize
-    vorti = Field(domain=topo.domain, formula=s2d, name='Vorti')
-    velo = Field(domain=topo.domain, formula=v2dw, name='Velo', is_vector=True)
-    hsphere = HemiSphere(parent=topo.domain, origin=xpos[:2], radius=rd)
-    penal = PenalizeVorticity(velocity=velo, vorticity=vorti,
-                              discretization=topo,
-                              obstacles=[hsphere], coeff=1e8)
-    #hsphere.discretize(topo)
-    check_penal_vort(penal, wref, vref, vorti, velo)
-
-
-def test_penal_vort_3d():
-    """
-    Penalization in 3D, obstacles = semi-cylinder
-    and a plate, fields=scalar and vector.
-    """
-    d3d = Discretization([Nx + 1, Ny + 1, Nz + 1], [g, g, g])
-    topo, wref, vref = init_vort(d3d, 'penal_vort_3d_sphere')
-    # Obstacles
-    rd = ldom[0] * 0.3
-    # Fields to penalize
-    vorti = Field(domain=topo.domain, formula=v3d, name='Vorti',
-                  is_vector=True)
-    velo = Field(domain=topo.domain, formula=v3dw, name='Velo', is_vector=True)
-    hsphere = HemiSphere(parent=topo.domain, origin=xpos, radius=rd)
-    penal = PenalizeVorticity(velocity=velo, vorticity=vorti,
-                              discretization=topo,
-                              obstacles=[hsphere], coeff=1e8)
-    check_penal_vort(penal, wref, vref, vorti, velo)
-
-
-def test_penal_vort_multi_2d():
-    """
-    Penalization in 3D, obstacles = semi-cylinder
-    and a plate, fields=scalar and vector.
-    """
-    d2d = Discretization([Nx + 1, Ny + 1], [g, g])
-    topo, wref, vref = init_vort(d2d, 'penal_vort_2d_multi_sphere')
-    # Fields to penalize
-    vorti = Field(domain=topo.domain, formula=s2d, name='Vorti')
-    velo = Field(domain=topo.domain, formula=v2dw, name='Velo', is_vector=True)
-    hsphere = Porous(parent=topo.domain, source=HemiSphere,
-                     origin=xpos[:2], layers=[0.5, 1.1, 1.])
-    penal = PenalizeVorticity(velocity=velo, vorticity=vorti,
-                              discretization=topo,
-                              obstacles={hsphere: [1, 10, 1e8]})
-    check_penal_vort(penal, wref, vref, vorti, velo)
-
-
-def test_penal_vort_multi_3d():
-    """
-    Penalization in 3D, obstacles = semi-cylinder
-    and a plate, fields=scalar and vector.
-    """
-    d3d = Discretization([Nx + 1, Ny + 1, Nz + 1], [g, g, g])
-    topo, wref, vref = init_vort(d3d, 'penal_vort_3d_multi_sphere')
-    # Fields to penalize
-    vorti = Field(domain=topo.domain, formula=v3d, name='Vorti',
-                  is_vector=True)
-    velo = Field(domain=topo.domain, formula=v3dw, name='Velo', is_vector=True)
-    hsphere = Porous(parent=topo.domain, source=HemiSphere,
-                     origin=xpos, layers=[0.5, 1.1, 1.])
-    penal = PenalizeVorticity(velocity=velo, vorticity=vorti,
-                              discretization=topo,
-                              obstacles={hsphere: [1, 10, 1e8]})
-    check_penal_vort(penal, wref, vref, vorti, velo)
-
diff --git a/hysop/old/operator.old/tests/test_poisson.py b/hysop/old/operator.old/tests/test_poisson.py
deleted file mode 100755
index 6bd2718b8b49b5cf87c9407adbb3b75d955db9a7..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/tests/test_poisson.py
+++ /dev/null
@@ -1,308 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import hysop as pp
-from hysop.operator.poisson import Poisson
-from hysop.operator.analytic import Analytic
-from hysop.operator.reprojection import Reprojection
-from hysop.problem.simulation import Simulation
-from hysop.tools.parameters import Discretization
-from hysop.methods import SpaceDiscretization, \
-    GhostUpdate, Formulation
-import numpy as np
-from hysop.tools.numpywrappers import npw
-import math
-from hysop.domain.subsets import SubBox
-from hysop import testsenv
-
-
-pi = math.pi
-sin = np.sin
-cos = np.cos
-
-## Physical Domain description
-dim = 3
-LL = 2 * pi * npw.ones((dim))
-# formula to compute initial vorticity field
-coeff = 4 * pi ** 2 * (LL[1] ** 2 * LL[2] ** 2 + LL[0] ** 2 * LL[2] ** 2 +
-                       LL[0] ** 2 * LL[1] ** 2) / (LL[0] ** 2 * LL[1] ** 2
-                                                   * LL[2] ** 2)
-cc = 2 * pi / LL
-d3D = Discretization([33, 257, 257])
-d2D = Discretization([33, 33])
-uinf = 1.0
-
-
-def computeVort(res, x, y, z, t):
-    res[0][...] = coeff * sin(x * cc[0]) * sin(y * cc[1]) * cos(z * cc[2])
-    res[1][...] = coeff * cos(x * cc[0]) * sin(y * cc[1]) * sin(z * cc[2])
-    res[2][...] = coeff * cos(x * cc[0]) * cos(y * cc[1]) * sin(z * cc[2])
-    return res
-
-def computePressure(res, x, y, z, t):
-    res[0][...] = -3.0 * sin(x * cc[0]) * cos(y * cc[1]) * cos(z * cc[2])
-    return res
-
-def computeRefPressure(res, x, y, z, t):
-    res[0][...] = sin(x * cc[0]) * cos(y * cc[1]) * cos(z * cc[2])
-    return res
-
-
-# ref. field
-def computeRef(res, x, y, z, t):
-    res[0][...] = -2. * pi / LL[1] * \
-        (cos(x * cc[0]) * sin(y * cc[1]) * sin(z * cc[2])) \
-        - 2. * pi / LL[2] * (cos(x * cc[0]) * sin(y * cc[1]) * cos(z * cc[2]))
-
-    res[1][...] = -2. * pi / LL[2] * \
-        (sin(x * cc[0]) * sin(y * cc[1]) * sin(z * cc[2])) \
-        + 2. * pi / LL[0] * (sin(x * cc[0]) * cos(y * cc[1]) * sin(z * cc[2]))
-
-    res[2][...] = -2. * pi / LL[0] * \
-        (sin(x * cc[0]) * sin(y * cc[1]) * sin(z * cc[2])) \
-        - 2. * pi / LL[1] * (sin(x * cc[0]) * cos(y * cc[1]) * cos(z * cc[2]))
-
-    return res
-
-
-# ref. field
-def computeRef_with_correction(res, x, y, z, t):
-    res[0][...] = -2. * pi / LL[1] * \
-        (cos(x * cc[0]) * sin(y * cc[1]) * sin(z * cc[2])) \
-        - 2. * pi / LL[2] * (cos(x * cc[0]) * sin(y * cc[1]) * cos(z * cc[2]))\
-        + uinf
-
-    res[1][...] = -2. * pi / LL[2] * \
-        (sin(x * cc[0]) * sin(y * cc[1]) * sin(z * cc[2])) \
-        + 2. * pi / LL[0] * (sin(x * cc[0]) * cos(y * cc[1]) * sin(z * cc[2]))
-
-    res[2][...] = -2. * pi / LL[0] * \
-        (sin(x * cc[0]) * sin(y * cc[1]) * sin(z * cc[2])) \
-        - 2. * pi / LL[1] * (sin(x * cc[0]) * cos(y * cc[1]) * cos(z * cc[2]))
-
-    return res
-
-
-def computeVort2D(res, x, y, t):
-    # todo ...
-    res[0][...] = 4 * pi ** 2 * (cos(x * cc[0]) * sin(y * cc[1])) * \
-        (1. / LL[0] ** 2 + 1. / LL[1] ** 2)
-    return res
-
-
-# ref. field
-def computeRef2D(res, x, y, t):
-    res[0][...] = 2. * pi / LL[1] * (cos(x * cc[0]) * cos(y * cc[1]))
-    res[1][...] = 2. * pi / LL[0] * (sin(x * cc[0]) * sin(y * cc[1]))
-
-    return res
-
-@testsenv.fftw_failed
-def test_Poisson_Pressure_3D():
-    dom = pp.Box(length=LL)
-
-    # Fields
-    ref = pp.Field(domain=dom, name='Ref')
-    pressure = pp.Field(domain=dom, formula=computePressure, name='Pressure')
-
-    # Definition of the Poisson operator
-    poisson = Poisson(pressure, pressure, discretization=d3D,
-                      method={SpaceDiscretization: 'fftw',
-                              GhostUpdate: True,
-                              Formulation: 'pressure'})
-
-    poisson.discretize()
-    poisson.setup()
-    topo = poisson.discrete_fields[pressure].topology
-    # Analytic operator to compute the reference field
-    refOp = Analytic(variables={ref: topo}, formula=computeRefPressure)
-    simu = Simulation(nb_iter=10)
-    refOp.discretize()
-    refOp.setup()
-    pressure.initialize(topo=topo)
-    poisson.apply(simu)
-    refOp.apply(simu)
-    assert np.allclose(ref.norm(topo), pressure.norm(topo))
-    refD = ref.discretize(topo)
-    prsD = pressure.discretize(topo)
-    assert np.allclose(prsD[0], refD[0])
-    poisson.finalize()
-
-
-@testsenv.fftw_failed
-def test_Poisson3D():
-    dom = pp.Box(length=LL)
-
-    # Fields
-    velocity = pp.Field(domain=dom, is_vector=True, name='Velocity')
-    vorticity = pp.Field(domain=dom, formula=computeVort,
-                         name='Vorticity', is_vector=True)
-
-    # Definition of the Poisson operator
-    poisson = Poisson(velocity, vorticity, discretization=d3D)
-
-    poisson.discretize()
-    poisson.setup()
-    topo = poisson.discrete_fields[vorticity].topology
-    # Analytic operator to compute the reference field
-    ref = pp.Field(domain=dom, name='reference', is_vector=True)
-    refOp = Analytic(variables={ref: topo}, formula=computeRef)
-    simu = Simulation(nb_iter=10)
-    refOp.discretize()
-    refOp.setup()
-    vorticity.initialize(topo=topo)
-    poisson.apply(simu)
-    refOp.apply(simu)
-    assert np.allclose(ref.norm(topo), velocity.norm(topo))
-    refD = ref.discretize(topo)
-    vd = velocity.discretize(topo)
-    for i in range(dom.dimension):
-        assert np.allclose(vd[i], refD[i])
-    poisson.finalize()
-
-
-@testsenv.fftw_failed
-def test_Poisson2D():
-    dom = pp.Box(length=[2. * pi, 2. * pi], origin=[0., 0.])
-
-    # Fields
-    velocity = pp.Field(domain=dom, is_vector=True, name='Velocity')
-    vorticity = pp.Field(domain=dom, formula=computeVort2D, name='Vorticity')
-
-    # Definition of the Poisson operator
-    poisson = Poisson(velocity, vorticity, discretization=d2D)
-
-    poisson.discretize()
-    poisson.setup()
-    topo = poisson.discrete_fields[vorticity].topology
-    # Analytic operator to compute the reference field
-    ref = pp.Field(domain=dom, name='reference', is_vector=True)
-    refOp = Analytic(variables={ref: topo}, formula=computeRef2D)
-    simu = Simulation(nb_iter=10)
-    refOp.discretize()
-    refOp.setup()
-    vorticity.initialize(topo=topo)
-    poisson.apply(simu)
-    refOp.apply(simu)
-
-    assert np.allclose(ref.norm(topo), velocity.norm(topo))
-    refD = ref.discretize(topo)
-    vd = velocity.discretize(topo)
-
-    assert np.allclose(ref.norm(topo), velocity.norm(topo))
-    for i in range(dom.dimension):
-        assert np.allclose(vd[i], refD[i])
-    poisson.finalize()
-
-
-@testsenv.fftw_failed
-def test_Poisson3D_correction():
-    dom = pp.Box(length=LL)
-
-    # Fields
-    velocity = pp.Field(domain=dom, is_vector=True, name='Velocity')
-    vorticity = pp.Field(domain=dom, formula=computeVort,
-                         name='Vorticity', is_vector=True)
-
-    # Definition of the Poisson operator
-    ref_rate = npw.zeros(3)
-    ref_rate[0] = uinf * LL[1] * LL[2]
-    rate = pp.VariableParameter(data=ref_rate)
-    poisson = Poisson(velocity, vorticity, discretization=d3D, flowrate=rate)
-
-    poisson.discretize()
-    poisson.setup()
-    topo = poisson.discrete_fields[vorticity].topology
-    # Analytic operator to compute the reference field
-    ref = pp.Field(domain=dom, name='reference', is_vector=True)
-    refOp = Analytic(variables={ref: topo}, formula=computeRef_with_correction)
-    simu = Simulation(nb_iter=10)
-    refOp.discretize()
-    refOp.setup()
-    vorticity.initialize(topo=topo)
-
-    poisson.apply(simu)
-    refOp.apply(simu)
-    refD = ref.discretize(topo)
-    vd = velocity.discretize(topo)
-    surf = SubBox(parent=dom, origin=dom.origin,
-                  length=[0., LL[1], LL[2]])
-    surf.discretize(topo)
-    assert np.allclose(ref.norm(topo), velocity.norm(topo))
-    assert np.allclose(ref.norm(topo), velocity.norm(topo))
-    for i in range(dom.dimension):
-        assert np.allclose(vd[i], refD[i])
-    poisson.finalize()
-
-
-@testsenv.fftw_failed
-def test_Poisson3D_projection_1():
-    dom = pp.Box(length=LL)
-
-    # Fields
-    velocity = pp.Field(domain=dom, is_vector=True, name='Velocity')
-    vorticity = pp.Field(domain=dom, formula=computeVort,
-                         name='Vorticity', is_vector=True)
-
-    # Definition of the Poisson operator
-    poisson = Poisson(velocity, vorticity, discretization=d3D, projection=4)
-
-    poisson.discretize()
-    poisson.setup()
-    topo = poisson.discrete_fields[vorticity].topology
-    # Analytic operator to compute the reference field
-    ref = pp.Field(domain=dom, name='reference', is_vector=True)
-    refOp = Analytic(variables={ref: topo}, formula=computeRef)
-    simu = Simulation(nb_iter=10)
-    refOp.discretize()
-    refOp.setup()
-    vorticity.initialize(topo=topo)
-    poisson.apply(simu)
-    refOp.apply(simu)
-    assert np.allclose(ref.norm(topo), velocity.norm(topo))
-    refD = ref.discretize(topo)
-    vd = velocity.discretize(topo)
-
-    assert np.allclose(ref.norm(topo), velocity.norm(topo))
-    for i in range(dom.dimension):
-        assert np.allclose(vd[i], refD[i])
-
-    poisson.finalize()
-
-
-@testsenv.fftw_failed
-def test_Poisson3D_projection_2():
-    dom = pp.Box(length=LL)
-
-    # Fields
-    velocity = pp.Field(domain=dom, is_vector=True, name='Velocity')
-    vorticity = pp.Field(domain=dom, formula=computeVort,
-                         name='Vorticity', is_vector=True)
-    d3dG = Discretization([33, 33, 33], [2, 2, 2])
-    # Definition of the Poisson operator
-    proj = Reprojection(vorticity, threshold=0.05, frequency=4,
-                        discretization=d3dG, io_params=True)
-
-    poisson = Poisson(velocity, vorticity, discretization=d3D,
-                      projection=proj)
-    proj.discretize()
-    poisson.discretize()
-    poisson.setup()
-    proj.setup()
-    topo = poisson.discrete_fields[vorticity].topology
-    # Analytic operator to compute the reference field
-    ref = pp.Field(domain=dom, name='reference', is_vector=True)
-    refOp = Analytic(variables={ref: topo}, formula=computeRef)
-    simu = Simulation(nb_iter=10)
-    refOp.discretize()
-    refOp.setup()
-    vorticity.initialize(topo=topo)
-    poisson.apply(simu)
-    refOp.apply(simu)
-    assert np.allclose(ref.norm(topo), velocity.norm(topo))
-    refD = ref.discretize(topo)
-    vd = velocity.discretize(topo)
-
-    assert np.allclose(ref.norm(topo), velocity.norm(topo))
-    for i in range(dom.dimension):
-        assert np.allclose(vd[i], refD[i])
-    poisson.finalize()
diff --git a/hysop/old/operator.old/tests/test_redistribute.py b/hysop/old/operator.old/tests/test_redistribute.py
deleted file mode 100755
index 2bd74cab090138a56e3d10636ddb2288b95aa7de..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/tests/test_redistribute.py
+++ /dev/null
@@ -1,675 +0,0 @@
-from hysop.operator.redistribute import RedistributeIntra,\
-    RedistributeInter, RedistributeOverlap
-from hysop.tools.parameters import Discretization, MPIParams
-from hysop import testsenv
-import hysop as pp
-import numpy as np
-import math
-from hysop import Simulation
-from hysop.operator.analytic import Analytic
-from hysop.core.mpi import main_comm, main_size
-from hysop.core.mpi.tests.utils import create_inter_topos, CPU, GPU, OTHER,\
-    create_multitask_context
-from hysop.fields.tests.func_for_tests import v3d, v2d, v3dbis
-from hysop.operators import EnergyEnstrophy
-from hysop.core.mpi.tests.test_bridge import create_subtopos
-sin = np.sin
-pi = math.pi
-
-
-dim3 = 3
-# Global discretization, no ghosts
-r_ng = Discretization([33, ] * dim3)
-# Global discretization, with ghosts
-r_wg = Discretization([33, ] * dim3, [0, 1, 2])
-# Another global discretization, for failed tests.
-r_failed = Discretization([27, ] * dim3, [0, 1, 2])
-Lx = pi
-dom = pp.Box(length=[Lx] * dim3, origin=[0.] * dim3)
-
-
-def init_3d(domain):
-    # Domain and variables
-    fields = {}
-    fields['velocity'] = pp.Field(domain=domain, name='velocity',
-                                  is_vector=True, formula=v3d)
-    fields['vorticity'] = pp.Field(domain=domain, name='vorticity',
-                                   is_vector=True, formula=v3d)
-    fields['scal'] = pp.Field(domain=domain, name='scal')
-    simu = Simulation(nb_iter=3)
-    simu.initialize()
-    return fields, simu
-
-
-def test_distribute_intra_1():
-    """
-    redistribute data, intra comm, from one
-    topology to another
-    """
-    fields, simu = init_3d(dom)
-    velocity = fields['velocity']
-    vorticity = fields['vorticity']
-    # create the topologies
-    # 1D topo, no ghost
-    plane_topo = dom.create_topology(discretization=r_ng,
-                                     cutdir=(False, False, True))
-    # 3D topo, ghosts
-    default_topo = dom.create_topology(discretization=r_wg)
-    # Redistribute operator
-    red = RedistributeIntra(source=plane_topo, target=default_topo,
-                            variables=[velocity])
-    red.setup()
-    # Initialize fields only on plane_topo
-    velocity.discretize(topo=plane_topo)
-    velocity.initialize(topo=plane_topo)
-    # Initialize vorticity on default_topo, used as reference
-    wd = vorticity.discretize(topo=default_topo)
-    vorticity.initialize(topo=default_topo)
-    vnorm = velocity.norm(plane_topo)
-    wnorm = vorticity.norm(default_topo)
-    assert velocity in red.variables
-    assert (vnorm > 0).all()
-    assert np.allclose(velocity.norm(default_topo), 0)
-    assert np.allclose(wnorm, vnorm)
-    red.apply()
-    red.wait()
-    vd_2 = velocity.discretize(default_topo)
-    ind_ng = default_topo.mesh.compute_index
-    for d in xrange(velocity.nb_components):
-        assert np.allclose(vd_2.data[d][ind_ng], wd.data[d][ind_ng])
-
-
-@testsenv.hysop_failed
-def test_distribute_intra_fail_1():
-    """
-    redistribute data, intra comm, from one
-    topology to another, no variables -> must fail
-    """
-    # create the topologies
-    # 1D topo, no ghost
-    plane_topo = dom.create_topology(discretization=r_ng,
-                                     cutdir=(False, False, True))
-    # 3D topo, ghosts
-    default_topo = dom.create_topology(discretization=r_wg)
-    # Redistribute operator
-    RedistributeIntra(source=plane_topo, target=default_topo)
-
-
-def test_distribute_intra_2():
-    """
-    redistribute data, intra comm, from a
-    topology to an operator
-    """
-    fields, simu = init_3d(dom)
-    velocity = fields['velocity']
-    vorticity = fields['vorticity']
-    # create the topologies
-    # 1D topo, no ghost
-    plane_topo = dom.create_topology(discretization=r_ng,
-                                     cutdir=(False, False, True))
-
-    # Create an operator, that will work on a 3D topo.
-    op = Analytic(variables={velocity: r_wg})
-    op.discretize()
-    op.setup()
-
-    # Redistribute operator
-    red = RedistributeIntra(source=plane_topo, target=op,
-                            variables=[velocity])
-    red.setup()
-    # Initialize fields only on plane_topo
-    velocity.discretize(topo=plane_topo)
-    velocity.initialize(topo=plane_topo)
-
-    # Initialize vorticity on default_topo, used as reference
-    default_topo = op.variables[velocity]
-    vorticity.discretize(topo=default_topo)
-    vorticity.initialize(topo=default_topo)
-    vnorm = velocity.norm(plane_topo)
-    wnorm = vorticity.norm(default_topo)
-    assert velocity in red.variables
-    assert (vnorm > 0).all()
-    assert np.allclose(velocity.norm(default_topo), 0)
-    assert np.allclose(wnorm, vnorm)
-    red.apply()
-    red.wait()
-    vd_2 = velocity.discretize(default_topo)
-    ind_ng = default_topo.mesh.compute_index
-    wd = vorticity.discretize(default_topo)
-    for d in xrange(velocity.nb_components):
-        assert np.allclose(vd_2.data[d][ind_ng], wd.data[d][ind_ng])
-
-
-def test_distribute_intra_3():
-    """
-    redistribute data, intra comm, from an operator
-    to a topology.
-    """
-    fields, simu = init_3d(dom)
-    velocity = fields['velocity']
-    vorticity = fields['vorticity']
-
-    # Create an operator, that will work on a 3D topo.
-    op = Analytic(variables={velocity: r_ng})
-    op.discretize()
-    op.setup()
-    # Initialize velocity on the topo of op
-    op.apply(simu)
-    source_topo = op.variables[velocity]
-    vnorm = velocity.norm(source_topo)
-    assert (vnorm > 0).all()
-
-    # create the topologies
-    # 1D topo, ghosts
-    target_topo = dom.create_topology(discretization=r_wg,
-                                      cutdir=(False, False, True))
-
-    # # Redistribute operator
-    red = RedistributeIntra(source=op, target=target_topo,
-                            variables=[velocity])
-    red.setup()
-    assert velocity in red.variables
-    assert np.allclose(velocity.norm(target_topo), 0)
-
-    # Initialize vorticity on target_topo, used as reference
-    wd = vorticity.discretize(topo=target_topo)
-    vorticity.initialize(time=simu.time, topo=target_topo)
-    wnorm = vorticity.norm(target_topo)
-
-    assert np.allclose(wnorm, vnorm)
-    red.apply()
-    red.wait()
-    vd_2 = velocity.discretize(target_topo)
-    ind_ng = target_topo.mesh.compute_index
-
-    for d in xrange(velocity.nb_components):
-        assert np.allclose(vd_2.data[d][ind_ng], wd.data[d][ind_ng])
-
-
-def test_distribute_intra_4():
-    """
-    redistribute data, intra comm, between two operators.
-    """
-    fields, simu = init_3d(dom)
-    velocity = fields['velocity']
-    vorticity = fields['vorticity']
-
-    # Create an operator, that will work on a 3D topo.
-    source = Analytic(variables={velocity: r_ng})
-    source.discretize()
-    source.setup()
-    # Initialize velocity on the topo of op
-    source.apply(simu)
-    source_topo = source.variables[velocity]
-    vnorm = velocity.norm(source_topo)
-    assert (vnorm > 0).all()
-
-    # create the topologies
-    # 1D topo, ghosts
-    target_topo = dom.create_topology(discretization=r_wg,
-                                      cutdir=(False, False, True))
-    # Create an operator from this topo
-    target = Analytic(variables={velocity: target_topo}, formula=v3dbis)
-    target.discretize()
-    target.setup()
-    target.apply(simu)
-    assert not np.allclose(velocity.norm(target_topo), vnorm)
-
-    # Redistribute operator
-    red = RedistributeIntra(source=source, target=target,
-                            variables=[velocity])
-    red.setup()
-    assert velocity in red.variables
-
-    # Initialize vorticity on target_topo, used as reference
-    wd = vorticity.discretize(topo=target_topo)
-    vorticity.initialize(time=simu.time, topo=target_topo)
-
-    assert np.allclose(vorticity.norm(target_topo), vnorm)
-    red.apply()
-    red.wait()
-    vd_2 = velocity.discretize(target_topo)
-    ind_ng = target_topo.mesh.compute_index
-
-    for d in xrange(velocity.nb_components):
-        assert np.allclose(vd_2.data[d][ind_ng], wd.data[d][ind_ng])
-
-
-def test_distribute_intra_5():
-    """
-    redistribute data, intra comm, between two operators, several variables
-    """
-    fields, simu = init_3d(dom)
-    velocity = fields['velocity']
-    vorticity = fields['vorticity']
-
-    # Create an operator, that will work on a 3D topo.
-    source = Analytic(variables={velocity: r_ng, vorticity: r_ng})
-    source.discretize()
-    source.setup()
-    # Initialize velocity on the topo of op
-    source.apply(simu)
-    source_topo = source.variables[velocity]
-    vnorm = velocity.norm(source_topo)
-    wnorm = vorticity.norm(source_topo)
-
-    assert (vnorm > 0).all() and (wnorm > 0).all()
-
-    # 1D topo, ghosts
-    target_topo = dom.create_topology(discretization=r_wg,
-                                      cutdir=(False, False, True))
-    # Create an operator from this topo
-    target = Analytic(variables={velocity: target_topo,
-                                 vorticity: target_topo},
-                      formula=v3dbis)
-    target.discretize()
-    target.setup()
-    target.apply(simu)
-    assert not np.allclose(velocity.norm(target_topo), vnorm)
-    assert not np.allclose(vorticity.norm(target_topo), wnorm)
-
-    # Redistribute operator
-    red = RedistributeIntra(source=source, target=target)
-    red.setup()
-    assert velocity in red.variables
-    assert vorticity in red.variables
-
-    # Initialize a field of reference
-    ref = pp.Field(domain=dom, name='ref', is_vector=True, formula=v3d)
-    rd = ref.discretize(topo=target_topo)
-    ref.initialize(time=simu.time, topo=target_topo)
-    red.apply()
-    red.wait()
-    vd = velocity.discretize(target_topo)
-    wd = vorticity.discretize(target_topo)
-    ind_ng = target_topo.mesh.compute_index
-
-    for d in xrange(velocity.nb_components):
-        assert np.allclose(vd.data[d][ind_ng], rd.data[d][ind_ng])
-        assert np.allclose(wd.data[d][ind_ng], rd.data[d][ind_ng])
-
-
-@testsenv.hysop_failed
-def test_distribute_intra_fail_4():
-    """
-    redistribute data, intra comm, between two operators.
-    """
-    fields, simu = init_3d(dom)
-    velocity = fields['velocity']
-
-    # Create an operator, that will work on a 3D topo.
-    source = Analytic(variables={velocity: r_ng})
-    source.discretize()
-    source.setup()
-    # Initialize velocity on the topo of op
-    source.apply(simu)
-
-    # create the topologies
-    # 1D topo, ghosts
-    target_topo = dom.create_topology(discretization=r_failed,
-                                      cutdir=(False, False, True))
-    # Create an operator from this topo
-    target = Analytic(variables={velocity: target_topo}, formula=v3dbis)
-    target.discretize()
-    target.setup()
-    target.apply(simu)
-
-    # Redistribute operator
-    red = RedistributeIntra(source=source, target=target,
-                            variables=[velocity])
-
-    red.setup()
-
-
-@testsenv.hysop_failed
-def test_distribute_fail_5():
-    """
-    Try the pathologic case where source and target do not apply on
-    the same group of process but when groups overlap.
-    Must failed with standard RedistributeIntra.
-    """
-    if main_size < 4:
-        return
-    fields, simu = init_3d(dom)
-    velocity = fields['velocity']
-    source_topo, target_topo = create_subtopos(dom, r_ng, r_wg)
-    # It's important to set mpi_params : main_comm will be used
-    # as communicator of reference in red. It works
-    # since it handles all the processes of source and all target
-    # of target.
-    mpi_ref = MPIParams(comm=main_comm)
-    red = RedistributeIntra(source=source_topo, target=target_topo,
-                            mpi_params=mpi_ref, variables=[velocity])
-    red.setup()
-
-
-def test_distribute_overlap():
-    """
-    Try the pathologic case where source and target do not apply on
-    the same group of process but when groups overlap.
-    """
-    if main_size < 4:
-        return
-    fields, simu = init_3d(dom)
-    velocity = fields['velocity']
-    vorticity = fields['vorticity']
-    source_topo, target_topo = create_subtopos(dom, r_ng, r_wg)
-    # It's important to set mpi_params : main_comm will be used
-    # as communicator of reference in red. It works
-    # since it handles all the processes of source and all target
-    # of target.
-    mpi_ref = MPIParams(comm=main_comm)
-    red = RedistributeOverlap(source=source_topo, target=target_topo,
-                              mpi_params=mpi_ref, variables=[velocity])
-    red.setup()
-
-    if source_topo is not None:
-        # Initialize fields only on source_topo
-        velocity.discretize(topo=source_topo)
-        velocity.initialize(topo=source_topo)
-        # Initialize vorticity on default_topo, used as reference
-        vnorm = velocity.norm(source_topo)
-        assert (vnorm > 0).all()
-    if target_topo is not None:
-        wd = vorticity.discretize(topo=target_topo)
-        vorticity.initialize(topo=target_topo)
-        wnorm = vorticity.norm(target_topo)
-        assert np.allclose(velocity.norm(target_topo), 0)
-        if source_topo is not None:
-            assert np.allclose(wnorm, vnorm)
-    assert velocity in red.variables
-
-    red.apply()
-    red.wait()
-    if target_topo is not None:
-        vd_2 = velocity.discretize(target_topo)
-        ind_ng = target_topo.mesh.compute_index
-        for d in xrange(velocity.nb_components):
-            assert np.allclose(vd_2.data[d][ind_ng], wd.data[d][ind_ng])
-
-
-def test_distribute_inter():
-    """
-    2 tasks, redistribute topo to topo
-    """
-    if main_size < 4:
-        return
-    dom_tasks, topo1, topo2 = create_inter_topos(3, r_ng, r_wg)
-    fields, simu = init_3d(dom_tasks)
-    velocity = fields['velocity']
-    # Inititialize velocity on CPU task
-    vd = velocity.discretize(topo=topo1)
-    if dom_tasks.is_on_task(CPU):
-        velocity.initialize(time=simu.time, topo=topo1)
-
-    # A field to compute a reference solution, initialized with an analytic
-    # operator, on both tasks.
-    reference = fields['vorticity']
-    op = Analytic(variables={reference: r_ng})
-    op.discretize()
-    op.setup()
-    op.apply(simu)
-    wnorm = reference.norm(topo1)
-    vnorm = velocity.norm(topo1)
-    if dom_tasks.is_on_task(CPU):
-        assert (vnorm > 0).all()
-        assert np.allclose(vnorm, wnorm)
-    elif dom_tasks.is_on_task(GPU):
-        assert (wnorm > 0).all()
-        assert np.allclose(vnorm, 0)
-
-    # Redistribute from topo1 on CPU to topo1 on GPU
-    red = RedistributeInter(source=topo1, target=topo1, parent=main_comm,
-                            variables=[velocity],
-                            source_id=CPU, target_id=GPU)
-    red.setup()
-    red.apply(simu)
-    red.wait()
-    wd = reference.discretize(topo1)
-    if dom.is_on_task(CPU):
-        assert (vnorm > 0).all()
-        assert np.allclose(vnorm, wnorm)
-    elif dom.is_on_task(GPU):
-        assert (wnorm > 0).all()
-        assert np.allclose(vnorm, wnorm)
-        for d in xrange(dom.dimension):
-            assert np.allclose(wd.data[d], vd.data[d])
-        print wnorm
-
-
-def test_distribute_inter_2():
-    """
-    2 tasks, redistribute topo to topo
-    """
-    if main_size < 4:
-        return
-    proc_tasks = [CPU, ] * main_size
-    if main_size > 2:
-        proc_tasks[-1] = GPU
-        proc_tasks[0] = GPU
-    domtasks = pp.Box(proc_tasks=proc_tasks)
-    fields, simu = init_3d(domtasks)
-    velocity = fields['velocity']
-    # Inititialize velocity on GPU task
-    if domtasks.is_on_task(GPU):
-        topo_GPU = domtasks.create_topology(r_ng)
-        vd = velocity.discretize(topo=topo_GPU)
-        velocity.initialize(time=simu.time, topo=topo_GPU)
-        vnorm = velocity.norm(topo_GPU)
-        assert (vnorm > 0).all()
-        topo_CPU = None
-
-    elif domtasks.is_on_task(CPU):
-        # A field to compute a reference solution, initialized with an analytic
-        # operator, on both tasks.
-        reference = fields['vorticity']
-        op = Analytic(variables={reference: r_ng})
-        op.discretize()
-        op.setup()
-        op.apply(simu)
-        topo_GPU = None
-        topo_CPU = op.variables[reference]
-
-    # Redistribute from GPU to CPU
-    red = RedistributeInter(source=topo_GPU, target=topo_CPU, parent=main_comm,
-                            variables=[velocity],
-                            source_id=GPU, target_id=CPU)
-    red.setup()
-    red.apply(simu)
-    red.wait()
-    if domtasks.is_on_task(CPU):
-        vd = velocity.discretize(topo=topo_CPU)
-        wd = reference.discretize(topo=topo_CPU)
-        vnorm = velocity.norm(topo_CPU)
-        ind = topo_CPU.mesh.compute_index
-        wnorm = reference.norm(topo_CPU)
-        assert np.allclose(vnorm, wnorm)
-        for d in xrange(dom.dimension):
-            assert np.allclose(wd.data[d][ind], vd.data[d][ind])
-
-
-def test_distribute_inter_3():
-    """
-    2 tasks, redistribute topo to topo
-    """
-    if main_size < 4:
-        return
-    dom_tasks, topo1, topo2 = create_inter_topos(3, r_ng, r_wg)
-    fields, simu = init_3d(dom_tasks)
-    velocity = fields['velocity']
-    # Inititialize velocity on GPU task
-    if dom_tasks.is_on_task(GPU):
-        vd = velocity.discretize(topo=topo1)
-        velocity.initialize(time=simu.time, topo=topo1)
-
-    # A field to compute a reference solution, initialized with an analytic
-    # operator, on both tasks.
-    reference = fields['vorticity']
-    op = Analytic(variables={reference: topo2})
-    op.discretize()
-    op.setup()
-    op.apply(simu)
-    wnorm = reference.norm(topo2)
-    if dom_tasks.is_on_task(GPU):
-        vnorm = velocity.norm(topo1)
-        assert (vnorm > 0).all()
-        assert np.allclose(vnorm, wnorm)
-    # Redistribute from topo1 on CPU to topo1 on GPU
-    red = RedistributeInter(source=topo1, target=topo2, parent=main_comm,
-                            variables=[velocity],
-                            source_id=GPU, target_id=CPU)
-    red.setup()
-    red.apply(simu)
-    red.wait()
-    if dom_tasks.is_on_task(CPU):
-        wd = reference.discretize(topo=topo2)
-        vd = velocity.discretize(topo=topo2)
-        vnorm = velocity.norm(topo2)
-        ind = topo2.mesh.compute_index
-        assert np.allclose(vnorm, wnorm)
-        for d in xrange(dom.dimension):
-            assert np.allclose(wd.data[d][ind], vd.data[d][ind])
-
-
-def test_distribute_inter_4():
-    """
-    3 tasks, redistribute topo to topo
-    """
-    if main_size < 4:
-        return
-    dom_tasks, topo = create_multitask_context(3, r_ng)
-    fields, simu = init_3d(dom_tasks)
-    velocity = fields['velocity']
-    # Inititialize velocity on GPU task
-    if dom_tasks.is_on_task(GPU):
-        vd = velocity.discretize(topo=topo)
-        velocity.initialize(time=simu.time, topo=topo)
-
-    # A field to compute a reference solution, initialized with an analytic
-    # operator, on both tasks.
-    reference = fields['vorticity']
-    op = Analytic(variables={reference: topo})
-    op.discretize()
-    op.setup()
-    op.apply(simu)
-
-    # Redistribute from topo on CPU to topo on GPU, ignoring OTHER
-    if not dom_tasks.is_on_task(OTHER):
-        red = RedistributeInter(source=topo, target=topo, parent=main_comm,
-                                variables=[velocity],
-                                source_id=GPU, target_id=CPU)
-        red.setup()
-        red.apply(simu)
-        red.wait()
-
-    if dom_tasks.is_on_task(CPU):
-        wd = reference.discretize(topo=topo)
-        vd = velocity.discretize(topo=topo)
-        ind = topo.mesh.compute_index
-        for d in xrange(dom.dimension):
-            assert np.allclose(wd.data[d][ind], vd.data[d][ind])
-
-    if dom_tasks.is_on_task(OTHER):
-        assert topo not in velocity.discrete_fields
-
-
-def test_distribute_inter_5():
-    """
-    2 tasks, redistribute op to op
-    """
-    if main_size < 4:
-        return
-    proc_tasks = [CPU, ] * main_size
-    proc_tasks[-1] = GPU
-    proc_tasks[0] = GPU
-    domtasks = pp.Box(proc_tasks=proc_tasks)
-
-    fields, simu = init_3d(domtasks)
-    velocity = fields['velocity']
-    reference = fields['vorticity']
-    if domtasks.is_on_task(CPU):
-        # initialize velocity on CPU
-        op = Analytic(variables={velocity: r_ng})
-        op.discretize()
-        op.setup()
-        op.apply(simu)
-    elif domtasks.is_on_task(GPU):
-        # initialize reference on CPU
-        op_init = Analytic(variables={reference: r_ng})
-        op_init.discretize()
-        op_init.setup()
-        op_init.apply(simu)
-        # An empty operator for velocity
-        op = EnergyEnstrophy(
-            velocity=velocity, vorticity=reference,
-            discretization=r_ng)
-        op.discretize()
-        op.setup()
-
-    # Redistribute from CPU to GPU
-    red = RedistributeInter(source=op, target=op, parent=main_comm,
-                            variables=[velocity],
-                            source_id=CPU, target_id=GPU)
-    red.setup()
-    red.apply(simu)
-    red.wait()
-
-    if domtasks.is_on_task(GPU):
-        toporef = op.variables[reference]
-        vd = velocity.discretize(toporef)
-        wd = reference.discretize(toporef)
-        for d in xrange(domtasks.dimension):
-            assert np.allclose(wd.data[d], vd.data[d])
-
-
-def test_distribute_inter_2d():
-    """
-    2 tasks, redistribute op to op, 2D domain
-    """
-    if main_size < 4:
-        return
-    proc_tasks = [CPU, ] * main_size
-    proc_tasks[-1] = GPU
-    proc_tasks[0] = GPU
-    domtasks = pp.Box(dimension=2, proc_tasks=proc_tasks)
-    velocity = pp.Field(domain=domtasks, name='velocity',
-                        is_vector=True, formula=v2d)
-    vort = pp.Field(domain=domtasks, name='vort')
-    simu = Simulation(nb_iter=3)
-    reference = pp.Field(domain=domtasks, name='ref',
-                         is_vector=True, formula=v2d)
-    r_2d = Discretization([33, ] * 2)
-    if domtasks.is_on_task(CPU):
-        # initialize velocity on CPU
-        op = Analytic(variables={velocity: r_2d})
-        op.discretize()
-        op.setup()
-        op.apply(simu)
-    elif domtasks.is_on_task(GPU):
-        # initialize reference on CPU
-        op_init = Analytic(variables={reference: r_2d})
-        op_init.discretize()
-        op_init.setup()
-        op_init.apply(simu)
-        # An empty operator for velocity
-        op = EnergyEnstrophy(velocity=velocity,
-                             vorticity=vort,
-                             discretization=r_2d)
-        op.discretize()
-        op.setup()
-
-    # Redistribute from CPU to GPU
-    red = RedistributeInter(source=op, target=op, parent=main_comm,
-                            variables=[velocity],
-                            source_id=CPU, target_id=GPU)
-    red.setup()
-    red.apply(simu)
-    red.wait()
-
-    if domtasks.is_on_task(GPU):
-        toporef = op.variables[velocity]
-        vd = velocity.discretize(toporef)
-        wd = velocity.discretize(toporef)
-        for d in xrange(2):
-            assert np.allclose(wd.data[d], vd.data[d])
-
diff --git a/hysop/old/operator.old/tests/test_reprojection.py b/hysop/old/operator.old/tests/test_reprojection.py
deleted file mode 100644
index c98ae316cc91460cf0b313fd0885871c317c6f24..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/tests/test_reprojection.py
+++ /dev/null
@@ -1,57 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import hysop as pp
-from hysop.operator.reprojection import Reprojection
-from hysop.problem.simulation import Simulation
-import numpy as np
-from hysop.tools.parameters import Discretization
-pi = np.pi
-cos = np.cos
-sin = np.sin
-# Upstream flow velocity
-uinf = 1.0
-tol = 1e-12
-
-
-## Function to compute TG velocity
-def computeVel(res, x, y, z, t):
-    res[0][...] = sin(x) * cos(y) * cos(z)
-    res[1][...] = - cos(x) * sin(y) * cos(z)
-    res[2][...] = 0.
-    return res
-
-
-## Function to compute reference vorticity
-def computeVort(res, x, y, z, t):
-    res[0][...] = - cos(x) * sin(y) * sin(z)
-    res[1][...] = - sin(x) * cos(y) * sin(z)
-    res[2][...] = 2. * sin(x) * sin(y) * cos(z)
-    return res
-
-## Global resolution
-d3D = Discretization([33, 33, 33], [2, 2, 2])
-
-
-def test_reprojection():
-    # Domain
-    box = pp.Box(length=[2.0 * pi, pi, pi])
-    # Vector Fields
-    vorti = pp.Field(domain=box, formula=computeVort,
-                     name='Vorticity', is_vector=True)
-
-    # Usual CartesianTopology topology definition
-    topo = box.create_topology(dim=box.dimension, discretization=d3D)
-
-    op = Reprojection(vorti, threshold=0.05, frequency=4,
-                      discretization=topo, io_params=True)
-    op.discretize()
-    op.setup()
-    # === Simulation setup ===
-    simu = Simulation(nb_iter=8)
-    # init fields
-    vorti.initialize(topo=topo)
-    # Apply correction
-    simu.initialize()
-    while not simu.is_over:
-        op.apply(simu)
-        simu.advance()
diff --git a/hysop/old/operator.old/tests/test_spectrum.py b/hysop/old/operator.old/tests/test_spectrum.py
deleted file mode 100755
index d2e866eb535198d15b011416ed0cb02ebe68a4dc..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/tests/test_spectrum.py
+++ /dev/null
@@ -1,37 +0,0 @@
-"""Test fftw spectrum computation
-"""
-from hysop.domain.box import Box
-from hysop.fields.continuous_field import Field
-from hysop.operator.spectrum import Spectrum
-from hysop.tools.parameters import Discretization
-from hysop.problem.simulation import Simulation
-from hysop import testsenv
-import numpy as np
-pi = np.pi
-sin = np.sin
-cos = np.cos
-
-
-def computeScal(res, x, y, z, t):
-    """test function, scalar input, 3D"""
-    res[0][...] = z * sin((3 * pi * x) * (2 * pi * y))
-    return res
-
-
-@testsenv.fftw_failed
-def test_spectrum():
-    """build/apply spectrum op
-    """
-    dom = Box()
-    field = Field(domain=dom, name='Field',
-                  is_vector=False, formula=computeScal)
-    d3D = Discretization([257, 257, 257])
-
-    op = Spectrum(field, discretization=d3D)
-    op.discretize()
-    op.setup()
-    topo = op.discrete_fields[field].topology
-    field.initialize(topo=topo)
-    simu = Simulation(nb_iter=1)
-    simu.initialize()
-    op.apply(simu)
diff --git a/hysop/old/operator.old/tests/test_stretching.py b/hysop/old/operator.old/tests/test_stretching.py
deleted file mode 100755
index 02bc6cfd8101e5ce979ad3f8fd9dcd32cdce08d8..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/tests/test_stretching.py
+++ /dev/null
@@ -1,202 +0,0 @@
-# -*- coding: utf-8 -*-
-"""Tests for stretching component resolution
-"""
-from hysop import Field, Box
-import numpy as np
-from hysop.operator.stretching import Stretching, StretchingLinearized, GradUW
-from hysop.problem.simulation import Simulation
-from hysop.methods import TimeIntegrator, Formulation,\
-    SpaceDiscretization
-from hysop.methods import RK3, FDC4
-from hysop.tools.parameters import Discretization
-from hysop.tools.misc import WorkSpaceTools
-pi = np.pi
-cos = np.cos
-sin = np.sin
-
-
-# 3d discretisation with size 2 ghosts layer
-d3d = Discretization([65, ] * 3, [2, ] * 3)
-
-
-def compute_vel(res, x, y, z, t):
-    """3d vector field
-    """
-    amodul = cos(pi * 1. / 3.)
-    pix = pi * x
-    piy = pi * y
-    piz = pi * z
-    pi2x = 2. * pix
-    pi2y = 2. * piy
-    pi2z = 2. * piz
-    res[0][...] = 2. * sin(pix) * sin(pix) \
-        * sin(pi2y) * sin(pi2z) * amodul
-    res[1][...] = - sin(pi2x) * sin(piy) \
-        * sin(piy) * sin(pi2z) * amodul
-    res[2][...] = - sin(pi2x) * sin(piz) \
-        * sin(piz) * sin(pi2y) * amodul
-    return res
-
-
-def compute_vort(res, x, y, z, t):
-    """3d vector field
-    """
-    amodul = cos(pi * 1. / 3.)
-    pix = pi * x
-    piy = pi * y
-    piz = pi * z
-    pi2x = 2. * pix
-    pi2y = 2. * piy
-    pi2z = 2. * piz
-    res[0][...] = 2. * pi * sin(pi2x) * amodul *\
-        (- cos(pi2y) * sin(piz) * sin(piz) +
-         sin(piy) * sin(piy) * cos(pi2z))
-
-    res[1][...] = 2. * pi * sin(pi2y) * amodul *\
-        (2. * cos(pi2z) * sin(pix) * sin(pix) +
-         sin(piz) * sin(piz) * cos(pi2x))
-
-    res[2][...] = -2. * pi * sin(pi2z) * amodul *\
-        (cos(pi2x) * sin(piy) * sin(piy) +
-         sin(pix) * sin(pix) * cos(pi2y))
-
-    return res
-
-
-def init(method=None, work=False):
-    """Build, init, setup for operator
-    """
-    # Domain
-    box = Box()
-
-    # Fields
-    velo = Field(
-        domain=box, formula=compute_vel,
-        name='Velocity', is_vector=True)
-    vorti = Field(
-        domain=box, formula=compute_vort,
-        name='Vorticity', is_vector=True)
-    # Stretching operator
-    op = Stretching(velo, vorti, discretization=d3d, method=method)
-    op.discretize()
-    rwork = None
-    if work:
-        # Find required size for internal work vector, if needed
-        wkp = op.get_work_properties()['rwork']
-        # Allocate work space
-        rwork = WorkSpaceTools.check_work_array(len(wkp), wkp[0])
-    topo = op.discrete_fields[velo].topology
-    op.setup(rwork=rwork)
-    simulation = Simulation(start=0, end=1., time_step=0.05)
-    # initialize fields and simu
-    velo.initialize(topo=topo)
-    vorti.initialize(topo=topo)
-    simulation.initialize()
-    return op, simulation
-
-
-def test_stretching():
-    """Default case
-    """
-    #method = {TimeIntegrator: RK3, Formulation: Conservative,
-    #          SpaceDiscretization: FDC4}
-    op, simu = init()
-    op.apply(simu)
-
-
-def test_stretching_external_work():
-    """Default setup but with user-defined work arrays
-    """
-    op, simu = init(work=True)
-    op.apply(simu)
-
-
-def test_stretching_graduw():
-    """GradUW formulation
-    """
-    method = {TimeIntegrator: RK3, Formulation: GradUW,
-              SpaceDiscretization: FDC4}
-    op, simu = init(method=method)
-    op.apply(simu)
-
-
-# def test_compare_stretching():
-#     """Run conservative and graduv form,
-#     check if results are close enough
-#     """
-#     op1, simu = init()
-#     op1.apply(simu)
-#     method = {TimeIntegrator: RK3, Formulation: GradUW,
-#               SpaceDiscretization: FDC4}
-#     op2, simu = init(method=method)
-#     op2.apply(simu)
-#     w1 = op1.discrete_op.vorticity
-#     w2 = op2.discrete_op.vorticity
-#     h = w1.topology.mesh.space_step.max()
-#     for d in xrange(op1.domain.dim):
-#         print np.abs((w1[d] - w2[d])).max()
-#         assert np.allclose(w1[d], w2[d], atol=h ** 2)
-
-
-def init_linearized(method=None, work=False):
-    """Build, init, setup for linearized stretching
-    """
-    # Domain
-    box = Box()
-
-    # Base fields:
-    velo_bf = Field(
-        domain=box, formula=compute_vel,
-        name='VelocityBF', is_vector=True)
-    vorti_bf = Field(
-        domain=box, formula=compute_vort,
-        name='VorticityBF', is_vector=True)
-    # Perturbations
-    velo = Field(
-        domain=box, formula=compute_vel,
-        name='Velocity', is_vector=True)
-    vorti = Field(
-        domain=box, formula=compute_vort,
-        name='Vorticity', is_vector=True)
-
-    # Usual stretching operator
-    stretch1 = Stretching(velo, vorti, discretization=d3d)
-    # Linearized stretching
-    stretch2 = StretchingLinearized(velocity=velo, vorticity=vorti,
-                                    velocity_BF=velo_bf,
-                                    vorticity_BF=vorti_bf,
-                                    discretization=d3d, method=method)
-    stretch1.discretize()
-    stretch2.discretize()
-    rwork = None
-    if work:
-        wkp = stretch2.get_work_properties()['rwork']
-        rwork = WorkSpaceTools.check_work_array(len(wkp), wkp[0])
-    topo = stretch1.discrete_fields[velo].topology
-    # initialize all fields
-    velo.initialize(topo=topo)
-    vorti.initialize(topo=topo)
-    velo_bf.initialize(topo=topo)
-    vorti_bf.initialize(topo=topo)
-    stretch1.setup()
-    stretch2.setup(rwork=rwork)
-    simulation = Simulation(start=0, end=1., time_step=0.05)
-    simulation.initialize()
-    return stretch1, stretch2, simulation
-
-
-def test_stretching_linearized():
-    """Linearized formulation for stretching
-    """
-    str1, str2, simu = init_linearized()
-    str1.apply(simu)
-    str2.apply(simu)
-
-
-def test_stretching_external_work_graduv():
-    """User-defined work arrays for GradUW formulation
-    """
-    method = {TimeIntegrator: RK3, Formulation: GradUW,
-              SpaceDiscretization: FDC4}
-    op, simu = init(work=True, method=method)
-    op.apply(simu)
diff --git a/hysop/old/operator.old/tests/test_velocity_correction.py b/hysop/old/operator.old/tests/test_velocity_correction.py
deleted file mode 100755
index 88af1533d5f8aebee0b2336814718c59c8968869..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/tests/test_velocity_correction.py
+++ /dev/null
@@ -1,91 +0,0 @@
-"""Test correction operator
-"""
-import hysop as pp
-from hysop.operator.velocity_correction import VelocityCorrection
-from hysop.problem.simulation import Simulation
-import numpy as np
-from hysop.tools.numpywrappers import npw
-from hysop.tools.parameters import Discretization
-from hysop.fields.tests.func_for_tests import v_TG, v_TG_2d, w_TG, w_TG_2d
-pi = np.pi
-cos = np.cos
-sin = np.sin
-# Upstream flow velocity
-uinf = 1.0
-tol = 1e-12
-
-
-# Global resolution
-g = 0
-d2D = Discretization([33, 33], [g, g])
-d3D = Discretization([33, 33, 33], [g, g, g])
-
-
-def test_velocity_correction_3d():
-    """Apply velocity correction, 3d domain
-    """
-    # Domain
-    box = pp.Box(length=[2.0 * pi, pi, pi])
-    # Vector Fields
-    velo = pp.Field(domain=box, formula=v_TG,
-                    name='Velocity', is_vector=True)
-    vorti = pp.Field(domain=box, formula=w_TG,
-                     name='Vorticity', is_vector=True)
-
-    # Usual CartesianTopology topology definition
-    topo = box.create_topology(discretization=d3D)
-
-    ref_rate = npw.zeros(3)
-    ref_rate[0] = uinf * box.length[1] * box.length[2]
-    rate = pp.VariableParameter(data=ref_rate)
-    op = VelocityCorrection(velo, vorti, req_flowrate=rate,
-                            discretization=topo, io_params={})
-    op.discretize()
-    op.setup()
-    # === Simulation setup ===
-    simu = Simulation(start=0.0, end=5., time_step=0.005, max_iter=1000000)
-    # init fields
-    velo.initialize(topo=topo)
-    vorti.initialize(topo=topo)
-    # Apply correction
-    op.apply(simu)
-    # check new flowrate values
-    sref = op.cb.surf[0]
-    flowrate = sref.integrate_field_allc(velo, topo)
-    assert (np.abs(flowrate - ref_rate) < tol).all()
-
-
-def test_velocity_correction_2d():
-    """Apply velocity correction, 2d domain
-    """
-    # Domain
-    box = pp.Box(length=[2.0 * pi, pi], origin=[0., 0.])
-
-    # Vector Fields
-    velo = pp.Field(domain=box, formula=v_TG_2d,
-                    name='Velocity', is_vector=True)
-    vorti = pp.Field(domain=box, formula=w_TG_2d,
-                     name='Vorticity', is_vector=False)
-
-    # Usual CartesianTopology topology definition
-    topo = box.create_topology(discretization=d2D)
-
-    ref_rate = npw.zeros(2)
-    ref_rate[0] = uinf * box.length[1]
-    rate = pp.VariableParameter(data=ref_rate)
-    op = VelocityCorrection(velo, vorti, req_flowrate=rate,
-                            discretization=topo)
-    op.discretize()
-    op.setup()
-    # === Simulation setup ===
-    simu = Simulation(start=0.0, end=5., time_step=0.005, max_iter=1000000)
-    # init fields
-    velo.initialize(topo=topo)
-    vorti.initialize(topo=topo)
-
-    # Apply correction
-    op.apply(simu)
-    # check new flowrate values
-    sref = op.cb.surf[0]
-    flowrate = sref.integrate_field_allc(velo, topo)
-    assert (np.abs(flowrate - ref_rate) < tol).all()
diff --git a/hysop/old/operator.old/velocity_correction.py b/hysop/old/operator.old/velocity_correction.py
deleted file mode 100755
index 0225da032912e35175e048329dcb9a7a7a917326..0000000000000000000000000000000000000000
--- a/hysop/old/operator.old/velocity_correction.py
+++ /dev/null
@@ -1,80 +0,0 @@
-# -*- coding: utf-8 -*-
-"""Operator used to shift velocity value
- to fit with a required input flowrate.
-
-Check details in :ref:`velocity_correction` in HySoP user guide.
-
-
-"""
-from hysop.constants import debug
-from hysop.operator.discrete.velocity_correction import VelocityCorrection_D
-from hysop.operator.computational import Computational
-from hysop.domain.control_box import ControlBox
-from hysop.operator.continuous import opsetup
-
-
-class VelocityCorrection(Computational):
-    """The velocity field is corrected after solving the
-    Poisson equation. For more details about calculations,
-    see :ref:`velocity_correction` in HySoP user guide.
-    """
-
-    @debug
-    def __init__(self, velocity, vorticity, req_flowrate, **kwds):
-        """Update velocity field (solution of Poisson equation)
-        in order to prescribe proper mean flow and ensure
-        the desired inlet flowrate.
-
-        Parameters
-        ----------
-        velocity : :class:`~hysop.fields.continuous_field.Field`
-            in/out velocity continuous vector field.
-        vorticity : :class:`~hysop.fields.continuous_field.Field`
-            input vorticity vector field.
-        req_flowrate : a
-          :class:`~hysop.fields.variable_parameter.VariableParameter`
-            the desired inlet flowrate value
-        kwds : base class parameters
-        """
-        assert 'variables' not in kwds, 'variables parameter is useless.'
-        super(VelocityCorrection, self).__init__(
-            variables=[velocity, vorticity], **kwds)
-        # velocity field
-        self.velocity = velocity
-        # vorticity field
-        self.vorticity = vorticity
-        self.input = [self.velocity, self.vorticity]
-        self.output = [self.velocity]
-        # Expected value for the flow rate through input surface
-        self.req_flowrate = req_flowrate
-        dom = self.velocity.domain
-        # volume of control used to compute a reference for correction.
-        self.cb = ControlBox(origin=dom.origin, length=dom.length,
-                             parent=dom)
-
-    def discretize(self):
-        super(VelocityCorrection, self)._standard_discretize()
-        assert self._single_topo, 'Multi-resolution case is not allowed.'
-
-    @debug
-    @opsetup
-    def setup(self, rwork=None, iwork=None):
-        if not self._is_uptodate:
-            self.discrete_op =\
-                VelocityCorrection_D(self.discrete_fields[self.velocity],
-                                     self.discrete_fields[self.vorticity],
-                                     self.req_flowrate, self.cb, rwork=rwork,
-                                     iwork=iwork)
-            # Output setup
-            self._set_io('velocity_correction', (1, 2 + self.domain.dim))
-            self.discrete_op.set_writer(self._writer)
-            self._is_uptodate = True
-
-    def compute_correction(self):
-        """Compute the required correction for the current state
-        but do not apply it onto velocity.
-        """
-        self.discrete_op.compute_correction()
-
-    def get_work_properties(self):
-        return {'rwork': None, 'iwork': None}
diff --git a/hysop/old/problem.old/__init__.py b/hysop/old/problem.old/__init__.py
deleted file mode 100644
index b66f485685a7c201ef15684cadb78db5382c0d31..0000000000000000000000000000000000000000
--- a/hysop/old/problem.old/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-"""
-Everything concerning Problems.
-
-"""
diff --git a/hysop/old/problem.old/navier_stokes.py b/hysop/old/problem.old/navier_stokes.py
deleted file mode 100644
index 40ec1b5bcb749bae935559bd43079f3c49aa4bc0..0000000000000000000000000000000000000000
--- a/hysop/old/problem.old/navier_stokes.py
+++ /dev/null
@@ -1,33 +0,0 @@
-"""
-@file navier_stokes.py
-"""
-from hysop.problem.problem import Problem
-from hysop.operator.analytic import Analytic
-from hysop.operator.advection import Advection
-from hysop.operator.stretching import Stretching
-from hysop.operator.poisson import Poisson
-from hysop.operator.diffusion import Diffusion
-from hysop.operator.penalization import Penalization
-
-
-class NSProblem(Problem):
-    """
-    Navier Stokes problem description.
-    """
-    def __init__(self, operators, simulation,
-                 dumpFreq=100, name=None):
-
-        Problem.__init__(self, operators, simulation, dumpFreq, name)
-        for op in operators:
-            if isinstance(op, Advection):
-                self.advection = op
-            if isinstance(op, Stretching):
-                self.stretch = op
-            if isinstance(op, Diffusion):
-                self.diffusion = op
-            if isinstance(op, Poisson):
-                self.poisson = op
-            if isinstance(op, Penalization):
-                self.penal = op
-            if isinstance(op, Analytic):
-                self.velocity = op
diff --git a/hysop/old/problem.old/problem.py b/hysop/old/problem.old/problem.py
deleted file mode 100644
index 9ba0cbdcd08d614bdf6a860cd75c4bbd48c2dace..0000000000000000000000000000000000000000
--- a/hysop/old/problem.old/problem.py
+++ /dev/null
@@ -1,272 +0,0 @@
-"""Description of a problem, i.e. a sequence of operators
-"""
-from hysop.constants import debug
-import cPickle
-from hysop import __VERBOSE__, __GPU_ENABLED__
-from hysop.operator.redistribute import Redistribute, RedistributeIntra
-from hysop.tools.profiler import profile, Profiler
-from hysop.core.mpi import main_rank
-if __GPU_ENABLED__:
-    from hysop.backend.device.opencl.gpu_transfer import DataTransfer
-else:
-    DataTransfer = int
-
-
-class Problem(object):
-    """Problem representation.
-
-    A problem is a sequence of operators applied on a set of
-    continuous fields.
-    """
-
-    @debug
-    def __new__(cls, *args, **kw):
-        return object.__new__(cls, *args, **kw)
-
-    @debug
-    def __init__(self, operators, simulation,
-                 dump_freq=100, name=None):
-        """
-
-        Parameters
-        ----------
-        operators : list of :class:`~hysop.operator.continuous.Continuous`
-            sequence of operators used to define the problem
-        simulation : :class:`~hysop.problem.simulation.Simulation`
-            description of the time discretisation
-        dump_freq : real; optional
-            frequency of dump to file for the problem.
-            Set dump_freq = -1 for no dumps. Default = 100,
-            i.e. every 100 time steps.
-        name : string, optional
-            id for the problem
-        """
-        # Problem name
-        self.name = name
-        # Problem operators
-        self.operators = operators
-        # Computes time step and manage iterations
-        self.simulation = simulation
-
-        # all variables must be defined on the same domain.
-        vref = self.operators[0].variables.keys()[0]
-        self.domain = vref.domain
-        msg = "All variables must be defined on the same domain."
-        for op in self.operators:
-            for v in (v for v in op.variables if v is not vref):
-                print id(v.domain), id(self.domain)
-                print v.domain, self.domain
-                if self.domain is not v.domain:
-                    raise ValueError(msg)
-        # A list of variables that must be initialized before
-        # any call to op.apply()
-        self.input = []
-        # call to problem.dump frequency during apply.
-        if dump_freq >= 0:
-            # dump problem every self.dump_freq iter
-            self.dump_freq = dump_freq
-        else:
-            self.dump_freq = None
-
-        # Id for the problem. Used for dump file name.
-        if name is None:
-            self.name = 'HySoPPb'
-        else:
-            self.name = name
-        # Object to store computational times of lower level functions
-        self.profiler = Profiler(self, self.domain.task_comm)
-        # Default file name prefix for dump.
-        self.filename = str(self.name)
-        self._filedump = self.filename + '_rk_' + str(main_rank)
-
-        # Flag : true when operators for computation are up
-        # and when variables are initialized (i.e. after a call to pre_setup)
-        # Note : 3 categories of op : computation (stretching, poisson ...),
-        # and data distribution (Redistribute)
-        self._is_ready = False
-
-    @debug
-    @profile
-    def setup(self):
-        """Prepare operators (create topologies, allocate memories ...)
-        """
-        # Set up for 'computational' operators
-        if not self._is_ready:
-            self.pre_setup()
-        print "Fin setup op"
-        # for v in self.input:
-        #     v.initialize()
-
-        # other operators
-        for op in self.operators:
-            if isinstance(op, RedistributeIntra) or \
-               isinstance(op, DataTransfer):
-                op.setup()
-
-        for op in self.operators:
-            if isinstance(op, Redistribute):
-                op.setup()
-
-        if __VERBOSE__ and main_rank == 0:
-            print ("====")
-
-    def pre_setup(self):
-        """Discretization and setup for computational operators
-         and fields initialization
-        """
-        if self._is_ready:
-            return
-
-        for op in self.operators:
-            if not isinstance(op, Redistribute) and \
-               not isinstance(op, DataTransfer):
-                op.discretize()
-                op.setup()
-
-        if __VERBOSE__ and main_rank == 0:
-            print ("==== Variables initialization ====")
-
-        # Build the list of 'input' variables, that must be initialized.
-        # 'input' fields are variables that are not output of
-        # any previous operators in the operator sequence.
-        self.input = []
-
-        # First, all operators input vars are appended to input list
-        for op in self.operators:
-            for v in op.input:
-                if v not in self.input:
-                    self.input.append(v)
-
-        # Then starting from the last op, vars which are 'input' of
-        # an operator AND output of a previous op are removed
-        for op in self.operators[-1::-1]:
-            for v in self.input:
-                if v in op.output:
-                    self.input.remove(v)
-                if v in op.input:
-                    self.input.append(v)
-
-        self._is_ready = True
-
-    @debug
-    @profile
-    def solve(self):
-        """Apply all operators of the problem, for all simulation
-        time steps
-
-        * init simulation
-        * for each time step:
-            * apply all operators
-            * increment time step
-            * dump problem (for concerned values, depend on dump_freq)
-
-        """
-        # Init simulation
-        self.simulation.initialize()
-        if main_rank == 0:
-            print ("\n\n Start solving ...")
-        # Run simulation
-        while not self.simulation.is_over:
-            if main_rank == 0:
-                self.simulation.print_state()
-
-            for op in self.operators:
-                if __VERBOSE__:
-                    print (main_rank, op.name)
-                op.apply(self.simulation)
-            testdump = False
-            if self.dump_freq is not None:
-                testdump = \
-                    self.simulation.current_iteration % self.dump_freq is 0
-            self.simulation.advance()
-            if testdump:
-                self.dump()
-
-    @debug
-    def finalize(self):
-        """
-        Finalize method
-        """
-        if main_rank == 0:
-            print ("\n\n==== End ====")
-        for op in self.operators:
-            op.finalize()
-
-        var = []
-        for op in self.operators:
-            for v in op.variables:
-                if not v in var:
-                    var.append(v)
-        for v in var:
-            v.finalize()
-        self.profiler.summarize()
-        if main_rank == 0:
-            print ("===\n")
-
-    def get_profiling_info(self):
-        for op in self.operators:
-            self.profiler += op.profiler
-        for op in self.operators:
-            for v in op.variables:
-                self.profiler += v.profiler
-
-    def __str__(self):
-        """ToString method"""
-        s = "Problem based on\n"
-        s += str(self.domain.topologies)
-        s += "with following operators : \n"
-        for op in self.operators:
-            s += str(op)
-        return s
-
-    def dump(self, filename=None):
-        """Serialize some data of the problem to file
-        (only data required for a proper restart, namely fields in self.input
-        and simulation).
-
-        Parameters
-        ----------
-        filename : string
-            prefix for output file. Real name = filename_rk_N,
-        N being current process number. If None use default value from problem
-        parameters (self.filename)
-        """
-        if filename is not None:
-            self.filename = filename
-            self._filedump = filename + '_rk_' + str(main_rank)
-        db = open(self._filedump, 'wb')
-        cPickle.dump(self.simulation, db)
-        # TODO : review dump process using hdf files instead of pickle.
-        # for v in self.input:
-        #     v.hdf_dump(self.filename)
-
-    def restart(self, filename=None):
-        """
-        Load serialized data to restart from a previous state.
-        self.input variables and simulation are loaded.
-        @param  filename : prefix for downloaded file.
-        Real name = filename_rk_N, N being current process number.
-        If None use default value from problem
-        parameters (self.filename)
-        """
-        if filename is not None:
-            self.filename = filename
-            self._filedump = filename + '_rk_' + str(main_rank)
-        db = open(self._filedump, 'r')
-        self.simulation = cPickle.load(db)
-        self.simulation.reset()
-        for v in self.input:
-            print ("load ...", self.filename)
-            v.load(self.filename)
-
-        for op in self.operators:
-            if isinstance(op, Redistribute):
-                op.setup()
-
-    def setDumpFreq(self, freq):
-        """set rate of problem.dump call (every 'rate' iteration)
-        :param : the frequency of output. <0 or None means no output.
-        """
-        self.dump_freq = freq
-        if freq < 0:
-            self.dump_freq = None
diff --git a/hysop/old/problem.old/problem_tasks.py b/hysop/old/problem.old/problem_tasks.py
deleted file mode 100644
index 9eaf8b9c19b07366a18b925806f44f7b816539d3..0000000000000000000000000000000000000000
--- a/hysop/old/problem.old/problem_tasks.py
+++ /dev/null
@@ -1,187 +0,0 @@
-"""Extending problem description to handle tasks parallelism.
-Each operator owns a task id that define a process group that are sharing the
-same tasks.
-"""
-from hysop.constants import debug
-from hysop import __VERBOSE__
-from hysop.problem.problem import Problem
-from hysop.operator.redistribute import RedistributeInter, RedistributeIntra
-from hysop.operator.redistribute import Redistribute
-from hysop.backend.device.opencl.gpu_transfer import DataTransfer
-from hysop.tools.profiler import profile
-
-
-class ProblemTasks(Problem):
-    """
-    As in Problem, it contains several operators that apply
-    on variables. The operators are labeled by task_id that defines
-    a identifier of a task.
-    Tasks are subset of operators and are assigned to a subset of the MPI
-    process by means of the task_list parameter.
-    """
-    @debug
-    def __new__(cls, *args, **kw):
-        return object.__new__(cls, *args, **kw)
-
-    @debug
-    def __init__(self, operators, simulation, domain, tasks_list,
-                 dumpFreq=100, name=None, main_comm=None):
-        """
-        Creates the problem.
-        @param operators : list of operators.
-        @param simulation : a hysop.simulation.Simulation object
-        to describe simulation parameters.
-        @param tasks_list : list of task identifiers for each process rank
-        @param name : an id for the problem
-        @param dumpFreq : frequency of dump (i.e. saving to a file)
-        for the problem; set dumpFreq = -1 for no dumps. Default = 100.
-        @param main_comm : MPI communicator that contains all process
-        involved in this problem.
-
-        @remark : process number in communicator main_comm must equal the
-        length of tasks_list.
-        """
-        Problem.__init__(self, operators, simulation,
-                         domain=domain, dumpFreq=dumpFreq, name=name)
-        self.tasks_list = tasks_list
-        if main_comm is None:
-            from hysop.core.mpi import main_comm
-        self.main_comm = main_comm
-        self._main_rank = self.main_comm.Get_rank()
-        assert self.main_comm.Get_size() == len(self.tasks_list), \
-            "The given task list length (" + str(self.tasks_list) + ") " \
-            "does not match the communicator size" \
-            " ({0})".format(self.main_comm.Get_size())
-        self.my_task = self.tasks_list[self._main_rank]
-        self.operators_on_task = []
-
-    def pre_setup(self):
-        """
-        - Removes operators that not have the same task identifier
-        as the current process
-        - Keep the Redistribute_intercomm in both 'from' and 'to' task_id
-        - Partial setup : only for 'computational' operators
-        (i.e. excluding rendering, data distribution ...)
-        - Initialize variables.
-        """
-        if self._isReady:
-            pass
-
-        ## Remove operators with a tasks not handled by this process.
-        for op in self.operators:
-            if op.task_id() == self.my_task:
-                self.operators_on_task.append(op)
-
-        # Discretize and setup computational operators
-        for op in self.operators_on_task:
-            if not isinstance(op, Redistribute) and \
-               not isinstance(op, DataTransfer):
-                op.discretize()
-        for op in self.operators_on_task:
-            if not isinstance(op, Redistribute) and \
-               not isinstance(op, DataTransfer):
-                op.setup()
-
-        # Build variables list to initialize
-        # These are operators input variables that are not output of
-        # previous operators in the operator stack.
-        # Set the variables input topology as the the topology of the fist
-        # operator that uses this variable as input.
-        self.input = []
-        for op in self.operators_on_task:
-            for v in op.input:
-                if v not in self.input:
-                    self.input.append(v)
-        for op in self.operators_on_task[::-1]:
-            for v in op.output:
-                if v in self.input:
-                    if isinstance(op, RedistributeInter):
-                        if op._target_id == self.my_task:
-                            self.input.remove(v)
-                    else:
-                        self.input.remove(v)
-            for v in op.input:
-                if v not in self.input:
-                    if isinstance(op, RedistributeInter):
-                        if op._source_id == self.my_task:
-                            self.input.append(v)
-                    else:
-                        self.input.append(v)
-
-        self._isReady = True
-
-    @debug
-    @profile
-    def setup(self):
-        """
-        Prepare operators (create topologies, allocate memories ...)
-        """
-        # Set up for 'computational' operators
-        if not self._isReady:
-            self.pre_setup()
-
-        # for v in self.input:
-        #     v.initialize()
-
-        # other operators
-        for op in self.operators_on_task:
-            if isinstance(op, RedistributeIntra) or \
-               isinstance(op, DataTransfer):
-                op.setup()
-
-        for op in self.operators_on_task:
-            if isinstance(op, RedistributeInter):
-                op.setup()
-
-        if __VERBOSE__ and self._main_rank == 0:
-            print("====")
-
-    @debug
-    @profile
-    def solve(self):
-        """
-        Solve problem.
-
-        Performs simulations iterations by calling each
-        operators of the list until timer ends.\n
-        At end of time step, call an io step.\n
-        Displays timings at simulation end.
-        """
-        self.simulation.initialize()
-        self.main_comm.Barrier()
-        if self._main_rank == 0:
-            print ("\n\n Start solving ...")
-        while not self.simulation.is_over:
-            if self._main_rank == 0:
-                self.simulation.print_state()
-            for op in self.operators:
-                if op.task_id() == self.my_task:
-                    op.apply(self.simulation)
-                    if isinstance(op, RedistributeInter):
-                        if op._source_id == self.my_task:
-                            op.wait()
-            testdump = \
-                self.simulation.current_iteration % self.dumpFreq is 0
-            self.simulation.advance()
-            if self._doDump and testdump:
-                self.dump()
-
-    @debug
-    def finalize(self):
-        """
-        Finalize method
-        """
-        if self._main_rank == 0:
-            print ("\n\n==== End ====")
-        for op in self.operators_on_task:
-            op.finalize()
-        var = []
-        for op in self.operators_on_task:
-            for v in op.variables:
-                if v not in var:
-                    var.append(v)
-        for v in var:
-            v.finalize()
-        self.profiler.summarize()
-        if self._main_rank == 0:
-            print ("===\n")
diff --git a/hysop/old/problem.old/problem_with_GLRendering.py b/hysop/old/problem.old/problem_with_GLRendering.py
deleted file mode 100644
index c8d254dd3e783413e44ff85224e2aa6a930b7270..0000000000000000000000000000000000000000
--- a/hysop/old/problem.old/problem_with_GLRendering.py
+++ /dev/null
@@ -1,58 +0,0 @@
-"""Extend Problem description to handle real time rendering wit OpenGL.
-"""
-from hysop.constants import debug
-from hysop.core.mpi import main_rank
-from hysop.problem.problem import Problem
-
-
-class ProblemGLRender(Problem):
-    """
-    For the GPU real-time rendering (i.e. use of an
-    OpenGLRendering object), The loop over time-steps is passed to Qt4
-    """
-
-    @debug
-    def __init__(self, operators, simulation,
-                 dumpFreq=100, name=None):
-        """
-        Create a transport problem instance.
-
-        @param operators : list of operators.
-        @param simulation : a hysop.simulation.Simulation object
-        to describe simulation parameters.
-        @param name : an id for the problem
-        @param dumpFreq : frequency of dump (i.e. saving to a file)
-        for the problem; set dumpFreq = -1 for no dumps. Default = 100.
-        """
-        Problem.__init__(self, operators, simulation,
-                         dumpFreq=dumpFreq,
-                         name=name)
-        self.gl_renderer = None
-
-    @debug
-    def setup(self):
-        """
-        Prepare operators (create topologies, allocate memories ...)
-        """
-        Problem.setup(self)
-        for ope in self.operators:
-            try:
-                if ope.isGLRender:
-                    self.gl_renderer = ope
-                    ope.setMainLoop(self)
-            except AttributeError:
-                pass
-
-    @debug
-    def solve(self):
-        """
-        Solve problem.
-
-        Performs simulations iterations by calling each
-        operators of the list until timer ends.\n
-        At end of time step, call an io step.\n
-        Displays timings at simulation end.
-        """
-        if main_rank == 0:
-            print ("\n\n Start solving ...")
-        self.gl_renderer.startMainLoop()
diff --git a/hysop/old/problem.old/tests/__init__.py b/hysop/old/problem.old/tests/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/hysop/old/problem.old/tests/test_simulation.py b/hysop/old/problem.old/tests/test_simulation.py
deleted file mode 100755
index 876f099349c37dad0f954a86667c4ac83f661205..0000000000000000000000000000000000000000
--- a/hysop/old/problem.old/tests/test_simulation.py
+++ /dev/null
@@ -1,139 +0,0 @@
-"""Tests simulation time loop parameters
-"""
-from hysop.problem.simulation import Simulation, eps
-from hysop.tools.io_utils import Writer, IOParams, IO
-from hysop.core.mpi import main_rank
-import numpy as np
-
-
-def run_simu(s):
-    i = 0
-    tref = s.start
-    while not s.is_over:
-        assert s.current_iteration == i
-        assert s.time == tref + s.time_step
-        tref = s.time
-        i += 1
-        s.advance()
-
-
-def test_simu_default():
-    s = Simulation(nb_iter=10)
-    assert s.time == 0.
-    assert s.end == 1.
-    assert np.allclose(s.time_step, 1. / 10., rtol=eps)
-    s.initialize()
-    assert np.allclose(s.time, s.time_step)
-
-    run_simu(s)
-
-    assert np.allclose(s.time, s.end)
-    assert s.current_iteration == 9
-    assert s.nb_iter == 10
-
-    s.finalize()
-    assert np.allclose(s.time, s.end)
-    assert s.current_iteration == -1
-    assert s.is_over
-
-    s.initialize()
-    assert s.current_iteration == 0
-    assert not s.is_over
-    assert np.allclose(s.time, 0.1)
-    assert s.time == 0.1
-
-
-def test_simu_2():
-    s = Simulation(time_step=0.1, max_iter=5)
-    assert s.time == 0.
-    assert s.end == 1.
-    assert np.allclose(s.time_step, 1. / 10.)
-    s.initialize()
-    assert np.allclose(s.time, s.time_step)
-
-    run_simu(s)
-
-    assert s.current_iteration == 4
-    assert np.allclose(s.time, s.start + 5 * s.time_step)
-
-
-def test_simu_adapt():
-    s = Simulation(time_step=0.1)
-    s.initialize()
-    i = 0
-    tref = s.start
-    while not s.is_over:
-        assert s.current_iteration == i
-        assert s.time == tref + s.time_step
-        tref = s.time
-        if s.current_iteration == 5:
-            s.update_time_step(s.time_step * 0.5)
-        s.advance()
-        i += 1
-
-    assert np.allclose(s.time, s.end)
-    assert np.allclose(s.time_step, 0.05)
-    assert s.current_iteration == 13
-
-
-def test_simu_incr():
-    simu = Simulation(start=0.0, end=1.0, nb_iter=10)
-    io_params = IOParams(filename='temp_test', frequency=2,
-                         fileformat=IO.ASCII)
-    wr = Writer(io_params)
-    leader = io_params.io_leader
-    if main_rank == leader:
-        assert wr.do_write(simu.current_iteration)
-    else:
-        assert not wr.do_write(simu.current_iteration)
-
-    simu.initialize()
-
-    assert not wr.do_write(simu.current_iteration)
-
-    count = 1
-    while not simu.is_over:
-        if count % 2 == 0 and main_rank == leader:
-            assert wr.do_write(simu.current_iteration)
-        else:
-            assert not wr.do_write(simu.current_iteration)
-        simu.advance()
-        count += 1
-    assert simu.current_iteration == 9
-    simu.finalize()
-
-    if main_rank == leader:
-        assert wr.do_write(simu.current_iteration)
-    else:
-        assert not wr.do_write(simu.current_iteration)
-
-
-def test_simu_incr2():
-    simu = Simulation(start=0.0, end=1.0, nb_iter=10)
-    io_params = IOParams(filename='temp_test', frequency=3,
-                         fileformat=IO.ASCII)
-    leader = io_params.io_leader
-    wr = Writer(io_params)
-    if main_rank == leader:
-        assert wr.do_write(simu.current_iteration)
-    else:
-        assert not wr.do_write(simu.current_iteration)
-    simu.update_time_step(0.10000000001)
-    simu.initialize()
-
-    assert not wr.do_write(simu.current_iteration)
-
-    count = 1
-    while not simu.is_over:
-        if count % 3 == 0 and main_rank == leader:
-            assert wr.do_write(simu.current_iteration)
-        else:
-            assert not wr.do_write(simu.current_iteration)
-        simu.advance()
-        count += 1
-    assert simu.current_iteration == 9
-    simu.finalize()
-    if main_rank == leader:
-        assert wr.do_write(simu.current_iteration)
-    else:
-        assert not wr.do_write(simu.current_iteration)
diff --git a/hysop/old/problem.old/tests/test_transport.py b/hysop/old/problem.old/tests/test_transport.py
deleted file mode 100644
index 31fdadd99fa2a4dbe103c7051a44dba8dde657f7..0000000000000000000000000000000000000000
--- a/hysop/old/problem.old/tests/test_transport.py
+++ /dev/null
@@ -1,136 +0,0 @@
-"""
-Testing transport problem.
-"""
-import numpy as np
-from hysop.tools.numpywrappers import npw
-from math import sqrt, pi, cos
-from hysop.domain.box import Box
-from hysop.fields.continuous_field import Field
-from hysop.operator.advection import Advection
-from hysop.problem.transport import TransportProblem
-from hysop.problem.simulation import Simulation
-from hysop.tools.parameters import Discretization
-
-
-def cosinus_product_2D(x, y, t):
-    return cos(2. * pi * x) * cos(pi * y)
-
-
-def cosinus_product_3D(x, y, z, t):
-    return cos(2. * pi * x) * cos(pi * y) * cos(4. * pi * z)
-
-
-def gaussian_scalar_3D(x, y, z, t):
-    r = sqrt(x * x + y * y + z * z)
-    if r < 1:
-        return (1. - r * r) ** 6
-    else:
-        return 0.
-
-
-def rotating_velocity_3D(x, y, z, t):
-    r = sqrt(x * x + y * y + z * z)
-    c = cos(3. * pi * r / 2.)
-    return -c * y, c * x, c * x
-
-
-def gaussian_scalar_2D(x, y, t):
-    r = sqrt(x * x + y * y)
-    if r < 1:
-        return (1. - r * r) ** 6
-    else:
-        return 0.
-
-
-def rotating_velocity_2D(x, y, t):
-    r = sqrt(x * x + y * y)
-    c = cos(3. * pi * r / 2.)
-    return -c * y, c * x
-
-
-def assertion(dim, boxLength, boxMin, nbElem, finalTime, time_step,
-              s, v, rtol=1e-05, atol=1e-08):
-    box = Box(length=boxLength, origin=boxMin)
-    print "domain init ...", id(box)
-    scal = Field(domain=box, formula=s, vectorize_formula=True, name='Scalar')
-    velo = Field(domain=box, formula=v, vectorize_formula=True,
-                 name='Velocity', is_vector=True)
-    advec = Advection(velo, scal, discretization=Discretization(nbElem))
-    simu = Simulation(start=0.0, end=finalTime,
-                      time_step=time_step, max_iter=1)
-    pb = TransportProblem([advec], simu)
-    pb.setup()
-    initial_scalar = npw.copy(scal.discrete_fields.values()[0].data[0])
-    pb.solve()
-    return np.allclose(initial_scalar, scal.discrete_fields.values()[0].data[0],
-                       rtol, atol)
-
-
-def test_nullVelocity_2D():
-    dim = 2
-    nb = 33
-    boxLength = [1., 1.]
-    boxMin = [0., 0.]
-    nbElem = [nb, nb]
-    time_step = 0.01
-    finalTime = time_step
-    assert assertion(dim, boxLength, boxMin,
-                     nbElem, finalTime, time_step,
-                     lambda x, y, t: np.random.random(),
-                     lambda x, y, t: (0., 0.))
-
-
-def test_nullVelocity_3D():
-    dim = 3
-    nb = 17
-    boxLength = [1., 1., 1.]
-    boxMin = [0., 0., 0.]
-    nbElem = [nb, nb, nb]
-    time_step = 0.01
-    finalTime = time_step
-    assert assertion(dim, boxLength, boxMin,
-                     nbElem, finalTime, time_step,
-                     lambda x, y, z, t: np.random.random(),
-                     lambda x, y, z, t: (0., 0., 0.))
-
-
-def test_gaussian_2D():
-    dim = 2
-    nb = 33
-    boxLength = [2., 2.]
-    boxMin = [-1., -1.]
-    nbElem = [nb, nb]
-    time_step = 0.001
-    finalTime = time_step
-    assert assertion(dim, boxLength, boxMin,
-                     nbElem, finalTime, time_step,
-                     gaussian_scalar_2D, rotating_velocity_2D,
-                     rtol=1e-04, atol=1e-05)
-
-
-def test_cosinus_translation_2D():
-    dim = 2
-    nb = 33
-    boxLength = [2., 2.]
-    boxMin = [-1., -1.]
-    nbElem = [nb, nb]
-    time_step = 1.
-    finalTime = 1.
-    assert assertion(dim, boxLength, boxMin,
-                     nbElem, finalTime, time_step,
-                     cosinus_product_2D,
-                     lambda x, y, t: (1., 2.))
-
-
-def test_cosinus_translation_3D():
-    dim = 3
-    nb = 17
-    boxLength = [2., 2., 2.]
-    boxMin = [-1., -1., -1.]
-    nbElem = [nb, nb, nb]
-    time_step = 1.
-    finalTime = time_step
-    assert assertion(dim, boxLength, boxMin,
-                     nbElem, finalTime, time_step,
-                     cosinus_product_3D,
-                     lambda x, y, z, t: (1., 2., 0.5))
diff --git a/hysop/old/problem.old/transport.py b/hysop/old/problem.old/transport.py
deleted file mode 100644
index 7544a6adf99ae67fa0a8c20a85d151f4e54cabc5..0000000000000000000000000000000000000000
--- a/hysop/old/problem.old/transport.py
+++ /dev/null
@@ -1,26 +0,0 @@
-"""Transport problem
-
-todo : proper description
-"""
-from hysop.problem.problem import Problem
-from hysop.operator.advection import Advection
-from hysop.operator.analytic import Analytic
-
-
-class TransportProblem(Problem):
-    """
-    Transport problem description.
-    """
-    def __init__(self, operators, simulation,
-                 dump_freq=100, name=None):
-        super(TransportProblem, self).__init__(
-            operators, simulation,
-            dump_freq=dump_freq, name="TransportProblem")
-        self.advection, self.velocity = None, None
-        for op in self.operators:
-            if isinstance(op, Advection):
-                self.advection = op
-            if isinstance(op, Analytic):
-                self.velocity = op
-        if self.advection is None:
-            raise ValueError("Transport problem with no Advection operator")
diff --git a/hysop/operator/adapt_timestep.py b/hysop/operator/adapt_timestep.py
index b547e65382a45d6e81a66643352508259ae367b2..b09ce9cfa7ab2d78a31f338fb857994e7c35b8c3 100755
--- a/hysop/operator/adapt_timestep.py
+++ b/hysop/operator/adapt_timestep.py
@@ -2,21 +2,27 @@
 Update time-step, depending on the flow field.
 See :ref:`adaptive time_step` for details.
 """
+import numpy as np
 
 from abc import ABCMeta, abstractmethod
 from hysop.constants import HYSOP_REAL, StretchingCriteria, AdvectionCriteria
 from hysop.tools.types import check_instance, first_not_None
 from hysop.tools.decorators import debug
-from hysop.tools.numpywrappers import npw
 from hysop.core.graph.node_generator import ComputationalGraphNodeGenerator
 from hysop.core.graph.computational_operator import ComputationalGraphOperator
 from hysop.core.graph.graph import op_apply
 from hysop.fields.continuous_field import Field
 from hysop.parameters import ScalarParameter, TensorParameter
 from hysop.core.mpi import MPI
+from hysop.backend.host.host_operator import HostOperatorBase
 
-class TimestepCriteria(ComputationalGraphOperator):
-    __metaclass__ = ABCMeta
+class TimestepCriteria(HostOperatorBase, metaclass=ABCMeta):
+
+    @debug
+    def __new__(cls, parameter, input_params, output_params,
+            dt_coeff=None, min_dt=None, max_dt=None, **kwds):
+        return super(TimestepCriteria, cls).__new__(cls,
+                input_params=input_params, output_params=output_params, **kwds)
 
     @debug
     def __init__(self, parameter, input_params, output_params,
@@ -58,7 +64,9 @@ class TimestepCriteria(ComputationalGraphOperator):
         self.min_dt   = 0.0 if (min_dt   is None) else min_dt
         self.max_dt   = 1e8 if (max_dt   is None) else max_dt
         self.dt_coeff = 1.0 if (dt_coeff is None) else dt_coeff
-        self.dt       = parameter
+
+        dt = parameter
+        self.dt = dt
 
         # Collect values from all MPI process
         if self.mpi_params.size == 1:
@@ -66,8 +74,8 @@ class TimestepCriteria(ComputationalGraphOperator):
             self._collect_max = lambda e: e
         else:
             comm = self.mpi_params.comm
-            self._sendbuff = npw.zeros((1, ))
-            self._recvbuff = npw.zeros((1, ))
+            self._sendbuff = np.zeros((1, ), dtype=dt.dtype)
+            self._recvbuff = np.zeros((1, ), dtype=dt.dtype)
             def _collect_max(val):
                 self._sendbuff[0] = val
                 comm.Allreduce(self._sendbuff, self._recvbuff, op=MPI.MAX)
@@ -83,8 +91,8 @@ class TimestepCriteria(ComputationalGraphOperator):
     def apply(self, **kwds):
         dt = self.compute_criteria(**kwds)
         dt *= self.dt_coeff
-        dt = self._collect_max(npw.maximum(dt, self.min_dt))
-        dt = self._collect_min(npw.minimum(dt, self.max_dt))
+        dt = self._collect_max(np.maximum(dt, self.min_dt))
+        dt = self._collect_min(np.minimum(dt, self.max_dt))
         assert (dt > 0.0), 'negative or zero timestep encountered.'
         self.dt.set_value(dt)
 
@@ -99,6 +107,14 @@ class TimestepCriteria(ComputationalGraphOperator):
 
 class ConstantTimestepCriteria(TimestepCriteria):
 
+    @debug
+    def __new__(cls, cst, parameter, Finf,
+                    name=None, pretty_name=None, **kwds):
+        return super(ConstantTimestepCriteria, cls).__new__(cls,
+                name=name, pretty_name=pretty_name,
+                input_params=None, output_params=None,
+                parameter=parameter, **kwds)
+
     @debug
     def __init__(self, cst, parameter, Finf,
                     name=None, pretty_name=None, **kwds):
@@ -121,10 +137,10 @@ class ConstantTimestepCriteria(TimestepCriteria):
         kwds: dict
             Base class arguments.
         """
-        if isinstance(cst, (int, long)):
+        if isinstance(cst, int):
             cst = float(cst)
         assert (cst > 0.0), 'negative cst factor.'
-        check_instance(cst, (float, npw.ndarray, list, tuple))
+        check_instance(cst, (float, np.ndarray, list, tuple))
         check_instance(Finf, (ScalarParameter, TensorParameter))
         if isinstance(Finf, ScalarParameter):
             assert isinstance(cst, float)
@@ -132,10 +148,10 @@ class ConstantTimestepCriteria(TimestepCriteria):
         else:
             is_scalar = False
             if isinstance(cst, float):
-                cst = npw.full(shape=Finf.shape, dtype=Finf.dtype, fill_value=cst)
+                cst = np.full(shape=Finf.shape, dtype=Finf.dtype, fill_value=cst)
             if isinstance(cst, (list, tuple)):
                 assert Finf.ndim == 1
-                cst = npw.asarray(cst)
+                cst = np.asarray(cst)
             msg='Shape mismatch between parameter {} and cst {}.'
             msg=msg.format(Finf.shape, cst.shape)
             assert Finf.shape == cst.shape, msg
@@ -157,19 +173,30 @@ class ConstantTimestepCriteria(TimestepCriteria):
         if self.is_scalar:
             assert Finf >= 0
             if (Finf == 0):
-                return npw.inf
+                return np.inf
             else:
                 return cst/Finf
         else:
             assert Finf.min() >= 0
             mask = (Finf!=0)
-            dt = npw.full_like(cst, fill_value=npw.inf)
+            dt = np.full_like(cst, fill_value=np.inf)
             dt[mask] = cst[mask] / Finf[mask]
             return dt.min()
 
 
 class CflTimestepCriteria(TimestepCriteria):
 
+    @debug
+    def __new__(cls, cfl, parameter,
+            Finf=None, Fmin=None, Fmax=None,
+            dx=None,
+            name=None, pretty_name=None,
+            relative_velocities=None, **kwds):
+        return super(CflTimestepCriteria, cls).__new__(cls,
+                name=name, pretty_name=pretty_name,
+                input_params=None, output_params=None,
+                parameter=parameter, **kwds)
+
     @debug
     def __init__(self, cfl, parameter,
             Finf=None, Fmin=None, Fmax=None,
@@ -243,7 +270,7 @@ class CflTimestepCriteria(TimestepCriteria):
 
         rv = ()
         for Vr in relative_velocities:
-            Vr = npw.asarray(Vr, dtype=dtype)
+            Vr = np.asarray(Vr, dtype=dtype)
             assert Vr.shape == shape
             rv += (Vr,)
         relative_velocities = rv
@@ -281,15 +308,15 @@ class CflTimestepCriteria(TimestepCriteria):
         assert len(dx) == Fmin.size == Fmax.size
         assert len(self.relative_velocities)>=1
 
-        dt = npw.inf
+        dt = np.inf
         for Vr in self.relative_velocities:
             Vmin = Fmin - Vr
             Vmax = Fmax - Vr
-            Vinf = npw.maximum(npw.abs(Vmin), npw.abs(Vmax))
-            if npw.all(npw.divide(Vinf, dx)==0):
-                cdt = cfl*npw.inf
+            Vinf = np.maximum(np.abs(Vmin), np.abs(Vmax))
+            if np.all(np.divide(Vinf, dx)==0):
+                cdt = cfl*np.inf
             else:
-                cdt = cfl / npw.max(npw.divide(Vinf, dx))
+                cdt = cfl / np.max(np.divide(Vinf, dx))
             dt = min(dt, cdt)
         return dt
 
@@ -304,6 +331,15 @@ class CflTimestepCriteria(TimestepCriteria):
 
 class AdvectionTimestepCriteria(TimestepCriteria):
 
+    @debug
+    def __new__(cls, lcfl, parameter, criteria,
+                    Finf=None, gradFinf=None,
+                    name=None, pretty_name=None, **kwds):
+        return super(AdvectionTimestepCriteria, cls).__new__(cls,
+                name=name, pretty_name=pretty_name,
+                input_params=None, output_params=None,
+                parameter=parameter, **kwds)
+
     @debug
     def __init__(self, lcfl, parameter, criteria,
                     Finf=None, gradFinf=None,
@@ -370,19 +406,19 @@ class AdvectionTimestepCriteria(TimestepCriteria):
         lcfl = self.lcfl
         if (criteria is AdvectionCriteria.W_INF):
             Finf = self.Finf()
-            if npw.max(Finf)==0:
-                return lcfl*npw.inf
+            if np.max(Finf)==0:
+                return lcfl*np.inf
             else:
-                return lcfl / npw.max(Finf)
+                return lcfl / np.max(Finf)
         elif (criteria is AdvectionCriteria.GRAD_U):
             gradFinf = self.gradFinf()
             if (gradFinf.ndim == 2):
-                gradFinf = npw.diag(gradFinf) #extract diagonal
-            return lcfl / npw.max(gradFinf)
+                gradFinf = np.diag(gradFinf) #extract diagonal
+            return lcfl / np.max(gradFinf)
         elif (criteria is AdvectionCriteria.DEFORMATION):
             gradFinf = self.gradFinf()
             gradFinf = (gradFinf + gradFinf.T)/2.0
-            return lcfl / npw.max( gradFinf.sum(axis=0) )
+            return lcfl / np.max( gradFinf.sum(axis=0) )
         else:
             msg='Unsupported stretching criteria {}.'.format(criteria)
             raise RuntimeError(msg)
@@ -390,6 +426,16 @@ class AdvectionTimestepCriteria(TimestepCriteria):
 
 class StretchingTimestepCriteria(TimestepCriteria):
 
+    @debug
+    def __new__(cls, gradFinf, parameter,
+                    cst=1.0, criteria=StretchingCriteria.GRAD_U,
+                    name=None, pretty_name=None,
+                    **kwds):
+        return super(StretchingTimestepCriteria, cls).__new__(cls,
+                name=name, pretty_name=pretty_name,
+                input_params=None, output_params=None,
+                parameter=parameter, **kwds)
+
     @debug
     def __init__(self, gradFinf, parameter,
                     cst=1.0, criteria=StretchingCriteria.GRAD_U,
@@ -402,7 +448,7 @@ class StretchingTimestepCriteria(TimestepCriteria):
 
         where |dFi/dXj| = |gradF|_inf_ij
 
-        ie. dt = cst / npw.max( gradFinf.sum(axis=1) )
+        ie. dt = cst / np.max( gradFinf.sum(axis=1) )
 
         Parameters
         ----------
@@ -439,13 +485,22 @@ class StretchingTimestepCriteria(TimestepCriteria):
         criteria = self.criteria
         if (criteria is StretchingCriteria.GRAD_U):
             gradFinf = self.gradFinf()
-            return self.cst / npw.max( gradFinf.sum(axis=1) )
+            return self.cst / np.max( gradFinf.sum(axis=1) )
         else:
             msg='Unsupported stretching criteria {}.'.format(criteria)
             raise RuntimeError(msg)
 
+
 class MergeTimeStepCriterias(TimestepCriteria):
 
+    @debug
+    def __new__(cls, parameter, criterias,
+                 equivalent_CFL=None, cfl_criteria=None, start_time=None,
+                 **kwds):
+        return super(MergeTimeStepCriterias, cls).__new__(cls,
+                input_params=None, output_params=None,
+                parameter=parameter, **kwds)
+
     @debug
     def __init__(self, parameter, criterias,
                  equivalent_CFL=None, cfl_criteria=None, start_time=None,
@@ -478,7 +533,7 @@ class MergeTimeStepCriterias(TimestepCriteria):
     @debug
     def apply(self, simulation, **kwds):
         assert simulation.dt is self.dt, 'Parameter mismatch between Simulation and AdaptiveTimeStep.'
-        if self._start_time is None or simulation.t() > self._start_time:
+        if (self._start_time is None) or (simulation.t() > self._start_time):
             super(MergeTimeStepCriterias, self).apply(simulation=simulation, **kwds)
 
 
@@ -490,6 +545,14 @@ class AdaptiveTimeStep(ComputationalGraphNodeGenerator):
     dt = max(min(max_dt, dt_criterias), min_dt)
     """
 
+    @debug
+    def __new__(cls, dt, min_dt=None, max_dt=None, dt_coeff=None,
+                 equivalent_CFL=False, base_kwds=None, start_time=None,
+                 **kwds):
+        base_kwds = first_not_None(base_kwds, {})
+        return super(AdaptiveTimeStep, cls).__new__(cls,
+                candidate_input_tensors=None, candidate_output_tensors=None, **base_kwds)
+
     @debug
     def __init__(self, dt, min_dt=None, max_dt=None, dt_coeff=None,
                  equivalent_CFL=False, base_kwds=None, start_time=None,
@@ -646,6 +709,6 @@ class AdaptiveTimeStep(ComputationalGraphNodeGenerator):
                                        cfl_criteria=self.cfl_criteria,
                                        criterias=self.criterias, **self.merge_kwds)
 
-        operators = self.criterias.values()
+        operators = list(self.criterias.values())
         operators.append(merge)
         return operators
diff --git a/hysop/operator/advection.py b/hysop/operator/advection.py
index 2f2560e1cc624b523f898c544207f3b18c8e6c75..e2c29b5b6243818a174e94c5384a834ec9322d2a 100644
--- a/hysop/operator/advection.py
+++ b/hysop/operator/advection.py
@@ -3,7 +3,7 @@
 Advection operator generator.
 """
 from hysop.constants import Implementation
-from hysop.tools.types import check_instance, to_list
+from hysop.tools.types import check_instance, to_list, first_not_None
 from hysop.tools.decorators import debug
 from hysop.fields.continuous_field import Field
 from hysop.topology.cartesian_descriptor import CartesianTopologyDescriptors
@@ -71,6 +71,23 @@ class Advection(ComputationalGraphNodeFrontend):
     def default_implementation(cls):
         return Implementation.FORTRAN
 
+    @debug
+    def __new__(cls, velocity,
+                 advected_fields,
+                 variables,
+                 dt,
+                 advected_fields_out=None,
+                 implementation=None,
+                 base_kwds=None,
+                 **kwds):
+        return super(Advection, cls).__new__(cls,
+            velocity=velocity, dt=dt,
+            advected_fields_in=None,
+            advected_fields_out=None,
+            variables=variables,
+            implementation=implementation,
+            base_kwds=base_kwds, **kwds)
+
     @debug
     def __init__(self, velocity,
                  advected_fields,
@@ -127,7 +144,7 @@ class Advection(ComputationalGraphNodeFrontend):
 
         advected_fields = tuple(advected_fields)
         advected_fields_out = tuple(advected_fields_out)
-        base_kwds = base_kwds or dict()
+        base_kwds = first_not_None(base_kwds, {})
 
         check_instance(velocity, Field)
         check_instance(advected_fields,     tuple, values=Field)
@@ -144,3 +161,4 @@ class Advection(ComputationalGraphNodeFrontend):
             variables=variables,
             implementation=implementation,
             base_kwds=base_kwds, **kwds)
+
diff --git a/hysop/operator/analytic.py b/hysop/operator/analytic.py
index 7d243477ddde545c38b655a4f62eed8fae8b604f..820cd76983c0edb47d2c724502f5a2d8a265e0c5 100644
--- a/hysop/operator/analytic.py
+++ b/hysop/operator/analytic.py
@@ -15,13 +15,22 @@ class AnalyticField(ComputationalGraphNodeGenerator):
     chosen implementation backend.
     """
 
+    @debug
+    def __new__(cls, field, formula, variables, extra_input_kwds=None,
+            implementation=None, base_kwds=None, **kwds):
+        base_kwds = first_not_None(base_kwds, {})
+        return super(AnalyticField, cls).__new__(cls,
+                candidate_input_tensors=None,
+                candidate_output_tensors=None,
+                **base_kwds)
+
     @debug
     def __init__(self, field, formula, variables, extra_input_kwds=None,
             implementation=None, base_kwds=None, **kwds):
         """
         AnalyticField operator frontend.
 
-        Apply a user-defined formula onto a field, possibly 
+        Apply a user-defined formula onto a field, possibly
         dependent on space variables and external fields/parameters.
 
         Parameters
@@ -50,23 +59,23 @@ class AnalyticField(ComputationalGraphNodeGenerator):
         check_instance(variables, dict, keys=Field, values=CartesianTopologyDescriptors)
         extra_input_kwds = first_not_None(extra_input_kwds, {})
         base_kwds = first_not_None(base_kwds, {})
-            
+
         assert 'extra_input_kwds' not in kwds
         assert 'component' not in kwds
         assert 'coords' not in kwds
 
         if (implementation is Implementation.PYTHON) and (extra_input_kwds is not None):
-            candidate_input_tensors = filter(lambda f: isinstance(f, Field), extra_input_kwds.values())
+            candidate_input_tensors = tuple(filter(lambda f: isinstance(f, Field), extra_input_kwds.values()))
         else:
             extra_input_kwds = {}
-            candidate_input_tensors = ()
+            candidate_input_tensors = tuple()
         candidate_output_tensors = (field,)
-        
+
         formula = to_tuple(formula)
         if len(formula) == 1:
             formula = formula*field.nb_components
         check_instance(formula, tuple, size=field.nb_components)
-        
+
         super(AnalyticField, self).__init__(
                 candidate_input_tensors=candidate_input_tensors,
                 candidate_output_tensors=candidate_output_tensors,
@@ -78,7 +87,7 @@ class AnalyticField(ComputationalGraphNodeGenerator):
         self._extra_input_kwds = extra_input_kwds
         self._implementation = implementation
         self._kwds = kwds
-    
+
     @debug
     def _generate(self):
         nodes = []
@@ -95,7 +104,7 @@ class AnalyticField(ComputationalGraphNodeGenerator):
                     field=field,
                     formula=formula,
                     variables=variables,
-                    implementation=impl, 
+                    implementation=impl,
                     extra_input_kwds=extra_input_kwds,
                     **kwds)
             nodes.append(node)
@@ -118,7 +127,7 @@ class AnalyticScalarField(ComputationalGraphNodeFrontend):
                 Implementation.OPENCL: OpenClAnalyticField
         }
         return __implementations
-    
+
     @classmethod
     def default_implementation(cls):
         return Implementation.PYTHON
@@ -129,7 +138,7 @@ class AnalyticScalarField(ComputationalGraphNodeFrontend):
         """
         AnalyticField operator frontend.
 
-        Apply a user-defined formula onto a field, possibly 
+        Apply a user-defined formula onto a field, possibly
         dependent on space variables and external fields/parameters.
 
         Parameters
@@ -158,6 +167,6 @@ class AnalyticScalarField(ComputationalGraphNodeFrontend):
         if (implementation is Implementation.PYTHON):
             kwds['extra_input_kwds'] = extra_input_kwds
 
-        super(AnalyticScalarField, self).__init__(field=field, formula=formula, 
-                variables=variables, implementation=implementation, 
+        super(AnalyticScalarField, self).__init__(field=field, formula=formula,
+                variables=variables, implementation=implementation,
                 base_kwds=base_kwds, **kwds)
diff --git a/hysop/operator/base/advection_dir.py b/hysop/operator/base/advection_dir.py
index 10026cad40c8fe3155f6e3dcec578f7c71446195..addedf7b3a77340c66848950f1638893f7487e4d 100644
--- a/hysop/operator/base/advection_dir.py
+++ b/hysop/operator/base/advection_dir.py
@@ -37,6 +37,17 @@ class DirectionalAdvectionBase(object):
         am.update(cls.__available_methods)
         return am
 
+    @debug
+    def __new__(cls, velocity, relative_velocity, splitting_direction,
+                    advected_fields_in, advected_fields_out,
+                    variables, dt, velocity_cfl,
+                    remesh_criteria_eps=None,
+                    **kwds):
+        return super(DirectionalAdvectionBase, cls).__new__(cls,
+                input_fields=None, output_fields=None,
+                input_params=None, output_params=None,
+                splitting_direction=splitting_direction, **kwds)
+
     @debug
     def __init__(self, velocity, relative_velocity, splitting_direction,
                     advected_fields_in, advected_fields_out,
@@ -259,7 +270,7 @@ class DirectionalAdvectionBase(object):
         assert remesh_kernel.n >=1 , 'Bad remeshing kernel.'
         if remesh_kernel.n >1:
             assert remesh_kernel.n % 2 == 0, 'Odd remeshing kernel moments.'
-        min_ghosts = int(npw.floor(scalar_cfl)+1+remesh_kernel.n/2)
+        min_ghosts = int(npw.floor(scalar_cfl)+1+remesh_kernel.n//2)
         return min_ghosts
 
     @debug
@@ -279,17 +290,17 @@ class DirectionalAdvectionBase(object):
 
         self.is_bilevel = None
         if any(self.dvelocity.compute_resolution != \
-               self.dadvected_fields_out.values()[0].compute_resolution):
+               next(iter(self.dadvected_fields_out.values())).compute_resolution):
             self.is_bilevel = self.dvelocity.compute_resolution
 
     @debug
     def get_work_properties(self):
         requests  = super(DirectionalAdvectionBase, self).get_work_properties()
-        f = self.dadvected_fields_in.values()[0]
+        f = next(iter(self.dadvected_fields_in.values()))
         (pos, request, request_id) = MemoryRequest.cartesian_dfield_like(name='position', dfield=f,
                 ghosts=0, nb_components=1, is_read_only=False)
         requests.push_mem_request(request_id, request)
-        assert all(self.dadvected_fields_in.values()[0].compute_resolution == pos.resolution)
+        assert all(f.compute_resolution == pos.resolution)
         self.dposition = pos
         return requests
 
diff --git a/hysop/operator/base/convergence.py b/hysop/operator/base/convergence.py
index 9ca58e0d521bb254c2918e377558cff3c2107583..def197ec4ca033b8e0d09a0c11411c415ab3589a 100644
--- a/hysop/operator/base/convergence.py
+++ b/hysop/operator/base/convergence.py
@@ -34,6 +34,12 @@ class ConvergenceBase(object):
         am.update(cls.__available_methods)
         return am
 
+    @debug
+    def __new__(cls, variables, convergence=None, u_old=None,
+                implementation=None, **kwds):
+        return super(ConvergenceBase, cls).__new__(
+            cls, input_fields=None, output_params=None, **kwds)
+
     @debug
     def __init__(self, variables, convergence=None, u_old=None,
                  implementation=None, **kwds):
@@ -54,7 +60,7 @@ class ConvergenceBase(object):
         check_instance(variables, dict, keys=Field, values=CartesianTopologyDescriptors)
         check_instance(u_old, Field, allow_none=True)
 
-        field = variables.keys()[0]
+        field = next(iter(variables))
         if convergence is None:
             convergence = TensorParameter(name="|residual|", dtype=field.dtype,
                                           shape=(field.nb_components, ),
diff --git a/hysop/operator/base/curl.py b/hysop/operator/base/curl.py
index f6651ca5376fbf8f1ac27ac88901c8423f144363..e586e32fbbefa235e114545953f0cb2b00937151 100644
--- a/hysop/operator/base/curl.py
+++ b/hysop/operator/base/curl.py
@@ -19,14 +19,19 @@ class CurlOperatorBase(SpectralOperatorBase):
     """
     Compute the curl using a specific implementation.
     """
-    
+
+    @debug
+    def __new__(cls, Fin, Fout, variables, **kwds):
+        return super(CurlOperatorBase, cls).__new__(cls,
+                input_fields=None, output_fields=None, **kwds)
+
     @debug
     def __init__(self, Fin, Fout, variables, **kwds):
         """
         Create an operator that computes the curl of an input field Fin.
 
         Given Fin, a 2D ScalarField, a 2D VectorField or a 3D VectorField, compute Fout = curl(Fin).
-        
+
         Only the following configurations are supported:
                  dim   nb_components  |   dim   nb_components
         Input:    2        (1,2)      |    3          3
@@ -45,7 +50,7 @@ class CurlOperatorBase(SpectralOperatorBase):
         kwds: dict, optional
             Extra parameters passed towards base class (MultiSpaceDerivatives).
         """
-        
+
         check_instance(Fin,  Field)
         check_instance(Fout, Field)
         check_instance(variables, dict, keys=Field, values=CartesianTopologyDescriptors)
@@ -83,18 +88,18 @@ class CurlOperatorBase(SpectralOperatorBase):
         else:
             msg='Unsupported dimension {}.'.format(dim)
             raise RuntimeError(msg)
-        
+
         # input and output fields
         input_fields  = { Fin:  variables[Fin] }
         output_fields = { Fout: variables[Fout] }
 
-        super(CurlOperatorBase, self).__init__(input_fields=input_fields, 
+        super(CurlOperatorBase, self).__init__(input_fields=input_fields,
                 output_fields=output_fields, **kwds)
 
         self.Fin  = Fin
         self.Fout = Fout
         self.dim  = dim
-        
+
     @debug
     def discretize(self):
         if self.discretized:
@@ -109,19 +114,22 @@ class SpectralCurlOperatorBase(CurlOperatorBase):
     """
     Compute the curl using a specific spectral implementation.
     """
+    def __new__(cls, **kwds):
+        return super(SpectralCurlOperatorBase, cls).__new__(cls, **kwds)
+
     def __init__(self, **kwds):
         super(SpectralCurlOperatorBase, self).__init__(**kwds)
-        
+
         dim = self.dim
         Fin, Fout = self.Fin, self.Fout
         assert Fin is not Fout, 'Cannot compute curl inplace!'
-        
+
         if (dim==2):
             tg0 = self.new_transform_group()
             tg1 = self.new_transform_group()
             if (Fin.nb_components==1):
                 assert (Fout.nb_components==2)
-                F0 = tg0.require_forward_transform(Fin, axes=0, 
+                F0 = tg0.require_forward_transform(Fin, axes=0,
                         custom_output_buffer='auto')
                 B0 = tg0.require_backward_transform(Fout[0], axes=0,
                         custom_input_buffer='auto', matching_forward_transform=F0)
@@ -130,7 +138,7 @@ class SpectralCurlOperatorBase(CurlOperatorBase):
                         custom_output_buffer='auto')
                 B1 = tg1.require_backward_transform(Fout[1], axes=1,
                         custom_input_buffer='auto', matching_forward_transform=F1)
-            
+
                 expr0 = Assignment(B0.s, +F0.s.diff(F0.s.frame.coords[1]))
                 expr1 = Assignment(B1.s, -F1.s.diff(F1.s.frame.coords[0]))
             elif (Fin.nb_components==2):
@@ -141,10 +149,10 @@ class SpectralCurlOperatorBase(CurlOperatorBase):
 
                 F1 = tg1.require_forward_transform(Fin[0], axes=0,
                         custom_output_buffer='auto')
-                B1 = tg1.require_backward_transform(Fout, axes=0, 
+                B1 = tg1.require_backward_transform(Fout, axes=0,
                         custom_input_buffer='auto', matching_forward_transform=F1,
                         action=SpectralTransformAction.ACCUMULATE)
-                
+
                 expr0 = Assignment(B0.s, +F0.s.diff(F0.s.frame.coords[0]))
                 expr1 = Assignment(B1.s, -F1.s.diff(F1.s.frame.coords[1]))
             else:
@@ -212,11 +220,11 @@ class SpectralCurlOperatorBase(CurlOperatorBase):
             backward_transforms = (B0, B1, B2, B3, B4, B5)
         else:
             raise NotImplementedError
-       
+
         self.forward_transforms  = forward_transforms
         self.backward_transforms = backward_transforms
         self.K = K
-    
+
     @debug
     def discretize(self):
         if self.discretized:
@@ -224,13 +232,13 @@ class SpectralCurlOperatorBase(CurlOperatorBase):
         super(SpectralCurlOperatorBase, self).discretize()
         dFin, dFout = self.dFin, self.dFout
         K = self.K
-        
+
         dK = ()
         for (tg,Ki) in K:
             _, dKi, _ = tg.discrete_wave_numbers[Ki]
             dK += (dKi,)
         self.dK = dK
-    
+
     def setup(self, work):
         super(SpectralCurlOperatorBase, self).setup(work)
 
diff --git a/hysop/operator/base/custom_symbolic_operator.py b/hysop/operator/base/custom_symbolic_operator.py
index 6e5b02e5877fe8e9f79bee168292b62497af4664..f2f8f32bbc46fb250871eb6d60ba2b462423f7c4 100644
--- a/hysop/operator/base/custom_symbolic_operator.py
+++ b/hysop/operator/base/custom_symbolic_operator.py
@@ -1,6 +1,7 @@
 from abc import ABCMeta
+import sympy as sm
+import numpy as np
 
-from hysop.deps import sm
 from hysop.tools.numpywrappers import npw
 from hysop.tools.types import check_instance, to_tuple, InstanceOf, first_not_None, to_set
 from hysop.tools.decorators import debug
@@ -39,10 +40,14 @@ class ExprDiscretizationInfo(object):
     SimpleCounterTypes = (SymbolicArray, SymbolicBuffer,)
     IndexedCounterTypes = (DiscreteScalarFieldView,)
 
-    def __init__(self):
+    def __new__(cls, **kwds):
+        return super(ExprDiscretizationInfo, cls).__new__(cls, **kwds)
+
+    def __init__(self, **kwds):
         """
         Helper class to store information about discretized symbolic expressions.
         """
+        super(ExprDiscretizationInfo, self).__init__(**kwds)
         self.read_counter = SortedDict()
         self.write_counter = SortedDict()
         self.parameters = SortedDict()
@@ -75,7 +80,7 @@ class ExprDiscretizationInfo(object):
 
     def copy(self):
         edi = ExprDiscretizationInfo()
-        for (obj, counts) in self.read_counter.iteritems():
+        for (obj, counts) in self.read_counter.items():
             if isinstance(obj, self.IndexedCounterTypes):
                 counter = edi.read_counter.setdefault(obj,
                                                       npw.int_zeros(shape=(obj.nb_components,)))
@@ -85,7 +90,7 @@ class ExprDiscretizationInfo(object):
             else:
                 msg = 'Unsupported type {}.'.format(type(obj))
                 raise TypeError(msg)
-        for (obj, counts) in self.write_counter.iteritems():
+        for (obj, counts) in self.write_counter.items():
             if isinstance(obj, self.IndexedCounterTypes):
                 counter = edi.write_counter.setdefault(obj,
                                                        npw.int_zeros(shape=(obj.nb_components,)))
@@ -100,7 +105,7 @@ class ExprDiscretizationInfo(object):
 
     def update(self, other):
         check_instance(other, ExprDiscretizationInfo)
-        for (obj, counts) in other.read_counter.iteritems():
+        for (obj, counts) in other.read_counter.items():
             if isinstance(obj, self.IndexedCounterTypes):
                 counter = self.read_counter.setdefault(obj,
                                                        npw.int_zeros(shape=(obj.nb_components,)))
@@ -111,7 +116,7 @@ class ExprDiscretizationInfo(object):
             else:
                 msg = 'Unsupported type {}.'.format(type(obj))
                 raise TypeError(msg)
-        for (obj, counts) in other.write_counter.iteritems():
+        for (obj, counts) in other.write_counter.items():
             if isinstance(obj, self.IndexedCounterTypes):
                 counter = self.write_counter.setdefault(obj,
                                                         npw.int_zeros(shape=(obj.nb_components,)))
@@ -130,8 +135,9 @@ class ExprDiscretizationInfo(object):
             self.parameters[p.name] = param
 
     def __iadd__(self, rhs):
-        check_instance(rhs, int)
-        for (obj, counts) in self.read_counter.iteritems():
+        check_instance(rhs, (np.integer, int))
+        rhs = int(rhs)
+        for (obj, counts) in self.read_counter.items():
             if isinstance(obj, self.IndexedCounterTypes):
                 counts[counts > 0] += rhs
             elif isinstance(obj, self.SimpleCounterTypes):
@@ -139,7 +145,7 @@ class ExprDiscretizationInfo(object):
             else:
                 msg = 'Unsupported type {}.'.format(type(obj))
                 raise TypeError(msg)
-        for (obj, counts) in self.write_counter.iteritems():
+        for (obj, counts) in self.write_counter.items():
             if isinstance(obj, self.IndexedCounterTypes):
                 counts[counts > 0] += rhs
             elif isinstance(obj, self.SimpleCounterTypes):
@@ -150,8 +156,9 @@ class ExprDiscretizationInfo(object):
         return self
 
     def __imul__(self, rhs):
-        check_instance(rhs, int)
-        for (obj, counts) in self.read_counter.iteritems():
+        check_instance(rhs, (np.integer, int))
+        rhs = int(rhs)
+        for (obj, counts) in self.read_counter.items():
             if isinstance(obj, self.IndexedCounterTypes):
                 counts[...] = rhs*counts
             elif isinstance(obj, self.SimpleCounterTypes):
@@ -159,7 +166,7 @@ class ExprDiscretizationInfo(object):
             else:
                 msg = 'Unsupported type {}.'.format(type(obj))
                 raise TypeError(msg)
-        for (obj, counts) in self.write_counter.iteritems():
+        for (obj, counts) in self.write_counter.items():
             if isinstance(obj, self.IndexedCounterTypes):
                 counts[...] = rhs*counts
             elif isinstance(obj, self.SimpleCounterTypes):
@@ -194,6 +201,10 @@ class ExprDiscretizationInfo(object):
 class SymbolicExpressionInfo(object):
 
     """Helper class store information about parsed symbolic expressions."""
+    def __new__(cls, name, exprs,
+                dt=None, dt_coeff=None,
+                compute_resolution=None, **kwds):
+        return super(SymbolicExpressionInfo, cls).__new__(cls, **kwds)
 
     def __init__(self, name, exprs,
                  dt=None, dt_coeff=None,
@@ -249,7 +260,7 @@ class SymbolicExpressionInfo(object):
             self._dim = None
         else:
             compute_resolution = to_tuple(compute_resolution)
-            check_instance(compute_resolution, tuple, values=(int, long))
+            check_instance(compute_resolution, tuple, values=int)
             self._dim = len(compute_resolution)
             self.compute_resolution = compute_resolution
 
@@ -264,13 +275,13 @@ class SymbolicExpressionInfo(object):
 
     def _get_fields(self):
         """Return input and output fields."""
-        fields = {k: v for (k, v) in self.input_fields.iteritems()}
+        fields = {k: v for (k, v) in self.input_fields.items()}
         fields.update(self.output_fields)
         return fields
 
     def _get_params(self):
         """Return input and output fields."""
-        fields = {k: v for (k, v) in self.input_params.iteritems()}
+        fields = {k: v for (k, v) in self.input_params.items()}
         fields.update(self.output_params)
         return fields
 
@@ -338,15 +349,15 @@ class SymbolicExpressionInfo(object):
         check_instance(output_dfields, dict, keys=Field, values=DiscreteScalarFieldView)
         assert len(set(self.input_fields.keys()) - set(input_dfields.keys())) == 0
         assert len(set(self.output_fields.keys()) - set(output_dfields.keys())) == 0
-        self.input_dfields = {k: v for (k, v) in input_dfields.iteritems()
+        self.input_dfields = {k: v for (k, v) in input_dfields.items()
                               if (k in self.input_fields)}
-        self.output_dfields = {k: v for (k, v) in output_dfields.iteritems()
+        self.output_dfields = {k: v for (k, v) in output_dfields.items()
                                if (k in self.output_fields)}
-        self.inout_dfields = {k: v for (k, v) in self.output_dfields.iteritems()
+        self.inout_dfields = {k: v for (k, v) in self.output_dfields.items()
                               if ((k in self.input_dfields) and (self.input_dfields[k].dfield is v.dfield))}
         self.stencils = SortedDict()
 
-        dfields = input_dfields.values() + output_dfields.values()
+        dfields = tuple(input_dfields.values()) + tuple(output_dfields.values())
         if (force_symbolic_axes is not None):
             if isinstance(force_symbolic_axes, tuple):
                 axes = force_symbolic_axes
@@ -373,7 +384,7 @@ class SymbolicExpressionInfo(object):
         SymbolicExpressionParser.setup_expressions(self, work)
 
     def check_dfield_sizes(self):
-        dfields = set(f for f in (self.input_dfields.values() + self.output_dfields.values()))
+        dfields = set(self.input_dfields.values()).union(self.output_dfields.values())
         if len(dfields) > 0:
             dfield0 = next(iter(dfields))
             compute_resolution = first_not_None(self.compute_resolution, dfield0.compute_resolution)
@@ -388,7 +399,7 @@ class SymbolicExpressionInfo(object):
 
     def check_arrays(self):
         compute_resolution = self.compute_resolution
-        arrays = set(a for a in (self.input_arrays.values() + self.output_arrays.values()))
+        arrays = set(self.input_arrays.values()).union(self.output_arrays.values())
         for a in arrays:
             if not a.is_bound:
                 msg = 'FATAL ERROR: {}::{} has not been bound to any memory '
@@ -414,7 +425,7 @@ class SymbolicExpressionInfo(object):
         self.compute_resolution = tuple(compute_resolution)
 
     def check_buffers(self):
-        buffers = set(b for b in (self.input_buffers.values() + self.output_buffers.values()))
+        buffers = set(self.input_buffers.values()).union(self.output_buffers.values())
         for b in buffers:
             if not b.is_bound:
                 msg = 'FATAL ERROR: {}::{} has not been bound to any memory '
@@ -476,17 +487,17 @@ class SymbolicExpressionInfo(object):
                           for p in self.input_params.keys()) if self.input_params else 'none',
                 ', '.join('{}'.format(p)
                           for p in self.output_params.keys()) if self.output_params else 'none',
-                '\n'+'\n'.join('    {}: {}'.format(f.name, (d.short_description() if isinstance(d, TopologyView)
-                                                            else d)) for (f, d) in self.fields.iteritems()))
+                '\n'+'\n'.join('    {}: {}'.format(f.name, (d.short_description() if isinstance(d, TopologyView)
+                                                            else d)) for (f, d) in self.fields.items()))
         if self.min_ghosts:
             msg += '''
   min_ghosts_per_components:{}
   min_ghosts:{}
 '''.format(
                 '\n'+'\n'.join('    {}/ [{}]'.format(f.name, ', '.join(str(x) for x in gpc))
-                               for f, gpc in self.min_ghosts_per_components.iteritems()),
+                               for f, gpc in self.min_ghosts_per_components.items()),
                 '\n'+'\n'.join('    {}/ [{}]'.format(f.name, ', '.join(str(x) for x in g))
-                               for f, g in self.min_ghosts.iteritems()))
+                               for f, g in self.min_ghosts.items()))
         if self.is_discretized:
             msg += '''
 
@@ -532,7 +543,7 @@ class SymbolicExpressionParser(object):
         kind = None
         fields = SortedDict()
         arrays = SortedDict()
-        exprs = filter(lambda e: isinstance(e, ValidExpressions), cls.extract_expressions(exprs))
+        exprs = tuple(filter(lambda e: isinstance(e, ValidExpressions), cls.extract_expressions(exprs)))
         for expr in exprs:
             check_instance(expr, ValidExpressions)
             lhs = expr.args[0]
@@ -582,6 +593,7 @@ class SymbolicExpressionParser(object):
             else:
                 msg = 'Assignment LHS cannot be of type {}.'.format(type(lhs))
                 raise TypeError(msg)
+
             if (field is not None):
                 assert isinstance(field, AppliedSymbolicField)
                 index = field.index
@@ -640,9 +652,9 @@ class SymbolicExpressionParser(object):
                 cls.parse_subexpr(variables, info, expr)
             except:
                 msg = 'Failed to parse symbolic expression type {}.'
-                print
-                print msg.format(type(expr))
-                print
+                print()
+                print(msg.format(type(expr)))
+                print()
                 raise
 
     @classmethod
@@ -668,7 +680,7 @@ class SymbolicExpressionParser(object):
             assert expr.ndim == 0, expr
             expr = expr.tolist()
 
-        if isinstance(expr, (str, int, long, float, complex, npw.number)):
+        if isinstance(expr, (str, int, float, complex, npw.number)):
             return
         elif isinstance(expr, (AppliedSymbolicField, SymbolicScalarParameter, SymbolicArray)):
             cls.read(variables, info, expr)
@@ -860,7 +872,7 @@ class SymbolicExpressionParser(object):
                 raise TypeError(msg)
 
             min_ghosts_expr_i = SortedDict()
-            for (obj, reqs) in obj_reqs.iteritems():
+            for (obj, reqs) in obj_reqs.items():
                 if isinstance(obj, tuple) and isinstance(obj[0], Field):
                     if (obj in field_requirements):
                         field_requirements[obj].update_requirements(reqs)
@@ -880,8 +892,8 @@ class SymbolicExpressionParser(object):
                 min_ghosts_expr_i[k] = v
             min_ghosts_per_expr[i] = min_ghosts_expr_i
 
-        lhs_fields = {v[2]: k for (k, v) in updated_fields.iteritems()}
-        lhs_arrays = {v[1]: k for (k, v) in updated_arrays.iteritems()}
+        lhs_fields = {v[2]: k for (k, v) in updated_fields.items()}
+        lhs_arrays = {v[1]: k for (k, v) in updated_arrays.items()}
         lhs_objects = lhs_fields.copy()
         lhs_objects.update(lhs_arrays)
 
@@ -923,10 +935,11 @@ class SymbolicExpressionParser(object):
         nobjects = len(all_objects)
         assert (nobjects == len(ro_fields)+len(ro_arrays)+len(lhs_fields)+len(lhs_arrays))
         info.nobjects = nobjects
+
         expr_ghost_map = npw.int_zeros(shape=(nlhsobjects, nobjects))
-        for (fi_name, i) in lhs_objects.iteritems():
+        for (fi_name, i) in lhs_objects.items():
             min_ghosts = min_ghosts_per_expr[i]
-            for (fj_name, min_ghost) in min_ghosts.iteritems():
+            for (fj_name, min_ghost) in min_ghosts.items():
                 assert (fj_name in all_objects), fj_name
                 j = all_objects[fj_name]
                 expr_ghost_map[i, j] = min_ghost
@@ -935,7 +948,7 @@ class SymbolicExpressionParser(object):
         if nlhsobjects:
             G_f = expr_ghost_map[:, :nlhsobjects]
             min_ghosts_per_step[0, :] = npw.max(G_f, axis=0)
-            for s in xrange(1, nsteps):
+            for s in range(1, nsteps):
                 min_ghosts_per_step[s] = npw.max(G_f + min_ghosts_per_step[s-1][:, None], axis=0)
             min_ghosts_lhs = min_ghosts_per_step[nsteps-1]
 
@@ -948,10 +961,10 @@ class SymbolicExpressionParser(object):
         else:
             min_ghosts = npw.int_zeros(shape=(nobjects,))
 
-        lhs_objects = {v: k for (k, v) in lhs_objects.iteritems()}
-        lhs_objects = tuple(lhs_objects[i] for i in xrange(nlhsobjects))
-        all_objects = {v: k for (k, v) in all_objects.iteritems()}
-        all_objects = tuple(all_objects[i] for i in xrange(nobjects))
+        lhs_objects = {v: k for (k, v) in lhs_objects.items()}
+        lhs_objects = tuple(lhs_objects[i] for i in range(nlhsobjects))
+        all_objects = {v: k for (k, v) in all_objects.items()}
+        all_objects = tuple(all_objects[i] for i in range(nobjects))
 
         info.expr_ghost_map = expr_ghost_map
         info.min_ghosts_per_integration_step = min_ghosts_per_step
@@ -969,7 +982,7 @@ class SymbolicExpressionParser(object):
             assert expr.ndim == 0
             expr = expr.tolist()
 
-        if isinstance(expr, (int, long, sm.Integer, float, complex, sm.Rational, sm.Float, npw.number)):
+        if isinstance(expr, (int, sm.Integer, float, complex, sm.Rational, sm.Float, npw.number)):
             return {}
         elif isinstance(expr, Cast):
             return cls._extract_obj_requirements(info, expr.expr)
@@ -1027,7 +1040,7 @@ class SymbolicExpressionParser(object):
             min_ghosts = max(stencil.L, stencil.R)
 
             obj_reqs = cls._extract_obj_requirements(info, dexpr)
-            for (obj, req) in obj_reqs.iteritems():
+            for (obj, req) in obj_reqs.items():
                 req.min_ghosts[-1-direction] += min_ghosts
             return obj_reqs
         elif isinstance(expr, Assignment):
@@ -1046,7 +1059,7 @@ class SymbolicExpressionParser(object):
             obj_requirements = SortedDict()
             for e in expr.args:
                 obj_reqs = cls._extract_obj_requirements(info, e)
-                for (obj, reqs) in obj_reqs.iteritems():
+                for (obj, reqs) in obj_reqs.items():
                     if obj in obj_requirements:
                         obj_requirements[obj].update_requirements(reqs)
                     else:
@@ -1146,7 +1159,7 @@ class SymbolicExpressionParser(object):
             else:
                 expr = list(E)
             return expr, di
-        elif isinstance(expr, (int, long, float, complex, npw.number)):
+        elif isinstance(expr, (int, float, complex, npw.number)):
             return expr, di
         elif cls.should_transpose_expr(info, expr):
             expr = cls.transpose_expr(info, expr)
@@ -1223,9 +1236,9 @@ class SymbolicExpressionParser(object):
                 except:
                     msg = 'Failed to build a {} from arguments {}.'
                     msg = msg.format(expr.func, new_args)
-                    print
-                    print msg
-                    print
+                    print()
+                    print(msg)
+                    print()
                     raise
                 return expr, di
             else:
@@ -1276,11 +1289,10 @@ class SymbolicExpressionParser(object):
         return symbols[axes[i]]
 
 
-class CustomSymbolicOperatorBase(DirectionalOperatorBase):
+class CustomSymbolicOperatorBase(DirectionalOperatorBase, metaclass=ABCMeta):
     """
     Common implementation interface for custom symbolic (code generated) operators.
     """
-    __metaclass__ = ABCMeta
 
     __default_method = {
         ComputeGranularity: 0,
@@ -1326,6 +1338,22 @@ class CustomSymbolicOperatorBase(DirectionalOperatorBase):
         self._expr_info.interpolation = interpolation
         self._expr_info.space_discretization = space_discretization
 
+    @debug
+    def __new__(cls, name, exprs, variables,
+                splitting_direction=None, splitting_dim=None, dt_coeff=None,
+                dt=None, time=None, **kwds):
+        return super(CustomSymbolicOperatorBase, cls).__new__(cls, name=name,
+                                                              input_fields=None,
+                                                              output_fields=None,
+                                                              input_params=None,
+                                                              output_params=None,
+                                                              input_tensor_fields=None,
+                                                              output_tensor_fields=None,
+                                                              splitting_direction=splitting_direction,
+                                                              splitting_dim=splitting_dim,
+                                                              dt_coeff=dt_coeff,
+                                                              **kwds)
+
     @debug
     def __init__(self, name, exprs, variables,
                  splitting_direction=None, splitting_dim=None, dt_coeff=None,
@@ -1359,8 +1387,8 @@ class CustomSymbolicOperatorBase(DirectionalOperatorBase):
         """
         check_instance(variables, dict, keys=Field, values=CartesianTopologyDescriptors)
         check_instance(exprs, tuple, values=ValidExpressions, minsize=1)
-        check_instance(splitting_direction, (int, long), allow_none=True)
-        check_instance(splitting_dim, (int, long), allow_none=True)
+        check_instance(splitting_direction, int, allow_none=True)
+        check_instance(splitting_dim, int, allow_none=True)
         check_instance(dt_coeff, float, allow_none=True)
         check_instance(dt, ScalarParameter, allow_none=True)
 
@@ -1371,7 +1399,7 @@ class CustomSymbolicOperatorBase(DirectionalOperatorBase):
         dt_coeff = first_not_None(dt_coeff, 1.0)
 
         # Expand tensor fields to scalar fields
-        scalar_variables = {sfield: topod for (tfield, topod) in variables.iteritems()
+        scalar_variables = {sfield: topod for (tfield, topod) in variables.items()
                             for sfield in tfield.fields}
 
         expr_info = SymbolicExpressionParser.parse(name, scalar_variables, *exprs, dt=dt, dt_coeff=dt_coeff,
@@ -1451,7 +1479,7 @@ class CustomSymbolicOperatorBase(DirectionalOperatorBase):
                 min_ghosts = npw.int_zeros(shape=(field.nb_components, field.dim))
                 if has_direction:
                     req.axes = axes
-                for index in xrange(field.nb_components):
+                for index in range(field.nb_components):
                     fname = '{}_{}'.format(field.name, index)
                     G = expr_info.min_ghosts_per_field_name.get(fname, 0)
                     if (field, index) in field_reqs:
@@ -1480,9 +1508,9 @@ class CustomSymbolicOperatorBase(DirectionalOperatorBase):
                     min_ghosts_per_components[field] = min_ghosts
 
         expr_info.min_ghosts = {k: npw.max(v, axis=0) for (k, v) in
-                                min_ghosts_per_components.iteritems()}
+                                min_ghosts_per_components.items()}
         expr_info.min_ghosts_per_components = {field: gpc[:, -1-direction]
-                                               for (field, gpc) in min_ghosts_per_components.iteritems()}
+                                               for (field, gpc) in min_ghosts_per_components.items()}
 
         for (array, reqs) in array_reqs:
             expr_info.min_ghosts[array] = reqs.min_ghosts.copy()
diff --git a/hysop/operator/base/derivative.py b/hysop/operator/base/derivative.py
index 2f9541d162fe5fdf03ff0440050d3a3cf70fd705..c10296de40664bbe74516116d9fc319ba12dd4fc 100644
--- a/hysop/operator/base/derivative.py
+++ b/hysop/operator/base/derivative.py
@@ -1,7 +1,6 @@
-
 from abc import ABCMeta, abstractmethod
+import sympy as sm
 
-from hysop.deps import sm
 from hysop.tools.numpywrappers import npw
 from hysop.tools.types       import check_instance, to_tuple, first_not_None, InstanceOf
 from hysop.tools.decorators  import debug
@@ -17,16 +16,22 @@ from hysop.fields.continuous_field import Field, ScalarField
 from hysop.operator.base.spectral_operator import SpectralOperatorBase
 
 
-class SpaceDerivativeBase(object):
+class SpaceDerivativeBase(object, metaclass=ABCMeta):
     """
     Common implementation interface for derivative operators.
     """
 
-    __metaclass__ = ABCMeta
+    @debug
+    def __new__(cls, F, dF, A=None,
+            derivative=None, direction=None,
+            variables=None, name=None, pretty_name=None, require_tmp=None, **kwds):
+        return super(SpaceDerivativeBase, cls).__new__(cls, name=name, pretty_name=pretty_name,
+                input_fields=None, output_fields=None,
+                input_params=None, **kwds)
 
     @debug
-    def __init__(self, F, dF, A=None, 
-            derivative=None, direction=None, 
+    def __init__(self, F, dF, A=None,
+            derivative=None, direction=None,
             variables=None, name=None, pretty_name=None, require_tmp=None, **kwds):
         """
         SpaceDerivative operator base.
@@ -64,7 +69,7 @@ class SpaceDerivativeBase(object):
             Pretty name of this operator.
         kwds: dict, optional
             Base class keyword arguments.
-        
+
         Notes
         -----
         There is two way to build a derivative:
@@ -76,11 +81,11 @@ class SpaceDerivativeBase(object):
                  => derivative=(k0,...,kn)
         """
         assert (derivative is not None)
-        A          = first_not_None(A, 1)
+        A = first_not_None(A, 1)
 
         check_instance(F,  ScalarField)
         check_instance(dF, ScalarField, allow_none=True)
-        
+
         if isinstance(derivative, tuple):
             assert len(derivative)==F.dim
         else:
@@ -89,7 +94,7 @@ class SpaceDerivativeBase(object):
             _derivative[direction] = derivative
             derivative = tuple(_derivative)
         check_instance(derivative, tuple, size=F.dim, minval=0)
-        
+
         nz_derivatives = tuple(x for x in derivative if (x>0))
         if len(nz_derivatives) == 1:
             directional_derivative = nz_derivatives[0]
@@ -101,7 +106,7 @@ class SpaceDerivativeBase(object):
         else:
             assert (direction is None)
             directional_derivative = None
-        
+
         expr = F.s()
         for (i,xi) in enumerate(F.domain.frame.coords):
             if derivative[i]>0:
@@ -120,18 +125,18 @@ class SpaceDerivativeBase(object):
         name = first_not_None(name, default_name)
         pretty_name = first_not_None(pretty_name, default_pretty_name)
         variables = first_not_None(variables, {})
-        
+
         check_instance(name, str)
-        check_instance(pretty_name, (str, unicode))
+        check_instance(pretty_name, str)
         check_instance(variables, dict, keys=Field, values=CartesianTopologyDescriptors)
-        
+
         input_fields  = { F: variables.get(F, None) }
         output_fields = { dF: variables.get(dF, input_fields[F]) }
         input_params = {}
 
         is_inplace = (dF is F)
         require_tmp = first_not_None(require_tmp, is_inplace)
-        
+
         scale_by_field, scale_by_parameter, scale_by_value = (False, False, False)
         if isinstance(A, ScalarField):
             input_fields[A] = variables.get(A, input_fields[F])
@@ -160,7 +165,7 @@ class SpaceDerivativeBase(object):
         self.scale_by_field = scale_by_field
         self.scale_by_parameter = scale_by_parameter
         self.scale_by_value = scale_by_value
-     
+
     @debug
     def discretize(self):
         if self.discretized:
@@ -178,11 +183,11 @@ class SpaceDerivativeBase(object):
     def get_work_properties(self):
         requests = super(SpaceDerivativeBase, self).get_work_properties()
         if self.require_tmp:
-            request = MemoryRequest.empty_like(a=self.dFout, nb_components=1, 
+            request = MemoryRequest.empty_like(a=self.dFout, nb_components=1,
                     shape=self.dFout.compute_resolution, backend=self.backend)
             requests.push_mem_request('tmp', request)
         return requests
-    
+
     @debug
     def setup(self, work):
         super(SpaceDerivativeBase, self).setup(work)
@@ -194,7 +199,10 @@ class SpaceDerivativeBase(object):
 
 
 class FiniteDifferencesSpaceDerivativeBase(SpaceDerivativeBase):
-        
+
+    def __new__(cls, **kwds):
+        return super(FiniteDifferencesSpaceDerivativeBase, cls).__new__(cls, **kwds)
+
     def __init__(self, **kwds):
         super(FiniteDifferencesSpaceDerivativeBase, self).__init__(**kwds)
 
@@ -202,7 +210,7 @@ class FiniteDifferencesSpaceDerivativeBase(SpaceDerivativeBase):
         msg='FiniteDifferencesSpaceDerivative only supports directional derivatives.'
         assert isinstance(direction, int), msg
         assert isinstance(directional_derivative, int), msg
-        
+
     @classmethod
     def default_method(cls):
         dm = super(FiniteDifferencesSpaceDerivativeBase, cls).default_method()
@@ -214,20 +222,25 @@ class FiniteDifferencesSpaceDerivativeBase(SpaceDerivativeBase):
         am = super(FiniteDifferencesSpaceDerivativeBase, cls).available_methods()
         am.update({ SpaceDiscretization: InstanceOf(int) })
         return am
-    
+
     @debug
     def handle_method(self, method):
         super(FiniteDifferencesSpaceDerivativeBase, self).handle_method(method)
-        
+
         if not hasattr(self, 'space_discretization'):
             space_discretization = method.pop(SpaceDiscretization)
             assert (2 <= space_discretization), space_discretization
             assert (space_discretization % 2 == 0), space_discretization
             self.space_discretization = space_discretization
-    
+
 
 
 class SpectralSpaceDerivativeBase(SpectralOperatorBase, SpaceDerivativeBase):
+    @debug
+    def __new__(cls, testing=False, **kwds):
+        kwds['require_tmp'] = False
+        return super(SpectralSpaceDerivativeBase, cls).__new__(cls, **kwds)
+
     @debug
     def __init__(self, testing=False, **kwds):
         kwds['require_tmp'] = False
@@ -235,21 +248,21 @@ class SpectralSpaceDerivativeBase(SpectralOperatorBase, SpaceDerivativeBase):
 
         F, dF = self.Fin, self.Fout
         derivative = self.derivative
-        
+
         tg = self.new_transform_group()
 
         axes = tuple(i for (i,di) in enumerate(derivative[::-1]) if (di>0))
         if testing and not axes:
-            axes = tuple(xrange(F.dim))
+            axes = tuple(range(F.dim))
         elif not axes:
             msg='No transform axes found, got derivative={}.'.format(derivative)
             raise RuntimeError(msg)
-        
+
         Ft = tg.require_forward_transform(F, axes=axes, custom_output_buffer='auto')
-        Bt = tg.require_backward_transform(dF, axes=axes, custom_input_buffer='auto', 
+        Bt = tg.require_backward_transform(dF, axes=axes, custom_input_buffer='auto',
                                             matching_forward_transform=Ft)
         assert (Ft.output_dtype == Bt.input_dtype)
-        
+
         dFt = Ft.s
         assert len(derivative)==F.dim
         for (i,di) in enumerate(derivative):
@@ -257,7 +270,7 @@ class SpectralSpaceDerivativeBase(SpectralOperatorBase, SpaceDerivativeBase):
                 dFt = dFt.diff(Ft.s.all_vars[i], di)
         expr = Assignment(Bt.s, dFt)
         kds = tg.push_expressions(expr)
-        
+
         self.Ft = Ft
         self.Bt = Bt
         self.tg = tg
diff --git a/hysop/operator/base/diffusion.py b/hysop/operator/base/diffusion.py
index 227a432572bf599b09fa8016b4d13016adcc78ea..817562eadda4ff01b4eea5c9db26368e4e0de714 100644
--- a/hysop/operator/base/diffusion.py
+++ b/hysop/operator/base/diffusion.py
@@ -13,9 +13,17 @@ class DiffusionOperatorBase(PoissonOperatorBase):
     Common base for spectral diffusion operator.
     """
 
+    @debug
+    def __new__(cls, Fin, Fout, variables,
+                 nu, dt, name=None, pretty_name=None, **kwds):
+        return super(DiffusionOperatorBase, cls).__new__(cls,
+                Fin=Fin, Fout=Fout, variables=variables,
+                name=name, pretty_name=pretty_name,
+                input_params=None, **kwds)
+
     @debug
     def __init__(self, Fin, Fout, variables,
-                       nu, dt, 
+                       nu, dt,
                        name=None, pretty_name=None,
                        **kwds):
         """
@@ -41,7 +49,7 @@ class DiffusionOperatorBase(PoissonOperatorBase):
                 dF/dt = nu*Laplacian(F)
                 in  = Win
                 out = Wout
-                
+
             *Implicit resolution in spectral space:
                 F_hat(tn+1) = 1/(1-nu*dt*sum(Ki**2)) * F_hat(tn)
         """
@@ -50,7 +58,7 @@ class DiffusionOperatorBase(PoissonOperatorBase):
 
         input_params   = { dt.name: dt,
                            nu.name: nu }
-        
+
         default_name = 'Diffusion_{}_{}'.format(Fin.name, Fout.name)
         default_pretty_name = 'Diffusion_{}_{}'.format(Fin.pretty_name, Fout.pretty_name)
         name = first_not_None(name, default_name)
diff --git a/hysop/operator/base/enstrophy.py b/hysop/operator/base/enstrophy.py
index 1305461cad775707c2b004c18d5b7ad629306b41..ed12f108fc80a4e31bbc87aaed8a77f5fb7c21a9 100644
--- a/hysop/operator/base/enstrophy.py
+++ b/hysop/operator/base/enstrophy.py
@@ -9,12 +9,17 @@ from hysop.core.memory.memory_request import MemoryRequest
 from hysop.topology.cartesian_descriptor import CartesianTopologyDescriptors
 from hysop.parameters.scalar_parameter import ScalarParameter
 
-class EnstrophyBase(object):
+class EnstrophyBase(object, metaclass=ABCMeta):
     """
     Common implementation interface for enstrophy.
     """
 
-    __metaclass__ = ABCMeta
+    @debug
+    def __new__(cls, vorticity, enstrophy, WdotW, rho, rho_0,
+                    variables, name=None, pretty_name=None, **kwds):
+        return super(EnstrophyBase, cls).__new__(cls,
+            input_fields=None, output_fields=None, output_params=None,
+            name=name, pretty_name=pretty_name, **kwds)
 
     @debug
     def __init__(self, vorticity, enstrophy, WdotW, rho, rho_0,
@@ -51,8 +56,8 @@ class EnstrophyBase(object):
         check_instance(rho, Field, allow_none=True)
         check_instance(rho_0, float)
 
-        W = vorticity.pretty_name.decode('utf-8')
-        wdotw = u'{}\u22c5{}'.format(W,W).encode('utf-8')
+        W = vorticity.pretty_name
+        wdotw = '{}⋅{}'.format(W,W)
         if (WdotW is None):
             assert (vorticity in variables), variables
             WdotW = vorticity.tmp_like(name='WdotW', pretty_name=wdotw, nb_components=1)
@@ -67,7 +72,7 @@ class EnstrophyBase(object):
             input_fields[rho] = variables[rho]
 
         default_name = 'enstrophy'
-        default_pname = u'\u222b{}'.format(wdotw.decode('utf-8')).encode('utf-8')
+        default_pname = '∫{}'.format(wdotw)
 
         pretty_name = first_not_None(pretty_name, name, default_pname)
         name = first_not_None(name, default_name)
diff --git a/hysop/operator/base/external_force.py b/hysop/operator/base/external_force.py
index ee94138d958d956c469c93053ecbd371069652b0..5435e8659a55aaa3b5d4b72cc5cda8a41c8856cc 100644
--- a/hysop/operator/base/external_force.py
+++ b/hysop/operator/base/external_force.py
@@ -11,13 +11,16 @@ from hysop.topology.cartesian_descriptor import CartesianTopologyDescriptors
 from hysop.fields.continuous_field import Field
 from hysop.symbolic.relational import Assignment
 from hysop.core.memory.memory_request import MemoryRequest
+from hysop.core.graph.graph import op_apply
 from hysop.parameters.tensor_parameter import TensorParameter
 from hysop.parameters.scalar_parameter import ScalarParameter
 from hysop.tools.interface import NamedObjectI
 
-class ExternalForce(NamedObjectI):
+class ExternalForce(NamedObjectI, metaclass=ABCMeta):
     """Interface to implement a custom external force."""
-    __metaclass__ = ABCMeta
+
+    def __new__(cls, name, dim, Fext, **kwds):
+        return super(ExternalForce, cls).__new__(cls, name=name, **kwds)
 
     def __init__(self, name, dim, Fext, **kwds):
         super(ExternalForce, self).__init__(name=name, **kwds)
@@ -31,7 +34,7 @@ class ExternalForce(NamedObjectI):
             msg=msg.format(dim, f.dim, f.short_description())
             assert f.dim == dim, msg
 
-    
+
     @property
     def Fext(self):
         return self._Fext
@@ -79,7 +82,16 @@ class SpectralExternalForceOperatorBase(SpectralOperatorBase):
     """
     Compute the curl of a symbolic expression and perfom Euler time integration.
     """
-    
+
+    @debug
+    def __new__(cls, vorticity, Fext, dt, variables,
+                    Fmin=None, Fmax=None, Finf=None,
+                    implementation=None, **kwds):
+        return super(SpectralExternalForceOperatorBase, cls).__new__(cls,
+                input_fields=None, output_fields=None,
+                input_params=None, output_params=None,
+                **kwds)
+
     @debug
     def __init__(self, vorticity, Fext, dt, variables,
                     Fmin=None, Fmax=None, Finf=None,
@@ -97,7 +109,7 @@ class SpectralExternalForceOperatorBase(SpectralOperatorBase):
             Fmax = max(force)
             Finf = max(abs(Fmin), abs(Fmax))
             W   += dt*force
-        
+
         where Fext is computed from user given ExternalForce.
 
         Parameters
@@ -131,7 +143,7 @@ class SpectralExternalForceOperatorBase(SpectralOperatorBase):
         check_instance(Fmax, (ScalarParameter,TensorParameter), allow_none=True)
         check_instance(Finf, (ScalarParameter,TensorParameter), allow_none=True)
         check_instance(variables, dict, keys=Field, values=CartesianTopologyDescriptors)
-        
+
         if (Fmin is not None):
             Fmin.value = npw.asarray((1e8,)*vorticity.nb_components, dtype=Fmin.dtype)
         if (Fmax is not None):
@@ -167,7 +179,7 @@ class SpectralExternalForceOperatorBase(SpectralOperatorBase):
         else:
             msg='Unsupported dimension {}.'.format(dim)
             raise RuntimeError(msg)
-            
+
         msg='TensorParameter shape mismatch, expected {} but got {{}} for parameter {{}}.'
         msg=msg.format(pshape)
         if isinstance(Fmin, TensorParameter):
@@ -201,7 +213,7 @@ class SpectralExternalForceOperatorBase(SpectralOperatorBase):
         output_fields = {f: self.get_topo_descriptor(variables, f) for f in output_fields}
         input_params  = {p.name: p for p in input_params}
         output_params = {p.name: p for p in output_params}
-        
+
         # TODO share tmp buffers for the whole tensor
         force = vorticity.tmp_like(name='Fext', ghosts=0, mem_tag='tmp_fext')
         for (Fi, Wi) in zip(force.fields, vorticity.fields):
@@ -209,7 +221,7 @@ class SpectralExternalForceOperatorBase(SpectralOperatorBase):
             output_fields[Fi] = self.get_topo_descriptor(variables, Wi)
 
         super(SpectralExternalForceOperatorBase, self).__init__(
-                input_fields=input_fields, output_fields=output_fields, 
+                input_fields=input_fields, output_fields=output_fields,
                 input_params=input_params, output_params=output_params,
                 **kwds)
 
@@ -221,13 +233,13 @@ class SpectralExternalForceOperatorBase(SpectralOperatorBase):
         self.dim = dim
         self.w_components = w_components
         self.f_components = f_components
-        
+
         self.Fmin = Fmin
         self.Fmax = Fmax
         self.Finf = Finf
         self.compute_statistics  = compute_statistics
-        
-################### 
+
+###################
 # from now on, we delegate everything to the ExternalForce implementation
 ###################
     def initialize(self, **kwds):
@@ -235,7 +247,7 @@ class SpectralExternalForceOperatorBase(SpectralOperatorBase):
             return
         self.Fext.initialize(self)
         return super(SpectralExternalForceOperatorBase, self).initialize(**kwds)
-        
+
     @debug
     def discretize(self):
         if self.discretized:
@@ -247,15 +259,16 @@ class SpectralExternalForceOperatorBase(SpectralOperatorBase):
 
     def get_work_properties(self):
         requests = super(SpectralExternalForceOperatorBase, self).get_work_properties()
-        for (name, request) in self.Fext.get_mem_requests(self).iteritems():
+        for (name, request) in self.Fext.get_mem_requests(self).items():
             requests.push_mem_request(name, request)
         return requests
-    
+
     def setup(self, work):
         self.Fext.pre_setup(self, work)
         super(SpectralExternalForceOperatorBase, self).setup(work)
         self.Fext.post_setup(self, work)
-    
+
+    @op_apply
     def apply(self, **kwds):
         self.Fext.apply(self, **kwds)
 
diff --git a/hysop/operator/base/integrate.py b/hysop/operator/base/integrate.py
index bfbda81770bf998931a54c9d320c2cfd6534721e..196a4058200f0049e19e5776e04d1f2d523bda7d 100644
--- a/hysop/operator/base/integrate.py
+++ b/hysop/operator/base/integrate.py
@@ -10,12 +10,18 @@ from hysop.topology.cartesian_descriptor import CartesianTopologyDescriptors
 from hysop.parameters.scalar_parameter import ScalarParameter, TensorParameter
 
 
-class IntegrateBase(object):
+class IntegrateBase(object, metaclass=ABCMeta):
     """
     Common implementation interface for field integration.
     """
 
-    __metaclass__ = ABCMeta
+    @debug
+    def __new__(cls, field, variables,
+                 name=None, pretty_name=None, cst=1,
+                 parameter=None, scaling=None, expr=None, **kwds):
+        return super(IntegrateBase, cls).__new__(cls,
+                name=name, pretty_name=pretty_name,
+                input_fields=None, output_params=None, **kwds)
 
     @debug
     def __init__(self, field, variables,
@@ -80,7 +86,7 @@ class IntegrateBase(object):
         output_params = {parameter.name: parameter}
 
         default_name = 'integrate_{}'.format(field.name)
-        default_pname = u'\u222b{}'.format(field.pretty_name.decode('utf-8')).encode('utf-8')
+        default_pname = '∫{}'.format(field.pretty_name)
 
         pretty_name = first_not_None(pretty_name, name, default_pname)
         name = first_not_None(name, default_name)
diff --git a/hysop/operator/base/memory_reordering.py b/hysop/operator/base/memory_reordering.py
index 1d6c4c9019a52a33181979a02bce56115b58830c..bdba3fb5565e37afb29a13a84bb787638d865fd9 100644
--- a/hysop/operator/base/memory_reordering.py
+++ b/hysop/operator/base/memory_reordering.py
@@ -8,12 +8,20 @@ from hysop.fields.continuous_field import ScalarField
 from hysop.core.memory.memory_request import MemoryRequest
 from hysop.topology.cartesian_descriptor import CartesianTopologyDescriptors
 
-class MemoryReorderingBase(object):
+class MemoryReorderingBase(object, metaclass=ABCMeta):
     """
     Common implementation interface for memory reordering operators.
     """
 
-    __metaclass__ = ABCMeta
+    @debug
+    def __new__(cls, input_field, output_field, variables,
+                    target_memory_order, name=None, pretty_name=None,
+                    **kwds):
+        return super(MemoryReorderingBase, cls).__new__(cls,
+                input_fields=None,
+                output_fields=None,
+                name=name, pretty_name=pretty_name,
+                **kwds)
 
     @debug
     def __init__(self, input_field, output_field, variables,
@@ -57,10 +65,10 @@ class MemoryReorderingBase(object):
             raise NotImplementedError(target_memory_order)
 
         default_name = '{}_{}'.format(mr, input_field.name)
-        default_pname = u'{}_{}'.format(mr, input_field.pretty_name.decode('utf-8'))
+        default_pname = '{}_{}'.format(mr, input_field.pretty_name)
         if (output_field.name != input_field.name):
             default_name += '_{}'.format(output_field.name)
-            default_pname += u'_{}'.format(output_field.pretty_name.decode('utf-8'))
+            default_pname += '_{}'.format(output_field.pretty_name)
 
         name = first_not_None(name, default_name)
         pname = first_not_None(pretty_name, default_pname)
@@ -100,7 +108,7 @@ class MemoryReorderingBase(object):
                         output_field=output_field,
                         input_topology_states=input_topology_states)
         assert len(input_topology_states)==1
-        istate = input_topology_states.values()[0]
+        istate = next(iter(input_topology_states.values()))
         ostate.memory_order = self.target_memory_order
         return ostate
 
diff --git a/hysop/operator/base/min_max.py b/hysop/operator/base/min_max.py
index 2ec39a464b69f1803725ca5a87b23e10c55884f3..ff8bc693e7647e812d34da0b150031fa49e1a5df 100644
--- a/hysop/operator/base/min_max.py
+++ b/hysop/operator/base/min_max.py
@@ -1,13 +1,13 @@
 """
 @file min_max.py
 MinMaxFieldStatisticsBase: compute min(F), max(F) and/or max(|F|) for a given field
-MinMaxGradientStatisticsBase: compute min(gradF), max(gradF) and/or max(|gradF|) for a given field, component and direction-wise.
+MinMaxGradientStatisticsBase: compute min(gradF), max(gradF) and/or max(|gradF|) for a given field, direction-wise.
 """
+import numpy as np
 from abc import abstractmethod
 from hysop.tools.types       import check_instance, first_not_None, to_tuple
 from hysop.tools.enum        import EnumFactory
 from hysop.tools.decorators  import debug
-from hysop.tools.numpywrappers import npw
 from hysop.fields.continuous_field import Field
 from hysop.constants import Backend
 from hysop.parameters.tensor_parameter import TensorParameter
@@ -41,7 +41,7 @@ class MinMaxFieldStatisticsBase(object):
         if (field is not None):
             dtype      = first_not_None(dtype,      field.dtype)
             pbasename  = first_not_None(pbasename,  field.name)
-            ppbasename = first_not_None(ppbasename, field.pretty_name.decode('utf-8'))
+            ppbasename = first_not_None(ppbasename, field.pretty_name)
 
         def make_param(k, quiet):
             return TensorParameter(name=names[k], pretty_name=pretty_names[k],
@@ -54,9 +54,9 @@ class MinMaxFieldStatisticsBase(object):
         }
 
         pretty_names = {
-            'Fmin': u'{}\u208b'.format(ppbasename),
-            'Fmax': u'{}\u208a'.format(ppbasename),
-            'Finf': u'|{}|\u208a'.format(ppbasename),
+            'Fmin': '{}â‚‹'.format(ppbasename),
+            'Fmax': '{}â‚Š'.format(ppbasename),
+            'Finf': '|{}|â‚Š'.format(ppbasename),
         }
 
         if (field is not None):
@@ -66,7 +66,7 @@ class MinMaxFieldStatisticsBase(object):
 
         parameters = {}
         _parameters = dict(Fmin=Fmin, Fmax=Fmax, Finf=Finf)
-        for (k,v) in _parameters.iteritems():
+        for (k,v) in _parameters.items():
             param = _parameters[k]
             if isinstance(param, TensorParameter):
                 pass
@@ -77,10 +77,24 @@ class MinMaxFieldStatisticsBase(object):
             else:
                 param = None
             if (param is not None):
-                assert npw.prod(param.shape) == nb_components
+                assert np.prod(param.shape) == nb_components
             parameters[k] = param
         return parameters
 
+    @debug
+    def __new__(cls, field, components=None, coeffs=None,
+            Fmin=None, Fmax=None, Finf=None, all_quiet=None,
+            name=None, pretty_name=None,
+            pbasename=None, ppbasename=None,
+            variables=None, **kwds):
+        if MinMaxDerivativeStatisticsBase in cls.__mro__:
+            return super(MinMaxFieldStatisticsBase, cls).__new__(cls,
+                    name=name, pretty_name=pretty_name,
+                    variables=variables, output_params=None, **kwds)
+        else:
+            return super(MinMaxFieldStatisticsBase, cls).__new__(cls,
+                    name=name, pretty_name=pretty_name,
+                    input_fields=None, output_params=None, **kwds)
 
     @debug
     def __init__(self, field, components=None, coeffs=None,
@@ -160,7 +174,7 @@ class MinMaxFieldStatisticsBase(object):
         check_instance(field, Field)
         check_instance(components, tuple, values=int,
                 allow_none=True, minval=0, maxval=field.nb_components-1)
-        check_instance(coeffs, dict, keys=str, values=(int, float, npw.number), allow_none=True)
+        check_instance(coeffs, dict, keys=str, values=(int, float, np.number), allow_none=True)
         check_instance(variables, dict, keys=Field, values=CartesianTopologyDescriptors,
                 allow_none=True)
         check_instance(name, str, allow_none=True)
@@ -170,8 +184,8 @@ class MinMaxFieldStatisticsBase(object):
 
         coeffs      = first_not_None(coeffs, {})
         name        = first_not_None(name, 'MinMax[{}]'.format(field.name))
-        pretty_name = first_not_None(pretty_name, u'|{}|\u221e'.format(
-                                        field.pretty_name.decode('utf-8')))
+        pretty_name = first_not_None(pretty_name, '|{}|∞'.format(
+                                        field.pretty_name))
         variables   = first_not_None(variables, {field: None})
         all_quiet   = first_not_None(all_quiet, False)
 
@@ -193,7 +207,7 @@ class MinMaxFieldStatisticsBase(object):
                     input_fields=input_fields, output_params=output_params, **kwds)
             self.has_derivative = False
 
-        for (pname, param) in parameters.iteritems():
+        for (pname, param) in parameters.items():
             setattr(self, pname, param)
             coeffs.setdefault(pname, 1)
 
@@ -226,18 +240,18 @@ class MinMaxFieldStatisticsBase(object):
         else:
             comm = self.mpi_params.comm
             Fmin, Fmax = self.Fmin, self.Fmax
-            if (self.Fmax is not None): 
-                sendbuff = npw.zeros_like(Fmax.value)
-                recvbuff = npw.zeros_like(Fmax.value)
+            if (self.Fmax is not None):
+                sendbuff = np.zeros_like(Fmax.tensor_value)
+                recvbuff = np.zeros_like(Fmax.tensor_value)
                 def _collect_max(val, sendbuff=sendbuff, recvbuff=recvbuff):
                     sendbuff[...] = val
                     comm.Allreduce(sendbuff, recvbuff, op=MPI.MAX)
                     return recvbuff.copy()
             else:
                 _collect_max = None
-            if (self.Fmin is not None): 
-                sendbuff = npw.zeros_like(Fmin.value)
-                recvbuff = npw.zeros_like(Fmin.value)
+            if (self.Fmin is not None):
+                sendbuff = np.zeros_like(Fmin.tensor_value)
+                recvbuff = np.zeros_like(Fmin.tensor_value)
                 def _collect_min(val, sendbuff=sendbuff, recvbuff=recvbuff):
                     sendbuff[...] = val
                     comm.Allreduce(sendbuff, recvbuff, op=MPI.MIN)
@@ -251,6 +265,13 @@ class MinMaxFieldStatisticsBase(object):
         """Backend agnostic computation of min and max parameters."""
         dfield, components, coeffs = self._dfield, self._components, self._coeffs
         Fmin, Fmax, Finf = self.Fmin, self.Fmax, self.Finf
+
+        # For now field.min/field.max take into account ghosts...
+        # This is because pyopencl.reduction does not support reductions on array views.
+        # Here we force synchronize ghosts in every direction, including diagonals,
+        #  in order to overwrite potential bad boundary values.
+        dfield.exchange_ghosts()
+
         if (Fmin is not None):
             fmin = Fmin.tensor_value
             for i in components:
@@ -262,19 +283,34 @@ class MinMaxFieldStatisticsBase(object):
                 fmax[i] = dfield.data[i].max().get()
             Fmax.value = self._collect_max(fmax * coeffs['Fmax'])
         if (Finf is not None):
-            self.Finf.value = npw.maximum(npw.abs(Fmin()), npw.abs(Fmax())) * coeffs['Finf']
+            self.Finf.value = np.maximum(np.abs(Fmin()), np.abs(Fmax())) * coeffs['Finf']
 
 
 class MinMaxDerivativeStatisticsBase(MinMaxFieldStatisticsBase):
     """
     Abstract operator base to compute min and max statistics on the derivative
-    of a specific field component.
+    of a specific scalar field.
     """
 
+    @debug
+    def __new__(cls, F, dF=None, A=None,
+            derivative=None, direction=None,
+            Fmin=None, Fmax=None, Finf=None, coeffs=None,
+            all_quiet=False,
+            name=None, pretty_name=None,
+            pbasename=None, ppbasename=None,
+            variables=None, **kwds):
+        return super(MinMaxDerivativeStatisticsBase, cls).__new__(cls,
+                field=dF, coeffs=coeffs, Fmin=Fmin, Fmax=Fmax, Finf=Finf,
+                name=name, pretty_name=pretty_name,
+                pbasename=pbasename, variables=variables,
+                F=F, dF=dF, A=A,
+                derivative=derivative, direction=direction,
+                **kwds)
+
     @debug
     def __init__(self, F, dF=None, A=None,
-            derivative=None, component=None, direction=None,
-            out_component=None, scaling_view=None,
+            derivative=None, direction=None,
             Fmin=None, Fmax=None, Finf=None, coeffs=None,
             all_quiet=False,
             name=None, pretty_name=None,
@@ -289,8 +325,8 @@ class MinMaxDerivativeStatisticsBase(MinMaxFieldStatisticsBase):
             Finf: max value of the absolute value of a
                     derivative of the field (computed using Fmin and Fmax).
 
-        First compute the derivative of a component of a field F in a given direction
-        at a given order and on a given backend out of place in a specific output component of dF.
+        First compute the derivative of a scalar field F in a given direction
+        at a given order and on a given backend out of place in a specific output scalar field dF.
         The derivative is then possibly scaled by another field/parameter/value A.
 
         After the scaled derivative has been computed, compute user requested statistics
@@ -298,21 +334,17 @@ class MinMaxDerivativeStatisticsBase(MinMaxFieldStatisticsBase):
         parameters stored in coeffs.
 
         1) Compute derivative
-            dF[k] = alpha * d^n(F[i])/dXj**n
+            dF = alpha * d^n(F)/dXj**n
 
         2) Compute statistics
-            Fmin = Smin * min(dF[k])
-            Fmax = Smax * max(dF[k])
+            Fmin = Smin * min(dF)
+            Fmax = Smax * max(dF)
             Finf = Sinf * max(|Fmin|, |Fmax|)
 
         where F  is an input field
               dF is an output field (by default a temporary field).
-              k = out_component < dF.nb_components
-              i =     component <  F.nb_components
               n = derivative order > 0
-              alpha = A[scaling_view]
-                where A is a Field, a Parameter or a scalar.
-                      scaling_view is a component, a slice or None.
+              alpha = A, where A is a Field, a Parameter or a scalar.
               Fmin = created or supplied TensorParameter.
               Fmax = created or supplied TensorParameter.
               Finf = created or supplied TensorParameter.
@@ -340,22 +372,9 @@ class MinMaxDerivativeStatisticsBase(MinMaxFieldStatisticsBase):
         derivative: int, optional
             Which derivative to generate.
             Defaults to 1.
-        component: int, optional
-            The component on which to take the derivative.
-            Defaults to 0.
         direction: int, optional
             Directions in which to take the derivative.
             Defaults to 0.
-        out_component: int, optional
-            The component were the result will be stored.
-            Defaults to component.
-        scaling_view: int or slice, optional
-            View on the scaling field/parameter/value A.
-            Should be a component if this is a field.
-            Should be a slice if this is a TensorParameter.
-            Should be None it this is a ScalarParameter or
-            a numerical value.
-            Should only be given if A is given.
         F...: TensorParameter or boolean, optional
             The output parameters that will contain the statistics.
             At least one statistic should be specified (either by boolean or TensorParameter).
@@ -413,8 +432,6 @@ class MinMaxDerivativeStatisticsBase(MinMaxFieldStatisticsBase):
 
         if (dF is None):
             dF = F.tmp_like('dF', nb_components=1)
-            assert (out_component is None), out_component
-            out_component = 0
         variables.setdefault(dF, variables[F])
 
         super(MinMaxDerivativeStatisticsBase, self).__init__(field=dF,
@@ -422,5 +439,5 @@ class MinMaxDerivativeStatisticsBase(MinMaxFieldStatisticsBase):
                 name=name, pretty_name=pretty_name,
                 pbasename=pbasename, variables=variables,
                 F=F, dF=dF, A=A,
-                derivative=derivative, component=component, direction=direction,
-                out_component=out_component, scaling_view=scaling_view, **kwds)
+                derivative=derivative, direction=direction,
+                **kwds)
diff --git a/hysop/operator/base/poisson.py b/hysop/operator/base/poisson.py
index f88d987ff070761e59f4ed4b46e3db6f75fed0d7..7940f6ef63fe91eabfd6b8cbf32d9a70f3c731ba 100644
--- a/hysop/operator/base/poisson.py
+++ b/hysop/operator/base/poisson.py
@@ -12,19 +12,29 @@ class PoissonOperatorBase(SpectralOperatorBase):
     """
     Solves the poisson equation using a specific implementation.
     """
-    
+
+    @debug
+    def __new__(cls, Fin, Fout, variables,
+            name=None, pretty_name=None,
+            dump_energy=None, dump_input_energy=None, dump_output_energy=None,
+            plot_energy=None, plot_input_energy=None, plot_output_energy=None, plot_inout_energy=None,
+            **kwds):
+        return super(PoissonOperatorBase, cls).__new__(cls,
+                name=name, pretty_name=pretty_name,
+                input_fields=None, output_fields=None, **kwds)
+
     @debug
-    def __init__(self, Fin, Fout, variables, 
+    def __init__(self, Fin, Fout, variables,
             name=None, pretty_name=None,
             dump_energy=None, dump_input_energy=None, dump_output_energy=None,
             plot_energy=None, plot_input_energy=None, plot_output_energy=None, plot_inout_energy=None,
-            **kwds): 
+            **kwds):
         """
         Initialize a n-dimensional Poisson operator base (using spectral methods).
 
         Solves:
             Laplacian(Fout) = Fin
-        
+
         Parameters
         ----------
         Fout: Field
@@ -46,7 +56,7 @@ class PoissonOperatorBase(SpectralOperatorBase):
         plot_output_energy: IOParams, optional, defaults to None
             Plot output field energy in a custom file. Defaults to no plot.
         plot_inout_energy: IOParams, optional, defaults to None
-            Plot input and output field energy on the same graph in a custom file. 
+            Plot input and output field energy on the same graph in a custom file.
             Defaults to no plot.
         kwds: dict, optional
             Base class arguments.
@@ -62,21 +72,21 @@ class PoissonOperatorBase(SpectralOperatorBase):
         check_instance(plot_output_energy, (IOParams,int), allow_none=True)
         check_instance(plot_inout_energy, (IOParams,int), allow_none=True)
         assert Fin.nb_components == Fout.nb_components
-        
+
         input_fields  = { Fin:  variables[Fin]  }
         output_fields = { Fout: variables[Fout] }
-        
+
         default_name = 'Poisson_{}_{}'.format(Fin.name, Fout.name)
         default_pretty_name = 'Poisson_{}_{}'.format(Fin.pretty_name, Fout.pretty_name)
         name = first_not_None(name, default_name)
         pretty_name = first_not_None(name, default_pretty_name)
-        
+
         dump_input_E    = first_not_None(dump_input_energy,  dump_energy)
         dump_output_E   = first_not_None(dump_output_energy, dump_energy)
         plot_input_E    = first_not_None(plot_input_energy,  plot_energy)
         plot_output_E   = first_not_None(plot_output_energy, plot_energy)
         plot_inout_E    = first_not_None(plot_inout_energy,  plot_energy)
-        
+
         do_plot_inout_E  = isinstance(plot_inout_E,  IOParams) and (plot_inout_E.frequency>=0)
         _, compute_input_E_freqs  = EnergyDumper.do_compute_energy(dump_input_E, plot_input_E, plot_inout_E)
         _, compute_output_E_freqs = EnergyDumper.do_compute_energy(dump_output_E, plot_output_E, plot_inout_E)
@@ -87,10 +97,10 @@ class PoissonOperatorBase(SpectralOperatorBase):
         forward_transforms  = ()
         backward_transforms = ()
         wave_numbers        = ()
-        
+
         tg = self.new_transform_group()
         for (Fi,Fo) in zip(Fin.fields, Fout.fields):
-            Ft = tg.require_forward_transform(Fi, custom_output_buffer='auto', 
+            Ft = tg.require_forward_transform(Fi, custom_output_buffer='auto',
                     dump_energy=dump_input_E, plot_energy=plot_input_E,
                     compute_energy_frequencies=compute_input_E_freqs)
             Bt = tg.require_backward_transform(Fo, custom_input_buffer='auto', matching_forward_transform=Ft,
@@ -99,7 +109,7 @@ class PoissonOperatorBase(SpectralOperatorBase):
             assert (Ft.output_dtype == Bt.input_dtype)
             expr = Assignment(Bt.s, laplacian(Ft.s, Ft.s.frame))
             kds  = tg.push_expressions(expr)
-            
+
             forward_transforms  += (Ft,)
             backward_transforms += (Bt,)
             wave_numbers        += (kds,)
@@ -115,7 +125,7 @@ class PoissonOperatorBase(SpectralOperatorBase):
         self.plot_inout_energy_ioparams = plot_inout_E
         self.input_energy_params  = tuple(Ft._energy_parameter for Ft in self.forward_transforms)
         self.output_energy_params = tuple(Bt._energy_parameter for Bt in self.backward_transforms)
-    
+
     @debug
     def discretize(self):
         super(PoissonOperatorBase, self).discretize()
@@ -132,15 +142,15 @@ class PoissonOperatorBase(SpectralOperatorBase):
             all_nd_dkds += (all_dkds,)
         self.all_dkds    = all_dkds
         self.all_nd_dkds = all_nd_dkds
-        
+
         inout_energy_plotters = ()
         if self.do_plot_inout_energy:
             for (dFin, dFout, Pin, Pout) in zip(
                     self.dFin.dfields, self.dFout.dfields,
                     self.input_energy_params, self.output_energy_params):
                 fname = '{}_{}'.format(dFin.name, dFout.name)
-                iname = u'{}.input.{}'.format(type(self).__name__, dFin.pretty_name.decode('utf-8'))
-                oname = u'{}.output.{}'.format(type(self).__name__, dFout.pretty_name.decode('utf-8'))
+                iname = '{}.input.{}'.format(type(self).__name__, dFin.pretty_name)
+                oname = '{}.output.{}'.format(type(self).__name__, dFout.pretty_name)
                 plt = EnergyPlotter(energy_parameters={iname: Pin, oname: Pout}, fname=fname,
                         io_params=self.plot_inout_energy_ioparams)
                 inout_energy_plotters += (plt,)
diff --git a/hysop/operator/base/poisson_curl.py b/hysop/operator/base/poisson_curl.py
index 0f95a700bb64db93d708bf72be2e7c39d476c9d2..af59caebda86ee72b94cb4dba02c93ab5e1a42c8 100644
--- a/hysop/operator/base/poisson_curl.py
+++ b/hysop/operator/base/poisson_curl.py
@@ -20,18 +20,31 @@ class PoissonCurlOperatorBase(object):
     """
     Solves the poisson-rotational equation using a specific implementation.
     """
-    
+
     @debug
-    def __init__(self, vorticity, velocity, variables, 
-            diffusion=None, dt=None, projection=None, 
-            dump_energy=None, dump_velocity_energy=None, 
+    def __new__(cls, vorticity, velocity, variables,
+            diffusion=None, dt=None, projection=None,
+            dump_energy=None, dump_velocity_energy=None,
             dump_input_vorticity_energy=None, dump_output_vorticity_energy=None,
-            plot_energy=None, plot_velocity_energy=None, 
+            plot_energy=None, plot_velocity_energy=None,
             plot_input_vorticity_energy=None, plot_output_vorticity_energy=None, plot_inout_vorticity_energy=None,
-            **kwds): 
+            **kwds):
+        return super(PoissonCurlOperatorBase, cls).__new__(cls,
+                input_fields=None, output_fields=None,
+                input_params=None, output_params=None, **kwds)
+
+
+    @debug
+    def __init__(self, vorticity, velocity, variables,
+            diffusion=None, dt=None, projection=None,
+            dump_energy=None, dump_velocity_energy=None,
+            dump_input_vorticity_energy=None, dump_output_vorticity_energy=None,
+            plot_energy=None, plot_velocity_energy=None,
+            plot_input_vorticity_energy=None, plot_output_vorticity_energy=None, plot_inout_vorticity_energy=None,
+            **kwds):
         """
         PoissonCurl operator to solve incompressible flows using various fft backends.
-        
+
         Parameters
         ----------
         velocity : :class:`~hysop.fields.continuous_field.Field
@@ -48,7 +61,7 @@ class PoissonCurlOperatorBase(object):
             If diffusion is not enabled, this parameter is ignored.
         projection: hysop.constants.FieldProjection or positive integer, optional
             Project vorticity such that resolved velocity is divergence free (for 3D fields).
-            When active, projection is done prior to every solve, unless projection is 
+            When active, projection is done prior to every solve, unless projection is
             an integer in which case it is done every given steps.
             This parameter is ignored for 2D fields and defaults to no projection.
         dump_energy: IOParams, optional, defaults to None
@@ -69,16 +82,16 @@ class PoissonCurlOperatorBase(object):
             Plot output vorticity field energy and save the plot to a custom file. Defaults to no plot.
         plot_inout_vorticity_energy: IOParams, optional, defaults to None
             Plot vorticity field energy before and after diffusion and projection on the same graph.
-        kwds : 
+        kwds :
             Base class parameters.
 
         Notes:
         ------
         All dump energy arguments also enables scalar energy dumping.
         This is not true for energy plotting.
-        Passing an integer instead of a IOParams will disable dump and plot arguments. 
+        Passing an integer instead of a IOParams will disable dump and plot arguments.
         """
-        
+
         check_instance(velocity,  Field)
         check_instance(vorticity, Field)
         check_instance(variables, dict, keys=Field, values=CartesianTopologyDescriptors)
@@ -106,25 +119,25 @@ class PoissonCurlOperatorBase(object):
                 raise RuntimeError(msg)
         else:
             dt = None
-        
+
         # check for projection
         if (projection == FieldProjection.NONE) or (projection is None) \
                 or (projection==0) or (velocity.dim!=3):
             projection = FieldProjection.NONE
         should_project = (projection is not FieldProjection.NONE)
-        
+
         if (projection == FieldProjection.NONE):
             do_project = lambda simu: False
         elif (projection == FieldProjection.EVERY_STEP):
             do_project = lambda simu: True
-        else: # projection is an integer representing frequency 
+        else: # projection is an integer representing frequency
             freq = projection
             if (freq>=1):
                 do_project = lambda simu: ((simu.current_iteration % freq)==0)
             else:
                 msg='Got negative projection frequency {}.'.format(freq)
                 raise ValueError(msg)
-        
+
         # check fields
         dim=velocity.dim
         wcomp = vorticity.nb_components
@@ -138,7 +151,7 @@ class PoissonCurlOperatorBase(object):
             raise RuntimeError('Velocity projection only available in 3D.')
         if (velocity.dtype != vorticity.dtype):
             raise RuntimeError('Datatype mismatch between velocity and vorticity.')
-        
+
         # input and output fields
         vtopology = variables[velocity]
         wtopology = variables[vorticity]
@@ -151,7 +164,7 @@ class PoissonCurlOperatorBase(object):
             input_params[dt.name] = dt
         if (should_diffuse or should_project):
             output_fields[vorticity] = wtopology
-       
+
         dump_Uout_E   = first_not_None(dump_velocity_energy,         dump_energy)
         dump_Win_E    = first_not_None(dump_input_vorticity_energy,  dump_energy)
         dump_Wout_E   = first_not_None(dump_output_vorticity_energy, dump_energy)
@@ -183,14 +196,14 @@ class PoissonCurlOperatorBase(object):
         compute_Uout_E_param = EnergyDumper.build_energy_parameter(do_compute=do_compute_Uout_E, field=velocity,  output_params=output_params, prefix='out')
         compute_Wout_E_param = EnergyDumper.build_energy_parameter(do_compute=do_compute_Wout_E, field=vorticity, output_params=output_params, prefix='out')
 
-        super(PoissonCurlOperatorBase, self).__init__(input_fields=input_fields, 
-                output_fields=output_fields, input_params=input_params, 
+        super(PoissonCurlOperatorBase, self).__init__(input_fields=input_fields,
+                output_fields=output_fields, input_params=input_params,
                 output_params=output_params, **kwds)
 
         self.W = vorticity
         self.U = velocity
         self.dim = dim
-        
+
         self.should_diffuse = should_diffuse
         self.nu = diffusion
         self.dt = dt
@@ -207,7 +220,7 @@ class PoissonCurlOperatorBase(object):
         self.compute_energy_parameters  = {'Uout': compute_Uout_E_param, 'Win': compute_Win_E_param, 'Wout': compute_Wout_E_param }
         self.compute_energy_frequencies = {'Uout': compute_Uout_E_freqs, 'Win': compute_Win_E_freqs, 'Wout': compute_Wout_E_freqs }
 
-    
+
     @debug
     def discretize(self):
         if self.discretized:
@@ -215,12 +228,20 @@ class PoissonCurlOperatorBase(object):
         super(PoissonCurlOperatorBase, self).discretize()
         self.dW = self.get_input_discrete_field(self.W)
         self.dU = self.get_output_discrete_field(self.U)
-        
+
 
 class SpectralPoissonCurlOperatorBase(PoissonCurlOperatorBase, SpectralOperatorBase):
+
+    @debug
+    def __new__(cls, vorticity, velocity, variables,
+            diffusion=None, dt=None, projection=None, **kwds):
+        return super(SpectralPoissonCurlOperatorBase, cls).__new__(cls,
+                vorticity=vorticity, velocity=velocity, variables=variables,
+                diffusion=diffusion, dt=dt, projection=projection, **kwds)
+
     @debug
-    def __init__(self, vorticity, velocity, variables, 
-            diffusion=None, dt=None, projection=None, **kwds): 
+    def __init__(self, vorticity, velocity, variables,
+            diffusion=None, dt=None, projection=None, **kwds):
 
         super(SpectralPoissonCurlOperatorBase, self).__init__(
                 vorticity=vorticity, velocity=velocity, variables=variables,
@@ -234,8 +255,8 @@ class SpectralPoissonCurlOperatorBase(PoissonCurlOperatorBase, SpectralOperatorB
 
         # build spectral transforms
         tg = self.new_transform_group()
-        W_forward_transforms  = to_tuple(tg.require_forward_transform(vorticity, 
-            compute_energy_frequencies=ce_freqs['Win'], 
+        W_forward_transforms  = to_tuple(tg.require_forward_transform(vorticity,
+            compute_energy_frequencies=ce_freqs['Win'],
             dump_energy=de_iops['Win']))
         U_backward_transforms = to_tuple(tg.require_backward_transform(velocity,
                                                             custom_input_buffer='B0',
@@ -247,7 +268,7 @@ class SpectralPoissonCurlOperatorBase(PoissonCurlOperatorBase, SpectralOperatorB
                 dump_energy=de_iops['Wout']))
         else:
             W_backward_transforms = (None,)*dim
-        
+
         W_Fts = npw.asarray([x.s for x in W_forward_transforms])
         U_Bts = npw.asarray([x.s for x in U_backward_transforms])
         W_Bts = npw.asarray([None if (x is None) else x.s for x in W_backward_transforms])
@@ -256,17 +277,17 @@ class SpectralPoissonCurlOperatorBase(PoissonCurlOperatorBase, SpectralOperatorB
         kd1s = ()
         for Wi in W_Fts:
             expr1 = grad(Wi, Wi.frame)
-            kd1 = sorted(tg.push_expressions(*to_tuple(expr1)), key=lambda k: k.axis)
+            kd1 = tuple(sorted(tg.push_expressions(*to_tuple(expr1)), key=lambda k: k.axis))
             kd1s += (kd1,)
 
         # laplacian
         kd2s = ()
         for Wi in W_Fts:
             expr2 = laplacian(Wi, Wi.frame)
-            kd2 = sorted(tg.push_expressions(*to_tuple(expr2)), key=lambda k: k.axis)
+            kd2 = tuple(sorted(tg.push_expressions(*to_tuple(expr2)), key=lambda k: k.axis))
             kd2s += (kd2,)
 
-        # curl 
+        # curl
         if (dim==2):
             W2,    = W_forward_transforms
             U0, U1 = U_backward_transforms
@@ -287,14 +308,14 @@ class SpectralPoissonCurlOperatorBase(PoissonCurlOperatorBase, SpectralOperatorB
             Uin  = (W2, W1, W0, W2, W1, W0)
             Uout = (U0, U0, U1, U1, U2, U2)
             Uk   = tuple(tg.push_expressions(e)[0] for e in exprs)
-        else: 
+        else:
             raise NotImplementedError
-        
+
         self.tg = tg
         self.W_forward_transforms  = W_forward_transforms
         self.U_backward_transforms = U_backward_transforms
         self.W_backward_transforms = W_backward_transforms
-        
+
         self.W_Fts = W_Fts
         self.U_Bts = U_Bts
         self.W_Bts = W_Bts
@@ -305,25 +326,25 @@ class SpectralPoissonCurlOperatorBase(PoissonCurlOperatorBase, SpectralOperatorB
         self.Uin  = Uin
         self.Uout = Uout
         self.Uk   = Uk
-    
+
     @debug
     def discretize(self):
         if self.discretized:
             return
         super(SpectralPoissonCurlOperatorBase, self).discretize()
-            
-        fig_titles = {'Win':    u'Energy of input vorticity {}',
-                      'Uout':   u'Energy of output velocity {}',
-                      'Wout':   u'Energy of output vorticity {}' }
+
+        fig_titles = {'Win':    'Energy of input vorticity {}',
+                      'Uout':   'Energy of output velocity {}',
+                      'Wout':   'Energy of output vorticity {}' }
         get_transforms = { 'Win':  self.W_forward_transforms,
                            'Wout': self.W_backward_transforms,
                            'Uout': self.U_backward_transforms }
         get_dfield = { 'Win':  self.dW,
                        'Wout': self.dW,
                        'Uout': self.dU }
-        
+
         compute_fns = ()
-        for (k, v) in self.do_compute_energy.iteritems():
+        for (k, v) in self.do_compute_energy.items():
             if not v:
                 assert not self.do_dump_energy[k]
                 assert not self.do_plot_energy[k]
@@ -332,15 +353,15 @@ class SpectralPoissonCurlOperatorBase(PoissonCurlOperatorBase, SpectralOperatorB
             for tr in get_transforms[k]:
                 if tr._energy_parameter is None:
                     msg='Energy parameter of {}.{} has not been build.'
-                    raise RuntimeError(msg.format(tr.field.name, 
+                    raise RuntimeError(msg.format(tr.field.name,
                         'forward' if tr.is_forward else 'backward'))
             E_params  = tuple(tr._energy_parameter for tr in get_transforms[k])
             E_buffers = tuple(p.value for p in E_params)
             E_size    = max(p.size for p in E_params)
-            
+
             Ep = self.compute_energy_parameters[k]
             Ep.reallocate_buffer(shape=(E_size,), dtype=self.dU.dtype)
-            
+
             if self.do_dump_energy[k]:
                 iop = self.dump_energy_ioparams[k]
                 assert (iop is not None)
@@ -351,18 +372,19 @@ class SpectralPoissonCurlOperatorBase(PoissonCurlOperatorBase, SpectralOperatorB
             if self.do_plot_energy[k]:
                 iop = self.plot_energy_ioparams[k]
                 assert (iop is not None)
-                pname = u'{}.{}'.format(self.__class__.__name__, dfield.pretty_name.decode('utf-8'))
+                pname = '{}.{}'.format(self.__class__.__name__, dfield.pretty_name)
                 energy_parameters = { pname : Ep }
-                plt = EnergyPlotter(energy_parameters=energy_parameters, 
+                plt = EnergyPlotter(energy_parameters=energy_parameters,
                         io_params=iop, fname=k,
-                        fig_title=fig_titles[k].format(dfield.pretty_name.decode('utf-8')))
+                        fig_title=fig_titles[k].format(dfield.pretty_name))
             else:
                 plt = None
 
             freqs = self.compute_energy_frequencies[k]
+            iops = tuple(self.io_params.clone(frequency=f, with_last=True) for f in freqs)
 
-            def compute_fn(simulation, plt=plt, dmp=dmp, dst=Ep._value, srcs=E_buffers, freqs=freqs):
-                should_compute = any(simulation.should_dump(frequency=f, with_last=True) for f in freqs)
+            def compute_fn(simulation, plt=plt, dmp=dmp, dst=Ep._value, srcs=E_buffers, iops=iops):
+                should_compute = any(iop.should_dump(simulation=simulation) for iop in iops)
                 if should_compute:
                     dst[...] = 0.0
                     for src in srcs:
@@ -379,11 +401,11 @@ class SpectralPoissonCurlOperatorBase(PoissonCurlOperatorBase, SpectralOperatorB
         if self.do_plot_energy['Winout']:
             iop = self.plot_energy_ioparams['Winout']
             assert (iop is not None)
-            pname_in  = u'{}.input.{}'.format(self.__class__.__name__, self.dW.pretty_name.decode('utf-8'))
-            pname_out = u'{}.output.{}'.format(self.__class__.__name__, self.dW.pretty_name.decode('utf-8'))
-            energy_parameters = { pname_in:  self.compute_energy_parameters['Win'], 
+            pname_in  = '{}.input.{}'.format(self.__class__.__name__, self.dW.pretty_name)
+            pname_out = '{}.output.{}'.format(self.__class__.__name__, self.dW.pretty_name)
+            energy_parameters = { pname_in:  self.compute_energy_parameters['Win'],
                                   pname_out: self.compute_energy_parameters['Wout'] }
-            plt = EnergyPlotter(energy_parameters=energy_parameters, 
+            plt = EnergyPlotter(energy_parameters=energy_parameters,
                     io_params=iop, fname='Winout',
                     fig_title='Input and output vorticity energy (diffusion={:.2e}, project={})'.format(self.nu(), self.projection))
             def compute_fn(simulation, plt=plt):
@@ -414,16 +436,16 @@ class SpectralPoissonCurlOperatorBase(PoissonCurlOperatorBase, SpectralOperatorB
                 idx, dkd, nd_dkd = self.tg.discrete_wave_numbers[wi]
                 dkd2[idx] = dkd
             dkd2s += (tuple(dkd2),)
-        
+
         dUk = ()
         for ki in self.Uk:
             _, dki, _ = self.tg.discrete_wave_numbers[ki]
             dUk += (dki,)
-        
+
         self.dkd1s = dkd1s
         self.dkd2s = dkd2s
         self.dUk   = dUk
-    
+
     def get_work_properties(self):
         requests = super(SpectralPoissonCurlOperatorBase, self).get_work_properties()
         Ut = self.U_backward_transforms
@@ -440,22 +462,22 @@ class SpectralPoissonCurlOperatorBase(PoissonCurlOperatorBase, SpectralOperatorB
                 assert (Ft.output_shape == Bt.input_shape), (Ft.output_shape, Bt.input_shape)
             shape = Ft.output_shape
             dtype = Ft.output_dtype
-            request = MemoryRequest(backend=self.tg.backend, dtype=dtype, 
+            request = MemoryRequest(backend=self.tg.backend, dtype=dtype,
                                     shape=shape, nb_components=1,
                                     alignment=self.min_fft_alignment)
             requests.push_mem_request('fft_buffer_{}'.format(i), request)
         return requests
 
-    
+
     def setup(self, work):
         dim = self.dim
         Ks, KKs = self.dkd1s, self.dkd2s
 
-        W_forward_transforms  = self.W_forward_transforms 
+        W_forward_transforms  = self.W_forward_transforms
         W_backward_transforms = self.W_backward_transforms
         U_backward_transforms = self.U_backward_transforms
 
-        for (i,(W_Ft,W_Bt)) in enumerate(zip(W_forward_transforms, 
+        for (i,(W_Ft,W_Bt)) in enumerate(zip(W_forward_transforms,
                                              W_backward_transforms)):
             dtmp, = work.get_buffer(self, 'fft_buffer_{}'.format(i))
             W_Ft.configure_output_buffer(dtmp)
@@ -480,7 +502,7 @@ class SpectralPoissonCurlOperatorBase(PoissonCurlOperatorBase, SpectralOperatorB
         self.WIN  = WIN
         self.K  = K
         self.KK = KK
-                
+
         UIN, UOUT, UK = (), (), ()
         assert len(self.Uin) == len(self.Uout) == len(self.dUk)
         for i,(Uin, Uout, Uk) in enumerate(zip(self.Uin, self.Uout, self.dUk)):
@@ -489,7 +511,7 @@ class SpectralPoissonCurlOperatorBase(PoissonCurlOperatorBase, SpectralOperatorB
             UIN  += (Uin,)
             UOUT += (Uout,)
             UK   += (Uk,)
-        
+
         self.UIN  = UIN
         self.UOUT = UOUT
         self.UK   = UK
diff --git a/hysop/operator/base/redistribute_operator.py b/hysop/operator/base/redistribute_operator.py
index 0f537bc7c7438fb2127d6deb1a7e3a66462bb16f..c5e7ce2e13126a400e17b97fd9009a982dbbedaf 100644
--- a/hysop/operator/base/redistribute_operator.py
+++ b/hysop/operator/base/redistribute_operator.py
@@ -1,3 +1,5 @@
+# coding: utf-8
+
 from abc import ABCMeta, abstractmethod
 from hysop.tools.decorators import debug, not_implemented
 from hysop.tools.types import check_instance, to_set, first_not_None
@@ -8,13 +10,11 @@ from hysop.topology.topology import Topology
 from hysop.fields.continuous_field import ScalarField
 
 
-class RedistributeOperatorBase(ComputationalGraphOperator):
+class RedistributeOperatorBase(ComputationalGraphOperator, metaclass=ABCMeta):
     """
     Abstract interface to redistribute operators.
     """
 
-    __metaclass__ = ABCMeta
-
     @classmethod
     @not_implemented
     def can_redistribute(cls, source_topo, target_topo):
@@ -32,6 +32,12 @@ class RedistributeOperatorBase(ComputationalGraphOperator):
         """
         return Backend.all
 
+    def __new__(cls, variable, source_topo, target_topo,
+                name=None, pretty_name=None, **kwds):
+        return super(RedistributeOperatorBase, cls).__new__(cls,
+                                                            name=name, pretty_name=pretty_name,
+                                                            input_fields=None, output_fields=None, **kwds)
+
     def __init__(self, variable, source_topo, target_topo,
                  name=None, pretty_name=None, **kwds):
         """
@@ -66,12 +72,11 @@ class RedistributeOperatorBase(ComputationalGraphOperator):
 
         default_name = 'R{},{}_{}'.format(source_topo_id, target_topo_id, variable.name)
 
-        default_pname = u'R{}{}{}_{}'.format(
+        default_pname = 'R{}{}{}_{}'.format(
             subscript(source_topo_id),
-            u'\u2192',
-            subscript(target_topo_id),
-            variable.pretty_name.decode('utf-8'))
-        default_pname = default_pname.encode('utf-8')
+            '→',
+            subscript(target_topo_id),
+            variable.pretty_name)
 
         pretty_name = first_not_None(pretty_name, name, default_pname)
         name = first_not_None(name, default_name)
@@ -98,13 +103,13 @@ class RedistributeOperatorBase(ComputationalGraphOperator):
     def get_field_requirements(self):
         reqs = super(RedistributeOperatorBase, self).get_field_requirements()
 
-        for field, ftopo in self.input_fields.iteritems():
+        for field, ftopo in self.input_fields.items():
             if ftopo is not None:
                 _, req = reqs.get_input_requirement(field)
                 req.axes = None
                 req.memory_order = None
 
-        for field, ftopo in self.output_fields.iteritems():
+        for field, ftopo in self.output_fields.items():
             if ftopo is not None:
                 _, req = reqs.get_output_requirement(field)
                 req.axes = None
@@ -138,7 +143,7 @@ class RedistributeOperatorBase(ComputationalGraphOperator):
     def get_preserved_input_fields(self):
         return set(self.input_fields.keys())
 
-    def available_methods(self):
+    def available_methods(self):
         return {}
 
     def default_method(self):
diff --git a/hysop/operator/base/solenoidal_projection.py b/hysop/operator/base/solenoidal_projection.py
index 1e6880b88a3c3af1270428dcd97d666d231e44bb..8181ea633244eaed73d45c5876737f6ec40635c0 100644
--- a/hysop/operator/base/solenoidal_projection.py
+++ b/hysop/operator/base/solenoidal_projection.py
@@ -15,14 +15,20 @@ class SolenoidalProjectionOperatorBase(SpectralOperatorBase):
     """
     Solves solenoidal projection (project a 3d field F such that div(F)=0)
     """
-    
+
+    @debug
+    def __new__(cls, input_field, output_field, variables,
+            input_field_div=None, output_field_div=None, **kwds):
+        return super(SolenoidalProjectionOperatorBase, cls).__new__(cls,
+                input_fields=input_field, output_fields=output_field, **kwds)
+
     @debug
-    def __init__(self, input_field, output_field, variables, 
-            input_field_div=None, output_field_div=None, **kwds): 
+    def __init__(self, input_field, output_field, variables,
+            input_field_div=None, output_field_div=None, **kwds):
         """
-        SolenoidalProjection projects a 3D vector field 
+        SolenoidalProjection projects a 3D vector field
         onto the space of divergence free fields.
-        
+
         Parameters
         ----------
         input_field:  :class:`~hysop.fields.continuous_field.Field`
@@ -35,10 +41,10 @@ class SolenoidalProjectionOperatorBase(SpectralOperatorBase):
             Optionally compute output field divergence.
         variables: dict
             dictionary of fields as keys and topologies as values.
-        kwds : 
+        kwds :
             base class parameters.
         """
-        
+
         check_instance(output_field, Field)
         check_instance(input_field,  Field)
         check_instance(input_field_div,  Field, allow_none=True)
@@ -56,11 +62,11 @@ class SolenoidalProjectionOperatorBase(SpectralOperatorBase):
             output_fields[input_field_div] = variables[input_field_div]
         if (output_field_div is not None):
             output_fields[output_field_div] = variables[output_field_div]
-        
+
         dim   = input_field.domain.dim
         icomp = input_field.nb_components
         ocomp = output_field.nb_components
-        
+
         if (input_field.domain != output_field.domain):
             msg = 'input_field and output_field do not share the same domain.'
             raise ValueError(msg)
@@ -81,10 +87,10 @@ class SolenoidalProjectionOperatorBase(SpectralOperatorBase):
             msg='input_field_div component mistmach, got {} components but expected 1.'
             msg=msg.format(output_field_div.nb_components)
             raise RuntimeError(msg)
-        
-        super(SolenoidalProjectionOperatorBase, self).__init__(input_fields=input_fields, 
+
+        super(SolenoidalProjectionOperatorBase, self).__init__(input_fields=input_fields,
                 output_fields=output_fields, **kwds)
-        
+
         Fin  = input_field
         Fout = output_field
         compute_divFin  = (input_field_div is not None)
@@ -100,9 +106,9 @@ class SolenoidalProjectionOperatorBase(SpectralOperatorBase):
         kd1s, kd2s = (), ()
         for Wi in Fts:
             expr = grad(Wi, Wi.frame)
-            kd1 = sorted(tg.push_expressions(*to_tuple(expr)), key=lambda k: k.axis)
+            kd1 = tuple(sorted(tg.push_expressions(*to_tuple(expr)), key=lambda k: k.axis))
             expr = laplacian(Wi, Wi.frame)
-            kd2 = sorted(tg.push_expressions(expr), key=lambda k: k.axis)
+            kd2 = tuple(sorted(tg.push_expressions(expr), key=lambda k: k.axis))
             kd1s += (kd1,)
             kd2s += (kd2,)
 
@@ -122,16 +128,16 @@ class SolenoidalProjectionOperatorBase(SpectralOperatorBase):
         self.Fout    = output_field
         self.divFin  = input_field_div
         self.divFout = output_field_div
-        
+
         self.compute_divFin  = compute_divFout
         self.compute_divFout = compute_divFout
-        
+
         self.tg = tg
         self.forward_transforms  = forward_transforms
         self.backward_transforms = backward_transforms
         self.backward_divFin_transform  = backward_divFin_transform
         self.backward_divFout_transform = backward_divFout_transform
-        
+
         self.Bts   = Bts
         self.Fts   = Fts
         self.kd1s  = kd1s
@@ -144,17 +150,17 @@ class SolenoidalProjectionOperatorBase(SpectralOperatorBase):
         super(SolenoidalProjectionOperatorBase, self).discretize()
         dFin  = self.get_input_discrete_field(self.Fin)
         dFout = self.get_output_discrete_field(self.Fout)
-        
+
         if self.compute_divFin:
             ddivFin  = self.output_discrete_fields[self.divFin]
         else:
             ddivFin = None
-        
+
         if self.compute_divFout:
             ddivFout  = self.output_discrete_fields[self.divFout]
         else:
             ddivFout = None
-        
+
         kd1s, kd2s = self.kd1s, self.kd2s
         dkd1s = ()
         for kd1 in kd1s:
@@ -163,7 +169,7 @@ class SolenoidalProjectionOperatorBase(SpectralOperatorBase):
                 idx, dwi, _ = self.tg.discrete_wave_numbers[wi]
                 dkd1[idx] = dwi
             dkd1s += (tuple(dkd1),)
-        
+
         dkd2s = ()
         for kd2 in kd2s:
             dkd2 = [None,]*len(kd1)
@@ -171,7 +177,7 @@ class SolenoidalProjectionOperatorBase(SpectralOperatorBase):
                 idx, dwi, _ = self.tg.discrete_wave_numbers[wi]
                 dkd2[idx] = dwi
             dkd2s += (tuple(dkd2),)
-        
+
         self.dFin     = dFin
         self.dFout    = dFout
         self.ddivFin  = ddivFin
@@ -179,17 +185,17 @@ class SolenoidalProjectionOperatorBase(SpectralOperatorBase):
         self.dkd1s = tuple(dkd1s)
         self.dkd2s = tuple(dkd2s)
 
-    
+
     def get_work_properties(self):
         requests = super(SolenoidalProjectionOperatorBase, self).get_work_properties()
-        for (i,(Ft,Bt)) in enumerate(zip(self.forward_transforms, 
+        for (i,(Ft,Bt)) in enumerate(zip(self.forward_transforms,
                                          self.backward_transforms)):
             assert (Ft.backend == Bt.backend)
             assert (Ft.output_dtype == Bt.input_dtype), (Ft.output_dtype, Bt.input_dtype)
             assert (Ft.output_shape == Bt.input_shape), (Ft.output_shape, Bt.input_shape)
             shape = Ft.output_shape
             dtype = Ft.output_dtype
-            request = MemoryRequest(backend=self.tg.backend, dtype=dtype, 
+            request = MemoryRequest(backend=self.tg.backend, dtype=dtype,
                                     shape=shape, nb_components=1,
                                     alignment=self.min_fft_alignment)
             requests.push_mem_request('fft_buffer_{}'.format(i), request)
@@ -197,7 +203,7 @@ class SolenoidalProjectionOperatorBase(SpectralOperatorBase):
 
     def setup(self, work):
         dkd1s, dkd2s = self.dkd1s, self.dkd2s
-        
+
         output_axes = self.forward_transforms[0].output_axes
         for (i,(Ft,Bt)) in enumerate(zip(self.forward_transforms, self.backward_transforms)):
             dtmp, = work.get_buffer(self, 'fft_buffer_{}'.format(i))
diff --git a/hysop/operator/base/spatial_filtering.py b/hysop/operator/base/spatial_filtering.py
index ff79da9c0b4e8a31565994d5311c29b29b10d112..a660f46c5834b3f864054362a582275f7fed17c5 100644
--- a/hysop/operator/base/spatial_filtering.py
+++ b/hysop/operator/base/spatial_filtering.py
@@ -29,6 +29,12 @@ class SpatialFilterBase(object):
     Common base implementation for lowpass spatial filtering: small grid -> coarse grid
     """
 
+    def __new__(cls, input_field, output_field,
+                     input_topo,  output_topo,
+                     **kwds):
+        return super(SpatialFilterBase, cls).__new__(cls,
+                input_fields=None, output_fields=None, **kwds)
+
     def __init__(self, input_field, output_field,
                        input_topo,  output_topo,
                        **kwds):
@@ -145,6 +151,13 @@ class SpectralRestrictionFilterBase(RestrictionFilterBase, SpectralOperatorBase)
     Base implementation for lowpass spatial filtering: small grid -> coarse grid
     using the spectral method.
     """
+
+    @debug
+    def __new__(cls, plot_input_energy=None,
+                     plot_output_energy=None,
+                     **kwds):
+        return super(SpectralRestrictionFilterBase, cls).__new__(cls, **kwds)
+
     @debug
     def __init__(self, plot_input_energy=None,
                        plot_output_energy=None,
@@ -331,7 +344,7 @@ class RemeshRestrictionFilterBase(RestrictionFilterBase):
                 nz_weights[idx] = W
         Ws = weights.sum()
         weights = weights / Ws
-        nz_weights = {k: v/Ws for (k,v) in nz_weights.iteritems()}
+        nz_weights = {k: v/Ws for (k,v) in nz_weights.items()}
 
         assert abs(weights.sum() - 1.0) < 1e-8, weights.sum()
         assert abs(npw.sum(nz_weights.values()) - 1.0) < 1e-8, npw.sum(nz_weights.values())
diff --git a/hysop/operator/base/spectral_operator.py b/hysop/operator/base/spectral_operator.py
index d3d3396b37d7c5ea436f53cfa63144711736b9e9..9ee4d0aaf50e21a1974d2b78675c6f4bab657ed3 100644
--- a/hysop/operator/base/spectral_operator.py
+++ b/hysop/operator/base/spectral_operator.py
@@ -33,13 +33,17 @@ from hysop.numerics.fft.fft import FFTI, simd_alignment, is_byte_aligned, HysopF
 
 class SpectralComputationalGraphNodeFrontend(ComputationalGraphNodeFrontend):
 
-    def __init__(self, implementation, **kwds):
-        impl, extra_kwds = self.get_actual_implementation(implementation=implementation, **kwds)
+    def __new__(cls, implementation, enforce_implementation=True, **kwds):
+        return super(SpectralComputationalGraphNodeFrontend, cls).__new__(cls,
+            implementation=implementation, **kwds)
+
+    def __init__(self, implementation, enforce_implementation=True, **kwds):
+        impl, extra_kwds = self.get_actual_implementation(implementation=implementation,
+                enforce_implementation=enforce_implementation, **kwds)
         for k in extra_kwds.keys():
             assert k not in kwds
         kwds.update(extra_kwds)
-        super(SpectralComputationalGraphNodeFrontend, self).__init__(
-            implementation=impl, **kwds)
+        super(SpectralComputationalGraphNodeFrontend, self).__init__(implementation=impl, **kwds)
 
     @classmethod
     def get_actual_implementation(cls, implementation,
@@ -81,7 +85,7 @@ class SpectralComputationalGraphNodeFrontend(ComputationalGraphNodeFrontend):
         """
         implementation = first_not_None(implementation, cls.default_implementation())
         assert implementation in cls.implementations()
-        extra_kwds = {'enable_opencl_host_buffer_mapping': False}
+        extra_kwds = {}
         if (enforce_implementation):
             return (implementation, extra_kwds)
         if (implementation == Implementation.OPENCL):
@@ -92,7 +96,6 @@ class SpectralComputationalGraphNodeFrontend(ComputationalGraphNodeFrontend):
                 raise RuntimeError(msg)
             from hysop.backend.device.opencl import cl
             if (cl_env.device.type == cl.device_type.CPU):
-                extra_kwds['enable_opencl_host_buffer_mapping'] = True
                 if (Implementation.PYTHON in cls.implementations()):
                     from hysop.backend.host.host_operator import HostOperator, OpenClMappable
                     op_cls = cls.implementations()[Implementation.PYTHON]
@@ -106,6 +109,7 @@ class SpectralComputationalGraphNodeFrontend(ComputationalGraphNodeFrontend):
                         raise TypeError(msg)
                     assert Backend.HOST in op_cls.supported_backends()
                     assert Backend.OPENCL in op_cls.supported_backends()
+                    extra_kwds['enable_opencl_host_buffer_mapping'] = True
                     return (Implementation.PYTHON, extra_kwds)
         return (implementation, extra_kwds)
 
@@ -118,8 +122,11 @@ class SpectralOperatorBase(object):
     min_fft_alignment = simd_alignment  # FFTW SIMD.
 
     @debug
-    def __init__(self, fft_interface=None, fft_interface_kwds=None,
-                 **kwds):
+    def __new__(cls, fft_interface=None, fft_interface_kwds=None, **kwds):
+        return super(SpectralOperatorBase, cls).__new__(cls, **kwds)
+
+    @debug
+    def __init__(self, fft_interface=None, fft_interface_kwds=None, **kwds):
         """
         Initialize a spectral operator base.
         kwds: dict
@@ -201,7 +208,7 @@ class SpectralOperatorBase(object):
             msg += '\nPlease use the Fortran FFTW interface if possible or '
             msg += 'use another discretization method for operator {}.\n'
             msg = msg.format(self.node_tag)
-            print msg
+            print(msg)
             raise NotImplementedError
 
         for tg in self.transform_groups.values():
@@ -215,9 +222,9 @@ class SpectralOperatorBase(object):
     def get_mem_requests(self, **kwds):
         memory_requests = {}
         for tg in self.transform_groups.values():
-            for (k, v) in tg.get_mem_requests(**kwds).iteritems():
+            for (k, v) in tg.get_mem_requests(**kwds).items():
                 check_instance(k, str)  # temporary buffer name
-                check_instance(v, int)  # nbytes
+                check_instance(v, (int,np.integer))  # nbytes
                 K = (k, tg.backend)
                 if K in memory_requests:
                     memory_requests[K] = max(memory_requests[K], v)
@@ -227,9 +234,9 @@ class SpectralOperatorBase(object):
 
     def get_work_properties(self, **kwds):
         requests = super(SpectralOperatorBase, self).get_work_properties(**kwds)
-        for ((k, backend), v) in self.get_mem_requests(**kwds).iteritems():
+        for ((k, backend), v) in self.get_mem_requests(**kwds).items():
             check_instance(k, str)
-            check_instance(v, (int, long))
+            check_instance(v, (int,np.integer))
             if (v > 0):
                 mrequest = MemoryRequest(backend=backend, size=v,
                                          alignment=self.min_fft_alignment)
@@ -262,6 +269,9 @@ class SpectralTransformGroup(object):
     """
     DEBUG = False
 
+    def __new__(cls, op, tag, mem_tag, **kwds):
+        return super(SpectralTransformGroup, cls).__new__(cls, **kwds)
+
     def __init__(self, op, tag, mem_tag, **kwds):
         """
         Parameters
@@ -288,6 +298,7 @@ class SpectralTransformGroup(object):
         All forward_fields and backward_fields have to live on the same domain and
         their boundary conditions should comply with given expressions.
         """
+        super(SpectralTransformGroup, self).__init__(**kwds)
         mem_tag = first_not_None(mem_tag, 'fft_pool')
         check_instance(op, SpectralOperatorBase)
         check_instance(tag, str)
@@ -339,11 +350,11 @@ class SpectralTransformGroup(object):
 
     @property
     def forward_fields(self):
-        return map(lambda x: x[0], self._forward_transforms.keys())
+        return tuple(map(lambda x: x[0], self._forward_transforms.keys()))
 
     @property
     def backward_fields(self):
-        return map(lambda x: x[0], self._backward_transforms.keys())
+        return tuple(map(lambda x: x[0], self._backward_transforms.keys()))
 
     @property
     def forward_transforms(self):
@@ -519,28 +530,28 @@ class SpectralTransformGroup(object):
         nd_freqs = freqs.reshape(nd_shape)
 
         if cls.DEBUG:
-            print
-            print 'BUILD WAVENUMBER'
-            print 'backend:       {}'.format(backend.kind)
-            print 'grid_shape:    {}'.format(grid_resolution)
-            print 'length:        {}'.format(length)
-            print '-----'
-            print 'ftype:         {}'.format(ftype)
-            print 'ctype:         {}'.format(ctype)
-            print 'compute shape: {}'.format(compute_resolution)
-            print 'compute axes:  {}'.format(compute_axes)
-            print '-----'
-            print 'wave_number:'
-            print '  *symbolic:   {}'.format(wave_number)
-            print '  *axis:       {}'.format(axis)
-            print '  *transform:  {}'.format(transform)
-            print '  *exponent:   {}'.format(exponent)
-            print '----'
-            print 'L:             {}'.format(L)
-            print 'N:             {}'.format(N)
-            print 'freqs:         {}'.format(freqs)
-            print 'nd_freqs:      {}'.format(nd_freqs)
-            print '----'
+            print()
+            print('BUILD WAVENUMBER')
+            print('backend:       {}'.format(backend.kind))
+            print('grid_shape:    {}'.format(grid_resolution))
+            print('length:        {}'.format(length))
+            print('-----')
+            print('ftype:         {}'.format(ftype))
+            print('ctype:         {}'.format(ctype))
+            print('compute shape: {}'.format(compute_resolution))
+            print('compute axes:  {}'.format(compute_axes))
+            print('-----')
+            print('wave_number:')
+            print('  *symbolic:   {}'.format(wave_number))
+            print('  *axis:       {}'.format(axis))
+            print('  *transform:  {}'.format(transform))
+            print('  *exponent:   {}'.format(exponent))
+            print('----')
+            print('L:             {}'.format(L))
+            print('N:             {}'.format(N))
+            print('freqs:         {}'.format(freqs))
+            print('nd_freqs:      {}'.format(nd_freqs))
+            print('----')
 
         return (idx, freqs, nd_freqs)
 
@@ -549,16 +560,16 @@ class SpectralTransformGroup(object):
         memory_requests = {}
         for fwd in self.forward_transforms.values():
             mem_requests = fwd.get_mem_requests(**kwds)
-            check_instance(mem_requests, dict, keys=str, values=(int, long))
-            for (k, v) in mem_requests.iteritems():
+            check_instance(mem_requests, dict, keys=str, values=(int,np.integer))
+            for (k, v) in mem_requests.items():
                 if k in memory_requests:
                     memory_requests[k] = max(memory_requests[k], v)
                 else:
                     memory_requests[k] = v
         for bwd in self.backward_transforms.values():
             mem_requests = bwd.get_mem_requests(**kwds)
-            check_instance(mem_requests, dict, keys=str, values=(int, long))
-            for (k, v) in mem_requests.iteritems():
+            check_instance(mem_requests, dict, keys=str, values=(int,np.integer))
+            for (k, v) in mem_requests.items():
                 if k in memory_requests:
                     memory_requests[k] = max(memory_requests[k], v)
                 else:
@@ -828,7 +839,7 @@ class SpectralTransformGroup(object):
     @property
     def output_parameters(self):
         parameters = set()
-        for pt in self._forward_transforms.values() + self._backward_transforms.values():
+        for pt in tuple(self._forward_transforms.values()) + tuple(self._backward_transforms.values()):
             parameters.update(pt.output_parameters)
         return parameters
 
@@ -854,10 +865,10 @@ class SpectralTransformGroup(object):
                     self._indexed_wave_numbers[_wn] = _wn.indexed_buffer()
             exprs_wave_numbers.update(wn)
             if (self.DEBUG):
-                print '\n\nPARSING EXPRESSION {}'.format(expr)
-                print '  new_expr:     {}'.format(e)
-                print '  transforms:   {}'.format(transforms)
-                print '  wave_numbers: {}'.format(wn)
+                print('\n\nPARSING EXPRESSION {}'.format(expr))
+                print('  new_expr:     {}'.format(e))
+                print('  transforms:   {}'.format(transforms))
+                print('  wave_numbers: {}'.format(wn))
 
         return tuple(exprs_wave_numbers)
 
@@ -889,10 +900,20 @@ class PlannedSpectralTransform(object):
     """
     DEBUG = False
 
+    def __new__(cls, transform_group, tag, symbolic_transform, action,
+                 custom_input_buffer=None, custom_output_buffer=None,
+                 matching_forward_transform=None,
+                 dump_energy=None, plot_energy=None, compute_energy_frequencies=None,
+                 **kwds):
+        return super(PlannedSpectralTransform, cls).__new__(cls, **kwds)
+
     def __init__(self, transform_group, tag, symbolic_transform, action,
                  custom_input_buffer=None, custom_output_buffer=None,
                  matching_forward_transform=None,
-                 dump_energy=None, plot_energy=None, compute_energy_frequencies=None):
+                 dump_energy=None, plot_energy=None, compute_energy_frequencies=None,
+                 **kwds):
+
+        super(PlannedSpectralTransform, self).__init__(**kwds)
 
         check_instance(transform_group, SpectralTransformGroup)
         check_instance(transform_group.op, SpectralOperatorBase)
@@ -1221,7 +1242,7 @@ class PlannedSpectralTransform(object):
         assert transform_info[-1][3][1] == self._output_dtype
 
         # filter out untransformed axes
-        tidx = tuple(filter(lambda i: not STU.is_none(transform_info[i][1]),  xrange(dim)))
+        tidx = tuple(filter(lambda i: not STU.is_none(transform_info[i][1]),  range(dim)))
         assert tidx, 'Could not determine any transformed axe.'
         ntransforms = len(tidx)
         transform_info = tuple(map(transform_info.__getitem__, tidx))
@@ -1300,9 +1321,9 @@ class PlannedSpectralTransform(object):
             if self._do_plot_energy:
                 piop = self._plot_energy_ioparams
                 assert (piop is not None)
-                pname = u'{}.{}.{}'.format(self.op.__class__.__name__,
+                pname = '{}.{}.{}'.format(self.op.__class__.__name__,
                                            'forward'if is_forward else 'backward',
-                                           dfield.pretty_name.decode('utf-8'))
+                                           dfield.pretty_name)
                 energy_parameters = {pname: self._energy_parameter}
                 self._energy_plotter = EnergyPlotter(energy_parameters=energy_parameters,
                                                      io_params=self._plot_energy_ioparams,
@@ -1348,32 +1369,31 @@ class PlannedSpectralTransform(object):
                     for slc in slices:
                         ss += prefix+str(slc)
                     return ss
-            print '\n\n== SPECTRAL PLANNING INFO OF FIELD {} =='.format(dfield.pretty_name)
-            print 'transform direction:     {}'.format('FORWARD' if self.is_forward
-                                                       else 'BACKWARD')
-            print 'transforms:              {}'.format(self.transforms)
-            print ':CARTESIAN INFO:'
-            print 'cart shape:              {}'.format(dfield.topology.cart_shape)
-            print 'global grid resolution:  {}'.format(dfield.mesh.grid_resolution)
-            print 'local  grid resolution:  {}'.format(dfield.compute_resolution)
-            print ':INPUT:'
-            print 'input axes:              {}'.format(self._input_axes)
-            print 'input dtype:             {}'.format(self._input_dtype)
-            print 'input transform shape:   {}'.format(self._input_transform_shape)
-            print 'input shape:             {}'.format(self._input_shape)
-            print 'input slices:            {}'.format(self._input_slices)
-            print ':OUTPUT:'
-            print 'output axes:             {}'.format(self._output_axes)
-            print 'output_dtype:            {}'.format(self._output_dtype)
-            print 'output transform shape:  {}'.format(self._output_transform_shape)
-            print 'output shape:            {}'.format(self._output_shape)
-            print 'output_slices:           {}'.format(self._output_slices)
-            print ':TRANSFORM INFO:'
-            print 'transform_info:          {}'.format(axis_format(transform_info))
-            print ':TRANSPOSE INFO:'
-            print 'transpose_info:          {}'.format(axis_format(transpose_info))
-            print ':ZERO FILL:'
-            print 'zero_fill_output_slices: {}'.format(slc_format(self._zero_fill_output_slices))
+            print('\n\n== SPECTRAL PLANNING INFO OF FIELD {} =='.format(dfield.pretty_name))
+            print('transform direction:     {}'.format('FORWARD' if self.is_forward else 'BACKWARD'))
+            print('transforms:              {}'.format(self.transforms))
+            print(':CARTESIAN INFO:')
+            print('cart shape:              {}'.format(dfield.topology.cart_shape))
+            print('global grid resolution:  {}'.format(dfield.mesh.grid_resolution))
+            print('local  grid resolution:  {}'.format(dfield.compute_resolution))
+            print(':INPUT:')
+            print('input axes:              {}'.format(self._input_axes))
+            print('input dtype:             {}'.format(self._input_dtype))
+            print('input transform shape:   {}'.format(self._input_transform_shape))
+            print('input shape:             {}'.format(self._input_shape))
+            print('input slices:            {}'.format(self._input_slices))
+            print(':OUTPUT:')
+            print('output axes:             {}'.format(self._output_axes))
+            print('output_dtype:            {}'.format(self._output_dtype))
+            print('output transform shape:  {}'.format(self._output_transform_shape))
+            print('output shape:            {}'.format(self._output_shape))
+            print('output_slices:           {}'.format(self._output_slices))
+            print(':TRANSFORM INFO:')
+            print('transform_info:          {}'.format(axis_format(transform_info)))
+            print(':TRANSPOSE INFO:')
+            print('transpose_info:          {}'.format(axis_format(transpose_info)))
+            print(':ZERO FILL:')
+            print('zero_fill_output_slices: {}'.format(slc_format(self._zero_fill_output_slices)))
 
     def get_mapped_input_buffer(self):
         return self.get_mapped_full_input_buffer()[self.input_slices]
@@ -1708,10 +1728,11 @@ class PlannedSpectralTransform(object):
                     B1_tag:   nbytes,
                     TMP_tag:  tmp_nbytes}
 
-        if (self._energy_nbytes > 0):
-            requests[ENERGY_tag] = self._energy_nbytes
-        if (self._mutexes_nbytes > 0):
-            requests[MUTEXES_tag] = self._mutexes_nbytes
+        if self._do_compute_energy:
+            if (self._energy_nbytes > 0):
+                requests[ENERGY_tag] = self._energy_nbytes
+            if (self._mutexes_nbytes > 0):
+                requests[MUTEXES_tag] = self._mutexes_nbytes
 
         return requests
 
@@ -1748,7 +1769,7 @@ class PlannedSpectralTransform(object):
         except ValueError:
             TMP = None
 
-        if (self._energy_nbytes > 0):
+        if (self._energy_nbytes is not None) and (self._energy_nbytes > 0):
             ENERGY, = work.get_buffer(op, ENERGY_tag, handle=True)
             energy_buffer = ENERGY[:self._energy_nbytes].view(dtype=self.dfield.dtype)
             assert energy_buffer.size == self._max_wavenumber+1
@@ -1756,7 +1777,7 @@ class PlannedSpectralTransform(object):
             ENERGY = None
             energy_buffer = None
 
-        if (self._mutexes_nbytes > 0):
+        if (self._mutexes_nbytes is not None) and (self._mutexes_nbytes > 0):
             MUTEXES, = work.get_buffer(op, MUTEXES_tag, handle=True)
             mutexes_buffer = MUTEXES[:self._mutexes_nbytes].view(dtype=np.int32)
             assert mutexes_buffer.size == self._max_wavenumber+1
@@ -1828,7 +1849,7 @@ class PlannedSpectralTransform(object):
         if SETUP_DEBUG:
             def print_op(description, category):
                 prefix = '     |> '
-                print '{}{: <40}[{}]'.format(prefix, description, category)
+                print('{}{: <40}[{}]'.format(prefix, description, category))
 
             msg = '''
 SPECTRAL TRANSFORM SETUP
@@ -1841,10 +1862,10 @@ SPECTRAL TRANSFORM SETUP
                 op.pretty_tag,
                 dim, ntransforms, self.tag,
                 is_forward, is_backward)
-            print msg
+            print(msg)
 
         fft_plans = ()
-        for i in xrange(ntransforms):
+        for i in range(ntransforms):
             transpose = transpose_info[i]
             transform = transform_info[i]
             (permutation, _, _, input_shape, output_shape) = transpose
@@ -1883,7 +1904,7 @@ SPECTRAL TRANSFORM SETUP
                     custom_input_buffer, custom_output_buffer,
                     src_shape, src_dtype, src_view,
                     dst_shape, dst_dtype, dst_view)
-                print msg
+                print(msg)
 
             src_nbytes = compute_nbytes(src_shape, src_dtype)
             dst_nbytes = compute_nbytes(dst_shape, dst_dtype)
@@ -2062,6 +2083,9 @@ SPECTRAL TRANSFORM SETUP
         else:
             compute_energy_queue = None
 
+        self._frequency_ioparams = tuple(self.io_params.clone(frequency=f, with_last=True)
+                                    for f in self._compute_energy_frequencies)
+
         self._queue = queue
         self._compute_energy_queue = compute_energy_queue
         self._ready = True
@@ -2098,8 +2122,7 @@ SPECTRAL TRANSFORM SETUP
         msg = 'No simulation was passed in {}.__call__().'.format(type(self))
         assert (simulation is not None), msg
         evt = wait_for
-        should_compute_energy = any(simulation.should_dump(frequency=f, with_last=True)
-                                    for f in self._compute_energy_frequencies)
+        should_compute_energy = any(iop.should_dump(simulation=simulation) for iop in self._frequency_ioparams)
         if should_compute_energy:
             evt = self._compute_energy_queue(wait_for=evt)
             if self._do_dump_energy:
@@ -2112,3 +2135,4 @@ SPECTRAL TRANSFORM SETUP
         evt = wait_for
         self._energy_plotter.update(simulation=simulation, wait_for=evt)
         return wait_for
+
diff --git a/hysop/operator/base/stretching_dir.py b/hysop/operator/base/stretching_dir.py
index 9ad570e3ae5b4d87291ba83dd054239d840c885e..33131ce1e386e30a2ba18d6f8aae8534e9c2ffdd 100644
--- a/hysop/operator/base/stretching_dir.py
+++ b/hysop/operator/base/stretching_dir.py
@@ -1,4 +1,6 @@
-from hysop.deps import sm, np
+import numpy as np
+import sympy as sm
+
 from hysop.tools.numpywrappers import npw
 from hysop.tools.decorators  import debug
 from hysop.constants import StretchingFormulation
@@ -38,6 +40,15 @@ class DirectionalStretchingBase(object):
         am.update(cls.__available_methods)
         return am
 
+    @debug
+    def __new__(cls, formulation, velocity, vorticity, variables, dt,
+                 C=None, A=None, name=None,
+                 implementation=None, base_kwds=None, **kwds):
+        return super(DirectionalStretchingBase, cls).__new__(cls,
+            input_fields=None, output_fields=None,
+            input_params=None, output_params=None,
+            **kwds)
+
     @debug
     def __init__(self, formulation, velocity, vorticity, variables, dt,
                  C=None, A=None, name=None,
@@ -91,8 +102,8 @@ class DirectionalStretchingBase(object):
             Should only be given for MIXED_GRAD_UW formulations.
             ValueError will be raised on other formulations.
             The linear combination coefficients A is a scalar.
-            Contained value should be a numerical coefficient, a parameter 
-            (or a generic symbolic expression) such that C*(A*grad(U).W + (1-A)*grad^T(U).W) 
+            Contained value should be a numerical coefficient, a parameter
+            (or a generic symbolic expression) such that C*(A*grad(U).W + (1-A)*grad^T(U).W)
             is directionally splittable.
             Here * is the classical elementwise multiplication.
             Default value is 0.5.
diff --git a/hysop/operator/base/transpose_operator.py b/hysop/operator/base/transpose_operator.py
index bd91896df16c2bcbb25e089450702082132f64a3..0bf258ce75edaf59cc69cc24a4ee11c9a5a144d2 100644
--- a/hysop/operator/base/transpose_operator.py
+++ b/hysop/operator/base/transpose_operator.py
@@ -9,31 +9,38 @@ from hysop.fields.continuous_field import ScalarField
 from hysop.core.memory.memory_request import MemoryRequest
 from hysop.topology.cartesian_descriptor import CartesianTopologyDescriptors
 
-class TransposeOperatorBase(object):
+class TransposeOperatorBase(object, metaclass=ABCMeta):
     """
     Common implementation interface for transposition operators.
     """
 
-    __metaclass__ = ABCMeta
+    @debug
+    def __new__(cls, input_field, output_field, variables, axes,
+                   name=None, pretty_name=None, **kwds):
+        return super(TransposeOperatorBase, cls).__new__(cls,
+                input_fields=None,
+                output_fields=None,
+                name=name, pretty_name=pretty_name,
+                **kwds)
 
     @debug
     def __init__(self, input_field, output_field, variables, axes,
                    name=None, pretty_name=None, **kwds):
         """
         Initialize a transposition operator operating on CartesianTopologyDescriptors.
-        
+
         input_field: ScalarField
             Input continuous scalar field to be transposed, at least 2D.
         output_field: ScalarField
             Output continuous scalar field where the result is stored
             Transposed shape should match the input.
-            output_field can be the same as input_field resulting in 
+            output_field can be the same as input_field resulting in
             an inplace transposition.
         variables: dict
             Dictionary of fields as keys and CartesianTopologyDescriptors as values.
             Should contain input and output field.
         axes: tuple or list of ints
-            Permutation of axes in numpy notations 
+            Permutation of axes in numpy notations
             Axe dim-1 is the contiguous axe, axe 0 has the greatest stride in memory.
         kwds: dict
             Base class keyword arguments.
@@ -41,7 +48,7 @@ class TransposeOperatorBase(object):
         check_instance(input_field, ScalarField)
         check_instance(output_field, ScalarField)
         check_instance(variables, dict, keys=ScalarField, values=CartesianTopologyDescriptors)
-        check_instance(axes, (list,tuple), values=(int,long))
+        check_instance(axes, (list,tuple), values=int)
 
         assert set((input_field,output_field)) == set(variables.keys())
         assert input_field.domain is output_field.domain
@@ -54,20 +61,20 @@ class TransposeOperatorBase(object):
 
         input_fields  = { input_field:  variables[input_field] }
         output_fields = { output_field: variables[output_field] }
-            
+
         saxes = ''.join([DirectionLabels[i] for i in axes]).lower()
         default_name = 'T{}_{}'.format(saxes, input_field.name)
-        default_pname = u'T{}_{}'.format(saxes, input_field.pretty_name.decode('utf-8'))
+        default_pname = 'T{}_{}'.format(saxes, input_field.pretty_name)
         if (output_field.name != input_field.name):
             default_name += '_{}'.format(output_field.name)
-            default_pname += u'_{}'.format(output_field.pretty_name.decode('utf-8'))
+            default_pname += '_{}'.format(output_field.pretty_name)
 
         name = first_not_None(name, default_name)
         pname = first_not_None(pretty_name, default_pname)
 
         super(TransposeOperatorBase, self).__init__(
                 input_fields=input_fields,
-                output_fields=output_fields, 
+                output_fields=output_fields,
                 name=name, pretty_name=pname,
                 **kwds)
 
@@ -76,24 +83,24 @@ class TransposeOperatorBase(object):
         self.nb_components = nb_components
         self.dim = dim
         self.axes = axes
-    
+
     @debug
     def get_node_requirements(self):
         from hysop.core.graph.node_requirements import OperatorRequirements
         reqs = super(TransposeOperatorBase, self).get_node_requirements()
         reqs.enforce_unique_transposition_state=False
         return reqs
-     
+
     def output_topology_state(self, output_field, input_topology_states):
         ostate = super(TransposeOperatorBase,self).output_topology_state(
-                        output_field=output_field, 
+                        output_field=output_field,
                         input_topology_states=input_topology_states)
         assert len(input_topology_states)==1
-        istate = input_topology_states.values()[0]
+        istate = next(iter(input_topology_states.values()))
         axes = self.axes
         ostate.axes = tuple( istate.axes[i] for i in axes )
         return ostate
-    
+
     @debug
     def discretize(self):
         if self.discretized:
@@ -102,17 +109,17 @@ class TransposeOperatorBase(object):
         self.din  = self.get_input_discrete_field(self.input_field)
         self.dout = self.get_output_discrete_field(self.output_field)
         self.is_inplace = (self.din.dfield is self.dout.dfield)
-    
+
     @debug
     def get_work_properties(self):
         requests  = super(TransposeOperatorBase,self).get_work_properties()
-        
+
         if self.is_inplace:
             request = MemoryRequest.empty_like(a=self.dout, nb_components=1)
             requests.push_mem_request('tmp', request)
-        
+
         return requests
-    
+
     @debug
     def setup(self, work):
         super(TransposeOperatorBase,self).setup(work)
@@ -120,16 +127,16 @@ class TransposeOperatorBase(object):
             raise ValueError('work is None.')
         if self.is_inplace:
             self.dtmp, = work.get_buffer(self, 'tmp')
-    
-            
+
+
     @staticmethod
     def get_preferred_axes(src_topo, dst_topo, candidate_axes):
         """
-        Return preferred transposition scheme (performance-wise) 
+        Return preferred transposition scheme (performance-wise)
         given source and destination topology and possible
         candidate transposition schemes.
-        
-        Candidate_axes is a dictionnary containing permutation 
+
+        Candidate_axes is a dictionnary containing permutation
         as keys (tuple of ints), and target transposition state
         (hysop.constants.TranspositionState) as values.
 
@@ -142,15 +149,15 @@ class TransposeOperatorBase(object):
         """
         from hysop.tools.transposition_states import TranspositionState
         assert candidate_axes, 'candidate axes is None or empty.'
-        dim = len(candidate_axes.keys()[0])
+        dim = len(next(iter(candidate_axes)))
         tstates = TranspositionState[dim]
-        check_instance(candidate_axes, dict, keys=tuple, 
+        check_instance(candidate_axes, dict, keys=tuple,
                 values=(tstates, type(None)))
-        
+
         if tstates.default in candidate_axes.values():
             idx  = candidate_axes.values().index(tstates.default)
         else:
             idx = 0
-        
-        axes = candidate_axes.keys()[idx]
+
+        axes = tuple(candidate_axes.keys())[idx]
         return axes
diff --git a/hysop/operator/convergence.py b/hysop/operator/convergence.py
index 1f31631b866a9ab08b795bed3e800df95b60b9a7..07b08032f77868a521a75030ea4194a39000472c 100644
--- a/hysop/operator/convergence.py
+++ b/hysop/operator/convergence.py
@@ -17,7 +17,6 @@ class Convergence(ComputationalGraphNodeFrontend):
     Computes the convergence citeria for a given field.
 
     Available implementations are:
-        *OPENCL (gpu based implementation) (default)
         *PYTHON
     """
 
@@ -34,9 +33,17 @@ class Convergence(ComputationalGraphNodeFrontend):
         return Implementation.PYTHON
 
     @debug
-    def __init__(self, variables, u_old=None, convergence=None,
+    def __new__(cls, variables, error=None, convergence=None, u_old=None,
+                implementation=None, **kwds):
+        return super(Convergence, cls).__new__(
+            cls,
+            error=error, convergence=convergence,
+            variables=variables, implementation=implementation, **kwds)
+
+    @debug
+    def __init__(self, variables, error=None, convergence=None, u_old=None,
                  implementation=None, **kwds):
-        """Initialize a convergence operator.
+        r"""Initialize a convergence operator.
 
         Computes ||u-u_old||_\infty or ||u-u_old||_\infty/||u||_\infty
         depending on method absolute or relative.
diff --git a/hysop/operator/curl.py b/hysop/operator/curl.py
index ed68c1bcb6f8facfdcfd7809b791a7ffa8d0184c..344915f0303a3ad0900576cba15cbb60815d9a65 100644
--- a/hysop/operator/curl.py
+++ b/hysop/operator/curl.py
@@ -12,7 +12,7 @@ class Curl(ComputationalGraphNodeFrontend):
     @classmethod
     def fd(*args, **kwds):
         return FiniteDifferencesCurl(*args, **kwds)
-    
+
     @classmethod
     def spectral(*args, **kwds):
         return SpectralCurl(*args, **kwds)
@@ -27,13 +27,23 @@ class Curl(ComputationalGraphNodeFrontend):
         raise NotImplementedError
 
     @debug
-    def __init__(self, Fin, Fout, variables, 
+    def __new__(cls, Fin, Fout, variables,
+                    implementation=None, base_kwds=None, **kwds):
+        return super(Curl, cls).__new__(cls,
+                Fin=Fin, Fout=Fout, variables=variables,
+                candidate_input_tensors=None,
+                candidate_output_tensors=None,
+                implementation=implementation,
+                base_kwds=base_kwds, **kwds)
+
+    @debug
+    def __init__(self, Fin, Fout, variables,
                     implementation=None, base_kwds=None, **kwds):
         """
         Create an operator that computes the curl of an input field Fin.
 
         Given Fin, a 2D ScalarField or VectorField or a 3D VectorField, compute Fout = curl(Fin).
-        
+
         Only the following configurations are supported:
                  dim   nb_components  |   dim   nb_components
         Input:    2        (1,2)      |    3          3
@@ -63,9 +73,10 @@ class Curl(ComputationalGraphNodeFrontend):
         super(Curl, self).__init__(Fin=Fin, Fout=Fout, variables=variables,
                 candidate_input_tensors=(Fin,),
                 candidate_output_tensors=(Fout,),
-                implementation=implementation, 
+                implementation=implementation,
                 base_kwds=base_kwds, **kwds)
 
+
 class SpectralCurl(Curl):
     @classmethod
     def implementations(cls):
@@ -76,7 +87,7 @@ class SpectralCurl(Curl):
                 Implementation.OPENCL: OpenClSpectralCurl,
         }
         return __implementations
-    
+
     @classmethod
     def default_implementation(cls):
         return Implementation.PYTHON
diff --git a/hysop/operator/custom.py b/hysop/operator/custom.py
index e4aa9bf824fb76ff37d3da544b1c5c6d1fffb190..99820828fa7d313925b5966dc2533785ff4c407f 100755
--- a/hysop/operator/custom.py
+++ b/hysop/operator/custom.py
@@ -30,6 +30,11 @@ class CustomOperator(ComputationalGraphNodeFrontend):
     def default_implementation(cls):
         return Implementation.PYTHON
 
+    @debug
+    def __new__(cls, func, invars=None, outvars=None, extra_args=None, ghosts=None, **kwds):
+        return super(CustomOperator, cls).__new__(cls,
+            func=func, invars=invars, outvars=outvars, extra_args=extra_args, ghosts=ghosts, **kwds)
+
     @debug
     def __init__(self, func, invars=None, outvars=None, extra_args=None, ghosts=None, **kwds):
         check_instance(invars, (tuple, list), values=(Field, Parameter),
@@ -62,3 +67,4 @@ class CustomOperator(ComputationalGraphNodeFrontend):
 
         super(CustomOperator, self).__init__(
             func=func, invars=invars, outvars=outvars, extra_args=extra_args, ghosts=ghosts, **kwds)
+
diff --git a/hysop/operator/custom_symbolic.py b/hysop/operator/custom_symbolic.py
index 6f41185b863e60331073ef747112b88ba36bb9f3..056c73030c6c3734d8740c3cd9739f11b9211914 100644
--- a/hysop/operator/custom_symbolic.py
+++ b/hysop/operator/custom_symbolic.py
@@ -4,7 +4,7 @@
 CustomSymbolicOperator solver frontend.
 """
 from hysop.constants         import Implementation
-from hysop.tools.types       import check_instance, to_tuple
+from hysop.tools.types       import check_instance, to_tuple, first_not_None
 from hysop.tools.decorators  import debug
 from hysop.fields.continuous_field import Field
 from hysop.topology.cartesian_descriptor import CartesianTopologyDescriptors
@@ -13,7 +13,7 @@ from hysop.core.graph.computational_node_frontend import ComputationalGraphNodeF
 class CustomSymbolicOperator(ComputationalGraphNodeFrontend):
     """
     Interface for custom symbolic operators.
-    Available implementations are: 
+    Available implementations are:
         *OPENCL (opencl code generated kernels)
     """
 
@@ -24,20 +24,27 @@ class CustomSymbolicOperator(ComputationalGraphNodeFrontend):
                 Implementation.OPENCL: OpenClCustomSymbolicOperator
         }
         return __implementations
-    
+
     @classmethod
     def default_implementation(cls):
         return Implementation.OPENCL
-    
+
+    @debug
+    def __new__(cls, name, exprs, variables,
+                implementation=None, base_kwds=None, **kwds):
+        return super(CustomSymbolicOperator, cls).__new__(cls,
+                name=name, exprs=exprs, variables=variables,
+                base_kwds=base_kwds, implementation=implementation, **kwds)
+
     @debug
-    def __init__(self, name, exprs, variables, 
+    def __init__(self, name, exprs, variables,
                 implementation=None, base_kwds=None, **kwds):
         """
         Initialize a CustomSymbolicOperator operator frontend.
 
         See hysop.operator.base.CustomSymbolicOperatorBase to see
         how expressions are parsed.
-        
+
         Parameters
         ----------
         name: str
@@ -54,15 +61,15 @@ class CustomSymbolicOperator(ComputationalGraphNodeFrontend):
             Base class keywords arguments.
             If None, an empty dict will be passed.
         kwds:
-            Keywords arguments that will be passed towards implementation 
+            Keywords arguments that will be passed towards implementation
             operator __init__.
         """
-        base_kwds = base_kwds or dict()
+        base_kwds = first_not_None(base_kwds, {})
         exprs = to_tuple(exprs)
 
         check_instance(variables, dict, keys=Field, values=CartesianTopologyDescriptors)
         check_instance(base_kwds, dict, keys=str)
-        
+
         super(CustomSymbolicOperator, self).__init__(name=name, exprs=exprs, variables=variables,
                 base_kwds=base_kwds, implementation=implementation, **kwds)
 
diff --git a/hysop/operator/derivative.py b/hysop/operator/derivative.py
index 4c958199797915c8f3e54e69271adf5f1cba7c25..b53ce8b84c5eb4ff216e84c5987d05dd9cdcc189 100644
--- a/hysop/operator/derivative.py
+++ b/hysop/operator/derivative.py
@@ -34,10 +34,22 @@ class SpaceDerivative(ComputationalGraphNodeFrontend):
         """SpaceDerivative.fd(...) <=> FiniteDifferencesSpaceDerivative(...)"""
         return FiniteDifferencesSpaceDerivative(*args, **kwds)
 
+    @debug
+    def __new__(cls, F, dF, A=None,
+            derivative=None, direction=None,
+            name=None, pretty_name=None,
+            variables=None, implementation=None,
+            base_kwds=None, **kwds):
+        return super(SpaceDerivative, cls).__new__(cls,
+                F=F, dF=dF, A=A,
+                direction=direction, derivative=derivative,
+                variables=variables, implementation=implementation,
+                base_kwds=base_kwds, name=name, pretty_name=pretty_name, **kwds)
+
     @debug
     def __init__(self, F, dF, A=None,
             derivative=None, direction=None,
-            name=None, pretty_name=None, 
+            name=None, pretty_name=None,
             variables=None, implementation=None,
             base_kwds=None, **kwds):
         """
@@ -86,7 +98,7 @@ class SpaceDerivative(ComputationalGraphNodeFrontend):
             Base class keyword arguments.
         kwds: dict, optional
             Extra parameters passed towards operator implementation.
-        
+
         Notes
         -----
         There is two way to build a derivative:
@@ -106,7 +118,7 @@ class SpaceDerivative(ComputationalGraphNodeFrontend):
         check_instance(implementation, Implementation, allow_none=True)
         check_instance(base_kwds, dict, keys=str, allow_none=True)
         check_instance(name, str, allow_none=True)
-        check_instance(pretty_name, (str,unicode), allow_none=True)
+        check_instance(pretty_name, str, allow_none=True)
 
         assert F in variables
         assert dF in variables
@@ -148,7 +160,7 @@ class SpectralSpaceDerivative(SpaceDerivative):
 
 
 class FiniteDifferencesSpaceDerivative(SpaceDerivative):
-    """
+    r"""
     Operator frontend to compute the derivative of a component
     of a field in a given direction using finite differences.
 
@@ -165,7 +177,7 @@ class FiniteDifferencesSpaceDerivative(SpaceDerivative):
                 Implementation.OPENCL: OpenClFiniteDifferencesSpaceDerivative
         }
         return __implementations
-    
+
     @classmethod
     def default_implementation(cls):
         return Implementation.PYTHON
@@ -176,10 +188,19 @@ class MultiSpaceDerivatives(DirectionalOperatorGeneratorI, ComputationalGraphNod
     """Generate multiple SpaceDerivative operators at once."""
 
     @debug
-    def __init__(self, Fs, dFs, As=None, 
-             cls=FiniteDifferencesSpaceDerivative, 
-             directions=None, derivatives=None, 
-             extra_params=None, base_kwds=None, 
+    def __new__(mcls, Fs, dFs, As=None,
+             cls=FiniteDifferencesSpaceDerivative,
+             directions=None, derivatives=None,
+             extra_params=None, base_kwds=None,
+             variables=None, **op_kwds):
+        base_kwds = {} if (base_kwds is None) else base_kwds
+        return super(MultiSpaceDerivatives, mcls).__new__(mcls, **base_kwds)
+
+    @debug
+    def __init__(self, Fs, dFs, As=None,
+             cls=FiniteDifferencesSpaceDerivative,
+             directions=None, derivatives=None,
+             extra_params=None, base_kwds=None,
              variables=None, **op_kwds):
          """
          Create a operator generator that can handle multiple SpaceDerivative operators.
@@ -195,7 +216,7 @@ class MultiSpaceDerivatives(DirectionalOperatorGeneratorI, ComputationalGraphNod
          """
          from hysop.operator.min_max import MinMaxDerivativeStatistics
          if not issubclass(cls, (SpaceDerivative, MinMaxDerivativeStatistics)) or \
-                (cls in (SpaceDerivative, MinMaxDerivativeStatistics)):
+                        (cls in (SpaceDerivative, MinMaxDerivativeStatistics)):
              msg="cls should be a subclass of SpaceDerivative or MinMaxSpaceDerivativeStatistics, got {}."
              msg+='\ncls MRO is:\n  '
              msg+='\n  '.join(str(t) for t in cls.__mro__)
@@ -232,7 +253,7 @@ class MultiSpaceDerivatives(DirectionalOperatorGeneratorI, ComputationalGraphNod
                  'direction':  fmt(directions),
                  'derivative': fmt(derivatives),
          }
-        
+
          # Extract variables for each operator
          _variables = ()
          for (f,df,a) in zip(Fs, dFs, params['A']):
@@ -245,7 +266,7 @@ class MultiSpaceDerivatives(DirectionalOperatorGeneratorI, ComputationalGraphNod
              _variables += (var,)
          params['variables'] = _variables
 
-         params.update({k:fmt(v) for (k,v) in extra_params.iteritems()})
+         params.update({k:fmt(v) for (k,v) in extra_params.items()})
          self._params = params
          self._cls = cls
          self._op_kwds = op_kwds
@@ -262,10 +283,10 @@ class MultiSpaceDerivatives(DirectionalOperatorGeneratorI, ComputationalGraphNod
             op = cls(**op_kwds)
             operators += (op,)
         return operators
-    
+
     def generate_only_once_per_direction(self):
         return True
-    
+
     @debug
     def generate_direction(self, i, dt_coeff):
         should_generate = super(MultiSpaceDerivatives, self).generate_direction(
@@ -273,10 +294,10 @@ class MultiSpaceDerivatives(DirectionalOperatorGeneratorI, ComputationalGraphNod
         if not should_generate:
             return ()
         directions = self.directions
-        ids = tuple(j for j in xrange(len(directions)) if directions[j] == i)
+        ids = tuple(j for j in range(len(directions)) if directions[j] == i)
         ops = tuple(self.nodes[j] for j in ids)
         return ops
-    
+
     @debug
     def generate(self, **kwds):
         if ('splitting_dim' in kwds):
diff --git a/hysop/operator/diffusion.py b/hysop/operator/diffusion.py
index 41da64275095239442a9ee5cb9d8d41e594490d9..f05cb2f242176a49a78442bd4a7837dece708795 100644
--- a/hysop/operator/diffusion.py
+++ b/hysop/operator/diffusion.py
@@ -43,6 +43,17 @@ class Diffusion(SpectralComputationalGraphNodeFrontend):
     def default_implementation(cls):
         return Implementation.PYTHON
 
+    @debug
+    def __new__(cls, Fin, variables, nu, dt,
+            Fout=None, implementation=None,
+            base_kwds=None, **kwds):
+        base_kwds = first_not_None(base_kwds, {})
+        return super(Diffusion, cls).__new__(cls,
+                Fin=Fin, Fout=Fout,
+                variables=variables, nu=nu, dt=dt,
+                implementation=implementation,
+                base_kwds=base_kwds, **kwds)
+
     @debug
     def __init__(self, Fin, variables, nu, dt,
             Fout=None, implementation=None,
@@ -96,3 +107,4 @@ class Diffusion(SpectralComputationalGraphNodeFrontend):
                                         variables=variables, nu=nu, dt=dt,
                                         implementation=implementation,
                                         base_kwds=base_kwds, **kwds)
+
diff --git a/hysop/operator/directional/advection_dir.py b/hysop/operator/directional/advection_dir.py
index f3d057bfdd79b55937277104cd7fdb57048d8937..8966b1e648f825359f5ebb17afbd7be1c4a6b080 100644
--- a/hysop/operator/directional/advection_dir.py
+++ b/hysop/operator/directional/advection_dir.py
@@ -38,6 +38,16 @@ class DirectionalAdvection(DirectionalOperatorFrontend):
     def default_implementation(cls):
         return Implementation.PYTHON
 
+    @debug
+    def __new__(cls, velocity, advected_fields, variables, dt,
+                advected_fields_out=None, relative_velocity=None,
+                implementation=None, base_kwds=None, **kwds):
+        return super(DirectionalAdvection, cls).__new__(cls,
+                velocity=velocity, relative_velocity=relative_velocity,
+                dt=dt, advected_fields_in=advected_fields, advected_fields_out=advected_fields_out,
+                variables=variables, implementation=implementation, base_kwds=base_kwds,
+                candidate_input_tensors=None, candidate_output_tensors=None, **kwds)
+
     @debug
     def __init__(self, velocity, advected_fields, variables, dt,
                                  advected_fields_out=None,
@@ -55,8 +65,8 @@ class DirectionalAdvection(DirectionalOperatorFrontend):
         advected_fields: Field or array like of Fields
             instance or list of continuous fields to be advected.
         advected_fields_out: Field or array like of Field, optional, defaults to None
-            advection output, if set to None, advection is done inplace 
-            on a per variable basis. 
+            advection output, if set to None, advection is done inplace
+            on a per variable basis.
         relative_velocity: array_like of constants representing relative velocity, optional
             Relative velocity that has to be taken into account.
             Vi = Ui - Urel[i] for each components.
@@ -85,13 +95,13 @@ class DirectionalAdvection(DirectionalOperatorFrontend):
         check_instance(variables, dict, keys=Field, values=CartesianTopologyDescriptors)
         check_instance(base_kwds, dict, keys=str)
         check_instance(dt, ScalarParameter)
-    
+
         advected_fields = to_list(advected_fields)
         nb_fields = len(advected_fields)
         assert len(set(advected_fields))==len(advected_fields)
 
         relative_velocity = to_list(first_not_None(relative_velocity, [0]))
-        relative_velocity = map(lambda x: x if isinstance(x, str) else float(x), relative_velocity)
+        relative_velocity = tuple(map(lambda x: x if isinstance(x, str) else float(x), relative_velocity))
         if len(relative_velocity)==1:
             relative_velocity *= velocity.nb_components
         relative_velocity = tuple(relative_velocity)
@@ -107,9 +117,9 @@ class DirectionalAdvection(DirectionalOperatorFrontend):
                     advected_fields_out[i] = advected_fields[i]
         advected_fields = tuple(advected_fields)
         advected_fields_out = tuple(advected_fields_out)
-        
+
         check_instance(advected_fields,  tuple, values=Field)
-        check_instance(advected_fields_out, tuple, values=Field, 
+        check_instance(advected_fields_out, tuple, values=Field,
                 size=len(advected_fields))
         check_instance(velocity, Field)
         check_instance(relative_velocity, tuple, values=(str,float), size=velocity.nb_components)
@@ -119,10 +129,10 @@ class DirectionalAdvection(DirectionalOperatorFrontend):
 
         candidate_input_tensors  = advected_fields + (velocity,)
         candidate_output_tensors = advected_fields_out
-        
+
         super(DirectionalAdvection, self).__init__(velocity=velocity, relative_velocity=relative_velocity,
                 dt=dt, advected_fields_in=advected_fields, advected_fields_out=advected_fields_out,
-                variables=variables, implementation=implementation, base_kwds=base_kwds, 
+                variables=variables, implementation=implementation, base_kwds=base_kwds,
                 candidate_input_tensors=candidate_input_tensors,
                 candidate_output_tensors=candidate_output_tensors,
                 **kwds)
diff --git a/hysop/operator/directional/diffusion_dir.py b/hysop/operator/directional/diffusion_dir.py
index 98482771914770fe3384d4db40dbcd59803c644f..0b345d7dec84eeb3395e246db8d07358942a08c3 100644
--- a/hysop/operator/directional/diffusion_dir.py
+++ b/hysop/operator/directional/diffusion_dir.py
@@ -2,8 +2,8 @@
 @file diffusion_dir.py
 Directional diffusion frontend (operator generator).
 """
+import sympy as sm
 
-from hysop.deps import sm
 from hysop.constants         import DirectionLabels, Implementation
 from hysop.tools.types       import check_instance, to_tuple, to_list, first_not_None
 from hysop.tools.decorators  import debug
@@ -20,34 +20,46 @@ class DirectionalDiffusion(DirectionalSymbolic):
     """
     Directional diffusion using the symbolic code generation framework.
     """
-    
+
+    @debug
+    def __new__(cls, fields, coeffs, variables, dt,
+                laplacian_formulation=True,
+                name=None, implementation=None, base_kwds=None, **kwds):
+        base_kwds  = first_not_None(base_kwds, {})
+        return super(DirectionalDiffusion, cls).__new__(cls,
+                name=name, variables=variables, dt=dt,
+                base_kwds=base_kwds, implementation=implementation, exprs=None,
+                candidate_input_tensors=None,
+                candidate_output_tensors=None,
+                **kwds)
+
     @debug
-    def __init__(self, fields, coeffs, variables, dt, 
+    def __init__(self, fields, coeffs, variables, dt,
                 laplacian_formulation=True,
                 name=None, implementation=None, base_kwds=None, **kwds):
         """
         Initialize directional diffusion frontend.
 
-        Diffusion is the net movement of regions of high concentration to neighbour regions of lower 
+        Diffusion is the net movement of regions of high concentration to neighbour regions of lower
         concentration, ie the evolution of a field down its gradient.
-        
-        Solves 
+
+        Solves
                dFi/dt = Di * laplacian(Fi)
-            or 
-               dFi/dt = div( Di * grad(Fi) ) 
+            or
+               dFi/dt = div( Di * grad(Fi) )
 
         for multiple fields Fi, where * represents elementwise multiplication,
         using a given time integrator, inplace.
-        
+
         Di can be a scalar, a vector or a matrix, see Notes.
-       
+
         Parameters
         ----------
         fields: array like of continuous fields.
             The fields Fi that will be diffused.
         coeffs: array like of coefficients
             The diffusion coefficients Di can be scalar or tensor like (ie. possibly anisotropic).
-            Contained values should be numerical coefficients, parameters or symbolic 
+            Contained values should be numerical coefficients, parameters or symbolic
             expressions such that Di*grad(Fi) is directionally splittable.
         variables: dict
             Dictionary of fields as keys and topology descriptors as values.
@@ -64,25 +76,25 @@ class DirectionalDiffusion(DirectionalSymbolic):
             Base class keywords arguments.
             If None, an empty dict will be passed.
         kwds:
-            Keywords arguments that will be passed towards implementation 
+            Keywords arguments that will be passed towards implementation
             operator __init__.
-        
+
         Notes
         -----
         Solves dFi/dt = div( Di * grad(Fi) ) with the following notations:
-        
-        Fi = (Fi0(x0,...,xMi), 
-              Fi1(x0,...,xMi), 
-                    ... 
+
+        Fi = (Fi0(x0,...,xMi),
+              Fi1(x0,...,xMi),
+                    ...
               FiNi(x0,...,xMi))
 
-        Mi = Fi.dim - 1 
+        Mi = Fi.dim - 1
         Ni = Fi.nb_components - 1
-        
-        grad(Fi) = (dFi0/dx0, ..., dFi0/dxM), 
-                   (dFi1/dx0, ..., dFi1/dxM), 
+
+        grad(Fi) = (dFi0/dx0, ..., dFi0/dxM),
+                   (dFi1/dx0, ..., dFi1/dxM),
                               ...
-                   (dFiNi/dx0, ..., dFiNi/dxM) 
+                   (dFiNi/dx0, ..., dFiNi/dxM)
 
              (d00,  ...,  d0Ni)
              ( .     .    .   )
@@ -90,16 +102,16 @@ class DirectionalDiffusion(DirectionalSymbolic):
              ( .     .    .   )
              (dNi0, ..., dNiNi)
 
-        if Di.ndim is 1, Di[:,None] will be used ie: 
-             
+        if Di.ndim is 1, Di[:,None] will be used ie:
+
              (d0,  ...,  d0)
              ( .    .    . )
         Di = ( .    .    . )
              ( .    .    . )
              (dNi, ..., dNi)
-        
-        if Di.ndim is 0, Di[None,None] will be used ie: 
-             
+
+        if Di.ndim is 0, Di[None,None] will be used ie:
+
              (d0, ..., d0)
              (.    .    .)
         Di = (.    .    .)
@@ -124,26 +136,26 @@ class DirectionalDiffusion(DirectionalSymbolic):
         check_instance(base_kwds, dict, keys=str)
         check_instance(name, str)
         check_instance(fields, (tuple,), values=Field)
-        check_instance(coeffs, (tuple,), values=(int,long,float,npw.ndarray,sm.Basic), size=len(fields))
+        check_instance(coeffs, (tuple,), values=(int,float,npw.ndarray,sm.Basic), size=len(fields))
         check_instance(laplacian_formulation, bool)
-        
+
         exprs = self._gen_expressions(fields, coeffs, laplacian_formulation)
 
         super(DirectionalDiffusion, self).__init__(name=name, variables=variables, dt=dt,
-                base_kwds=base_kwds, implementation=implementation, exprs=exprs, 
+                base_kwds=base_kwds, implementation=implementation, exprs=exprs,
                 candidate_input_tensors=fields,
                 candidate_output_tensors=fields,
                 **kwds)
-        
+
         from hysop.operator.base.custom_symbolic_operator import CustomSymbolicOperatorBase
         if not issubclass(self._operator, CustomSymbolicOperatorBase):
             msg='Class {} does not inherit from the directional operator interface '
             msg+='({}).'
             msg=msg.format(self._operator, CustomSymbolicOperatorBase)
             raise TypeError(msg)
-        
+
         self.coeffs = coeffs
-        
+
     def _gen_expressions(self, fields, coeffs, laplacian_formulation):
         from hysop.symbolic.field import div, grad, laplacian
         exprs = ()
diff --git a/hysop/operator/directional/directional.py b/hysop/operator/directional/directional.py
index 1746b79c344954402fab589dc5a2a629e0cf775a..a474e80abff35278e9c3565fba25ad73e0b1cb4c 100644
--- a/hysop/operator/directional/directional.py
+++ b/hysop/operator/directional/directional.py
@@ -13,6 +13,10 @@ class DirectionalOperatorBase(object):
     Implementation interface for directional operators.
     """
 
+    @debug
+    def __new__(cls, splitting_dim, splitting_direction, dt_coeff, **kwds):
+        return super(DirectionalOperatorBase, cls).__new__(cls, **kwds)
+
     @debug
     def __init__(self, splitting_dim, splitting_direction, dt_coeff, **kwds):
         """
@@ -36,8 +40,8 @@ class DirectionalOperatorBase(object):
         dt_coeff: float
             Coefficient that should be applied on simulation timestep.
         """
-        check_instance(splitting_dim, (int, long))
-        check_instance(splitting_direction, (int, long))
+        check_instance(splitting_dim, int)
+        check_instance(splitting_direction, int)
         check_instance(dt_coeff, float)
 
         dim = splitting_dim
@@ -78,6 +82,9 @@ class DirectionalOperatorBase(object):
 
 
 class DirectionalOperatorGeneratorI(object):
+    def __new__(cls, **kwds):
+        return super(DirectionalOperatorGeneratorI, cls).__new__(cls, **kwds)
+
     def __init__(self, **kwds):
         super(DirectionalOperatorGeneratorI, self).__init__(**kwds)
         self._generated = False
@@ -125,12 +132,19 @@ class DirectionalOperatorGeneratorI(object):
         return self.generate_direction(i, dt_coeff)
 
 
-class DirectionalOperatorGenerator(DirectionalOperatorGeneratorI):
+class DirectionalOperatorGenerator(DirectionalOperatorGeneratorI, metaclass=ABCMeta):
     """
     Simple ComputationalGraphNodeGenerator to generate an operator in
     multiple directions.
     """
-    __metaclass__ = ABCMeta
+
+    @debug
+    def __new__(cls, operator, base_kwds,
+                candidate_input_tensors,
+                candidate_output_tensors,
+                name=None, pretty_name=None,
+                **op_kwds):
+        return super(DirectionalOperatorGenerator, cls).__new__(cls, **base_kwds)
 
     @debug
     def __init__(self, operator, base_kwds,
@@ -248,21 +262,21 @@ class DirectionalOperatorGenerator(DirectionalOperatorGeneratorI):
         kargs.update(self.custom_directional_kwds(i))
 
         name = '{}_{}_{}'.format(basename, DirectionLabels[i], self._direction_counter[i])
-        pname = u'{}_{}{}'.format(basepname, DirectionLabels[i],
-                                  subscript(self._direction_counter[i]))
-        pname = pname.encode('utf-8')
+        pname = '{}_{}{}'.format(basepname, DirectionLabels[i],
+                                 subscript(self._direction_counter[i]))
 
         try:
             op = self._operator(name=name, pretty_name=pname,
                                 splitting_direction=i, dt_coeff=dt_coeff, **kargs)
         except:
             sargs = ['*{} = {}'.format(k, v.__class__)
-                     for (k, v) in kargs.iteritems()]
+                     for (k, v) in kargs.items()]
             msg = 'FATAL ERROR during {}.generate():\n'
-            msg += ' => failed to call {}.__init__()\n    with the following keywords:'
+            msg += ' => failed to initialize an instance of type {}'
+            msg += '\n    by using the following keyword arguments:'
             msg += '\n     '+'\n     '.join(sargs)
-            msg = msg.format(self.__class__, self._operator)
-            print '\n{}\n'.format(msg)
+            msg = msg.format(self.__class__, self._operator)
+            print('\n{}'.format(msg))
             raise
 
         if (self._input_fields_to_dump is not None):
@@ -292,13 +306,17 @@ class DirectionalOperatorGenerator(DirectionalOperatorGeneratorI):
         return {}
 
 
-class DirectionalOperatorFrontend(DirectionalOperatorGenerator):
+class DirectionalOperatorFrontend(DirectionalOperatorGenerator, metaclass=ABCMeta):
     """
     Frontend facility for directional operators that provide
     multiple implementations.
     """
 
-    __metaclass__ = ABCMeta
+    @debug
+    def __new__(cls, implementation=None, base_kwds=None, **op_kwds):
+        base_kwds = {} if (base_kwds is None) else base_kwds
+        return super(DirectionalOperatorFrontend, cls).__new__(cls, operator=None,
+                                                               base_kwds=base_kwds, **op_kwds)
 
     @debug
     def __init__(self, implementation=None, base_kwds=None, **op_kwds):
@@ -331,7 +349,7 @@ class DirectionalOperatorFrontend(DirectionalOperatorGenerator):
             the implementation implementation
         """
 
-        base_kwds = base_kwds or dict()
+        base_kwds = {} if (base_kwds is None) else base_kwds
 
         check_instance(implementation, Implementation, allow_none=True)
         check_instance(base_kwds, dict, keys=str)
diff --git a/hysop/operator/directional/stretching_diffusion_dir.py b/hysop/operator/directional/stretching_diffusion_dir.py
index 598027c76c33fd59e74920c8e3d759dae35f3ead..d7d50c40287417ceb92b1a944ed28cdd15f7870e 100644
--- a/hysop/operator/directional/stretching_diffusion_dir.py
+++ b/hysop/operator/directional/stretching_diffusion_dir.py
@@ -2,8 +2,8 @@
 @file stretching_dir.py
 Directional stretching frontend (operator generator).
 """
+import sympy as sm
 
-from hysop.deps import sm
 from hysop.constants         import DirectionLabels, Implementation, StretchingFormulation
 from hysop.tools.types       import check_instance, to_tuple, to_list, first_not_None
 from hysop.tools.decorators  import debug
@@ -20,7 +20,11 @@ class DirectionalStretchingDiffusion(DirectionalStretching):
     """
     Directional stretching + diffusion using the symbolic code generation framework.
     """
-    
+
+    @debug
+    def __new__(cls, viscosity, **kwds):
+        return super(DirectionalStretchingDiffusion, cls).__new__(cls, **kwds)
+
     @debug
     def __init__(self, viscosity, **kwds):
         """
@@ -52,7 +56,7 @@ class DirectionalStretchingDiffusion(DirectionalStretching):
         check_instance(viscosity, (float, Field))
         self.viscosity = viscosity
         super(DirectionalStretchingDiffusion, self).__init__(**kwds)
-        
+
     def _gen_expressions(self, formulation, velocity, vorticity, C, A):
         from hysop.symbolic.field import laplacian
         viscosity = self.viscosity
diff --git a/hysop/operator/directional/stretching_dir.py b/hysop/operator/directional/stretching_dir.py
index 8a1d84d6abe4e3e20412b76b2fc91cf5314db1ed..ed68b5aedf10fef8a1b656bcb2c3011697c32e39 100644
--- a/hysop/operator/directional/stretching_dir.py
+++ b/hysop/operator/directional/stretching_dir.py
@@ -2,8 +2,8 @@
 @file stretching_dir.py
 Directional stretching frontend (operator generator).
 """
+import sympy as sm
 
-from hysop.deps import sm
 from hysop.constants         import DirectionLabels, Implementation, StretchingFormulation
 from hysop.tools.types       import check_instance, to_tuple, to_list, first_not_None
 from hysop.tools.decorators  import debug
@@ -22,6 +22,17 @@ class DirectionalStretching(DirectionalSymbolic):
     Directional stretching using the symbolic code generation framework.
     """
 
+    @debug
+    def __new__(cls, formulation, velocity, vorticity, variables, dt,
+                 C=None, A=None, name=None, implementation=None, base_kwds=None, **kwds):
+        base_kwds = first_not_None(base_kwds, {})
+        return super(DirectionalStretching, cls).__new__(cls,
+                name=name, variables=variables, dt=dt,
+                candidate_input_tensors=None,
+                candidate_output_tensors=None,
+                base_kwds=base_kwds, implementation=implementation,
+                exprs=None, **kwds)
+
     @debug
     def __init__(self, formulation, velocity, vorticity, variables, dt,
                  C=None, A=None, name=None, implementation=None, base_kwds=None, **kwds):
@@ -181,6 +192,7 @@ class DirectionalStretching(DirectionalSymbolic):
         return exprs
 
 
+
 class StaticDirectionalStretching(DirectionalOperatorFrontend):
     """
     Directional stretching using the symbolic code generation framework.
@@ -196,6 +208,19 @@ class StaticDirectionalStretching(DirectionalOperatorFrontend):
     def default_implementation(cls):
         return Implementation.PYTHON
 
+    @debug
+    def __new__(cls, formulation, velocity, vorticity, variables, dt,
+                 C=None, A=None, name=None, implementation=None, base_kwds=None, **kwds):
+        base_kwds = first_not_None(base_kwds, {})
+        return super(StaticDirectionalStretching, cls).__new__(cls,
+            name=name, variables=variables, dt=dt,
+            formulation=formulation,
+            velocity=velocity, vorticity=vorticity,
+            C=C, A=A,
+            candidate_input_tensors=None,
+            candidate_output_tensors=None,
+            base_kwds=base_kwds, implementation=implementation, **kwds)
+
     @debug
     def __init__(self, formulation, velocity, vorticity, variables, dt,
                  C=None, A=None, name=None, implementation=None, base_kwds=None, **kwds):
@@ -322,3 +347,4 @@ class StaticDirectionalStretching(DirectionalOperatorFrontend):
             candidate_input_tensors=(velocity, vorticity,),
             candidate_output_tensors=(vorticity,),
             base_kwds=base_kwds, implementation=implementation, **kwds)
+
diff --git a/hysop/operator/directional/symbolic_dir.py b/hysop/operator/directional/symbolic_dir.py
index 1ba95ade149399f7ba85ad30ca4f968af4b4fcc9..da94dd12861b9306da2079edc431be22be3dec8c 100644
--- a/hysop/operator/directional/symbolic_dir.py
+++ b/hysop/operator/directional/symbolic_dir.py
@@ -17,10 +17,10 @@ from hysop.symbolic.relational import Assignment
 class DirectionalSymbolic(DirectionalOperatorFrontend):
     """
     Custom symbolic expression directional splitting.
-    Available implementations are: 
+    Available implementations are:
         *OpenCL
     """
-    
+
     @classmethod
     def implementations(cls):
         from hysop.backend.device.opencl.operator.custom_symbolic import \
@@ -29,33 +29,48 @@ class DirectionalSymbolic(DirectionalOperatorFrontend):
                 Implementation.OPENCL: OpenClCustomSymbolicOperator
         }
         return __implementations
-    
+
     @classmethod
     def default_implementation(cls):
         return Implementation.OPENCL
-    
-    
+
+
     @debug
-    def __init__(self, name, exprs, variables, 
+    def __new__(cls, name, exprs, variables,
                 no_split=False, fixed_residue=0, force_residue=None,
-                implementation=None, base_kwds=None, 
+                implementation=None, base_kwds=None,
+                candidate_input_tensors=None,
+                candidate_output_tensors=None,
+                **kwds):
+        base_kwds = first_not_None(base_kwds, {})
+        return super(DirectionalSymbolic, cls).__new__(cls,
+                name=name, variables=variables,
+                base_kwds=base_kwds, implementation=implementation,
+                candidate_input_tensors=candidate_input_tensors,
+                candidate_output_tensors=candidate_output_tensors,
+                **kwds)
+
+    @debug
+    def __init__(self, name, exprs, variables,
+                no_split=False, fixed_residue=0, force_residue=None,
+                implementation=None, base_kwds=None,
                 candidate_input_tensors=None,
                 candidate_output_tensors=None,
                 **kwds):
         """
         Initialize a DirectionalSymbolic operator frontend.
-        
+
         Expressions are first splitted using contiguous memory
         accesses imposed by the discretization of field derivatives
         with finite differences.
-        
+
         If the splitting is not possible (ie. an expression contains
         multiple derivatives in different directions that cannot be
         splitted), a ValueError is raised.
 
         See hysop.operator.base.custom_symbolic.CustomSymbolicOperatorBase
         to see how expressions are parsed.
-        
+
         Parameters
         ----------
         name: str
@@ -81,12 +96,12 @@ class DirectionalSymbolic(DirectionalOperatorFrontend):
             Base class keywords arguments.
             If None, an empty dict will be passed.
         kwds:
-            Keywords arguments that will be passed towards implementation 
+            Keywords arguments that will be passed towards implementation
             operator __init__.
-        
+
         Notes
         -----
-        A valid implementation should at least support the 
+        A valid implementation should at least support the
         hysop.base.custom_symbolic.CustomSymbolicOperatorBase interface.
         """
         base_kwds = first_not_None(base_kwds, {})
@@ -100,7 +115,7 @@ class DirectionalSymbolic(DirectionalOperatorFrontend):
         if len(fixed_residue) == 1:
             fixed_residue *= len(exprs)
         check_instance(fixed_residue, tuple, size=len(exprs))
-        
+
         force_residue = to_tuple(force_residue)
         if len(force_residue) == 1:
             force_residue *= len(exprs)
@@ -108,13 +123,13 @@ class DirectionalSymbolic(DirectionalOperatorFrontend):
 
         candidate_input_tensors  = first_not_None(candidate_input_tensors, variables.keys())
         candidate_output_tensors = first_not_None(candidate_output_tensors, variables.keys())
-        
+
         super(DirectionalSymbolic, self).__init__(name=name, variables=variables,
-                base_kwds=base_kwds, implementation=implementation, 
+                base_kwds=base_kwds, implementation=implementation,
                 candidate_input_tensors=candidate_input_tensors,
                 candidate_output_tensors=candidate_output_tensors,
                 **kwds)
-        
+
         if not issubclass(self._operator, CustomSymbolicOperatorBase):
             msg='Class {} does not inherit from the directional operator interface '
             msg+='({}).'
@@ -131,7 +146,7 @@ class DirectionalSymbolic(DirectionalOperatorFrontend):
             directional_exprs = {}
             for e, fixed_res, forced_res in zip(exprs, fixed_residue, force_residue):
                 se = split_assignement(e, fixed_res, forced_res, None)
-                for (k,v) in se.iteritems():
+                for (k,v) in se.items():
                     if (v==0):
                         continue
                     if (k in directional_exprs):
diff --git a/hysop/operator/dummy.py b/hysop/operator/dummy.py
index 85a54aa90439bf3c803069f427ed75d4beb1b975..3b3426a6b3212bd705693143d519a0972803d724 100644
--- a/hysop/operator/dummy.py
+++ b/hysop/operator/dummy.py
@@ -15,6 +15,11 @@ from hysop.core.graph.graph import op_apply
 
 class PythonDummy(HostOperator):
 
+    @debug
+    def __new__(cls, variables, **kwds):
+        return super(PythonDummy, cls).__new__(cls,
+            input_fields=None, output_fields=None, **kwds)
+
     @debug
     def __init__(self, variables, **kwds):
         check_instance(variables, dict, keys=Field,
@@ -91,3 +96,4 @@ class Dummy(ComputationalGraphNodeFrontend):
     @classmethod
     def default_implementation(cls):
         return Implementation.PYTHON
+
diff --git a/hysop/operator/enstrophy.py b/hysop/operator/enstrophy.py
index 09b2a327631645e1682dcc633850673dca21a81f..ed8ffa7d7d282f52a1c58f32d2c1832ac80b0c5f 100644
--- a/hysop/operator/enstrophy.py
+++ b/hysop/operator/enstrophy.py
@@ -3,7 +3,7 @@
 Enstrophy solver frontend.
 """
 from hysop.constants         import Implementation
-from hysop.tools.types       import check_instance
+from hysop.tools.types       import check_instance, first_not_None
 from hysop.tools.enum        import EnumFactory
 from hysop.tools.decorators  import debug
 from hysop.fields.continuous_field import Field
@@ -34,6 +34,17 @@ class Enstrophy(ComputationalGraphNodeFrontend):
     def default_implementation(cls):
         return Implementation.OPENCL
 
+    @debug
+    def __new__(cls, vorticity, enstrophy, variables,
+                rho=None, rho_0=1.0, WdotW=None,
+                implementation=None, base_kwds=None, **kwds):
+        base_kwds = first_not_None(base_kwds, {})
+        return super(Enstrophy, cls).__new__(cls,
+                vorticity=vorticity, rho=rho,
+                enstrophy=enstrophy, WdotW=WdotW, rho_0=rho_0,
+                variables=variables, base_kwds=base_kwds,
+                implementation=implementation, **kwds)
+
     @debug
     def __init__(self, vorticity, enstrophy, variables,
                 rho=None, rho_0=1.0, WdotW=None,
@@ -82,7 +93,7 @@ class Enstrophy(ComputationalGraphNodeFrontend):
         An Enstrophy operator implementation should at least support
         the hysop.operator.base.enstrophy.EnstrophyBase interface.
         """
-        base_kwds = base_kwds or dict()
+        base_kwds = first_not_None(base_kwds, {})
 
         check_instance(vorticity, Field)
         check_instance(enstrophy, ScalarParameter)
@@ -96,3 +107,4 @@ class Enstrophy(ComputationalGraphNodeFrontend):
                 enstrophy=enstrophy, WdotW=WdotW, rho_0=rho_0,
                 variables=variables, base_kwds=base_kwds,
                 implementation=implementation, **kwds)
+
diff --git a/hysop/operator/external_force.py b/hysop/operator/external_force.py
index 0709bbfbe17bb743a2cce46336f34a92358116b4..92d2d2f61bae0cf3ebb051bdd883ef3983b7f03b 100644
--- a/hysop/operator/external_force.py
+++ b/hysop/operator/external_force.py
@@ -23,14 +23,29 @@ class SpectralExternalForce(ComputationalGraphNodeFrontend):
                 Implementation.OPENCL: OpenClSpectralExternalForce,
         }
         return __implementations
-    
+
     @classmethod
     def default_implementation(cls):
         return Implementation.OPENCL
 
+    @debug
+    def __new__(cls, vorticity, Fext, dt, variables,
+                    Fmin=None, Fmax=None, Finf=None,
+                    all_quiet=False, pbasename=None, ppbasename=None,
+                    implementation=None, base_kwds=None, **kwds):
+        base_kwds = first_not_None(base_kwds, {})
+        return super(SpectralExternalForce, cls).__new__(cls,
+                vorticity=vorticity,
+                Fext=Fext, dt=dt, variables=variables,
+                Fmin=None, Fmax=None, Finf=None,
+                candidate_input_tensors=None,
+                candidate_output_tensors=None,
+                implementation=implementation,
+                base_kwds=base_kwds, **kwds)
+
     @debug
     def __init__(self, vorticity, Fext, dt, variables,
-                    Fmin=None, Fmax=None, Finf=None, 
+                    Fmin=None, Fmax=None, Finf=None,
                     all_quiet=False, pbasename=None, ppbasename=None,
                     implementation=None, base_kwds=None, **kwds):
         """
@@ -46,7 +61,7 @@ class SpectralExternalForce(ComputationalGraphNodeFrontend):
             Fmax = max(tmp)
             Finf = max(abs(Fmin), abs(Fmax))
             W   += dt*tmp
-        
+
         where Fext is computed from user given ExternalForce.
 
         Parameters
@@ -63,7 +78,7 @@ class SpectralExternalForce(ComputationalGraphNodeFrontend):
             If set to True, the TensorParameter will be generated automatically.
         all_quiet: bool
             Force all autogenerated TensorParameter to be quiet.
-            By default, only the autogenerated TensorParameters that are not required 
+            By default, only the autogenerated TensorParameters that are not required
             by the user are set to be quiet.
         pbasename: str, optional
             Parameters basename for created parameters.
@@ -96,29 +111,29 @@ class SpectralExternalForce(ComputationalGraphNodeFrontend):
         check_instance(Fext, ExternalForce)
         check_instance(dt, ScalarParameter)
         check_instance(variables, dict, keys=Field, values=CartesianTopologyDescriptors)
-        
+
         # Pregenerate parameters so that we can directly store them in self.
         default_pbasename  = 'curl_{}'.format(Fext.name)
-        default_ppbasename = u'{}x{}'.format(nabla, Fext.pretty_name)
+        default_ppbasename = '{}x{}'.format(nabla, Fext.pretty_name)
         pbasename  = first_not_None(pbasename,  default_pbasename)
         ppbasename = first_not_None(ppbasename, default_ppbasename)
-        parameters = MinMaxFieldStatisticsBase.build_parameters(field=vorticity, 
-                components=None, dtype=None, all_quiet=all_quiet, 
-                Fmin=Fmin, Fmax=Fmax, Finf=Finf, 
+        parameters = MinMaxFieldStatisticsBase.build_parameters(field=vorticity,
+                components=None, dtype=None, all_quiet=all_quiet,
+                Fmin=Fmin, Fmax=Fmax, Finf=Finf,
                 pbasename=pbasename, ppbasename=ppbasename)
 
         (Fmin, Fmax, Finf) = tuple(parameters[k] for k in ('Fmin', 'Fmax', 'Finf'))
-        
+
         check_instance(Fmin, TensorParameter, allow_none=True)
         check_instance(Fmax, TensorParameter, allow_none=True)
         check_instance(Finf, TensorParameter, allow_none=True)
 
-        super(SpectralExternalForce, self).__init__(vorticity=vorticity, 
+        super(SpectralExternalForce, self).__init__(vorticity=vorticity,
                 Fext=Fext, dt=dt, variables=variables,
                 Fmin=Fmin, Fmax=Fmax, Finf=Finf,
                 candidate_input_tensors=(vorticity,),
                 candidate_output_tensors=(vorticity,),
-                implementation=implementation, 
+                implementation=implementation,
                 base_kwds=base_kwds, **kwds)
 
         self.Fmin = Fmin
diff --git a/hysop/operator/flowrate_correction.py b/hysop/operator/flowrate_correction.py
index 6bc5c5fc52e2eb6e79bb5985805db630b1ea95fb..2003df1ea84ed539cc9072abbcc5a361ce653753 100644
--- a/hysop/operator/flowrate_correction.py
+++ b/hysop/operator/flowrate_correction.py
@@ -30,6 +30,18 @@ class FlowRateCorrection(ComputationalGraphNodeFrontend):
     def default_implementation(cls):
         return Implementation.PYTHON
 
+    def __new__(cls, velocity, vorticity,
+                 dt, flowrate, variables,
+                 implementation=None, **kwds):
+        return super(FlowRateCorrection, cls).__new__(cls,
+            velocity=velocity,
+            vorticity=vorticity,
+            dt=dt,
+            flowrate=flowrate,
+            variables=variables,
+            implementation=implementation,
+            **kwds)
+
     def __init__(self, velocity, vorticity,
                  dt, flowrate, variables,
                  implementation=None, **kwds):
@@ -67,3 +79,4 @@ class FlowRateCorrection(ComputationalGraphNodeFrontend):
             variables=variables,
             implementation=implementation,
             **kwds)
+
diff --git a/hysop/operator/gradient.py b/hysop/operator/gradient.py
index 1837d9704c748ddfd79705e63c35547a1ff7a21b..c90d10436ccf8b72a99ce03ede839d1513e27b62 100644
--- a/hysop/operator/gradient.py
+++ b/hysop/operator/gradient.py
@@ -33,6 +33,20 @@ class Gradient(MultiSpaceDerivatives):
     def implementations(cls):
         return SpaceDerivative.implementations()
 
+    @debug
+    def __new__(mcls, F, gradF, directions=None, implementation=None,
+                        cls=FiniteDifferencesSpaceDerivative,
+                        base_kwds=None, **kwds):
+        base_kwds = {} if (base_kwds is None) else base_kwds
+        base_kwds.update(dict(candidate_input_tensors=None, candidate_output_tensors=None))
+        return super(Gradient, mcls).__new__(mcls,
+                Fs=None, dFs=None, cls=cls,
+                candidate_input_tensors=(F,),
+                candidate_output_tensors=(gradF,),
+                derivatives=None, directions=directions,
+                implementation=implementation,
+                base_kwds=base_kwds, **kwds)
+
     @debug
     def __init__(self, F, gradF, directions=None, implementation=None,
                         cls=FiniteDifferencesSpaceDerivative,
@@ -116,7 +130,7 @@ class Gradient(MultiSpaceDerivatives):
 
         Fs  = tuple(f for f in F.fields for d in directions)
         dFs = gradF.fields
-        directions  = tuple(d for _ in xrange(nfields) for d in directions)
+        directions  = tuple(d for _ in range(nfields) for d in directions)
         derivatives = (1,)*len(directions)
 
         base_kwds.update(dict(
@@ -147,6 +161,19 @@ class MinMaxGradientStatistics(Gradient):
     This will generate multiple MinMaxDerivativeStatistics operators.
     """
 
+    @debug
+    def __new__(mcls, F, gradF=None, directions=None, coeffs=None,
+            Fmin=None, Fmax=None, Finf=None,
+            all_quiet=True, print_tensors=True,
+            name=None, pretty_name=None, pbasename=None, ppbasename=None,
+            variables=None, implementation=None, base_kwds=None,
+            cls=MinMaxFiniteDifferencesDerivativeStatistics,
+            **kwds):
+        return super(MinMaxGradientStatistics, mcls).__new__(mcls,
+                F=F, gradF=gradF,
+                directions=directions, extra_params=None,
+                cls=cls, variables=variables, **kwds)
+
     @debug
     def __init__(self, F, gradF=None, directions=None, coeffs=None,
             Fmin=None, Fmax=None, Finf=None,
@@ -274,7 +301,7 @@ class MinMaxGradientStatistics(Gradient):
                         allow_none=True)
         check_instance(name, str, allow_none=True)
         check_instance(pbasename, str, allow_none=True)
-        check_instance(ppbasename, (str,unicode), allow_none=True)
+        check_instance(ppbasename, str, allow_none=True)
         check_instance(implementation, Implementation, allow_none=True)
         check_instance(base_kwds, dict, allow_none=True)
         check_instance(all_quiet, bool, allow_none=True)
@@ -313,17 +340,17 @@ class MinMaxGradientStatistics(Gradient):
         }
 
         _pretty_names = {
-            'Fmin': u'{}\u208b',
-            'Fmax': u'{}\u208a',
-            'Finf': u'|{}|\u208a'
+            'Fmin': '{}₋',
+            'Fmax': '{}₊',
+            'Finf': '|{}|₊'
         }
 
         pbasename  = first_not_None(pbasename, gradF.name)
         ppbasename = first_not_None(ppbasename, gradF.pretty_name)
 
-        names = { k: v.format(pbasename) for (k,v) in _names.iteritems() }
-        pretty_names = { k: v.format(ppbasename.decode('utf-8'))
-                            for (k,v) in _pretty_names.iteritems() }
+        names = { k: v.format(pbasename) for (k,v) in _names.items() }
+        pretty_names = { k: v.format(ppbasename)
+                            for (k,v) in _pretty_names.items() }
 
         def make_param(k, quiet):
             return TensorParameter(name=names[k], pretty_name=pretty_names[k],
@@ -331,7 +358,7 @@ class MinMaxGradientStatistics(Gradient):
 
         parameters = {}
         _parameters = dict(Fmin=Fmin, Fmax=Fmax, Finf=Finf)
-        for (k,v) in _parameters.iteritems():
+        for (k,v) in _parameters.items():
             param = _parameters[k]
             if isinstance(param, TensorParameter):
                 pass
@@ -355,7 +382,7 @@ class MinMaxGradientStatistics(Gradient):
             raise ValueError(unused_coeffs)
 
         name = first_not_None(name, 'MinMax({})')
-        pretty_name = first_not_None(pretty_name, u'|\u00b1{}|')
+        pretty_name = first_not_None(pretty_name, '|±{}|')
 
         extra_params = { 'name': gradF.new_empty_array(),
                          'pretty_name': gradF.new_empty_array(),
@@ -363,17 +390,17 @@ class MinMaxGradientStatistics(Gradient):
                          'implementation': implementation }
 
         for (idx, Fi) in gradF.nd_iter():
-            for (statname, stat) in parameters.iteritems():
+            for (statname, stat) in parameters.items():
                 if (stat is None):
                     continue
                 pname  = _names[statname].format(Fi.name)
-                ppname = _pretty_names[statname].format(Fi.pretty_name.decode('utf-8'))
+                ppname = _pretty_names[statname].format(Fi.pretty_name)
                 S = stat.view(idx=idx, name=pname, pretty_name=ppname)
                 stats = extra_params.setdefault(statname, gradF.new_empty_array())
                 stats[idx] = S
             extra_params['name'][idx] = name.format(Fi.name)
             extra_params['pretty_name'][idx] = pretty_name.format(
-                                                Fi.pretty_name.decode('utf-8')).encode('utf-8')
+                                                Fi.pretty_name)
 
         super(MinMaxGradientStatistics, self).__init__(F=F, gradF=gradF,
                 directions=directions, extra_params=extra_params,
@@ -389,7 +416,7 @@ class MinMaxGradientStatistics(Gradient):
                 super(MergeTensorViewsOperator, self).apply(**kwds)
                 if not print_tensors:
                     return
-                for (k,v) in _parameters.iteritems():
+                for (k,v) in _parameters.items():
                     if (v is not None) and (v is not False):
                         param = parameters[k]
                         msg='>Parameter {} set to:\n{}'.format(param.pretty_name, param.value)
@@ -404,7 +431,7 @@ class MinMaxGradientStatistics(Gradient):
                 _phony_input_params.update({p.name:p for p in extra_params[pname].ravel()})
                 _phony_output_params[param.name] = param
         op = MergeTensorViewsOperator(name=name.format(gradF.name),
-                pretty_name=pretty_name.format(gradF.pretty_name.decode('utf-8')),
+                pretty_name=pretty_name.format(gradF.pretty_name),
                 input_params=_phony_input_params,
                 output_params=_phony_output_params)
         self._phony_op = op
diff --git a/hysop/operator/hdf_io.py b/hysop/operator/hdf_io.py
index 67f6e8666c6367a8a3adbcb88ea6575c69b7638f..a45cf0c41026247e3f773671b34a7721260f7992 100755
--- a/hysop/operator/hdf_io.py
+++ b/hysop/operator/hdf_io.py
@@ -12,8 +12,17 @@ import sys
 import os
 import functools
 from abc import ABCMeta, abstractmethod
+
+try:
+    import h5py
+except ImportError as e:
+    h5py = None
+    msg = 'Warning: h5py not found, you may not be able to'
+    msg += ' use hdf5 I/O functionalities.'
+    print(msg)
+    h5py_error = e
+
 from hysop import __H5PY_PARALLEL_COMPRESSION_ENABLED__, vprint
-from hysop.deps import h5py, sys
 from hysop.core.graph.graph import discretized
 from hysop.constants import DirectionLabels, HYSOP_REAL, Backend, TranspositionState, MemoryOrdering
 from hysop.tools.decorators import debug
@@ -21,21 +30,19 @@ from hysop.tools.types import check_instance, first_not_None
 from hysop.tools.numpywrappers import npw
 from hysop.tools.io_utils import IO, IOParams, XMF
 from hysop.core.graph.graph import op_apply
-from hysop.core.graph.computational_graph import ComputationalGraphOperator
+from hysop.backend.host.host_operator import HostOperatorBase
 from hysop.fields.continuous_field import Field
 from hysop.topology.cartesian_descriptor import CartesianTopologyDescriptors
 from hysop.core.memory.memory_request import MemoryRequest
 from hysop.topology.topology_descriptor import TopologyDescriptor
 
 
-class HDF_IO(ComputationalGraphOperator):
+class HDF_IO(HostOperatorBase, metaclass=ABCMeta):
     """
     Abstract interface to read/write from/to hdf files, for
     hysop fields.
     """
 
-    __metaclass__ = ABCMeta
-
     @classmethod
     def supported_backends(cls):
         """
@@ -43,9 +50,14 @@ class HDF_IO(ComputationalGraphOperator):
         """
         return Backend.all
 
-    def __init__(self, var_names=None,
+    def __new__(cls, var_names=None,
                 name_prefix='', name_postfix='',
                 force_backend=None, **kwds):
+        return super(HDF_IO, cls).__new__(cls, **kwds)
+
+    def __init__(self, var_names=None,
+                 name_prefix='', name_postfix='',
+                 force_backend=None, **kwds):
         """Read/write some fields data from/into hdf/xmdf files.
         Parallel io.
 
@@ -85,8 +97,8 @@ class HDF_IO(ComputationalGraphOperator):
         self.name_postfix = name_postfix
 
         if (h5py is None):
-            print ('You try to use HDF5 reader but h5py module ',)
-            print ('has not been found on your system.', )
+            print('You try to use HDF5 reader but h5py module')
+            print('has not been found on your system.')
             raise h5py_error
 
         # If no filename is given, set it to
@@ -148,7 +160,7 @@ class HDF_IO(ComputationalGraphOperator):
         # like a OpenCL mapped memory backend or when we do not want
         # to allocate memory for a topology that is just used for I/O.
         td_kwds = self._td_kwds
-        for (field, topo_descriptor) in self.input_fields.iteritems():
+        for (field, topo_descriptor) in self.input_fields.items():
             topo_descriptor = TopologyDescriptor.build_descriptor(
                 backend=self._force_backend,
                 operator=self,
@@ -156,7 +168,7 @@ class HDF_IO(ComputationalGraphOperator):
                 handle=topo_descriptor, **td_kwds)
             self.input_fields[field] = topo_descriptor
 
-        for (field, topo_descriptor) in self.output_fields.iteritems():
+        for (field, topo_descriptor) in self.output_fields.items():
             topo_descriptor = TopologyDescriptor.build_descriptor(
                 backend=self._force_backend,
                 operator=self,
@@ -186,7 +198,7 @@ class HDF_IO(ComputationalGraphOperator):
 
     def discretize(self):
         super(HDF_IO, self).discretize()
-        topo = self.input_fields[sorted(self.input_fields.keys(), key=lambda f:f.name)[0]]
+        topo = self.input_fields[next(iter(sorted(self.input_fields.keys(), key=lambda f:f.name)))]
         use_local_hdf5 = (topo.cart_size == 1)
         use_local_hdf5 |= (topo.proc_shape[0] == topo.cart_size) and (topo.cart_size <= 16) and (not self.io_params.hdf5_disable_slicing)
         # XDMF JOIN do not support more than 16 arguments
@@ -208,7 +220,7 @@ class HDF_IO(ComputationalGraphOperator):
 
         local_compute_slices = {}
         global_compute_slices = {}
-        for (field, itopo) in self.input_fields.iteritems():
+        for (field, itopo) in self.input_fields.items():
             mesh = itopo.mesh
             assert (topo.domain._domain is itopo.domain._domain), 'domain mismatch'
             assert npw.array_equal(refmesh.grid_resolution,
@@ -218,8 +230,8 @@ class HDF_IO(ComputationalGraphOperator):
                 local_compute_slices[field] = mesh.local_compute_slices
                 global_compute_slices[field] = mesh.global_compute_slices
             else:
-                local_compute_slices[field] = tuple(slice(0, 0) for _ in xrange(self.domain.dim))
-                global_compute_slices[field] = tuple(slice(0, 0) for _ in xrange(self.domain.dim))
+                local_compute_slices[field] = tuple(slice(0, 0) for _ in range(self.domain.dim))
+                global_compute_slices[field] = tuple(slice(0, 0) for _ in range(self.domain.dim))
         self._local_compute_slices = local_compute_slices
         self._global_compute_slices = global_compute_slices
         self.refmesh = refmesh
@@ -229,7 +241,7 @@ class HDF_IO(ComputationalGraphOperator):
             var_names = {}
             # Get field names and initialize dataset dict.
             for df in self.discrete_fields:
-                for d in xrange(df.nb_components):
+                for d in range(df.nb_components):
                     if df.nb_components == 1:
                         name = name_prefix + df.name + name_postfix
                     else:
@@ -241,12 +253,12 @@ class HDF_IO(ComputationalGraphOperator):
             for var in self.var_names:
                 # Discrete field associated to var
                 var_d = var.discretize(topo)
-                for d in xrange(var_d.nb_components):
+                for d in range(var_d.nb_components):
                     name = name_prefix + self.var_names[var]
                     name += '_' + DirectionLabels[d] + name_postfix
                     self.dataset[name] = var_d.data[d]
 
-        for (f, name) in self.var_names.iteritems():
+        for (f, name) in self.var_names.items():
             assert f in self._local_compute_slices
             assert f in self._global_compute_slices
             self._local_compute_slices[name] = self._local_compute_slices[f]
@@ -274,7 +286,7 @@ class HDF_IO(ComputationalGraphOperator):
     @classmethod
     def supports_multiple_topologies(cls):
         return True
-    
+
     @classmethod
     def supports_mpi(cls):
         return True
@@ -296,8 +308,14 @@ class HDF_Writer(HDF_IO):
 </Xdmf>
 """
 
-    def __init__(self, variables, 
-            name=None, pretty_name=None, **kwds):
+    def __new__(cls, variables,
+                name=None, pretty_name=None, **kwds):
+        return super(HDF_Writer, cls).__new__(cls,
+                                              input_fields=None, output_fields=None,
+                                              name=name, pretty_name=pretty_name, **kwds)
+
+    def __init__(self, variables,
+                 name=None, pretty_name=None, **kwds):
         """
         Write some fields data into hdf/xmdf files.
         Parallel writings.
@@ -310,9 +328,9 @@ class HDF_Writer(HDF_IO):
         check_instance(variables, dict, keys=Field, values=CartesianTopologyDescriptors)
 
         vnames = ['{}'.format(field.name) for field in variables.keys()]
-        vpnames = [field.pretty_name.decode('utf-8') for field in variables.keys()]
+        vpnames = [field.pretty_name for field in variables.keys()]
         name = first_not_None(name, 'write_{}'.format('_'.join(vnames)))
-        pname = first_not_None(pretty_name, u'write_{}'.format(u'_'.join(vpnames)))
+        pname = first_not_None(pretty_name, 'write_{}'.format('_'.join(vpnames)))
         super(HDF_Writer, self).__init__(input_fields=variables, output_fields=None,
                                          name=name, pretty_name=pname, **kwds)
 
@@ -337,7 +355,7 @@ class HDF_Writer(HDF_IO):
         requests = super(HDF_Writer, self).get_work_properties(**kwds)
 
         max_bytes = 0
-        for (name, data) in self.dataset.iteritems():
+        for (name, data) in self.dataset.items():
             if (data.backend.kind == Backend.HOST):
                 continue
             if (data.backend.kind == Backend.OPENCL):
@@ -357,7 +375,7 @@ class HDF_Writer(HDF_IO):
     def setup(self, work, **kwds):
         super(HDF_Writer, self).setup(work=work, **kwds)
         self._setup_grid_template()
-        for (name, data) in self.dataset.iteritems():
+        for (name, data) in self.dataset.items():
             data = data[self._local_compute_slices[name]]
             if (data.backend.kind is Backend.HOST):
                 def get_data(data=data.handle):
@@ -419,9 +437,11 @@ class HDF_Writer(HDF_IO):
         topo = self.topology
         dim = topo.domain.dim
         dx = list(topo.mesh.space_step)
+
         mesh = self.refmesh
         res = list(mesh.grid_resolution)
         orig = list(topo.domain.origin)
+
         resolution = [1, ]*3
         origin = [0.0, ]*3
         step = [0.0, ]*3
@@ -438,7 +458,7 @@ class HDF_Writer(HDF_IO):
         ds_names = self.dataset.keys()
         joinrkfiles = None
         if self.use_local_hdf5 and (self.topology.cart_size > 1):
-            joinrkfiles = range(self.topology.cart_size)
+            joinrkfiles = tuple(range(self.topology.cart_size))
         grid_attributes = XMF.prepare_grid_attributes(
             ds_names,
             resolution, origin, step, joinrkfiles=joinrkfiles)
@@ -511,27 +531,26 @@ class HDF_Writer(HDF_IO):
         # Get the names of output input_fields and create the corresponding
         # datasets
         if self.use_local_hdf5:
-            for name in self.dataset:
-                ds = self._hdf_file.create_dataset(name,
-                                                   self._local_grid_resolution,
-                                                   dtype=npw.float64,
+            for name in sorted(self.dataset):
+                ds = self._hdf_file.create_dataset(name=name,
+                                                   shape=self._local_grid_resolution,
+                                                   dtype=self.dataset[name].dtype,
                                                    compression=compression,
-                                                   track_times=False) # required if we want to compare checksums in tests 
-                ds[...] = self._data_getters[name]().astype(npw.float64)
+                                                   track_times=False)  # required if we want to compare checksums in tests
+                ds[...] = self._data_getters[name]()
         elif self.use_parallel_hdf5:
-            for name in self.dataset:
-                ds = self._hdf_file.create_dataset(name,
-                                                   self._global_grid_resolution,
-                                                   dtype=npw.float64,
+            for name in sorted(self.dataset):
+                ds = self._hdf_file.create_dataset(name=name,
+                                                   shape=self._global_grid_resolution,
+                                                   dtype=self.dataset[name].dtype,
                                                    compression=compression,
-                                                   track_times=False) # required if we want to compare checksums in tests 
+                                                   track_times=False)  # required if we want to compare checksums in tests
                 if (compression is None):
                     # no need for collective here because we do not use any filter
-                    ds[self._global_compute_slices[name]] = self._data_getters[name]().astype(npw.float64)
+                    ds[self._global_compute_slices[name]] = self._data_getters[name]()
                 else:
                     with ds.collective:
-                        ds[self._global_compute_slices[name]
-                           ] = self._data_getters[name]().astype(npw.float64)
+                        ds[self._global_compute_slices[name]] = self._data_getters[name]()
         else:
             msg = 'Unknown HDF5 mode.'
             raise RuntimeError(msg)
@@ -573,8 +592,8 @@ class HDF_Writer(HDF_IO):
                 subprocess.check_call(command)
             except OSError as e:
                 msg = "\nFATAL ERROR: Could not find or execute postprocessing script '{}'.".format(command[0])
-                print msg
-                print
+                print(msg)
+                print()
                 raise
             except subprocess.CalledProcessError as e:
                 if (e.returncode == 10):
@@ -585,7 +604,7 @@ class HDF_Writer(HDF_IO):
                     msg = '\nFATAL ERROR: Failed to call I/O postprocessing command.\n{}\n'
                     msg = msg.format(' '.join(command))
                     print(msg)
-                    print
+                    print()
                     raise
 
         if self.io_params.dump_is_temporary:
@@ -599,6 +618,11 @@ class HDF_Reader(HDF_IO):
     Parallel reading of hdf/xdmf files to fill some fields in.
     """
 
+    def __new__(cls, variables, restart=None, name=None, **kwds):
+        return super(HDF_Reader, cls).__new__(cls,
+                                              input_fields=None, output_fields=None,
+                                              name=name, **kwds)
+
     def __init__(self, variables, restart=None, name=None, **kwds):
         """Read some fields data from hdf/xmdf files.
         Parallel readings.
@@ -617,10 +641,10 @@ class HDF_Reader(HDF_IO):
         """
 
         vnames = ['{}[{}]'.format(var.name[:3], topo.id)
-                    for var,topo in variables.iteritems()]
+                  for var, topo in variables.items()]
         name = name or 'read_{}'.format(','.join(vnames))
         super(HDF_Reader, self).__init__(input_fields=None, output_fields=variables,
-                                            name=name, **kwds)
+                                         name=name, **kwds)
         self.restart = restart
         if self.restart is not None:
             # filename = prefix_N, N = counter value
diff --git a/hysop/operator/integrate.py b/hysop/operator/integrate.py
index 8bc7db166c65e7f14beea8ffb6f83aa613feb1e7..2420de098aa43795cb8004b9ded6aee794f21e81 100644
--- a/hysop/operator/integrate.py
+++ b/hysop/operator/integrate.py
@@ -3,7 +3,7 @@
 Enstrophy solver frontend.
 """
 from hysop.constants import Implementation
-from hysop.tools.types import check_instance
+from hysop.tools.types import check_instance, first_not_None
 from hysop.tools.enum import EnumFactory
 from hysop.tools.decorators import debug
 from hysop.fields.continuous_field import Field
@@ -34,6 +34,70 @@ class Integrate(ComputationalGraphNodeFrontend):
     def default_implementation(cls):
         return Implementation.PYTHON
 
+    @debug
+    def __new__(cls, field, variables,
+                 parameter=None, scaling=None,
+                 base_kwds=None, expr=None, **kwds):
+        base_kwds = first_not_None(base_kwds, {})
+        return super(Integrate, cls).__new__(cls,
+                field=field, variables=variables,
+                parameter=parameter, scaling=scaling, expr=expr,
+                base_kwds=base_kwds, **kwds)
+
+    @debug
+    def __init__(self, field, variables,
+                 parameter=None, scaling=None,
+                 base_kwds=None, expr=None, **kwds):
+        """
+        Initialize a Integrate operator frontend.
+
+        Integrate a field on it compute domain and put the result in a parameter.
+
+        in:  field
+            Possibly as multi-component field that should be integrated.
+        out: parameter
+             P = scaling * integral_V(field)
+             where V is the field domain volume
+             and scaling depends on specified scaling method.
+
+        parameter
+        ----------
+        field: Field
+            Input continuous field to be integrated.
+        variables: dict
+            dictionary of fields as keys and topologies as values.
+        parameter: ScalarParameter or TensorParameter
+            The output parameter that will contain the integral.
+            Should match field.nb_components.
+            A default parameter will be created if not specified.
+        scaling: None, float, str or array-like of str, optional
+            Scaling method used after integration.
+            'volumic':   scale by domain size (product of mesh space steps)
+            'normalize': scale by first integration (first value will be 1.0)
+            Defaults to volumic integration.
+        expr: None, str, optional
+            expression performed on each entry of the array before sum, elements are referenced as `x[i]`
+        implementation: Implementation, optional, defaults to None
+            target implementation, should be contained in available_implementations().
+            If None, implementation will be set to default_implementation().
+        base_kwds: dict, optional, defaults to None
+            Base class keywords arguments.
+            If None, an empty dict will be passed.
+        kwds:
+            Extra keywords arguments that will be passed towards implementation
+            enstrophy operator __init__.
+
+        Notes
+        -----
+        An Integrate operator implementation should at least support
+        the hysop.operator.base.integrate.IntegrateBase interface.
+        """
+        base_kwds = first_not_None(base_kwds, {})
+        super(Integrate, self).__init__(
+                field=field, variables=variables,
+                parameter=parameter, scaling=scaling, expr=expr,
+                base_kwds=base_kwds, **kwds)
+
     @debug
     def __init__(self, field, variables,
                  parameter=None, scaling=None,
@@ -82,7 +146,7 @@ class Integrate(ComputationalGraphNodeFrontend):
         An Integrate operator implementation should at least support
         the hysop.operator.base.integrate.IntegrateBase interface.
         """
-        base_kwds = base_kwds or dict()
+        base_kwds = first_not_None(base_kwds, {})
 
         check_instance(field, Field)
         check_instance(variables, dict, keys=Field, values=CartesianTopologyDescriptors)
@@ -105,3 +169,4 @@ class Integrate(ComputationalGraphNodeFrontend):
         super(Integrate, self).__init__(field=field, variables=variables,
                                         parameter=parameter, scaling=scaling, expr=expr,
                                         base_kwds=base_kwds, **kwds)
+
diff --git a/hysop/operator/kinetic_energy.py b/hysop/operator/kinetic_energy.py
index cc563cbd64f45e2bf032ca681fe9266b24149f7a..206c78e94e9b3a27cf337063e6432194bc1a3825 100644
--- a/hysop/operator/kinetic_energy.py
+++ b/hysop/operator/kinetic_energy.py
@@ -17,25 +17,40 @@ from hysop.operator.enstrophy import Enstrophy
 class KineticEnergy(Enstrophy):
     """
     Interface computing kinetic energy using the kinetic energy operators.
-    Available implementations are: 
+    Available implementations are:
         *OPENCL (gpu based implementation)
     """
-    
+
+    @debug
+    def __new__(cls, velocity, kinetic_energy, variables,
+                rho=None, rho_0=1.0, UdotU=None,
+                implementation=None, base_kwds=None, **kwds):
+        return super(KineticEnergy, cls).__new__(cls,
+                vorticity=velocity,
+                rho=rho,
+                enstrophy=kinetic_energy,
+                WdotW=UdotU,
+                rho_0=rho_0,
+                variables=variables,
+                base_kwds=base_kwds,
+                implementation=implementation,
+                **kwds)
+
     @debug
-    def __init__(self, velocity, kinetic_energy, variables, 
-                rho=None, rho_0=1.0, UdotU=None, 
+    def __init__(self, velocity, kinetic_energy, variables,
+                rho=None, rho_0=1.0, UdotU=None,
                 implementation=None, base_kwds=None, **kwds):
         """
         Initialize a KineticEnergy operator frontend.
 
-        KineticEnergy is the scaled volume average of rho*(U.U) on the domain where . represents 
+    KineticEnergy is the scaled volume average of rho*(U.U) on the domain (where . represents
         the vector dot product).
 
         in:  U (velocity field)
              rho (density field, optional, defaults to 1.0 everywhere)
         out: E = 1.0/(2*V*rho_0) * integral(rho*(U.U)) => kinetic energy (scalar parameter)
              where V is the domain volume, rho_0 the reference density.
-        
+
         Parameters
         ----------
         velocity: Field
@@ -43,7 +58,7 @@ class KineticEnergy(Enstrophy):
         kinetic_energy: ScalarParameter
             KineticEnergy scalar output parameter.
         rho: Field, optional
-            Input continuous density field, if not given, 
+            Input continuous density field, if not given,
             defaults to 1.0 on the whole domain.
         rho_0: float, optional
             Reference density, defaults to 1.0.
@@ -61,10 +76,10 @@ class KineticEnergy(Enstrophy):
             Base class keywords arguments.
             If None, an empty dict will be passed.
         kwds:
-            Extra keywords arguments that will be passed towards implementation 
+            Extra keywords arguments that will be passed towards implementation
             operator __init__.
         """
-        base_kwds = base_kwds or dict()
+        base_kwds = first_not_None(base_kwds, {})
 
         check_instance(velocity, Field)
         check_instance(kinetic_energy, ScalarParameter)
@@ -73,15 +88,15 @@ class KineticEnergy(Enstrophy):
         check_instance(UdotU, Field, allow_none=True)
         check_instance(rho, Field, allow_none=True)
         check_instance(rho_0, float)
-        
+
         super(KineticEnergy, self).__init__(
-                vorticity=velocity, 
-                rho=rho, 
-                enstrophy=kinetic_energy, 
-                WdotW=UdotU, 
+                vorticity=velocity,
+                rho=rho,
+                enstrophy=kinetic_energy,
+                WdotW=UdotU,
                 rho_0=rho_0,
-                variables=variables, 
-                base_kwds=base_kwds, 
-                implementation=implementation, 
+                variables=variables,
+                base_kwds=base_kwds,
+                implementation=implementation,
                 **kwds)
 
diff --git a/hysop/operator/mean_field.py b/hysop/operator/mean_field.py
index e48595100424c2e5d92ccf3ca75b282ba8d2e560..c71c55c6bfb454de67d94f17f609fc4432498172 100644
--- a/hysop/operator/mean_field.py
+++ b/hysop/operator/mean_field.py
@@ -18,16 +18,19 @@ from hysop.core.graph.graph import op_apply
 from hysop.core.graph.computational_graph import ComputationalGraphOperator
 from hysop.topology.cartesian_descriptor import CartesianTopologyDescriptors
 from hysop.fields.continuous_field import Field
+from hysop.backend.host.host_operator import HostOperatorBase
 
-
-class ComputeMeanField(ComputationalGraphOperator):
+class ComputeMeanField(HostOperatorBase):
     """
     Interface to compute the mean of a field in chosen axes.
     """
 
     @debug
-    def __init__(self, fields, variables, io_params,
-                **kwds):
+    def __new__(cls, fields, variables, io_params, **kwds):
+        return super(ComputeMeanField, cls).__new__(cls, input_fields=None, io_params=io_params, **kwds)
+
+    @debug
+    def __init__(self, fields, variables, io_params, **kwds):
         """
         Compute and write the mean of fields in given direction, possiblity on a subview.
 
@@ -51,7 +54,7 @@ class ComputeMeanField(ComputationalGraphOperator):
         input_fields = { field: variables[field] for field in fields.keys() }
         super(ComputeMeanField, self).__init__(input_fields=input_fields, io_params=io_params, **kwds)
         self.averaged_fields = fields
-    
+
     @debug
     def get_field_requirements(self):
         # set good transposition state and no ghosts (because of OPENCL mean)
@@ -64,11 +67,11 @@ class ComputeMeanField(ComputationalGraphOperator):
             req.min_ghosts=(0,)*field.dim
             req.max_ghosts=(0,)*field.dim
         return requirements
-        
+
     def discretize(self):
         super(ComputeMeanField, self).discretize()
         averaged_dfields = {}
-        for field, (view, axes) in self.averaged_fields.iteritems():
+        for field, (view, axes) in self.averaged_fields.items():
             dfield = self.input_discrete_fields[field]
 
             axes = to_tuple(first_not_None(axes, range(dfield.dim)))
@@ -99,7 +102,7 @@ class ComputeMeanField(ComputationalGraphOperator):
             check_instance(view, tuple, values=(type(Ellipsis), slice), size=dfield.dim-len(axes))
             averaged_dfields[dfield] = (view, axes)
         self.averaged_dfields = averaged_dfields
-    
+
     def setup(self, work=None):
         super(ComputeMeanField, self).setup(work=work)
         path = self.io_params.filename
@@ -107,16 +110,16 @@ class ComputeMeanField(ComputationalGraphOperator):
             os.makedirs(path)
         self.path = path
         self.write_counter = 0
-    
+
     def filename(self, field, i):
         return '{}/{}_{}'.format(self.path, field.name, i)
-    
+
     @op_apply
     def apply(self, simulation, **kwds):
         if (simulation is None):
             raise ValueError("Missing simulation value for monitoring.")
         if self.io_params.should_dump(simulation=simulation):
-            for (dfield, (view, axes)) in self.averaged_dfields.iteritems():
+            for (dfield, (view, axes)) in self.averaged_dfields.items():
                 filename = self.filename(dfield, self.write_counter)
                 arrays = {}
                 for (i,data) in enumerate(dfield.data):
@@ -132,6 +135,6 @@ class ComputeMeanField(ComputationalGraphOperator):
     @classmethod
     def supported_backends(cls):
         return Backend.all
-    
+
 
 
diff --git a/hysop/operator/memory_reordering.py b/hysop/operator/memory_reordering.py
index e997e30806795f25416bd2da34241f406bcddd5d..18779f0ee3ccb423c70e90a04a776235f1eb97a2 100644
--- a/hysop/operator/memory_reordering.py
+++ b/hysop/operator/memory_reordering.py
@@ -53,6 +53,18 @@ class MemoryReordering(ComputationalGraphNodeGenerator):
         msg += 'implementation should match the discrete field topology backend.'
         raise RuntimeError(msg)
 
+    @debug
+    def __new__(cls, fields, variables,
+                target_memory_order,
+                output_fields=None,
+                implementation=None,
+                name=None,
+                base_kwds=None,
+                **kwds):
+        base_kwds = first_not_None(base_kwds, {})
+        return super(MemoryReordering, cls).__new__(cls, name=name,
+                                                    candidate_input_tensors=None, candidate_output_tensors=None, **base_kwds)
+
     @debug
     def __init__(self, fields, variables,
                  target_memory_order,
@@ -128,8 +140,8 @@ class MemoryReordering(ComputationalGraphNodeGenerator):
         assert target_memory_order in (MemoryOrdering.C_CONTIGUOUS,
                                        MemoryOrdering.F_CONTIGUOUS)
 
-        candidate_input_tensors = filter(lambda x: x.is_tensor, input_fields)
-        candidate_output_tensors = filter(lambda x: x.is_tensor, output_fields)
+        candidate_input_tensors = tuple(filter(lambda x: x.is_tensor, input_fields))
+        candidate_output_tensors = tuple(filter(lambda x: x.is_tensor, output_fields))
 
         base_kwds = first_not_None(base_kwds, {})
         if (not 'mpi_params' in base_kwds):
diff --git a/hysop/operator/min_max.py b/hysop/operator/min_max.py
index c15b66cf2a1abb12f421f825e2434e8367f60679..e2f925581595a7c74b135826d08799c654531759 100644
--- a/hysop/operator/min_max.py
+++ b/hysop/operator/min_max.py
@@ -41,6 +41,18 @@ class MinMaxFieldStatistics(ComputationalGraphNodeFrontend):
     def default_implementation(cls):
         return Implementation.PYTHON
 
+    @debug
+    def __new__(cls, field, components=None, coeffs=None,
+            Fmin=None, Fmax=None, Finf=None, all_quiet=False,
+            name=None, pbasename=None, ppbasename=None,
+            variables=None, implementation=None, base_kwds=None, **kwds):
+        return super(MinMaxFieldStatistics, cls).__new__(cls,
+                field=field, components=components,
+                coeffs=coeffs, Fmin=Fmin, Fmax=Fmax, Finf=Finf, all_quiet=all_quiet,
+                name=name, pbasename=pbasename, ppbasename=ppbasename,
+                variables=variables, implementation=implementation, base_kwds=base_kwds,
+                **kwds)
+
     @debug
     def __init__(self, field, components=None, coeffs=None,
             Fmin=None, Fmax=None, Finf=None, all_quiet=False,
@@ -135,7 +147,7 @@ class MinMaxFieldStatistics(ComputationalGraphNodeFrontend):
                 values=CartesianTopologyDescriptors, allow_none=True)
         check_instance(name, str, allow_none=True)
         check_instance(pbasename, str, allow_none=True)
-        check_instance(ppbasename, (str, unicode), allow_none=True)
+        check_instance(ppbasename, str, allow_none=True)
         check_instance(implementation, Implementation, allow_none=True)
         check_instance(base_kwds, dict, keys=str, allow_none=True)
 
@@ -169,7 +181,7 @@ class MinMaxFieldStatistics(ComputationalGraphNodeFrontend):
 class MinMaxDerivativeStatistics(ComputationalGraphNodeFrontend):
     """
     Operator frontend to compute min and max statistics on a specific
-    derivative of a field component, without keeping its output.
+    derivative of a scalar field, without keeping its output.
     """
 
     @classmethod
@@ -180,10 +192,23 @@ class MinMaxDerivativeStatistics(ComputationalGraphNodeFrontend):
     def default_implementation(cls):
         raise NotImplementedError
 
+    @debug
+    def __new__(cls, F, dF=None, A=None,
+            derivative=None, direction=None,
+            Fmin=None, Fmax=None, Finf=None, coeffs=None, all_quiet=False,
+            name=None, pbasename=None, ppbasename=None,
+            variables=None, implementation=None, base_kwds=None, **kwds):
+        return super(MinMaxDerivativeStatistics, cls).__new__(cls,
+            F=F, dF=dF, A=A,
+            derivative=derivative, direction=direction,
+            Fmin=Fmin, Fmax=Fmax, Finf=Finf, coeffs=coeffs, all_quiet=all_quiet,
+            name=name, pbasename=pbasename, ppbasename=ppbasename,
+            variables=variables, implementation=implementation,
+            base_kwds=base_kwds, **kwds)
+
     @debug
     def __init__(self, F, dF=None, A=None,
-            derivative=None, component=None, direction=None,
-            out_component=None, scaling_view=None,
+            derivative=None, direction=None,
             Fmin=None, Fmax=None, Finf=None, coeffs=None, all_quiet=False,
             name=None, pbasename=None, ppbasename=None,
             variables=None, implementation=None, base_kwds=None, **kwds):
@@ -197,9 +222,9 @@ class MinMaxDerivativeStatistics(ComputationalGraphNodeFrontend):
             Finf: max value of the absolute value of a
                     derivative of the field (computed using Fmin and Fmax).
 
-        First compute the derivative of a component of a field F in a given direction
-        at a given order and on a given backend out of place in a specific output component of
-        dF. The derivative is then possibly scaled by another field/parameter/value A.
+        First compute the derivative of a scalar field F in a given direction
+            at a given order and on a given backend out of place in scalar field dF.
+        The derivative is then possibly scaled by another field/parameter/value A.
 
         After the scaled derivative has been computed, compute user requested statistics
         (min and max values) on this new field and scale those statistics by other scaling
@@ -209,18 +234,14 @@ class MinMaxDerivativeStatistics(ComputationalGraphNodeFrontend):
             dF[k] = alpha * d^n(F[i])/dXj**n
 
         2) Compute statistics
-            Fmin = Smin * min(dF[k])
-            Fmax = Smax * max(dF[k])
+            Fmin = Smin * min(dF)
+            Fmax = Smax * max(dF)
             Finf = Sinf * max(|Fmin|, |Fmax|)
 
         where F  is an input field
               dF is an output field (by default a temporary field).
-              k = out_component < dF.nb_components
-              i =     component <  F.nb_components
               n = derivative order > 0
-              alpha = A[scaling_view]
-                where A is a Field, a Parameter or a scalar.
-                      scaling_view is a component, a slice or None.
+              alpha = A, where A is a Field, a Parameter or a scalar.
               Fmin = created or supplied TensorParameter.
               Fmax = created or supplied TensorParameter.
               Finf = created or supplied TensorParameter.
@@ -248,22 +269,9 @@ class MinMaxDerivativeStatistics(ComputationalGraphNodeFrontend):
         derivative: int, optional
             Which derivative to generate.
             Defaults to 1.
-        component: int, optional
-            The component on which to take the derivative.
-            Defaults to 0.
         direction: int, optional
             Directions in which to take the derivative.
             Defaults to 0.
-        out_component: int, optional
-            The component were the result will be stored.
-            Defaults to component.
-        scaling_view: int or slice, optional
-            View on the scaling field/parameter/value A.
-            Should be a component if this is a field.
-            Should be a slice if this is a TensorParameter.
-            Should be None it this is a ScalarParameter or
-            a numerical value.
-            Should only be given if A is given.
         F...: TensorParameter or boolean, optional
             The output parameters that will contain the statistics.
             At least one statistic should be specified (either by boolean or TensorParameter).
@@ -315,15 +323,13 @@ class MinMaxDerivativeStatistics(ComputationalGraphNodeFrontend):
         check_instance(F, Field)
         check_instance(dF, Field, allow_none=True)
         check_instance(derivative, int, allow_none=True)
-        check_instance(component, int, allow_none=True)
         check_instance(direction, int, allow_none=True)
-        check_instance(out_component, int, allow_none=True)
         check_instance(coeffs, dict, keys=str, values=(int, float, npw.number), allow_none=True)
         check_instance(variables, dict, keys=Field, values=CartesianTopologyDescriptors,
                         allow_none=True)
         check_instance(name, str, allow_none=True)
         check_instance(pbasename, str, allow_none=True)
-        check_instance(ppbasename, (str, unicode), allow_none=True)
+        check_instance(ppbasename, str, allow_none=True)
         check_instance(implementation, Implementation, allow_none=True)
         check_instance(base_kwds, dict, keys=str, allow_none=True)
 
@@ -338,14 +344,13 @@ class MinMaxDerivativeStatistics(ComputationalGraphNodeFrontend):
 
         # Pregenerate parameters so that we can directly store them in self.
         parameters = MinMaxDerivativeStatisticsBase.build_parameters(field=F,
-                components=(component,), all_quiet=all_quiet,
+                all_quiet=all_quiet, components=tuple(range(F.nb_components)),
                 Fmin=Fmin, Fmax=Fmax, Finf=Finf,
                 pbasename=pbasename, ppbasename=ppbasename)
         (Fmin, Fmax, Finf) = tuple(parameters[k] for k in ('Fmin', 'Fmax', 'Finf'))
 
         super(MinMaxDerivativeStatistics, self).__init__(F=F, dF=dF, A=A,
-            derivative=derivative, component=component, direction=direction,
-            out_component=out_component, scaling_view=scaling_view,
+            derivative=derivative, direction=direction,
             Fmin=Fmin, Fmax=Fmax, Finf=Finf, coeffs=coeffs, all_quiet=all_quiet,
             name=name, pbasename=pbasename, ppbasename=ppbasename,
             variables=variables, implementation=implementation,
@@ -357,7 +362,7 @@ class MinMaxDerivativeStatistics(ComputationalGraphNodeFrontend):
 class MinMaxSpectralDerivativeStatistics(MinMaxDerivativeStatistics):
     """
     Operator frontend to compute min and max statistics on a specific
-    derivative of a field component using the spectral method.
+    derivative of a scalar field using the spectral method.
     """
     @classmethod
     def implementations(cls):
@@ -380,7 +385,7 @@ class MinMaxSpectralDerivativeStatistics(MinMaxDerivativeStatistics):
 class MinMaxFiniteDifferencesDerivativeStatistics(MinMaxDerivativeStatistics):
     """
     Operator frontend to compute min and max statistics on a specific
-    derivative of a field component using finite differences.
+    derivative of a scalar field using finite differences.
     """
     @classmethod
     def implementations(cls):
diff --git a/hysop/operator/misc.py b/hysop/operator/misc.py
index e6ed568da3885301ca0f7758d2786779d7bfc60b..fc020fbbbc9b192edc5ffbccc345fcd7abb70f31 100644
--- a/hysop/operator/misc.py
+++ b/hysop/operator/misc.py
@@ -57,6 +57,16 @@ class ForceTopologyState(Noop):
     If backend is not given, all input fields will be imposed to live on Backend.HOST.
     """
 
+    @debug
+    def __new__(cls, fields, variables,
+                tstate=None, memorder=None,
+                backend=None, extra_kwds=None,
+                mpi_params=None, cl_env=None,
+                **kwds):
+        kwds.setdefault('mpi_params', None)
+        return super(ForceTopologyState, cls).__new__(cls,
+                                                      input_fields=None, output_fields=None, **kwds)
+
     @debug
     def __init__(self, fields, variables,
                  tstate=None, memorder=None,
@@ -86,9 +96,9 @@ class ForceTopologyState(Noop):
         cl_env = first_not_None(cl_env, extra_kwds.get('cl_env', None))
         mpi_params = first_not_None(mpi_params, extra_kwds.get('mpi_params', None), getattr(cl_env, 'mpi_params', None))
 
-        extra_kwds.setdefault('cl_env',  cl_env)
         extra_kwds.setdefault('mpi_params', mpi_params)
-        kwds.setdefault('cl_env',  cl_env)
+        extra_kwds.setdefault('cl_env',  cl_env)
+        #kwds.setdefault('cl_env',  cl_env)
         kwds.setdefault('mpi_params', mpi_params)
 
         super(ForceTopologyState, self).__init__(input_fields=input_fields,
@@ -102,7 +112,7 @@ class ForceTopologyState(Noop):
     def get_field_requirements(self):
         from hysop.topology.topology_descriptor import TopologyDescriptor
         from hysop.fields.field_requirements import DiscreteFieldRequirements
-        for (field, topo_descriptor) in self.input_fields.iteritems():
+        for (field, topo_descriptor) in self.input_fields.items():
             topo_descriptor = TopologyDescriptor.build_descriptor(
                 backend=self.backend,
                 operator=self,
@@ -111,7 +121,7 @@ class ForceTopologyState(Noop):
                 **self.extra_kwds)
             self.input_fields[field] = topo_descriptor
 
-        for (field, topo_descriptor) in self.output_fields.iteritems():
+        for (field, topo_descriptor) in self.output_fields.items():
             topo_descriptor = TopologyDescriptor.build_descriptor(
                 backend=self.backend,
                 operator=self,
@@ -123,7 +133,7 @@ class ForceTopologyState(Noop):
         # and we use default DiscreteFieldRequirements (ie. no min ghosts, no max ghosts,
         # can_split set to True in all directions, all TranspositionStates).
         input_field_requirements = {}
-        for (field, topo_descriptor) in self.input_fields.iteritems():
+        for (field, topo_descriptor) in self.input_fields.items():
             if (topo_descriptor is None):
                 req = None
             else:
@@ -134,7 +144,7 @@ class ForceTopologyState(Noop):
             input_field_requirements[field] = req
 
         output_field_requirements = {}
-        for (field, topo_descriptor) in self.output_fields.iteritems():
+        for (field, topo_descriptor) in self.output_fields.items():
             if (topo_descriptor is None):
                 req = None
             else:
diff --git a/hysop/operator/parameter_plotter.py b/hysop/operator/parameter_plotter.py
index d41ce08b44e4b447249dec8f8ae4b185e044a18b..85a058eafefc540843feb24b47f6ff749dc77b40 100644
--- a/hysop/operator/parameter_plotter.py
+++ b/hysop/operator/parameter_plotter.py
@@ -2,12 +2,13 @@ from abc import abstractmethod
 from hysop.tools.types import to_tuple, check_instance, first_not_None
 from hysop.tools.numpywrappers import npw
 from hysop.tools.io_utils import IO
+from hysop.core.graph.graph import op_apply
 from hysop.core.graph.computational_graph import ComputationalGraphOperator
 from hysop.parameters.scalar_parameter import ScalarParameter
 from hysop.parameters.tensor_parameter import TensorParameter
+from hysop.backend.host.host_operator import HostOperatorBase
 
-
-class PlottingOperator(ComputationalGraphOperator):
+class PlottingOperator(HostOperatorBase):
     """
     Base operator for plotting.
     """
@@ -15,6 +16,18 @@ class PlottingOperator(ComputationalGraphOperator):
     def supports_mpi(cls):
         return True
 
+    def __new__(cls, name=None,
+                 dump_dir=None,
+                 update_frequency=1,
+                 save_frequency=100,
+                 axes_shape=(1,),
+                 figsize=(30, 18),
+                 visu_rank=0,
+                 fig=None,
+                 axes=None,
+                 **kwds):
+        return super(PlottingOperator, cls).__new__(cls, **kwds)
+
     def __init__(self, name=None,
                  dump_dir=None,
                  update_frequency=1,
@@ -58,6 +71,9 @@ class PlottingOperator(ComputationalGraphOperator):
         self.running = True
         self.plt = plt
 
+        self.update_ioparams = self.io_params.clone(frequency=self.update_frequency, with_last=True)
+        self.save_ioparams   = self.io_params.clone(frequency=self.save_frequency, with_last=True)
+
     def draw(self):
         if (not self.running):
             return
@@ -65,18 +81,19 @@ class PlottingOperator(ComputationalGraphOperator):
         self.fig.show()
         self.plt.pause(0.001)
 
+    @op_apply
     def apply(self, **kwds):
         self._update(**kwds)
         self._save(**kwds)
 
     def _update(self, simulation, **kwds):
-        if simulation.should_dump(frequency=self.update_frequency, with_last=True):
+        if self.update_ioparams.should_dump(simulation=simulation):
             self.update(simulation=simulation, **kwds)
             if self.should_draw:
                 self.draw()
 
     def _save(self, simulation, **kwds):
-        if simulation.should_dump(frequency=self.save_frequency, with_last=True):
+        if self.save_ioparams.should_dump(simulation=simulation):
             self.save(simulation=simulation, **kwds)
 
     @abstractmethod
@@ -131,7 +148,7 @@ class ParameterPlotter(PlottingOperator):
 
             parameters = {}
             axes_shape = (1,)*2
-            for (pos, params) in _parameters.iteritems():
+            for (pos, params) in _parameters.items():
                 pos = to_tuple(pos)
                 pos = (2-len(pos))*(0,) + pos
                 check_instance(pos, tuple, values=int)
@@ -149,7 +166,7 @@ class ParameterPlotter(PlottingOperator):
                     raise TypeError(type(params))
                 check_instance(params, dict, keys=str, values=TensorParameter)
                 _params = {}
-                for (pname, p) in params.iteritems():
+                for (pname, p) in params.items():
                     if isinstance(p, ScalarParameter):
                         _params[pname] = p
                     else:
@@ -167,10 +184,10 @@ class ParameterPlotter(PlottingOperator):
         data = {}
         lines = {}
         times = npw.empty(shape=(alloc_size,), dtype=npw.float32)
-        for (pos, params) in parameters.iteritems():
+        for (pos, params) in parameters.items():
             params_data = {}
             params_lines = {}
-            for (pname, p) in params.iteritems():
+            for (pname, p) in params.items():
                 pdata = npw.empty(shape=(alloc_size,), dtype=p.dtype)
                 pline = self.get_axes(pos).plot([], [], label=pname)[0]
                 params_data[p] = pdata
@@ -205,16 +222,16 @@ class ParameterPlotter(PlottingOperator):
             times = npw.empty(shape=(2*self.times.size,), dtype=self.times.dtype)
             times[:self.times.size] = self.times
             self.times = times
-            for (pos, params) in self.data.iteritems():
-                for (p, pdata) in params.iteritems():
+            for (pos, params) in self.data.items():
+                for (p, pdata) in params.items():
                     new_pdata = npw.empty(shape=(2*pdata.size,), dtype=pdata.dtype)
                     new_pdata[:pdata.size] = pdata
                     params[p] = new_pdata
 
         times, data, lines = self.times, self.data, self.lines
         times[self.counter] = simulation.t()
-        for (pos, params) in self.parameters.iteritems():
-            for (pname, p) in params.iteritems():
+        for (pos, params) in self.parameters.items():
+            for (pname, p) in params.items():
                 data[pos][p][self.counter] = p()
                 lines[pos][p].set_xdata(times[:self.counter])
                 lines[pos][p].set_ydata(data[pos][p][:self.counter])
diff --git a/hysop/operator/penalization.py b/hysop/operator/penalization.py
index e9304b830198a9eefbc6a073a9beb22684a17e90..010a0f01f930bf7d9743b67587397e76bd28bac2 100644
--- a/hysop/operator/penalization.py
+++ b/hysop/operator/penalization.py
@@ -19,7 +19,7 @@ from hysop.backend.host.python.operator.penalization import PythonPenalizeVortic
 
 
 class PenalizeVorticity(ComputationalGraphNodeFrontend):
-    """
+    r"""
     Solve
     \f{eqnarray*}
     \frac{\partial w}{\partial t} &=& \lambda\chi_s\nabla\times(v_D - v)
@@ -38,12 +38,29 @@ class PenalizeVorticity(ComputationalGraphNodeFrontend):
     def default_implementation(cls):
         return Implementation.PYTHON
 
+    @debug
+    def __new__(cls, obstacles, variables,
+                 velocity, vorticity,
+                 dt, coeff=None, ubar=None, formulation=None,
+                 implementation=None, **kwds):
+        return super(PenalizeVorticity, cls).__new__(cls,
+            velocity=velocity,
+            vorticity=vorticity,
+            coeff=coeff,
+            ubar=ubar,
+            obstacles=obstacles,
+            dt=dt,
+            formulation=formulation,
+            variables=variables,
+            implementation=implementation,
+            **kwds)
+
     @debug
     def __init__(self, obstacles, variables,
                  velocity, vorticity,
                  dt, coeff=None, ubar=None, formulation=None,
                  implementation=None, **kwds):
-        """
+        r"""
         Parameters
         ----------
         obstacles : dict or list of :class:`~hysop.Field`
diff --git a/hysop/operator/poisson.py b/hysop/operator/poisson.py
index a5a87dd5d238543852e69605666bc2b0ebb41ac2..8f4e995a915aad11c1d0aba37283f0ea57acefa7 100644
--- a/hysop/operator/poisson.py
+++ b/hysop/operator/poisson.py
@@ -3,7 +3,7 @@
 Poisson solver frontend.
 """
 from hysop.constants         import Implementation
-from hysop.tools.types       import check_instance
+from hysop.tools.types       import check_instance, first_not_None
 from hysop.tools.enum        import EnumFactory
 from hysop.tools.decorators  import debug
 from hysop.fields.continuous_field import Field
@@ -42,6 +42,14 @@ class Poisson(SpectralComputationalGraphNodeFrontend):
     def default_implementation(cls):
         return Implementation.PYTHON
 
+    @debug
+    def __new__(cls, Fin, Fout, variables,
+                implementation=None, base_kwds=None, **kwds):
+        return super(Poisson, cls).__new__(cls,
+                Fin=Fin, Fout=Fout,
+                variables=variables, base_kwds=base_kwds,
+                implementation=implementation, **kwds)
+
     @debug
     def __init__(self, Fin, Fout, variables,
                 implementation=None, base_kwds=None, **kwds):
@@ -70,7 +78,7 @@ class Poisson(SpectralComputationalGraphNodeFrontend):
             Keywords arguments that will be passed towards implementation
             poisson operator __init__.
         """
-        base_kwds = base_kwds or dict()
+        base_kwds = first_not_None(base_kwds, {})
 
         check_instance(Fout,  Field)
         check_instance(Fin, Field)
diff --git a/hysop/operator/poisson_curl.py b/hysop/operator/poisson_curl.py
index 5937836288ba6ca9034ffd19cd035cb9e46eca8f..27f5067cfc85c69f2ea8584031d4f6e439c0b37f 100644
--- a/hysop/operator/poisson_curl.py
+++ b/hysop/operator/poisson_curl.py
@@ -3,7 +3,7 @@
 PoissonCurl solver frontend.
 """
 from hysop.constants         import Implementation
-from hysop.tools.types       import check_instance
+from hysop.tools.types       import check_instance, first_not_None
 from hysop.tools.enum        import EnumFactory
 from hysop.tools.decorators  import debug
 from hysop.fields.continuous_field import Field
@@ -43,16 +43,25 @@ class PoissonCurl(SpectralComputationalGraphNodeFrontend):
     def default_implementation(cls):
         return Implementation.PYTHON
 
+    @debug
+    def __new__(cls, velocity, vorticity, variables,
+                implementation=None, base_kwds=None, **kwds):
+        base_kwds = first_not_None(base_kwds, {})
+        return super(PoissonCurl, cls).__new__(cls,
+                velocity=velocity, vorticity=vorticity,
+                variables=variables, base_kwds=base_kwds,
+                implementation=implementation, **kwds)
+
     @debug
     def __init__(self, velocity, vorticity, variables,
                 implementation=None, base_kwds=None, **kwds):
         """
-        Initialize a PoissonCurl operator frontend for 2D or 3D 
+        Initialize a PoissonCurl operator frontend for 2D or 3D
         streamfunction-vorticity formulations.
 
         in  = W (vorticity)
         out = U (velocity)
-        
+
         Vorticity also becomes an output if projection or diffusion is enabled.
 
         PoissonCurl does more than just solving the Poisson equation for velocity:
@@ -89,7 +98,7 @@ class PoissonCurl(SpectralComputationalGraphNodeFrontend):
             If diffusion is not enabled, this parameter is ignored.
         projection: hysop.constants.FieldProjection or positive integer, optional
             Project vorticity such that resolved velocity is divergence free (for 3D fields).
-            When active, projection is done prior to every solve, unless projection is 
+            When active, projection is done prior to every solve, unless projection is
             an integer in which case it is done every given steps.
             This parameter is ignored for 2D fields and defaults to no projection.
         implementation: Implementation, optional, defaults to None
@@ -107,7 +116,7 @@ class PoissonCurl(SpectralComputationalGraphNodeFrontend):
         A PoissonCurl operator implementation should at least support
         the following __init__ attributes: velocity, vorticity, variables
         """
-        base_kwds = base_kwds or dict()
+        base_kwds = first_not_None(base_kwds, {})
 
         check_instance(velocity,  Field)
         check_instance(vorticity, Field)
diff --git a/hysop/operator/redistribute.py b/hysop/operator/redistribute.py
index 49208c93b69880236162ec7135f1e895fffb0661..e08ffb9f872258b92c47ecf3467f711237c6cec4 100644
--- a/hysop/operator/redistribute.py
+++ b/hysop/operator/redistribute.py
@@ -7,8 +7,8 @@
 """
 
 from abc import ABCMeta, abstractmethod
-from hysop.constants import DirectionLabels, Implementation
-from hysop.tools.types import check_instance, to_set, to_tuple
+from hysop.constants import DirectionLabels
+from hysop.tools.types import check_instance, to_set, to_tuple, first_not_None
 from hysop.tools.decorators import debug
 from hysop.fields.continuous_field import Field
 from hysop.topology.topology import Topology
@@ -41,6 +41,16 @@ class Redistribute(ComputationalGraphNodeGenerator):
         assert issubclass(cls, RedistributeOperatorBase), \
             '{} is not a RedistributeOperatorBase.'.format(cls)
 
+    def __new__(cls, variables, source_topos, target_topo, components=None,
+                name=None, pretty_name=None,
+                base_kwds=None, **kwds):
+        base_kwds = first_not_None(base_kwds, {})
+        return super(Redistribute, cls).__new__(cls,
+                                                name=name, pretty_name=pretty_name,
+                                                candidate_input_tensors=None,
+                                                candidate_output_tensors=None,
+                                                **base_kwds)
+
     def __init__(self, variables, source_topos, target_topo, components=None,
                  name=None, pretty_name=None,
                  base_kwds=None, **kwds):
@@ -54,8 +64,6 @@ class Redistribute(ComputationalGraphNodeGenerator):
             candidate source mesh topologies (for each field the optimal source topology will be choosed)
         target_topo: :class:`~hysop.topology.topology.Topology` or dict(Field, Topology)
             target mesh topology for all variables (or per variable if a dictionnary is passed)
-        components: None, int or array like of ints or dict(Field,components)
-            which component of the fields must be distributed (default = all components)
         name: string
             prefix for generated operator names
         pretty_name: string
@@ -70,7 +78,7 @@ class Redistribute(ComputationalGraphNodeGenerator):
         """
         assert 'source_topo' not in kwds
 
-        base_kwds = base_kwds or dict()
+        base_kwds = first_not_None(base_kwds, {})
         variables = to_tuple(variables)
         super(Redistribute, self).__init__(name=name, pretty_name=pretty_name,
                                            candidate_input_tensors=variables,
@@ -136,7 +144,7 @@ class Redistribute(ComputationalGraphNodeGenerator):
         return nodes
 
     @staticmethod
-    def _select_redistribute(variable, source_topos, target_topo, components, **kwds):
+    def _select_redistribute(variable, source_topos, target_topo, **kwds):
         assert target_topo not in source_topos
         best_redis = None
         for source_topo in source_topos:
@@ -175,7 +183,7 @@ class Redistribute(ComputationalGraphNodeGenerator):
             return rhs
 
     @staticmethod
-    def _get_compatible_redistribute(variable, source_topo, target_topo, components, **kwds):
+    def _get_compatible_redistribute(variable, source_topo, target_topo, **kwds):
         # look from highest prority operator to smallest priority  operators
         # if nothing is found return None
         for cls, _ in sorted(Redistribute.__redistribute_operators.items(), key=lambda x: x[1]):
diff --git a/hysop/operator/solenoidal_projection.py b/hysop/operator/solenoidal_projection.py
index 15ea4c72aeb02b0a8f42527d890261c9aace2d31..a60cec0765b0f972d8287ad55bbbf5f4a1f6b13c 100644
--- a/hysop/operator/solenoidal_projection.py
+++ b/hysop/operator/solenoidal_projection.py
@@ -4,7 +4,7 @@
 SolenoidalProjection solver frontend.
 """
 from hysop.constants         import Implementation
-from hysop.tools.types       import check_instance
+from hysop.tools.types       import check_instance, first_not_None
 from hysop.tools.enum        import EnumFactory
 from hysop.tools.decorators  import debug
 from hysop.fields.continuous_field import Field
@@ -17,10 +17,10 @@ from hysop.backend.device.opencl.operator.solenoidal_projection import OpenClSol
 class SolenoidalProjection(SpectralComputationalGraphNodeFrontend):
     """
     Interface for solenoidal projection (project a 3d field F such that div(F)=0)
-    Available implementations are: 
+    Available implementations are:
         *PYTHON (numpy fft based solver)
     """
-    
+
     __implementations = {
             Implementation.PYTHON: PythonSolenoidalProjection,
             Implementation.OPENCL: OpenClSolenoidalProjection
@@ -29,24 +29,33 @@ class SolenoidalProjection(SpectralComputationalGraphNodeFrontend):
     @classmethod
     def implementations(cls):
         return cls.__implementations
-    
+
     @classmethod
     def default_implementation(cls):
         return Implementation.PYTHON
-    
+
     @debug
-    def __init__(self, input_field, output_field, variables, 
+    def __new__(cls, input_field, output_field, variables,
+                input_field_div=None, output_field_div=None,
+                implementation=None, base_kwds=None, **kwds):
+        return super(SolenoidalProjection, cls).__new__(cls,
+                input_field=input_field, output_field=output_field,
+                input_field_div=input_field_div, output_field_div=output_field_div,
+                variables=variables, base_kwds=base_kwds, implementation=implementation, **kwds)
+
+    @debug
+    def __init__(self, input_field, output_field, variables,
                 input_field_div=None, output_field_div=None,
                 implementation=None, base_kwds=None, **kwds):
         """
         Initialize a SolenoidalProjection operator frontend for 3D solenoidal projection.
-        
+
         Fin  (3d input_field)
         Fout (3d output_field)
-        
+
         Solves
            laplacian(Fout) = laplacian(Fin) - grad(div(Fin))
-        
+
         Parameters
         ----------
         input_field: Field
@@ -66,7 +75,7 @@ class SolenoidalProjection(SpectralComputationalGraphNodeFrontend):
             Base class keywords arguments.
             If None, an empty dict will be passed.
         kwds:
-            Keywords arguments that will be passed towards implementation 
+            Keywords arguments that will be passed towards implementation
             poisson operator __init__.
 
         Notes
@@ -74,7 +83,7 @@ class SolenoidalProjection(SpectralComputationalGraphNodeFrontend):
         A SolenoidalProjection operator implementation should inherit from
         hysop.operator.base.solenoidal_projection.SolenoidalProjectionBase.
         """
-        base_kwds = base_kwds or dict()
+        base_kwds = first_not_None(base_kwds, {})
 
         check_instance(input_field,  Field)
         check_instance(output_field, Field)
@@ -82,11 +91,11 @@ class SolenoidalProjection(SpectralComputationalGraphNodeFrontend):
         check_instance(output_field_div, Field, allow_none=True)
         check_instance(variables, dict, keys=Field, values=CartesianTopologyDescriptors)
         check_instance(base_kwds, dict, keys=str)
-        
+
         dim   = input_field.domain.dim
         icomp = input_field.nb_components
         ocomp = output_field.nb_components
-        
+
         if (input_field.domain != output_field.domain):
             msg = 'input_field and output_field do not share the same domain.'
             raise ValueError(msg)
@@ -108,6 +117,7 @@ class SolenoidalProjection(SpectralComputationalGraphNodeFrontend):
             msg=msg.format(output_field_div.nb_components)
             raise RuntimeError(msg)
 
-        super(SolenoidalProjection, self).__init__(input_field=input_field, output_field=output_field, 
+        super(SolenoidalProjection, self).__init__(input_field=input_field, output_field=output_field,
                 input_field_div=input_field_div, output_field_div=output_field_div,
                 variables=variables, base_kwds=base_kwds, implementation=implementation, **kwds)
+
diff --git a/hysop/operator/spatial_filtering.py b/hysop/operator/spatial_filtering.py
index 7facb047c99ba255df3341420c0151904e414def..4abd1c09ea6b979c6796e22fe7bcc1edb7b7c604 100644
--- a/hysop/operator/spatial_filtering.py
+++ b/hysop/operator/spatial_filtering.py
@@ -17,8 +17,31 @@ FilteringMethod = EnumFactory.create('FilteringMethod',
 ['SPECTRAL', 'REMESH', 'POLYNOMIAL', 'SUBGRID'])
 
 class SpatialFilterFrontend(MultiComputationalGraphNodeFrontend):
+
+    def __new__(cls, input_variable, output_variable,
+                 filtering_method, implementation=None,
+                 base_kwds=None,
+                 **kwds):
+        return super(SpatialFilterFrontend, cls).__new__(cls,
+                input_field=None, input_topo=None,
+                output_field=None, output_topo=None,
+                implementation_key=filtering_method,
+                implementation=implementation,
+                base_kwds=base_kwds, **kwds)
+
+    def __new__(cls, input_variable, output_variable,
+                 filtering_method, implementation=None,
+                 base_kwds=None,
+                 **kwds):
+        return super(SpatialFilterFrontend, cls).__new__(cls,
+                input_field=None, input_topo=None,
+                output_field=None, output_topo=None,
+                implementation_key=filtering_method,
+                implementation=implementation,
+                base_kwds=base_kwds, **kwds)
+
     def __init__(self, input_variable, output_variable,
-                 filtering_method, implementation=None, 
+                 filtering_method, implementation=None,
                  base_kwds=None,
                  **kwds):
         """
@@ -61,7 +84,7 @@ class SpatialFilterFrontend(MultiComputationalGraphNodeFrontend):
         super(SpatialFilterFrontend, self).__init__(input_field=input_field, input_topo=input_topo,
                                                     output_field=output_field, output_topo=output_topo,
                                                     implementation_key=filtering_method,
-                                                    implementation=implementation, 
+                                                    implementation=implementation,
                                                     base_kwds=base_kwds, **kwds)
 
 
@@ -139,12 +162,34 @@ class InterpolationFilterFrontend(SpatialFilterFrontend):
                 FilteringMethod.POLYNOMIAL: Implementation.PYTHON,
         }
         return adi
-    
+
 
 class SpatialFilter(ComputationalGraphNodeGenerator):
     """
     Graphnode generator to generate interpolation or restriction filter for multiple fields at once.
     """
+
+    @debug
+    def __new__(cls, input_variables, output_variables,
+                 filtering_method, implementation=None,
+                 base_kwds=None, **kwds):
+        base_kwds = first_not_None(base_kwds, {})
+        return super(SpatialFilter, cls).__new__(cls,
+                candidate_input_tensors=None,
+                candidate_output_tensors=None,
+                **base_kwds)
+
+    @debug
+    def __new__(cls, input_variables, output_variables,
+                 filtering_method, implementation=None,
+                 base_kwds=None,
+                 **kwds):
+        base_kwds = first_not_None(base_kwds, {})
+        return super(SpatialFilter, cls).__new__(cls,
+                candidate_input_tensors=None,
+                candidate_output_tensors=None,
+                **base_kwds)
+
     @debug
     def __init__(self, input_variables, output_variables,
                  filtering_method, implementation=None,
@@ -196,7 +241,7 @@ class SpatialFilter(ComputationalGraphNodeGenerator):
                 candidate_input_tensors=None,
                 candidate_output_tensors=None,
                 **base_kwds)
-            
+
         self._input_fields     = input_fields
         self._output_fields    = output_fields
         self._input_variables  = input_variables
@@ -211,32 +256,32 @@ class SpatialFilter(ComputationalGraphNodeGenerator):
         for (ifield, ofield) in zip(self._input_fields, self._output_fields):
             stopo = ComputationalGraphNode.get_topo_discretization(self._input_variables, ifield)
             ttopo = ComputationalGraphNode.get_topo_discretization(self._output_variables, ofield)
-            check_instance(stopo, tuple, values=(int,long))
-            check_instance(ttopo, tuple, values=(int,long))
+            check_instance(stopo, tuple, values=int)
+            check_instance(ttopo, tuple, values=int)
             assert len(stopo)==len(ttopo)
 
             fm    = self._fm
             impl  = self._impl
             kwds  = self._kwds.copy()
-            
+
             # if source topology is destination topology there is nothing to be done
             if (ttopo == stopo):
                 continue
-            elif all(ns <= nt for (ns, nt) in zip(stopo, ttopo)): 
+            elif all(ns <= nt for (ns, nt) in zip(stopo, ttopo)):
                 # here we build an interpolation filter operator
                 node = InterpolationFilterFrontend(
                         input_variable=(ifield,stopo),
-                        output_variable=(ofield,ttopo), 
+                        output_variable=(ofield,ttopo),
                         filtering_method=fm,
-                        implementation=impl, 
+                        implementation=impl,
                         **kwds)
-            elif all(ns >= nt for (ns, nt) in zip(stopo, ttopo)): 
+            elif all(ns >= nt for (ns, nt) in zip(stopo, ttopo)):
                 # here we build a restriction filter operator
                 node = RestrictionFilterFrontend(
                         input_variable=(ifield,stopo),
-                        output_variable=(ofield,ttopo), 
+                        output_variable=(ofield,ttopo),
                         filtering_method=fm,
-                        implementation=impl, 
+                        implementation=impl,
                         **kwds)
             else:
                 msg='Inconsistant topology descriptors {} and {} for field {} and {}, '
diff --git a/hysop/operator/tests/test_absorption.py b/hysop/operator/tests/test_absorption.py
index 0e0f20c5d1343797123dae94f53c314fff196849..686eab917010b289a2dea74825d96576f5a62a27 100644
--- a/hysop/operator/tests/test_absorption.py
+++ b/hysop/operator/tests/test_absorption.py
@@ -2,7 +2,9 @@
 Test of vorticity absorption
 """
 import random
-from hysop.deps import it, sm, random
+import itertools as it
+import sympy as sm
+
 from hysop.constants import HYSOP_REAL
 from hysop.testsenv import __ENABLE_LONG_TESTS__, __HAS_OPENCL_BACKEND__
 from hysop.testsenv import opencl_failed, iter_clenv
@@ -72,9 +74,9 @@ class TestVorticityAbsorption(object):
 
         domain = Box(length=(1,)*dim)
         velo = Field(domain=domain, name='velo', dtype=dtype,
-                     nb_components=3, register_object=False)
+                     nb_components=3)
         vorti = Field(domain=domain, name='vorti', dtype=dtype,
-                      nb_components=3, register_object=False)
+                      nb_components=3)
 
         self._test_one(shape=shape, dim=dim, dtype=dtype,
                        domain=domain, velo=velo, vorti=vorti,
@@ -82,17 +84,17 @@ class TestVorticityAbsorption(object):
 
     def _test_one(self, shape, dim, dtype,
                   domain, velo, vorti, start_coord):
-        print '\nTesting {}D VorticityAbsorption: dtype={} shape={}'.format(
-                dim, dtype.__name__, shape)
+        print('\nTesting {}D VorticityAbsorption: dtype={} shape={}'.format(
+                dim, dtype.__name__, shape))
 
         self.t.value = random.random()
         self.dt.value = random.random()
         self.flowrate.value = npw.random.random(size=(3, ))
-        print ' >Parameter t has been set to {}.'.format(self.t())
-        print ' >Parameter dt has been set to {}.'.format(self.dt())
-        print ' >Start coord : {}.'.format(start_coord)
-        print ' >Flowrate : {}.'.format(self.flowrate())
-        print ' >Testing all implementations:'
+        print(' >Parameter t has been set to {}.'.format(self.t()))
+        print(' >Parameter dt has been set to {}.'.format(self.dt()))
+        print(' >Start coord : {}.'.format(start_coord))
+        print(' >Flowrate : {}.'.format(self.flowrate()))
+        print(' >Testing all implementations:')
 
         implementations = VorticityAbsorption.implementations()
         variables = {velo: shape, vorti: shape}
@@ -103,11 +105,11 @@ class TestVorticityAbsorption(object):
                              name='vorticity_absorption_{}'.format(str(impl).lower()))
             if impl is Implementation.PYTHON:
                 msg='   *Python: '
-                print msg,
+                print(msg, end=' ')
                 yield VorticityAbsorption(start_coord=start_coord,
                                           flowrate=self.flowrate,
                                           **base_kwds)
-                print
+                print()
             else:
                 msg='Unknown implementation to test {}.'.format(impl)
                 raise NotImplementedError(msg)
@@ -163,11 +165,11 @@ class TestVorticityAbsorption(object):
             iname = 'F{}'.format(i)
             mask = npw.isfinite(field)
             if not mask.all():
-                print
-                print field
-                print
-                print field[~mask]
-                print
+                print()
+                print(field)
+                print()
+                print(field[~mask])
+                print()
                 msg = msg0.format(iname)
                 raise ValueError(msg)
 
@@ -180,17 +182,17 @@ class TestVorticityAbsorption(object):
             dinf = npw.max(dist)
             deps = int(npw.ceil(dinf/eps))
             if (deps < 200):
-                print '{}eps, '.format(deps),
+                print('{}eps, '.format(deps), end=' ')
                 continue
             has_nan = npw.any(npw.isnan(fout))
             has_inf = npw.any(npw.isinf(fout))
-            print
-            print
-            print 'Test output comparisson failed for component {}:'.format(i)
-            print ' *has_nan: {}'.format(has_nan)
-            print ' *has_inf: {}'.format(has_inf)
-            print ' *dinf={} ({} eps)'.format(dinf, deps)
-            print
+            print()
+            print()
+            print('Test output comparison failed for component {}:'.format(i))
+            print(' *has_nan: {}'.format(has_nan))
+            print(' *has_inf: {}'.format(has_inf))
+            print(' *dinf={} ({} eps)'.format(dinf, deps))
+            print()
             msg = 'Test failed on component {} for implementation {}.'.format(i, impl)
             raise RuntimeError(msg)
 
diff --git a/hysop/operator/tests/test_analytic.py b/hysop/operator/tests/test_analytic.py
index 83e39704728eec7b00a00ca6d9be03f601961ded..305454c167ead3f9f7ca988b9461bde3dd1f9a18 100644
--- a/hysop/operator/tests/test_analytic.py
+++ b/hysop/operator/tests/test_analytic.py
@@ -2,7 +2,9 @@
 Test of fields defined with an analytic formula.
 """
 import random
-from hysop.deps import it, sm, random
+import itertools as it
+import sympy as sm
+
 from hysop.constants import HYSOP_REAL
 from hysop.testsenv import __ENABLE_LONG_TESTS__, __HAS_OPENCL_BACKEND__
 from hysop.testsenv import opencl_failed, iter_clenv
@@ -55,19 +57,19 @@ class TestAnalyticField(object):
             coords = frame.coords
 
             def gen_F0():
-                kis = tuple(random.randint(1,5) for _ in xrange(dim))
+                kis = tuple(random.randint(1,5) for _ in range(dim))
                 qis = tuple(npw.random.rand(dim).round(decimals=3).tolist())
                 basis = tuple( (sm.cos(ki*xi+qi), sm.sin(ki*xi+qi))
                         for (ki,qi,xi) in zip(kis, qis, coords) )
                 F0 = sm.Integer(1) / (sm.Integer(1) + cls.t.s)
-                for i in xrange(dim):
+                for i in range(dim):
                     F0 *= random.choice(basis[i])
                 return F0
 
             def gen_F1():
-                base = tuple(tuple( coords[i]**k for k in xrange(5) ) for i in xrange(dim))
+                base = tuple(tuple( coords[i]**k for k in range(5) ) for i in range(dim))
                 base = tuple(set(npw.prod(x) for x in it.product(*base)))
-                base = tuple(set(random.choice(base) for _ in xrange(3)))
+                base = tuple(set(random.choice(base) for _ in range(3)))
                 F1 = sm.Integer(1) / (sm.Integer(1) + cls.t.s)
                 F1 *= (base * (npw.random.rand(len(base)).round(2))).sum()
                 return F1
@@ -111,7 +113,7 @@ class TestAnalyticField(object):
 
         domain = Box(length=(1,)*dim)
         F = Field(domain=domain, name='F', dtype=dtype,
-                nb_components=2, register_object=False)
+                nb_components=2)
 
         self._test_one(shape=shape, dim=dim, dtype=dtype,
                 domain=domain, F=F)
@@ -120,14 +122,14 @@ class TestAnalyticField(object):
     def _test_one(self, shape, dim, dtype,
             domain, F):
 
-        print '\nTesting {}D AnalyticField: dtype={} shape={}'.format(
-                dim, dtype.__name__, shape)
-        print ' >Input analytic functions:'
+        print('\nTesting {}D AnalyticField: dtype={} shape={}'.format(
+                dim, dtype.__name__, shape))
+        print(' >Input analytic functions:')
         for (i,Fi) in enumerate(self.analytic_expressions[dim]['F']):
-            print '   *F{}(x,t) = {}'.format(i, Fi)
+            print('   *F{}(x,t) = {}'.format(i, Fi))
         self.t.value = random.random()
-        print ' >Parameter t has been set to {}.'.format(self.t())
-        print ' >Testing all implementations:'
+        print(' >Parameter t has been set to {}.'.format(self.t()))
+        print(' >Testing all implementations:')
 
         implementations = AnalyticScalarField.implementations()
 
@@ -141,22 +143,22 @@ class TestAnalyticField(object):
                              name='analytic_{}'.format(str(impl).lower()))
             if impl is Implementation.PYTHON:
                 msg='   *Python: '
-                print msg,
+                print(msg, end=' ')
                 yield AnalyticField(formula=self.__analytic_init,
                                     extra_input_kwds={'fns':fns, 't':self.t},
                                     **base_kwds)
-                print
+                print()
             elif impl is Implementation.OPENCL:
                 assert base_kwds['field'] is F
                 msg='   *Opencl: '
-                print msg
+                print(msg)
                 for cl_env in iter_clenv():
                     msg='      >platform {}, device {}:'.format(cl_env.platform.name.strip(),
                                                           cl_env.device.name.strip())
-                    print msg,
+                    print(msg, end=' ')
                     yield AnalyticField(cl_env=cl_env, formula=exprs, **base_kwds)
-                    print
-                print
+                    print()
+                print()
             else:
                 msg='Unknown implementation to test {}.'.format(impl)
                 raise NotImplementedError(msg)
@@ -188,11 +190,11 @@ class TestAnalyticField(object):
             iname = 'F{}'.format(i)
             mask = npw.isfinite(field)
             if not mask.all():
-                print
-                print field
-                print
-                print field[~mask]
-                print
+                print()
+                print(field)
+                print()
+                print(field[~mask])
+                print()
                 msg = msg0.format(iname)
                 raise ValueError(msg)
 
@@ -206,44 +208,44 @@ class TestAnalyticField(object):
             dinf = npw.max(dist)
             deps = int(npw.ceil(dinf/eps))
             if (deps < 10):
-                print '{}eps, '.format(deps),
+                print('{}eps, '.format(deps), end=' ')
                 continue
             has_nan = npw.any(npw.isnan(fout))
             has_inf = npw.any(npw.isinf(fout))
 
-            print
-            print
-            print 'Test output comparisson for {} failed for component {}:'.format(iname, i)
-            print ' *has_nan: {}'.format(has_nan)
-            print ' *has_inf: {}'.format(has_inf)
-            print ' *dinf={} ({} eps)'.format(dinf, deps)
-            print
+            print()
+            print()
+            print('Test output comparison for {} failed for component {}:'.format(iname, i))
+            print(' *has_nan: {}'.format(has_nan))
+            print(' *has_inf: {}'.format(has_inf))
+            print(' *dinf={} ({} eps)'.format(dinf, deps))
+            print()
             if cls.enable_debug_mode:
-                print 'REFERENCE INPUTS:'
+                print('REFERENCE INPUTS:')
                 for (i,w) in enumerate(Wref):
-                    print 'W{}'.format(i)
-                    print w
-                    print
+                    print('W{}'.format(i))
+                    print(w)
+                    print()
                 if (name == 'Psi'):
-                    print 'REFERENCE OUTPUT:'
+                    print('REFERENCE OUTPUT:')
                     for (i,u) in enumerate(Psiref):
-                        print 'Psi{}'.format(i)
-                        print u
-                        print
-                    print
-                    print 'OPERATOR {} OUTPUT:'.format(op.name.upper())
-                    print
+                        print('Psi{}'.format(i))
+                        print(u)
+                        print()
+                    print()
+                    print('OPERATOR {} OUTPUT:'.format(op.name.upper()))
+                    print()
                     for (i,u) in enumerate(Psiout):
-                        print 'Psi{}'.format(i)
-                        print u
-                        print
+                        print('Psi{}'.format(i))
+                        print(u)
+                        print()
                 else:
-                    print 'MODIFIED INPUTS:'
+                    print('MODIFIED INPUTS:')
                     for (i,w) in enumerate(Wout):
-                        print 'W{}'.format(i)
-                        print w
-                        print
-                print
+                        print('W{}'.format(i))
+                        print(w)
+                        print()
+                print()
 
             msg = 'Test failed for {} on component {} for implementation {}.'.format(iname, i, impl)
             raise RuntimeError(msg)
diff --git a/hysop/operator/tests/test_bilevel_advection.py b/hysop/operator/tests/test_bilevel_advection.py
index 12de79395a90865c5399ebb14c7cb64b001dda07..9854135c302d6ddf754fa2c1305d503b19607a8c 100644
--- a/hysop/operator/tests/test_bilevel_advection.py
+++ b/hysop/operator/tests/test_bilevel_advection.py
@@ -1,4 +1,5 @@
-from hysop.deps import sys
+import sys
+
 from hysop.testsenv import __ENABLE_LONG_TESTS__
 from hysop.testsenv import iter_clenv
 from hysop.tools.numpywrappers import npw
@@ -60,15 +61,15 @@ class TestBilevelAdvectionOperator(object):
         domain = Box(length=(2*npw.pi,)*dim)
         for dtype in flt_types:
             Vin  = Field(domain=domain, name='Vin', dtype=dtype,
-                    nb_components=dim, register_object=False)
+                    nb_components=dim)
             Sin  = Field(domain=domain, name='Sin', dtype=dtype,
-                    nb_components=1, register_object=False)
+                    nb_components=1)
             Sout = Field(domain=domain, name='Sout', dtype=dtype,
-                    nb_components=1, register_object=False)
+                    nb_components=1)
             for time_integrator in time_integrators:
                 for remesh_kernel in remesh_kernels:
                     for velocity_cfl in velocity_cfls:
-                        print
+                        print()
                         self._test_one(time_integrator=time_integrator, remesh_kernel=remesh_kernel,
                                        shape=shape, shape_s=shape_s, dim=dim, dtype=dtype,
                                        is_inplace=is_inplace, domain=domain,
@@ -94,9 +95,9 @@ class TestBilevelAdvectionOperator(object):
             dtype, is_inplace, domain, velocity_cfl,
             Vin, Sin, Sout):
 
-        print '\nTesting {}D BilevelAdvection_{}_{}: inplace={} dtype={} shape={}, cfl={}'.format(
+        print('\nTesting {}D BilevelAdvection_{}_{}: inplace={} dtype={} shape={}, cfl={}'.format(
                 dim, time_integrator.name(), remesh_kernel,
-                is_inplace, dtype.__name__, shape, velocity_cfl),
+                is_inplace, dtype.__name__, shape, velocity_cfl), end=' ')
         if is_inplace:
             vin = Vin
             sin, sout = Sin, Sin
@@ -111,9 +112,8 @@ class TestBilevelAdvectionOperator(object):
         dt.value = (0.99 * velocity_cfl) / (max(shape)-1)
 
         ref_impl = Implementation.FORTRAN
-        implementations = DirectionalAdvection.implementations().keys()
-        implementations += Advection.implementations().keys()
-        implementations = list(set(implementations))
+        implementations = set(DirectionalAdvection.implementations().keys()).union(Advection.implementations().keys())
+        implementations = list(implementations)
         assert (ref_impl in implementations)
         implementations.remove(ref_impl)
         implementations = [ref_impl] + implementations
@@ -121,8 +121,8 @@ class TestBilevelAdvectionOperator(object):
         implementations.remove(Implementation.PYTHON) # no bilevel support in python
 
         method = {
-            TimeIntegrator: time_integrator, 
-            Remesh: remesh_kernel, 
+            TimeIntegrator: time_integrator,
+            Remesh: remesh_kernel,
         }
 
         def iter_impl(impl, method=method):
@@ -136,7 +136,7 @@ class TestBilevelAdvectionOperator(object):
                         msg='platform {}, device {}, {}::{}'.format(cl_env.platform.name.strip(),
                                                             cl_env.device.name.strip(),
                                                             type(interp_method), interp_method)
-                        da = DirectionalAdvection(velocity=vin, 
+                        da = DirectionalAdvection(velocity=vin,
                                 advected_fields=sin, advected_fields_out=sout, dt=dt,
                                 velocity_cfl=velocity_cfl, variables=variables, implementation=impl,
                                 method=method, name='advection_{}'.format(str(impl).lower()))
@@ -164,25 +164,24 @@ class TestBilevelAdvectionOperator(object):
 
         # Compare to other implementations
         advec_axes = (tuple(),)
-        advec_axes += tuple((x,) for x in xrange(dim))
+        advec_axes += tuple((x,) for x in range(dim))
         if (dim>1):
-            advec_axes += (tuple(xrange(dim)),)
+            advec_axes += (tuple(range(dim)),)
 
         reference_fields = {}
         for impl in implementations:
-            print '\n >Implementation {}:'.format(impl),
+            print('\n >Implementation {}:'.format(impl), end=' ')
             is_ref = (impl == ref_impl)
             for (sop, graph) in iter_impl(impl):
-                print '\n   *{}: '.format(sop),
+                print('\n   *{}: '.format(sop), end=' ')
 
                 graph.build()
 
                 for axes in advec_axes:
-                    #print 'SWITCHING TO AXES {}'.format(axes)
                     ref_outputs = reference_fields.setdefault(axes, {})
                     napplies = 10
                     Vi = npw.asarray([+1.0 if (i in axes) else +0.0
-                                        for i in xrange(dim)], dtype=dtype)
+                                        for i in range(dim)], dtype=dtype)
 
                     dvin  = graph.get_input_discrete_field(vin).as_contiguous_dfield()
                     dsin  = graph.get_input_discrete_field(sin).as_contiguous_dfield()
@@ -195,21 +194,21 @@ class TestBilevelAdvectionOperator(object):
                         dvin.initialize(self.__velocity_init, axes=axes)
                         dsin.initialize(self.__scalar_init)
                         _input = tuple(dsin.data[i].get().handle.copy()
-                                for i in xrange(dsin.nb_components))
+                                for i in range(dsin.nb_components))
                         S0 = dsin.integrate()
 
-                        for k in xrange(napplies+1):
+                        for k in range(napplies+1):
                             if (k>0):
                                 graph.apply()
 
                             output = tuple(df.sdata[df.compute_slices].get().handle.copy()
                                         for df in dsout.dfields)
 
-                            for i in xrange(dsout.nb_components):
+                            for i in range(dsout.nb_components):
                                 mask = npw.isfinite(output[i][dsout.compute_slices])
                                 if not mask.all():
                                     msg='\nFATAL ERROR: Output is not finite on axis {}.\n'.format(i)
-                                    print msg
+                                    print(msg)
                                     npw.fancy_print(output[i], replace_values={(lambda a: npw.isfinite(a)): '.'})
                                     raise RuntimeError(msg)
 
@@ -218,13 +217,13 @@ class TestBilevelAdvectionOperator(object):
                                 dsref.initialize(self.__scalar_init, offsets=dxk.tolist())
                                 d = dsout.distance(dsref, p=2)
                                 if npw.any(d > 1e-1):
-                                    print 'FATAL ERROR: Could not match analytic advection.'
-                                    print 'DSOUT'
-                                    print dsout.sdata[dsout.compute_slices]
-                                    print 'DSREF'
-                                    print dsref.sdata[dsref.compute_slices]
-                                    print 'DSREF - DSOUT'
-                                    print (dsout.sdata[dsout.compute_slices].get() - dsref.sdata[dsref.compute_slices].get())
+                                    print('FATAL ERROR: Could not match analytic advection.')
+                                    print('DSOUT')
+                                    print(dsout.sdata[dsout.compute_slices])
+                                    print('DSREF')
+                                    print(dsref.sdata[dsref.compute_slices])
+                                    print('DSREF - DSOUT')
+                                    print((dsout.sdata[dsout.compute_slices].get() - dsref.sdata[dsref.compute_slices].get()))
                                     msg='Test failed with V={}, k={}, dxk={}, inter-field L2 distances are {}.'
                                     msg=msg.format(Vi, k, to_tuple(dxk, cast=float), to_tuple(d, cast=float))
                                     raise RuntimeError(msg)
@@ -232,26 +231,26 @@ class TestBilevelAdvectionOperator(object):
                             else:
                                 assert k in ref_outputs
                                 reference = ref_outputs[k]
-                                for i in xrange(dsout.nb_components):
+                                for i in range(dsout.nb_components):
                                     di = npw.abs(reference[i] - output[i])
                                     max_di = npw.max(di)
                                     neps = 10000
                                     max_tol = neps*npw.finfo(dsout.dtype).eps
                                     if (max_di>max_tol):
-                                        print 'FATAL ERROR: Could not match other implementation results.'
-                                        print '\nComparisson failed at step {} and component {}:'.format(k,i)
+                                        print('FATAL ERROR: Could not match other implementation results.')
+                                        print('\nComparisson failed at step {} and component {}:'.format(k,i))
                                         for (j,dv) in dvin.iter_fields():
-                                            print 'VELOCITY INPUT {}'.format(DirectionLabels[j])
-                                            print dv.sdata[dv.compute_slices]
-                                        print 'SCALAR INPUT'
-                                        print _input[i]
-                                        print 'SCALAR REFERENCE'
-                                        print reference[i]
-                                        print 'SCALAR OUTPUT'
-                                        print output[i]
-                                        print 'ABS(REF - OUT)'
+                                            print('VELOCITY INPUT {}'.format(DirectionLabels[j]))
+                                            print(dv.sdata[dv.compute_slices])
+                                        print('SCALAR INPUT')
+                                        print(_input[i])
+                                        print('SCALAR REFERENCE')
+                                        print(reference[i])
+                                        print('SCALAR OUTPUT')
+                                        print(output[i])
+                                        print('ABS(REF - OUT)')
                                         npw.fancy_print(di, replace_values={(lambda a: a<max_tol): '.'})
-                                        print
+                                        print()
                                         msg='Output did not match reference output for component {} at time step {}.'
                                         msg+='\n > max computed distance was {}.'.format(max_di)
                                         msg+='\n > max tolerence was set to {} ({} eps).'.format(max_tol, neps)
@@ -274,11 +273,11 @@ class TestBilevelAdvectionOperator(object):
 
     def test_3D(self):
         self._test(dim=3, is_inplace=True)
-                    
+
     def perform_tests(self):
         # Scales is only 3D
         self._test(dim=3, is_inplace=True)
-        print
+        print()
 
 
 if __name__ == '__main__':
diff --git a/hysop/operator/tests/test_custom_symbolic.py b/hysop/operator/tests/test_custom_symbolic.py
index 49d8e4287bd376c5a28363e8c134651d73039821..e3bb497a4828efd16201f9105858f096190d3761 100644
--- a/hysop/operator/tests/test_custom_symbolic.py
+++ b/hysop/operator/tests/test_custom_symbolic.py
@@ -1,5 +1,8 @@
+import itertools as it
+import numpy as np
+import sympy as sm
+
 from hysop import Field, Box
-from hysop.deps import np, it, sm
 from hysop.constants import Implementation, ComputeGranularity, SpaceDiscretization, HYSOP_REAL
 from hysop.testsenv import __ENABLE_LONG_TESTS__, __HAS_OPENCL_BACKEND__
 from hysop.testsenv import opencl_failed, iter_clenv
@@ -67,15 +70,15 @@ class TestCustomSymbolic(object):
     @staticmethod
     def iter_implementations(op_cls, base_kwds):
         for impl in op_cls.implementations():
-            base_kwds = {k:v for (k,v) in base_kwds.iteritems()}
+            base_kwds = {k:v for (k,v) in base_kwds.items()}
             base_kwds['implementation'] = impl
             if impl is Implementation.OPENCL:
                 for cl_env in iter_clenv():
-                    print '      *platform {}, device {}: '.format(cl_env.platform.name.strip(),
-                                                          cl_env.device.name.strip()),
+                    print('      *platform {}, device {}: '.format(cl_env.platform.name.strip(),
+                                                          cl_env.device.name.strip()), end=' ')
                     yield impl, op_cls(cl_env=cl_env, **base_kwds)
             else:
-                print '      *implementation {}: '.format(impl),
+                print('      *implementation {}: '.format(impl), end=' ')
                 yield impl, op_cls(**base_kwds)
 
     @classmethod
@@ -118,50 +121,50 @@ class TestCustomSymbolic(object):
                 raise NotImplementedError(msg)
 
             if (not has_nan) and (not has_inf) and mask.all():
-                print msg,
+                print(msg, end=' ')
                 continue
-            print
-            print 'Failed to match output of {}:'.format(oname)
-            print
-            print 'Test output comparisson failed for component {}:'.format(i)
-            print ' *has_nan: {}'.format(has_nan)
-            print ' *has_inf: {}'.format(has_inf)
-            print
+            print()
+            print('Failed to match output of {}:'.format(oname))
+            print()
+            print('Test output comparisson failed for component {}:'.format(i))
+            print(' *has_nan: {}'.format(has_nan))
+            print(' *has_inf: {}'.format(has_inf))
+            print()
             if cls.enable_debug_mode:
                 mask[...] = False
-            print 'REFERENCE INPUTS:'
+            print('REFERENCE INPUTS:')
             for name, _in in zip(in_names, refin_buffers):
-                print name
+                print(name)
                 try:
-                    print _in[~mask]
+                    print(_in[~mask])
                 except IndexError:
-                    print _in
-                print
-            print
-            print '{} REFERENCE OUTPUT:'.format(oname)
-            print refout[~mask]
-            print
-            print '{} OPERATOR OUTPUT:'.format(oname)
-            print out[~mask]
-            print
+                    print(_in)
+                print()
+            print()
+            print('{} REFERENCE OUTPUT:'.format(oname))
+            print(refout[~mask])
+            print()
+            print('{} OPERATOR OUTPUT:'.format(oname))
+            print(out[~mask])
+            print()
             if is_fp(out.dtype):
-                print '{} DISTANCES:'.format(oname)
-                print distances[~mask]
-                print
-                print '{} DISTANCES (EPS):'.format(oname)
-                print eps_distances[~mask]
-                print
+                print('{} DISTANCES:'.format(oname))
+                print(distances[~mask])
+                print()
+                print('{} DISTANCES (EPS):'.format(oname))
+                print(eps_distances[~mask])
+                print()
 
             msg = 'Test failed on component {} for implementation {}.'.format(i, impl)
             raise RuntimeError(msg)
-        print
+        print()
 
     def _test_simple(self, dim, _size_min=None, _size_max=None):
         enable_extra_tests = self.enable_extra_tests
         assert dim >= 1
 
-        print 'TEST SIMPLE'
-        print ' DIM {}'.format(dim)
+        print('TEST SIMPLE')
+        print(' DIM {}'.format(dim))
 
         size_min = first_not_None(_size_min, self.size_min)
         size_max = first_not_None(_size_max, self.size_max)
@@ -176,7 +179,7 @@ class TestCustomSymbolic(object):
                                 size=1).tolist())
         discretization = discretization + discretization0
 
-        print ' DISCRETIZATION {}'.format(discretization)
+        print(' DISCRETIZATION {}'.format(discretization))
 
         if __ENABLE_LONG_TESTS__:
             dtypes = (np.float32, np.float64)
@@ -184,13 +187,13 @@ class TestCustomSymbolic(object):
             dtypes = (HYSOP_REAL,)
 
         for dtype in dtypes:
-            print '  DTYPE {}'.format(dtype.__name__)
+            print('  DTYPE {}'.format(dtype.__name__))
             A = Field(domain=domain, name='A', dtype=dtype,
-                    nb_components=1, register_object=False)
+                    nb_components=1)
             B = Field(domain=domain, name='B', dtype=dtype,
-                    nb_components=1, register_object=False)
+                    nb_components=1)
             C = Field(domain=domain, name='C', dtype=dtype,
-                    nb_components=3, register_object=False)
+                    nb_components=3)
 
             P0 = ScalarParameter('P0', dtype=np.float32, initial_value=1.0)
             P1 = ScalarParameter('P1', dtype=np.float64, initial_value=2.0, const=True)
@@ -204,8 +207,8 @@ class TestCustomSymbolic(object):
             P0s = P0.s
             T0s = T0.s
 
-            for granularity in xrange(dim):
-                print '   GRANULARITY {}'.format(granularity)
+            for granularity in range(dim):
+                print('   GRANULARITY {}'.format(granularity))
 
                 self._test_affect((A,), (42,),      discretization, granularity)
                 self._test_affect((A,B,C), (1,2,3), discretization, granularity)
@@ -251,7 +254,7 @@ class TestCustomSymbolic(object):
     def _test_expr(self, exprs, compute_outputs, variables, method,
             apply_kwds=None, no_ref_view=False, dt=None):
         exprs = to_tuple(exprs)
-        print '    CustomExpr: {}'.format(' || '.join(str(e) for e in exprs))
+        print('    CustomExpr: {}'.format(' || '.join(str(e) for e in exprs)))
 
         assert ComputeGranularity in method
         base_kwds = dict(name='array_affect', exprs=exprs, variables=variables, method=method, dt=dt)
@@ -271,7 +274,7 @@ class TestCustomSymbolic(object):
                 refin['f'][field] = ifield.sdata.get().handle.copy()
                 refin['dfields'][field] = ifield
                 in_names.append(field.name)
-            for pname, param in problem.input_params.iteritems():
+            for pname, param in problem.input_params.items():
                 refin['p'][pname] = param.value
                 in_names.append(pname)
 
@@ -280,14 +283,14 @@ class TestCustomSymbolic(object):
             for field, ofield in problem.iter_output_discrete_fields(as_scalars=True):
                 refout['f'][field] = ()
                 refout['dfields'][field] = ofield
-                for i in xrange(ofield.nb_components):
+                for i in range(ofield.nb_components):
                     res = compute_outputs(refin['f'], refin['p'], refin['dfields'], ofield, i).copy()
                     check_instance(res, np.ndarray)
                     if not no_ref_view:
                         res = res[ofield.compute_slices]
                     refout['f'][field] += (res,)
                     out_names.append(field.name+'::{}'.format(i))
-            for pname, param in problem.output_params.iteritems():
+            for pname, param in problem.output_params.items():
                 refout['p'][pname] = compute_outputs(refin['f'], refin['p'], refin['dfields'], param, None)
                 out_names.append(pname)
 
@@ -297,11 +300,11 @@ class TestCustomSymbolic(object):
             for field, ofield in problem.iter_output_discrete_fields(as_scalars=True):
                 out['f'][field] = ()
                 out['dfields'][field] = ofield
-                for i in xrange(ofield.nb_components):
+                for i in range(ofield.nb_components):
                     res = ofield.data[i].get()
                     res = res.handle[ofield.compute_slices].copy()
                     out['f'][field] += (res,)
-            for pname, param in problem.output_params.iteritems():
+            for pname, param in problem.output_params.items():
                 out['p'][pname] = param.value
 
             for field in refin['f']:
@@ -327,11 +330,11 @@ class TestCustomSymbolic(object):
         variables = {}
         for f,c in zip(fields, rhs):
             variables[f] = discretization
-            for i in xrange(f.nb_components):
+            for i in range(f.nb_components):
                 e = Assignment(f.s[i](),c)
                 exprs += (e,)
 
-        print '    CustomExpr: {}'.format(' || '.join(str(e) for e in exprs))
+        print('    CustomExpr: {}'.format(' || '.join(str(e) for e in exprs)))
 
         method={ComputeGranularity: granularity}
         base_kwds = dict(name='custom_affect', exprs=exprs, variables=variables, method=method)
@@ -344,7 +347,7 @@ class TestCustomSymbolic(object):
             for ofield, dfield in problem.iter_output_discrete_fields():
                 dfield.initialize(self.__field_init, dtype=dfield.dtype)
                 view = dfield.compute_slices
-                for i in xrange(dfield.nb_components):
+                for i in range(dfield.nb_components):
                     refin += (dfield.data[i].get().handle[view],)
                     in_names.append(ofield.name+'::{}'.format(i))
 
@@ -356,7 +359,7 @@ class TestCustomSymbolic(object):
             for f,c in zip(fields, rhs):
                 dfield = problem.get_output_discrete_field(f)
                 view = dfield.compute_slices
-                for i in xrange(dfield.nb_components):
+                for i in range(dfield.nb_components):
                     res = dfield.data[i].get().handle[view]
                     try:
                         ref = np.full_like(res, fill_value=c)
@@ -371,8 +374,8 @@ class TestCustomSymbolic(object):
     def _test_stencil(self, dim, _size_min=None, _size_max=None):
         enable_extra_tests = self.enable_extra_tests
         assert dim >= 1
-        print 'TEST STENCIL'
-        print ' DIM {}'.format(dim)
+        print('TEST STENCIL')
+        print(' DIM {}'.format(dim))
 
         size_min = first_not_None(_size_min, self.size_min)
         size_max = first_not_None(_size_max, self.size_max)
@@ -388,13 +391,13 @@ class TestCustomSymbolic(object):
                                 size=1).tolist())
         discretization = discretization + discretization0
 
-        print ' DISCRETIZATION {}'.format(discretization)
+        print(' DISCRETIZATION {}'.format(discretization))
         A = Field(domain=domain, name='A', dtype=np.float32,
-                nb_components=1, register_object=False)
+                nb_components=1)
         B = Field(domain=domain, name='B', dtype=np.float32,
-                nb_components=2, register_object=False)
+                nb_components=2)
         C = Field(domain=domain, name='C', dtype=np.float64,
-                nb_components=3, register_object=False)
+                nb_components=3)
 
         P0 = ScalarParameter('P0', dtype=np.float32, initial_value=3.14)
         T0 = TensorParameter('T',  shape=(3,), dtype=np.int32, initial_value=[4,3,2])
@@ -416,9 +419,9 @@ class TestCustomSymbolic(object):
             orders = (4,)
 
         for order in orders:
-            print '  ORDER {}'.format(order)
-            for granularity in xrange(dim):
-                print '   GRANULARITY {}'.format(granularity)
+            print('  ORDER {}'.format(order))
+            for granularity in range(dim):
+                print('   GRANULARITY {}'.format(granularity))
 
                 if __ENABLE_LONG_TESTS__:
                     expr    = Assignment(As, B.s[0]().diff(x0,x0))
@@ -499,8 +502,8 @@ class TestCustomSymbolic(object):
     def _test_time_integrator(self, dim, _size_min=None, _size_max=None):
         enable_extra_tests = self.enable_extra_tests
         assert dim >= 1
-        print 'TEST INTEGRATOR'
-        print ' DIM {}'.format(dim)
+        print('TEST INTEGRATOR')
+        print(' DIM {}'.format(dim))
 
         size_min = first_not_None(_size_min, self.size_min)
         size_max = first_not_None(_size_max, self.size_max)
@@ -516,13 +519,13 @@ class TestCustomSymbolic(object):
                                 size=1).tolist())
         discretization = discretization + discretization0
 
-        print ' DISCRETIZATION {}'.format(discretization)
+        print(' DISCRETIZATION {}'.format(discretization))
         A = Field(domain=domain, name='A', dtype=np.float32,
-                nb_components=1, register_object=False)
+                nb_components=1)
         B = Field(domain=domain, name='B', dtype=np.float32,
-                nb_components=2, register_object=False)
+                nb_components=2)
         C = Field(domain=domain, name='C', dtype=np.float32,
-                nb_components=3, register_object=False)
+                nb_components=3)
 
         P0 = ScalarParameter('P0', dtype=np.float32, initial_value=3.14)
         T0 = TensorParameter('T',  shape=(3,), dtype=np.int32, initial_value=[4,3,2])
@@ -544,8 +547,8 @@ class TestCustomSymbolic(object):
 
         order = 4
         granularity = 0
-        print '  GRANULARITY {}'.format(granularity)
-        print '  ORDER {}'.format(order)
+        print('  GRANULARITY {}'.format(granularity))
+        print('  ORDER {}'.format(order))
 
         D1 = csg.generate_exact_stencil(derivative=1, order=order)
         D2 = csg.generate_exact_stencil(derivative=2, order=order)
@@ -557,7 +560,7 @@ class TestCustomSymbolic(object):
             integrators = (RK2,)
 
         for integrator in integrators:
-            print '   INTEGRATOR {}'.format(integrator)
+            print('   INTEGRATOR {}'.format(integrator))
 
             if __ENABLE_LONG_TESTS__:
                 expr = Assignment(As.diff(t), 0)
diff --git a/hysop/operator/tests/test_diffusion.py b/hysop/operator/tests/test_diffusion.py
index d399f128e14f1a288dc16d98ae173b51049cf9c1..9dad287c761da6b1bfebe1e0428d1415a390cc0e 100644
--- a/hysop/operator/tests/test_diffusion.py
+++ b/hysop/operator/tests/test_diffusion.py
@@ -52,13 +52,13 @@ class TestDiffusionOperator(object):
             nu = ScalarParameter('nu', dtype=dtype, initial_value=random.random(), const=True)
             nb_components = 5 if (dim==2) else 6
             Fin  = Field(domain=domain, name='Fin', dtype=dtype,
-                    nb_components=nb_components, register_object=False)
+                    nb_components=nb_components)
             Fout = Field(domain=domain, name='Fout', dtype=dtype,
-                   nb_components=nb_components, register_object=False)
+                   nb_components=nb_components)
             self._test_one(shape=shape, dim=dim, dtype=dtype,
                     is_inplace=is_inplace, domain=domain,
                     Fin=Fin, Fout=Fout, nu=nu)
-    
+
     @staticmethod
     def __random_init(data, coords, dtype, component):
         shape = data.shape
@@ -82,9 +82,9 @@ class TestDiffusionOperator(object):
 
     def _test_one(self, shape, dim,
             dtype, is_inplace, domain, Fin, Fout, nu):
-        print
-        print '\nTesting {}D Diffusion: inplace={} dtype={} shape={}'.format(
-            dim, is_inplace, dtype.__name__, shape),
+        print()
+        print('\nTesting {}D Diffusion: inplace={} dtype={} shape={}'.format(
+            dim, is_inplace, dtype.__name__, shape), end=' ')
 
         dt = ScalarParameter('dt0', initial_value=0.5, const=True,
                                 dtype=dtype)
@@ -110,32 +110,32 @@ class TestDiffusionOperator(object):
                 base_kwds['Fout'] = fout
             if (impl is Implementation.PYTHON):
                 msg='   *Python FFTW: '
-                print msg,
+                print(msg, end=' ')
                 diff = Diffusion(**base_kwds)
                 yield diff.to_graph()
             elif (impl is Implementation.FORTRAN):
                 msg='   *Fortran FFTW: '
-                print msg,
+                print(msg, end=' ')
                 diff = Diffusion(**base_kwds)
                 yield diff.to_graph()
             elif (impl is Implementation.OPENCL):
                 msg='   *OpenCl CLFFT: '
-                print msg
+                print(msg)
                 for cl_env in iter_clenv():
                     msg='     |platform {}, device {}: '.format(cl_env.platform.name.strip(),
                                                               cl_env.device.name.strip())
-                    print msg,
+                    print(msg, end=' ')
                     sys.stdout.flush()
                     diff = Diffusion(cl_env=cl_env, **base_kwds)
                     yield diff.to_graph()
                 msg='   *OpenCl FFTW: '
-                print msg
+                print(msg)
                 cpu_envs = tuple(iter_clenv(device_type='cpu'))
                 if cpu_envs:
                     for cl_env in cpu_envs:
                         msg='     |platform {}, device {}'.format(cl_env.platform.name.strip(),
                                                                   cl_env.device.name.strip())
-                        print msg,
+                        print(msg, end=' ')
                         diff = Diffusion(cl_env=cl_env, enforce_implementation=False, **base_kwds)
                         yield diff.to_graph()
             else:
@@ -146,7 +146,7 @@ class TestDiffusionOperator(object):
         reference_fields = {}
         outputs = {}
 
-        print '\n >Testing all Implementations:'
+        print('\n >Testing all Implementations:')
         for impl in implementations:
             if (impl is Implementation.FORTRAN):
                 if ((dim not in (2,3))
@@ -154,15 +154,15 @@ class TestDiffusionOperator(object):
                       or (dtype != HYSOP_REAL)
                       or any((bd != BoundaryCondition.PERIODIC) for bd in Fin.lboundaries)
                       or any((bd != BoundaryCondition.PERIODIC) for bd in Fin.rboundaries)):
-                    print '   *Fortran FFTW: NO SUPPORT'
+                    print('   *Fortran FFTW: NO SUPPORT')
                     continue
             elif (impl is Implementation.OPENCL):
                 if (dim>3):
-                    print '   *OpenCl: NO SUPPORT'
+                    print('   *OpenCl: NO SUPPORT')
                     continue
             for op in iter_impl(impl):
                 if (impl is ref_impl):
-                    print 'REF IMPL',
+                    print('REF IMPL', end=' ')
                     sys.stdout.flush()
                 if (not is_inplace):
                     op.push_copy(dst=fin, src=fout)
@@ -204,7 +204,7 @@ class TestDiffusionOperator(object):
                     if ref_impl in outputs.keys() and not impl is ref_impl:
                         reference = outputs[ref_impl]
                         output = outputs[impl]
-                        for i in xrange(dfout.nb_components):
+                        for i in range(dfout.nb_components):
                             if npw.may_share_memory(reference[i], output[i]):
                                 msg='Output and reference arrays may share the same memory.'
                                 raise RuntimeError(msg)
@@ -219,7 +219,7 @@ class TestDiffusionOperator(object):
                             mask = npw.isfinite(output[i])
                             if not mask.all():
                                 msg='\nFATAL ERROR: Output is not finite on axis {}.\n'.format(i)
-                                print msg
+                                print(msg)
                                 npw.fancy_print(output[i],
                                         replace_values={(lambda a: npw.isfinite(a)): '.'})
                                 raise RuntimeError(msg)
@@ -228,14 +228,14 @@ class TestDiffusionOperator(object):
                             neps = 100
                             max_tol = neps*npw.finfo(dfout.dtype).eps
                             if (max_di>max_tol):
-                                print
-                                print 'SCALAR INPUT'
-                                print _input[i]
-                                print 'SCALAR REFERENCE'
-                                print reference[i]
-                                print 'SCALAR OUTPUT'
-                                print output[i]
-                                print 'ABS(REF - OUT)'
+                                print()
+                                print('SCALAR INPUT')
+                                print(_input[i])
+                                print('SCALAR REFERENCE')
+                                print(reference[i])
+                                print('SCALAR OUTPUT')
+                                print(output[i])
+                                print('ABS(REF - OUT)')
                                 npw.fancy_print(di, replace_values={(lambda a: a<max_tol): '.'})
                                 msg ='Output did not match reference output for component {}'
                                 msg+='\n > max computed distance was {}.'
@@ -257,7 +257,7 @@ class TestDiffusionOperator(object):
                     sys.stdout.write('\bx\n\n')
                     sys.stdout.flush()
                     raise
-                print
+                print()
 
 
     def test_diffusion_1D_inplace(self):
@@ -271,7 +271,7 @@ class TestDiffusionOperator(object):
         self.test_diffusion_1D_inplace()
         self.test_diffusion_2D_inplace()
         self.test_diffusion_3D_inplace()
-        print
+        print()
 
 
 if __name__ == '__main__':
diff --git a/hysop/operator/tests/test_directional_advection.py b/hysop/operator/tests/test_directional_advection.py
index 307fb45dd53b86ef981c31f8da1cba65e31d0b3c..9109452a3873caacd23f1fdf74ad42a53925c7cf 100644
--- a/hysop/operator/tests/test_directional_advection.py
+++ b/hysop/operator/tests/test_directional_advection.py
@@ -1,5 +1,6 @@
+import sys
 import numpy as np
-from hysop.deps import sys
+
 from hysop.testsenv import __ENABLE_LONG_TESTS__
 from hysop.testsenv import iter_clenv
 from hysop.tools.numpywrappers import npw
@@ -67,19 +68,19 @@ class TestDirectionalAdvectionOperator(object):
         domain = Box(length=(2*npw.pi,)*dim)
         for dtype in flt_types:
             Vin  = Field(domain=domain, name='Vin', dtype=dtype,
-                    nb_components=dim, register_object=False)
+                    nb_components=dim)
             Sin  = Field(domain=domain, name='Sin', dtype=dtype,
-                    nb_components=2, register_object=False)
+                    nb_components=2)
             Sout = Field(domain=domain, name='Sout', dtype=dtype,
-                    nb_components=2, register_object=False)
+                    nb_components=2)
             for time_integrator in time_integrators:
                 for remesh_kernel in remesh_kernels:
                     for velocity_cfl in velocity_cfls:
-                        print
                         self._test_one(time_integrator=time_integrator, remesh_kernel=remesh_kernel,
                                        shape=shape, dim=dim, dtype=dtype,
                                        is_inplace=is_inplace, domain=domain,
                                        Vin=Vin, Sin=Sin, Sout=Sout, velocity_cfl=velocity_cfl)
+                        print()
 
     @classmethod
     def __velocity_init(cls, data, coords, component, axes):
@@ -101,9 +102,9 @@ class TestDirectionalAdvectionOperator(object):
             dtype, is_inplace, domain, velocity_cfl,
             Vin, Sin, Sout):
 
-        print '\nTesting {}D DirectionalAdvection_{}_{}: inplace={} dtype={} shape={}, cfl={}'.format(
+        print('\nTesting {}D DirectionalAdvection_{}_{}: inplace={} dtype={} shape={}, cfl={}'.format(
                 dim, time_integrator.name(), remesh_kernel,
-                is_inplace, dtype.__name__, shape, velocity_cfl),
+                is_inplace, dtype.__name__, shape, velocity_cfl), end=' ')
         if is_inplace:
             vin = Vin
             sin, sout = Sin, Sin
@@ -115,10 +116,10 @@ class TestDirectionalAdvectionOperator(object):
 
         # Use optimal timestep, ||Vi||_inf is 1 on a per-axis basis
         dt = velocity_cfl * np.divide(domain.length, shape).min()
-        dt = ScalarParameter('dt', initial_value=dt, constant=True)
+        dt = ScalarParameter('dt', initial_value=dt, const=True)
 
         ref_impl = Implementation.PYTHON
-        implementations = DirectionalAdvection.implementations().keys()
+        implementations = list(DirectionalAdvection.implementations().keys())
         assert ref_impl in implementations
         implementations.remove(ref_impl)
         implementations = [ref_impl] + implementations
@@ -161,25 +162,24 @@ class TestDirectionalAdvectionOperator(object):
 
         # Compare to other implementations
         advec_axes = (tuple(),)
-        advec_axes += tuple((x,) for x in xrange(dim))
+        advec_axes += tuple((x,) for x in range(dim))
         if dim>1:
-            advec_axes += (tuple(xrange(dim)),)
+            advec_axes += (tuple(range(dim)),)
 
         reference_fields = {}
         for impl in implementations:
-            print '\n >Implementation {}:'.format(impl),
+            print('\n >Implementation {}:'.format(impl), end=' ')
             is_ref = (impl == ref_impl)
             for sop, graph in iter_impl(impl):
-                print '\n   *{}: '.format(sop),
+                print('\n   *{}: '.format(sop), end=' ')
 
                 graph.build()
 
                 for axes in advec_axes:
-                    #print 'SWITCHING TO AXES {}'.format(axes)
                     ref_outputs = reference_fields.setdefault(axes, {})
                     napplies = 10
                     Vi = npw.asarray([+1.0 if (i in axes) else +0.0
-                                        for i in xrange(dim)], dtype=dtype)
+                                        for i in range(dim)], dtype=dtype)
 
                     dvin  = graph.get_input_discrete_field(vin)
                     dsin  = graph.get_input_discrete_field(sin)
@@ -192,20 +192,20 @@ class TestDirectionalAdvectionOperator(object):
                         dvin.initialize(self.__velocity_init, axes=axes)
                         dsin.initialize(self.__scalar_init)
                         _input = tuple(dsin.data[i].get().handle.copy()
-                                for i in xrange(dsin.nb_components))
+                                for i in range(dsin.nb_components))
                         S0 = dsin.integrate()
 
-                        for k in xrange(napplies+1):
+                        for k in range(napplies+1):
                             graph.apply()
 
                             output = tuple(dsout.data[i].get().handle.copy()
-                                        for i in xrange(dsout.nb_components))
+                                        for i in range(dsout.nb_components))
 
-                            for i in xrange(dsout.nb_components):
+                            for i in range(dsout.nb_components):
                                 mask = npw.isfinite(output[i][dsout.compute_slices])
                                 if not mask.all():
                                     msg='\nFATAL ERROR: Output is not finite on axis {}.\n'.format(i)
-                                    print msg
+                                    print(msg)
                                     npw.fancy_print(output[i], replace_values={(lambda a: npw.isfinite(a)): '.'})
                                     raise RuntimeError(msg)
 
@@ -216,16 +216,16 @@ class TestDirectionalAdvectionOperator(object):
                                 dsref.initialize(self.__scalar_init, offsets=dxk.tolist())
                                 d = dsout.distance(dsref, p=2)
                                 if npw.any(d > 1e-1):
-                                    print '\nFATAL ERROR: Could not match analytic advection.'
-                                    print 'DSOUT'
+                                    print('\nFATAL ERROR: Could not match analytic advection.')
+                                    print('DSOUT')
                                     for output in dsout:
-                                        print output.sdata[output.compute_slices]
-                                    print 'DSREF'
+                                        print(output.sdata[output.compute_slices])
+                                    print('DSREF')
                                     for ref in dsref:
-                                        print ref.sdata[ref.compute_slices]
-                                    print 'DSREF - DSOUT'
+                                        print(ref.sdata[ref.compute_slices])
+                                    print('DSREF - DSOUT')
                                     for (output, ref) in zip(dsout, dsref):
-                                        print (output.sdata[output.compute_slices].get() - ref.sdata[ref.compute_slices].get())
+                                        print((output.sdata[output.compute_slices].get() - ref.sdata[ref.compute_slices].get()))
                                     msg='Test failed with V={}, k={}, dxk={}, inter-field L2 distances are {}.'
                                     msg=msg.format(Vi, k, to_tuple(dxk, cast=float), to_tuple(d, cast=float))
                                     raise RuntimeError(msg)
@@ -233,26 +233,26 @@ class TestDirectionalAdvectionOperator(object):
                             else:
                                 assert k in ref_outputs
                                 reference = ref_outputs[k]
-                                for i in xrange(dsout.nb_components):
+                                for i in range(dsout.nb_components):
                                     di = npw.abs(reference[i] - output[i])
                                     max_di = npw.max(di)
                                     neps = 1000
                                     max_tol = neps*npw.finfo(dsout.dtype).eps
                                     if (max_di>max_tol):
-                                        print '\nFATAL ERROR: Could not match other implementation results.'
-                                        print '\nComparisson failed at step {} and component {}:'.format(k,i)
+                                        print('\nFATAL ERROR: Could not match other implementation results.')
+                                        print('\nComparisson failed at step {} and component {}:'.format(k,i))
                                         for (j,dv) in enumerate(dvin):
-                                            print 'VELOCITY INPUT {}'.format(DirectionLabels[j])
-                                            print dv.sdata[dv.compute_slices]
-                                        print 'SCALAR INPUT'
-                                        print _input[i]
-                                        print 'SCALAR REFERENCE'
-                                        print reference[i]
-                                        print 'SCALAR OUTPUT'
-                                        print output[i]
-                                        print 'ABS(REF - OUT)'
+                                            print('VELOCITY INPUT {}'.format(DirectionLabels[j]))
+                                            print(dv.sdata[dv.compute_slices])
+                                        print('SCALAR INPUT')
+                                        print(_input[i])
+                                        print('SCALAR REFERENCE')
+                                        print(reference[i])
+                                        print('SCALAR OUTPUT')
+                                        print(output[i])
+                                        print('ABS(REF - OUT)')
                                         npw.fancy_print(di, replace_values={(lambda a: a<max_tol): '.'})
-                                        print
+                                        print()
                                         msg='Output did not match reference output for component {} at time step {}.'
                                         msg+='\n > max computed distance was {}.'.format(max_di)
                                         msg+='\n > max tolerence was set to {} ({} eps).'.format(max_tol, neps)
@@ -284,7 +284,7 @@ class TestDirectionalAdvectionOperator(object):
         self.test_advec_1D_inplace()
         self.test_advec_2D_inplace()
         self.test_advec_3D_inplace()
-        print
+        print()
 
 
 if __name__ == '__main__':
diff --git a/hysop/operator/tests/test_directional_diffusion.py b/hysop/operator/tests/test_directional_diffusion.py
index 62619260403598dfd1db2df06aa874e306242ed4..a4aa365316af337b72bf407e14a8e3d1d093ebde 100644
--- a/hysop/operator/tests/test_directional_diffusion.py
+++ b/hysop/operator/tests/test_directional_diffusion.py
@@ -1,4 +1,5 @@
-from hysop.deps import sys
+import sys
+
 from hysop.tools.numpywrappers import npw
 from hysop.testsenv import __ENABLE_LONG_TESTS__
 from hysop.testsenv import iter_clenv
@@ -64,17 +65,17 @@ class TestDirectionalDiffusionOperator(object):
         domain = Box(length=(2*npw.pi,)*dim)
         for dtype in flt_types:
             Fin  = Field(domain=domain, name='Fin', dtype=dtype,
-                    nb_components=3, register_object=False)
+                    nb_components=3)
             Fout = Field(domain=domain, name='Fout', dtype=dtype,
-                   nb_components=3, register_object=False)
+                   nb_components=3)
             coeffs = npw.random.rand(Fin.nb_components, dim).astype(dtype)
             for order in orders:
                 for time_integrator in time_integrators:
-                    print
                     self._test_one(time_integrator=time_integrator, order=order,
                             shape=shape, dim=dim, dtype=dtype,
                             is_inplace=is_inplace, domain=domain,
                             Fin=Fin, Fout=Fout, coeffs=coeffs)
+                    print()
 
     @staticmethod
     def __random_init(data, coords, dtype, component):
@@ -100,9 +101,9 @@ class TestDirectionalDiffusionOperator(object):
     def _test_one(self, time_integrator, order, shape, dim,
             dtype, is_inplace, domain, Fin, Fout, coeffs):
 
-        print '\nTesting {}D DirectionalDiffusion_{}_FCD{}: inplace={} dtype={} shape={}'.format(
+        print('\nTesting {}D DirectionalDiffusion_{}_FCD{}: inplace={} dtype={} shape={}'.format(
             dim, time_integrator.name(), order,
-            is_inplace, dtype.__name__, shape),
+            is_inplace, dtype.__name__, shape), end=' ')
 
         dt = ScalarParameter('dt0', initial_value=0.1, const=True,
                                 dtype=dtype)
@@ -142,9 +143,9 @@ class TestDirectionalDiffusionOperator(object):
         reference_fields = {}
 
         for impl in implementations:
-            print '\n >Implementation {}:'.format(impl),
+            print('\n >Implementation {}:'.format(impl), end=' ')
             for sop,op,split in iter_impl(impl):
-                print '\n   *{}: '.format(sop),
+                print('\n   *{}: '.format(sop), end=' ')
                 split.push_operators(op)
                 if (not is_inplace):
                     split.push_copy(dst=fin, src=fout)
@@ -169,7 +170,7 @@ class TestDirectionalDiffusionOperator(object):
                         raise RuntimeError(msg)
 
                     _input = tuple(dfin.data[i].get().handle.copy()
-                                    for i in xrange(dfin.nb_components))
+                                    for i in range(dfin.nb_components))
 
                     if (reference is None):
                         reference  = self._compute_reference(fin=dfin, dfin=_input, dt=dt(), dim=dim,
@@ -177,9 +178,9 @@ class TestDirectionalDiffusionOperator(object):
                     split.apply()
 
                     output = tuple(dfout.data[i].get().handle.copy()[view]
-                                for i in xrange(dfout.nb_components))
+                                for i in range(dfout.nb_components))
 
-                    for i in xrange(dfout.nb_components):
+                    for i in range(dfout.nb_components):
                         if npw.may_share_memory(reference[i], output[i]):
                             msg='Output and reference arrays may share the same memory.'
                             raise RuntimeError(msg)
@@ -194,7 +195,7 @@ class TestDirectionalDiffusionOperator(object):
                         mask = npw.isfinite(output[i])
                         if not mask.all():
                             msg='\nFATAL ERROR: Output is not finite on axis {}.\n'.format(i)
-                            print msg
+                            print(msg)
                             npw.fancy_print(output[i], replace_values={(lambda a: npw.isfinite(a)): '.'})
                             raise RuntimeError(msg)
                         di = npw.abs(reference[i] - output[i])
@@ -202,16 +203,16 @@ class TestDirectionalDiffusionOperator(object):
                         neps = 1000
                         max_tol = neps*npw.finfo(dfout.dtype).eps
                         if (max_di>max_tol):
-                            print
-                            print 'SCALAR INPUT'
-                            print _input[i]
-                            print 'SCALAR REFERENCE'
-                            print reference[i]
-                            print 'SCALAR OUTPUT'
-                            print output[i]
-                            print 'ABS(REF - OUT)'
+                            print()
+                            print('SCALAR INPUT')
+                            print(_input[i])
+                            print('SCALAR REFERENCE')
+                            print(reference[i])
+                            print('SCALAR OUTPUT')
+                            print(output[i])
+                            print('ABS(REF - OUT)')
                             npw.fancy_print(di, replace_values={(lambda a: a<max_tol): '.'})
-                            print
+                            print()
                             msg='Output did not match reference output for component {}'.format(i)
                             msg+='\n > max computed distance was {}.'.format(max_di)
                             msg+='\n > max tolerence was set to {} ({} eps).'.format(max_tol, neps)
@@ -241,26 +242,26 @@ class TestDirectionalDiffusionOperator(object):
         csg.configure(dtype=MPQ, dim=1)
         D2 = csg.generate_exact_stencil(derivative=2, order=order)
 
-        directions = range(dim) + range(dim)[::-1]
+        directions = tuple(range(dim)) + tuple(range(dim))[::-1]
         dt_coeffs = (0.5,)*(2*dim)
-        Xin   = { 'F{}'.format(i): dfin[i] for i in xrange(len(dfin)) }
-        views = { 'F{}'.format(i): view    for i in xrange(len(dfin)) }
+        Xin   = { 'F{}'.format(i): dfin[i] for i in range(len(dfin)) }
+        views = { 'F{}'.format(i): view    for i in range(len(dfin)) }
 
         ghost_exchangers = tuple(fin.build_ghost_exchanger(directions=i, data=dfin)
-                                    for i in xrange(dim))
+                                    for i in range(dim))
 
         for (j, dt_coeff) in zip(directions, dt_coeffs):
             ndir = (dim-j-1)
             def rhs(out, X, **kwds):
                 coeff = coeffs[:,j]
-                for i in xrange(len(dfin)):
+                for i in range(len(dfin)):
                     fi = 'F{}'.format(i)
                     Fi_in  = X[fi]
                     d2Fi_dxj = out[fi]
                     d2Fi_dxj[...] = D2(a=Fi_in, out=None, axis=ndir,
                             symbols={D2.dx:fin.space_step[ndir]}) * coeff[i]
             out = time_integrator(Xin=Xin, RHS=rhs, dt=(dt_coeff*dt))
-            for i in xrange(len(dfin)):
+            for i in range(len(dfin)):
                 fi = 'F{}'.format(i)
                 dfin[i][...] = out[fi]
             ghost_exchangers[ndir].exchange_ghosts()
@@ -291,7 +292,7 @@ class TestDirectionalDiffusionOperator(object):
         # self.test_diffusion_1D_out_of_place()
         # self.test_diffusion_2D_out_of_place()
         # self.test_diffusion_3D_out_of_place()
-        print
+        print()
 
 
 if __name__ == '__main__':
diff --git a/hysop/operator/tests/test_directional_stretching.py b/hysop/operator/tests/test_directional_stretching.py
index 48e40dcf9f87fcd3d7a68d77c76819743a0d801d..02328b3a7a91ccdaf993e4db86bfceb99af775a4 100644
--- a/hysop/operator/tests/test_directional_stretching.py
+++ b/hysop/operator/tests/test_directional_stretching.py
@@ -1,4 +1,6 @@
-from hysop.deps import sys, sm
+import sys
+import sympy as sm
+
 from hysop.testsenv import __ENABLE_LONG_TESTS__, iter_clenv
 from hysop.tools.numpywrappers import npw
 from hysop.tools.contexts import printoptions
@@ -40,7 +42,7 @@ class TestDirectionalStretchingOperator(object):
 
     @classmethod
     def teardown_class(cls):
-        print
+        print()
 
     def _test(self, formulation, is_inplace, As=None,
             size_min=None, size_max=None):
@@ -68,21 +70,21 @@ class TestDirectionalStretchingOperator(object):
         domain = Box(length=(2*npw.pi,)*dim)
         for dtype in flt_types:
             Vin  = Field(domain=domain, name='Vin', dtype=dtype,
-                   nb_components=dim, register_object=False)
+                   nb_components=dim)
             Win  = Field(domain=domain, name='Win', dtype=dtype,
-                   nb_components=dim, register_object=False)
+                   nb_components=dim)
             Wout = Field(domain=domain, name='Wout', dtype=dtype,
-                   nb_components=dim, register_object=False)
+                   nb_components=dim)
             C = npw.random.rand(dim).astype(dtype)
             for A in As:
                 for order in orders:
                     for time_integrator in time_integrators:
-                        print
                         self._test_one(time_integrator=time_integrator, order=order,
                                 shape=shape, dim=dim, dtype=dtype,
                                 is_inplace=is_inplace, domain=domain,
                                 Vin=Vin, Win=Win, Wout=Wout,
                                 C=C, A=A, formulation=formulation)
+                        print()
 
     @staticmethod
     def __random_init(data, coords, component):
@@ -99,9 +101,9 @@ class TestDirectionalStretchingOperator(object):
     def _test_one(self, time_integrator, order, shape, dim,
             dtype, is_inplace, domain, Vin, Win, Wout, C, A, formulation):
 
-        print '\nTesting DirectionalStretching::{}_{}_FCD{}: is_inplace={} dtype={} shape={} C={} A={}'.format(
+        print('\nTesting DirectionalStretching::{}_{}_FCD{}: is_inplace={} dtype={} shape={} C={} A={}'.format(
             formulation, time_integrator.name(), order,
-            is_inplace, dtype.__name__, shape, C, A),
+            is_inplace, dtype.__name__, shape, C, A), end=' ')
 
         dt = ScalarParameter('dt0', initial_value=1.0, const=False,
                 dtype=dtype)
@@ -148,10 +150,10 @@ class TestDirectionalStretchingOperator(object):
         # Compare to other implementations
         (V_input, W_input, W_reference) = (None, None, None)
         for impl in implementations:
-            print '\n >Implementation {}: '.format(impl),
+            print('\n >Implementation {}: '.format(impl), end=' ')
             for (sop, op, split) in iter_impl(impl):
                 if sop != '':
-                    print '\n   *{}: '.format(sop),
+                    print('\n   *{}: '.format(sop), end=' ')
                 split.push_operators(op)
                 if (not is_inplace):
                     split.push_copy(dst=win, src=wout)
@@ -202,7 +204,7 @@ class TestDirectionalStretchingOperator(object):
                     if not is_inplace:
                         assert npw.all(W0==W1), W1-W0
 
-                    for i in xrange(dwout.nb_components):
+                    for i in range(dwout.nb_components):
                         if npw.may_share_memory(W_reference[i], W_output[i]):
                             msg='W_output and W_reference arrays may share the same memory.'
                             raise RuntimeError(msg)
@@ -227,18 +229,18 @@ class TestDirectionalStretchingOperator(object):
                         max_tol = neps*eps
                         if (max_di>max_tol):
                             if self.enable_debug_mode:
-                                print
-                                print 'V INPUT'
-                                print V_input[i]
-                                print 'W INPUT'
-                                print W_input[i]
-                                print 'W_REFERENCE'
-                                print W_reference[i]
-                                print 'W_OUTPUT'
-                                print W_output[i]
-                                print 'ABS(REF - OUT)'
+                                print()
+                                print('V INPUT')
+                                print(V_input[i])
+                                print('W INPUT')
+                                print(W_input[i])
+                                print('W_REFERENCE')
+                                print(W_reference[i])
+                                print('W_OUTPUT')
+                                print(W_output[i])
+                                print('ABS(REF - OUT)')
                                 npw.fancy_print(di, replace_values={(lambda a: a<max_tol): '.'})
-                                print
+                                print()
                             msg='W_OUTPUT did not match W_REFERENCE for component {}'.format(i)
                             msg+='\n > max computed distance was {} ({} eps).'.format(max_di,
                                     int(npw.ceil(max_di/eps)))
@@ -265,10 +267,10 @@ class TestDirectionalStretchingOperator(object):
         Vin = tuple(d.get().handle.copy() for d in dvin.data)
         Win = tuple(d.get().handle.copy() for d in dwin.data)
 
-        directions = range(dim) + range(dim)[::-1]
+        directions = tuple(range(dim)) + tuple(range(dim))[::-1]
         dt_coeffs = (0.5,)*(2*dim)
 
-        wis   = tuple('W{}'.format(i) for i in xrange(3))
+        wis   = tuple('W{}'.format(i) for i in range(3))
         Xin   = dict(zip(wis, Win))
         views = { 'W{}'.format(i): Wi.compute_slices for (i,Wi) in enumerate(dwin) }
 
@@ -294,7 +296,7 @@ class TestDirectionalStretchingOperator(object):
                         d1_vw = D1(a=a, axis=ndir, symbols={D1.dx:dwin.space_step[ndir]})
                         D1_VW += (d1_vw,)
                     assert (A is None), A
-                    for i in xrange(3):
+                    for i in range(3):
                         ghosts = [0,]*3
                         #if (i==j):
                             #ghosts[2-j] = dwin[j].ghosts[j]
@@ -302,34 +304,34 @@ class TestDirectionalStretchingOperator(object):
                         dW_dt[i][...] = C[i] * D1_VW[i]
                 elif (formulation is StretchingFormulation.GRAD_UW):
                     assert (A is None), A
-                    for i in xrange(3):
+                    for i in range(3):
                         V_view = dvin[i].compute_slices
                         dW_dt[i][...] = C[i] * D1_V[i][V_view] * W[j]
                 elif (formulation is StretchingFormulation.GRAD_UW_T):
                     assert (A is None), A
-                    for i in xrange(3):
+                    for i in range(3):
                         if (i==j):
                             V_view = dvin[i].compute_slices
                             dW_dt[j][...] = C[j] * sum(D1_V[k][V_view] * W[k]
-                                    for k in xrange(3))
+                                    for k in range(3))
                         else:
                             dW_dt[i][...] = 0
                 elif (formulation is StretchingFormulation.MIXED_GRAD_UW):
                     a=first_not_None(A, sm.Rational(1,2))
                     a=float(a)
-                    for i in xrange(3):
+                    for i in range(3):
                         V_view = dvin[i].compute_slices
                         dW_dt[i][...] = a * D1_V[i][V_view] * W[j]
                         if (i==j):
                             dW_dt[i][...] += (1.0-a) * sum(D1_V[k][V_view] * W[k]
-                                    for k in xrange(3))
+                                    for k in range(3))
                         dW_dt[i][...] *= C[i]
                 else:
                     msg='Unknown stretching formulation {}.'.format(formulation)
                     raise NotImplementedError(msg)
 
             out = time_integrator(Xin=Xin, RHS=rhs, views=views, dt=(dt_coeff*dt))
-            for i in xrange(3):
+            for i in range(3):
                 Win[i][views[wis[i]]] = out[wis[i]]
             ghost_exchanger.exchange_ghosts()
         views = dvin.get_attributes('compute_slices')
diff --git a/hysop/operator/tests/test_fd_derivative.py b/hysop/operator/tests/test_fd_derivative.py
index 036e75cc0e7039479d40fcdbf8d03faa17acfc0a..1a648bdf8cbdee4de9892e7bb00b1e50a4a93dfd 100644
--- a/hysop/operator/tests/test_fd_derivative.py
+++ b/hysop/operator/tests/test_fd_derivative.py
@@ -2,7 +2,9 @@
 Test gradient of fields.
 """
 import random
-from hysop.deps import it, sm, random
+import itertools as it
+import sympy as sm
+
 from hysop.constants import HYSOP_REAL, Backend
 from hysop.methods import SpaceDiscretization
 from hysop.testsenv import __ENABLE_LONG_TESTS__, __HAS_OPENCL_BACKEND__
@@ -57,12 +59,12 @@ class TestFiniteDifferencesDerivative(object):
             coords = frame.coords
 
             def gen_Fi():
-                kis = tuple(random.randint(1,5)*2*sm.pi for _ in xrange(dim))
+                kis = tuple(random.randint(1,5)*2*sm.pi for _ in range(dim))
                 qis = tuple(npw.random.rand(dim).round(decimals=3).tolist())
                 basis = tuple( (sm.cos(ki*xi+qi), sm.sin(ki*xi+qi))
                         for (ki,qi,xi) in zip(kis, qis, coords) )
                 F0 = sm.Integer(1) / (sm.Integer(1) + npw.random.randint(1,5)*cls.t.s)
-                for i in xrange(dim):
+                for i in range(dim):
                     F0 *= random.choice(basis[i])
                 return F0
 
@@ -116,8 +118,8 @@ class TestFiniteDifferencesDerivative(object):
 
         domain = Box(length=(1,)*dim)
         F = Field(domain=domain, name='F', dtype=dtype,
-                nb_components=3, register_object=False)
-        gradF = F.gradient(register_object=False)
+                nb_components=3)
+        gradF = F.gradient()
 
         self._test_one(shape=shape, dim=dim, dtype=dtype,
                 domain=domain, F=F, gradF=gradF)
@@ -126,22 +128,22 @@ class TestFiniteDifferencesDerivative(object):
     def _test_one(self, shape, dim, dtype,
             domain, F, gradF):
 
-        print '\nTesting {}D Gradient: dtype={} shape={}'.format(
-                dim, dtype.__name__, shape)
-        print ' >Input analytic functions:'
+        print('\nTesting {}D Gradient: dtype={} shape={}'.format(
+                dim, dtype.__name__, shape))
+        print(' >Input analytic functions:')
         for (i,Fi) in enumerate(self.analytic_expressions[dim]['F']):
-            print '   *F{}(x,t) = {}'.format(i, Fi)
-        print ' >Expected analytic outputs:'
+            print('   *F{}(x,t) = {}'.format(i, Fi))
+        print(' >Expected analytic outputs:')
         from hysop.tools.sympy_utils import subscript, partial
         for (i,j) in npw.ndindex(*self.analytic_expressions[dim]['gradF'].shape):
             dFij = self.analytic_expressions[dim]['gradF'][i,j]
-            print '   *{p}F{}/{p}x{}(x,t) = {}'.format(subscript(i).encode('utf-8'),
-                                                       subscript(j).encode('utf-8'),
+            print('   *{p}F{}/{p}x{}(x,t) = {}'.format(subscript(i),
+                                                       subscript(j),
                                                        dFij,
-                                                       p=partial.encode('utf-8'))
+                                                       p=partial))
         self.t.value = random.random()
-        print ' >Parameter t has been set to {}.'.format(self.t())
-        print ' >Testing all implementations:'
+        print(' >Parameter t has been set to {}.'.format(self.t()))
+        print(' >Testing all implementations:')
 
         implementations = FiniteDifferencesSpaceDerivative.implementations()
 
@@ -157,17 +159,17 @@ class TestFiniteDifferencesDerivative(object):
                              method={SpaceDiscretization: 8})
             if impl is Implementation.PYTHON:
                 msg='   *Python: '
-                print msg,
+                print(msg, end=' ')
                 op = Gradient(**base_kwds)
                 yield op.to_graph()
-                print
+                print()
             elif impl is Implementation.OPENCL:
                 msg='   *Opencl: '
-                print msg
+                print(msg)
                 for cl_env in iter_clenv():
                     msg='      >platform {}, device {}:'.format(cl_env.platform.name.strip(),
                                                           cl_env.device.name.strip())
-                    print msg,
+                    print(msg, end=' ')
                     op0 = Gradient(cl_env=cl_env, **base_kwds)
                     op1 = ForceTopologyState(fields=gradF, variables={gradF: shape},
                                                   backend=Backend.OPENCL,
@@ -175,8 +177,8 @@ class TestFiniteDifferencesDerivative(object):
                     op = op0.to_graph()
                     op.push_nodes(op1)
                     yield op
-                    print
-                print
+                    print()
+                print()
             else:
                 msg='Unknown implementation to test {}.'.format(impl)
                 raise NotImplementedError(msg)
@@ -223,44 +225,44 @@ class TestFiniteDifferencesDerivative(object):
                 dist = npw.abs(fout-fref)
                 dL1 = npw.nansum(dist) / dist.size
                 if (dL1 < 2e-1):
-                    print '{}, '.format(dL1),
+                    print('{}, '.format(dL1), end=' ')
                     continue
                 has_nan = npw.any(npw.isnan(fout))
                 has_inf = npw.any(npw.isinf(fout))
 
-                print
-                print
-                print 'Test output comparisson for {} failed for component {}:'.format(name, i)
-                print ' *has_nan: {}'.format(has_nan)
-                print ' *has_inf: {}'.format(has_inf)
-                print ' *dL1={}'.format(dL1)
-                print
+                print()
+                print()
+                print('Test output comparison for {} failed for component {}:'.format(name, i))
+                print(' *has_nan: {}'.format(has_nan))
+                print(' *has_inf: {}'.format(has_inf))
+                print(' *dL1={}'.format(dL1))
+                print()
                 if cls.enable_debug_mode:
-                    print 'REFERENCE INPUTS:'
+                    print('REFERENCE INPUTS:')
                     for (i,w) in enumerate(Fref):
-                        print 'F{}'.format(i)
-                        print w
-                        print
+                        print('F{}'.format(i))
+                        print(w)
+                        print()
                     if (name == 'gradF'):
-                        print 'REFERENCE OUTPUT:'
+                        print('REFERENCE OUTPUT:')
                         for (i,u) in enumerate(gradFref):
-                            print 'gradF{}'.format(i)
-                            print u
-                            print
-                        print
-                        print 'OPERATOR {} OUTPUT:'.format(op.name.upper())
-                        print
+                            print('gradF{}'.format(i))
+                            print(u)
+                            print()
+                        print()
+                        print('OPERATOR {} OUTPUT:'.format(op.name.upper()))
+                        print()
                         for (i,u) in enumerate(gradFout):
-                            print 'gradF{}'.format(i)
-                            print u
-                            print
+                            print('gradF{}'.format(i))
+                            print(u)
+                            print()
                     else:
-                        print 'MODIFIED INPUTS:'
+                        print('MODIFIED INPUTS:')
                         for (i,w) in enumerate(Fout):
-                            print 'F{}'.format(i)
-                            print w
-                            print
-                    print
+                            print('F{}'.format(i))
+                            print(w)
+                            print()
+                    print()
 
                 msg = 'Test failed for {} on component {} for implementation {}.'.format(name, i, impl)
                 raise RuntimeError(msg)
diff --git a/hysop/operator/tests/test_penalization.py b/hysop/operator/tests/test_penalization.py
index e3e02ca68d523207d23f3a6a53cffd994a407434..5dc3eccd1d08fbd995a249055ad174741595a671 100644
--- a/hysop/operator/tests/test_penalization.py
+++ b/hysop/operator/tests/test_penalization.py
@@ -2,7 +2,7 @@
 Test of vorticity penalization
 """
 import random
-from hysop.deps import random
+
 from hysop.constants import HYSOP_REAL
 from hysop.testsenv import __ENABLE_LONG_TESTS__
 from hysop.tools.contexts import printoptions
@@ -82,11 +82,11 @@ class TestPenalizeVorticity(object):
 
         domain = Box(length=(1,)*dim)
         velo = Field(domain=domain, name='velo', dtype=dtype,
-                     nb_components=3, register_object=False)
+                     nb_components=3)
         vorti = Field(domain=domain, name='vorti', dtype=dtype,
-                      nb_components=3, register_object=False)
+                      nb_components=3)
         obstacle = Field(domain=domain, name='sphere', dtype=dtype,
-                         nb_components=1, register_object=False)
+                         nb_components=1)
 
         self._test_one(shape=shape, dim=dim, dtype=dtype,
                        domain=domain, velo=velo, vorti=vorti,
@@ -94,14 +94,14 @@ class TestPenalizeVorticity(object):
 
     def _test_one(self, shape, dim, dtype,
                   domain, velo, vorti, obstacle):
-        print '\nTesting {}D PenalizeVorticity: dtype={} shape={}'.format(
-                dim, dtype.__name__, shape)
+        print('\nTesting {}D PenalizeVorticity: dtype={} shape={}'.format(
+                dim, dtype.__name__, shape))
 
         self.t.value = random.random()
         self.dt.value = random.random()
-        print ' >Parameter t has been set to {}.'.format(self.t())
-        print ' >Parameter dt has been set to {}.'.format(self.dt())
-        print ' >Testing all implementations:'
+        print(' >Parameter t has been set to {}.'.format(self.t()))
+        print(' >Parameter dt has been set to {}.'.format(self.dt()))
+        print(' >Testing all implementations:')
 
         implementations = PenalizeVorticity.implementations()
         variables = {velo: shape, vorti: shape, obstacle: shape}
@@ -112,10 +112,10 @@ class TestPenalizeVorticity(object):
                              name='penalize_vorticity_{}'.format(str(impl).lower()))
             if impl is Implementation.PYTHON:
                 msg='   *Python: '
-                print msg,
+                print(msg, end=' ')
                 yield PenalizeVorticity(obstacles=[obstacle], coeff=1e8,
                                         **base_kwds)
-                print
+                print()
             else:
                 msg='Unknown implementation to test {}.'.format(impl)
                 raise NotImplementedError(msg)
@@ -178,11 +178,11 @@ class TestPenalizeVorticity(object):
             iname = 'F{}'.format(i)
             mask = npw.isfinite(field)
             if not mask.all():
-                print
-                print field
-                print
-                print field[~mask]
-                print
+                print()
+                print(field)
+                print()
+                print(field[~mask])
+                print()
                 msg = msg0.format(iname)
                 raise ValueError(msg)
 
@@ -195,17 +195,17 @@ class TestPenalizeVorticity(object):
             dinf = npw.max(dist)
             deps = int(npw.ceil(dinf/eps))
             if (deps < 200):
-                print '{}eps, '.format(deps),
+                print('{}eps, '.format(deps), end=' ')
                 continue
             has_nan = npw.any(npw.isnan(fout))
             has_inf = npw.any(npw.isinf(fout))
-            print
-            print
-            print 'Test output comparisson failed for component {}:'.format(i)
-            print ' *has_nan: {}'.format(has_nan)
-            print ' *has_inf: {}'.format(has_inf)
-            print ' *dinf={} ({} eps)'.format(dinf, deps)
-            print
+            print()
+            print()
+            print('Test output comparison failed for component {}:'.format(i))
+            print(' *has_nan: {}'.format(has_nan))
+            print(' *has_inf: {}'.format(has_inf))
+            print(' *dinf={} ({} eps)'.format(dinf, deps))
+            print()
             msg = 'Test failed on component {} for implementation {}.'.format(i, impl)
             raise RuntimeError(msg)
 
diff --git a/hysop/operator/tests/test_poisson.py b/hysop/operator/tests/test_poisson.py
index 8d9406699d7a6cd2751fb55c8fba1c4bed894d23..9659ad22a721df44e8abded6d828faffd9d49f73 100644
--- a/hysop/operator/tests/test_poisson.py
+++ b/hysop/operator/tests/test_poisson.py
@@ -1,6 +1,8 @@
 import random
 import primefac
-from hysop.deps import it, sm, random
+import itertools as it
+import sympy as sm
+
 from hysop.constants import HYSOP_REAL, BoundaryCondition
 from hysop.testsenv import __ENABLE_LONG_TESTS__, __HAS_OPENCL_BACKEND__
 from hysop.testsenv import opencl_failed, iter_clenv, test_context, domain_boundary_iterator
@@ -54,7 +56,7 @@ class TestPoissonOperator(object):
 
         def gen_psi():
             psis = ()
-            for i in xrange(nb_components):
+            for i in range(nb_components):
                 if polynomial:
                     psi, y = make_multivariate_polynomial(origin, end,
                                                           lboundaries, rboundaries,
@@ -121,27 +123,27 @@ class TestPoissonOperator(object):
                          rboundaries=rboundaries)
 
             Psi = Field(domain=domain, name='Psi', dtype=dtype,
-                        nb_components=nb_components, register_object=False)
+                        nb_components=nb_components)
             W = Field(domain=domain, name='W', dtype=dtype,
-                      nb_components=nb_components, register_object=False)
+                      nb_components=nb_components)
 
             self._test_one(shape=shape, dim=dim, dtype=dtype,
                            domain=domain, Psi=Psi, W=W,
                            polynomial=polynomial, nb_components=nb_components)
             if (max_runs is not None) and (i == max_runs):
-                missing = ((4**(dim+1) - 1) / 3) - i
-                print
-                print '>> MAX RUNS ACHIEVED FOR {}D DOMAINS -- SKIPING {} OTHER BOUNDARY CONDITIONS <<'.format(
-                    dim, missing)
-                print
-                print
+                missing = ((4**(dim+1) - 1) // 3) - i
+                print()
+                print('>> MAX RUNS ACHIEVED FOR {}D DOMAINS -- SKIPPING {} OTHER BOUNDARY CONDITIONS <<'.format(
+                    dim, missing))
+                print()
+                print()
                 break
         else:
-            assert (i == (4**(dim+1)-1)/3), (i+1, (4**(dim+1)-1)/3)
-            print
-            print '>> TESTED ALL {}D BOUNDARY CONDITIONS <<'.format(dim)
-            print
-            print
+            assert (i == (4**(dim+1)-1)//3), (i+1, (4**(dim+1)-1)//3)
+            print()
+            print('>> TESTED ALL {}D BOUNDARY CONDITIONS <<'.format(dim))
+            print()
+            print()
 
     def _test_one(self, shape, dim, dtype,
                   domain, Psi, W, polynomial, nb_components):
@@ -168,7 +170,7 @@ class TestPoissonOperator(object):
         for (Psi_i, Psis_i) in zip(Psi.fields, analytic_expressions['Psi']):
             msg += '\n   *{}(x,t) = {}'.format(Psi_i.pretty_name, format_expr(Psis_i))
         msg += '\n >Testing all implementations:'
-        print msg
+        print(msg)
 
         implementations = Poisson.implementations()
         variables = {Psi: shape, W: shape}
@@ -179,29 +181,29 @@ class TestPoissonOperator(object):
                              name='poisson_{}'.format(str(impl).lower()))
             if impl is Implementation.FORTRAN:
                 msg = '   *Fortran FFTW: '
-                print msg,
+                print(msg, end=' ')
                 yield Poisson(**base_kwds)
             elif impl is Implementation.PYTHON:
                 msg = '   *Python FFTW: '
-                print msg,
+                print(msg, end=' ')
                 yield Poisson(**base_kwds)
             elif impl is Implementation.OPENCL:
                 from hysop.backend.device.opencl import cl
                 msg = '   *OpenCl CLFFT: '
-                print msg
+                print(msg)
                 for cl_env in iter_clenv():
                     msg = '     |platform {}, device {}'.format(cl_env.platform.name.strip(),
                                                                 cl_env.device.name.strip())
-                    print msg,
+                    print(msg, end=' ')
                     yield Poisson(cl_env=cl_env, **base_kwds)
                 msg = '   *OpenCl FFTW: '
-                print msg
+                print(msg)
                 cpu_envs = tuple(iter_clenv(device_type='cpu'))
                 if cpu_envs:
                     for cl_env in cpu_envs:
                         msg = '     |platform {}, device {}'.format(cl_env.platform.name.strip(),
                                                                     cl_env.device.name.strip())
-                        print msg,
+                        print(msg, end=' ')
                         yield Poisson(cl_env=cl_env, enforce_implementation=False, **base_kwds)
             else:
                 msg = 'Unknown implementation to test {}.'.format(impl)
@@ -215,7 +217,7 @@ class TestPoissonOperator(object):
                 if ((nb_components > 1) or (dim != 3) or (not dtype is HYSOP_REAL)
                         or any((bd != BoundaryCondition.PERIODIC) for bd in W.lboundaries)
                         or any((bd != BoundaryCondition.PERIODIC) for bd in W.rboundaries)):
-                    print '   *Fortran FFTW: NO SUPPORT'
+                    print('   *Fortran FFTW: NO SUPPORT')
                     continue
             for op in iter_impl(impl):
                 op = op.build()
@@ -238,7 +240,7 @@ class TestPoissonOperator(object):
                 self._check_output(impl, op, Wref, Psiref, Wout, Psiout)
                 if (impl is Implementation.FORTRAN):
                     op.finalize(clean_fftw_solver=True)
-                print
+                print()
 
     @classmethod
     def _check_output(cls, impl, op, Wref, Psiref, Wout, Psiout):
@@ -253,11 +255,11 @@ class TestPoissonOperator(object):
                 iname = '{}{}'.format(name, i)
                 mask = npw.isfinite(field)
                 if not mask.all():
-                    print
-                    print field
-                    print
-                    print field[~mask]
-                    print
+                    print()
+                    print(field)
+                    print()
+                    print(field[~mask])
+                    print()
                     msg = msg0.format(iname)
                     raise ValueError(msg)
 
@@ -282,42 +284,42 @@ class TestPoissonOperator(object):
                     dinf = npw.max(dist)
                     deps = int(npw.ceil(dinf/eps))
                 if (deps < 10000):
-                    print '{}eps, '.format(deps),
+                    print('{}eps, '.format(deps), end=' ')
                     continue
 
-                print
-                print
-                print 'Test output comparisson for {} failed for component {}:'.format(name, i)
-                print ' *has_nan: {}'.format(has_nan)
-                print ' *has_inf: {}'.format(has_inf)
-                print ' *dinf={} ({} eps)'.format(dinf, deps)
-                print
+                print()
+                print()
+                print('Test output comparison for {} failed for component {}:'.format(name, i))
+                print(' *has_nan: {}'.format(has_nan))
+                print(' *has_inf: {}'.format(has_inf))
+                print(' *dinf={} ({} eps)'.format(dinf, deps))
+                print()
                 if cls.enable_debug_mode:
-                    print 'REFERENCE INPUTS:'
+                    print('REFERENCE INPUTS:')
                     for (i, w) in enumerate(Wref):
-                        print 'W{}'.format(i)
-                        print w
-                        print
+                        print('W{}'.format(i))
+                        print(w)
+                        print()
                     if (name == 'Psi'):
-                        print 'REFERENCE OUTPUT:'
+                        print('REFERENCE OUTPUT:')
                         for (i, u) in enumerate(Psiref):
-                            print 'Psi{}'.format(i)
-                            print u
-                            print
-                        print
-                        print 'OPERATOR {} OUTPUT:'.format(op.name.upper())
-                        print
+                            print('Psi{}'.format(i))
+                            print(u)
+                            print()
+                        print()
+                        print('OPERATOR {} OUTPUT:'.format(op.name.upper()))
+                        print()
                         for (i, u) in enumerate(Psiout):
-                            print 'Psi{}'.format(i)
-                            print u
-                            print
+                            print('Psi{}'.format(i))
+                            print(u)
+                            print()
                     else:
-                        print 'MODIFIED INPUTS:'
+                        print('MODIFIED INPUTS:')
                         for (i, w) in enumerate(Wout):
-                            print 'W{}'.format(i)
-                            print w
-                            print
-                    print
+                            print('W{}'.format(i))
+                            print(w)
+                            print()
+                    print()
 
                 msg = 'Test failed for {} on component {} for implementation {}.'
                 msg = msg.format(name, i, impl)
@@ -379,7 +381,7 @@ class TestPoissonOperator(object):
             self.test_2d_float64(max_runs=max_2d_runs)
             if __ENABLE_LONG_TESTS__:
                 self.test_3d_float64(max_runs=max_3d_runs)
-                self.test_4d_float32(max_runs=max_4d_runs)
+                self.test_4d_float64(max_runs=max_4d_runs)
 
 
 if __name__ == '__main__':
diff --git a/hysop/operator/tests/test_poisson_curl.py b/hysop/operator/tests/test_poisson_curl.py
index 30ec59dae044816fae324ae289e1526127e4055b..e8a989cfafdfc6f116b9aa95e98df56a12b2774a 100644
--- a/hysop/operator/tests/test_poisson_curl.py
+++ b/hysop/operator/tests/test_poisson_curl.py
@@ -1,7 +1,9 @@
 # coding: utf-8
 
 import random, primefac
-from hysop.deps import it, sm, random
+import itertools as it
+import sympy as sm
+
 from hysop.constants import HYSOP_REAL, BoxBoundaryCondition, BoundaryCondition
 from hysop.defaults import VelocityField, VorticityField
 from hysop.testsenv import __ENABLE_LONG_TESTS__, __HAS_OPENCL_BACKEND__
@@ -58,7 +60,7 @@ class TestPoissonCurlOperator(object):
         coords = frame.coords
         def gen_psi():
             psis = ()
-            for i in xrange(nb_components):
+            for i in range(nb_components):
                 if polynomial:
                     psi, y = make_multivariate_polynomial(origin, end,
                                                         lboundaries[i], rboundaries[i],
@@ -128,18 +130,18 @@ class TestPoissonCurlOperator(object):
             self._test_one(shape=shape, dim=dim, dtype=dtype,
                     domain=domain, W=W, U=U, polynomial=polynomial)
             if (max_runs is not None) and (i==max_runs):
-                missing = ((4**(dim+1) - 1) / 3) - i
-                print
-                print '>> MAX RUNS ACHIEVED FOR {}D DOMAINS -- SKIPING {} OTHER BOUNDARY CONDITIONS <<'.format(dim, missing)
-                print
-                print
+                missing = ((4**(dim+1) - 1) // 3) - i
+                print()
+                print('>> MAX RUNS ACHIEVED FOR {}D DOMAINS -- SKIPPING {} OTHER BOUNDARY CONDITIONS <<'.format(dim, missing))
+                print()
+                print()
                 break
         else:
-            assert (i==(4**(dim+1)-1)/3), (i+1, (4**(dim+1)-1)/3)
-            print
-            print '>> TESTED ALL {}D BOUNDARY CONDITIONS <<'.format(dim)
-            print
-            print
+            assert (i==(4**(dim+1)-1)//3), (i+1, (4**(dim+1)-1)//3)
+            print()
+            print('>> TESTED ALL {}D BOUNDARY CONDITIONS <<'.format(dim))
+            print()
+            print()
 
     def _test_one(self, shape, dim, dtype,
             domain, U, W, polynomial):
@@ -157,14 +159,14 @@ class TestPoissonCurlOperator(object):
 
         msg='\nTesting {}D PoissonCurl: dtype={} shape={} polynomial={}, bc=[{}]'.format(
                 dim, dtype.__name__, shape, polynomial, domain.format_boundaries())
-        print msg
-        print ' >Input analytic vorticity is (truncated):'
+        print(msg)
+        print(' >Input analytic vorticity is (truncated):')
         for (Wi, Wis) in zip(W.fields, analytic_expressions['W']):
-            print '  *{}(x) = {}'.format(Wi.pretty_name, format_expr(Wis))
-        print ' >Expected output velocity is:'
+            print('  *{}(x) = {}'.format(Wi.pretty_name, format_expr(Wis)))
+        print(' >Expected output velocity is:')
         for (Ui, Uis) in zip(U.fields, analytic_expressions['U']):
-            print '  *{}(x) = {}'.format(Ui.pretty_name, format_expr(Uis))
-        print ' >Testing all implementations:'
+            print('  *{}(x) = {}'.format(Ui.pretty_name, format_expr(Uis)))
+        print(' >Testing all implementations:')
 
         implementations = PoissonCurl.implementations().keys()
         variables = { U:shape, W:shape }
@@ -175,28 +177,28 @@ class TestPoissonCurlOperator(object):
                              name='poisson_{}'.format(str(impl).lower()))
             if impl is Implementation.FORTRAN:
                 msg='   *Fortran FFTW: '
-                print msg,
+                print(msg, end=' ')
                 yield PoissonCurl(**base_kwds)
             elif impl is Implementation.PYTHON:
                 msg='   *Python FFTW: '
-                print msg,
+                print(msg, end=' ')
                 yield PoissonCurl(**base_kwds)
             elif impl is Implementation.OPENCL:
                 msg='   *OpenCl CLFFT: '
-                print msg
+                print(msg)
                 for cl_env in iter_clenv():
                     msg='     |platform {}, device {}'.format(cl_env.platform.name.strip(),
                                                           cl_env.device.name.strip())
-                    print msg,
+                    print(msg, end=' ')
                     yield PoissonCurl(cl_env=cl_env, projection=0, **base_kwds)
                 msg='   *OpenCl FFTW: '
-                print msg
+                print(msg)
                 cpu_envs = tuple(iter_clenv(device_type='cpu'))
                 if cpu_envs:
                     for cl_env in cpu_envs:
                         msg='     |platform {}, device {}'.format(cl_env.platform.name.strip(),
                                                                   cl_env.device.name.strip())
-                        print msg,
+                        print(msg, end=' ')
                         yield PoissonCurl(cl_env=cl_env, enforce_implementation=False, **base_kwds)
             else:
                 msg='Unknown implementation to test {}.'.format(impl)
@@ -209,7 +211,7 @@ class TestPoissonCurlOperator(object):
             if (impl is Implementation.FORTRAN) and ((dtype != HYSOP_REAL)
                      or any((bd != BoxBoundaryCondition.PERIODIC) for bd in domain.lboundaries)
                      or any((bd != BoxBoundaryCondition.PERIODIC) for bd in domain.rboundaries)):
-                    print '   *Fortran FFTW: NO SUPPORT'
+                    print('   *Fortran FFTW: NO SUPPORT')
                     continue
             for (i,op) in enumerate(iter_impl(impl)):
                 from hysop.tools.debug_dumper import DebugDumper
@@ -235,7 +237,7 @@ class TestPoissonCurlOperator(object):
                 Wout = tuple( data.get().handle.copy() for data in dw.data )
                 Uout = tuple( data.get().handle.copy() for data in du.data )
                 self._check_output(impl, op, Wref, Uref, Wout, Uout)
-                print
+                print()
 
     @classmethod
     def _check_output(cls, impl, op, Wref, Uref, Wout, Uout):
@@ -250,11 +252,11 @@ class TestPoissonCurlOperator(object):
                 iname = '{}{}'.format(name,i)
                 mask = npw.isfinite(field)
                 if not mask.all():
-                    print
-                    print field
-                    print
-                    print field[~mask]
-                    print
+                    print()
+                    print(field)
+                    print()
+                    print(field[~mask])
+                    print()
                     msg = msg0.format(iname)
                     raise ValueError(msg)
 
@@ -281,42 +283,42 @@ class TestPoissonCurlOperator(object):
                     except:
                         deps = 'inf'
                 if (deps < 10000):
-                    print '{}eps, '.format(deps),
+                    print('{}eps, '.format(deps), end=' ')
                     continue
 
-                print
-                print
-                print 'Test output comparisson for {} failed for component {}:'.format(name, i)
-                print ' *has_nan: {}'.format(has_nan)
-                print ' *has_inf: {}'.format(has_inf)
-                print ' *dinf={} ({} eps)'.format(dinf, deps)
-                print
+                print()
+                print()
+                print('Test output comparison for {} failed for component {}:'.format(name, i))
+                print(' *has_nan: {}'.format(has_nan))
+                print(' *has_inf: {}'.format(has_inf))
+                print(' *dinf={} ({} eps)'.format(dinf, deps))
+                print()
                 if cls.enable_debug_mode:
-                    print 'REFERENCE INPUTS:'
+                    print('REFERENCE INPUTS:')
                     for (i,w) in enumerate(Wref):
-                        print 'W{}'.format(i)
-                        print w
-                        print
+                        print('W{}'.format(i))
+                        print(w)
+                        print()
                     if (name == 'U'):
-                        print 'REFERENCE OUTPUT:'
+                        print('REFERENCE OUTPUT:')
                         for (i,u) in enumerate(Uref):
-                            print 'U{}'.format(i)
-                            print u
-                            print
-                        print
-                        print 'OPERATOR {} OUTPUT:'.format(op.name.upper())
-                        print
+                            print('U{}'.format(i))
+                            print(u)
+                            print()
+                        print()
+                        print('OPERATOR {} OUTPUT:'.format(op.name.upper()))
+                        print()
                         for (i,u) in enumerate(Uout):
-                            print 'U{}'.format(i)
-                            print u
-                            print
+                            print('U{}'.format(i))
+                            print(u)
+                            print()
                     else:
-                        print 'MODIFIED INPUTS:'
+                        print('MODIFIED INPUTS:')
                         for (i,w) in enumerate(Wout):
-                            print 'W{}'.format(i)
-                            print w
-                            print
-                    print
+                            print('W{}'.format(i))
+                            print(w)
+                            print()
+                    print()
 
                 msg = 'Test failed for {} on component {} for implementation {}.'
                 msg = msg.format(name, i, impl)
diff --git a/hysop/operator/tests/test_restriction_filter.py b/hysop/operator/tests/test_restriction_filter.py
index 868d174b5ec1a3c78cbc024bda1caab04098c82e..02929cb4acce71027ea315169d32f1c4541db3ea 100755
--- a/hysop/operator/tests/test_restriction_filter.py
+++ b/hysop/operator/tests/test_restriction_filter.py
@@ -64,7 +64,7 @@ class TestMultiresolutionFilter(object):
 
         domain = Box(length=(1,)*dim)
         f = Field(domain=domain, name='velo', dtype=dtype,
-                  nb_components=1, register_object=False)
+                  nb_components=1)
 
         self._test_one(shape_f=shape_f, shape_c=shape_c,
                        dim=dim, dtype=dtype,
@@ -72,6 +72,8 @@ class TestMultiresolutionFilter(object):
 
     def _test_one(self, shape_f, shape_c, dim, dtype,
                   domain, f, factor):
+        print('\nTesting {}D SpatialFilter: dtype={} shape_f={} shape_c={}'.format(
+            dim, dtype.__name__, shape_f, shape_c))
         implementations = (Implementation.PYTHON, )
         mpi_params = MPIParams(comm=domain.task_comm(),
                                task_id=domain.current_task())
@@ -92,11 +94,11 @@ class TestMultiresolutionFilter(object):
                              filtering_method=FilteringMethod.SUBGRID)
             if impl is Implementation.PYTHON:
                 msg = '   *Python: '
-                print msg,
+                print(msg, end=' ')
                 yield SpatialFilter(input_variables={f: topo_f},
                                     output_variables={f: topo_c},
                                     **base_kwds)
-                print
+                print()
             else:
                 msg = 'Unknown implementation to test {}.'.format(impl)
                 raise NotImplementedError(msg)
@@ -119,11 +121,11 @@ class TestMultiresolutionFilter(object):
                     iname = 'F{}'.format(i)
                     mask = npw.isfinite(field)
                     if not mask.all():
-                        print
-                        print field
-                        print
-                        print field[~mask]
-                        print
+                        print()
+                        print(field)
+                        print()
+                        print(field[~mask])
+                        print()
                         msg = msg0.format(iname)
                         raise ValueError(msg)
 
@@ -136,14 +138,14 @@ class TestMultiresolutionFilter(object):
                 dinf = npw.max(dist)
                 deps = int(npw.ceil(dinf/eps))
                 if (deps < 10):
-                    print '{}eps, '.format(deps),
+                    print('{}eps, '.format(deps), end=' ')
                     return
-                print
-                print
-                print 'Test output comparisson failed for flowrate:'
-                print ' *dinf={} ({} eps)'.format(dinf, deps)
-                print ' *flowrate={} ({})'.format(flowrate, ref_flowrate)
-                print
+                print()
+                print()
+                print('Test output comparison failed for flowrate:')
+                print(' *dinf={} ({} eps)'.format(dinf, deps))
+                print(' *flowrate={} ({})'.format(flowrate, ref_flowrate))
+                print()
                 msg = 'Test failed on flowrate for implementation {}.'.format(impl)
                 raise RuntimeError(msg)
 
@@ -153,6 +155,7 @@ class TestMultiresolutionFilter(object):
 
     def perform_tests(self):
         self._test(dim=3, dtype=HYSOP_REAL)
+        print()
 
     def test_3d(self):
         self._test(dim=3, dtype=HYSOP_REAL)
diff --git a/hysop/operator/tests/test_scales_advection.py b/hysop/operator/tests/test_scales_advection.py
index 10e40fd71a51e9600033dd6cb0f3135999271752..62e6d083fff4cbaafc740d02c30421c083ae7747 100644
--- a/hysop/operator/tests/test_scales_advection.py
+++ b/hysop/operator/tests/test_scales_advection.py
@@ -1,4 +1,5 @@
-from hysop.deps import sys
+import sys
+
 from hysop.testsenv import __ENABLE_LONG_TESTS__
 from hysop.testsenv import iter_clenv
 from hysop.tools.numpywrappers import npw
@@ -67,11 +68,11 @@ class TestScalesAdvectionOperator(object):
         domain = Box(length=(2*npw.pi,)*dim)
         for dtype in flt_types:
             Vin = Field(domain=domain, name='Vin', dtype=dtype,
-                        nb_components=dim, register_object=False)
+                        nb_components=dim)
             Sin = Field(domain=domain, name='Sin', dtype=dtype,
-                        nb_components=5, register_object=False)
+                        nb_components=5)
             Sout = Field(domain=domain, name='Sout', dtype=dtype,
-                         nb_components=5, register_object=False)
+                         nb_components=5)
             for time_integrator in time_integrators:
                 for remesh_kernel in remesh_kernels:
                     for velocity_cfl in velocity_cfls:
@@ -101,9 +102,9 @@ class TestScalesAdvectionOperator(object):
                   dtype, is_inplace, domain, velocity_cfl,
                   Vin, Sin, Sout):
 
-        print '\nTesting {}D ScalesAdvection_{}_{}: inplace={} dtype={} shape={}, cfl={}'.format(
+        print('\nTesting {}D ScalesAdvection_{}_{}: inplace={} dtype={} shape={}, cfl={}'.format(
             dim, time_integrator.name(), remesh_kernel,
-            is_inplace, dtype.__name__, shape, velocity_cfl),
+            is_inplace, dtype.__name__, shape, velocity_cfl), end=' ')
         if is_inplace:
             vin = Vin
             sin, sout = Sin, Sin
@@ -148,24 +149,23 @@ class TestScalesAdvectionOperator(object):
 
         # Compare to other implementations
         advec_axes = (tuple(),)
-        advec_axes += tuple((x,) for x in xrange(dim))
+        advec_axes += tuple((x,) for x in range(dim))
         if (dim > 1):
-            advec_axes += (tuple(xrange(dim)),)
+            advec_axes += (tuple(range(dim)),)
 
         reference_fields = {}
         for impl in implementations:
-            print '\n >Implementation {}:'.format(impl),
+            print('\n >Implementation {}:'.format(impl), end=' ')
             is_ref = (impl == ref_impl)
             for (sop, graph) in iter_impl(impl):
-                print '\n   *{}: '.format(sop),
+                print('\n   *{}: '.format(sop), end=' ')
 
                 graph.build()
                 for axes in advec_axes:
-                    # print 'SWITCHING TO AXES {}'.format(axes)
                     ref_outputs = reference_fields.setdefault(axes, {})
                     napplies = 10
                     Vi = npw.asarray([+1.0 if (i in axes) else +0.0
-                                      for i in xrange(dim)], dtype=dtype)
+                                      for i in range(dim)], dtype=dtype)
 
                     dvin = graph.get_input_discrete_field(vin).as_contiguous_dfield()
                     dsin = graph.get_input_discrete_field(sin).as_contiguous_dfield()
@@ -180,22 +180,22 @@ class TestScalesAdvectionOperator(object):
                         dsout.initialize(self.__scalar_init)
 
                         _input = tuple(dsin.data[i].get().handle.copy()
-                                       for i in xrange(dsin.nb_components))
+                                       for i in range(dsin.nb_components))
                         S0 = dsin.integrate()
 
-                        for k in xrange(napplies+1):
+                        for k in range(napplies+1):
                             if (k > 0):
                                 graph.apply()
 
                             output = tuple(dsout.data[i].get().handle.copy()[dsout.compute_slices]
-                                           for i in xrange(dsout.nb_components))
+                                           for i in range(dsout.nb_components))
 
-                            for i in xrange(dsout.nb_components):
+                            for i in range(dsout.nb_components):
                                 mask = npw.isfinite(output[i][dsout.compute_slices])
                                 if not mask.all():
                                     msg = '\nFATAL ERROR: Output is not finite on axis {}.\n'.format(
                                         i)
-                                    print msg
+                                    print(msg)
                                     npw.fancy_print(output[i], replace_values={
                                                     (lambda a: npw.isfinite(a)): '.'})
                                     raise RuntimeError(msg)
@@ -205,14 +205,13 @@ class TestScalesAdvectionOperator(object):
                                 dsref.initialize(self.__scalar_init, offsets=dxk.tolist())
                                 d = dsout.distance(dsref, p=2)
                                 if npw.any(d > 1e-1):
-                                    print 'FATAL ERROR: Could not match analytic advection.'
-                                    print 'DSOUT'
-                                    print dsout.sdata[dsout.compute_slices]
-                                    print 'DSREF'
-                                    print dsref.sdata[dsref.compute_slices]
-                                    print 'DSREF - DSOUT'
-                                    print (dsout.sdata[dsout.compute_slices].get(
-                                    ) - dsref.sdata[dsref.compute_slices].get())
+                                    print('FATAL ERROR: Could not match analytic advection.')
+                                    print('DSOUT')
+                                    print(dsout.sdata[dsout.compute_slices])
+                                    print('DSREF')
+                                    print(dsref.sdata[dsref.compute_slices])
+                                    print('DSREF - DSOUT')
+                                    print(dsout.sdata[dsout.compute_slices].get() - dsref.sdata[dsref.compute_slices].get())
                                     msg = 'Test failed with V={}, k={}, dxk={}, inter-field L2 distances are {}.'
                                     msg = msg.format(Vi, k, to_tuple(
                                         dxk, cast=float), to_tuple(d, cast=float))
@@ -221,28 +220,27 @@ class TestScalesAdvectionOperator(object):
                             else:
                                 assert k in ref_outputs
                                 reference = ref_outputs[k]
-                                for i in xrange(dsout.nb_components):
+                                for i in range(dsout.nb_components):
                                     di = npw.abs(reference[i] - output[i])
                                     max_di = npw.max(di)
                                     neps = 10000
                                     max_tol = neps*npw.finfo(dsout.dtype).eps
                                     if (max_di > max_tol):
-                                        print 'FATAL ERROR: Could not match other implementation results.'
-                                        print '\nComparisson failed at step {} and component {}:'.format(
-                                            k, i)
+                                        print('FATAL ERROR: Could not match other implementation results.')
+                                        print('\nComparisson failed at step {} and component {}:'.format(k, i))
                                         for (j, dv) in dvin.iter_fields():
-                                            print 'VELOCITY INPUT {}'.format(DirectionLabels[j])
-                                            print dv.sdata[dv.compute_slices]
-                                        print 'SCALAR INPUT'
-                                        print _input[i]
-                                        print 'SCALAR REFERENCE'
-                                        print reference[i]
-                                        print 'SCALAR OUTPUT'
-                                        print output[i]
-                                        print 'ABS(REF - OUT)'
+                                            print('VELOCITY INPUT {}'.format(DirectionLabels[j]))
+                                            print(dv.sdata[dv.compute_slices])
+                                        print('SCALAR INPUT')
+                                        print(_input[i])
+                                        print('SCALAR REFERENCE')
+                                        print(reference[i])
+                                        print('SCALAR OUTPUT')
+                                        print(output[i])
+                                        print('ABS(REF - OUT)')
                                         npw.fancy_print(di, replace_values={
                                                         (lambda a: a < max_tol): '.'})
-                                        print
+                                        print()
                                         msg = 'Output did not match reference output for component {} at time step {}.'
                                         msg += '\n > max computed distance was {}.'.format(max_di)
                                         msg += '\n > max tolerence was set to {} ({} eps).'.format(
@@ -267,7 +265,7 @@ class TestScalesAdvectionOperator(object):
     def perform_tests(self):
         # Scales is only 3D
         self._test(dim=3, is_inplace=True)
-        print
+        print()
 
     def test_3D(self):
         self._test(dim=3, is_inplace=True)
diff --git a/hysop/operator/tests/test_solenoidal_projection.py b/hysop/operator/tests/test_solenoidal_projection.py
index e9eed17478d3f24080c09ce27a7b11b3b6a5e499..95274b94a165449c9af4fefab9f2081dcc087cd9 100644
--- a/hysop/operator/tests/test_solenoidal_projection.py
+++ b/hysop/operator/tests/test_solenoidal_projection.py
@@ -1,5 +1,8 @@
 import random, primefac, scipy
-from hysop.deps import it, sm, random, np
+import itertools as it
+import numpy as np
+import sympy as sm
+
 from hysop.constants import HYSOP_REAL, Implementation, BoxBoundaryCondition
 from hysop.defaults import VelocityField, VorticityField
 from hysop.testsenv import __ENABLE_LONG_TESTS__, __HAS_OPENCL_BACKEND__
@@ -49,7 +52,7 @@ class TestSolenoidalProjectionOperator(object):
         def gen_fn(nb_components, left_boundaries, right_boundaries):
             assert len(left_boundaries)==len(right_boundaries)==nb_components
             fns = ()
-            for i in xrange(nb_components):
+            for i in range(nb_components):
                 if polynomial:
                     fn, y = make_multivariate_polynomial(origin, end,
                                                         left_boundaries[i], right_boundaries[i],
@@ -129,27 +132,27 @@ class TestSolenoidalProjectionOperator(object):
             U     = VelocityField(domain=domain, name='U', dtype=dtype)
             U0    = VelocityField(domain=domain, name='U0', dtype=dtype)
             U1    = VelocityField(domain=domain, name='U1', dtype=dtype)
-            divU  = U.div(name='divU', register_object=False)
-            divU0 = U0.div(name='divU0', register_object=False)
-            divU1 = U1.div(name='divU1', register_object=False)
+            divU  = U.div(name='divU')
+            divU0 = U0.div(name='divU0')
+            divU1 = U1.div(name='divU1')
 
             self._test_one(shape=shape, dtype=dtype, polynomial=polynomial,
                     domain=domain, U=U, U0=U0, U1=U1,
                     divU=divU, divU0=divU0, divU1=divU1)
             if (max_runs is not None) and (i==max_runs):
-                missing = ((4**(dim+1) - 1) / 3) - i
-                print
-                print '>> MAX RUNS ACHIEVED FOR {}D DOMAINS -- SKIPING {} OTHER BOUNDARY CONDITIONS <<'.format(dim, missing)
-                print
-                print
+                missing = ((4**(dim+1) - 1) // 3) - i
+                print()
+                print('>> MAX RUNS ACHIEVED FOR {}D DOMAINS -- SKIPING {} OTHER BOUNDARY CONDITIONS <<'.format(dim, missing))
+                print()
+                print()
                 break
         else:
-            assert (i==(4**(dim+1)-1)/3), (i+1, (4**(dim+1)-1)/3)
-            print
-            print '>> TESTED ALL {}D BOUNDARY CONDITIONS <<'.format(dim)
-            print
-            print
-    
+            assert (i==(4**(dim+1)-1)//3), (i+1, (4**(dim+1)-1)//3)
+            print()
+            print('>> TESTED ALL {}D BOUNDARY CONDITIONS <<'.format(dim))
+            print()
+            print()
+
     @staticmethod
     def __random_init(data, coords, component, dtype):
         if is_fp(dtype):
@@ -170,9 +173,9 @@ class TestSolenoidalProjectionOperator(object):
     def _test_one(self, shape, dtype, polynomial,
             domain, U, U0, U1, divU, divU0, divU1):
 
-        print '\nTesting {}D SolenoidalProjection: dtype={} shape={}, polynomial={}, bc=[{}]'.format(
-                3, dtype.__name__, shape, polynomial, domain.format_boundaries())
-        print ' >Building U = U0 + U1 = curl(Psi) + grad(Phi)...'
+        print('\nTesting {}D SolenoidalProjection: dtype={} shape={}, polynomial={}, bc=[{}]'.format(
+                3, dtype.__name__, shape, polynomial, domain.format_boundaries()))
+        print(' >Building U = U0 + U1 = curl(Psi) + grad(Phi)...')
 
         # U = curl(Psi) + grad(Phi)
         Psi = VorticityField(velocity=U, name='Psi')
@@ -202,19 +205,19 @@ class TestSolenoidalProjectionOperator(object):
         def format_expr(e):
             return truncate_expr(round_expr(e, 3), 80)
 
-        print ' >Input analytic velocity:'
+        print(' >Input analytic velocity:')
         for (Ui,ui) in zip(U, analytic_solutions['Us']):
-            print '   *{} = {}'.format(Ui.pretty_name, format_expr(ui))
-        print '   *div(U) = {}'.format(format_expr(analytic_solutions['divUs'][0]))
-        print ' >Expected velocity vector potential (solenoidal projection) is:'
+            print('   *{} = {}'.format(Ui.pretty_name, format_expr(ui)))
+        print('   *div(U) = {}'.format(format_expr(analytic_solutions['divUs'][0])))
+        print(' >Expected velocity vector potential (solenoidal projection) is:')
         for (Ui,ui) in zip(U0, analytic_solutions['U0s']):
-            print '   *{} = {}'.format(Ui.pretty_name, format_expr(ui))
-        print '   *div(U0) = {}'.format(format_expr(analytic_solutions['divU0s'][0]))
-        print ' >Expected velocity scalar potential is:'
+            print('   *{} = {}'.format(Ui.pretty_name, format_expr(ui)))
+        print('   *div(U0) = {}'.format(format_expr(analytic_solutions['divU0s'][0])))
+        print(' >Expected velocity scalar potential is:')
         for (Ui,ui) in zip(U1, analytic_solutions['U1s']):
-            print '   *{} = {}'.format(Ui.pretty_name, format_expr(ui))
-        print '   *div(U1) = {}'.format(format_expr(analytic_solutions['divU1s'][0]))
-        print ' >Testing all available implementations:'
+            print('   *{} = {}'.format(Ui.pretty_name, format_expr(ui)))
+        print('   *div(U1) = {}'.format(format_expr(analytic_solutions['divU1s'][0])))
+        print(' >Testing all available implementations:')
 
         implementations = SolenoidalProjection.implementations()
 
@@ -228,24 +231,24 @@ class TestSolenoidalProjectionOperator(object):
                              name='projection_{}'.format(str(impl).lower()))
             if impl is Implementation.PYTHON:
                 msg='   *Python FFTW: '
-                print msg,
+                print(msg, end=' ')
                 yield SolenoidalProjection(**base_kwds)
             elif impl is Implementation.OPENCL:
                 msg='   *OpenCl CLFFT: '
-                print msg
+                print(msg)
                 for cl_env in iter_clenv():
                     msg='     |platform {}, device {}'.format(cl_env.platform.name.strip(),
                                                           cl_env.device.name.strip())
-                    print msg,
+                    print(msg, end=' ')
                     yield SolenoidalProjection(cl_env=cl_env, **base_kwds)
                 msg='   *OpenCl FFTW: '
-                print msg
+                print(msg)
                 cpu_envs = tuple(iter_clenv(device_type='cpu'))
                 if cpu_envs:
                     for cl_env in cpu_envs:
                         msg='     |platform {}, device {}'.format(cl_env.platform.name.strip(),
                                                                   cl_env.device.name.strip())
-                        print msg,
+                        print(msg, end=' ')
                         yield SolenoidalProjection(cl_env=cl_env, enforce_implementation=False, **base_kwds)
             else:
                 msg='Unknown implementation to test {}.'.format(impl)
@@ -288,13 +291,13 @@ class TestSolenoidalProjectionOperator(object):
                 divU0out = tuple( data.get().handle.copy() for data in du0_div.data )
 
                 s = npw.prod(du.space_step)
-                print '[divU={}, divU0={}]'.format(
+                print('[divU={}, divU0={}]'.format(
                         npw.sqrt(npw.sum(divUout[0]**2)*s),
-                        npw.sqrt(npw.sum(divU0out[0]**2)*s)),
+                        npw.sqrt(npw.sum(divU0out[0]**2)*s)))
 
                 self._check_output(impl, op, Uref, divUref, U0ref, divU0ref,
                                              Uout, divUout, U0out, divU0out)
-                print
+                print()
 
     @classmethod
     def _check_output(cls, impl, op,
@@ -316,18 +319,18 @@ class TestSolenoidalProjectionOperator(object):
                 iname = '{}{}'.format(name,i)
                 mask = npw.isfinite(field)
                 if not mask.all():
-                    print
-                    print field
-                    print
-                    print field[~mask]
-                    print
+                    print()
+                    print(field)
+                    print()
+                    print(field[~mask])
+                    print()
                     msg = msg0.format(iname)
                     raise ValueError(msg)
 
         for (out_buffers, ref_buffers, name) in zip((Uout, U0out, divUout, divU0out),
                                                     (Uref, U0ref, divUref, divU0ref),
                                                     ('U', 'U0', 'divU', 'divU0')):
-            print '| {}=('.format(name),
+            print('     | {}=('.format(name), end=' ')
             for i, (fout,fref) in enumerate(zip(out_buffers, ref_buffers)):
                 iname = '{}{}'.format(name,i)
                 assert fout.dtype == fref.dtype, iname
@@ -341,50 +344,50 @@ class TestSolenoidalProjectionOperator(object):
                 except ValueError:
                     deps = npw.nan
                 if (deps < 10000):
-                    print '{}eps'.format(deps),
+                    print('{}eps'.format(deps), end=' ')
                     if (i!=len(out_buffers)-1):
-                        print ',',
+                        print(',',end=' ')
                     continue
 
                 has_nan = npw.any(npw.isnan(fout))
                 has_inf = npw.any(npw.isinf(fout))
 
-                print
-                print
-                print 'Test output comparisson for {} failed for component {}:'.format(name, i)
-                print ' *has_nan: {}'.format(has_nan)
-                print ' *has_inf: {}'.format(has_inf)
-                print ' *dinf={} ({} eps)'.format(dinf, deps)
-                print
+                print()
+                print()
+                print('Test output comparisson for {} failed for component {}:'.format(name, i))
+                print(' *has_nan: {}'.format(has_nan))
+                print(' *has_inf: {}'.format(has_inf))
+                print(' *dinf={} ({} eps)'.format(dinf, deps))
+                print()
                 if cls.enable_debug_mode:
-                    print 'REFERENCE INPUTS:'
+                    print('REFERENCE INPUTS:')
                     for (j,w) in enumerate(Uref):
-                        print 'U_{}'.format(j)
-                        print w
-                        print
+                        print('U_{}'.format(j))
+                        print(w)
+                        print()
                     if name in ('divU', 'U0', 'divU0'):
-                        print 'REFERENCE OUTPUT FOR {}:'.format(name)
-                        print '{}_{}'.format(name, i)
-                        print fref
-                        print
-                        print 'OPERATOR {} OUTPUT FOR {}:'.format(op.name.upper(), name)
-                        print
-                        print '{}_{}'.format(name, i)
-                        print fout
-                        print
+                        print('REFERENCE OUTPUT FOR {}:'.format(name))
+                        print('{}_{}'.format(name, i))
+                        print(fref)
+                        print()
+                        print('OPERATOR {} OUTPUT FOR {}:'.format(op.name.upper(), name))
+                        print()
+                        print('{}_{}'.format(name, i))
+                        print(fout)
+                        print()
                     else:
-                        print 'MODIFIED INPUTS:'
+                        print('MODIFIED INPUTS:')
                         for (j,w) in enumerate(out_buffers):
-                            print '{}_{}'.format(name, j)
-                            print w
-                            print
-                        print
-                    print
+                            print('{}_{}'.format(name, j))
+                            print(w)
+                            print()
+                        print()
+                    print()
 
                 msg = 'Test failed for {} on component {} for implementation {}.'
                 msg = msg.format(name, i, impl)
                 raise RuntimeError(msg)
-            print ')',
+            print(')', end=' ')
 
 
     def test_3d_float32(self, **kwds):
diff --git a/hysop/operator/tests/test_spectral_curl.py b/hysop/operator/tests/test_spectral_curl.py
index 577b997340689a40b4209af5d917a585f7ac05be..1bc46c03ca48ec6cc368e5b9a9d300b3613cb501 100644
--- a/hysop/operator/tests/test_spectral_curl.py
+++ b/hysop/operator/tests/test_spectral_curl.py
@@ -1,6 +1,7 @@
-import random
-import primefac
-from hysop.deps import it, sm, random
+import random, primefac
+import itertools as it
+import sympy as sm
+
 from hysop.constants import HYSOP_REAL, BoxBoundaryCondition
 from hysop.defaults import VelocityField, VorticityField
 from hysop.testsenv import __ENABLE_LONG_TESTS__, __HAS_OPENCL_BACKEND__
@@ -58,7 +59,7 @@ class TestSpectralCurl(object):
 
         def gen_Fin():
             Fins = ()
-            for i in xrange(nb_components):
+            for i in range(nb_components):
                 if polynomial:
                     fin, y = make_multivariate_polynomial(origin, end,
                                                           lboundaries[i], rboundaries[i],
@@ -131,19 +132,19 @@ class TestSpectralCurl(object):
             self._test_one(shape=shape, dim=dim, dtype=dtype,
                            domain=domain, Fin=Fin, Fout=Fout, polynomial=polynomial)
             if (max_runs is not None) and (i == max_runs):
-                missing = ((4**(dim+1) - 1) / 3) - i
-                print
-                print '>> MAX RUNS ACHIEVED FOR {}D DOMAINS -- SKIPING {} OTHER BOUNDARY CONDITIONS <<'.format(
-                    dim, missing)
-                print
-                print
+                missing = ((4**(dim+1) - 1) // 3) - i
+                print()
+                print('>> MAX RUNS ACHIEVED FOR {}D DOMAINS -- SKIPING {} OTHER BOUNDARY CONDITIONS <<'.format(
+                    dim, missing))
+                print()
+                print()
                 break
         else:
-            assert (i == (4**(dim+1)-1)/3), (i+1, (4**(dim+1)-1)/3)
-            print
-            print '>> TESTED ALL {}D BOUNDARY CONDITIONS <<'.format(dim)
-            print
-            print
+            assert (i == (4**(dim+1)-1)//3), (i+1, (4**(dim+1)-1)//3)
+            print()
+            print('>> TESTED ALL {}D BOUNDARY CONDITIONS <<'.format(dim))
+            print()
+            print()
 
     def _test_one(self, shape, dim, dtype,
                   domain, Fout, Fin, polynomial):
@@ -162,14 +163,14 @@ class TestSpectralCurl(object):
 
         msg = '\nTesting {}D Curl: dtype={} shape={} polynomial={}, bc=[{}]'.format(
             dim, dtype.__name__, shape, polynomial, domain.format_boundaries())
-        print msg
-        print ' >Input analytic field is (truncated):'
+        print(msg)
+        print(' >Input analytic field is (truncated):')
         for (fin, fins) in zip(Fin.fields, analytic_expressions['Fin']):
-            print '  *{}(x) = {}'.format(fin.pretty_name, format_expr(fins))
-        print ' >Expected output analytic field is:'
+            print('   *{}(x) = {}'.format(fin.pretty_name, format_expr(fins)))
+        print(' >Expected output analytic field is:')
         for (fout, fouts) in zip(Fout.fields, analytic_expressions['Fout']):
-            print '  *{}(x) = {}'.format(fout.pretty_name, format_expr(fouts))
-        print ' >Testing all implementations:'
+            print('   *{}(x) = {}'.format(fout.pretty_name, format_expr(fouts)))
+        print(' >Testing all implementations:')
 
         implementations = SpectralCurl.implementations().keys()
         variables = {Fout: shape, Fin: shape}
@@ -180,15 +181,15 @@ class TestSpectralCurl(object):
                              name='curl_{}'.format(str(impl).lower()))
             if impl is Implementation.PYTHON:
                 msg = '   *Python FFTW: '
-                print msg,
+                print(msg, end=' ')
                 yield SpectralCurl(**base_kwds)
             elif impl is Implementation.OPENCL:
                 msg = '   *OpenCl CLFFT: '
-                print msg
+                print(msg)
                 for cl_env in iter_clenv():
-                    msg = '     |platform {}, device {}'.format(cl_env.platform.name.strip(),
+                    msg = '     |platform {}, device {}:'.format(cl_env.platform.name.strip(),
                                                                 cl_env.device.name.strip())
-                    print msg,
+                    print(msg, end=' ')
                     yield SpectralCurl(cl_env=cl_env, **base_kwds)
             else:
                 msg = 'Unknown implementation to test {}.'.format(impl)
@@ -222,7 +223,7 @@ class TestSpectralCurl(object):
                 Wout = tuple(data.get().handle.copy() for data in dFin.data)
                 Uout = tuple(data.get().handle.copy() for data in dFout.data)
                 self._check_output(impl, op, Fin_ref, Fout_ref, Wout, Uout)
-                print
+                print()
 
     @classmethod
     def _check_output(cls, impl, op, Fin_ref, Fout_ref, Wout, Uout):
@@ -237,11 +238,11 @@ class TestSpectralCurl(object):
                 iname = '{}{}'.format(name, i)
                 mask = npw.isfinite(field)
                 if not mask.all():
-                    print
-                    print field
-                    print
-                    print field[~mask]
-                    print
+                    print()
+                    print(field)
+                    print()
+                    print(field[~mask])
+                    print()
                     msg = msg0.format(iname)
                     raise ValueError(msg)
 
@@ -268,42 +269,42 @@ class TestSpectralCurl(object):
                     except:
                         deps = 'inf'
                 if (deps < 10000):
-                    print '{}eps, '.format(deps),
+                    print('{}eps, '.format(deps), end=' ')
                     continue
 
-                print
-                print
-                print 'Test output comparisson for {} failed for component {}:'.format(name, i)
-                print ' *has_nan: {}'.format(has_nan)
-                print ' *has_inf: {}'.format(has_inf)
-                print ' *dinf={} ({} eps)'.format(dinf, deps)
-                print
+                print()
+                print()
+                print('Test output comparisson for {} failed for component {}:'.format(name, i))
+                print(' *has_nan: {}'.format(has_nan))
+                print(' *has_inf: {}'.format(has_inf))
+                print(' *dinf={} ({} eps)'.format(dinf, deps))
+                print()
                 if cls.enable_debug_mode:
-                    print 'REFERENCE INPUTS:'
+                    print('REFERENCE INPUTS:')
                     for (i, w) in enumerate(Fin_ref):
-                        print 'Fin{}'.format(i)
-                        print w
-                        print
+                        print('Fin{}'.format(i))
+                        print(w)
+                        print()
                     if (name == 'Fout'):
-                        print 'REFERENCE OUTPUT:'
+                        print('REFERENCE OUTPUT:')
                         for (i, u) in enumerate(Fout_ref):
-                            print 'Fout{}'.format(i)
-                            print u
-                            print
-                        print
-                        print 'OPERATOR {} OUTPUT:'.format(op.name.upper())
-                        print
+                            print('Fout{}'.format(i))
+                            print(u)
+                            print()
+                        print()
+                        print('OPERATOR {} OUTPUT:'.format(op.name.upper()))
+                        print()
                         for (i, u) in enumerate(Uout):
-                            print 'Fout{}'.format(i)
-                            print u
-                            print
+                            print('Fout{}'.format(i))
+                            print(u)
+                            print()
                     else:
-                        print 'MODIFIED INPUTS:'
+                        print('MODIFIED INPUTS:')
                         for (i, w) in enumerate(Wout):
-                            print 'Fin{}'.format(i)
-                            print w
-                            print
-                    print
+                            print('Fin{}'.format(i))
+                            print(w)
+                            print()
+                    print()
 
                 msg = 'Test failed for {} on component {} for implementation {}.'
                 msg = msg.format(name, i, impl)
diff --git a/hysop/operator/tests/test_spectral_derivative.py b/hysop/operator/tests/test_spectral_derivative.py
index e456c9e871e22a702fb0855dacca301d824ffe51..649e872c2eac2960e966d198f397d61072b82687 100644
--- a/hysop/operator/tests/test_spectral_derivative.py
+++ b/hysop/operator/tests/test_spectral_derivative.py
@@ -1,7 +1,10 @@
 """
 Test gradient of fields.
 """
-from hysop.deps import it, sm, random
+import random
+import itertools as it
+import sympy as sm
+
 from hysop.constants import HYSOP_REAL, Backend, BoundaryCondition, BoxBoundaryCondition
 from hysop.methods import SpaceDiscretization
 from hysop.testsenv import __ENABLE_LONG_TESTS__, __HAS_OPENCL_BACKEND__
@@ -135,19 +138,19 @@ class TestSpectralDerivative(object):
                            max_derivative=max_derivative)
 
             if (max_runs is not None) and (i == max_runs):
-                missing = ((4**(dim+1) - 1) / 3) - i
-                print
-                print '>> MAX RUNS ACHIEVED FOR {}D DOMAINS -- SKIPING {} OTHER BOUNDARY CONDITIONS <<'.format(
-                    dim, missing)
-                print
-                print
+                missing = ((4**(dim+1) - 1) // 3) - i
+                print()
+                print('>> MAX RUNS ACHIEVED FOR {}D DOMAINS -- SKIPING {} OTHER BOUNDARY CONDITIONS <<'.format(
+                    dim, missing))
+                print()
+                print()
                 break
         else:
-            assert (i == (4**(dim+1)-1)/3), (i+1, (4**(dim+1)-1)/3)
-            print
-            print '>> TESTED ALL {}D BOUNDARY CONDITIONS <<'.format(dim)
-            print
-            print
+            assert (i == (4**(dim+1)-1)//3), (i+1, (4**(dim+1)-1)//3)
+            print()
+            print('>> TESTED ALL {}D BOUNDARY CONDITIONS <<'.format(dim))
+            print()
+            print()
 
     def _test_one(self, shape, dim, dtype,
                   domain, F, polynomial, max_derivative):
@@ -175,7 +178,7 @@ class TestSpectralDerivative(object):
         msg += '\n >Input analytic functions (truncated):'
         msg += '\n   *{}(x,t) = {}'.format(F.pretty_name, format_expr(Fs))
         msg += '\n >Testing derivatives:'
-        print msg
+        print(msg)
 
         for idx in sorted(symbolic_dvars.keys(), key=lambda x: sum(x)):
             xvars = symbolic_dvars[idx]
@@ -188,7 +191,7 @@ class TestSpectralDerivative(object):
                                          space_symbols=domain.frame.coords)
             dFs = analytic_expressions['dF'][idx]
             fdFs = analytic_functions['dF'][idx]
-            print '   *{}'.format(dF.pretty_name)
+            print('   *{}'.format(dF.pretty_name))
 
             variables = {F: shape, dF: shape}
 
@@ -199,21 +202,21 @@ class TestSpectralDerivative(object):
                                  testing=True)
                 if impl is Implementation.PYTHON:
                     msg = '     |Python: '
-                    print msg,
+                    print(msg, end=' ')
                     op = SpectralSpaceDerivative(**base_kwds)
                     yield op.to_graph()
-                    print
+                    print()
                 elif impl is Implementation.OPENCL:
                     msg = '     |Opencl: '
-                    print msg
+                    print(msg)
                     for cl_env in iter_clenv():
                         msg = '        >platform {}, device {}:'.format(
                             cl_env.platform.name.strip(),
                             cl_env.device.name.strip())
-                        print msg,
+                        print(msg, end=' ')
                         op = SpectralSpaceDerivative(cl_env=cl_env, **base_kwds)
                         yield op.to_graph()
-                        print
+                        print()
                 else:
                     msg = 'Unknown implementation to test {}.'.format(impl)
                     raise NotImplementedError(msg)
@@ -276,45 +279,45 @@ class TestSpectralDerivative(object):
                         deps = np.inf
                     if (deps <= 5*10**(nidx+2)):
                         if (j == 1):
-                            print '{}eps ({})'.format(deps, dinf),
+                            print('{}eps ({})'.format(deps, dinf), end=' ')
                         else:
-                            print '{}eps, '.format(deps),
+                            print('{}eps, '.format(deps), end=' ')
                         continue
 
-                print
-                print
-                print 'Test output comparisson for {} failed for component {}:'.format(name, i)
-                print ' *has_nan: {}'.format(has_nan)
-                print ' *has_inf: {}'.format(has_inf)
-                print ' *dinf={}'.format(dinf)
-                print ' *deps={}'.format(deps)
-                print
+                print()
+                print()
+                print('Test output comparisson for {} failed for component {}:'.format(name, i))
+                print(' *has_nan: {}'.format(has_nan))
+                print(' *has_inf: {}'.format(has_inf))
+                print(' *dinf={}'.format(dinf))
+                print(' *deps={}'.format(deps))
+                print()
                 if cls.enable_debug_mode:
-                    print 'REFERENCE INPUTS:'
+                    print('REFERENCE INPUTS:')
                     for (i, w) in enumerate(Fref):
-                        print 'F{}'.format(i)
-                        print w
-                        print
+                        print('F{}'.format(i))
+                        print(w)
+                        print()
                     if (name == 'dF'):
-                        print 'REFERENCE OUTPUT:'
+                        print('REFERENCE OUTPUT:')
                         for (i, u) in enumerate(dFref):
-                            print 'dF{}'.format(i)
-                            print u
-                            print
-                        print
-                        print 'OPERATOR {} OUTPUT:'.format(op.name.upper())
-                        print
+                            print('dF{}'.format(i))
+                            print(u)
+                            print()
+                        print()
+                        print('OPERATOR {} OUTPUT:'.format(op.name.upper()))
+                        print()
                         for (i, u) in enumerate(dFout):
-                            print 'dF{}'.format(i)
-                            print u
-                            print
+                            print('dF{}'.format(i))
+                            print(u)
+                            print()
                     else:
-                        print 'MODIFIED INPUTS:'
+                        print('MODIFIED INPUTS:')
                         for (i, w) in enumerate(Fout):
-                            print 'F{}'.format(i)
-                            print w
-                            print
-                    print
+                            print('F{}'.format(i))
+                            print(w)
+                            print()
+                    print()
 
                 msg = 'Test failed for {} on component {} for implementation {}.'.format(name,
                                                                                          i, impl)
diff --git a/hysop/operator/tests/test_transpose.py b/hysop/operator/tests/test_transpose.py
index 1fefbf291e425903263617573fc2922363b97353..fee7dc2281d01fc135726a555aebaced2d100e7b 100644
--- a/hysop/operator/tests/test_transpose.py
+++ b/hysop/operator/tests/test_transpose.py
@@ -1,5 +1,7 @@
 import random
-from hysop.deps import np, it
+import itertools as it
+import numpy as np
+
 from hysop.testsenv import __ENABLE_LONG_TESTS__, __HAS_OPENCL_BACKEND__
 from hysop.testsenv import opencl_failed, iter_clenv
 from hysop.tools.contexts import printoptions
@@ -66,9 +68,9 @@ class TestTransposeOperator(object):
         domain = Box(length=(1.0,)*dim)
         for nb_components in (2,):
             Fin = Field(domain=domain, name='Fin', dtype=dtype,
-                        nb_components=nb_components, register_object=False)
+                        nb_components=nb_components)
             Fout = Field(domain=domain, name='Fout', dtype=dtype,
-                         nb_components=nb_components, register_object=False)
+                         nb_components=nb_components)
             for axes in all_axes:
                 for shape in shapes:
                     self._test_one(shape=shape, axes=axes,
@@ -94,8 +96,8 @@ class TestTransposeOperator(object):
                   dim, dtype, is_inplace,
                   domain, Fin, Fout):
 
-        print '\nTesting {}D Transpose: inplace={} dtype={} shape={} axes={}'.format(
-            dim, is_inplace, dtype.__name__, shape, axes)
+        print('\nTesting {}D Transpose: inplace={} dtype={} shape={} axes={}'.format(
+            dim, is_inplace, dtype.__name__, shape, axes))
         if is_inplace:
             fin, fout = Fin, Fin
             variables = {fin: shape}
@@ -108,7 +110,7 @@ class TestTransposeOperator(object):
         assert ref_impl in implementations
 
         # Compute reference solution
-        print '  *reference PYTHON implementation.'
+        print('  *reference PYTHON implementation.')
         transpose = Transpose(fields=fin, output_fields=fout,
                               variables=variables, axes=axes,
                               implementation=ref_impl).build()
@@ -128,10 +130,10 @@ class TestTransposeOperator(object):
         for i, (in_, out_) in enumerate(zip(refin, refout)):
             ref = np.transpose(in_, axes=axes)
             if (ref != out_).any():
-                print
-                print np.transpose(in_, axes=axes)
-                print
-                print out_
+                print()
+                print(np.transpose(in_, axes=axes))
+                print()
+                print(out_)
                 msg = 'Reference did not match numpy for component {}.'.format(i)
                 raise RuntimeError(msg)
 
@@ -145,7 +147,7 @@ class TestTransposeOperator(object):
                 for cl_env in iter_clenv():
                     msg = '  *platform {}, device {}'.format(cl_env.platform.name.strip(),
                                                              cl_env.device.name.strip())
-                    print msg
+                    print(msg)
                     yield Transpose(cl_env=cl_env, **base_kwds)
             else:
                 msg = 'Unknown implementation to test {}.'.format(impl)
@@ -179,21 +181,21 @@ class TestTransposeOperator(object):
                 has_nan = np.any(np.isnan(out))
                 has_inf = np.any(np.isinf(out))
 
-                print
-                print 'Test output comparisson failed for component {}:'.format(i)
-                print ' *has_nan: {}'.format(has_nan)
-                print ' *has_inf: {}'.format(has_inf)
-                print
-                print 'REFERENCE INPUT:'
-                print refin
-                print
-                print 'REFERENCE OUTPUT:'
-                print refout
-                print
-                print 'OPERATOR {} OUTPUT:'.format(op.name.upper())
-                print out
-                print
-                print
+                print()
+                print('Test output comparisson failed for component {}:'.format(i))
+                print(' *has_nan: {}'.format(has_nan))
+                print(' *has_inf: {}'.format(has_inf))
+                print()
+                print('REFERENCE INPUT:')
+                print(refin)
+                print()
+                print('REFERENCE OUTPUT:')
+                print(refout)
+                print()
+                print('OPERATOR {} OUTPUT:'.format(op.name.upper()))
+                print(out)
+                print()
+                print()
 
             msg = 'Test failed on component {} for implementation {}.'.format(i, impl)
             raise RuntimeError(msg)
@@ -210,7 +212,7 @@ class TestTransposeOperator(object):
 
     def test_upper_dimensions_out_of_place(self):
         if __ENABLE_LONG_TESTS__:
-            for i in xrange(5, 9):
+            for i in range(5, 9):
                 self._test(dim=i, dtype=None, is_inplace=False,
                            size_min=3, size_max=4, naxes=1)
 
@@ -226,7 +228,7 @@ class TestTransposeOperator(object):
 
     def test_upper_dimensions_inplace(self):
         if __ENABLE_LONG_TESTS__:
-            for i in xrange(5, 9):
+            for i in range(5, 9):
                 self._test(dim=i, dtype=None, is_inplace=True,
                            size_min=3, size_max=4, naxes=1)
 
@@ -236,7 +238,7 @@ class TestTransposeOperator(object):
         self._test(dim=3, is_inplace=False, dtype=np.complex64)
         if __ENABLE_LONG_TESTS__:
             self._test(dim=4, is_inplace=False, dtype=np.float64)
-            for i in xrange(5, 9):
+            for i in range(5, 9):
                 self._test(dim=i, dtype=None, is_inplace=False,
                            size_min=3, size_max=4, naxes=1)
 
@@ -245,7 +247,7 @@ class TestTransposeOperator(object):
         self._test(dim=3, is_inplace=True, dtype=np.complex128)
         if __ENABLE_LONG_TESTS__:
             self._test(dim=4, is_inplace=True, dtype=np.float32)
-            for i in xrange(5, 9):
+            for i in range(5, 9):
                 self._test(dim=i, dtype=None, is_inplace=True,
                            size_min=3, size_max=4, naxes=1)
 
diff --git a/hysop/operator/tests/test_velocity_correction.py b/hysop/operator/tests/test_velocity_correction.py
index b1aec222c2c1d4d349769836a601a35a1c1b21bd..850ae3f2d9679ad46e23f4752ed85b6b979a4894 100644
--- a/hysop/operator/tests/test_velocity_correction.py
+++ b/hysop/operator/tests/test_velocity_correction.py
@@ -2,7 +2,9 @@
 Test of velocity correction
 """
 import random
-from hysop.deps import it, sm, random
+import itertools as it
+import sympy as sm
+
 from hysop.constants import HYSOP_REAL
 from hysop.testsenv import __ENABLE_LONG_TESTS__, __HAS_OPENCL_BACKEND__
 from hysop.testsenv import opencl_failed, iter_clenv
@@ -90,25 +92,25 @@ class TestFlowRateCorrection(object):
 
         domain = Box(length=(1,)*dim)
         velo = Field(domain=domain, name='velo', dtype=dtype,
-                     nb_components=3, register_object=False)
+                     nb_components=3)
         vorti = Field(domain=domain, name='vorti', dtype=dtype,
-                      nb_components=3, register_object=False)
+                      nb_components=3)
 
         self._test_one(shape=shape, dim=dim, dtype=dtype,
                        domain=domain, velo=velo, vorti=vorti)
 
     def _test_one(self, shape, dim, dtype,
                   domain, velo, vorti):
-        print '\nTesting {}D FlowRateCorrection: dtype={} shape={}'.format(
-                dim, dtype.__name__, shape)
+        print('\nTesting {}D FlowRateCorrection: dtype={} shape={}'.format(
+                dim, dtype.__name__, shape))
 
         self.t.value = random.random()
         self.dt.value = random.random()
         self.flowrate.value = npw.random.random(size=(3, ))
-        print ' >Parameter t has been set to {}.'.format(self.t())
-        print ' >Parameter dt has been set to {}.'.format(self.dt())
-        print ' >Flowrate : {}.'.format(self.flowrate())
-        print ' >Testing all implementations:'
+        print(' >Parameter t has been set to {}.'.format(self.t()))
+        print(' >Parameter dt has been set to {}.'.format(self.dt()))
+        print(' >Flowrate : {}.'.format(self.flowrate()))
+        print(' >Testing all implementations:')
 
         implementations = FlowRateCorrection.implementations()
         variables = {velo: shape, vorti: shape}
@@ -119,10 +121,10 @@ class TestFlowRateCorrection(object):
                              name='vorticity_correction_{}'.format(str(impl).lower()))
             if impl is Implementation.PYTHON:
                 msg='   *Python: '
-                print msg,
+                print(msg, end=' ')
                 yield FlowRateCorrection(flowrate=self.flowrate,
                                          **base_kwds)
-                print
+                print()
             else:
                 msg='Unknown implementation to test {}.'.format(impl)
                 raise NotImplementedError(msg)
@@ -144,11 +146,11 @@ class TestFlowRateCorrection(object):
                     iname = 'F{}'.format(i)
                     mask = npw.isfinite(field)
                     if not mask.all():
-                        print
-                        print field
-                        print
-                        print field[~mask]
-                        print
+                        print()
+                        print(field)
+                        print()
+                        print(field[~mask])
+                        print()
                         msg = msg0.format(iname)
                         raise ValueError(msg)
 
@@ -167,14 +169,13 @@ class TestFlowRateCorrection(object):
                 dinf = npw.max(dist)
                 deps = int(npw.ceil(dinf/eps))
                 if (deps < 10):
-                    print '{}eps, '.format(deps),
-                    return
-                print
-                print
-                print 'Test output comparisson failed for flowrate:'
-                print ' *dinf={} ({} eps)'.format(dinf, deps)
-                print ' *flowrate={} ({})'.format(flowrate, ref_flowrate)
-                print
+                    print('{}eps, '.format(deps), end=' ')
+                    continue
+                print()
+                print('Test output comparisson failed for flowrate:')
+                print(' *dinf={} ({} eps)'.format(dinf, deps))
+                print(' *flowrate={} ({})'.format(flowrate, ref_flowrate))
+                print()
                 msg = 'Test failed on flowrate for implementation {}.'.format(impl)
                 raise RuntimeError(msg)
 
diff --git a/hysop/operator/transpose.py b/hysop/operator/transpose.py
index 48cb847b88ddcd357f95ba29b5faf9002cf83ec6..b80b8e8253e22034ed51e0640192978bc596db3b 100644
--- a/hysop/operator/transpose.py
+++ b/hysop/operator/transpose.py
@@ -22,10 +22,10 @@ class TranspositionNotImplementedError(NotImplementedError):
 
 class Transpose(ComputationalGraphNodeGenerator):
     """
-    Operator generator for inplace and out of place field transposition 
+    Operator generator for inplace and out of place field transposition
     and permutations in general.
-    
-    Available implementations are: 
+
+    Available implementations are:
         *python (numpy based transpose, n-dimensional)
         *opencl (opencl codegen based transpose up to 16D)
 
@@ -33,10 +33,10 @@ class Transpose(ComputationalGraphNodeGenerator):
     graph node generator generates an operator per supplied field,
     possibly on different implementation backends.
 
-    See hysop.operator.base.transpose_operator.TransposeOperatorBase for operator backend 
+    See hysop.operator.base.transpose_operator.TransposeOperatorBase for operator backend
     implementation interface.
     """
-    
+
     @classmethod
     def implementations(cls):
         from hysop.backend.host.python.operator.transpose   import PythonTranspose
@@ -46,23 +46,36 @@ class Transpose(ComputationalGraphNodeGenerator):
                 Implementation.OPENCL: OpenClTranspose
         }
         return _implementations
-    
+
     @classmethod
     def default_implementation(cls):
         msg='Transpose has no default implementation, '
         msg+='implementation should match the discrete field topology backend.'
         raise RuntimeError(msg)
-    
+
+    @debug
+    def __new__(cls, fields, variables, axes,
+                output_fields=None,
+                implementation=None,
+                name=None,
+                base_kwds=None,
+                **kwds):
+        base_kwds = {} if (base_kwds is None) else base_kwds
+        return super(Transpose, cls).__new__(cls, name=name,
+                candidate_input_tensors=None,
+                candidate_output_tensors=None,
+                **base_kwds)
+
     @debug
     def __init__(self, fields, variables, axes,
                 output_fields=None,
-                implementation=None, 
+                implementation=None,
                 name=None,
-                base_kwds=None, 
+                base_kwds=None,
                 **kwds):
         """
         Initialize a Transpose operator generator operating on CartesianTopology topologies.
-        
+
         Parameters
         ----------
         fields: Field, list or tuple of Fields
@@ -71,7 +84,7 @@ class Transpose(ComputationalGraphNodeGenerator):
         output_fields: Field, list or tuple of Fields, optional
             Output continuous fields where the results are stored.
             Transposed shapes should match the input.
-            By default output_fields are the same as input_fields 
+            By default output_fields are the same as input_fields
             resulting in inplace transpositions.
             Input and output are matched by order int list/tuple.
         variables: dict
@@ -79,24 +92,24 @@ class Transpose(ComputationalGraphNodeGenerator):
         axes: tuple of ints, or array like of tuples, or dict of (tuple, TranspositionState).
             Permutation of axes in numpy notations (as a tuple of ints).
             Axe dim-1 is the contiguous axe, axe 0 has the greatest stride in memory.
-            
+
             It can also be an array like of axes and the operator implementation will choose
-            the most suitable axe permutation scheme in this list. 
+            the most suitable axe permutation scheme in this list.
 
             If a dictionnary is provided, this gives the operator implementation to choose
             the most suitable axe permutation scheme with the knowledge of the target
             transposition state as well.
 
             All fields on the same backend will perform the same permutation.
-            Operator chosen permutation can be retrieved by using generated 
+            Operator chosen permutation can be retrieved by using generated
             operator 'axes' attribute.
 
         implementation: Implementation, optional, defaults to None
             target implementation, should be contained in available_implementations().
-            If implementation is set and topology does not match backend, 
+            If implementation is set and topology does not match backend,
                 RuntimeError will be raised on _generate.
             If None, implementation will be set according to topologies backend,
-            different implementations may be choosen for different Fields if 
+            different implementations may be choosen for different Fields if
             defined on different backends.
         name: string
             prefix for generated operator names
@@ -105,9 +118,9 @@ class Transpose(ComputationalGraphNodeGenerator):
             Base class keywords arguments.
             If None, an empty dict will be passed.
         kwds:
-            keywords arguments that will be passed towards implementation 
+            keywords arguments that will be passed towards implementation
             transpose operator __init__.
-        
+
         Notes
         -----
         Out of place transpose will always be faster to process.
@@ -128,10 +141,10 @@ class Transpose(ComputationalGraphNodeGenerator):
         A Transpose operator implementation should support the TransposeOperatorBase
         interface (see hysop.operator.base.transpose_operator.TransposeOperatorBase).
 
-        This ComputationalGraphNodeFrontend will generate a operator for each 
+        This ComputationalGraphNodeFrontend will generate a operator for each
         input and output ScalarField pair.
 
-        All implementations should raise TranspositionNotImplementedError is the user supplied 
+        All implementations should raise TranspositionNotImplementedError is the user supplied
         parameters leads to unimplemented or unsupported transposition features.
         """
         input_fields = to_tuple(fields)
@@ -142,19 +155,19 @@ class Transpose(ComputationalGraphNodeGenerator):
                                     for (ifield,ofield) in zip(input_fields, output_fields) )
         else:
             output_fields = tuple( ifield for ifield in input_fields )
-        
+
         check_instance(input_fields,  tuple, values=Field)
         check_instance(output_fields, tuple, values=Field, size=len(input_fields))
-        
-        candidate_input_tensors  = filter(lambda x: x.is_tensor, input_fields)
-        candidate_output_tensors = filter(lambda x: x.is_tensor, output_fields)
-        
-        base_kwds = base_kwds or dict()
-        super(Transpose,self).__init__(name=name, 
+
+        candidate_input_tensors  = tuple(filter(lambda x: x.is_tensor, input_fields))
+        candidate_output_tensors = tuple(filter(lambda x: x.is_tensor, output_fields))
+
+        base_kwds = {} if (base_kwds is None) else base_kwds
+        super(Transpose,self).__init__(name=name,
                 candidate_input_tensors=candidate_input_tensors,
                 candidate_output_tensors=candidate_output_tensors,
                 **base_kwds)
-        
+
         # expand tensors
         ifields, ofields = (), ()
         for (ifield, ofield) in zip(input_fields, output_fields):
@@ -162,7 +175,7 @@ class Transpose(ComputationalGraphNodeGenerator):
             msg+='and field {} of shape {}.'
             if (ifield.is_tensor ^ ofield.is_tensor):
                 if ifield.is_tensor:
-                    msg=msg.format(ifield.short_description(), ifield.shape, 
+                    msg=msg.format(ifield.short_description(), ifield.shape,
                                    ofield.short_description(), '(1,)')
                 else:
                     msg=msg.format(ifield.short_description(), '(1,)',
@@ -174,13 +187,13 @@ class Transpose(ComputationalGraphNodeGenerator):
                 raise RuntimeError(msg)
             ifields += ifield.fields
             ofields += ofield.fields
-        
+
         input_fields  = ifields
         output_fields = ofields
 
         check_instance(input_fields,  tuple, values=ScalarField)
         check_instance(output_fields, tuple, values=ScalarField, size=len(input_fields))
-        
+
         check_instance(variables, dict, keys=Field, values=CartesianTopologyDescriptors)
         check_instance(base_kwds, dict, keys=str)
 
@@ -227,7 +240,7 @@ class Transpose(ComputationalGraphNodeGenerator):
                 msg += 'input_field {} dimension {}.'
                 msg.format(input_field, idim, input_fields[0].name, dim)
                 raise ValueError(msg)
-        
+
         candidate_axes = self._check_axes(axes, dim)
 
         self.input_fields   = input_fields
@@ -236,14 +249,14 @@ class Transpose(ComputationalGraphNodeGenerator):
         self.candidate_axes = candidate_axes
         self.implementation = implementation
         self.kwds           = kwds
-    
+
     @staticmethod
     def _check_axes(axes, dim):
         msg = 'Unkown type for axes parameter.'
         if isinstance(axes, dict):
             candidate_axes = axes
         elif isinstance(axes, (list,tuple,set,frozenset)):
-            if isinstance(axes[0], (int,long)):
+            if isinstance(axes[0], int):
                 msg='Axes should be ordered but got a set.'
                 assert not isinstance(axes, (set,frozenset)), msg
                 candidate_axes = (tuple(axes),)
@@ -255,10 +268,10 @@ class Transpose(ComputationalGraphNodeGenerator):
         else:
             raise TypeError(msg)
         del axes
-            
+
         check_instance(candidate_axes, dict, keys=tuple)
-        for (axes, target_tstate) in candidate_axes.iteritems():
-            check_instance(axes, tuple, values=(int,long))
+        for (axes, target_tstate) in candidate_axes.items():
+            check_instance(axes, tuple, values=int)
             check_instance(target_tstate, TranspositionState[dim], allow_none=True)
             if len(axes)!=dim:
                 msg='All axes should have the dimension of the transposed fields {} '
@@ -323,7 +336,7 @@ class Transpose(ComputationalGraphNodeGenerator):
             raise TypeError(msg)
 
         return op_cls
-    
+
     @debug
     def _generate(self):
         nodes = []
@@ -331,15 +344,15 @@ class Transpose(ComputationalGraphNodeGenerator):
             src_topo = ComputationalGraphNode.get_topo_descriptor(self.variables, ifield)
             dst_topo = ComputationalGraphNode.get_topo_descriptor(self.variables, ofield)
             kwds     = self.kwds.copy()
-            
+
             TransposeOp = self._get_op_and_check_implementation(src_topo, dst_topo)
             axes = TransposeOp.get_preferred_axes(src_topo, dst_topo, self.candidate_axes)
 
             variables = { ifield: src_topo }
             variables[ofield] = dst_topo
-                
+
             # instantiate operator
-            node = TransposeOp(input_field=ifield, output_field=ofield, 
+            node = TransposeOp(input_field=ifield, output_field=ofield,
                                variables=variables, axes=axes,
                                **kwds)
             nodes.append(node)
diff --git a/hysop/operator/vorticity_absorption.py b/hysop/operator/vorticity_absorption.py
index 7a2541ed80819c1a6ccf3b38e81a5d03b7b09850..1269d952189b3bf6705bc8b9bece1c04bac75a5c 100644
--- a/hysop/operator/vorticity_absorption.py
+++ b/hysop/operator/vorticity_absorption.py
@@ -1,7 +1,8 @@
 """
 @file vorticity_absorption.py
 """
-from hysop.deps import sm
+import sympy as sm
+
 from hysop.constants import Implementation
 from hysop.core.graph.computational_node_frontend import ComputationalGraphNodeFrontend
 from hysop.tools.types import check_instance
@@ -33,6 +34,21 @@ class VorticityAbsorption(ComputationalGraphNodeFrontend):
     def default_implementation(cls):
         return Implementation.PYTHON
 
+    def __new__(cls, velocity, vorticity,
+                 dt, flowrate, start_coord, variables,
+                 custom_filter=None,
+                 implementation=None, **kwds):
+        return super(VorticityAbsorption, cls).__new__(cls,
+            velocity=velocity,
+            vorticity=vorticity,
+            dt=dt,
+            flowrate=flowrate,
+            start_coord=start_coord,
+            variables=variables,
+            custom_filter=custom_filter,
+            implementation=implementation,
+            **kwds)
+
     def __init__(self, velocity, vorticity,
                  dt, flowrate, start_coord, variables,
                  custom_filter=None,
diff --git a/hysop/operators.py b/hysop/operators.py
index 526f0b2a2610dd1a9000e736406168a2d321877b..44b7939f7db7065a8aa42a94d88cb6b5a0a0b20e 100644
--- a/hysop/operators.py
+++ b/hysop/operators.py
@@ -35,14 +35,14 @@ from hysop.operator.custom import CustomOperator
 from hysop.operator.convergence import Convergence
 from hysop.operator.spatial_filtering import SpatialFilter
 
-from hysop.operator.derivative import SpaceDerivative,                  \
-    SpectralSpaceDerivative,          \
-    FiniteDifferencesSpaceDerivative, \
-    MultiSpaceDerivatives
-
-from hysop.operator.min_max import MinMaxFieldStatistics,                       \
-    MinMaxFiniteDifferencesDerivativeStatistics, \
-    MinMaxSpectralDerivativeStatistics
+from hysop.operator.derivative import (SpaceDerivative,
+    SpectralSpaceDerivative,
+    FiniteDifferencesSpaceDerivative,
+    MultiSpaceDerivatives)
+
+from hysop.operator.min_max import (MinMaxFieldStatistics,
+    MinMaxFiniteDifferencesDerivativeStatistics,
+    MinMaxSpectralDerivativeStatistics)
 
 from hysop.operator.gradient import Gradient, MinMaxGradientStatistics
 from hysop.operator.curl import Curl, SpectralCurl
diff --git a/hysop/parameters/buffer_parameter.py b/hysop/parameters/buffer_parameter.py
index 0a100f5b82e45af543546cdf66adc8a81113e1fd..637dba25b048691e3dba4c6cab34b20583d0887a 100644
--- a/hysop/parameters/buffer_parameter.py
+++ b/hysop/parameters/buffer_parameter.py
@@ -1,6 +1,6 @@
-
+import numpy as np
 import sympy as sm
-from hysop.deps import np
+
 from hysop.constants import HYSOP_REAL
 from hysop.tools.types import check_instance
 from hysop.parameters.tensor_parameter import TensorParameter, Parameter
@@ -11,17 +11,17 @@ class BufferParameter(Parameter):
     A parameter is a value that may change as simulation advances.
     """
 
-    def __new__(cls, shape=None, dtype=HYSOP_REAL, initial_value=None, symbol=None, 
+    def __new__(cls, shape=None, dtype=HYSOP_REAL, initial_value=None, symbol=None,
                         **kwds):
         parameter_types = (np.ndarray,)
-        initial_value = TensorParameter._compute_initial_value(shape=shape, 
+        initial_value = TensorParameter._compute_initial_value(shape=shape,
                 dtype=dtype, initial_value=initial_value)
-        obj = super(BufferParameter,cls).__new__(cls, 
+        obj = super(BufferParameter,cls).__new__(cls,
                 parameter_types=parameter_types, initial_value=initial_value, **kwds)
         obj._symbol = None
         obj._update_symbol(symbol)
         return obj
-    
+
     def reallocate_buffer(self, shape, dtype, initial_value=None, symbol=None):
         self._value  = TensorParameter._compute_initial_value(shape, dtype, initial_value)
         self._update_symbol(symbol)
@@ -33,11 +33,11 @@ class BufferParameter(Parameter):
             symbol.bind_memory_selfect(memory_selfect=self._value, force=True)
             self._symbol = symbol
         elif (self._symbol is None):
-            self._symbol = HostSymbolicBuffer(memory_object=self._value, name=self.var_name, 
+            self._symbol = HostSymbolicBuffer(memory_object=self._value, name=self.var_name,
                     pretty_name=self.pretty_name, var_name=self.var_name)
         else:
             self._symbol.bind_memory_object(memory_object=self._value, force=True)
-    
+
     def long_description(self):
         ss = '''\
 BufferParameter[name={}]
@@ -47,7 +47,7 @@ BufferParameter[name={}]
            self.shape,
            self.dtype)
         return ss
-    
+
     def short_description(self):
         attrs=('name', 'shape', 'dtype')
         info = []
@@ -60,14 +60,14 @@ BufferParameter[name={}]
         ss = 'BufferParameter[{}]'
         ss = ss.format(attrs)
         return ss
-    
+
     def _get_value_impl(self):
         """Return a read-only reference on the underlying data buffer."""
         assert (self._value is not None)
         view = self._value.view()
         view.flags.writeable = False
         return view.view()
-    
+
     def _set_value_impl(self, value):
         """Given value will be copied into internal buffer."""
         assert (self._value is not None)
@@ -81,11 +81,11 @@ BufferParameter[name={}]
             msg=msg.format(self.dtype, value.dtype)
             raise ValueError(msg)
         self._value[...] = value
-    
+
     def iterviews(self):
         """Iterate over all parameters views to yield scalarparameters."""
         yield (None,self)
-    
+
     def _get_shape(self):
         """Get parameter shape."""
         return self._value.shape if (self._value is not None) else None
diff --git a/hysop/parameters/default_parameters.py b/hysop/parameters/default_parameters.py
index 2e920f0c510d197b576aa835ff761e5a42253c89..d1ffec6ff1eb81da5ef11402c76205c050e29de8 100644
--- a/hysop/parameters/default_parameters.py
+++ b/hysop/parameters/default_parameters.py
@@ -26,7 +26,7 @@ def EnstrophyParameter(name=None, pretty_name=None, **kwds):
 
 def KineticEnergyParameter(name=None, pretty_name=None, **kwds):
     name        = first_not_None(name, 'kinetic_energy')
-    pretty_name = first_not_None(pretty_name, u'E\u2096')
+    pretty_name = first_not_None(pretty_name, 'Eâ‚™')
     return ScalarParameter(name=name, pretty_name=pretty_name, **kwds)
 
 def VolumicIntegrationParameter(name=None, pretty_name=None, field=None, dtype=None, **kwds):
@@ -36,7 +36,7 @@ def VolumicIntegrationParameter(name=None, pretty_name=None, field=None, dtype=N
         dtype       = first_not_None(dtype, field.dtype)
     dtype = first_not_None(dtype, HYSOP_REAL)
     name += '_v'
-    pretty_name += u'\u1d65'.encode('utf-8')
+    pretty_name += 'áµ¥'
     if (field.nb_components>1):
         return TensorParameter(name=name, pretty_name=pretty_name,
                                shape=(field.nb_components,),
diff --git a/hysop/parameters/parameter.py b/hysop/parameters/parameter.py
index f09da60d8fd6dba4678cd57cf60cee96ed3925b0..499beb96fcebe21685a68032e64da6397a3252cd 100644
--- a/hysop/parameters/parameter.py
+++ b/hysop/parameters/parameter.py
@@ -10,12 +10,11 @@ from hysop.tools.handle import TaggedObject
 from hysop.tools.variable import Variable, VariableTag
 
 
-class Parameter(TaggedObject, VariableTag):
+class Parameter(TaggedObject, VariableTag, metaclass=ABCMeta):
     """
     A parameter is a value of a given type that may change value as simulation advances.
     Parameters are only available on the host backend.
     """
-    __metaclass__ = ABCMeta
 
     def __new__(cls, name, parameter_types,
                 initial_value=None, allow_None=False,
@@ -62,7 +61,7 @@ class Parameter(TaggedObject, VariableTag):
             Return allowed parameter types for this parameter.
         """
         check_instance(name, str)
-        check_instance(pretty_name, (str, unicode), allow_none=True)
+        check_instance(pretty_name, str, allow_none=True)
         check_instance(var_name, str, allow_none=True)
         check_instance(allow_None, bool)
         check_instance(const, bool)
@@ -87,8 +86,6 @@ class Parameter(TaggedObject, VariableTag):
                                             tag_prefix='p', variable_kind=Variable.PARAMETER, **kwds)
 
         pretty_name = first_not_None(pretty_name, name)
-        if isinstance(pretty_name, unicode):
-            pretty_name = pretty_name.encode('utf-8')
 
         var_name = first_not_None(var_name, name)
 
@@ -105,6 +102,13 @@ class Parameter(TaggedObject, VariableTag):
 
         return obj
 
+    def __init__(self, name, parameter_types,
+                initial_value=None, allow_None=False,
+                quiet=False, const=False,
+                pretty_name=None, var_name=None,
+                is_view=False, **kwds):
+        super(Parameter, self).__init__(tag_prefix='p', variable_kind=Variable.PARAMETER, **kwds)
+
     def __eq__(self, other):
         return (self is other)
 
diff --git a/hysop/parameters/scalar_parameter.py b/hysop/parameters/scalar_parameter.py
index 98b3b62afb4286ee6bdc9d80d0aa8db61599e023..176c27ba94ebcdcc54bfdbc6cab11bb4fc13c35d 100644
--- a/hysop/parameters/scalar_parameter.py
+++ b/hysop/parameters/scalar_parameter.py
@@ -1,13 +1,13 @@
+import numpy as np
 
-from hysop.deps import np
 from hysop.constants import HYSOP_REAL
 from hysop.tools.types import check_instance
 from hysop.parameters.tensor_parameter import TensorParameter
 
 class ScalarParameter(TensorParameter):
     """
-    A scalar parameter is TensorParameter with its shape 
-    set to (1,). A parameter is a value that may change 
+    A scalar parameter is TensorParameter with its shape
+    set to (1,). A parameter is a value that may change
     as simulation advances.
     """
 
@@ -15,7 +15,10 @@ class ScalarParameter(TensorParameter):
         assert 'shape' not in kwds, 'Cannot specify shape for a scalar parameter.'
         obj = super(ScalarParameter,cls).__new__(cls, name, shape=(1,), **kwds)
         return obj
-    
+
+    def __init__(self, name, **kwds):
+        super(ScalarParameter, self).__init__(name, shape=(1,), **kwds)
+
     def iterviews(self):
         """Iterate over all parameters views to yield scalarparameters."""
         yield (None,self)
@@ -37,16 +40,13 @@ class ScalarParameter(TensorParameter):
     def __int__(self):
         """Return value as an int."""
         return int(self.value)
-    def __long__(self):
-        """Return value as a long."""
-        return long(self.value)
     def __float__(self):
         """Return value as a float."""
         return float(self.value)
     def __complex__(self):
         """Return value as a complex."""
         return complex(self.value)
-    
+
     def long_description(self):
         ss = '''\
 ScalarParameter[name={}]
@@ -60,7 +60,7 @@ ScalarParameter[name={}]
            self.min_value, self.max_value,
            self.ignore_nans, self.value)
         return ss
-    
+
     def short_description(self):
         attrs=('name', 'dtype', 'value')
         info = []
diff --git a/hysop/parameters/tensor_parameter.py b/hysop/parameters/tensor_parameter.py
index 9ac2c0767244c69d2368b85dad6b9f8c9c1e7451..89fb6664bfc81a7f000086c9f2101fd6601f5025 100644
--- a/hysop/parameters/tensor_parameter.py
+++ b/hysop/parameters/tensor_parameter.py
@@ -1,5 +1,6 @@
+import numpy as np
+import sympy as sm
 
-from hysop.deps import np, sm
 from hysop.constants import HYSOP_REAL
 from hysop.tools.types import check_instance, first_not_None
 from hysop.tools.numerics import is_signed, is_unsigned, is_fp, is_complex
@@ -12,8 +13,8 @@ class TensorParameter(Parameter):
     that may change value as simulation advances.
     """
 
-    def __new__(cls, name, shape, dtype=HYSOP_REAL, 
-            pretty_name=None, initial_value=None, 
+    def __new__(cls, name, shape, dtype=HYSOP_REAL,
+            pretty_name=None, initial_value=None,
             min_value=None, max_value=None, ignore_nans=False, **kwds):
         """
         Create or get an existing TensorParameter with a specific name
@@ -25,7 +26,7 @@ class TensorParameter(Parameter):
             A name for the parameter that uniquely identifies it.
         pretty_name: string
             A pretty name for the parameter.
-        shape: array like of ints 
+        shape: array like of ints
             Shape of this TensorParameter.
         dtype: type convertible to np.dtype, optional
             Underlying dtype of this TensorParameter.
@@ -41,10 +42,10 @@ class TensorParameter(Parameter):
             Set this to True to allow NaN values.
         kwds: dict
             Base class arguments.
-            
+
         Attributes
         ----------
-        shape: array like of ints 
+        shape: array like of ints
             Shape of this TensorParameter.
         dtype: type convertible to np.dtype
             Underlying dtype of this TensorParameter.
@@ -55,13 +56,13 @@ class TensorParameter(Parameter):
         ignore_nans: bool
             True if this TensorParameter can have NaN values.
         """
-        
+
         if ('allow_None' in kwds) and (kwds['allow_None'] is True):
             msg='A TensorParameter cannot be allowed to be set to None.'
             raise ValueError(msg)
 
         check_instance(name, (str, SymbolicBase))
-        
+
         if isinstance(name, SymbolicBase):
             symbol      = name
             name        = symbol._name
@@ -70,25 +71,25 @@ class TensorParameter(Parameter):
             symbol = None
             pretty_name = first_not_None(pretty_name, name)
         check_instance(name, str)
-        check_instance(pretty_name, (str, unicode))
-        check_instance(shape, (list,tuple), values=(int,long,np.integer), allow_none=True)
+        check_instance(pretty_name, str)
+        check_instance(shape, (list,tuple), values=(int,np.integer), allow_none=True)
         check_instance(ignore_nans, bool)
         assert (min_value is None) or (max_value is None) or (min_value <= max_value)
 
         parameter_types = (np.ndarray,)
         if is_signed(dtype):
-            parameter_types += ( np.int8, np.int16, np.int32, np.int64, int, long )
+            parameter_types += ( np.int8, np.int16, np.int32, np.int64, int)
         elif is_unsigned(dtype):
             parameter_types += ( np.uint8, np.uint16, np.uint32, np.uint64 )
         elif is_fp(dtype):
             parameter_types += ( np.float16, np.float32, np.float64, np.longdouble, float)
         elif is_complex(dtype):
             parameter_types += ( np.complex64, np.complex128, np.clongdouble, complex )
-        
+
         initial_value = cls._compute_initial_value(shape, dtype, initial_value,
                 min_value, max_value, ignore_nans)
 
-        obj = super(TensorParameter,cls).__new__(cls, 
+        obj = super(TensorParameter,cls).__new__(cls,
                 name=name, pretty_name=pretty_name,
                 parameter_types=parameter_types, allow_None=False,
                 initial_value=initial_value, **kwds)
@@ -96,7 +97,7 @@ class TensorParameter(Parameter):
         obj._min_value = min_value
         obj._max_value = max_value
         obj._ignore_nans = ignore_nans
-        
+
         from hysop.symbolic.parameter import SymbolicTensorParameter, SymbolicScalarParameter
         if obj.__class__ is TensorParameter:
             if symbol:
@@ -112,7 +113,15 @@ class TensorParameter(Parameter):
                 obj._symbol = SymbolicScalarParameter(parameter=obj)
 
         return obj
-        
+
+    def __init__(self, name, shape, dtype=HYSOP_REAL,
+            pretty_name=None, initial_value=None,
+            min_value=None, max_value=None, ignore_nans=False, **kwds):
+        super(TensorParameter, self).__init__(
+                name=name, pretty_name=pretty_name,
+                parameter_types=None, allow_None=False,
+                initial_value=initial_value, **kwds)
+
     @classmethod
     def _compute_initial_value(cls, shape, dtype, initial_value,
             min_value=None, max_value=None, ignore_nans=None):
@@ -151,36 +160,36 @@ class TensorParameter(Parameter):
 
     def _update_symbol(self):
         raise NotImplementedError
-        
+
     def view(self, idx, name=None, pretty_name=None, **kwds):
         """Take a view on a scalar contained in the Parameter."""
         assert (self._value is not None)
         initial_value = self._value[tuple(slice(k,k+1) for k in idx)]
         _name        = self.name + '_' + '_'.join(str(i) for i in idx)
-        _pretty_name = self.pretty_name + subscripts(ids=idx, sep='').encode('utf-8')
+        _pretty_name = self.pretty_name + subscripts(ids=idx, sep='')
         name        = first_not_None(name, _name)
         pretty_name = first_not_None(pretty_name, _pretty_name)
         if initial_value.size == 1:
-            from scalar_parameter import ScalarParameter
+            from hysop.parameters.scalar_parameter import ScalarParameter
             return ScalarParameter(name=name, pretty_name=pretty_name,
-                                   initial_value=initial_value.ravel(), dtype=self.dtype, 
-                                   min_value=self.min_value, max_value=self.max_value, 
-                                   ignore_nans=self.ignore_nans, 
-                                   const=self.const, quiet=self.quiet, 
+                                   initial_value=initial_value.ravel(), dtype=self.dtype,
+                                   min_value=self.min_value, max_value=self.max_value,
+                                   ignore_nans=self.ignore_nans,
+                                   const=self.const, quiet=self.quiet,
                                    is_view=True, **kwds)
         else:
             return TensorParameter(name=name, pretty_name=pretty_name,
                                    initial_value=initial_value, dtype=self.dtype, shape=initial_value.shape,
-                                   min_value=self.min_value, max_value=self.max_value, 
-                                   ignore_nans=self.ignore_nans, 
-                                   const=self.const, quiet=self.quiet, 
+                                   min_value=self.min_value, max_value=self.max_value,
+                                   ignore_nans=self.ignore_nans,
+                                   const=self.const, quiet=self.quiet,
                                    is_view=True, **kwds)
 
     def iterviews(self):
         """Iterate over all parameters views to yield scalarparameters."""
         for idx in np.ndindex(self.shape):
             yield (idx, self.view(idx))
-    
+
     @classmethod
     def __check_values(cls, a, min_value, max_value, ignore_nans, dtype):
         if np.isscalar(a):
@@ -202,7 +211,7 @@ class TensorParameter(Parameter):
                 assert np.all(np.max(a) <= max_value), 'max value constraint failed.'
 
     def check_values(self, a):
-        return self.__check_values(a, 
+        return self.__check_values(a,
                    dtype=self.dtype,
                    min_value=self.min_value,
                    max_value=self.max_value,
@@ -247,7 +256,7 @@ class TensorParameter(Parameter):
         even for ScalarParameter parameters.
         """
         return self._value.copy()
-    
+
     def _set_value_impl(self, value):
         """Given value will be copied into internal buffer."""
         assert (self._value is not None)
@@ -295,7 +304,7 @@ TensorParameter[name={}, pname={}]
            self.min_value, self.max_value,
            self.ignore_nans, self.value)
         return ss
-    
+
     def short_description(self):
         attrs=('name', 'pretty_name', 'shape', 'dtype', 'min_value', 'max_value')
         info = []
diff --git a/hysop/problem.py b/hysop/problem.py
index 6e16c55258b27f5b2c48ad63e95364b1a92235fc..b91699877bce7cd2211f8650f22e01d8e51f2292 100644
--- a/hysop/problem.py
+++ b/hysop/problem.py
@@ -1,4 +1,3 @@
-from __future__ import absolute_import
 import sys
 import datetime
 
@@ -202,7 +201,7 @@ class Problem(ComputationalGraph):
         msg += '\n  for {} iterations ({}s per iteration) '
         msg = msg.format(datetime.timedelta(seconds=round(avg_time)),
                          avg_time, max(simu.current_iteration+1, 1),
-                         avg_time/max(simu.current_iteration+1, 1))
+                         avg_time / max(simu.current_iteration+1, 1))
         vprint_banner(msg, spacing=True, at_border=2)
 
         simu.finalize()
diff --git a/hysop/simulation.py b/hysop/simulation.py
index 9c5eb491ba01b088768e8cfad5e11401d86f1bf9..6738824bc721e8d006c1deb3fc637c35486feb85 100644
--- a/hysop/simulation.py
+++ b/hysop/simulation.py
@@ -9,7 +9,7 @@ Usage
     # Initialize the simulation
     s = Simulation(start=0.2, end=1., time_step=0.1)
     # do some initialisation stuff with operators,
-    # print initial state ...
+    # print(initial state ...)
 
     # time loop
     s.initialize() # --> ready to start
@@ -28,11 +28,12 @@ Usage
     s.finalize()
     io.apply(s)
 """
+import sys, os
 import numpy as np
 from abc import ABCMeta, abstractmethod
+
 from hysop import dprint, vprint
 from hysop.constants import HYSOP_REAL
-from hysop.deps import sys, os
 from hysop.parameters.scalar_parameter import ScalarParameter
 from hysop.tools.types import first_not_None, to_set, check_instance
 from hysop.tools.numpywrappers import npw
@@ -410,7 +411,7 @@ class Simulation(object):
         """
         msg = "== Iteration : {0:3d}, from t = {1:6.8} to t = {2:6.8f} =="
         if verbose:
-            print msg.format(self.current_iteration, self.t(), self.time)
+            print(msg.format(self.current_iteration, self.t(), self.time))
         else:
             vprint(msg.format(self.current_iteration, self.t(), self.time))
 
@@ -471,16 +472,3 @@ class Simulation(object):
         s += str(self.current_iteration) + ', max number of iterations : '
         s += str(self.max_iter)
         return s
-
-    def should_dump(self, frequency, with_last=False):
-        import warnings
-        from hysop.tools.warning import HysopDeprecationWarning
-        msg = 'This method will be deprecated soon. Please use io_params.should_dump(simulation, with_last) instead.'
-        warnings.warn(msg, HysopDeprecationWarning)
-
-        dump = (frequency >= 0) and (with_last and self._next_is_last)
-        if (frequency >= 0):
-            dump |= self.is_time_of_interest
-        if (frequency > 0):
-            dump |= ((self.current_iteration % frequency) == 0)
-        return dump
diff --git a/hysop/symbolic/__init__.py b/hysop/symbolic/__init__.py
index 3351927873e2c60548aa0b11b77895683b828825..bdf15020c14ff94f0f4494b23d92dbf4c791eae4 100644
--- a/hysop/symbolic/__init__.py
+++ b/hysop/symbolic/__init__.py
@@ -19,40 +19,40 @@ dtime_symbol = TimeSymbol('dt')
 
 space_symbols = tuple(SpaceSymbol(
     name='x{}'.format(i),
-    pretty_name=xsymbol+subscript(i), 
+    pretty_name=xsymbol+subscript(i),
     var_name='x{}'.format(i),
     latex_name='x_{{{}}}'.format(i))
-        for i in xrange(16))
+        for i in range(16))
 """Dummy symbols representing space."""
 
 freq_symbols = tuple(SpaceSymbol(
     name='nu{}'.format(i),
-    pretty_name=freq_symbol+subscript(i), 
+    pretty_name=freq_symbol+subscript(i),
     var_name='nu{}'.format(i),
     latex_name='{{\nu}}_{{{}}}'.format(i))
-        for i in xrange(16))
+        for i in range(16))
 """Dummy symbols representing wave numbers."""
 
 dspace_symbols = tuple(SpaceSymbol(
     name='dx_{}'.format(i),
-    pretty_name=u'd'+xsymbol+subscript(i), 
+    pretty_name='d'+xsymbol+subscript(i),
     var_name='dx_{}'.format(i),
     latex_name='dx_{{{}}}'.format(i))
-        for i in xrange(16))
+        for i in range(16))
 """Dummy symbols representing space infinitesimals."""
 
 local_indices_symbols = tuple(SpaceSymbol(
     name = 'i{}'.format(i),
-    pretty_name=u'i'+subscript(i), 
+    pretty_name='i'+subscript(i),
     var_name='i{}'.format(i),
     latex_name='i_{{{}}}'.format(i))
-        for i in xrange(16))
+        for i in range(16))
 """Dummy symbols local array indices."""
 
 global_indices_symbols = tuple(SpaceSymbol(
     name = 'I{}'.format(i),
-    pretty_name=u'I'+subscript(i), 
+    pretty_name='I'+subscript(i),
     var_name='I{}'.format(i),
     latex_name='I_{{{}}}'.format(i))
-        for i in xrange(16))
+        for i in range(16))
 """Dummy symbols global array indices."""
diff --git a/hysop/symbolic/array.py b/hysop/symbolic/array.py
index a1ae050f02ef99e0adb722e30bd6a632b834bbf4..c480787afdec346fa0d13ac335b3b11646cce333 100644
--- a/hysop/symbolic/array.py
+++ b/hysop/symbolic/array.py
@@ -27,14 +27,14 @@ class SymbolicMemoryObject(DummySymbolicScalar):
             msg='{}::{} memory object has not been bound yet.'
             msg=msg.format(self.__class__.__name__, self.name)
             raise RuntimeError(msg)
-    
+
     @property
     def memory_object(self):
         if (self._memory_object is None):
             msg='Symbolic memory_object {} has not been setup yet.'.format(self.name)
             raise RuntimeError(msg)
         return self._memory_object
-    
+
     @abstractmethod
     def bind_memory_object(self, memory_object, force=False):
         if (not force) and (self._memory_object is not None):
@@ -76,7 +76,7 @@ class SymbolicMemoryObject(DummySymbolicScalar):
         return '{}[dim={}, shape=[], strides={}, dtype={}]'.format(
                 self.__class__.__name__,
                 self.dim, self.shape, self.strides, self.dtype)
-    
+
     def __eq__(self, other):
         return id(self) == id(other)
     def __hash__(self):
@@ -100,14 +100,14 @@ class IndexedBuffer(sm.Indexed):
     def index(self):
         assert len(self.args)==2
         return self.args[1]
-    
+
     @property
     def ctype(self):
         return self.indexed_object.ctype
 
 class SymbolicArray(SymbolicMemoryObject):
     """
-    An array is considered to be indexed by local indices in 
+    An array is considered to be indexed by local indices in
     autogenerated vectorized code (in symbolic code generation
     framework).
     """
@@ -125,7 +125,7 @@ class SymbolicArray(SymbolicMemoryObject):
                     name=name, **kwds)
         msg='Dimension could not be deduced from memory_object, '
         msg+='please specify a dimension for symbolic array {}.'
-        msg=msg.format(name.encode('utf-8'))
+        msg=msg.format(name)
         if (memory_object is None):
             if (dim is None):
                 raise RuntimeError(msg)
@@ -142,7 +142,7 @@ class SymbolicArray(SymbolicMemoryObject):
     def __getitem__(self, key):
         msg='Symbolic array {} cannot be indexed.'.format(self.name)
         raise RuntimeError(msg)
-    
+
     def new_requirements(self):
         return self.ArrayRequirements(self)
 
@@ -168,17 +168,17 @@ class SymbolicArray(SymbolicMemoryObject):
 class SymbolicBuffer(SymbolicMemoryObject):
     """
     A buffer will not be indexed by local indices by default.
-    The user has to index a SymbolicBuffer so that it can be 
+    The user has to index a SymbolicBuffer so that it can be
     used for code generation.
     """
     def __getitem__(self, key):
         assert isinstance(key, (int, npw.integer, sm.Expr))
         return IndexedBuffer(self, key)
-    
+
     @property
     def buffer(self):
         return self._memory_object
-    
+
     def to_backend(self, backend):
         if (backend is Backend.HOST):
             self.__class__ = HostSymbolicBuffer
@@ -197,7 +197,7 @@ class SymbolicNdBuffer(SymbolicBuffer):
                     name=name, **kwds)
         msg='Dimension could not be deduced from memory_object, '
         msg+='please specify a dimension for symbolic array {}.'
-        msg=msg.format(name.encode('utf-8'))
+        msg=msg.format(name)
         if (memory_object is None):
             if (dim is None):
                 raise RuntimeError(msg)
@@ -209,16 +209,16 @@ class SymbolicNdBuffer(SymbolicBuffer):
             raise RuntimeError(msg)
         check_instance(dim, int)
         obj._dim = dim
-        obj._symbolic_strides = tuple(SymbolicConstant(name='s{}'.format(i), 
-            pretty_name='s'+subscript(i), 
-            dtype=npw.int32) for i in xrange(dim))
-        obj._symbolic_ghosts  = tuple(SymbolicConstant(name='g{}'.format(i), 
-            pretty_name='g'+subscript(i), 
-            dtype=npw.int32) for i in xrange(dim))
+        obj._symbolic_strides = tuple(SymbolicConstant(name='s{}'.format(i),
+            pretty_name='s'+subscript(i),
+            dtype=npw.int32) for i in range(dim))
+        obj._symbolic_ghosts  = tuple(SymbolicConstant(name='g{}'.format(i),
+            pretty_name='g'+subscript(i),
+            dtype=npw.int32) for i in range(dim))
         obj._allow_update_symbolic_constants = True
         obj.update_symbolic_constants(memory_object=memory_object, strides=strides, dtype=dtype, ghosts=ghosts, force=False)
         return obj
-    
+
     def bind_memory_object(self, memory_object, strides=None, dtype=None, ghosts=None, force=False, **kwds):
         super(SymbolicNdBuffer, self).bind_memory_object(memory_object=memory_object, force=force, **kwds)
         self.update_symbolic_constants(memory_object=memory_object, strides=strides, dtype=dtype, ghosts=ghosts, force=force)
@@ -231,7 +231,7 @@ class SymbolicNdBuffer(SymbolicBuffer):
             assert (dtype   is not None), 'Could not determine dtype from memory_object.'
             itemsize = dtype.itemsize
             strides = to_tuple(strides)
-            check_instance(strides, tuple, values=(int,long), size=self._dim)
+            check_instance(strides, tuple, values=int, size=self._dim)
             for ss,si in zip(self._symbolic_strides, strides):
                 assert si%itemsize == 0
                 ss.bind_value(si//itemsize, force=force)
@@ -239,18 +239,18 @@ class SymbolicNdBuffer(SymbolicBuffer):
             ghosts  = first_not_None(ghosts, getattr(memory_object, 'ghosts', None))
             assert (ghosts is not None), 'Could not determine ghosts from memory_object.'
             ghosts = to_tuple(ghosts)
-            check_instance(ghosts, tuple, values=(int,long), size=self._dim)
+            check_instance(ghosts, tuple, values=int, size=self._dim)
             for sg,gi in zip(self._symbolic_ghosts, ghosts):
                 sg.bind_value(gi, force=force)
-    
+
     @property
     def dim(self):
         return self._dim
-    
+
     @property
     def symbolic_strides(self):
         return self._symbolic_strides
-    
+
     @property
     def symbolic_ghosts(self):
         return self._symbolic_ghosts
@@ -262,8 +262,8 @@ class SymbolicNdBuffer(SymbolicBuffer):
         assert len(idx) == self._dim, idx
         offset = npw.dot(self._symbolic_strides, npw.add(idx, self._symbolic_ghosts))
         return self.__getitem__(key=offset)
-    
-    
+
+
 
 class SymbolicHostMemoryObject(object):
     def bind_memory_object(self, memory_object, **kwds):
@@ -296,34 +296,32 @@ class HostSymbolicNdBuffer(SymbolicHostMemoryObject, SymbolicNdBuffer):
     pass
 class OpenClSymbolicNdBuffer(SymbolicDeviceMemoryObject, SymbolicNdBuffer):
     pass
-    
+
 
 if __name__ == '__main__':
-    from hysop.deps import sm
     from hysop.core.arrays.all import default_host_array_backend
     a = npw.ones(shape=(10,10), dtype=npw.int8)
     b = default_host_array_backend.zeros(shape=(10,), dtype=npw.uint16)
-    
+
     A = HostSymbolicArray(a, name='a')
     B = b.as_symbolic_array('b')
-    
+
     C = HostSymbolicBuffer(a, name='c')
     D = b.as_symbolic_buffer('d')
 
-    print 7*A + 9*B + 10*C - 4*D
+    print(7*A + 9*B + 10*C - 4*D)
 
     assert A.array  is a
     assert B.array  is b.handle
     assert C.buffer is a
     assert D.buffer is b.handle
-    
+
     try:
         A[5]
         assert False
     except RuntimeError as e:
-        pass 
-
-    print C[5]
-    print D[C]
+        pass
 
-    print 
+    print(C[5])
+    print(D[C])
+    print()
diff --git a/hysop/symbolic/base.py b/hysop/symbolic/base.py
index 2e8f9a30c3c669079d7a284ec254d2060b2c0e15..16c92b17362757a418505fba565a46a98489e375 100644
--- a/hysop/symbolic/base.py
+++ b/hysop/symbolic/base.py
@@ -1,5 +1,5 @@
+import sympy as sm
 
-from hysop.deps import sm
 from hysop.tools.numpywrappers import npw
 from hysop.tools.types import first_not_None
 from hysop.symbolic import Symbol, Dummy, subscript
@@ -15,11 +15,14 @@ class ValueHolderI(object):
 
     def __new__(cls, *args, **kwds):
         return super(ValueHolderI, cls).__new__(cls, *args, **kwds)
-    
+
+    def __init__(self, *args, **kwds):
+        super(ValueHolderI, self).__init__(*args, **kwds)
+
     def get_holded_value(self):
         """Get holded value, defaults to None."""
         return None
-    
+
     @classmethod
     def get_holded_values(cls, expr):
         replace = {}
@@ -33,7 +36,7 @@ class ValueHolderI(object):
                     collect(e)
         collect(expr)
         return replace
-    
+
     @classmethod
     def replace_holded_values(cls, expr):
         replace = cls.get_holded_values(expr)
@@ -42,7 +45,9 @@ class ValueHolderI(object):
         except AttributeError:
             return expr
 
+
 class ScalarDataViewHolder(ValueHolderI):
+
     def __new__(cls, holded_data_ref=None, holded_data_access=None, **kwds):
         if isinstance(holded_data_ref, npw.ndarray) and \
                 (holded_data_access is None) and (holded_data_ref.size == 1):
@@ -52,6 +57,9 @@ class ScalarDataViewHolder(ValueHolderI):
         obj._holded_data_access = holded_data_access
         return obj
 
+    def __init__(self, holded_data_ref=None, holded_data_access=None, **kwds):
+        super(ScalarDataViewHolder, self).__init__(**kwds)
+
     def get_holded_value(self):
         if (self._holded_value_ref is None):
             return None
@@ -61,20 +69,25 @@ class ScalarDataViewHolder(ValueHolderI):
             return self._holded_data_access(self._holded_value_ref)
         else:
             return self._holded_value_ref[self._holded_data_access]
-    
+
     def _hashable_content(self):
         """See sympy.core.basic.Basic._hashable_content()"""
         hc = super(ScalarDataViewHolder, self)._hashable_content()
         hc += (id(self._holded_value_ref), self._holded_data_access,)
         return hc
 
+
 class ScalarBaseTag(object):
     """Tag for object that can be inserted as element of tensors."""
+
     def __new__(cls, idx=None, **kwds):
         obj = super(ScalarBaseTag, cls).__new__(cls, **kwds)
         obj._idx = idx
         return obj
-    
+
+    def __init__(self, idx=None, **kwds):
+        super(ScalarBaseTag, self).__init__(**kwds)
+
     @property
     def idx(self):
         return self._idx
@@ -85,6 +98,7 @@ class ScalarBaseTag(object):
         hc += (self._idx,)
         return hc
 
+
 class ScalarBase(ScalarDataViewHolder, ScalarBaseTag):
     """Base for symbolic scalars."""
     def __new__(cls, name, value=None, view=None, **kwds):
@@ -94,18 +108,22 @@ class ScalarBase(ScalarDataViewHolder, ScalarBaseTag):
         if (view is not None):
             assert (kwds.get('holded_data_access', None) is None)
             kwds['holded_data_access'] = view
-        obj = super(ScalarBase, cls).__new__(cls, name=name, 
-                **kwds)
-        obj._iterable = False 
+        obj = super(ScalarBase, cls).__new__(cls, name=name, **kwds)
+        obj._iterable = False
         return obj
+
+    def __init__(self, name, value=None, view=None, **kwds):
+        super(ScalarBase, self).__init__(name=name, **kwds)
+
     def vreplace(self):
         """Call ValueHolderI.replace_holded_values on self."""
         return self.replace_holded_values(self)
-    
+
     def __getitem__(self, key):
         assert key == 0
         return self
 
+
 class TensorBase(npw.ndarray):
     """
     Base for symbolic tensors.
@@ -114,17 +132,17 @@ class TensorBase(npw.ndarray):
     """
 
     __array_priority__ = 1.0
-    
+
     def __new__(cls, shape, init=None,
             name=None, pretty_name=None,
             scalar_cls=None, scalar_kwds=None, make_scalar_kwds=None,
-            value=None, set_read_only=True, 
-            dtype=object, **kwds):  
+            value=None, set_read_only=True,
+            dtype=object, **kwds):
         """Create a new TensorBase."""
 
         set_read_only = first_not_None(set_read_only, True)
         obj = super(TensorBase, cls).__new__(cls, shape=shape, dtype=dtype, **kwds)
-        
+
         if (init is None):
             assert (name is not None)
             pretty_name = first_not_None(pretty_name, name)
@@ -135,13 +153,13 @@ class TensorBase(npw.ndarray):
             vsep = '_'
             with obj.write_context():
                 for idx in npw.ndindex(*shape):
-                    name = '{}_{}'.format(name, vsep.join(str(i) 
+                    name = '{}_{}'.format(name, vsep.join(str(i)
                         for i in idx))
-                    pname = u'{}{}'.format(pretty_name.decode('utf-8'), u''.join(subscript(i) 
+                    pname = '{}{}'.format(pretty_name, ''.join(subscript(i)
                         for i in idx))
-                    vname = '{}_{}'.format(name, vsep.join(str(i) 
+                    vname = '{}_{}'.format(name, vsep.join(str(i)
                         for i in idx))
-                    lname = '{}_{{{}}}'.format(name, lsep.join(str(i) 
+                    lname = '{}_{{{}}}'.format(name, lsep.join(str(i)
                         for i in idx))
                     if (make_scalar_kwds is None):
                         skwds = scalar_kwds
@@ -153,16 +171,23 @@ class TensorBase(npw.ndarray):
                             assert k not in scalar_kwds, msg
                         idx_kwds.update(scalar_kwds)
                         skwds = idx_kwds
-                    obj[idx] = scalar_cls(name=name, 
-                                          pretty_name=pname, 
-                                          var_name=vname, 
+                    obj[idx] = scalar_cls(name=name,
+                                          pretty_name=pname,
+                                          var_name=vname,
                                           latex_name=lname,
-                                          value=value, idx=idx, 
+                                          value=value, idx=idx,
                                           **skwds)
         else:
             obj[...] = init
         return obj
 
+    def __init__(self, shape, init=None,
+            name=None, pretty_name=None,
+            scalar_cls=None, scalar_kwds=None, make_scalar_kwds=None,
+            value=None, set_read_only=True,
+            dtype=object, **kwds):
+        super(TensorBase, self).__init__(**kwds)
+
     def latex(self, matrix='b', with_packages=False):
         """
         Return a latex representation of this tensor.
@@ -170,21 +195,21 @@ class TensorBase(npw.ndarray):
         assert self.ndim <= 2
         ss = ''
         if with_packages:
-            ss +=  '\usepackage{amsmath}'
+            ss +=  r'\usepackage{amsmath}'
         ss += '\n$$'
-        ss += '\n\\begin{{{}matrix}}'.format(matrix)
-        for i in xrange(self.shape[0]):
+        ss += '\n'+r'\begin{{{}matrix}}'.format(matrix)
+        for i in range(self.shape[0]):
             if self.ndim==1:
                 ss += '\n    ' + _latex(self[i]) + ' \\\\'
             else:
                 ss += '\n    ' + ' & '.join(_latex(val) for val in self[i]) + ' \\\\'
-        ss += '\n\\end{{{}matrix}}'.format(matrix)
+        ss += '\n'+r'\end{{{}matrix}}'.format(matrix)
         ss += '\n$$'
         return ss
 
     def sstr(self):
         return self.elementwise_fn(sstr)
-    
+
     def strrepr(self):
         return self.elementwise_fn(sstrrepr)
 
@@ -197,7 +222,7 @@ class TensorBase(npw.ndarray):
         else:
             a = self
         return npw.array2string(a, formatter={'all': lambda x: str(x)}, separator='  ')
-    
+
     def __repr__(self):
         return npw.array2string(self, formatter={'all': lambda x: sstrrepr(x)})
 
@@ -218,7 +243,7 @@ class TensorBase(npw.ndarray):
 
     def elementwise_fn(self, fn):
         """
-        Apply function fn on each element of the tensor and 
+        Apply function fn on each element of the tensor and
         return the result as a Tensor.
         """
         if self.ndim:
@@ -228,7 +253,7 @@ class TensorBase(npw.ndarray):
         else:
             data = fn(self.tolist())
         return data
-    
+
     def __hash__(self):
         """Hash this object by its id."""
         return id(self)
@@ -236,15 +261,15 @@ class TensorBase(npw.ndarray):
     def diff(self, *symbols, **assumptions):
         """Elementwise sympy.diff()."""
         return self.elementwise_fn(lambda x: sm.diff(x, *symbols, **assumptions))
-    
+
     def freeze(self):
         """Apply elementwise UnevaluatedExpr on each scalar expressions."""
         return self.elementwise_fn(lambda x: UnevaluatedExpr(x))
-    
+
     def no_split(self):
         """Apply elementwise UnsplittedExpr on each scalar expressions."""
         return self.elementwise_fn(lambda x: UnsplittedExpr(x))
-    
+
     def simplify(self):
         """Elementwise sympy.simplify()."""
         return self.elementwise_fn(lambda x: sm.simplify(x))
@@ -253,7 +278,7 @@ class TensorBase(npw.ndarray):
     def xreplace(self, replacements):
         """Elementwise sympy.xreplace()."""
         replace = {}
-        for (k,v) in replacements.iteritems():
+        for (k,v) in replacements.items():
             if isinstance(k, npw.ndarray):
                 for idx in npw.ndindex(*k.shape):
                     kk = k[idx]
@@ -266,7 +291,7 @@ class TensorBase(npw.ndarray):
                         replace[kk] = vv
             elif (k is not None) and (v is not None):
                 replace[k] = v
-        
+
         data = npw.empty_like(self)
         for idx in npw.ndindex(*self.shape):
             data[idx] = self[idx].xreplace(replace)
@@ -284,6 +309,7 @@ class SymbolicScalar(ScalarBase, Symbol):
     """Symbolic scalar symbol."""
     pass
 
+
 class DummySymbolicScalar(ScalarBase, Dummy):
     """Symbolic scalar dummy symbol."""
     pass
@@ -296,6 +322,11 @@ class SymbolicTensor(TensorBase):
         return super(SymbolicTensor, cls).__new__(cls, name=name, shape=shape, init=init,
             scalar_cls=scalar_cls, **kwds)
 
+    def __init__(self, name, shape, init=None, scalar_cls=None, **kwds):
+        super(SymbolicTensor, self).__init__(name=name, shape=shape, init=init,
+            scalar_cls=scalar_cls, **kwds)
+
+
 class DummySymbolicTensor(TensorBase):
     """Dummy symbolic tensor symbol."""
     def __new__(cls, name, shape, init=None, scalar_cls=None, **kwds):
@@ -303,6 +334,11 @@ class DummySymbolicTensor(TensorBase):
         return super(DummySymbolicTensor, cls).__new__(cls, name=name, shape=shape, init=init,
                 scalar_cls=scalar_cls, **kwds)
 
+    def __init__(self, name, shape, init=None, scalar_cls=None, **kwds):
+        super(DummySymbolicTensor, self).__init__(name=name, shape=shape, init=init,
+                scalar_cls=scalar_cls, **kwds)
+
+
 def vreplace(expr):
     ValueHolderI.replace_holded_values(expr)
 
@@ -310,30 +346,30 @@ def vreplace(expr):
 if __name__ == '__main__':
     a = SymbolicScalar('a', value=sm.Symbol('A'))
     b = DummySymbolicScalar('a', value=sm.Symbol('B')) # different symbol with the same name
-    c = DummySymbolicScalar('a', value=[sm.Symbol('C0'),sm.Symbol('C1')], view=1) 
+    c = DummySymbolicScalar('a', value=[sm.Symbol('C0'),sm.Symbol('C1')], view=1)
     d = SymbolicScalar('a', value=sm.Symbol('D')) # same symbol as a (hashed by name)
-    print a + b + c + d
-    print ValueHolderI.replace_holded_values(a + b + c + d)
-    print
+    print(a + b + c + d)
+    print(ValueHolderI.replace_holded_values(a + b + c + d))
+    print()
     A = SymbolicTensor('A', shape=(3,3), value=12)
     B = SymbolicTensor('B', shape=(3,3), set_read_only=False, value=npw.eye(3,3))
     C = DummySymbolicTensor('C', shape=(8,))
-    print A
-    print B
-    print C
+    print(A)
+    print(B)
+    print(C)
     B[0,0] = 0
     B[1,0] = -1
-    print
-    print A
-    print B
-    print
-    print A.vreplace()
-    print B.vreplace()
-    print
-    print A*B
-    print
-    print (A.dot(B)).elementwise_fn(sm.cos)
-    print
-    print (A.dot(B)).elementwise_fn(sm.cos).diff(B[1,1])
-    print
-    print A.latex()
+    print()
+    print(A)
+    print(B)
+    print()
+    print(A.vreplace())
+    print(B.vreplace())
+    print()
+    print(A*B)
+    print()
+    print((A.dot(B)).elementwise_fn(sm.cos))
+    print()
+    print((A.dot(B)).elementwise_fn(sm.cos).diff(B[1,1]))
+    print()
+    print(A.latex())
diff --git a/hysop/symbolic/constant.py b/hysop/symbolic/constant.py
index 3ff7c109dc9d22a9929bd75555bbd3c11dffd6d5..4e6e7a431aff04ac0dc3b65463416abadac76be9 100644
--- a/hysop/symbolic/constant.py
+++ b/hysop/symbolic/constant.py
@@ -7,7 +7,7 @@ class SymbolicConstant(DummySymbolicScalar):
     def __new__(cls, name, **kwds):
         if ('dtype' not in kwds):
             msg='dtype has not been specified for SymbolicConstant {}.'
-            msg=msg.format(name).encode('utf-8')
+            msg=msg.format(name)
             raise RuntimeError(msg)
         dtype = kwds.pop('dtype')
         value = kwds.pop('value', None)
@@ -27,7 +27,7 @@ class SymbolicConstant(DummySymbolicScalar):
             msg='{}::{} value has not been bound yet.'
             msg=msg.format(self.__class__.__name__, self.name)
             raise RuntimeError(msg)
-    
+
     def bind_value(self, value, force=False):
         if (not force) and (self._value is not None):
             msg='A value has already been bound to SymbolicConstant {}.'.format(self.name)
@@ -57,7 +57,7 @@ class SymbolicConstant(DummySymbolicScalar):
         self.assert_bound()
         return '{}[dtype={}, ctype={}]'.format(
                 self.__class__.__name__, self.dtype, self.ctype)
-    
+
     def __eq__(self, other):
         return id(self) == id(other)
     def __hash__(self):
diff --git a/hysop/symbolic/directional.py b/hysop/symbolic/directional.py
index eacaf51ca7174930678bd4da5039d6fdfbf2fa32..fbfb6e7e4124f72977df96ca6d18d5cd8f1f0fde 100644
--- a/hysop/symbolic/directional.py
+++ b/hysop/symbolic/directional.py
@@ -1,12 +1,13 @@
-
 import warnings
-from hysop.deps import np, sm
+import numpy as np
+import sympy as sm
+
 from hysop.tools.types import first_not_None, check_instance
 from hysop.symbolic import space_symbols
 from hysop.symbolic.field import div, grad
 from hysop.tools.warning import HysopWarning
 from hysop.tools.sympy_utils import get_derivative_variables
-    
+
 def collect_direction(expr, var):
     is_tensor = isinstance(expr, np.ndarray)
     if is_tensor:
@@ -44,7 +45,7 @@ def split(F, fixed_residue, force_residue, coords):
     coords = first_not_None(coords, space_symbols)
     for i, xi in enumerate(coords):
         res[i] = collect_direction(F, xi)
-    try: 
+    try:
         if (force_residue is not None):
             residue = force_residue
         else:
@@ -58,7 +59,7 @@ def split(F, fixed_residue, force_residue, coords):
         for i in res.keys():
             res[i] += (residue/count)
     if (fixed_residue is not None):
-        for i,r in res.iteritems():
+        for i,r in res.items():
             if (r!=0):
                 res[i] += fixed_residue
     return res
@@ -66,7 +67,7 @@ def split(F, fixed_residue, force_residue, coords):
 def split_assignement(expr, fixed_residue, force_residue, coords):
     lhs, rhs = expr.args
     res = split(rhs, fixed_residue, force_residue, coords=coords)
-    for (i,ei) in res.iteritems():
+    for (i,ei) in res.items():
         if (ei != 0):
             res[i] = expr.func(lhs, ei)
     return res
@@ -78,21 +79,21 @@ if __name__ == '__main__':
     from hysop.parameters.scalar_parameter import ScalarParameter
 
     enable_pretty_printing()
-    
+
     box = Box(length=(1,)*3)
     frame = box.frame
 
     U = Field(domain=box, name='U',  is_vector=True)
     W = Field(domain=box, name='W',  is_vector=True)
     alpha = ScalarParameter(name=greak[0]).s
-    
-    
+
+
     u = U.s(*frame.coords)
     w = W.s(*frame.coords)
-    
-    expr = (alpha*grad(u, frame) + (1-alpha)*grad(u, frame).T).dot(w) 
+
+    expr = (alpha*grad(u, frame) + (1-alpha)*grad(u, frame).T).dot(w)
     # expr = div(np.outer(u,w), frame)
-    print expr
-    for i, x in split(expr, frame.coords).iteritems():
-        print i, x.simplify()
+    print(expr)
+    for i, x in split(expr, frame.coords).items():
+        print(i, x.simplify())
 
diff --git a/hysop/symbolic/field.py b/hysop/symbolic/field.py
index 85026b2d30c9bc19bb200dddebc3dc7b46a43b12..76afce7d672bbd4e7bdedeb1d8cda715c2052197 100644
--- a/hysop/symbolic/field.py
+++ b/hysop/symbolic/field.py
@@ -1,12 +1,13 @@
 from abc import abstractmethod
-from hysop.deps import sm
+import sympy as sm
+
 from hysop.constants import BoundaryCondition
 
 from hysop.tools.numpywrappers import npw
 from hysop.tools.types import check_instance, first_not_None
 from hysop.tools.sympy_utils import get_derivative_variables
 from hysop.tools.numerics import find_common_dtype
-        
+
 from hysop.fields.continuous_field import Field, TensorField
 from hysop.fields.discrete_field import DiscreteField, DiscreteTensorField
 from hysop.symbolic import Symbol
@@ -16,10 +17,16 @@ from hysop.symbolic.func import UndefinedFunction, AppliedSymbolicFunction, Func
 from hysop.domain.domain import Domain
 
 class FieldExpressionI(object):
+    def __new__(cls, *args, **kwds):
+        return super(FieldExpressionI, cls).__new__(cls, *args, **kwds)
+
+    def __init__(self, *args, **kwds):
+        super(FieldExpressionI, self).__init__(**kwds)
+
     @abstractmethod
     def lboundaries(self):
         pass
-    
+
     @abstractmethod
     def rboundaries(self):
         pass
@@ -27,45 +34,56 @@ class FieldExpressionI(object):
     @abstractmethod
     def domain(self):
         pass
-    
+
     @abstractmethod
     def dtype(self):
         pass
-    
+
     @property
     def boundaries(self):
         return (self.lboundaries, self.rboundaries)
-    
+
     def format_boundaries(self):
         from hysop.constants import format_boundaries as fb
         return fb(*self.boundaries)
-   
+
 
 class FieldExpression(FieldExpressionI):
+
+    def __new__(cls, *args, **kwds):
+        domain = kwds.pop('domain', None)
+        dtype = kwds.pop('dtype', None)
+        lboundaries = kwds.pop('lboundaries', None)
+        rboundaries = kwds.pop('rboundaries', None)
+        obj = super(FieldExpression, cls).__new__(cls, *args, **kwds)
+        obj._domain = domain
+        obj._dtype = dtype
+        obj._lboundaries = lboundaries
+        obj._rboundaries = rboundaries
+        return obj
+
     def __init__(self, *args, **kwds):
-        self._domain      = kwds.pop('domain', None)
-        self._dtype       = kwds.pop('dtype', None)
-        self._lboundaries = kwds.pop('lboundaries', None)
-        self._rboundaries = kwds.pop('rboundaries', None)
         super(FieldExpression, self).__init__(*args, **kwds)
-    
+
     @property
     def lboundaries(self):
         assert (self._lboundaries is not None)
         return self._lboundaries
+
     @lboundaries.setter
     def lboundaries(self, lb):
-        check_instance(lb, npw.ndarray, values=BoundaryCondition, 
+        check_instance(lb, npw.ndarray, values=BoundaryCondition,
                             size=self.domain.dim, ndim=1)
         self._lboundaries = lb
-    
+
     @property
     def rboundaries(self):
         assert (self._rboundaries is not None)
         return self._rboundaries
+
     @rboundaries.setter
     def rboundaries(self, rb):
-        check_instance(rb, npw.ndarray, values=BoundaryCondition, 
+        check_instance(rb, npw.ndarray, values=BoundaryCondition,
                             size=self.domain.dim, ndim=1)
         self._rboundaries = rb
 
@@ -73,22 +91,26 @@ class FieldExpression(FieldExpressionI):
     def domain(self):
         assert (self._domain is not None)
         return self._domain
+
     @domain.setter
     def domain(self, dom):
         assert (self._domain is None)
         check_instance(dom, Domain)
-        self._domain = dom 
+        self._domain = dom
 
     @property
     def dtype(self):
         assert (self._dtype is not None)
         return self._dtype
+
     @dtype.setter
     def dtype(self, dt):
         assert (self._dtype is None)
         check_instance(dt, npw.dtype)
-        self._dtype = dt 
+        self._dtype = dt
 
+class DerivativeFieldExpr(FieldExpression, sm.Derivative):
+    pass
 
 class FieldExpressionBuilder(object):
     class BoundaryIncompatibilityError(ValueError):
@@ -123,12 +145,10 @@ class FieldExpressionBuilder(object):
             if cls.is_field_expr(expr):
                 return expr
             elif isinstance(expr, sm.Derivative):
-                class DerivativeFieldExpr(FieldExpression, sm.Derivative):
-                    pass
                 e = _to_field_expression_impl(expr.args[0])
                 if cls.is_field_expr(e):
                     dtype, domain = e.dtype, e.domain
-                    lb, rb = e.lboundaries.copy(), e.rboundaries.copy(), 
+                    lb, rb = e.lboundaries.copy(), e.rboundaries.copy(),
                     assert len(space_symbols)==lb.size==rb.size
                     for xi in get_derivative_variables(expr):
                         assert xi in space_symbols, xi
@@ -153,7 +173,7 @@ class FieldExpressionBuilder(object):
                     except cls.BoundaryIncompatibilityError:
                         msg='\nError during the handling of expression {}.'.format(expr)
                         msg+='\nSome boundaries were not compatible:'
-                        msg+='\n  *'+'\n  *'.join('{}: {}'.format(a, a.format_boundaries()) 
+                        msg+='\n  *'+'\n  *'.join('{}: {}'.format(a, a.format_boundaries())
                                                 for a in field_expression_args)
                         raise cls.BoundaryIncompatibilityError(msg)
                 else:
@@ -183,7 +203,7 @@ class FieldExpressionBuilder(object):
         new_expr.lboundaries = fea0.lboundaries.copy()
         new_expr.rboundaries = fea0.rboundaries.copy()
         return new_expr
-        
+
 
     @classmethod
     def check_boundary_compatibility(cls, arg0, *args):
@@ -197,7 +217,7 @@ class FieldExpressionBuilder(object):
         else:
             return True
 
-        
+
 
 class FieldBase(FunctionBase):
 
@@ -205,8 +225,7 @@ class FieldBase(FunctionBase):
         '''for sympify'''
         return self
 
-    def __new__(cls, field, idx=None, 
-                    **kwds):
+    def __new__(cls, field, idx=None, **kwds):
         assert 'name' not in kwds
         assert 'pretty_name' not in kwds
         assert 'latex_name' not in kwds
@@ -219,18 +238,27 @@ class FieldBase(FunctionBase):
         var_name    = field.var_name
         latex_name  = field.latex_name
         assert (0<=index<field.nb_components), index
-        obj = super(FieldBase, cls).__new__(cls, name=name, pretty_name=pretty_name, 
-                                                 var_name=var_name, latex_name=latex_name, **kwds)
+        try:
+            obj = super(FieldBase, cls).__new__(cls, name=name, pretty_name=pretty_name,
+                                                     var_name=var_name, latex_name=latex_name, **kwds)
+        except TypeError:
+            obj = super(FieldBase, cls).__new__(cls, name=name, **kwds)
         obj._field = field
         obj._index = index
         return obj
 
+    def __init__(self, field, idx=None, **kwds):
+        try:
+            super(FieldBase, self).__init__(name=None, pretty_name=None, var_name=None, latex_name=None, **kwds)
+        except TypeError:
+            super(FieldBase, self).__init__(name=None, **kwds)
+
     def _hashable_content(self):
         """See sympy.core.basic.Basic._hashable_content()"""
         hc = super(FieldBase, self)._hashable_content()
         hc += (self._field, self._index,)
         return hc
-    
+
     @property
     def field(self):
         """Get associated field."""
@@ -239,31 +267,36 @@ class FieldBase(FunctionBase):
     def index(self):
         """Get component index of the target field."""
         return self._index
-    @property 
+    @property
     def indexed_field(self):
         """Get a unique identifier for an indexed field component."""
         return (self._field, self._index)
-    
+
     def __getitem__(self, key):
         assert key == 0
         return self
 
+
 class SymbolicDiscreteField(FieldBase, Symbol):
     """
     Symbolic discrete field symbol.
     """
     def __new__(cls, field, name=None, fn=None, **kwds):
         check_instance(field, DiscreteField)
-        return super(SymbolicDiscreteField, cls).__new__(cls, field=field, 
+        return super(SymbolicDiscreteField, cls).__new__(cls, field=field,
                 fn=fn, **kwds)
 
+    def __init__(self, field, name=None, fn=None, **kwds):
+        super(SymbolicDiscreteField, self).__init__(field=field, fn=fn, **kwds)
+
     @classmethod
     def from_field(cls, field):
         if (field.nb_components == 1):
             return cls(field=field)
         else:
             return SymbolicDiscreteFieldTensor(field=field)
-    
+
+
 class SymbolicField(FieldBase, UndefinedFunction):
     """
     Symbolic unapplied scalar field as an undefined function of some frame coordinates and time.
@@ -272,9 +305,11 @@ class SymbolicField(FieldBase, UndefinedFunction):
     def __new__(cls, field, fn=None, bases=None, **kwds):
         bases = first_not_None(bases, (AppliedSymbolicField,))
         check_instance(field, Field)
-        return super(SymbolicField, cls).__new__(cls, bases=bases, 
-                field=field, fn=fn, **kwds)
-    
+        return super(SymbolicField, cls).__new__(cls, bases=bases, field=field, fn=fn, **kwds)
+
+    def __init__(self, field, fn=None, bases=None, **kwds):
+        super(SymbolicField, self).__init__(bases=bases, field=field, fn=fn, **kwds)
+
     def __hash__(self):
         "Fix sympy v1.2 hashes"
         h = super(SymbolicField, self).__hash__()
@@ -301,11 +336,14 @@ class AppliedSymbolicField(FieldExpressionI, AppliedSymbolicFunction):
     def __new__(cls, *args, **kwds):
         args = args if args else cls.field.domain.frame.vars
         return super(AppliedSymbolicField, cls).__new__(cls, *args, **kwds)
-    
+
+    def __init__(self, *args, **kwds):
+        super(AppliedSymbolicField, self).__init__(*args, **kwds)
+
     def _sympy_(self):
         '''for sympify'''
         return self
-    
+
     def _hashable_content(self):
         """See sympy.core.basic.Basic._hashable_content()"""
         hc = super(AppliedSymbolicField, self)._hashable_content()
@@ -315,21 +353,21 @@ class AppliedSymbolicField(FieldExpressionI, AppliedSymbolicFunction):
     @property
     def field(self):
         return type(self).field
-    
+
     @property
     def index(self):
         """Get component index of the target field."""
         return type(self).index
 
-    @property 
+    @property
     def indexed_field(self):
         """Get a unique identifier for an indexed field component."""
         return (self.field, self.index)
-    
+
     @property
     def lboundaries(self):
         return self.field.lboundaries
-    
+
     @property
     def rboundaries(self):
         return self.field.rboundaries
@@ -337,7 +375,7 @@ class AppliedSymbolicField(FieldExpressionI, AppliedSymbolicFunction):
     @property
     def domain(self):
         return self.field.domain
-    
+
     @property
     def dtype(self):
         return self.field.dtype
@@ -353,6 +391,10 @@ class SymbolicFieldTensor(SymbolicFunctionTensor):
             init[idx] = field.symbol
         return super(SymbolicFieldTensor, cls).__new__(cls, shape=shape, init=init)
 
+    def __init__(self, field, **kwds):
+        super(SymbolicFieldTensor, self).__init__(shape=None, init=None)
+
+
 class SymbolicDiscreteFieldTensor(TensorBase):
     """Symbolic tensor symbol."""
     def __new__(cls, dfield, name=None, **kwds):
@@ -362,9 +404,12 @@ class SymbolicDiscreteFieldTensor(TensorBase):
         init  = npw.empty(shape=shape, dtype=object)
         for (idx, df) in dfield.nd_iter():
             init[idx] = df.symbol
-        return super(SymbolicDiscreteFieldTensor, cls).__new__(cls, shape=shape, 
+        return super(SymbolicDiscreteFieldTensor, cls).__new__(cls, shape=shape,
                                                                     init=init, **kwds)
 
+    def __init__(self, dfield, name=None, **kwds):
+        super(SymbolicDiscreteFieldTensor, self).__init__(shape=None, init=None, **kwds)
+
 
 def diff(F, *symbols, **assumptions):
     is_tensor = isinstance(F, npw.ndarray)
@@ -385,7 +430,7 @@ def grad(F, frame, axis=-1):
         assert (axis==-1) or (axis==0)
         shape = (1,)
         new_shape = (frame.dim,)
-    
+
     gradF = npw.ndarray(shape=new_shape, dtype=object)
     for idx in npw.ndindex(*shape):
         for i, xp in enumerate(frame.coords):
@@ -394,7 +439,7 @@ def grad(F, frame, axis=-1):
                 gradF[new_idx] = diff(F[idx], xp)
             else:
                 gradF[i] = diff(F, xp)
-    
+
     return gradF.view(TensorBase)
 
 def div(F, frame, axis=-1):
@@ -403,7 +448,7 @@ def div(F, frame, axis=-1):
         shape = F.shape
         ndim  = F.ndim
         axis = (axis+ndim)%ndim
-        
+
         divF = npw.empty_like(F)
         for idx in npw.ndindex(*shape):
             divF[idx] = diff(F[idx], frame.coords[idx[axis]])
@@ -481,61 +526,60 @@ if __name__ == '__main__':
     assert s1.field is S1
     assert u[0].field  is U
     assert v[-1].field is V
-    
+
     with printoptions(linewidth=1000):
-        print s0, s0.index, s0.field.short_description()
-        print
-        print s1, s1.index, s1.field.short_description()
-        print
-        print u[0], u[0].index, u[0].field.short_description()
-        print u[1], u[1].index, u[1].field.short_description()
-        print u[2], u[2].index, u[2].field.short_description()
-        print 
-        print v 
-        print v[0], v[0].index, v[0].field.short_description()
-        print v[1], v[1].index, v[1].field.short_description()
-        print
-        print s0(*frame.vars)
-        print s1(*frame.vars)
-        print u(*frame.vars)
-        print v(*frame.vars)
-        print
-        print grad(s0(*frame.vars), frame)
-        print
-        print grad(s0(frame.coords[0]), frame)
-        print
-        print grad(s1(frame.coords[-1]), frame)
-        print
-        print grad(v(*frame.vars), frame)
-        print
-        print grad(u(*frame.vars), frame)
-        print 
-        print grad(u(frame.coords[1]), frame)
-        print
-        print div(u(*frame.vars), frame)
-        print
-        print div(grad(s0(*frame.vars), frame), frame)
-        print
-        print div(grad(v(*frame.vars), frame), frame)
-        print
-        print laplacian(s0(*frame.vars), frame)
-        print
-        print rot(u(*frame.vars), frame)
-        print
-        print convective_derivative(u(*frame.vars), u(*frame.vars), frame)
-        print
-        print npw.eye(8, dtype=npw.uint8).view(TensorBase).latex()
-        print
-        print convective_derivative(u(*frame.vars), u(*frame.vars), frame).latex()
-        print 
+        print(s0, s0.index, s0.field.short_description())
+        print()
+        print(s1, s1.index, s1.field.short_description())
+        print()
+        print(u[0], u[0].index, u[0].field.short_description())
+        print(u[1], u[1].index, u[1].field.short_description())
+        print(u[2], u[2].index, u[2].field.short_description())
+        print()
+        print(v )
+        print(v[0], v[0].index, v[0].field.short_description())
+        print(v[1], v[1].index, v[1].field.short_description())
+        print()
+        print(s0(*frame.vars))
+        print(s1(*frame.vars))
+        print(u(*frame.vars))
+        print(v(*frame.vars))
+        print()
+        print(grad(s0(*frame.vars), frame))
+        print()
+        print(grad(s0(frame.coords[0]), frame))
+        print()
+        print(grad(s1(frame.coords[-1]), frame))
+        print()
+        print(grad(v(*frame.vars), frame))
+        print()
+        print(grad(u(*frame.vars), frame))
+        print()
+        print(grad(u(frame.coords[1]), frame))
+        print()
+        print(div(u(*frame.vars), frame))
+        print()
+        print(div(grad(s0(*frame.vars), frame), frame))
+        print()
+        print(div(grad(v(*frame.vars), frame), frame))
+        print()
+        print(laplacian(s0(*frame.vars), frame))
+        print()
+        print(rot(u(*frame.vars), frame))
+        print()
+        print(convective_derivative(u(*frame.vars), u(*frame.vars), frame))
+        print()
+        print(npw.eye(8, dtype=npw.uint8).view(TensorBase).latex())
+        print()
+        print(convective_derivative(u(*frame.vars), u(*frame.vars), frame).latex())
+        print()
         D = SymbolicTensor('D', shape=(3,3))
-        print D
-        print div(D.dot(grad(s0(*frame.vars), frame)), frame)
-        print
-        print div(D.dot(grad(u(*frame.vars), frame)), frame)
-        print
-        print D.dot(grad(s0(*frame.vars), frame))
-        print
-        print grad(div(u(*frame.vars), frame), frame) - div(grad(u(), frame), frame)
-
+        print(D)
+        print(div(D.dot(grad(s0(*frame.vars), frame)), frame))
+        print()
+        print(div(D.dot(grad(u(*frame.vars), frame)), frame))
+        print()
+        print(D.dot(grad(s0(*frame.vars), frame)))
+        print()
+        print(grad(div(u(*frame.vars), frame), frame) - div(grad(u(), frame), frame))
 
diff --git a/hysop/symbolic/frame.py b/hysop/symbolic/frame.py
index a3d7dda04179690d25ae13caac8d808a97665a41..924d04914fbf28d1bc7a83c62f947e7b0647285a 100644
--- a/hysop/symbolic/frame.py
+++ b/hysop/symbolic/frame.py
@@ -9,7 +9,7 @@ class SymbolicFrame(object):
         """Initialize a frame with given dimension."""
         super(SymbolicFrame, self).__init__(**kwds)
         assert dim>0, 'Incompatible dimension.'
-        
+
         coords = list(space_symbols[:dim])
         if (freq_axes is not None):
             freq_axes = to_tuple(freq_axes)
@@ -22,17 +22,17 @@ class SymbolicFrame(object):
     def dim(self):
         """Get the dimension of this frame."""
         return self._dim
-    
+
     @property
     def coords(self):
         """Return the symbolic spatial coordinates associated to this frame."""
         return self._coords
-    
+
     @property
     def freqs(self):
         """Return the symbolic (spatial) frequency coordinates associated to this frame."""
         return freq_symbols[:self.dim]
-    
+
     @property
     def dcoords(self):
         """Return the spatial coordinates infinitesimals associated to this frame."""
@@ -42,8 +42,8 @@ class SymbolicFrame(object):
     def time(self):
         """Get the time variable for conveniance."""
         return time_symbol
-    
-    
+
+
     @property
     def dtime(self):
         """Get the infinitesimal time variable for conveniance."""
@@ -69,8 +69,8 @@ class SymbolicFrame(object):
 
 if __name__  == '__main__':
     A = SymbolicFrame(8)
-    print A.dim
-    print A.coords
-    print A[3]
-    print A.time
-    print A
+    print(A.dim)
+    print(A.coords)
+    print(A[3])
+    print(A.time)
+    print(A)
diff --git a/hysop/symbolic/func.py b/hysop/symbolic/func.py
index 7b90f9a5554632faa2bbb88f722582bef890d29f..dd3978c1b15bde8c5c2af8de44b535098cf9164e 100644
--- a/hysop/symbolic/func.py
+++ b/hysop/symbolic/func.py
@@ -1,5 +1,6 @@
+import sympy as sm
+import numpy as np
 
-from hysop.deps import sm, np
 from hysop.tools.types import check_instance, first_not_None
 from hysop.symbolic import time_symbol, space_symbols
 from hysop.symbolic import AppliedUndef, UndefinedFunction
@@ -14,24 +15,37 @@ class FunctionBase(ScalarBaseTag):
         obj.fn = staticmethod(fn)
         return obj
 
+    def __init__(self, *args, **kwds):
+        kwds.pop('fn')
+        super(FunctionBase, self).__init__(*args, **kwds)
+
     def _hashable_content(self):
         """See sympy.core.basic.Basic._hashable_content()"""
         hc = super(FunctionBase, self)._hashable_content()
         hc += (self.fn,)
         return hc
 
+
 class SymbolicFunction(FunctionBase, UndefinedFunction):
     """Unapplied symbolic scalar function."""
     def __new__(cls, name, fn=None, bases=None, **kwds):
         bases = first_not_None(bases, (AppliedSymbolicFunction,))
-        return super(SymbolicFunction, cls).__new__(cls, bases=bases, 
+        return super(SymbolicFunction, cls).__new__(cls, bases=bases,
+                name=name, fn=fn, **kwds)
+
+    def __init__(self, name, fn=None, bases=None, **kwds):
+        super(SymbolicFunction, self).__init__(bases=bases,
                 name=name, fn=fn, **kwds)
 
+
 class AppliedSymbolicFunction(AppliedUndef):
     """Applied symbolic scalar function."""
     def __new__(cls, *args, **kwds):
         return super(AppliedSymbolicFunction, cls).__new__(cls, *args, **kwds)
 
+    def __init__(self, *args, **kwds):
+        super(AppliedSymbolicFunction, self).__init__(**kwds)
+
     def freplace(self):
         if (self.fn is not None):
             return self.fn(*self.args)
@@ -41,9 +55,10 @@ class AppliedSymbolicFunction(AppliedUndef):
     def __call__(self):
         return self.freplace()
 
+
 class SymbolicFunctionTensor(TensorBase):
     """Symbolic tensor symbol."""
-    def __new__(cls, shape, name=None, fn=None, init=None, 
+    def __new__(cls, shape, name=None, fn=None, init=None,
             scalar_cls=None, scalar_kwds=None, **kwds):
         scalar_cls = first_not_None(scalar_cls, SymbolicFunction)
         scalar_kwds = first_not_None(scalar_kwds, {})
@@ -51,6 +66,11 @@ class SymbolicFunctionTensor(TensorBase):
         return super(SymbolicFunctionTensor, cls).__new__(cls, name=name, shape=shape, init=init,
             scalar_cls=scalar_cls, scalar_kwds=scalar_kwds, **kwds)
 
+    def __init__(self, shape, name=None, fn=None, init=None,
+            scalar_cls=None, scalar_kwds=None, **kwds):
+        super(SymbolicFunctionTensor, self).__init__(name=name, shape=None, init=None,
+            scalar_cls=scalar_cls, scalar_kwds=scalar_kwds, **kwds)
+
     def __call__(self, *args, **kwds):
         return self.elementwise_fn(lambda x: x(*args, **kwds))
 
@@ -60,7 +80,7 @@ class SymbolicFunctionTensor(TensorBase):
 if __name__ == '__main__':
     def fn(x0,x1):
         return x0-x1
-    
+
     f = SymbolicFunction('f', fn=sm.cos)
     g = SymbolicFunction('g', fn=lambda *x: sm.sin(np.prod(x)))
     h = SymbolicFunction('h', fn=lambda *x: sm.tan(np.sum(x)))
@@ -77,34 +97,34 @@ if __name__ == '__main__':
     b = SymbolicFunctionTensor('b', shape=(4,), fn=sm.cos)
     A = a(time_symbol)
     B = b(space_symbols[0])
-    print f
-    print g
-    print h
-    print i
-    print j
-    print
-    print type(f).__mro__[:2]
-    print 
-    print F
-    print G
-    print H
-    print I
-    print J
-    print 
-    print type(F).__mro__[:4]
-    print
-    print F()
-    print G()
-    print H()
-    print I()
-    print J()
-    print
-    print a
-    print b
-    print
-    print A
-    print B
-    print
-    print A.freplace()
-    print B.freplace()
+    print(f)
+    print(g)
+    print(h)
+    print(i)
+    print(j)
+    print()
+    print(type(f).__mro__[:2])
+    print()
+    print(F)
+    print(G)
+    print(H)
+    print(I)
+    print(J)
+    print()
+    print(type(F).__mro__[:4])
+    print()
+    print(F())
+    print(G())
+    print(H())
+    print(I())
+    print(J())
+    print()
+    print(a)
+    print(b)
+    print()
+    print(A)
+    print(B)
+    print()
+    print(A.freplace())
+    print(B.freplace())
 
diff --git a/hysop/symbolic/misc.py b/hysop/symbolic/misc.py
index 813df0ec6cfff848744537097642c4131b42e0c9..140fb57809c07d4726a70de428796be8c24016a3 100644
--- a/hysop/symbolic/misc.py
+++ b/hysop/symbolic/misc.py
@@ -1,9 +1,9 @@
+import sympy as sm
 
-from hysop.deps import sm
 from hysop.tools.numpywrappers import npw
 from hysop.tools.types import check_instance, first_not_None, to_tuple
 from hysop.symbolic import Expr, Symbol
-        
+
 InstructionTermination = ''
 
 class Select(Expr):
@@ -14,13 +14,13 @@ class Select(Expr):
         obj.b = b
         obj.c = c
         return obj
-    
+
     def __str__(self):
         return '({} ? {} : {})'.format(self.c, self.b, self.a)
     def __repr__(self):
         return 'Select({}, {}, {})'.format(repr(self.a), repr(self.b), repr(self.c))
     def _sympystr(self, printer):
-        return '({} ? {} : {})'.format(printer._print(self.c), 
+        return '({} ? {} : {})'.format(printer._print(self.c),
                                        printer._print(self.b),
                                        printer._print(self.a))
 
@@ -83,7 +83,7 @@ class CodeSection(Expr):
         assert all(type(e) not in (tuple, list, set, frozenset, dict) for e in exprs)
         obj = super(CodeSection, cls).__new__(cls, *exprs)
         return obj
-    
+
     def _sympystr(self, printer):
         return 'CodeSection([{}])'.format(
                 '; '.join(printer._print(a) for a in self.args))
@@ -101,7 +101,7 @@ class MutexOp(Expr):
         obj.mutexes  = mutexes
         obj.mutex_id = mutex_id
         return obj
-    
+
 class MutexLock(MutexOp):
     def _sympystr(self, printer):
         return 'MutexLock({}, {})'.format(
diff --git a/hysop/symbolic/parameter.py b/hysop/symbolic/parameter.py
index 68a45ddcc4b0564569f8f1090bc22242e94ea704..313c8cfa92cb6590f139b9544d4d57ee7a81e66c 100644
--- a/hysop/symbolic/parameter.py
+++ b/hysop/symbolic/parameter.py
@@ -4,7 +4,7 @@ from hysop.tools.types import check_instance, to_tuple, first_not_None
 from hysop.tools.numpywrappers import npw
 
 class SymbolicTensorParameter(SymbolicTensor):
-    def __new__(cls, parameter, name=None, pretty_name=None, 
+    def __new__(cls, parameter, name=None, pretty_name=None,
             value=None, shape=None, scalar_cls=None, **kwds):
         from hysop.parameters.tensor_parameter import TensorParameter
         check_instance(parameter, TensorParameter)
@@ -14,30 +14,42 @@ class SymbolicTensorParameter(SymbolicTensor):
         pname = first_not_None(pretty_name, parameter.pretty_name)
         scalar_cls = first_not_None(scalar_cls, SymbolicScalarParameter)
         scalar_kwds = dict(parameter=parameter)
-        def make_scalar_kwds(idx): 
+        def make_scalar_kwds(idx):
             return dict(view=lambda parameter: parameter._value[idx])
-        return super(SymbolicTensorParameter, cls).__new__(cls, 
+        return super(SymbolicTensorParameter, cls).__new__(cls,
                 name=name, pretty_name=pname,
-                shape=shape, value=value, scalar_cls=scalar_cls, 
+                shape=shape, value=value, scalar_cls=scalar_cls,
                 make_scalar_kwds=make_scalar_kwds,
                 scalar_kwds=scalar_kwds, **kwds)
 
+    def __init__(self, parameter, name=None, pretty_name=None,
+            value=None, shape=None, scalar_cls=None, **kwds):
+        super(SymbolicTensorParameter, self).__init__(
+                name=name, pretty_name=pretty_name,
+                shape=shape, value=value, scalar_cls=scalar_cls,
+                make_scalar_kwds=None,
+                scalar_kwds=None, **kwds)
+
 class SymbolicScalarParameter(SymbolicScalar):
     def __new__(cls, parameter, name=None, pretty_name=None, value=None, **kwds):
         from hysop.parameters.tensor_parameter import TensorParameter
         check_instance(parameter, TensorParameter)
         value = first_not_None(value, parameter._value)
-        name   = first_not_None(name, parameter.name)
-        pname  = first_not_None(pretty_name, parameter.pretty_name)
+        name  = first_not_None(name, parameter.name)
+        pname = first_not_None(pretty_name, parameter.pretty_name)
         obj = super(SymbolicScalarParameter, cls).__new__(cls,
                 name=name, pretty_name=pname,
                 value=value, **kwds)
         obj.parameter = parameter
         return obj
 
+    def __init__(self, parameter, name=None, pretty_name=None, value=None, **kwds):
+        super(SymbolicScalarParameter, self).__init__(name=name, pretty_name=pretty_name,
+                value=value, **kwds)
+
 
 if __name__ == '__main__':
-    from hysop.deps import sm
+    import sympy as sm
     from hysop.parameters.tensor_parameter import TensorParameter
     from hysop.parameters.scalar_parameter import ScalarParameter
     a = ScalarParameter('A', dtype=npw.int32, initial_value=4)
@@ -52,26 +64,26 @@ if __name__ == '__main__':
         D[1,1] = B[1]
         D[2,2] = B[2]
 
-    print A
-    print B
-    print C
-    print D
-    print
-    print A.vreplace()
-    print B.vreplace()
-    print C.vreplace()
-    print D.vreplace()
-    print
+    print(A)
+    print(B)
+    print(C)
+    print(D)
+    print()
+    print(A.vreplace())
+    print(B.vreplace())
+    print(C.vreplace())
+    print(D.vreplace())
+    print()
     a.set_value(1)
     b.set_value(npw.asarray([-1,0,1], dtype=b.dtype))
     c.set_value(npw.full_like(c.value, 3))
-    print A
-    print B
-    print C
-    print D
-    print
-    print A.vreplace()
-    print B.vreplace()
-    print C.vreplace()
-    print D.vreplace()
-    print
+    print(A)
+    print(B)
+    print(C)
+    print(D)
+    print()
+    print(A.vreplace())
+    print(B.vreplace())
+    print(C.vreplace())
+    print(D.vreplace())
+    print()
diff --git a/hysop/symbolic/relational.py b/hysop/symbolic/relational.py
index 8ed218e3b1afcdf7503fe2823781d962d1a93f12..4ebb2073424201a3d2f658844ac34b72fc9776b5 100644
--- a/hysop/symbolic/relational.py
+++ b/hysop/symbolic/relational.py
@@ -1,5 +1,5 @@
+import sympy as sm
 
-from hysop.deps import sm
 from hysop.symbolic import Expr
 from hysop.tools.types import first_not_None
 from hysop.tools.numpywrappers import npw
@@ -19,11 +19,11 @@ class NAryRelation(Expr):
     def __new__(cls, *exprs):
         obj = super(NAryRelation, cls).__new__(cls, *exprs)
         return obj
-    
+
     def __str__(self):
         rel_op = ' {} '.format(self.rel_op)
         return '({})'.format(rel_op.join(str(x) for x in self.args))
-    
+
     def _sympystr(self, printer):
         rel_op = ' {} '.format(self.rel_op)
         return '({})'.format(rel_op.join('{}'.format(printer._print(x)) for x in self.args))
@@ -35,7 +35,7 @@ class NAryRelation(Expr):
     @property
     def is_number(self):
         return True
-        
+
     @property
     def free_symbols(self):
         return ()
@@ -119,7 +119,7 @@ class Assignment(BinaryRelation):
     lhs : Expr
     rhs : Expr
     """
-    
+
     def __str__(self):
         lhs = first_not_None(getattr(self.lhs,'name',None), self.lhs)
         rhs = first_not_None(getattr(self.rhs,'name',None), self.rhs)
@@ -136,11 +136,11 @@ class Assignment(BinaryRelation):
                     printer._print(self.lhs),
                     self.rel_op,
                     printer._print(self.rhs))
-    
+
     @property
     def rel_op(self):
         return '='
-    
+
     @classmethod
     def assign(cls, lhs, rhs, skip_zero_rhs=False):
         exprs = ()
@@ -177,7 +177,7 @@ class Assignment(BinaryRelation):
             msg=msg.format(type(lhs), type(rhs))
             raise TypeError(msg)
         return exprs
-        
+
 
 class AugmentedAssignment(Assignment):
     """
@@ -205,8 +205,8 @@ class DivAugmentedAssignment(AugmentedAssignment):
 
 class ModAugmentedAssignment(AugmentedAssignment):
     _symbol = '%'
-    
-    
+
+
 
 class NAryFunction(Expr):
     """
@@ -223,11 +223,11 @@ class NAryFunction(Expr):
     def __new__(cls, *exprs):
         obj = super(NAryFunction, cls).__new__(cls, *exprs)
         return obj
-    
+
     def __str__(self):
         return '{}({})'.format(self.fname,
                                ', '.join(str(x) for x in self.args))
-    
+
     def _sympystr(self, printer):
         return '{}({})'.format(self.fname,
                                ', '.join('{}'.format(printer._print(x)) for x in self.args))
@@ -239,7 +239,7 @@ class NAryFunction(Expr):
     @property
     def is_number(self):
         return True
-        
+
     @property
     def free_symbols(self):
         return ()
diff --git a/hysop/symbolic/spectral.py b/hysop/symbolic/spectral.py
index 6b3b82bcfad348f18e76992ab44c2503cef582b1..8ba27630ee46d19314cd8bbf4e0040d2964db7da 100644
--- a/hysop/symbolic/spectral.py
+++ b/hysop/symbolic/spectral.py
@@ -22,6 +22,9 @@ class WaveNumberIndex(sm.Symbol):
         obj._real_index = None
         return obj
 
+    def __init__(self, axis):
+        super(WaveNumberIndex, self).__init__()
+
     def bind_axes(self, axes):
         assert (self._axes is None) or (axes == self._axes)
         dim = len(axes)
@@ -39,7 +42,7 @@ class WaveNumberIndex(sm.Symbol):
 
 class WaveNumber(Dummy):
     """Wave number symbol for SpectralTransform derivatives (and integrals)."""
-    
+
     __transform2str = {
             TransformType.FFT:      'c2c',
             TransformType.RFFT:     'r2c',
@@ -62,24 +65,24 @@ class WaveNumber(Dummy):
             TransformType.IDST_III: 's2',
             TransformType.IDST_IV:  's4',
     }
-    
+
     __wave_numbers = {}
 
     def __new__(cls, axis, transform, exponent, **kwds):
         check_instance(transform, TransformType)
         check_instance(axis, int, minval=0)
         check_instance(exponent, int, minval=1)
-        
+
         if (transform is TransformType.NONE):
             return None
-        
+
         if (exponent == 0):
             return 1
-        
+
         key = (transform, axis, exponent)
         if key in cls.__wave_numbers:
             return cls.__wave_numbers[key]
-        
+
         tr_str = cls.__transform2str[transform]
         if len(tr_str)==2:
             tr_pstr = tr_str[0] + subscript(int(tr_str[1]))
@@ -97,8 +100,8 @@ class WaveNumber(Dummy):
         if (exponent > 1):
             name += '__{}'.format(exponent)
             pretty_name += '__{}'.format(exponent)
-        
-        obj = super(WaveNumber, cls).__new__(cls, 
+
+        obj = super(WaveNumber, cls).__new__(cls,
                 name=name, pretty_name=pretty_name, **kwds)
         obj._axis      = int(axis)
         obj._transform = transform
@@ -108,6 +111,9 @@ class WaveNumber(Dummy):
 
         return obj
 
+    def __init__(self, axis, transform, exponent, **kwds):
+        super(WaveNumber, self).__init__(name=None, pretty_name=None, **kwds)
+
     @property
     def axis(self):
         return self._axis
@@ -117,7 +123,7 @@ class WaveNumber(Dummy):
     @property
     def exponent(self):
         return self._exponent
-    
+
     @property
     def is_real(self):
         tr  = self._transform
@@ -125,7 +131,7 @@ class WaveNumber(Dummy):
         is_real = STU.is_R2R(tr)
         is_real |= ((not STU.is_R2R(tr)) and (exp % 2 == 0))
         return is_real
-    
+
     @property
     def is_complex(self):
         tr  = self._transform
@@ -135,7 +141,7 @@ class WaveNumber(Dummy):
     def pow(self, exponent):
         exponent *= self.exponent
         return WaveNumber(axis=self.axis, transform=self.transform, exponent=exponent)
-    
+
     def indexed_buffer(self, name=None):
         name = first_not_None(name, self.name)
         buf = SymbolicBuffer(name=name, memory_object=None)
@@ -163,7 +169,7 @@ class AppliedSpectralTransform(AppliedSymbolicField):
     def short_description(self):
         ss = '{}(field={}, axes={}, is_forward={}, transforms=[{}])'
         return ss.format(self.__class__.__name__,
-                self.field.pretty_name, self.transformed_axes, 
+                self.field.pretty_name, self.transformed_axes,
                 '1' if self.is_forward else '0',
                 self.format_transforms())
 
@@ -195,7 +201,7 @@ class AppliedSpectralTransform(AppliedSymbolicField):
     def format_transforms(self):
         transforms = self.transforms
         return ' x '.join(str(tr) for tr in transforms)
-    
+
     @property
     def field(self):
         return self._field
@@ -217,7 +223,7 @@ class AppliedSpectralTransform(AppliedSymbolicField):
     @property
     def frame(self):
         return self._frame
-    
+
     @property
     def lboundaries(self):
         return self._field.lboundaries
@@ -230,7 +236,7 @@ class AppliedSpectralTransform(AppliedSymbolicField):
     @property
     def dtype(self):
         return self._field.dtype
-    
+
     @property
     def transforms(self):
         return self._transforms
@@ -256,19 +262,19 @@ class AppliedSpectralTransform(AppliedSymbolicField):
             i = self._all_vars.index(v)
             return self._wave_numbers[i]*self
         return sm.Derivative(self, v)
-    
+
     def _hashable_content(self):
         """See sympy.core.basic.Basic._hashable_content()"""
         hc = super(AppliedSpectralTransform, self)._hashable_content()
         hc += (self.__class__,)
         return hc
-    
+
     def __hash__(self):
         h = super(AppliedSpectralTransform, self).__hash__()
         for hc in (self.__class__,):
             h ^= hash(h)
         return h
-    
+
     def __eq__(self, other):
         "Fix sympy v1.2 eq"
         eq = super(AppliedSpectralTransform, self).__eq__(other)
@@ -305,27 +311,27 @@ class SpectralTransform(SymbolicField):
 
         check_instance(field, ScalarField)
         axes = to_tuple(first_not_None(axes, range(field.dim)))
-        check_instance(axes, tuple, values=int, minval=0, 
+        check_instance(axes, tuple, values=int, minval=0,
                                     maxval=dim-1, minsize=1)
-        
+
         transformed_axes = tuple(sorted(set(axes)))
         spatial_axes     = tuple(sorted(set(range(field.dim)) - set(axes)))
-        
+
         frame = field.domain.frame
         freq_vars  = tuple(frame.freqs[dim-1-i]  for i in transformed_axes[::-1])
         space_vars = tuple(frame.coords[dim-1-i] for i in spatial_axes[::-1])
-        
+
         all_vars = ()
-        for i in xrange(dim):
+        for i in range(dim):
             if i in transformed_axes:
                 all_vars += (frame.freqs[dim-1-i],)
             else:
                 all_vars += (frame.coords[dim-1-i],)
         all_vars = all_vars[::-1]
-        
-        transforms = SpectralTransformUtils.transforms_from_field(field, 
+
+        transforms = SpectralTransformUtils.transforms_from_field(field,
                 transformed_axes=transformed_axes)
-        for i in xrange(frame.dim):
+        for i in range(frame.dim):
             assert (transforms[i] is TransformType.NONE) ^ (i in transformed_axes)
 
         wave_numbers = cls.generate_wave_numbers(transforms)[::-1]
@@ -334,7 +340,7 @@ class SpectralTransform(SymbolicField):
 
         frame = SymbolicFrame(dim=field.dim, freq_axes=transformed_axes)
         assert frame.coords == all_vars
-        
+
         obj = super(SpectralTransform, cls).__new__(cls, field=field,
                 bases=(AppliedSpectralTransform,))
         obj._field = field
@@ -352,13 +358,13 @@ class SpectralTransform(SymbolicField):
     @classmethod
     def generate_wave_numbers(cls, transforms):
         return SpectralTransformUtils.generate_wave_numbers(*transforms)
-    
+
     def _hashable_content(self):
         """See sympy.core.basic.Basic._hashable_content()"""
         hc = super(SpectralTransform, self)._hashable_content()
         hc += (self._transformed_axes, self._is_forward)
         return hc
-    
+
     def __hash__(self):
         "Fix sympy v1.2 hashes"
         h = super(SpectralTransform, self).__hash__()
@@ -371,7 +377,7 @@ class SpectralTransform(SymbolicField):
         eq = super(SpectralTransform, self).__eq__(other)
         if (eq is not True):
             return eq
-        for (lhc,rhc) in zip((self._transformed_axes, self._is_forward), 
+        for (lhc,rhc) in zip((self._transformed_axes, self._is_forward),
                              (other._transformed_axes, other._is_forward)):
             eq &= (lhc == rhc)
         return eq
@@ -389,19 +395,19 @@ if __name__ == '__main__':
     from hysop.symbolic.field import laplacian, curl
     from hysop.symbolic.relational import Assignment
     from hysop.tools.sympy_utils import Greak
-    
+
     dim = 3
     d = Box(dim=dim, lboundaries=(BoxBoundaryCondition.SYMMETRIC,
-                                  BoxBoundaryCondition.OUTFLOW, 
+                                  BoxBoundaryCondition.OUTFLOW,
                                   BoxBoundaryCondition.SYMMETRIC),
-                     rboundaries=(BoxBoundaryCondition.SYMMETRIC, 
+                     rboundaries=(BoxBoundaryCondition.SYMMETRIC,
                                   BoxBoundaryCondition.OUTFLOW,
                                   BoxBoundaryCondition.OUTFLOW))
 
     U = VelocityField(domain=d)
     W = VorticityField(velocity=U)
     psi = W.field_like(name='psi', pretty_name=Greak[23])
-    
+
     W_hat   = SpectralTransform(W, forward=True)
     U_hat   = SpectralTransform(U, forward=False)
     psi_hat = SpectralTransform(psi)
@@ -410,26 +416,26 @@ if __name__ == '__main__':
     sol = sm.solve(eqs, psi_hat.tolist())
     sol = curl(psi_hat, psi_hat.frame).xreplace(sol)
 
-    print 'VELOCITY'
-    print U.short_description()
-    print
-    print 'VORTICITY'
-    print W.short_description()
-    print
-    print 'W_hat'
-    print W_hat
-    print
-    print 'U_hat'
-    print U_hat
-    print
-    print 'Psi_hat'
-    print psi_hat
-    print
+    print('VELOCITY')
+    print(U.short_description())
+    print()
+    print('VORTICITY')
+    print(W.short_description())
+    print()
+    print('W_hat')
+    print(W_hat)
+    print()
+    print('U_hat')
+    print(U_hat)
+    print()
+    print('Psi_hat')
+    print(psi_hat)
+    print()
     for eq in Assignment.assign(U_hat, sol):
         eq, trs, wn = SpectralTransformUtils.parse_expression(eq)
-        print
-        print eq
+        print()
+        print(eq)
         for tr in trs:
-            print tr.short_description()
-        print wn
+            print(tr.short_description())
+        print(wn)
 
diff --git a/hysop/symbolic/tmp.py b/hysop/symbolic/tmp.py
index 04ab6629d32a58a1a4c48f90a6aecafa59fb1622..1605e214880a5b9f74fd91daa88ee732fd24fe6b 100644
--- a/hysop/symbolic/tmp.py
+++ b/hysop/symbolic/tmp.py
@@ -5,17 +5,17 @@ class TmpScalar(SymbolicScalar):
     def __new__(cls, *args, **kwds):
         if ('dtype' not in kwds):
             msg='dtype has not been specified for TmpScalar {}.'
-            msg=msg.format(kwds.get('name','unknown').encode('utf-8'))
+            msg=msg.format(kwds.get('name','unknown'))
             raise RuntimeError(msg)
         dtype = kwds.pop('dtype')
         obj = super(TmpScalar,cls).__new__(cls, *args, **kwds)
         obj._dtype = dtype
         return obj
-    
+
     @property
     def dtype(self):
         return self._dtype
-    
+
     @property
     def ctype(self):
         from hysop.backend.device.codegen.base.variables import dtype_to_ctype
diff --git a/hysop/testsenv.py b/hysop/testsenv.py
index 046f2b4d3191a5c5c0cc7bbe13597945f0df0a5b..d368c36333ea13e209a1e8c843e80198fd575f3c 100644
--- a/hysop/testsenv.py
+++ b/hysop/testsenv.py
@@ -35,12 +35,12 @@ if __SCALES_ENABLED__:
         return f
 else:
     scales_failed = pytest.mark.xfail
-            
+
 
 @contextlib.contextmanager
 def test_context():
-    with printoptions(threshold=10000, linewidth=1000, 
-                      nanstr='nan', infstr='inf', 
+    with printoptions(threshold=10000, linewidth=1000,
+                      nanstr='nan', infstr='inf',
                       formatter={'float': lambda x: '{:>6.2f}'.format(x)}):
         yield
 
@@ -51,7 +51,7 @@ def domain_boundary_iterator(dim):
     from hysop.constants import BoxBoundaryCondition
     choices = (BoxBoundaryCondition.OUTFLOW, BoxBoundaryCondition.SYMMETRIC)
     choices = tuple(it.product(choices, repeat=2))
-    for i in xrange(dim,-1,-1):
+    for i in range(dim,-1,-1):
         bd0 = ((BoxBoundaryCondition.PERIODIC, BoxBoundaryCondition.PERIODIC),)*i
         for bd1 in it.product(choices, repeat=dim-i):
             bd = bd0+bd1
@@ -69,7 +69,7 @@ if __HAS_OPENCL_BACKEND__:
         """For opencl tests that must not fail
         """
         return f
-        
+
     @static_vars(cl_environments={})
     def iter_clenv(device_type=None, all_platforms=None, **kwds):
         """
@@ -95,7 +95,7 @@ if __HAS_OPENCL_BACKEND__:
                 mpi_params = default_mpi_params()
                 for i,plat in enumerate(cl.get_platforms()):
                     for j,dev in enumerate(plat.get_devices()):
-                        cl_env = get_or_create_opencl_env(platform_id=i, device_id=j, 
+                        cl_env = get_or_create_opencl_env(platform_id=i, device_id=j,
                                 mpi_params=mpi_params, **kwds)
                         if (i==__DEFAULT_PLATFORM_ID__) and (j==__DEFAULT_DEVICE_ID__):
                             cl_environments[None].insert(0, cl_env)
@@ -105,19 +105,19 @@ if __HAS_OPENCL_BACKEND__:
                 for cl_env in iter_clenv(cl_device_type=None, all_platforms=True):
                     if (cl_env.device.type & cl_device_type):
                         cl_environments[cl_device_type].append(cl_env)
-        
+
         if len(cl_environments[cl_device_type])==0:
             msg='     |Could not generate any opencl environment for device type {}.'
             msg=msg.format(device_type)
             if (cl_device_type == None):
                 raise RuntimeError(msg)
             else:
-                print msg
+                print(msg)
         for cl_env in cl_environments[cl_device_type]:
             yield cl_env
             if not all_platforms:
                 return
-            
+
 else:
     opencl_failed = pytest.mark.xfail
     iter_clenv = None
@@ -167,7 +167,7 @@ class postclean(object):
             """return wrapped function + post exec
             """
             f(*args)
-            print "RM ...", self.working_dir, IO.default_path(), main_rank
+            print("RM ...", self.working_dir, IO.default_path(), main_rank)
             if main_rank == 0:
                 if os.path.exists(IO.default_path()):
                     shutil.rmtree(IO.default_path())
@@ -178,12 +178,12 @@ class postclean(object):
 
 
 class TestCartesianField(object):
-    """
+    r"""
     Generate data layouts like cartesian fields (only for testing purposes).
-       
+
     This is usefull to test operators internals without the need
     to create real cartesian discrete fields.
-    
+
     /!\ warning: grid size is not the same as grid shape:
         grid_shape = grid_size[::-1]
           3d size  id (X, Y, Z)
@@ -210,12 +210,12 @@ class TestCartesianField(object):
         self.extra_ghosts       = npw.asintarray(first_not_None(extra_ghosts,(0,)*dim))
         assert (self.min_ghosts>=0).all()
         assert (self.extra_ghosts>=0).all()
-    
-        self.data = tuple(self._alloc_array(init, ghosts_init, extra_ghosts_init) 
-                                    for i in xrange(nb_components))
+
+        self.data = tuple(self._alloc_array(init, ghosts_init, extra_ghosts_init)
+                                    for i in range(nb_components))
 
     def _alloc_array(self, init, ghosts_init, extra_ghosts_init):
-        
+
         view0, view1 = self.view(self.dim)
         data = npw.empty(shape=self.grid_shape, dtype=self.dtype)
         self._init_array(data[...],         extra_ghosts_init)
@@ -231,19 +231,19 @@ class TestCartesianField(object):
         else:
             check_instance(vals, (int,float,npw.number))
             a[...] = vals
-    
+
     def view(self, dim):
         assert (1 <= dim <= self.dim)
-        
+
         compute_grid_size = self.compute_grid_size
         min_ghosts, extra_ghosts, ghosts = (self.min_ghosts, self.extra_ghosts, self.ghosts)
-        
+
         field_view = [] # view of the computational grid with minimal ghosts in the full grid
         cg_view = []    # view of the computational grid without ghosts in the field_view subgrid
-        for i in xrange(self.dim):
+        for i in range(self.dim):
             j = (self.dim - i - 1)
             if (j < dim):
-                field_sl = slice(ghosts[j] - min_ghosts[j], 
+                field_sl = slice(ghosts[j] - min_ghosts[j],
                                  compute_grid_size[j]+ghosts[j]+min_ghosts[j])
                 cg_sl = slice(min_ghosts[j], compute_grid_size[j]+min_ghosts[j])
                 field_view.append(field_sl)
@@ -255,7 +255,7 @@ class TestCartesianField(object):
     @property
     def ghosts(self):
         return self.min_ghosts + self.extra_ghosts
-    
+
     @property
     def min_grid_size(self):
         return self.compute_grid_size + 2*self.min_ghosts
@@ -263,11 +263,11 @@ class TestCartesianField(object):
     @property
     def grid_size(self):
         return self.compute_grid_size + 2*self.ghosts
-    
+
     @property
     def min_grid_shape(self):
         return self.min_grid_size[::-1]
-    
+
     @property
     def compute_grid_shape(self):
         return self.compute_grid_size[::-1]
@@ -281,11 +281,11 @@ class TestCartesianField(object):
                 object: lambda x: '{:^{width}}'.format(x, width=element_width)[:element_width],
                 float:  lambda x: '{:{width}.2f}'.format(x, width=element_width)
         }
-        _print_opts = dict(threshold=10000, linewidth=1000, 
+        _print_opts = dict(threshold=10000, linewidth=1000,
                              formatter={'object': lambda x: _formatter.get(type(x),
                                                                 _formatter[object])(x)})
         _print_opts.update(print_opts)
-        
+
         strarr = npw.empty_like(self.data[0], dtype=object)
         if raw_data:
             strarr[...] = self.data[0]
@@ -296,7 +296,7 @@ class TestCartesianField(object):
             strarr[view0][view1] = '.'
 
         with printoptions(**_print_opts):
-            print strarr
+            print(strarr)
 
     def __str__(self):
         ss = \
@@ -307,7 +307,7 @@ class TestCartesianField(object):
     min_grid_size:     {}
     extra_ghosts:      {}
     grid_size:         {}'''.format(self.name, self.dtype.__name__,
-                self.compute_grid_size, self.min_ghosts, self.min_grid_size, 
+                self.compute_grid_size, self.min_ghosts, self.min_grid_size,
                 self.extra_ghosts, self.grid_size)
         return ss
 
@@ -316,8 +316,8 @@ if __name__ == '__main__':
     a =  TestCartesianField('A', 2, npw.int32, 1,
                             [16,8], [2,1], [1,1],
                             (0,10), -1, -2)
-    print a
-    print
+    print(a)
+    print()
     a.fancy_print()
-    print
-    print a.data[0]
+    print()
+    print(a.data[0])
diff --git a/hysop/tools/cache.py b/hysop/tools/cache.py
index 44a9984349ee3ffa7f4710032fe95d2a1a1e10c5..97301791968c9fb8c4df99f99a11219b5d0f86e3 100644
--- a/hysop/tools/cache.py
+++ b/hysop/tools/cache.py
@@ -19,7 +19,7 @@ if (machine_id in  (None,'')):
     machine_id = uuid.getnode()
 
 @contextlib.contextmanager
-def lock_file(filepath, mode, compressed=True, 
+def lock_file(filepath, mode, compressed=True,
         timeout=3600, check_interval=1):
     """
     Opens a locked file with specified mode, possibly compressed.
@@ -35,24 +35,24 @@ def lock_file(filepath, mode, compressed=True,
                     raise
         if not os.path.exists(filepath):
             open(filepath, 'a').close()
-        with portalocker.Lock(filename=filepath, timeout=timeout, mode=mode, 
-                check_interval=check_interval) as fl: 
+        with portalocker.Lock(filename=filepath, timeout=timeout, mode=mode,
+                check_interval=check_interval) as fl:
             if compressed:
-                with gzip.GzipFile(fileobj=fl) as f:
+                with gzip.GzipFile(fileobj=fl, mode=mode) as f:
                     yield f
             else:
                 yield fl
     except portalocker.exceptions.LockException as e:
         msg='\nFATAL ERROR: Could not obtain lock for file \'{}\' after waiting for {}s.\n'
         msg=msg.format(filepath, timeout)
-        print msg
+        print(msg)
         raise e
 
 @contextlib.contextmanager
 def read_only_lock(filepath, compressed=True,
         timeout=3600, check_interval=1):
     """Opens a locked read only file, possibly compressed."""
-    with lock_file(filepath=filepath, mode='r', compressed=compressed,
+    with lock_file(filepath=filepath, mode='rb', compressed=compressed,
             timeout=timeout, check_interval=check_interval) as f:
         yield f
 
@@ -60,7 +60,7 @@ def read_only_lock(filepath, compressed=True,
 def write_only_lock(filepath, compressed=True,
         timeout=3600, check_interval=1):
     """Opens a locked write only file, possibly compressed."""
-    with lock_file(filepath=filepath, mode='w', compressed=compressed,
+    with lock_file(filepath=filepath, mode='wb', compressed=compressed,
             timeout=timeout, check_interval=check_interval) as f:
         yield f
 
@@ -77,7 +77,7 @@ def load_cache(filepath, match_type=dict, on_fail={}, **kwds):
             data = on_fail
     return data
 
-def update_cache(filepath, key, data, match_type=dict, on_fail={}, **kwds): 
+def update_cache(filepath, key, data, match_type=dict, on_fail={}, **kwds):
     """
     Update cache entry in given file atomically with a (key,data) pair.
     Cached data is a pickled dictionnary.
@@ -99,13 +99,13 @@ def load_data_from_cache(filepath, key, match_type=dict, on_fail={}, **kwds):
 def load_attributes_from_cache(filepath, key, instance, attrs, **kwds):
     """
     Load cached entries from a given file atomically.
-    
+
     Cached data is assumed to be a dictionnary.
     If key is present in pickled data, try to get all
     given attributes data by keys given in attrs.
     Set instance attributes with those values.
 
-    If one attribute is missing or key is not present 
+    If one attribute is missing or key is not present
     in loaded data, set all values to None in instance
     and return False.
 
diff --git a/hysop/tools/callback.py b/hysop/tools/callback.py
index b7bd848a5c783f80533b6f18c59d529eea5ca840..f5d1f605b2fbe95253ee7973736d3a27011be2e0 100644
--- a/hysop/tools/callback.py
+++ b/hysop/tools/callback.py
@@ -22,7 +22,7 @@ class TimerInterface(object):
         self.max   = None
         self.nruns = 0
         self.data  = []
-    
+
     def mean(self):
         if self.nruns==0:
             return None
@@ -37,11 +37,11 @@ class TimerInterface(object):
 
     def status(self):
         if self.state is None: #waiting 1st run
-            return 'W'       
+            return 'W'
         elif self.state < 0:   #running
             return 'R'
         else:                  #sleeping
-            return 'S'       
+            return 'S'
 
     def register_timing(self,timing):
         if self.min is None:
@@ -59,10 +59,10 @@ class TimerInterface(object):
 
     def __str__(self):
         return '({}) nruns={:4d}, min={}, max={}, mean={}, total={}' \
-                .format(self.status(), self.nruns, 
-                    time2str(self.min), time2str(self.max), 
+                .format(self.status(), self.nruns,
+                    time2str(self.min), time2str(self.max),
                     time2str(self.mean()), time2str(self.total()))
-    
+
     @staticmethod
     def _as_group(groupname,tasks,tic_callbacks=[],tac_callbacks=[]):
         return TimingGroup(name=groupname,tasks=tasks,
@@ -90,7 +90,7 @@ class MemInterface(TimerInterface):
         elif bdw > self.max_bandwidth:
             self.max_bandwidth = bdw
         self.bandwidth.append(bdw)
-    
+
     def mean_bandwidth(self):
         if self.nruns==0:
             return None
@@ -111,7 +111,7 @@ class MemInterface(TimerInterface):
 class MemcpyInterface(MemInterface):
     def __init__(self,membytes,**kargs):
         super(MemcpyInterface,self).__init__(membytes=membytes,**kargs)
-    
+
     def __str__(self):
         s = '\n{:15s} min_bdw={}, max_bdw={}, mean_bdw={}, total_mem_moved={}'.format(
                 '',
@@ -126,10 +126,10 @@ class ComputeInterface(MemInterface):
             raise ValueError('per_work_statistic is not a WorkStatistics')
         if total_work<1:
             raise ValueError('total_work < 1.')
-        
+
         membytes = total_work*per_work_statistic.global_mem_transactions()
         super(ComputeInterface, self).__init__(membytes=membytes,**kargs)
-        
+
         self.ftype = ftype
         self.total_work           = total_work
         self.per_work_statistic   = per_work_statistic
@@ -138,17 +138,17 @@ class ComputeInterface(MemInterface):
     def register_timing(self,timing):
         super(ComputeInterface,self).register_timing(timing)
         self.total_work_statistic += self.total_work*self.per_work_statistic
-    
+
     def stats_per_second(self):
         if self.nruns==0:
             return None
         else:
             return self.total_work_statistic.compute_timed_statistics(self.total())
-    
+
     def __str__(self):
-        
+
         s=''
-        
+
         timed_stats = self.stats_per_second()
         if (timed_stats is not None):
 
@@ -167,13 +167,13 @@ class ComputeInterface(MemInterface):
             flops *= float_op_factor
 
             opi = flops/timed_stats.global_mem_transactions()
-        
+
             if timed_stats.global_mem_throughput()>0:
                 s += '  throughput={}'.format(bdw2str(timed_stats.global_mem_throughput()))
                 if timed_stats.global_mem_throughput() < timed_stats.total_mem_throughput():
                     s+= ' (tot={})'.format(bdw2str(timed_stats.total_mem_throughput()))
                 s += ' OPI={}'.format(unit2str(opi,'FLOP/B',decimal=True,rounded=2))
-            for (op_category, ops_per_second) in timed_stats.ops_per_second().iteritems():
+            for (op_category, ops_per_second) in timed_stats.ops_per_second().items():
                 if op_category!='FLOPS':
                     s += '  {}'.format(unit2str(ops_per_second,op_category,decimal=True,rounded=2))
                 else:
@@ -189,7 +189,7 @@ class CallbackTask(object):
         self.tic_callbacks = []
         self.tac_callbacks = []
         self.register_callbacks(tic_callbacks,tac_callbacks)
-    def tic(self,**kargs): 
+    def tic(self,**kargs):
         self._on_tic(**kargs)
         for cb in self.tic_callbacks:
             cb(self,**kargs)
@@ -200,13 +200,13 @@ class CallbackTask(object):
     def register_callbacks(self,tic_callbacks=[],tac_callbacks=[]):
         tic_callbacks = _to_list(tic_callbacks)
         tac_callbacks = _to_list(tac_callbacks)
-        for cb in tic_callbacks: 
+        for cb in tic_callbacks:
             if cb not in self.tic_callbacks:
                 self.tic_callbacks.append(cb)
-        for cb in tac_callbacks: 
+        for cb in tac_callbacks:
             if cb not in self.tac_callbacks:
                 self.tac_callbacks.append(cb)
-    
+
     def _on_tic(self,**kargs):
         msg='_on_tic not implemented in class {}.'.format(self.__class__.__name__)
         raise NotImplementedError(msg)
@@ -219,7 +219,7 @@ class CallbackTask(object):
 
     def report(self,offset):
         return self.offset_str(offset) + '{:15s}'.format(self.name)
-    
+
     @staticmethod
     def offset_str(count):
         return '  '*count
@@ -229,11 +229,11 @@ class CallbackTask(object):
 
 class CallbackGroup(CallbackTask):
     def __init__(self,name,tasks,**kargs):
-        super(CallbackGroup,self).__init__(name,**kargs)    
+        super(CallbackGroup,self).__init__(name,**kargs)
         self.tasks  = tasks
         self.ticked = np.zeros(shape=(len(tasks),), dtype=bool)
         self.tacked = self.ticked.copy()
-        
+
         taskid = {}
         for i,task in enumerate(tasks):
             taskid[task.name] = i
@@ -253,10 +253,10 @@ class CallbackGroup(CallbackTask):
                 super(CallbackGroup,self).tac(**args)
                 self.ticked[:] = False
                 self.tacked[:] = False
-               
+
         for task in tasks:
             task.register_callbacks(tic_callbacks=_on_task_tic,tac_callbacks=_on_task_tac)
-    
+
     def _check(self):
         if len(self.tasks)==0:
             raise ValueError('Empty task list!')
@@ -267,17 +267,17 @@ class CallbackGroup(CallbackTask):
         raise RuntimeError('CallbackGroup.tic() should not be called explicitely.')
     def tac(self,**kargs):
         raise RuntimeError('CallbackGroup.tac() should never be called explicitely.')
-    
+
     def report(self,offset=0):
         s = ''
         s += '{}{}'.format(self.offset_str(offset), self.name)
-        tasks = sorted(self.tasks, key=lambda x:x.total(), reverse=True)
+        tasks = tuple(sorted(self.tasks, key=lambda x:x.total(), reverse=True))
         for task in tasks:
             s += '\n'+task.report(offset+1)
         s += '\n{}{:15s} {}'.format(self.offset_str(offset+1),'total:',
                 self.__str__())
         return s
-    
+
     @classmethod
     def _as_group(cls,groupname,tasks,tic_callbacks=[],tac_callbacks=[],**kargs):
         return self.__class__(name=groupname,tasks=tasks,
@@ -324,8 +324,8 @@ class TimingGroup(CallbackGroup, TimerInterface):
 
     def __str__(self):
         return TimerInterface.__str__(self)
-    
-class TimingTask(CallbackTask,TimerInterface): 
+
+class TimingTask(CallbackTask,TimerInterface):
     def __init__(self,**kargs):
         super(TimingTask,self).__init__(**kargs)
 
@@ -333,7 +333,7 @@ class TimingTask(CallbackTask,TimerInterface):
         return '{} {}'.format(CallbackTask.report(self,offset), TimerInterface.__str__(self))
     def __str__(self):
         return self.report()
-    
+
 class MemcpyTask(CallbackTask,MemcpyInterface):
     def __init__(self,MPI,**kargs):
         super(MemcpyTask,self).__init__(**kargs)
@@ -341,7 +341,7 @@ class MemcpyTask(CallbackTask,MemcpyInterface):
 
     def report(self,offset=0):
         return '{} {}'.format(
-                CallbackTask.report(self,offset), 
+                CallbackTask.report(self,offset),
                 self.format(MemcpyInterface.__str__(self),offset))
     def __str__(self):
         return self.report()
@@ -358,7 +358,7 @@ class ComputeTask(CallbackTask,ComputeInterface):
 
     def report(self,offset=0):
         return '{} {}'.format(
-                CallbackTask.report(self,offset), 
+                CallbackTask.report(self,offset),
                 self.format(ComputeInterface.__str__(self),offset))
     def __str__(self):
         return self.report()
@@ -390,7 +390,7 @@ class CallbackProfiler(object):
         self._check_registered(target)[target].tic(**kargs)
     def tac(self,target,**kargs):
         self._check_registered(target)[target].tac(**kargs)
-    
+
     def register_tasks(self,tasks, tic_callbacks=[], tac_callbacks=[],**kargs):
         tasks  = _to_list(tasks)
         for task in tasks:
@@ -409,8 +409,8 @@ class CallbackProfiler(object):
                     task = MPITimingTask(MPI=self._MPI,name=taskname,**kargs)
             task.register_callbacks(tic_callbacks,tac_callbacks)
         self.tasks[taskname] = task
-    
-    def register_group(self,groupname, tasknames, 
+
+    def register_group(self,groupname, tasknames,
             tic_callbacks=[], tac_callbacks=[]):
         if groupname in self.groups:
             source = set(tasknames)
@@ -432,7 +432,7 @@ class CallbackProfiler(object):
             self.groups[groupname] = group
 
     def registered_targets(self):
-        return self.groups.keys() + self.tasks.keys()
+        return tuple(self.groups.keys()) + tuple(self.tasks.keys())
 
     def register_callbacks(self,target,tic_callbacks=[],tac_callbacks=[]):
         dic = self._check_registered(target)
@@ -452,30 +452,30 @@ class CallbackProfiler(object):
         if target in self.registered_targets():
             msg='Target {} was already registered!'.format(target)
             raise ValueError(msg)
-    
+
     def report(self,mode='recursive'):
         s = '=== Callback Profiler Report ==='
         if mode=='all':
             if self.has_tasks():
                 s += '\n ::Individual tasks::'
-                for taskname,task in self.tasks.iteritems():
+                for taskname,task in self.tasks.items():
                     s += '\n'+task.report(1)
                 if self.has_groups():
                     s += '\n ::Group tasks::'
-                    for taskname,task in self.groups.iteritems():
+                    for taskname,task in self.groups.items():
                         s += '\n'+task.report(1)
         elif mode =='recursive':
             if self.has_groups():
-                groups = sorted(self.groups.values(), key=lambda x:x.total(), reverse=True)
+                groups = tuple(sorted(self.groups.values(), key=lambda x:x.total(), reverse=True))
                 for group in groups:
                     s += '\n'+group.report(1)
             if self.has_tasks():
                 individual_tasknames = set(self.tasks.keys()).difference(self._tasks_in_group)
-                tasknames = sorted(individual_tasknames, key=lambda x:self.tasks[x].total(), reverse=True)
+                tasknames = tuple(sorted(individual_tasknames, key=lambda x:self.tasks[x].total(), reverse=True))
                 for taskname in tasknames:
                     task = self.tasks[taskname]
                     s += '\n'+task.report(1)
         return s
-    
+
     def __str__(self):
         return self.report()
diff --git a/hysop/tools/contexts.py b/hysop/tools/contexts.py
index 7703a92adc0f09b6f820debd534de60f1c07c85f..ea340e07a24bd3034b8916c42969b7f67f0fc983 100644
--- a/hysop/tools/contexts.py
+++ b/hysop/tools/contexts.py
@@ -1,9 +1,13 @@
+import os, time, sys
+import numpy as np
+from contextlib import contextmanager, ExitStack
 
-import os
-from hysop.deps import np, time, sys
-from contextlib import contextmanager
+@contextmanager
+def nested(*managers):
+    with ExitStack() as stack:
+         yield tuple(stack.enter_context(m) for m in managers)
 
-class Timer(object):    
+class Timer(object):
     def __enter__(self, factor=1):
         self.start = time.time()
         self.factor = factor
@@ -21,7 +25,7 @@ class Timer(object):
 def printoptions(*args, **kwargs):
     original = np.get_printoptions()
     np.set_printoptions(*args, **kwargs)
-    yield 
+    yield
     np.set_printoptions(**original)
 
 
@@ -32,7 +36,7 @@ def systrace(fn=None):
         print('{} {}:{}'.format(event, frame.f_code.co_filename, frame.f_lineno))
     fn = fn or __trace
     sys.settrace(fn)
-    yield 
+    yield
     sys.settrace(__old_trace)
 
 @contextmanager
@@ -66,7 +70,7 @@ def stdout_redirected(to=os.devnull):  # C-level redirection (file descriptor le
         return fd
     stdout = sys.stdout
     stdout_fd = fileno(stdout)
-    with os.fdopen(os.dup(stdout_fd), 'wb') as copied: 
+    with os.fdopen(os.dup(stdout_fd), 'wb') as copied:
         stdout.flush()
         os.dup2(fileno(to), stdout_fd)
         try:
@@ -84,7 +88,7 @@ def stderr_redirected(to=os.devnull):  # C-level redirection (file descriptor le
         return fd
     stderr = sys.stderr
     stderr_fd = fileno(stderr)
-    with os.fdopen(os.dup(stderr_fd), 'wb') as copied: 
+    with os.fdopen(os.dup(stderr_fd), 'wb') as copied:
         stderr.flush()
         os.dup2(fileno(to), stderr_fd)
         try:
diff --git a/hysop/tools/debug_dumper.py b/hysop/tools/debug_dumper.py
index 868179b3f0def67cf98a1d5ba8779f1b69122a10..9e2a5a18106eff3669efd80ad7b76b116f7ecc4e 100644
--- a/hysop/tools/debug_dumper.py
+++ b/hysop/tools/debug_dumper.py
@@ -13,18 +13,22 @@ from hysop.fields.discrete_field import DiscreteScalarFieldView
 
 class DebugDumper(object):
     def __init__(self, name, path, force_overwrite=False,
-                 enable_on_op_apply=False, dump_precision=10,
-                 comm=MPI.COMM_WORLD, io_leader=0):
+                 enable_on_op_apply=False, dump_precision=12,
+                 comm=MPI.COMM_WORLD, io_leader=0, dump_data=False):
         assert isinstance(name, str), name
         assert isinstance(path, str), path
+
         directory = os.path.join(path, name)
+
         blobs_directory = os.path.join(directory, 'data')
-        if not os.path.isdir(blobs_directory) and comm.rank==0:
-            os.makedirs(blobs_directory)
+        if dump_data:
+            if not os.path.isdir(blobs_directory) and comm.rank==0:
+                os.makedirs(blobs_directory)
+        self.blobs_directory = blobs_directory
+        self.dump_data = dump_data
 
         self.name = name
         self.directory = directory
-        self.blobs_directory = blobs_directory
         self.dump_id = 0
         self.enable_on_op_apply = enable_on_op_apply
         self.dump_precision = dump_precision
@@ -60,10 +64,10 @@ class DebugDumper(object):
     def lformat(cls, id_, iteration, time, tag, min_, max_, mean, variance, dtype, shape, description='', dump_precision=None):
         try:
             return '{:<4}  {:<10}  {:<20.{p}f}  {:<40}  {:<+20.{p}f}  {:<+20.{p}f}  {:<+20.{p}f}  {:<+20.{p}f}  {:<20}  {:<20}  {}'.format(
-                id_, iteration, time, tag, min_, max_, mean, variance, dtype, shape, description, p=dump_precision)
+                id_, iteration, time, tag, min_, max_, mean, variance, str(dtype), str(shape), description, p=dump_precision)
         except:
             return '{:<4}  {:<10}  {:<20}  {:<40}  {:<20}  {:<20}  {:<20}  {:<20}  {:<20}  {:<20}  {}'.format(
-                id_, iteration, time, tag, min_, max_, mean, variance, dtype, shape, description)
+                id_, iteration, time, tag, min_, max_, mean, variance, str(dtype), str(shape), description)
 
     def print_header(self, with_datetime=False):
         now = datetime.datetime.now()
@@ -113,18 +117,18 @@ class DebugDumper(object):
                 dtype = d.dtype
                 shape = None
                 id_ = self.dump_id
-                _min = comm.allreduce(float(np.nanmin(d)),  op=MPI.MIN)
-                _max = comm.allreduce(float(np.nanmax(d)),  op=MPI.MAX)
-                mean = comm.allreduce(float(np.nanmean(d))) / comm_size
-                variance = comm.allreduce(float(np.nansum((d-mean)**2))) / \
-                    float(comm.allreduce(long(d.size)))
+                _min = comm.allreduce(np.nanmin(d),  op=MPI.MIN)
+                _max = comm.allreduce(np.nanmax(d),  op=MPI.MAX)
+                mean = comm.allreduce(np.nanmean(d), op=MPI.SUM) / comm_size
+                size = float(comm.allreduce(d.size))
+                variance = comm.allreduce((d.size/size)*np.nanmean((d-mean)**2))
                 entry = '\n'+self.lformat(id_, iteration, time, tag_, _min, _max,
                                           mean, variance, dtype, shape, description_, self.dump_precision)
 
                 if self.is_io_leader:
                     self.runfile.write(entry)
 
-                if (comm_size == 1):
+                if self.dump_data and (comm_size == 1):
                     dst = '{}/{}'.format(self.blobs_directory, self.dump_id)
                     np.savez_compressed(dst, data=d)
 
diff --git a/hysop/tools/debug_utils.py b/hysop/tools/debug_utils.py
index 4bf2c57b3abebc10c2ac722fad1d2d9f7579b6f9..c3adf14edb9bdd1ee62e367352d764b26135c873 100644
--- a/hysop/tools/debug_utils.py
+++ b/hysop/tools/debug_utils.py
@@ -10,30 +10,30 @@ from hysop.fields.discrete_field import DiscreteScalarFieldView
 from hysop.tools.types import check_instance, first_not_None
 
 class ImshowDebugger(object):
-    def __init__(self, data, ntimes=1, cmap='coolwarm', 
-                        enable_on_op_apply=False, 
+    def __init__(self, data, ntimes=1, cmap='coolwarm',
+                        enable_on_op_apply=False,
                         **kwds):
 
         check_instance(data, dict, keys=str)
         ndata = len(data)
         assert ndata >= 1, ndata
         assert ntimes >= 1, ntimes
-        
+
         fig,_axes = plt.subplots(ndata, ntimes, **kwds)
         _axes = np.asarray(_axes).reshape(ndata, ntimes)
 
         axes = {}
         for (i,k) in enumerate(data.keys()):
             axes[k] = _axes[i]
-        
+
         imgs = {}
-        for (k,v) in data.iteritems():
+        for (k,v) in data.items():
             v = self.get_data(v)
-            for j in xrange(ntimes): 
+            for j in range(ntimes):
                 axes[k][j].set_title('{} at t=Tn-{}'.format(k,j))
                 axes[k][j].set_xlim(0, v.shape[1]-1)
                 axes[k][j].set_ylim(0, v.shape[0]-1)
-                img = axes[k][j].imshow(self.normalize(v), cmap=cmap, 
+                img = axes[k][j].imshow(self.normalize(v), cmap=cmap,
                         vmin=0.0, vmax=1.0, interpolation='bilinear')
                 imgs.setdefault(k,[]).append(img)
 
@@ -50,7 +50,7 @@ class ImshowDebugger(object):
 
         self.cl_queues = []
         self.running = True
-        
+
         mng = plt.get_current_fig_manager()
         #mng.window.maximize()
 
@@ -83,14 +83,14 @@ class ImshowDebugger(object):
             queue.finish()
         imgs = self.imgs
         ntimes = self.ntimes
-        for (k,data) in self.data.iteritems():
+        for (k,data) in self.data.items():
             data = self.get_data(data)
-            for j in xrange(ntimes-1,0,-1): 
+            for j in range(ntimes-1,0,-1):
                 imgs[k][j].set_data(imgs[k][j-1].get_array())
             imgs[k][0].set_array(self.normalize(data))
         self.fig.canvas.draw()
         plt.pause(0.01)
-    
+
     def _break(self, msg=None, nostack=False):
         if not self.running:
             return
diff --git a/hysop/tools/decorators.py b/hysop/tools/decorators.py
index b52f76d1d73cb16547ffef3a74a7a0f78cdf93f3..7dd331ea245979f2fc01b8fbd54e2b66c66ea37e 100644
--- a/hysop/tools/decorators.py
+++ b/hysop/tools/decorators.py
@@ -1,9 +1,9 @@
 
-import functools
+import functools, inspect, types, warnings, sys, traceback
 from functools import wraps as __wraps
 from abc import ABCMeta
+
 from hysop.constants import __DEBUG__, __VERBOSE__, __PROFILE__, HYSOP_ROOT
-from hysop.deps import inspect, types, warnings, sys, traceback
 from hysop.tools.sys_utils import SysUtils
 from hysop.tools.warning import HysopDeprecationWarning
 
@@ -64,20 +64,20 @@ def debug(f):
                 cls = cls.__name__
                 if hasattr(args[0], 'name'):
                     cls+='({})'.format(args[0].name)
-            print '{}{}{}{}{}()'.format(
-            '{}:'.format(fw.__code__.co_filename.replace(HYSOP_ROOT, 'hysop')) if verbose else '',
-            '{} '.format(fw.__code__.co_firstlineno) if verbose else '', 
-             '>'*debug.call_depth if not verbose else '',
-             '{}::'.format(cls)   if (cls is not None) else '',
-             fw.__name__),
+            print('{}{}{}{}{}()'.format(
+                '{}:'.format(fw.__code__.co_filename.replace(HYSOP_ROOT, 'hysop')) if verbose else '',
+                '{} '.format(fw.__code__.co_firstlineno) if verbose else '',
+                '>'*debug.call_depth if not verbose else '',
+                '{}::'.format(cls)   if (cls is not None) else '',
+                fw.__name__))
             if 'name' in kw:
-                print 'with name {}.'.format(kw['name']),
+                print('with name {}.'.format(kw['name']))
 
-            if f.__name__ is '__new__':
+            if f.__name__ == '__new__':
                 fullclassname = args[0].__mro__[0].__module__ + '.'
                 fullclassname += args[0].__mro__[0].__name__
-                print '=> {}'.format(fullclassname),
-            print
+                print('=> {}'.format(fullclassname))
+            print()
 
             # Calling f
             ret = f(*args, **kw)
@@ -86,21 +86,21 @@ def debug(f):
                 # ret = f(*args, **kw)
             # except Exception as e:
                 # fn='{}{}{}{}()'.format(
-                                # '{}:'.format(f.__code__.co_filename.replace(HYSOP_ROOT, 'hysop')), 
+                                # '{}:'.format(f.__code__.co_filename.replace(HYSOP_ROOT, 'hysop')),
                                 # '{}'.format(f.__code__.co_firstlineno),
                                 # '::{}.'.format(cls) if (cls is not None) else '::',
                                 # f.__name__)
                 # if not hasattr(e, 'debug_error'):
                     # msg = '\nFATAL ERROR: Failed to call {}:'.format(fn)
-                    # print msg
-                    # print 'got exception:'
+                    # print(msg)
+                    # print('got exception:')
                     # exc_type, exc_value, exc_traceback = sys.exc_info()
                     # traceback.print_exception(exc_type, exc_value, exc_traceback, limit=3)
                     # print
-                    # print 'DEBUG CALLSTACK IS:'
+                    # print('DEBUG CALLSTACK IS:')
                 # msg = '  >{} with len(*args)={} and **kwds={}.'
                 # msg=msg.format(fn, len(args), kw.keys())
-                # print msg
+                # print(msg)
                 # e.debug_error = True
                 # raise e
 
@@ -143,14 +143,14 @@ def deprecated(f):
                 cls = args[0].__class__.__name__
             elif  hasattr(args[0], '__name__'):
                 cls = args[0].__name__
-        
+
         msg = 'Use of deprecated function {}{}{}{}()'.format(
-                                    '{}:'.format(f.__code__.co_filename.replace(HYSOP_ROOT, 'hysop')), 
+                                    '{}:'.format(f.__code__.co_filename.replace(HYSOP_ROOT, 'hysop')),
                                     '{}'.format(f.__code__.co_firstlineno),
                                     '::{}.'.format(cls) if (cls is not None) else '::',
                                     f.__name__),
         warnings.warn(msg, HysopDeprecationWarning)
-            
+
         return f(*args, **kwargs)
     return func
 
@@ -179,7 +179,7 @@ def not_implemented(f):
     fname = f.__name__
     @wraps(f)
     def wrapper(*args, **kargs):
-        args = list(args) + kargs.values()
+        args = list(args) + list(kargs.values())
         if len(args)>0:
             arg0  = args[0]
             if isinstance(arg0,type): # @classmethod
diff --git a/hysop/tools/enum.py b/hysop/tools/enum.py
index bf00ef36c0e0d7ce2041ac548ae3673d7d22bf37..e5aed8ad1d44625abe886548302d001a6ae11e20 100644
--- a/hysop/tools/enum.py
+++ b/hysop/tools/enum.py
@@ -3,9 +3,9 @@
 """
 import numpy as np
 import keyword, re
-    
+
 registered_enums = {}
-    
+
 class _EnumInstanceGenerator(object):
     # pickle instance generator
     def __call__(self, enum_name, enum_field):
@@ -23,14 +23,14 @@ class EnumFactory(object):
     """
     Class with utilities to create enums.
     """
-    
+
     class MetaEnum(type):
         pass
-    
+
     @staticmethod
     def create(name, fields, dtype=np.int32, base_cls=object):
         """Create a static enum with corresponding dtype.
-        
+
         Parameters
         -----------
         name : str
@@ -50,30 +50,30 @@ class EnumFactory(object):
         Examples
         --------
         fields = {'X':0, 'Y':1, 'Z':42}
-        TestEnum = EnumFactory.create('Test', fields) 
-    
+        TestEnum = EnumFactory.create('Test', fields)
+
         X = TestEnum()
         Y = TestEnum('Y')
         Z = TestEnum.Z
-        
-        print TestEnum.dtype
-        print TestEnum.fields()
-        print TestEnum['X'], TestEnum['Y'], TestEnum['Z']
-        
-        print TestEnum.rfields()
-        print TestEnum[0], TestEnum[1], TestEnum[42]
-        print
-        print TestEnum.X, TestEnum.Y, TestEnum.Z
-        print X, Y, Z
-        print
-        print X.__class__.__name__
-        print X.value(), X()
-        print X.svalue(), str(X)
-        print repr(X)
+
+        print(TestEnum.dtype)
+        print(TestEnum.fields())
+        print(TestEnum['X'], TestEnum['Y'], TestEnum['Z'])
+
+        print(TestEnum.rfields())
+        print(TestEnum[0], TestEnum[1], TestEnum[42])
+        print()
+        print(TestEnum.X, TestEnum.Y, TestEnum.Z)
+        print(X, Y, Z)
+        print()
+        print(X.__class__.__name__)
+        print(X.value(), X())
+        print(X.svalue(), str(X))
+        print(repr(X))
 
         See Also
         -------
-        Generate corresponding enum C or C++ code with 
+        Generate corresponding enum C or C++ code with
         `hysop.backend.device.codegen.base.enum_codegen.EnumCodeGenerator`.
 
         """
@@ -91,7 +91,7 @@ class EnumFactory(object):
             if len(set(fields)) != len(fields):
                 msg = 'Field names collision: {}.'.format(fields)
                 raise ValueError(msg)
-            fields = dict(zip(fields, xrange(len(fields))))
+            fields = dict(zip(fields, range(len(fields))))
         else:
             msg='fields have to be of type list,set,tuple or dict but got {}.'
             msg=msg.format(fields.__class__.__name__)
@@ -104,7 +104,7 @@ class EnumFactory(object):
                 msg+='\n\tregistered enum: {}\n\tnew values: {}'
                 msg=msg.format(name, enum.fields().keys(), fields.keys())
                 raise ValueError(msg)
-            elif any([ fields[k] != v for (k,v) in enum.fields().iteritems()]):
+            elif any([ fields[k] != v for (k,v) in enum.fields().items()]):
                 msg='Enum \'{}\' was already created with different values:'
                 msg+='\n\tregistered enum: {}\n\tnew values: {}'
                 msg=msg.format(name, enum.fields(), fields)
@@ -131,14 +131,14 @@ class EnumFactory(object):
                 msg=msg.format(k)
                 raise ValueError(msg)
 
-        fields  = dict(zip(fields.keys(), np.asarray(fields.values()).astype(dtype)))
+        fields  = dict(zip(fields.keys(), np.asarray(tuple(fields.values())).astype(dtype)))
         rfields = dict(zip(fields.values(), fields.keys()))
 
         def __fields(cls):
             return cls._fields
         def __rfields(cls):
             return cls._rfields
-        
+
         def __getitem__(cls,val):
             if isinstance(val,str) and (val in cls._fields.keys()):
                 return cls._fields[val]
@@ -150,7 +150,7 @@ class EnumFactory(object):
             return name
         def __repr__(cls):
             return name
-       
+
         def __value(cls,field):
             if field in cls._fields.keys():
                 return cls.dtype(cls._fields[val])
@@ -176,9 +176,9 @@ class EnumFactory(object):
                     CodegenVariable, CodegenArray
             assert vals is not None
             size = len(vals)
-            value  = [getattr(cls,cls.svalue(v)) if isinstance(v, (int,long)) 
+            value  = [getattr(cls,cls.svalue(v)) if isinstance(v, int)
                     else v for v in vals]
-            svalue = [cls.svalue(v) if isinstance(v, (int,long)) else str(v) for v in vals]
+            svalue = [cls.svalue(v) if isinstance(v, int) else str(v) for v in vals]
             if len(vals)==1:
                 return CodegenVariable(name=name,typegen=typegen,ctype=dtype_to_ctype(cls.dtype),
                         value=value[0],svalue=svalue[0],**kwds)
@@ -187,10 +187,10 @@ class EnumFactory(object):
                         value=value,svalue=svalue,dim=1,**kwds)
 
         mcls_dic = {'name':name,
-                    'dtype':dtype, 
+                    'dtype':dtype,
 
-                    '_fields':fields, 
-                    '_rfields':rfields, 
+                    '_fields':fields,
+                    '_rfields':rfields,
 
                     'fields':__fields,
                     'rfields':__rfields,
@@ -204,14 +204,13 @@ class EnumFactory(object):
                     '__str__':__str__,
                     '__repr__':__repr__}
         mcls = type(name+'MetaEnum', (EnumFactory.MetaEnum,), mcls_dic)
-        
-        class Enum(base_cls):
-            __metaclass__=mcls
-            def __init__(self, field=sorted(fields.keys())[0]):
+
+        class Enum(base_cls, metaclass=mcls):
+            def __init__(self, field=tuple(sorted(fields.keys()))[0]):
                 assert isinstance(field, str) and len(field)>0
                 self._field = field
                 self._value = self.__class__.dtype(self.__class__._fields[field])
-            
+
             def svalue(self):
                 return self._field
             def value(self):
@@ -241,14 +240,14 @@ class EnumFactory(object):
 
             def __hash__(self):
                 return hash(self._field)
-                
+
             # pickling
             def __reduce__(self):
                 return (_EnumInstanceGenerator(), (name, self._field))
-        
+
         generated_enum = type(name+'Enum', (Enum,), {})
         _all = []
-        for k,v in fields.iteritems():
+        for k,v in fields.items():
             instance = generated_enum(field=k)
             setattr(mcls, k, instance)
             _all.append(instance)
@@ -260,29 +259,29 @@ class EnumFactory(object):
 
 if __name__ == '__main__':
     fields = {'X':0,'Y':1,'Z':42}
-    TestEnum = EnumFactory.create('Test', fields) 
+    TestEnum = EnumFactory.create('Test', fields)
 
     X = TestEnum()
     Y = TestEnum('Y')
     Z = TestEnum.Z
 
-    print type(TestEnum)
-    print TestEnum.__class__
-    print TestEnum.__class__.__name__
-    print
-    print TestEnum.fields()
-    print TestEnum['X'], TestEnum['Y'], TestEnum['Z']
-    print 
-    print TestEnum.rfields()
-    print TestEnum[0], TestEnum[1], TestEnum[42]
-    print
-    print TestEnum.X, TestEnum.Y, TestEnum.Z
-    print X, Y, Z
-    print
-    print X.__class__.__name__
-    print X.value(), X()
-    print X.svalue(), str(X)
-    print repr(X), repr(Y), repr(Z)
-    print
-    print TestEnum.dtype, type(X.value())
+    print(type(TestEnum))
+    print(TestEnum.__class__)
+    print(TestEnum.__class__.__name__)
+    print()
+    print(TestEnum.fields())
+    print(TestEnum['X'], TestEnum['Y'], TestEnum['Z'])
+    print()
+    print(TestEnum.rfields())
+    print(TestEnum[0], TestEnum[1], TestEnum[42])
+    print()
+    print(TestEnum.X, TestEnum.Y, TestEnum.Z)
+    print(X, Y, Z)
+    print()
+    print(X.__class__.__name__)
+    print(X.value(), X())
+    print(X.svalue(), str(X))
+    print(repr(X), repr(Y), repr(Z))
+    print()
+    print(TestEnum.dtype, type(X.value()))
 
diff --git a/hysop/tools/field_utils.py b/hysop/tools/field_utils.py
index 2722b0bac971b0eaf8568f203a2f7c6e3506ef68..042a67150aa0047bd90d7a113942802295f48cc6 100644
--- a/hysop/tools/field_utils.py
+++ b/hysop/tools/field_utils.py
@@ -1,37 +1,45 @@
+# coding: utf-8
+
 from hysop.tools.types import first_not_None, to_tuple
 from hysop.tools.sympy_utils import nabla, partial, subscript, subscripts, \
-                                    exponent, exponents, xsymbol, get_derivative_variables
+    exponent, exponents, xsymbol, get_derivative_variables
 
+import sympy as sm
 from sympy.printing.str import StrPrinter, StrReprPrinter
-from sympy.printing.ccode import C99CodePrinter
 from sympy.printing.latex import LatexPrinter
+from packaging import version
+if version.parse(sm.__version__) > version.parse("1.7"):
+    from sympy.printing.c import C99CodePrinter
+else:
+    from sympy.printing.ccode import C99CodePrinter
+
 
 class BasePrinter(object):
     def print_Derivative(self, expr):
         (bvar, pvar, vvar, lvar) = print_all_names(expr.args[0])
-        pvar = pvar.decode('utf-8')
+        pvar = pvar
         all_xvars = get_derivative_variables(expr)
-        xvars   = tuple(set(all_xvars))
-        varpows = tuple(all_xvars.count(x) for x in xvars) 
-        bxvars  = tuple(print_name(x) for x in xvars)
-        pxvars  = tuple(print_pretty_name(x).decode('utf-8') for x in xvars)
-        vxvars  = tuple(print_var_name(x) for x in xvars)
-        lxvars  = tuple(print_latex_name(x) for x in xvars)
+        xvars = tuple(set(all_xvars))
+        varpows = tuple(all_xvars.count(x) for x in xvars)
+        bxvars = tuple(print_name(x) for x in xvars)
+        pxvars = tuple(print_pretty_name(x) for x in xvars)
+        vxvars = tuple(print_var_name(x) for x in xvars)
+        lxvars = tuple(print_latex_name(x) for x in xvars)
         return DifferentialStringFormatter.format_pd(bvar, pvar, vvar, lvar,
                                                      bxvars, pxvars, vxvars, lxvars,
                                                      varpows=varpows)
-        
+
     def _print(self, expr, **kwds):
         try:
             return super(BasePrinter, self)._print(expr, **kwds)
         except:
             print
-            msg='FATAL ERROR: {} failed to print expression {}.'
-            msg=msg.format(type(self).__name__, expr)
-            print msg
+            msg = 'FATAL ERROR: {} failed to print expression {}.'
+            msg = msg.format(type(self).__name__, expr)
+            print(msg)
             print
             raise
-   
+
 
 class NamePrinter(BasePrinter, StrReprPrinter):
     def _print(self, expr, **kwds):
@@ -40,18 +48,22 @@ class NamePrinter(BasePrinter, StrReprPrinter):
         elif hasattr(expr, '_name'):
             return expr._name
         return super(NamePrinter, self)._print(expr, **kwds)
+
     def _print_Derivative(self, expr):
         return super(NamePrinter, self).print_Derivative(expr)[0]
+
     def _print_Add(self, expr):
         return super(NamePrinter, self)._print_Add(expr).replace(' ', '')
+
     def _print_Mul(self, expr):
         return super(NamePrinter, self)._print_Mul(expr).replace(' ', '')
+
     def emptyPrinter(self, expr):
-        msg='\n{} does not implement _print_{}(self, expr).'
-        msg+='\nExpression is {}.'.format(expr)
-        msg+='\nExpression type MRO is:'
-        msg+='\n  *'+'\n  *'.join(t.__name__ for t in type(expr).__mro__)
-        msg=msg.format(self.__class__.__name__, expr.__class__.__name__)
+        msg = '\n{} does not implement _print_{}(self, expr).'
+        msg += '\nExpression is {}.'.format(expr)
+        msg += '\nExpression type MRO is:'
+        msg += '\n  *'+'\n  *'.join(t.__name__ for t in type(expr).__mro__)
+        msg = msg.format(self.__class__.__name__, expr.__class__.__name__)
         raise NotImplementedError(msg)
 
 
@@ -62,14 +74,16 @@ class PrettyNamePrinter(BasePrinter, StrPrinter):
         elif hasattr(expr, '_pretty_name'):
             return expr._pretty_name
         return super(PrettyNamePrinter, self)._print(expr, **kwds)
+
     def _print_Derivative(self, expr):
         return super(PrettyNamePrinter, self).print_Derivative(expr)[1]
+
     def emptyPrinter(self, expr):
-        msg='\n{} does not implement _print_{}(self, expr).'
-        msg+='\nExpression is {}.'.format(expr)
-        msg+='\nExpression type MRO is:'
-        msg+='\n  *'+'\n  *'.join(t.__name__ for t in type(expr).__mro__)
-        msg=msg.format(self.__class__.__name__, expr.__class__.__name__)
+        msg = '\n{} does not implement _print_{}(self, expr).'
+        msg += '\nExpression is {}.'.format(expr)
+        msg += '\nExpression type MRO is:'
+        msg += '\n  *'+'\n  *'.join(t.__name__ for t in type(expr).__mro__)
+        msg = msg.format(self.__class__.__name__, expr.__class__.__name__)
         raise NotImplementedError(msg)
 
 
@@ -80,23 +94,27 @@ class VarNamePrinter(BasePrinter, C99CodePrinter):
         elif hasattr(expr, '_var_name'):
             return expr._var_name
         return super(VarNamePrinter, self)._print(expr, **kwds).replace(' ', '')
+
     def _print_Derivative(self, expr):
         return super(VarNamePrinter, self).print_Derivative(expr)[2]
+
     def _print_Add(self, expr):
         s = super(VarNamePrinter, self)._print_Add(expr)
         s = s.replace(' + ', '_plus_').replace(' - ', '_minus_')
         s = s.replace('+', 'plus_').replace('-', 'minus_')
         return s
+
     def _print_Mul(self, expr):
         s = super(VarNamePrinter, self)._print_Mul(expr)
         s = s.replace(' * ', '_times_').replace('+', 'plus_').replace('-', 'minus_')
         return s
+
     def emptyPrinter(self, expr):
-        msg='\n{} does not implement _print_{}(self, expr).'
-        msg+='\nExpression is {}.'.format(expr)
-        msg+='\nExpression type MRO is:'
-        msg+='\n  *'+'\n  *'.join(t.__name__ for t in type(expr).__mro__)
-        msg=msg.format(self.__class__.__name__, expr.__class__.__name__)
+        msg = '\n{} does not implement _print_{}(self, expr).'
+        msg += '\nExpression is {}.'.format(expr)
+        msg += '\nExpression type MRO is:'
+        msg += '\n  *'+'\n  *'.join(t.__name__ for t in type(expr).__mro__)
+        msg = msg.format(self.__class__.__name__, expr.__class__.__name__)
         raise NotImplementedError(msg)
 
 
@@ -107,35 +125,44 @@ class LatexNamePrinter(BasePrinter, LatexPrinter):
         elif hasattr(expr, '_latex_name'):
             return expr._latex_name
         return super(LatexNamePrinter, self)._print(expr, **kwds)
+
     def _print_Derivative(self, expr):
         return super(LatexNamePrinter, self).print_Derivative(expr)[3]
+
     def _print_int(self, expr):
         return str(expr)
+
     def emptyPrinter(self, expr):
-        msg='\n{} does not implement _print_{}(self, expr).'
-        msg+='\nExpression is {}.'.format(expr)
-        msg+='\nExpression type MRO is:'
-        msg+='\n  *'+'\n  *'.join(t.__name__ for t in type(expr).__mro__)
-        msg=msg.format(self.__class__.__name__, expr.__class__.__name__)
+        msg = '\n{} does not implement _print_{}(self, expr).'
+        msg += '\nExpression is {}.'.format(expr)
+        msg += '\nExpression type MRO is:'
+        msg += '\n  *'+'\n  *'.join(t.__name__ for t in type(expr).__mro__)
+        msg = msg.format(self.__class__.__name__, expr.__class__.__name__)
         raise NotImplementedError(msg)
 
+
 pbn = NamePrinter()
 ppn = PrettyNamePrinter()
 #pvn = VarNamePrinter()
 pln = LatexNamePrinter()
 
+
 def print_name(expr):
     return pbn.doprint(expr)
 
+
 def print_pretty_name(expr):
     return ppn.doprint(expr)
 
+
 def print_var_name(expr):
     return VarNamePrinter().doprint(expr)
 
+
 def print_latex_name(expr):
     return pln.doprint(expr)
 
+
 def print_all_names(expr):
     name = print_name(expr)
     pretty_name = print_pretty_name(expr)
@@ -145,50 +172,94 @@ def print_all_names(expr):
 
 
 def to_str(*args):
-    if len(args)==1:
-        args=to_tuple(args[0])
+    if len(args) == 1:
+        args = to_tuple(args[0])
+
     def _to_str(x):
-        if isinstance(x, unicode):
-            return x.encode('utf-8')
-        else:
-            return str(x)
+        return str(x)
     return tuple(_to_str(y) for y in args)
 
+
 # exponents formatting functions
-bexp_fn = lambda x: '^{}'.format(x) if (x>1) else ''
-pexp_fn = lambda x, sep=',': exponents(x, sep=sep) if (x>1) else u''
-vexp_fn = lambda x: 'e{}'.format(x) if (x>1) else ''
-lexp_fn = lambda x: '^<LBRACKET>{}<RBRACKET>'.format(x) if (x>1) else ''
+def bexp_fn(x): return '^{}'.format(x) if (x > 1) else ''
+
+
+pexp_fn = lambda x, sep=',': exponents(x, sep=sep) if (x > 1) else ''
+
+
+def vexp_fn(x): return 'e{}'.format(x) if (x > 1) else ''
+
+
+def lexp_fn(x): return '^<LBRACKET>{}<RBRACKET>'.format(x) if (x > 1) else ''
 
 # powers formatting functions
-bpow_fn = lambda x: '**{}'.format(x) if (x>1) else ''
-ppow_fn = lambda x, sep=',': exponents(x,sep=sep) if (x>1) else u''
-vpow_fn = lambda x: 'p{}'.format(x) if (x>1) else ''
-lpow_fn = lambda x: '^<LBRACKET>{}<RBRACKET>'.format(x) if (x>1) else ''
+
+
+def bpow_fn(x): return '**{}'.format(x) if (x > 1) else ''
+
+
+ppow_fn = lambda x, sep=',': exponents(x, sep=sep) if (x > 1) else ''
+
+
+def vpow_fn(x): return 'p{}'.format(x) if (x > 1) else ''
+
+
+def lpow_fn(x): return '^<LBRACKET>{}<RBRACKET>'.format(x) if (x > 1) else ''
 
 # subcripts formatting functions
-bsub_fn = lambda x: '_{}'.format(x) if (x is not None) else ''
-psub_fn = lambda x, sep=',': subscripts(x,sep=sep) if (x is not None) else u''
-vsub_fn = lambda x: 's{}'.format(x) if (x is not None) else ''
-lsub_fn = lambda x: '_<LBRACKET>{}<RBRACKET>'.format(x) if (x is not None) else ''
+
+
+def bsub_fn(x): return '_{}'.format(x) if (x is not None) else ''
+
+
+psub_fn = lambda x, sep=',': subscripts(x, sep=sep) if (x is not None) else ''
+
+
+def vsub_fn(x): return 's{}'.format(x) if (x is not None) else ''
+
+
+def lsub_fn(x): return '_<LBRACKET>{}<RBRACKET>'.format(x) if (x is not None) else ''
 
 # components formatting functions
-bcomp_fn = lambda x: ','.join(to_str(x)) if (x is not None) else ''
-pcomp_fn = lambda x, sep=',': subscripts(x,sep=sep) if (x is not None) else u''
-vcomp_fn = lambda x: '_'+'_'.join(to_str(x)) if (x is not None) else ''
-lcomp_fn = lambda x: '_<LBRACKET>{}<RBRACKET>'.format(','.join(to_str(x))) if (x is not None) else ''
+
+
+def bcomp_fn(x): return ','.join(to_str(x)) if (x is not None) else ''
+
+
+pcomp_fn = lambda x, sep=',': subscripts(x, sep=sep) if (x is not None) else ''
+
+
+def vcomp_fn(x): return '_'+'_'.join(to_str(x)) if (x is not None) else ''
+
+
+def lcomp_fn(x): return '_<LBRACKET>{}<RBRACKET>'.format(','.join(to_str(x))) if (x is not None) else ''
 
 # join formatting functions
-bjoin_fn = lambda x: '_'.join(to_str(x)) if (x is not None) else ''
-pjoin_fn = lambda x: ''.join(to_str(x)) if (x is not None) else u''
-vjoin_fn = lambda x: '_'.join(to_str(x)) if (x is not None) else ''
-ljoin_fn = lambda x: ''.join(to_str(x)) if (x is not None) else ''
+
+
+def bjoin_fn(x): return '_'.join(to_str(x)) if (x is not None) else ''
+
+
+def pjoin_fn(x): return ''.join(to_str(x)) if (x is not None) else ''
+
+
+def vjoin_fn(x): return '_'.join(to_str(x)) if (x is not None) else ''
+
+
+def ljoin_fn(x): return ''.join(to_str(x)) if (x is not None) else ''
+
 
 # divide formatting functions
-bdivide_fn = lambda x,y: '{}/{}'.format(x,y)
-pdivide_fn = lambda x,y: '{}/{}'.format(*to_str(x,y))
-vdivide_fn = lambda x,y: '{}__{}'.format(x,y)
-ldivide_fn = lambda x,y: '\dfrac<LBRACKET>{}<RBRACKET><LBRACKET>{}<RBRACKET>'.format(x,y)
+def bdivide_fn(x, y): return '{}/{}'.format(x, y)
+
+
+def pdivide_fn(x, y): return '{}/{}'.format(*to_str(x, y))
+
+
+def vdivide_fn(x, y): return '{}__{}'.format(x, y)
+
+
+def ldivide_fn(x, y): return r'\dfrac<LBRACKET>{}<RBRACKET><LBRACKET>{}<RBRACKET>'.format(x, y)
 
 
 class DifferentialStringFormatter(object):
@@ -200,7 +271,7 @@ class DifferentialStringFormatter(object):
         *A pretty string in utf-8 (pretty_name).
         *A variable name that can be used as a valid C identifier for code generation (var_name).
         *A latex string that can be compiled and displayed with latex (latex_name).
-    
+
     Prefix used for methods:
         b = name
         p = pretty_name
@@ -210,11 +281,11 @@ class DifferentialStringFormatter(object):
     See __main__ at the bottom of this file for usage.
     """
 
-    exp_fns    = (bexp_fn,  pexp_fn,  vexp_fn,  lexp_fn)
-    pow_fns    = (bpow_fn,  ppow_fn,  vpow_fn,  lpow_fn)
-    sub_fns    = (bsub_fn,  psub_fn,  vsub_fn,  lsub_fn)
-    comp_fns   = (bcomp_fn, pcomp_fn, vcomp_fn, lcomp_fn)
-    join_fns   = (bjoin_fn, pjoin_fn, vjoin_fn, ljoin_fn)
+    exp_fns = (bexp_fn,  pexp_fn,  vexp_fn,  lexp_fn)
+    pow_fns = (bpow_fn,  ppow_fn,  vpow_fn,  lpow_fn)
+    sub_fns = (bsub_fn,  psub_fn,  vsub_fn,  lsub_fn)
+    comp_fns = (bcomp_fn, pcomp_fn, vcomp_fn, lcomp_fn)
+    join_fns = (bjoin_fn, pjoin_fn, vjoin_fn, ljoin_fn)
     divide_fns = (bdivide_fn, pdivide_fn, vdivide_fn, ldivide_fn)
 
     @staticmethod
@@ -223,18 +294,16 @@ class DifferentialStringFormatter(object):
             '<LBRACKET>': '{',
             '<RBRACKET>': '}',
         }
-        for (k,v) in special_characters.iteritems():
-            ss = ss.replace(k,v)
-        if isinstance(ss, unicode):
-            ss = ss.encode('utf-8')
+        for (k, v) in special_characters.items():
+            ss = ss.replace(k, v)
         return ss
 
     @classmethod
     def return_names(cls, *args, **kwds):
         # fsc = format special characters
-        fsc=kwds.get('fsc', True)
-        assert len(args)>=1
-        if len(args)==1:
+        fsc = kwds.get('fsc', True)
+        assert len(args) >= 1
+        if len(args) == 1:
             if fsc:
                 return args[0]
             else:
@@ -247,27 +316,27 @@ class DifferentialStringFormatter(object):
 
     @classmethod
     def format_partial_name(cls, bvar, pvar, vvar, lvar,
-                            bpow_fn=bpow_fn, ppow_fn=ppow_fn, vpow_fn=vpow_fn, lpow_fn=lpow_fn,    
-                            bcomp_fn=bcomp_fn, pcomp_fn=pcomp_fn, vcomp_fn=vcomp_fn, lcomp_fn=lcomp_fn,    
-                            blp='(', plp='', vlp='', llp='', 
-                            brp=')', prp='', vrp='',  lrp='', 
-                            bd='d', pd=partial, vd='d', ld='<LBRACKET>\partial<RBRACKET>', 
-                            dpow=1, varpow=1, components=None, 
+                            bpow_fn=bpow_fn, ppow_fn=ppow_fn, vpow_fn=vpow_fn, lpow_fn=lpow_fn,
+                            bcomp_fn=bcomp_fn, pcomp_fn=pcomp_fn, vcomp_fn=vcomp_fn, lcomp_fn=lcomp_fn,
+                            blp='(', plp='', vlp='', llp='',
+                            brp=')', prp='', vrp='',  lrp='',
+                            bd='d', pd=partial, vd='d', ld=r'<LBRACKET>\partial<RBRACKET>',
+                            dpow=1, varpow=1, components=None,
                             trigp=3, fsc=True):
         assert (varpow != 0)
-        bd  = '' if (dpow==0) else bd
-        pd  = '' if (dpow==0) else pd
-        vd  = '' if (dpow==0) else vd
-        ld  = '' if (dpow==0) else ld
-        blp = '' if len(bvar) <= trigp else blp 
-        brp = '' if len(bvar) <= trigp else brp 
-        plp = '' if len(pvar) <= trigp else plp 
-        prp = '' if len(pvar) <= trigp else prp 
-        vlp = '' if len(vvar) <= trigp else vlp 
-        vrp = '' if len(vvar) <= trigp else vrp 
-        llp = '' if len(lvar) <= trigp else llp 
-        lrp = '' if len(lvar) <= trigp else lrp 
-        template=u'{d}{dpow}{lp}{var}{components}{rp}{varpow}' 
+        bd = '' if (dpow == 0) else bd
+        pd = '' if (dpow == 0) else pd
+        vd = '' if (dpow == 0) else vd
+        ld = '' if (dpow == 0) else ld
+        blp = '' if len(bvar) <= trigp else blp
+        brp = '' if len(bvar) <= trigp else brp
+        plp = '' if len(pvar) <= trigp else plp
+        prp = '' if len(pvar) <= trigp else prp
+        vlp = '' if len(vvar) <= trigp else vlp
+        vrp = '' if len(vvar) <= trigp else vrp
+        llp = '' if len(lvar) <= trigp else llp
+        lrp = '' if len(lvar) <= trigp else lrp
+        template = '{d}{dpow}{lp}{var}{components}{rp}{varpow}'
         bname = template.format(d=bd, dpow=bpow_fn(dpow),
                                 components=bcomp_fn(components),
                                 var=bvar, varpow=bpow_fn(varpow),
@@ -287,53 +356,53 @@ class DifferentialStringFormatter(object):
         return cls.return_names(bname, pname, vname, lname, fsc=fsc)
 
     @classmethod
-    def format_partial_names(cls, bvars, pvars, vvars, lvars, varpows, 
-                            bjoin_fn=bjoin_fn, pjoin_fn=pjoin_fn, vjoin_fn=vjoin_fn, ljoin_fn=ljoin_fn, 
-                            components=None, fsc=True, **kwds):
+    def format_partial_names(cls, bvars, pvars, vvars, lvars, varpows,
+                             bjoin_fn=bjoin_fn, pjoin_fn=pjoin_fn, vjoin_fn=vjoin_fn, ljoin_fn=ljoin_fn,
+                             components=None, fsc=True, **kwds):
         bvars, pvars, vvars, lvars = to_tuple(bvars), to_tuple(pvars), to_tuple(vvars), to_tuple(lvars)
         varpows = to_tuple(varpows)
-        assert len(bvars)==len(pvars)==len(vvars)==len(lvars)==len(varpows)
-        assert any(v>0 for v in varpows)
+        assert len(bvars) == len(pvars) == len(vvars) == len(lvars) == len(varpows)
+        assert any(v > 0 for v in varpows)
         nvars = len(bvars)
         if (components is not None):
             components = to_tuple(components)
-            assert len(components)==nvars
+            assert len(components) == nvars
         else:
             components = (None,)*nvars
 
         bnames, pnames, vnames, lnames = (), (), (), ()
         for (bvar, pvar, vvar, lvar, varpow, component) in \
                 zip(bvars, pvars, vvars, lvars, varpows, components):
-            if (varpow==0):
+            if (varpow == 0):
                 continue
             res = cls.format_partial_name(bvar=bvar, pvar=pvar, vvar=vvar, lvar=lvar,
-                                          varpow=varpow, components=component, 
+                                          varpow=varpow, components=component,
                                           fsc=False, **kwds)
-            assert len(res)==4
+            assert len(res) == 4
             bnames += (res[0],)
             pnames += (res[1],)
             vnames += (res[2],)
             lnames += (res[3],)
-        return cls.return_names(bjoin_fn(bnames), pjoin_fn(pnames), 
+        return cls.return_names(bjoin_fn(bnames), pjoin_fn(pnames),
                                 vjoin_fn(vnames), ljoin_fn(lnames), fsc=fsc)
 
     @classmethod
     def format_pd(cls, bvar, pvar, vvar, lvar,
-                       bxvars='x', pxvars=xsymbol, vxvars='x', lxvars='x', 
-                       varpows=1, var_components=None, xvars_components=None,
-                       bdivide_fn=bdivide_fn, pdivide_fn=pdivide_fn, vdivide_fn=vdivide_fn, ldivide_fn=ldivide_fn, 
-                       fsc=True, **kwds):
+                  bxvars='x', pxvars=xsymbol, vxvars='x', lxvars='x',
+                  varpows=1, var_components=None, xvars_components=None,
+                  bdivide_fn=bdivide_fn, pdivide_fn=pdivide_fn, vdivide_fn=vdivide_fn, ldivide_fn=ldivide_fn,
+                  fsc=True, **kwds):
 
         for k in ('dpow', 'components', 'bvars', 'pvars', 'vvars', 'lvars', 'varpow'):
             assert k not in kwds, 'Cannot specify reserved keyword {}.'.format(k)
 
         bxvars, pxvars, vxvars, lxvars = to_tuple(bxvars), to_tuple(pxvars), to_tuple(vxvars), to_tuple(lxvars)
         varpows = to_tuple(varpows)
-        assert len(bxvars)==len(pxvars)==len(vxvars)==len(lxvars)==len(varpows)
-        assert any(v>0 for v in varpows)
+        assert len(bxvars) == len(pxvars) == len(vxvars) == len(lxvars) == len(varpows)
+        assert any(v > 0 for v in varpows)
         dpow = sum(varpows)
 
-        numerator = cls.format_partial_name(bvar=bvar, pvar=pvar, 
+        numerator = cls.format_partial_name(bvar=bvar, pvar=pvar,
                                             vvar=vvar, lvar=lvar,
                                             fsc=False, dpow=dpow,
                                             components=var_components,
@@ -345,25 +414,25 @@ class DifferentialStringFormatter(object):
                                                components=xvars_components,
                                                **kwds)
 
-        return cls.return_names(bdivide_fn(numerator[0], denominator[0]), 
-                                pdivide_fn(numerator[1], denominator[1]), 
-                                vdivide_fn(numerator[2], denominator[2]), 
+        return cls.return_names(bdivide_fn(numerator[0], denominator[0]),
+                                pdivide_fn(numerator[1], denominator[1]),
+                                vdivide_fn(numerator[2], denominator[2]),
                                 ldivide_fn(numerator[3], denominator[3]), fsc=fsc)
-        
+
 
 if __name__ == '__main__':
     def _print(*args, **kwds):
         if isinstance(args[0], tuple):
-            assert len(args)==1
+            assert len(args) == 1
             args = args[0]
         if ('multiline' in kwds) and (kwds['multiline'] is True):
             for a in args:
-                print a
+                print(a)
         else:
-            print (u', '.join(a.decode('utf-8') for a in args)).encode('utf-8')
+            print(', '.join(a for a in args))
 
     print
-    bvar, pvar, vvar, lvar = 'Fext', u'F\u1d49xt', 'Fext', '<LBRACKET>F_<LBRACKET>ext<RBRACKET><RBRACKET>'
+    bvar, pvar, vvar, lvar = 'Fext', 'Fₑₓₜ', 'Fext', '<LBRACKET>F_<LBRACKET>ext<RBRACKET><RBRACKET>'
     _print(DifferentialStringFormatter.return_names(bvar, pvar, vvar, lvar))
 
     print
@@ -371,55 +440,54 @@ if __name__ == '__main__':
     _print(DifferentialStringFormatter.format_partial_name(bvar, pvar, vvar, lvar, dpow=1))
     _print(DifferentialStringFormatter.format_partial_name(bvar, pvar, vvar, lvar, dpow=2))
     _print(DifferentialStringFormatter.format_partial_name(bvar, pvar, vvar, lvar, dpow=3, components=0))
-    _print(DifferentialStringFormatter.format_partial_name(bvar, pvar, vvar, lvar, dpow=4, components=(0,2)))
-    
+    _print(DifferentialStringFormatter.format_partial_name(bvar, pvar, vvar, lvar, dpow=4, components=(0, 2)))
+
     print
     bvar, pvar, vvar, lvar = ('x',)*4
     _print(DifferentialStringFormatter.format_partial_name(bvar, pvar, vvar, lvar, varpow=1))
     _print(DifferentialStringFormatter.format_partial_name(bvar, pvar, vvar, lvar, varpow=2))
     _print(DifferentialStringFormatter.format_partial_name(bvar, pvar, vvar, lvar, varpow=3, components=0))
-    _print(DifferentialStringFormatter.format_partial_name(bvar, pvar, vvar, lvar, varpow=4, components=(0,2)))
-    
+    _print(DifferentialStringFormatter.format_partial_name(bvar, pvar, vvar, lvar, varpow=4, components=(0, 2)))
+
     print
-    bvar, pvar, vvar, lvar = (('x','y'),)*4
+    bvar, pvar, vvar, lvar = (('x', 'y'),)*4
     try:
-        _print(DifferentialStringFormatter.format_partial_names(bvar, pvar, vvar, lvar, varpows=(0,0)))
+        _print(DifferentialStringFormatter.format_partial_names(bvar, pvar, vvar, lvar, varpows=(0, 0)))
         raise RuntimeError()
     except AssertionError:
         pass
-    _print(DifferentialStringFormatter.format_partial_names(bvar, pvar, vvar, lvar, varpows=(0,1)))
-    _print(DifferentialStringFormatter.format_partial_names(bvar, pvar, vvar, lvar, varpows=(1,0)))
-    _print(DifferentialStringFormatter.format_partial_names(bvar, pvar, vvar, lvar, varpows=(1,1)))
-    _print(DifferentialStringFormatter.format_partial_names(bvar, pvar, vvar, lvar, varpows=(1,2)))
-    _print(DifferentialStringFormatter.format_partial_names(bvar, pvar, vvar, lvar, varpows=(2,2)))
-    _print(DifferentialStringFormatter.format_partial_names(bvar, pvar, vvar, lvar, varpows=(2,2), components=(0,1)))
-    _print(DifferentialStringFormatter.format_partial_names(bvar, pvar, vvar, lvar, varpows=(2,2), components=((0,1),(1,0))))
+    _print(DifferentialStringFormatter.format_partial_names(bvar, pvar, vvar, lvar, varpows=(0, 1)))
+    _print(DifferentialStringFormatter.format_partial_names(bvar, pvar, vvar, lvar, varpows=(1, 0)))
+    _print(DifferentialStringFormatter.format_partial_names(bvar, pvar, vvar, lvar, varpows=(1, 1)))
+    _print(DifferentialStringFormatter.format_partial_names(bvar, pvar, vvar, lvar, varpows=(1, 2)))
+    _print(DifferentialStringFormatter.format_partial_names(bvar, pvar, vvar, lvar, varpows=(2, 2)))
+    _print(DifferentialStringFormatter.format_partial_names(bvar, pvar, vvar, lvar, varpows=(2, 2), components=(0, 1)))
+    _print(DifferentialStringFormatter.format_partial_names(bvar, pvar, vvar, lvar, varpows=(2, 2), components=((0, 1), (1, 0))))
 
     print
-    bvar, pvar, vvar, lvar = 'Fext', u'F\u1d49xt', 'Fext', '<LBRACKET>F_<LBRACKET>ext<RBRACKET><RBRACKET>'
-    bxvars, pxvars, vxvars, lxvars = (('x','y'),)*4
+    bvar, pvar, vvar, lvar = 'Fext', 'Fₑₓₜ', 'Fext', '<LBRACKET>F_<LBRACKET>ext<RBRACKET><RBRACKET>'
+    bxvars, pxvars, vxvars, lxvars = (('x', 'y'),)*4
     _print(DifferentialStringFormatter.format_pd(bvar, pvar, vvar, lvar))
     _print(DifferentialStringFormatter.format_pd(bvar, pvar, vvar, lvar, varpows=2))
-    _print(DifferentialStringFormatter.format_pd(bvar, pvar, vvar, lvar, 
-                                              bxvars, pxvars, vxvars, lxvars,
-                                              varpows=(1,0)))
-    _print(DifferentialStringFormatter.format_pd(bvar, pvar, vvar, lvar, 
-                                              bxvars, pxvars, vxvars, lxvars,
-                                              varpows=(0,1)))
-    _print(DifferentialStringFormatter.format_pd(bvar, pvar, vvar, lvar, 
-                                              bxvars, pxvars, vxvars, lxvars,
-                                              varpows=(1,1)))
-    _print(DifferentialStringFormatter.format_pd(bvar, pvar, vvar, lvar, 
-                                              bxvars, pxvars, vxvars, lxvars,
-                                              varpows=(5,2)))
-    
+    _print(DifferentialStringFormatter.format_pd(bvar, pvar, vvar, lvar,
+                                                 bxvars, pxvars, vxvars, lxvars,
+                                                 varpows=(1, 0)))
+    _print(DifferentialStringFormatter.format_pd(bvar, pvar, vvar, lvar,
+                                                 bxvars, pxvars, vxvars, lxvars,
+                                                 varpows=(0, 1)))
+    _print(DifferentialStringFormatter.format_pd(bvar, pvar, vvar, lvar,
+                                                 bxvars, pxvars, vxvars, lxvars,
+                                                 varpows=(1, 1)))
+    _print(DifferentialStringFormatter.format_pd(bvar, pvar, vvar, lvar,
+                                                 bxvars, pxvars, vxvars, lxvars,
+                                                 varpows=(5, 2)))
+
     print
     bxvars, pxvars, vxvars, lxvars = (('x',)*5,)*4
     varpows = (1,)*5
-    xvars_components = range(5)
-    var_components=(0,4,3,2)
-    _print(DifferentialStringFormatter.format_pd(bvar, pvar, vvar, lvar, 
-                                              bxvars, pxvars, vxvars, lxvars,
-                                              varpows=varpows, xvars_components=xvars_components,
-                                              var_components=var_components), multiline=True)
-
+    xvars_components = tuple(range(5))
+    var_components = (0, 4, 3, 2)
+    _print(DifferentialStringFormatter.format_pd(bvar, pvar, vvar, lvar,
+                                                 bxvars, pxvars, vxvars, lxvars,
+                                                 varpows=varpows, xvars_components=xvars_components,
+                                                 var_components=var_components), multiline=True)
diff --git a/hysop/tools/handle.py b/hysop/tools/handle.py
index 04d2a7fd8438759e52e9cd100626a06630559c49..48cedf0eca531ae5b81af978def1048251723fae 100644
--- a/hysop/tools/handle.py
+++ b/hysop/tools/handle.py
@@ -1,16 +1,14 @@
-
 from abc import ABCMeta, abstractmethod
-from hysop.deps import np
+import numpy as np
+
 from hysop.tools.decorators import not_implemented, debug
 from hysop.tools.types import to_tuple, first_not_None
 from hysop.tools.sympy_utils import subscript
 from hysop.core.mpi import MPI
 
-class TaggedObjectView(object):
+class TaggedObjectView(object, metaclass=ABCMeta):
     """View on a TaggedObject, just forwards tag and id for views."""
 
-    __metaclass__ = ABCMeta
-    
     @debug
     def __new__(cls, obj_view=None, **kwds):
         obj = super(TaggedObjectView, cls).__new__(cls, **kwds)
@@ -39,7 +37,7 @@ class TaggedObjectView(object):
             return getattr(self, '_TaggedObject__get_object_tag')()
         else:
             return getattr(self.__obj_view, '_TaggedObject__get_object_tag')()
-    
+
     def __get_object_pretty_tag(self):
         """Unique pretty tag of the underlying object view."""
         if (not hasattr(self, '_TaggedObjectView__obj_view')) or (self.__obj_view is None):
@@ -51,7 +49,7 @@ class TaggedObjectView(object):
     def __get_object_full_tag(self):
         """Unique tag of the underlying object view with cls information."""
         return '{}::{}'.format(self.__class__.__name__, self.__get_object_tag())
-    
+
     def __get_object_full_pretty_tag(self):
         """Unique tag of the underlying object view with cls information."""
         return '{}::{}'.format(self.__class__.__name__, self.__get_object_pretty_tag())
@@ -71,12 +69,12 @@ class TaggedObjectView(object):
     @abstractmethod
     def __hash__(self):
          pass
-    
+
     def __repr__(self):
         return self.full_tag
 
 
-class TaggedObject(object):
+class TaggedObject(object, metaclass=ABCMeta):
     """
     Generic class to count object instances and associate a tag to it.
     A tag is basically the id of the object instance formatted to a string.
@@ -84,13 +82,11 @@ class TaggedObject(object):
     object id (for logging or debug purposes).
     """
 
-    __metaclass__ = ABCMeta
-
     # Counter of instances to set a unique id for each object.
     __ids = {}
 
     @debug
-    def __new__(cls, tag_prefix=None, tag_postfix=None, tag_formatter=None, 
+    def __new__(cls, tag_prefix=None, tag_postfix=None, tag_formatter=None,
                      tagged_cls=None, **kwds):
         """
         Create a TaggedObject object and assign it an id.
@@ -100,7 +96,18 @@ class TaggedObject(object):
         assert (tag_formatter is None) or callable(tag_formatter)
         tagged_cls = first_not_None(tagged_cls, cls)
 
-        obj = super(TaggedObject, cls).__new__(cls, **kwds) 
+        try:
+            obj = super(TaggedObject, cls).__new__(cls, **kwds)
+        except TypeError:
+            msg = '\nFATAL ERROR during {}.__new__(cls, **kwds).'.format(cls.__name__)
+            msg+= '\nThis may be due to extra keyword arguments:\n  *'
+            msg+= '\n  *'.join(kwds.keys())
+            msg+= '\nIf you believe that those arguments are valid, check the following types: \n  *'
+            msg+= '\n  *'.join(map(str, cls.__mro__[:-1]))
+            msg+= '\n'
+            print(msg)
+            raise
+
         if tagged_cls in TaggedObject.__ids:
             obj.__tag_id = TaggedObject.__ids[tagged_cls]
             TaggedObject.__ids[tagged_cls] += 1
@@ -112,21 +119,29 @@ class TaggedObject(object):
         obj.__tag_postfix   = tag_postfix
         obj.__tag_formatter = tag_formatter
         return obj
-    
+
     @debug
-    def __init__(self, *args, **kwds):
+    def __init__(self, tag_prefix=None, tag_postfix=None, tag_formatter=None,
+                     tagged_cls=None, **kwds):
         """
         Initialize a TaggedObject with a tag prefix/postfix/formatter, all optional.
         """
-        tag_prefix    = kwds.get('tag_prefix', None)
-        tag_postfix   = kwds.get('tag_postfix', None)
-        tag_formatter = kwds.get('tag_formatter', None)
         assert (tag_prefix  is None) or isinstance(tag_prefix,  str)
         assert (tag_postfix is None) or isinstance(tag_postfix, str)
         assert (tag_formatter is None) or callable(tag_formatter)
 
-        super(TaggedObject, self).__init__(**kwds)
-        
+        try:
+            super(TaggedObject, self).__init__(**kwds)
+        except TypeError:
+            cls = self.__class__
+            msg = '\nFATAL ERROR during {}.__init__(**kwds).'.format(cls.__name__)
+            msg+= '\nThis may be due to extra keyword arguments:\n  *'
+            msg+= '\n  *'.join(kwds.keys())
+            msg+= '\nIf you believe that those arguments are valid, check the following types: \n  *'
+            msg+= '\n  *'.join(map(str, cls.__mro__[:-1]))
+            print(msg)
+            raise
+
         # reaffect attributes (some classes use only __init__ for simplicity)
         self.__tag_prefix    = first_not_None(self.__tag_prefix, tag_prefix, '')
         self.__tag_postfix   = first_not_None(self.__tag_postfix, tag_postfix, '')
@@ -142,41 +157,41 @@ class TaggedObject(object):
     def __get_object_tag(self):
         """
         Get the formatted tag of this object as a string.
-        This is an instance identifier 
+        This is an instance identifier
         """
         tag_formatter = first_not_None(self.__tag_formatter, lambda x: x)
         tag_prefix  = first_not_None(self.__tag_prefix, '')
         tag_postfix = first_not_None(self.__tag_postfix, '')
         return '{}{}{}'.format(
-                tag_prefix, 
-                tag_formatter(self.__tag_id), 
+                tag_prefix,
+                tag_formatter(self.__tag_id),
                 tag_postfix)
-    
+
     def __get_object_pretty_tag(self):
         """
         Get the formatted pretty tag of this object as a string.
-        This is an instance identifier 
+        This is an instance identifier
         """
         tag_formatter = first_not_None(self.__tag_formatter, lambda x: x)
         tag_prefix  = first_not_None(self.__tag_prefix, '')
         tag_postfix = first_not_None(self.__tag_postfix, '')
-        return u'{}{}{}'.format(
-                tag_prefix, 
-                tag_formatter(subscript(self.__tag_id)), 
-                tag_postfix).encode('utf-8')
-    
+        return '{}{}{}'.format(
+                tag_prefix,
+                tag_formatter(subscript(self.__tag_id)),
+                tag_postfix)
+
     def __get_object_full_tag(self):
         """
         Get the formatted tag of this object as a string.
-        This is an instance identifier along with a 
+        This is an instance identifier along with a
         class identifier.
         """
         return '{}::{}'.format(self.__class__.__name__, self.__get_object_tag())
-    
+
     def __get_object_full_pretty_tag(self):
         """
         Get the formatted tag of this object as a string.
-        This is an instance identifier along with a 
+        This is an instance identifier along with a
         class identifier.
         """
         return '{}::{}'.format(self.__class__.__name__, self.__get_object_pretty_tag())
@@ -189,7 +204,7 @@ class TaggedObject(object):
 
     def __repr__(self):
         return self.full_tag
-     
+
     @abstractmethod
     def __eq__(self, other):
         pass
@@ -203,11 +218,11 @@ class TaggedObject(object):
 
 class RegisteredObject(TaggedObject):
      """
-     Generic class to manage unique immutable object instances (like Topologies, 
-     Domains and so on). Has a TaggedObject interface but each tag id is unique among 
+     Generic class to manage unique immutable object instances (like Topologies,
+     Domains and so on). Has a TaggedObject interface but each tag id is unique among
      all objects of the same class created with the exact same hashed __new__ arguments.
      """
-     
+
      # Counter of instances to set a unique id for each unique object.
      __registered_objects = {}
 
@@ -243,7 +258,7 @@ class RegisteredObject(TaggedObject):
                  msg = 'Keyword argument {} is a np.ndarray but it has not been set to readonly.'
                  msg=msg.format(key)
                  raise RuntimeError(msg)
-             return arg.data
+             return arg.tobytes()
          elif isinstance(arg, (MPI.Intracomm, MPI.Intercomm)):
              return id(arg)
          else:
@@ -253,45 +268,45 @@ class RegisteredObject(TaggedObject):
                  msg='\nFATAL ERROR: Cannot hash argument {} with type {} '
                  msg+='which is required for RegisteredObject derived types keyword arguments.\n'
                  msg=msg.format(key, type(arg))
-                 print msg
+                 print(msg)
                  raise
              return arg
 
      @debug
-     def __new__(cls, register_object=True, 
-                      tag_prefix=None, tag_postfix=None, tag_formatter=None, 
+     def __new__(cls, register_object=True,
+                      tag_prefix=None, tag_postfix=None, tag_formatter=None,
                       **kwds):
-        """
+        r"""
         Creates and return a RegisteredObject and assign a unique id if
-        and only if a previous object was not already created with the 
+        and only if a previous object was not already created with the
         exact same keywords arguments (modulo hashing).
-        
+
         If the object was already created, it's instance is simply returned.
 
         All keyword arguments contained in **kwds have to be immutable:
-            *Arguments cannot be None (you have to set all default arguments 
+            *Arguments cannot be None (you have to set all default arguments
                 in child classes __new__).
             *Lists should be replaced by tuples.
             *Sets should be replaced by frozensets.
             *Dictionaries should be internally stored as tuple of tuples (use dict.items()).
             *np.ndarray should be set read-only (by using npw.set_readonly).
-            *MPI.Intracomm and MPI.Intercomm are hashed with their python object id, 
+            *MPI.Intracomm and MPI.Intercomm are hashed with their python object id,
                 child classes are responsible for the immutability of those parameters.
 
         Initialization should be done if and only if self._initialized is False.
-        
+
         /!\ Always pass current class __new__ arguments to the base class __new__.
-            Full modification input keywords arguments should be done in __new__, 
+            Full modification of input keyword arguments should be done in __new__,
             before base class __new__ call.
 
             All input keyword arguments should be formatted to the exact same immutable
-            type and default values should be precomputed, otherwise two different 
-            RegisteredObject might be created even if the final objects are the same 
-            after initialization. 
+            type and default values should be precomputed, otherwise two different
+            RegisteredObject might be created even if the final objects are the same
+            after initialization.
 
             Full initialization of object should be done in __new__ after base class __new__
             guarded by initialization safe guard (if obj.obj_initialized).
-            
+
             [modify original **kwds to immutable objects]
             obj = super(CurrentClass,cls).__new__(cls, [current __new__ **kwds], **kwds)
             if not obj.obj_initialized:
@@ -301,13 +316,13 @@ class RegisteredObject(TaggedObject):
         /!\ Only define __init__ if absolutely required (for multiple inheritance of custom
             objects for example and if you know what you are doing).
             __init__ default arguments will not be the ones from __new__ if modified.
-        
+
             self.__init__(**kwds) is always called, even if the object was already
-            created, a safeguard should be set in each child class __init__ method, 
+            created, a safeguard should be set in each child class __init__ method,
             if present. User arguments are passed as *user supplied* from __new__
-            to __init__. This means that all default arguments modified in __new__ 
-            are *not* passed modified to __init__ and thus that all class parameters 
-            should be set in __new__. When using __init__, its arguments should not be 
+            to __init__. This means that all default arguments modified in __new__
+            are *not* passed modified to __init__ and thus that all class parameters
+            should be set in __new__. When using __init__, its arguments should not be
             used.
         """
 
@@ -320,32 +335,24 @@ class RegisteredObject(TaggedObject):
                 assert type(obj) is cls, 'FATAL ERROR: Type mismatch.'
                 assert obj.obj_initialized
             else:
-                obj = super(RegisteredObject,cls).__new__(cls, 
-                        tag_prefix=tag_prefix, 
-                        tag_postfix=tag_postfix, 
-                        tag_formatter=tag_formatter, 
-                        **kwds)
+                obj = super(RegisteredObject,cls).__new__(cls,
+                        tag_prefix=tag_prefix,
+                        tag_postfix=tag_postfix,
+                        tag_formatter=tag_formatter)
                 obj.__initialized = False
                 registered_objects[key] = obj
         else:
-            obj = super(RegisteredObject,cls).__new__(cls, 
-                    tag_prefix=tag_prefix, 
-                    tag_postfix=tag_postfix, 
-                    tag_formatter=tag_formatter, 
-                    **kwds)
+            obj = super(RegisteredObject,cls).__new__(cls,
+                    tag_prefix=tag_prefix,
+                    tag_postfix=tag_postfix,
+                    tag_formatter=tag_formatter)
             obj.__initialized = False
         return obj
 
-     def __del__(self):
-         key = None
-         for k,v in self.__registered_objects.iteritems():
-             if v is self:
-                 key = k
-         if (key is not None):
-             del self.__registered_objects[key]
-
      @debug
-     def __init__(self, **kwds):
+     def __init__(self, register_object=True,
+                      tag_prefix=None, tag_postfix=None, tag_formatter=None,
+                      **kwds):
         """
         Initialize this object.
         If self._initialized was already set to True, raise a RuntimeError.
@@ -353,9 +360,20 @@ class RegisteredObject(TaggedObject):
         """
         if self.__initialized:
             return
-        super(RegisteredObject,self).__init__(**kwds)
+        super(RegisteredObject, self).__init__(
+                    tag_prefix=tag_prefix,
+                    tag_postfix=tag_postfix,
+                    tag_formatter=tag_formatter)
         self.__initialized = True
 
+     def __del__(self):
+         key = None
+         for k,v in self.__registered_objects.items():
+             if v is self:
+                 key = k
+         if (key is not None):
+             del self.__registered_objects[key]
+
      def __get_obj_initialized(self):
          """
          Return the object initialization state.
@@ -376,13 +394,11 @@ class RegisteredObject(TaggedObject):
      obj_initialized = property(__get_obj_initialized)
 
 
-class Handle(object):
+class Handle(object, metaclass=ABCMeta):
     """
     Generic class to encapsulate various objects ('handles').
     """
 
-    __metaclass__ = ABCMeta
-
     @classmethod
     @not_implemented
     def handle_cls(cls):
@@ -405,7 +421,7 @@ class Handle(object):
         Build a Handle from a handle instance.
 
         Raise ValueError if given handle is None.
-        Raise TypeError if given handle does not match targetted 
+        Raise TypeError if given handle does not match targeted
         handle type.
         """
         super(Handle,self).__init__(**kargs)
diff --git a/hysop/tools/hash.py b/hysop/tools/hash.py
index 0d44ab8a0e3e35e6f93f39f62cb572a2b7ea4267..1036fec96be75bce4e6ab55f31c88a5953e49972 100644
--- a/hysop/tools/hash.py
+++ b/hysop/tools/hash.py
@@ -1,5 +1,5 @@
-
-from hysop.deps import np, hashlib
+import hashlib
+import numpy as np
 
 def hash_communicator(comm, h=None):
     from hysop.core.mpi import processor_hash
diff --git a/hysop/tools/hptt_utils.py b/hysop/tools/hptt_utils.py
index ec7af819069ab39deb1e34046ed2ee5ee68e27b1..2594ac7041b2d8ab11ca24dbece94dac2fd47460 100644
--- a/hysop/tools/hptt_utils.py
+++ b/hysop/tools/hptt_utils.py
@@ -26,11 +26,11 @@ if HAS_HPTT:
             return False
         if src.dtype not in (np.float32, np.float64, np.complex64, np.complex128):
             return False
-        if src.flags['C_CONTIGUOUS'] != dst.flags['C_CONTIGUOUS']:
+        if src.flags.c_contiguous != dst.flags.c_contiguous:
             return False
-        if src.flags['F_CONTIGUOUS'] != dst.flags['F_CONTIGUOUS']:
+        if src.flags.f_contiguous != dst.flags.f_contiguous:
             return False
-        if not (src.flags['C_CONTIGUOUS'] ^ src.flags['F_CONTIGUOUS']):
+        if not (src.flags.c_contiguous ^ src.flags.f_contiguous):
             return False
         return not array_share_data(src, dst)
 else:
diff --git a/hysop/tools/hysop_ls.py b/hysop/tools/hysop_ls.py
index 95a8659304899091fc44728e4ecb660973d56823..7a3fe18387961734cfed265c79aa07780f4b5c45 100755
--- a/hysop/tools/hysop_ls.py
+++ b/hysop/tools/hysop_ls.py
@@ -1,6 +1,5 @@
 
-import sys, os, argparse, tempfile, warnings
-import subprocess32 as subprocess
+import sys, os, argparse, tempfile, warnings, subprocess
 
 # default caching directory
 tmp = tempfile.gettempdir()
@@ -19,7 +18,7 @@ class BackendMask(object):
         backends = backends[0].split(',')
         if len(backends) == 0:
             msg='At least one backend should be given.'
-            print msg
+            print(msg)
             sys.exit(2)
         host, opencl, cuda = False, False, False
         for b in backends:
@@ -36,7 +35,7 @@ class BackendMask(object):
                 opencl = True
                 cuda   = True
             elif b=='~host':
-                host   = False 
+                host   = False
             elif b=='~opencl':
                 opencl = False
             elif b=='~cuda':
@@ -54,7 +53,7 @@ class BackendMask(object):
                 cuda   = False
             else:
                 msg='Unknown backend {}. Aborting.'.format(b)
-                print msg
+                print(msg)
                 sys.exit(2)
         self.host = host
         self.opencl = opencl
@@ -81,7 +80,7 @@ class BlackWhiteList(object):
         if obj_whitelist.intersection(obj_blacklist):
             msg='Intersection bewteen {} black and whitelist: {}.'.format(name,
                     ', '.join(obj_whitelist.intersection(obj_blacklist)))
-            print msg
+            print(msg)
             sys.exit(2)
         obj_mask = lambda x: (((not whitelist) or (x     in whitelist)) and \
                               ((not blacklist) or (x not in blacklist)))
@@ -93,7 +92,7 @@ class BlackWhiteList(object):
     def __call__(self, name):
         name = name.strip().lower()
         return self.obj_mask(name)
-    
+
     def __str__(self):
         name = self.name.title()
         ss=()
@@ -130,7 +129,7 @@ class DeviceMask(object):
 
     def __str__(self):
         return '{}\n{}'.format(self.devices, self.device_types)
-          
+
 
 def run(arguments=None):
     # build the argument parser
@@ -141,20 +140,20 @@ def run(arguments=None):
             description=description)
 
     parser.add_argument('-hostfile', '--hostfile', nargs=1, type=str, default=None, required=False,
-            help=('Provide a list of hosts as a file. This file is parsed to extract host names ' 
-                    + 'as if they would have been passed by \'-H\'.'), 
+            help=('Provide a list of hosts as a file. This file is parsed to extract host names '
+                    + 'as if they would have been passed by \'-H\'.'),
             dest='hostfile')
 
     parser.add_argument('-H', '--host', nargs=1, type=str, default=None, required=False,
             dest='hosts',
             help=('List of hosts to prospect. Defaults to localhost if no hostfile is provided. '
                     + 'If a hostfile is provided as well, exclude those hosts from hostfile.'))
-    
-    parser.add_argument('-x', nargs=1, type=str, default=None, required=False, 
+
+    parser.add_argument('-x', nargs=1, type=str, default=None, required=False,
                             metavar='var0;var1;...', dest='env',
                             help=('Provide a semicolon separated list of extra environment variables '
                                     +'to pass to hosts.'))
-    
+
     parser.add_argument('-mca', '--mca', type=str, help='Pass MCA parameters.',
             dest='mca', default=None, required=False)
 
@@ -164,7 +163,7 @@ def run(arguments=None):
                                 + 'Defaults to all available backends. '
                                 + 'If preceded by ~, disable this backend.'))
 
-    parser.add_argument('-dt', '--device-type', nargs=1, type=str, default=None, required=False, 
+    parser.add_argument('-dt', '--device-type', nargs=1, type=str, default=None, required=False,
                             metavar='[all,cpu,gpu,acc]', dest='device_types',
                             help=('Gather only informations on specified device types. '
                                   + 'Defaults to all. '
@@ -184,7 +183,7 @@ def run(arguments=None):
                             metavar='vendor0,vendor1,...', dest='vendors',
                             help=('Print only informations about the given vendor.' +
                                   'If preceded by ~, disable this vendor.'))
-    
+
     parser.add_argument('--pci-ids', nargs=1, type=str, required=False,
                             dest='pciids', default=(None,),
                             help='Specify a path to pci.ids. Should be shared between all scanned nodes.')
@@ -192,11 +191,11 @@ def run(arguments=None):
     parser.add_argument('--cache-dir', nargs=1, type=str, required=False,
                             dest='cache_dir', default=(default_cache_dir,),
                             help='Cache node topology results into this directory. Defaults to {}.'.format(default_cache_dir))
-    
+
     parser.add_argument('--cache-host', nargs=1, type=str, default=('localhost',), required=False,
             dest='cache_host',
             help=('Host that will cache the results into cache_dir. Defaults to localhost.'))
-    
+
     parser.add_argument('--override-cache', help='Override cached node informations.',
             action='store_true', default=False, dest='override_cache')
 
@@ -208,7 +207,7 @@ def run(arguments=None):
 
     parser.add_argument('-D', '--debug', help='Print debugging information.',
             action='store_true', default=False, dest='debug')
-    
+
 
     # parse arguments (also handle help)
     args = parser.parse_args(arguments)
@@ -216,7 +215,7 @@ def run(arguments=None):
     # print version and exit if required
     if args.print_version:
         from hysop import version
-        print 'hysop-ls version {}.'.format(version)
+        print('hysop-ls version {}.'.format(version))
         sys.exit(1)
 
     # debug and verbose
@@ -253,15 +252,15 @@ def run(arguments=None):
         hosts = ('localhost',)
         if verbose:
             msg='No host specified, using localhost.'
-            print msg
+            print(msg)
 
     if not hosts:
         msg='Failed to parse hostfile or no hosts present, aborting.'
-        print msg
+        print(msg)
         sys.exit(2)
     if verbose:
         msg='Hosts are {}.'.format(', '.join(hosts))
-        print msg
+        print(msg)
 
     # caching
     override_cache = args.override_cache
@@ -272,10 +271,10 @@ def run(arguments=None):
             os.makedirs(cache_dir)
         except OSError as e:
             msg='Could not create cache directory:\n {}.'.format(e)
-            print msg
+            print(msg)
             sys.exit(e.errno)
-    if verbose:    
-        print 'Caching directory is \'{}\'.'.format(cache_dir)
+    if verbose:
+        print('Caching directory is \'{}\'.'.format(cache_dir))
 
     # checking for already cached hosts
     from hysop.tools.cache import load_cache
@@ -288,18 +287,18 @@ def run(arguments=None):
             msg='The following hosts have already been cached: {}'
             msg=msg.format(', '.join(cached_hosts))
             msg += '\nUse --override-cache to overwrite cached data if required.'
-            print msg
+            print(msg)
         ncached = len(cached_hosts)
-        
+
     hostlist = args.cache_host[0] + ',' + ','.join(hosts)
-    
+
     # connect to nodes by spawning MPI processes on the fly
     if verbose:
         if len(hosts)>0:
-            print 'Retrieving distant node hardware topologies...'
+            print('Retrieving distant node hardware topologies...')
         else:
-            print 'Retrieving all harware topologies from cache...'
-    
+            print('Retrieving all hardware topologies from cache...')
+
     if len(hosts)>0:
         cmd = ['mpirun']
         cmd += ['-H', hostlist]
@@ -310,18 +309,18 @@ def run(arguments=None):
         if (args.mca is not None):
             for mca in args.mca.split(';'):
                 cmd += ['-mca', mca]
-        
+
         pciids = args.pciids[0]
         hostnames = '({},)'.format(','.join('"{}"'.format(h) for h in hosts))
-        fcall = 'collect_node_informations(cache_file="{}", hostnames={}, pciids={})'.format(cache_file, hostnames, 
+        fcall = 'collect_node_informations(cache_file="{}", hostnames={}, pciids={})'.format(cache_file, hostnames,
                 None if (pciids is None) else '"{}"'.format(pciids))
         cmd += ['--', 'python -c \'from hysop.tools.hysop_ls import collect_node_informations; {}\''.format(fcall)]
         cmd = ' '.join(cmd)
-        
+
         if verbose:
-            print cmd
-            print 'This may take some time...'
-                       
+            print(cmd)
+            print('This may take some time...')
+
         FNULL = open(os.devnull, 'w')
         try:
             if debug:
@@ -336,13 +335,13 @@ def run(arguments=None):
             raise RuntimeError(msg)
         finally:
             FNULL.close()
-        
+
         if verbose:
-            print 'Results have been cached to {}.'.format(cache_file)
-    
+            print('Results have been cached to {}.'.format(cache_file))
+
     # load back cached hardware topologies
     if verbose:
-        print 'Loading topologies and computing requested statistics...'
+        print('Loading topologies and computing requested statistics...')
     topologies = load_cache(cache_file)
     topologies = {k:topologies[k] for k in all_hosts}
 
@@ -351,10 +350,10 @@ def run(arguments=None):
     platforms = PlatformMask(args.platforms, args.vendors)
     devices   = DeviceMask(args.devices, args.device_types)
     if debug:
-        print backends
-        print platforms
-        print devices
-    
+        print(backends)
+        print(platforms)
+        print(devices)
+
     from hysop.backend.hardware.hwinfo import TopologyStatistics
     stats = TopologyStatistics()
     for topo in topologies.values():
@@ -364,10 +363,10 @@ def run(arguments=None):
   Hosts: {}
 {}
 '''.format(', '.join(topologies.keys()), stats.to_string(2,2))
-    print msg
+    print(msg)
 
     sys.exit(0)
-  
+
 def collect_node_informations(cache_file, hostnames, pciids=None):
     from mpi4py import MPI
     from hysop.backend.hardware.hwinfo import PCIIds, Topology
@@ -376,12 +375,12 @@ def collect_node_informations(cache_file, hostnames, pciids=None):
     comm = MPI.COMM_WORLD
     rank = comm.Get_rank()
     assert isinstance(hostnames, tuple)
-    
+
     if (rank==0):
         topologies = comm.gather(None, root=0)
         topologies = topologies[1:]
         assert len(topologies) == len(hostnames)
-        
+
         from hysop.tools.cache import update_cache
         for (hostname, topo) in zip(hostnames, topologies):
             update_cache(cache_file, hostname, topo)
diff --git a/hysop/tools/indices.py b/hysop/tools/indices.py
index 2eb993a085393d11c9e72fba2cd709c54da18dc4..7b2d2ca84cdcfa8f66479b99725d20524b70da68 100644
--- a/hysop/tools/indices.py
+++ b/hysop/tools/indices.py
@@ -6,13 +6,13 @@ def condition2Slice(cond):
     ilist = np.where(cond)
     if ilist[0].size == 0:
         isEmpty = True
-        sl = [slice(0, 0) for i in xrange(dim)]
+        sl = [slice(0, 0) for i in range(dim)]
         resol = np.asarray([0] * dim)
     else:
-        start = np.asarray([ilist[i].min() for i in xrange(dim)])
-        end = np.asarray([ilist[i].max() + 1 for i in xrange(dim)])
+        start = np.asarray([ilist[i].min() for i in range(dim)])
+        end = np.asarray([ilist[i].max() + 1 for i in range(dim)])
         sl = [slice(start[i], end[i])
-              for i in xrange(dim)]
+              for i in range(dim)]
         resol = end - start
         isEmpty = False
 
@@ -24,10 +24,10 @@ def removeLastPoint(cond):
     shape = cond.shape
     dim = len(shape)
     ilist = np.where(cond)
-    end = [ilist[i].max() for i in xrange(dim)]
-    subl = [np.where(ilist[i] == end[i]) for i in xrange(dim)]
+    end = [ilist[i].max() for i in range(dim)]
+    subl = [np.where(ilist[i] == end[i]) for i in range(dim)]
     for sl in subl:
-        sublist = [ilist[i][sl] for i in xrange(dim)]
+        sublist = [ilist[i][sl] for i in range(dim)]
         sublist = tuple(sublist)
         cond[sublist] = False
     return cond
diff --git a/hysop/tools/interface.py b/hysop/tools/interface.py
index 3722820a076d08c4f7c5e21308d84799474fcba5..aaeb01f863a6c7710594facf5295fafc7f27cc12 100644
--- a/hysop/tools/interface.py
+++ b/hysop/tools/interface.py
@@ -4,9 +4,8 @@ from hysop.tools.types import check_instance, first_not_None, to_tuple
 from hysop.tools.numpywrappers import npw
 
 
-class SymbolContainerI(object):
-    __metaclass__ = ABCMeta
-    
+class SymbolContainerI(object, metaclass=ABCMeta):
+
     def _get_symbol(self):
         """
         Return a Symbol that can be used to compute symbolic expressions
@@ -14,48 +13,48 @@ class SymbolContainerI(object):
         """
         assert hasattr(self, '_symbol'), 'Symbol has not been defined.'
         return self._symbol
-    
+
     symbol = property(_get_symbol)
     s = property(_get_symbol)
 
 
-class NamedObjectI(object):
-    __metaclass__ = ABCMeta
-    
+class NamedObjectI(object, metaclass=ABCMeta):
+
     def __new__(cls, name, pretty_name=None, latex_name=None, var_name=None, **kwds):
         """
         Create an abstract named object that contains a symbolic value.
         name : string
             A name for the field.
-        pretty_name: string or unicode, optional.
-            A pretty name used for display whenever possible (unicode supported).
+        pretty_name: string, optional.
+            A pretty name used for display whenever possible.
             Defaults to name.
         kwds: dict
             Keywords arguments for base class.
         """
-        
+
         obj = super(NamedObjectI, cls).__new__(cls, **kwds)
-        obj.rename(name=name, pretty_name=pretty_name, 
+        obj.rename(name=name, pretty_name=pretty_name,
                     latex_name=latex_name, var_name=var_name)
         return obj
-    
+
+    def __init__(self, name, pretty_name=None, latex_name=None, var_name=None, **kwds):
+        super(NamedObjectI, self).__init__(**kwds)
+
     def rename(self, name, pretty_name=None, latex_name=None, var_name=None):
         """Change the names of this object."""
         check_instance(name, str)
-        check_instance(pretty_name, (str,unicode), allow_none=True)
+        check_instance(pretty_name, str, allow_none=True)
         check_instance(latex_name, str, allow_none=True)
-        
+
         pretty_name = first_not_None(pretty_name, name)
         latex_name  = first_not_None(latex_name, name)
-        
-        if isinstance(pretty_name, unicode):
-            pretty_name = pretty_name.encode('utf-8')
+
         check_instance(pretty_name, str)
 
         self._name   = name
         self._pretty_name = pretty_name
         self._latex_name = latex_name
-        
+
     def _get_name(self):
         """Return the name of this field."""
         return self._name
@@ -65,10 +64,10 @@ class NamedObjectI(object):
     def _get_latex_name(self):
         """Return the latex name of this field."""
         return self._latex_name
-    
+
     def __str__(self):
         return self.long_description()
-    
+
     @abstractmethod
     def short_description(self):
         """Short description of this field as a string."""
@@ -89,18 +88,18 @@ class NamedScalarContainerI(NamedObjectI, SymbolContainerI):
     def ndim(self):
         """Number of dimensions of this this tensor."""
         return 0
-    
+
     def _get_var_name(self):
         """Return the variable name of this field."""
         return self._var_name
-    
-    def rename(self, name, pretty_name=None, 
+
+    def rename(self, name, pretty_name=None,
                 latex_name=None, var_name=None):
         """Change the names of this object."""
-        super(NamedScalarContainerI, self).rename(name=name, 
+        super(NamedScalarContainerI, self).rename(name=name,
                 pretty_name=pretty_name, latex_name=latex_name)
         self.check_and_set_varname(first_not_None(var_name, self._name))
-    
+
     def check_and_set_varname(self, var_name):
         check_instance(var_name, str, allow_none=True)
 
@@ -111,15 +110,15 @@ class NamedScalarContainerI(NamedObjectI, SymbolContainerI):
             if c in var_name:
                 raise RuntimeError(msg)
         self._var_name = var_name
-    
+
     def nd_iter(self):
         """Return an nd-indexed iterator of contained objects."""
         yield ((1,), self)
-    
+
     def __iter__(self):
         """Return an iterator on unique scalar objects."""
         return (self,).__iter__()
-    
+
     def __tuple__(self):
         """
         Fix hysop.tools/type.to_tuple for FieldContainers,
@@ -130,10 +129,10 @@ class NamedScalarContainerI(NamedObjectI, SymbolContainerI):
     def __contains__(self, obj):
         """Check if a scalar object is contained in self."""
         return (obj is self)
-    
+
     def __getitem__(self, slc):
         return self
-    
+
     var_name = property(_get_var_name)
 
 
@@ -143,14 +142,17 @@ class NamedTensorContainerI(NamedObjectI, SymbolContainerI):
         obj = super(NamedTensorContainerI, cls).__new__(cls, **kwds)
         obj._contained_objects = contained_objects
         return obj
-    
-    def rename(self, name, pretty_name=None, 
+
+    def __init__(self, contained_objects, **kwds):
+        super(NamedTensorContainerI, self).__init__(**kwds)
+
+    def rename(self, name, pretty_name=None,
                 latex_name=None, var_name=None):
         """Change the names of this object."""
         assert (var_name is None), 'Tensor do not have variable names.'
-        super(NamedTensorContainerI, self).rename(name=name, 
+        super(NamedTensorContainerI, self).rename(name=name,
                 pretty_name=pretty_name, latex_name=latex_name)
-    
+
     @property
     def size(self):
         """Full size of this container as if it was a 1D tensor."""
@@ -160,7 +162,7 @@ class NamedTensorContainerI(NamedObjectI, SymbolContainerI):
     def shape(self):
         """Shape of this tensor."""
         return self._contained_objects.shape
-    
+
     @property
     def ndim(self):
         """Number of dimensions of this this tensor."""
@@ -184,11 +186,11 @@ class NamedTensorContainerI(NamedObjectI, SymbolContainerI):
         """Return an nd-indexed iterator of contained objects."""
         for idx in npw.ndindex(*self._contained_objects.shape):
             yield (idx, self._contained_objects[idx])
-    
+
     def __iter__(self):
         """Return an iterator on unique scalar objects."""
         return self._contained_objects.ravel().__iter__()
-    
+
     def __tuple__(self):
         """
         Fix hysop.tools/type.to_tuple for FieldContainers,
@@ -199,7 +201,7 @@ class NamedTensorContainerI(NamedObjectI, SymbolContainerI):
     def __contains__(self, obj):
         """Check if a scalar object is contained in self."""
         return obj in self._contained_objects
-    
+
     @abstractmethod
     def __getitem__(self, slc):
         pass
diff --git a/hysop/tools/io_utils.py b/hysop/tools/io_utils.py
index c751942ac087bd57f26e7972d1c35b7fad272c25..716643cb7829bb491d7c070ce3d2f31ea3c404ed 100755
--- a/hysop/tools/io_utils.py
+++ b/hysop/tools/io_utils.py
@@ -9,15 +9,15 @@
 
 """
 import os
-import h5py
+import sys
 import psutil
 import warnings
 import tempfile
 import socket
 import shutil
 import atexit
+import subprocess
 import numpy as np
-import subprocess32 as subprocess
 from collections import namedtuple
 from inspect import getouterframes, currentframe
 from re import findall
@@ -49,7 +49,10 @@ class IO(object):
     @requires_cmd('stat')
     def get_fs_type(path):
         cmd = ['stat', '-f', '-c', '%T', path]
-        fs_type = subprocess.check_output(cmd)
+        fs_type = ''
+        if mpi.main_rank == 0:
+            fs_type = subprocess.check_output(cmd).decode('utf-8')
+        fs_type = mpi.main_comm.bcast(fs_type, root=0)
         return fs_type.replace('\n', '')
 
     @classmethod
@@ -81,7 +84,7 @@ class IO(object):
             import memory_tempfile
         except ImportError as e:
             print
-            print e
+            print(e)
             print
             msg = 'You are trying to use a RAM filesystem but the \'mempory_tempfile\' is not present on your system.'
             msg += 'Get it from https://gitlab.com/keckj/memory-tempfile.'
@@ -146,6 +149,7 @@ class IO(object):
             msg = 'No suitable caching directory was found in {}.'
             msg = msg.format(candidates)
             raise RuntimeError(msg)
+        cpath = '{}/python{}_{}'.format(cpath, sys.version_info.major, sys.version_info.minor)
         if not os.path.exists(cpath):
             try:
                 if mpi.main_rank == 0:
@@ -220,6 +224,7 @@ class IO(object):
         -------
             a list of strings
         """
+        import h5py
         hdf_file = h5py.File(filename, 'r')
         keys = hdf_file.keys()
         hdf_file.close()
@@ -309,12 +314,12 @@ class IOParams(namedtuple("IOParams", ['filename', 'filepath',
 
         check_instance(filename, str, allow_none=True)
         check_instance(filepath, str, allow_none=True)
-        check_instance(frequency, (int, long))
+        check_instance(frequency, int)
         check_instance(dump_times, tuple, values=(float, np.float64))
-        check_instance(dump_tstart, (int, long, float, np.float64))
-        check_instance(dump_tend, (int, long, float, np.float64))
-        check_instance(io_leader, (int, long))
-        check_instance(visu_leader, (int, long))
+        check_instance(dump_tstart, (int, float, np.float64))
+        check_instance(dump_tend, (int, float, np.float64))
+        check_instance(io_leader, int)
+        check_instance(visu_leader, int)
         check_instance(with_last,   bool)
         check_instance(enable_ram_fs, bool)
         check_instance(force_ram_fs, bool)
@@ -325,7 +330,7 @@ class IOParams(namedtuple("IOParams", ['filename', 'filepath',
         check_instance(append, bool)
         if dump_func:
             assert callable(dump_func), "given function must be callable"
-            assert dump_func.func_code.co_argcount, "given function must take one arg (as simulation object)"
+            assert dump_func.__code__.co_argcount, "given function must take one arg (as simulation object)"
         frequency = int(frequency)
         dump_tstart = float(dump_tstart)
         dump_tend = float(dump_tend)
@@ -431,7 +436,7 @@ class IOParams(namedtuple("IOParams", ['filename', 'filepath',
         all_kwds = {}
         for k in keys:
             if (k == 'kwds'):
-                for (k, v) in kwds.get(k, getattr(self, k)).iteritems():
+                for (k, v) in kwds.get(k, getattr(self, k)).items():
                     all_kwds[k] = v
             else:
                 all_kwds[k] = kwds.get(k, getattr(self, k))
@@ -596,7 +601,7 @@ class Writer(object):
         str_fmt = '%g\t'*(shape1 - 1) + '%g\n'
         # use a big format string
         str_fmt_N = str_fmt * N
-        for i in xrange(shape0/N):
+        for i in range(shape0//N):
             a1 = a[i:i+N, :]
             # put a1 in  1D array form; ravel better than reshape for
             # non-contiguous arrays.
diff --git a/hysop/tools/misc.py b/hysop/tools/misc.py
index 562f4b9f4df2c09c39f5b2aa1a3aa3155ab218d9..3d4022516688dd112e1f023f6efe3d7b9a1a2b2c 100644
--- a/hysop/tools/misc.py
+++ b/hysop/tools/misc.py
@@ -7,9 +7,15 @@
 
 
 """
-from hysop.deps import inspect, np, functools, operator
+import inspect, functools, operator
+import numpy as np
+
 from hysop.constants import HYSOP_REAL, HYSOP_INTEGER
 
+def getargspec(func):
+    spec = inspect.getfullargspec(func)
+    return (spec.args, spec.varargs, spec.varkw, spec.defaults)
+
 def prod(values):
     """
     Like sum but for products (of integers).
@@ -29,7 +35,7 @@ def get_default_args(func):
     """
     returns a dictionary of arg_name:default_values for the input function.
     """
-    args, varargs, keywords, defaults = inspect.getargspec(func)
+    args, varargs, keywords, defaults = getargspec(func)
     if defaults is None:
         return dict()
     else:
@@ -39,15 +45,15 @@ def get_argnames(func):
     """
     returns arguments name and possible varargs.
     """
-    argnames,varargs,_,_ = inspect.getargspec(func)
+    argnames,varargs,_,_ = getargspec(func)
     return argnames, varargs
 
 def args2kargs(func, args):
-    argnames,_,_,_ = inspect.getargspec(func)
+    argnames,_,_,_ = getargspec(func)
     return dict(zip(argnames, args))
 
 def kargs2args(func, kargs, remove=[]):
-    argnames,_,_,_ = inspect.getargspec(func)
+    argnames,_,_,_ = getargspec(func)
     return tuple([kargs[a] for a in argnames if a not in remove])
 
 def upper_pow2(x):
@@ -76,7 +82,7 @@ def next_pow2(x):
         if x==y:
             y = upper_pow2(x+1)
         return y
-    
+
     if np.isscalar(x):
         return _next_pow2(x)
     elif isinstance(x, np.ndarray):
@@ -87,11 +93,11 @@ def next_pow2(x):
 def previous_pow2(x):
     def _previous_pow2(x):
         assert x>=1
-        y = upper_pow2(x)/2
+        y = upper_pow2(x)//2
         if x==y:
-            y = upper_pow2(x-1)/2
+            y = upper_pow2(x-1)//2
         return y
-    
+
     if np.isscalar(x):
         return _previous_pow2(x)
     elif isinstance(x, np.ndarray):
@@ -110,7 +116,7 @@ def upper_pow2_or_3(x):
 class Utils(object):
     """tools to handle array and slices.
     """
-    
+
 
     """
     Perform an indirect sort of seq using python default sorting algorithm.
@@ -118,7 +124,7 @@ class Utils(object):
     """
     @staticmethod
     def argsort(seq):
-        return sorted(range(len(seq)), key=seq.__getitem__)
+        return tuple(sorted(range(len(seq)), key=seq.__getitem__))
 
     @staticmethod
     def upper_pow2(x):
@@ -150,10 +156,10 @@ class Utils(object):
         outslice = {}
         size = inarray.shape[1]
         dimension = (int)(0.5 * inarray.shape[0])
-        for rk in xrange(size):
+        for rk in range(size):
             outslice[rk] = [slice(inarray[2 * d, rk],
                                   inarray[2 * d + 1, rk] + 1)
-                            for d in xrange(dimension)]
+                            for d in range(dimension)]
         return outslice
 
     @staticmethod
@@ -171,7 +177,7 @@ class Utils(object):
         """
         assert len(sl1) == len(sl2)
         res = [None] * len(sl1)
-        for d in xrange(len(sl1)):
+        for d in range(len(sl1)):
             s1 = sl1[d]
             s2 = sl2[d]
             if s1.step != s2.step:
@@ -223,12 +229,12 @@ class WorkSpaceTools(object):
         from hysop.tools.numpywrappers import npw
         result = []
         if isinstance(subshape, list):
-            subsize = [prod(subshape[i]) for i in xrange(len(subshape))]
+            subsize = [prod(subshape[i]) for i in range(len(subshape))]
         else:
             subsize = [prod(subshape), ] * lwork
             subshape = [subshape, ] * lwork
         if work is None:
-            for i in xrange(lwork):
+            for i in range(lwork):
                 result.append(npw.zeros(subsize[i],
                                         dtype=data_type).reshape(subshape[i]))
         else:
@@ -239,21 +245,21 @@ class WorkSpaceTools(object):
             msg1 = 'Work array size is too small.'
             msg2 = 'Work array must be a flat array (1D).'
             try:
-                for i in xrange(lwork):
+                for i in range(lwork):
                     wk = work[i]
                     assert wk.size >= subsize[i], msg1
                     assert len(np.where(
                         np.asarray(wk.shape) > 1)[0]) == 1, msg2
                     result.append(wk.ravel()[:subsize[i]].reshape(subshape[i]))
-                for i in xrange(len(result)):
+                for i in range(len(result)):
                     assert npw.arrays_share_data(result[i], work[i])
 
             except AttributeError:
                 # Work array has been replaced by an OpenCL Buffer
                 # Testing the buffer size instead of shape
-                for i in xrange(lwork):
+                for i in range(lwork):
                     wk = work[i]
-                    s = wk.size / subsize[i]
+                    s = wk.size // subsize[i]
                     WorkSpaceTools._check_ocl_buffer(s, data_type)
 
                 result = work
@@ -359,7 +365,7 @@ class WorkSpaceTools(object):
         shapes = [(0,), ] * lwork
         for prop in properties:
             lp = len(prop)
-            for i in xrange(lp):
+            for i in range(lp):
                 shapes[i] = tuple(np.maximum(shapes[i], prod(prop[i])))
         work = [npw.zeros(shape) for shape in shapes]
 
diff --git a/hysop/tools/mpi_utils.py b/hysop/tools/mpi_utils.py
index 0fab45c3d07df2dca141b0d1021a02e6cb2d5302..92fe6c97adf761c2fb4b371504626a7afdd6fae8 100644
--- a/hysop/tools/mpi_utils.py
+++ b/hysop/tools/mpi_utils.py
@@ -1,5 +1,5 @@
+import numpy as np
 
-from hysop.deps import np
 from hysop.core.mpi import MPI
 from hysop.tools.numerics import get_dtype
 
@@ -50,9 +50,9 @@ def dtype_to_mpi_type(dtype):
 
 def order_to_mpi_order(order):
     from hysop.constants import MemoryOrdering
-    if (order in 'cC') or (order==MemoryOrdering.C_CONTIGUOUS) or (order==MPI.ORDER_C): 
+    if (order in 'cC') or (order==MemoryOrdering.C_CONTIGUOUS) or (order==MPI.ORDER_C):
         return MPI.ORDER_C
-    elif (order in 'fF') or (order==MemoryOrdering.F_CONTIGUOUS) or (order==MPI.ORDER_F): 
+    elif (order in 'fF') or (order==MemoryOrdering.F_CONTIGUOUS) or (order==MPI.ORDER_F):
         return MPI.ORDER_F
     else:
         msg='Unknown value of type {}.'.format(type(order))
@@ -65,8 +65,8 @@ def get_mpi_order(data):
         is_f_contiguous = data.is_fortran_contiguous
     else:
         # assume numpy like interface
-        is_c_contiguous = data.flags['C_CONTIGUOUS']
-        is_f_contiguous = data.flags['F_CONTIGUOUS']
+        is_c_contiguous = data.flags.c_contiguous
+        is_f_contiguous = data.flags.f_contiguous
     if is_c_contiguous:
         return MPI.ORDER_C
     elif is_f_contiguous:
diff --git a/hysop/tools/numba_utils.py b/hysop/tools/numba_utils.py
index 58a73f603acb00a0e61227e4781e82a6fdbbdae2..62ad05cb75fe0251b3c506af24693423de69784a 100644
--- a/hysop/tools/numba_utils.py
+++ b/hysop/tools/numba_utils.py
@@ -16,8 +16,7 @@ def make_numba_signature(*args, **kwds):
         msg='Unknown kwds {}.'.forma(kwds.keys())
         raise RuntimeError(kwds)
     dtype_to_ntype = {
-            int:        nb.int32,
-            long:       nb.int64,
+            int:        nb.int64,
             float:      nb.float64,
 
             np.int8:    nb.int8,
@@ -36,8 +35,8 @@ def make_numba_signature(*args, **kwds):
             np.complex64:  nb.complex64,
             np.complex128: nb.complex128,
     }
-    
-    sizes = ('m','n','p','q','r','s') + tuple('n{}'.format(i) for i in xrange(10))
+
+    sizes = ('m','n','p','q','r','s') + tuple('n{}'.format(i) for i in range(10))
     registered_sizes = {}
     def format_shape(*shape):
         res = '('
@@ -52,7 +51,7 @@ def make_numba_signature(*args, **kwds):
                 res+=','
         res += ')'
         return res
-    
+
     numba_args = ()
     numba_layout = ()
     for i,a in enumerate(args):
@@ -77,9 +76,9 @@ def make_numba_signature(*args, **kwds):
         elif isinstance(a, np.ndarray):
             assert a.dtype.type in dtype_to_ntype, a.dtype.type
             dtype = dtype_to_ntype[a.dtype.type]
-            if a.flags['C_CONTIGUOUS']:
+            if a.flags.c_contiguous:
                 na = nb.types.Array(dtype=dtype, ndim=a.ndim, layout='C')
-            elif a.flags['F_CONTIGUOUS']:
+            elif a.flags.f_contiguous:
                 na = nb.types.Array(dtype=dtype, ndim=a.ndim, layout='F')
             else:
                 na = nb.types.Array(dtype=dtype, ndim=a.ndim, layout='A')
@@ -96,7 +95,7 @@ def make_numba_signature(*args, **kwds):
         else:
             msg='Uknown argument type {}.'.format(type(a).__mro__)
             raise NotImplementedError(msg)
-        
+
         numba_args += (na,)
 
     return nb.void(*numba_args), ','.join(numba_layout)
@@ -107,34 +106,34 @@ def bake_numba_copy(dst, src, target=None):
         target =  __DEFAULT_NUMBA_TARGET__
     signature, layout = make_numba_signature(dst, src)
     if (dst.ndim == 1):
-        @nb.guvectorize([signature], layout, 
+        @nb.guvectorize([signature], layout,
             target=target, nopython=True, cache=True)
         def copy(dst, src):
-            for i in xrange(0, dst.shape[0]):
+            for i in range(0, dst.shape[0]):
                 dst[i] = src[i]
     elif (dst.ndim == 2):
-        @nb.guvectorize([signature], layout, 
+        @nb.guvectorize([signature], layout,
             target=target, nopython=True, cache=True)
         def copy(dst, src):
             for i in prange(0, dst.shape[0]):
-                for j in xrange(0, dst.shape[1]):
+                for j in range(0, dst.shape[1]):
                     dst[i,j] = src[i,j]
     elif (dst.ndim == 3):
-        @nb.guvectorize([signature], layout, 
+        @nb.guvectorize([signature], layout,
             target=target, nopython=True, cache=True)
         def copy(dst, src):
             for i in prange(0, dst.shape[0]):
                 for j in prange(0, dst.shape[1]):
-                    for k in xrange(0, dst.shape[2]):
+                    for k in range(0, dst.shape[2]):
                         dst[i,j,k] = src[i,j,k]
     elif (dst.ndim == 4):
-        @nb.guvectorize([signature], layout, 
+        @nb.guvectorize([signature], layout,
             target=target, nopython=True, cache=True)
         def copy(dst, src):
             for i in prange(0, dst.shape[0]):
                 for j in prange(0, dst.shape[1]):
                     for k in prange(0, dst.shape[2]):
-                        for l in xrange(0, dst.shape[3]):
+                        for l in range(0, dst.shape[3]):
                             dst[i,j,k,l] = src[i,j,k,l]
     else:
         raise NotImplementedError(dst.ndim)
@@ -148,34 +147,34 @@ def bake_numba_accumulate(dst, src, target=None):
         target =  __DEFAULT_NUMBA_TARGET__
     signature, layout = make_numba_signature(dst, src)
     if (dst.ndim == 1):
-        @nb.guvectorize([signature], layout, 
+        @nb.guvectorize([signature], layout,
             target=target, nopython=True, cache=True)
         def accumulate(dst, src):
-            for i in xrange(0, dst.shape[0]):
+            for i in range(0, dst.shape[0]):
                 dst[i] += src[i]
     elif (dst.ndim == 2):
-        @nb.guvectorize([signature], layout, 
+        @nb.guvectorize([signature], layout,
             target=target, nopython=True, cache=True)
         def accumulate(dst, src):
             for i in prange(0, dst.shape[0]):
-                for j in xrange(0, dst.shape[1]):
+                for j in range(0, dst.shape[1]):
                     dst[i,j] += src[i,j]
     elif (dst.ndim == 3):
-        @nb.guvectorize([signature], layout, 
+        @nb.guvectorize([signature], layout,
             target=target, nopython=True, cache=True)
         def accumulate(dst, src):
             for i in prange(0, dst.shape[0]):
                 for j in prange(0, dst.shape[1]):
-                    for k in xrange(0, dst.shape[2]):
+                    for k in range(0, dst.shape[2]):
                         dst[i,j,k] += src[i,j,k]
     elif (dst.ndim == 4):
-        @nb.guvectorize([signature], layout, 
+        @nb.guvectorize([signature], layout,
             target=target, nopython=True, cache=True)
         def accumulate(dst, src):
             for i in prange(0, dst.shape[0]):
                 for j in prange(0, dst.shape[1]):
                     for k in prange(0, dst.shape[2]):
-                        for l in xrange(0, dst.shape[3]):
+                        for l in range(0, dst.shape[3]):
                             dst[i,j,k,l] += src[i,j,k,l]
     else:
         raise NotImplementedError(dst.ndim)
@@ -195,21 +194,21 @@ def bake_numba_transpose(src, dst, axes, target=None):
     assert dst.shape == tuple(src.shape[i] for i in axes)
     assert dst.dtype == src.dtype
     ndim = src.ndim
-    
+
     def noop(dst, src):
         pass
-    
+
     if (ndim == 1):
         transpose = noop
     elif (ndim == 2):
         if axes == (0,1):
             transpose == noop
         elif axes == (1,0):
-            @nb.guvectorize([signature], layout, 
+            @nb.guvectorize([signature], layout,
                 target=target, nopython=True, cache=True)
             def transpose(dst, src):
                 for i in prange(0, src.shape[0]):
-                    for j in xrange(0, src.shape[1]):
+                    for j in range(0, src.shape[1]):
                         dst[j,i] = src[i,j]
         else:
             raise NotImplementedError
@@ -217,44 +216,44 @@ def bake_numba_transpose(src, dst, axes, target=None):
         if   axes == (0,1,2):
             transpose == noop
         elif axes == (0,2,1):
-            @nb.guvectorize([signature], layout, 
+            @nb.guvectorize([signature], layout,
                 target=target, nopython=True, cache=True)
             def transpose(dst, src):
                 for i in prange(0, src.shape[0]):
                     for j in prange(0, src.shape[1]):
-                        for k in xrange(0, src.shape[2]):
+                        for k in range(0, src.shape[2]):
                             dst[i,k,j] = src[i,j,k]
         elif axes == (1,0,2):
-            @nb.guvectorize([signature], layout, 
+            @nb.guvectorize([signature], layout,
                 target=target, nopython=True, cache=True)
             def transpose(dst, src):
                 for i in prange(0, src.shape[0]):
                     for j in prange(0, src.shape[1]):
-                        for k in xrange(0, src.shape[2]):
+                        for k in range(0, src.shape[2]):
                             dst[j,i,k] = src[i,j,k]
         elif axes == (1,2,0):
-            @nb.guvectorize([signature], layout, 
+            @nb.guvectorize([signature], layout,
                 target=target, nopython=True, cache=True)
             def transpose(dst, src):
                 for i in prange(0, src.shape[0]):
                     for j in prange(0, src.shape[1]):
-                        for k in xrange(0, src.shape[2]):
+                        for k in range(0, src.shape[2]):
                             dst[j,k,i] = src[i,j,k]
         elif axes == (2,1,0):
-            @nb.guvectorize([signature], layout, 
+            @nb.guvectorize([signature], layout,
                 target=target, nopython=True, cache=True)
             def transpose(dst, src):
                 for i in prange(0, src.shape[0]):
                     for j in prange(0, src.shape[1]):
-                        for k in xrange(0, src.shape[2]):
+                        for k in range(0, src.shape[2]):
                             dst[k,j,i] = src[i,j,k]
         elif axes == (2,0,1):
-            @nb.guvectorize([signature], layout, 
+            @nb.guvectorize([signature], layout,
                 target=target, nopython=True, cache=True)
             def transpose(dst, src):
                 for i in prange(0, src.shape[0]):
                     for j in prange(0, src.shape[1]):
-                        for k in xrange(0, src.shape[2]):
+                        for k in range(0, src.shape[2]):
                             dst[k,i,j] = src[i,j,k]
         else:
             raise NotImplementedError(axes)
diff --git a/hysop/tools/numerics.py b/hysop/tools/numerics.py
index f4dcd832c8cef203761f0c2495f906da4b2ff0e1..db6c2abc3f2ac4682afcffcfe63a6984a9a1396e 100644
--- a/hysop/tools/numerics.py
+++ b/hysop/tools/numerics.py
@@ -1,33 +1,38 @@
+import numpy as np
+import gmpy2
+from gmpy2 import mpq, mpz, mpfr, f2q
 
 from hysop.constants import HYSOP_REAL, HYSOP_INTEGER, HYSOP_INDEX, HYSOP_BOOL, HYSOP_COMPLEX
-from hysop.deps      import np, gmp
-from gmpy2           import mpq,mpz,mpfr,f2q
 
-MPQ   = mpq(0).__class__
-MPZ   = mpz(0).__class__
-MPFR  = mpfr(0).__class__
-F2Q   = f2q(0).__class__
+MPQ = mpq(0).__class__
+MPZ = mpz(0).__class__
+MPFR = mpfr(0).__class__
+F2Q = f2q(0).__class__
+
 
 def _mpqize(x):
-    if isinstance(x, int) or isinstance(x, long):
-        return mpq(x,1)
+    if isinstance(x, int):
+        return mpq(x, 1)
     elif isinstance(x, float):
         return f2q(x)
     else:
         return mpq(str(x))
+
+
 mpqize = np.vectorize(_mpqize)
 
+
 def get_dtype(x):
     if isinstance(x, np.dtype):
         return x.type
     elif hasattr(x, 'dtype'):
         if callable(x.dtype):
             return x.dtype()
-        elif x.dtype.__class__.__name__ == 'getset_descriptor': # dtype.type has a dtype field...
+        elif x.dtype.__class__.__name__ == 'getset_descriptor':  # dtype.type has a dtype field...
             return x
         else:
             return x.dtype
-    elif isinstance(x, int) or isinstance(x, long):
+    elif isinstance(x, int):
         return np.int64
     elif isinstance(x, float):
         return np.float64
@@ -36,33 +41,40 @@ def get_dtype(x):
     elif (x is None):
         return None
     else:
-        msg='Unknown type in get_dtype (got {}).'
-        msg=msg.format(x.__class__)
+        msg = 'Unknown type in get_dtype (got {}).'
+        msg = msg.format(x.__class__)
         raise TypeError(msg)
 
+
 def get_itemsize(x):
     dtype = np.dtype(get_dtype(x))
     return dtype.itemsize
 
+
 def is_fp(x):
-    types=(np.float16, np.float32, np.float64, np.longdouble)
+    types = (np.float16, np.float32, np.float64, np.longdouble)
     return (get_dtype(x) in types)
 
+
 def is_signed(x):
     types = (np.int8, np.int16, np.int32, np.int64)
     return (get_dtype(x) in types)
 
+
 def is_unsigned(x):
-    types = (np.bool, np.uint8, np.uint16, np.uint32, np.uint64)
+    types = (np.bool_, np.uint8, np.uint16, np.uint32, np.uint64)
     return (get_dtype(x) in types)
 
+
 def is_integer(x):
     return is_signed(x) or is_unsigned(x)
 
+
 def is_complex(x):
     types = (np.complex64, np.complex128, np.clongdouble)
     return (get_dtype(x) in types)
 
+
 def default_invalid_value(dtype):
     nan = float('nan')
     if is_complex(dtype):
@@ -77,19 +89,21 @@ def default_invalid_value(dtype):
         raise NotImplementedError
 
 # promote_dtype
+
+
 def match_dtype(x, dtype):
     """Promote x.dtype to dtype (always safe cast)."""
     xtype = get_dtype(x)
     if isinstance(dtype, str):
-        if dtype=='f':
+        if dtype == 'f':
             return np.promote_types(xtype, np.float16)
-        elif dtype=='i':
+        elif dtype == 'i':
             return np.promote_types(xtype, np.int8)
-        elif dtype=='u':
+        elif dtype == 'u':
             return np.promote_types(xtype, np.uint8)
-        elif dtype=='b':
+        elif dtype == 'b':
             return np.promote_types(xtype, HYSOP_BOOL)
-        elif dtype=='c':
+        elif dtype == 'c':
             return np.promote_types(xtype, np.complex64)
         else:
             raise NotImplementedError(dtype)
@@ -100,6 +114,7 @@ def match_dtype(x, dtype):
     else:
         return dtype
 
+
 def demote_dtype(x, dtype):
     """Demote x.dtype to dtype (not a safe cast)."""
     xtype = get_dtype(x)
@@ -107,14 +122,14 @@ def demote_dtype(x, dtype):
     if is_complex(xtype):
         n //= 2
     if isinstance(dtype, str):
-        if dtype=='c':
+        if dtype == 'c':
             return {1: np.complex64, 2: np.complex64, 4: np.complex64, 8: np.complex128, 16: np.clongdouble}[n]
-        elif dtype=='f':
+        elif dtype == 'f':
             return {1: np.float16, 2: np.float16, 4: np.float32, 8: np.float64, 16: np.longdouble}[n]
-        elif dtype=='i':
-            return {1: np.int8, 2: np.int16, 4:np.int32, 8: np.int64}[n]
-        elif dtype=='u':
-            return {1: np.uint8, 2: np.uint16, 4:np.uint32, 8: np.uint64}[n]
+        elif dtype == 'i':
+            return {1: np.int8, 2: np.int16, 4: np.int32, 8: np.int64}[n]
+        elif dtype == 'u':
+            return {1: np.uint8, 2: np.uint16, 4: np.uint32, 8: np.uint64}[n]
         else:
             raise NotImplementedError(dtype)
     elif (xtype is None):
@@ -124,20 +139,26 @@ def demote_dtype(x, dtype):
     else:
         return dtype
 
+
 def match_float_type(x):
-    return match_dtype(x,'f')
+    return match_dtype(x, 'f')
+
 
 def match_signed_type(x):
-    return match_dtype(x,'i')
+    return match_dtype(x, 'i')
+
 
 def match_unsigned_type(x):
-    return match_dtype(x,'i')
+    return match_dtype(x, 'i')
+
 
 def match_complex_type(x):
-    return match_dtype(x,'c')
+    return match_dtype(x, 'c')
+
 
 def match_bool_type(x):
-    return match_dtype(x,'b')
+    return match_dtype(x, 'b')
+
 
 def complex_to_float_dtype(dtype):
     dtype = get_dtype(dtype)
@@ -149,24 +170,26 @@ def complex_to_float_dtype(dtype):
     elif dtype == np.clongdouble:
         return np.longdouble
     else:
-        msg=msg.format(dtype)
-        msg='Unknown complex type {}.'
+        msg = msg.format(dtype)
+        msg = 'Unknown complex type {}.'
         raise RuntimeError(msg)
 
+
 def float_to_complex_dtype(dtype):
     dtype = get_dtype(dtype)
     assert is_fp(dtype), '{} is not a float'.format(dtype)
-    if dtype==np.float32:
+    if dtype == np.float32:
         return np.complex64
-    elif dtype==np.float64:
+    elif dtype == np.float64:
         return np.complex128
-    elif dtype==np.longdouble:
+    elif dtype == np.longdouble:
         return np.clongdouble
     else:
-        msg='Unknown float type {}.'
-        msg=msg.format(dtype)
+        msg = 'Unknown float type {}.'
+        msg = msg.format(dtype)
         raise RuntimeError(msg)
 
+
 def determine_fp_types(dtype):
     if is_fp(dtype):
         ftype = dtype
@@ -175,23 +198,24 @@ def determine_fp_types(dtype):
         ctype = dtype
         ftype = complex_to_float_dtype(ctype)
     else:
-        msg='{} is not a floating point or complex data type.'
-        msg=msg.format(dtype)
+        msg = '{} is not a floating point or complex data type.'
+        msg = msg.format(dtype)
         raise ValueError(msg)
     return (np.dtype(ftype), np.dtype(ctype))
 
+
 def find_common_dtype(*args):
-    dtypes   = tuple(get_dtype(arg) for arg in args)
+    dtypes = tuple(get_dtype(arg) for arg in args)
     itemsize = tuple(get_itemsize(x) for x in dtypes)
-    n        = max(itemsize)
+    n = max(itemsize)
     if any(is_complex(x) for x in dtypes):
         return {8: np.complex64, 16: np.complex128, 32: np.clongdouble}[n]
     elif any(is_fp(x) for x in dtypes):
         return {2: np.float16, 4: np.float32, 8: np.float64, 16: np.longdouble}[n]
     elif any(is_signed(x) for x in dtypes):
-        return {1: np.int8, 2: np.int16, 4:np.int32, 8: np.int64}[n]
+        return {1: np.int8, 2: np.int16, 4: np.int32, 8: np.int64}[n]
     elif any(is_unsigned(x) for x in dtypes):
-        return {1: np.uint8, 2: np.uint16, 4:np.uint32, 8: np.uint64}[n]
+        return {1: np.uint8, 2: np.uint16, 4: np.uint32, 8: np.uint64}[n]
     else:
-        msg='Did not find any matching dtype.'
+        msg = 'Did not find any matching dtype.'
         raise NotImplementedError(msg)
diff --git a/hysop/tools/numpywrappers.py b/hysop/tools/numpywrappers.py
index 9b5a528ce21ad28bbdd6bcb05ccc5db432c37cff..4b8391d430f61abca240a57ee13c4a7cd4feb40d 100644
--- a/hysop/tools/numpywrappers.py
+++ b/hysop/tools/numpywrappers.py
@@ -4,108 +4,112 @@ Interface to numpy arrays, with hysop predifined types for int, real ...
 Those functions are useful to enforce hysop predefined types in numpy arrays.
 """
 
-from hysop.constants import HYSOP_REAL, HYSOP_COMPLEX, HYSOP_ORDER
-from hysop.constants import HYSOP_INTEGER, HYSOP_INDEX, HYSOP_DIM, HYSOP_BOOL
-
 from hysop.tools.types import check_instance
-from hysop.deps import np as npw
+import numpy as npw
 
 ##########################
 ### EXTRA HYSOP METHODS ##
 
 def __generate_hysop_type_functions():
-    
+
     functions = {
 
             'as{type}array':
 '''
-def __hysop_array_generated_method(a, order=HYSOP_ORDER, **kargs):
+def hysop_array_generated_method(a, order=HYSOP_ORDER, **kargs):
     """
     Convert the input to an array of dtype HYSOP_{TYPE}.
     """
     dtype = HYSOP_{TYPE}
-    return npw.asarray(a=a, dtype=dtype, order=order, **kargs)
+    return np.asarray(a=a, dtype=dtype, order=order, **kargs)
 ''',
             'asany{type}array':
 '''
-def __hysop_array_generated_method(a, order=HYSOP_ORDER, **kargs):
+def hysop_array_generated_method(a, order=HYSOP_ORDER, **kargs):
     """
     Convert the input to an array of dtype HYSOP_{TYPE}.
     """
     dtype = HYSOP_{TYPE}
-    return npw.asanyarray(a=a, dtype=dtype, order=order, **kargs)
+    return np.asanyarray(a=a, dtype=dtype, order=order, **kargs)
 ''',
             '{type}_prod':
 '''
-def __hysop_array_generated_method(a, axis=None, out=None, **kargs):
+def hysop_array_generated_method(a, axis=None, out=None, **kargs):
     """
     Sum of array elements over a given axis.
     """
     dtype = HYSOP_{TYPE}
-    return npw.prod(a=a,axis=axis,out=out,dtype=dtype,**kargs)
+    return np.prod(a=a,axis=axis,out=out,dtype=dtype,**kargs)
 ''',
             '{type}_sum':
 '''
-def __hysop_array_generated_method(a, axis=None, out=None, **kargs):
+def hysop_array_generated_method(a, axis=None, out=None, **kargs):
     """
     Sum of array elements over a given axis.
     """
     dtype = HYSOP_{TYPE}
-    return npw.sum(a=a,axis=axis,out=out,dtype=dtype,**kargs)
+    return np.sum(a=a,axis=axis,out=out,dtype=dtype,**kargs)
 ''',
 
             '{type}_empty':
 '''
-def __hysop_array_generated_method(shape, order=HYSOP_ORDER, **kargs):
+def hysop_array_generated_method(shape, order=HYSOP_ORDER, **kargs):
     """
     Return a new array of given shape and type, without initializing entries.
     """
     dtype = HYSOP_{TYPE}
-    return npw.empty(shape=shape, dtype=dtype, order=order, **kargs)
+    return np.empty(shape=shape, dtype=dtype, order=order, **kargs)
 ''',
 
             '{type}_ones':
 '''
-def __hysop_array_generated_method(shape, order=HYSOP_ORDER, **kargs):
+def hysop_array_generated_method(shape, order=HYSOP_ORDER, **kargs):
     """
     Return a new array of given shape filled with ones of type HYSOP_{TYPE}.
     """
     dtype = HYSOP_{TYPE}
-    return npw.ones(shape=shape, order=order, dtype=dtype, **kargs)
+    return np.ones(shape=shape, order=order, dtype=dtype, **kargs)
 ''',
-    
+
             '{type}_zeros':
 '''
-def __hysop_array_generated_method(shape, order=HYSOP_ORDER, **kargs):
+def hysop_array_generated_method(shape, order=HYSOP_ORDER, **kargs):
     """
     Return a new array of given shape, filled with zeros of type HYSOP_{TYPE}.
     """
     dtype = HYSOP_{TYPE}
-    return npw.zeros(shape=shape, order=order, dtype=dtype, **kargs)
+    return np.zeros(shape=shape, order=order, dtype=dtype, **kargs)
 ''',
 
             '{type}_full':
 '''
-def __hysop_array_generated_method(shape, fill_value, order=HYSOP_ORDER, **kargs):
+def hysop_array_generated_method(shape, fill_value, order=HYSOP_ORDER, **kargs):
     """
     Return a new array of given shape, filled with fill_value of type HYSOP_{TYPE}.
     """
     dtype = HYSOP_{TYPE}
-    return npw.full(shape=shape, fill_value=fill_value, order=order, dtype=dtype, **kargs)
+    return np.full(shape=shape, fill_value=fill_value, order=order, dtype=dtype, **kargs)
 '''
 }
-    
+
     hysop_types = ['real', 'complex', 'integer', 'index', 'dim', 'bool']
 
     for ht in hysop_types:
-        for _fname, fdefinition in functions.iteritems():
+        for _fname, fdefinition in functions.items():
             fname = _fname.format(type=ht, TYPE=ht.upper())
-            fdef  = fdefinition.format(type=ht, TYPE=ht.upper())
-            exec(fdef)
-            setattr(npw, fname, __hysop_array_generated_method)
+            fdef = \
+'''
+import numpy as np
+from hysop.constants import HYSOP_REAL, HYSOP_COMPLEX, HYSOP_ORDER
+from hysop.constants import HYSOP_INTEGER, HYSOP_INDEX, HYSOP_DIM, HYSOP_BOOL
+{}
+'''.format(fdefinition.format(type=ht, TYPE=ht.upper()))
+            namespace = dict()
+            exec(fdef, namespace)
+            setattr(npw, fname, namespace['hysop_array_generated_method'])
             if ht == 'integer':
                 fname = _fname.format(type='int')
-                setattr(npw, fname, __hysop_array_generated_method)
+                setattr(npw, fname, namespace['hysop_array_generated_method'])
 
 __generate_hysop_type_functions()
 
@@ -124,13 +128,13 @@ def slices_empty(slices, shape):
     if slices is Ellipsis:
         return False
     from hysop.core.arrays.array import Array
-    if isinstance(slices, (int,long,npw.integer,npw.ndarray,Array)):
+    if isinstance(slices, (int,npw.integer,npw.ndarray,Array)):
         return
 
     slices = (slices,) if isinstance(slices,slice) else slices
     assert len(shape) >= len(slices)
     shape = shape[:len(slices)]
-    empty = tuple( slices[i].indices(shape[i]) for i in xrange(len(slices)) \
+    empty = tuple( slices[i].indices(shape[i]) for i in range(len(slices)) \
                    if isinstance(slices[i], slice) )
     empty = tuple( (i>=j) for (i,j,_),ss in zip(empty,shape) )
     return any(empty)
@@ -144,7 +148,7 @@ def set_readonly(*args):
         arg.setflags(write=False)
 
 
-def fancy_print(a, replace_values={}, replace_views={}, 
+def fancy_print(a, replace_values={}, replace_views={},
         element_width=6, inital_val=None, **print_opts):
     """
     Print values with ghosts replaced by symbol.
@@ -175,20 +179,20 @@ def fancy_print(a, replace_values={}, replace_views={},
             infstr = 'inf'
             formatter = custom formatter
     """
-    check_instance(a, npw.ndarray) 
+    check_instance(a, npw.ndarray)
     strarr = npw.empty_like(a, dtype=object)
 
     if (inital_val is None):
         strarr[...] = a
     else:
         strarr[...] = inital_val
-    
-    for predicate, replace_val in replace_values.iteritems():
+
+    for predicate, replace_val in replace_values.items():
         assert callable(predicate)
         pred = predicate(a)
         strarr[pred] = replace_val
 
-    for view, replace_val in replace_views.iteritems():
+    for view, replace_val in replace_views.items():
         strarr[view] = replace_val
 
     _formatter = {
@@ -196,15 +200,15 @@ def fancy_print(a, replace_values={}, replace_views={},
             float:  lambda x: '{:{width}.2f}'.format(x, width=element_width)
     }
 
-    _print_opts = dict(threshold=10000, linewidth=1000, 
-                         nanstr='nan', infstr='inf', 
+    _print_opts = dict(threshold=10000, linewidth=1000,
+                         nanstr='nan', infstr='inf',
                          formatter={'object': lambda x: _formatter.get(type(x),
                                                             _formatter[object])(x)})
     _print_opts.update(print_opts)
 
     from hysop.tools.contexts import printoptions
     with printoptions(**_print_opts):
-        print strarr
+        print(strarr)
 
 npw.set_readonly = set_readonly
 npw.ndindex_with_ghosts = ndindex_with_ghosts
diff --git a/hysop/tools/parameters.py b/hysop/tools/parameters.py
index e78f009cdb16fb644974275a22dc22cf9fa73b19..95629b0aa90dd8154357faab288e6f790304e660 100755
--- a/hysop/tools/parameters.py
+++ b/hysop/tools/parameters.py
@@ -50,7 +50,7 @@ class MPIParams(namedtuple('MPIParams', ['comm', 'size', 'task_id',
             rank = MPI.PROC_NULL
             comm = MPI.COMM_NULL
             size = -1
-        elif comm != MPI.COMM_NULL:
+        elif (comm != MPI.COMM_NULL):
             rank = comm.Get_rank()
             size = comm.Get_size()
         else:
@@ -82,8 +82,8 @@ class MPIParams(namedtuple('MPIParams', ['comm', 'size', 'task_id',
 
     def __hash__(self):
         h = hashlib.sha1()
-        h.update(str(self.task_id))
-        h.update(str(self.on_task))
+        h.update(str(self.task_id).encode('utf-8'))
+        h.update(str(self.on_task).encode('utf-8'))
         return hash(h.hexdigest()) ^ id(self.comm)
 
 
@@ -199,7 +199,7 @@ class CartesianDiscretization(namedtuple("CartesianDiscretization",
         h.update(self.resolution.view(npw.uint8))
         h.update(self.ghosts.view(npw.uint8))
         if (self.lboundaries is not None):
-            h.update(str(hash(tuple(int(bd) for bd in self.lboundaries))))
+            h.update(str(hash(tuple(int(bd) for bd in self.lboundaries))).encode('utf-8'))
         if (self.rboundaries is not None):
-            h.update(str(hash(tuple(int(bd) for bd in self.rboundaries))))
+            h.update(str(hash(tuple(int(bd) for bd in self.rboundaries))).encode('utf-8'))
         return hash(h.hexdigest())
diff --git a/hysop/tools/plotDrag.py b/hysop/tools/plotDrag.py
index e10269c119b1e08c0de01b489351b4be49697f3e..7f1841446096d28963cf1c8f69fe62d41a1102f0 100644
--- a/hysop/tools/plotDrag.py
+++ b/hysop/tools/plotDrag.py
@@ -32,8 +32,8 @@ def _ft_read(fileobj, commentchar='#'):
         return None
     fileobj.seek(location)
 
-    blankline = re.compile('\n\s*\n',  re.M)
-    commentline = re.compile('^%s[^\n]*\n' % commentchar, re.M)
+    blankline = re.compile(r'\n\s*\n',  re.M)
+    commentline = re.compile(r'^%s[^\n]*\n' % commentchar, re.M)
     filestr = fileobj.read()
     # remove lines after a blank line
     m = re.search(blankline, filestr)
@@ -43,7 +43,7 @@ def _ft_read(fileobj, commentchar='#'):
     filestr = re.sub(commentline, '', filestr)
     a = [float(x) for x in filestr.split()]
     data = np.array(a)
-    data.shape = (len(a)/shape1, shape1)
+    data.shape = (len(a)//shape1, shape1)
     return data
 
 
@@ -71,7 +71,7 @@ plt.axis([0, 70, 0.3, 1])
 plt.grid('on')
 
 for filename in fileListLambda3:
-    print ("my file is ", filename)
+    print("my file is ", filename)
     file = open(filename)
     table = _ft_read(file)
     time = table[:, 0]
diff --git a/hysop/tools/profiler.py b/hysop/tools/profiler.py
index 729fc3e9c6d73a8849d5cb29577fac20283bad22..f665c4fc084126a4e2120c485540c1a3f93af1a2 100644
--- a/hysop/tools/profiler.py
+++ b/hysop/tools/profiler.py
@@ -27,7 +27,7 @@ class FProfiler(object):
         >>> # do something else ... [2]
         >>> prof += ftime() - start
         >>> # ...
-        >>> # print prof
+        >>> # print(prof)
         >>> # --> display total time spent in do [1] and [2]
         >>> # and number of calls of prof
 
@@ -137,8 +137,8 @@ class Profiler(object):
         summary = self.summary
         if len(summary) > 0:
             if (self._l > 1) and (len(summary) == 1) and \
-                    isinstance(summary.values()[0], FProfiler):
-                s = '>{}::{}'.format(self.get_name(), summary.values()[0])
+                    isinstance(next(iter(summary.values())), FProfiler):
+                s = '>{}::{}'.format(self.get_name(), next(iter(summary.values())))
             else:
                 s = '{}[{}]>{}{}'.format(
                     '\n' if (self._l == 1) else '',
@@ -152,6 +152,37 @@ class Profiler(object):
             s = ''
         return s
 
+    def write(self, prefix='', hprefix='', with_head=True):
+        """
+
+        Parameters
+        ----------
+        prefix : string, optional
+        hprefix : string, optional
+        with_head : bool, optional
+
+        """
+        if prefix != '' and prefix[-1] != ' ':
+            prefix += ' '
+        if hprefix != '' and hprefix[-1] != ' ':
+            hprefix += ' '
+        if self._comm.Get_rank() == 0:
+            s = ""
+            h = hprefix + "Rank"
+            for r in range(self._comm_size):
+                s += prefix + "{0}".format(r)
+                for i in range(len(self.all_names)):
+                    s += " {0}".format(self.all_times[i][r])
+                s += "\n"
+            s += prefix + "-1"
+            for i in range(len(self.all_names)):
+                h += ' ' + self.all_names[i]
+                s += " {0}".format(self.all_times[i][self._comm_size])
+            h += "\n"
+            if with_head:
+                s = h + s
+            print(s)
+
     def summarize(self):
         """
         Update profiling values and prepare data for a report
@@ -201,18 +232,19 @@ class Profiler(object):
             all_calls = comm.allgather([_[2] for _ in self.table])
             all_data = {}
             # all_data structure : task_size, calls nb, self total, task mean, task min, task max
+            nelem = 6
             for r in range(comm_size):
                 for n, t, c in zip(all_names[r], all_times[r], all_calls[r]):
                     if n not in all_data:
-                        all_data[n] = [0, ]*6+[[], ]
+                        all_data[n] = [0, ]*nelem+[[], ]
                     all_data[n][0] += 1
                     all_data[n][1] += c
                     if r == rk:
                         all_data[n][2] = t
-                    all_data[n][6].append(t)
+                    all_data[n][nelem].append(t)
             for n in all_data.keys():
                 all_data[n][1] /= all_data[n][0]
-                all_data[n][3] = npw.average(all_data[n][6])
-                all_data[n][4] = npw.min(all_data[n][6])
-                all_data[n][5] = npw.max(all_data[n][6])
+                all_data[n][3] = npw.average(all_data[n][nelem])
+                all_data[n][4] = npw.min(all_data[n][nelem])
+                all_data[n][5] = npw.max(all_data[n][nelem])
             self.all_data = all_data
diff --git a/hysop/tools/profiling.py b/hysop/tools/profiling.py
index cfad3c0ccafc6ceef4233d2a785fb801387419b6..2460d82c6b8075f8952278a23126c4929d0f0167 100644
--- a/hysop/tools/profiling.py
+++ b/hysop/tools/profiling.py
@@ -42,7 +42,7 @@ if not os.path.exists(args['path']):
 
 def system(cmd):
     if args['verbose']:
-        print cmd.replace(';', '\n')
+        print(cmd.replace(';', '\n'))
     ret = os.system(cmd)
     if ret!=0:
         msg='Command failed: {}'.format(cmd)
@@ -93,7 +93,7 @@ def get_mem_stats(thesummary, amount=None):
     if amount == None:
         amount = len(thesummary.summaries)
     cumulative = 0
-    for i in xrange(amount):
+    for i in range(amount):
         summary = thesummary.summaries[i]
         cumulative += summary.total_size
         yield (i, summary.count,
@@ -134,8 +134,6 @@ def get_stats(stats, amount):
     else:
         lst = stats.stats.keys()
         msg = "   Random listing order was used\n"
-    #for selection in xrange(amount):
-        #print selection
     lst, msg = stats.eval_print_amount(amount, lst, msg)
     for func in lst:
         cc, nc, tt, ct, callers = stats.stats[func]
@@ -201,7 +199,7 @@ def print_stats_file(stats, f, amount):
             ls = stats.stats.keys()
             msg = "   Random listing order was used\n"
 
-        for selection in xrange(amount):
+        for selection in range(amount):
             ls, msg = stats.eval_print_amount(selection, ls, msg)
 
         count = len(ls)
@@ -256,7 +254,7 @@ def launch_cProfile():
     cProfile.run('run()', rawCProfileOutput)
 
     prof = pstats.Stats(rawCProfileOutput)
-    
+
     print_stats_file(prof.strip_dirs().sort_stats('time'),
                      cProfileTimeOutput, 15)
 
@@ -282,7 +280,7 @@ else:
 hysop.reset()
 memStat, memStatPieChart = launch_meliae_profiling()
 # Create results string
-regexp = re.compile('(.*\d{4})\s+(?:(?:/\w+)+/)?(\w+\.pstat)')
+regexp = re.compile(r'(.*\d{4})\s+(?:(?:/\w+)+/)?(\w+\.pstat)')
 files = []
 for file_ in p.files:
     res = regexp.match(file_)
@@ -344,7 +342,7 @@ f_out.write("""\\documentclass[11pt]{scrartcl}
 
     \\begin{footnotesize}
     \\begin{verbatim}
-    """ 
+    """
     + pstatTime
     + """
     \\end{verbatim}
diff --git a/hysop/tools/remeshing_formula_parsing.py b/hysop/tools/remeshing_formula_parsing.py
deleted file mode 100644
index 279d5767c6ecec37b2526870a4739f6a529afc19..0000000000000000000000000000000000000000
--- a/hysop/tools/remeshing_formula_parsing.py
+++ /dev/null
@@ -1,235 +0,0 @@
-"""Functions to parse some remeshing formula code (given as strings from Maple
-or Sympy for instance). Result is a formula usable in the
-hysop.numerics.remeshing module or in OpenCL code in
-hysop/gpu/cl_src/remeshing/weights*
-
-To work properly, this module needs sympy python package.
-"""
-import re
-try:
-    import sympy as sp
-
-    # Weights names
-    weights_names = ['alpha',
-                     'beta',
-                     'gamma',
-                     'delta',
-                     'eta',
-                     'zeta',
-                     'theta',
-                     'iota',
-                     'kappa',
-                     'mu']
-
-    def parse(f, fac=1, vec=False, toOpenCL=True,
-              CLBuiltins=False, keep=False):
-        """Parsing function.
-
-
-        Parameters
-        ----------
-        f : python functions
-            to be parsed as string
-        fac : real
-            numeric factor for all formulas
-        vec : boolean
-            true if we use vector builtin types
-        toOpenCL : boolean
-            true if OpenCL output
-        CLBuiltins : boolean
-            true if OpenCL output uses fma builtin function
-        keep : boolean
-            true low parsing
-
-        """
-        msg = 'Vector works only in OpenCL parsing'
-        assert not (vec and not toOpenCL), msg
-        assert not (CLBuiltins and not toOpenCL),\
-            "CLBuiltins only in OpenCL parsing"
-        t = "float__N__" if vec else "float"
-        cteEnd = ".0" if toOpenCL else "."
-        res = ""
-        # Split each line
-        fl = f.split('\n')
-        # sympy formulas
-        y = sp.symbols('y')
-        print (y)
-        sw = [None] * f.count(';')
-        i = 0
-        for wl in fl:
-            if len(wl) > 2:
-                # replace pow
-                power = re.search('pow\(y, ([0-9]+)\)', wl)
-                if power is not None:
-                    np = "y" + "*y" * (int(power.group(1)) - 1)
-                    wl = wl.replace(power.group(0), np)
-                sw[i] = '('
-                sw[i] += str(sp.horner(eval(wl.split(';')[0].split('=')[1]) * fac))
-                sw[i] += ')/' + str(fac)
-                i += 1
-        for i, s in enumerate(sw):
-            if not keep:
-                if toOpenCL:
-                    res += "inline " + t + " "
-                    res += weights_names[i] + "(" + t + " y){\n"
-                    res += '  return '
-                else:
-                    res += 'lambda y, s: s * '
-                res += '('
-                # replace y**n
-                power = re.findall('y\*\*[0-9]+', s)
-                if power is not None:
-                    for pw in power:
-                        n = int(pw.split('**')[1])
-                        npower = 'y' + "*y" * (n - 1)
-                        s = s.replace(pw, npower)
-                s = s.replace(' ', '')
-                if CLBuiltins:
-                    s = createFMA(s)
-                # From integers to floats
-                s = re.sub(r"(?P<id>[0-9]+)", r"\g<id>" + cteEnd, s)
-                s = s.replace('*', ' * ')
-                s = s.replace('/', ' / ')
-                s = s.replace('+', ' + ')
-                s = s.replace('-', ' - ')
-                s = s.replace('( - ', '(-')
-                s = s.replace('  ', ' ')
-                s = s.replace(", - ", ", -")
-                res += s + ')'
-                if toOpenCL:
-                    res += ";}"
-                if i < len(sw) - 1:
-                    res += "\n"
-            else:
-                res += "w[{0}] = ".format(i)
-                # replace y**n
-                power = re.findall('y\*\*[0-9]+', s)
-                if power is not None:
-                    for pw in power:
-                        n = int(pw.split('**')[1])
-                        npower = 'y' + "*y" * (n - 1)
-                        s = s.replace(pw, npower)
-                # From integers to floats
-                s = re.sub(r"(?P<id>[0-9]+)", r"\g<id>.", s)
-                s = s.replace('*', ' * ')
-                s = s.replace('/', ' / ')
-                s = s.replace('+', ' + ')
-                s = s.replace('-', ' - ')
-                s = s.replace('( - ', '(-')
-                s = s.replace('  ', ' ')
-                s = s.replace(", - ", ", -")
-                res += s + "\n"
-        return res
-
-except:
-    msge = 'Sympy not available - remeshing formula parsing will not work.'
-    msge += 'If you need parsing, try "pip install sympy" and reinstall hysop.'
-    print msge
-
-
-def createFMA(s):
-    """
-    Function to handle fma replacements in formula.
-    :param s : formula to parse
-
-    \code
-    >>> createFMA("(y)")
-    'fma(y, 1, 1)'
-    >>> createFMA("(2*y)")
-    'fma(y, 2, 1)'
-    >>> createFMA("(y+11)")
-    'fma(y, 1, 11)'
-    >>> createFMA("(y+11)")
-    'fma(y, 1, 11)'
-    >>> createFMA("(y-11)")
-    'fma(y, 1, -11)'
-    >>> createFMA("(-y+11)")
-    'fma(y, -1, 11)'
-    >>> createFMA("(-y-11)")
-    'fma(y, -1, -11)'
-    >>> createFMA("(-22*y+11)")
-    'fma(y, -22, 11)'
-    >>> createFMA("(22*y-11)")
-    'fma(y, 22, -11)'
-    >>> createFMA("fma(y, 22, -11)")
-    'fma(y, 22, -11)'
-    >>> createFMA("(y*fma(y, 22, -11)+4)")
-    'fma(y, fma(y, 22, -11), 4)'
-    >>> createFMA("(y*fma(y, 22, -11)-4)")
-    'fma(y, fma(y, 22, -11), -4)'
-    >>> createFMA("(y*y*y*fma(y, 22, -11)+4)")
-    'fma(y*y*y, fma(y, 22, -11), 4)'
-
-    \endcode
-    """
-    def fma_replace(m):
-        """
-        Regexp callback function to replace a * y + c by fma(y, a, c).
-        Matching regexp is (groups are given below):
-        \code
-        "\(((-)?(([0-9]+)\*)?)y(([+-])([0-9]+))?\)"
-            |2|  |--4---|       |--6-||--7---|
-                |-----3----|   |-------5------|
-           |--------1--------|
-        \endcode
-        """
-        s = "fma(y, "
-        if not m.group(1) is None and not m.group(1) == '':
-            # There is a '(-)?(a*)?'
-            if not m.group(2) is None:
-                # There is a '(-)' else '+' is not present and not needed
-                s += m.group(2)
-            if not m.group(3) is None:
-                # There is a '(a*)' else 'a' is 1
-                s += m.group(4)
-            else:
-                s += '1'
-        else:
-            s += '1'
-        s += ', '
-        if not m.group(5) is None:
-            # There is a '(+-)(c)' else 'c' is 1
-            if m.group(6) == '-':
-                # There is a '-' else '+' is obmited
-                s += m.group(6)
-            s += m.group(7)
-        else:
-            s += '1'
-        s += ')'
-        return s
-
-    def fma_recurse(m):
-        """
-        Regexp callback function to replace (y*fma(...)+c) by
-        fma(y, fma(...), c) where '(' and ')' are well balanced.
-        Matching regexp is (groups are given below):
-        \code
-        "\(([y\*]*y)\*(fma\(.*\))([+-])([0-9]+)\)"
-           |---1---|  |----2----||-3--||--4---|
-        \endcode
-        """
-        assert m.group(0).count('(') <= m.group(0).count(')'), \
-            "Matching is too short to get ([fma(,,)]+): " + m.group(0)
-        tmp = ""
-        # get the part of the mathing that have the same number of '(' and ')'
-        for t in m.group(0).split(')')[:m.group(0).count('(')]:
-            tmp += t + ')'
-        #performs the same regexp
-        tmpg = re.search(r"\(([y\*]*y)\*(fma\(.*\))([+-])([0-9]+)\)", tmp)
-        s = "fma(" + tmpg.group(1) + ", " + tmpg.group(2) + ', '
-        if tmpg.group(3) == '-':
-            s += tmpg.group(3)
-        s += tmpg.group(4)
-        s += ')'
-        return m.group(0).replace(tmp, s)
-
-    s = re.sub(r"\(((-)?(([0-9]+)\*)?)y(([+-])([0-9]+))?\)", fma_replace, s)
-    l = len(re.findall(r'fma\(', s))
-    recurse = True
-    while recurse:
-        s = re.sub(r"\(([y\*]*y)\*(fma\(.*\))([+-])([0-9]+)\)", fma_recurse, s)
-        ll = len(re.findall(r'fma\(', s))
-        if l == ll:
-            recurse = False
-        l = ll
-    return s
diff --git a/hysop/tools/spectral_utils.py b/hysop/tools/spectral_utils.py
index f1d792a15c16d9398764fa0aec5518350d783c92..7bbddaa5d7a58cd69b0bd1964ac18b7e97d46a2e 100644
--- a/hysop/tools/spectral_utils.py
+++ b/hysop/tools/spectral_utils.py
@@ -33,7 +33,7 @@ class SpectralTransformUtils(object):
       TransformType.DST_I, TransformType.DST_II, TransformType.DST_III, TransformType.DST_IV,
       TransformType.DCT_I, TransformType.DCT_II, TransformType.DCT_III, TransformType.DCT_IV,
     )
-    
+
     backward_transforms = (
       TransformType.IFFT, TransformType.IRFFT,
       TransformType.IDST_I, TransformType.IDST_II, TransformType.IDST_III, TransformType.IDST_IV,
@@ -113,7 +113,7 @@ class SpectralTransformUtils(object):
             transform_offsets.append((lo,ro))
         return tuple(shape), tuple(transform_offsets)
 
-        
+
     @classmethod
     def compute_wave_numbers(cls, transform, N, L, ftype):
         """Compute wave numbers of a given transform."""
@@ -170,13 +170,13 @@ class SpectralTransformUtils(object):
                 msg='Unknown transform type {}.'.format(tr)
                 raise ValueError(msg)
         return np.dtype(dtype)
-            
+
     @classmethod
     def determine_input_dtype(cls, output_dtype, *transforms):
         """Compute input data type from output data type and list of backward transforms."""
         backward_transforms = cls.get_inverse_transforms(*transforms)
         return cls.determine_output_dtype(output_dtype, *backward_transforms)
-    
+
     @classmethod
     def parse_expression(cls, expr, replace_pows=True):
         """
@@ -184,7 +184,7 @@ class SpectralTransformUtils(object):
         If replace_pow is set, all wave_numbers powers will have their own symbol
         and are replace in expression (this allows to precompute wavenumber powers).
 
-        Returns parsed expression and a set of spectral transforms and 
+        Returns parsed expression and a set of spectral transforms and
         a set of contained wave_numbers.
         """
         from hysop.symbolic.spectral import WaveNumber, AppliedSpectralTransform
@@ -200,7 +200,7 @@ class SpectralTransformUtils(object):
             elif replace_pows and \
                  isinstance(expr, sm.Pow) and \
                  isinstance(expr.args[0], WaveNumber) and \
-                 isinstance(expr.args[1], (int,long,np.integer,sm.Integer)):
+                 isinstance(expr.args[1], (int,np.integer,sm.Integer)):
                 wn = expr.args[0].pow(int(expr.args[1]))
                 wave_numbers.add(wn)
                 return wn
@@ -216,13 +216,13 @@ class SpectralTransformUtils(object):
                     msg='\nFATAL ERROR: Failed to rebuild expr {}'.format(expr)
                     msg+='\n type is {}'.format(expr.func)
                     msg+='\n'
-                    print msg
+                    print(msg)
                     raise
             else:
                 return expr
         expr = _extract(expr)
         return (expr, transforms, wave_numbers)
-        
+
 
     @classmethod
     def generate_wave_number(cls, transform, axis, exponent):
@@ -248,7 +248,7 @@ class SpectralTransformUtils(object):
         Note that transforms are returned in natural ordering (ie. contiguous X-axis last).
         """
         check_instance(field, ScalarField)
-        boundaries = tuple((lbd, rbd) for (lbd, rbd) 
+        boundaries = tuple((lbd, rbd) for (lbd, rbd)
                             in zip(field.lboundaries_kind, field.rboundaries_kind))
         transforms = cls.boundaries_to_transforms(boundaries[::-1], transformed_axes)[::-1]
         return transforms
@@ -278,7 +278,7 @@ class SpectralTransformUtils(object):
         for boundary_pair in boundaries:
             if (boundary_pair not in valid_boundary_pairs):
                 msg='Invalid boundary pair {}, valid ones are\n  *{}'
-                msg=msg.format(boundary_pair, '\n  *'.join(str(vbp) 
+                msg=msg.format(boundary_pair, '\n  *'.join(str(vbp)
                     for vbp in valid_boundary_pairs))
                 raise ValueError(msg)
             (left_bd, right_bd) = boundary_pair
@@ -335,7 +335,7 @@ class SpectralTransformUtils(object):
         )
         extension_pair = (left_ext, right_ext)
         msg='Invalid domain extension pair {}, valid ones are\n  *{}'
-        msg=msg.format(extension_pair, '\n  *'.join(str(vep) 
+        msg=msg.format(extension_pair, '\n  *'.join(str(vep)
             for vep in valid_extension_pairs))
 
         if (extension_pair not in valid_extension_pairs):
@@ -443,26 +443,26 @@ class SpectralTransformUtils(object):
 def make_multivariate_trigonometric_polynomial(Xl, Xr, lboundaries, rboundaries, N):
     """
     Build a tensor product of trigonometric polynomials satisfying boundary conditions on each axis.
-    
+
     lboundaries: np.ndarray of BoundaryCondition
     rboundaries: np.ndarray of BoundaryCondition
     other parameters: scalar or array_like of the same size as boundary arrays
-    
+
     All parameters are expanded to the size of the length of prescribed boundaries.
     See make_trigonometric_polynomial for more informations about parameters.
-    
+
     This method returns a tuple (P,Y) where:
 
     P is a sympy expression representing a multivariate trigonometric polynomials in variables
     Y = (y0, y1, ..., yd)
-    
-    P(Y) = P0(y0) * P1(y1) * ... * Pd(yd) 
-    
+
+    P(Y) = P0(y0) * P1(y1) * ... * Pd(yd)
+
     *d = lboundaries.size-1 = rboundaries.size-1
 
     *P0 is a trigonometric polynomial of order N[0] that satisfies (lboundaries[0], rboundaries[0])
      on domain [Xl[0], Xr[0]].
-    
+
     *P1 is a trigonometric polynomial of order N[1] that satisfies (lboundaries[1], rboundaries[1])
      on domain [Xl[1], Xr[1]].
     .
@@ -488,7 +488,7 @@ def make_multivariate_trigonometric_polynomial(Xl, Xr, lboundaries, rboundaries,
     assert lboundaries.size==rboundaries.size==dim
     assert all(xl<xr for (xl,xr) in zip(Xl,Xr))
     assert all(n>=1 for n in N)
-    
+
     _,Y = tensor_symbol('y', shape=(dim,))
     P = 1
     for (xl,xr,lb,rb,n,yi) in zip(Xl, Xr, lboundaries, rboundaries, N, Y):
@@ -500,26 +500,26 @@ def make_multivariate_trigonometric_polynomial(Xl, Xr, lboundaries, rboundaries,
 def make_multivariate_polynomial(Xl, Xr, lboundaries, rboundaries, N, order):
     """
     Build a tensor product of polynomials satisfying boundary conditions on each axis.
-    
+
     lboundaries: np.ndarray of BoundaryCondition
     rboundaries: np.ndarray of BoundaryCondition
     other parameters: scalar or array_like of the same size as boundary arrays
-    
+
     All parameters are expanded to the size of the length of prescribed boundaries.
     See make_polynomial for more informations about parameters.
-    
+
     This method returns a tuple (P,Y) where:
 
     P is a sympy expression representing a multivariate polynomials in variables
     Y = (y0, y1, ..., yd)
-    
-    P(Y) = P0(y0) * P1(y1) * ... * Pd(yd) 
-    
+
+    P(Y) = P0(y0) * P1(y1) * ... * Pd(yd)
+
     *d = lboundaries.size-1 = rboundaries.size-1
 
     *P0 is a polynomial of order N[0] that satisfies (lboundaries[0], rboundaries[0])
      on domain [Xl[0], Xr[0]] up to order order[0].
-    
+
     *P1 is a polynomial of order N[1] that satisfies (lboundaries[1], rboundaries[1])
      on domain [Xl[1], Xr[1]] up to order order[1].
     .
@@ -548,7 +548,7 @@ def make_multivariate_polynomial(Xl, Xr, lboundaries, rboundaries, N, order):
     assert all(xl<xr for (xl,xr) in zip(Xl,Xr))
     assert all(o>=2 for o in order)
     assert all(n>2*o for (o,n) in zip(order, N))
-    
+
     _,Y = tensor_symbol('y', shape=(dim,))
     P = 1
     for (xl,xr,lb,rb,n,o,yi) in zip(Xl, Xr, lboundaries, rboundaries, N, order, Y):
@@ -562,7 +562,7 @@ def make_polynomial(Xl, Xr, lboundary, rboundary, N, order):
     """
     Build a polynom of order N-1 between on domain [Xl, Xr] that verifies
     prescribed left and right boundary conditions up to a certain order.
-    
+
     Conditions:
       Xl < Xr
       order >= 2
@@ -583,13 +583,13 @@ def make_polynomial(Xl, Xr, lboundary, rboundary, N, order):
     check_instance(rboundary, BoundaryCondition)
     check_instance(N, int)
     check_instance(order, int)
-    
+
     x = sm.Symbol('x')
     a, A = tensor_symbol('a', shape=(N,))
-    
+
     def rand(*n):
         return 2.0*(np.random.rand(*n)-0.5)
-    
+
     K = 2*order
     assert Xl<Xr
     assert order>=2
@@ -605,11 +605,11 @@ def make_polynomial(Xl, Xr, lboundary, rboundary, N, order):
     P = sum(ai*(x**i) for (i,ai) in enumerate(a))
 
     Pd = [P]
-    for i in xrange(K):
+    for i in range(K):
         Pd.append(Pd[-1].diff(x))
 
     eqs = []
-    for i in xrange(order):
+    for i in range(order):
         if (lboundary is BoundaryCondition.PERIODIC):
             leq = Pd[2*i].xreplace({x:Xl}) - Pd[2*i].xreplace({x:Xr})
         elif (lboundary is BoundaryCondition.HOMOGENEOUS_NEUMANN):
@@ -619,7 +619,7 @@ def make_polynomial(Xl, Xr, lboundary, rboundary, N, order):
         else:
             msg='Unknown left boundary condition {}.'.format(lboundary)
             raise NotImplementedError(msg)
-        
+
         if (rboundary is BoundaryCondition.PERIODIC):
             req = Pd[2*i+1].xreplace({x:Xl}) - Pd[2*i+1].xreplace({x:Xr})
         elif (rboundary is BoundaryCondition.HOMOGENEOUS_NEUMANN):
@@ -644,26 +644,26 @@ def make_polynomial(Xl, Xr, lboundary, rboundary, N, order):
     X = np.linspace(Xl, Xr, 1000)
     m,M = np.min(P0(X)), np.max(P0(X))
     P /= (M-m)
-    
+
     return sm.horner(P), x
 
 
 def make_trigonometric_polynomial(Xl, Xr, lboundary, rboundary, N):
     """
-    Build a real trigonometric polynomial of order N-1 
-    between on domain [Xl, Xr] that verifies prescribed left and right 
+    Build a real trigonometric polynomial of order N-1
+    between on domain [Xl, Xr] that verifies prescribed left and right
     boundary conditions.
-    
+
     Conditions:
       Xl < Xr
       N >= 1
 
     Valid boundary conditions are:
-        (PERIODIC, PERIODIC)           
-        (HDIRICHLET, HDIRICHLET)       
-        (HDIRICHLET, HNEUMANN)         
-        (HNEUMANN,   HDIRICHLET)       
-        (HNEUMANN,   HNEUMANN)         
+        (PERIODIC, PERIODIC)
+        (HDIRICHLET, HDIRICHLET)
+        (HDIRICHLET, HNEUMANN)
+        (HNEUMANN,   HDIRICHLET)
+        (HNEUMANN,   HNEUMANN)
 
     Return (P, X) where P is a sympy expression that represent the polynomial and X is the
     corresponding sympy.Symbol.
@@ -678,13 +678,13 @@ def make_trigonometric_polynomial(Xl, Xr, lboundary, rboundary, N):
     y = (x-Xl)/(Xr-Xl)*(2*sm.pi)
 
     boundaries = (lboundary, rboundary)
-    if (boundaries == (BoundaryCondition.PERIODIC, 
+    if (boundaries == (BoundaryCondition.PERIODIC,
                        BoundaryCondition.PERIODIC)):
         fn = lambda n: r()*sm.cos(n*y+sm.pi*r()) + r()*sm.sin(n*y+sm.pi*r())
-    elif (boundaries == (BoundaryCondition.HOMOGENEOUS_DIRICHLET, 
+    elif (boundaries == (BoundaryCondition.HOMOGENEOUS_DIRICHLET,
                          BoundaryCondition.HOMOGENEOUS_DIRICHLET)):
         fn = lambda n: r()*sm.sin(n*y)
-    elif (boundaries == (BoundaryCondition.HOMOGENEOUS_DIRICHLET, 
+    elif (boundaries == (BoundaryCondition.HOMOGENEOUS_DIRICHLET,
                          BoundaryCondition.HOMOGENEOUS_NEUMANN)):
         fn = lambda n: r()*sm.sin((4*n-1)/4.0*y)
     elif (boundaries == (BoundaryCondition.HOMOGENEOUS_NEUMANN,
@@ -696,13 +696,13 @@ def make_trigonometric_polynomial(Xl, Xr, lboundary, rboundary, N):
     else:
         msg='Unknown right boundary condition pair {}.'.format(boundaries)
         raise NotImplementedError(msg)
-    
-    P = sum(fn(i) for i in xrange(1, N+1))
+
+    P = sum(fn(i) for i in range(1, N+1))
     P0 = sm.lambdify(x, P)
     X = np.linspace(Xl, Xr, 1000)
     m, M = np.min(P0(X)), np.max(P0(X))
     P *= 2.0/(M-m)
-    
+
     return (P, x)
 
 
@@ -726,17 +726,17 @@ class EnergyDumper(object):
         assert energy_parameter.dtype in (np.float32, np.float64)
 
         should_write = (io_params.io_leader == main_rank)
-        
+
         if should_write:
             ulp = np.finfo(energy_parameter.dtype).eps ** 4
             formatter = {'float_kind':  lambda x: '{:8.8f}'.format(x)}
 
-            if os.path.isfile(filename): 
+            if os.path.isfile(filename):
                 os.remove(filename)
             f = open(filename, 'a')
 
-            header = u'# Evolution of the power spectrum of {}'.format(energy_parameter.pretty_name.decode('utf-8'))
-            f.write(header.encode('utf-8'))
+            header = '# Evolution of the power spectrum of {}'.format(energy_parameter.pretty_name)
+            f.write(header)
             f.write('\n# with mean removed (first coefficient) and values clamped to ulp = epsilon^4 = {}'.format(ulp))
             f.write('\n# ITERATION  TIME  log10(max(POWER_SPECTRUM[1:], ulp)))')
             f.flush()
@@ -751,18 +751,18 @@ class EnergyDumper(object):
         self.file = f
         self.formatter = formatter
         self.ulp = ulp
-       
+
     def update(self, simulation, wait_for):
         if not self.should_write:
             return
-        if not self.io_params.should_dump(simulation=simulation, with_last=True):
+        if not self.io_params.should_dump(simulation=simulation):
             return
         if (wait_for is not None):
             wait_for.wait()
         assert (self.file is not None)
         energy = self.energy_parameter.value[1:].astype(dtype=np.float64)
         energy = np.log10(np.maximum(energy, self.ulp))
-        values = np.array2string(energy, 
+        values = np.array2string(energy,
                 formatter=self.formatter, max_line_width=np.inf)
         values = '\n{} {} {}'.format(simulation.current_iteration, simulation.t(), values[1:-1])
         self.file.write(values)
@@ -795,6 +795,7 @@ class EnergyDumper(object):
             param = None
         return param
 
+
 class EnergyPlotter(object):
     def __init__(self, energy_parameters, io_params, fname,
             fig_title=None, axes_shape=(1,), figsize=(15,9),
@@ -812,33 +813,33 @@ class EnergyPlotter(object):
         filename = filename.replace('{fname}', fname)
         assert '{ite}' in filename, filename
         assert io_params.frequency>=0
-        
+
         if should_draw:
             fig, axes = plt.subplots(*axes_shape, figsize=figsize)
             fig.canvas.mpl_connect('key_press_event', self._on_key_press)
             fig.canvas.mpl_connect('close_event', self._on_close)
-            
+
             axes = np.asarray(axes).reshape(axes_shape)
             ax = axes[0]
-            
+
             if len(energy_parameters)==1:
-                default_fig_title = u'Energy parameter {}'.format(
-                    energy_parameters.values()[0].pretty_name.decode('utf-8'))
+                default_fig_title = 'Energy parameter {}'.format(
+                    next(iter(energy_parameters.values())).pretty_name)
             else:
-                default_fig_title = u'Energy parameters {}'.format(
-                        u' | '.join(p.pretty_name.decode('utf-8') for p in energy_parameters.values()))
+                default_fig_title = 'Energy parameters {}'.format(
+                        ' | '.join(p.pretty_name for p in energy_parameters.values()))
             fig_title = first_not_None(fig_title, default_fig_title)
             self.fig_title = fig_title
 
             xmax = 1
             lines = ()
-            for (label,p) in energy_parameters.iteritems():
+            for (label,p) in energy_parameters.items():
                 assert p.size>1
                 Ix = np.arange(1, p.size)
                 xmax = max(xmax, p.size-1)
                 line  = ax.plot(Ix, np.zeros(p.size-1), '--o', label=label, markersize=3)[0]
                 lines += (line,)
-            
+
             xmin = 1
             pmax = math.ceil(math.log(xmax, basex))
             xmax = basex**pmax if basex==2 else 1.1*xmax
@@ -859,24 +860,24 @@ class EnergyPlotter(object):
                 ax.xaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(lambda x, pos: r'$\mathbf{{{}^{{{}}}}}$'.format(basex, int(round(math.log(x, basex))))))
             ax.yaxis.set_minor_formatter(matplotlib.ticker.FuncFormatter(lambda x, pos, ax=ax: r'$^{{{}}}$'.format(int(round(math.log(x, basey))))))
             ax.yaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(lambda x, pos: r'$\mathbf{{{}^{{{}}}}}$'.format(basey, int(round(math.log(x, basey))))))
-            
+
             running = True
         else:
             fig, axes = None, None
             lines = None
             running = False
-        
-        ulp = np.finfo(np.find_common_type([], list(p.dtype 
+
+        ulp = np.finfo(np.find_common_type([], list(p.dtype
             for p in energy_parameters.values()))).eps
         ulp = 1e-4*(ulp**2)
 
         self.fig  = fig
         self.axes = axes
         self.lines = lines
-        
+
         self.filename  = filename
         self.io_params = io_params
-        
+
         # see https://stackoverflow.com/questions/8257385/automatic-detection-of-display-availability-with-matplotlib
         self.has_gui_running = hasattr(fig, 'show')
 
@@ -891,7 +892,7 @@ class EnergyPlotter(object):
     def update(self, simulation, wait_for):
         if not self.should_draw:
             return
-        if not simulation.should_dump(frequency=self.io_params.frequency, with_last=True):
+        if not self.io_params.should_dump(simulation=simulation):
            return
         if (wait_for is not None):
             wait_for.wait()
@@ -926,16 +927,16 @@ class EnergyPlotter(object):
         ymin = basey**pmin
         ymax = basey**pmax
 
-        major_yticks = tuple(basey**i for i in xrange(pmin, pmax+1, nsubint))
-        minor_yticks = tuple(basey**i for i in xrange(pmin, pmax+1, 1) if i%nsubint!=0)
+        major_yticks = tuple(basey**i for i in range(pmin, pmax+1, nsubint))
+        minor_yticks = tuple(basey**i for i in range(pmin, pmax+1, 1) if i%nsubint!=0)
 
         ax = self.axes[0]
         ax.set_ylim(ymin, ymax)
         ax.yaxis.set_ticks(major_yticks)
         ax.yaxis.set_ticks(minor_yticks, minor=True)
-        ax.set_title(u'{}\niteration={}, t={}'.format(self.fig_title, iteration, time))
+        ax.set_title('{}\niteration={}, t={}'.format(self.fig_title, iteration, time))
         ax.relim()
-    
+
     def _draw(self):
         if (not self.has_gui_running):
             return
@@ -946,7 +947,7 @@ class EnergyPlotter(object):
     def _savefig(self, iteration):
         filename = self.filename.format(ite='{:05}'.format(iteration))
         self.fig.savefig(filename, dpi=self.fig.dpi, bbox_inches='tight')
-    
+
     def _on_close(self, event):
         self.has_gui_running = False
 
@@ -955,24 +956,24 @@ class EnergyPlotter(object):
         if key == 'q':
             self.plt.close(self.fig)
             self.has_gui_running = False
-    
+
 
 if __name__ == '__main__':
     from hysop.tools.sympy_utils import round_expr
     P = make_trigonometric_polynomial(-1.0, +1.0, BoundaryCondition.HOMOGENEOUS_DIRICHLET,
                                                   BoundaryCondition.HOMOGENEOUS_NEUMANN, 10)[0]
-    print round_expr(P,2)
-    print
-    P = make_polynomial(-1.0,+1.0, BoundaryCondition.HOMOGENEOUS_NEUMANN, 
+    print(round_expr(P,2))
+    print()
+    P = make_polynomial(-1.0,+1.0, BoundaryCondition.HOMOGENEOUS_NEUMANN,
                                    BoundaryCondition.HOMOGENEOUS_DIRICHLET, 10, 2)[0]
-    print round_expr(P,2)
-    print
+    print(round_expr(P,2))
+    print()
     lboundaries = np.asarray([BoundaryCondition.HOMOGENEOUS_NEUMANN, BoundaryCondition.PERIODIC])
     rboundaries = np.asarray([BoundaryCondition.HOMOGENEOUS_DIRICHLET, BoundaryCondition.PERIODIC])
     P = make_multivariate_trigonometric_polynomial(-1.0,+1.0, lboundaries, rboundaries, (3,5))[0]
-    print round_expr(P,2)
-    print
+    print(round_expr(P,2))
+    print()
     P = make_multivariate_polynomial(-1.0,+1.0, lboundaries, rboundaries, (6,10), 2)[0]
-    print round_expr(P,2)
-    print
+    print(round_expr(P,2))
+    print()
 
diff --git a/hysop/tools/string_utils.py b/hysop/tools/string_utils.py
index dbbb1105bd0d628b9efd3ad13e1544dc0017397b..0db67a4e152365581eb77a356d65a987da004052 100644
--- a/hysop/tools/string_utils.py
+++ b/hysop/tools/string_utils.py
@@ -20,13 +20,13 @@ def prepend(string, prefix):
         else:
             lines.append(s)
     return '\n'.join(lines)
-    
+
 def vprint_banner(msg, c='*', spacing=False, at_border=0):
     """
     Print a message preceded and succeded by separation lines.
     """
     msg = msg.split('\n')
-    maxlen  = max(len(m) for m in msg) 
+    maxlen  = max(len(m) for m in msg)
     fulllen = maxlen + 2*at_border
     if spacing:
         vprint()
@@ -48,23 +48,22 @@ def framed_str(title, msg, c='=', at_border=2):
     title  = c*at_border + title + c*at_border
     header = title + c*max(0, length-len(title))
     footer = c*len(header)
-    return u'{}\n{}\n{}'.format(header, msg, footer)
+    return '{}\n{}\n{}'.format(header, msg, footer)
 
 def strlen(s):
     """Like length but replace unicode characters by space before applying len()"""
-    #res = len(s.decode('utf-8'))
     return len(s)
 
 def multiline_split(strdata, maxlen, split_sep, replace, newline_prefix=None):
     """
-    Utility function to split one line of a column representation 
+    Utility function to split one line of a column representation
     of string data into smaller pieces:
 
-    Input data (strdata): 
+    Input data (strdata):
        (s0 s1 s2 s3)
-    
+
     Output data:
-      [ (s0.0, s1.0, s2.0, s3.0), 
+      [ (s0.0, s1.0, s2.0, s3.0),
         (s0.1, ----, s2.1, s3.1),
         (----, ----, s2.2, ----),
         (----, ----, s2.3, ----)  ]
@@ -75,9 +74,9 @@ def multiline_split(strdata, maxlen, split_sep, replace, newline_prefix=None):
         replace:   replacement when the string is too short (here ----)
         newline_prefix: prefix for each newline split, per column
 
-    All string inputs can be of type unicode or str.
+    All string inputs can be of type str.
     """
-    sstr = (str, unicode)
+    sstr = str
     check_instance(strdata, tuple, values=sstr)
     ndata = len(strdata)
 
@@ -99,24 +98,24 @@ def multiline_split(strdata, maxlen, split_sep, replace, newline_prefix=None):
         else:
             split_sep[i] = ()
     split_sep = tuple(split_sep)
-    
+
     splitted_data = []
     for (s, ml, ss, nlp) in zip(strdata, maxlen, split_sep, newline_prefix):
         if (ml is None) or strlen(s)<ml:
             data = [s]
         else:
-            s = s.encode('utf-8')
+            s = s
             split = [s]
             for sep in ss:
                 split = list(y+(sep if (i!=len(x.split(sep))-1) else '')
-                             for x in split 
+                             for x in split
                              for (i,y) in enumerate(x.split(sep)))
             data = []
             s=''
             while split:
-                while split and (strlen(s.decode('utf-8')) < ml):
+                while split and (strlen(s) < ml):
                     s += split.pop(0)
-                data.append(s.decode('utf-8'))
+                data.append(s)
                 s=nlp
         splitted_data.append(data)
 
@@ -124,12 +123,12 @@ def multiline_split(strdata, maxlen, split_sep, replace, newline_prefix=None):
     if (nsplits==1):
         return [strdata]
 
-    for (i,x) in enumerate(splitted_data): 
+    for (i,x) in enumerate(splitted_data):
         assert nsplits>=len(x)
         splitted_data[i] += [replace[i]]*(nsplits-len(x))
 
     data = []
-    for i in xrange(nsplits):
+    for i in range(nsplits):
         data.append(type(strdata)(x[i] for x in splitted_data))
     strdata = data
 
diff --git a/hysop/tools/sympy_utils.py b/hysop/tools/sympy_utils.py
index 0b7d861fd9110af6787be6d256215a4a250369ec..6a9ef81747495312837e765e368c5d7794c08a0b 100644
--- a/hysop/tools/sympy_utils.py
+++ b/hysop/tools/sympy_utils.py
@@ -1,26 +1,27 @@
-
-from hysop.deps import np, sm, copy
-from hysop.tools.types import first_not_None, check_instance, to_tuple
+import copy
+import numpy as np
+import sympy as sm
 
 from sympy.utilities import group
 from sympy.printing.str import StrPrinter, StrReprPrinter
 from sympy.printing.latex import LatexPrinter
-    
+
+from hysop.tools.types import first_not_None, check_instance, to_tuple
+
 # unicode subscripts for decimal numbers, signs and parenthesis
-decimal_subscripts  = tuple('\u208{}'.format(i).decode('unicode-escape') for i in xrange(10))
-decimal_exponents   = (u'\u2070', u'\u00b9', u'\u00b2', u'\u00b3')
-decimal_exponents  += tuple('\u207{}'.format(i).decode('unicode-escape') for i in xrange(4,10))
-greak = tuple('\u{:04x}'.format(i).decode('unicode-escape') for i in xrange(945, 970))
-Greak = tuple('\u{:04x}'.format(i).decode('unicode-escape') for i in xrange(913, 938))
-signs = (u'\u208a',u'\u208b')
-parenthesis = (u'\u208d', u'\u208e')
-partial = u'\u2202'
-nabla = u'\u2207'
-xsymbol = u'x'
+decimal_subscripts  = '₀₁₂₃₄₅₆₇₈₉'
+decimal_exponents   = '⁰¹²³⁴⁵⁶⁷⁸⁹'
+greak = 'αβγδεζηθικλμνξοπρςστυφχψω'
+Greak = 'ΑΒΓΔΕΖΗΘΙΚΛΜΝΞΟΠΡ΢ΣΤΥΦΧΨΩ'
+signs = 'â‚Šâ‚‹'
+parenthesis = '₍₎'
+partial = '∂'
+nabla = '∇'
+xsymbol = 'x'
 freq_symbol = greak[12] # nu
 
 def round_expr(expr, num_digits=3):
-    return expr.xreplace({n : round(n, num_digits) for n in 
+    return expr.xreplace({n : round(n, num_digits) for n in
         expr.atoms(sm.Float).union(expr.atoms(sm.Rational)).difference(expr.atoms(sm.Integer))})
 def truncate_expr(expr, maxlen=80):
     assert maxlen>=3
@@ -35,19 +36,18 @@ def truncate_expr(expr, maxlen=80):
 
 class CustomStrPrinter(StrPrinter):
     def _print_Derivative(self, expr):
-        _partial = partial.encode('utf-8')
-        syms = list(reversed(expr.variables)) 
+        syms = list(reversed(expr.variables))
         nvars = len(syms)
-                
+
         if isinstance(expr.expr, (Symbol,Dummy,AppliedUndef)):
             content = self._print(expr.expr)
         else:
             content = '[{}]'.format(self._print(expr.expr))
-        
-        prefix = '{}{}{}/{}'.format(_partial, exponent(nvars).encode('utf-8') if nvars>1 else '', 
-                content, _partial)
+
+        prefix = '{}{}{}/{}'.format(partial, exponent(nvars) if nvars>1 else '',
+                content, partial)
         for (sym, num) in group(syms, multiple=False):
-            prefix += '{}{}'.format(sym, exponent(num).encode('utf-8') if num>1 else '')
+            prefix += '{}{}'.format(sym, exponent(num) if num>1 else '')
         return prefix
 
 class CustomStrReprPrinter(StrReprPrinter):
@@ -72,25 +72,26 @@ def enable_pretty_printing():
     sm.Basic.__str__ = sstr
 
 class SymbolicBase(object):
-    def __new__(cls, name, var_name=None, latex_name=None, 
+    def __new__(cls, name, var_name=None, latex_name=None,
             pretty_name=None, **kwds):
-        if isinstance(name, unicode):
-            name = name.encode('utf-8')
-        if isinstance(pretty_name, unicode):
-            pretty_name = pretty_name.encode('utf-8')
-        if isinstance(latex_name, unicode):
-            latex_name = latex_name.encode('utf-8')
         check_instance(name, str)
         check_instance(var_name, str, allow_none=True)
         check_instance(latex_name, str, allow_none=True)
         check_instance(pretty_name, str, allow_none=True)
-        obj = super(SymbolicBase, cls).__new__(cls, name=name, **kwds)
+        try:
+            obj = super(SymbolicBase, cls).__new__(cls, name=name, **kwds)
+        except TypeError:
+            obj = super(SymbolicBase, cls).__new__(cls, **kwds)
         obj._name =  name
         obj._var_name    = first_not_None(var_name, name)
         obj._latex_name  = first_not_None(latex_name, name)
         obj._pretty_name = first_not_None(pretty_name, name)
         return obj
-    
+
+    def __init__(self, name, var_name=None, latex_name=None,
+            pretty_name=None, **kwds):
+        pass
+
     @property
     def varname(self):
         return self._var_name
@@ -129,14 +130,17 @@ class Dummy(SymbolicBase, sm.Dummy):
     """Tag for hysop dummy symbolic variables."""
     pass
 
-class UndefinedFunction(SymbolicBase, sm.function.UndefinedFunction):
+from sympy.core.function import UndefinedFunction as SympyUndefinedFunction
+from sympy.core.function import AppliedUndef as SympyAppliedUndef
+
+class UndefinedFunction(SymbolicBase, SympyUndefinedFunction):
     """
     Tag for hysop (unapplied) undefined functions.
     This is a metaclass.
     """
     pass
 
-class AppliedUndef(sm.function.AppliedUndef):
+class AppliedUndef(SympyAppliedUndef):
     """Tag for hysop applied undefined functions."""
 
     def _latex(self, printer):
@@ -148,10 +152,10 @@ class AppliedUndef(sm.function.AppliedUndef):
     def _sympystr(self, printer):
         return self._pretty_name
     #def _pretty(self, printer):
-        #return '{}({})'.format(self._pretty_name, 
+        #return '{}({})'.format(self._pretty_name,
                                 #','.join(printer._print(a) for a in self.args))
     #def _sympystr(self, printer):
-        #return '{}({})'.format(self._pretty_name, 
+        #return '{}({})'.format(self._pretty_name,
                                 #','.join(printer._print(a) for a in self.args))
 
 def subscript(i, with_sign=False, disable_unicode=False):
@@ -167,7 +171,7 @@ def subscript(i, with_sign=False, disable_unicode=False):
     if disable_unicode:
         out = snumber
     else:
-        out =u''
+        out =''
         for s in snumber:
             if s in decimals:
                 out += decimal_subscripts[int(s)]
@@ -189,7 +193,7 @@ def exponent(i, with_sign=False):
         s0 = snumber[0]
         if s0 in decimals:
             snumber = '+'+snumber
-    out = u''
+    out = ''
     for s in snumber:
         if s in decimals:
             out += decimal_exponents[int(s)]
@@ -210,11 +214,11 @@ def subscripts(ids,sep,with_sign=False,with_parenthesis=False,prefix='',disable_
     if with_parenthesis:
         lparen = '(' if disable_unicode else parenthesis[0]
         rparen = ')' if disable_unicode else parenthesis[1]
-        base = '{}{}{}{}' if disable_unicode else u'{}{}{}{}'
-        return base.format(prefix,lparen,sep.join([subscript(i,with_sign,disable_unicode) 
+        base = '{}{}{}{}' if disable_unicode else '{}{}{}{}'
+        return base.format(prefix,lparen,sep.join([subscript(i,with_sign,disable_unicode)
             for i in ids]),rparen)
     else:
-        base = '{}{}' if disable_unicode else u'{}{}'
+        base = '{}{}' if disable_unicode else '{}{}'
         return base.format(prefix,sep.join([subscript(i,with_sign,disable_unicode) for i in ids]))
 
 def exponents(ids,sep,with_sign=False,with_parenthesis=False,prefix=''):
@@ -224,23 +228,23 @@ def exponents(ids,sep,with_sign=False,with_parenthesis=False,prefix=''):
     """
     ids = to_tuple(ids)
     if with_parenthesis:
-        return u'{}{}{}{}'.format(prefix,parenthesis[0],sep.join([exponent(i,with_sign) for i in ids]),parenthesis[1])
+        return '{}{}{}{}'.format(prefix,parenthesis[0],sep.join([exponent(i,with_sign) for i in ids]),parenthesis[1])
     else:
-        return u'{}{}'.format(prefix,sep.join([exponent(i,with_sign) for i in ids]))
+        return '{}{}'.format(prefix,sep.join([exponent(i,with_sign) for i in ids]))
 
 def tensor_symbol(prefix,shape,origin=None,mask=None,
         sep=None,with_parenthesis=False,force_sign=False):
     """
     Generate a np.ndarray of sympy.Symbol.
-    Each of the symbol has given prefix and subscripts are 
+    Each of the symbol has given prefix and subscripts are
     taken from specified origin if specified or else in matrix/tensor notation.
     Other parameters handles subscripts style, see the subscripts() function.
-    
+
     It also returns all generated Symbols as a list.
     """
     origin = np.asarray(origin) if origin is not None else np.asarray([0]*len(shape))
-    sep = sep if sep is not None else u','
-    
+    sep = sep if sep is not None else ','
+
     with_sign = force_sign or ((origin>0).any() and len(shape)>1)
     tensor = np.empty(shape=shape,dtype=object)
     for idx in np.ndindex(*shape):
@@ -248,7 +252,7 @@ def tensor_symbol(prefix,shape,origin=None,mask=None,
             ids = idx-origin
             sname = subscripts(ids,sep,with_sign=with_sign,
                     with_parenthesis=with_parenthesis,prefix=prefix)
-            tensor[idx] = sm.Symbol(sname.encode('utf-8'), real=True)
+            tensor[idx] = sm.Symbol(sname, real=True)
         else:
             tensor[idx] = 0
     tensor_vars = tensor.ravel().tolist()
@@ -291,7 +295,7 @@ def non_eval_xreplace(expr, rule):
         if altered:
             return expr.func(*args, evaluate=False)
     return expr
-       
+
 # Convert powers to mult. in polynomial expressions V
 # Example: x^3 -> x*x*x
 def remove_pows(expr):
@@ -305,7 +309,7 @@ def remove_pows(expr):
 
 def evalf_str(x,n,literal='',significant=True):
     """
-    Call evalf on x up to n-th decimal and removes zeros 
+    Call evalf on x up to n-th decimal and removes zeros
     if significant is set.
     """
     x = x.evalf(n).__str__()
@@ -378,9 +382,9 @@ def get_derivative_variables(expr):
     if isinstance(expr.args[1], containers.Tuple):
         # sympy >= 1.2 arguments are (variable, count)
         # args = [(x0,3), (x1,1)]
-        _vars = tuple(v[0] for v in expr.args[1:] for _ in xrange(v[1]))
+        _vars = tuple(v[0] for v in expr.args[1:] for _ in range(v[1]))
     else:
-        # sympy < 1.2 arguments are repeated 
+        # sympy < 1.2 arguments are repeated
         # args=[x0, x0, x0, x1]
         _vars = tuple(expr.args[1:])
     return _vars
diff --git a/hysop/tools/transposition_states.py b/hysop/tools/transposition_states.py
index 7a0ffb3a12dc0efb4f246e8e5b3c9b97cb933d86..46416816c6965d3e8f0d13b16faa6302275b277f 100644
--- a/hysop/tools/transposition_states.py
+++ b/hysop/tools/transposition_states.py
@@ -1,7 +1,7 @@
+import itertools as it
 
-from hysop.deps import it
 from hysop.tools.types import check_instance
-    
+
 DirectionLabels = 'XYZABCDEFGHIJKLMNOPQRSTUVW'
 
 class TranspositionStateType(type):
@@ -24,13 +24,13 @@ class TranspositionStateType(type):
         if dim not in self.transposition_enums:
             self.transposition_enums[dim] = self.__build_enum(dim)
         return self.transposition_enums[dim]
-    
+
     def __build_cls(self, dim):
         assert dim not in self.transposition_states
         msg='Max dimension is {}.'
         msg=msg.format(len(DirectionLabels))
         assert dim<=len(DirectionLabels), msg
-        
+
         cls_name    = 'TranspositionState{}D'.format(dim)
         cls_bases   = (TranspositionState,)
         cls_methods = {}
@@ -52,11 +52,11 @@ class TranspositionStateType(type):
             return self.__get_enum(dim)
         def __all_axes(cls):
             """Return an iterator on all possible permutations."""
-            return it.permutations(xrange(dim), dim)
+            return it.permutations(range(dim), dim)
         def __filter_axes(cls, predicate):
             """Return a filtered iterator on all possible permutations."""
-            return it.ifilter(predicate, cls.all_axes())
-        
+            return tuple(filter(predicate, cls.all_axes()))
+
         cls_methods['dimension']        = classmethod(__dimension)
         cls_methods['default_axes']     = classmethod(__default_axes)
         cls_methods['default']          = classmethod(__default)
@@ -71,7 +71,7 @@ class TranspositionStateType(type):
         assert dim not in self.transposition_enums
         msg='enum needs to generate {} values.'
         msg=msg.format(dim**dim)
-        assert dim<=5, msg 
+        assert dim<=5, msg
 
         labels = DirectionLabels[:dim]
         entries = it.permutations(labels, dim)
@@ -80,7 +80,7 @@ class TranspositionStateType(type):
         enum = EnumFactory.create('TranspositionState{}DEnum'.format(dim), entries,
                 base_cls=TranspositionStateEnum)
         return enum
-    
+
     def axes_to_tstate(self, axes):
         """
         Convert an axes tuple to an instance of TranspositionState
@@ -92,7 +92,7 @@ class TranspositionStateType(type):
 
     def __getattr__(self, name):
         """
-        Generate a transposition state instance 
+        Generate a transposition state instance
         if attribute name matches any permutation.
         Example: TranspositionState2D.XY
         """
@@ -105,7 +105,7 @@ class TranspositionStateType(type):
                 axes = tuple( labels.find(a) for a in name )
                 return self.axes_to_tstate(axes=axes)
         raise AttributeError
-    
+
     def __getitem__(self, dim):
         """Alias for __get_cls()"""
         return self.__get_cls(dim)
@@ -114,9 +114,8 @@ class TranspositionStateEnum(object):
     """TranspositionStateEnum base class."""
     pass
 
-class TranspositionState(object):
+class TranspositionState(object, metaclass=TranspositionStateType):
     """TranspositionState base class."""
-    __metaclass__ = TranspositionStateType
 
     __slots__ = ('_axes',)
 
@@ -143,7 +142,7 @@ class TranspositionState(object):
         return (self._axes != other._axes)
     def __hash__(self):
         return hash(self._axes)
-    
+
     def __str__(self):
         return ''.join(DirectionLabels[self.dim-i-1] for i in self.axes)
     def __repr__(self):
diff --git a/hysop/tools/types.py b/hysop/tools/types.py
index 0fa5fea24ceaa47b3471e7c3741e980d0a6ee864..fef0a85fe806c81228a13a61a494dd46fd0e4952 100644
--- a/hysop/tools/types.py
+++ b/hysop/tools/types.py
@@ -1,5 +1,5 @@
-from hysop.deps import np
-from collections import Iterable
+from collections.abc import Iterable
+import numpy as np
 
 class InstanceOf(object):
     def __init__(self, cls):
@@ -67,12 +67,10 @@ def check_instance(val, cls, allow_none=False,
                     ', ...' if (len(val)>5) else '')
         except:
             types = type(val)
-        if isinstance(val, unicode):
-            val = val.encode('utf-8')
         msg='\nFATAL ERROR: Type did not match any of types {} for the following value:\n{}\n'
         msg+='which is of type {}.\n'
         msg=msg.format(all_cls, val, types)
-        print msg
+        print(msg)
 
     if not any((isinstance(val, cls) for cls in allcls)):
         msg = 'Expected an instance of {} but got a value of type {}.'
diff --git a/hysop/tools/units.py b/hysop/tools/units.py
index f32b7314514fdc2f3e21b170560fad5d31794b6e..a5b4436ead0834b788c6cf0f32310f29e8b332cc 100644
--- a/hysop/tools/units.py
+++ b/hysop/tools/units.py
@@ -1,4 +1,4 @@
-from hysop.deps import np
+import numpy as np
 
 def binary_unit2str(b,unit,rounded):
     try:
@@ -23,10 +23,10 @@ def decimal_unit2str(b,unit,rounded):
         return '{}{}{}'.format(round(b/10.0**(3*i),rounded),prefix[i],unit)
     except:
         return '{}{}'.format(b,unit)
-     
+
 
 def unit2str(b,unit,decimal,rounded=2):
-    if not isinstance(b, (int,long,float,np.integer,np.floating)):
+    if not isinstance(b, (int,float,np.integer,np.floating)):
         return b
 
     if decimal:
@@ -51,7 +51,7 @@ def freq2str(freq,decimal=True,rounded=2):
 
 
 def time2str(t, on_zero=None, on_none=None):
-    if not isinstance(t, (float,int,long,np.dtype)):
+    if not isinstance(t, (float,int,np.dtype)):
         return t
     if t is None:
         return (on_none or '{:5d}'.format(-1))
diff --git a/hysop/tools/variable.py b/hysop/tools/variable.py
index 3773fca139a17b016256dfad29f3aa6aea2149a1..5e411dac8fe436d30395d0aea540e80cba78e0b2 100644
--- a/hysop/tools/variable.py
+++ b/hysop/tools/variable.py
@@ -6,9 +6,8 @@ from hysop.tools.decorators import debug
 
 Variable = EnumFactory.create('Variable', ['DISCRETE_FIELD', 'PARAMETER'])
 
-class VariableTag(object):
+class VariableTag(object, metaclass=ABCMeta):
     """Tag for HySoP variables."""
-    __metaclass__ = ABCMeta
 
     @debug
     def __new__(cls, variable_kind=None, **kwds):
@@ -16,7 +15,7 @@ class VariableTag(object):
         obj = super(VariableTag, cls).__new__(cls, **kwds)
         obj.__variable_kind = variable_kind
         return obj
-    
+
     @debug
     def __init__(self, variable_kind=None, **kwds):
         check_instance(variable_kind, Variable, allow_none=True)
diff --git a/hysop/tools/warning.py b/hysop/tools/warning.py
index ee8bb9246fb993ce85e8d1ee845f8cd7b7d75aa0..f36c0edb70e5496aa2be906cc737b755e68f4d59 100644
--- a/hysop/tools/warning.py
+++ b/hysop/tools/warning.py
@@ -1,3 +1,4 @@
+import warnings
 
 class HysopWarning(RuntimeWarning):
     """
@@ -30,12 +31,11 @@ class HysopCacheWarning(HysopWarning):
     pass
 
 def configure_hysop_warnings(action):
-    """ 
+    """
     Configure hysop warnings.
     Action can be error, ignore, always, default, module, once.
     See https://docs.python.org/2/library/warnings.html#warning-filter.
     """
-    from hysop.deps import warnings
     warnings.filterwarnings(action=action, category=HysopWarning)
     warnings.filterwarnings(action=action, category=HysopDeprecationWarning)
 
diff --git a/hysop/topology/cartesian_descriptor.py b/hysop/topology/cartesian_descriptor.py
index 0b8b86a15da94b0d868d1f440909c27dc55bd499..d60fba98b4f2151d27d974812e1a7f5ca5d3c0af 100644
--- a/hysop/topology/cartesian_descriptor.py
+++ b/hysop/topology/cartesian_descriptor.py
@@ -1,3 +1,5 @@
+import hashlib, copy
+import numpy as np
 
 from hysop.tools.types import check_instance, to_tuple
 from hysop.topology.topology_descriptor import TopologyDescriptor
@@ -6,7 +8,6 @@ from hysop.tools.parameters import CartesianDiscretization
 from hysop.constants import Backend, BoundaryCondition
 from hysop.fields.continuous_field import Field
 from hysop.tools.numpywrappers import npw
-from hysop.deps import np, hashlib, copy
 
 class CartesianTopologyDescriptor(TopologyDescriptor):
     """
@@ -19,38 +20,38 @@ class CartesianTopologyDescriptor(TopologyDescriptor):
     def __init__(self, mpi_params, domain, backend, cartesian_discretization, **kwds):
         """
         Initialize a CartesianTopologyDescriptor.
-        
+
         Notes
         -----
         kwds allows for backend specific variables.
         CartesianTopologyDescriptor is immutable.
         """
-        super(CartesianTopologyDescriptor, self).__init__(mpi_params=mpi_params, 
+        super(CartesianTopologyDescriptor, self).__init__(mpi_params=mpi_params,
                 domain=domain, backend=backend, **kwds)
-        
+
         check_instance(cartesian_discretization, CartesianDiscretization)
-        
-        # check cartesian_discretization 
+
+        # check cartesian_discretization
         if (cartesian_discretization.ghosts > 0).any():
             msg='No ghost allowed for a topology descriptor.'
             raise ValueError(msg)
-        
+
         global_resolution  = cartesian_discretization.global_resolution
         grid_resolution    = cartesian_discretization.grid_resolution
         lboundaries        = cartesian_discretization.lboundaries
         rboundaries        = cartesian_discretization.rboundaries
-        
+
         check_instance(grid_resolution, np.ndarray, size=domain.dim, minval=2)
         check_instance(global_resolution, np.ndarray, size=domain.dim, minval=2)
-        check_instance(lboundaries, npw.ndarray, dtype=object, 
+        check_instance(lboundaries, npw.ndarray, dtype=object,
                 size=domain.dim, values=BoundaryCondition,
                 allow_none=True)
-        check_instance(rboundaries, npw.ndarray, dtype=object, 
-                size=domain.dim, values=BoundaryCondition, 
+        check_instance(rboundaries, npw.ndarray, dtype=object,
+                size=domain.dim, values=BoundaryCondition,
                 allow_none=True)
-        
-        is_lperiodic = (lboundaries==BoundaryCondition.PERIODIC) 
-        is_rperiodic = (rboundaries==BoundaryCondition.PERIODIC) 
+
+        is_lperiodic = (lboundaries==BoundaryCondition.PERIODIC)
+        is_rperiodic = (rboundaries==BoundaryCondition.PERIODIC)
 
         assert all((grid_resolution + is_lperiodic) == global_resolution)
 
@@ -64,12 +65,12 @@ class CartesianTopologyDescriptor(TopologyDescriptor):
 
         self._cartesian_discretization = cartesian_discretization
         self._space_step  = space_step
-    
+
     @property
     def global_resolution(self):
         """Get the global global_resolution of the discretization (logical grid_size)."""
         return self._cartesian_discretization.global_resolution
-    
+
     @property
     def grid_resolution(self):
         """Get the global grid resolution of the discretization (effective grid size)."""
@@ -79,18 +80,18 @@ class CartesianTopologyDescriptor(TopologyDescriptor):
     def lboundaries(self):
         """Get the left boundaries."""
         return self._cartesian_discretization.lboundaries
-    
+
     @property
     def rboundaries(self):
         """Get the left boundaries."""
         return self._cartesian_discretization.rboundaries
-    
+
     @property
     def boundaries(self):
         """Get left and right boundaries."""
         return (self._cartesian_discretization.lboundaries,
                 self._cartesian_discretization.rboundaries)
-    
+
     @property
     def space_step(self):
         """Get the space step."""
@@ -111,20 +112,22 @@ class CartesianTopologyDescriptor(TopologyDescriptor):
         return self.match(other)
     def __ne__(self, other):
         return self.match(other, invert=True)
+    def __lt__(self, other):
+        return (self != other) and (str(self) < str(other))
 
     def __hash__(self):
         # hash(super(...)) does not work as expected so be call __hash__ directly
-        h = super(CartesianTopologyDescriptor,self).__hash__() 
+        h = super(CartesianTopologyDescriptor,self).__hash__()
         h ^= hash(self._cartesian_discretization)
         return h
 
     def __str__(self):
         return ':CartesianTopologyDescriptor: backend={}, domain={}, grid_resolution={}, bc=[{}]'.format(
                     self.backend, self.domain.full_tag,
-                    self.grid_resolution, 
+                    self.grid_resolution,
                      ','.join(('{}/{}'.format(
                          str(lb).replace('HOMOGENEOUS_','')[:3],
-                         str(rb).replace('HOMOGENEOUS_','')[:3]) 
+                         str(rb).replace('HOMOGENEOUS_','')[:3])
                          for (lb,rb) in zip(*self.boundaries))))
 
     @classmethod
@@ -152,17 +155,17 @@ class CartesianTopologyDescriptor(TopologyDescriptor):
                     msg+='they will be automatically determined from continuous fields.'
                     raise ValueError(msg)
                 global_resolution = handle.resolution
-            else: 
+            else:
                 global_resolution = handle
 
 
             cartesian_discretization = CartesianDiscretization(resolution=global_resolution,
                     lboundaries=field.lboundaries_kind, rboundaries=field.rboundaries_kind,
                     ghosts=None)
-            
+
             kwds.setdefault('mpi_params', operator.mpi_params)
             kwds.setdefault('domain', field.domain)
-            return CartesianTopologyDescriptor(backend=backend, 
+            return CartesianTopologyDescriptor(backend=backend,
                     cartesian_discretization = cartesian_discretization,
                     **kwds)
         elif isinstance(handle, CartesianTopologyDescriptor):
@@ -171,15 +174,14 @@ class CartesianTopologyDescriptor(TopologyDescriptor):
             # handle is a CartesianTopology instance, ghosts and boundary conditions
             # can be imposed freely by user here.
             return handle
-    
+
     def choose_topology(self, known_topologies, **kwds):
         """
         Find optimal topology parameters from known_topologies.
         If None is returned, create_topology will be called instead.
         """
         if known_topologies:
-            ordered_topologies = sorted(known_topologies, 
-                                            key=lambda topo: sum(topo.ghosts))
+            ordered_topologies = tuple(sorted(known_topologies, key=lambda topo: sum(topo.ghosts)))
             return ordered_topologies[0]
         else:
             return None
@@ -191,19 +193,19 @@ class CartesianTopologyDescriptor(TopologyDescriptor):
         by operators on variables and solved during operator's
         method get_field_requirements().
         """
-        discretization = CartesianDiscretization(resolution=self.grid_resolution, 
-                                                 lboundaries=self.lboundaries, 
+        discretization = CartesianDiscretization(resolution=self.grid_resolution,
+                                                 lboundaries=self.lboundaries,
                                                  rboundaries=self.rboundaries,
                                                  ghosts=ghosts)
-        return CartesianTopology(domain=self.domain, 
-                                 discretization=discretization, 
+        return CartesianTopology(domain=self.domain,
+                                 discretization=discretization,
                                  mpi_params=self.mpi_params,
-                                 cutdirs=cutdirs, 
-                                 backend=self.backend, 
+                                 cutdirs=cutdirs,
+                                 backend=self.backend,
                                  **self.extra_kwds)
 
 
-CartesianTopologyDescriptors = (CartesianTopology, CartesianTopologyDescriptor, CartesianDiscretization, 
+CartesianTopologyDescriptors = (CartesianTopology, CartesianTopologyDescriptor, CartesianDiscretization,
                                 tuple, list, np.ndarray, type(None))
 """
 Instance of those types can be used to create a CartesianTopologyDescriptor.
diff --git a/hysop/topology/cartesian_topology.py b/hysop/topology/cartesian_topology.py
index 8f75ca1d5f42bc64a423c28edec7f212604e6d44..5b935b8e53caa3625ab8c97c744d2b81bb4ee318 100644
--- a/hysop/topology/cartesian_topology.py
+++ b/hysop/topology/cartesian_topology.py
@@ -1,4 +1,5 @@
-from hysop.deps import warnings
+import warnings
+
 from hysop.topology.topology import Topology, TopologyState, TopologyView, TopologyWarning
 from hysop.constants import np, math, Backend, MemoryOrdering
 from hysop.constants import HYSOP_ORDER, BoundaryCondition, HYSOP_INTEGER
@@ -82,9 +83,9 @@ class CartesianTopologyState(TopologyState):
     def _set_memory_order(self, memory_order):
         """Set the current memory_order as a tuple of hysop.constants.memory_order."""
         memory_order = first_not_None(memory_order, MemoryOrdering.C_CONTIGUOUS)
-        if (memory_order is 'c'):
+        if (memory_order == 'c'):
             memory_order = MemoryOrdering.C_CONTIGUOUS
-        elif (memory_order is 'f'):
+        elif (memory_order == 'f'):
             memory_order = MemoryOrdering.F_CONTIGUOUS
         assert memory_order in (MemoryOrdering.C_CONTIGUOUS,
                                 MemoryOrdering.F_CONTIGUOUS), memory_order
@@ -194,6 +195,10 @@ class CartesianTopologyView(TopologyView):
 
     __slots__ = ('_mesh_view', '_domain_view', '_topology', '_topology_state')
 
+    @debug
+    def __init__(self, topology_state, topology=None, **kwds):
+        super(CartesianTopologyView, self).__init__(topology_state=topology_state, topology=topology, **kwds)
+
     @debug
     def __new__(cls, topology_state, topology=None, **kwds):
         """
@@ -345,7 +350,7 @@ class CartesianTopologyView(TopologyView):
         return self._proc_transposed(self._topology._is_distributed)
 
     def _get_is_periodic(self):
-        """
+        r"""
         MPI communicator grid periodicity.
             is_periodic[dir] = True means that the MPI grid is periodic along dir.
             /!\ This is not equivalent to domain periodicity, as a periodic
@@ -476,13 +481,30 @@ class CartesianTopology(CartesianTopologyView, Topology):
     CartesianTopology topologies defined on cartesian meshes which communicates
     accross processes through a MPI CartesianTopology communicator.
     """
+    @debug
+    def __init__(self, domain, discretization, mpi_params=None,
+                 cart_dim=None, cart_shape=None,
+                 is_periodic=None, cutdirs=None,
+                 mesh=None, cartesian_topology=None,
+                 cl_env=None, **kwds):
+
+        super(CartesianTopology, self).__init__(
+            mpi_params=mpi_params,
+            domain=domain,
+            discretization=discretization,
+            cart_dim=cart_dim, cart_size=None, proc_shape=None,
+            is_periodic=is_periodic, is_distributed=None,
+            cartesian_topology=id(cartesian_topology), mesh=hash(mesh),
+            topology_state=None, cl_env=cl_env,
+            **kwds)
+
     @debug
     def __new__(cls, domain, discretization, mpi_params=None,
                 cart_dim=None, cart_shape=None,
                 is_periodic=None, cutdirs=None,
                 mesh=None, cartesian_topology=None,
                 cl_env=None, **kwds):
-        """
+        r"""
         Initializes or get an existing CartesianTopology topology.
 
         Parameters
@@ -616,14 +638,17 @@ class CartesianTopology(CartesianTopologyView, Topology):
         check_instance(mpi_params, MPIParams)
         check_instance(domain, Box)
         check_instance(discretization, CartesianDiscretization)
-        check_instance(cart_dim,  int)
-        check_instance(cart_size, int)
+        check_instance(cart_dim,  (int, np.integer))
+        check_instance(cart_size, (int, np.integer))
         check_instance(proc_shape,     np.ndarray, dtype=HYSOP_INTEGER)
         check_instance(is_periodic,    np.ndarray, dtype=bool)
         check_instance(is_distributed, np.ndarray, dtype=bool)
         check_instance(cartesian_topology, MPI.Cartcomm, allow_none=True)
         check_instance(mesh, CartesianMesh, allow_none=True)
 
+        cart_dim = int(cart_dim)
+        cart_size = int(cart_size)
+
         npw.set_readonly(proc_shape, is_periodic, is_distributed)
 
         topology_state = CartesianTopologyState(dim=domain.dim)
@@ -801,7 +826,7 @@ class CartesianTopology(CartesianTopologyView, Topology):
                 assert dim <= domain_dim, 'cutdirs is not of size domain_dim'
                 cart_shape = npw.asintegerarray(MPI.Compute_dims(parent_size, dim))
                 cls._optimize_shape(cart_shape)
-                assert np.sum(cutdirs > 0) == cart_shape.size,\
+                assert sum(cutdirs) == cart_shape.size,\
                     "Created shape {} doesnt respect specified cutdirs {}".format(
                         np.sum(cutdirs > 0), cart_shape.size)
                 shape[is_distributed > 0] = cart_shape
diff --git a/hysop/topology/topology.py b/hysop/topology/topology.py
index ef4ca47f1ff3f02b96a2052a263806cf9bf99c8b..2dd9d81297c57845065f9a0a7bda3c19cfdb42d9 100644
--- a/hysop/topology/topology.py
+++ b/hysop/topology/topology.py
@@ -30,7 +30,7 @@ class TopologyWarning(HysopWarning):
     pass
 
 
-class TopologyState(TaggedObject):
+class TopologyState(TaggedObject, metaclass=ABCMeta):
     """
     Abstract base to define TopologyStates.
 
@@ -40,10 +40,13 @@ class TopologyState(TaggedObject):
     A TopologyState may for example include a transposition state like for
     CartesianTopology topologies.
     """
-    __metaclass__ = ABCMeta
 
     __slots__ = ('_is_read_only',)
 
+    @debug
+    def __new__(cls, is_read_only, **kwds):
+        return super(TopologyState, cls).__new__(cls, tag_prefix='ts', **kwds)
+
     @debug
     def __init__(self, is_read_only, **kwds):
         """Initialize a topology state."""
@@ -93,7 +96,7 @@ class TopologyState(TaggedObject):
         return self.long_description()
 
 
-class TopologyView(TaggedObjectView):
+class TopologyView(TaggedObjectView, metaclass=ABCMeta):
     """
     Abstract base to define views on a Topology dependening on a TopologyState.
 
@@ -105,13 +108,16 @@ class TopologyView(TaggedObjectView):
     resulting in the automatic permutation of attributes when fetching the view attributes
     (global_resolution and ghosts will be transposed).
     """
-    __metaclass__ = ABCMeta
 
     __slots__ = ('_mesh_view', '_domain_view', '_topology', '_topology_state')
 
+    @debug
+    def __init__(self, topology_state, topology=None, **kwds):
+        super(TopologyView, self).__init__(obj_view=topology, **kwds)
+
     @debug
     def __new__(cls, topology_state, topology=None, **kwds):
-        """
+        r"""
         Create and initialize a TopologyView on given topology.
 
         Parameters
@@ -187,7 +193,7 @@ class TopologyView(TaggedObjectView):
         return self._topology._backend
 
     def _get_mpi_params(self):
-        """The parent MPI parameters of this topology.
+        r"""The parent MPI parameters of this topology.
             /!\ Topologies may define a sub communicator
                 CartesianTopology topologies will define a MPI.Cartcomm for example.
         """
@@ -267,7 +273,7 @@ class TopologyView(TaggedObjectView):
     task_id = property(_get_task_id)
 
 
-class Topology(RegisteredObject):
+class Topology(RegisteredObject, metaclass=ABCMeta):
     """
     Abstract base class for hysop Topologies.
 
@@ -281,8 +287,6 @@ class Topology(RegisteredObject):
     You can also find examples of topologies instanciation in test_topology.py.
     """
 
-    __metaclass__ = ABCMeta
-
     @debug
     def __new__(cls, domain, mpi_params=None, backend=Backend.HOST,
                 cl_env=None, allocator=None, queue=None, **kwds):
diff --git a/hysop/topology/topology_descriptor.py b/hysop/topology/topology_descriptor.py
index 81ac7454fd27d6deb451e0e1ee69f0ecbcacc677..13334ce338cca41817c8534601141f63c4d0afdb 100644
--- a/hysop/topology/topology_descriptor.py
+++ b/hysop/topology/topology_descriptor.py
@@ -4,7 +4,7 @@ from hysop.tools.types import check_instance
 from hysop.topology.topology import Topology, TopologyView
 from hysop.mesh.mesh import Mesh
 
-class TopologyDescriptor(object):
+class TopologyDescriptor(object, metaclass=ABCMeta):
     """
     Describes how a topology should be built.
 
@@ -12,14 +12,13 @@ class TopologyDescriptor(object):
     operator graph building and are replaced by a single unique
     topology upon initialization.
     """
-    __metaclass__ = ABCMeta
 
     __slots__ = ('_mpi_params', '_domain', '_backend', '_extra_kwds')
 
     def __init__(self, mpi_params, domain, backend, **kwds):
         """
         Initialize a TopologyDescriptor.
-        
+
         Notes
         -----
         kwds allows for backend specific variables.
@@ -49,13 +48,13 @@ class TopologyDescriptor(object):
     def _get_extra_kwds(self):
         """Get extra keyword arguments."""
         return dict(self._extra_kwds)
-        
+
     mpi_params  = property(_get_mpi_params)
     domain      = property(_get_domain)
     backend     = property(_get_backend)
     extra_kwds  = property(_get_extra_kwds)
     dim         = property(_get_dim)
-    
+
     @staticmethod
     def build_descriptor(backend, operator, field, handle, **kwds):
         """
@@ -79,7 +78,7 @@ class TopologyDescriptor(object):
             # handle is already a TopologyDescriptor, so we return it.
             return handle
         elif isinstance(handle, CartesianTopologyDescriptors):
-            return CartesianTopologyDescriptor.build_descriptor(backend, operator, 
+            return CartesianTopologyDescriptor.build_descriptor(backend, operator,
                     field, handle, **kwds)
         elif (handle is None):
             # this topology will be determined later
@@ -88,7 +87,7 @@ class TopologyDescriptor(object):
             msg='Unknown handle of class {} to build a TopologyDescriptor.'
             msg=msg.format(handle.__class__)
             raise TypeError(msg)
-    
+
     def choose_or_create_topology(self, known_topologies, **kwds):
         """
         Returns a topology that is either taken from known_topologies, a set
@@ -101,7 +100,7 @@ class TopologyDescriptor(object):
         if (topo is None):
             topo = self.create_topology(**kwds)
         return topo
-    
+
     @abstractmethod
     def choose_topology(self, known_topologies, **kwds):
         """
@@ -140,8 +139,8 @@ class TopologyDescriptor(object):
 
     @abstractmethod
     def __hash__(self):
-        h  = id(self.domain) 
-        h ^= hash(self.mpi_params) 
+        h  = id(self.domain)
+        h ^= hash(self.mpi_params)
         h ^= hash(self.backend)
         h ^= hash(self._extra_kwds)
         return h
diff --git a/hysop_examples/example_utils.py b/hysop_examples/argparser.py
similarity index 99%
rename from hysop_examples/example_utils.py
rename to hysop_examples/argparser.py
index f5a533bd6930bf26fcbd8baee01a6d6d8f44c582..b859203c298f7fd2dc359fa961b00efbef2ae385 100644
--- a/hysop_examples/example_utils.py
+++ b/hysop_examples/argparser.py
@@ -71,7 +71,8 @@ class SplitAppendAction(argparse._AppendAction):
                 msg = 'Could not convert values \'{}\' to tuple for parameter {}.'.format(values, self.dest)
                 parser.error(msg)
         if self._append:
-            items = argparse._ensure_value(namespace, self.dest, self._container())
+            items = getattr(namespace, self.dest, None)
+            items = self._container() if (items is None) else items
         else:
             items = self._container()
         if (self._container is list):
@@ -247,13 +248,14 @@ class HysopArgParser(argparse.ArgumentParser):
             dump_dirs = map(lambda ddir: os.path.abspath(ddir), dump_dirs)
             dump_dirs = filter(lambda ddir: os.path.isdir(ddir) and
                                (ddir not in ('/', '/home', '~', os.path.expanduser('~'))), dump_dirs)
+            dump_dirs = tuple(dump_dirs)
             if args.no_interactive:
                 confirm_deletion = True
             else:
                 msg = 'HySoP will clean the following directories prior to launch:'
                 for ddir in dump_dirs:
                     msg += '\n  {}'.format(ddir)
-                print msg
+                print(msg)
                 valid = {"yes": True, "y": True,
                          "no": False, "n": False,
                          '': True}
@@ -287,7 +289,7 @@ class HysopArgParser(argparse.ArgumentParser):
                     self._rmdir(ddir, 'generated_kernels', force=True)
                     self._rmdir(ddir, 'spectral', force=True)
             else:
-                print 'Deletion skipped by user.'
+                print('Deletion skipped by user.')
 
         MPI.COMM_WORLD.Barrier()
 
@@ -417,7 +419,7 @@ class HysopArgParser(argparse.ArgumentParser):
             stream_filters = [self._null_filter]
 
         self._mkdir(args.stdout)
-        with StdoutTee(args.stdout, mode='a',
+        with StdoutTee(args.stdout, mode='a', buff=-1,
                        file_filters=file_filters,
                        stream_filters=stream_filters):
             yield
@@ -433,7 +435,7 @@ class HysopArgParser(argparse.ArgumentParser):
             stream_filters = [self._null_filter]
 
         self._mkdir(args.stderr)
-        with StderrTee(args.stderr, mode='a',
+        with StderrTee(args.stderr, mode='a', buff=-1,
                        file_filters=file_filters,
                        stream_filters=stream_filters):
             yield
@@ -1296,7 +1298,7 @@ class HysopArgParser(argparse.ArgumentParser):
                 ndumps = int(np.floor(T/dt)) + 1
                 toi = tstart + np.arange(ndumps)*dt
                 dump_times.update(toi)
-            dump_times = filter(lambda t: (t >= tstart) & (t <= tend), dump_times)
+            dump_times = tuple(filter(lambda t: (t >= tstart) & (t <= tend), dump_times))
 
             setattr(args, '{}_times'.format(bname), tuple(sorted(dump_times)))
             times_of_interest.update(dump_times)
@@ -1348,7 +1350,7 @@ class HysopArgParser(argparse.ArgumentParser):
         return graphical_io
 
     def _check_graphical_io_args(self, args):
-        if (args.visu_rank < 0):
+        if (args.visu_rank is not None) and (args.visu_rank < 0):
             args.visu_rank = None
         self._check_default(args, 'visu_rank', int, allow_none=True)
         self._check_positive(args, 'visu_rank', strict=False, allow_none=True)
@@ -1721,7 +1723,7 @@ class HysopArgParser(argparse.ArgumentParser):
             return values[argvalue]
         msg = 'Failed to convert argument {}: {}'.format(argname, argvalue)
         msg += '\nPossible values are:\n  *'
-        msg += '\n  *'.join('{}: {}'.format(k, v) for (k, v) in values.iteritems())
+        msg += '\n  *'.join('{}: {}'.format(k, v) for (k, v) in values.items())
         self.error(msg)
 
     def _check_and_set_diffusion_mode(self, argname, args):
diff --git a/hysop_examples/examples/analytic/analytic.py b/hysop_examples/examples/analytic/analytic.py
index 4b1739483a5a1564fa28f793c89a7a8af759b82e..faabf06a4b6db909ffb03bd5a86fd62c7c52e212 100755
--- a/hysop_examples/examples/analytic/analytic.py
+++ b/hysop_examples/examples/analytic/analytic.py
@@ -1,8 +1,6 @@
-#!/usr/bin/env python2
 import numpy as np
 import sympy as sm
 
-
 def compute(args):
     '''
     HySoP Analytic Example: Initialize a field with a space and time dependent analytic formula.
@@ -31,7 +29,7 @@ def compute(args):
     if (impl is Implementation.PYTHON):
         # Setup python specific extra operator keyword arguments
         # (mapping: variable name => variable value)
-        op_kwds['extra_input_kwds'] = {'t': t}
+        pass
     elif (impl is Implementation.OPENCL):
         # For the OpenCL implementation we need to setup the compute device
         # and configure how the code is generated and compiled at runtime.
@@ -65,6 +63,7 @@ def compute(args):
             data[...] = (1.0/(1.0+0.1*t()))
             for x in coords:
                 data[...] *= np.cos(x-t())
+        extra_input_kwds = {'t': t}
     elif (impl is Implementation.OPENCL):
         # With the opencl codegen implementation we use a symbolic expression
         # generated using sympy. OpenCL code will be automatically generated,
@@ -77,6 +76,7 @@ def compute(args):
         compute_scalar = 1/(1+0.1*ts)
         for xi in xs:
             compute_scalar *= sm.cos(xi-ts)
+        extra_input_kwds = {}
     else:
         msg = 'Unknown implementation {}.'.format(impl)
 
@@ -84,7 +84,7 @@ def compute(args):
     analytic = AnalyticField(name='analytic',
                              field=scalar, formula=compute_scalar,
                              variables={scalar: npts}, implementation=impl,
-                             **op_kwds)
+                             extra_input_kwds=extra_input_kwds, **op_kwds)
 
     # Write output field at given frequency
     io_params = IOParams(filename='analytic', frequency=args.dump_freq)
@@ -110,9 +110,9 @@ def compute(args):
                       max_iter=args.max_iter,
                       times_of_interest=args.times_of_interest,
                       t=t)
-    
+
     # Finally solve the problem
-    problem.solve(simu, dry_run=args.dry_run, 
+    problem.solve(simu, dry_run=args.dry_run,
             debug_dumper=args.debug_dumper,
             checkpoint_handler=args.checkpoint_handler)
 
@@ -121,7 +121,7 @@ def compute(args):
 
 
 if __name__ == '__main__':
-    from hysop_examples.example_utils import HysopArgParser, colors
+    from hysop_examples.argparser import HysopArgParser, colors
 
     prog_name = 'analytic'
     default_dump_dir = '{}/hysop_examples/{}'.format(HysopArgParser.tmp_dir(), prog_name)
diff --git a/hysop_examples/examples/bubble/periodic_bubble.py b/hysop_examples/examples/bubble/periodic_bubble.py
index 9c230e4e85865e4f5ed34d380e6d8c8c4359c178..16ef7279d902b944d3fe611b59c22d16daa8b6e7 100644
--- a/hysop_examples/examples/bubble/periodic_bubble.py
+++ b/hysop_examples/examples/bubble/periodic_bubble.py
@@ -1,7 +1,7 @@
 
 ## HySoP Example: Bubble nD
-## Osher1995 (first part): 
-##  A Level Set Formulation of Eulerian Interface Capturing Methods for 
+## Osher1995 (first part):
+##  A Level Set Formulation of Eulerian Interface Capturing Methods for
 ##  Incompressible Fluid flows.
 ##
 ## This example is without levelset !
@@ -20,7 +20,7 @@ def init_rho(data, coords, Br, Bc, rho1, rho2, eps, component):
     # initialize density with the levelset
     init_phi(data=data, coords=coords, component=component, Br=Br, Bc=Bc)
     data[...] = regularize(data, rho1, rho2, eps)
-    
+
 def init_mu(data, coords, Br, Bc, mu1, mu2, eps, component):
     assert (component==0)
     # initialize viscosity with the levelset
@@ -51,7 +51,7 @@ def H_eps(x, eps):
     H = np.empty_like(x)
     H[np.where(x<-eps)] = 0.0
     H[np.where(x>+eps)] = 1.0
-    
+
     ie = np.where(np.abs(x)<=eps)
     xe = x[ie]
     H[ie] = (xe + eps)/(2*eps) + np.sin(np.pi*xe/eps)/(2*np.pi)
@@ -79,11 +79,11 @@ def compute(args):
     dim  = args.ndim
     npts = args.npts
     box  = Box(origin=args.box_origin, length=args.box_length, dim=dim)
-    
+
     # Get default MPI Parameters from domain (even for serial jobs)
     mpi_params = MPIParams(comm=box.task_comm(),
                            task_id=box.current_task())
-    
+
     # Setup usual implementation specific variables
     impl = args.impl
     extra_op_kwds = {'mpi_params': mpi_params}
@@ -92,40 +92,40 @@ def compute(args):
     elif (impl is Implementation.OPENCL):
         # For the OpenCL implementation we need to setup the compute device
         # and configure how the code is generated and compiled at runtime.
-                
+
         # Create an explicit OpenCL context from user parameters
         from hysop.backend.device.opencl.opencl_tools import get_or_create_opencl_env, get_device_number
         cl_env = get_or_create_opencl_env(
             mpi_params=mpi_params,
-            platform_id=args.cl_platform_id, 
+            platform_id=args.cl_platform_id,
             device_id=box.machine_rank%get_device_number() if args.cl_device_id is None else None)
-        
+
         # Configure OpenCL kernel generation and tuning (already done by HysopArgParser)
         from hysop.methods import OpenClKernelConfig
         method = { OpenClKernelConfig: args.opencl_kernel_config }
-        
+
         # Setup opencl specific extra operator keyword arguments
         extra_op_kwds['cl_env'] = cl_env
     else:
         msg='Unknown implementation \'{}\'.'.format(impl)
         raise ValueError(msg)
-    
+
     # Define parameters and field (time, timestep, velocity, vorticity, enstrophy)
     t, dt = TimeParameters(dtype=args.dtype)
     velo  = VelocityField(domain=box, dtype=args.dtype)
     vorti = VorticityField(velocity=velo)
-    rho   = DensityField(domain=box, dtype=args.dtype) 
+    rho   = DensityField(domain=box, dtype=args.dtype)
     mu    = ViscosityField(domain=box, dtype=args.dtype, mu=True)
 
     enstrophy = EnstrophyParameter(dtype=args.dtype)
     rhov = VolumicIntegrationParameter(field=rho)
     muv  = VolumicIntegrationParameter(field=mu)
-    
+
     ### Build the directional operators
-    #> Directional advection 
+    #> Directional advection
     advec = DirectionalAdvection(implementation=impl,
             name='advec',
-            velocity = velo,       
+            velocity = velo,
             advected_fields = (vorti, rho, mu),
             velocity_cfl = args.cfl,
             variables = {velo: npts, vorti: npts, rho: npts, mu: npts},
@@ -148,14 +148,14 @@ def compute(args):
                  pretty_name='sdiff',
                  formulation = args.stretching_formulation,
                  viscosity = mu,
-                 velocity  = velo,       
+                 velocity  = velo,
                  vorticity = vorti,
                  variables = {velo: npts, vorti: npts, mu: npts},
                  dt=dt, **extra_op_kwds)
     elif (dim==2):
         stretch_diffuse = DirectionalDiffusion(implementation=impl,
                  name='diffuse_{}'.format(vorti.name),
-                 pretty_name=u'diff{}'.format(vorti.pretty_name.decode('utf-8')),
+                 pretty_name='diff{}'.format(vorti.pretty_name),
                  coeffs = mu,
                  fields  = vorti,
                  variables = {vorti: npts, mu: npts},
@@ -163,7 +163,7 @@ def compute(args):
     else:
         msg='Unsupported dimension {}.'.format(dim)
         raise RuntimeError(msg)
-    
+
     #> External force rot(-rho*g)
     from hysop.symbolic import space_symbols
     from hysop.symbolic.base import SymbolicTensor
@@ -178,7 +178,7 @@ def compute(args):
     lhs = Ws.diff(frame.time)
     rhs = curl(Fext, frame)
     exprs = Assignment.assign(lhs, rhs)
-    external_force = DirectionalSymbolic(name='Fext', 
+    external_force = DirectionalSymbolic(name='Fext',
                                     implementation=impl,
                                     exprs=exprs, dt=dt,
                                     variables={vorti: npts,
@@ -187,14 +187,14 @@ def compute(args):
 
     #> Directional splitting operator subgraph
     splitting = StrangSplitting(splitting_dim=dim, order=args.strang_order)
-    splitting.push_operators(advec, 
-            diffuse, 
+    splitting.push_operators(advec,
+            diffuse,
             stretch_diffuse, external_force)
-    
+
     ### Build standard operators
     #> Poisson operator to recover the velocity from the vorticity
-    poisson = PoissonCurl(name='poisson', velocity=velo, vorticity=vorti, 
-                            variables={velo:npts, vorti: npts}, 
+    poisson = PoissonCurl(name='poisson', velocity=velo, vorticity=vorti,
+                            variables={velo:npts, vorti: npts},
                             projection=args.reprojection_frequency,
                             implementation=impl, **extra_op_kwds)
 
@@ -202,11 +202,11 @@ def compute(args):
     io_params = IOParams(filename='fields', frequency=args.dump_freq)
     dump_fields = HDF_Writer(name='dump',
                              io_params=io_params,
-                             variables={velo: npts, 
-                                        vorti: npts,    
-                                        rho: npts, 
+                             variables={velo: npts,
+                                        vorti: npts,
+                                        rho: npts,
                                         mu: npts})
-    
+
     #> Operator to compute the infinite norm of the velocity
     min_max_U = MinMaxFieldStatistics(field=velo,
             Finf=True, implementation=impl, variables={velo:npts},
@@ -235,7 +235,7 @@ def compute(args):
     integrate_mu = Integrate(name='integrate_mu', field=mu, variables={mu: npts},
                                     parameter=muv, scaling='normalize',
                                     implementation=impl, **extra_op_kwds)
-    
+
 
     ### Adaptive timestep operator
     adapt_dt = AdaptiveTimeStep(dt, equivalent_CFL=True, max_dt=1e-3)
@@ -243,13 +243,13 @@ def compute(args):
                                             equivalent_CFL=True)
     dt_advec = adapt_dt.push_advection_criteria(lcfl=args.lcfl, Finf=min_max_W.Finf,
                                                     criteria=AdvectionCriteria.W_INF)
-    
-    ## Create the problem we want to solve and insert our 
+
+    ## Create the problem we want to solve and insert our
     # directional splitting subgraph and the standard operators.
     # The method dictionnary passed to this graph will be dispatched
     # accross all operators contained in the graph.
     method.update(
-            { 
+            {
                ComputeGranularity:    args.compute_granularity,
                SpaceDiscretization:   args.fd_order,
                TimeIntegrator:        args.time_integrator,
@@ -257,11 +257,11 @@ def compute(args):
             }
     )
     problem = Problem(method=method)
-    problem.insert(poisson, 
+    problem.insert(poisson,
                    dump_fields,
-                   splitting, 
+                   splitting,
                    # integrate_enstrophy, integrate_rho, integrate_mu,
-                   min_max_rho, min_max_mu, min_max_U, min_max_W, 
+                   min_max_rho, min_max_mu, min_max_U, min_max_W,
                    adapt_dt)
     problem.build(args)
 
@@ -269,22 +269,22 @@ def compute(args):
     # display the graph on the given process rank.
     if args.display_graph:
         problem.display(args.visu_rank)
-    
+
     # Create a simulation
     # (do not forget to specify the t and dt parameters here)
-    simu = Simulation(start=args.tstart, end=args.tend, 
+    simu = Simulation(start=args.tstart, end=args.tend,
                       nb_iter=args.nb_iter,
                       max_iter=args.max_iter,
                       dt0=args.dt, times_of_interest=args.times_of_interest,
                       t=t, dt=dt)
-    simu.write_parameters(t, dt_cfl, dt_advec, dt, 
+    simu.write_parameters(t, dt_cfl, dt_advec, dt,
             enstrophy, rhov, muv,
-            min_max_U.Finf, min_max_W.Finf, 
+            min_max_U.Finf, min_max_W.Finf,
             min_max_rho.Fmin, min_max_rho.Fmax,
             min_max_mu.Fmin, min_max_mu.Fmax,
             adapt_dt.equivalent_CFL,
             filename='parameters.txt', precision=8)
-    
+
     # Initialize vorticity, velocity, viscosity and density on all topologies
     Bc, Br = args.Bc, args.Br
     dx  = np.max(np.divide(box.length, np.asarray(args.npts)-1))
@@ -295,21 +295,21 @@ def compute(args):
     problem.initialize_field(field=mu,    formula=init_mu,  mu1=args.mu1, mu2=args.mu2, Bc=Bc, Br=Br, reorder='Bc', eps=eps)
 
     # Finally solve the problem
-    problem.solve(simu, dry_run=args.dry_run, 
+    problem.solve(simu, dry_run=args.dry_run,
             debug_dumper=args.debug_dumper,
             checkpoint_handler=args.checkpoint_handler)
-    
+
     # Finalize
     problem.finalize()
 
 
 if __name__=='__main__':
-    from hysop_examples.example_utils import HysopArgParser, colors
+    from hysop_examples.argparser import HysopArgParser, colors
 
     class PeriodicBubbleArgParser(HysopArgParser):
         def __init__(self):
             prog_name = 'periodic_bubble'
-            default_dump_dir = '{}/hysop_examples/{}'.format(HysopArgParser.tmp_dir(), 
+            default_dump_dir = '{}/hysop_examples/{}'.format(HysopArgParser.tmp_dir(),
                     prog_name)
 
             description=colors.color('HySoP Periodic Bubble Example: ', fg='blue', style='bold')
@@ -320,7 +320,7 @@ if __name__=='__main__':
             description+='\n'
             description+='\nThis example focuses on a validation study for the '
             description+='hybrid particle-mesh vortex method for varying densities without using a levelset function.'
-    
+
             super(PeriodicBubbleArgParser, self).__init__(
                  prog_name=prog_name,
                  description=description,
@@ -354,7 +354,7 @@ if __name__=='__main__':
             self._check_default(args, vars_, float, allow_none=False)
             self._check_positive(args, vars_, strict=False, allow_none=False)
             self._check_default(args, ('Bc', 'Br'), tuple, allow_none=False)
-            
+
             Bc, Br = args.Bc, args.Br
             if len(Bc)!=len(Br):
                 msg='Specified {} bubble positions and {} bubble radi.'
@@ -368,7 +368,7 @@ if __name__=='__main__':
                     msg='Specified bubble radius is not a float, got {} which is of type {}.'
                     msg=msg.format(br, type(br).__name__)
                     self.error(msg)
-            
+
         def _add_graphical_io_args(self):
             graphical_io = super(PeriodicBubbleArgParser, self)._add_graphical_io_args()
             graphical_io.add_argument('-pp', '--plot-parameters', action='store_true',
@@ -376,16 +376,16 @@ if __name__=='__main__':
                     help=('Plot the density and viscosity integrals during simulation. '+
                          'Simulation will stop at each time of interest and '+
                          'the plot will be updated every specified freq iterations.'))
-            graphical_io.add_argument('-pf', '--plot-freq', type=int, default=100, 
+            graphical_io.add_argument('-pf', '--plot-freq', type=int, default=100,
                     dest='plot_freq',
                     help='Plotting update frequency in terms of iterations.')
-        
+
         def _check_file_io_args(self, args):
             super(PeriodicBubbleArgParser, self)._check_file_io_args(args)
             self._check_default(args, 'plot_parameters', bool, allow_none=False)
             self._check_default(args, 'plot_freq', int, allow_none=False)
             self._check_positive(args, 'plot_freq', strict=True, allow_none=False)
-            
+
         def _setup_parameters(self, args):
             super(PeriodicBubbleArgParser, self)._setup_parameters(args)
             dim = args.ndim
@@ -399,10 +399,10 @@ if __name__=='__main__':
     parser = PeriodicBubbleArgParser()
 
     parser.set_defaults(impl='cl', ndim=2, npts=(256,),
-                        box_origin=(0.0,), box_length=(1.0,), 
-                        tstart=0.0, tend=0.51, 
+                        box_origin=(0.0,), box_length=(1.0,),
+                        tstart=0.0, tend=0.51,
                         dt=1e-5, cfl=0.5, lcfl=0.125,
-                        dump_freq=10, 
+                        dump_freq=10,
                         dump_times=(0.0, 0.1, 0.20, 0.30, 0.325, 0.4, 0.45, 0.50),
                         rho1=1.0, rho2=10.0, mu1=0.00025, mu2=0.00050,
                         Bc = ((0.5,0.15,0.5),(0.5,0.45,0.5),), Br = (0.1,0.15,))
diff --git a/hysop_examples/examples/bubble/periodic_bubble_levelset.py b/hysop_examples/examples/bubble/periodic_bubble_levelset.py
index ea9b7b3667e2473255de70f387cf51d8771abd40..7d785944c82f8a4b9f444fc1d808f015bc23263b 100644
--- a/hysop_examples/examples/bubble/periodic_bubble_levelset.py
+++ b/hysop_examples/examples/bubble/periodic_bubble_levelset.py
@@ -1,7 +1,7 @@
 
 ## HySoP Example: Bubble nD
-## Osher1995 (first part): 
-##  A Level Set Formulation of Eulerian Interface Capturing Methods for 
+## Osher1995 (first part):
+##  A Level Set Formulation of Eulerian Interface Capturing Methods for
 ##  Incompressible Fluid flows.
 
 import os
@@ -15,7 +15,7 @@ def init_velocity(data, **kwds):
 
 def init_rho(data, **kwds):
     data[...] = 0.0
-    
+
 def init_mu(data, **kwds):
     data[...] = 0.0
 
@@ -53,7 +53,7 @@ def compute(args):
 
     from hysop.methods import SpaceDiscretization, Remesh, TimeIntegrator, \
                               ComputeGranularity, Interpolation
-    
+
     from hysop.symbolic import sm, space_symbols
     from hysop.symbolic.base import SymbolicTensor
     from hysop.symbolic.field import curl
@@ -65,11 +65,11 @@ def compute(args):
     dim  = args.ndim
     npts = args.npts
     box  = Box(origin=args.box_origin, length=args.box_length, dim=dim)
-    
+
     # Get default MPI Parameters from domain (even for serial jobs)
     mpi_params = MPIParams(comm=box.task_comm(),
                            task_id=box.current_task())
-    
+
     # Setup usual implementation specific variables
     impl = args.impl
     extra_op_kwds = {'mpi_params': mpi_params}
@@ -78,49 +78,49 @@ def compute(args):
     elif (impl is Implementation.OPENCL):
         # For the OpenCL implementation we need to setup the compute device
         # and configure how the code is generated and compiled at runtime.
-                
+
         # Create an explicit OpenCL context from user parameters
         from hysop.backend.device.opencl.opencl_tools import get_or_create_opencl_env, get_device_number
         cl_env = get_or_create_opencl_env(
             mpi_params=mpi_params,
-            platform_id=args.cl_platform_id, 
+            platform_id=args.cl_platform_id,
             device_id=box.machine_rank%get_device_number() if args.cl_device_id is None else None)
-        
+
         # Configure OpenCL kernel generation and tuning (already done by HysopArgParser)
         from hysop.methods import OpenClKernelConfig
         method = { OpenClKernelConfig: args.opencl_kernel_config }
-        
+
         # Setup opencl specific extra operator keyword arguments
         extra_op_kwds['cl_env'] = cl_env
     else:
         msg='Unknown implementation \'{}\'.'.format(impl)
         raise ValueError(msg)
-    
+
     # Define parameters and field (time, timestep, velocity, vorticity, enstrophy)
     t, dt = TimeParameters(dtype=args.dtype)
     velo  = VelocityField(domain=box, dtype=args.dtype)
     vorti = VorticityField(velocity=velo)
     phi   = LevelSetField(domain=box, dtype=args.dtype)
-    rho   = DensityField(domain=box, dtype=args.dtype) 
+    rho   = DensityField(domain=box, dtype=args.dtype)
     mu    = ViscosityField(domain=box, dtype=args.dtype, mu=True)
 
     enstrophy = EnstrophyParameter(dtype=args.dtype)
     rhov = VolumicIntegrationParameter(field=rho)
     muv  = VolumicIntegrationParameter(field=mu)
-    
+
     # Symbolic fields
     frame = rho.domain.frame
     phis  = phi.s(*frame.vars)
     rhos  = rho.s(*frame.vars)
     mus   = mu.s(*frame.vars)
     Ws    = vorti.s(*frame.vars)
-    
+
     ### Build the directional operators
-    #> Directional advection 
+    #> Directional advection
     advec = DirectionalAdvection(implementation=impl,
             name='advection',
             pretty_name='Adv',
-            velocity = velo,       
+            velocity = velo,
             advected_fields = (vorti, phi),
             velocity_cfl = args.cfl,
             variables = {velo: npts, vorti: npts, phi: npts},
@@ -141,15 +141,15 @@ def compute(args):
     e0 = Assignment(pi, np.pi)
     e1 = Assignment(eps, 2.5*dx)
     e2 = Assignment(x, phis)
-    e3 = Assignment(H, H_eps) 
+    e3 = Assignment(H, H_eps)
     e4 = Assignment(rhos, rho1 + (rho2-rho1)*H)
     e5 = Assignment(mus, mu1 + (mu2-mu1)*H)
     exprs = (e0,e1,e2,e3,e4,e5)
-    eval_fields = DirectionalSymbolic(name='eval_fields', 
-                                    pretty_name=u'{}({},{})'.format(
-                                        phi.pretty_name.decode('utf-8'), 
-                                        rho.pretty_name.decode('utf-8'), 
-                                        mu.pretty_name.decode('utf-8')),
+    eval_fields = DirectionalSymbolic(name='eval_fields',
+                                    pretty_name='{}({},{})'.format(
+                                        phi.pretty_name,
+                                        rho.pretty_name,
+                                        mu.pretty_name),
                                     no_split=True,
                                     implementation=impl,
                                     exprs=exprs, dt=dt,
@@ -161,17 +161,17 @@ def compute(args):
     if (dim==3):
         stretch_diffuse = DirectionalStretchingDiffusion(implementation=impl,
                  name='stretch_diffuse',
-                 pretty_name=u'SD{}'.format(vorti.pretty_name.decode('utf-8')),
+                 pretty_name='SD{}'.format(vorti.pretty_name),
                  formulation = args.stretching_formulation,
                  viscosity = mu,
-                 velocity  = velo,       
+                 velocity  = velo,
                  vorticity = vorti,
                  variables = {velo: npts, vorti: npts, mu: npts},
                  dt=dt, **extra_op_kwds)
     elif (dim==2):
         stretch_diffuse = DirectionalDiffusion(implementation=impl,
                  name='diffuse_{}'.format(vorti.name),
-                 pretty_name=u'D{}'.format(vorti.pretty_name.decode('utf-8')),
+                 pretty_name='D{}'.format(vorti.pretty_name),
                  coeffs = mu,
                  fields  = vorti,
                  variables = {vorti: npts, mu: npts},
@@ -179,7 +179,7 @@ def compute(args):
     else:
         msg='Unsupported dimension {}.'.format(dim)
         raise RuntimeError(msg)
-    
+
     #> External force rot(-rho*g)
     Fext = np.zeros(shape=(dim,), dtype=object).view(SymbolicTensor)
     Fext[1] = -1.0 #-9.8196
@@ -187,7 +187,7 @@ def compute(args):
     lhs = Ws.diff(frame.time)
     rhs = curl(Fext, frame)
     exprs = Assignment.assign(lhs, rhs)
-    external_force = DirectionalSymbolic(name='Fext', 
+    external_force = DirectionalSymbolic(name='Fext',
                                     implementation=impl,
                                     exprs=exprs, dt=dt,
                                     variables={vorti: npts,
@@ -197,11 +197,11 @@ def compute(args):
     #> Directional splitting operator subgraph
     splitting = StrangSplitting(splitting_dim=dim, order=args.strang_order)
     splitting.push_operators(advec, eval_fields, stretch_diffuse, external_force)
-    
+
     ### Build standard operators
     #> Poisson operator to recover the velocity from the vorticity
-    poisson = PoissonCurl(name='poisson', velocity=velo, vorticity=vorti, 
-                            variables={velo:npts, vorti: npts}, 
+    poisson = PoissonCurl(name='poisson', velocity=velo, vorticity=vorti,
+                            variables={velo:npts, vorti: npts},
                             projection=args.reprojection_frequency,
                             implementation=impl, **extra_op_kwds)
 
@@ -209,12 +209,12 @@ def compute(args):
     io_params = IOParams(filename='fields', frequency=args.dump_freq)
     dump_fields = HDF_Writer(name='dump',
                              io_params=io_params,
-                             variables={velo: npts, 
-                                        vorti: npts,    
+                             variables={velo: npts,
+                                        vorti: npts,
                                         phi: npts,
-                                        rho: npts, 
+                                        rho: npts,
                                         mu: npts})
-    
+
     #> Operator to compute the infinite norm of the velocity
     min_max_U = MinMaxFieldStatistics(field=velo,
             Finf=True, implementation=impl, variables={velo:npts},
@@ -233,24 +233,24 @@ def compute(args):
     integrate_mu = Integrate(field=mu, variables={mu: npts},
                                     parameter=muv, scaling='normalize',
                                     implementation=impl, **extra_op_kwds)
-    
+
 
     ### Adaptive timestep operator
     adapt_dt = AdaptiveTimeStep(dt, equivalent_CFL=True, max_dt=1e-3,
                                     name='merge_dt', pretty_name='dt', )
     dt_cfl   = adapt_dt.push_cfl_criteria(cfl=args.cfl, Finf=min_max_U.Finf,
-                                            equivalent_CFL=True, 
+                                            equivalent_CFL=True,
                                             name='dt_cfl', pretty_name='CFL')
     dt_advec = adapt_dt.push_advection_criteria(lcfl=args.lcfl, Finf=min_max_W.Finf,
                                                     criteria=AdvectionCriteria.W_INF,
                                                  name='dt_lcfl', pretty_name='LCFL')
-    
-    ## Create the problem we want to solve and insert our 
+
+    ## Create the problem we want to solve and insert our
     # directional splitting subgraph and the standard operators.
     # The method dictionnary passed to this graph will be dispatched
     # accross all operators contained in the graph.
     method.update(
-            { 
+            {
                ComputeGranularity:    args.compute_granularity,
                SpaceDiscretization:   args.fd_order,
                TimeIntegrator:        args.time_integrator,
@@ -258,11 +258,11 @@ def compute(args):
             }
     )
     problem = Problem(method=method)
-    problem.insert(poisson, 
-                   splitting, 
+    problem.insert(poisson,
+                   splitting,
                    dump_fields,
                    integrate_enstrophy, integrate_rho, integrate_mu,
-                   min_max_U, min_max_W, 
+                   min_max_U, min_max_W,
                    adapt_dt)
     problem.build(args)
 
@@ -270,20 +270,20 @@ def compute(args):
     # display the graph on the given process rank.
     if args.display_graph:
         problem.display(args.visu_rank)
-    
+
     # Create a simulation
     # (do not forget to specify the t and dt parameters here)
-    simu = Simulation(start=args.tstart, end=args.tend, 
+    simu = Simulation(start=args.tstart, end=args.tend,
                       nb_iter=args.nb_iter,
                       max_iter=args.max_iter,
                       dt0=args.dt, times_of_interest=args.times_of_interest,
                       t=t, dt=dt)
-    simu.write_parameters(t, dt_cfl, dt_advec, dt, 
+    simu.write_parameters(t, dt_cfl, dt_advec, dt,
             enstrophy, rhov, muv,
-            min_max_U.Finf, min_max_W.Finf, 
+            min_max_U.Finf, min_max_W.Finf,
             adapt_dt.equivalent_CFL,
             filename='parameters.txt', precision=8)
-    
+
     # Initialize vorticity, velocity, viscosity and density on all topologies
     Bc, Br = args.Bc, args.Br
     problem.initialize_field(field=velo,  formula=init_velocity)
@@ -293,21 +293,21 @@ def compute(args):
     problem.initialize_field(field=phi,   formula=init_phi, Bc=Bc, Br=Br, reorder='Bc')
 
     # Finally solve the problem
-    problem.solve(simu, dry_run=args.dry_run, 
+    problem.solve(simu, dry_run=args.dry_run,
             debug_dumper=args.debug_dumper,
             checkpoint_handler=args.checkpoint_handler)
-    
+
     # Finalize
     problem.finalize()
 
 
 if __name__=='__main__':
-    from hysop_examples.example_utils import HysopArgParser, colors
+    from hysop_examples.argparser import HysopArgParser, colors
 
     class PeriodicBubbleArgParser(HysopArgParser):
         def __init__(self):
             prog_name = 'periodic_bubble_levelset'
-            default_dump_dir = '{}/hysop_examples/{}'.format(HysopArgParser.tmp_dir(), 
+            default_dump_dir = '{}/hysop_examples/{}'.format(HysopArgParser.tmp_dir(),
                     prog_name)
 
             description=colors.color('HySoP Periodic Bubble Example: ', fg='blue', style='bold')
@@ -318,7 +318,7 @@ if __name__=='__main__':
             description+='\n'
             description+='\nThis example focuses on a validation study for the '
             description+='hybrid particle-mesh vortex method for varying densities without using a levelset function.'
-    
+
             super(PeriodicBubbleArgParser, self).__init__(
                  prog_name=prog_name,
                  description=description,
@@ -352,7 +352,7 @@ if __name__=='__main__':
             self._check_default(args, vars_, float, allow_none=False)
             self._check_positive(args, vars_, strict=False, allow_none=False)
             self._check_default(args, ('Bc', 'Br'), tuple, allow_none=False)
-            
+
             Bc, Br = args.Bc, args.Br
             if len(Bc)!=len(Br):
                 msg='Specified {} bubble positions and {} bubble radi.'
@@ -366,7 +366,7 @@ if __name__=='__main__':
                     msg='Specified bubble radius is not a float, got {} which is of type {}.'
                     msg=msg.format(br, type(br).__name__)
                     self.error(msg)
-            
+
         def _add_graphical_io_args(self):
             graphical_io = super(PeriodicBubbleArgParser, self)._add_graphical_io_args()
             graphical_io.add_argument('-pp', '--plot-parameters', action='store_true',
@@ -374,16 +374,16 @@ if __name__=='__main__':
                     help=('Plot the density and viscosity integrals during simulation. '+
                          'Simulation will stop at each time of interest and '+
                          'the plot will be updated every specified freq iterations.'))
-            graphical_io.add_argument('-pf', '--plot-freq', type=int, default=100, 
+            graphical_io.add_argument('-pf', '--plot-freq', type=int, default=100,
                     dest='plot_freq',
                     help='Plotting update frequency in terms of iterations.')
-        
+
         def _check_file_io_args(self, args):
             super(PeriodicBubbleArgParser, self)._check_file_io_args(args)
             self._check_default(args, 'plot_parameters', bool, allow_none=False)
             self._check_default(args, 'plot_freq', int, allow_none=False)
             self._check_positive(args, 'plot_freq', strict=True, allow_none=False)
-            
+
         def _setup_parameters(self, args):
             super(PeriodicBubbleArgParser, self)._setup_parameters(args)
             dim = args.ndim
@@ -397,10 +397,10 @@ if __name__=='__main__':
     parser = PeriodicBubbleArgParser()
 
     parser.set_defaults(impl='cl', ndim=2, npts=(256,),
-                        box_origin=(0.0,), box_length=(1.0,), 
-                        tstart=0.0, tend=0.51, 
+                        box_origin=(0.0,), box_length=(1.0,),
+                        tstart=0.0, tend=0.51,
                         dt=1e-6, cfl=0.5, lcfl=0.125,
-                        dump_freq=0, 
+                        dump_freq=0,
                         dump_times=(0.0, 0.1, 0.20, 0.30, 0.325, 0.4, 0.45, 0.50),
                         rho1=1.0, rho2=10.0, mu1=0.00025, mu2=0.00050,
                         Bc = ((0.5,0.15,0.5),(0.5,0.45,0.5),), Br = (0.1,0.15,))
diff --git a/hysop_examples/examples/bubble/periodic_bubble_levelset_penalization.py b/hysop_examples/examples/bubble/periodic_bubble_levelset_penalization.py
index c31c77efa16cb165245557552e71a89de9606bd0..dfa25bddb47b06be7bb623d958b0651b9ccd91bd 100644
--- a/hysop_examples/examples/bubble/periodic_bubble_levelset_penalization.py
+++ b/hysop_examples/examples/bubble/periodic_bubble_levelset_penalization.py
@@ -1,7 +1,7 @@
 
 ## HySoP Example: Bubble nD
-## Osher1995 (first part): 
-##  A Level Set Formulation of Eulerian Interface Capturing Methods for 
+## Osher1995 (first part):
+##  A Level Set Formulation of Eulerian Interface Capturing Methods for
 ##  Incompressible Fluid flows.
 
 import os
@@ -15,7 +15,7 @@ def init_velocity(data, **kwds):
 
 def init_rho(data, **kwds):
     data[...] = 0.0
-    
+
 def init_mu(data, **kwds):
     data[...] = 0.0
 
@@ -61,7 +61,7 @@ def compute(args):
     from hysop.methods import SpaceDiscretization, Remesh, TimeIntegrator, \
                               ComputeGranularity, Interpolation
     from hysop.numerics.odesolvers.runge_kutta import Euler, RK2, RK3, RK4
-    
+
     from hysop.symbolic import sm, space_symbols, local_indices_symbols
     from hysop.symbolic.base import SymbolicTensor
     from hysop.symbolic.field import curl
@@ -74,11 +74,11 @@ def compute(args):
     dim  = args.ndim
     npts = args.npts
     box  = Box(origin=args.box_origin, length=args.box_length, dim=dim)
-    
+
     # Get default MPI Parameters from domain (even for serial jobs)
     mpi_params = MPIParams(comm=box.task_comm(),
                            task_id=box.current_task())
-    
+
     # Setup usual implementation specific variables
     impl = args.impl
     extra_op_kwds = {'mpi_params': mpi_params}
@@ -87,37 +87,37 @@ def compute(args):
     elif (impl is Implementation.OPENCL):
         # For the OpenCL implementation we need to setup the compute device
         # and configure how the code is generated and compiled at runtime.
-                
+
         # Create an explicit OpenCL context from user parameters
         from hysop.backend.device.opencl.opencl_tools import get_or_create_opencl_env, get_device_number
         cl_env = get_or_create_opencl_env(
             mpi_params=mpi_params,
-            platform_id=args.cl_platform_id, 
+            platform_id=args.cl_platform_id,
             device_id=box.machine_rank%get_device_number() if args.cl_device_id is None else None)
-        
+
         # Configure OpenCL kernel generation and tuning (already done by HysopArgParser)
         from hysop.methods import OpenClKernelConfig
         method = { OpenClKernelConfig: args.opencl_kernel_config }
-        
+
         # Setup opencl specific extra operator keyword arguments
         extra_op_kwds['cl_env'] = cl_env
     else:
         msg='Unknown implementation \'{}\'.'.format(impl)
         raise ValueError(msg)
-    
+
     # Define parameters and field (time, timestep, velocity, vorticity, enstrophy)
     t, dt   = TimeParameters(dtype=args.dtype)
     velo    = VelocityField(domain=box, dtype=args.dtype)
     vorti   = VorticityField(velocity=velo)
     phi     = LevelSetField(domain=box, dtype=args.dtype)
     _lambda = PenalizationField(domain=box, dtype=args.dtype)
-    rho     = DensityField(domain=box, dtype=args.dtype) 
+    rho     = DensityField(domain=box, dtype=args.dtype)
     mu      = ViscosityField(domain=box, dtype=args.dtype, mu=True)
 
     enstrophy = EnstrophyParameter(dtype=args.dtype)
     rhov = VolumicIntegrationParameter(field=rho)
     muv  = VolumicIntegrationParameter(field=mu)
-    
+
     # Symbolic fields
     frame = rho.domain.frame
     phis  = phi.s(*frame.vars)
@@ -127,7 +127,7 @@ def compute(args):
     Ws    = vorti.s(*frame.vars)
     lambdas = _lambda.s(*frame.vars)
     dts   = dt.s
-    
+
     ### Build the directional operators
     #> Directional penalization
     penalization = -dts*lambdas*Us / (1+lambdas*dts)
@@ -135,18 +135,18 @@ def compute(args):
     lhs = Ws
     rhs = curl(penalization, frame)
     exprs = Assignment.assign(lhs, rhs)
-    penalization = DirectionalSymbolic(name='penalization', 
+    penalization = DirectionalSymbolic(name='penalization',
                                     implementation=impl,
                                     exprs=exprs,
                                     fixed_residue=Ws,
                                     variables={vorti: npts, velo: npts, _lambda: npts},
                                     method={TimeIntegrator: Euler},
                                     dt=dt, **extra_op_kwds)
-    #> Directional advection 
+    #> Directional advection
     advec = DirectionalAdvection(implementation=impl,
             name='advection',
             pretty_name='Adv',
-            velocity = velo,       
+            velocity = velo,
             advected_fields = (vorti, phi),
             velocity_cfl = args.cfl,
             variables = {velo: npts, vorti: npts, phi: npts},
@@ -167,15 +167,15 @@ def compute(args):
     e0 = Assignment(pi, np.pi)
     e1 = Assignment(eps, 2.5*dx)
     e2 = Assignment(x, phis)
-    e3 = Assignment(H, H_eps) 
+    e3 = Assignment(H, H_eps)
     e4 = Assignment(rhos, rho1 + (rho2-rho1)*H)
     e5 = Assignment(mus, mu1 + (mu2-mu1)*H)
     exprs = (e0,e1,e2,e3,e4,e5)
-    eval_fields = DirectionalSymbolic(name='eval_fields', 
-                                    pretty_name=u'{}({},{})'.format(
-                                        phi.pretty_name.decode('utf-8'), 
-                                        rho.pretty_name.decode('utf-8'), 
-                                        mu.pretty_name.decode('utf-8')),
+    eval_fields = DirectionalSymbolic(name='eval_fields',
+                                    pretty_name='{}({},{})'.format(
+                                        phi.pretty_name,
+                                        rho.pretty_name,
+                                        mu.pretty_name),
                                     no_split=True,
                                     implementation=impl,
                                     exprs=exprs, dt=dt,
@@ -187,17 +187,17 @@ def compute(args):
     if (dim==3):
         stretch_diffuse = DirectionalStretchingDiffusion(implementation=impl,
                  name='stretch_diffuse',
-                 pretty_name=u'SD{}'.format(vorti.pretty_name.decode('utf-8')),
+                 pretty_name='SD{}'.format(vorti.pretty_name),
                  formulation = args.stretching_formulation,
                  viscosity = mu,
-                 velocity  = velo,       
+                 velocity  = velo,
                  vorticity = vorti,
                  variables = {velo: npts, vorti: npts, mu: npts},
                  dt=dt, **extra_op_kwds)
     elif (dim==2):
         stretch_diffuse = DirectionalDiffusion(implementation=impl,
                  name='diffuse_{}'.format(vorti.name),
-                 pretty_name=u'D{}'.format(vorti.pretty_name.decode('utf-8')),
+                 pretty_name='D{}'.format(vorti.pretty_name),
                  coeffs = mu,
                  fields  = vorti,
                  variables = {vorti: npts, mu: npts},
@@ -205,7 +205,7 @@ def compute(args):
     else:
         msg='Unsupported dimension {}.'.format(dim)
         raise RuntimeError(msg)
-    
+
     #> External force rot(-rho*g)
     Fext = np.zeros(shape=(dim,), dtype=object).view(SymbolicTensor)
     Fext[1] = -1.0 #-9.8196
@@ -213,7 +213,7 @@ def compute(args):
     lhs = Ws.diff(frame.time)
     rhs = curl(Fext, frame)
     exprs = Assignment.assign(lhs, rhs)
-    external_force = DirectionalSymbolic(name='Fext', 
+    external_force = DirectionalSymbolic(name='Fext',
                                     implementation=impl,
                                     exprs=exprs, dt=dt,
                                     variables={vorti: npts,
@@ -223,11 +223,11 @@ def compute(args):
     #> Directional splitting operator subgraph
     splitting = StrangSplitting(splitting_dim=dim, order=args.strang_order)
     splitting.push_operators(penalization, advec, eval_fields, stretch_diffuse, external_force)
-    
+
     ### Build standard operators
     #> Poisson operator to recover the velocity from the vorticity
-    poisson = PoissonCurl(name='poisson', velocity=velo, vorticity=vorti, 
-                            variables={velo:npts, vorti: npts}, 
+    poisson = PoissonCurl(name='poisson', velocity=velo, vorticity=vorti,
+                            variables={velo:npts, vorti: npts},
                             projection=args.reprojection_frequency,
                             implementation=impl, **extra_op_kwds)
 
@@ -235,17 +235,17 @@ def compute(args):
     io_params = IOParams(filename='fields', frequency=args.dump_freq)
     dump_fields = HDF_Writer(name='dump',
                              io_params=io_params,
-                             variables={#velo: npts, 
-                                        vorti: npts,    
+                             variables={#velo: npts,
+                                        vorti: npts,
                                         #phi: npts,
-                                        rho: npts, 
+                                        rho: npts,
                                         #mu: npts,
                                         })
     io_params = IOParams(filename='lambda', frequency=0)
     dump_lambda = HDF_Writer(name='dump_lambda',
                                 io_params=io_params,
                                 variables = {_lambda: npts})
-    
+
     #> Operator to compute the infinite norm of the velocity
     min_max_U = MinMaxFieldStatistics(field=velo,
             Finf=True, implementation=impl, variables={velo:npts},
@@ -264,7 +264,7 @@ def compute(args):
     integrate_mu = Integrate(field=mu, variables={mu: npts},
                                     parameter=muv, scaling='normalize',
                                     implementation=impl, **extra_op_kwds)
-    
+
 
     ### Adaptive timestep operator
     dx = np.min(np.divide(box.length, np.asarray(npts)-1))
@@ -277,21 +277,21 @@ def compute(args):
     max_dt = min(W0_dt, W1_dt)
     adapt_dt = AdaptiveTimeStep(dt, equivalent_CFL=True, max_dt=max_dt,
                                     name='merge_dt', pretty_name='dt', )
-    dt_cfl   = adapt_dt.push_cfl_criteria(cfl=args.cfl, 
+    dt_cfl   = adapt_dt.push_cfl_criteria(cfl=args.cfl,
                                             Fmin=min_max_U.Fmin,
                                             Fmax=min_max_U.Fmax,
-                                            equivalent_CFL=True, 
+                                            equivalent_CFL=True,
                                             name='dt_cfl', pretty_name='CFL')
     dt_advec = adapt_dt.push_advection_criteria(lcfl=args.lcfl, Finf=min_max_W.Finf,
                                                     criteria=AdvectionCriteria.W_INF,
                                                  name='dt_lcfl', pretty_name='LCFL')
-    
-    ## Create the problem we want to solve and insert our 
+
+    ## Create the problem we want to solve and insert our
     # directional splitting subgraph and the standard operators.
     # The method dictionnary passed to this graph will be dispatched
     # accross all operators contained in the graph.
     method.update(
-            { 
+            {
                ComputeGranularity:    args.compute_granularity,
                SpaceDiscretization:   args.fd_order,
                TimeIntegrator:        args.time_integrator,
@@ -299,11 +299,11 @@ def compute(args):
             }
     )
     problem = Problem(method=method)
-    problem.insert(poisson, 
-                   splitting, 
+    problem.insert(poisson,
+                   splitting,
                    dump_fields, dump_lambda,
                    integrate_enstrophy, integrate_rho, integrate_mu,
-                   min_max_U, min_max_W, 
+                   min_max_U, min_max_W,
                    adapt_dt)
     problem.build(args)
 
@@ -311,20 +311,20 @@ def compute(args):
     # display the graph on the given process rank.
     if args.display_graph:
         problem.display(args.visu_rank)
-    
+
     # Create a simulation
     # (do not forget to specify the t and dt parameters here)
-    simu = Simulation(start=args.tstart, end=args.tend, 
+    simu = Simulation(start=args.tstart, end=args.tend,
                       nb_iter=args.nb_iter,
                       max_iter=args.max_iter,
                       dt0=args.dt, times_of_interest=args.times_of_interest,
                       t=t, dt=dt)
-    simu.write_parameters(t, dt_cfl, dt_advec, dt, 
+    simu.write_parameters(t, dt_cfl, dt_advec, dt,
             enstrophy, rhov, muv,
-            min_max_U.Finf, min_max_W.Finf, 
+            min_max_U.Finf, min_max_W.Finf,
             adapt_dt.equivalent_CFL,
             filename='parameters.txt', precision=8)
-    
+
     # Initialize vorticity, velocity, viscosity and density on all topologies
     Bc, Br = args.Bc, args.Br
     problem.initialize_field(field=velo,  formula=init_velocity)
@@ -335,21 +335,21 @@ def compute(args):
     problem.initialize_field(field=_lambda, formula=init_lambda)
 
     # Finally solve the problem
-    problem.solve(simu, dry_run=args.dry_run, 
+    problem.solve(simu, dry_run=args.dry_run,
             debug_dumper=args.debug_dumper,
             checkpoint_handler=args.checkpoint_handler)
-    
+
     # Finalize
     problem.finalize()
 
 
 if __name__=='__main__':
-    from hysop_examples.example_utils import HysopArgParser, colors
+    from hysop_examples.argparser import HysopArgParser, colors
 
     class PeriodicBubbleArgParser(HysopArgParser):
         def __init__(self):
             prog_name = 'periodic_bubble_levelset_penalization'
-            default_dump_dir = '{}/hysop_examples/{}'.format(HysopArgParser.tmp_dir(), 
+            default_dump_dir = '{}/hysop_examples/{}'.format(HysopArgParser.tmp_dir(),
                     prog_name)
 
             description=colors.color('HySoP Periodic Bubble Example: ', fg='blue', style='bold')
@@ -360,7 +360,7 @@ if __name__=='__main__':
             description+='\n'
             description+='\nThis example focuses on a validation study for the '
             description+='hybrid particle-mesh vortex method for varying densities without using a levelset function.'
-    
+
             super(PeriodicBubbleArgParser, self).__init__(
                  prog_name=prog_name,
                  description=description,
@@ -394,7 +394,7 @@ if __name__=='__main__':
             self._check_default(args, vars_, float, allow_none=False)
             self._check_positive(args, vars_, strict=False, allow_none=False)
             self._check_default(args, ('Bc', 'Br'), tuple, allow_none=False)
-            
+
             Bc, Br = args.Bc, args.Br
             if len(Bc)!=len(Br):
                 msg='Specified {} bubble positions and {} bubble radi.'
@@ -408,7 +408,7 @@ if __name__=='__main__':
                     msg='Specified bubble radius is not a float, got {} which is of type {}.'
                     msg=msg.format(br, type(br).__name__)
                     self.error(msg)
-            
+
         def _add_graphical_io_args(self):
             graphical_io = super(PeriodicBubbleArgParser, self)._add_graphical_io_args()
             graphical_io.add_argument('-pp', '--plot-parameters', action='store_true',
@@ -416,16 +416,16 @@ if __name__=='__main__':
                     help=('Plot the density and viscosity integrals during simulation. '+
                          'Simulation will stop at each time of interest and '+
                          'the plot will be updated every specified freq iterations.'))
-            graphical_io.add_argument('-pf', '--plot-freq', type=int, default=100, 
+            graphical_io.add_argument('-pf', '--plot-freq', type=int, default=100,
                     dest='plot_freq',
                     help='Plotting update frequency in terms of iterations.')
-        
+
         def _check_file_io_args(self, args):
             super(PeriodicBubbleArgParser, self)._check_file_io_args(args)
             self._check_default(args, 'plot_parameters', bool, allow_none=False)
             self._check_default(args, 'plot_freq', int, allow_none=False)
             self._check_positive(args, 'plot_freq', strict=True, allow_none=False)
-            
+
         def _setup_parameters(self, args):
             super(PeriodicBubbleArgParser, self)._setup_parameters(args)
             dim = args.ndim
@@ -439,10 +439,10 @@ if __name__=='__main__':
     parser = PeriodicBubbleArgParser()
 
     parser.set_defaults(impl='cl', ndim=2, npts=(256,),
-                        box_origin=(0.0,), box_length=(1.0,), 
-                        tstart=0.0, tend=1.75, 
+                        box_origin=(0.0,), box_length=(1.0,),
+                        tstart=0.0, tend=1.75,
                         dt=1e-6, cfl=0.5, lcfl=0.125,
-                        dump_freq=25, 
+                        dump_freq=25,
                         dump_times=(0.0, 0.1, 0.20, 0.30, 0.325, 0.4, 0.45, 0.50),
                         rho1=1.0, rho2=10.0, mu1=0.00025, mu2=0.00050,
                         Bc = ((0.5,0.15,0.5),), Br = (0.1,))
diff --git a/hysop_examples/examples/bubble/periodic_jet_levelset.py b/hysop_examples/examples/bubble/periodic_jet_levelset.py
index c35266a67bfe88e3e1ebce05cb91ea7fa25024a5..4e024c8ab28d6686e31fe3266b823992e66de4fc 100644
--- a/hysop_examples/examples/bubble/periodic_jet_levelset.py
+++ b/hysop_examples/examples/bubble/periodic_jet_levelset.py
@@ -1,7 +1,7 @@
 
 ## HySoP Example: Periodic jet 2D
-## Osher1995 (second part): 
-##  A Level Set Formulation of Eulerian Interface Capturing Methods for 
+## Osher1995 (second part):
+##  A Level Set Formulation of Eulerian Interface Capturing Methods for
 ##  Incompressible Fluid flows.
 
 import os
@@ -46,7 +46,7 @@ def compute(args):
 
     from hysop.methods import SpaceDiscretization, Remesh, TimeIntegrator, \
                               ComputeGranularity, Interpolation
-    
+
     from hysop.symbolic import sm, space_symbols
     from hysop.symbolic.base import SymbolicTensor
     from hysop.symbolic.field import curl
@@ -58,11 +58,11 @@ def compute(args):
     dim  = args.ndim
     npts = args.npts
     box  = Box(origin=args.box_origin, length=args.box_length, dim=dim)
-    
+
     # Get default MPI Parameters from domain (even for serial jobs)
     mpi_params = MPIParams(comm=box.task_comm(),
                            task_id=box.current_task())
-    
+
     # Setup usual implementation specific variables
     impl = args.impl
     extra_op_kwds = {'mpi_params': mpi_params}
@@ -71,46 +71,46 @@ def compute(args):
     elif (impl is Implementation.OPENCL):
         # For the OpenCL implementation we need to setup the compute device
         # and configure how the code is generated and compiled at runtime.
-                
+
         # Create an explicit OpenCL context from user parameters
         from hysop.backend.device.opencl.opencl_tools import get_or_create_opencl_env, get_device_number
         cl_env = get_or_create_opencl_env(
             mpi_params=mpi_params,
-            platform_id=args.cl_platform_id, 
+            platform_id=args.cl_platform_id,
             device_id=box.machine_rank%get_device_number() if args.cl_device_id is None else None)
-        
+
         # Configure OpenCL kernel generation and tuning (already done by HysopArgParser)
         from hysop.methods import OpenClKernelConfig
         method = { OpenClKernelConfig: args.opencl_kernel_config }
-        
+
         # Setup opencl specific extra operator keyword arguments
         extra_op_kwds['cl_env'] = cl_env
     else:
         msg='Unknown implementation \'{}\'.'.format(impl)
         raise ValueError(msg)
-    
+
     # Define parameters and field (time, timestep, velocity, vorticity, enstrophy)
     t, dt = TimeParameters(dtype=args.dtype)
     velo  = VelocityField(domain=box, dtype=args.dtype)
     vorti = VorticityField(velocity=velo)
     phi   = LevelSetField(domain=box, dtype=args.dtype)
-    rho   = DensityField(domain=box, dtype=args.dtype) 
+    rho   = DensityField(domain=box, dtype=args.dtype)
 
     enstrophy = EnstrophyParameter(dtype=args.dtype)
     rhov = VolumicIntegrationParameter(field=rho)
-    
+
     # Symbolic fields
     frame = rho.domain.frame
     phis  = phi.s(*frame.vars)
     rhos  = rho.s(*frame.vars)
     Ws    = vorti.s(*frame.vars)
-    
+
     ### Build the directional operators
-    #> Directional advection 
+    #> Directional advection
     advec = DirectionalAdvection(implementation=impl,
             name='advection',
             pretty_name='Adv',
-            velocity = velo,       
+            velocity = velo,
             advected_fields = (vorti, phi),
             velocity_cfl = args.cfl,
             variables = {velo: npts, vorti: npts, phi: npts},
@@ -130,13 +130,13 @@ def compute(args):
     e0 = Assignment(pi, np.pi)
     e1 = Assignment(eps, 2.5*dx)
     e2 = Assignment(x, phis)
-    e3 = Assignment(H, H_eps) 
+    e3 = Assignment(H, H_eps)
     e4 = Assignment(rhos, rho1 + (rho2-rho1)*H)
     exprs = (e0,e1,e2,e3,e4)
-    eval_fields = DirectionalSymbolic(name='eval_fields', 
-                                    pretty_name=u'{}({})'.format(
-                                        phi.pretty_name.decode('utf-8'), 
-                                        rho.pretty_name.decode('utf-8')), 
+    eval_fields = DirectionalSymbolic(name='eval_fields',
+                                    pretty_name='{}({})'.format(
+                                        phi.pretty_name,
+                                        rho.pretty_name),
                                     no_split=True,
                                     implementation=impl,
                                     exprs=exprs, dt=dt,
@@ -146,7 +146,7 @@ def compute(args):
     #> Directional stretching + diffusion
     diffuse = DirectionalDiffusion(implementation=impl,
              name='diffuse_{}'.format(vorti.name),
-             pretty_name=u'D{}'.format(vorti.pretty_name.decode('utf-8')),
+             pretty_name='D{}'.format(vorti.pretty_name),
              coeffs = (args.mu/10.0,),
              fields = (phi,),
              variables = {vorti: npts , phi: npts},
@@ -155,17 +155,17 @@ def compute(args):
     if (dim==3):
         stretch_diffuse = DirectionalStretchingDiffusion(implementation=impl,
                  name='stretch_diffuse',
-                 pretty_name=u'SD{}'.format(vorti.pretty_name.decode('utf-8')),
+                 pretty_name='SD{}'.format(vorti.pretty_name),
                  formulation = args.stretching_formulation,
                  viscosity = args.mu,
-                 velocity  = velo,       
+                 velocity  = velo,
                  vorticity = vorti,
                  variables = {velo: npts, vorti: npts},
                  dt=dt, **extra_op_kwds)
     elif (dim==2):
         stretch_diffuse = DirectionalDiffusion(implementation=impl,
                  name='diffuse_{}'.format(vorti.name),
-                 pretty_name=u'D{}'.format(vorti.pretty_name.decode('utf-8')),
+                 pretty_name='D{}'.format(vorti.pretty_name),
                  coeffs = args.mu,
                  fields  = vorti,
                  variables = {vorti: npts},
@@ -173,7 +173,7 @@ def compute(args):
     else:
         msg='Unsupported dimension {}.'.format(dim)
         raise RuntimeError(msg)
-    
+
     #> External force rot(-rho*g)
     Fext = np.zeros(shape=(dim,), dtype=object).view(SymbolicTensor)
     Fext[1] = +1
@@ -181,7 +181,7 @@ def compute(args):
     lhs = Ws.diff(frame.time)
     rhs = curl(Fext, frame)
     exprs = Assignment.assign(lhs, rhs)
-    external_force = DirectionalSymbolic(name='Fext', 
+    external_force = DirectionalSymbolic(name='Fext',
                                     implementation=impl,
                                     exprs=exprs, dt=dt,
                                     variables={vorti: npts,
@@ -191,11 +191,11 @@ def compute(args):
     #> Directional splitting operator subgraph
     splitting = StrangSplitting(splitting_dim=dim, order=args.strang_order)
     splitting.push_operators(advec, diffuse, stretch_diffuse, eval_fields, external_force)
-    
+
     ### Build standard operators
     #> Poisson operator to recover the velocity from the vorticity
-    poisson = PoissonCurl(name='poisson', velocity=velo, vorticity=vorti, 
-                            variables={velo:npts, vorti: npts}, 
+    poisson = PoissonCurl(name='poisson', velocity=velo, vorticity=vorti,
+                            variables={velo:npts, vorti: npts},
                             projection=args.reprojection_frequency,
                             implementation=impl, **extra_op_kwds)
 
@@ -203,11 +203,11 @@ def compute(args):
     io_params = IOParams(filename='fields', frequency=args.dump_freq)
     dump_fields = HDF_Writer(name='dump',
                              io_params=io_params,
-                             variables={#velo: npts, 
-                                        vorti: npts,    
+                             variables={#velo: npts,
+                                        vorti: npts,
                                         #phi: npts,
                                         rho: npts})
-    
+
     #> Operator to compute the infinite norm of the velocity
     min_max_U = MinMaxFieldStatistics(field=velo,
             Finf=True, implementation=impl, variables={velo:npts},
@@ -228,18 +228,18 @@ def compute(args):
     adapt_dt = AdaptiveTimeStep(dt, equivalent_CFL=True, max_dt=1e-2,
                                     name='merge_dt', pretty_name='dt', )
     dt_cfl   = adapt_dt.push_cfl_criteria(cfl=args.cfl, Finf=min_max_U.Finf,
-                                            equivalent_CFL=True, 
+                                            equivalent_CFL=True,
                                             name='dt_cfl', pretty_name='CFL')
     dt_advec = adapt_dt.push_advection_criteria(lcfl=args.lcfl, Finf=min_max_W.Finf,
                                                     criteria=AdvectionCriteria.W_INF,
                                                  name='dt_lcfl', pretty_name='LCFL')
-    
-    ## Create the problem we want to solve and insert our 
+
+    ## Create the problem we want to solve and insert our
     # directional splitting subgraph and the standard operators.
     # The method dictionnary passed to this graph will be dispatched
     # accross all operators contained in the graph.
     method.update(
-            { 
+            {
                ComputeGranularity:    args.compute_granularity,
                SpaceDiscretization:   args.fd_order,
                TimeIntegrator:        args.time_integrator,
@@ -247,11 +247,11 @@ def compute(args):
             }
     )
     problem = Problem(method=method)
-    problem.insert(poisson, 
-                   splitting, 
+    problem.insert(poisson,
+                   splitting,
                    dump_fields,
                    integrate_enstrophy, integrate_rho,
-                   min_max_U, min_max_W, 
+                   min_max_U, min_max_W,
                    adapt_dt)
     problem.build(args)
 
@@ -259,42 +259,42 @@ def compute(args):
     # display the graph on the given process rank.
     if args.display_graph:
         problem.display(args.visu_rank)
-    
+
     # Create a simulation
     # (do not forget to specify the t and dt parameters here)
-    simu = Simulation(start=args.tstart, end=args.tend, 
+    simu = Simulation(start=args.tstart, end=args.tend,
                       nb_iter=args.nb_iter,
                       max_iter=args.max_iter,
                       dt0=args.dt, times_of_interest=args.times_of_interest,
                       t=t, dt=dt)
-    simu.write_parameters(t, dt_cfl, dt_advec, dt, 
-            enstrophy, rhov, 
-            min_max_U.Finf, min_max_W.Finf, 
+    simu.write_parameters(t, dt_cfl, dt_advec, dt,
+            enstrophy, rhov,
+            min_max_U.Finf, min_max_W.Finf,
             adapt_dt.equivalent_CFL,
             filename='parameters.txt', precision=8)
-    
+
     # Initialize vorticity, velocity, viscosity and density on all topologies
     problem.initialize_field(field=velo,  formula=init_velocity)
     problem.initialize_field(field=vorti, formula=init_vorticity)
     problem.initialize_field(field=rho,   formula=init_rho)
-    problem.initialize_field(field=phi,   formula=init_phi, L=box.length) 
+    problem.initialize_field(field=phi,   formula=init_phi, L=box.length)
 
     # Finally solve the problem
-    problem.solve(simu, dry_run=args.dry_run, 
+    problem.solve(simu, dry_run=args.dry_run,
             debug_dumper=args.debug_dumper,
             checkpoint_handler=args.checkpoint_handler)
-    
+
     # Finalize
     problem.finalize()
 
 
 if __name__=='__main__':
-    from hysop_examples.example_utils import HysopArgParser, colors
+    from hysop_examples.argparser import HysopArgParser, colors
 
     class PeriodicJetArgParser(HysopArgParser):
         def __init__(self):
             prog_name = 'periodic_jet'
-            default_dump_dir = '{}/hysop_examples/{}'.format(HysopArgParser.tmp_dir(), 
+            default_dump_dir = '{}/hysop_examples/{}'.format(HysopArgParser.tmp_dir(),
                     prog_name)
 
             description=colors.color('HySoP Periodic Bubble Example: ', fg='blue', style='bold')
@@ -305,7 +305,7 @@ if __name__=='__main__':
             description+='\n'
             description+='\nThis example focuses on a validation study for the '
             description+='hybrid particle-mesh vortex method for varying densities without using a levelset function.'
-    
+
             super(PeriodicJetArgParser, self).__init__(
                  prog_name=prog_name,
                  description=description,
@@ -329,7 +329,7 @@ if __name__=='__main__':
             vars_ = ('rho1', 'rho2', 'mu')
             self._check_default(args, vars_, float, allow_none=False)
             self._check_positive(args, vars_, strict=False, allow_none=False)
-            
+
         def _setup_parameters(self, args):
             super(PeriodicJetArgParser, self)._setup_parameters(args)
             dim = args.ndim
@@ -341,10 +341,10 @@ if __name__=='__main__':
     parser = PeriodicJetArgParser()
 
     parser.set_defaults(impl='cl', ndim=2, npts=(128,),
-                        box_origin=(0.0,), box_length=(1.0,), 
-                        tstart=0.0, tend=0.66, 
+                        box_origin=(0.0,), box_length=(1.0,),
+                        tstart=0.0, tend=0.66,
                         dt=1e-5, cfl=0.5, lcfl=0.125,
-                        dump_freq=10, 
+                        dump_freq=10,
                         dump_times=(0.0, 0.1, 0.3, 0.45, 0.55, 0.65),
                         rho1=10.0, rho2=20.0, mu=0.00025)
 
diff --git a/hysop_examples/examples/cylinder/oscillating_cylinder.py b/hysop_examples/examples/cylinder/oscillating_cylinder.py
index 3e9e597704e0e46d3f3ce680bc9878d5d914612f..6da8e27eda054427d546213c0ddbffb1943af59b 100644
--- a/hysop_examples/examples/cylinder/oscillating_cylinder.py
+++ b/hysop_examples/examples/cylinder/oscillating_cylinder.py
@@ -36,7 +36,7 @@ def compute(args):
     from hysop.methods import SpaceDiscretization, Remesh, TimeIntegrator, \
                               ComputeGranularity, Interpolation
     from hysop.numerics.odesolvers.runge_kutta import Euler, RK2, RK3, RK4
-    
+
     from hysop.symbolic import sm, space_symbols, local_indices_symbols
     from hysop.symbolic.base import SymbolicTensor
     from hysop.symbolic.field import curl
@@ -44,7 +44,7 @@ def compute(args):
     from hysop.symbolic.misc import Select
     from hysop.symbolic.tmp import TmpScalar
     from hysop.tools.string_utils import framed_str
-    
+
     Kc = 5
     Re = 250
 
@@ -64,11 +64,11 @@ def compute(args):
     dim  = 2
     npts = (int(H/E)*N, int(L/E)*N)
     box  = Box(origin=(-H/2, -L/2), length=(H,L), dim=dim)
-    
+
     # Get default MPI Parameters from domain (even for serial jobs)
     mpi_params = MPIParams(comm=box.task_comm(),
                            task_id=box.current_task())
-    
+
     # Setup usual implementation specific variables
     impl = args.impl
     extra_op_kwds = {'mpi_params': mpi_params}
@@ -77,24 +77,24 @@ def compute(args):
     elif (impl is Implementation.OPENCL):
         # For the OpenCL implementation we need to setup the compute device
         # and configure how the code is generated and compiled at runtime.
-                
+
         # Create an explicit OpenCL context from user parameters
         from hysop.backend.device.opencl.opencl_tools import get_or_create_opencl_env, get_device_number
         cl_env = get_or_create_opencl_env(
             mpi_params=mpi_params,
-            platform_id=args.cl_platform_id, 
+            platform_id=args.cl_platform_id,
             device_id=box.machine_rank%get_device_number() if args.cl_device_id is None else None)
-        
+
         # Configure OpenCL kernel generation and tuning (already done by HysopArgParser)
         from hysop.methods import OpenClKernelConfig
         method = { OpenClKernelConfig: args.opencl_kernel_config }
-        
+
         # Setup opencl specific extra operator keyword arguments
         extra_op_kwds['cl_env'] = cl_env
     else:
         msg='Unknown implementation \'{}\'.'.format(impl)
         raise ValueError(msg)
-    
+
     # Define parameters and field (time, timestep, velocity, vorticity, enstrophy)
     t, dt   = TimeParameters(dtype=args.dtype)
     velo    = VelocityField(domain=box, dtype=args.dtype)
@@ -117,11 +117,11 @@ def compute(args):
     Xs = LogicalLT((Xc-X)**2 + (Yc-Y)**2,  D**2/4)
 
     compute_lambda = 1e8*Xs
-    cylinder = AnalyticField(name='cylinder', 
+    cylinder = AnalyticField(name='cylinder',
                 field=_lambda, formula=compute_lambda,
                 variables = {_lambda: npts}, implementation=impl,
                 **extra_op_kwds)
-    
+
     ### Build the directional operators
     #> Directional penalization
     penalization = +dts*lambdas*(Uc-Us) / (1+lambdas*dts)
@@ -129,45 +129,45 @@ def compute(args):
     lhs = Ws
     rhs = curl(penalization, frame)
     exprs = Assignment.assign(lhs, rhs)
-    penalization = DirectionalSymbolic(name='penalization', 
+    penalization = DirectionalSymbolic(name='penalization',
                                     implementation=impl,
                                     exprs=exprs,
                                     fixed_residue=Ws,
                                     variables={vorti: npts, velo: npts, _lambda: npts},
                                     method={TimeIntegrator: Euler},
                                     dt=dt, **extra_op_kwds)
-    #> Directional advection 
+    #> Directional advection
     advec = DirectionalAdvection(implementation=impl,
             name='advection',
             pretty_name='Adv',
-            velocity = velo,       
+            velocity = velo,
             advected_fields = (vorti,),
             velocity_cfl = args.cfl,
             variables = {velo: npts, vorti: npts},
             dt=dt, **extra_op_kwds)
-    
+
     #> Directional stretching + diffusion
     if (dim==3):
         stretch = DirectionalStretching(implementation=impl,
-                 name='stretch',
-                 pretty_name='stretch',
+                 name='S',
+                 pretty_name='S',
                  formulation = args.stretching_formulation,
-                 velocity  = velo,       
+                 velocity  = velo,
                  vorticity = vorti,
                  variables = {velo: npts, vorti: npts},
                  dt=dt, **extra_op_kwds)
     else:
         stretch = None
-    
+
 
     #> Directional splitting operator subgraph
     splitting = StrangSplitting(splitting_dim=dim, order=args.strang_order)
     splitting.push_operators(penalization, advec, stretch)
-    
+
     ### Build standard operators
     #> Poisson operator to recover the velocity from the vorticity
-    poisson = PoissonCurl(name='poisson', velocity=velo, vorticity=vorti, 
-                            variables={velo:npts, vorti: npts}, 
+    poisson = PoissonCurl(name='poisson', velocity=velo, vorticity=vorti,
+                            variables={velo:npts, vorti: npts},
                             diffusion=mu, dt=dt,
                             projection=args.reprojection_frequency,
                             implementation=impl, **extra_op_kwds)
@@ -176,10 +176,10 @@ def compute(args):
     io_params = IOParams(filename='fields', frequency=args.dump_freq)
     dump_fields = HDF_Writer(name='dump',
                              io_params=io_params,
-                             variables={velo: npts, 
-                                        vorti: npts,    
+                             variables={velo: npts,
+                                        vorti: npts,
                                         _lambda: npts})
-    
+
     #> Operator to compute the infinite norm of the velocity
     min_max_U = MinMaxFieldStatistics(field=velo,
             Finf=True, implementation=impl, variables={velo:npts},
@@ -200,21 +200,21 @@ def compute(args):
     max_dt = CFL_dt
     adapt_dt = AdaptiveTimeStep(dt, equivalent_CFL=True, max_dt=max_dt,
                                     name='merge_dt', pretty_name='dt', )
-    dt_cfl   = adapt_dt.push_cfl_criteria(cfl=args.cfl, 
+    dt_cfl   = adapt_dt.push_cfl_criteria(cfl=args.cfl,
                                           Fmin=min_max_U.Fmin,
                                           Fmax=min_max_U.Fmax,
-                                          equivalent_CFL=True, 
+                                          equivalent_CFL=True,
                                           name='dt_cfl', pretty_name='CFL')
     dt_advec = adapt_dt.push_advection_criteria(lcfl=args.lcfl, Finf=min_max_W.Finf,
                                                     criteria=AdvectionCriteria.W_INF,
                                                  name='dt_lcfl', pretty_name='LCFL')
-    
-    ## Create the problem we want to solve and insert our 
+
+    ## Create the problem we want to solve and insert our
     # directional splitting subgraph and the standard operators.
     # The method dictionnary passed to this graph will be dispatched
     # accross all operators contained in the graph.
     method.update(
-            { 
+            {
                ComputeGranularity:    args.compute_granularity,
                SpaceDiscretization:   args.fd_order,
                TimeIntegrator:        args.time_integrator,
@@ -223,10 +223,10 @@ def compute(args):
     )
     problem = Problem(method=method)
     problem.insert(cylinder,
-                   poisson, 
-                   splitting, 
+                   poisson,
+                   splitting,
                    dump_fields,
-                   min_max_U, min_max_W, 
+                   min_max_U, min_max_W,
                    adapt_dt)
     problem.build(args)
 
@@ -234,52 +234,52 @@ def compute(args):
     # display the graph on the given process rank.
     if args.display_graph:
         problem.display(args.visu_rank)
-    
+
     # Create a simulation
     # (do not forget to specify the t and dt parameters here)
-    simu = Simulation(start=args.tstart, end=args.tend, 
+    simu = Simulation(start=args.tstart, end=args.tend,
                       nb_iter=args.nb_iter,
                       max_iter=args.max_iter,
                       dt0=args.dt, times_of_interest=args.times_of_interest,
                       t=t, dt=dt)
-    simu.write_parameters(t, dt_cfl, dt_advec, dt, 
-            min_max_U.Finf, min_max_W.Finf, 
+    simu.write_parameters(t, dt_cfl, dt_advec, dt,
+            min_max_U.Finf, min_max_W.Finf,
             adapt_dt.equivalent_CFL,
             filename='parameters.txt', precision=8)
-    
+
     # Initialize vorticity, velocity, viscosity and density on all topologies
     problem.initialize_field(field=velo,    formula=init_velocity)
     problem.initialize_field(field=vorti,   formula=init_vorticity)
     problem.initialize_field(field=_lambda, formula=init_lambda)
 
     # Finally solve the problem
-    problem.solve(simu, dry_run=args.dry_run, 
+    problem.solve(simu, dry_run=args.dry_run,
             debug_dumper=args.debug_dumper,
             checkpoint_handler=args.checkpoint_handler)
-    
+
     # Finalize
     problem.finalize()
 
 
 if __name__=='__main__':
-    from hysop_examples.example_utils import HysopArgParser, colors
+    from hysop_examples.argparser import HysopArgParser, colors
 
     class OscillatingCylinderArgParser(HysopArgParser):
         def __init__(self):
             prog_name = 'oscillating_cylinder'
-            default_dump_dir = '{}/hysop_examples/{}'.format(HysopArgParser.tmp_dir(), 
+            default_dump_dir = '{}/hysop_examples/{}'.format(HysopArgParser.tmp_dir(),
                     prog_name)
 
             description=colors.color('HySoP Oscillating Cylinder Example: ', fg='blue', style='bold')
             description+='\n'
             description+='\nThis example focuses on a validation study for the '
             description+='penalization with a immersed moving cylinder boundary.'
-    
+
             super(OscillatingCylinderArgParser, self).__init__(
                  prog_name=prog_name,
                  description=description,
                  default_dump_dir=default_dump_dir)
-        
+
         def _setup_parameters(self, args):
             super(OscillatingCylinderArgParser, self)._setup_parameters(args)
             dim = args.ndim
@@ -293,9 +293,9 @@ if __name__=='__main__':
     toi = tuple(np.linspace(0.0, 20.0, 20*24).tolist())
 
     parser.set_defaults(impl='cl', ndim=2,
-                        tstart=0.0, tend=20.1, 
+                        tstart=0.0, tend=20.1,
                         dt=1e-6, cfl=0.5, lcfl=0.95,
-                        dump_freq=0, 
+                        dump_freq=0,
                         dump_times=toi)
 
     parser.run(compute)
diff --git a/hysop_examples/examples/fixed_point/heat_equation.py b/hysop_examples/examples/fixed_point/heat_equation.py
index 141f305825a467e47a4d502225780e29a3029049..4ecd9f6998f7a596010c86507b54a9fa3838d752 100644
--- a/hysop_examples/examples/fixed_point/heat_equation.py
+++ b/hysop_examples/examples/fixed_point/heat_equation.py
@@ -60,7 +60,7 @@ def compute(args):
         from hysop.backend.device.opencl.opencl_tools import get_or_create_opencl_env, get_device_number
         cl_env = get_or_create_opencl_env(
             mpi_params=mpi_params,
-            platform_id=args.cl_platform_id, 
+            platform_id=args.cl_platform_id,
             device_id=box.machine_rank%get_device_number() if args.cl_device_id is None else None)
 
         # Configure OpenCL kernel generation and tuning (already done by HysopArgParser)
@@ -192,13 +192,13 @@ def compute(args):
     simu.write_parameters(t, fixedPoint.it_num,
                           filename='parameters.txt', precision=8)
     problem.initialize_field(u, formula=init_u)
-    problem.solve(simu, dry_run=args.dry_run, 
+    problem.solve(simu, dry_run=args.dry_run,
             checkpoint_handler=args.checkpoint_handler)
     problem.finalize()
 
 
 if __name__=='__main__':
-    from hysop_examples.example_utils import HysopArgParser, colors
+    from hysop_examples.argparser import HysopArgParser, colors
 
     class IMArgParser(HysopArgParser):
         def __init__(self):
diff --git a/hysop_examples/examples/flow_around_sphere/flow_around_sphere.py b/hysop_examples/examples/flow_around_sphere/flow_around_sphere.py
index d8bafa812ccc8663b25e9ab39f04cd4a2919304c..f0265b430eaecbb38cc15578920c5d151e75e42a 100644
--- a/hysop_examples/examples/flow_around_sphere/flow_around_sphere.py
+++ b/hysop_examples/examples/flow_around_sphere/flow_around_sphere.py
@@ -34,7 +34,7 @@ def compute(args):
     cfl = args.cfl
     lcfl = args.lcfl
     uinf = 1.0
-    viscosity = 1. / 250.
+    viscosity = 1.0 / 250
     outfreq = args.dump_freq
     dt0 = args.dt
 
@@ -52,18 +52,18 @@ def compute(args):
     elif (impl is Implementation.OPENCL):
         # For the OpenCL implementation we need to setup the compute device
         # and configure how the code is generated and compiled at runtime.
-                
+
         # Create an explicit OpenCL context from user parameters
         from hysop.backend.device.opencl.opencl_tools import get_or_create_opencl_env, get_device_number
         cl_env = get_or_create_opencl_env(
             mpi_params=mpi_params,
-            platform_id=args.cl_platform_id, 
+            platform_id=args.cl_platform_id,
             device_id=box.machine_rank%get_device_number() if args.cl_device_id is None else None)
-        
+
         # Configure OpenCL kernel generation and tuning (already done by HysopArgParser)
         from hysop.methods import OpenClKernelConfig
         method = { OpenClKernelConfig: args.opencl_kernel_config }
-        
+
         # Setup opencl specific extra operator keyword arguments
         extra_op_kwds['cl_env'] = cl_env
     else:
@@ -171,7 +171,7 @@ def compute(args):
         advec_dir = DirectionalAdvection(
             implementation=impl,
             name='advec',
-            velocity = velo,       
+            velocity = velo,
             advected_fields = (vorti, ),
             velocity_cfl = args.cfl,
             variables = {velo: npts, vorti: npts},
@@ -182,8 +182,8 @@ def compute(args):
     else:
         StretchOp = StaticDirectionalStretching
     stretch = StretchOp(
-        implementation=Implementation.PYTHON if implIsFortran else impl, 
-        name='stretch',
+        implementation=Implementation.PYTHON if implIsFortran else impl,
+        name='S',
         formulation=StretchingFormulation.CONSERVATIVE,
         velocity=velo,
         vorticity=vorti,
@@ -314,13 +314,13 @@ def compute(args):
     simu.write_parameters(t, dt_cfl, dt_advec, dt, enstrophy, flowrate,
                           min_max_U.Finf, min_max_W.Finf, adapt_dt.equivalent_CFL,
                           filename='parameters.txt', precision=8)
-    
+
     problem.initialize_field(vorti, formula=computeVort)
     problem.initialize_field(velo, formula=computeVel)
     problem.initialize_field(sphere, formula=computeSphere)
 
     # Finally solve the problem
-    problem.solve(simu, dry_run=args.dry_run, 
+    problem.solve(simu, dry_run=args.dry_run,
             debug_dumper=args.debug_dumper,
             checkpoint_handler=args.checkpoint_handler)
 
@@ -330,7 +330,7 @@ def compute(args):
 
 
 if __name__=='__main__':
-    from hysop_examples.example_utils import HysopArgParser, colors
+    from hysop_examples.argparser import HysopArgParser, colors
     parser = HysopArgParser(prog_name="FlowAroundSphere",
                             description="""HySoP flow around a sphere.\n""",
                             default_dump_dir='{}/hysop_examples/FlowAroundSphere'.format(
diff --git a/hysop_examples/examples/multiresolution/scalar_advection.py b/hysop_examples/examples/multiresolution/scalar_advection.py
index e4a346723b08b2d4a5fe7e8a6b2801eacfcf80a8..2bd4ad3220eb99e24789a6a70f190f4f0ca6c9f9 100644
--- a/hysop_examples/examples/multiresolution/scalar_advection.py
+++ b/hysop_examples/examples/multiresolution/scalar_advection.py
@@ -190,7 +190,7 @@ def compute(args):
 
 
 if __name__ == '__main__':
-    from hysop_examples.example_utils import HysopArgParser, colors
+    from hysop_examples.argparser import HysopArgParser, colors
 
     class MultiResolutionScalarAdvectionArgParser(HysopArgParser):
         def __init__(self):
diff --git a/hysop_examples/examples/particles_above_salt/particles_above_salt_bc.py b/hysop_examples/examples/particles_above_salt/particles_above_salt_bc.py
index f9f7e3ecb677bc7fbe42631af1a474e8275ddd24..797bd31b7308071c949425e2ceb9e75a5d0660ce 100644
--- a/hysop_examples/examples/particles_above_salt/particles_above_salt_bc.py
+++ b/hysop_examples/examples/particles_above_salt/particles_above_salt_bc.py
@@ -1,6 +1,6 @@
 ## See Meiburg 2012 & 2014
 ## Sediment-laden fresh water above salt water.
-    
+
 import numpy as np
 import scipy as sp
 import sympy as sm
@@ -21,7 +21,7 @@ def delta(Ys, l0):
     for Yi in Ys:
         Y0 = Y0*Yi
     return 0.1*l0*(np.random.rand(*Y0.shape)-0.5)
-    
+
 def init_concentration(data, coords, l0, component):
     assert (component==0)
     X = coords[0]
@@ -55,7 +55,7 @@ def compute(args):
 
     from hysop.methods import SpaceDiscretization, Remesh, TimeIntegrator, \
                               ComputeGranularity, Interpolation
-    
+
     from hysop.symbolic import sm, space_symbols, local_indices_symbols
     from hysop.symbolic.base import SymbolicTensor
     from hysop.symbolic.field import curl
@@ -82,7 +82,7 @@ def compute(args):
     npts=N[::-1]
     Xo=Xo[::-1]
     Xn=Xn[::-1]
-    
+
     lboundaries = (BoxBoundaryCondition.PERIODIC,)*(dim-1)+(BoxBoundaryCondition.SYMMETRIC,)
     rboundaries = (BoxBoundaryCondition.PERIODIC,)*(dim-1)+(BoxBoundaryCondition.SYMMETRIC,)
 
@@ -93,7 +93,7 @@ def compute(args):
 
     box = Box(origin=Xo, length=np.subtract(Xn,Xo),
                 lboundaries=lboundaries, rboundaries=rboundaries)
-    
+
     # Get default MPI Parameters from domain (even for serial jobs)
     mpi_params = MPIParams(comm=box.task_comm(),
                            task_id=box.current_task())
@@ -107,18 +107,18 @@ def compute(args):
     elif (impl is Implementation.OPENCL):
         # For the OpenCL implementation we need to setup the compute device
         # and configure how the code is generated and compiled at runtime.
-                
+
         # Create an explicit OpenCL context from user parameters
         from hysop.backend.device.opencl.opencl_tools import get_or_create_opencl_env, get_device_number
         cl_env = get_or_create_opencl_env(
             mpi_params=mpi_params,
-            platform_id=args.cl_platform_id, 
+            platform_id=args.cl_platform_id,
             device_id=box.machine_rank%get_device_number() if args.cl_device_id is None else None)
-        
+
         # Configure OpenCL kernel generation and tuning (already done by HysopArgParser)
         from hysop.methods import OpenClKernelConfig
         method = { OpenClKernelConfig: args.opencl_kernel_config }
-        
+
         # Setup opencl specific extra operator keyword arguments
         extra_op_kwds['cl_env'] = cl_env
     else:
@@ -131,7 +131,7 @@ def compute(args):
     vorti = VorticityField(velocity=velo)
     C = Field(domain=box, name='C', dtype=args.dtype, lboundaries=C_lboundaries, rboundaries=C_rboundaries)
     S = Field(domain=box, name='S', dtype=args.dtype, lboundaries=S_lboundaries, rboundaries=S_rboundaries)
-    
+
     # Symbolic fields
     frame = velo.domain.frame
     Us    = velo.s(*frame.vars)
@@ -141,43 +141,43 @@ def compute(args):
     dts   = dt.s
 
     ### Build the directional operators
-    #> Directional advection 
+    #> Directional advection
     advec = DirectionalAdvection(implementation=impl,
             name='advec',
-            velocity = velo,       
+            velocity = velo,
             advected_fields = (vorti,S),
             velocity_cfl = args.cfl,
             variables = {velo: npts, vorti: npts, S: npts},
             dt=dt, **extra_op_kwds)
-   
+
     V0 = [0]*dim
     VP = [0]*dim
     VP[0] = Vp
     advec_C = DirectionalAdvection(implementation=impl,
             name='advec_C',
-            velocity = velo,       
+            velocity = velo,
             advected_fields = (C,),
             relative_velocity = VP,
             velocity_cfl = args.cfl,
             variables = {velo: npts, C: npts},
             dt=dt, **extra_op_kwds)
-    
+
     #> Stretch vorticity
     if (dim==3):
         stretch = DirectionalStretching(implementation=impl,
-                 name='stretch',
-                 pretty_name='stretch',
+                 name='S',
+                 pretty_name='S',
                  formulation = args.stretching_formulation,
-                 velocity  = velo,       
+                 velocity  = velo,
                  vorticity = vorti,
                  variables = {velo: npts, vorti: npts},
                  dt=dt, **extra_op_kwds)
     elif (dim==2):
-        stretch = None 
+        stretch = None
     else:
         msg='Unsupported dimension {}.'.format(dim)
         raise RuntimeError(msg)
-    
+
     #> Diffusion of vorticity, S and C
     diffuse_S = Diffusion(implementation=impl,
              enforce_implementation=enforce_implementation,
@@ -186,7 +186,7 @@ def compute(args):
              nu = nu_S,
              Fin = S,
              variables = {S: npts},
-             dt=dt, 
+             dt=dt,
              **extra_op_kwds)
     diffuse_C = Diffusion(implementation=impl,
              enforce_implementation=enforce_implementation,
@@ -204,27 +204,27 @@ def compute(args):
     lhs = Ws.diff(frame.time)
     rhs = curl(Fext, frame)
     exprs = Assignment.assign(lhs, rhs)
-    external_force = DirectionalSymbolic(name='Fext', 
+    external_force = DirectionalSymbolic(name='Fext',
                                     implementation=impl,
                                     exprs=exprs, dt=dt,
                                     variables={vorti: npts,
                                                S: npts,
                                                C: npts},
                                     **extra_op_kwds)
-    
-    splitting = StrangSplitting(splitting_dim=dim, 
+
+    splitting = StrangSplitting(splitting_dim=dim,
                     order=args.strang_order)
     splitting.push_operators(advec, advec_C, stretch, external_force)
-    
+
     ### Build standard operators
     #> Poisson operator to recover the velocity from the vorticity
-    poisson = PoissonCurl(name='poisson', velocity=velo, vorticity=vorti, 
+    poisson = PoissonCurl(name='poisson', velocity=velo, vorticity=vorti,
                             variables={velo:npts, vorti: npts},
                             diffusion=nu_W, dt=dt,
                             implementation=impl,
                             enforce_implementation=enforce_implementation,
                             **extra_op_kwds)
-    
+
     #> Operator to compute the infinite norm of the velocity
     min_max_U = MinMaxFieldStatistics(name='min_max_U', field=velo,
             Finf=True, implementation=impl, variables={velo:npts},
@@ -233,14 +233,14 @@ def compute(args):
     min_max_W = MinMaxFieldStatistics(field=vorti,
             Finf=True, implementation=impl, variables={vorti:npts},
             **extra_op_kwds)
-    
+
     #> Operators to dump all fields
     dump_fields = HDF_Writer(name='dump',
                              io_params=args.io_params.clone(filename='fields'),
                              force_backend=Backend.OPENCL,
-                             variables={velo: npts, 
+                             variables={velo: npts,
                                         vorti: npts,
-                                        C: npts, 
+                                        C: npts,
                                         S: npts},
                              **extra_op_kwds)
 
@@ -250,7 +250,7 @@ def compute(args):
     view[-1] = (-200.0,+200.0)
     view = tuple(view)
     io_params = IOParams(filename='horizontally_averaged_profiles', frequency=0)
-    compute_mean_fields = ComputeMeanField(name='mean', 
+    compute_mean_fields = ComputeMeanField(name='mean',
             fields={C: (view, axes), S: (view, axes)},
             variables={C: npts, S: npts},
             io_params=io_params)
@@ -258,23 +258,23 @@ def compute(args):
     ### Adaptive timestep operator
     adapt_dt = AdaptiveTimeStep(dt, equivalent_CFL=True,
                                     name='merge_dt', pretty_name='dt', )
-    dt_cfl = adapt_dt.push_cfl_criteria(cfl=args.cfl, 
+    dt_cfl = adapt_dt.push_cfl_criteria(cfl=args.cfl,
                                         Fmin=min_max_U.Fmin,
                                         Fmax=min_max_U.Fmax,
-                                        equivalent_CFL=True, 
+                                        equivalent_CFL=True,
                                         relative_velocities=[V0, VP],
                                         name='dt_cfl', pretty_name='CFL')
     dt_advec = adapt_dt.push_advection_criteria(lcfl=args.lcfl, Finf=min_max_W.Finf,
                                                  criteria=AdvectionCriteria.W_INF,
                                                  name='dt_lcfl', pretty_name='LCFL')
 
-    
-    ## Create the problem we want to solve and insert our 
+
+    ## Create the problem we want to solve and insert our
     # directional splitting subgraph and the standard operators.
     # The method dictionnary passed to this graph will be dispatched
     # accross all operators contained in the graph.
     method.update(
-            { 
+            {
                ComputeGranularity:    args.compute_granularity,
                SpaceDiscretization:   args.fd_order,
                TimeIntegrator:        args.time_integrator,
@@ -283,64 +283,64 @@ def compute(args):
         )
 
     problem = Problem(method=method)
-    problem.insert(poisson, 
+    problem.insert(poisson,
                    diffuse_S, diffuse_C,
-                   splitting, 
+                   splitting,
                    dump_fields,
                    compute_mean_fields,
-                   min_max_U, min_max_W, 
+                   min_max_U, min_max_W,
                    adapt_dt)
     problem.build(args)
-    
+
     # If a visu_rank was provided, and show_graph was set,
     # display the graph on the given process rank.
     if args.display_graph:
         problem.display(args.visu_rank)
-    
+
     # Create a simulation
     # (do not forget to specify the t and dt parameters here)
-    simu = Simulation(start=args.tstart, end=args.tend, 
+    simu = Simulation(start=args.tstart, end=args.tend,
                       nb_iter=args.nb_iter,
                       max_iter=args.max_iter,
                       dt0=args.dt, times_of_interest=args.times_of_interest,
                       t=t, dt=dt)
-    simu.write_parameters(t, dt_cfl, dt_advec, dt, 
+    simu.write_parameters(t, dt_cfl, dt_advec, dt,
             min_max_U.Finf, min_max_W.Finf, adapt_dt.equivalent_CFL,
             filename='parameters.txt', precision=8)
-    
+
     # Initialize vorticity, velocity, S and C on all topologies
     problem.initialize_field(field=velo,  formula=init_velocity)
     problem.initialize_field(field=vorti, formula=init_vorticity)
     problem.initialize_field(field=C,     formula=init_concentration, l0=l0)
     problem.initialize_field(field=S,     formula=init_salinity, l0=l0)
-    
+
     # Finally solve the problem
-    problem.solve(simu, dry_run=args.dry_run, 
+    problem.solve(simu, dry_run=args.dry_run,
             debug_dumper=args.debug_dumper,
             checkpoint_handler=args.checkpoint_handler)
-    
+
     # Finalize
     problem.finalize()
 
 
 if __name__=='__main__':
-    from hysop_examples.example_utils import HysopArgParser, colors
+    from hysop_examples.argparser import HysopArgParser, colors
 
     class ParticleAboveSaltArgParser(HysopArgParser):
         def __init__(self):
             prog_name = 'particle_above_salt_bc'
-            default_dump_dir = '{}/hysop_examples/{}'.format(HysopArgParser.tmp_dir(), 
+            default_dump_dir = '{}/hysop_examples/{}'.format(HysopArgParser.tmp_dir(),
                     prog_name)
 
             description=colors.color('HySoP Particles Above Salt Example: ', fg='blue',
                     style='bold')
             description+=colors.color('[Meiburg 2014]', fg='yellow', style='bold')
-            description+=colors.color('\nSediment-laden fresh water above salt water.', 
+            description+=colors.color('\nSediment-laden fresh water above salt water.',
                     fg='yellow')
             description+='\n'
             description+='\nThis example focuses on a validation study for the '
             description+='hybrid particle-mesh vortex method in the Boussinesq approximation.'
-    
+
             super(ParticleAboveSaltArgParser, self).__init__(
                  prog_name=prog_name,
                  description=description,
@@ -357,10 +357,10 @@ if __name__=='__main__':
     parser = ParticleAboveSaltArgParser()
 
     parser.set_defaults(impl='cl', ndim=2, npts=(64,),
-                        box_origin=(0.0,), box_length=(1.0,), 
-                        tstart=0.0, tend=500.0, 
+                        box_origin=(0.0,), box_length=(1.0,),
+                        tstart=0.0, tend=500.0,
                         dt=1e-6, cfl=4.00, lcfl=0.95,
-                        dump_times=tuple(float(x) for x in range(0,500,10)),
+                        dump_times=tuple(float(x) for x in range(0, 500, 10)),
                         dump_freq=0)
 
     parser.run(compute)
diff --git a/hysop_examples/examples/particles_above_salt/particles_above_salt_bc_3d.py b/hysop_examples/examples/particles_above_salt/particles_above_salt_bc_3d.py
index ef9e427ea2912388e15b7768c84c7ff584fef6ae..2de40f948b438f733fb3ac1447ff4115d6311964 100644
--- a/hysop_examples/examples/particles_above_salt/particles_above_salt_bc_3d.py
+++ b/hysop_examples/examples/particles_above_salt/particles_above_salt_bc_3d.py
@@ -1,6 +1,6 @@
 ## See Meiburg 2012 & 2014
 ## Sediment-laden fresh water above salt water.
-    
+
 import numpy as np
 import scipy as sp
 import sympy as sm
@@ -25,7 +25,7 @@ def delta(Ys, l0):
     for Yi in Ys:
         Y0 = Y0*Yi
     return 0.1*l0*(np.random.rand(*Y0.shape)-0.5)
-    
+
 def init_concentration(data, coords, l0, component):
     assert (component==0)
     X = coords[0]
@@ -59,7 +59,7 @@ def compute(args):
 
     from hysop.methods import SpaceDiscretization, Remesh, TimeIntegrator, \
                               ComputeGranularity, Interpolation
-    
+
     from hysop.symbolic import sm, space_symbols, local_indices_symbols
     from hysop.symbolic.base import SymbolicTensor
     from hysop.symbolic.field import curl
@@ -67,7 +67,7 @@ def compute(args):
     from hysop.symbolic.misc import Select
     from hysop.symbolic.tmp import TmpScalar
     from hysop.tools.string_utils import framed_str
-    
+
     ## IO paths
     spectral_path = IO.default_path() + '/spectral'
     dump_energy_ioparams=IOParams(filepath=spectral_path, filename='E_{fname}', frequency=args.dump_freq)
@@ -83,8 +83,8 @@ def compute(args):
         Rs  = args.Rs
         n = args.npts[0]
         N = (3*n+1, n, n)
-        print 'Example configuration is Sc={}, Tau={}, Vp={}, Rs={}, n={}, N={}'.format(Sc, tau, Vp, Rs, n, N)
-        print
+        vprint('Example configuration is Sc={}, Tau={}, Vp={}, Rs={}, n={}, N={}'.format(Sc, tau, Vp, Rs, n, N))
+        vprint()
     else:
         raise NotImplementedError
 
@@ -96,7 +96,7 @@ def compute(args):
     npts=N[::-1]
     Xo=Xo[::-1]
     Xn=Xn[::-1]
-    
+
     lboundaries = (BoxBoundaryCondition.PERIODIC,)*(dim-1)+(BoxBoundaryCondition.SYMMETRIC,)
     rboundaries = (BoxBoundaryCondition.PERIODIC,)*(dim-1)+(BoxBoundaryCondition.SYMMETRIC,)
 
@@ -107,7 +107,7 @@ def compute(args):
 
     box = Box(origin=Xo, length=np.subtract(Xn,Xo),
                 lboundaries=lboundaries, rboundaries=rboundaries)
-    
+
     # Get default MPI Parameters from domain (even for serial jobs)
     mpi_params = MPIParams(comm=box.task_comm(),
                            task_id=box.current_task())
@@ -122,17 +122,17 @@ def compute(args):
     elif (impl is Implementation.OPENCL):
         # For the OpenCL implementation we need to setup the compute device
         # and configure how the code is generated and compiled at runtime.
-                
+
         # Create an explicit OpenCL context from user parameters
         from hysop.backend.device.opencl.opencl_tools import get_or_create_opencl_env
-        cl_env = get_or_create_opencl_env(mpi_params=mpi_params, 
-                                          platform_id=args.cl_platform_id, 
+        cl_env = get_or_create_opencl_env(mpi_params=mpi_params,
+                                          platform_id=args.cl_platform_id,
                                           device_id=args.cl_device_id)
-        
+
         # Configure OpenCL kernel generation and tuning (already done by HysopArgParser)
         from hysop.methods import OpenClKernelConfig
         method = { OpenClKernelConfig: args.opencl_kernel_config }
-        
+
         # Setup opencl specific extra operator keyword arguments
         extra_op_kwds['cl_env'] = cl_env
     else:
@@ -145,7 +145,7 @@ def compute(args):
     vorti = VorticityField(velocity=velo)
     C = Field(domain=box, name='C', dtype=args.dtype, lboundaries=C_lboundaries, rboundaries=C_rboundaries)
     S = Field(domain=box, name='S', dtype=args.dtype, lboundaries=S_lboundaries, rboundaries=S_rboundaries)
-    
+
     # Symbolic fields
     frame = velo.domain.frame
     Us    = velo.s(*frame.vars)
@@ -155,43 +155,43 @@ def compute(args):
     dts   = dt.s
 
     ### Build the directional operators
-    #> Directional advection 
+    #> Directional advection
     advec = DirectionalAdvection(implementation=impl,
             name='advec',
-            velocity = velo,       
+            velocity = velo,
             advected_fields = (vorti,S),
             velocity_cfl = args.cfl,
             variables = {velo: npts, vorti: npts, S: npts},
             dt=dt, **extra_op_kwds)
-   
+
     V0 = [0]*dim
     VP = [0]*dim
     VP[0] = Vp
     advec_C = DirectionalAdvection(implementation=impl,
             name='advec_C',
-            velocity = velo,       
+            velocity = velo,
             advected_fields = (C,),
             relative_velocity = VP,
             velocity_cfl = args.cfl,
             variables = {velo: npts, C: npts},
             dt=dt, **extra_op_kwds)
-    
+
     #> Stretch vorticity
     if (dim==3):
         stretch = DirectionalStretching(implementation=impl,
-                 name='stretch',
-                 pretty_name='stretch',
+                 name='S',
+                 pretty_name='S',
                  formulation = args.stretching_formulation,
-                 velocity  = velo,       
+                 velocity  = velo,
                  vorticity = vorti,
                  variables = {velo: npts, vorti: npts},
                  dt=dt, **extra_op_kwds)
     elif (dim==2):
-        stretch = None 
+        stretch = None
     else:
         msg='Unsupported dimension {}.'.format(dim)
         raise RuntimeError(msg)
-    
+
     #> Diffusion of vorticity, S and C
     diffuse_S = Diffusion(implementation=impl,
              enforce_implementation=enforce_implementation,
@@ -200,9 +200,9 @@ def compute(args):
              nu = nu_S,
              Fin = S,
              variables = {S: npts},
-             dt=dt, 
+             dt=dt,
              dump_energy=dump_energy_ioparams,
-             plot_inout_energy=IOParams(filepath=spectral_path, 
+             plot_inout_energy=IOParams(filepath=spectral_path,
                 filename='E_S_diffusion_{ite}', frequency=args.dump_freq),
              **extra_op_kwds)
     diffuse_C = Diffusion(implementation=impl,
@@ -214,7 +214,7 @@ def compute(args):
              variables = {C: npts},
              dt=dt,
              dump_energy=dump_energy_ioparams,
-             plot_inout_energy=IOParams(filepath=spectral_path, 
+             plot_inout_energy=IOParams(filepath=spectral_path,
                 filename='E_C_diffusion_{ite}', frequency=args.dump_freq),
              **extra_op_kwds)
 
@@ -225,33 +225,33 @@ def compute(args):
     lhs = Ws.diff(frame.time)
     rhs = curl(Fext, frame)
     exprs = Assignment.assign(lhs, rhs)
-    external_force = DirectionalSymbolic(name='Fext', 
+    external_force = DirectionalSymbolic(name='Fext',
                                     implementation=impl,
                                     exprs=exprs, dt=dt,
                                     variables={vorti: npts,
                                                S: npts,
                                                C: npts},
                                     **extra_op_kwds)
-    
-    splitting = StrangSplitting(splitting_dim=dim, 
+
+    splitting = StrangSplitting(splitting_dim=dim,
                     order=args.strang_order)
     splitting.push_operators(advec, advec_C, stretch, external_force)
-    
+
     ### Build standard operators
     #> Poisson operator to recover the velocity from the vorticity
     poisson = PoissonCurl(name='poisson',
-                          velocity=velo, vorticity=vorti, 
+                          velocity=velo, vorticity=vorti,
                           variables={velo:npts, vorti: npts},
                           diffusion=nu_W, dt=dt,
-                          implementation=impl, 
+                          implementation=impl,
                           enforce_implementation=enforce_implementation,
                           dump_energy=dump_energy_ioparams,
-                          plot_velocity_energy=IOParams(filepath=spectral_path, 
+                          plot_velocity_energy=IOParams(filepath=spectral_path,
                             filename='E_velocity_{ite}', frequency=args.dump_freq),
-                          plot_inout_vorticity_energy=IOParams(filepath=spectral_path, 
+                          plot_inout_vorticity_energy=IOParams(filepath=spectral_path,
                             filename='E_vorticity_{ite}', frequency=args.dump_freq),
                           **extra_op_kwds)
-    
+
     #> Operator to compute the infinite norm of the velocity
     min_max_U = MinMaxFieldStatistics(name='min_max_U', field=velo,
             Finf=True, implementation=impl, variables={velo:npts},
@@ -260,15 +260,15 @@ def compute(args):
     min_max_W = MinMaxFieldStatistics(field=vorti,
             Finf=True, implementation=impl, variables={vorti:npts},
             **extra_op_kwds)
-    
+
     #> Operators to dump all fields
     io_params = IOParams(filename='fields', frequency=args.dump_freq)
     dump_fields = HDF_Writer(name='dump',
                              io_params=io_params,
                              force_backend=Backend.OPENCL,
                              variables={vorti: npts,
-                                        velo: npts, 
-                                        C: npts, 
+                                        velo: npts,
+                                        C: npts,
                                         S: npts},
                              **extra_op_kwds)
 
@@ -276,23 +276,23 @@ def compute(args):
     adapt_dt = AdaptiveTimeStep(dt, equivalent_CFL=True,
                                     name='merge_dt', pretty_name='dt',
                                     max_dt=1.0)
-    dt_cfl = adapt_dt.push_cfl_criteria(cfl=args.cfl, 
+    dt_cfl = adapt_dt.push_cfl_criteria(cfl=args.cfl,
                                         Fmin=min_max_U.Fmin,
                                         Fmax=min_max_U.Fmax,
-                                        equivalent_CFL=True, 
+                                        equivalent_CFL=True,
                                         relative_velocities=[V0, VP],
                                         name='dt_cfl', pretty_name='CFL')
     dt_advec = adapt_dt.push_advection_criteria(lcfl=args.lcfl, Finf=min_max_W.Finf,
                                                  criteria=AdvectionCriteria.W_INF,
                                                  name='dt_lcfl', pretty_name='LCFL')
 
-    
-    ## Create the problem we want to solve and insert our 
+
+    ## Create the problem we want to solve and insert our
     # directional splitting subgraph and the standard operators.
     # The method dictionnary passed to this graph will be dispatched
     # accross all operators contained in the graph.
     method.update(
-            { 
+            {
                ComputeGranularity:    args.compute_granularity,
                SpaceDiscretization:   args.fd_order,
                TimeIntegrator:        args.time_integrator,
@@ -301,68 +301,68 @@ def compute(args):
         )
 
     problem = Problem(method=method)
-    problem.insert(poisson, 
+    problem.insert(poisson,
                    diffuse_S, diffuse_C,
                    # dump_fields,
-                   splitting, 
+                   splitting,
                    min_max_U, min_max_W, adapt_dt)
     problem.build(args)
-    
+
     # If a visu_rank was provided, and show_graph was set,
     # display the graph on the given process rank.
     if args.display_graph:
         problem.display(args.visu_rank)
-    
+
     # Create a simulation
     # (do not forget to specify the t and dt parameters here)
-    simu = Simulation(start=args.tstart, end=args.tend, 
+    simu = Simulation(start=args.tstart, end=args.tend,
                       nb_iter=args.nb_iter,
                       max_iter=args.max_iter,
                       dt0=args.dt, times_of_interest=args.times_of_interest,
                       t=t, dt=dt)
-    simu.write_parameters(t, dt_cfl, dt_advec, dt, 
+    simu.write_parameters(t, dt_cfl, dt_advec, dt,
             min_max_U.Finf, min_max_W.Finf, adapt_dt.equivalent_CFL,
             filename='parameters.txt', precision=8)
-    
+
     # Initialize vorticity, velocity, S and C on all topologies
     problem.initialize_field(field=velo,  formula=init_velocity)
     problem.initialize_field(field=vorti, formula=init_vorticity)
     problem.initialize_field(field=C,     formula=init_concentration, l0=l0)
     problem.initialize_field(field=S,     formula=init_salinity, l0=l0)
-    
+
     # Finally solve the problem
-    problem.solve(simu, dry_run=args.dry_run, 
+    problem.solve(simu, dry_run=args.dry_run,
             debug_dumper=args.debug_dumper,
             checkpoint_handler=args.checkpoint_handler)
 
-    
+
     # Finalize
     problem.finalize()
 
 
 if __name__=='__main__':
-    from hysop_examples.example_utils import HysopArgParser, colors
+    from hysop_examples.argparser import HysopArgParser, colors
 
     class ParticleAboveSaltArgParser(HysopArgParser):
         def __init__(self):
             prog_name = 'particle_above_salt_bc_3d'
-            default_dump_dir = '{}/hysop_examples/{}'.format(HysopArgParser.tmp_dir(), 
+            default_dump_dir = '{}/hysop_examples/{}'.format(HysopArgParser.tmp_dir(),
                     prog_name)
 
             description=colors.color('HySoP Particles Above Salt Example: ', fg='blue',
                     style='bold')
             description+=colors.color('[Meiburg 2014]', fg='yellow', style='bold')
-            description+=colors.color('\nSediment-laden fresh water above salt water.', 
+            description+=colors.color('\nSediment-laden fresh water above salt water.',
                     fg='yellow')
             description+='\n'
             description+='\nThis example focuses on a validation study for the '
             description+='hybrid particle-mesh vortex method in the Boussinesq approximation.'
-    
+
             super(ParticleAboveSaltArgParser, self).__init__(
                  prog_name=prog_name,
                  description=description,
                  default_dump_dir=default_dump_dir)
-        
+
         def _add_main_args(self):
             args = super(ParticleAboveSaltArgParser, self)._add_main_args()
             args.add_argument('-Sc', '--schmidt', type=float,
@@ -378,7 +378,7 @@ if __name__=='__main__':
                                 dest='Rs',
                                 help='Density expension factor.')
             return args
-        
+
         def _check_main_args(self, args):
             super(ParticleAboveSaltArgParser, self)._check_main_args(args)
             self._check_default(args, ('schmidt', 'tau', 'Vp', 'Rs'), float, allow_none=False)
@@ -395,11 +395,11 @@ if __name__=='__main__':
     parser = ParticleAboveSaltArgParser()
 
     parser.set_defaults(impl='cl', ndim=3, npts=(64,),
-                        box_origin=(0.0,), box_length=(1.0,), 
-                        tstart=0.0, tend=201.0, 
+                        box_origin=(0.0,), box_length=(1.0,),
+                        tstart=0.0, tend=201.0,
                         dt=1e-6, cfl=12.00, lcfl=0.95,
                         dump_times=(25.0, 50.0, 75.0, 100.0, 125.0, 150.0, 175.0, 200.0),
-                        dump_freq=0, 
+                        dump_freq=0,
                         schmidt=7.0, tau=25.0, Vp=0.04, Rs=2.0)
 
     parser.run(compute)
diff --git a/hysop_examples/examples/particles_above_salt/particles_above_salt_periodic.py b/hysop_examples/examples/particles_above_salt/particles_above_salt_periodic.py
index c8aa06b375a4d4597f2b099461dec589ee0aa7ef..de6030d3c64a6d32445bf8c386ad25f52540b924 100644
--- a/hysop_examples/examples/particles_above_salt/particles_above_salt_periodic.py
+++ b/hysop_examples/examples/particles_above_salt/particles_above_salt_periodic.py
@@ -1,6 +1,6 @@
 ## See Meiburg 2012 & 2014
 ## Sediment-laden fresh water above salt water.
-    
+
 import numpy as np
 import scipy as sp
 import sympy as sm
@@ -21,7 +21,7 @@ def delta(Ys, l0):
     for Yi in Ys:
         Y0 = Y0*Yi
     return 0.1*l0*(np.random.rand(*Y0.shape)-0.5)
-    
+
 def init_concentration(data, coords, l0, component):
     assert (component==0)
     X = coords[-1]
@@ -60,7 +60,7 @@ def compute(args):
 
     from hysop.methods import SpaceDiscretization, Remesh, TimeIntegrator, \
                               ComputeGranularity, Interpolation
-    
+
     from hysop.symbolic import sm, space_symbols, local_indices_symbols
     from hysop.symbolic.base import SymbolicTensor
     from hysop.symbolic.field import curl
@@ -80,7 +80,7 @@ def compute(args):
     dim = args.ndim
     npts = args.npts
     box = Box(origin=Xo, length=np.subtract(Xn,Xo))
-    
+
     # Get default MPI Parameters from domain (even for serial jobs)
     mpi_params = MPIParams(comm=box.task_comm(),
                            task_id=box.current_task())
@@ -93,17 +93,17 @@ def compute(args):
     elif (impl is Implementation.OPENCL):
         # For the OpenCL implementation we need to setup the compute device
         # and configure how the code is generated and compiled at runtime.
-                
+
         # Create an explicit OpenCL context from user parameters
         from hysop.backend.device.opencl.opencl_tools import get_or_create_opencl_env
-        cl_env = get_or_create_opencl_env(mpi_params=mpi_params, 
-                                          platform_id=args.cl_platform_id, 
+        cl_env = get_or_create_opencl_env(mpi_params=mpi_params,
+                                          platform_id=args.cl_platform_id,
                                           device_id=args.cl_device_id)
-        
+
         # Configure OpenCL kernel generation and tuning (already done by HysopArgParser)
         from hysop.methods import OpenClKernelConfig
         method = { OpenClKernelConfig: args.opencl_kernel_config }
-        
+
         # Setup opencl specific extra operator keyword arguments
         extra_op_kwds['cl_env'] = cl_env
     else:
@@ -117,7 +117,7 @@ def compute(args):
     C = Field(domain=box, name='C', dtype=args.dtype)
     S = Field(domain=box, name='S', dtype=args.dtype)
     _lambda = PenalizationField(domain=box, dtype=args.dtype)
-    
+
     # Symbolic fields
     frame = velo.domain.frame
     Us    = velo.s(*frame.vars)
@@ -134,33 +134,33 @@ def compute(args):
     lhs = Ws
     rhs = curl(penalization, frame)
     exprs = Assignment.assign(lhs, rhs)
-    penalization = DirectionalSymbolic(name='penalization', 
+    penalization = DirectionalSymbolic(name='penalization',
                                     implementation=impl,
                                     exprs=exprs,
                                     fixed_residue=Ws,
                                     variables={vorti: npts, velo: npts, _lambda: npts},
                                     **extra_op_kwds)
-    #> Directional advection 
+    #> Directional advection
     advec = DirectionalAdvection(implementation=impl,
             name='advec',
-            velocity = velo,       
+            velocity = velo,
             advected_fields = (vorti,S),
             velocity_cfl = args.cfl,
             variables = {velo: npts, vorti: npts, S: npts},
             dt=dt, **extra_op_kwds)
-   
+
     V0 = [0]*dim
     VP = [0]*dim
     VP[-1] = Vp
     advec_C = DirectionalAdvection(implementation=impl,
             name='advec_C',
-            velocity = velo,       
+            velocity = velo,
             advected_fields = (C,),
             relative_velocity = VP,
             velocity_cfl = args.cfl,
             variables = {velo: npts, C: npts},
             dt=dt, **extra_op_kwds)
-    
+
     #> Stretch and diffuse vorticity
     if (dim==3):
         stretch_diffuse = DirectionalStretchingDiffusion(implementation=impl,
@@ -168,14 +168,14 @@ def compute(args):
                  pretty_name='sdiff',
                  formulation = args.stretching_formulation,
                  viscosity = 1.0,
-                 velocity  = velo,       
+                 velocity  = velo,
                  vorticity = vorti,
                  variables = {velo: npts, vorti: npts},
                  dt=dt, **extra_op_kwds)
     elif (dim==2):
         stretch_diffuse = DirectionalDiffusion(implementation=impl,
                  name='diffuse_{}'.format(vorti.name),
-                 pretty_name=u'diff{}'.format(vorti.pretty_name.decode('utf-8')),
+                 pretty_name='diff{}'.format(vorti.pretty_name),
                  coeffs = 1.0,
                  fields  = vorti,
                  variables = {vorti: npts},
@@ -183,7 +183,7 @@ def compute(args):
     else:
         msg='Unsupported dimension {}.'.format(dim)
         raise RuntimeError(msg)
-    
+
     #> Diffusion of S and C
     diffuse_S = DirectionalDiffusion(implementation=impl,
              name='diffuse_S',
@@ -207,25 +207,25 @@ def compute(args):
     lhs = Ws.diff(frame.time)
     rhs = curl(Fext, frame)
     exprs = Assignment.assign(lhs, rhs)
-    external_force = DirectionalSymbolic(name='Fext', 
+    external_force = DirectionalSymbolic(name='Fext',
                                     implementation=impl,
                                     exprs=exprs, dt=dt,
                                     variables={vorti: npts,
                                                S: npts,
                                                C: npts},
                                     **extra_op_kwds)
-    
-    splitting = StrangSplitting(splitting_dim=dim, 
+
+    splitting = StrangSplitting(splitting_dim=dim,
                     order=args.strang_order)
-    splitting.push_operators(penalization, advec, advec_C, stretch_diffuse, 
+    splitting.push_operators(penalization, advec, advec_C, stretch_diffuse,
                                 diffuse_S, diffuse_C, external_force)
-    
+
     ### Build standard operators
     #> Poisson operator to recover the velocity from the vorticity
-    poisson = PoissonCurl(name='poisson', velocity=velo, vorticity=vorti, 
+    poisson = PoissonCurl(name='poisson', velocity=velo, vorticity=vorti,
                             variables={velo:npts, vorti: npts},
                             implementation=impl, **extra_op_kwds)
-    
+
     #> Operator to compute the infinite norm of the velocity
     min_max_U = MinMaxFieldStatistics(name='min_max_U', field=velo,
             Finf=True, implementation=impl, variables={velo:npts},
@@ -234,14 +234,14 @@ def compute(args):
     min_max_W = MinMaxFieldStatistics(field=vorti,
             Finf=True, implementation=impl, variables={vorti:npts},
             **extra_op_kwds)
-    
+
     #> Operators to dump all fields
     io_params = IOParams(filename='fields', frequency=args.dump_freq)
     dump_fields = HDF_Writer(name='dump',
                              io_params=io_params,
-                             variables={velo: npts, 
-                                        vorti: npts,    
-                                        C: npts, 
+                             variables={velo: npts,
+                                        vorti: npts,
+                                        C: npts,
                                         S: npts,
                                         _lambda: npts})
 
@@ -251,7 +251,7 @@ def compute(args):
     view[0] = (-200.0,+200.0)
     view = tuple(view)
     io_params = IOParams(filename='horizontally_averaged_profiles', frequency=0)
-    compute_mean_fields = ComputeMeanField(name='mean', 
+    compute_mean_fields = ComputeMeanField(name='mean',
             fields={C: (view, axes), S: (view, axes)},
             variables={C: npts, S: npts},
             io_params=io_params)
@@ -269,23 +269,23 @@ def compute(args):
 
     adapt_dt = AdaptiveTimeStep(dt, equivalent_CFL=True, max_dt=max_dt,
                                     name='merge_dt', pretty_name='dt', )
-    dt_cfl = adapt_dt.push_cfl_criteria(cfl=args.cfl, 
+    dt_cfl = adapt_dt.push_cfl_criteria(cfl=args.cfl,
                                         Fmin=min_max_U.Fmin,
                                         Fmax=min_max_U.Fmax,
-                                        equivalent_CFL=True, 
+                                        equivalent_CFL=True,
                                         relative_velocities=[V0, VP],
                                         name='dt_cfl', pretty_name='CFL')
     dt_advec = adapt_dt.push_advection_criteria(lcfl=args.lcfl, Finf=min_max_W.Finf,
                                                  criteria=AdvectionCriteria.W_INF,
                                                  name='dt_lcfl', pretty_name='LCFL')
 
-    
-    ## Create the problem we want to solve and insert our 
+
+    ## Create the problem we want to solve and insert our
     # directional splitting subgraph and the standard operators.
     # The method dictionnary passed to this graph will be dispatched
     # accross all operators contained in the graph.
     method.update(
-            { 
+            {
                ComputeGranularity:    args.compute_granularity,
                SpaceDiscretization:   args.fd_order,
                TimeIntegrator:        args.time_integrator,
@@ -294,65 +294,65 @@ def compute(args):
         )
 
     problem = Problem(method=method)
-    problem.insert(poisson, 
-                   splitting, 
+    problem.insert(poisson,
+                   splitting,
                    dump_fields,
                    compute_mean_fields,
-                   min_max_U, min_max_W, 
+                   min_max_U, min_max_W,
                    adapt_dt)
     problem.build(args)
-    
+
     # If a visu_rank was provided, and show_graph was set,
     # display the graph on the given process rank.
     if args.display_graph:
         problem.display(args.visu_rank)
-    
+
     # Create a simulation
     # (do not forget to specify the t and dt parameters here)
-    simu = Simulation(start=args.tstart, end=args.tend, 
+    simu = Simulation(start=args.tstart, end=args.tend,
                       nb_iter=args.nb_iter,
                       max_iter=args.max_iter,
                       dt0=args.dt, times_of_interest=args.times_of_interest,
                       t=t, dt=dt)
-    simu.write_parameters(t, dt_cfl, dt_advec, dt, 
+    simu.write_parameters(t, dt_cfl, dt_advec, dt,
             min_max_U.Finf, min_max_W.Finf, adapt_dt.equivalent_CFL,
             filename='parameters.txt', precision=8)
-    
+
     # Initialize vorticity, velocity, S and C on all topologies
     problem.initialize_field(field=velo,  formula=init_velocity)
     problem.initialize_field(field=vorti, formula=init_vorticity)
     problem.initialize_field(field=C,     formula=init_concentration, l0=l0)
     problem.initialize_field(field=S,     formula=init_salinity, l0=l0)
     problem.initialize_field(field=_lambda, formula=init_lambda)
-    
+
     # Finally solve the problem
-    problem.solve(simu, dry_run=args.dry_run, 
+    problem.solve(simu, dry_run=args.dry_run,
             debug_dumper=args.debug_dumper,
             checkpoint_handler=args.checkpoint_handler)
 
-    
+
     # Finalize
     problem.finalize()
 
 
 if __name__=='__main__':
-    from hysop_examples.example_utils import HysopArgParser, colors
+    from hysop_examples.argparser import HysopArgParser, colors
 
     class ParticleAboveSaltArgParser(HysopArgParser):
         def __init__(self):
             prog_name = 'particle_above_salt_periodic'
-            default_dump_dir = '{}/hysop_examples/periodic_{}'.format(HysopArgParser.tmp_dir(), 
+            default_dump_dir = '{}/hysop_examples/periodic_{}'.format(HysopArgParser.tmp_dir(),
                     prog_name)
 
             description=colors.color('HySoP Particles Above Salt Example: ', fg='blue',
                     style='bold')
             description+=colors.color('[Meiburg 2014]', fg='yellow', style='bold')
-            description+=colors.color('\nSediment-laden fresh water above salt water.', 
+            description+=colors.color('\nSediment-laden fresh water above salt water.',
                     fg='yellow')
             description+='\n'
             description+='\nThis example focuses on a validation study for the '
             description+='hybrid particle-mesh vortex method in the Boussinesq approximation.'
-    
+
             super(ParticleAboveSaltArgParser, self).__init__(
                  prog_name=prog_name,
                  description=description,
@@ -369,10 +369,10 @@ if __name__=='__main__':
     parser = ParticleAboveSaltArgParser()
 
     parser.set_defaults(impl='cl', ndim=2, npts=(64,),
-                        box_origin=(0.0,), box_length=(1.0,), 
-                        tstart=0.0, tend=500.0, 
+                        box_origin=(0.0,), box_length=(1.0,),
+                        tstart=0.0, tend=500.0,
                         dt=1e-6, cfl=0.5, lcfl=0.125,
-                        dump_times=tuple(float(x) for x in range(0,500,5)),
+                        dump_times=tuple(float(x) for x in range(0, 500, 5)),
                         dump_freq=0)
 
     parser.run(compute)
diff --git a/hysop_examples/examples/particles_above_salt/particles_above_salt_symmetrized.py b/hysop_examples/examples/particles_above_salt/particles_above_salt_symmetrized.py
index ba2a1ada70910b502b34b329f68a0bbd49f9f4ff..1ab89055fdf1c33632fe644c2b76bc9a6926fc48 100644
--- a/hysop_examples/examples/particles_above_salt/particles_above_salt_symmetrized.py
+++ b/hysop_examples/examples/particles_above_salt/particles_above_salt_symmetrized.py
@@ -1,6 +1,6 @@
 ## See Meiburg 2012 & 2014
 ## Sediment-laden fresh water above salt water.
-    
+
 import numpy as np
 import scipy as sp
 import sympy as sm
@@ -21,7 +21,7 @@ def delta(Ys, l0):
     for Yi in Ys:
         Y0 = Y0*Yi
     return 0.1*l0*(np.random.rand(*Y0.shape)-0.5)
-    
+
 def init_concentration(data, coords, component, l0):
     assert (component==0)
     X = coords[-1].copy()
@@ -55,7 +55,7 @@ def compute(args):
 
     from hysop.methods import SpaceDiscretization, Remesh, TimeIntegrator, \
                               ComputeGranularity, Interpolation
-    
+
     from hysop.symbolic import sm, space_symbols, local_indices_symbols
     from hysop.symbolic.base import SymbolicTensor
     from hysop.symbolic.field import curl
@@ -71,7 +71,7 @@ def compute(args):
 
     nu_S = ScalarParameter(name='nu_S', dtype=args.dtype, const=True, initial_value=1.0/Sc)
     nu_C = ScalarParameter(name='nu_C', dtype=args.dtype, const=True, initial_value=1.0/(tau*Sc))
-    nu_W = ScalarParameter(name='nu_W', dtype=args.dtype, cosnt=True, initial_value=1.0)
+    nu_W = ScalarParameter(name='nu_W', dtype=args.dtype, const=True, initial_value=1.0)
 
     # Define the domain
     dim = args.ndim
@@ -79,7 +79,7 @@ def compute(args):
     Xo = (0,0)
     Xn = (2400,750)
     box = Box(origin=Xo, length=np.subtract(Xn,Xo))
-    
+
     # Get default MPI Parameters from domain (even for serial jobs)
     mpi_params = MPIParams(comm=box.task_comm(),
                            task_id=box.current_task())
@@ -92,19 +92,19 @@ def compute(args):
     elif (impl is Implementation.OPENCL):
         # For the OpenCL implementation we need to setup the compute device
         # and configure how the code is generated and compiled at runtime.
-                
+
         # Create an explicit OpenCL context from user parameters
         from hysop.backend.device.opencl.opencl_tools import get_or_create_opencl_env
-        cl_env = get_or_create_opencl_env(mpi_params=mpi_params, 
-                                          platform_id=args.cl_platform_id, 
+        cl_env = get_or_create_opencl_env(mpi_params=mpi_params,
+                                          platform_id=args.cl_platform_id,
                                           device_id=args.cl_device_id)
 
         tg = cl_env.build_typegen(args.dtype, 'dec', False, False)
-        
+
         # Configure OpenCL kernel generation and tuning (already done by HysopArgParser)
         from hysop.methods import OpenClKernelConfig
         method = { OpenClKernelConfig: args.opencl_kernel_config }
-        
+
         # Setup opencl specific extra operator keyword arguments
         extra_op_kwds['cl_env'] = cl_env
     else:
@@ -117,7 +117,7 @@ def compute(args):
     vorti = VorticityField(velocity=velo)
     C = Field(domain=box, name='C', dtype=args.dtype)
     S = Field(domain=box, name='S', dtype=args.dtype)
-    
+
     # Symbolic fields
     frame = velo.domain.frame
     Us    = velo.s(*frame.vars)
@@ -127,22 +127,22 @@ def compute(args):
     dts   = dt.s
 
     ### Build the directional operators
-    #> Directional advection 
+    #> Directional advection
     advec = DirectionalAdvection(implementation=impl,
             name='advec',
-            velocity = velo,       
+            velocity = velo,
             advected_fields = (vorti,S),
             velocity_cfl = args.cfl,
             variables = {velo: npts, vorti: npts, S: npts},
             dt=dt, **extra_op_kwds)
-   
+
     # mirror sediment settling speed at Y=1200 (Y in [0..2400])
     VP = [0]*dim
     VP[-1] = 'select({}, {}, ({})(X<{}))'.format(tg.dump(+Vp),
                                              tg.dump(-Vp),
                                              'int' if tg.fbtype=='float' else 'long',
                                              tg.dump(1200.0))
-    
+
     V0  = [0]*dim
     pVP = [0]*dim
     mVP = [0]*dim
@@ -151,33 +151,33 @@ def compute(args):
 
     advec_C = DirectionalAdvection(implementation=impl,
             name='advec_C',
-            velocity = velo,       
+            velocity = velo,
             advected_fields = (C,),
             relative_velocity = VP,
             velocity_cfl = args.cfl,
             variables = {velo: npts, C: npts},
             dt=dt, **extra_op_kwds)
-    
+
     #> Stretch vorticity
     if (dim==3):
         stretch = DirectionalStretching(implementation=impl,
-                 name='stretch',
-                 pretty_name='stretch',
+                 name='S',
+                 pretty_name='S',
                  formulation = args.stretching_formulation,
-                 velocity  = velo,       
+                 velocity  = velo,
                  vorticity = vorti,
                  variables = {velo: npts, vorti: npts},
                  dt=dt, **extra_op_kwds)
     elif (dim==2):
-        stretch = None 
+        stretch = None
     else:
         msg='Unsupported dimension {}.'.format(dim)
         raise RuntimeError(msg)
-    
+
     #> Diffusion of vorticity, S and C
     diffuse_W = Diffusion(implementation=impl,
              name='diffuse_{}'.format(vorti.name),
-             pretty_name=u'diff{}'.format(vorti.pretty_name.decode('utf-8')),
+             pretty_name='diff{}'.format(vorti.pretty_name),
              nu = nu_W,
              Fin = vorti,
              variables = {vorti: npts},
@@ -206,7 +206,7 @@ def compute(args):
     lhs = Ws.diff(frame.time)
     rhs = curl(Fext, frame)
     exprs = Assignment.assign(lhs, rhs)
-    external_force = DirectionalSymbolic(name='Fext', 
+    external_force = DirectionalSymbolic(name='Fext',
                                     implementation=impl,
                                     exprs=exprs, dt=dt,
                                     force_residue=0,
@@ -214,16 +214,16 @@ def compute(args):
                                                S: npts,
                                                C: npts},
                                     **extra_op_kwds)
-    
-    splitting = StrangSplitting(splitting_dim=dim, 
+
+    splitting = StrangSplitting(splitting_dim=dim,
                     order=args.strang_order)
     splitting.push_operators(advec, advec_C, stretch, external_force)
-    
+
     ### Build standard operators
     #> Poisson operator to recover the velocity from the vorticity
-    poisson = PoissonCurl(name='poisson', velocity=velo, vorticity=vorti, 
+    poisson = PoissonCurl(name='poisson', velocity=velo, vorticity=vorti,
                             variables={velo:npts, vorti: npts})
-    
+
     #> Operator to compute the infinite norm of the velocity
     min_max_U = MinMaxFieldStatistics(name='min_max_U', field=velo,
             Finf=True, implementation=impl, variables={velo:npts},
@@ -232,14 +232,14 @@ def compute(args):
     min_max_W = MinMaxFieldStatistics(field=vorti,
             Finf=True, implementation=impl, variables={vorti:npts},
             **extra_op_kwds)
-    
+
     #> Operators to dump all fields
     io_params = IOParams(filename='fields', frequency=args.dump_freq)
     dump_fields = HDF_Writer(name='dump',
                              io_params=io_params,
-                             variables={velo: npts, 
-                                        vorti: npts,    
-                                        C: npts, 
+                             variables={velo: npts,
+                                        vorti: npts,
+                                        C: npts,
                                         S: npts})
 
     #> Operator to compute and save mean fields
@@ -248,7 +248,7 @@ def compute(args):
     view[0] = (+400.0,+800.0)
     view = tuple(view)
     io_params = IOParams(filename='horizontally_averaged_profiles', frequency=0)
-    compute_mean_fields = ComputeMeanField(name='mean', 
+    compute_mean_fields = ComputeMeanField(name='mean',
             fields={C: (view, axes), S: (view, axes)},
             variables={C: npts, S: npts},
             io_params=io_params)
@@ -256,23 +256,23 @@ def compute(args):
     ### Adaptive timestep operator
     adapt_dt = AdaptiveTimeStep(dt, equivalent_CFL=True, max_dt=1.0,
                                     name='merge_dt', pretty_name='dt', )
-    dt_cfl = adapt_dt.push_cfl_criteria(cfl=args.cfl, 
+    dt_cfl = adapt_dt.push_cfl_criteria(cfl=args.cfl,
                                         Fmin=min_max_U.Fmin,
                                         Fmax=min_max_U.Fmax,
-                                        equivalent_CFL=True, 
+                                        equivalent_CFL=True,
                                         relative_velocities=[V0, pVP, mVP],
                                         name='dt_cfl', pretty_name='CFL')
     dt_advec = adapt_dt.push_advection_criteria(lcfl=args.lcfl, Finf=min_max_W.Finf,
                                                  criteria=AdvectionCriteria.W_INF,
                                                  name='dt_lcfl', pretty_name='LCFL')
 
-    
-    ## Create the problem we want to solve and insert our 
+
+    ## Create the problem we want to solve and insert our
     # directional splitting subgraph and the standard operators.
     # The method dictionnary passed to this graph will be dispatched
     # accross all operators contained in the graph.
     method.update(
-            { 
+            {
                ComputeGranularity:    args.compute_granularity,
                SpaceDiscretization:   args.fd_order,
                TimeIntegrator:        args.time_integrator,
@@ -281,65 +281,65 @@ def compute(args):
         )
 
     problem = Problem(method=method)
-    problem.insert(poisson, 
+    problem.insert(poisson,
                    diffuse_W, diffuse_S, diffuse_C,
-                   splitting, 
+                   splitting,
                    dump_fields,
                    compute_mean_fields,
-                   min_max_U, min_max_W, 
+                   min_max_U, min_max_W,
                    adapt_dt)
     problem.build(args)
-    
+
     # If a visu_rank was provided, and show_graph was set,
     # display the graph on the given process rank.
     if args.display_graph:
         problem.display(args.visu_rank)
-    
+
     # Create a simulation
     # (do not forget to specify the t and dt parameters here)
-    simu = Simulation(start=args.tstart, end=args.tend, 
+    simu = Simulation(start=args.tstart, end=args.tend,
                       nb_iter=args.nb_iter,
                       max_iter=args.max_iter,
                       dt0=args.dt, times_of_interest=args.times_of_interest,
                       t=t, dt=dt)
-    simu.write_parameters(t, dt_cfl, dt_advec, dt, 
+    simu.write_parameters(t, dt_cfl, dt_advec, dt,
             min_max_U.Finf, min_max_W.Finf, adapt_dt.equivalent_CFL,
             filename='parameters.txt', precision=8)
-    
+
     # Initialize vorticity, velocity, S and C on all topologies
     problem.initialize_field(field=velo,  formula=init_velocity)
     problem.initialize_field(field=vorti, formula=init_vorticity)
     problem.initialize_field(field=C,     formula=init_concentration, l0=l0)
     problem.initialize_field(field=S,     formula=init_salinity, l0=l0)
-    
+
     # Finally solve the problem
-    problem.solve(simu, dry_run=args.dry_run, 
+    problem.solve(simu, dry_run=args.dry_run,
             debug_dumper=args.debug_dumper,
             checkpoint_handler=args.checkpoint_handler)
 
-    
+
     # Finalize
     problem.finalize()
 
 
 if __name__=='__main__':
-    from hysop_examples.example_utils import HysopArgParser, colors
+    from hysop_examples.argparser import HysopArgParser, colors
 
     class ParticleAboveSaltArgParser(HysopArgParser):
         def __init__(self):
             prog_name = 'particle_above_salt_symmetrized'
-            default_dump_dir = '{}/hysop_examples/{}'.format(HysopArgParser.tmp_dir(), 
+            default_dump_dir = '{}/hysop_examples/{}'.format(HysopArgParser.tmp_dir(),
                     prog_name)
 
             description=colors.color('HySoP Particles Above Salt Example: ', fg='blue',
                     style='bold')
             description+=colors.color('[Meiburg 2014]', fg='yellow', style='bold')
-            description+=colors.color('\nSediment-laden fresh water above salt water.', 
+            description+=colors.color('\nSediment-laden fresh water above salt water.',
                     fg='yellow')
             description+='\n'
             description+='\nThis example focuses on a validation study for the '
             description+='hybrid particle-mesh vortex method in the Boussinesq approximation.'
-    
+
             super(ParticleAboveSaltArgParser, self).__init__(
                  prog_name=prog_name,
                  description=description,
@@ -356,10 +356,10 @@ if __name__=='__main__':
     parser = ParticleAboveSaltArgParser()
 
     parser.set_defaults(impl='cl', ndim=2, npts=(64,),
-                        box_origin=(0.0,), box_length=(1.0,), 
-                        tstart=0.0, tend=500.0, 
+                        box_origin=(0.0,), box_length=(1.0,),
+                        tstart=0.0, tend=500.0,
                         dt=1e-6, cfl=0.5, lcfl=0.125,
-                        dump_times=tuple(float(x) for x in range(0,500,5)),
+                        dump_times=tuple(float(x) for x in range(0, 500, 5)),
                         dump_freq=0)
 
     parser.run(compute)
diff --git a/hysop_examples/examples/scalar_advection/levelset.py b/hysop_examples/examples/scalar_advection/levelset.py
index 1c33c6a076ccdd73579ea18c6dd2cbabfd4fed70..b8fb77a83317225eeefaa8295706426a88b335cc 100644
--- a/hysop_examples/examples/scalar_advection/levelset.py
+++ b/hysop_examples/examples/scalar_advection/levelset.py
@@ -56,7 +56,7 @@ def compute(args):
         from hysop.backend.device.opencl.opencl_tools import get_or_create_opencl_env, get_device_number
         cl_env = get_or_create_opencl_env(
             mpi_params=mpi_params,
-            platform_id=args.cl_platform_id, 
+            platform_id=args.cl_platform_id,
             device_id=box.machine_rank%get_device_number() if args.cl_device_id is None else None)
         extra_op_kwds['cl_env'] = cl_env
         method[OpenClKernelConfig] = args.opencl_kernel_config
@@ -211,7 +211,7 @@ def compute(args):
     problem.initialize_field(scalar, formula=init_scalar)
 
     # Finally solve the problem
-    problem.solve(simu, dry_run=args.dry_run, 
+    problem.solve(simu, dry_run=args.dry_run,
             debug_dumper=args.debug_dumper,
             checkpoint_handler=args.checkpoint_handler)
 
@@ -219,7 +219,7 @@ def compute(args):
 
 
 if __name__=='__main__':
-    from hysop_examples.example_utils import HysopArgParser, colors
+    from hysop_examples.argparser import HysopArgParser, colors
 
     class LevelsetArgParser(HysopArgParser):
         def __init__(self):
@@ -233,7 +233,7 @@ if __name__=='__main__':
                  prog_name=prog_name,
                  description=description,
                  default_dump_dir=default_dump_dir)
-        
+
         def _add_main_args(self):
             args = super(LevelsetArgParser, self)._add_main_args()
             args.add_argument('-b', '--bench', action='store_true',
diff --git a/hysop_examples/examples/scalar_advection/scalar_advection.py b/hysop_examples/examples/scalar_advection/scalar_advection.py
index 007a524d0941362f8963ccbd3664d5dc9a8b75f0..d3553d84796a6cc260edf58874a5d12309f31001 100644
--- a/hysop_examples/examples/scalar_advection/scalar_advection.py
+++ b/hysop_examples/examples/scalar_advection/scalar_advection.py
@@ -25,43 +25,43 @@ def compute(args):
     dim  = args.ndim
     npts = args.npts
     box  = Box(origin=args.box_origin, length=args.box_length, dim=dim)
-    
+
     # Get default MPI Parameters from domain (even for serial jobs)
     mpi_params = MPIParams(comm=box.task_comm(),
                            task_id=box.current_task())
-    
+
     # Define parameters and field (time and analytic field)
     dt     = ScalarParameter('dt', dtype=args.dtype)
     velo   = Field(domain=box, name='V', is_vector=True,  dtype=args.dtype)
     scalar = Field(domain=box, name='S', nb_components=1, dtype=args.dtype)
-    
+
     # Setup operator method dictionnary
     # Advection-Remesh operator discretization parameters
-    method = { 
+    method = {
                ComputeGranularity:  args.compute_granularity,
                TimeIntegrator:      args.time_integrator,
                Remesh:              args.remesh_kernel,
     }
-    
+
     # Setup implementation specific variables
     impl = args.impl
     extra_op_kwds = { 'mpi_params': mpi_params }
     if (impl is Implementation.OPENCL):
         # For the OpenCL implementation we need to setup the compute device
         # and configure how the code is generated and compiled at runtime.
-                
+
         # Create an explicit OpenCL context from user parameters
         from hysop.backend.device.opencl.opencl_tools import get_or_create_opencl_env, get_device_number
         cl_env = get_or_create_opencl_env(
             mpi_params=mpi_params,
-            platform_id=args.cl_platform_id, 
+            platform_id=args.cl_platform_id,
             device_id=box.machine_rank%get_device_number() if args.cl_device_id is None else None)
-        
+
         # Configure OpenCL kernel generation and tuning method
         # (already done by HysopArgParser for simplicity)
         from hysop.methods import OpenClKernelConfig
         method[OpenClKernelConfig] = args.opencl_kernel_config
-        
+
         # Setup opencl specific extra operator keyword arguments
         extra_op_kwds['cl_env'] = cl_env
     elif (impl in  (Implementation.PYTHON, Implementation.FORTRAN)):
@@ -69,20 +69,20 @@ def compute(args):
     else:
         msg='Unknown implementation \'{}\'.'.format(impl)
         raise ValueError(msg)
-    
+
     # Create the problem we want to solve
     problem = Problem(method=method)
-    
+
     if (impl is Implementation.FORTRAN):
         # The fortran scales implementation is a special case.
         # Here directional advection is a black box.
         advec = Advection(implementation=impl,
                 name='advec',
-                velocity = velo,       
+                velocity = velo,
                 advected_fields = (scalar,),
                 variables = {velo: npts, scalar: npts},
                 dt = dt, **extra_op_kwds)
-        
+
         # Finally insert our advection into the problem
         problem.insert(advec)
     else:
@@ -90,22 +90,22 @@ def compute(args):
         # here the cfl determines the maximum number of ghosts
         advec = DirectionalAdvection(implementation=impl,
                 name='advec',
-                velocity = velo,       
+                velocity = velo,
                 velocity_cfl = args.cfl,
                 advected_fields = (scalar,),
                 variables = {velo: npts, scalar: npts},
                 dt = dt, **extra_op_kwds)
 
         # Build the directional splitting operator graph
-        splitting = StrangSplitting(splitting_dim=dim, 
+        splitting = StrangSplitting(splitting_dim=dim,
                         order=args.strang_order)
         splitting.push_operators(advec)
 
         # Finally insert our splitted advection into the problem
         problem.insert(splitting)
-    
+
     # Add a writer of input field at given frequency.
-    problem.dump_inputs(fields=scalar, 
+    problem.dump_inputs(fields=scalar,
             io_params=args.io_params.clone(filename='S0'), **extra_op_kwds)
     problem.build(args)
 
@@ -113,11 +113,11 @@ def compute(args):
     # display the graph on the given process rank.
     if args.display_graph:
         problem.display(args.visu_rank)
-    
+
     # Initialize discrete velocity and scalar field
     problem.initialize_field(velo,   formula=init_velocity)
     problem.initialize_field(scalar, formula=init_scalar)
-    
+
     # Determine a timestep using the supplied CFL
     # (velocity is constant for the whole simulation)
     dx   = problem.get_input_discrete_field(scalar).space_step.min()
@@ -126,27 +126,27 @@ def compute(args):
     if (args.dt is not None):
         dt0 = min(args.dt, dt0)
     dt0  = 0.99*dt0
-    
-    # Create a simulation and solve the problem 
+
+    # Create a simulation and solve the problem
     # (do not forget to specify the dt parameter here)
-    simu = Simulation(start=args.tstart, end=args.tend, 
+    simu = Simulation(start=args.tstart, end=args.tend,
                       nb_iter=args.nb_iter,
                       max_iter=args.max_iter,
                       times_of_interest=args.times_of_interest,
                       dt=dt, dt0=dt0)
-    
+
     # Finally solve the problem
-    problem.solve(simu, dry_run=args.dry_run, 
+    problem.solve(simu, dry_run=args.dry_run,
             debug_dumper=args.debug_dumper,
             checkpoint_handler=args.checkpoint_handler)
 
-    
+
     # Finalize
     problem.finalize()
 
 
 if __name__=='__main__':
-    from hysop_examples.example_utils import HysopArgParser, colors
+    from hysop_examples.argparser import HysopArgParser, colors
 
     class ScalarAdvectionArgParser(HysopArgParser):
         def __init__(self):
@@ -157,12 +157,12 @@ if __name__=='__main__':
             description+='Advect a scalar by a given constant velocity. '
             description+='\n\nThe advection operator is directionally splitted resulting '
             description+='in the use of one or more advection-remesh operators per direction.'
-    
+
             super(ScalarAdvectionArgParser, self).__init__(
                  prog_name=prog_name,
                  description=description,
                  default_dump_dir=default_dump_dir)
-        
+
         def _add_main_args(self):
             args = super(ScalarAdvectionArgParser, self)._add_main_args()
             args.add_argument('-vel', '--velocity', type=str,
@@ -170,11 +170,11 @@ if __name__=='__main__':
                                 dest='velocity',
                                 help='Velocity components.')
             return args
-        
+
         def _check_main_args(self, args):
             super(ScalarAdvectionArgParser, self)._check_main_args(args)
             self._check_default(args, 'velocity', tuple, allow_none=False)
-    
+
         def _setup_parameters(self, args):
             super(ScalarAdvectionArgParser, self)._setup_parameters(args)
             if len(args.velocity) == 1:
@@ -182,7 +182,7 @@ if __name__=='__main__':
 
     parser = ScalarAdvectionArgParser()
 
-    parser.set_defaults(box_origin=(0.0,), box_length=(2*np.pi,), 
+    parser.set_defaults(box_origin=(0.0,), box_length=(2*np.pi,),
                        tstart=0.0, tend=2*np.pi, npts=(128,),
                        dump_freq=5, cfl=0.5, velocity=(1.0,))
 
diff --git a/hysop_examples/examples/scalar_diffusion/scalar_diffusion.py b/hysop_examples/examples/scalar_diffusion/scalar_diffusion.py
index f4bc37cd9977eff9f56c2a3f1d3a575d020187d3..313c6f671824043a39c60b398af9b3464d55f811 100755
--- a/hysop_examples/examples/scalar_diffusion/scalar_diffusion.py
+++ b/hysop_examples/examples/scalar_diffusion/scalar_diffusion.py
@@ -66,7 +66,7 @@ def compute(args):
         from hysop.backend.device.opencl.opencl_tools import get_or_create_opencl_env, get_device_number
         cl_env = get_or_create_opencl_env(
             mpi_params=mpi_params,
-            platform_id=args.cl_platform_id, 
+            platform_id=args.cl_platform_id,
             device_id=box.machine_rank%get_device_number() if args.cl_device_id is None else None)
 
         # Configure OpenCL kernel generation and tuning (already done by HysopArgParser)
@@ -91,7 +91,7 @@ def compute(args):
     else:
         msg='Unknown implementation \'{}\'.'.format(impl)
         raise ValueError(msg)
-    
+
     io_params = args.io_params.clone(filename='field')
     problem.dump_inputs(fields=scalar, io_params=io_params, **extra_op_kwds)
     problem.build(args)
@@ -100,7 +100,7 @@ def compute(args):
     # display the graph on the given process rank.
     if args.display_graph:
         problem.display(args.visu_rank)
-    
+
     # Initialize discrete scalar field
     problem.initialize_field(scalar, formula=init_scalar)
 
@@ -112,7 +112,7 @@ def compute(args):
                       dt=dt, dt0=args.dt)
 
     # Finally solve the problem
-    problem.solve(simu, dry_run=args.dry_run, 
+    problem.solve(simu, dry_run=args.dry_run,
             debug_dumper=args.debug_dumper,
             checkpoint_handler=args.checkpoint_handler)
 
@@ -121,7 +121,7 @@ def compute(args):
 
 
 if __name__=='__main__':
-    from hysop_examples.example_utils import HysopArgParser, colors
+    from hysop_examples.argparser import HysopArgParser, colors
 
     class ScalarDiffusionArgParser(HysopArgParser):
         def __init__(self):
diff --git a/hysop_examples/examples/sediment_deposit/sediment_deposit.py b/hysop_examples/examples/sediment_deposit/sediment_deposit.py
index 917c712897277870097da6c6787529850cc2c3e5..24bf0484d954894cae41f53cf1f3800ab48baa45 100644
--- a/hysop_examples/examples/sediment_deposit/sediment_deposit.py
+++ b/hysop_examples/examples/sediment_deposit/sediment_deposit.py
@@ -26,7 +26,7 @@ def init_sediment(data, coords, nblobs, rblob):
     coords = coords[0]
     X, Y = coords
     R2 = rblob * rblob
-    
+
     cache_file='/tmp/C_init_{}_{}'.format('_'.join(str(x) for x in data.shape),
             str(abs(hash((TANK_RATIO,nblobs,rblob)))))
     try:
@@ -40,33 +40,32 @@ def init_sediment(data, coords, nblobs, rblob):
         Rx,  Ry  = 2+int(rblob/dx), 2+int(rblob/dy)
         assert (rblob>=dx), 'Sediment radius < dx.'
         assert (rblob>=dy), 'Sediment radius < dy.'
-        
+
         Bx = 1*np.random.rand(nblobs)
         By = 1*np.random.rand(nblobs)
         Ix = np.floor(Bx/dx).astype(np.int32)
         Iy = np.floor(By/dy).astype(np.int32)
         Px = Bx - Ix*dx
         Py = By - Iy*dy
-        
+
         from hysop.tools.numba_utils import make_numba_signature
         args = (Ix, Iy, Bx, By, data)
         signature, _ = make_numba_signature(*args)
-         
+
         @nb.guvectorize([signature],
-            '(n),(n),(n),(n),(n0,n1)', 
+            '(n),(n),(n),(n),(n0,n1)',
             target='parallel',
             nopython=True, cache=True)
         def iter_blobs(Ix, Iy, Bx, By, data):
-            for k in xrange(nblobs):
-                #print 'blob {}/{}'.format(k+1, nblobs)
+            for k in range(nblobs):
                 ix, iy = Ix[k], Iy[k]
                 px, py = Px[k], Py[k]
-                for i in xrange(-Ry, +Ry):
+                for i in range(-Ry, +Ry):
                     ii = iy+i
                     if (ii<0) or (ii>=Ny):
                         continue
                     dy2 = (py + i*dy)**2
-                    for j in xrange(-Rx, +Rx):
+                    for j in range(-Rx, +Rx):
                         jj = ix+j
                         if (jj<0) or (jj>=Nx):
                             continue
@@ -107,7 +106,7 @@ def compute(args):
 
     from hysop.methods import SpaceDiscretization, Remesh, TimeIntegrator, \
                               ComputeGranularity, Interpolation
-    
+
     from hysop.numerics.odesolvers.runge_kutta import Euler, RK2, RK3, RK4
     from hysop.symbolic import sm, space_symbols, local_indices_symbols
     from hysop.symbolic.base import SymbolicTensor
@@ -140,7 +139,7 @@ def compute(args):
 
     box = Box(origin=Xo, length=np.subtract(Xn,Xo),
                 lboundaries=lboundaries, rboundaries=rboundaries)
-    
+
     # Get default MPI Parameters from domain (even for serial jobs)
     mpi_params = MPIParams(comm=box.task_comm(),
                            task_id=box.current_task())
@@ -154,18 +153,18 @@ def compute(args):
     elif (impl is Implementation.OPENCL):
         # For the OpenCL implementation we need to setup the compute device
         # and configure how the code is generated and compiled at runtime.
-                
+
         # Create an explicit OpenCL context from user parameters
         from hysop.backend.device.opencl.opencl_tools import get_or_create_opencl_env, get_device_number
         cl_env = get_or_create_opencl_env(
             mpi_params=mpi_params,
-            platform_id=args.cl_platform_id, 
+            platform_id=args.cl_platform_id,
             device_id=box.machine_rank%get_device_number() if args.cl_device_id is None else None)
-        
+
         # Configure OpenCL kernel generation and tuning (already done by HysopArgParser)
         from hysop.methods import OpenClKernelConfig
         method = { OpenClKernelConfig: args.opencl_kernel_config }
-        
+
         # Setup opencl specific extra operator keyword arguments
         extra_op_kwds['cl_env'] = cl_env
     else:
@@ -178,7 +177,7 @@ def compute(args):
     vorti = VorticityField(velocity=velo)
     S = Field(domain=box, name='S', dtype=args.dtype)
             #lboundaries=S_lboundaries, rboundaries=S_rboundaries)
-    
+
     # Symbolic fields
     frame = velo.domain.frame
     Us    = velo.s(*frame.vars)
@@ -187,40 +186,40 @@ def compute(args):
     dts   = dt.s
 
     ### Build the directional operators
-    #> Directional advection 
+    #> Directional advection
     advec = DirectionalAdvection(implementation=impl,
             name='advec',
-            velocity = velo,       
+            velocity = velo,
             advected_fields = (vorti, S),
             velocity_cfl = args.cfl,
-            variables = {velo: npts, 
-                         vorti: npts, 
+            variables = {velo: npts,
+                         vorti: npts,
                          S: npts},
             dt=dt, **extra_op_kwds)
-   
+
     #> Stretch vorticity
     if (dim==3):
         stretch = DirectionalStretching(implementation=impl,
-                 name='stretch',
-                 pretty_name='stretch',
+                 name='S',
+                 pretty_name='S',
                  formulation = args.stretching_formulation,
-                 velocity  = velo,       
+                 velocity  = velo,
                  vorticity = vorti,
                  variables = {velo: npts, vorti: npts},
                  dt=dt, **extra_op_kwds)
     elif (dim==2):
-        stretch = None 
+        stretch = None
     else:
         msg='Unsupported dimension {}.'.format(dim)
         raise RuntimeError(msg)
-    
-    splitting = StrangSplitting(splitting_dim=dim, 
+
+    splitting = StrangSplitting(splitting_dim=dim,
                     order=args.strang_order)
     splitting.push_operators(advec, stretch)
-    
+
     ### Build standard operators
     #> Poisson operator to recover the velocity from the vorticity
-    poisson = PoissonCurl(name='poisson', velocity=velo, vorticity=vorti, 
+    poisson = PoissonCurl(name='poisson', velocity=velo, vorticity=vorti,
                             variables={velo:npts, vorti: npts},
                             diffusion=nu_W, dt=dt,
                             implementation=impl,
@@ -231,13 +230,13 @@ def compute(args):
     g = 9.81
     Fext = SymbolicExternalForce(name='S', Fext=(0,-g*Ss),
                                    diffusion = {S: nu_S})
-    external_force = SpectralExternalForce(name='Fext', 
+    external_force = SpectralExternalForce(name='Fext',
                                    vorticity=vorti, dt=dt,
                                    Fext=Fext, Finf=True,
                                    implementation=impl,
                                    variables={vorti: npts, S: npts},
                                    **extra_op_kwds)
-    
+
     #> Operator to compute the infinite norm of the velocity
     min_max_U = MinMaxFieldStatistics(name='min_max_U', field=velo,
             Finf=True, implementation=impl, variables={velo:npts},
@@ -246,13 +245,13 @@ def compute(args):
     min_max_W = MinMaxFieldStatistics(field=vorti,
             Finf=True, implementation=impl, variables={vorti:npts},
             **extra_op_kwds)
-    
+
     #> Operators to dump all fields
     io_params = IOParams(filename='fields', frequency=args.dump_freq)
     dump_fields = HDF_Writer(name='dump',
                              io_params=io_params,
                              force_backend=Backend.OPENCL,
-                             variables={velo: npts, 
+                             variables={velo: npts,
                                         vorti: npts,
                                         S: npts},
                              **extra_op_kwds)
@@ -262,7 +261,7 @@ def compute(args):
     view = [slice(None,None,None),]*dim
     view = tuple(view)
     io_params = IOParams(filename='horizontally_averaged_profiles', frequency=0)
-    compute_mean_fields = ComputeMeanField(name='mean', 
+    compute_mean_fields = ComputeMeanField(name='mean',
             fields={S: (view, axes)},
             variables={S: npts},
             io_params=io_params)
@@ -270,9 +269,9 @@ def compute(args):
     ### Adaptive timestep operator
     adapt_dt = AdaptiveTimeStep(dt, equivalent_CFL=True,
                                     name='merge_dt', pretty_name='dt')
-    dt_cfl = adapt_dt.push_cfl_criteria(cfl=args.cfl, 
+    dt_cfl = adapt_dt.push_cfl_criteria(cfl=args.cfl,
                                         Finf=min_max_U.Finf,
-                                        equivalent_CFL=True, 
+                                        equivalent_CFL=True,
                                         name='dt_cfl', pretty_name='CFL')
     dt_advec = adapt_dt.push_advection_criteria(lcfl=args.lcfl, Finf=min_max_W.Finf,
                                                  criteria=AdvectionCriteria.W_INF,
@@ -281,13 +280,13 @@ def compute(args):
                                         Finf=external_force.Finf,
                                         name='dt_force', pretty_name='FEXT')
 
-    
-    ## Create the problem we want to solve and insert our 
+
+    ## Create the problem we want to solve and insert our
     # directional splitting subgraph and the standard operators.
     # The method dictionnary passed to this graph will be dispatched
     # accross all operators contained in the graph.
     method.update(
-            { 
+            {
                ComputeGranularity:    args.compute_granularity,
                SpaceDiscretization:   args.fd_order,
                TimeIntegrator:        args.time_integrator,
@@ -296,53 +295,53 @@ def compute(args):
         )
 
     problem = Problem(method=method)
-    problem.insert(poisson, 
+    problem.insert(poisson,
                    dump_fields,
                    min_max_U, min_max_W, adapt_dt,
-                   splitting, 
+                   splitting,
                    compute_mean_fields,
                    external_force)
     problem.build(args)
-    
+
     # If a visu_rank was provided, and show_graph was set,
     # display the graph on the given process rank.
     if args.display_graph:
         problem.display(args.visu_rank)
-    
+
     # Create a simulation
     # (do not forget to specify the t and dt parameters here)
-    simu = Simulation(start=args.tstart, end=args.tend, 
+    simu = Simulation(start=args.tstart, end=args.tend,
                       nb_iter=args.nb_iter,
                       max_iter=args.max_iter,
                       dt0=args.dt, times_of_interest=args.times_of_interest,
                       t=t, dt=dt)
-    simu.write_parameters(t, dt_cfl, dt_advec, dt, 
+    simu.write_parameters(t, dt_cfl, dt_advec, dt,
             min_max_U.Finf, min_max_W.Finf, adapt_dt.equivalent_CFL,
             filename='parameters.txt', precision=8)
-    
+
     # Initialize vorticity, velocity, S on all topologies
     problem.initialize_field(field=velo,  formula=init_velocity)
     problem.initialize_field(field=vorti, formula=init_vorticity)
-    problem.initialize_field(field=S,     formula=init_sediment, 
+    problem.initialize_field(field=S,     formula=init_sediment,
             nblobs=nblobs, rblob=rblob, without_ghosts=True)
-    
+
     # Finally solve the problem
-    problem.solve(simu, dry_run=args.dry_run, 
+    problem.solve(simu, dry_run=args.dry_run,
             debug_dumper=args.debug_dumper,
             checkpoint_handler=args.checkpoint_handler)
 
-    
+
     # Finalize
     problem.finalize()
 
 
 if __name__=='__main__':
-    from hysop_examples.example_utils import HysopArgParser, colors
+    from hysop_examples.argparser import HysopArgParser, colors
 
     class ParticleAboveSaltArgParser(HysopArgParser):
         def __init__(self):
             prog_name = 'sediment_deposit'
-            default_dump_dir = '{}/hysop_examples/{}'.format(HysopArgParser.tmp_dir(), 
+            default_dump_dir = '{}/hysop_examples/{}'.format(HysopArgParser.tmp_dir(),
                     prog_name)
 
             description=colors.color('HySoP Sediment Deposit Example: ', fg='blue',
@@ -350,7 +349,7 @@ if __name__=='__main__':
             description+='\n'
             description+='\nThis example focuses on a validation study for the '
             description+='hybrid particle-mesh vortex method for sediment deposit.'
-    
+
             super(ParticleAboveSaltArgParser, self).__init__(
                  prog_name=prog_name,
                  description=description,
@@ -368,7 +367,7 @@ if __name__=='__main__':
 
     parser.set_defaults(impl='cl', ndim=2,
                         npts=(TANK_RATIO*DISCRETIZATION+1,DISCRETIZATION+1),
-                        box_origin=(0.0,), box_length=(1.0,), 
+                        box_origin=(0.0,), box_length=(1.0,),
                         tstart=0.0, tend=20.0,
                         dt=1e-6, cfl=32.0, lcfl=0.90,
                         #dump_times=tuple(float(x) for x in range(0,100000,1000)),
diff --git a/hysop_examples/examples/sediment_deposit/sediment_deposit_levelset.py b/hysop_examples/examples/sediment_deposit/sediment_deposit_levelset.py
index 858abd9d07a3c9ce86a3eaa79aaaa9d51f91991b..b5bb7d92c236f629b8d0956dff859054f9574156 100644
--- a/hysop_examples/examples/sediment_deposit/sediment_deposit_levelset.py
+++ b/hysop_examples/examples/sediment_deposit/sediment_deposit_levelset.py
@@ -31,7 +31,7 @@ def init_phi(data, coords, nblobs, rblob, component):
     Bx = np.random.rand(nblobs)
     By = TANK_RATIO*np.random.rand(nblobs)
     R2 = rblob * rblob
-    
+
     cache_file='/tmp/C_init_ls_{}_{}'.format('_'.join(str(x) for x in data.shape),
             str(abs(hash((BLOB_INIT, NB, TANK_RATIO, nblobs, rblob)))))
     try:
@@ -50,44 +50,43 @@ def init_phi(data, coords, nblobs, rblob, component):
         vprint('  *Initializing sediments of radius {} with {} random blobs.'.format(rblob, nblobs))
         np.savez_compressed(file=cache_file, data=data)
         vprint('  *Caching data to "{}.npz".'.format(cache_file))
-        
+
         X,   Y   = X.ravel(), Y.ravel()
         dx,  dy  = X[1]-X[0], Y[1]-Y[0]
         Nx,  Ny  = X.size, Y.size
         Rx,  Ry  = 2*(int(rblob/dx)+1), 2*(int(rblob/dy)+1)
         assert (rblob>=dx), 'Sediment radius < dx.'
         assert (rblob>=dy), 'Sediment radius < dy.'
-        
+
         Ix = np.floor(Bx/dx).astype(np.int32)
         Iy = np.floor(By/dy).astype(np.int32)
         Px = Bx - Ix*dx
         Py = By - Iy*dy
-        
+
         from hysop.tools.numba_utils import make_numba_signature
         args = (Ix, Iy, Bx, By, data)
         signature, _ = make_numba_signature(*args)
-         
+
         @nb.guvectorize([signature],
-            '(n),(n),(n),(n),(n0,n1)', 
+            '(n),(n),(n),(n),(n0,n1)',
             target='parallel',
             nopython=True, cache=True)
         def iter_blobs(Ix, Iy, Bx, By, data):
-            for k in xrange(nblobs):
-                #print 'blob {}/{}'.format(k+1, nblobs)
+            for k in range(nblobs):
                 ix, iy = Ix[k], Iy[k]
                 px, py = Px[k], Py[k]
-                for i in xrange(-Ry, +Ry):
+                for i in range(-Ry, +Ry):
                     ii = iy+i
                     if (ii<0) or (ii>=Ny):
                         continue
                     dy2 = (py + i*dy)**2 / R2
-                    for j in xrange(-Rx, +Rx):
+                    for j in range(-Rx, +Rx):
                         jj = ix+j
                         if (jj<0) or (jj>=Nx):
                             continue
                         dx2 = (px - j*dx)**2 / R2
                         d = dx2 + dy2 - 1
-                        if (d<data[ii,jj]): 
+                        if (d<data[ii,jj]):
                             data[ii,jj] = d
 
         vprint('  *Initializing sediments of radius {} with {} random blobs.'.format(rblob, nblobs))
@@ -123,7 +122,7 @@ def compute(args):
 
     from hysop.methods import SpaceDiscretization, Remesh, TimeIntegrator, \
                               ComputeGranularity, Interpolation
-    
+
     from hysop.numerics.odesolvers.runge_kutta import Euler, RK2, RK3, RK4
     from hysop.symbolic import sm, space_symbols, local_indices_symbols
     from hysop.symbolic.base import SymbolicTensor
@@ -157,7 +156,7 @@ def compute(args):
 
     box = Box(origin=Xo, length=np.subtract(Xn,Xo),
                 lboundaries=lboundaries, rboundaries=rboundaries)
-    
+
     # Get default MPI Parameters from domain (even for serial jobs)
     mpi_params = MPIParams(comm=box.task_comm(),
                            task_id=box.current_task())
@@ -171,18 +170,18 @@ def compute(args):
     elif (impl is Implementation.OPENCL):
         # For the OpenCL implementation we need to setup the compute device
         # and configure how the code is generated and compiled at runtime.
-                
+
         # Create an explicit OpenCL context from user parameters
         from hysop.backend.device.opencl.opencl_tools import get_or_create_opencl_env, get_device_number
         cl_env = get_or_create_opencl_env(
             mpi_params=mpi_params,
-            platform_id=args.cl_platform_id, 
+            platform_id=args.cl_platform_id,
             device_id=box.machine_rank%get_device_number() if args.cl_device_id is None else None)
-        
+
         # Configure OpenCL kernel generation and tuning (already done by HysopArgParser)
         from hysop.methods import OpenClKernelConfig
         method = { OpenClKernelConfig: args.opencl_kernel_config }
-        
+
         # Setup opencl specific extra operator keyword arguments
         extra_op_kwds['cl_env'] = cl_env
     else:
@@ -196,7 +195,7 @@ def compute(args):
     phi   = LevelSetField(domain=box, dtype=args.dtype, **S_boundaries)
     S     = DensityField(name='S', domain=box, dtype=args.dtype, **S_boundaries)
     Sv    = VolumicIntegrationParameter(field=S)
-    
+
     # Symbolic fields
     frame = velo.domain.frame
     Us    = velo.s(*frame.vars)
@@ -206,17 +205,17 @@ def compute(args):
     dts   = dt.s
 
     ### Build the directional operators
-    #> Directional advection 
+    #> Directional advection
     advec = DirectionalAdvection(implementation=impl,
             name='advec',
-            velocity = velo,       
+            velocity = velo,
             advected_fields = (vorti, phi),
             velocity_cfl = args.cfl,
-            variables = {velo:  npts, 
-                         vorti: npts, 
+            variables = {velo:  npts,
+                         vorti: npts,
                          phi:   npts},
             dt=dt, **extra_op_kwds)
-   
+
     #> Recompute density from levelset
     dx = np.max(np.divide(box.length, np.asarray(args.npts)-1))
     S1, S2 = 0.5, 0.0
@@ -232,7 +231,7 @@ def compute(args):
     #e0 = Assignment(pi, np.pi)
     #e1 = Assignment(eps, 5*dx)
     #e2 = Assignment(x, phis*SEDIMENT_RADIUS)
-    #e3 = Assignment(H, H_eps) 
+    #e3 = Assignment(H, H_eps)
     #e4 = Assignment(Ss, S1 + (S2-S1)*H)
     #exprs = (e0,e1,e2,e3,e4)
     if BLOB_INIT:
@@ -241,33 +240,33 @@ def compute(args):
         e = Assignment(Ss, 0.5*LogicalGT(phis, 0.5))
         #e = Assignment(Ss, 0.5*LogicalLE(phis, 0))
     exprs = (e,)
-    eval_fields = DirectionalSymbolic(name='eval_fields', 
-                                    pretty_name=u'{}({})'.format(
-                                        phi.pretty_name.decode('utf-8'), 
-                                        S.pretty_name.decode('utf-8')),
+    eval_fields = DirectionalSymbolic(name='eval_fields',
+                                    pretty_name='{}({})'.format(
+                                        phi.pretty_name,
+                                        S.pretty_name),
                                     no_split=True,
                                     implementation=impl,
                                     exprs=exprs, dt=dt,
                                     variables={phi: npts,
                                                S: npts},
                                     **extra_op_kwds)
-   
+
     #> Stretch vorticity
     if (dim==3):
         stretch = DirectionalStretching(implementation=impl,
-                 name='stretch',
-                 pretty_name='stretch',
+                 name='S',
+                 pretty_name='S',
                  formulation = args.stretching_formulation,
-                 velocity  = velo,       
+                 velocity  = velo,
                  vorticity = vorti,
                  variables = {velo: npts, vorti: npts},
                  dt=dt, **extra_op_kwds)
     elif (dim==2):
-        stretch = None 
+        stretch = None
     else:
         msg='Unsupported dimension {}.'.format(dim)
         raise RuntimeError(msg)
-    
+
     #> External force rot(-S*g)
     Fext = np.zeros(shape=(dim,), dtype=object).view(SymbolicTensor)
     fext = -Ss
@@ -275,26 +274,26 @@ def compute(args):
     lhs = Ws.diff(frame.time)
     rhs = curl(Fext, frame)
     exprs = Assignment.assign(lhs, rhs)
-    external_force = DirectionalSymbolic(name='Fext', 
+    external_force = DirectionalSymbolic(name='Fext',
                                     implementation=impl,
                                     exprs=exprs, dt=dt,
                                     variables={vorti: npts,
                                                S: npts},
                                     **extra_op_kwds)
-    
-    splitting = StrangSplitting(splitting_dim=dim, 
+
+    splitting = StrangSplitting(splitting_dim=dim,
                     order=args.strang_order)
     splitting.push_operators(advec, eval_fields, stretch, external_force)
-    
+
     ### Build standard operators
     #> Poisson operator to recover the velocity from the vorticity
-    poisson = PoissonCurl(name='poisson', velocity=velo, vorticity=vorti, 
+    poisson = PoissonCurl(name='poisson', velocity=velo, vorticity=vorti,
                             variables={velo:npts, vorti: npts},
                             diffusion=nu_W, dt=dt,
                             implementation=impl,
                             enforce_implementation=enforce_implementation,
                             **extra_op_kwds)
-    
+
     #> Operator to compute the infinite norm of the velocity
     min_max_U = MinMaxFieldStatistics(name='min_max_U', field=velo,
             Finf=True, implementation=impl, variables={velo:npts},
@@ -303,18 +302,18 @@ def compute(args):
     min_max_W = MinMaxFieldStatistics(field=vorti,
             Finf=True, implementation=impl, variables={vorti:npts},
             **extra_op_kwds)
-    
+
     #> Operators to compute the integrated density
     integrate_S = Integrate(field=S, variables={S: npts},
                                     parameter=Sv, scaling='volumic', cst=2,
                                     implementation=impl, **extra_op_kwds)
-    
+
     #> Operators to dump all fields
     io_params = IOParams(filename='fields', frequency=args.dump_freq)
     dump_fields = HDF_Writer(name='dump',
                              io_params=io_params,
                              force_backend=Backend.OPENCL,
-                             variables={#velo: npts, 
+                             variables={#velo: npts,
                                         #vorti: npts,
                                         phi: npts,
                                         S: npts},
@@ -325,7 +324,7 @@ def compute(args):
     view = [slice(None,None,None),]*dim
     view = tuple(view)
     io_params = IOParams(filename='horizontally_averaged_profiles', frequency=0)
-    compute_mean_fields = ComputeMeanField(name='mean', 
+    compute_mean_fields = ComputeMeanField(name='mean',
             fields={S: (view, axes)},
             variables={S: npts},
             io_params=io_params)
@@ -334,21 +333,21 @@ def compute(args):
     adapt_dt = AdaptiveTimeStep(dt, equivalent_CFL=True,
                                     name='merge_dt', pretty_name='dt',
                                     max_dt=1e-1)
-    dt_cfl = adapt_dt.push_cfl_criteria(cfl=args.cfl, 
+    dt_cfl = adapt_dt.push_cfl_criteria(cfl=args.cfl,
                                         Finf=min_max_U.Finf,
-                                        equivalent_CFL=True, 
+                                        equivalent_CFL=True,
                                         name='dt_cfl', pretty_name='CFL')
     dt_advec = adapt_dt.push_advection_criteria(lcfl=args.lcfl, Finf=min_max_W.Finf,
                                                  criteria=AdvectionCriteria.W_INF,
                                                  name='dt_lcfl', pretty_name='LCFL')
 
-    
-    ## Create the problem we want to solve and insert our 
+
+    ## Create the problem we want to solve and insert our
     # directional splitting subgraph and the standard operators.
     # The method dictionnary passed to this graph will be dispatched
     # accross all operators contained in the graph.
     method.update(
-            { 
+            {
                ComputeGranularity:    args.compute_granularity,
                SpaceDiscretization:   args.fd_order,
                TimeIntegrator:        args.time_integrator,
@@ -357,53 +356,53 @@ def compute(args):
         )
 
     problem = Problem(method=method)
-    problem.insert(poisson, 
+    problem.insert(poisson,
                    min_max_U, min_max_W, adapt_dt,
-                   splitting, 
+                   splitting,
                    integrate_S,
                    dump_fields,
                    compute_mean_fields)
     problem.build(args)
-    
+
     # If a visu_rank was provided, and show_graph was set,
     # display the graph on the given process rank.
     if args.display_graph:
         problem.display(args.visu_rank)
-    
+
     # Create a simulation
     # (do not forget to specify the t and dt parameters here)
-    simu = Simulation(start=args.tstart, end=args.tend, 
+    simu = Simulation(start=args.tstart, end=args.tend,
                       nb_iter=args.nb_iter,
                       max_iter=args.max_iter,
                       dt0=args.dt, times_of_interest=args.times_of_interest,
                       t=t, dt=dt)
-    simu.write_parameters(t, dt_cfl, dt_advec, dt, 
+    simu.write_parameters(t, dt_cfl, dt_advec, dt,
             min_max_U.Finf, min_max_W.Finf, adapt_dt.equivalent_CFL,
             filename='parameters.txt', precision=8)
-    
+
     # Initialize vorticity, velocity, S on all topologies
     problem.initialize_field(field=velo,  formula=init_velocity)
     problem.initialize_field(field=vorti, formula=init_vorticity)
-    problem.initialize_field(field=phi,   formula=init_phi, nblobs=nblobs, rblob=rblob, 
+    problem.initialize_field(field=phi,   formula=init_phi, nblobs=nblobs, rblob=rblob,
             without_ghosts=BLOB_INIT)
-    
+
     # Finally solve the problem
-    problem.solve(simu, dry_run=args.dry_run, 
+    problem.solve(simu, dry_run=args.dry_run,
             debug_dumper=args.debug_dumper,
             checkpoint_handler=args.checkpoint_handler)
 
-    
+
     # Finalize
     problem.finalize()
 
 
 if __name__=='__main__':
-    from hysop_examples.example_utils import HysopArgParser, colors
+    from hysop_examples.argparser import HysopArgParser, colors
 
     class ParticleAboveSaltArgParser(HysopArgParser):
         def __init__(self):
             prog_name = 'sediment_deposit_levelset'
-            default_dump_dir = '{}/hysop_examples/{}'.format(HysopArgParser.tmp_dir(), 
+            default_dump_dir = '{}/hysop_examples/{}'.format(HysopArgParser.tmp_dir(),
                     prog_name)
 
             description=colors.color('HySoP Sediment Deposit Levelset Example: ', fg='blue',
@@ -412,7 +411,7 @@ if __name__=='__main__':
             description+='\nThis example focuses on a validation study for the '
             description+='hybrid particle-mesh vortex method for sediment deposit '
             description+='using the levelset method.'
-    
+
             super(ParticleAboveSaltArgParser, self).__init__(
                  prog_name=prog_name,
                  description=description,
@@ -430,7 +429,7 @@ if __name__=='__main__':
 
     parser.set_defaults(impl='cl', ndim=2,
                         npts=(TANK_RATIO*DISCRETIZATION+1,DISCRETIZATION+1),
-                        box_origin=(0.0,), box_length=(1.0,), 
+                        box_origin=(0.0,), box_length=(1.0,),
                         tstart=0.0, tend=100.1,
                         dt=1e-6, cfl=0.50, lcfl=0.50,
                         dump_times=tuple(float(1*x) for x in range(100)),
diff --git a/hysop_examples/examples/shear_layer/shear_layer.py b/hysop_examples/examples/shear_layer/shear_layer.py
index cf24b835080abe84d0ffa7c1a1d86acd30b1e8a4..d57935f70bdd311795461f7bc37246b25645b88d 100644
--- a/hysop_examples/examples/shear_layer/shear_layer.py
+++ b/hysop_examples/examples/shear_layer/shear_layer.py
@@ -1,6 +1,6 @@
 
 ## HySoP Example: Shear Layer 2D
-## See Brown 1995: 
+## See Brown 1995:
 ## Performance of under-resolved two dimensional incompressible flow simulations
 import sympy as sm
 import numpy as np
@@ -8,7 +8,6 @@ import numpy as np
 def compute(args):
     from hysop import Box, Simulation, Problem, MPIParams,\
                       ScalarParameter
-    from hysop.tools.debug_utils import ImshowDebugger
     from hysop.defaults import VelocityField, VorticityField, \
                                TimeParameters, ViscosityParameter
     from hysop.constants import Implementation, AdvectionCriteria
@@ -21,16 +20,18 @@ def compute(args):
                               ComputeGranularity, Interpolation
 
     from hysop.symbolic import space_symbols
-    
+
+    msg='Parameters are: delta={}, rho={}, visco={}'.format(args.delta, args.rho, args.nu)
+
     # Define the domain
     dim  = args.ndim
     npts = args.npts
     box  = Box(origin=args.box_origin, length=args.box_length, dim=dim)
-    
+
     # Get default MPI Parameters from domain (even for serial jobs)
     mpi_params = MPIParams(comm=box.task_comm(),
                            task_id=box.current_task())
-    
+
     # Setup usual implementation specific variables
     impl = args.impl
     extra_op_kwds = { 'mpi_params': mpi_params }
@@ -39,30 +40,30 @@ def compute(args):
     elif (impl is Implementation.OPENCL):
         # For the OpenCL implementation we need to setup the compute device
         # and configure how the code is generated and compiled at runtime.
-                
+
         # Create an explicit OpenCL context from user parameters
         from hysop.backend.device.opencl.opencl_tools import get_or_create_opencl_env, get_device_number
         cl_env = get_or_create_opencl_env(
             mpi_params=mpi_params,
-            platform_id=args.cl_platform_id, 
+            platform_id=args.cl_platform_id,
             device_id=box.machine_rank%get_device_number() if args.cl_device_id is None else None)
-        
+
         # Configure OpenCL kernel generation and tuning (already done by HysopArgParser)
         from hysop.methods import OpenClKernelConfig
         method = { OpenClKernelConfig: args.opencl_kernel_config }
-        
+
         # Setup opencl specific extra operator keyword arguments
         extra_op_kwds['cl_env'] = cl_env
     else:
         msg='Unknown implementation \'{}\'.'.format(impl)
         raise ValueError(msg)
-    
-    # Get back user paramaters 
+
+    # Get back user parameters
     # rho:   thickness of the shear layers
     # delta: the strength of the initial perturbation
     rho   = args.rho
     delta = args.delta
-    
+
     # Compute initial vorticity fomula symbolically
     # and define the function to compute initial vorticity.
     (x,y) = space_symbols[:2]
@@ -91,7 +92,7 @@ def compute(args):
     velo  = VelocityField(domain=box, dtype=args.dtype)
     vorti = VorticityField(velocity=velo)
     nu    = ViscosityParameter(initial_value=args.nu, const=True, dtype=args.dtype)
-    
+
     ### Build the directional operators
     if (impl is Implementation.FORTRAN) and (dim==3):
         #> Nd advection
@@ -103,11 +104,11 @@ def compute(args):
                 variables = {velo: npts, vorti: npts},
                 dt=dt, **extra_op_kwds)
     else:
-        #> Directional advection 
+        #> Directional advection
         impl_advec = Implementation.PYTHON if (impl is Implementation.FORTRAN) else impl
         advec_dir = DirectionalAdvection(implementation=impl_advec,
                 name='advection_remesh',
-                velocity = velo,       
+                velocity = velo,
                 advected_fields = (vorti,),
                 velocity_cfl = args.cfl,
                 variables = {velo: npts, vorti: npts},
@@ -119,22 +120,22 @@ def compute(args):
 
     ### Build standard operators
     #> Poisson operator to recover the velocity from the vorticity
-    poisson = PoissonCurl(name='poisson_curl', 
-                            velocity=velo, vorticity=vorti, 
-                            variables={velo:npts, vorti: npts}, 
+    poisson = PoissonCurl(name='poisson_curl',
+                            velocity=velo, vorticity=vorti,
+                            variables={velo:npts, vorti: npts},
                             projection=args.reprojection_frequency,
                             diffusion=nu, dt=dt,
-                            implementation=impl, 
+                            implementation=impl,
                             enforce_implementation=args.enforce_implementation,
                             **extra_op_kwds)
     #> We ask to dump the inputs and the outputs of this operator
     poisson.dump_outputs(fields=(vorti,),
             io_params=args.io_params.clone(filename='vorti'),
             **extra_op_kwds)
-    poisson.dump_outputs(fields=(velo,),  
+    poisson.dump_outputs(fields=(velo,),
             io_params=args.io_params.clone(filename='velo'),
             **extra_op_kwds)
-    
+
     #> Operator to compute the infinite norm of the velocity
     if (impl is Implementation.FORTRAN):
         impl = Implementation.PYTHON
@@ -145,19 +146,19 @@ def compute(args):
     min_max_W = MinMaxFieldStatistics(name='min_max_W', field=vorti,
             Finf=True, implementation=impl, variables={vorti:npts},
             **extra_op_kwds)
-    
+
     ### Adaptive timestep operator
     adapt_dt = AdaptiveTimeStep(dt)
     adapt_dt.push_cfl_criteria(cfl=args.cfl, Finf=min_max_U.Finf)
-    adapt_dt.push_advection_criteria(lcfl=args.lcfl, Finf=min_max_W.Finf, 
+    adapt_dt.push_advection_criteria(lcfl=args.lcfl, Finf=min_max_W.Finf,
             criteria=AdvectionCriteria.W_INF)
 
-    ## Create the problem we want to solve and insert our 
+    ## Create the problem we want to solve and insert our
     # directional splitting subgraph and the standard operators.
     # The method dictionnary passed to this graph will be dispatched
     # accross all operators contained in the graph.
     method.update(
-            { 
+            {
                ComputeGranularity:  args.compute_granularity,
                SpaceDiscretization: args.fd_order,
                TimeIntegrator:      args.time_integrator,
@@ -167,42 +168,42 @@ def compute(args):
     problem = Problem(method=method)
     problem.insert(poisson, advec, min_max_U, min_max_W, adapt_dt)
     problem.build(args)
-    
+
     # If a visu_rank was provided, and show_graph was set,
     # display the graph on the given process rank.
     if args.display_graph:
         problem.display(args.visu_rank)
-    
+
     # Create a simulation
     # (do not forget to specify the t and dt parameters here)
-    simu = Simulation(start=args.tstart, end=args.tend, 
+    simu = Simulation(start=args.tstart, end=args.tend,
                       nb_iter=args.nb_iter,
                       max_iter=args.max_iter,
                       dt0=args.dt, times_of_interest=args.times_of_interest,
                       t=t, dt=dt)
     simu.write_parameters(t, dt, filename='parameters.txt', precision=4)
-    
+
     # Initialize only the vorticity
     problem.initialize_field(velo,  formula=init_velocity)
     problem.initialize_field(vorti, formula=init_vorticity)
-    
-    # Finally solve the problem 
-    problem.solve(simu, dry_run=args.dry_run, 
+
+    # Finally solve the problem
+    problem.solve(simu, dry_run=args.dry_run,
             debug_dumper=args.debug_dumper,
             checkpoint_handler=args.checkpoint_handler,
             plot_freq=args.plot_freq)
-    
+
     # Finalize
     problem.finalize()
 
 
 if __name__=='__main__':
-    from hysop_examples.example_utils import HysopArgParser, colors
+    from hysop_examples.argparser import HysopArgParser, colors
 
     class ShearLayerArgParser(HysopArgParser):
         def __init__(self):
             prog_name = 'shear_layer'
-            default_dump_dir = '{}/hysop_examples/{}'.format(HysopArgParser.tmp_dir(), 
+            default_dump_dir = '{}/hysop_examples/{}'.format(HysopArgParser.tmp_dir(),
                     prog_name)
 
             description=colors.color('HySoP Shear Layer Example: ', fg='blue', style='bold')
@@ -221,12 +222,12 @@ if __name__=='__main__':
             description+='\n  CASE     0        1        2'
             description+='\n  delta    0.5      0.5      0.5'
             description+='\n  rho      30       100      100'
-            description+='\n  visco    1.0e-4   0.5e-4   0.25e-4'
+            description+='\n  visco    1.0e-4   1.0e-4   0.5e-4'
             description+='\n  comment  thick    thin     thin'
             description+='\n'
             description+='\nSee the original paper at '
-            description+='http://crd.lbl.gov/assets/pubs_presos/underIIJCP.pdf.'
-    
+            description+='http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.27.7942&rep=rep1&type=pdf'
+
             super(ShearLayerArgParser, self).__init__(
                  prog_name=prog_name,
                  description=description,
@@ -260,7 +261,7 @@ if __name__=='__main__':
                 msg='Case parameter should be 0, 1 or 2, got {}.'
                 msg=msg.format(args.case)
                 self.error(msg)
-    
+
         def _add_graphical_io_args(self):
             graphical_io = super(ShearLayerArgParser, self)._add_graphical_io_args()
             graphical_io.add_argument('-pw', '--plot-vorticity', action='store_true',
@@ -268,17 +269,17 @@ if __name__=='__main__':
                     help=('Plot the vorticity component during simulation. '+
                          'Simulation will stop at each time of interest and '+
                          'the plot will be updated every specified freq iterations.'))
-            graphical_io.add_argument('-pf', '--plot-freq', type=int, default=10, 
+            graphical_io.add_argument('-pf', '--plot-freq', type=int, default=10,
                     dest='plot_freq',
-                    help=('Plot frequency in terms of iterations.' 
+                    help=('Plot frequency in terms of iterations.'
                          +' Use 0 to disable frequency based plotting.'))
-        
+
         def _check_file_io_args(self, args):
             super(ShearLayerArgParser, self)._check_file_io_args(args)
             self._check_default(args, 'plot_vorticity', bool, allow_none=False)
             self._check_default(args, 'plot_freq', int, allow_none=False)
             self._check_positive(args, 'plot_freq', strict=False, allow_none=False)
-            
+
         def _setup_parameters(self, args):
             super(ShearLayerArgParser, self)._setup_parameters(args)
             from hysop.tools.types import first_not_None
@@ -288,21 +289,21 @@ if __name__=='__main__':
             rho_defaults   = (30.0,   100.0,  100.0)
             nu_defaults    = (1.0e-4, 0.5e-4, 0.25e-4)
 
-            args.rho   = first_not_None(args.rho, rho_defaults[case])
             args.delta = first_not_None(args.delta, delta_defaults[case])
+            args.rho   = first_not_None(args.rho, rho_defaults[case])
             args.nu    = first_not_None(args.nu, nu_defaults[case])
 
             self._check_positive(args, ('rho','delta','nu'), strict=True, allow_none=False)
-            
+
             if (args.ndim != 2):
-                msg='This example only works for 2D domains.'
+                msg='This example only works on 2D domains.'
                 self.error(msg)
 
     parser = ShearLayerArgParser()
 
     parser.set_defaults(impl='cl', ndim=2, npts=(256,),
-                        box_origin=(0.0,), box_length=(1.0,), 
-                        tstart=0.0, tend=1.25, 
+                        box_origin=(0.0,), box_length=(1.0,),
+                        tstart=0.0, tend=1.2,
                         dt=1e-4, cfl=0.5, lcfl=0.125,
                         case=0, dump_freq=0, dump_times=(0.8, 1.20))
 
diff --git a/hysop_examples/examples/taylor_green/taylor_green.py b/hysop_examples/examples/taylor_green/taylor_green.py
index d9b47eadf1efb886a8e2692133beeb05dcdc18a2..28f1ee703c1d8c27f5dc690859eabbabd9f738b4 100644
--- a/hysop_examples/examples/taylor_green/taylor_green.py
+++ b/hysop_examples/examples/taylor_green/taylor_green.py
@@ -68,7 +68,7 @@ def compute(args):
         from hysop.backend.device.opencl.opencl_tools import get_or_create_opencl_env, get_device_number
         cl_env = get_or_create_opencl_env(
             mpi_params=mpi_params,
-            platform_id=args.cl_platform_id, 
+            platform_id=args.cl_platform_id,
             device_id=box.machine_rank%get_device_number() if args.cl_device_id is None else None)
 
         # Configure OpenCL kernel generation and tuning (already done by HysopArgParser)
@@ -95,7 +95,6 @@ def compute(args):
                 name='advec',
                 velocity = velo,
                 advected_fields = (vorti,),
-                velocity_cfl = args.cfl,
                 variables = {velo: npts, vorti: npts},
                 dt=dt, **extra_op_kwds)
         advec_dir = None
@@ -112,7 +111,7 @@ def compute(args):
     #> Directional stretching
     if (impl is Implementation.PYTHON) or (impl is Implementation.FORTRAN):
         stretch_dir = StaticDirectionalStretching(implementation=Implementation.PYTHON,
-                 name='stretch',
+                 name='S',
                  formulation = args.stretching_formulation,
                  velocity  = velo,
                  vorticity = vorti,
@@ -120,7 +119,7 @@ def compute(args):
                  dt=dt, **extra_op_kwds)
     else:
         stretch_dir = DirectionalStretching(implementation=impl,
-                 name='stretch',
+                 name='S',
                  formulation = args.stretching_formulation,
                  velocity  = velo,
                  vorticity = vorti,
@@ -140,7 +139,7 @@ def compute(args):
                             enforce_implementation=args.enforce_implementation,
                             implementation=impl, **extra_op_kwds)
     #> We ask to dump the outputs of this operator
-    dump_fields = HDF_Writer(name='fields', 
+    dump_fields = HDF_Writer(name='fields',
             io_params=args.io_params.clone(filename='fields'),
             force_backend=backend,
             variables={velo: npts, vorti: npts}, **extra_op_kwds)
@@ -208,7 +207,7 @@ def compute(args):
                     parameters[axe1] = {dt_lcfl0.name: dt_lcfl0,
                                         dt_lcfl1.name: dt_lcfl1,
                                         dt_lcfl2.name: dt_lcfl2,
-                                        dt_cfl.name:   dt_cfl, 
+                                        dt_cfl.name:   dt_cfl,
                                         dt_stretch.name: dt_stretch}
                     parameters[axe2] = {'CFL*': adapt_dt.equivalent_CFL }
                 else:
@@ -223,7 +222,7 @@ def compute(args):
                                                     snpts, config), fontweight='bold')
                 axe0.set_title('Integrated Enstrophy')
                 axe0.set_xlabel('Non-dimensional time', fontweight='bold')
-                axe0.set_ylabel('$\zeta$',
+                axe0.set_ylabel(r'$\zeta$',
                         rotation=0, fontweight='bold')
                 axe0.set_xlim(args.tstart, args.tend)
                 axe0.set_ylim(0, 26)
@@ -253,7 +252,7 @@ def compute(args):
                     axe2.axhline(y=args.cfl, color='r', linestyle='--')
                     axe2.set_ylim(0., 1.1*args.cfl)
         plot = EnstrophyPlotter(update_frequency=args.plot_freq,
-                                visu_rank=args.visu_rank)
+                                visu_rank=args.visu_rank, io_params=args.io_params)
     else:
         plot = None
 
@@ -289,7 +288,7 @@ def compute(args):
                       t=t, dt=dt)
     params = (t, dt, enstrophy,)
     if args.variable_timestep:
-        params += (dt_cfl, dt_stretch, dt_lcfl0, dt_lcfl1, dt_lcfl2, 
+        params += (dt_cfl, dt_stretch, dt_lcfl0, dt_lcfl1, dt_lcfl2,
                     min_max_U.Finf, min_max_W.Finf, min_max_gradU.Finf, adapt_dt.equivalent_CFL)
     simu.write_parameters(*params, filename='parameters.txt', precision=8)
 
@@ -297,7 +296,7 @@ def compute(args):
     problem.initialize_field(vorti, formula=init_vorticity)
 
     # Finally solve the problem
-    problem.solve(simu, dry_run=args.dry_run, 
+    problem.solve(simu, dry_run=args.dry_run,
             debug_dumper=args.debug_dumper,
             checkpoint_handler=args.checkpoint_handler)
 
@@ -306,7 +305,7 @@ def compute(args):
 
 
 if __name__=='__main__':
-    from hysop_examples.example_utils import HysopArgParser, colors
+    from hysop_examples.argparser import HysopArgParser, colors
 
     class TaylorGreenArgParser(HysopArgParser):
         def __init__(self):
diff --git a/hysop_examples/examples/taylor_green/taylor_green_cpuFortran.py b/hysop_examples/examples/taylor_green/taylor_green_cpuFortran.py
index 020cd4b5eb16d85106288658e94cd18bae5969c5..a18262101fc43ae89b37cf0a0d77dd3cf7456e74 100644
--- a/hysop_examples/examples/taylor_green/taylor_green_cpuFortran.py
+++ b/hysop_examples/examples/taylor_green/taylor_green_cpuFortran.py
@@ -76,7 +76,7 @@ def compute(args):
             dt=dt, **extra_op_kwds)
     #> Directional stretching
     stretch = StaticDirectionalStretching(implementation=impl,
-             name='stretch',
+             name='S',
              formulation = args.stretching_formulation,
              velocity  = velo,
              vorticity = vorti,
@@ -215,7 +215,7 @@ def compute(args):
     problem.initialize_field(vorti, formula=init_vorticity)
 
     # Finally solve the problem
-    problem.solve(simu, dry_run=args.dry_run, 
+    problem.solve(simu, dry_run=args.dry_run,
             debug_dumper=args.debug_dumper,
             checkpoint_handler=args.checkpoint_handler)
 
@@ -224,7 +224,7 @@ def compute(args):
 
 
 if __name__=='__main__':
-    from hysop_examples.example_utils import HysopArgParser, colors
+    from hysop_examples.argparser import HysopArgParser, colors
 
     class TaylorGreenArgParser(HysopArgParser):
         def __init__(self):
diff --git a/notebooks/.gitignore b/notebooks/.gitignore
deleted file mode 100644
index 61c6e23f8b321e032095a2f4649de66da9168ebb..0000000000000000000000000000000000000000
--- a/notebooks/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-.ipynb_checkpoints
-interactive
diff --git a/notebooks/00_introduction.ipynb b/notebooks/00_introduction.ipynb
deleted file mode 100644
index 4e7db4008ff4603d1c36c4e1343ade0dd4ef71b3..0000000000000000000000000000000000000000
--- a/notebooks/00_introduction.ipynb
+++ /dev/null
@@ -1,1381 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "# HySoP: Hybrid Simulation with Particles\n",
-    "\n",
-    "HySoP is a library dedicated to high performance flow simulation based on particle methods, on hybrid architectures with multiple compute devices including CPUs, GPUs and MICs.\n",
-    "\n",
-    "The library is mainly written in Python (high level functionnalities) on the top of Fortran, C++ and OpenCL backends.\n",
-    "\n",
-    "## How to use HySoP\n",
-    "The problem to be solved must be described using Python 2.7 language either interactively or in a python script.\n",
-    "\n",
-    "If you are not at ease with this language, we strongly encourage you to check one of the numerous python tutorials available on the web. Try for example https://docs.python.org/2/tutorial/. A basic understanding of numpy may also be useful : http://www.numpy.org.\n",
-    "Numpy is the python package for scientific computing on which HySoP relies, especially for arrays handling.\n",
-    "\n",
-    "Interactive session is very useful for tests, basic understanding of hysop functionnalities but real simulation must be executed by using a script. \n",
-    "\n",
-    "\n",
-    "## Quick introduction\n",
-    "\n",
-    "In this quick introduction we will introduce some of the main data structures and types of HySoP.\n",
-    "At the end of this notebook, you will be able to define and discretize Fields on topologies and initialize them.\n"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 1,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "%matplotlib inline\n",
-    "# First we import HySoP and some types we will need for this tutorial\n",
-    "import hysop\n",
-    "from hysop import Box, Field, Discretization, CartesianTopology, Simulation\n",
-    "\n",
-    "# We will also need numpy and matplotlib.\n",
-    "import numpy as np\n",
-    "np.set_printoptions(linewidth=240, formatter={'float': lambda x: format(x, '6.2f')})\n",
-    "\n",
-    "import matplotlib.pyplot as plt"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### Describing the physical domain\n",
-    "The first thing to do is to create a physical domain where the simulation will take place. At this time the only type of domain available are $n$-dimensional boxes. A box has an origin $X_0=(x_0,\\cdots,x_{n-1})$ and a length $L=(L_0, \\cdots, L_{n-1})$. By default a unit box will be returned $X_0 = (0,\\cdots,0)$ and $L=(1,\\cdots,1)$. A domain also provide information about its boundary conditions."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 2,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Box::d0 | 2D rectangular box domain:\n",
-      "  *origin:  [  0.00   0.00]\n",
-      "  *max_pos: [  1.00   1.00]\n",
-      "  *length:  [  1.00   1.00]\n",
-      "  *left  boundary conditions: [PERIODIC(1), PERIODIC(1)]\n",
-      "  *right boundary conditions: [PERIODIC(1), PERIODIC(1)]\n",
-      "\n",
-      "d0\n"
-     ]
-    }
-   ],
-   "source": [
-    "# The 2D default Box\n",
-    "domain = Box(dim=2)\n",
-    "print domain\n",
-    "print domain.tag\n"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 3,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Box::d1 | 2D rectangular box domain:\n",
-      "  *origin:  [  0.00   0.00]\n",
-      "  *max_pos: [  6.28   6.28]\n",
-      "  *length:  [  6.28   6.28]\n",
-      "  *left  boundary conditions: [PERIODIC(1), PERIODIC(1)]\n",
-      "  *right boundary conditions: [PERIODIC(1), PERIODIC(1)]\n",
-      "\n"
-     ]
-    }
-   ],
-   "source": [
-    "# A custom box\n",
-    "domain = Box(length=(2*np.pi, 2*np.pi))\n",
-    "print domain"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "A Box, like many other types in HySoP, has a unique identifier called a tag that identifies any instance of the type uniquely. You can query object tags by using the $tag$ and $full\\_tag$ attributes."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 4,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "domain.tag is d1\n",
-      "domain.full_tag is Box::d1.\n"
-     ]
-    }
-   ],
-   "source": [
-    "print 'domain.tag is {}'.format(domain.tag)\n",
-    "print 'domain.full_tag is {}.'.format(domain.full_tag)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Of course, a Box has many other usefull attributes describing the physical domain:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 5,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "domain.dim is 2.\n",
-      "domain.origin is [  0.00   0.00].\n",
-      "domain.length is [  6.28   6.28].\n",
-      "domain.periodicity is [ True  True].\n"
-     ]
-    }
-   ],
-   "source": [
-    "for attr in ('dim','origin','length','periodicity'):\n",
-    "    print 'domain.{} is {}.'.format(attr, getattr(domain, attr))"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "A full list of attributes along with their documentation can be retrieved by using the python help method:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 6,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Help on class Box in module hysop.domain.box:\n",
-      "\n",
-      "class Box(BoxView, hysop.domain.domain.Domain)\n",
-      " |  Box-shaped domain description.\n",
-      " |  \n",
-      " |  Method resolution order:\n",
-      " |      Box\n",
-      " |      BoxView\n",
-      " |      hysop.domain.domain.DomainView\n",
-      " |      hysop.tools.handle.TaggedObjectView\n",
-      " |      hysop.domain.domain.Domain\n",
-      " |      hysop.tools.handle.RegisteredObject\n",
-      " |      hysop.tools.handle.TaggedObject\n",
-      " |      __builtin__.object\n",
-      " |  \n",
-      " |  Methods defined here:\n",
-      " |  \n",
-      " |  view(self, topology_state)\n",
-      " |      Return a view of this domain altered by some topology_state.\n",
-      " |  \n",
-      " |  ----------------------------------------------------------------------\n",
-      " |  Static methods defined here:\n",
-      " |  \n",
-      " |  __new__(cls, length=None, origin=None, dim=None, lboundaries=None, rboundaries=None, **kwds)\n",
-      " |      Create or get an existing Box from a dimension, length and origin with specified\n",
-      " |      left and right boundary conditions.\n",
-      " |      \n",
-      " |      Parameters\n",
-      " |      ----------\n",
-      " |      length : array like of float, optional\n",
-      " |          Box sides lengthes. Default = [1.0, ...]\n",
-      " |      origin: array like of float, optional\n",
-      " |          Position of the lowest point of the box. Default [0.0, ...]\n",
-      " |      dim: int, optional\n",
-      " |          Dimension of the box.\n",
-      " |      lboundaries: array_like of BoundaryCondition\n",
-      " |          Left boundary conditions.\n",
-      " |      rboundaries: array_like of BoundaryCondition\n",
-      " |          Right boundary conditions.\n",
-      " |      \n",
-      " |      Attributes\n",
-      " |      ----------\n",
-      " |      dim: int\n",
-      " |          Dimension of the box.\n",
-      " |      length : np.ndarray of HYSOP_REAL\n",
-      " |          Box sides lengthes.\n",
-      " |      origin: np.ndarray of HYSOP_REAL\n",
-      " |          Position of the lowest point of the box. \n",
-      " |      end: np.ndarray of HYSOP_REAL\n",
-      " |          Position of the greatest point of the box. \n",
-      " |      lboundaries: np.ndarray of BoundaryCondition\n",
-      " |          Left boundary conditions.\n",
-      " |      rboundaries: np.ndarray of BoundaryCondition\n",
-      " |          Right boundary conditions.\n",
-      " |      boundaries: tuple of np.ndarray of BoundaryCondition\n",
-      " |          Left and right boundary conditions as a tuple.\n",
-      " |      periodicity: np.ndarray of bool\n",
-      " |          Numpy array mask, True is axis is periodic, else False.\n",
-      " |  \n",
-      " |  ----------------------------------------------------------------------\n",
-      " |  Data and other attributes defined here:\n",
-      " |  \n",
-      " |  __abstractmethods__ = frozenset([])\n",
-      " |  \n",
-      " |  ----------------------------------------------------------------------\n",
-      " |  Methods inherited from BoxView:\n",
-      " |  \n",
-      " |  long_description(self)\n",
-      " |      Return a long description of this Box as a string.\n",
-      " |  \n",
-      " |  short_description(self)\n",
-      " |      Return a short description of this Box as a string.\n",
-      " |  \n",
-      " |  ----------------------------------------------------------------------\n",
-      " |  Data descriptors inherited from BoxView:\n",
-      " |  \n",
-      " |  boundaries\n",
-      " |      Left and right boundary conditions as a tuple.\n",
-      " |  \n",
-      " |  end\n",
-      " |      Position of the greatest point of the box.\n",
-      " |  \n",
-      " |  lboundaries\n",
-      " |      Left boundary conditions.\n",
-      " |  \n",
-      " |  length\n",
-      " |      Box sides lengthes.\n",
-      " |  \n",
-      " |  origin\n",
-      " |      Position of the lowest point of the box.\n",
-      " |  \n",
-      " |  periodicity\n",
-      " |      Numpy array mask, True is axis is periodic, else False.\n",
-      " |  \n",
-      " |  rboundaries\n",
-      " |      Right boundary conditions.\n",
-      " |  \n",
-      " |  ----------------------------------------------------------------------\n",
-      " |  Methods inherited from hysop.domain.domain.DomainView:\n",
-      " |  \n",
-      " |  __eq__(self, other)\n",
-      " |  \n",
-      " |  __hash__(self)\n",
-      " |  \n",
-      " |  __ne__(self, other)\n",
-      " |  \n",
-      " |  __str__(self)\n",
-      " |      Equivalent to self.long_description()\n",
-      " |  \n",
-      " |  current_task(self)\n",
-      " |      Get task number of the current mpi process.\n",
-      " |  \n",
-      " |  is_on_task(self, params)\n",
-      " |      Test if the current process corresponds to param task.\n",
-      " |  \n",
-      " |  print_topologies(self)\n",
-      " |      Print all topologies registered on the domain.\n",
-      " |  \n",
-      " |  task_on_proc(self, parent_rank)\n",
-      " |      Get task identifier for a given mpi process (parent communicator rank).\n",
-      " |  \n",
-      " |  ----------------------------------------------------------------------\n",
-      " |  Data descriptors inherited from hysop.domain.domain.DomainView:\n",
-      " |  \n",
-      " |  dim\n",
-      " |      Return the dimension of the domain.\n",
-      " |  \n",
-      " |  domain\n",
-      " |      Return the domain on which the view is on.\n",
-      " |  \n",
-      " |  frame\n",
-      " |      Get symbolic frame associated to this domain.\n",
-      " |  \n",
-      " |  parent_comm\n",
-      " |      Return the parent communicator used to create this domain.\n",
-      " |  \n",
-      " |  parent_rank\n",
-      " |      Return the rank of the process in the parent communicator.\n",
-      " |  \n",
-      " |  proc_tasks\n",
-      " |      Return mapping between mpi process rank and task identifier.\n",
-      " |  \n",
-      " |  registered_topologies\n",
-      " |      Return the dictionary of all topologies already built on this domain,\n",
-      " |      with topology ids as keys and :class:`~hysop.topology.topology.Topology` as values.\n",
-      " |  \n",
-      " |  task_comm\n",
-      " |      Return the communicator that owns the current process.\n",
-      " |      This is the sub-communicator which has been obtained by splitting.\n",
-      " |      the parent communicator by colors (proc_tasks).\n",
-      " |  \n",
-      " |  task_rank\n",
-      " |      Return the rank of the process in the task communicator.\n",
-      " |  \n",
-      " |  ----------------------------------------------------------------------\n",
-      " |  Data and other attributes inherited from hysop.domain.domain.DomainView:\n",
-      " |  \n",
-      " |  __metaclass__ = <class 'abc.ABCMeta'>\n",
-      " |      Metaclass for defining Abstract Base Classes (ABCs).\n",
-      " |      \n",
-      " |      Use this metaclass to create an ABC.  An ABC can be subclassed\n",
-      " |      directly, and then acts as a mix-in class.  You can also register\n",
-      " |      unrelated concrete classes (even built-in classes) and unrelated\n",
-      " |      ABCs as 'virtual subclasses' -- these and their descendants will\n",
-      " |      be considered subclasses of the registering ABC by the built-in\n",
-      " |      issubclass() function, but the registering ABC won't show up in\n",
-      " |      their MRO (Method Resolution Order) nor will method\n",
-      " |      implementations defined by the registering ABC be callable (not\n",
-      " |      even via super()).\n",
-      " |  \n",
-      " |  ----------------------------------------------------------------------\n",
-      " |  Methods inherited from hysop.tools.handle.TaggedObjectView:\n",
-      " |  \n",
-      " |  __init__(self, obj_view=None, **kwds)\n",
-      " |  \n",
-      " |  __repr__(self)\n",
-      " |  \n",
-      " |  ----------------------------------------------------------------------\n",
-      " |  Data descriptors inherited from hysop.tools.handle.TaggedObjectView:\n",
-      " |  \n",
-      " |  __dict__\n",
-      " |      dictionary for instance variables (if defined)\n",
-      " |  \n",
-      " |  __weakref__\n",
-      " |      list of weak references to the object (if defined)\n",
-      " |  \n",
-      " |  full_pretty_tag\n",
-      " |      Unique tag of the underlying object view with cls information.\n",
-      " |  \n",
-      " |  full_tag\n",
-      " |      Unique tag of the underlying object view with cls information.\n",
-      " |  \n",
-      " |  id\n",
-      " |      Unique id of the underlying object view.\n",
-      " |  \n",
-      " |  pretty_tag\n",
-      " |      Unique pretty tag of the underlying object view.\n",
-      " |  \n",
-      " |  tag\n",
-      " |      Unique tag of the underlying object view.\n",
-      " |  \n",
-      " |  ----------------------------------------------------------------------\n",
-      " |  Methods inherited from hysop.domain.domain.Domain:\n",
-      " |  \n",
-      " |  register_topology(self, topo)\n",
-      " |      Register a new topology on this domain.\n",
-      " |      Do nothing if an equivalent topology is already\n",
-      " |      in the list.\n",
-      " |  \n",
-      " |  remove_topology(self, topo)\n",
-      " |      Remove a topology from the list of this domain.\n",
-      " |      Do nothing if the topology does not exist in the list.\n",
-      " |  \n",
-      " |  ----------------------------------------------------------------------\n",
-      " |  Methods inherited from hysop.tools.handle.RegisteredObject:\n",
-      " |  \n",
-      " |  __del__(self)\n",
-      " |  \n",
-      " |  ----------------------------------------------------------------------\n",
-      " |  Data descriptors inherited from hysop.tools.handle.RegisteredObject:\n",
-      " |  \n",
-      " |  obj_initialized\n",
-      " |      Return the object initialization state.\n",
-      "\n"
-     ]
-    }
-   ],
-   "source": [
-    "help(Box) # or help(domain)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### Creating variables\n",
-    "\n",
-    "Now that we have a domain defined, we can define named continuous scalar and vector fields.\n",
-    "\n",
-    "In HySoP, a continuous field is an abstract object which represents the usual vector field, in a mathematical sense, i.e some function which associates a scalar or a vector to each point of the space.\n",
-    "\n",
-    "Such objects are used as input and/or output variables for continuous operators and must be defined with at least:\n",
-    "* a name, compulsory (required for i/o).\n",
-    "* a domain: the physical domain of definition of the field.\n",
-    "\n",
-    "The underlying datatype that will be used uppon discretization can also be specified. \n",
-    "By default Fields are set to datatype HYSOP_REAL wich will be either np.float32 (single precision floating point) or np.float64 (double precision floating point) depending on HySoP build configuration. There are no restrictions on the datatypes, for example all integer and complex datatypes can also be specified. Let $\\mathbb{D}$ be the space of all the possible values of the chosen datadtype and $\\Omega \\subset \\mathbb{R}^n$ our physical domain.\n",
-    "\n",
-    "For a domain of dimension $n$, scalar fields $s: \\Omega \\rightarrow \\mathbb{D}$ are defined like this:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 7,
-   "metadata": {
-    "scrolled": true
-   },
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "HYSOP_REAL is set to float64.\n",
-      "\n",
-      "ScalarField::f0\n",
-      "  *name:           F0\n",
-      "  *pname:          F0\n",
-      "  *dim:            2\n",
-      "  *dtype:          float64\n",
-      "  *symbolic repr.: F0\n",
-      "  *initial values: (0, 0)\n",
-      "  *topology tags:  []\n",
-      "\n",
-      "ScalarField::f1\n",
-      "  *name:           F1\n",
-      "  *pname:          F1\n",
-      "  *dim:            2\n",
-      "  *dtype:          int32\n",
-      "  *symbolic repr.: F1\n",
-      "  *initial values: (0, 0)\n",
-      "  *topology tags:  []\n",
-      "\n"
-     ]
-    }
-   ],
-   "source": [
-    "from hysop.constants import HYSOP_REAL\n",
-    "print 'HYSOP_REAL is set to {}.\\n'.format(HYSOP_REAL.__name__)\n",
-    "\n",
-    "field0 = Field(name='F0', domain=domain)\n",
-    "field1 = Field(name='F1', domain=domain, dtype=np.int32)\n",
-    "\n",
-    "print field0\n",
-    "print field1"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Vector fields are created using either the is_vector or the nb_components parameter. is_vector corresponds to specifying nb_components to the number of dimensions of the domain $f(x): \\Omega \\subset \\mathbb{R}^n \\rightarrow \\mathbb{D}^n$ while the more general nb_components parameter allows to specify $g(x): \\Omega \\subset \\mathbb{R}^n \\rightarrow \\mathbb{D}^m$."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 8,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "VectorField::tf0\n",
-      "  *name:           V0\n",
-      "  *pname:          V0\n",
-      "  *dim:            2\n",
-      "  *shape:          (2,)\n",
-      "  *nb_components:  2\n",
-      "  *symbolic repr.:\n",
-      "      [[V0â‚€]\n",
-      "       [V0₁]]\n",
-      "\n",
-      "VectorField::tf1\n",
-      "  *name:           V1\n",
-      "  *pname:          V1\n",
-      "  *dim:            2\n",
-      "  *shape:          (3,)\n",
-      "  *nb_components:  3\n",
-      "  *symbolic repr.:\n",
-      "      [[V1â‚€]\n",
-      "       [V1₁]\n",
-      "       [V1â‚‚]]\n",
-      "\n",
-      "TensorField::tf2\n",
-      "  *name:           T\n",
-      "  *pname:          T\n",
-      "  *dim:            2\n",
-      "  *shape:          (3, 2)\n",
-      "  *nb_components:  6\n",
-      "  *symbolic repr.:\n",
-      "      [[T₀₀  T₀₁]\n",
-      "       [T₁₀  T₁₁]\n",
-      "       [T₂₀  T₂₁]]\n"
-     ]
-    }
-   ],
-   "source": [
-    "field2 = Field(name='V0', domain=domain, is_vector=True)\n",
-    "field3 = Field(name='V1', domain=domain, nb_components=3)\n",
-    "field4 = Field(name='T', domain=domain, shape=(3,2))\n",
-    "\n",
-    "print field2\n",
-    "print\n",
-    "print field3\n",
-    "print\n",
-    "print field4"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### Topologies\n",
-    "To discretize Fields we will need to build topologies which are objects use to describe the space discretization of domains. Topologies also defined the mapping between data and MPI processes in case of a multiprocess run.\n",
-    "\n",
-    "As only Box domains are available at this day, only one type of topology is available as well: CartesianTopology.\n",
-    "A cartesian topology represents the discretization of a physical box domain into a cartesian grid with a given space step $dx$. It also is responsible to map subgrids to all processes. This will be called the MPI grid layout. As processes need to exchange data as well, we also need to prescribe a number of ghosts in each directions.\n",
-    "\n",
-    "Here we will just discretize the domain for one process without ghosts.\n",
-    "First we choose the shape of discretization (number of points in each directions).\n",
-    "Here we will chose a 5x5 discretization shape. Please note that because of domain periodicity\n",
-    "one point will be thrown away in each periodic direction so the effective local grid size will be 4x4."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 9,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "CartesianTopology::t0[domain=Box::d1, pcoords=[0 0], pshape=[1 1], shape=[5,5], ghosts=[0,0], backend=HOST]\n"
-     ]
-    }
-   ],
-   "source": [
-    "d0 = Discretization(resolution=(5,5))\n",
-    "t0 = CartesianTopology(domain=domain, discretization=d0)\n",
-    "print t0.short_description()\n"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Here pcoords represents the process coordinates into the process grid layout which is of shape (1,1) here.\n",
-    "\n",
-    "Adding ghosts to the discretization is straightforward:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 10,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "CartesianTopology::t1[domain=Box::d1, pcoords=[0 0], pshape=[1 1], shape=[5,5], ghosts=[1,2], backend=HOST]\n"
-     ]
-    }
-   ],
-   "source": [
-    "d1 = Discretization(resolution=(5,5), ghosts=(1,2))\n",
-    "t1 = CartesianTopology(domain=domain, discretization=d1)\n",
-    "print t1.short_description()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Each CartesianTopology has a 'mesh' attributes, an object of type CartesianMesh, which describes the local grid on which the current mpi process works."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 11,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "CartesianMeshView::m1:\n",
-      "  *proc coords:        [0 0]\n",
-      "  *global start:       [0 0]\n",
-      "  *local resolution:   [6 8]\n",
-      "  *compute resolution: [4 4]\n",
-      "  *ghosts:             [1 2]\n",
-      "  *local boundaries:   left  => [PERIODIC(1) PERIODIC(1)]\n",
-      "                       right => [PERIODIC(1) PERIODIC(1)]\n",
-      "\n"
-     ]
-    }
-   ],
-   "source": [
-    "print t1.mesh"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### Discrete Fields\n",
-    "Now that we have topologies defined on domains, we can finally discretize our continuous Fields.\n",
-    "Discretizing a Field onto a CartesianTopology yields a CartesianDiscreteField.\n",
-    "\n",
-    "In fact, the real type that will be returned is a CartesianDiscreteFieldView which is a way to see the underlying discrete field data for a given transposition state. This will come handy when using directionnally splittable operators.\n",
-    "The default transposition state is such that the allocated data is C-contiguous. To get the real discrete field on which the view is, just use its 'dfield' attribute.\n",
-    "\n",
-    "A Field can be at most discretized once per topology:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 12,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "[[  0.00   0.00   0.00   0.00]\n",
-      " [  0.00   2.00   2.00   0.00]\n",
-      " [  0.00   0.00   0.00   0.00]\n",
-      " [  0.00   0.00   0.00   0.00]]\n",
-      "\n",
-      "[[  1.00   1.00   1.00   1.00]\n",
-      " [  1.00   1.00   1.00   1.00]\n",
-      " [  1.00   1.00   1.00   1.00]\n",
-      " [  1.00   1.00   1.00   1.00]]\n"
-     ]
-    }
-   ],
-   "source": [
-    "# Here we have a topology without ghosts\n",
-    "dfield0     = field0.discretize(t0)\n",
-    "dfield0_bis = field0.discretize(t0)\n",
-    "assert dfield0.dfield is dfield0_bis.dfield\n",
-    "\n",
-    "# Accessing data is simple as:\n",
-    "dfield0.data[0][...] = 0.0\n",
-    "dfield0.data[0][1,1:3] = 2\n",
-    "print dfield0.sdata\n",
-    "print\n",
-    "\n",
-    "dfield0.sdata[...] = 1.0\n",
-    "print dfield0_bis.sdata"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "All discretized data are of type HostArray, which is a supertype of numpy.ndarray.\n",
-    "With the topology with ghost, you will get something like that:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 13,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "[[  0.00   0.00   0.00   0.00   0.00   0.00   0.00   0.00]\n",
-      " [  0.00   0.00   0.00   0.00   0.00   0.00   0.00   0.00]\n",
-      " [  0.00   0.00   0.00   0.00   0.00   0.00   0.00   0.00]\n",
-      " [  0.00   0.00   0.00   0.00   0.00   0.00   0.00   0.00]\n",
-      " [  0.00   0.00   0.00   0.00   0.00   0.00   0.00   0.00]\n",
-      " [  0.00   0.00   0.00   0.00   0.00   0.00   0.00   0.00]]\n",
-      "\n",
-      "[[   nan    nan    nan    nan    nan    nan    nan    nan]\n",
-      " [   nan    nan   1.00   1.00   1.00   1.00    nan    nan]\n",
-      " [   nan    nan   1.00   1.00   1.00   1.00    nan    nan]\n",
-      " [   nan    nan   1.00   1.00   1.00   1.00    nan    nan]\n",
-      " [   nan    nan   1.00   1.00   1.00   1.00    nan    nan]\n",
-      " [   nan    nan    nan    nan    nan    nan    nan    nan]]\n",
-      "(slice(1, 5, None), slice(2, 6, None))\n"
-     ]
-    }
-   ],
-   "source": [
-    "# Here we have a topology with 2 ghosts on the x-axis and 1 ghost in the y-axis\n",
-    "dfield1 = field0.discretize(t1)\n",
-    "\n",
-    "# Put zeros everywhere\n",
-    "dfield1.sdata[...] = 0.0\n",
-    "print dfield1.sdata\n",
-    "print\n",
-    "\n",
-    "# Put ones inside the compute domain (exluding ghosts)\n",
-    "# and NaNs in the ghost layers\n",
-    "dfield1.sdata[...] = np.nan\n",
-    "dfield1.sdata[dfield1.compute_slices] = 1.0\n",
-    "print dfield1.sdata\n",
-    "print dfield1.compute_slices"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "There is a simple way to hide ghost values:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 14,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "[[  X      X      X      X      X      X      X      X   ]\n",
-      " [  X      X      1.00   1.00   1.00   1.00   X      X   ]\n",
-      " [  X      X      1.00   1.00   1.00   1.00   X      X   ]\n",
-      " [  X      X      1.00   1.00   1.00   1.00   X      X   ]\n",
-      " [  X      X      1.00   1.00   1.00   1.00   X      X   ]\n",
-      " [  X      X      X      X      X      X      X      X   ]]\n"
-     ]
-    }
-   ],
-   "source": [
-    "dfield1.print_with_ghosts(component=0)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### How to initialize a scalar field with a python method"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 15,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "f(x,y) = x\n",
-      "[[  0.00   1.57   3.14   4.71]\n",
-      " [  0.00   1.57   3.14   4.71]\n",
-      " [  0.00   1.57   3.14   4.71]\n",
-      " [  0.00   1.57   3.14   4.71]]\n",
-      "\n",
-      "f(x,y) = y\n",
-      "[[  0.00   0.00   0.00   0.00]\n",
-      " [  1.57   1.57   1.57   1.57]\n",
-      " [  3.14   3.14   3.14   3.14]\n",
-      " [  4.71   4.71   4.71   4.71]]\n",
-      "\n",
-      "f(x,y) = xy\n",
-      "[[  0.00   0.00   0.00   0.00]\n",
-      " [  0.00   2.47   4.93   7.40]\n",
-      " [  0.00   4.93   9.87  14.80]\n",
-      " [  0.00   7.40  14.80  22.21]]\n",
-      "\n"
-     ]
-    }
-   ],
-   "source": [
-    "def f0(data, coords):\n",
-    "    x,y = coords[0]\n",
-    "    data[0][...] = x\n",
-    "    \n",
-    "def f1(data, coords):\n",
-    "    x,y = coords[0]\n",
-    "    data[0][...] = y\n",
-    "    \n",
-    "def f2(data, coords):\n",
-    "    x,y = coords[0]\n",
-    "    data[0][...] = x*y\n",
-    "\n",
-    "for (f,fname) in zip((f0,f1,f2),('x','y','xy')):\n",
-    "    dfield0.initialize(f)\n",
-    "    print 'f(x,y) = {}'.format(fname)\n",
-    "    print dfield0.sdata\n",
-    "    print"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### How to initialize a vector field with a python method\n",
-    "\n",
-    "Here we will use print_with_ghosts for a nice formating of field data including ghosts."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 16,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Component 0:\n",
-      "[[  X      X      X      X      X      X      X      X   ]\n",
-      " [  X      X      0.00   1.57   3.14   4.71   X      X   ]\n",
-      " [  X      X      0.00   1.57   3.14   4.71   X      X   ]\n",
-      " [  X      X      0.00   1.57   3.14   4.71   X      X   ]\n",
-      " [  X      X      0.00   1.57   3.14   4.71   X      X   ]\n",
-      " [  X      X      X      X      X      X      X      X   ]]\n",
-      "\n",
-      "Component 1:\n",
-      "[[  X      X      X      X      X      X      X      X   ]\n",
-      " [  X      X      0.00   0.00   0.00   0.00   X      X   ]\n",
-      " [  X      X      1.57   1.57   1.57   1.57   X      X   ]\n",
-      " [  X      X      3.14   3.14   3.14   3.14   X      X   ]\n",
-      " [  X      X      4.71   4.71   4.71   4.71   X      X   ]\n",
-      " [  X      X      X      X      X      X      X      X   ]]\n",
-      "\n",
-      "Component 2:\n",
-      "[[  X      X      X      X      X      X      X      X   ]\n",
-      " [  X      X      0.00   0.00   0.00   0.00   X      X   ]\n",
-      " [  X      X      0.00   2.47   4.93   7.40   X      X   ]\n",
-      " [  X      X      0.00   4.93   9.87  14.80   X      X   ]\n",
-      " [  X      X      0.00   7.40  14.80  22.21   X      X   ]\n",
-      " [  X      X      X      X      X      X      X      X   ]]\n",
-      "\n"
-     ]
-    }
-   ],
-   "source": [
-    "def f(data, coords):\n",
-    "    x,y = coords[0]\n",
-    "    data[0][...] = x\n",
-    "    data[1][...] = y\n",
-    "    data[2][...] = x*y\n",
-    "    \n",
-    "dfield3 = field3.discretize(t1) # <- topology with ghosts\n",
-    "dfield3.initialize(f)\n",
-    "\n",
-    "for i in xrange(dfield3.nb_components):\n",
-    "    print 'Component {}:'.format(i)\n",
-    "    dfield3.print_with_ghosts(component=i)\n",
-    "    print"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### How to pass parameters to initialization methods\n",
-    "\n",
-    "Just pass the additional parameters as keyword arguments to the initialize method."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 17,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Component 0:\n",
-      "[[  X      X      X      X      X      X      X      X   ]\n",
-      " [  X      X      0.00   0.16   0.31   0.47   X      X   ]\n",
-      " [  X      X      0.00   0.16   0.31   0.47   X      X   ]\n",
-      " [  X      X      0.00   0.16   0.31   0.47   X      X   ]\n",
-      " [  X      X      0.00   0.16   0.31   0.47   X      X   ]\n",
-      " [  X      X      X      X      X      X      X      X   ]]\n",
-      "\n",
-      "Component 1:\n",
-      "[[  X      X      X      X      X      X      X      X   ]\n",
-      " [  X      X      0.00   0.00   0.00   0.00   X      X   ]\n",
-      " [  X      X      0.16   0.16   0.16   0.16   X      X   ]\n",
-      " [  X      X      0.31   0.31   0.31   0.31   X      X   ]\n",
-      " [  X      X      0.47   0.47   0.47   0.47   X      X   ]\n",
-      " [  X      X      X      X      X      X      X      X   ]]\n",
-      "\n",
-      "Component 2:\n",
-      "[[  X      X      X      X      X      X      X      X   ]\n",
-      " [  X      X      0.00   0.00   0.00   0.00   X      X   ]\n",
-      " [  X      X      0.00   0.25   0.49   0.74   X      X   ]\n",
-      " [  X      X      0.00   0.49   0.99   1.48   X      X   ]\n",
-      " [  X      X      0.00   0.74   1.48   2.22   X      X   ]\n",
-      " [  X      X      X      X      X      X      X      X   ]]\n",
-      "\n"
-     ]
-    }
-   ],
-   "source": [
-    "def f(data, coords, p=1.0):\n",
-    "    x,y = coords[0]\n",
-    "    data[0][...] = p*x\n",
-    "    data[1][...] = p*y\n",
-    "    data[2][...] = p*x*y\n",
-    "    \n",
-    "dfield3 = field3.discretize(t1) # <- topology with ghosts\n",
-    "dfield3.initialize(f, p=0.1)\n",
-    "\n",
-    "for i in xrange(dfield3.nb_components):\n",
-    "    print 'Component {}:'.format(i)\n",
-    "    dfield3.print_with_ghosts(component=i)\n",
-    "    print"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### How to quickly display a scalar or a vector field:\n",
-    "This will only display local to process field data."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 18,
-   "metadata": {},
-   "outputs": [
-    {
-     "data": {
-      "image/png": "iVBORw0KGgoAAAANSUhEUgAAA6oAAAFDCAYAAAAzqPN9AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvhp/UCwAAIABJREFUeJzsvW/sfc9217XmfO5tLzftT3oVa2lrMEr6QEJKRDE2aRoMsdGq6CNIlKAP7gODoqhEjEkfYeITRWN9cGObQmowBpU2pKaA2hBIgKZVK9AgxKhU22CtRarQ9v4+44Nz9jkza9Zas+bf3vt8zvuVfPM9e8/Mmtn7s/c++33WmjUhxkgAAAAAAAAAAMBZuBw9AAAAAAAAAAAAIAVCFQAAAAAAAADAqYBQBQAAAAAAAABwKiBUAQAAAAAAAACcCghVAAAAAAAAAACnAkIVAAAAAAAAAMCpgFAFAAAAAAAAAHAqIFTBoYQQ/pcQwt8IIfx88u9XhhC+OYTwoyGE/+/2/zcfPVYAAOjFeNZ9KYTwF0MI7yGE3370OAEAYATlWfetIYTvDyH8nyGEnw0h/FAI4ZuOHis4PxCq4Az84zHGr9r+EdHPENH3E9H3EdHXENHvJ6LvDyF8xZGDBACAQbJnXYzx/yCi/4GI/gUi+rGDxwYAALPg73V/k4h+gIi+iYi+loj+LF3f8wAwgVAFZ+TbiOgzRPT7Yoy/EGP8D4goENFvPHRUAAAwmRjjd8UY/2u6vsgBAMCHI8b4Z2OM3x1j/NkY4y8R0b9HRN8UQvhbjx4bODcQquCM/L1E9OMxxpjs+/HbfgAAAAAA8Lx8KxH9dIzx/zp6IODcQKiCM/CHQwg/d/v3h4noq4jor7E6f42Ivnr/oQEAwDT4sw4AAD4i6rMuhPANRPRdRPS7jhkaeCY+c/QAACCi3xxj/OPbRgjhXyGiT1idT4jor+86KgAAmEv2rAMAgA+K+KwLIfwKIvqjRPQfxRj/4P7DAs8GPKrgjPx5Ivq1IYSQ7Pu1t/0AAAAAAOCJCCF8DV1F6g/EGH/v0eMBzwGEKjgjP0xEnxLRvxRC+MoQwu+47f9vjhsSAADMJ4TwFSGEz9E1YdxnQwifCyHguxkA8GEIIXxCRD9ERH8qxvhvHD0e8DzgyxCcjhjjLxLRbyai30ZEP0dE/zxdw0h+8dCBAQDAfP4oEf0NIvqHiOhLt8/feuiIAABgLv8UEf39RPTPsfVV/86jBwbOTcgTqwIAAAAAAAAAAMcCjyoAAAAAAAAAgFOxVKiGEL49hPAXQwh/OYSAmHQAwIcEzzoAwCuAZx0AYE+Whf6GEN6I6H8iot9ERD9JRD9CRL81xvgXlnQIAAAHgGcdAOAVwLMOALA3Kz2q/wAR/eUY4/98S4LznxLRP7mwPwAAOAI86wAArwCedQCAXfnMQttfT0R/Jdn+SSL6DbxSCOGLRPRFIqI3evv7Ph8+EUwFYZ9RIxj1Q/FBMqDv02zwunwIbEwxWHXLdlEqY9s1m5Ftt5ar4xBOd7SOiW3HSnm+Het1AqtHwiURovAnirnZQBQo2Xe/LPJ96XZu49FHul9qn+271S9sUGl7s1vaFsbBj0e0S9nnQLHYl32+nVjpjtPuwuC4n1P+t7/yZfqZn/20rdG+THzWAQBelb8Z/1/6xfgLH+9ZR3jWAaBy5jt+ES3PupVC1UWM8Ut0TclPn1y+EP/Bz/wjZSVtSbnL4xhDqkQuSf1t/23fvd7WdrN9CY+64UJ0CY+6ITxshlu9rX5SJ6b7WJ0YwtV/nW6/bZ+v/ccQNrVz3RcCxbfrvq0sviVlV2Vy65coXh7b97K0PK0TQvI5+f9S1qN7P0ndS9mmsJWNiYhCLPaX/T/qpPseY9g+P/ZRiLdzG69/jhAp3OpdT3e8CroQ6XKJ2b7r9lXIvV3er/tC+jnS2+3/bfu67/3++TPhnS637c9
c3ulCW713+uy93ju9hUifCZ9e29Oj/WfDp1d7FO//f/byZXqjW7v7/+/3ttd67/QV4dPb5Xutt+1/29qln282ts9EdG1zG+/bTZRexxHv5dvd9BYeIRhv933h1mbb/7gPL8k9me1XAjnekvop3/LtPyXufzZczzoAwMvyp7/8Q0cPYQrZsy58If6G8A8fPKIPiuWU+Qh88OWkw+WD//0MWp51K6+C/52IvjHZ/obbPvBs9N5LSbvApkLzbd3Go2JMG3GXbWpvwr3/nth/L9zD5+DNfRLBYvCsAwC8AnjWgfWEy4cWqeESXlqktrLySvgRIvrVIYS/K4TwFUT0W4joBxb2B0bR7psJemi61iuUr7NeB5eTCsJPTyqgXxA86wAArwCedWfhI3pTP6hA3YQpBGofy0J/Y4xfDiH8DiL6IbpGCn5PjPHPr+oPzCXECeIy0mNeJ7OX2U7qlTbCXWyGGJhXVWmX7k/acz7icx7sD551AIBXAM+6k/CRXl4+qDAF81g6RzXG+INE9IMr+wBryIQkv+e896AmTIkJV8ue5s2M4Tp3tToGvU6MH+t5D44DzzoAwCuAZ92BfLQXlg8kUiFO1/FxrhJwDM57k+vFHm9tLIwon1lHWtFHe+YDoFJ7ITjihWFWn0e/7PSc26PHfGZ6zhfOJ/jofIQXli2098lDfHkoL0TqWg7P+gvOiRn6a4XqKvW6w4i10N8t6++G4QHu6fqs81LBRyHs/0V9xpd9iFUg8bJiFS+8gPHsAvWJ70sIUFr49/OfWwhVIGIKywn37pQ5sCmaeIbgBAAAAMAz8awC9UmF6YcVpU/690iBUAUPtORHXARmyYpIF6499z23Zy5PE+t9GMmULN5juC//8h4DPKwAAAAAWMuzCdQnE0JPL0if7HzPAEIV1AlGciXrnjdErOpNNeyZob8LgUgFAAAAwDKeRaA+iVB6OkH6JOf1CCBUXxgr/JYvHxNI8bB656tafffY457Syc8kiFMAAAAALOUZBOrJRdTpRenJz9/ZgVB9YQqR6vVyjgpT3nd3oqWBthXS0F8AlnD2L1cAAABrOLNAPamwOq0gPen5GuYk5xtCFdSJRFG7D53XsTtx0oH3RYyBwk2cph5VzFEFAAAAwDBnFKgnE1qnE6QnOz8mZzt3E4BQncEZHzwzCUaY8EKv5tW+kQxJS/CU1cnbpmK0qMrE6ZsgWgEAAAAAmjjbe+KJxNephOmJzkvGmc7RzkCoziDG8z2EBjFDdSfYM7MK84b3MURx/3VbaX9LwNQ6fIhTAAAAAAxxpnfDE4iwU4jSE5wHInpp8dkChCp4kAhGdzIlr+nedVk1jypfnsY5Js2bysEcVbCSQEThTC8wAIDDwRPhA3GG5/uBguxwQXq0GD36+J0c9R7S0iuEKniwcB6p20NrrKNqjkMLA+7wpnIwRxUAAAAAVY4WqAcItMNE6VFi9EQi9BV+8IZQPTOL78Fi3ql7aRhnvbQJq28tjTMVa46rE4hUMJ0QiC4nCT8CAJyDF3jp/LAc+bfbWbDtLkz3FqQHCdFXEJ09QKiemXcietuxv9n3yOp7Tk2gNG4aWX8BAAAAYHKUuNhJvO0qSvcSpDsL0dMJ0DP8SN5wTiBUX5jCo2l4Sru8n4k9M/Q3C9Xt6Ifb6MxEnGYExhxVAAAAAIgcIT52EHK7CNM9BOlOYvQwEXoGsbkTEKqgzoT7cEbob7hl8K2Oa7JHFYAlnO1XVgAAAHX2fHYvFnXLhelqUbp4/LsJ0TMLz4PfVSBUwYOF16LpUXX2WyxPM7iGK19VSMsIDNEKAAAAvDh7vbAvFHdLhelKUbpo3MuF6BEC9IP9CA6hCkTMdU97wmrNJWg6bMZAdFG8qyzrb495hP6C5Zz5F1QAAAAPnlykLhGoq4Tps4nSvb7LP5gA9QKhCkTMbMDeOaVGmRr6W/RlZO3V5qV62xvAiwoAAAC8OHuIg2cRpyvGOXmMS8ToKiGK+a0uIFTBGJa
7sseVyet711E1+n3N36DAqQknzAQIADgWPBLOxepn9GThd2phenZBOlu8fcSQ4oOAUAV1ghG6O+Ne7LExYY6qFyxPAwAAALwITyRQp4rTEwrTqYJ0prg7u1CucPgP5Q3dQ6iCB8ZyMj1ZelPM9k7BaWb9nXzPpXNUIVLBfMJhi4oDAM4KngmHs/IF/mwCddZ4ziRKZwm+swrkG4cLzR2BUAUi5hxVq8xjj9OT9dfCsJeulWqRilN4VAEAAIAPzqqX/0mC8DTidHAcpxGkZxnHjfUZiM8kbv1jgVAFx2POc1WSIfH9WmKlCUCkgiXsseg5AAAAmxML1FOI06OF6agYPLp/mh3CfHTY7r7vLhCq4IF3HuqErL/uMGJLJGqCNPP+5u093lQiLE8DAAAAfHhWiNQzCNSRMQz0PSTIjhSkA33P8RCf98eSo4FQBe14Q3+9nk1zjdWH5zSbo8pFpDamCcvTIPQXTCfQ8b+KAgDOBR4J+zJbpB4tUHv7P0KYjojSA/ocE+CTrrM9ROde7yVIpvRB2PuHECOZUhfe9s6w3WKOasdc2R4gUsESXigZAgAAnIoTidRnEqe7CtOdBWn/sR3o/Z45jhVMuM8gVM/MOxG97dhfcj2ZyZS8orBH+HrXUeWeUm9CJmcyJQAAAAB8QD6CSO3pc09x2uu93Kmv3UXpwfOEVZ7gB3MIVfBgoVfS7Z01xmCG/mYVs0Z5EeaoglMQPsTcEQDATM7/0vj0zHwx31ug7iROdxOmO/TTdyw7/W1G+iv63ytkd9Z7C7L+gjPQc9+YCZjSLL/Ouac3cds6FIT7guWcMUwHAAA+KrNe5j+gQG0WdK3C9IyitPVvsaMX+9rfCTI+nwAIVSBihup2eEe5vWx7xlxTyxPbYQ4AAAAAH4SDReouAnWlOF0tTBvsn06Qdv1tDwohTnmSH8shVIGIGaq7MES4mwliN52/itBfsJQwKaU9AODjgEfCGp5NpLb0cxZxusjuUlG68Dx3XXNHhw8zlr6jIOsv6MLwoI5mAObth721RQd6W/cqOYowxfI0YAkQqgAAsJYDwyfPIlCXiNMzCFPvOVglSFvGunfYMO34Y/jifiBUQR2u9LTPHC3Ul287l6fJkikZffH2o7fQWUUqPL7PTBhf3BwA8MHAj1dTOUiknkGgHipOnfYOFaVuewvF6Op5xLKRcRs7A6EKRPgc0h5hmWJ6VJ33jSpSK2PKipzL05xVnKZ8GgO9Pd8zBwAAAFjLM4jUIwWqR0weJUzPLkpn90udInTGNX7YD+bI+gtmYnlUW9qlJrRkTS33XWrfOb5UpFr3OOaoguU84S+bAABwej6SSHXYPKs4nSpMZ4rDmYJ0lRjdc+3aGX0vBEIVPEiEn3tOaW9Xo8mPQnQnUHLraqe3FYBhAp3uywAAcDB4JJyDlSJ1sjfOJX5mCdSZntNZ4vSkwtQtSldmXu7pYy/7SKYEutDmkPIL6qCsv9kcVb6OaofwjTG/v1KRmob+IpkSWMKTpIYHAICnYfSlvFGkTveizhJCO4rT3YTpLFE66e8wXYyuyrI8s+0BQKiCB4ZH1VxXdZQeuwuEo+ZRhUgF8wlP92UBAFgNnglDnFWk7imMamJnL3F6FmE64dy7zvtEj3Tzddx73T/Jj+UQqmAX3Fl/DbJkStyjOgGE/YJdgVAFAIA5fGCRuotA3UOcjgrTM4jSPUOtW+oRjQvPXd9JkEwJzCQSRe2+c15rbm+se9FTQ1Ti/R+8KiFcY9rBvuC8HwfOPRilQaSezou6UKA+vTBdLUpneK9nZ0Jusbmq/WQgVMED7doMRuivU1jy9uryNIatYo4qRbm+d0znuhfBK7EymRIu7GPAeT+Oj3LuP8hh7M7QfL1zitSlArXS1ux7pTgdEaYrRemoIJ01h9drq6fuivambX9VCFUg0hOaa9E95zUJ8c1Cfxuy/rrHiKy/YEfik8wPAQCA0/JsInWlQF3lPR0RpwcI08NE6ZR5uwu
8LIOC8+h3FQhVIDJleZqeNsWarcm6p1bW39F+CXNUwc58FC8QAAA8GycTqcsEaq/39AhxukKY9p6bIbE7GtI837s6RWwe9M4CofrCtGTy7cr6m4hOM/TXtGEI0lTUeufKOr2m7zHQG4QrWEaAUAUAMPBMaGKHZ6hLpA6G+trezPkCtdt7qh3nRxamI17SUUHquL6bBOiM+2XaPee3A6H6wrQIz545pV198Xo7ZvZNRSzWUQVLWTlHFQDwnOCR4GeHkN8pInWFF3W2QO0Rp1a7HnE6W5j2CvweET8SDlwpdwnRPeetTrPhrwqhCh4YHtDROaumPWUM121ljupiUo8qRCpYAuaoAgDAvjQuQzNk6yRe1C4P6kyB2mFrN3G6pzAdFaUnnL+qMvH9ZplQDSF8DxF9BxH91Rjjr1nVD1iD6QHNhCQ1i1bT/mKPqhX6izmqoIfuZx08qgCAJ+MU73a9z86Z81L3FqkzBerM8N49xGmrMO0SufuI0mExukKsrvjRfOL7zUqP6vcS0X9IRH9gYR9gJobH0y0sFbqSMbWQCmZDPHvFKLyooIHvpZ5nHYQqAOD5+F764O92h4jUHm/gLIG6WpzO8prO9Jg2H1u7kDVF6SKvLBG1C8/Z7yKT7S0TqjHGPxFC+FWr7IO18Dmpauhvp0e1Z3mawoBHPJtzUqEVwDhdz7oQKOLiAwCkPMEz4fB3u8Xe1FOJ1GcQqLPE6axz03IOJonSLkF6hmVueupWcL/XNPR5+BzVEMIXieiLRESfo88fPJoXR5ujaonR1d+r1vI0lIhVbRxG1mB+n2AdVbCS7Fn32U+IJk6VAgCAs7Dsve5JReq0UN8jBOoE7+kUcXp2YdojPM+wxA01iMsai95pDheqMcYvEdGXiIg+uXwBKmFHCq+mFuobxtdR7VrepsG+Jaa9Tt/AMv1ieRowk/RZ97d8/lfGZ/CeAABAK9l7XXiO97rdRepKL2qLQF3kPW0Spy3nYkYob8PYVG9py/H1CtKRxEze94sZ4nLxu8zhQhUYnMjjMrqOajfWOqophmgdHQKWpwFLgFAFAAAfOyRQ6malSD2rQG055hXidFSYzvCWNonxuSHAVSHqvexH30N2eI+BUD0z70T0ts68JTzdyZQ6w4Ize6aNh0B0L0+TeYPH56hCpILZxDAx3AYA8CFYnnTw1Vgd8jtrPuoqL6pXYK7wnu4lTld4TAdtzkrUZL4j1C7tBXNFU0bfX1qedSuXp/mDRPRtRPS3hRB+koi+M8b43av6Awu5TQU92qOazVENUbefJXvSPbLQCWAG3c+6N1yAAIDn4pB3u4Vf1s8kUg8TqCtCe/cQp7OF6QrPLnUK0glzVLvF5s7Rniuz/v7WVbbBIjThx/d1XNvdvxRrHlVvSDD59XKaTAleVOAFzzoAwKvwNM+7GSG/HTZeUqSOHPNHEactXlz1fMm7l89VHblVEPoLjoILy9GQJB5KnG332PYuT9NgXsv4izmqYDoh3B/wDb+5DKH1c0T/Up9a+fackOpL+4/Acw6t8fb+DVrbbfU957TXdivWdSmNcVa/s/Fep/Z9gCiLgqO8qZZInTEn1el5PESgzg7v9bRdLU73EKajorR1LqxlX+uj0f7UNrkBd00IVfDAemZ7l6tRMIWvZW/0LWTCGwxEKlhBTEJ/97rCtH6O6F/q0yo/euw1vOOYfRyt7Wp/g3T/iO1Z7Tw2z3INEPn+vurfADp1Dg5PaDXkV+MZROreAnVvcTrTa7qHMPWK0tlzVT1islFwTptH32AHQhWInHEd1QwuYLN5qennk/zcDgADyZQAAMDgqGdk47zUXURqrxf1aIHqEYIrxelCwTxblE4NCXaUN4nO3h92JgChCh4kAm919sEp66rOSNaUmkvmqD4DWOf1yYFOBQCAuYx6UxvnpZ5GpLpEW13Yuuaf8jHPEoMecThJ5PaIZZcwPU1IsFp068f/AnL0j+oQquBBci1OEZIGU2xPHt8ziVQ
iok9jQOLYZyXQcd4CAMA5wSPhQde8uYUitSXzbadIdYX69nhR9xKoPYJwL3F6oDAd8rwa+9X36IoIdQvPkzyPIFSBSHYDWBfrZK+mOBBJQPJ92ng7xed7DPBYgqVgzUQAAHgCzipSJ4T5LhGoE4TlLuLU4+ncU5i2CFJDjJpC1PPeMeFH9Or7TUMXEKrggSE63cJVoTvr76hY7JyjigRKYDnal9l7g43W+qPtztrP3n0fdVxHns9WPuI5eqbz/8ycyZvaMjdvlkjtCfWteVFrAlWyMVugtorTDhutY1giTDvnqbZ4SFUxar5P16/lrh/QF0eHQagCEVNYdtCd9dc0qCxRw+z1rKMKwFJCyLL+Zrw12mqtP9rurP3s3fdRx3Xk+WzlI56jlbYxHeApkL2fi0TqjFDfVi/q0QL1bOJ0hjAVvbdsxwxB2hom7GgrMmGZYi8QquCBdo1ypdchLLuFbuIRDTFQ3IRkg6D0dpuKVIT+guXgnRQAAHIWifVZ3lQ15LesKNjr8KTWylMbFVH7UgK1FtY7W5z2ekwvlX6I5olSz7XbKEC7Ey01tINQBXVCbzjAjL4fYjFmy9EEoosiJNN+ITbBCYl0fCY9AMC5wLdVJ42Zel1tR0J+V3hS9/Si7ilQF4rT5Z7TantW3+MxlS47jzfWqHvtW6mvjcNi51cXCNUzs9i1bnk5+X61rnXBGp5Xt4dV86iabZJ+J8xRfY8Bc1bBfKBTAQDgwRHe1BY7PYmGhDpLRerMMN8RgTriPR3od0RcrhamU0WpeJ0J9bS+a+NotdFIyxs1hOqZeafTzEda6lG16nkFYipOg17kJQ39hUgFK0DWXwAAGGQnb6p7XmqlzmlE6iyBOhLe2+I9bfF+rhKns4WpR5Q2CNKmsGGtfkP7VnrfeSBUX5jiokkUXebxvOUsar7IEntLXsoNcZri7TpNpgRxCpYSSE+mBAB4TV75kXAWb6q3vid50kyROhLq2xvmO8uDOst7uoc4bfGaDgrTXlHqFaQjXtTud3bv/dNgH0IVPEgunDOuo5qF/QaW8VcbhyE4Y+TPQIhTsCOYowoAAP2s8KZKVV2ep3OI1CVe1L0F6gKR2d8uLzLF6agw9YjSFkGq7K6K0IYfd/aIDINQBSLcoxqzm1r53ID74tbEo3d5GmOOKnQCOBKE/gIAAJ3am9o1L7UWEjxLpK72os4QqL0e217v6WJxagrHQWFaE6Utob0ta7JW2xR97vvyAqEKHmihuqEUrrstT+PuQPnM8K6ViuVpwFICvXaYHwCg5BWfCb0vvTt5Uws881LTrlZ5Umd7Ub2icbZAneEF9Yparzjt9JoOCVOPp1SsU+7TxOjIsjVH/rAOoQpE3MK0M/RXFa7Fmq26R7QHS6R6RSwAMyiSTAAAAJiC6k2dvWZqqzfVGstKkdoTdmsJux7Pa49A7QntdXtcWT3Nc+oVp4YwbfaWFuVUIi53I9QzruOqAO15TfG0abALoQoeuN3+7fX4zZBtO8VusTxNxw0EMQpOA3QqAOCVebb5N40hv6Y31StSvfNRvaG+g17UqQK1xxM62KbLc2qF9HrF6agwLcZKJcL9pHtRlf3e8rv99fcwhOoL07KOal5IvotYyyJcDMRha2QcTiBgwV7EwOZ9AwBeHsxbdzISuivh8abODvn1lin1porUUS/qoLf2GQSqGdbr9JxOF6deYdrx7t0kQBc/tyBUX5iWL8XRdVR5e/ecVUs8eryyrL1XjD7D8jSYQ/vk4KUUAACm0xz2y6vNDvm15qWaXk1HuO/ofNQRL2rPPNQBgdozZ1UTqMvEqeU5tYRpoyh1C1LVGaXdI/Luov3gb0UtmgJCFYhwIZltey+wHo8nr28tT+Ppt2GO67OFBX8aA2Epzmcl7BIyAwB4Jl7omXCW55/HmzkS8usVqT1zUlvno3rCdjWBatndU6DuKU47wnqnCFOPp7S2zcdi1LvXr4nPg25ZCFUgYq6jOiHrb8/yNNk
cVb48zYQb6JlEKnhyAkJ/AQCMk2i35YyI1IpXdBdvam97y5PqqLdEpI54UV9BoA6IUztZk9Kf1K6y7RWk6jtH5ZJv/lHdVd1vE0IVPNAEqCVMvWEC/Ic5Lauw0VfhUfWMyczye54fdcELgmsPAACOY4E3VbVvCeVZ4b4TROppBeozilNNmFrjKMabF7mWrJEuNa/H1agv0T2vvqEdhCp4YAg/9WJ0Z+z1zkl11CHyh/ROXt4GgFkgcQoA4OVY+Ovwcm/qSMhv1k4TdwtF6mwv6pMK1MPEqeU1HRCmhSj1ClLVISTv97RdBYQqeGCITrfQ1EwXN19q3GcjC/31zlFt4NnmqILnBuuoAgBeilGROjPbb4831aAr5PeMInWCF7VboM4I753hPV0gTruEaaModYcAG04pjaEf1gfvewhV8CC9P3rnlPbQIywXeEo1kfoew1NkAQZPRCCE/gIAcj7qM+EJ5tiMelPzsroAnS5SW8Jx9/CirhKoveG9Dd7TPcWpW5g2elqL/lvr3ev77t2VGgFCFTxIBGN20XEhedB3jpn1Nx2j10PLb/zEo/oew335F4hUMJtICP0FAOTgm6afprDfUW+qN+TX4wFW6hwmUlu8qN1huy11y/5Hw3tbQntni1NznVWvMPWIzaJ92cYSoctDgBvaQqiCB4ZHdTT0t+iqZ7mblIasv16HLcJ+wa5AqAIAgI+ZYb/NfU94WFtLw0h1pLpauG+tjzOKVMmLOjPEdzC8t0WgjnhOe8XpLGHaEwZcbeug5W0bQhXU4Rej13tpiUetrCUMOK3nzRyceE2trL/wooLVYHkaAAA4niLsd7Y3dSTkV+q3Jyy3R6SOzkXtCPPtEajDHtRsX9l/1Xs6SZy6hWlVtDoEqfaOXHv/PuAHdghVIMJDf61ffTyYHlp35uAkmVJRqI8vf8b4BGga+ntWzj4+YBCUxAcAgNcFjwSZmWuntiaxM57TQ2uutsxLXSFSK/XcXtSq8BTE7EqBOjr/VPKe1kJ7W8RpITIr4xHbpGVBLxPainWMutU2jXZ6gFAFdfhF5/ReqnNe+bZxQ+ZtnJl+2Zg6kwDfOWsypU9joDe82Dwv+NsBAMChjHhT87JUcDi8qUK5Oi9V2reHSB0SnkI5qH0DAAAgAElEQVQ9bVyiSFwvUL3e06Z5pxVxOlOY9oX/lrvsZEpG2b19x8tMQxMIVfBAU3S3qaAj8eir57xaF/1oN2cUqeD5QegvAADsyMQlwaaF/Er2pHmpXpEqicbZob4jYb57CFRJGEqi0Os9VUSkaFuIPmwVp1OEqVeQKreEKT5nvrs7gFAFImayo5UXaa/705g322MS4hSsBll/AQCgwsywX16l5k2VBOMompC1+vR6WmeLVEt8Tg7zFcXcYoHqnnsq2U32TxWnViivZoeV+cq1+0beLdq0mPh+A6EKRMzlaTqUnxn627G0TAFe+sEzEQjXLAAgB8+Ep2DImyqUV5MnSXZ4eG6rSJ0R6jvBi2oJ1Kz+YoFqzT31itNq/6pgFY7XrE8ZzaJUuNxGsv+a7ScAoQrOhSGCs2RKIboFM777wdnI1lENyc70c4p2EW9tolFPKpN+fNLqeOekp3ZSW9JxSXaldrVt/ln6X0M7H57ninRuPNT+TrWx164Db9+e3ALWWL19S8ci/Z2ssXpp+ZtYf29prHy7dgwt13OyD3E8C6mF/c70po6G/HrXSt1LpBris9mLaoX5WgI1se8RqM3zTwUhaIb2Sn1r/Yv7+sWpLVptUdoW/ivvt9q4aWgPoQoeLFR07jmpRp0smVIMRBfla925PA0Ah8LfWYwvHxXpS1arY+2z6rSMq9aX57O2z9rmY/Wcl5otT/3WZ6bn79Rju/XZ2jKOkb41W7U+er6LWsZtnQvvttVf7z0Hmpga9psX+tpp3tTamG7t3MmTasKS1e8Wqb2hvqaQTcq4F9USjqmtWQLVOf+0sOkM7e0SpwPCtFmUekSrUM/VRkN7F28
wAaEK2pnxAtNTL2tjXObm9w9EKjgHmKMKAAAGDsF5OhpCfsV60rxU3lYSuC0i1ajTHeo76EV1CdSk3XCIrygUGwSq1K+4TxCnmb1yn72/PBdFfVbmKRfrGHWH6jUCoQra8Ya7GcvT+PsKd1FarKNqhY0N3jDPsI4qeGICwaMCAMjBM2ENrWG/StkUb6pkyxqHJC55mdcb21JHEqA3ekSqK1mSI8zX9GQaWXxHQ3zNfr0CtSJEZ4rT6d5Ux7Np1Y/vEKqgjnXx9ZalWMIyEYtF6K8mWrObHWITnBPxi8SatynNW9Uu79SGVU/r32O3NhbLnndMWp+8vaevWp+t56nnc0tfLefIg/a3m1Xfa7PnuGec897x1MakXYvpNjn7AeMsDPutMuJNrc1LdZQV66SK81/1OqoAbfC2ukJ9rbmoDV5UT5ivK0mSR6CKYlQYh9HvqDh1C9OaKJUuceWyXzZPtaEthCqow4VkMMqsdqkJ75xVLkgzI45+WXvvHFUsTwNWE8WXFuWztK3tk8q9Xwre+t6xWPZav+Rq58PTV89x1eq1fm7pa+RFYJbN2WPoPe4Z57x3PLUxzbhXQU7vsjQz6fWmBrs8eIRoNo66AB0RqT4BykSsWCcfb5MX1cjmewaBaopMw3vqF6zCudKOw9hfK5OeQy0JltT6FoO3KoQqeDAhZLYL8xcbTaTuN0f1PQYIVzAfvLgCAMBamKCtrp06qZ9rZ5U1U60xaOJSqFOIVI+dJgHK6oje2O1YWBtTzOV2e8J8RwSqZw6qJ7zXFrFSu3SfIJqd4taqX60nlIt1jLoS6vqsA0CoApHM48mvOy3M1muP4xTIxRzVrLDdnkU6RxUiFUwnkPglst0n2yUnfZaCDNJ7K62fbnvgbSS7M8Yr9WvV8diwjqFme8Nqo52LYhaC0Z/3byLVs86zNK7aWFqRjnPrs3YNSG1qfVn1aufce06serU2Ldej+3rBj1djjCZf8rxkt3hTxS4corJhXuoUT+qASG0J9Z0S5tsyD7UmUA3B2CRQDTHZLE6lfc79ah1eTyoX6jzqGtf0Ds8sCFXwQLvIo/zL04p+LQqRyufzLbph4FEFK4jCO04UvvCkz1boDS/rSXAgtbH2jYzXst9SbtX3tvW2qZ3jWcdi2W39G81g5BroGdOM63zWeRq5r0avF5BzaNivWFcSlJXlaHg/nnmzmki12uwhUluWnfF6UaVnSM2LqgnUdJ83xFcUnxWBagjPEc/pqDCtidZsfJV6rjIF9TgqQKgCF5lHtEMgml/EPSIzBn0d1QmkwhQiFcwmEl5OAQA5+Ka5MWtZmpaw31XeVG8CpaJMFpgiipB1i1QxlHdQpGoCNbHpnot6pED1zD81BavST8e+0nbb/mw8Srm6j9u1mPxuA6EKxpj8i0shWs04OqUPfmN2DgWApXguykDlD0MtbXhIoZYhVSpL69T629q3jFOzke6rZc7V+tLacjueY9XKtL+NtT9FsynZssZa+zvVzq1Uv8W+NM6tv9oPmtY54HWsv6lmSxqfVmYdd+1a5OPw/q167xdQMnPN1RZvqnMsrkzDVsgvq6uG/PZ4UhnLROqeXtRte1CgesJ5Ve+pUce0w+rVyvX97Dril5VwmbWEBJttKrQ88iBUgYg5RzXdtlRgj0Is+jIuZ+2mTvvt9IZiHVWwlEBi6K9WV/zsaWPdu56ylvu3Z5yWDa9Nz37PcVtjHumjddweW44Xjmp/Lcc7ar/nHNTstI7Rc7zeY2ix5Rlnz/0GjsOdJEko497UljmrK0RqCHq5MN91tUg9nUCVBKFqQ+nDaGMdU9VGUZddQ8YzyytIVRG647MKQhWImHNoOgQoT6bUG6ueG0zaZuI0qRd9y9EQUbZ0DcJ9wXLwUgoAAE1Mm5/amu1X6re2buq9qGHMNRE6E6/NFxepMzyo3QLV8d6sLW2jtuX1tDpKPVdZjYa2y4RqCOEbiegPENHX0lVGfCnG+O+v6g9MYGGMLL8
B1DmvBlnW3wlzVL3PaCRTAha9zzrMUQUAPBO7vNftNT81L0zaTQr7HfWmKkzzpjaXU2Y/E3/iPM6gZvX9cAKVlzfYqJXJNseEqUe0au/kR8xTXelR/TIR/asxxh8LIXw1Ef1oCOGPxRj/wsI+wQjahTVBwJoeVSdm1l/P/gYQ+gsa6HvW8S8lPrdt28frcBu1612bEyfZTO0U88WT/jjpfstu6xzbtG9vG+t4PG34ePm5teal1sZfG4/WR8v5ta4Vbcwk7PPY0cZSmwfcch6kfdL51sbG22l91q4F3rfVhqN9Tz0f53yvmzk/taevnbypZxCpz+BFLYUxb2v30S1Q+bZRx1VX6lOwobXxlE3xrGptJrFMqMYYf4qIfur2+a+HEH6CiL6eiCBUT4K5tulA3Uejx0ezrfaCVozBuY5q5w0TkOkXdND1rAtUzlEVvnw8XyrV673WRmuv9d1av7av15bWxju+0Tbezx47nj5abXqOyTOuHjst1473PFi2Zpyb2rUg1fG0qdmq7TsRH+a9Tgv7VfcLfxhrSZoebypvo4lUpXyFSM3e2xaL1FxMhS6BavaRbq8WqMq2q67UX2oj2dWc9ZddQt3hwEb9FewyRzWE8KuI6NcR0Z8Ryr5IRF8kIvocfX6P4TwPO/5ISER9nkirvik624VvIVLT9p4Mkw2kHlWIVuDF+6z7zCdfc/qXUgAA0Djiva53fqo77LfWpteb2uJVrdVtOJaziFTVw9njRU0F2YWVZXV5W6EPLu5WCVRTjCp9Ce1F24rd4rNVj5cpdar7GbNE7nKhGkL4KiL6z4noX44x/j+8PMb4JSL6EhHRJ5cvQBGkvBPR2zrzrjh1iY7vihmhv3YHRseDYI4q8NDyrPtlX/eNEXNUAQDPSNN7XWh4r1s0P3W6rVZv6r1dxZs6GvJ7H0uyPSJSR5Im1USeR6RqNgwv6pQw31UC1RSs7BxbdhUbanlDmbWv+s6y6J1mqVANIXyWrg+z/yTG+F+s7AtMwOuJ7PBemsmUsgKhoWcdVSPrbwxx6P6BSAU1up51EKoAgCfjdO91reLWke3XPa90tjdVYHheqlWmiNjUrkukjsxHbZiLOpIsqUVkLheoo+JUEqnE3qkd+6Xt1vmpe/zgvjLrbyCi7yain4gx/rur+gETSZ9rk5eTMUN9LbGrzkmNZoiDRroEjQXEKfDS86yLYZ8HPADgeTj7M+HDvddlwqwiYC1B7BHLnd5U22ZFpPL1UqWyoi0tF6k9ob6zvKgt4nE3gcrrWX1U6lv7q6JUuOSGn0kTnmkrParfQkT/LBH9jyGE//6279+MMf7gwj6PYfAXszPCL/SuZEqaPcG+h2J5mlRMamK305tqZf29PGm6RrCMvmddoDIiwJNNlO+X6qV2pbZWH7W2Vr9WH5pdra1WV+tXqtfSn/dc12zW2nnHL9nRyntsan/7mfakeq02iXzXHW/vbec5H5pNq3+tr81Gy/k4D+ve6xZl7u1aYmbDG/bLypq9qbyeN+RXaqMJUatsoUh1hfpWhFzzXFRHmK9rHqrVR7I9TaDybatuWiaNiX822hZlFpV6XjstemJl1t8/SVO09BMQ44cUq3cilRlKN448bC3cN6sTMkFreVNTb2vqUeVzVN8p0NsTvmGANXQ/60JyeQYqvrBDen1vfd32x8vjst7aib/hhPILIRArS8qz3354H0lVaRzZOC+P8qIdt8HPXHosF/m47vaV405th3T8Sn/isQV2DKltZlPsX6qTnI/itzb+d4r534f/Le512DlOUV8GrOemUJadD2nc7Fyl403HkZ6TrEvh78avg2wclNfP+kjHGNh+5bqxjpnYcavXJD8/QS5Pj0e7987Kke917kRKnjmiVj8Twn7Vflvmltb64yLWamMI20LUZXVIriOJ3yBvp2NdKVJrXlpbeD7sNnlRPXWEcc8SqDXPqeUxdQlFoU613YKnwy5Zf8GTk748cKyw3dSE1yPrtLeCVMQ
i6y9YTbz/Gq2UV/ZbUQrWvdZS5unDM37P5962veWePluPwXuctXNntamNeQV8vLWxeOtI+1vPm/fceo6hNqbRa673vgQLmB32O5rAqZZAideT2rSE/GZlt+3Um6oKv4edlZ7U3lBfl9AUbPK6NTs+URuKNuqYhHpm3Vp9az/H8ew238sXP7sgVF+YbvHYISZ5P1nfveJ04c0BcQqWkn5JAwAAEZ4JrSwKF1ZxiNEi7Nc7N9U2KrdN9jWH/O4tUo35qK6svqbwLG27RKRLzJbjrdUZFajd4tQjTGuitEGkNv24NvBsg1B9YYqLLBGMmZAMyo3CP/fSY48LycmeWGuOKgAziJfG6ysGkuMpY/651q4Vblsax4p7JbW7fbaOUxprzR7/7B1TbRy1sdXG6bFtnZ/W9tY+bXxbH63n0nvsM85x69+Vf/b00zKOVffKs1MRnL1hv63zU7P6Vp93ITJJKHu8qR6Ba2bxpXLfvWwTemV/R4pU37zRYJRJ7YSxCu08IrJJoDpE54intbDN+5Dq8PpWXaNec7m3zg0IVVAnXq8p8aK1BGKPcDTsxfQLPobHwBaDdVTBCppD/fg1mG5b1+fotWv1O8O+p9/tc60v73nwnjvNRs8xz/579ZyfWj+e9r3XoadO799ltP3MY7Ds4XvkOLTw3pGw37uN68N8qjfVk5CJe1OFdnYZlSJVFJYVkcqFVU2kSqJxZqivKvyEsWr2PSKyR6B6xamjblGft9Ps8c+SHYfoNdtPBEIVPPBc3Eabgh4vJ6+f/AKdZf0NTpHa+WKQClOIVLCEHX5kAQAAwKh5Wq1sv7MTZ2reVKmOMAb3vFQjeZKUAGm6SJVE423bG+orelGLsqRdk+jVPL5WfUOgGjZ5HWnbEq6WOPUI06oorYnYe722d+NeMQuhCkTM+asTwmyn//qijjU030wALOf25SxlLE2xIjqlKNR0nxT9KN13VhSvZId/Tu1o0aTSuHkZH5sV5WxFIUs2PGP2RnzWbEp2rePhtrVxSH3Xzr90fq2oWquOdm3wtryN1k9tvNqYrGvF+vtY59S6L2p/Z25Lslu7RiVPycvQGz679/zU1v5ne1O1pWqIfPNSk/JUVErL0IyIVFFMFuLrYasa6nsvk8Zn1TPGKdaTxin3we0uFah8O0UqE54lpuOJbZcitnxwut7fJz7LIFSByBQhKbxEtdt43CRm6G8qnpmQnqCrAZhPKO8J6R6x6kiftf81amOQ7Fjj1Pqz2nuOu2Usnj4le55nlKfv1nNmtfP2bbWtXTO99T3ta2Ma+dvXxth6rB6bVtvWvmtloIGZ81PvOwfCfn2dZv+b3lSpvlF2HVtpIxWVj7ZGX5nQ3WyEUiS1iNTtc8d8VEtg5p8f9qpttjJhX6tA1Y/b2FbqZPsoL3OLU6V9acMhSK1Lu/U51lAfQhW0k15gThVo/ors7TYmob98HJqtEHVna9S/U5BMCSwHL6gAAGDiTqRkkc5DzYScIERXhv0adooESpXETq6QX2aPi1Q1wy+znQtTWaSWY6BMQDWvj5p9Lm2OelFNT2ZF+PYK1EJc1kQs5fululVx6hCmLkGqXPp7/NAGoQpEMiEZKVvI3e2inOzKjD3CMYb8hozhvl6q9Z2DealgJZHYA3673KRr0nMfSe15u7ROTP7vaaeNQRs/7yPd77Ft9SPZ4f3xiAutvvQ5rdsyntbzpI0zCGW8vjbWmc/g2rnxnnfP2GplVj9afW2s2nFp46zV94yH246PTfAEsLVT3UmUMhuNZRNCftMxqxl+ayL13s/DXs2T6hWpsvArberiMwjtlc9bvUab+jiFfT11UpR6XnHqEqZKn2Jdo153HQcQqkBkhgd0qcfIzBxpFGWiVRer8KiC5WwvqIFtU7KPv/BKL8Pb/7yNtc2SV1Tb8XFpL+587Hx/OuYLqyPdi/wYUzuUbGv9EOnnULInRfrx40n38fbp+ZLOLW8jteX1+fmS/ubWOedox5H2Kwk
tyZZ0XvnfWEI7Vn7cfJ/UFwn9pMd4SfZptrVra2tvXUPSDx/8h9203LKz8jvzjBw9z/RGa5jwYGdtZZY3tdZWEpZpe0nwEOUiVWhT9aTe95WfU6dHt0jlQixMEKkOm00iVeqvVoeEOkJbf5lDnLK+xfd87z6G9Qbd8nYNoQrqTHiGd89R1WCe0pVf8mddngZC+okJ8hecSxzwslobr02rnmccnv21463V94yHl3ueC54v4pZz4DmX1jnQ2tfOW++xav3MtOEZs9S257ocedmq7Zt1v3n6BTJ7CFzmNW1vH/L/jfmnrXNTr/bKelaW32IM93tN6Ie4iGSCLbFVCMqt3l1shVKkBm4/H9OISPXOYa3XDVndmq1yTHmZ2l+KYMezPy+riFPWZ4t3lagiMBc9wyBUwQMrdK/nAkza8RvSLVoTQVosT5OS2psgWp9heZpPY6A3vNw8Lye9rgAA4KnwJlLqmZ/K23rDfi284b6ix9IpYLM2RC3zUkVhmr7LLRKpaQixPQdV6UMVstLYtLpBrluzpYzP7I+EOsm2f78gThUB6vaskoB2e/S8hza0qQrVEMK/SETfF2P8vzuGAp6JxRdb1szrYdVe5mMg0rL+OsdkReCkob9n9aiC+ez5vNuWp9l+i7HC7WtLzEh13EtiNLTXlsqx6qT7akvt8LFaS7BI/1tYY+DHm8L/Bvw4a8urSG2svms2JDvpfl6X26qdL2tMVj+1vzG3VRuTdxkdzUZtiRxv3zW816Tn2t+TZ3i3m55Iyd/xeL9EVHhTrSRKUjuljZpAycryu9Up2ml2kzapMOoVqen+W91WkVoKQja2yud63ZDVrQnQqQJVakfWfp849QjT4vGjjJMz9A7fgMej+rVE9CMhhB8jou8hoh+KMeKt/QMwPRz3JH295FwfMIt9nne3L6aofYGxfZ57pyVqodbfzDrpPqt9S3/a/xa1MdRstLTTbM2w0WK/1q9Vt/f68fZVq+e5/lvatBxPy3eV95p0jXXf761j3+0+4vxUnkRJ7rCsM+JN1fbXQn4pF4fu5EnaZ0XwmSI1E2qhsKd/Do467Hid9moe16Zy3hcl9ZiNVoHqEqdCGRETpsrY7nWlS7kzR0wv1SdFjPHfIqJfTUTfTUS/nYj+Ugjh3w4h/N3zhwP2xPoydv8IMvtrzWsvxCU3hAS8qa/Drs+7gH/4h3/4l/zbEbzbOSiWjKmE/XpsJERBjFbnjKrZfMv9uQc0PK6xUGbqzfpI64fkXZFl+H0KkZoeg/g55HVJGl/eNgqf6SLsS8eyje1yq3PR7cr7I1GIej3F7r2c6LHaABtzYWP7d4n3filEosvtn9T3RWhv/GvBNUc1xhhDCD9NRD9NRF8moq8hoj8UQvhjMcbf3dYlOC0LPZHuX6y9/cdAmZpmDwO1WXwsT2MBcfq67PW8u3/5bBu1GNSWerW2nBFbrePlbVq207G3jNk7ztp+bdtq3zrWlnFL9Tx/G+1aaGlXi12tnbsWrPOoHe+s/nvvtXS8q8Y2yFO+21U8sS4Paev81B682X1rmX69CZSK/VSto81L9SxD0yxS75+JfQ7Kfukzs3Mfr1zfslfL6Ovyomr1KT//1bFJ/RDdnwlVL6tURkIZ+5y3Z88fdlnKnlVhn8RIW/LNUf2dRPTbiOhniOg/JqJ/Pcb4SyGECxH9JSI658MMtKNdwEY9t2lrvk4qkFvEslbXaePZl6c5+/iekV2fd5f0h5aY/68O0FlvdtteW1K94guxYbt37N5x1vZr2972rXjbS+fIDM8y6rS26z13LfQcy6z+R+613u0dOPTdbmbYryUwU0HqDfHtHVtL2C9R09zU6nI0lTVTr/sfH1OBKM1L3T6nIjW1r4rUzf4eIrVRPOafE5E6YEdtS0kdMtqo+5wC1SNOPcLUEqXa5Wxc5q0e0xoej+oXiOifjjH+r9lAYnwPIXzH3OGAQ9EE3oSLzvSoem4KDg/9Tcdufk/oLwReb+tZQNb
fJezzvEu/CAEAgGjvZ8Kp3+2mJFKq9dESsjsr7LcqOpW6Hm+qkCBJDflNbBbzUrM6qZhkwpeL1HACkSqIOc3WTC9qIc4se0SKHYdAlQQoyfVzW7IwlcYtftbq3+uue3euCtUY43caZT8xdzjglHAB6/V6ps80JeKwaRhWqJQqWvWbh39fPJNIBWvY9XkHoQoAOAi82y3E7b2tCFhPO+YAFr2gRl/FvNTUTipS0/1cCBM9RKpmS/s8U6RW7faJVFtcNnpRJfFJRNv8U95PYSvdT5X6N7vq2NK2wrYc7qtF0Mi7Z4B1VMGD2RdaIhjdIrUQxUG/MTSc7a3Q33SOKpanAfOJ10QFI6HutdB3y7anzBta33sM3nbSeFr6nDkGq5zoMc70M2+n9d96XtN+rDHX6tWoHY93POmYpO3a30U6np4+PfVmX1+16yDb+cFZmO139/mp5a/deh0t7LfiYXVn+pWSNTHvqORNvbZnglNqw72xrE3VVhBsiWIy/RyK/eJc0BZblXotAjY7P7X66j6nQN32Easr1o9FHWs+q1yHT08gGe+tw+s13HIQqkDE7QG16ngvRCtsN7lZQgwPr2pgL/lq2LLfo6oBkQqWkH45eutb23x/771Zaz/whSN9ubrb9LSdPYZWm+IzyVm3pU+rfORc8fajz3vrGL3nrfXv1/M3nn191a4DcA5YeK9ajb88eMVvNamRUm5l+lUTHxl9CSG/17J8/KJI5d9bIfGkhgaRmo5ZFICh2N/jAbXmo04L9XXU9wrUbu+pJU6Vv7F3zmrP8ytW35v979UQquBBR0ivFzOZktOeO/Q3b0QxxOYhp8mU4FEFAAAABhn1pra070mkVPS3iRyfgFX76gzr1eegBrWNK4ESs6GF/Ba2FSHJ+3KJ1KCLws22Jky5ANTrBVe9LgHLj1USkGJ/USxX91G+ndc1wnqF92uXMHWIVFWEdt5mNSBUgYgZwz4rXG6U1LbmXe0QqUS5FxUiFUwnUJ71VwqJJMqvaRL2BVbXEw46Wpf3K42L260dH69TO35PNMVIyKsWsuk5DsterY+e0FrtXKb7pOuH77fKan9b77h4uXQtt3zf9IYOe8631Bcp9bVj2dD649srvy8/IgMJl1xhwm0GXfvUsF+PNzVF86ZSKXB5yK+4FE3gNh72xXmpmzBSkicVNh0i1b1GqqOsOh+1tyw5tuzcpNsk7BsRqIXt3HtqeU5Fcao9c9jlWghS7ZZZ/NyCUAUuVOHaKUbjBBtqyHBmO1BPNrJnWJ4GPDnWC2ptO93X8qI7o67Ur9ZO21er03I+Vnx5asfoOQ7vWFr/frPPd09/3n0127zcuqa8Y6ydx577ZWQcI3+Xj0iDN3RZxt8V81NrNqXswI7PrrmpvH0a8hsE28ZSNLmNx6Y6L5V0kVp6Jk8gUll/kkgU7adlJJfxseR1Yt7e6I/oJlD5vvt2RaBq9YU6/HNVlPbcJuayYn4zEKpnZl3OARnjwlHXPXVebHy+a0/obzZH1ewsHV+f2IQXFSwH1xgAAJyDWngvX5ZGtKEI0BtD3tSQiFDJm2qF/KZj4KI3FTeO9VK37SLDryS+BDuqMEzLtc8OO1rSpN5QX7cXVRx31G3zfZTbyOs9BGpVnLL62f9Z/cY5qVk5Qn/BxjsRvR09iDnwOar84enBHYaQN4IgAOfkQvUQRVLK+X4tNFYLp9X64O1qIaPW2DzHJtnw1vOEhGr/W3XT4yDSz01tnNa5kB5Jnr5r1P6enjZSuXUtecZUOzYOP1fatZrWr4USe/+mUr+eazZtw/dp49TqfTQWZvq9d6EJyFnzUyU2b+poH+yzmgjJam+1E7yp5vIxiki9zx3lyZMEwSbZKfthdnldSwiyspb5qM1ljrr5eOQwX9HrSooNethxCdQRcapdYtK7s1S38/L3AqEK6kTKfj0TXyIm9qXZKzyqWt3JNw2SKYHpbF9m1vWrfnk49hlfVmYfypehawxWf7V+e+p5yjz
HUztXnnq1v4l2niVGnmme66DVtvf8eOz3HFvtWrX6GbkevfeCNSZpn3XvLH7hA1eG56da7VvCfnn9bCZHYyEAACAASURBVH/eX7c3VfGg8rr3kF+ePMlo2yJ2a2G6VUEb2HbyuVmkCrbEMvLVzfsxvKhpfVLq3GyI4y/GY4T23utVwn9TmjP/Vt6LJz3PIFSBCzX010mPB7W04fSoGuOLMVBoFJ0QqWAFs3N5AADAh6U34+/MIawI+9VsSNvaMjVUelOzd65LXm6G/JIkwpK2ybzURzn7zMRul0hN6xbCVBhbjxC1bAvjsds6vKhcoBZ1Ym7TqsfK0v8jL+ef7/ucHlavZ9Xa31uPIFSBhwkv1ObyNFZfSehur0d1ttP3LCDZ05ODvx8AAPQxI+HSZoOLYK/XU6rrLe9JlsQo3qGKbW5Lb18ITF4/kDov9V5e2LPKjfahbC/bE0SqcGx7iFRVWKbbiW3RiyqJVKPesEDV6nmcQqpQnf9eA6EKRFQhSbSv6lMnbbOwSWl+ktC+1ZtKdN7Q309joLePqMBfgUAULpHi7SYLId69/XzfvQkrI6KinO9L90tltXJPP7Vx1o5LGkNLO+mYJKy6Ndu1cdTa9IxpdCxWPy3n4qgxtvQxMpaWa8gztlrf6f8b9/7xPN83429Rhws1x/xUrb1nuRrnkjTaeqqbDXNOq+FNfbRP+yImECVBGdT6d2+uJgJDblMuT2yy7Uf95J7paK96ci0bYn+xWpeIimRJqkBN25JcrypQpVvInMNqCFNuS3kXXhkhBqEKRLKb1nJJOi9O/h7Q815gZvzVbqzYHurLOaNIBc/O9UsnvTa3z9I+77a2z1Nmldf6scblOS6prbddy/3dem60vr12PGPzjr9nLFY/LefiqDG29DEyltHvCG6n1rd+fvFds5ql81MVzLBfK4kSF6/KnNBr/Ue55h0tBKaW5Veqf2H7RAFoi15N5C4RqYYQ7hKpxTYL9VXEJveiZsKT98/tbPV6BapHnErXEa+zbVoCWGNQOxBBqAKFzKPKL6iOWFruoc22NW+oRQy0vey/Kgj9fW4wRxUAAMaZnvHX20ctWZLnM9+2vKkc7k0lJrouQnsmzApRK4hBc16qYqMmMGURK2xLNipC1iVSC0Fo28i3y4RJpheVtS2Ot0GgThenijAtLjvL68qrut5N/e+vEKpAZEIklGlviv3UhjpfdVzMIfQXrEB7mI/Oqd6shg5bW/3Uxsg4evr2tPeMTzsPLeOq9aOVtxzLCJ6/tXUMnnF52s+4Xqz+a+ey9+/U0n/rNWfZ0raBwpGJlKRQ5Iaw31hZl/VerxK+m5ZZ3tR7e2M5Grl/EkQPsToh33cXfSGzIXlVbU9sgyfVIVKbPLRUt5FvK6G+6TbpbYu+SK8zRaBa4lQTpum7iXD5iu8uCx9kEKpARPV4cjq/abmH9VHA7YcpYlMiRuFXoycCHtXn5RrVpYTZjtoesKV9B46Oo6e+1d5jWzsPLeOq1dXKW45lBM9xzTyPWtnKx6jnXPb+nVr6Hz1XUnmx/cTfR08HT6R032b7Z9KQKEmsa81NpdKzqQrLS8jqiyG/m6DiIb9EZAnSoi9pn2SnIkBnilTRc0vMRtGuEuq7bZNU7vCipoIy67vBQxqU/UKZR5gW7yjC5ep6Zg2+q0KoApHZHlVVmBJV3oSUCzzEXCSnNjL3SchspEksrBvsPYa7EDyjN5UIHtWn56TXFQAAnJrWhEuzvKy1BEkrwn6Lfbx/waZkJ/B9kh1WRwv5zfbJ81LzfUEVkapozGyFss5MkSq1YyKyFupb86K2CtRrueBdJdYm+9/eL4pTpzBtnZ8689UUQhXUMYXk5L6K2Kgg3wyWp9UYU55pURerZxWn4ONwubRfY951gKV6WtuWur3jmYGUUXWGvdH6UgZXNaw7yllftzIi/3FJ2WQ9Y7Dq9LTr/bto9a1zpNVpubZrY5pxXRHJfxdgsyzj72a/02Vtzk816A7
7rXleBbtVb+pWR0qgJIlKYSmaUkQGW3wGoa0odqV2SXtexytSWb0Wkdoc6luUR70+E7Hp8V3H2yFQR8WpdcnxpEok05bY0F0VQhXIcA8ov8EfBaRftQZRs8FtpTH0ka2j6hlH50tC6lHlXJCZEUyg551pJHtqb0bfGeOZgZYxddTeaP2ebLozMs+2ZIeutR9p1/t3qZ3P3szQnj5ax9RrAyL1SeHhwCk9y9JI+z1eUFZf8lKWYxHsSIJVywpciDmpHmvPRaBpj9xCUxOplnezWaSaY4lmPSLSQ321+kzEdgtUr/eU1cueSdplmz7DKEeen+r53qhWUYFQPTMLpkh0cbt37jevV5x6L8xe4WsJ3Hudq7gVNazRj+VRfadAbxCrYIQQ8RILAMjBM6GZzNuZisi+XwLn1/eG/UrbnndAIbmSy5tKuWjM9ntCfkkWjlY9cZ/UD+X7rP3iGA8QqbVQ3yEvqiRGJwhUjzgtw4GlHziLXUKd/mcbhOqZeSeitx37S4Rf8VBICcpnyzR/HkelD8OeuY5qh2fXm0zprFl/wfMSaOzBDQD4eAw4HT4uK5IaaYmUzHFU6liCVOr7huQ5LbYrSZTu9Rq8qaV4k+o6299tlG0lD2oZbszap+PRhLBms0GkimLy9r1s1iO9XXF8Qnle5vCidgpUU5x6hGkRPUNs24p6UYuagVAFLkzhqjaix82hCdMGstDf9IYlyj/zdtp+/gBO5hM9QzIl8Nxsc1Sj88bYrs30Ok3b8nmC3K60z1OXl9fG4T0Gq6+WsfbSa6vl3Hrb9JxHyR63sfJ8aWNu6dOa69o7rtYxzGjXYku6xmf2CxiDiZSmzE/1eE75/FIpiVJmk0TBKInYrW3VK0nkzvK7tS+8pYnILKaPGcJRE6liX3uL1HRsxLa3dqzvfLviRe0UqFXvqUOcasK0Jkr1CHfn+3LDezWEKhDJhGVg2z3ey+Jhmm4k9rjtGJKHRxT318akFXGPKjxcYE9mzO2z5ibOnKfaU27hGfeseYct4xhpV7PladOWjMJ3flafrxnzbdM2I21HxzDSrsWWdKyv/v2zOpHSMryCtGVpGqOOy5uaij2hbunRvAlLrulFcRpEIfjYx49TFrM1kdskRntEqiA2ZfGa2CYS+nKG+koClYgdPxOuimjtFaiWOLXehT2eVF9+g+RztfYDCFXwgD907hukX1VWWXpRMo8qF8JSm3vF+0eWTEnpq6HIxVlDf7GO6nPzpK9lAABwHJ2C1p3x15tIqSdpkiPst5ZE6bpP7t+am6pNtZKFZSjEVzXkVxGGUuivKXLF/Y927v3S+DpFaneob2FTEJ4k1VW8qnT7s0sCdUCc5j+YUUb9B+til1m/BwhV8MDrKbWEpdOeO8qJe07v/Ro3QIfHl3NGYcrBOqrPSwiR3i7vanlt6RBpiZDNbi/WEh/ctjUWy5ZVx9uGj0WyWRtfi03v2Hi93qVJVi1pMmv5mJ5+e66Pmq1ZSyi1/v29y+fUbEpje3Wv6i5oQtd+47ZtsnJ1WRrJ1kXZxxHmpm7i0TM3deubi7vrfioTKKVlhjjNhWgo9uVjFfqVRKpiP7Vj7qd0TI9+TZEq9H0XqYYINUN9s3aCF1UTpIJo5QK12XvqFKfa88g7P3XFMwxCFTxwi8ekrtOj2k36IpveAJqANdoT9b0gPYNoBc+HdR22htrO+HJYFRbs7a+njVbe8vI/o1+p3hlCT0fszhrHqrDcnr9JT7nWb+v9O1IPyAxl/G1JpJR3mvQZ5P1WG61uLYnSfb/cRbM3dWuj2Cm9pHLIr9gu7d8hNvN1ZOueUU1YLhOpggjtCvXNjkcQpA6Bet0ne1B7xKlHmHq+q1vuokuIrlt0A0IViIRoeD17BG2L/czGQ5AWob+a/XRfzJen6XkxOGvoL3hebu8PAABwB4+Ek3B7OLvDhSUKL2m+LSdMkoRj2fYhGnWhKY2n5k3tyvLLhWZRlvSn7afHuETROUukMsGphQ5
zkcpFaFOoLxtXuYaq7FUNrE8iQaA2ek/L7TavatrXRu3dWH/n9r9TQ6gCEXdo7mx7hrjNRCq/+C1R7OxaAyIVTCcQvV3e6Z3dGJcQ7z+MbGV8X3o9Svt5+81GWrZ95mXSOHg7qd/amPkxatT69NjQjlNrpx2b1q/2N0vt823JrmXbM04L7Xqx7GjXkPc8auOQ6vPrRLOr/S2k82z9XbQ22pite8Jqy8fJ60r3CTAYXZpmMOOvaktLkuRZaobjCQUmQ4BeHN7UFnskiL/7WG2hqZaRvl9chiZpc1qRyuxRMbY+L6pLoN7KJYFaE6fesm1IG9Kzaq/w32VCNYTwOSL6E0T0lbd+/lCM8TtX9QdOSPr8Zh5Ut7fWuuCD8hmAHel91oUQ7wmx0pD0CysjeiTO4gm03sJ1rlz6JbLNfU1tp/1tfW39bHWu4TixsE2Uf0mlY9P+58dhzX8NST/pvN10fOl4pHmtfA7hZod/lsbA/wZbv9J54sfIx721kRKdSeP3HKPUb4p0Tt4u79nfND1P/O8s2d9eyTc723kgdm60MdTGnJ5j7e+Tlkn2eFvpuPjfNj032jxT6RqW+rPmrGp/R35vF8vTnPx7bNV73Wky/k5Ys7WYn9ojVrV91n7yic9CKBre1K0/UXg29lm8/6V9KoJXFK+8jtGnXBZl+4JIffQneFITe00iVRCiTSJ1E5lbGSX7kjYtAtUrTr3TdVzLEter3FnpUf0FIvqNMcafDyF8loj+ZAjhv4ox/umFfYIV8IeF4b3MSOqpwrTBXhb6G8O14dYuteEdHx+G8rIFQIXmZ12gmH8JaJ891OrzcvblZtoYuR9a7Evlnn21Y2k5r9yWNbbaeEbqj/79rXPitV1r23rcmv0Rm6v/tlYfnnPbcv3ePoeGcLiDeMr3umoIr3d+qXd+aqU/dW5osY+akijxdmb/FXEpeWK5Z1Nda1UTkkpbqX1VvPJ3S03gZm38IrXZk3rfLwjSxK62HVJ7RM0CtSZCPeKU/yCdUvzoJlw/q96flwnVGGMkop+/bX729u/0T+GXRhN4fL9XBFrP/tgeXlwsTaONif+KBcBCep91Hb+lAADAYRz+XjfqeeXtV3pyjWVprvtYHW3f3Z5c9khiVIrPTWx6vanSO5kpPCkvq3k/XaG9Vt8tIrUoaxSpiaCU2+UiVVx6RvCail7UpG3IPj/s3D+G6Bao0rzTkAheIlmcWqJUEqNd81Qbbr2lc1RDCG9E9KNE9PcQ0XfFGP+MUOeLRPRFIqLP0edXDgfUSC9GS0h2eFSLoh7h29mXF3hTQS+tz7qv/Nu/mi4hVufoEdnz/GpzWa05nxK1uafaOGtzEaV2Wllt3m1trm56LqQxacebtqvNzbWOwTNf1/pbafOIpfmZ2ti0dt55plb7ljnGHO+cZ8lm6/zn2vnx/D34mD3nxPP3lOw+C6d/r2vJ+GuaSYVmx/xU77iUsF/tspA8nUW5w5satVMQKBe4Yv9M3Aapff45F5VBLheEpygSqSJSi7FMFKmFRzTxom77iXIhy7crXtRegVr1qt5sXdh+/lkTprVQYK2eREv0yFKhGmP8lIi+OYTwy4novwwh/JoY459jdb5ERF8iIvrk8gUohZOiCtfO79jMXioyewWnKqoDjXpVW5KYgNek9Vn31d/0d0Q+l1RDK5fmhVpzRmv9WO1Hx9jTxntctTYWtXZaP1YfnvFL+2aOxWtDo7e9t67n72eVea5Lz/npucaszzU71vER+TTW0TS/14U173WqkBxlwMsqzk9lIcJq2K+1dqo0JCWJ0sPm1h/lgikbnyE4szpk1yFZWFr9uT2wmhAmu86QSA35MUllhUjN+meiNGtTzkX1hvn2CNR0WFyg3vNiOIRp6WUtb+tVj69dsv7GGH8uhPDfEtG3E9Gfq9UHB5GIRLfH0yksudDt8ai6l6fpFLvaHFWIVOCl5VmH6woA8KzMeq97ykRKA/NTr+21MRhtTS9rIja93lS
vwK31pwlOKgWnJNKrHljJFm/H6mgiNe2zW6Rm/fN9MbMveVV7vKhakiRTtN5s1MSpR5gWnlTKqb3LjIralVl/fwUR/dLtYfbLiOg3EdG/s6o/8EEwrt5ijioAJ6DnWVckUwJgAReK9D7pd+6ZtoDM2ZMp7fped4alaVpCe7V2WtsQ1P1mNLiRRGnDIzav9eqCUxSbgkg0BWcqUpVysX9NvAr1TJHKjmVIpN7rCCI1HR/zqope1FvdTbxygXr93C9QpdBeLlAlcZrtS06z9M6ivcdUp9I13E4rPapfR0S//zaf4UJE/1mM8Y8s7A+Mkj6To/zQ4vW8F5t7Gk6nNzRrF/QiC+3GQugvqND1rHsL77UqAAzzNlH8zLQFnpKne68rMv5yASyIxWqWYN7Os7yMYtMV9utIomR6U9N6aQIlsW4oRaSBK+Q3GYcmUvOxKnWp/GyKVM1m0TYadnORaiZN2uqn5UFJmLQJ1Fv7659ZD/OVkiTVBGpNnEpeU02YetdRXfGuvDLr748T0a9bZR/siKX0eoWlhmErC/3lN0PHGJ5hPhA4Pz3Puus7BV76AQAPzv6VdPr3OutLvTfjr5ZIiW0X81OFPlRRKtlm1JIotda79slFoyamtzpBaFMJ003a2d7dvE3N01sVqVzYVkSqbFcQqamYJV5HmY9qeFHvVR1hvj0CVRSjXLAmh39hZWl9Xqe2r6iT/NB5mmRK4OOQPSg6xKnpoe0RuzEQXRzzVUNUTceoP8/fY7gnueA34AVeBTBKgFAFADDOrlSPZI+5rKN9eDyr2z4r7NcaxyUVjHq9XMDd6rPlaKx2LV7V69jldryvqiC1+lLazRCptXBfd9KkRMAWIvUuNIm8XlTVq3rrpiZQa+JU85pqc1Rb3odnraQBoQrqBENodj7XTeGqYM5RVdVooML7ujVhbbRkSsXyAxQQAgeGwBxVAADn7HNUZ3OaREot1MbcEKplC01dTOb9PcJ+m7ypVPFYKuMZEbLuMGFel5ztFBuqSL0fm1DGRWphX/Gk3sp6Rao2F5V7UaUkSWmIb49A9YhTSZj2LFPTcudDqAIXraKyqX1aZnhXi6y/WQdJO8OGJkaJ+m42AHr5TPj06CEAAMBzMyVpkmDDmoPaMT9VFaUX2pSIMrZEgDoPVfKmVhHmprpFKMntqkvRkCxePSLV9KYaIlUbc9WTSkn7Dk+qFupLRJlI5XNRLS+qNAfVK1AtcaoJU08IsLU/o0FTQKi+MJZXc0ryI8OG336438yZSOU3gtOeNxTBWjgegFHSX0QBAICozcvwyriSHe1o2zM/9bqvYsfRNU+i5IWH/XrmppZ9kvi/1ytrCtIGcdwiUuW2sU2kpnY0kZp4QzWRei++iVQt1Febi8rDfGsCtSZOJa+pJkyLkF/h/aXlnQZzVIEL68HSE5prwe257ase1JCXpYKZP1Q6sOaoAjAOQn8BABw8E4iIdluapiZKnYmUVFst81PvfdrZfjk87Lf3vU0Tod41U4nkdjVBqoldzZvaKlLFkN9VIvUeyiuL1F4v6iUrf3hRM1HaIVAtcWrOS+XblefWZXCFAwhVUKfTa+pOkqSJTAuvR/UWLtz67IZHFawkBKLPYHkaAEACMtHP5e4dvbueLvl2Ss982V6xSjchd9HLH/VudbYkSq5xUS4yFW9qNxXRqYrUFttGP5pI5UwTqYkANUXqdpkZIpVn9fXMReVe1BaByr2ireK0Jko1EVp7Z265/CBUwYNEMGYPlUB9Hlajfk/ob2kgyn0wgZxWseaoakCkgtmkv5ICAADRmHb40OyddEmdT+oYh7bmaaVNm5BLBCf3pjacKt7O5U3VxKNz7C4PK+nlluitJk/qFanJmCyRaiZNqoT61ryoPMxXmoMqCVTNe+oVp6kw5aJ0LPwXob+gh1TcMWHKheujgG2TXDY9mZJ1M7D2mcO2QxzAowrmE4fDYQAAH43X+Z6ZnvF3lTva4zXlaPNTq6HGSdjvCKn
ItOamTmQ45Ld1TEbI75EiNZ2nKolUaW1Urxf1ku3rF6iWONWEaTX81/GOnHtxq9XvQKiCOrd78/4Q8Yb0GsI327ZCf7WLPwbK1lFl49XGB48qOAOBiD4LoQoASFgktYDF4HxYMZESkf4mvmUArnXLsv1OEbBqH2Pe1MKRodT3j6t/XmqXSHWMxy1SEwEpiVRpPmqPF5UL1LTetk/znnrE6aw5qtr7c8vlAKEKRKb82mYJRs2+VwQ3gC9/cEbwAwgAAJwHNeNvTyKl234z7PeWWGkL4fVQDftNzIxk+m1m68ccO5EsGvvnpc7wpF7HpnhTG0XqvbkiUkWvKslLzkhe1M3+RSi7728UqDVxaq2jKof/en6ER+gvGCTzePJnz2zlN1sUm98LvpsjzfqL0F8wm+sXCzyqAIAHPVNTXpqeNVQfbqox2z0hwTvh9b5O86Ymbcz61jiIDvMqzBKpG3uJVEmISiG+XoHqEaeld1V+j3kL9rOs5U8NoQra6fB6zp4PsYI0LBjCFKym9iAHAACQU13ntEe8ehA8nkXYr5qEyShL7SXL0khhv+6sv2dF85Ym7O1N3duTKiVNagn19XhRPQLVEqeaMOWiVHqHqS1V0wOEKqgTbyEkG0H5PGDf4w2Nwk0hjqNzjip+zQZ7EWjNAx0A8Lw8uQxxMz2Rkq9TvSwdT1A+S9uardH5qRab4FLCfl1JlGbOTU3aeOsX+0gvfxaRen9/rIhUKWmSlTBpG5rkRU2Fpbg/EbCa99QSp6kw5aJUen+pOXhSey3v2xCqQIR7QLNQYK9HNX3ex9wm3350rNvOsv5a7YJRlLSPUf8+SUN/4V0FswkU6TOXT48eBgDgRAT8eGULyhYzfA3VvXEsSZPOT316b2mF2soPknf13k4SqVs7YvuPEql3gWmL1FR4SiLVSpi01VkhUGvitCf8l9voBUIV1Ol9fiYKkT+kiixxjr5Mj6rSbxqSUW3WkREYgF7eCHNUAQDAZKXQ1ERxGj7cm0iJqN17elU9WbKjLOy3ciq0Omfwpj6OT/CmsnPgyhSslWmid7FIzeee6iLVWnrG60XVwnxT8SkJ1BZxOhL+y9tLtNzVEKrggebN7JiTWkP1qJptjHVUnWI3a1J838g3JpIpgdmkv5QCAADR9K/Z12GCN7I699Xqz+M97UEQuT3ZfleRitQMr9AlvTz1hDaF/LJxiPu3dsTKDJG6tdFE6kN81kWqlDQpFamSF3Xb3ryokoe0V6Ba4tQSpnL47/wf4CFUXxi3WOS/fHkffN56lhCOQX7IeBltTxAUYAEB1xUAgPECSvWQ+amjeBMpWcc2Y37qLIR3uh5vaorpJXWMh7ddPi9V6D/blbRVI+0yMbq1q4vUS2JTEql8nqrkRdXCfEcEakv4L6+Xtq/xRu/UMs0BQvWFsR4obhFrisxHWY8H9d5wM5feBFyApuNweoCtOaoArCRQpM8GzFEFADzAHNVBahl/vUvTeJHshFC3L81P9byzaHVSoXnZ6krCMwgCtd6vPJZQCMzuBErsWKyw3rtITdu3itR7/3rIb9EvCd5UPrwGkSolTbJCfYt9SpivLGAfAlQTp1s5keZhtUWpNZVJErAtlx2E6gtTiMejRFva74IwY7VbiFRwEIEivWEdVQBAAoTqAczM+JvQ5T21lqXZupfCfgs7jf3ex0xd3tRqn4ogbQ75rfU3SaRK3lQr5Ncb7ltLmlQL9bXCfDWB6vWe1sRpKja5KNU8qaaHteEahVB9YYobPxGJZtbfCX1ZvwxqmHNUe8bEPKppMqU06y8AK8DyNAAA0EnLeqmTMgl3/bp9gl/EvUmU/PaC33PKnCHmu18iIrtCfrk9w75bpIY1IvXuUaVSpPKsvpoXVU+kVPeg6gI2n+NKZAtTTYzWkkUi9BdMp3vegVaUCt8eLyoP/WUPQ9fwuBhXbj4kUwKzCUTwqAIAMo6XNOemKeHRXnjGtMe4Q5lESfV+emwlbave1AZbxT4
mMqvvmoLgFEN+Kd2nzzFNbe0lUtNDkUSqNB+1RaRKInRUoFriVBOls96bIVRfmMJLqj0gbvdys1g15qj2hByby9O456W2L0EDkQpWAI8qAOCVqCZSOsMaqtbSNNa+Sl/3UGBtfqoiZjPRmcyxzcVZ43Em4lCcy8rq6sdEJAtFI+GSYsMM+d0+W7ZSkZq1N0J+pWNcLFJrnlRtPipPmOT1orYK1E10asLUE/br+REeHlXgojuZUocH1C1yW2x3jKNnnVR4VMFsMEcVAMDBHNUdSUSx6an1ZPx9GBoakjQ/VazTYVMUopatTNwJYcKecUgCmInP+tjpIVKZoFW9vGzsTSG/W9VFInUbiiVSubdU86Jqc1F9c1TfC3G6tUn/18qvNst3mBU/wEOoApGR+ageTCHsNbBwjOkcVYhUMJ2wZr0xAMATs/h7F5BPTI7UOWIZmjAW9ivOXU3LHG1q3lRxzBVvqt7GCPnNxp5OD2ubl3pvdhOpj+0xkZoKQU2kSkmT5P11L6pHoFri1BKmXJR6fny/hzVXaz6AUAUAgJ3B8jQAAM5H9qh2rZ/qabPL/M9OD6pnqRovl3Jpmdaw39YkSnzZmane1KK+I4GS2U8l5Je1sURq6k29N9nK2XarSL2H/hoiVZqPWpuLOkOg1sRpKkxbvam8PkJ/wTDDHs8Kw7ZjoPvk2QUgmRJYSSCitw/8UgoAaAcO1QNZKHg1D2s6P1Wec5l4R5Nxet+fqsvYcJhA3PZ56rqXr9E8sdoxFG3qIb/VpWi23RWRaoX8pl0WSZIMkaqtkeqZj8pDerkXVQ7/fYhPr0CVxGmvN1WNHGu43SBUXxhLjJoPwo7nuVv4ttjuGAeSKYGzgNBfAMAr0OVNnY00hpZx9XpVed0kkZJYvs1PbaA57FeoK76fJfuKJWksFLt6MiXdm5rb9YX8ZmM3Qn6LYTtFqihY7+KvPEb+DlkTqXx9VC3U1xau/QJVEqeWN1V7l7F+jG+5wiFUX5jioaElJ+IPJm8SBaV5UgAAIABJREFUI+MBlQnXjqRIVl9mtQ7RCY8qmE2gWF1nDADwWnzk0N/ltKyr6sGb8ZeIbrGec/uvkYhNX10q3+WUumk9TfS2elN9QpSNQRhPaVsJ+TXr+ealtiRPItouAz15Ui3c10qa5An11cJ8JYHq8Z5q4pQLU0mQ1n+IR+gv6CG9UWcIyQ5BOwWjX3hUwVnAdQUA+PAc7UxtWfLGIXbVjL9EdE+kNJttfqojjNcdJswdCTURWxOjST2vvcKbysfP6pkhw6lIlbyp9Njnn5c6nuG3lt1XEqlS0iRPqK/mRZU8qB7vqSZOU2Ha400lsi8hDoTqC7MkHHe0r9mimImBHo8qALMJpC+SDQB4TY7WdE/DbO+p2o/yF6mJ0Wy9VHe4V7Ypzk81yLyrpgfyEVpsis5McGrnwZfpV/SmavU2u6TUy+xYCZNiUr+SWOlW//pbwMObqrFSpG54RKoV6uvxomoe1Pu2U5xqonTW9CYI1RfGHfq7fCCUPWDn2g6FWL0Xxfr3DQBrwDqqAADOC/+Q2uL5tMxsX+qm59O5hqrdUV87opuH9CYsU6+nsX6qxjY/1azj8XAS1cNsKSn31CG9nrocTZDr1cW35D1lnyVvKrE/pSPk9151gUiVwn01kWqF+rZ4UTWBqolTLkwlQVr7IR5Zf4ELy8tpzinVHgRFB7o9N4nQDDE81sXi4rPDPkQqOIpARBd4VAEACfhK0ukWlO0d2dvespTMw9o9MrbMS13QeryXrmRKSh11LVXJDqvHy8XlaPg7ZM2bmnhH0/moPPRXC/l9CFf/vFRi2z0iNbUjiVQpaVIt1FdKlCQJVCm89y0RwBu2R1VYqmZi9CKE6gvjfmByT6vX82rNFXWL3Y6LnT+UBkEyJTCdQPdfSQEAgIigVFfzjL9OJ/NTici8RnLhKFTkAleowoWrGPZb8agW66ZKhGgK6UKQsrbpOMQESlzM8nBeSaSyOt5
5qRvpe6IlUjlSdt9ckJZJk6yESVqYr+RBlcRo3aOai2uJukfVD4QqeGA8MHbDFLcx3bhW3uqm7ZxCmof+pomW3mN4hFtAUIDJIOsvAICDrL8HUfOiWgJ31dI7lsgr6lW8qxVbqqdTqKN5VdWQYIfXVfSmWn3wOmm/irCVQn4zM0LI770oeRf0zkvNumeeVCLKxGavSNUSJmlhvlKIryRGZY+qLE6l9xjftCaE/oKZ9M5dTX+ZYmHG2bbTfhb6y+yrn29tXM97CFKwI1hHFQAAFFavvdpqvxYC3Jnx9+793KbNbsKzOp7b/NRanZrX1CwPsoA12tb6vpYJc1MpkS6KN7UI+d28qYndIuS38KLmIb9Ej3c/LeSXmJnWeamcFSKVz0W1wnxTQWp7VPPw4Ue9pI3wHuOZ1gSPKhiGC0t3qG5KIkDFX9DEjg1zXiHJhG+mWxOvqfc7BaG/YDaB6unbAQCvxZ7BSx8C9xzROWfWXJrm3hflwlUQk2kipWK/1ncmzpR64SF8ZYFqe169WYD5/rRMX15GLvclU5LHJSZQIioEaypKiR4i9Xqq2kN+KdkeXYamV6Raob6aF1XymMoe1VKgauJUEqWuaU0NtySEKqgTDA9or7eV2b9j2Cs8qilpO/OHT/0GSkUshClYSUDWXwAAA6G/B+Jd8qZnnmuaSKlRNKvzTTP7ztBeq6wQiA9vqpYgyc7C295nLnrr3tS8rSJak7LAbKYiVQv55SKVz0s9o0jVvKjSHFQpvJdnDObvKpdMtJbPLM+0JmT9BWvpeU5H+0F6BtKHTzpHFYAVIOsvAABMoGVdVe8yOObyNutfZlyi1vKukiEWDSFZ86qqXlGS3/HEbL2STWv8ope2wZuatEu9qVmz1IMqCFYiOeSX1yGiYZF6F5KKSLXmo1peVG0OqkegauJUzvg7990GQhWIZA8c7uXs8KLOEKlm6K9zfKnX1CJ9AMG7CmYDjyoAgAOP6mQmrc0q2w5LEynVUOenWtmBDS9obc6pteyM2F+H4H3YkhMi1TymVW9qupt5U3lZ6k2910+QkidpWX3TNt73ScuTSkTFdo9IbRGo3HGTJ1eS32Ws6U0tdw6EKhCZ4gFNQywse96wXSv012BGdDIAs8EcVQAAmEfrWqtF/ZZsv7O4zWPdQm3NRErhUU8uu36U54kqdg2vqzUvtRr2K+0LUS7b+uNjlwTtZG+qlEBpw8ryy+t4Qn7TNpY3VUuctLX1elKluaipF7VFoHLPqbbGaor1YzxCf8FchAeFCyOZErcvtbk3lMRpiLoCtZ7zHUIXyZTAbAIh9BcAkPOyP6hWPJ+tAvQpUBIeSXR5O40yNfERbft1UaslQzLDfjU7oq0ovHM2elM3wSl4UwPl3tTMVCpYk/2b41xKoLTtTz2vIyG/nuy+UmZfaT6q14vqFaiWOO3N/usBQhU8MIRl5hF1ekAtj2qPvWIdVU08MgGb6V5n6G8KRCqYD0J/AQAcfNdU8cxHnRmSW01kFCjL8CttJ1TXPM3q2v1btnQhqB1HZe6p4Ak157gGvi/KYzJsyWNyeFMFO1LmX+5NTQ+NZ/nNsv1SnkBpI12KZlSkpoyKVI8XVROomji1EiylaO858KiCPgwhWV2vS2JGzK0mEreHklimDwMeVXAGAiH0FwCQ8wH9hufB65WV6o16dHsy/joSKan9tESaSd7Q1IZQpi436BSzalnqTQ1CXe24BM9ozZv6qBvvbQrBOhDym9LjSSV6CEuPSOXC1OtFrQlUTZxyYaoJ0hnvORCqoI71vLTEqPeZnLk8ne24R1UNAy6zu3lIs/5CpILphPmZ8QAATw6U6v44RahrDdWhcTz+1xMeXT2okuPAnjPKlZw191QxpAlUtyeU7qKyOg81G+Pj/avwpqbvZnf77H2t4k29bj+8pqk3dUML+d3Kall+U7zf+ytEaupF7RGoNXGqiVLJ29pyN0GovjA8HDdl9lIy6i9wnoaSUBQeRo82ybY
VImyQPlzgUQWzCZQnSAAAAGT9PQmaKF00V3YTh7VESq1l6rxRZ11L0Jphv8U+6R1OCHRv7E8Sp63eVHX91MSbmsLXTE333z8bIb9EVPWmjopULdRX8qJKc1Dfkn7S+um+tN7juO13mlzYIvQXOLDEoiVi/R2Q+gB1208fHmnWX69HlQ/JOUcVHlWwGoT+AgDAAYx4R2eK1W0eq0HrvFItZLdp3VTNq6t5SoX+ygRHSn+C5/RRL5Y2nN7UcBerfH/uTU3rc28pXzM1bcJDftN9Up2W5ElcpKa0ilRpLmrNi1oKWr849eTeeKO2SEcIVeBCFZbOq80Upj3JlKxxWALcsKGJWHhUwWyuX4jwqAIAHqzx1704HlHqSdCUMmENVd8P9XolyftqLU9T1BO9n6E9lLdWN0RRzEa2bY3rajMVpvFhT/GmsuZd3tQNK4FSS8hvi0hNycRpIlL58jOWSJVCfXsFquZhlerOYOFqyFdCCG8hhP8uhPBHVvcFdoKH2TroDv3tIR1TiOoQI48eVsQoRCrwgGcdAOAVWP6sWz0fdBY93tXga2c6BsQy2aY0P1UdF9t2h+EqddVjKNoryTEVr6z4WZgO5vGmpsvRaN7UzCwTpRuPpWr0kN8WeMiv5End+msRqW/0XojUtyCFBSflFOlC7/d/W31eZ/v3OA/6v5Zf5fbwqP5OIvoJIvpkh77ACrwJjnpxe2WT0F+vd9WYo2p9V2COKuig6VmH0F8AwJMy772usoaq3q5HJC73zbD+QjZOcw5qiiHUWxMpeep5HQluh4M3nFgJMRbntYpCVhC4zENKVHpTt31ygudYnP7Um1rWfey/CN/pIyG/fOmZFJ446bqvLlKvY8pF6nXspUC9H4PgYS3qGBFiUj6O0yxPE0L4BiL6x4jo9xLR71rZF5iAmjnXaDNbwBqiuFhHlZKHVIeYjrHvuw4ATuuzLiS/PAIAAFHby9tRfNj3unLSoruduobqDc/SNI+ESlIfdjbe3E7+v1ZP9c42eFwlWkKExXqesF+hnuQ9lVZ9EEN9yc70m6J5U7UESmXb3BvqmZdKREXIr5TdN21nidRaqO/dbi0EuBCulWRKnc+31R7V30dEv5uIvlqrEEL4IhF9kYjoc/T5xcMBXszQ3CyrLukPtgRzjmpW0VGn1o6NyWsynaOKZEqgkaZn3dd9/RuuKwDAM/Ih3uuCR4ju9Uu2GdKrN2tKpCTUc4lZwetpLT2T7xO+4wKbMdYT9svrMOEphRaXAlX2vPJMv6k3VZubKiF5Tzf4toQ1L7UM/S2z+2pJk1KROkOgSuL0aeaohhC+g4j+aozxR616McYvxRh/fYzx1382fOWq4YBnwYroXTqxNQciAnjpedb98i+sC0FL54nwuSOt/1r6nH0Mo32NHnvPeTgb/Bg8x1nbfkb2vhZmXL9n5JTvdVoipL3DfDuxwoHNV56WMNxaPcFj659fWu9TnMNqhQIn71+RC09BnF7383rxkURp2xW2kN94217nTc3aDYT8SsmTiNLQX1mkPuqNidT7HNV0funWF+V1WuaotrzNr/SofgsR/RMhhH+UiD5HRJ+EEL4vxvjPLOwTjJB6ISd7QLmtzH7qATV/PXTMWWgYU9FMEaeYowoqND/rAq1/YZ1h/9nF6iyeWVxseI6B16ltvyIrxeoTzEQ57Xudy0NaN+LbN4Pa0jRSVt9b/TJBkjxGTyIlb6iuZ34qb9sc9svrCJ7R0habaxrkKO6aN5WIpnhTpeVosvJMbNrrpUrkob+5IJXCfWvzUS2Bmva59fHoj4t4Oex3xnfGMqEaY/w9RPR7iIhCCN9GRP/aGR5moIMoT+Dfynq+XSN7sAyjzq8dv0kgUoFF77OuFvoDAABn4lXf6+LkLMRDwWGTPKmqOJS2nSHC+T7BsVAJ+zXXZJXaFQI1F5nakjRpnU2ESkI03SZ6iNANyZuaIiVQuraT56HmbeWQXy5SN5HbIlI9XtRtnNdt7xxV/49
zp0mmBJ6MROy5Q04aQlOyIs2j2oLTE+s2p6yjCsBsAlHxSysA4LV5Ao/q7jR7Smuisld0zvKsji5NU7XfkEjJUYc7FcxtEtoI2+K++7ucI+w33cXCgPn2dZ+8JA2HC9Qso6+yn+9LhWnKaMiv5LFMRWraT6tI1QTqdZ/uYeX7rX0j7CJUY4w/TEQ/vEdfYABvKG0qEC2RaZT1/KKYLU/Dx6WNyVieBoDZ4FkHAHgFnupZp2bMFfZbQrZDrLqjx7Yyof/WjL/mPrFOYNv1vtTxVISlnHSpYocof48TRC1PovSYh8rMJNupN/XC91O+vYnQbC5rIjw53JvqIRWpEnxeaurp3JInce+nJlK9XlSvQJXE6axoRHhUgYvMA9rwy57Y3qhnUaQo12x0/2AKQQv2AcvTAAA4z7A8zaFoiZP2JAS/V9a5hmptaRq5jaOeJ0zYJWTrfbvqVPuO9fFs72nGO1/gdUkO++VoSZV4He5RneVNzdvoIb/SMjRvIReUM0WqJlDLeaotob9+IFRBHe4Z9YbqGqHEqnAt+jI8ol7PLgAn5ASvXAAAAHq4rZlqlm90hxv79nclvnSIy6Z5rNk+R+JLVqdXRI+G/VpL0hRdFaJVn2va6k0lyuelVuuykN9tH88OfFHml7aKVMuDKnqUJ/7oBqEKRGZ4QN32nLaz0F8uYBeKVGT9BQAAAF6EgfmosZbNt+hrTd3mZW9meFZ76ihiuIigK9qU2X5zG9EM+71u18NVrSVpsnqJsJPyT3jmpvL6mjdVC/nNPJ9sCRrPnFSPSNUEqiZMZySNhFAFIqao9HovjTruOap8Yr3HfpZkif1q15EwCSIVzCYQ0RsiAAAACXgkHMCscOKah1WgZw3V2tI0khe0+r7VEUbcWidu2+k+qX7rWKn0hvJ3PC3bb0pL2K+2rwwHfqybaiElUKohhfwS5fNSt3qPfmSR6gn19QpUrzBF6C/oo0eAzhC0FonnNPOoWuLRCB3umYcKjyoAAAAAOM0e1I3ONVTzvnk7yZajjmXT24YL0Np7ojb3VKsjhPWWnlYpXDf93B72602itH3WhKl3buqG5U1NvaXXunLIr7RO6mb7OiZdpEpe1FaBOmtlAwhV8MArKr1zQ80wXm8osDY/1Zi7OoH3GE6/fMjZxwdsMEcVAABAlc4f/Hsy/vL6niRJ+fb4/FRxWRrWnof9hlCW56KUmaiE/fKlaqQ62/ZF8EQS5UmULKTlaDzwLL8PW++FJ3UraxGpkhc1F8PMm9uUTMn//gqhCh4kojMTksLD6o7To2o+7CzhmwjSIvRX6auhyMVZvamfxoDw0SclENHb0YMAAJyKl3uchxf9ua4j5Le00dauNVFSd3KjyphMoSuOyRa+Upgvx/KYboKUh/3y9lqmX46a+dc5N/Vh3/amWll+H31GVaSmddJ9XKRqXtR03JJARTIlsBxVSBJ1fZtyDyoXwqrt9IFihf7aPwrK+/mDW5m/itBfAAAA4Mnozba7EH3uqbZfCAPuCfmtlHUJXWbPXDO2Iozrc1Oj3Z6bE72q3BtatpfCfrM2goc0nY8q1fW8P/LlaDzwBErXPvOQ3832Y6yPxElekeoVqJY4HflJCkIVuHCH6maNHh+b2wpkHtUYiCh5cHV4VGMs5y9sQJiCpYRAbwOZJQEAHxA8E45BOu8r/haazW33DGHdMce01VNa7YOLWqF6Lmod81PTTW2+alrOzTHBKiVK0ralZEnbtjwfVcv8W3pTNa9qzZv6xuac8nmrRML6qSy77zbWrTz9v0WkSgJ1ZqwEhOqZ2TsqRvN4crxzVFPTzJ7LdmEj6OG/PV5eo006RxWiFQAAAHhinjHMuPBiBlfIb082Ya1Pdd8ke9UQ41ooMJXzU63y1my/QXgPlMJ+H/NRS/ub2PTiDZuV5rNu3lR1/VSShKstUr0CVbvDpClqLa/sEKpn5p32ncjmnVPqvcIMe1o9+4GY3BQ8DMQSuLVxbiY6lq4BoId
ASKYEAMiBP5V2CdcNXm/pDK/qdjwNob29uOaEcmoidCSEWBShUS8Pwnue0V9g74FS8iQ+P3WDZ/u97itDdzla2G9RrzOJ0tbG60192Hkv56gKIb9EckbfEZEqvcvMzJ+CdyXwQJ3MyZ4XMS9zmdajNErBmWJN6OgYR2aCjwkiFQAAAAATGJry5JqH2thB4zzU1vLasoFm+wFBrRVb81Nr7bVESdb2hpb91pNEyYuWHbjwsAre1K09ryOF+97rOEXqW7BF6kVoUwMeVVDHGd47BUeYh7mfyJ6vmnhNMR0IAAAAABpxgodX9ZqufgepCMPa0jTu7S7PaqU8qyt4Ooxw31om4HItVXt+qifjryfbb43WZEpEVJ2butl9jKcM+VXHs3lbxfmqfBxS+zlAqAIRd/Ikp4h1h/5KDaUbPAaiS5pcKbHhfIDxZEop6UPlrFl/sY7qc/OGQD8AAGhnz0RHB9jzvHu5Mv92lvd4VlumhzWFIxfhvqPbZZIkrby21up1W06cpNnPyo21U62wX2tuam4jT6DUE/JbE6krBeoGhCoQcT90Op/F7mRKnZ5TD8/uUcU6qgAAAMBimkNs534xx443f0sMmjlIGm1VRWmtfuo8kJagUerWymuZfWsi1Ds/VUKanypl+x1FsiHOVW0M+e0VqWoyJWEfkimBYbqWo2mw12PfnfU3FbCaR7aBM3pTieBRfWYCEV2e/ZcSAMBU8EQ4MbPF5+IkSi3lGRM9sys9q3xfyxxUKZpbWo4m3a7NT9XWUpXsaaRL0niR1k1NbaXb0ucZIlVMpuQ+gjoQqkAkasKPqOvb1J1F2PwVMLkRa7+8vQDwqAIAAAA37hl2F+UJzRdel8XmaNdFrKpUR28uLWHjbSvWneSZvWYQtpMt5dt2XS2bL5Ed7su5hMhErO6FVetoSZUqy9JI2X7TPqyw31neVC9ekVoTqG+PdMxdfQNwR83Ky3Fe62bWXyfBfArqHff4Hd+Tvt5nupYBAAAA8Jxc1zURi9xe0j3fvCd6QA/r1zsdjNoTKVl4RGrL/NRsOZrBrL9EVCRRuvdjZv5t96Z6ROobySL1LYTsXw/wqAKRXbVZbzIlSryq2kOPtfeulWqFgZwFhP4+N0imBAAAJ2KHNVyn993QrOW9riUst91b6h9H27zUStUFiZSKOkrWXn/4r+2FLeory9TwEOJaGLAV8vvoK7GfjVka17x7CR5VIKKumyptq0aSJt4QkRbt5bwPvLdLfDIv6qdPMEYAAADgwzPhxbx8TyrDgIfWIuW2jxK5KQ11axl987KYlVcz8BqJlCTx6c3+u4Xw2n2X4b/ebL+PMZZhv1c7tje1xiYSvSLV6zltuUQgVIFIa1Y42cjjo3lPjGR5a8Typmq/tj2DaAUAAACAk5nJkT5AYrxV4nJoTuxMz2tataGuGP4rJFLyts3LdeFpIa2dei/rsOcJ+fWNS/+DXdi/tvEBUINH306Y06Dev95kShaTI2LPGvoLAAAAvCyXJ3yFFV7mV/0W3m13lRAlnkiz4d1qQIjWnNAjU72sREri/m1d00XL0xBJy+jUs//a/SS20v1ZHfks9whTDuaonpknfAZrzHgQF8vTWHNbH43GOwZgMoECXT7SDQ4AGCZg3noXwZ3EaMH5PfAxvswLOtI2nfLV0MztiKCbEA1s20l6CbSEA3vbWHUubD7oDN4oit5VK9zYzERseFNbROrM2wJvSmdm/AeXNox7Z3ROKb8viuVvHLgfZBO+ixDuCwAAAIAzUMsovJdo7ffSNi4p2Bm517I0DadVuLYktPR6UD1zWr32Pd5THvZb2t3qpXZtZgtLeFSBixCTB1TqyRwI/Y2DNoiLXWl8nSDcFwAAAHgxzjLfdM9h7BEizNglLJk3HVyaJi/3hd16bG3tUoHasn7qvY2S/Vcq75nLKvbJ7hdLpPaudACPKhDpTn7UU68Xz7zZGNy3o5b1F95VAAAAAIyw66tEZ19DYzxCfKafBxw
MVttLqC8Z8xaiGNLrzfjbgpbhd7PXipZE6U34u1ghv5qgfKMwtBwfhCp4YIT39oTqZiYmZBEOaSMeRvL/t3c3IVLXcRzHP582RckgKA/iSnqIQIQMSiRvQmAP1LWgunRMMAiijkHQLbp0iYoOPUhQB+kSgkKHouzBJDNBIsgIJCKyQw/qt8OMOjvuzM7s/GZ+D/N+wcLM7v5nv7/dmc/+v/P//X//gYszxeh99YAT6jm6CgAAkiv5ffCVLoezwraXjf0Yvftcq52159U3ruNuN/jo6grN7YRHNQdtP+r5qaOeKztKozisSZ0UU3+xrP5gcX+zeuUbNbTBHfs52r9NXF1yeOlCSh4tzPq/r/dLMXiWz6XwlXeZLoVpVpFcygtiAwCWQc6ma4Zz/yoTHTEdZ39uuWZukv3BKwsqJVj1d1TJpvn23h7hdZWiSZU4oopeqXuxvgOg424z3oY9lkwJHnbd1NEejiYVAAAUY5UNeKrpx1mmCF/zOJPvm13nmMo+3qz2G1NeT3XJ9mP8jZZrJFM1qYMeH1jqmuZxxO36DoBOXEb/1N8BP2vp5wf/4Oh/CM5FBQAAmI1Eu125DpyP05CmvIbquEZdSKm/KSyhSSyhBpRiwkvQDH3oGHJ/0O1hwqMtptS/2ZBmdJIT8XMYZ2l0AACA5ByzXShqilY6bzPlNVBTmWRhpf6FlEpEo4qrek/5HPacTb2Y0iqvlzXpO3HD3oGrYbrvxVb+MwAAgJkrajci2dHV8vffarbS+akpp/1KNKqYkaFHVHuNfC7rkKm/Qx5j1ADjkjQAACC7lXZH2F0pykqXpJnOz5xuc76w8rdMDY3qHLumZ1vNcuKpA3Lk81+dfvGnAWhaAQAA6rPaGXI1zKzLbRZNpKN/RZmMbJ+XdDp3HVNyi6TfchcxRYyvbiWP79aI2Ji7iJTIuqoxvrqVPD6yrj4lP58m1fLYJMaX08hZV9p1VE9HxF25i5gG21+2OjaJ8dWu9fEViKyrFOOrW+vjK1CzWSe1/XxqeWwS46sFU38BAAAAAEWhUQUAAAAAFKW0RvW13AVMUctjkxhf7VofX2la/n23PDaJ8dWu9fGVpvXfd8vja3lsEuOrQlGLKQEAAAAAUNoRVQAAAADAnKNRBQAAAAAUpYhG1fY+26dtn7H9XO56UrL9pu1ztr/LXcs02N5i+6jt722ftH0gd02p2F5n+wvb33bH9kLumqbB9oLtb2x/lLuW1pF19Wo566T5yDuybnbIunqRdfVrKeuyN6q2FyS9Kuk+SdslPWp7e96qknpL0r7cRUzRBUnPRMR2SbslPdXQ3+8fSXsj4g5JOyXts707c03TcEDSqdxFtI6sq17LWSfNR96RdTNA1lWPrKtfM1mXvVGVtEvSmYj4MSL+lXRQ0sOZa0omIj6R9HvuOqYlIn6NiK+7t8+r88LYnLeqNKLjr+7dNd2PplYfs70o6QFJr+euZQ6QdRVrOeuk9vOOrJspsq5iZF3dWsu6EhrVzZJ+7rl/Vg29IOaJ7a2S7pT0ed5K0ulOnzgu6ZykwxHRzNi6XpH0rKRLuQuZA2RdI1rMOqn5vCPrZoesawRZV6Wmsq6ERhUNsL1B0geSno6IP3PXk0pEXIyInZIWJe2yvSN3TanYflDSuYj4KnctQC1azTqp3bwj64DxkXX1aTHrSmhUf5G0pef+YvdzqITtNeqE2TsR8WHueqYhIv6QdFRtnZeyR9JDtn9SZ2rWXttv5y2paWRd5eYh66Qm846smy2yrnJkXbWay7oSGtVjkm6zvc32WkmPSDqUuSaMyLYlvSHpVES8nLuelGxvtH1T9/Z6SfdK+iFvVelExPMRsRgRW9V53R2JiMcyl9Uysq5iLWed1HbekXUzR9ZVjKyrV4tZl71RjYgLkvZL+lidE7bfj4iTeatKx/Z7kj6TdLvts7afzF1TYnskPa7OuzY676IyAAABbElEQVTHux/35y4qkU2Sjto+oc4/3sMRUf1
S38iDrKtey1knkXdIhKyrHlmHYjiimYWuAAAAAAANyH5EFQAAAACAXjSqAAAAAICi0KgCAAAAAIpCowoAAAAAKAqNKgAAAACgKDSqAAAAAICi0KgCAAAAAIpCo4pi2L7b9gnb62zfYPuk7R256wKAlMg6APOArMOkHBG5awCusP2ipHWS1ks6GxEvZS4JAJIj6wDMA7IOk6BRRVFsr5V0TNLfku6JiIuZSwKA5Mg6APOArMMkmPqL0twsaYOkG9V5Bw4AWkTWAZgHZB1WjSOqKIrtQ5IOStomaVNE7M9cEgAkR9YBmAdkHSZxfe4CgMtsPyHpv4h41/aCpE9t742II7lrA4BUyDoA84Csw6Q4ogoAAAAAKArnqAIAAAAAikKjCgAAAAAoCo0qAAAAAKAoNKoAAAAAgKLQqAIAAAAAikKjCgAAAAAoCo0qAAAAAKAo/wNQgM/RcrKVOQAAAABJRU5ErkJggg==\n",
-      "text/plain": [
-       "<Figure size 1152x720 with 3 Axes>"
-      ]
-     },
-     "metadata": {},
-     "output_type": "display_data"
-    }
-   ],
-   "source": [
-    "df = dfield3\n",
-    "fig, axes = plt.subplots(1,3, figsize=(16,10))\n",
-    "for i in xrange(df.nb_components):\n",
-    "    axes[i].imshow(df.data[i][df.compute_slices], \n",
-    "                   interpolation='bicubic',\n",
-    "                   extent=(0.0, 4.71, 4.71, 0))\n",
-    "\n",
-    "    axes[i].set_xlabel('x')\n",
-    "    axes[i].set_ylabel('y')\n",
-    "    axes[i].set_title('F{}'.format(i))"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### Making the fields evolve with time\n",
-    "\n",
-    "Here we will quickly introduce the Simulation object which will allow us to introduce time in HySoP.\n",
-    "By default a simulation begins at t=0 and ends at t=1.\n",
-    "The user may supply either a fixed timestep $dt$ or a number of iteration $N$."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 19,
-   "metadata": {},
-   "outputs": [
-    {
-     "data": {
-      "text/html": [
-       "<video width=\"576\" height=\"360\" controls autoplay loop>\n",
-       "  <source type=\"video/mp4\" src=\"data:video/mp4;base64,AAAAHGZ0eXBNNFYgAAACAGlzb21pc28yYXZjMQAAAAhmcmVlAAA8fm1kYXQAAAKuBgX//6rcRem9\n",
-       "5tlIt5Ys2CDZI+7veDI2NCAtIGNvcmUgMTUyIHIyODU0IGU5YTU5MDMgLSBILjI2NC9NUEVHLTQg\n",
-       "QVZDIGNvZGVjIC0gQ29weWxlZnQgMjAwMy0yMDE3IC0gaHR0cDovL3d3dy52aWRlb2xhbi5vcmcv\n",
-       "eDI2NC5odG1sIC0gb3B0aW9uczogY2FiYWM9MSByZWY9MyBkZWJsb2NrPTE6MDowIGFuYWx5c2U9\n",
-       "MHgzOjB4MTEzIG1lPWhleCBzdWJtZT03IHBzeT0xIHBzeV9yZD0xLjAwOjAuMDAgbWl4ZWRfcmVm\n",
-       "PTEgbWVfcmFuZ2U9MTYgY2hyb21hX21lPTEgdHJlbGxpcz0xIDh4OGRjdD0xIGNxbT0wIGRlYWR6\n",
-       "b25lPTIxLDExIGZhc3RfcHNraXA9MSBjaHJvbWFfcXBfb2Zmc2V0PS0yIHRocmVhZHM9MTEgbG9v\n",
-       "a2FoZWFkX3RocmVhZHM9MSBzbGljZWRfdGhyZWFkcz0wIG5yPTAgZGVjaW1hdGU9MSBpbnRlcmxh\n",
-       "Y2VkPTAgYmx1cmF5X2NvbXBhdD0wIGNvbnN0cmFpbmVkX2ludHJhPTAgYmZyYW1lcz0zIGJfcHly\n",
-       "YW1pZD0yIGJfYWRhcHQ9MSBiX2JpYXM9MCBkaXJlY3Q9MSB3ZWlnaHRiPTEgb3Blbl9nb3A9MCB3\n",
-       "ZWlnaHRwPTIga2V5aW50PTI1MCBrZXlpbnRfbWluPTUgc2NlbmVjdXQ9NDAgaW50cmFfcmVmcmVz\n",
-       "aD0wIHJjX2xvb2thaGVhZD00MCByYz1jcmYgbWJ0cmVlPTEgY3JmPTIzLjAgcWNvbXA9MC42MCBx\n",
-       "cG1pbj0wIHFwbWF4PTY5IHFwc3RlcD00IGlwX3JhdGlvPTEuNDAgYXE9MToxLjAwAIAAABM/ZYiE\n",
-       "ABP//vexj4FNyAANlzqKeh/hFbH0kFF6sdmgZZQfwZOX+oAbVWujOrqiGmbqzF73kxJ5XP9Eamvz\n",
-       "hAVbXvNRmSIUXx1oOPGifSMO97B4Fy7/O7hti1b9cSZORNWO07bzrOyyH4EMFUbfn5DNAvF3cF/I\n",
-       "LRlU3r0noJFAur1+gwzoklnCu2QR906cAZznZNWYN6vxzuI+nj7t/7hnreqqTr+YXR4Fq+J8MlVV\n",
-       "nQ+wgwVwJIlZBNWh51rVsv6xKKl6XPWxc5ugQPEznQQHutCjsIOpRRXqe7qDYbwrhLBlUZWQx1Og\n",
-       "ImwQjn+mYUkYvZxj0kwIbDENdZAAA0JZRwXrQJzWo5O8Vl/K1Ho7bPGZ86uxLhj8AdoSLghltzG4\n",
-       "E/IG5H33EWBB7EaRzKukjneCfKHKlshPL4iYLATR62VtsKW5oPsRm+jYFk55XCstJ+oOU81hfB2Z\n",
-       "OuLeROK6/CVgAA+sCuQLQAfeHrobYywSmzS251m7t+doP1C7rQ6Jcp8WH2v7Fu6rnHuCjI7Eccve\n",
-       "8jTzVBD/yGc2SLu+ZmPxumBuPNzvQZnWEwpooc/gZWq5jvaw7fSv7p55rzW5/5NvmvmUsl7Owtm3\n",
-       "twYyMmTpM2hwCSE7vlBAhVWUiiPOu8RAfxrQsA9Aue87AGS8xKrLGZg8A8tF2TmVXzoTnNa9OS6C\n",
-       "glEkEAnm77hnvl4lAUcQiXwEgvgY+wkVyG7RzLtgyMqeq9Fbhm6R2/vADoptYRwsHdvmB4SepXJ3\n",
-       "XWoL+TpH91S0QoxhzecPMJ73jjj479j8N7keF32PkcXD74g+PT5esVb775z4kbHT9GyvKkMcYQd4\n",
-       "DG6PykTyXstTnSFgjlulJnxCuTsNBMog8cMDlSQc5eqM3vuVx21mRXhYw+SjE7mMPHw6ZeAlohpt\n",
-       "DRjP/TSaxEaqJYoyj8vSVTbVa8caHhO3pZMDGYH6YvyjL4h5urwDlxq3VHPAMrKjokKjoGxEV98a\n",
-       "C1hI7tN6acEEAArtnqMgeeiyK0Pd47Yi5Q6WQAu3yGI8Vnud2WMIJ+ESVgAQhdIr2cSEmwBQVyQR\n",
-       "d5vadVeTYfK55S8TKDNVf7Tmt/EA44hS9UEBgOpOW5BJiMzTcHWBvxlTJjxfMWAewAIIuQR3n9hB\n",
-       "DsjrrPQ598/K/EEOmUtLVmphkIzEacNrMp2jyUNSYb5DlAjybFBG8jnL1ktMneIIUSNxkVReAGLu\n",
-       "ry0PvamT/orx0ea3/o9VFphEiGaysDHWgmMb8IXyvO3//zrMjNzMgw79Zr8ufMp+wTPLzf88nL8T\n",
-       "aihoqTIAUn0oVg0uvYPHVbPS5+58zfmL61HvB+x8Q+q37/ufoLiDxoXoF86Nd1lgzsi7LA5nPONe\n",
-       "aILFht9FZ+MppaCBf2JqMd/PgJm7glaOdX+gX1aYXupaDHV5eDg1oTaAMMc5BAJyWcCENE+ck+Zg\n",
-       "gHTY2QNXA3eGMAp55F+YeBSbxwhyPblr4Y8MHYZ/vgDdZX0UP8gq9vatqanjBT56pdrTVsXg7hTT\n",
-       "wokYO/aZ0Yr/JPPzM/iOyqLUnUgIdM/60YvscnWmkFPrluZPxfXBOuofnN7labPSy70mDmyiLhhJ\n",
-       "1TpHeOTh8OeqR/lPOqZ+VpQxSO/yV0WR8fPyXoongrLFtoFYUBacjcjEZrZ8dc0GJQ/IGa+ZtmC5\n",
-       "M+v/EnDnaAWZrIt7s4wrval55Cz2giS49XJthNjoFrgU5nTfa8hYJAjXxeYk4mL0EA34lvMRfcFn\n",
-       "+x3J4V+Q7Dpvyp7PUHwPVP15bjq/vTIqzn+buAiiCJa5YHFLEDY61Z4hIxZyGQmnCSpNwFx8tlSA\n",
-       "TfydxxrYY0DOeSz7IfaBlEbKUYCPP2ojAlGEzl8/EMNIYfs/6dwBE01cONz9mvXvOFMAGO/zxjpZ\n",
-       "ormBnfyKaBZW+etvMsTOnePZQCNHG3xEafl/Q3wsBCyK8R5nEF0gixIxFG6vsCEEI5NYE2E7M48o\n",
-       "f1afLFtDC0keyue5SoQ84KQ9zEuoj8ZpUWYyEJ1qNV+dcbMJZB3DFx0asVbywKN34bp7JrOVDqTq\n",
-       "VFACJo0kdJSY4+bwTokQgolll3lHkr6v3rDZdNKnIdMP0pmPBDg3rFZpXbqT5xOdgODEpWPS9deQ\n",
-       "xvZbD3Ukokz7abbfF1wvhxmDM2UhckXAGW4+ypc///hjFg2k87s/zuPqKh8kipyl2gIj3ydQLwi2\n",
-       "4666MFdSMjPbnDchglZa529rPo1GSlgKXDuqxPsT4H/qZFfc4dxjPp19UgzGQR+f2aH8Rwjyf+kr\n",
-       "TSoRyfE9qeulWXXXr7RCj3yaWHODu9CyZZ9pLi6/5cGKheklqXcPg/TbRNp56T66UHPSp5IgYf4B\n",
-       "pDnhy4UaxIDicEMkYLgtj/VZOB0tWu/XwHcKswu5D+2a1wkvFOimnhNnm/rxVHC0Cgz97jm1eY/y\n",
-       "zbT/Q9hf16kDwR/D5zlkGNMSMKXObjIhIqRMArezNtH7T2uJ8p5S2OPy/EwQYbYAOV7DkUltJJ0M\n",
-       "hC1YzUBx8wFy2upuhEVPoPqrCOaJPzcMZK4nJ7hy/KOGhoi+5m3zqpsmESgrfuthbpZLXChzHS7i\n",
-       "u+XbylGC0g3hX+kYagtA1qvlJgyFV5l4fewTHHq1syvpcNnS9Ca82TWt7Pn10nRbSCQNolqQiNkY\n",
-       "+8O7OV0BLkWP8jz+B4+VUrwvaddiiszZ/CJ9cRUftkU/XTGYlQUVelIt812UrRyz4DEoim9JKybh\n",
-       "iFLmrpQQpwbhmd2QmriCZ2dpgcpR//oRCmpvJqiJLjNBKj7JPmxr+vqF02bIq9UwB6PHbGyrwOmY\n",
-       "T6pYQANPVKyTPwrp6zzu0AZLWKW4HZ65uqnxIq2C7mMiq47A9fhh08P4eSYBOF98T8N94HQ4Ix5m\n",
-       "NEKD7Ta8hvLfbB59pBnzXsPJ+eQTa7vVRQq9lTr4OVz9DkH0THXf6Pwm2lGEGcDotHB17Q8/W5bi\n",
-       "wjCodtHhFrHSr9kP2Qi7ii0xeXEH3Ii4sef56RiCqlepsegtOfy/t/VS/pJSbGtPdrEko5P9N90n\n",
-       "m7tifLN/jUfe3Rs9IVSb+6lv5t5ti8exx00GZQcX/C3q+Zeimi5dlfgtztTTPXHby9VXQKxrrJpJ\n",
-       "hoAqzMAbUB49FbG2aNGsLzWB+oq85f475DYptNY0Fls7dZTQcqp/Bflu7FzyKAoFN7EA2SkpRE/S\n",
-       "ue2azy8WdI2PND7k58ScKjAOiV6lo4HEU8hemDSnKNLtKrqpjJ0hzlRleCbxfgain/qnRwDDgJCN\n",
-       "duMUjuNn6ENeAe6YjhkuKk+3FdQJCsIk1J1g3rFT7r1/zv5p31EBzIL5yqUv+XEF55uQlIHgxEiq\n",
-       "64aQ3+VfjgG6UiO00QPUY0sRfFfnpklozMIXOGi9Vj5vGzpQ3SeAcy6K18UmhL1ogQMBT77cEXTM\n",
-       "9hFSblKSFCnfpInZYvjVwjpnY1hM9KSQCe859gF/ShMib3af9/eSWKsAKABirt+hdWo72Vfh/Dfs\n",
-       "DvB6Puj0oEN/UDCjivRpmmJSfcWVCZ7r1lxKqH0rGHIgFkE+Zo/kRwKsDiKswq0Xl7C8WqSK7In+\n",
-       "UK6aAEb+2VwbZi6GhZQzgsuv3ugINIb45qlRex9jFeOwNksiHOJQembI8RmYQU7Or1Sec0tlLHoj\n",
-       "9u/W+jlq49VO71PeXYyhF25d4rx1VJU9qvibLBEyDxNan4ZjWVCCmOSzP9BR8GvDxzL1ByWR/Ced\n",
-       "rEiPz53Wq1cCWuxiEzS1cb6rP/nK0SG7yG+b2tw08PmkVWJpH2Ysn+ATtDsX0f4GeIDvpQ8ALq5c\n",
-       "RUiw9B3qTFy8awIAY2TcyDdTUcutN0l4HS4CkuZZQifluOym6jumR2DXfLM0hiMfPNb7am/fEfjA\n",
-       "Ho9NfB1A5x9+vgJhHwEmrrabl9bq1jOrYcTc1jTg/tlJl2Ug505UN+HUl0nM7ZcYyu4UlUEyxk1K\n",
-       "llbRe5ccrWmX2x+62iNbqR9Omgmb5o2eMo2+MJAaAihdoiQblVEjgYInHR3P9kAupJ4bIO6koR/U\n",
-       "At8svYTsFwYo0px9Hddsmh7A1lhPqv18gL4RXxksXc0IjyncAcXeEA8L3kHZs3wSIRD0i/XGOGEh\n",
-       "7emUGNDrw+8KPOxfKOFaHU/v0VhytM6JkXbTPkFNFesu9bFA/fLhQn1s7icMYGQdgwbkdspbgqgd\n",
-       "PwEpVwPbpN3YGoP/lpkMLRCKLgX0egKIwkeoazEumF7JbXZ8IyME6i5UP4z2LRkA6I1dnl3w7viZ\n",
-       "/yhBwL/q/+vgl46d4mfCnuvtKFzAjqpoKgoiSCBHpdE/v0oafBM0cX8nR1ju8QuCkLXwyAcvj+9D\n",
-       "GlpRrqGUgo5tYYtis/e8N48PFkGMnA2coahJozttSWUlCU6MpmsCVHDu603HZqY9d3Hgq/0CbHt1\n",
-       "AjFPziBI1kxGYGdsLzEK7wMdzPVpFZlzR2hunC8daUbVNL/c1efG8wNhxD/x9g4VDI4tzNW06caW\n",
-       "eVjhrY9udHSIqt2cwtQ6pE5E9eVR/3kFXGv1bs7l7F/mIyx1gtEjuZOAPqy4CYVf/B5eNyn8V+Yi\n",
-       "lbfHcpqO4Dbq+oRv/FlYCeSJZ3jFvFQhzpAmEwW0zai5ejvWspSRMFsjtcXjmxb6NJjx4ehhA5P1\n",
-       "3n+e2ZLT+qR+dbXC6CH3ZumH31MFaYPX0kcka4B7TwqX7uahvLpFXmz3yTsV0/wW1syPrZB4vMZD\n",
-       "C/UQOmF6HYVErI8GT+cBl53NqakAfbvYETDsocksc6cjtsU02WAHddEwrm7pl/akLlCADloKT0ru\n",
-       "6mf9bs9qlcCLkiEXEpaLAKzXs5OQcelEGwYU6VYqjAqaKmKB7DFmkMf7W56F/+GyFc1RjSChkXA+\n",
-       "H0/+Utiw/7aL8/qZmGfvtbQhEXSpdDx4Ejw/fasl1nDfil/cV8tjzuH45hkuNcImGGvLEL2zA8/U\n",
-       "VEh53XUg+nXSyuiPRrQDkZp/Q5/H366BNwpNAvKur3tdCen1Iu6lFh2TlE4SDIOJhlXO/m44Fd7s\n",
-       "UxP3i8VpaNZ5J/lOTmIOO8aDZ95GHJU+mA5pmkvGXI2YsYjx/4aRb3Op7xh3ldsOmby3QUcPDQZ8\n",
-       "JBkP3tA25l/lGG0Hbo5EepzhVC7YSvH+5JIBq3UoABFfe4mpQVgK5Vw/LmAKm15MUy101X2DbjPS\n",
-       "WhPwIWfHCXf1Ew2AC23HkZVWUFpPzeKVVJub8qRLW5oPG4ubmteS+t313mobjfE2fpTy4n4MY2Y+\n",
-       "Ek0d9Dyn3RHhbpjJn95SM4qlT/QC0qi5L/mbq5ibywHQTwhono8d6uHyRkXmu9aB13SBuF+Ae79W\n",
-       "udFMo8d3M4Clqjx7tEw6xFV+O5tgqgMoYPv169ClbTSdqeWcbd+rpdc2Fc4U9JtY4f1LSsTFxZF6\n",
-       "QuTTKyOeYx27BIkWM3jRV7+By4Jl8yy+Xj79QfLggJZ7K+nnTZj7A02m13D/PzO6Ru5WQVhopUDM\n",
-       "NJYrhLb+yjAEXiVp1P7/rE9jWLietPVKGI9+23mZJkdnlkQpIEBAKF9r8LDMcncU3WKeuZ3EV1fs\n",
-       "cn4tS9KQKVbMYO7/YQIkvZp1HhbS5tv35ZYvLkT+fL9/7zdP9KPRFpwsWg/zAW+1dVoAOGZgTnXD\n",
-       "+jK13V+JD4YsN2ajCjn1i595YxLVZy4itbd2k3/4zxD6cL/mdWKtuEx+p+OW9pRDVPnY+2f7Nvzz\n",
-       "DGYZkk+eoQ0hy7akXFE/R4ir4SuuGSL56RJQtqAI8JCG8b/5WK+o8bkuGE1XvFGsi3mZL0ekMGa2\n",
-       "qRrYNKdHvDZqcLrad2gYDGs2ZJn2iFjEv3qSalz/lab4m3NY1Zb2wTjnczGvJVzEvLsJ59Oyn85Z\n",
-       "BHaEFv1FU3LEo/pnu3zgmdQAuauOrlrSS1rK/Xkc0NvJz7w6QzDLKiXk3shq7B9UKaK4Sjy1Ao+x\n",
-       "rB03whLt3QaTG0VUZ3BaLpNC5uYH1XAjnmdNN9xhbzgvDuYh9y+RcZA+m29b2Kxwio48vMXGj4Xz\n",
-       "Zz6o3uKjdiHgaHx/xNQsIegLzdp4cy1DcauLRyL1wjKI3Hj0hgL3xQ+jJT5WIbzz7qZEoLvuM9yd\n",
-       "/riQbpFGuZtleil8xUuXbZextH4ru796hadaoZo7M7EAkERuVFWqyVe4oV5vJHDvh1cUYZPM8FwA\n",
-       "vsQHSE/jWUeUhkKIJ+tgc84ZqnZ08Cu0OnJ+jyzQMlQohoa/mtYFkce2fifO119ZhcgllsacVqMw\n",
-       "1rTjB2K30d99+H6TnEivi0fJAC9G8f7GKwzRomfVhxH8mgfPZnafejwr3SXBrRcKpZ35sjR95F7G\n",
-       "pnx+ZCK/PHVwjVokD/r8oEOxvNfv8eAe+YcR06a/6MZqGu+BWCpthJ+px5QG8FUSJYS9QV6hee1k\n",
-       "oHycf9LYDvA2/AU+JQtszU9Sr7BNrPuS4KEu31YtW0oMqHNM2XU9q3mYHOKpQpLHfglApk3APaf9\n",
-       "Gkk6JJ65lMG15HuKuVj/rvRBpYETJhAyLIKfwO7LlmCqHTLNokKB/aC4PH/6GRtpTcP4GPXhbGT3\n",
-       "X+/24txVEgAAAwAAAwAAAwAAAwAzoQAABUNBmiRsQS/+tSr/I0bABssOSDuYC4eiGzr0S8wqbUsO\n",
-       "bMpmJ+yEGjuxsxnpXpVsyUQxHlaaPOL8rPA3WbiXrL0NtJ03kpVv7Cn2VTYA6G5GkJJ84eiHM7Nm\n",
-       "ISto8E+nnLr4FWmbdYORgEOUR5zYIRR5s8lebdFrYKove6VPmO7LOh0BuasWGk4GLAn+31pOu4GT\n",
-       "6R5ydap5xtaOeSoPgG4okzlNymUXg9XKXTqFoYhVAd+oqWXujGE+WYyk81Wz0fq0dyD9Cc/5mLYP\n",
-       "T2UYucZMhTzDoKt2B7F83haV9spUhl7dh/7okqX3afPm8lBystQVIWO+huZcnQmT86JNWz2cKgMI\n",
-       "E2mSiaVxyyFBuArpVr6z5pRNLcHYa3bITFdwuwkMVtXwCuQyAzIFBz2Gkrrah6zfdqAyeYZlksnj\n",
-       "9TyI9/+hHNXVhELY/GpFYfULei3JlDBveveuAkk1lE1q4Zfx0BX3nisylRdX/uAWYElKiYAqMlc1\n",
-       "RIIO7JvLeidsMOVYcz5TX5zHeILSohuQBrcafLvoZ9RCEIxr7s+dM9huzGoxATh+e/BZrpSd4TRZ\n",
-       "0YfudeS97D0Vj0mAg4XZTZYS58qZWozOurhDGQq1oN8F7s65YBraABHuMqPVGd8Q2QLWdt15530r\n",
-       "g9493tY8aYdIolnA9Ht4+1l3q+QE13RAmBNYaoLfmKAMIoM6Ou88o03jaPXBOaJsJ0BTh1cDKvMc\n",
-       "6XsCTSggrgSHC/oWyDiCtsWMPdbpcAwz0iLREnd+qwxpVBWOARF1RxFpJE1K7KdZdsrYVQ1CB96m\n",
-       "84f+gABJfRbKJafj2r1/WaJPSOQPudp0zM+8lSnJLgTPpGgt1Ba6L++GKVP8yWW9subtmDn3/k6W\n",
-       "+FqOd8wS5VsxC4ELXMZQdc9D7/DBjrGmctxG2JU5wNZmDie+YAbKGYNRR7fTPvN57Zoe7eby/kua\n",
-       "8TyNTGx03B0EQ9IaGYydp/gc2hI60I5cP4ol5OcZb+siMvJgm2c2tCyh+J4CbDMiesRug1oV5GLE\n",
-       "JtEcV5CsxwQn96DDGOQ3MF6jrTiuIhCOtPEJnGo5aHyvLYQCrHg2DtOi6qbhiCh0FFjK1Sj0zVnc\n",
-       "p+0Xq/v0s0wIT6S3tNVLsJu7uhnSQzFpC/yVxUI+3dbymhisRVNjVW6Y9wUWxp1udLIF8Y/7hzKU\n",
-       "Rd8XGGEVh1THaN7s/NvkFFgMdwbV56PDv4+rZ5IbjloywE/kcc9rinZJ/yVAUX8y9ejyrgOvspa5\n",
-       "SLOOepVgUz25pmo7AiVryyP+P9GI3oTATgg8Fa3G0hS/m/GMFJ5iuN9KrnHkcegbqovaWGH3s4za\n",
-       "rqR/BsWI84KevzpCKjei0XS9UJwC/O//or9oHZFpTdcMk1VlKaQ3w9FQb+lcfpyb3vd2lWZnDzvw\n",
-       "fJosG/86qf0V7cLBdP4s46Tu93uw05/P/0izWB6Mmx0dIeP1E89RTYbBgLQVZ43k7o0Kpnx7OMUk\n",
-       "Rsx9grCRtBkaYak3jSTudxQQhVgAAFt5PyBJPeBEAY9iHU0fJI7G/yN3oy1uoSyoaCVzQe17yi5E\n",
-       "oXeTvs15edEubUBxxkR9mA6/W6FnHjbKTC6PJ8rvyUX+8vYWHie/eeDD3o2MZ5P/pPOM0ALekmZ/\n",
-       "2uQEXeFFF7bpTG+HDq/scV11onO1aQhqajiTIHszaWPf+0QiDKJ1cwWcNAW0R93gwWJn9gPQ/UgX\n",
-       "o/d7OBJmYl/T/z/9RkW1XsKBWCeqkVmFLRY4aLTBy2xBJMS+/qoNRvBaeai4/R1Ea/LKTqBGqjp1\n",
-       "7UITScAAAAG1QZ5CeIIfAe0LqgALjucS0TEqRysNn95+9L0ZP4K00dqtUJgGD45JzrIxNdLsqUjE\n",
-       "W+2WusheDOv7aOZm2pD70qFhUbmtMX/jpjH6oqqLbVFMIxUO627ZQOZ0kNGe1TidTvqq1Al/Fz0X\n",
-       "76DElAmnsCgN/l5GTWXGsBEnhsd8qyqWP0L4MtwFJudULuIoBv6LUtACvDe3r5Jm+USNRC8IDOng\n",
-       "D/cGyp53l9ZQojdB3z7xU/F4u04uHJWr9pXEc8Z7SLzFoBX3EB4l5fjTB6HvB69X4X5OCGvSubW7\n",
-       "KujOGK1AT0osirfN0FhYLTnm5j3eM/CC5WHnjDi3MdkIpUOe3hzOFHkxFhoA9UQaCTOgsneuiZ0j\n",
-       "kqJn+BTsj9+kTdnl4RqnGKSyx8XU991NklXpEgT4D6qUr7eurPP6XxWE5lXW/2HKLqPdq6Yhvq+e\n",
-       "EsbNkBTfkyjs4P7QYtIskpQ8uZDPnBDWOPoZpfYfvJSnki/MAuATP++JhW8T3YjvRsExe560xGvN\n",
-       "KFdbBKYILzjHkPDAPWqAWd6J0Mzig2mbPSsN/TfSef6i7No1726xHqyH5zOAU0EAAADQAZ5hdEP/\n",
-       "BFHAoEy2jXhQT4rDbKNjhWae43CZxWp9BYlm52ZFLNgHBOiHny1qe8HShbg33jlLwyhtXU/UzEAz\n",
-       "R1n/Qmry+h43w3MBjH8KnEoLHFYwdbEfuQxFiZMssSDnDslauS/Ddret2fzQgKWgijAmRzS/XmkN\n",
-       "dJZxJfATyS4lk26bfgt5jLDQpkDGefZk9uII8fSK1MeldHq4YnbnQrz74R2zzRIwcbBl7dAJmmhh\n",
-       "JMT5QpIb5OfBA63WO5m1E2lSK6z17aNFmRZgF6aFJAAAAN4BnmNqQ/8EUcCVXTFOvVOhmlsaG7FQ\n",
-       "uAFxofjyto0Hq7F+Lw+sKRQbFs4xaynOPIgJRcr6csFsaYiuAVG0iXivGjc+aUMJQrmf3fFcAnQO\n",
-       "DfbYd9p3e4suZx0FwNcoR1PpCGIsTJwBiNYwPzbZFPzpmJS7qjBhWWlQxnqgzXzkZUXhjNBsOWGC\n",
-       "itxQdUwaDj8bv0wikci/EIfSp5y2N+xIXWQDe+C8uwYe6yPvEAi0+HNzBGb7lMOXq8S8kBawSop9\n",
-       "QxGGY7jzV0Xb6ouyKEBQdkjKrXjTlTY21DX2CHkAAAOAQZpmSahBaJlMFPBL//61KwAAExsAH6E5\n",
-       "HcblYt0SZwSUks2UZmhtiLYyc9e3vazXhvn3QOS+/mDu6L0Dz2N2OWOuZzJKNIBffokHfOiOiaej\n",
-       "znsqfjXTCCa0Hty2/Vl+poYhu5shHNvwu6TQbxtu72z5ud6zIDoJyX0sDd88YvSj27CaqZvmkwDa\n",
-       "Xsp9Ib7qxM5MUVtuA5g2q/AofVt1zoEvGj/m4U7BChsoGJKgd0h+ib+etU1BMXvwvWCGYLE0T5WR\n",
-       "q9DNJPaDmVKAZYPDSKY9DZbgmx2TLzl1aXbhdjwwZ9ojaK4d4x71PQpz3bs9uyIcE104iyRLiveV\n",
-       "6PCm8QGBKMsP6m2FLTIUfHR37msDot5rasg3oE3Dbh3+f0SphaXXbC3myU6U5+6O1j9nparZz5HP\n",
-       "3X3kGJEYw6znwykd+dSw64mZ+L9a7YGmEHY2+iqiANOW8Dy98a/R2OXMiQOsctPPKHWZ6Tey5/sP\n",
-       "ksbIa2IKmMVMpNPbGfEgxED9ulsl6rO0HMV4hcvNChxgWc9vF91zQ/+AfOujf3OB0q+1Os0D9InT\n",
-       "WAXJmdKWgvsqUcq/REJWmje9SciBbA7wnKXfnuSGnLFw2bYaXc9wyfKOloNPlWrh0kP9FQM0/aVo\n",
-       "46/+mexhEfr1il47M3Fx4NXr6fy+/XkSmbWEP1mGBq0m1vR8hbqqn0KDkBkc9XUhc4yumz7mE78H\n",
-       "hYW1EF0ZU8hBT7xl1hia2Vlfd0Uq/QG+KSxQFNKWvAbiGcD3N034dSgIfSR343PLGn+81kKvu2LJ\n",
-       "j1owKWbSFdHDa2eRt9CfRo+eDS3ngiaI/q5QSpCAfSU2wCGZlwr4jez46m2CLtTjgq8cTdUqH/dx\n",
-       "DRKzzvYp8wBrAndDrAAMG8kO1eAHOdDVmEbD/o2jzVkS+PGQWiZ5Phs/4wpc4/zWWdL4+jJKJWuz\n",
-       "ILNIJVYzrmmj5i+kgBGIwrQO+YVtN2NmCgHQ+dQf8uCkZlbFAZ/OQFewEcUCbHYzjPCfN1W4d3wf\n",
-       "tX2cqN2mfsQwvTqkdob/Tv/bTt0dUHoelnPvfKFSPxFJkgCxRjDHX9DuQxQVrW1AM+FakPtSXb8/\n",
-       "sbn62JGOZV8TOJix62il/BKVh5uZpWco8NQi2vwkvlipaZSFS7Yz/fk9u/yqpOQW1uzIiqqrEM2g\n",
-       "K7+zm/zH7CNDEKSz9Prbx0EAAACuAZ6FakP/BFHAlV0zJjOhtObaGtvYJS4RXil52SnKiOOeRkiU\n",
-       "fR7bvyxAAE52o1G/AF2EIBg0isQva6Wp3zH4pY9HOTLezdtIHedeBjj/IiYKOShhOITUy0VTDJio\n",
-       "Vdb8+8WrjTdfYL71cdfXvBnr3gO295wJM509magDumpI9ndHxNR/m1CGvXYPqKxW6c0V9tuGnd1/\n",
-       "KWwwcBEwRTsPgq9S50qv2rVoeSwkkgG9AAACzEGah0nhClJlMCCX//61K+xD/5dcxzV9KvK2HbAA\n",
-       "Kz9Bg9++pWLQ7jb3pGtSRwwDZ95//1PhzzbZO/H1UVM1vx8WCbp0kR6Mzeq5M2Cw3SSgQcemco6X\n",
-       "LEAYAAAxp8DK7OJYfRzYNMFI90mRmjDAjwebg/u+7WpWmKzIdqVf4bJAmgLMZKXHELpI1trXUpYn\n",
-       "/D2wXm/11JLkSKmAwOxxUqQK+3BP58CsFGlC8KImgiC3RsJ6SiEuuJYQDXNe6teBBM3TxwwPce0C\n",
-       "BgcKsa55VMhF8UZBDzrHOxwgzBzQerKJaX5ZcQDaOy3NsV5fjrcwjOLHUkuK+taeqMbAPbyd/qoP\n",
-       "V/97uLvk57rRJRC8f7tzYETwQmuIGbuKdeoGKFlor6TeHJq5RObet5vXlfZIV5Yps7NI4zbpvqdw\n",
-       "nyb6GaSZG1WaRiUQvpd2nEHCZ4Dlq4aS4RvXtGYmH8IlGNSv9la8JrfHtE35yeCc+iClAqkEbfIe\n",
-       "/gcs2kDiL05MWFJVsF7na4LFQtvA0wqGC6UUGKngj4U7WbuFJrXCDxQED28MDSAB01cAX7buK/dO\n",
-       "YBeXj6ZTHqZXrF6VmpMPMm7qoWqjqk8vrjH0EQlWx4IbZljm/vtLK+IrNGLX/8ovvVdfsIcLWCdd\n",
-       "vmSBID+K98Gt7EPtil4PeH4pgeazfrbJbp1GUeFvLGRCHKHYuPLxQfcyRjpBWOL2V/9wihy6PlWR\n",
-       "wikfB9dcO6DQSoI71zTfW2teqSV2Dp56hPoTNFz/Ii7jgZ+iFdoTXldDR5SSW2elniqqH/ZgHepA\n",
-       "yV5fUJrhcNXCd4xlKoAMknMUFcg1iCK2lFIHfC/mQ+K5dz/Q4EOqy9hdN96CK4HePTI+bJe9+pCI\n",
-       "vglk34SWmJpljgFeTeUk6GBBl2qvbVYfITLFhrAn2yM9BZO4vbzWQbo7qKbxrA8013eAvfR+ghha\n",
-       "M3l9AAAChUGaqEnhDomUwIJf/rUrwheoYwVDefQAQ6CVGdCcZu9o0AYgPILVsQVTBYL+u/iss8xq\n",
-       "AVivpGgE/+t9hnfevyoda82ADV24xh0qbEjv5piSVIxGwWKxHfxc3x6yRpOBAzeQFV6G9XMKR1KE\n",
-       "tj+lPPFoiK2sbVK8YzxGfh6eD1wDpisr5CRHQx4jmsBBpFEzTjKKEYUhndrzOYBppHWNTvxIU+Iw\n",
-       "bGQ2wCFVD0oxw7zLcZ1c9NQyaly0gEMZLwJMnCP0dsbPAQldfOXQiv06tZYBsWAIlR9kaGWVzV19\n",
-       "l6p6A/jsH3aKkoxXtGa59PNiuALBsVd70AQUrApW63nL52YcDSpGWbfvNUXayZmuBdj2qA6bc12A\n",
-       "w9Dcu5eaAPmBo3uhhMRd4VIbjE95M+eRccQ5zTSOZCUrJK28IBKE5ZZBgV2M7UbuBPkjjf/BgsS+\n",
-       "fIeP99bKhcJOGYIOI8OYKNV7DVM0cl1n2PKlfFYZv8QKUTz/ftKVCjir5gPE2x7sclPHGWcS3AyD\n",
-       "NS1Jk1aS49vuVqmFN3A3VwmDqZep3kMCLe+ZZO+XYkTZwiB8d7oJjXkBY/e3yeAKp0SBRJs95ZUA\n",
-       "YINc/ZjHFe4hZWrLUQ7BD0eo/LW++0CUpJLTsNZZEBiPlHx4eTlYoe7Sm0WvUI6N5EZ8EEL6oBof\n",
-       "9VVwwexGr+QJzuyQWZd3njlUnpQQjJT3YBgx/YlCZ+yLZ9ypYZazNA9lg5vU94R06Ma32cL6iL2u\n",
-       "MNVFAnjmN8DdJ8OjQVyFUcA5iW61FeJxHisiJPQ7goytF+LgOBt4UnWrNcVWDjsDorqOXBSctxCf\n",
-       "F9GdO4awunvpvAJATbRdqc625JnvKzlB8AAAAsVBmslJ4Q8mUwIJ//61K8KUopRlNefGAAl3O48k\n",
-       "S9B73752HAsli91fvEx9z2KqYHzXLkj+crdrSooE5QhGv/69sXoinbAK5D7Qy4jUSAR1Kg483ngZ\n",
-       "yT+x39ts/bNErcyJvfcHy4ufzQ4UFvg/U6hHbkTF96FuUep1HzXNsevlYbn8pNAXIMdvJUTF9lZK\n",
-       "U9/mQpI8MQ0qJG6UhdYlJ/+d5Nz5rNqpqaylhr5s4bs+Qr58p/+AewQCSH1SMY7QJAZoRBMxaiyq\n",
-       "27g63iqxmZD6ERO8AprUvqulVMz5h71TwPU+y23n50ri9lVm3fK+idTtHxGCGk4WljHXVgzczQmf\n",
-       "Q44k0sR9CH0Qzhg8r+lBthTw1J5mE3wgrNe9IDTVe8jzFOA9lG5X2xCkdoM2s0Vru6Ta8qL/2RCb\n",
-       "zyhn2BRRLnDIlWJReoSYjy5VOZCYZnV+8w8o/nowpSEbD+EI6VCFPaBudc1tC1eb9EGnk6+3TrIP\n",
-       "Vw/rU40eH+AydOfyD/qi97zPFO8KhltK7twdnRWpHxBowoP86wlYYpPFOWBLrufAGfuQgukuf4bC\n",
-       "/GDmclY03+dXcan1/FOVb65cDjqofkmaDwpmwTC3EAG1xpYc+CD4ViMCG/XAz+oAgR8+y8UKOLmP\n",
-       "BCzcFroQd96aTeEGs2b9hu9tXTErAOzZbXp9ZLAD1A9qz3BsyMoIQniXOiqRZbbqRH8gIlMroX3C\n",
-       "fJYI8N23u/dStbMcczETYkKCDDiz5dgI1SJnMpg/mnMSw61kyjs1wZMS/jdTr6ZWIYaaSLTIA+Q3\n",
-       "Hf677wP654sSueQuhfAtHFRHzoXa1tw/KKrvnbNVDoPMBw14QK6VKV0HDYKKneXequ1k9eFi/Grc\n",
-       "p2yfKZHWFsgDCztqAS3jTF6SheGICR0roVRveSDJ/qHHGA3yRhX3ltO4LKURFRb0/YjIkpcGAAAE\n",
-       "AUGa7UnhDyZTAgn//rUrBPL8wJ8AAhIgAGvOiJ9vD8oPFKdqZESEP0wShAbPIVqySqg9ih8h7/6H\n",
-       "E8MgBHDQt0hMOzuE+InUSbRAkhQ8wAAE4hFhPvu2qZm8XETcvb/mb7lRUmY4I/zxDzZyL9Osu3bZ\n",
-       "nGrQgtcn30mi2K/VIaAC5slfSMePKCwFmymcWBnU5vvDEkXFCQhthv64k3Fsczas9iF4iQ4d+rDL\n",
-       "g6OpGPQJa/7RRMhwkdY+kZAjaCL+nr4uVuNFMiyYRy7VbordwH1pbO7y5NPxOWQkNRb6/83XYXw8\n",
-       "6++8UrGZAOVCdPGcDI4KME/HOwRKdlHg1adhmTqI4+fMqXTpmrfgVSqddN10YFj0utgI3xyFtyUV\n",
-       "uJxk3LeDkgy+hnn+uK74t0haDykT2MzXarxrIHNbuSAVkFIMoPFbUsSw+tr2I7qzCvHlSc9QYpmy\n",
-       "HLARaWoMXvLN9elyv/BR157l7m9zaoU/0uhSeGgzXv+oxn7ZrDfNfKPDWofyKykaNFUbJ6tz0fRW\n",
-       "GffUT0rDkYS2aMTlZQaEZGnglK5WoNoakvC+YGxGLe6blPj7GfohVZJO8zmANguiieIuHR7eViKT\n",
-       "j62wU5q/p9lh4PJlI1773zcZFbg/KMe8qMB34Tn8JboDhzbK4f4O+9HmnaPscDE8DSXOzlZN6YYk\n",
-       "FLidL8HeZmN29UaxIyigNqu+Xp3fWp7vE7poc2Cw2Bjjg5FlgOTR+W7JMliLuaNIa/12XbAh3Xty\n",
-       "21GpPnUZXxMFXJbN4K5k3CQlYPvlsjwaU8sShcIFLMH+fK4iz8pg9Sca26D7VemNu8rRqTV2bH6L\n",
-       "/skw50TZIqMGjCyg5odafh9EQV7BUKNwh67wDvwUza074LvHiQB6KZl7REW7mtTpKdJ+OQF8AEOu\n",
-       "UrDOdTsu4BJdC52pwvk5xybf+WRVzt16PmeYx7VyJ/p/jAytjQk7w1ZuIJ1W07DAnP0XyWFl9ozm\n",
-       "AJFLHxYQnUQH0889mKQxUU9oCHFey/lj/76/t97TTbN+MmLLU+yXDM+na7SaQ9oS2VfmVj0pmLyb\n",
-       "SymCs10zYFsD/wrCPzZLvOfQdw8TYU21qKdREvVQyEpb0CY+UUF+OsyVuIDAB/8asGGAGUViUbM5\n",
-       "S6Ji6ZxdYDsYq+RVlqCEp/9Td4cPnwdTRJUolFCElGgK7wDGVZsy92PQt6IHawdn3i7tBOA+TzzQ\n",
-       "St3hI7uPt8EsxkZ20VzFtIdhHo3619zKiBMpAH5A+ApD6xiSA/jVWX0THrLLAfk/YE5ymW50we5P\n",
-       "6ZQ032Y6mRBqnRTXmwU81CHx65kpeLX6xNvgYe0/OlFIh22iwbqnVWtUw4OcUDPFJ/6S8aYp/DTp\n",
-       "AAABEEGfC0URPBD/Ab4ECgAAfGWSpXIYAf4/0JSOOReD82prmIjZXBlytEIVrGG831CfXrHMyJES\n",
-       "il/XO7fv5Ij4ETsrb6ok1xST9GFb7/6z2D8KB8l+FtvS0oO+F1FaEROUP1hMVDzFnwkf6TCXwHzK\n",
-       "EZm+Wy2louoRx1ESqf19tYD6l82VdCOOjMIQjYq5Sq+nvNYEDhZqlc55EyVwLOq72ViNkECo2PLw\n",
-       "/Pi7z6ld27h4x4HKHL/vg5Ni9yB78Niqe08MiPjHj7KKL5ctEuSzRACjtGNtyrBSnIchhE/BKgpH\n",
-       "W1xR2DeS3LUMHJ5KLQu7FmT95vS2tqgdCPLW4zR1DHW0dDJIdklvKHhNvec4qEPAAAAAwQGfKnRD\n",
-       "/wSi+bt2P12zwAfl3PKsnG8DlHMFqD6MhwNzNxu1bUXd+wk92Y/C4km44uKNKb65xq/Nv1ooP0Uh\n",
-       "tb8IXW44v33NLveyn/A3wkJl7+kU0k4bBLgmmoB9pFz+GP5IcsvDzitQaMJjZ7Rx9/wjz/eZEDS5\n",
-       "XWeVNTUcZu4hUkGKLxqmVLK3CJ101Q3b5ZsiEjzaOZsUpD/H5NovnC+qOp8/3QXtrsU+e/hIjVnx\n",
-       "fpF3k1Pb6SfFhe4AijHKwjYAAAD1AZ8sakP/BJ8eqlBVppVigAnHnMW5uGwtakwblI6MhwGOg0qt\n",
-       "mqC88UqGeaFMovx0B63FnPWavEpjqnKaj6x/0Uh2L8IZy442XJTA25FOxla2kd6InQkozgADUJ+V\n",
-       "UtgvMjzGeJ0PaX72b7XOIFVWYk2+z3HUy4M45DjGTSXnqdp+cuXyjyMCxTWFTzrgJG31o3L3qYiF\n",
-       "DU9bePTiAA4CnQEat+RyZr046/ubFsaAGe3XYWq2uICHAyg5GO8fK3opiGhrhAB+28/JqWgXvEBN\n",
-       "IGTeAYZvLq2fySrXrkwvWo4dTcxwrehx0C96MSm6okXq/zEVgXcAAAPgQZsxSahBaJlMCCP//rUq\n",
-       "/dYcNQAdt64CSSmVA3jU6LRpllA8P3SVS/GWA/OttQw3zX1Towd//n3Pc/fj6aH9cwE3oCcC3A+k\n",
-       "f9n9tz/VDi0E/IHYolg18gtH06oDIIEwwf4Bio04t88WONh/yH7U7lPVbLU+cUKQUlLaOs3//hN6\n",
-       "81Y6+WIfjQTmGjwdPHrzPNyqdYHabkPtKoAIsyKEK2AG6eeJJ3CdoMR4kyiW1Siims837NaTj0CP\n",
-       "Cqik2gkxbMHwcIGHqOvfLHESSONVs18yU3asof5KiHn6eO7UV2cbNN7bUBOysPex86WhS0c5EYOQ\n",
-       "7XmlSzm1WvkMV8ALU/zAiPFhEz5jzTyUXjpvP2DAasr59pdl/8nEAtSrUobrafoGYFS7SqydpKKk\n",
-       "IJaoMAk0Ib4LJLn6qm+pCFVCEf/B74he8o0S7UgY+ozghIyGzvL8aDkwFvrGyAne4+5zirS65oUS\n",
-       "7vH99DpbsD8s7StsxBqSKgT2bwx1OYmp3ZTVFj7GC72XDw1vykpxgpw0O3QXiIHnmxIzGFdv0wCB\n",
-       "J2REDk3Ndw0wH99jNo9dpr8Pnym3+BzCbZpNz1g3beQhxpb9jxNP3ZUAWHHDKk463mRKb2AywE4M\n",
-       "dasRz495xy3xn/DdeXM52qXWnK2wzkCUstO16c6VVy8iRSd6lpLTSggXOFlepHL3JSOyRvGHNKBm\n",
-       "Icd7gE29JK9N3C66OKnl6ZwlyiO49ig2BMxR5Uh0RuoLTDoZaS8xRtS6hLyEOYR1aKpiM0cFE4Fk\n",
-       "iP/GNn/+aB1Ha4JzUAguWWVhjVS38+hHWeM5/Ehg/ziqUrn3gY/LQjDfoVBfAXlnr8ynCtj9gt3J\n",
-       "qtaBSSSk49Kr/C/iZHtv2rMZK0lBgbFowbY9Ze4up1pgXoJx0An9MAApUoeHhIncTj6XIrbKS8GC\n",
-       "B5+tANQaRN8enHIXCrCyCHo1vblLiqd4EMLWbFsKx6DDY3e54QS432Xe7w3e0CPSbITJndiC9b91\n",
-       "6XwPhgYIGfY41TY8Owec6pYN9SUrd0v7Wsbb4XcRePSqvziAkNyZYt39lpLAyG97dTnVAyucMJM9\n",
-       "1xQD2IewGM3eZbImfJ3sqDZVs6Auf4TZ2EGypa66DihHceP6tRENryGSGj4oRiRCDHuNT37qI/bz\n",
-       "0NowzbJRoptqW+Wev5ADkEDSFSjD4q6OnwfOe2tShL+FEhSJ4IZGNpchQnyDerzRZa+ODUR66yvv\n",
-       "owUy2EHhqLYorG2sPZQjLEHVoxZlWpmaM8fN2IitskQawHXixBH/eUBWP8dE/q1tyEjODC10QdRR\n",
-       "1GaPIJgAAakAAAFuQZ9PRREsEP8CpzQjsLAoZEZ9HtQAeXM0PuLGzd6UmkqjbEqzL3Nnpn5oUIbs\n",
-       "cvtMlgMqNSEIsK1ErsH4EUPoGNGvAl30kbWuM0fwAp7h79OkEUdGcnyOJ/ZgvhSHXhB54q9Sbysg\n",
-       "6xphvGyZnsIu9JV28FTDLQZx0iLeb3YfNd3OOkJClMZJu5BZblGk/jr+hjDWMvHEW0+qb4hxY59/\n",
-       "phP5VGIiNqihGCsDGq++KsZC7mK3TXSwWi1h3CTi87vQl13LrMRf5hmGxWAhnb7IiAoXUYbMGFhL\n",
-       "T0BJ8sFdtseKQjEKypULgrdvJbHDBbqKnhdAyztBnPqmQMhrvCJWL9V31vpiRaFg3sNlS1+8oo8W\n",
-       "ZdorwJY+2MUEt51jVZz3iybrVYywSgerW82O5F0+5kELrF6BKvre8uw8sjaw0gHDx8nOOrJkbr+p\n",
-       "zQHB40y7CFx59sfrJXJFMaa8l2sV5OdRwT7yjPjVlFBrXgpJAAAA8wGfbnRD/wSi+bv3EckbACZe\n",
-       "cxbm4bC1qTBuUjoyHAYaii/BJy+zLTBLPSmL1iZF92TcC5D2QPG8PJiQe1M5/TFsPIPO8xq2TzmJ\n",
-       "+TKV8A1LMPYMnU1slYl7vmcLp0QLb2geMKD6QlfkbHH9K0TpWIY+Wl57gQpdERiUfmsOWu8bHpzJ\n",
-       "1j+ha4bSEIM/iO47Ifdz29JQS2J3yP7IYxD5O4qNj+D2GPdqqe2VzZKzES3aWOCxMk+lN5lhzYBD\n",
-       "0ZqjCS/oEj+z7TF5jhR9/UT2qoBYFLY1HzksL00pm1jx3pyrXbKDPxlTltrCxEA9iQoiH94ZUAAA\n",
-       "AS8Bn3BqQ/8Enx6qUFWkN2KACcecxayJBeaMmE2Gk53BLlky/oEIDO1zCFgD2ACCs3gvFL35graP\n",
-       "mmJbrpMf0I+PLYSU2NapX0Uh3WWn6g8HjE/cR8EYaN7I3tU+34yHpCBux5sryz95ZnARlN6gUFvE\n",
-       "jDVnpQixW5RaAyDZqo+vEPDun3Hgv3eac5CBfl7TyZw3v/mkgfCkgQ5EzMYNflIsRhp6V2ElamPZ\n",
-       "Ae0eH9CpWDacstpsTIdQAG1PMlHKf0FT5PVFingxWIn8K3Kkdo1ZQ7/9gUVfKQiGqH4GFC2pGhuu\n",
-       "74DUJlMf+90cKgcE+u6hC+UElIQsK+hGjwaoaqyJYIpL9PfE+u7kPuXxMWuMi53S6a6turj7G1CO\n",
-       "ZJ4F+9StGtFqZeBeF0Z83Z9wFJAAAAJxQZtzSahBbJlMFEw///6pnFIwgpbqpRQATjzmLc3DYWtS\n",
-       "YNykdGQ4DsG/wXdKeD5l/+NCTP663T8YB9Dm4P1ALAqykBhVQb+Vtci+VXHrOKSIeslfYbj6BsSQ\n",
-       "8DEtKhFDacEAsQhnITJGDzvAVVcooU4yn/CldacJP0X1qcIvhdk4PBnOpv/GGkNl4NKdVIlEuNVl\n",
-       "2PRULXjJYdnl24pKHymq1giLjWZ6Hbaf4IxXarlMvOM7idhGXkXXNka1UP16/BUOpdLWDwsCO096\n",
-       "Vd+ZTwVGy7hoj7kIapf8ruzE9u77iFwRnLprZ0r8y5nQ9brdpRUixbmuv7HOzT3ngbYRN0/2suGl\n",
-       "TQnDNAq+2URytGm75J9Jm9+c9LyH+Gbqi/HiLOEnyVv5sbmxxH2pOsNxrNvqtbPfQF189T5/DqEG\n",
-       "GYbH1O7NSLd8POa6ag6c33AtH/XV3snGibqK8mbns00Zwpc8y1XG10eNZSgbZAsAEZJ94L1eiE0x\n",
-       "gtI//O9zo5yUyVL6L9Vx+eQoDc7nUhsdR/ZXQqrnkrnBERl8gauUcSzH9SluPxLgVILsTv+p37DJ\n",
-       "I55ODUvK1ELHjXvIGEJBr2fF7YIOvvHkVYIfYi8OG1VID3lz2m63lmkjCHkorHD+pXidKSzJxpbX\n",
-       "lvebohDwmuHbcpnFmn6GvAbCo+w7FUXMFD0tZ1ddaXIUHc8jttutFODUuPgW8tJC8iMtyzfWqC53\n",
-       "BrbiOZZlexWpQeWXHK2u3iJWifZ3dfZwxWQoByvqLIuQdTNOmk4E8MBmqylpDEzIUrlaunJanLyK\n",
-       "AUhlXhQNwe7OKct4DUzpB4hL3AAwIQAAAKMBn5JqQ/8EoQyKib6QttHV+JDhSD+8y9cr8O/Fehp+\n",
-       "TQG0R8yf2pAriCFx8pI9gRBW8BNCVw+msJWB1gBg8OHsmBW3nco0v0xX3AUdHRRtePWC9czSWiU9\n",
-       "YT1kaFXQWGmCpAysKrinIGDxTtgl3iw6RWNX8PwFM5NxVA1agC5CKQ5z55yGofdIpgcKdEFJSY8M\n",
-       "f3r63he/W9owGiR5/45qow6oAAAED21vb3YAAABsbXZoZAAAAAAAAAAAAAAAAAAAA+gAAA+gAAEA\n",
-       "AAEAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAA\n",
-       "AAAAAAAAAAAAAAAAAAAAAAAAAAIAAAM5dHJhawAAAFx0a2hkAAAAAwAAAAAAAAAAAAAAAQAAAAAA\n",
-       "AA+gAAAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAQAAAAAJA\n",
-       "AAABaAAAAAAAJGVkdHMAAAAcZWxzdAAAAAAAAAABAAAPoAAAEAAAAQAAAAACsW1kaWEAAAAgbWRo\n",
-       "ZAAAAAAAAAAAAAAAAAAAKAAAAKAAVcQAAAAAAC1oZGxyAAAAAAAAAAB2aWRlAAAAAAAAAAAAAAAA\n",
-       "VmlkZW9IYW5kbGVyAAAAAlxtaW5mAAAAFHZtaGQAAAABAAAAAAAAAAAAAAAkZGluZgAAABxkcmVm\n",
-       "AAAAAAAAAAEAAAAMdXJsIAAAAAEAAAIcc3RibAAAALRzdHNkAAAAAAAAAAEAAACkYXZjMQAAAAAA\n",
-       "AAABAAAAAAAAAAAAAAAAAAAAAAJAAWgASAAAAEgAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAA\n",
-       "AAAAAAAAAAAAAAAAABj//wAAADJhdmNDAWQAFv/hABlnZAAWrNlAkC/5YQAAAwABAAADAAoPFi2W\n",
-       "AQAGaOvjyyLAAAAAHHV1aWRraEDyXyRPxbo5pRvPAyPzAAAAAAAAABhzdHRzAAAAAAAAAAEAAAAU\n",
-       "AAAIAAAAABRzdHNzAAAAAAAAAAEAAAABAAAAoGN0dHMAAAAAAAAAEgAAAAEAABAAAAAAAQAAKAAA\n",
-       "AAABAAAQAAAAAAEAAAAAAAAAAQAACAAAAAABAAAYAAAAAAEAAAgAAAAAAwAAEAAAAAABAAAoAAAA\n",
-       "AAEAABAAAAAAAQAAAAAAAAABAAAIAAAAAAEAACgAAAAAAQAAEAAAAAABAAAAAAAAAAEAAAgAAAAA\n",
-       "AQAAGAAAAAABAAAIAAAAABxzdHNjAAAAAAAAAAEAAAABAAAAFAAAAAEAAABkc3RzegAAAAAAAAAA\n",
-       "AAAAFAAAFfUAAAVHAAABuQAAANQAAADiAAADhAAAALIAAALQAAACiQAAAskAAAQFAAABFAAAAMUA\n",
-       "AAD5AAAD5AAAAXIAAAD3AAABMwAAAnUAAACnAAAAFHN0Y28AAAAAAAAAAQAAACwAAABidWR0YQAA\n",
-       "AFptZXRhAAAAAAAAACFoZGxyAAAAAAAAAABtZGlyYXBwbAAAAAAAAAAAAAAAAC1pbHN0AAAAJal0\n",
-       "b28AAAAdZGF0YQAAAAEAAAAATGF2ZjU3LjgzLjEwMA==\n",
-       "\">\n",
-       "  Your browser does not support the video tag.\n",
-       "</video>"
-      ],
-      "text/plain": [
-       "<IPython.core.display.HTML object>"
-      ]
-     },
-     "execution_count": 19,
-     "metadata": {},
-     "output_type": "execute_result"
-    },
-    {
-     "data": {
-      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAesAAAEaCAYAAAAxPX8dAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvhp/UCwAAIABJREFUeJztnX3MNWld37+/mfs8+7DsPnVXtsibxVZKApQuabUGErLBmlJdBdt/Ci3EmnaTWipUKUFiY9LUmvaPlprSNk+EgMVijKKgwa5tkRjagLhUtwIBiYG6lBfpurCLwu5zn1//mLlmrteZa86ZOXOdc76f5M6Zl2vmzH2f657P+f2ulxFVBSGEEELKpVr7AgghhBAyDGVNCCGEFA5lTQghhBQOZU0IIYQUDmVNCCGEFA5lTQghhBQOZU0IIYQUDmVNyJEgIp8Skb+6w3F3ish9IvLH7eudA2VvF5FfFJGviMinReTl+101IWQOKGtCThgRuQLgXQDeDuA2AG8D8K52e4w3AXgUwBMB/G0A/0FEnn2IayWEpKGsCTkCROQ/AfhGAL8sIo+IyOsyD70LwAWAN6rq11T1JwEIgBdF3uPxAP4mgH+qqo+o6vsBvBvAK+b4HQghu0NZE3IEqOorAPwfAN+tqreo6r8SkYcGfl7fHvpsAPerO6/w/e12nz8P4IaqfsLa9juJsoSQA3Kx9gUQQnZDVb8uo9gtAL7kbfsSgFsTZb+cWZYQckAYWRNy2jwC4Jq37RqAh/csSwg5IJQ1IceD84i8tu069fOGtthHADxXRMQ69Lntdp9PALgQkWdY2/5ioiwh5IAwDU7I8fB5AH/WrKjqLRnHvA/AJYAfFJH/CODvt9vf6xdU1a+IyDsB/DMR+XsA7gTwEgDP3/O6CSF7wsiakOPhJwD8aNuB7LU5B6jqowBeCuCVAB4C8P0AXtpuh4i8QUR+1TrkBwA8DsAXALwDwD9QVUbWhKyMuJ1ECSGEEFIajKwJIYSQwqGsCSGEkMKhrAkhhJDCoawJIYSQwqGsCSGEkMKhrAkhhJDCoawJIYSQwqGsCSGEkMKhrAkhhJDCoawJIYSQwqGsCSGEkMKhrAkhhJDCoawJIYSQwqGsCSGEkMKhrAkhhJDCoawJIYSQwqGsCSGEkMKhrAkhhJDCoawJIYSQwqGsCSGEkMKhrAkhhJDCoawJIYSQwqGsCSGEkMKhrAkhhJDCoawJIYSQwqGsCSGEkMKhrAkhhJDCoawJIYSQwqGsCSGEkMKhrAkhhJDCoawJIYSQwqGsCSGEkMKhrAkhhJDCoawJIYSQwqGsCSGEkMKhrAkhhJDCoawJIYSQwqGsCSGEkMKhrAkhhJDCoawJIYSQwqGsCSGEkMKhrAkhhJDCoawJIYSQwqGsCSGEkMKhrAkhhJDCoawJIYSQwqGsCSGEkMKhrAkhhJDCoawJIYSQwqGsCSGEkMKhrAkhhJDCoawJIYSQwqGsCSGEkMKhrAkhhJDCoawJIYSQwqGsCSGEkMKhrAkhhJDCoawJIYSQwqGsCSGEkMKhrAkhhJDCoawJIYSQwqGsCSGEkMKhrAkhhJDCoawJIYSQwqGsCSGEkMKhrAkhhJDCoawJIYSQwqGsCSGEkMKhrAkhhJDCoawJIYSQwqGsCSGEkMKhrAkhhJDCoawJIYSQwqGsCSGEkMKhrAkhhJDCoawJIYSQwqGsCSGEkMKhrAkhhJDCoawJIYSQwqGsCSGEkMKhrFdCRD4lIn8iIo9YP08WkTtF5D4R+eP29c61r5WcDgP17rqIfFxEtiLyfWtfJzkdEnXuhSLyLhH5QxF5UETuFZFnrn2tJUNZr8t3q+ot5gfAFwG8C8DbAdwG4G0A3iUiV9a8SHJyOPVOVf8vgN8B8AMAPrzytZHTxL/XfRXAuwE8E8ATAfwmmnsfSUBZl8VdAC4AvFFVv6aqPwlAALxo1as
iJ4+qvklV/zuamyghi6Kqv6mqb1bVB1X1MQD/BsAzReTr1762UqGsy+LZAO5XVbW23d9uJ4SQU+WFAD6nqv9v7QspFcp6XX5JRB5qf34JwC0AvuSV+RKAWw9/aeSE8esdIUuTrHMi8lQAbwLwQ+tc2nFwsfYFnDkvVdX/ZlZE5B8DuOaVuQbg4YNeFTl1nHpHyAGI1jkRuQPArwH496r6jsNf1vHAyLosPgLguSIi1rbnttsJIeRkEJHb0Ij63ar642tfT+lQ1mXxPgCXAH5QRG4SkVe129+73iWRc0BErojIVTQdGjciclVEeH8giyAi1wDcC+B/qOrr176eY4D/jAWhqo8CeCmAVwJ4CMD3o0kfPbrqhZFz4NcA/AmA5wO43i6/cNUrIqfM9wL4FgB/1xt//Y1rX1ipiNvxmBBCCCGlwciaEEIIKZxFZS0iL26nMPykiLBdghwE1jtyaFjnyNIslgYXkRrAJwB8B4AHAHwIwMtU9aOLvCEhYL0jh4d1jhyCJSPrbwXwSVX9/baD1M8CeMmC70cIwHpHDg/rHFmcJWX9FAB/YK0/0G4jZElY78ihYZ0ji7P6DGYicg+AewCgRv2XbhZ7Ai9xy/YHeRvFLxDZLv1+53iB+tutdXXWmxcVt6xTxuz3jkueJ7Y9sq151cj2vhmju6R2m4hav3qz3O8z2xQiQAX3mH771vpzaH8e+7U9Z3eOdh/gvn93nf2fytlu89kHbuCPHtxKdOeeDNc5cq58Vb+CR/Vri9Q5IFLvgskKyaws9knug6C5w/cX19S7r45e7ZKy/gyAp1nrT223OajqdTTjOnGtul2/7eKv9TvNnAxV83t0E3tVVWObqmq2VdKUraTZXtfN9rpu1/vyWlfN8e02rQVa10At0Era/e3yhdlmXtG9bmuBVmj3WesV2nLoy5tls936QVdOw/21NvsrBdoyqJptqBVSKaTeQqpGpFW9RVUp6nqLqtriotqirhR1u3xRbbGpL1FLs3ylutFsly1uqm/gQrbYVJe4kEts5BIX1RZXq8ewkUtUUGyqG9i0+zZyiRpbbOQGrsglKtk2r9iili02aN6nhqKSLWo0y83H2S/321xpv/zuzy9W7wbrHDlbPnDj3l0PnX6vk9v1r8i37/p+yyILW27BuXakWsHQe/4+H3jsv2SVWzIN/iEAzxCRb2qfx/y30Dy/lJAlYb0jh+Z06txSopaq/5n1tOL8zI593amfA7FYZK2qN9rpMu8FUAN4i6pyjmuyKNPrnRz0H46UzG43+5O41y0h6QXEPDtH9L+/aJu1qr4HwHuWfA9CfFjvyKE52jo3p6RnFN+sYl5KyAdOua/ewYwQQsgKzCHqGUQ4i5jnFPIa7d4ZnI+sl+40QY6XQv85CVmEfe+Fe4hxbzHvI+UZ/89lRp/knul8ZK1KYZOAZjgb6wUpdKTPnOxTz3eU5E5y3lXIe8h40XtANfL7ZL73+ciaFEEl27UvgZDzYlcRTZTmQcS8w3vsJOIxwa4AZU0OylarsoTdjr8n5CQzb7v8ThMEOknQU8Q8UcqThLzr//vK9YOyJuQUb9LkvJlap5cQdO45M8+XLeQpMj6iqJuyJoSRNTkVFpJ0lqBzzpVxniwp5/zPLiF3LNC+nXk6ypqcN8IOZqTl2KvBlHqcIdZRQY+dYw4x79s5K0PE01LoS1QSdjAjJAPh0C3ScsT1IFc4+0p6D0HvJeY9pTz+3jOn9heAsibkiKYcJCRgBlHvHEXvKuddxZw4bvi9ls8QRC5oQtm8YpQ1OW8EjKxJwzFWgxwp7CrpOQWdkvPE8unzz/t7tG82vH/o3JNgGpyQPNhmTY6RpUQ9QW6zyHkOMU8VcurcM0fZWe3hjKwJyaF97jkhxxRaj0lgLknnCnpPOcfPuaeUY+ecKPVR2R7wiz5lTc4b9gYnhmOpBnOLem5JZ5TbS845Ys49LnUtu7a3557DLZhVirImZw4ja2I4AlvvKOpZJZ0j6F3
knHM9OdFyzu+wb3YgZ1+M6OeQdyhlTQgja3IMLCnqXSQ9VdBjch4T8xxSzvmisVMntn3uIYysCRlHANQcukVQdmA9l6gzotdJkt5H0HvIebKUR8vntnkvIGxG1qREinqIB4C20XrtiyBFcKT1YFdRLyXpXQU9l5yHyo5+QcgTtM4x7GsilDU5KOU9dQucG5w0lOrqwWhuAVFnSnonQQ9Fz7lyTol5KFoeOp+3LyriJaPstZ9nLSJvAXA3gC+o6nOWeh9CbHapd8o0ONmDRe91O0RoU0Q9GE1PlXSOoBPXsreck++XKeWxCDu393qq7AwsGVm/FcC/A/DTC74HIT5vxZR6J8LImjTsfpN9K9a410Wi6p1FvaSkc6LnXeWcIeaklOdOlwPQXerQ2pG1qv6GiDx9lpNtlVNCkix2qnesW2QPZr3X2UxMfy8l6kmSTkXRsfceu4axyHmKnHMi8qFyiIg49R3/CCPrLETkHgD3AMBV3BwvxJspmRGnzm2uMbImByHrXpd1oomizmmfHoumd5F0rqBjghyLhmNyHimXkyp3hGz/mTMi69GoOnGb0Uy9rS5rVb0O4DoAXKtu15Uvh5wBdp37Uzc/WbXml0GCxSIig3Ovk5F73YRryRqe1Z3WyHCHaHpI0pF90TT3VEHvKueR6Doq5QEhJyUeKZu13S2UUaYAWROyJiqAcgYzgvwIZ3F26P2d3B+LbmOiHoqmd5X00PsF75GW62BaO0POo2IeKpe6tpZoncnMBHfvdyyRNSGrwyw4OVIGH3E5p6iHOo7NIel9BD0UOftyHiqTLGft9/7e0dT3kHynlrdYcujWOwDcBeAJIvIAgB9T1Tcv9X4ZF7TaW5PDMbneiXDoFmnY8R4x671urqh6oqgH096paHpM0rlR9AyCHpNzjphjUnZk7H80OVH2jN5Zsjf4y5Y6906oUthnwC71jrIm+7DWvc6JqnMnF2k2tuU8UafS3nNLOhVFDwnaE/xkOU8Rs70vVt7/m3q3j8GOZnsInWlwct4IONqANKxdDSZE1YPp7+50drTsiXRM1FOi6R0knSVos75L9JySc0rMQ1K2/vRDkfbklLg5brwIAMqakPF5fgkpmaG5t4FZRZ0VTe8q6aEU99yC9ssMiVki24LtcBm4pwRlM28/lDU5b0TAoVsEwLrNZHNE1UPt1LF2a/uYIVHnRtNTJZ0bRWfKex85+2KOi9o9v7PNPqfFnCMMKGty1ihAWRMA+enIoohNkJLxpUMcuUZEHUt7x74AxNqVU5IeaotOtUPvIuhYajsm54SYw3OiJ5ouh0vkzz+5HTsCZU3OG6GsScta1WDmqLrfNpL+ts+fEHU07Z2KpueQdMZ2oJVlQtBO9ByJnKNy9sUcROL9nyyQvL8fgCanIg03Hc0MZoSsDdusyVEyFFXntlPby5GIutueE00bwSW27ytpO+JNpritY1OCjkbOKTknou/m90SwzSmL+P6QvPvP+ch6zfYoUi4CKCcwI8A6kfUc96XML5vRDmU5EXWmqJPR9BRJR1PgCKPoXEGPRM+OnBNi9qUci6hzOo0lI2hG1h4cZ00SMLImqzB2PxpKge8QVTfLlqjt68gV9VDa24+mUx3HMiQdRNFRwco0QTuCh3uMWU6J2ZdyTNLe5xmfJCVzW4TzkTUpgkq2a1+CgzKyJi3FzA0+hbEvmmPt1Pb2qaL2096xyHtuSfvt0L5sI4JORs925BzbHhOz9feKtWcPdjyLlQHHWZNC2WpVlrBFsGUHMwIcT+ZtSlTt7HPF7JxvrI06Iuog7R0RdZDyTm7rr2sfSQcpbj+C9gQdbcuOyDkZUWeKmr3BCZmBZM9NQtZihxnL+sKeuIHxdmq0Uk+JOtY+HUt7xyLnVITttUkPSTqW6nYFbx+DoCzgCdoRsr0s8e3d+fplw+CYbJs9vwtS1uS8EQ7dIi3HVg38yUr8CU/sfQBG26mdsjuKeiiajvXuzpF0J01L0qko2o+WBwS
disLTonalPD26RhQO3SIkAwUja9JQ6qQoYx3LosSiarOcaqc2x/mpbyAUtdO27Yk61TZtp7wjvbsnSTrSFt2VS6W4E4JORc6j7dcD6/3fzV3dp18EZU3OHsqaFEWOkFNRdaytOnbOYF1cUdvbY6JOtU/ntE3b0bQt6bas0yadG0nnRNGe4N19pnx4bHQZ/Xq8Z3i/OCpoRtaEZCBgBzPScCrVYGpUbbdT+2X3EfVYB7JENL1XJJ0TRScEPShnX/Lw9sPf5lWmGeoWZU3OHkbW5CgYi7inRNV++tsc76W1gzZqw1RR7yBpwIuIp0g6Iuj+fMOCHpJzKGpx1qcO4xrdZ0FZk7OG46yJoYhx1jm9wIc6lo1F1WPpb79N2i871EY9o6jnlHRU0IiXSXdC88Rsl0Nk3dqWXEfkmAEoa3L2MLImp0QQVfvC99PfMTE3JwrT3zmirqpOQFpXZUk6V9BDco4sd+UQCtyw75dBypqcN0JZk5Y9b6arkhKuvX8ojT6U/p4YUS8STdfLSHqwk5m44vXT403Z4fVgGV65xP4Yi8laRJ4G4KcBPBGAAriuqv92qfcjBNit3nGcNdmH2e51OXOBB1HyQAo8JudYVD2HqNv/Ia1ksqi1FiQlHYmm1f5SMJOkA0Hb8k2JG325paNqYNnI+gaAH1bVD4vIrQDuE5H/qqofXfA9CZlW78w/PyG731APfq9zU90S3w6EPcCnv9GsEXXzJWAkmq5kPOVdTZB05Zdzywf7gGA7kE6bd8vB365fTMpa8kW+mKxV9bMAPtsuPywiHwPwFACUNVmMqfVOwQ5mpGHXSVGKu9fFOqXZPcCnRNXd8Rlt1AlRN5HzQNrbF7NZr/zo2V0fEnr3cI4pkk5tQ7geS2Onou2g/I4cpM1aRJ4O4HkAPhjZdw+AewDgKm4+xOWQFTnkQzxS9c6uc5tbb2NkTRpmuKEucq9LtTWnUuDmmB0e/ZpMf3fn9URt9k0VdZVOezvRdyffcJs5Lhk5B9snSjolaHs9ts1a95fHIvAhFpe1iNwC4BcAvEZVv+zvV9XrAK4DwLXq9lJn/CMzcainbg3VO7vOPe4bnqaMrMkcTLrXiXev22XWsvD9YxsnRdXOcbH0NxCJsJEt6mj7dCzt3aW5xZFvLOXtSzlVNic1Hk2JwyuTWG/+Nma9/3gDQWfK2WdRWYvIBk3l/RlVfeeS70WIYVK9M/+4hOx4EwXmv9dlP2Ur2ka9Q4WekP4ORB0ZnhWIug7bp7toeSjtHWubruLpcF/GvqSHUuOpdutUBzN3vRGzI2/0y6PRdOZHvWRvcAHwZgAfU9V/vdT7EGKzS72jrMk+rH6vizxty5kExZkcZWQClFj622+ntkSNyghxgqjtXuGxtHdCxmHUDet8dnlX0slI2j8P3DL+MmDWNRB3tw/9upP+9j+foX0JloysXwDgFQD+t4j8drvtDar6nsln2upO7S/kLJlU77qbATl79ugEtN+9bpdIOCboobfwpezuDKcUteVsyjipcPTt1Ga9i7q9NHWmqIMhWV4q2xauHyUnO6klZO2cH5F9wbLmpcHhro+2V3tlhliyN/j78y9jBIqaZLJTvaOsyR7Meq/z8cdeD7VXT0mdj0XVBqcTmVumF6PZ1ko11UZti9qb5MQWtStlv0xGNF0lpDwi6TBNrs5xgHce9OdItll7H8k+vcI5gxk5b9obByEL6XYyQ/OBuwVj26ogBT42tehgVJ1If9sdylLjqLNEbXckq1xxRtPekehau/dPl0lK2hd5RNKBoAN5q3tO9Pv95aJ7gxNSOlpxEAJZiX1T4BPT4f379vJ11runXUnY+xt9+ttpp4515pog6nj0HE97x6LupND949GvxyStlY4KOipnT9Bj0fXoegLKmpw3wsiatGTeNEsjlRqPFAzS3KmoOrkcaae2pxDtZW53ErPlHEbUg6JOSDicQAXheYaGZzn7NN6mDWvZSNor0/8dPDH7rwBUvKCg25c
XLIzKWkT+EYC3q+ofZZ2xVHIrNCmCB794CRG57RD1jh3MiOGg97tdouoUZsiWJ18nBR68vyfpoajayBfooupYhzIzM5nds7sTrL1sRF/5gg3breGXsdLeUTHb+5DeH5N0LCUe3Y5+ny1mTUk7hTlHBjmR9RMBfEhEPgzgLQDuVdXjyxuqUthHxI0bAA5Q7xSUNWloK1c59zvv4R2p+cCTx6bavseiZ3vZFnXVt13b6e1Yz2/YcouIOpr69kXtRdfRtLcn7SAad+Rsymm0k1pU0uZLS9Vva/4+pqy3rfv7R1Lge+pn9Dalqj8K4BloxhF+H4DfE5F/ISJ/br+3JiTNn/6GGjhEvetuJvw59x8IVr/fZU+GAkxrr470/FbzXrGo2hu21e2zhJxspzZ/y3ZmMkfMQ6K2Pwfr1TnG/rwiy9u6P6+9HRWgtUKrJprWOrLflKm12V/321BpOw+6XUa7zEKzv/0x22tr3V+2f2aMrKGqKiKfA/A5NE+YuQ3Az7dPlnld3lsRMo2D1Tt2MCMWRd/vPEGPtlfHUuCxCN2PqoEgqk6lv1Pt1Ea4rpgHImon+o2LOra/E2172bFyTsrbvLa/o58S96Pr5hgv5Q13f7TN2mqjDj4mv/06g5w261cDeCWALwL4KQD/RFUfE5EKwO8BoKzJ7LRt1vdh6XonYAcz0iBHer/z26vtIVtBuYio/XHVQDyqhiVHO7L2ZetE3OKIuhcwgtS3I8tOwJJun/alHd2XTnkbiYdt2navcL9d2hO0t0/scuj3SbJzmVV2hJzI+nYAf0NVP21vVNWtiNyd9S6ETOTyEsCB6h2HbhGL8u93U/vejKXAzTZ7ONdQVA24EXO3z9pe+9F1L/SYqG1J9wL2RJ2SdlLkkWjai5QdkfuSNgmJlKB9OYslZe/P6wt54icIIEPWqvpjA/s+tsN7EjLKHU+s8Yefv/x0bN+s9U4AMLImANo26zLud5kzl42SmwIHhqPqSKeyaPrbtD3b0XVUymFnsqioK1eogbThbuv3qbsNcKTs9wrv0/oJSXcibwVtdSyTTtT9cd2iLWlf2L7QR+A4a3LmKCNr0rJuPcieuQwY7lxmz1rmvEErX+chHZaQrTJBVN1tsyJo2JF1op3a2m+LODbmebKog8g4kvZ2UuReytuOpKtE9JyQtCPoVsICO7JW60850HY9gfORNYdtkRjmH5KQI7tFdPOBmyhcrGW3YC/9VGTdpb37ZSCMqs02O6o2kXKqndrpLOb0Fg8j60mijrVPJ9uxvZR4VNQDkq40EHRMzmbdXgb8Zf/jma/N+jTgOGuSYiBIIaQYRqLpKBk9v3eKqr1OZU7UbLdTd5L2hO7JNCnq2LInalvG8YhbEUTTlbUM9HK2JV2FUbSIkbYbOTv7nD+lOq/A7t8Jz0fWpAgq2a59CS6CZqwjIaf2Xd6Wu50C7/a70m62WWW9HuBO1NzKOJb+dtqkLbG6ETYcATudyTJE7craa5+2IuZYNB2kvCv31Za0mG2AI2lf0G5U3bdZVzFZB23XjKxJgWy1Kk7YwjQ4KZhJM5eZ8nY5P+oO9jcvGjumS4ObdQSdyoL0txdxp+SdnCQlJe19RV3B7UBmR9NVJN1duZKuqnFBV050rd6fMrzPVHaP8hEoa3LemH9WQlaMrJ3OZWNzhk9Jhw+JfiAF7sxWFomq/U5lTorcOlat41x5RyJuW+JG2jmiDo6zOpnF0t6JaLqPqC0hV66Y7eWqk7F2+2wxx6LqKiJsmetBHoScNoqKaXACACv3Bg8YmHq0i7b9yVDcQv1rkO52Je50LMuIqm3xxnp/++lvp23akXQ4zehUUTvt2JXddq2epNFF0Wa5i6a9qNmPpKuqF7IdQRtBGzmLLfDuYxyTdR6UNTl7ctuMCCmKoU5lkeg7NhGK07nSicKtqBrjUbXf+zuQsXWc3RktGm175YPtvqi9jmTRtLf32kTRVjRdtZFwGzkbOduSrgcE7cvZj6or79X
9uBhZEzKKCFDVZbWhk3UoerDIlId2OMdFBG2/muVOykairridqNrfbn4AL7oeaKe2e3770bZdPtgeiahzRW2J2bRNSxWmvANRA6ir7aCkK28bYAkaw+nw1SNrEbkK4DcA3NS+z88PzQ5EyBzsUu8qtlmTPSjpXhftjJZIgWuQ5pbocC2nBzgQj6qtKDqWInfT4BJEz7E0eTAGe0pE7bdP+53IzA8AqbZdyruqtv0rXEn3y3FBV6KOmANpexF0TOJDLBlZfw3Ai1T1ERHZAHi/iPyqqn5gwfckZFK9E1HUjKwJ9moO2etel/1YzLHQ32uHjh4zkAJXq6jdsayLquHK1f0RT7rpnuHRTmR2dG29z6CorRnJoqL2e3t3P2japL1ououozU+1DSRdJwRtrzcfRS9uM/olkLVJla/dwax9YPsj7eqm/WEIQxZll3pXVZQ12Z0l73Xhk7Mi6XC/c9lQe7Upby9X/Tb7YR5BVO2Nq/bbsYP0d6SzWNBO7UfXUYnHRR0dmuUJumuf9tPeXjRtRG3apetKByV9IVtX2J6cfXF3Hw1CYed+V1u0zVpEagD3AfhmAG9S1Q9GytwD4B4AuIqbl7wcciaM1Tu7zm3uuIYLyppgr8h6vnvd2LAttAK3H4vp7uyXox3KPCnbWOOdY7P65UTVg73ETfrbj6ZtmXui7nt0e23UVuSdJepI2tuPputqi9pIu9oGkr6otk4UbdaNnJs/oS9uN7quI6nw1SNrAFDVSwB3isjXAfhFEXmOqv6uV+Y6gOsAcK26nZE32ZuxemfXucc/40las82aIL+jT4zJ9zoZudflhltjpDqUmWvqoujYsehT4FOj6qH098i2flkdSUeHZ00QtentbYu6qrbRaLq2BO5H0r6kbUHbcjZiTom7/zgKkLVBVR8SkV8H8GIAvztWnpA5yKp3bZsUIXvZumXqvS6rvTqzJ7jYYraFHHuIR+WtYzgFvndUHRumZQs4SJuruz6DqMUSdFW5bdNG1JUn6pSkL9qyRtBGzraY/WjaToHbws6tdkv2Br8DwGNt5X0cgO8A8C+Xej9CgOn1TqBMgxMA+R19guNKuNfFpB/rkGb3/gZ6afsp8Bmj6mj6u30ftYaMhWlza30BUftp77p9d6E7AAATR0lEQVQVskl5123kPCTpTXXpCBtAJ22/Y1mN/j4z1I6dYsnI+kkA3ta25VQAfk5Vf2XB9yMEmFjvBKCsCYC9Aut173VDncsi7dVm3U6Ba0zsznms15GoOmx3Hk5/pzqURVPiE0TdtUfX22T7dPPaiNpOedvLvqQvqktLyG5UbVLjRsx2RzNb1mZf81Gs3xv8fgDPW+r85DhZ+iEeU+ud+VadohLFVqV79bePkVNurAyv4UDXsKOtD3KvS8k0tX2s93fqXKkUOND9fcai6sH0tyVof8KTbly0nSb3en53+yWjM5n004iaqFqApKhNBG0vX1SXlrTdSLpf7gVtdzSz12vrvudH0kV0MCPEp8Snbm2qy8H9tbiv/vYxcsqNleE1LH8Nu6bBdyV7fLXBj5pTPcFHOpV1614KvDknRlPg3fGRqHqo3dqOum3xw5NydDYzZwiXOyNZKvVd1c0EJ3W97TqS2e3TTtt0m/a+aNupK2c5T9L2ci3bTspG1P569zFS1oSMI8I0OGk46HSjsffyhm0FY6zNNjNsKwN/fPVQujuWdEh1LEu2VVeIR9WplHjlHtNt6ySt3TGOqK3IOiVqv8e3L+qLNrI2aW87mraXN4Gwt9jIZVLQvqi7tuxUB7O10+CEHAMCxZXqxtqXQQrg0JF1kskRt7g9we1Xf7ldD9qrUxOhIN6xbFJU7cnY6f3dbbPGUwOdiJ126u687hSi6c5k00R9IZdd2/SFbLGpLrsoe9O99pLeyGUgaF/OvqhtSXft2icVWW91vnGHhFiwNzgxFCPruYhNkJJ1HJwUeLDPG6edasNOybs5ztpmR892StyTc+xRl3bbtC1qkd1
EfSHbrgOZEXbXqQxtuhz9dl/SRtCuqNO9ws2fNIfjkDVFTRZCBLgorA2drMNB0+C5jI2xjvUEz5ladGjIlnN+BLIdH3ttlU1E1f5+J/3tSbt7gpYlaQicub4FCIZnDYl6U9lRtCvqjfVqR9P2q5H0proRCNrpWAYznGuLypK0PZMZ0+CEZFBBcVPNNDjJT0ceHRO+hcR6gdsp8OZ8iMq7X49E1d6Y6r6jmdX725y70miKvImw+57fnailn+vbmfBkRNRGzmbZT3s3wnYFvZHLpKQ3chnIufak7XeuraEnlgYnZEEYWZOjJNYuPfC0Lf+RmLH2aud8Zj9gideWubvPnxbU7WzmRsz+MK5o+tsab921U7cRdd+GHRd1HRH1pr50enzHRN28upI2KW9H2JacTSczW9DNdre9uu8V7qfBKWtCRpG2lych+zzIY1FikXFG06Caduecc5psunjnNuKEG20H7dDdsvSpdD+6hidtS8CKVDt22E4N8dqpBd0TtMQStekIZiJqM/QqR9R227QdTTeCdiPpmKTrrj27j7QBt0d482ffMrIOKLJBiqyNALgQyprkd/RZ5s3zhmLFygZDvGJP00qeyxMwPKEingJPTYISa6v226VT7dUQdSLtIP1tkgBt+jvV8zs64YndmSxD1Jt2ORVN9+3WW1xp99mCtuXsC9ts7z8CytpFlcImAdKmtwgpujd4VfVjrFNkT4JiXvsy9pCtpnw/ZCuWAjdlsn/gdzTrI2YnHd7JuT8ulv4W6SPsyn5SlllOiVqmidq0TTfrN1CJ4orcGJS0EbRZBgC/Z3izXMh0o4QcA5wUhRhK+i4fmxAlii/vVE/w9Bu55fzU+VAKHO5QrNgwLj9NDm+70z5t1v3e32Klv7uOZe4zqWMdyrqZyTJEfVOb4rbbp03a246mN3IDtWjzakn6ily6srbkHArbn240D8qanDWMrImhiMh612GqqSFenoyTD++wBOyv56TAneOd6Nw6R/ejgbz9ucGd5cpKf0sX5Ftt1eYhHO2c3HZbtYTDs1KdybrhWVb7tCvrG1007cvZyNsM3bIF3XUw855r3f+pGVkTMgplTQxFyHoKqXbuSEStfgRtU9nlwiFbnR2RToGnJkFxyljTiMbkHYypDiJrwO/9bdLffceyPpp2e373PcAbGYep76vVY07btPm5IjeCtmoj55iku9Q47CdutaK26lj/nOs8KGty1ghAWRMA+enI4ogM19KcCN2JfMP2arPc7Ie3P4ya4zOZpaPqYOhWJ2g4gg5mKRO0bdVmXZ30t1hPyeoELf083xdDbdSerI2obTmbqLpvs45L2h5DXbfrBlvQjKwJyaAZusVJUUh+R5+DMTZ72RjdDGWWuBOdy5xjgJE0eL9u9xpPdSyLiTyIqrtzq3Wd/X4zSxmkb6fun03diFoAZ6hWM0NZH1XbM5OlRG1kfVUec9qnr3jCDqNqxabrXKbWZCjaidk88c3/VHO/JFLW5KxhGpwYVkuDTxm21R2TuMUPpbpj+2Lzh1ubwmFdXpnIa1rgVlQNT9r+gzrEjar7nt9tlC3aHNamwuPjqfunZ23a6LqW/hnVfq/vyppetJZtUtROStyKpDfdWOs+vV1LL+favHqfQ25nwvORdUldPUkxMA1ODEdzh7Ci4uiNPvdeNxBF+8+ujqXB+4g5MkVpJJIO5xJXb7/b8UwsgQvQdTDzO5WZqLqPtPvnUJv0t99O3Qt5G3Qms1PfRtQb9OOtr6DtZNbNXtZE01dkGxW0kXO/zf182Gbtw3HWJAIja2IosoPZVPFmlHN6gnudy5pXOy1uSdpbHx1vbe1z5A6/nTrSA7xdNm3VXTRt2qjFFra2k530ncpi6W9nGtHK7umto6LueoHDPmbbpbyb9TaaRiNoW85V+zcNRV1BMr8mno+sSRH4E9mvjSB8ZB05T47iq3xuytyf89uf+xuuhP10tyNca32wXCwVHpUwEMxWFusBPhBVm05ltrh
N+rtqH3trp783VqTdDc/yhmZNFXXfRt1E0xv0kvYFXbXq9lPgjazzWFzWIlID+C0An1HVu5d+P1I2W60WF/aUOidtOxQh+0bWi93rcjua7ThG25e4uw/x9upO3tKXQzwVbu9Pyd1fNj2++1e3rdqkvO35v6V9rUxP8Db9bbdF+9OHVu0wrI0ZN216dsOamcwS9cbrTBaLpo2ojaT7NHjVvu72OR0isn41gI8BuHaA9yIEmFDnRBRXmAYnwBy9wWe51yU7HKVmK7NlnpMOb59hDdjpcHHasJ2hXIi0V8N79aNpwHqPfn/QsSwaUduRtelcFrZVO+K2omnzWneR9NYRuy3s/ulaJpr2Iuu2jXpI1FcGJF1ZsXOdyIoUkQYXkacC+C4APw7gh5Z8L0KA3epcaal5cnwsfa/Lnn4UA2Os/R7h0c5pwRuHqW8MRMrRqDmRAu8krs7xYr1WfnQNBG3VZqx1I2M3qm7S4O4DOLrHXUK79Hct2kXZMVGbXt8b2WIDDdLeG6mikk4JeheWjqzfCOB1AG5NFRCRewDcAwBXcfPCl0POgEl17glP3jCyJgDc2aV2oMx7nf3gjmSZ5sXuCW5vD8ok5Bykv8XdPpgCd4TdD9fqOpk5Qnaj6q7ntxdVX1jzfJtOZab3t5/+NpLuHsgB93GXpo3aF/XGiqY3Ui8iacNishaRuwF8QVXvE5G7UuVU9TqA6wBwrbq9wO6Y5FjYpc5981+4Wf2HwRMyhSLvdQNP6HKmHvXblLvjre2xMl6UHbRNR7arfWwi4ral3fnctGEDnrQ1GlUbcdfWtrp9bnTVRsjdLGPSj42226n7yLr/IlC3om46lB1W1MCykfULAHyPiHwngKsAronI21X17yz4nuS8mVznpP3nJGSPDma73+vsG/uUDmK+EDLT5IGUg/1iLUe2O/KWpMBjPcnNOaMp8LacnQL3O5jZYhbAEbYt6cqKtLt26khU3Qi8XTZjrrtJT9zJTq7I1mmjNqLeoO7S3hupsSSLyVpVfwTAjwBA+23ztRQ1WZJd6pwA2IBpcBI21+ay6r1un7kjWt/HhmPF2qmjbdZme5DWNj8ab79OHQckU+BipcA7cVuzlfUdyEyPcNO7Ox1VX2nFXGHbTHzipb9rE6HD7Ux2aFEDHGdNzh5G1sRw5K1wY+IebLd20+I2fio82gbdrg9G2s66usteL3CBlf7OSoG7qfDa225H1UbiZvhWJ21xJW2Ppa6lGZ5ldyY7pKiBA8laVd8H4H2HeC9CgPw6JwgfBk/Okz1i1I6D3OtypOttS81KFhtjbU8zmn4fd3mwrTraXh35afeb1HdzCb2QAXQpcLNs0t0A3HbqLkJux0pbUXU31ag1ptrvVLaxjtm0Hco2bfu03UZ9KFEDjKzJuSMcukVa5rB1CTjtzvv9UsFY6xFJB8cmJN6su1+SbUk36+qsV57ETQq82dcvN+tuxzKTPTP/65VJdVs/TurbS3/XZvx0Nyxrytxj80BZk7NG2m/ThBQ5N3gOU57AFXs0ZuRpW2Pp7aAj2mDKG4G0o/u7nuD9RCjNr6GOuO0UeLNueoG7k6HYKXC/Dds8wrKPqN22arv3t0l/m57fTfp7uV7fKShrcvYwDU5OjokRdTDGeqf39Nc1XA8k7Zfxiljt1YDbA9xZtyXdireR8rZLkzvDtezpRL0OZV2bN9yo2kx60kx8clhRA/lP5yLkbCkhTc5rKOcaTh6rvTkZbdv7o1FyYh2ISFuDbX5KHHDbq826nQpvtm2dOuKKOkyBA3BS5EbYzXY3qm62ycHT3wZG1uSglHazFeRdUwnXzWtY9hqOrcl6yhSk7QHhcsYpBh+ZaW2fjC9xxNutzaxlzrZA0n1Hs25djHTzUuD2eWLZtjWjaoCyJgfmEE/dmgrT4KQ4Uk/a2vHJWg4Zkk/NaJZcT5wjOh47cq6xzmVmWxVE21tnm91e3axvo/cb8z9vZi+sPUHHUuBrRtXmmgghhJwpQ8O
4BomkxQNBw3rSllM28QU5IuiwV7gG4valDfTpbSBsr7b3G5nbvcCb9T4FbqLqNWFkTc4aSaS8yPlxtL3BxzAP89iTVPo7tn1sWlP7+HCbmWbU3ezLOLbN7lzWr9sRsyvqcEhX3ws8xlopcOBYIuvtif4TEULIHOySHh9re54g4Rw5x6Lu7n38TmcJ/PHUgDsZir+9P84SuBVJN699e7WNnwJfm+OIrOdopyEkQewbOyFnxRQJ7xM1m+2Z57CHbdn4HcpSpKYS9p+052fXYlHsmu3VzfsTQgg5PCumVPdmirAnOC5V1B+2lSLWdm1H3LUfPcOPprVYKZZ6XYQQQmYmpxPZaE/wXZiQvZqScU5lxYKUdmSWQrsnePr85WR1KWtCCCGHZ0DgsdR3ilRKPJUCT8k5lQpfe3y1fz2nT0HfkAgh5CSZ8TY7RdjngKiW8wcRkYcBfHzt61iIJwD44toXsSBz/35/RlXvmPF8UU68zgGsd1M4SJ0DTr7esc5NI6veldYb/OOq+pfXvoglEJHfOtXfDTjq3+9k6xxw1J9LFkf8+51svTvizySLtX6/80mDE0IIIUcKZU0IIYQUTmmyvr72BSzIKf9uwPH+fsd63bnw9yuTY73uHE75dwNW+v2K6mBGCCGEkJDSImtCCCGEeFDWhBBCSOEUIWsRebGIfFxEPikir1/7euZERJ4mIr8uIh8VkY+IyKvXvqa5EZFaRP6XiPzK2tcyhVOtd+dQ54DjrHenWueA86h3a9a51WUtIjWANwH46wCeBeBlIvKsda9qVm4A+GFVfRaAbwPwD0/s9wOAVwP42NoXMYUTr3fnUOeAI6t3J17ngPOod6vVudVlDeBbAXxSVX9fVR8F8LMAXrLyNc2Gqn5WVT/cLj+M5oN+yrpXNR8i8lQA3wXgp9a+lomcbL079ToHHG29O9k6B5x+vVu7zpUg66cA+ANr/QGc0AdsIyJPB/A8AB9c90pm5Y0AXgdEHmtTNmdR7060zgHHWe/Oos4BJ1vvVq1zJcj6LBCRWwD8AoDXqOqX176eORCRuwF8QVXvW/taSMgp1jmA9a50TrHelVDnSpD1ZwA8zVp/arvtZBCRDZrK+zOq+s61r2dGXgDge0TkU2hSei8Skbeve0nZnHS9O+E6BxxvvTvpOgecdL1bvc6tPimKiFwA+ASAb0dTcT8E4OWq+pFVL2wmREQAvA3Ag6r6mrWvZylE5C4Ar1XVu9e+lhxOud6dS50DjqvenXKdA86n3q1V51aPrFX1BoBXAbgXTYeEnzuVytvyAgCvQPNN7Lfbn+9c+6LOnROvd6xzBXLidQ5gvVuU1SNrQgghhAyzemRNCCGEkGEoa0IIIaRwKGtCCCGkcChrQgghpHAoa0IIIaRwKGtCCCGkcChrQgghpHAo60IQkW8RkftF5KqIPL59Huxz1r4ucrqwzpE1YL3bDU6KUhAi8s8BXAXwOAAPqOpPrHxJ5MRhnSNrwHo3Hcq6IETkCpr5gr8K4PmqernyJZETh3WOrAHr3XSYBi+LrwdwC4Bb0XzrJGRpWOfIGrDeTYSRdUGIyLvRPH7tmwA8SVVftfIlkROHdY6sAevddC7WvgDSICKvBPCYqv5nEakB/E8ReZGqvnftayOnCescWQPWu91gZE0IIYQUDtusCSGEkMKhrAkhhJDCoawJIYSQwqGsCSGEkMKhrAkhhJDCoawJIYSQwqGsCSGEkML5/+FM71ufWssrAAAAAElFTkSuQmCC\n",
-      "text/plain": [
-       "<Figure size 576x360 with 3 Axes>"
-      ]
-     },
-     "metadata": {},
-     "output_type": "display_data"
-    }
-   ],
-   "source": [
-    "df = dfield3\n",
-    "nb_iter=20\n",
-    "simu = Simulation(nb_iter=nb_iter, quiet=True)\n",
-    "\n",
-    "fig, axes = plt.subplots(1,3, figsize=(8,5))\n",
-    "\n",
-    "imgs = ()\n",
-    "for i in xrange(df.nb_components):\n",
-    "    img = axes[i].imshow(df.data[i][df.compute_slices], \n",
-    "                   interpolation='bicubic',\n",
-    "                   extent=(0.0, 4.71, 4.71, 0),\n",
-    "                   vmin=0, vmax=(20 if i==2 else 5))\n",
-    "    imgs += (img,)\n",
-    "    axes[i].set_xlabel('x')\n",
-    "    axes[i].set_ylabel('y')\n",
-    "    axes[i].set_title('F{}'.format(i))\n",
-    "\n",
-    "def update():\n",
-    "    fig.suptitle('t={}'.format(simu.t()))\n",
-    "    for (i,img) in enumerate(imgs):\n",
-    "        img.set_data(df.data[i][df.compute_slices])\n",
-    "    \n",
-    "def init():\n",
-    "    simu.initialize()\n",
-    "    df.initialize(f, p=1.0, quiet=True)\n",
-    "    update()\n",
-    "    return imgs\n",
-    "\n",
-    "def animate(i):\n",
-    "    simu.advance()\n",
-    "    df.initialize(f, p=1.0-simu.t(), quiet=True)\n",
-    "    update()\n",
-    "    return imgs\n",
-    "\n",
-    "from matplotlib import animation\n",
-    "from IPython.display import HTML\n",
-    "anim = animation.FuncAnimation(fig, animate, init_func=init,\n",
-    "                               frames=nb_iter, interval=200)\n",
-    "HTML(anim.to_html5_video())"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "In the next notebook we will see how to do this the HySoP way by introducing operators and computational graphs."
-   ]
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 2",
-   "language": "python",
-   "name": "python2"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 2
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython2",
-   "version": "2.7.15+"
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 2
-}
diff --git a/notebooks/01_analytical.ipynb b/notebooks/01_analytical.ipynb
deleted file mode 100644
index bc75bdef4b70bc1aa6919a98d46fe29a94ea020d..0000000000000000000000000000000000000000
--- a/notebooks/01_analytical.ipynb
+++ /dev/null
@@ -1,249 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "# HySoP: Time dependent analytical fields\n",
-    "\n",
-    "In this notebook we will see how to define analytical fields that evolve with time\n",
-    "and how to dump their values to disk at a given dump frequency.\n",
-    "\n",
-    "* __Level:__ easy\n",
-    "* __Recommended lecture:__ 00_introduction notebook\n",
-    "\n",
-    "In this notebook you will learn how to create, setup and apply operators embedded into a computational graph.\n",
-    "\n",
-    "Two operators are discussed here:\n",
-    "* __PythonAnalyticField:__ update a field with a python method at each simulation step.\n",
-    "* __HDF_Writer:__ write a field to disk as an hdf file.\n",
-    "\n",
-    "## Setting up HySoP\n",
-    "\n",
-    "Like in the last notebook we need to import the library and \n",
-    "required types."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 1,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import hysop\n",
-    "from hysop.deps import np\n",
-    "from hysop import Box, Discretization, CartesianTopology, \\\n",
-    "                  Field, Simulation, Problem\n",
-    "from hysop.defaults import TimeParameters\n",
-    "from hysop.operators import HDF_Writer\n",
-    "from hysop.backend.host.python.operator.analytic import PythonAnalyticField"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Domain and topology\n",
-    "First we define a domain, a discretization and finally our topology."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 2,
-   "metadata": {
-    "scrolled": true
-   },
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "======== CartesianTopology::t0 ========\n",
-      "  *on task: 999\n",
-      "  *backend: HostArrayBackend::bk1\n",
-      "  *shape: [1 1]\n",
-      "  *process of coords [0 0] and of ranks cart_rank=0, parent_rank=0\n",
-      "  *cartesian ranks map:\n",
-      "    [0]\n",
-      "  *cartesian to parent comm ranks mapping:\n",
-      "    [[0]]\n",
-      "  *neighbours ranks (left, right) x direction \n",
-      "    [[-1  0]\n",
-      "     [-1  0]]\n",
-      "  *BoxView::d0 | 2D rectangular box domain:\n",
-      "    *origin:  [0. 0.]\n",
-      "    *max_pos: [6.28318531 6.28318531]\n",
-      "    *length:  [6.28318531 6.28318531]\n",
-      "    *left  boundary conditions: [PERIODIC(1), PERIODIC(1)]\n",
-      "    *right boundary conditions: [PERIODIC(1), PERIODIC(1)]\n",
-      "  *CartesianMeshView::m0:\n",
-      "    *proc coords:        [0 0]\n",
-      "    *global start:       [0 0]\n",
-      "    *local resolution:   [64 64]\n",
-      "    *compute resolution: [64 64]\n",
-      "    *ghosts:             [0 0]\n",
-      "    *local boundaries:   left  => [PERIODIC(1) PERIODIC(1)]\n",
-      "                         right => [PERIODIC(1) PERIODIC(1)]\n",
-      "=================================\n",
-      "\n"
-     ]
-    }
-   ],
-   "source": [
-    "# chose the dimensionality of the domain\n",
-    "dim = 2\n",
-    "\n",
-    "# define the domain\n",
-    "box = Box(length=(2*np.pi,)*dim)\n",
-    "\n",
-    "# discretization parameters (here we choose 64^dim, without ghosts)\n",
-    "discretization = Discretization((65,)*dim)\n",
-    "\n",
-    "# finally create a cartesian topology\n",
-    "topo = CartesianTopology(domain=box, discretization=discretization)\n",
-    "print topo"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Define the scalar field and time parameters\n",
-    "Here we will define the field that will be updated at each time step."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 3,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "t,dt = TimeParameters(dtype=np.float32)\n",
-    "f0   = Field(name='F0', domain=box, dtype=np.float32)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Define all required operators\n",
-    "### Python analytic operator"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 4,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "def compute_scalar(data, coords, t):\n",
-    "    data[0][...] = (1.0/(1.0+0.1*t()))\n",
-    "    for x in coords[0]:\n",
-    "        data[0][...] *= np.cos(x-t())\n",
-    "        \n",
-    "# Analytic operator\n",
-    "op0 = PythonAnalyticField(name='analytic', field=f0,\n",
-    "                    formula=compute_scalar,\n",
-    "                    variables={f0: topo},\n",
-    "                    extra_input_kwds={'t':t}) # <= here we pass all extra keyword arguments required by formula"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### The HDF writer operator:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 5,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# HDF Writer\n",
-    "# by default fields will be dumped to the notebook interactive folder\n",
-    "op1 = HDF_Writer(name='dumped_scalar_field', \n",
-    "                 variables={f0:topo})"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "\n",
-    "## Build the computational operator graph\n",
-    "Build computational operator graph:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 6,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# Create a problem and build it\n",
-    "problem = Problem()\n",
-    "problem.insert(op0, op1)\n",
-    "problem.build()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Solve and finalize"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 7,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# Create a simulation and solve the problem \n",
-    "# (do not forget to specify the time parameter here)\n",
-    "simu = Simulation(start=0.0, end=2*np.pi, \n",
-    "                  nb_iter=100, t=t, dt=dt)\n",
-    "\n",
-    "# Finally solve the problem \n",
-    "problem.solve(simu)\n",
-    "\n",
-    "# Finalize\n",
-    "problem.finalize()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "You can use paraview to visualize the evolution of our scalar field by opening the 'dumped_scalar_field.xmf' file contained in the interactive folder of your current working directory. \n",
-    "\n",
-    "This is what the result should look like:\n",
-    "\n",
-    "<video controls src=\"videos/analytic.ogv\" />"
-   ]
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 2",
-   "language": "python",
-   "name": "python2"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 2
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython2",
-   "version": "2.7.15rc1"
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 2
-}
diff --git a/notebooks/videos/analytic.ogv b/notebooks/videos/analytic.ogv
deleted file mode 100644
index 81a0a9296ca8561fdbf48fadb02fafe8dff39b62..0000000000000000000000000000000000000000
Binary files a/notebooks/videos/analytic.ogv and /dev/null differ
diff --git a/requirements.txt b/requirements.txt
index 163c660c86bd6f1c282d9f75075352fec69dec0b..cd3fd9be3d99ec6c723e81fe9a160ac9dc8dacad 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -6,13 +6,11 @@ h5py
 psutil
 py-cpuinfo
 gmpy2
-subprocess32
 editdistance
 portalocker
 tee
 ansicolors
 argparse_color_formatter
-primefac
 pybind11
 pyopencl
 pyfftw
@@ -20,10 +18,9 @@ mpi4py
 matplotlib
 numba
 configparser
-backports.tempfile
-backports.weakref
 networkx
 pyvis
 zarr
 numcodecs
 jsonpickle
+pytest
diff --git a/setup.py.in b/setup.py.in
index 0c61ea8347546f7c1e8e65ff5c3cf2e43354aa3b..6edf1a4090d7b5d19ecb2e28487fb9bd1fcaf702 100644
--- a/setup.py.in
+++ b/setup.py.in
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
 # -*- coding: utf-8 -*-
 """setup.py file for @PYPACKAGE_NAME@
 
@@ -18,12 +18,6 @@ import sys
 sys.path.append('@CMAKE_BINARY_DIR@/')
 import sort_f90
 
-# --- Check if c++ interface is enabled ---
-enable_cpp = "@USE_CXX@"
-if enable_cpp:
-    swig_executable = find_executable("@SWIG_EXECUTABLE@")
-
-
 def parseCMakeVar(var, sep=';'):
     """
     Post-process cmake list-like variables.
@@ -56,7 +50,7 @@ def parseCMakeDefines(var):
     
     # regex to match compiler defines like -DMACRO_NAME or
     # -DMACRO_NAME = MACRO_VALUE
-    p = re.compile('\s*(?:-D)?\s*(\w+)(?:\s*=\s*(\w+))?\s*')
+    p = re.compile(r'\s*(?:-D)?\s*(\w+)(?:\s*=\s*(\w+))?\s*')
 
     res = list()
     for d in defines:
@@ -64,7 +58,7 @@ def parseCMakeDefines(var):
         if m:
             res.append(m.group(1,2))
         else:
-            print "\tWarning: Could not extract cmake define from '", d, "'."
+            print("\tWarning: Could not extract cmake define from '", d, "'.")
     return res
 
 # --- external libraries to be linked with ---
@@ -162,83 +156,10 @@ def create_fortran_extension(name, pyf_file=None, src_dirs=None, sources=None,
     return ext_fort
 
 
-def create_swig_extension(name, inc_dirs, src_dirs=None, sources=None):
-    """Create a python module from C++ files, using swig
-
-    Parameters
-    ----------
-    name: string
-        module name
-    inc_dirs: list
-        list of directories for includes (-I ...), absolute path
-        containing headers (.hpp) and swig files (.i)
-    sources: list, optional
-         all c++ files that must be taken into account
-    src_dirs: list, optional
-         all directories containing fortran sources (absolute path)
-         that must be taken into account. Warning: no recursive scan
-         in src_dirs.
-
-    Notes
-    -----
-    * either sources, src_dirs or inc_dirs is required but all
-    can be used simultaneously.
-    * only .cpp files are taken into account.
-    * main config file for swig is assumed to be
-      name.i in CMAKE_SOURCE_DIR/swig/
-
-    """
-    swig_dir         = os.path.join('@CMAKE_SOURCE_DIR@', 'swig')
-    swig_config_file = os.path.join(swig_dir, name + '.i')
-
-    include_dirs = set(inc_dirs)
-    if sources is None:
-        sources = []
-        assert (src_dirs is not None) or (inc_dirs is not None)
-    if(src_dirs is None):
-        assert(inc_dirs is not None)
-        for idir in inc_dirs:
-            for root, dirnames, filenames in os.walk(idir):
-                for filename in fnmatch.filter(filenames, '*.cpp'):
-                    sources.append(os.path.join(root, filename))
-    else:
-        for sdir in src_dirs:
-            sources += glob.glob(os.path.join(sdir, '*.cpp'))
-
-    sources.insert(0, swig_config_file)
-    include_dirs = list(include_dirs)
-
-    name = 'hysop._' + name
-    swig_opts = ['-I' + swig_dir,
-                 '-O', '-Wextra', '-Werror',
-                 '-c++', '-extranative', '-safecstrings']
-
-    extern_includes = parseCMakeVar("@CXX_EXT_INCLUDES@")
-    for exti in extern_includes:
-        include_dirs.append(exti)
-
-    libraries = parseCMakeVar("@CXX_EXT_LIBS@")
-    library_dirs = parseCMakeVar("@CXX_EXT_LIB_DIRS@")
-    extra_compile_args = parseCMakeVar("@CXX_FLAGS@", sep=' ')
-    extra_link_args = parseCMakeVar("@CXX_LINKER_FLAGS@", sep=' ')
-    define_macros = parseCMakeDefines("@CXX_EXTRA_DEFINES@")
-    swig_ext = Extension(name, sources=sources, language='c++',
-                         swig_opts=swig_opts,
-                         include_dirs=include_dirs,
-                         library_dirs=library_dirs,
-                         libraries=libraries,
-                         define_macros=define_macros,
-                         runtime_library_dirs=library_dirs,
-                         extra_compile_args=extra_compile_args,
-                         extra_link_args=extra_link_args)
-
-    return swig_ext
-
-
 # ------------ Set list of packages required to build the module -------------
 # List of modules (directories) to be included
-with_test = "@WITH_TESTS@" is "ON"
-with_opencl = "@WITH_OPENCL@" is "ON"
+with_test = "@WITH_TESTS@" == "ON"
+with_opencl = "@WITH_OPENCL@" == "ON"
 exclude=[]
 if not with_test:
    exclude.append('*tests*')
@@ -257,7 +178,7 @@ ext_modules = []
 enable_fortran = "@USE_FORTRAN@"
 ext = {}
 
-if enable_fortran is "ON":
+if enable_fortran == "ON":
     fortran_root = \
         '@CMAKE_SOURCE_DIR@/hysop'
     hysop_libdir = ['@CMAKE_BINARY_DIR@/src']
@@ -271,26 +192,26 @@ if enable_fortran is "ON":
     ####fortran_src.add('f2py/parameters.f90')
     # -- fftw fortran sources --
     withfftw = "@WITH_FFTW@"
-    if withfftw is "ON":
+    if withfftw == "ON":
         subdirs.append(os.path.join('numerics', 'fftw_f'))
         fftwdirs = parseCMakeVar('@FFTWLIB@')
         hysop_libdir.extend(fftwdirs)
 
     # -- scales sources --
     withscales = '@WITH_SCALES@'
-    if withscales is "ON":
+    if withscales == "ON":
         subdirs.append('scales_f')
         #fortran_src.add('f2py/scales2py.f90')
 
 
     # -- other fortran sources --
     withextras = '@WITH_EXTRAS@'
-    if withextras is "ON":
+    if withextras == "ON":
         subdirs.append(os.path.join('numerics', 'extras_f'))
 
     # -- set full path to fortran sources --
     fortran_src = list(fortran_src)
-    for i in xrange(len(fortran_src)):
+    for i in range(len(fortran_src)):
         fortran_src[i] = os.path.join(fortran_root, fortran_src[i])
 
     precision_file = os.path.join('@GENERATED_FORTRAN_FILES_DIR@',
@@ -324,28 +245,10 @@ if enable_fortran is "ON":
     for ex in ext:
         ext_modules.append(ext[ex])
 
-# --- C++ files and swig interface --
-
-if enable_cpp is "ON":
-    # path to .i files
-    swig_include_dirs = [os.path.join('@CMAKE_SOURCE_DIR@','swig')]
-
-    cpp_include_dirs = ['src/fftw','src/hysop++/src']
-    for d in cpp_include_dirs:
-        swig_include_dirs.append(os.path.join('@CMAKE_SOURCE_DIR@', d))
-
-    ext = {}
-    cpp2hysop = "@CPP_2_HYSOP@"
-    ext[cpp2hysop] = create_swig_extension(name=cpp2hysop,
-                                           inc_dirs=swig_include_dirs)
-    for ex in ext:
-        ext_modules.append(ext[ex])
-
 data_files = []
 
 descr = 'Hybrid Computation with Particles.'
 authors = 'G.H Cottet, J.M Etancelin, J.B Keck, C.Mimeau, F.Pérignon, C. Picard'
-# authors = 'HySoP development team'
 config = Configuration(
     name='@PACKAGE_NAME@',
     version='@HYSOP_VERSION@',
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index a3d17a69c40c655407f1087e02f7df41d14dd11a..45641959b96a8fb61587fb9ca5e384b939dfbf47 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -48,8 +48,8 @@ set(${HYSOP_LIBRARY_NAME}_HDRS ${HDRS_FILES})
 
 # ============= Generates HySoPConfig.hpp =============
 # The file HYSOP_LIBRARY_NAME_defines.hpp will be generated from config.hpp.cmake;
-if(EXISTS ${CMAKE_SOURCE_DIR}/config.hpp.cmake)
-  configure_file(${CMAKE_SOURCE_DIR}/config.hpp.cmake ${HYSOP_LIBRARY_NAME}_defines.hpp)
+if(EXISTS ${CMAKE_SOURCE_DIR}/cmake/config.hpp.cmake)
+  configure_file(${CMAKE_SOURCE_DIR}/cmake/config.hpp.cmake ${HYSOP_LIBRARY_NAME}_defines.hpp)
   include_directories(${CMAKE_BINARY_DIR})
 endif()
 
@@ -67,34 +67,3 @@ include_directories(${HYSOP_INCLUDE_DIRECTORIES})
 
 add_library(${HYSOP_LIBRARY_NAME} STATIC ${${HYSOP_LIBRARY_NAME}_SRCS})
 target_link_libraries(${HYSOP_LIBRARY_NAME} PRIVATE ${HYSOP_LINK_LIBRARIES})
-
-#install(TARGETS ${HYSOP_LIBRARY_NAME}
-#  LIBRARY DESTINATION ${HYSOP_PYTHON_INSTALL_DIR}/${PACKAGE_NAME})
-
-# ============= Create an executable linked with libhysop =============
-# This part is optional and only useful to test libhysop in a
-# way that does not depends on python.
-# At the time it only includes fftw tests.
-if(WITH_FORTRAN_TESTS)
-  # if(WITH_FFTW)
-  #   # Set the name of a executable file that will be linked with libHYSOP_LIBRARY_NAME.
-  #   # Useful to test libhysop in a way that does not depend on python.
-  #   set(EXE_NAME ${HYSOP_LIBRARY_NAME}Run)
-  #   # A main file to create an executable (test purpose)
-  #   # Any files in these dirs will be used to create HySoP exec (linked with libhysop)
-  #   set(${EXE_NAME}_SRCDIRS main)
-  #   # Source and header files list, to generate a working executable based on libhysop.
-  #   get_sources("${${EXE_NAME}_SRCDIRS}")
-  #   get_headers("${${EXE_NAME}_SRCDIRS}")
-  #   set(${EXE_NAME}_SRCS ${SOURCES_FILES})
-  #   set(${EXE_NAME}_HDRS ${HDRS_FILES})
-  #   list(APPEND ${EXE_NAME}_SRC ${${EXE_NAME}_HDRS})
-  #   include_directories(${${EXE_NAME}_HDRS})
-  #   add_executable(${EXE_NAME} ${${EXE_NAME}_SRCS})
-  #   add_dependencies(${EXE_NAME} ${HYSOP_LIBRARY_NAME})
-    
-  #   # libs to link with EXE_NAME
-  #   target_link_libraries(${EXE_NAME} ${HYSOP_LIBRARY_NAME})
-  #   target_link_libraries(${EXE_NAME} ${HYSOP_LINK_LIBRARIES})
-  # endif()
-endif()
diff --git a/src/hysop++/main/diffSolver.cpp b/src/hysop++/main/diffSolver.cpp
deleted file mode 100644
index 3d0316e3f01076e7a803825272a94ae64a2bb1f0..0000000000000000000000000000000000000000
--- a/src/hysop++/main/diffSolver.cpp
+++ /dev/null
@@ -1,237 +0,0 @@
-
-#include <cstdlib>
-
-#include "domain/domain.h"
-#include "solver/fftDiffSolver.h"
-#include "data/multi_array/multi_array.h"
-#include "utils/constants.h"
-#include "fft/extension.h"
-#include "maths/quad_maths.h"
-
-using namespace hysop;
-using namespace hysop::domain;
-
-static constexpr std::size_t nExtensions = 4 ;
-static constexpr std::size_t nExtensionsPair = 7 ;
-static constexpr fft::Extension ext[nExtensions] = 
-{ fft::Extension::NONE, fft::Extension::ODD, fft::Extension::EVEN, fft::Extension::PERIODIC };
-static constexpr std::pair<fft::Extension,fft::Extension> pext[nExtensionsPair] {
-        std::make_pair(ext[3],ext[3]), //periodic-periodic
-        std::make_pair(ext[3],ext[3]), //periodic-periodic
-        std::make_pair(ext[2],ext[1]), //even-odd
-        std::make_pair(ext[1],ext[2]), //odd-even
-        std::make_pair(ext[2],ext[2]), //even-even
-        std::make_pair(ext[1],ext[1]), //odd-odd
-        std::make_pair(ext[0],ext[0]), //none-none
-};
-
-#ifdef HAS_QUADMATHS
-    static constexpr __float128  freqs[6] = { 1.0Q, 1.0Q, 0.75Q, 0.75Q, 0.50Q, 0.50Q };
-#else
-    static constexpr long double freqs[6] = { 1.0L, 1.0L, 0.75L, 0.75L, 0.50L, 0.50L };
-#endif
-
-template <typename T>
-std::function<T(T)> func(std::size_t k) {
-    switch(k) {
-        case 0: return [=](T x) {return std::cos(T(freqs[0])*x);};
-        case 1: return [=](T x) {return std::sin(T(freqs[1])*x);};
-        case 2: return [=](T x) {return std::cos(T(freqs[2])*x);};
-        case 3: return [=](T x) {return std::sin(T(freqs[3])*x);};
-        case 4: return [=](T x) {return std::cos(T(freqs[4])*x);};
-        case 5: return [=](T x) {return std::sin(T(freqs[5])*x);};
-        default: return[=](T x) { return T(1); };
-    }
-}
-
-template <typename T>
-std::function<T(T)> derivative(std::size_t k, int order) {
-    bool even = (k%2==0);
-    std::size_t p, offset;
-    T sign, coeff;
-    if(k>5) {
-        if(order != 0)
-            throw std::runtime_error("Non zero order !");
-        return func<T>(k);
-    }
-    else if(even) { /* cos func */
-        p      = (order%2==0 ? k : k+1);
-        sign  = std::pow(T(-1),(order+1)/2);
-        coeff = std::pow(freqs[k], order);
-    }
-    else { /* sin func */
-        p     = (order%2==0 ? k : k-1); 
-        sign  = std::pow(T(-1),order/2);
-        coeff = std::pow(freqs[k], order);
-    }
-    return [=](T x) { return sign*coeff*(func<T>(p)(x)); };
-}
-    
-template <typename T, std::size_t Dim, bool verbose=false> 
-void test(std::size_t p_maxOrder, bool includePeriodicBds=false) {
-    typename Shape<Dim>::type shape;
-    typename Domain<T,Dim>::DomainSize domainSize;
-    Domain<T,Dim> ref, inBuffer, outBuffer;
-
-    Domain<T,Dim>& in  = inBuffer;
-    Domain<T,Dim>& out = outBuffer;
-
-    std::array<int,Dim> order;
-
-    shape.fill(8);
-    domainSize.fill(2*hysop::constants::pi);
-
-    T eps = std::numeric_limits<T>::epsilon();
-    const std::size_t N = std::accumulate(shape.begin(), shape.end(), 1, std::multiplies<std::size_t>());
-    
-    ref.resize(domainSize).reshape(shape);
-    in  = ref;
-    out = ref;
-   
-    typename Shape<Dim>::type maxOrder, testCases;
-    maxOrder.fill(p_maxOrder+1);
-    testCases.fill(nExtensionsPair);
-    Index<Dim> orderId(maxOrder);
-    Index<Dim> testCaseId;
-    std::size_t testCaseCount;
-    while(!(++orderId).atMaxId()) {
-        std::cout << "  ::Order::" << orderId.ids() << (verbose ? "\n" : "");
-    
-        std::array<T,3> meanDists;
-        meanDists.fill(0);
-        testCaseId.reset(testCases);
-        testCaseCount = testCaseId.maxId();
-        while(!testCaseId.atMaxId()) { 
-            std::copy(orderId.ids().begin(),orderId.ids().end(), order.begin());
-
-            /* generate transform configuration */
-            std::array<std::pair<fft::Extension,fft::Extension>, Dim> extConfig;
-            for (std::size_t k=0; k<Dim; k++) {
-                std::size_t id = testCaseId[k];
-                extConfig[k] = pext[id];
-                if(pext[id].first==fft::Extension::NONE)
-                    order[k] = 0;
-            }
-            fft::FftDomainConfiguration<Dim> domainConfig(extConfig, includePeriodicBds);
-            
-            const std::size_t orderSum = std::accumulate(order.begin(), order.end(), 0);
-            if(orderSum == 0) {
-                testCaseCount--;
-                ++testCaseId;
-                continue;
-            }
-            T orderPow = std::pow(T(10),T(orderSum));
-            if(std::is_same<T,long double>::value) /* just in case long doubles are not hardware supported... */
-                orderPow *= 1e3;
-            const auto criteria = std::make_tuple(orderPow*eps*N,orderPow*eps*sqrt(N),2*orderPow*eps);
-
-            const auto f = [&](const typename Domain<T,Dim>::SpaceVariable &x) { 
-                T val = func<T>(testCaseId[0])(x[0]);
-                for (std::size_t d=1; d < Dim; d++)
-                    val *= func<T>(testCaseId[d])(x[d]);
-                return val;
-            };
-            const auto d = [&](const typename Domain<T,Dim>::SpaceVariable &x) { 
-                T val = derivative<T>(testCaseId[0],order[0])(x[0]);
-                for (std::size_t d=1; d < Dim; d++)
-                    val *= derivative<T>(testCaseId[d],order[d])(x[d]);
-                return val;
-            };
-            {
-                ref.resetDomainConfiguration(domainConfig.boundariesConfiguration());
-                in  = ref;
-                out = ref;
-
-                in.apply(f);
-                ref.apply(d);
-                out.data().apply([](T& v){ v=T(0);});
-            }
-
-            solver::FftDiffSolver<T,Dim> solver(domainSize, domainConfig, FFTW_MEASURE, includePeriodicBds, includePeriodicBds);
-            solver.apply(in.data(), out.data(), order);
-
-            std::stringstream ss;
-            ss << "[";
-            for (std::size_t k=0; k<Dim-1; k++) 
-                ss << extConfig[k].first << "/" << extConfig[k].second << ",";
-            ss << extConfig[Dim-1].first << "/" << extConfig[Dim-1].second;
-            ss << "]";
-
-            const auto dist = out.distance(ref);
-            const bool pass =      (std::get<0>(dist) < std::get<0>(criteria)) 
-                && (std::get<1>(dist) < std::get<1>(criteria))
-                && (std::get<2>(dist) < std::get<2>(criteria));
-
-            if((pass && verbose) || !pass) {
-                std::cout << (pass ? GREEN : RED);
-                std::cout << "\t" << std::setw(Dim*15) << ss.str() << " => " << (pass ? "OK" : "KO") 
-                    << "  " << RESET << std::scientific << std::setprecision(2) << dist << std::endl;
-            }
-            if(!pass) {
-                //in.print("IN");
-                //ref.print("REF");
-                //out.print("OUT");
-                std::cout << criteria << std::endl;
-                exit(EXIT_FAILURE);
-            }
-
-            meanDists[0] += std::get<0>(dist);
-            meanDists[1] += std::get<1>(dist);
-            meanDists[2] += std::get<2>(dist);
-
-            ++testCaseId;
-        }
-        for (std::size_t k = 0; k < 3; k++)
-            meanDists[k] /= T(testCaseCount);
-        std::cout << "=> mean distances over " << std::scientific << std::setprecision(1) << std::setw(4)
-            << testCaseCount << " testcases: " << meanDists;
-        for (std::size_t k = 0; k < 3; k++)
-            meanDists[k] = std::round(meanDists[k]/eps);
-        std::cout << " ~= " <<  std::fixed << std::setprecision(0) << meanDists << " eps" << std::endl; 
-    }
-}
-    
-int main(int argc, const char *argv[]) {
-
-#ifdef FFTW_HAS_FFTW3F
-    std::cout << "== TEST 1D - float       ==" << std::endl;
-    test<float,1,false>(5);
-    std::cout << "== TEST 2D - float       ==" << std::endl;
-    test<float,2,false>(3);
-    std::cout << "== TEST 3D - float       ==" << std::endl;
-    test<float,3,false>(1);
-    std::cout << std::endl;
-#endif
-    
-#ifdef FFTW_HAS_FFTW3D
-    std::cout << "== TEST 1D - double       ==" << std::endl;
-    test<double,1,false>(5);
-    std::cout << "== TEST 2D - double       ==" << std::endl;
-    test<double,2,false>(3);
-    std::cout << "== TEST 3D - double       ==" << std::endl;
-    test<double,3,false>(1);
-    std::cout << std::endl;
-#endif
-    
-#ifdef FFTW_HAS_FFTW3L
-    std::cout << "== TEST 1D - long double       ==" << std::endl;
-    test<long double,1,false>(5);
-    std::cout << "== TEST 2D - long double       ==" << std::endl;
-    test<long double,2,false>(3);
-    std::cout << "== TEST 3D - long double       ==" << std::endl;
-    test<long double,3,false>(1);
-    std::cout << std::endl;
-#endif
-
-#ifdef FFTW_HAS_FFTW3Q
-    std::cout << "== TEST 1D - __float128       ==" << std::endl;
-    test<__float128,1,false>(5);
-    std::cout << "== TEST 2D - __float128       ==" << std::endl;
-    test<__float128,2,false>(3);
-    std::cout << "== TEST 3D - __float128       ==" << std::endl;
-    test<__float128,3,false>(1);
-    std::cout << std::endl;
-#endif
-
-    return EXIT_SUCCESS;
-}
diff --git a/src/hysop++/main/planner.cpp b/src/hysop++/main/planner.cpp
deleted file mode 100644
index 287c6713c1b31dcbd582dad1bb82ae5f1a5691e7..0000000000000000000000000000000000000000
--- a/src/hysop++/main/planner.cpp
+++ /dev/null
@@ -1,197 +0,0 @@
-
-#include "maths/quad_maths.h"
-
-#include "data/multi_array/multi_array.h"
-#include "domain/domain.h"
-#include "utils/constants.h"
-#include "fft/planner.h"
-#include "fft/extension.h"
-
-
-using namespace hysop;
-using namespace hysop::domain;
-
-static constexpr std::size_t nExtensions = 4 ;
-static constexpr std::size_t nExtensionsPair = 6 ;
-static constexpr fft::Extension ext[nExtensions] = 
-{ fft::Extension::NONE, fft::Extension::ODD, fft::Extension::EVEN, fft::Extension::PERIODIC };
-static constexpr std::pair<fft::Extension,fft::Extension> pext[nExtensionsPair] {
-    std::make_pair(ext[0],ext[0]), //none-none
-        std::make_pair(ext[1],ext[1]), //odd-odd
-        std::make_pair(ext[1],ext[2]), //odd-even
-        std::make_pair(ext[1],ext[2]), //even-odd
-        std::make_pair(ext[2],ext[2]), //even-even
-        std::make_pair(ext[3],ext[3]), //periodic-periodic
-};
-
-template <typename T, std::size_t Dim, bool verbose=false> 
-void test(bool inplace, bool includePeriodicBds);
-
-
-int main(int argc, const char *argv[]) {
-
-#ifdef FFTW_HAS_FFTW3F
-    std::cout << std::endl; 
-    std::cout << "== TEST 1D - float       ==\t";
-    test<float,1>(false,true);
-    std::cout << "== TEST 2D - float       ==\t";
-    test<float,2>(false,true);
-    std::cout << "== TEST 3D - float       ==\t";
-    test<float,3>(false,true);
-#endif
-
-#ifdef FFTW_HAS_FFTW3D
-    std::cout << std::endl; 
-    std::cout << "== TEST 1D - double      ==\t";
-    test<double,1>(true,true);
-    std::cout << "== TEST 2D - double      ==\t";
-    test<double,2>(true,true);
-    std::cout << "== TEST 3D - double      ==\t";
-    test<double,3>(true,true);
-#endif
-
-#ifdef FFTW_HAS_FFTW3L
-    std::cout << std::endl; 
-    std::cout << "== TEST 1D - long double ==\t";
-    test<long double,1>(false,false);
-    std::cout << "== TEST 2D - long double ==\t";
-    test<long double,2>(false,false);
-    std::cout << "== TEST 3D - long double ==\t";
-    test<long double,3>(false,false);
-#endif
-
-#ifdef FFTW_HAS_FFTW3Q
-    std::cout << std::endl; 
-    std::cout << "== TEST 1D - __float128 ==\t";
-    test<__float128,1>(false,false);
-    std::cout << "== TEST 2D - __float128 ==\t";
-    test<__float128,2>(false,false);
-    std::cout << "== TEST 3D - __float128 ==\t";
-    test<__float128,3>(false,false);
-#endif
-    
-    return 0;
-}
-
-template <typename T, std::size_t Dim, bool verbose> 
-void test(bool inplace, bool includePeriodicBds) {
-    typename Shape<Dim>::type shape;
-    typename Domain<T,Dim>::DomainSize domainSize;
-    Domain<T,Dim> ref, inBuffer, outBuffer;
-
-    Domain<T,Dim>& in  = inBuffer;
-    Domain<T,Dim>& out = (inplace ? inBuffer : outBuffer);
-
-    fft::Planner<T,Dim> planner;
-    std::array<int,Dim> order;
-
-    const std::size_t nPoints = 16;
-    shape.fill(nPoints);
-    domainSize.fill(1.0);
-    order.fill(2);
-
-    const T eps = std::numeric_limits<T>::epsilon();
-    const std::size_t N = std::accumulate(shape.begin(), shape.end(), 1, std::multiplies<std::size_t>());
-    const auto criteria = std::make_tuple(50*eps*N,sqrt(50)*eps*N,700*eps);
-    
-    ref.resize(domainSize).reshape(shape);
-    in  = ref;
-    out = ref;
-   
-    typename Shape<Dim>::type testCases;
-    testCases.fill(nExtensionsPair);
-    Index<Dim> testCaseId(testCases);
-    std::array<T,3> meanDists{0};
-    while(!testCaseId.atMaxId()) { 
-        /* generate transform configuration */
-        std::array<std::pair<fft::Extension,fft::Extension>, Dim> extConfig;
-        for (std::size_t k=0; k<Dim; k++) {
-            std::size_t id = testCaseId[k];
-            extConfig[k] = pext[id];
-        }
-        fft::FftDomainConfiguration<Dim> domainConfig(extConfig, includePeriodicBds);
-    
-        const auto f = [&](T &val, const hysop::Index<Dim>& idx) { 
-            val = static_cast<T>(rand())/static_cast<T>(RAND_MAX);
-            for (std::size_t d=0; d<Dim; d++) {
-                if(idx[d]==0) {
-                    if(extConfig[d].first == fft::Extension::ODD) {
-                        val=T(0); 
-                        return;
-                    }
-                    else if(extConfig[d].first == fft::Extension::PERIODIC)
-                        val=T(0.42);
-                }
-                else if(std::size_t(idx[d]) == idx.dim()[d]-1) {
-                    if(extConfig[d].second == fft::Extension::ODD) {
-                        val=T(0);
-                        return;
-                    }
-                    else if(extConfig[d].second == fft::Extension::PERIODIC && includePeriodicBds)
-                        val=T(0.42);
-                }
-            }
-        };
-        
-        if(includePeriodicBds)
-            ref.resetDomainConfiguration(domainConfig.boundariesConfiguration()); 
-        
-        /* fill reference and copy into input buffer */
-        ref.data().apply(f);
-        in = ref;
-       
-        /* plan transforms and check if planning succeeded */
-        bool status = planner.plan(in.data(), out.data(), domainConfig, order, domainSize, FFTW_MEASURE, 
-                    includePeriodicBds, includePeriodicBds);
-        assert(status || testCaseId()==0);
-    
-        /* execute forward and backward inplace transforms */
-        planner.executeForwardTransform();
-        {
-            if(planner.transformType() == fft::FftTransformType::FFT_R2C)
-                planner.transformedComplexData().apply([&](std::complex<T>& val) { val /= planner.normalisationFactor(); }); 
-            else if(planner.transformType() == fft::FftTransformType::FFT_R2R)
-                planner.transformedRealData().apply([&](T& val) { val /= planner.normalisationFactor(); }); 
-        }
-        planner.executeBackwardTransform();
-        
-        std::stringstream ss;
-        ss << "[";
-        for (std::size_t k=0; k<Dim-1; k++) 
-            ss << extConfig[k].first << "/" << extConfig[k].second << ",";
-        ss << extConfig[Dim-1].first << "/" << extConfig[Dim-1].second;
-        ss << "]";
-
-        const auto dist = out.distance(ref);
-        const bool pass =      (std::get<0>(dist) < std::get<0>(criteria)) 
-                            && (std::get<1>(dist) < std::get<1>(criteria))
-                            && (std::get<2>(dist) < std::get<2>(criteria));
-
-        if((pass && verbose) || !pass) {
-            std::cout << (pass ? GREEN : RED);
-            std::cout << "\t" << std::setw(Dim*15) << ss.str() << " => " << (pass ? "OK" : "KO") 
-                << "  " << RESET << std::scientific << std::setprecision(2) << dist << std::endl;
-        }
-        if(!pass) {
-            if(!inplace)
-                in.print("IN");
-            ref.print("REF");
-            out.print("OUT");
-            std::cout << planner << std::endl;
-            exit(EXIT_FAILURE);
-        }
-
-        meanDists[0] += std::get<0>(dist);
-        meanDists[1] += std::get<1>(dist);
-        meanDists[2] += std::get<2>(dist);
-
-        ++testCaseId;
-    }
-    for (std::size_t k = 0; k < 3; k++)
-        meanDists[k] /= T(testCaseId.maxId());
-    std::cout << "Mean distances over " << std::scientific << std::setprecision(1) << std::setw(4)
-         << testCaseId.maxId() << " testcases: " << meanDists;
-    for (std::size_t k = 0; k < 3; k++)
-        meanDists[k] = std::round(meanDists[k]/eps);
-    std::cout << " ~= " << std::fixed << std::setprecision(0) << meanDists << " eps" << std::endl; 
-}
diff --git a/src/hysop++/main/poissonSolver.cpp b/src/hysop++/main/poissonSolver.cpp
deleted file mode 100644
index e95a8cd50e8dac1f0ab9caa0170ee72494232c26..0000000000000000000000000000000000000000
--- a/src/hysop++/main/poissonSolver.cpp
+++ /dev/null
@@ -1,213 +0,0 @@
-
-#include <cstdlib>
-
-#include "domain/domain.h"
-#include "solver/fftPoissonSolver.h"
-#include "data/multi_array/multi_array.h"
-#include "utils/constants.h"
-#include "domain/boundary.h"
-
-using namespace hysop;
-using namespace hysop::domain;
-
-static constexpr std::size_t nBoundaries = 4;
-static constexpr std::size_t nBoundaryPairs = 7;
-static constexpr domain::Boundary bds[nBoundaries] = 
-{ domain::Boundary::NONE, domain::Boundary::HOMOGENEOUS_NEUMANN, domain::Boundary::HOMOGENEOUS_DIRICHLET, domain::Boundary::PERIODIC };
-static constexpr std::pair<domain::Boundary,domain::Boundary> pbds[nBoundaryPairs] {
-        std::make_pair(bds[3],bds[3]), //periodic-periodic
-        std::make_pair(bds[3],bds[3]), //periodic-periodic
-        std::make_pair(bds[2],bds[1]), //even-odd
-        std::make_pair(bds[1],bds[2]), //odd-even
-        std::make_pair(bds[2],bds[2]), //even-even
-        std::make_pair(bds[1],bds[1]), //odd-odd
-        std::make_pair(bds[0],bds[0]), //none-none
-};
-    
-#ifdef HAS_QUADMATHS
-    static constexpr __float128  freqs[6] = { 1.0Q, 1.0Q, 0.75Q, 0.75Q, 0.50Q, 0.50Q };
-#else
-    static constexpr long double freqs[6] = { 1.0L, 1.0L, 0.75L, 0.75L, 0.50L, 0.50L };
-#endif
-
-template <typename T>
-std::function<T(T)> func(std::size_t k) {
-    switch(k) {
-        case 0: return [=](T x) {return std::cos(T(freqs[0])*x);};
-        case 1: return [=](T x) {return std::sin(T(freqs[1])*x);};
-        case 2: return [=](T x) {return std::cos(T(freqs[2])*x);};
-        case 3: return [=](T x) {return std::sin(T(freqs[3])*x);};
-        case 4: return [=](T x) {return std::cos(T(freqs[4])*x);};
-        case 5: return [=](T x) {return std::sin(T(freqs[5])*x);};
-        default: return[=](T x) { return T(1); };
-    }
-}
-
-std::string bdsToStr(domain::Boundary bd) {
-    switch(bd) {
-        case(Boundary::NONE):                    return "None      ";
-        case(Boundary::PERIODIC) :               return "Periodic  ";
-        case(Boundary::HOMOGENEOUS_NEUMANN):     return "Hom_Neum. ";
-        case(Boundary::HOMOGENEOUS_DIRICHLET):   return "Hom_Diric.";
-        case(Boundary::NEUMANN):                 return "Neumann   ";
-        case(Boundary::DIRICHLET):               return "Dirichlet ";
-    }
-    return "";
-}
-
-template <typename T, std::size_t Dim, bool verbose=false> 
-void test(bool includePeriodicBds=false) {
-    typename Shape<Dim>::type shape;
-    typename Domain<T,Dim>::DomainSize domainSize;
-    Domain<T,Dim> ref, inBuffer, outBuffer;
-
-    Domain<T,Dim>& in  = inBuffer;
-    Domain<T,Dim>& out = outBuffer;
-
-    shape.fill(16);
-    domainSize.fill(2*hysop::constants::pi);
-
-    const T eps = std::numeric_limits<T>::epsilon();
-    const std::size_t N = std::accumulate(shape.begin(), shape.end(), 1, std::multiplies<std::size_t>());
-    
-    ref.resize(domainSize).reshape(shape);
-    in  = ref;
-    out = ref;
-   
-    typename Shape<Dim>::type testCases;
-    testCases.fill(nBoundaryPairs);
-    Index<Dim> testCaseId(testCases);
-    std::array<T,3> meanDists{0};
-    std::size_t testCaseCount = testCaseId.maxId()-1;
-
-    if(verbose)
-        std::cout << std::endl;
-    
-    while(testCaseId() != testCaseId.maxId()-1) { 
-
-        /* generate transform configuration */
-        std::size_t orderSum = 0;
-        std::array<std::pair<domain::Boundary,domain::Boundary>, Dim> bdsConfig;
-        T W2sum = T(0);
-        for (std::size_t k=0; k<Dim; k++) {
-            std::size_t id = testCaseId[k];
-            bdsConfig[k] = pbds[id];
-            if(bdsConfig[k].first != domain::Boundary::NONE) {
-                W2sum += freqs[id]*freqs[id];
-                orderSum+=2;
-            }
-        }
-        domain::DomainConfiguration<Dim> domainConfig(bdsConfig, includePeriodicBds);
-        
-        T orderPow = std::pow(T(10),T(orderSum));
-        if(std::is_same<T,long double>::value) /* just in case long doubles are not hardware supported... */
-            orderPow *= 1e3;
-        const auto criteria = std::make_tuple(orderPow*eps*N,orderPow*eps*sqrt(N),2*orderPow*eps);
-
-        const auto phi = [&](const typename Domain<T,Dim>::SpaceVariable &x) { 
-            T val = func<T>(testCaseId[0])(x[0]);
-            for (std::size_t d=1; d < Dim; d++)
-                val *= func<T>(testCaseId[d])(x[d]);
-            return val;
-        };
-        const auto f = [&](const typename Domain<T,Dim>::SpaceVariable &x) { 
-            return -W2sum*phi(x);
-        };
-
-        {
-            ref.resetDomainConfiguration(domainConfig);
-            in  = ref;
-            out = ref;
-
-            in.apply(f);
-            ref.apply(phi);
-            out.data().apply([](T& v){ v=T(0);});
-        }
-
-        solver::FftPoissonSolver<T,Dim> solver(domainSize, domainConfig, FFTW_MEASURE, includePeriodicBds, includePeriodicBds);
-        solver.apply(in.data(), out.data());
-
-        std::stringstream ss;
-        ss << "[";
-        for (std::size_t k=0; k<Dim-1; k++) 
-            ss << bdsToStr(bdsConfig[k].first) << "/" << bdsToStr(bdsConfig[k].second) << ",";
-        ss << bdsToStr(bdsConfig[Dim-1].first) << "/" << bdsToStr(bdsConfig[Dim-1].second);
-        ss << "]";
-
-        const auto dist = out.distance(ref);
-        const bool pass =      (std::get<0>(dist) < std::get<0>(criteria)) 
-            && (std::get<1>(dist) < std::get<1>(criteria))
-            && (std::get<2>(dist) < std::get<2>(criteria));
-
-        if((pass && verbose) || !pass) {
-            std::cout << (pass ? GREEN : RED);
-            std::cout << "\t" << std::setw(Dim*15) << ss.str() << " => " << (pass ? "OK" : "KO") 
-                << "  " << RESET << std::scientific << std::setprecision(2) << dist << std::endl;
-        }
-        if(!pass) {
-            //in.print("IN");
-            //ref.print("REF");
-            //out.print("OUT");
-            std::cout << "\t\tTest Failed... Criteria was " << criteria << "." << std::endl;
-            exit(EXIT_FAILURE);
-        }
-
-        meanDists[0] += std::get<0>(dist);
-        meanDists[1] += std::get<1>(dist);
-        meanDists[2] += std::get<2>(dist);
-
-        ++testCaseId;
-    }
-
-        for (std::size_t k = 0; k < 3; k++)
-            meanDists[k] /= T(testCaseCount);
-        std::cout << "\t=> mean distances over " << std::scientific << std::setprecision(1) << std::setw(4)
-            << testCaseCount << " testcases: " << meanDists;
-        for (std::size_t k = 0; k < 3; k++)
-            meanDists[k] = std::round(meanDists[k]/eps);
-        std::cout << " ~= " <<  std::fixed << std::setprecision(0) << meanDists << " eps" << std::endl; 
-}
-    
-int main(int argc, const char *argv[]) {
-
-#ifdef FFTW_HAS_FFTW3F
-    std::cout << "== TEST 1D - float       ==";
-    test<float,1,true>();
-    std::cout << "== TEST 2D - float       ==";
-    test<float,2,true>();
-    std::cout << "== TEST 3D - float       ==";
-    test<float,3,false>();
-    std::cout << std::endl;
-#endif
-    
-#ifdef FFTW_HAS_FFTW3D
-    std::cout << "== TEST 1D - double      ==";
-    test<double,1,false>();
-    std::cout << "== TEST 2D - double      ==";
-    test<double,2,false>();
-    std::cout << "== TEST 3D - double      ==";
-    test<double,3,false>();
-    std::cout << std::endl;
-#endif
-    
-#ifdef FFTW_HAS_FFTW3L
-    std::cout << "== TEST 1D - long double ==";
-    test<long double,1,false>();
-    std::cout << "== TEST 2D - long double ==";
-    test<long double,2,false>();
-    std::cout << "== TEST 3D - long double ==";
-    test<long double,3,false>();
-    std::cout << std::endl;
-#endif
-
-#ifdef FFTW_HAS_FFTW3Q
-    std::cout << "== TEST 1D - __float128 ==";
-    test<__float128,1,false>();
-    std::cout << "== TEST 2D - __float128 ==";
-    test<__float128,2,false>();
-    std::cout << "== TEST 3D - __float128 ==";
-    test<__float128,3,false>();
-    std::cout << std::endl;
-#endif
-    return EXIT_SUCCESS;
-}
diff --git a/src/hysop++/src/data/accumulatorIndex.h b/src/hysop++/src/data/accumulatorIndex.h
deleted file mode 100644
index b794002c2e5fba947912783b76b7e6a343a5fe2b..0000000000000000000000000000000000000000
--- a/src/hysop++/src/data/accumulatorIndex.h
+++ /dev/null
@@ -1,65 +0,0 @@
-
-#ifndef HYSOP_ACCUMULATORINDEX_H
-#define HYSOP_ACCUMULATORINDEX_H
-
-#include <array>
-#include <cassert>
-#include <vector>
-#include <functional>
-
-#include "data/index.h"
-
-namespace hysop {
-
-    template <typename T, std::size_t Dim, typename Source = std::array<std::vector<T>,Dim>, typename Functor = std::function<T(const T&, const T&)>>
-        struct AccumulatorIndex : public Index<Dim> {
-    
-            private:
-                using super = Index<Dim>;
-            public:
-                using Indices   = typename super::Indices;
-                using Dimension = typename super::Dimension;
-          
-            public:
-                AccumulatorIndex(const AccumulatorIndex& idx) = default;
-                ~AccumulatorIndex() = default;
-                
-                template <typename DimArray=Dimension, typename AccumulatorIndexArray=Indices>
-                AccumulatorIndex(const DimArray& p_dim = DimArray{0}, 
-                                 const Indices &p_ids  = AccumulatorIndexArray{0}):
-                     super(p_dim, p_ids), m_accumulatedData{0}, m_sourceData(nullptr), m_functor(std::plus<T>()) {
-                            } 
-
-                AccumulatorIndex& setAccumulatorSource(const Source& p_source)     { m_sourceData = &p_source; init(); return *this; }
-                AccumulatorIndex& setAccumulatorFunction(const Functor& p_functor) { m_functor    = p_functor; init(); return *this; }
-
-                const T& accumulatedVal() const { return m_accumulatedData[Dim-1]; }
-    
-            protected:
-                void init() {
-                    if(m_sourceData != nullptr) {
-                        m_accumulatedData[0] = (*m_sourceData)[0][this->operator[](0)];
-                        for (std::size_t d=0; d<Dim-1; d++)
-                            m_accumulatedData[d+1] = m_functor(m_accumulatedData[d],(*m_sourceData)[d+1][this->operator[](d+1)]);
-                    }
-                }
-
-                virtual void onIndexChange(std::size_t pos, std::ptrdiff_t offset) final override {
-                    if(m_sourceData != nullptr) {
-                        assert(pos < Dim);
-                        m_accumulatedData[pos] = (pos==0 ? (*m_sourceData)[0][this->operator[](0)] : m_functor(m_accumulatedData[pos-1],(*m_sourceData)[pos][this->operator[](pos)]));
-                        for (std::size_t d=pos; d<Dim-1; d++)
-                            m_accumulatedData[d+1] = m_functor(m_accumulatedData[d],(*m_sourceData)[d+1][this->operator[](d+1)]);
-                    }
-                };
-
-            protected:
-                std::array<T,Dim> m_accumulatedData;
-                const Source* m_sourceData;
-                Functor m_functor;
-        };
-
-}
-
-#endif /* end of include guard: HYSOP_ACCUMULATORINDEX_H */
-
diff --git a/src/hysop++/src/data/index.h b/src/hysop++/src/data/index.h
deleted file mode 100644
index c0dcb6993b9d28022ef20c481c6c398aa0c61da2..0000000000000000000000000000000000000000
--- a/src/hysop++/src/data/index.h
+++ /dev/null
@@ -1,125 +0,0 @@
-
-#ifndef HYSOP_INDEX_H
-#define HYSOP_INDEX_H
-
-#include <array>
-#include <cassert>
-
-namespace hysop {
-
-    template <std::size_t Dim>
-        struct Index {
-            typedef boost::array<std::ptrdiff_t,Dim> Indices;
-            typedef boost::array<std::size_t,Dim>    Dimension;
-           
-            Index(const Index& idx) = default;
-            ~Index() = default;
-            
-            template <typename DimArray=Dimension, typename IndexArray=Indices>
-            Index(const DimArray& p_dim = DimArray{0}, 
-                  const Indices &p_ids  = IndexArray{0}) : 
-                m_dim(), m_ids(), m_id(0) { 
-                    for (std::size_t d = 0; d < Dim; d++) {
-                        m_dim[d] = p_dim[d];
-                        m_ids[d] = p_ids[d];
-                    }
-                    initializeId(); 
-                } 
-
-            template <typename DimArray, typename IndexArray=Indices>
-            Index& reset(const DimArray& p_dim, const IndexArray &p_ids = IndexArray{0}) {
-                for (std::size_t d = 0; d < Dim; d++) {
-                    m_dim[d] = p_dim[d];
-                    m_ids[d] = p_ids[d];
-                }
-                initializeId();
-                return *this;
-            }
-            
-            Index& setIndexToMinusOne() {
-                for (std::size_t d=0; d<Dim-1; d++)
-                    m_ids[d] = 0;
-                m_ids[Dim-1] = -1;
-                initializeId();
-                return *this;
-            }
-
-            std::size_t id() const { return m_id; };
-            std::size_t maxId() const { return m_maxId; }
-            bool atMaxId() const { return m_id == m_maxId; }
-            
-            const Indices&   ids() const { return m_ids; };
-            const Dimension& dim() const { return m_dim; };
-
-            std::ptrdiff_t  operator[](std::size_t k) const { return m_ids[k]; } 
-            std::size_t     operator()() const { return m_id; } 
-
-            virtual void onIndexChange(std::size_t pos, std::ptrdiff_t offset) {};
-            virtual void onIndexOverflow(std::size_t pos) {};
-            virtual void onIndexUndeflow(std::size_t pos) {};
-
-//__attribute__((optimize("unroll-loops")))
-            Index& operator++() {    //prefix
-                for (int d = Dim-1; d >=0; d--) {
-                    m_ids[d]++;
-                    if(m_ids[d]==std::ptrdiff_t(m_dim[d])) {
-                        m_ids[d]=0;
-                        this->onIndexOverflow(d);
-                    }
-                    else {
-                        this->onIndexChange(d, +1);
-                        break;
-                    }
-                }
-                m_id++;
-                return *this;
-            };
-//__attribute__((optimize("unroll-loops")))
-            Index& operator--() {    //prefix
-                for (int d = 0; d < Dim; d++) {
-                    if(m_ids[d]!=0) {
-                        this->onIndexChange(d, -1);
-                        m_ids[d]--;
-                        break;
-                    }
-                    else {
-                        this->onIndexUndeflow(d);
-                    }
-                }
-                m_id--;
-                return *this;
-            };
-
-            Index operator++(int) { //postfix
-                Index result(*this);
-                ++(*this);
-                return result;
-            };
-            Index operator--(int) { //postfix
-                Index result(*this);
-                --(*this);
-                return result;
-            };
-            
-            protected:            
-                void initializeId() {
-                    m_id = 0;
-                    m_maxId = 1;
-                    for (std::size_t d = 0; d < Dim-1; d++) {
-                        m_id = (m_id + m_ids[d]) * m_dim[d+1]; 
-                        m_maxId*=m_dim[d];
-                    }
-                    m_id += m_ids[Dim-1];
-                    m_maxId*=m_dim[Dim-1];
-                }
-
-            private:
-                Dimension m_dim;
-                Indices   m_ids;
-                std::size_t m_id, m_maxId;
-        };
-
-}
-
-#endif /* end of include guard: HYSOP_INDEX_H */
-
diff --git a/src/hysop++/src/data/memory/fftwAllocator.h b/src/hysop++/src/data/memory/fftwAllocator.h
deleted file mode 100644
index d2b8423f37893f7188e1d44e39b88cbb01eb9bad..0000000000000000000000000000000000000000
--- a/src/hysop++/src/data/memory/fftwAllocator.h
+++ /dev/null
@@ -1,53 +0,0 @@
-
-#ifndef HYSOP_FFTW_ALLOCATOR_H
-#define HYSOP_FFTW_ALLOCATOR_H
-
-#include <limits>
-#include <fftw3.h>
-
-namespace hysop {
-    namespace data {
-        namespace memory {
-
-        /* FftwAllocator designed to correctly align data for fftw */
-        template <typename T>
-            struct FftwAllocator {
-                using value_type = T;
-                using const_pointer = const T*;
-
-                FftwAllocator() = default;
-
-                template <class U>
-                    FftwAllocator(const FftwAllocator<U>&) {}
-
-                T* allocate(std::size_t n, const_pointer hint=nullptr) {
-                    if (n <= std::numeric_limits<std::size_t>::max() / sizeof(T)) {
-                        if (auto ptr = fftw_malloc(n * sizeof(T))) {
-                            return static_cast<T*>(ptr);
-                        }
-                    }
-                    throw std::bad_alloc();
-                }
-                void deallocate(T* ptr, std::size_t n) {
-                    fftw_free(ptr);
-                }
-                void destroy(T* ptr) {
-                    ptr->~T();
-                }
-            };
-
-        template <typename T, typename U>
-            inline bool operator == (const FftwAllocator<T>&, const FftwAllocator<U>&) {
-                return true;
-            }
-
-        template <typename T, typename U>
-            inline bool operator != (const FftwAllocator<T>& a, const FftwAllocator<U>& b) {
-                return !(a == b);
-            }
-    
-        } /* end of namespace memory */
-    } /* end of namespace data */
-} /* end of namespace hysop */
-
-#endif /* end of include guard: HYSOP_FFTW_ALLOCATOR_H */
diff --git a/src/hysop++/src/data/memory/minimalAllocator.h b/src/hysop++/src/data/memory/minimalAllocator.h
deleted file mode 100644
index 2460db0db91dacc0fdc7466a8edc44d5a62c67fb..0000000000000000000000000000000000000000
--- a/src/hysop++/src/data/memory/minimalAllocator.h
+++ /dev/null
@@ -1,52 +0,0 @@
-
-#ifndef HYSOP_MINIMAL_ALLOCATOR_H
-#define HYSOP_MINIMAL_ALLOCATOR_H
-
-#include <limits>
-
-namespace hysop {
-    namespace data {
-        namespace memory {
-        
-        /* Minimal MinimalAllocator for boost and std libs  */
-        template <typename T>
-            struct MinimalAllocator {
-                using value_type = T;
-                using const_pointer = const T*;
-
-                MinimalAllocator() = default;
-
-                template <class U>
-                    MinimalAllocator(const MinimalAllocator<U>&) {}
-
-                T* allocate(std::size_t n, const_pointer hint=nullptr) {
-                    if (n <= std::numeric_limits<std::size_t>::max() / sizeof(T)) {
-                        if (auto ptr = std::malloc(n * sizeof(T))) {
-                            return static_cast<T*>(ptr);
-                        }
-                    }
-                    throw std::bad_alloc();
-                }
-                void deallocate(T* ptr, std::size_t n) {
-                    std::free(ptr);
-                }
-                void destroy(T* ptr) {
-                    ptr->~T();
-                }
-            };
-
-        template <typename T, typename U>
-            inline bool operator == (const MinimalAllocator<T>&, const MinimalAllocator<U>&) {
-                return true;
-            }
-
-        template <typename T, typename U>
-            inline bool operator != (const MinimalAllocator<T>& a, const MinimalAllocator<U>& b) {
-                return !(a == b);
-            }
-
-        } /* end of namespace memory */
-    } /* end of namespace data */
-} /* end of namespace hysop */
-
-#endif /* end of include guard: HYSOP_MINIMAL_ALLOCATOR_H */
diff --git a/src/hysop++/src/data/multi_array/< b/src/hysop++/src/data/multi_array/<
deleted file mode 100644
index 9b142c71f18edb8b281820e6be16eb1e3c059507..0000000000000000000000000000000000000000
--- a/src/hysop++/src/data/multi_array/<
+++ /dev/null
@@ -1,352 +0,0 @@
-
-#ifndef HYSOP_MULTI_ARRAY_H
-#define HYSOP_MULTI_ARRAY_H
-
-/**********************************/
-/*** Hysop boost multi array wrapper ***/
-/********************************/
-
-#include "const_multi_array_view.h"
-#include "multi_array_view.h"
-
-namespace hysop {
-    namespace data {
-
-        /* forward declaration */
-        FORWARD_DECLARE_TYPES()
-    
-        /* class hysop::data::multi_array */
-        template <typename T, std::size_t Dim, typename Allocator>
-            class multi_array : public boost_multi_array<T,Dim,Allocator> {
-                static_assert(Dim>0, "Dim cannot be zero !");
-
-                private:
-                    using super     = boost_multi_array<T,Dim,Allocator>;
-                    using super_ref = boost_multi_array_ref<T,Dim>;
-                public:
-                    PUBLIC_CLASS_TYPES()
-
-                public:
-                    multi_array(const extents_gen<Dim>& extents = extents_gen<Dim>());
-                    multi_array(const Shape<Dim>& shape);
-
-                    multi_array(const multi_array& other);
-                    multi_array(multi_array&& other);
-
-                    explicit multi_array(const array_ref& other);
-                    explicit multi_array(const array_view& other);
-                    explicit multi_array(const const_array_ref& other);
-                    explicit multi_array(const const_array_view& other);
-                   
-                    explicit multi_array(const boost_multi_array<T,Dim,Allocator>& other);
-                    explicit multi_array(const boost_multi_array_ref<T,Dim>& other);
-                    explicit multi_array(const boost_multi_array_view<T,Dim>& other);
-                    explicit multi_array(const boost_const_multi_array_ref<T,Dim>& other);
-                    explicit multi_array(const boost_const_multi_array_view<T,Dim>& other);
-                    explicit multi_array(boost_multi_array<T,Dim,Allocator>&& other);
-
-                    multi_array& operator=(const multi_array& other);
-                    multi_array& operator=(const array_ref& ref);
-                    multi_array& operator=(const array_view& view);
-                    multi_array& operator=(const const_array_ref& ref);
-                    multi_array& operator=(const const_array_view& view);
-                    multi_array& operator=(multi_array&& other);
-
-                    operator array_ref();
-                    operator const_array_ref() const;
-
-                public:
-
-                    multi_array& reshape(const Shape<Dim>& shape);
-                    
-                    /* print data */
-                    const multi_array& print(const std::string& name, std::ostream& os = std::cout, 
-                            unsigned int precision=2u, unsigned int width=6u) const;
-
-                    /* Apply function to all elements of the view */
-                    /* (1) apply func(T&) on all elements */
-                    /* (2) apply func(T&, const Index<Dim>&) on all elements */
-                    /* (3) apply func(T&, const Index<Dim>&, farg0, fargs...) on all elements */
-                    multi_array& apply(const std::function<void(T&)>& func);
-                    multi_array& apply(const std::function<void(T&, const Index<Dim>&)>& func);
-                    template <typename Functor, typename Arg0, typename... Args> 
-                        multi_array& apply(const Functor& func, Arg0&& farg0, Args&&... fargs);
-
-                    /* boolean array_view specific functions */
-                    ENABLE_IF_BOOL(bool) all() const;
-                    ENABLE_IF_BOOL(bool) any() const;
-                    ENABLE_IF_BOOL(bool) none() const;
-
-                    /* real and complex data accessors, usefull for FFT like transforms */
-                    ENABLE_IF_REAL   (      T*) rdata()       { return this->origin(); }
-                    ENABLE_IF_REAL   (const T*) rdata() const { return this->origin(); }
-
-                    ENABLE_IF_COMPLEX(      T*) cdata()       { return this->origin(); }
-                    ENABLE_IF_COMPLEX(const T*) cdata() const { return this->origin(); }
-
-                    ENABLE_IF_COMPLEX(      typename fft::fftw_complex_type<T>::std_type*) std_cdata() { 
-                        return reinterpret_cast<      typename fft::fftw_complex_type<T>::std_type*>(this->origin()); 
-                    }
-                    ENABLE_IF_COMPLEX(const typename fft::fftw_complex_type<T>::std_type*) std_cdata() const { 
-                        return reinterpret_cast<const typename fft::fftw_complex_type<T>::std_type*>(this->origin()); 
-                    }
-                    ENABLE_IF_COMPLEX(      typename fft::fftw_complex_type<T>::fftw_type*) fftw_cdata() { 
-                        return reinterpret_cast<      typename fft::fftw_complex_type<T>::fftw_type*>(this->origin()); 
-                    }
-                    ENABLE_IF_COMPLEX(const typename fft::fftw_complex_type<T>::fftw_type*) fftw_cdata() const { 
-                        return reinterpret_cast<const typename fft::fftw_complex_type<T>::fftw_type*>(this->origin()); 
-                    }
-
-                    ENABLE_IF_COMPLEX(      typename fft::fftw_complex_type<T>::value_type*) asRealData() { 
-                        return reinterpret_cast<       typename fft::fftw_complex_type<T>::value_type*>(this->origin()); 
-                    }
-                    ENABLE_IF_COMPLEX(const typename fft::fftw_complex_type<T>::value_type*) asRealData() const { 
-                        return reinterpret_cast< const typename fft::fftw_complex_type<T>::value_type*>(this->origin()); 
-                    }
-                    ENABLE_IF_REAL   (      typename fft::fftw_complex_type<T>::std_type*) asStdComplexData() { 
-                        return reinterpret_cast<      typename fft::fftw_complex_type<T>::std_type*>(this->origin()); 
-                    }
-                    ENABLE_IF_REAL   (const typename fft::fftw_complex_type<T>::std_type*) asStdComplexdata() const { 
-                        return reinterpret_cast<const typename fft::fftw_complex_type<T>::std_type*>(this->origin()); 
-                    }
-                    ENABLE_IF_REAL   (      typename fft::fftw_complex_type<T>::fftw_type*) asFftwComplexdata() { 
-                        return reinterpret_cast<      typename fft::fftw_complex_type<T>::fftw_type*>(this->origin()); 
-                    }
-                    ENABLE_IF_REAL   (const typename fft::fftw_complex_type<T>::fftw_type*) asFftwComplexData() const { 
-                        return reinterpret_cast<const typename fft::fftw_complex_type<T>::fftw_type*>(this->origin()); 
-                    }
-
-                protected:
-                    static extents_gen<Dim> shapeToExtents(const Shape<Dim> &shape);
-            };
-
-
-        /* Implementation */
-        
-        template <typename T, std::size_t Dim, typename Allocator> 
-        multi_array<T,Dim,Allocator>::multi_array(const extents_gen<Dim>& extents):
-            boost_multi_array<T,Dim,Allocator>(extents) {}
-                    
-        template <typename T, std::size_t Dim, typename Allocator> 
-        multi_array<T,Dim,Allocator>::multi_array(const Shape<Dim>& shape):
-            boost_multi_array<T,Dim,Allocator>(shapeToExtents(shape)) {}
-        
-        template <typename T, std::size_t Dim, typename Allocator> 
-        multi_array<T,Dim,Allocator>::multi_array(const multi_array& other):
-            boost_multi_array<T,Dim,Allocator>(static_cast<const boost_multi_array<T,Dim>&>(other)) {}
-        
-        template <typename T, std::size_t Dim, typename Allocator> 
-        multi_array<T,Dim,Allocator>::multi_array(const array_view& other):
-            boost_multi_array<T,Dim,Allocator>(static_cast<const boost_multi_array_view<T,Dim>&>(other)) {}
-        
-        template <typename T, std::size_t Dim, typename Allocator> 
-        multi_array<T,Dim,Allocator>::multi_array(const const_array_view& other):
-            boost_multi_array<T,Dim,Allocator>(static_cast<const boost_multi_array_view<T,Dim>&>(other)) {}
-        
-        template <typename T, std::size_t Dim, typename Allocator> 
-        multi_array<T,Dim,Allocator>::multi_array(multi_array&& other):
-            boost_multi_array<T,Dim,Allocator>(static_cast<boost_multi_array<T,Dim,Allocator>&&>(other)) {}
-        
-        template <typename T, std::size_t Dim, typename Allocator> 
-        multi_array<T,Dim,Allocator>::multi_array(const boost_multi_array_view<T,Dim>& other):
-            boost_multi_array<T,Dim,Allocator>(other) {}
-        
-        template <typename T, std::size_t Dim, typename Allocator> 
-        multi_array<T,Dim,Allocator>::multi_array(const boost_const_multi_array_view<T,Dim>& other):
-            boost_multi_array<T,Dim,Allocator>(other) {}
-        
-        template <typename T, std::size_t Dim, typename Allocator> 
-        multi_array<T,Dim,Allocator>::multi_array(const boost_multi_array_ref<T,Dim>& other):
-            boost_multi_array<T,Dim,Allocator>(other) {}
-        
-        template <typename T, std::size_t Dim, typename Allocator> 
-        multi_array<T,Dim,Allocator>::multi_array(const boost_const_multi_array_ref<T,Dim>& other):
-            boost_multi_array<T,Dim,Allocator>(other) {}
-        
-        template <typename T, std::size_t Dim, typename Allocator> 
-        multi_array<T,Dim,Allocator>::multi_array(boost_multi_array<T,Dim,Allocator>&& other):
-            boost_multi_array<T,Dim,Allocator>(other) {}
-
-
-        template <typename T, std::size_t Dim, typename Allocator> 
-            multi_array<T,Dim,Allocator>& multi_array<T,Dim,Allocator>::operator=(const array_view& other) {
-                this->reshape(other.shape());
-                /* cast obligatory to avoid shape() function aliasing */
-                super::operator=(dynamic_cast<const boost_multi_array_view<T,Dim>&>(other));
-                return *this;
-            }
-        
-        template <typename T, std::size_t Dim, typename Allocator> 
-            multi_array<T,Dim,Allocator>& multi_array<T,Dim,Allocator>::operator=(const const_array_view& other) {
-                this->reshape(other.shape());
-                /* cast obligatory to avoid shape() function aliasing */
-                super::operator=(dynamic_cast<const boost_multi_array_view<T,Dim>&>(other));
-                return *this;
-            }
-        
-        template <typename T, std::size_t Dim, typename Allocator> 
-            multi_array<T,Dim,Allocator>& multi_array<T,Dim,Allocator>::operator=(const multi_array& other) {
-                this->reshape(other.shape());
-                /* cast obligatory to avoid shape() function aliasing */
-                super::operator=(dynamic_cast<const boost_multi_array_ref<T,Dim>&>(other));
-                return *this;
-            }
-
-        template <typename T, std::size_t Dim, typename Allocator> 
-            multi_array<T,Dim,Allocator>& multi_array<T,Dim,Allocator>::operator=(multi_array&& other) {
-                super::operator=(other);
-                return *this;
-            }
-
-        template <typename T, std::size_t Dim, typename Allocator> 
-            multi_array<T,Dim,Allocator>::operator array_view() {
-                return this->operator[](hysop::utils::buildView<Dim>());
-            }
-        template <typename T, std::size_t Dim, typename Allocator> 
-            multi_array<T,Dim,Allocator>::operator const_array_view() const {
-                return this->operator[](hysop::utils::buildView<Dim>());
-            }
-
-            
-        template <typename T, std::size_t Dim, typename Allocator> 
-            Shape<Dim> multi_array<T,Dim,Allocator>::shape() const {
-                Shape<Dim> shape;
-                const std::size_t* extents = this->super::shape();
-                for (std::size_t d = 0; d < Dim; d++)
-                    shape[d] = static_cast<std::size_t>(extents[d]);
-                return shape;
-            }
-
-        template <typename T, std::size_t Dim, typename Allocator> 
-            multi_array<T,Dim,Allocator>& multi_array<T,Dim,Allocator>::reshape(const Shape<Dim>& shape) { 
-                boost::array<int,Dim> extents;
-                for (std::size_t d = 0; d < Dim; d++)
-                    extents[d] = static_cast<int>(shape[d]);
-                this->resize(extents);
-                return *this;
-            }
-                    
-        template <typename T, std::size_t Dim, typename Allocator> 
-        typename multi_array<T,Dim,Allocator>::template extents_gen<Dim> multi_array<T,Dim,Allocator>::shapeToExtents(const Shape<Dim> &shape) {
-            return utils::buildExtents(shape);
-        }
-        
-        template <typename T, std::size_t Dim, typename Allocator>
-            multi_array<T,Dim,Allocator>& multi_array<T,Dim,Allocator>::apply(const std::function<void(T&)>& func) {
-                T* data = this->origin();
-                for (std::size_t k = 0; k < this->num_elements(); k++) {
-                    func(data[k]);
-                } 
-                return *this;
-            }
-        template <typename T, std::size_t Dim, typename Allocator>
-            multi_array<T,Dim,Allocator>& multi_array<T,Dim,Allocator>::apply(const std::function<void(T&, const Index<Dim>&)>& func) {
-                hysop::Index<Dim> idx(this->shape());
-                T* data = this->origin();
-                for (std::size_t k = 0; k < this->num_elements(); k++) {
-                    func(data[k], idx);
-                    ++idx;
-                }
-                return *this;
-            }
-
-        template <typename T, std::size_t Dim, typename Allocator>
-            template <typename Functor, typename Arg0, typename... Args>
-            multi_array<T,Dim,Allocator>& multi_array<T,Dim,Allocator>::apply(const Functor& func, Arg0&& farg0, Args&&... fargs) {
-                hysop::Index<Dim> idx(this->shape());
-                T* data = this->origin();
-                for (std::size_t k = 0; k < this->num_elements(); k++) {
-                    func(data[k], idx, std::forward<Arg0>(farg0), std::forward<Args>(fargs)...);
-                    ++idx;
-                }
-                return *this;
-            }
-
-        template <typename T, std::size_t Dim, typename Allocator>
-            const multi_array<T,Dim,Allocator>& multi_array<T,Dim,Allocator>::print(const std::string& name, std::ostream& os, 
-                    unsigned int precision, unsigned int width) const {
-                auto S = this->super::shape();
-                std::size_t id = 0;
-
-                os << name << " = [";
-                if(Dim==1 || Dim>3) {
-                    for(std::size_t k=0; k<this->num_elements(); k++) {
-                        T x = this->data()[id++];
-                        os << std::fixed << std::showpos << std::setprecision(precision) << std::setw(width) << x << " ";
-                    }
-                }
-                else if(Dim==2) {
-                    std::cout << std::endl;
-                    for(std::size_t i=0; i<S[0]; i++) {
-                        os << "\t[";
-                        for(std::size_t j=0; j<S[1]; j++) {
-                            T x = this->data()[id++];
-                            os << std::fixed << std::showpos << std::setprecision(precision) << std::setw(width) << x << " ";
-                        }
-                        os << "]" << std::endl;
-                    }
-                }
-                else {
-                    std::cout << std::endl;
-                    for(std::size_t i=0; i<S[0]; i++) {
-                        os << "\t[[";
-                        for(std::size_t j=0; j<S[1]; j++) {
-                            if(j>0)
-                                os << "\t [";
-                            for(std::size_t k=0; k<S[2]; k++) {
-                                T x = this->data()[id++];
-                                os << std::fixed << std::showpos << std::setprecision(precision) << std::setw(width) << x << " ";
-                            }
-                            if(j!=S[1]-1)
-                                os << "]" << std::endl;
-                            else
-                                os << "]]," << std::endl;
-                        }
-                    }
-                }
-                os << "];" << std::endl;
-                return *this;
-            }
-
-        /* boolean specific functions */
-        template <typename T, std::size_t Dim, typename Allocator>
-            template <typename TT>
-            typename std::enable_if<std::is_same<TT,bool>::value, bool>::type multi_array<T,Dim,Allocator>::all() const {
-                for(std::size_t k=0; k<this->num_elements(); k++) {
-                    if(!this->data()[k])
-                        return false;
-                }
-                return true;
-            }
-
-        template <typename T, std::size_t Dim, typename Allocator>
-            template <typename TT>
-            typename std::enable_if<std::is_same<TT,bool>::value, bool>::type multi_array<T,Dim,Allocator>::any() const {
-                for(std::size_t k=0; k<this->num_elements(); k++) {
-                    if(this->data()[k])
-                        return true;
-                }
-                return false;
-            }
-
-        template <typename T, std::size_t Dim, typename Allocator>
-            template <typename TT>
-            typename std::enable_if<std::is_same<TT,bool>::value, bool>::type multi_array<T,Dim,Allocator>::none() const {
-                for(std::size_t k=0; k<this->num_elements(); k++) {
-                    if(this->data()[k])
-                        return false;
-                }
-                return true;
-            }
-
-
-    } /* end of namespace data */ 
-} /* end of namespace hysop */
-
-#undef ENABLE_IF_BOOL
-#undef ENABLE_IF_REAL
-#undef ENABLE_IF_COMPLEX
-
-#include "multi_array_ext.h"
-
-#endif /* end of include guard: HYSOP_MULTI_ARRAY_H */
diff --git a/src/hysop++/src/data/multi_array/const_multi_array_ref.h b/src/hysop++/src/data/multi_array/const_multi_array_ref.h
deleted file mode 100644
index cc0a2f2c494a220941e2be01f57cf5107af3df04..0000000000000000000000000000000000000000
--- a/src/hysop++/src/data/multi_array/const_multi_array_ref.h
+++ /dev/null
@@ -1,53 +0,0 @@
-
-#ifndef HYSOP_MULTI_ARRAY_H
-#include "data/multi_array/multi_array.h"
-#else 
-
-#ifndef HYSOP_CONST_MULTI_ARRAY_REF_H
-#define HYSOP_CONST_MULTI_ARRAY_REF_H
-
-namespace hysop {
-    namespace data {
-        
-        /* class hysop::data::multi_array */
-        template <typename T, std::size_t Dim>
-            class const_multi_array_ref : public boost_const_multi_array_ref<T,Dim> {
-                static_assert(Dim>0, "Dim cannot be zero !");
-
-                private:
-                    using super = boost_const_multi_array_ref<T,Dim>;
-                public:
-                    PUBLIC_CLASS_TYPES()
-
-                public:
-                    const_multi_array_ref(const const_multi_array_ref<T,Dim>& ref) = default;
-                    const_multi_array_ref& operator=(const const_multi_array_ref<T,Dim>& other) = default;
-                    
-                    const_multi_array_ref(const boost_const_multi_array_ref<T,Dim>& ref);
-                    const_multi_array_ref& operator=(const boost_const_multi_array_ref<T,Dim>& other);
-
-                public:
-                    PUBLIC_CONST_REF_INTERFACE(SINGLE_ARG(const_multi_array_ref<T,Dim>))
-                };
-
-
-        /* Implementation */
-        template <typename T, std::size_t Dim>
-        const_multi_array_ref<T,Dim>::const_multi_array_ref(const boost_const_multi_array_ref<T,Dim>& ref) :
-            super(ref) {
-        }
-
-        template <typename T, std::size_t Dim>
-        const_multi_array_ref<T,Dim>& const_multi_array_ref<T,Dim>::operator=(const boost_const_multi_array_ref<T,Dim>& other) {
-            super::operator=(other);
-            return *this;
-        }
-
-        CONST_REF_IMPLEMENTATION(SINGLE_ARG(const_multi_array_ref<T,Dim>), SINGLE_ARG(template <typename T, std::size_t Dim>))
-
-    } /* end of namespace data */ 
-} /* end of namespace hysop */
-
-#endif /* end of include guard: HYSOP_CONST_MULTI_ARRAY_REF_H */
-
-#endif /* end of MULTI_ARRAY include guard */
diff --git a/src/hysop++/src/data/multi_array/const_multi_array_view.h b/src/hysop++/src/data/multi_array/const_multi_array_view.h
deleted file mode 100644
index 335433d8ad660cf2d065ef03a0dbc165948faec6..0000000000000000000000000000000000000000
--- a/src/hysop++/src/data/multi_array/const_multi_array_view.h
+++ /dev/null
@@ -1,53 +0,0 @@
-
-#ifndef HYSOP_MULTI_ARRAY_H
-#include "data/multi_array/multi_array.h"
-#else 
-
-#ifndef HYSOP_CONST_MULTI_ARRAY_VIEW_H
-#define HYSOP_CONST_MULTI_ARRAY_VIEW_H
-
-namespace hysop {
-    namespace data {
-        
-        /* class hysop::data::multi_array */
-        template <typename T, std::size_t Dim>
-            class const_multi_array_view : public boost_const_multi_array_view<T,Dim> {
-                static_assert(Dim>0, "Dim cannot be zero !");
-
-                private:
-                    using super = boost_const_multi_array_view<T,Dim>;
-                public:
-                    PUBLIC_CLASS_TYPES()
-
-                public:
-                    const_multi_array_view(const const_multi_array_view<T,Dim>& view) = default;
-                    const_multi_array_view& operator=(const const_multi_array_view<T,Dim>& other) = default;
-                    
-                    const_multi_array_view(const boost_const_multi_array_view<T,Dim>& view);
-                    const_multi_array_view& operator=(const boost_const_multi_array_view<T,Dim>& other);
-
-                public:
-                    PUBLIC_CONST_VIEW_INTERFACE(SINGLE_ARG(const_multi_array_view<T,Dim>))
-                };
-
-
-        /* Implementation */
-        template <typename T, std::size_t Dim>
-        const_multi_array_view<T,Dim>::const_multi_array_view(const boost_const_multi_array_view<T,Dim>& view) :
-            super(view) {
-        }
-
-        template <typename T, std::size_t Dim>
-        const_multi_array_view<T,Dim>& const_multi_array_view<T,Dim>::operator=(const boost_const_multi_array_view<T,Dim>& other) {
-            super::operator=(other);
-            return *this;
-        }
-
-        CONST_VIEW_IMPLEMENTATION(SINGLE_ARG(const_multi_array_view<T,Dim>), SINGLE_ARG(template <typename T, std::size_t Dim>))
-
-    } /* end of namespace data */ 
-} /* end of namespace hysop */
-
-#endif /* end of include guard: HYSOP_CONST_MULTI_ARRAY_VIEW_H */
-
-#endif /* end of MULTI_ARRAY include guard */
diff --git a/src/hysop++/src/data/multi_array/multi_array.h b/src/hysop++/src/data/multi_array/multi_array.h
deleted file mode 100644
index 41746a7eb7a451708e6aaaae88b297b1a4d6a099..0000000000000000000000000000000000000000
--- a/src/hysop++/src/data/multi_array/multi_array.h
+++ /dev/null
@@ -1,52 +0,0 @@
-
-#ifndef HYSOP_MULTI_ARRAY_H
-#define HYSOP_MULTI_ARRAY_H
-
-#include "utils/utils.h"
-#include "utils/default.h"
-#include <boost/multi_array.hpp>
-
-/****************************************/
-/*** Hysop boost multi array wrapper ****/
-/****************************************/
-
-namespace hysop {
-    namespace data {
-
-        /* forward declaration of types */
-        template <typename T, std::size_t Dim, typename Allocator = hysop::_default::allocator<T>>                                           
-        class multi_array;                                                                              
-        template <typename T, std::size_t Dim>                                                              
-        class multi_array_ref;                                                                          
-        template <typename T, std::size_t Dim>                                                              
-        class const_multi_array_ref;                                                                    
-        template <typename T, std::size_t Dim>                                                              
-        class multi_array_view;                                                                         
-        template <typename T, std::size_t Dim>                                                              
-        class const_multi_array_view;                                                                   
-
-        template <typename T, std::size_t Dim, typename Allocator = hysop::_default::allocator<T>>                                          
-        using boost_multi_array = boost::multi_array<T,Dim,Allocator>;                                  
-        template <typename T, std::size_t Dim>                                                              
-        using boost_multi_array_ref = boost::multi_array_ref<T,Dim>;                                    
-        template <typename T, std::size_t Dim>                                                              
-        using boost_const_multi_array_ref = boost::const_multi_array_ref<T,Dim>;                        
-        template <typename T, std::size_t Dim>                                                              
-        using boost_multi_array_view = boost::detail::multi_array::multi_array_view<T,Dim>;             
-        template <typename T, std::size_t Dim>                                                              
-        using boost_const_multi_array_view = boost::detail::multi_array::const_multi_array_view<T,Dim>;
-    }
-}
-
-#include "data/multi_array/multi_array_defines.h"
-
-#include "data/multi_array/const_multi_array_view.h"
-#include "data/multi_array/multi_array_view.h"
-#include "data/multi_array/const_multi_array_ref.h"
-#include "data/multi_array/multi_array_ref.h"
-#include "data/multi_array/multi_array_impl.h"
-#include "data/multi_array/multi_array_ext.h"
-
-#include "data/multi_array/multi_array_clean.h"
-
-#endif /* end of include guard: HYSOP_MULTI_ARRAY_H */
diff --git a/src/hysop++/src/data/multi_array/multi_array_clean.h b/src/hysop++/src/data/multi_array/multi_array_clean.h
deleted file mode 100644
index 0f8c988ccac0234ece162df80e5e0b8398f10b03..0000000000000000000000000000000000000000
--- a/src/hysop++/src/data/multi_array/multi_array_clean.h
+++ /dev/null
@@ -1,47 +0,0 @@
-
-#ifndef HYSOP_MULTI_ARRAY_H
-#include "data/multi_array/multi_array.h"
-#else 
-
-#ifndef HYSOP_MULTI_ARRAY_CLEAN_H
-#define HYSOP_MULTI_ARRAY_CLEAN_H
-
-/* clean all macros used to generate hysop multiarray */
-
-#undef SINGLE_ARG
-#undef PUBLIC_CLASS_TYPES
-
-#undef ENABLE_IF_BOOL
-#undef ENABLE_IF_REAL
-#undef ENABLE_IF_COMPLEX
-
-#undef PUBLIC_COMMON_CONST_INTEFACE
-#undef PUBLIC_CONST_REF_INTERFACE
-#undef PUBLIC_CONST_VIEW_INTERFACE
-#undef PUBLIC_COMMON_NON_CONST_INTERFACE
-#undef PUBLIC_NON_CONST_REF_INTERFACE
-#undef PUBLIC_NON_CONST_VIEW_INTERFACE
-
-#undef LOOP_VARNAME
-#undef LOOP_OVER_ALL_REF_ELEMENTS
-#undef REF_DATA_ACCESS
-#undef LOOP_OVER_ALL_VIEW_ELEMENTS
-#undef VIEW_DATA_ACCESS
-
-#undef COMMON_CONST_IMPLEMENTATION
-#undef LOOP_DEPENDENT_CONST_IMPLEMENTATION
-#undef CONST_REF_IMPL
-#undef CONST_VIEW_IMPL
-#undef CONST_REF_IMPLEMENTATION
-#undef CONST_VIEW_IMPLEMENTATION
-
-#undef COMMON_NON_CONST_IMPLEMENTATION
-#undef LOOP_DEPENDENT_NON_CONST_IMPLEMENTATION
-#undef NON_CONST_REF_IMPL
-#undef NON_CONST_VIEW_IMPL
-#undef NON_CONST_REF_IMPLEMENTATION
-#undef NON_CONST_VIEW_IMPLEMENTATION
-
-#endif /* end of include guard: MULTI_ARRAY_CLEAN_H */
-
-#endif /* end of MULTI_ARRAY include guard */
diff --git a/src/hysop++/src/data/multi_array/multi_array_defines.h b/src/hysop++/src/data/multi_array/multi_array_defines.h
deleted file mode 100644
index 1f8ac7fbee74788207eb00efc301899efbffbe9b..0000000000000000000000000000000000000000
--- a/src/hysop++/src/data/multi_array/multi_array_defines.h
+++ /dev/null
@@ -1,366 +0,0 @@
-
-#ifndef HYSOP_MULTI_ARRAY_H
-#include "data/multi_array/multi_array.h"
-#else 
-
-#ifndef HYSOP_MULTI_ARRAY_DEFINES_H
-#define HYSOP_MULTI_ARRAY_DEFINES_H
-
-#include <algorithm>
-#include <iostream>
-#include <iomanip>
-#include <cmath>
-
-#include "utils/utils.h"
-#include "utils/types.h"
-#include "data/index.h"
-#include "fft/fftwComplex.h"
-
-
-/* helper macros to generate all hysop multi_array wrappers */
-#define SINGLE_ARG(...) __VA_ARGS__
-
-/* declaration of all public class types */
-#define PUBLIC_CLASS_TYPES()                                                      \
-    template <typename Alloc>                                                     \
-    using array = multi_array<T,Dim,Alloc>;                                       \
-                                                                                  \
-    using array_ref        = multi_array_ref<T,Dim>;                              \
-    using array_view       = multi_array_view<T,Dim>;                             \
-    using const_array_ref  = const_multi_array_ref<T,Dim>;                        \
-    using const_array_view = const_multi_array_view<T,Dim>;                       \
-                                                                                  \
-    template <std::size_t NExtents>                                               \
-    using extents_gen = boost::detail::multi_array::extent_gen<NExtents>;         \
-                                                                                  \
-    template <std::size_t NumRanges, std::size_t NumDim>                          \
-    using index_gen   = boost::detail::multi_array::index_gen<NumRanges, NumDim>; \
-                                                                                  \
-    using index_range = boost::multi_array_types::index_range;
-
-
-/* various enable if macros */
-#define ENABLE_IF_BOOL(TYPE,DEFAULT)                                     \
-            template <typename TT DEFAULT>                               \
-            typename std::enable_if<                                     \
-                       std::is_same<TT,bool>::value                      \
-                , TYPE>::type
-#define ENABLE_IF_FFTW_REAL(TYPE,DEFAULT)                                \
-            template <typename TT DEFAULT>                               \
-            typename std::enable_if<                                     \
-                    hysop::fft::is_fftw_supported_type<TT>::value        \
-                , TYPE>::type 
-#define ENABLE_IF_FFTW_COMPLEX(TYPE,DEFAULT)                               \
-            template <typename TT DEFAULT>                                 \
-            typename std::enable_if<                                       \
-                    hysop::fft::is_fftw_supported_complex_type<TT>::value  \
-                , TYPE>::type
-
-/* CLASS INTERFACES */
-/* public const common interfaces for views and referencess */
-#define PUBLIC_COMMON_CONST_INTERFACE(TYPENAME)                                                                   \
-        /* shape related */                                                                                      \
-        typename Shape<Dim>::type shape() const;                                                                                \
-                                                                                                                 \
-        /* print data */                                                                                         \
-        const TYPENAME& print(const std::string& name, std::ostream& os = std::cout,                             \
-                unsigned int precision=2u, unsigned int width=6u) const;                                         \
-                                                                                                                 \
-        /* boolean array_view specific functions */                                                              \
-        ENABLE_IF_BOOL(bool,=T) all() const;                                                                     \
-        ENABLE_IF_BOOL(bool,=T) any() const;                                                                     \
-        ENABLE_IF_BOOL(bool,=T) none() const;
-
-
-/* public const reference interface */
-#define PUBLIC_CONST_REF_INTERFACE(TYPENAME)                                                                    \
-        /* common const interface */                                                                            \
-        PUBLIC_COMMON_CONST_INTERFACE(SINGLE_ARG(TYPENAME))                                                      \
-        /* real and complex data accessors, usefull for FFT like transforms */                                  \
-        ENABLE_IF_FFTW_REAL   (const T*,=T) rdata() const;                                                           \
-        ENABLE_IF_FFTW_REAL   (const typename fft::fftw_complex_type<T>::std_type*,=T)  asStdComplexData() const;    \
-        ENABLE_IF_FFTW_REAL   (const typename fft::fftw_complex_type<T>::fftw_type*,=T) asFftwComplexData() const;   \
-        ENABLE_IF_FFTW_COMPLEX(const T*,=T) cdata() const;                                                           \
-        ENABLE_IF_FFTW_COMPLEX(const typename fft::fftw_complex_type<T>::std_type*,=T)    std_cdata() const;         \
-        ENABLE_IF_FFTW_COMPLEX(const typename fft::fftw_complex_type<T>::fftw_type*,=T)  fftw_cdata() const;         \
-        ENABLE_IF_FFTW_COMPLEX(const typename fft::fftw_complex_type<T>::value_type*,=T) asRealData() const;
-
-
-/* public const view interface */
-#define PUBLIC_CONST_VIEW_INTERFACE(TYPENAME)                                                     \
-        /* common const interface */                                                              \
-        PUBLIC_COMMON_CONST_INTERFACE(SINGLE_ARG(TYPENAME))
-
-
-/* non const interfaces */
-#define PUBLIC_COMMON_NON_CONST_INTERFACE(TYPENAME)                                               \
-        /* Apply function to all elements */                                                      \
-        TYPENAME& apply(const std::function<void(T&)>& func);                                     \
-        TYPENAME& apply(const std::function<void(T&, const Index<Dim>&)>& func);                  \
-        template <typename Functor, typename Arg0, typename... Args>                              \
-        TYPENAME& apply(const Functor& func, Arg0&& farg0, Args&&... fargs);
-
-
-#define PUBLIC_NON_CONST_REF_INTERFACE(TYPENAME)                                                  \
-        /* common non const interface */                                                          \
-        PUBLIC_COMMON_NON_CONST_INTERFACE(SINGLE_ARG(TYPENAME))                                   \
-        /* real and complex data accessors, usefull for FFT like transforms */                    \
-        ENABLE_IF_FFTW_REAL(T*,=T) rdata();                                                            \
-        ENABLE_IF_FFTW_REAL(typename fft::fftw_complex_type<T>::std_type*,=T) asStdComplexData();      \
-        ENABLE_IF_FFTW_REAL(typename fft::fftw_complex_type<T>::fftw_type*,=T) asFftwComplexData();    \
-        ENABLE_IF_FFTW_COMPLEX(T*,=T) cdata();                                                         \
-        ENABLE_IF_FFTW_COMPLEX(typename fft::fftw_complex_type<T>::std_type*,=T) std_cdata();          \
-        ENABLE_IF_FFTW_COMPLEX(typename fft::fftw_complex_type<T>::fftw_type*,=T) fftw_cdata();        \
-        ENABLE_IF_FFTW_COMPLEX(typename fft::fftw_complex_type<T>::value_type*,=T) asRealData();
-
-
-#define PUBLIC_NON_CONST_VIEW_INTERFACE(TYPENAME)                                                 \
-        /* common non const interface */                                                          \
-        PUBLIC_COMMON_NON_CONST_INTERFACE(SINGLE_ARG(TYPENAME))
-
-
-
-/* CLASS IMPLEMENTATIONS */
-
-/* Loop dependant implementation macros (references contain contiguous data but not the views) */
-#define LOOP_VARNAME multi_array_index
-#define NO_DEFAULT_TEMPLATES 
-
-/* A reference has a contiguous memory layout and can be accessed by data offsets */
-#define LOOP_OVER_ALL_REF_ELEMENTS(ARRAY) \
-    for (std::size_t LOOP_VARNAME=0; LOOP_VARNAME<((ARRAY).num_elements()); LOOP_VARNAME++)
-
-#define REF_DATA_ACCESS(ARRAY) (ARRAY).data()[LOOP_VARNAME]
-
-/* A view is non a contiguous memory access and can be accessed only by index list */
-#define LOOP_OVER_ALL_VIEW_ELEMENTS(ARRAY)      \
-    Index<Dim> LOOP_VARNAME((ARRAY).shape());   \
-    LOOP_VARNAME.setIndexToMinusOne();          \
-    while((++LOOP_VARNAME)() != LOOP_VARNAME.maxId())
-
-#define VIEW_DATA_ACCESS(ARRAY) (ARRAY).operator()(LOOP_VARNAME.ids())
-
-
-/* CONST IMPLEMENTATIONS */
-/* Common const implementation */
-#define COMMON_CONST_IMPLEMENTATION(TYPENAME,TEMPLATES)          \
-        /* shape related */                                      \
-        TEMPLATES                                                \
-        typename Shape<Dim>::type TYPENAME::shape() const {      \
-            typename Shape<Dim>::type shape;                     \
-            const std::size_t* extents = this->super::shape();   \
-            for (std::size_t d = 0; d < Dim; d++)                \
-                shape[d] = static_cast<std::size_t>(extents[d]); \
-            return shape;                                        \
-        }
-
-/* Loop dependant const implementation */
-#define LOOP_DEPENDENT_CONST_IMPLEMENTATION(TYPENAME,TEMPLATES,LOOP_OVER_ALL_ELEMENTS,DATA_ACCESS)                            \
-        /* print data */                                                                                                      \
-        TEMPLATES                                                                                                             \
-        const TYPENAME& TYPENAME::print(const std::string& name, std::ostream& os,                                            \
-                unsigned int precision, unsigned int width) const {                                                           \
-            auto S = this->super::shape();                                                                                    \
-            std::size_t id = 0;                                                                                               \
-                                                                                                                              \
-            os << name << " = [";                                                                                             \
-            if(Dim==1) {                                                                                                      \
-                for(std::size_t k=0; k<this->num_elements(); k++) {                                                           \
-                    T x = this->operator()(boost::array<std::size_t,1>{k});                                                   \
-                    os << std::fixed << std::showpos << std::setprecision(precision) << std::setw(width) << x << " ";         \
-                }                                                                                                             \
-            }                                                                                                                 \
-            else if(Dim==2) {                                                                                                 \
-                std::cout << std::endl;                                                                                       \
-                for(std::size_t i=0; i<S[0]; i++) {                                                                           \
-                    os << "\t[";                                                                                              \
-                    for(std::size_t j=0; j<S[1]; j++) {                                                                       \
-                        T x = this->operator()(boost::array<std::size_t,2>{i,j});                                             \
-                        os << std::fixed << std::showpos << std::setprecision(precision) << std::setw(width) << x << " ";     \
-                    }                                                                                                         \
-                    os << "]" << std::endl;                                                                                   \
-                }                                                                                                             \
-            }                                                                                                                 \
-            else if(Dim==3) {                                                                                                 \
-                std::cout << std::endl;                                                                                       \
-                for(std::size_t i=0; i<S[0]; i++) {                                                                           \
-                    os << "\t[[";                                                                                             \
-                    for(std::size_t j=0; j<S[1]; j++) {                                                                       \
-                        if(j>0)                                                                                               \
-                            os << "\t [";                                                                                     \
-                        for(std::size_t k=0; k<S[2]; k++) {                                                                   \
-                            T x = this->operator()(boost::array<std::size_t,3>{i,j,k});                                       \
-                            os << std::fixed << std::showpos << std::setprecision(precision) << std::setw(width) << x << " "; \
-                        }                                                                                                     \
-                        if(j!=S[1]-1)                                                                                         \
-                            os << "]" << std::endl;                                                                           \
-                        else                                                                                                  \
-                            os << "]]," << std::endl;                                                                         \
-                    }                                                                                                         \
-                }                                                                                                             \
-            }                                                                                                                 \
-            else {                                                                                                            \
-                LOOP_OVER_ALL_ELEMENTS(*this) {                                                                               \
-                    T x = DATA_ACCESS(*this);                                                                                 \
-                    os << std::fixed << std::showpos << std::setprecision(precision) << std::setw(width) << x << " ";         \
-                }                                                                                                             \
-            }                                                                                                                 \
-            os << "];" << std::endl;                                                                                          \
-            return *this;                                                                                                     \
-        }                                                                                                   \
-                                                                                                            \
-        /* boolean array_view specific functions */                                                         \
-        TEMPLATES ENABLE_IF_BOOL(bool,NO_DEFAULT_TEMPLATES) TYPENAME::all() const {                         \
-            LOOP_OVER_ALL_ELEMENTS(*this) {                                                                 \
-                const bool val = DATA_ACCESS(*this);                                                        \
-                if(!val)                                                                                    \
-                    return false;                                                                           \
-            }                                                                                               \
-            return true;                                                                                    \
-        }                                                                                                   \
-        TEMPLATES ENABLE_IF_BOOL(bool,NO_DEFAULT_TEMPLATES) TYPENAME::any() const {                         \
-            LOOP_OVER_ALL_ELEMENTS(*this) {                                                                 \
-                const bool val = DATA_ACCESS(*this);                                                        \
-                if(val)                                                                                     \
-                    return true;                                                                            \
-            }                                                                                               \
-            return false;                                                                                   \
-        }                                                                                                   \
-        TEMPLATES ENABLE_IF_BOOL(bool,NO_DEFAULT_TEMPLATES) TYPENAME::none() const {                        \
-            LOOP_OVER_ALL_ELEMENTS(*this) {                                                                 \
-                const bool val = DATA_ACCESS(*this);                                                        \
-                if(val)                                                                                     \
-                    return false;                                                                           \
-            }                                                                                               \
-            return true;                                                                                    \
-        }
-
-/* Reference specific const implementation */
-#define CONST_REF_IMPL(TYPENAME,TEMPLATES)                                                                                          \
-    TEMPLATES                                                                                                                       \
-    ENABLE_IF_FFTW_REAL(const T*,NO_DEFAULT_TEMPLATES) TYPENAME::rdata() const {                                                         \
-        return this->data();                                                                                                        \
-    }                                                                                                                               \
-    TEMPLATES                                                                                                                       \
-    ENABLE_IF_FFTW_REAL(const typename fft::fftw_complex_type<T>::std_type*,NO_DEFAULT_TEMPLATES)  TYPENAME::asStdComplexData() const {  \
-        return reinterpret_cast<const typename fft::fftw_complex_type<T>::std_type*>(this->data());                                 \
-    }                                                                                                                               \
-    TEMPLATES                                                                                                                       \
-    ENABLE_IF_FFTW_REAL(const typename fft::fftw_complex_type<T>::fftw_type*,NO_DEFAULT_TEMPLATES) TYPENAME::asFftwComplexData() const { \
-        return reinterpret_cast<const typename fft::fftw_complex_type<T>::fftw_type*>(this->data());                                \
-    }                                                                                                                               \
-    TEMPLATES                                                                                                                       \
-    ENABLE_IF_FFTW_COMPLEX(const T*,NO_DEFAULT_TEMPLATES) TYPENAME::cdata() const {                                                      \
-        return this->data();                                                                                                        \
-    }                                                                                                                               \
-    TEMPLATES                                                                                                                       \
-    ENABLE_IF_FFTW_COMPLEX(const typename fft::fftw_complex_type<T>::std_type*,NO_DEFAULT_TEMPLATES)    TYPENAME::std_cdata() const {    \
-        return reinterpret_cast<const typename fft::fftw_complex_type<T>::std_type*>(this->data());                                 \
-    }                                                                                                                               \
-    TEMPLATES                                                                                                                       \
-    ENABLE_IF_FFTW_COMPLEX(const typename fft::fftw_complex_type<T>::fftw_type*,NO_DEFAULT_TEMPLATES)  TYPENAME::fftw_cdata() const {    \
-        return reinterpret_cast<const typename fft::fftw_complex_type<T>::fftw_type*>(this->data());                                \
-    }                                                                                                                               \
-    TEMPLATES                                                                                                                       \
-    ENABLE_IF_FFTW_COMPLEX(const typename fft::fftw_complex_type<T>::value_type*,NO_DEFAULT_TEMPLATES) TYPENAME::asRealData() const {    \
-        return reinterpret_cast<const typename fft::fftw_complex_type<T>::value_type*>(this->data());                               \
-    }                                                                                                                               
-
-/* View specific const implementation */
-#define CONST_VIEW_IMPL(TYPENAME,TEMPLATES)
-
-/* All reference const implementation */
-#define CONST_REF_IMPLEMENTATION(TYPENAME,TEMPLATES)                                                                             \
-    COMMON_CONST_IMPLEMENTATION(SINGLE_ARG(TYPENAME),SINGLE_ARG(TEMPLATES))                                                      \
-    LOOP_DEPENDENT_CONST_IMPLEMENTATION(SINGLE_ARG(TYPENAME),SINGLE_ARG(TEMPLATES),LOOP_OVER_ALL_REF_ELEMENTS,REF_DATA_ACCESS)   \
-    CONST_REF_IMPL(SINGLE_ARG(TYPENAME),SINGLE_ARG(TEMPLATES))
-
-/* All view const implementation */
-#define CONST_VIEW_IMPLEMENTATION(TYPENAME,TEMPLATES)                                                                            \
-    COMMON_CONST_IMPLEMENTATION(SINGLE_ARG(TYPENAME),SINGLE_ARG(TEMPLATES))                                                      \
-    LOOP_DEPENDENT_CONST_IMPLEMENTATION(SINGLE_ARG(TYPENAME),SINGLE_ARG(TEMPLATES),LOOP_OVER_ALL_VIEW_ELEMENTS,VIEW_DATA_ACCESS) \
-    CONST_VIEW_IMPL(SINGLE_ARG(TYPENAME),SINGLE_ARG(TEMPLATES))
-
-
-
-
-/* NON CONST IMPLEMENTATIONS */
-/* Common non const implementation */
-#define COMMON_NON_CONST_IMPLEMENTATION(TYPENAME,TEMPLATES)
-
-/* Loop dependant non const implementation */
-#define LOOP_DEPENDENT_NON_CONST_IMPLEMENTATION(TYPENAME,TEMPLATES,LOOP_OVER_ALL_ELEMENTS,DATA_ACCESS)         \
-    /* Apply function to all elements */                                                                       \
-    TEMPLATES                                                                                                  \
-    TYPENAME& TYPENAME::apply(const std::function<void(T&)>& func) {                                           \
-        LOOP_OVER_ALL_ELEMENTS(*this) {                                                                        \
-            func( DATA_ACCESS(*this) );                                                                        \
-        }                                                                                                      \
-        return *this;                                                                                          \
-    }                                                                                                          \
-    TEMPLATES                                                                                                  \
-    TYPENAME& TYPENAME::apply(const std::function<void(T&, const Index<Dim>&)>& func) {                        \
-        LOOP_OVER_ALL_VIEW_ELEMENTS(*this) {                                                                   \
-            func( VIEW_DATA_ACCESS(*this), LOOP_VARNAME );                                                     \
-        }                                                                                                      \
-        return *this;                                                                                          \
-    }                                                                                                          \
-    TEMPLATES                                                                                                  \
-    template <typename Functor, typename Arg0, typename... Args>                                               \
-    TYPENAME& TYPENAME::apply(const Functor& func, Arg0&& farg0, Args&&... fargs) {                            \
-        LOOP_OVER_ALL_VIEW_ELEMENTS(*this) {                                                                   \
-            func( VIEW_DATA_ACCESS(*this), LOOP_VARNAME, std::forward<Arg0>(farg0), std::forward<Args>(fargs)... ); \
-        }                                                                                                      \
-        return *this;                                                                                          \
-    }
-
-/* Reference specific non const implementation */
-#define NON_CONST_REF_IMPL(TYPENAME,TEMPLATES)                                                                          \
-    TEMPLATES                                                                                                           \
-    ENABLE_IF_FFTW_REAL(T*,NO_DEFAULT_TEMPLATES) TYPENAME::rdata() {                                                         \
-        return this->data();                                                                                            \
-    }                                                                                                                   \
-    TEMPLATES                                                                                                           \
-    ENABLE_IF_FFTW_REAL(typename fft::fftw_complex_type<T>::std_type*,NO_DEFAULT_TEMPLATES)  TYPENAME::asStdComplexData() {  \
-        return reinterpret_cast<typename fft::fftw_complex_type<T>::std_type*>(this->data());                           \
-    }                                                                                                                   \
-    TEMPLATES                                                                                                           \
-    ENABLE_IF_FFTW_REAL(typename fft::fftw_complex_type<T>::fftw_type*,NO_DEFAULT_TEMPLATES) TYPENAME::asFftwComplexData() { \
-        return reinterpret_cast<typename fft::fftw_complex_type<T>::fftw_type*>(this->data());                          \
-    }                                                                                                                   \
-    TEMPLATES                                                                                                           \
-    ENABLE_IF_FFTW_COMPLEX(T*,NO_DEFAULT_TEMPLATES) TYPENAME::cdata() {                                                      \
-        return this->data();                                                                                            \
-    }                                                                                                                   \
-    TEMPLATES                                                                                                           \
-    ENABLE_IF_FFTW_COMPLEX(typename fft::fftw_complex_type<T>::std_type*,NO_DEFAULT_TEMPLATES)    TYPENAME::std_cdata() {    \
-        return reinterpret_cast<typename fft::fftw_complex_type<T>::std_type*>(this->data());                           \
-    }                                                                                                                   \
-    TEMPLATES                                                                                                           \
-    ENABLE_IF_FFTW_COMPLEX(typename fft::fftw_complex_type<T>::fftw_type*,NO_DEFAULT_TEMPLATES)  TYPENAME::fftw_cdata() {    \
-        return reinterpret_cast<typename fft::fftw_complex_type<T>::fftw_type*>(this->data());                          \
-    }                                                                                                                   \
-    TEMPLATES                                                                                                           \
-    ENABLE_IF_FFTW_COMPLEX(typename fft::fftw_complex_type<T>::value_type*,NO_DEFAULT_TEMPLATES) TYPENAME::asRealData() {    \
-        return reinterpret_cast<typename fft::fftw_complex_type<T>::value_type*>(this->data());                         \
-    }                                                                                                                               
-
-/* View specific non const implementation */
-#define NON_CONST_VIEW_IMPL(TYPENAME,TEMPLATES)
-
-/* Reference specific non const implementation */
-#define NON_CONST_REF_IMPLEMENTATION(TYPENAME,TEMPLATES)                                                                             \
-    COMMON_NON_CONST_IMPLEMENTATION(SINGLE_ARG(TYPENAME),SINGLE_ARG(TEMPLATES))                                                      \
-    LOOP_DEPENDENT_NON_CONST_IMPLEMENTATION(SINGLE_ARG(TYPENAME),SINGLE_ARG(TEMPLATES),LOOP_OVER_ALL_REF_ELEMENTS,REF_DATA_ACCESS)   \
-    NON_CONST_REF_IMPL(SINGLE_ARG(TYPENAME),SINGLE_ARG(TEMPLATES))
-
-/* View specific non const implementation */
-#define NON_CONST_VIEW_IMPLEMENTATION(TYPENAME,TEMPLATES)                                                                            \
-    COMMON_NON_CONST_IMPLEMENTATION(SINGLE_ARG(TYPENAME),SINGLE_ARG(TEMPLATES))                                                      \
-    LOOP_DEPENDENT_NON_CONST_IMPLEMENTATION(SINGLE_ARG(TYPENAME),SINGLE_ARG(TEMPLATES),LOOP_OVER_ALL_VIEW_ELEMENTS,VIEW_DATA_ACCESS) \
-    NON_CONST_VIEW_IMPL(SINGLE_ARG(TYPENAME),SINGLE_ARG(TEMPLATES))
-
-
-#endif /* end of include guard: HYSOP_MULTI_ARRAY_DEFINES_H */
-
-#endif /* end of MULTI_ARRAY include guard */
diff --git a/src/hysop++/src/data/multi_array/multi_array_ext.h b/src/hysop++/src/data/multi_array/multi_array_ext.h
deleted file mode 100644
index da2f8f69634503ab22fddd11c9c82b7ce301125a..0000000000000000000000000000000000000000
--- a/src/hysop++/src/data/multi_array/multi_array_ext.h
+++ /dev/null
@@ -1,247 +0,0 @@
-
-#ifndef HYSOP_MULTI_ARRAY_H
-#include "data/multi_array/multi_array.h"
-#else 
-
-#ifndef HYSOP_MULTI_ARRAY_EXT_H
-#define HYSOP_MULTI_ARRAY_EXT_H
-
-namespace hysop {
-    namespace data {
-
-        /* distances */
-        template <typename T, std::size_t Dim> 
-            T distance_L1(const const_multi_array_ref<T,Dim> &lhs, const const_multi_array_ref<T,Dim> &rhs);
-        template <typename T, std::size_t Dim>
-            T distance_L2(const const_multi_array_ref<T,Dim> &lhs, const const_multi_array_ref<T,Dim> &rhs);
-        template <typename T, std::size_t Dim>
-            T distance_Linf(const const_multi_array_ref<T,Dim> &lhs, const const_multi_array_ref<T,Dim> &rhs);
-
-        /* unary operators */
-        template <typename T, std::size_t Dim>
-        multi_array<T,Dim> operator+(const const_multi_array_ref<T,Dim>& arr);
-        template <typename T, std::size_t Dim>
-        multi_array<T,Dim> operator-(const const_multi_array_ref<T,Dim>& arr);
-        template <std::size_t Dim>
-        multi_array<bool,Dim> operator~(const const_multi_array_ref<bool,Dim>& arr);
-
-        /* elementwise arithmetic operations */
-        template <typename T, std::size_t Dim>
-            multi_array<T,Dim> operator-(const const_multi_array_ref<T,Dim>& lhs, const const_multi_array_ref<T,Dim>& rhs);
-        template <typename T, std::size_t Dim>
-            multi_array<T,Dim> operator+(const const_multi_array_ref<T,Dim>& lhs, const const_multi_array_ref<T,Dim>& rhs);
-        template <typename T, std::size_t Dim>
-            multi_array<T,Dim> operator*(const const_multi_array_ref<T,Dim>& lhs, const const_multi_array_ref<T,Dim>& rhs);
-        template <typename T, std::size_t Dim>
-            multi_array<T,Dim> operator/(const const_multi_array_ref<T,Dim>& lhs, const const_multi_array_ref<T,Dim>& rhs);
-        template <typename T, std::size_t Dim>
-            multi_array<T,Dim> operator%(const const_multi_array_ref<T,Dim>& lhs, const const_multi_array_ref<T,Dim>& rhs);
-
-        /* elementwise boolean operations */
-        template <typename T, std::size_t Dim>
-            multi_array<bool,Dim> operator& (const const_multi_array_ref<T,Dim>& lhs, const const_multi_array_ref<T,Dim>& rhs);
-        template <typename T, std::size_t Dim>
-            multi_array<bool,Dim> operator| (const const_multi_array_ref<T,Dim>& lhs, const const_multi_array_ref<T,Dim>& rhs);
-        template <typename T, std::size_t Dim>
-            multi_array<bool,Dim> operator^ (const const_multi_array_ref<T,Dim>& lhs, const const_multi_array_ref<T,Dim>& rhs);
-
-        
-        /* element wise ordering test - near equality test for floating point types, see utils/utils.h::areEqual<T> */
-        template <typename T, std::size_t Dim>
-            multi_array<bool,Dim> operator==(const const_multi_array_ref<T,Dim>& lhs, const const_multi_array_ref<T,Dim>& rhs);
-        template <typename T, std::size_t Dim>
-            multi_array<bool,Dim> operator!=(const const_multi_array_ref<T,Dim>& lhs, const const_multi_array_ref<T,Dim>& rhs);
-        template <typename T, std::size_t Dim>
-            multi_array<bool,Dim> operator>=(const const_multi_array_ref<T,Dim>& lhs, const const_multi_array_ref<T,Dim>& rhs);
-        template <typename T, std::size_t Dim>
-            multi_array<bool,Dim> operator<=(const const_multi_array_ref<T,Dim>& lhs, const const_multi_array_ref<T,Dim>& rhs);
-        template <typename T, std::size_t Dim>
-            multi_array<bool,Dim> operator> (const const_multi_array_ref<T,Dim>& lhs, const const_multi_array_ref<T,Dim>& rhs);
-        template <typename T, std::size_t Dim>
-            multi_array<bool,Dim> operator< (const const_multi_array_ref<T,Dim>& lhs, const const_multi_array_ref<T,Dim>& rhs);
-
-
-
-        /* And all their view and rvalue references variants */
-        /* Ref    - Ref                       */
-        /* Ref    - View                      */
-        /* View   - Ref                       */
-        /* View   - View                      */
-        /*                                    */
-        /* Rvalue - View                      */
-        /* Rvalue - Ref                       */
-        /* View   - Rvalue                    */
-        /* Ref    - Rvalue                    */
-        /* ...                                */
-
-        
-        /* Implementation */
-        
-        #define CHECK_SHAPES(LHS,RHS) assert(LHS.shape() == RHS.shape())
-
-        #define BINARY_OP(TEMPLATES,T,R,RES,OPNAME,BINOP,LHS,RHS,LOOP_OVER_ALL_ELEMENTS,DATA_ACCESS,CREATE_BUFFER,FROM_BUFFER,RET_OP) \
-            TEMPLATES                                                                                                             \
-                RES OPNAME(LHS lhs, RHS rhs) {                                                                                    \
-                    CHECK_SHAPES(lhs,rhs);                                                                                        \
-                    CREATE_BUFFER(FROM_BUFFER,R);                                                                                 \
-                    LOOP_OVER_ALL_ELEMENTS(lhs) {                                                                                 \
-                        const T& lhsVal = DATA_ACCESS(lhs);                                                                       \
-                        const T& rhsVal = DATA_ACCESS(rhs);                                                                       \
-                        BINOP;                                                                                                    \
-                    }                                                                                                             \
-                    return RET_OP(BUFFER_NAME);                                                                                   \
-                }
-
-        #define BINARY_OP_VIEW_VIEW(TEMPLATES,T,R,RET,OPNAME,BINOP,CREATE_BUFFER,RET_OP)   \
-                BINARY_OP(SINGLE_ARG(TEMPLATES),SINGLE_ARG(T),SINGLE_ARG(R),SINGLE_ARG(RET),OPNAME,SINGLE_ARG(BINOP),\
-                        VIEW(T),VIEW(T),LOOP_OVER_ALL_VIEW_ELEMENTS,VIEW_DATA_ACCESS,CREATE_BUFFER,lhs,RET_OP)
-        #define BINARY_OP_VIEW_REF(TEMPLATES,T,R,RET,OPNAME,BINOP,CREATE_BUFFER,RET_OP)   \
-                BINARY_OP(SINGLE_ARG(TEMPLATES),SINGLE_ARG(T),SINGLE_ARG(R),SINGLE_ARG(RET),OPNAME,SINGLE_ARG(BINOP),\
-                        VIEW(T),REF(T),LOOP_OVER_ALL_VIEW_ELEMENTS,VIEW_DATA_ACCESS,CREATE_BUFFER,lhs,RET_OP)
-        #define BINARY_OP_REF_VIEW(TEMPLATES,T,R,RET,OPNAME,BINOP,CREATE_BUFFER,RET_OP)   \
-                BINARY_OP(SINGLE_ARG(TEMPLATES),SINGLE_ARG(T),SINGLE_ARG(R),SINGLE_ARG(RET),OPNAME,SINGLE_ARG(BINOP),\
-                        REF(T),VIEW(T),LOOP_OVER_ALL_VIEW_ELEMENTS,VIEW_DATA_ACCESS,CREATE_BUFFER,lhs,RET_OP)
-        #define BINARY_OP_REF_REF(TEMPLATES,T,R,RET,OPNAME,BINOP,CREATE_BUFFER,RET_OP)     \
-                BINARY_OP(SINGLE_ARG(TEMPLATES),SINGLE_ARG(T),SINGLE_ARG(R),SINGLE_ARG(RET),OPNAME,SINGLE_ARG(BINOP),\
-                        REF(T),REF(T),LOOP_OVER_ALL_REF_ELEMENTS,REF_DATA_ACCESS,CREATE_BUFFER,lhs,RET_OP)
-        #define BINARY_OP_REF_RVALUE(TEMPLATES,T,R,RET,OPNAME,BINOP,CREATE_BUFFER,RET_OP)  \
-                BINARY_OP(SINGLE_ARG(TEMPLATES),SINGLE_ARG(T),SINGLE_ARG(R),SINGLE_ARG(RET),OPNAME,SINGLE_ARG(BINOP),\
-                        REF(T),RVALUE(T),LOOP_OVER_ALL_REF_ELEMENTS,REF_DATA_ACCESS,CREATE_BUFFER,rhs,RET_OP)
-        #define BINARY_OP_RVALUE_REF(TEMPLATES,T,R,RET,OPNAME,BINOP,CREATE_BUFFER,RET_OP)  \
-                BINARY_OP(SINGLE_ARG(TEMPLATES),SINGLE_ARG(T),SINGLE_ARG(R),SINGLE_ARG(RET),OPNAME,SINGLE_ARG(BINOP),\
-                        RVALUE(T),REF(T),LOOP_OVER_ALL_REF_ELEMENTS,REF_DATA_ACCESS,CREATE_BUFFER,lhs,RET_OP)
-        #define BINARY_OP_VIEW_RVALUE(TEMPLATES,T,R,RET,OPNAME,BINOP,CREATE_BUFFER,RET_OP) \
-                BINARY_OP(SINGLE_ARG(TEMPLATES),SINGLE_ARG(T),SINGLE_ARG(R),SINGLE_ARG(RET),OPNAME,SINGLE_ARG(BINOP),\
-                        VIEW(T),RVALUE(T),LOOP_OVER_ALL_VIEW_ELEMENTS,VIEW_DATA_ACCESS,CREATE_BUFFER,rhs,RET_OP)
-        #define BINARY_OP_RVALUE_VIEW(TEMPLATES,T,R,RET,OPNAME,BINOP,CREATE_BUFFER,RET_OP) \
-                BINARY_OP(SINGLE_ARG(TEMPLATES),SINGLE_ARG(T),SINGLE_ARG(R),SINGLE_ARG(RET),OPNAME,SINGLE_ARG(BINOP),\
-                        RVALUE(T),VIEW(T),LOOP_OVER_ALL_VIEW_ELEMENTS,VIEW_DATA_ACCESS,CREATE_BUFFER,lhs,RET_OP)
-
-        #define LVALUE_BINARY_OPS(TEMPLATES,T,R,RET,OPNAME,BINOP,CREATE_BUFFER,RET_OP)                                         \
-                BINARY_OP_VIEW_VIEW(SINGLE_ARG(TEMPLATES),SINGLE_ARG(T),SINGLE_ARG(R),SINGLE_ARG(RET),OPNAME,SINGLE_ARG(BINOP),CREATE_BUFFER,RET_OP)   \
-                BINARY_OP_VIEW_REF(SINGLE_ARG(TEMPLATES),SINGLE_ARG(T),SINGLE_ARG(R),SINGLE_ARG(RET),OPNAME,SINGLE_ARG(BINOP),CREATE_BUFFER,RET_OP)    \
-                BINARY_OP_REF_VIEW(SINGLE_ARG(TEMPLATES),SINGLE_ARG(T),SINGLE_ARG(R),SINGLE_ARG(RET),OPNAME,SINGLE_ARG(BINOP),CREATE_BUFFER,RET_OP)    \
-                BINARY_OP_REF_REF(SINGLE_ARG(TEMPLATES),SINGLE_ARG(T),SINGLE_ARG(R),SINGLE_ARG(RET),OPNAME,SINGLE_ARG(BINOP),CREATE_BUFFER,RET_OP)
-
-        #define RVALUE_BINARY_OPS(TEMPLATES,T,R,RET,OPNAME,BINOP,CREATE_BUFFER,RET_OP)                                         \
-                BINARY_OP_REF_RVALUE(SINGLE_ARG(TEMPLATES),SINGLE_ARG(T),SINGLE_ARG(R),SINGLE_ARG(RET),OPNAME,SINGLE_ARG(BINOP),CREATE_BUFFER,RET_OP)  \
-                BINARY_OP_RVALUE_REF(SINGLE_ARG(TEMPLATES),SINGLE_ARG(T),SINGLE_ARG(R),SINGLE_ARG(RET),OPNAME,SINGLE_ARG(BINOP),CREATE_BUFFER,RET_OP)  \
-                BINARY_OP_VIEW_RVALUE(SINGLE_ARG(TEMPLATES),SINGLE_ARG(T),SINGLE_ARG(R),SINGLE_ARG(RET),OPNAME,SINGLE_ARG(BINOP),CREATE_BUFFER,RET_OP) \
-                BINARY_OP_RVALUE_VIEW(SINGLE_ARG(TEMPLATES),SINGLE_ARG(T),SINGLE_ARG(R),SINGLE_ARG(RET),OPNAME,SINGLE_ARG(BINOP),CREATE_BUFFER,RET_OP)
-      
-       
-        /* Code generator */
-        #define BUFFER_NAME macro_generated_local_buffer
-        #define CREATE_MA_BUFFER(FROM_MA,R) multi_array<R,Dim>  BUFFER_NAME(FROM_MA.shape())
-        #define CREATE_R_BUFFER(FROM_MA,R)  R BUFFER_NAME = R(0)
-        #define NO_MA_BUFFER(FROM_MA,R)     multi_array<R,Dim>& BUFFER_NAME = FROM_MA
-
-        #define SIMPLE_BINOP(OP)     DATA_ACCESS(BUFFER_NAME) = ((lhsVal) OP (rhsVal))
-        #define SIMPLE_FBINOP(FOP)   DATA_ACCESS(BUFFER_NAME) = FOP((lhsVal),(rhsVal))
-        #define SUP_OR_EQUAL()       DATA_ACCESS(BUFFER_NAME) = (((lhsVal) > (rhsVal)) || hysop::utils::areEqual<T>((rhsVal),(lhsVal)))
-        #define INF_OR_EQUAL()       DATA_ACCESS(BUFFER_NAME) = (((lhsVal) < (rhsVal)) || hysop::utils::areEqual<T>((rhsVal),(lhsVal)))
-        #define IDENTITY(X) (X)
-        
-        #define T1         SINGLE_ARG(template<std::size_t Dim>)
-        #define T1bis      SINGLE_ARG(template<std::size_t Dim, typename Allocator>)
-        #define T2         SINGLE_ARG(template<typename T, std::size_t Dim>)
-        #define T3         SINGLE_ARG(template<typename T, std::size_t Dim, typename Allocator>)
-        #define VIEW(T)    SINGLE_ARG(const const_multi_array_view<T,Dim>&)
-        #define REF(T)     SINGLE_ARG(const const_multi_array_ref<T,Dim>&)
-        #define RVALUE(T)  SINGLE_ARG(multi_array<T,Dim,Allocator>&&)
-        #define LVALUE(T)  SINGLE_ARG(multi_array<T,Dim,Allocator>)
-        #define DLVALUE(T) SINGLE_ARG(multi_array<T,Dim>)
-
-
-        /* distances */
-        LVALUE_BINARY_OPS(T2,T,T,T,distance_L1,\
-                const T val = std::abs<T>(rhsVal-lhsVal); BUFFER_NAME += val,\
-                CREATE_R_BUFFER,IDENTITY)
-        LVALUE_BINARY_OPS(T2,T,T,T,distance_L2,\
-                const T val = std::abs<T>(rhsVal-lhsVal); BUFFER_NAME += val*val,\
-                CREATE_R_BUFFER,std::sqrt)
-        LVALUE_BINARY_OPS(T2,T,T,T,distance_Linf,\
-                SINGLE_ARG(const T val = std::abs<T>(rhsVal-lhsVal); BUFFER_NAME = std::max<T>(BUFFER_NAME,val)),\
-                CREATE_R_BUFFER,IDENTITY)
-        
-        
-        /* elementwise arithmetic operations */
-        LVALUE_BINARY_OPS(T2,T,T,DLVALUE(T),operator+,SIMPLE_BINOP(+),CREATE_MA_BUFFER,IDENTITY)
-        LVALUE_BINARY_OPS(T2,T,T,DLVALUE(T),operator-,SIMPLE_BINOP(-),CREATE_MA_BUFFER,IDENTITY)
-        LVALUE_BINARY_OPS(T2,T,T,DLVALUE(T),operator*,SIMPLE_BINOP(*),CREATE_MA_BUFFER,IDENTITY)
-        LVALUE_BINARY_OPS(T2,T,T,DLVALUE(T),operator/,SIMPLE_BINOP(/),CREATE_MA_BUFFER,IDENTITY)
-        LVALUE_BINARY_OPS(T2,T,T,DLVALUE(T),operator%,SIMPLE_BINOP(%),CREATE_MA_BUFFER,IDENTITY)
-
-        RVALUE_BINARY_OPS(T3,T,T,LVALUE(T),operator+,SIMPLE_BINOP(+),NO_MA_BUFFER,IDENTITY)
-        RVALUE_BINARY_OPS(T3,T,T,LVALUE(T),operator-,SIMPLE_BINOP(-),NO_MA_BUFFER,IDENTITY)
-        RVALUE_BINARY_OPS(T3,T,T,LVALUE(T),operator*,SIMPLE_BINOP(*),NO_MA_BUFFER,IDENTITY)
-        RVALUE_BINARY_OPS(T3,T,T,LVALUE(T),operator/,SIMPLE_BINOP(/),NO_MA_BUFFER,IDENTITY)
-        RVALUE_BINARY_OPS(T3,T,T,LVALUE(T),operator%,SIMPLE_BINOP(%),NO_MA_BUFFER,IDENTITY)
-        
-        /* elementwise boolean like operations */
-        LVALUE_BINARY_OPS(T2,T,bool,DLVALUE(bool),operator&,SIMPLE_BINOP(&),CREATE_MA_BUFFER,IDENTITY)
-        LVALUE_BINARY_OPS(T2,T,bool,DLVALUE(bool),operator|,SIMPLE_BINOP(|),CREATE_MA_BUFFER,IDENTITY)
-        LVALUE_BINARY_OPS(T2,T,bool,DLVALUE(bool),operator^,SIMPLE_BINOP(^),CREATE_MA_BUFFER,IDENTITY)
-        
-        RVALUE_BINARY_OPS(T1bis,bool,bool,LVALUE(bool),operator&,SIMPLE_BINOP(&),NO_MA_BUFFER,IDENTITY)
-        RVALUE_BINARY_OPS(T1bis,bool,bool,LVALUE(bool),operator|,SIMPLE_BINOP(|),NO_MA_BUFFER,IDENTITY)
-        RVALUE_BINARY_OPS(T1bis,bool,bool,LVALUE(bool),operator^,SIMPLE_BINOP(^),NO_MA_BUFFER,IDENTITY)
-        
-        /* element wise ordering test - near equality test for floating point types, see utils/utils.h::areEqual<T> */
-        LVALUE_BINARY_OPS(T2,T,bool,DLVALUE(bool),operator==,SIMPLE_FBINOP(hysop::utils::areEqual<T>),CREATE_MA_BUFFER,IDENTITY)
-        LVALUE_BINARY_OPS(T2,T,bool,DLVALUE(bool),operator!=,SIMPLE_FBINOP(hysop::utils::areNotEqual<T>),CREATE_MA_BUFFER,IDENTITY)
-        LVALUE_BINARY_OPS(T2,T,bool,DLVALUE(bool),operator<,SIMPLE_BINOP(<),CREATE_MA_BUFFER,IDENTITY)
-        LVALUE_BINARY_OPS(T2,T,bool,DLVALUE(bool),operator>,SIMPLE_BINOP(>),CREATE_MA_BUFFER,IDENTITY)
-        LVALUE_BINARY_OPS(T2,T,bool,DLVALUE(bool),operator>=,SUP_OR_EQUAL(),CREATE_MA_BUFFER,IDENTITY)
-        LVALUE_BINARY_OPS(T2,T,bool,DLVALUE(bool),operator<=,INF_OR_EQUAL(),CREATE_MA_BUFFER,IDENTITY)
-        
-        /* Comparisson for booleans... */
-        RVALUE_BINARY_OPS(T1bis,bool,bool,LVALUE(bool),operator==,SIMPLE_BINOP(==),NO_MA_BUFFER,IDENTITY)
-        RVALUE_BINARY_OPS(T1bis,bool,bool,LVALUE(bool),operator!=,SIMPLE_BINOP(!=),NO_MA_BUFFER,IDENTITY)
-        RVALUE_BINARY_OPS(T1bis,bool,bool,LVALUE(bool),operator<,SIMPLE_BINOP(<),NO_MA_BUFFER,IDENTITY)
-        RVALUE_BINARY_OPS(T1bis,bool,bool,LVALUE(bool),operator>,SIMPLE_BINOP(>),NO_MA_BUFFER,IDENTITY)
-        RVALUE_BINARY_OPS(T1bis,bool,bool,LVALUE(bool),operator>=,SIMPLE_BINOP(>=),NO_MA_BUFFER,IDENTITY)
-        RVALUE_BINARY_OPS(T1bis,bool,bool,LVALUE(bool),operator<=,SIMPLE_BINOP(<=),NO_MA_BUFFER,IDENTITY)
-
-        /* clean macros */
-        #undef LVALUE_BINARY_OPS
-        #undef RVALUE_BINARY_OPS
-
-        #undef BINARY_OP_RVALUE_VIEW
-        #undef BINARY_OP_RVALUE_REF
-        #undef BINARY_OP_VIEW_RVALUE
-        #undef BINARY_OP_REF_RVALUE
-
-        #undef BINARY_OP_REF_REF
-        #undef BINARY_OP_REF_VIEW
-        #undef BINARY_OP_VIEW_REF
-        #undef BINARY_OP_VIEW_VIEW
-
-        #undef IDENTITY
-        #undef SIMPLE_BINOP
-        #undef SIMPLE_FBINOP
-        #undef BINARY_OP
-
-        #undef DLVALUE
-        #undef LVALUE
-        #undef RVALUE
-        #undef REF
-        #undef VIEW
-        #undef T3
-        #undef T2
-        #undef T1bis
-        #undef T1
-
-        #undef NO_MA_BUFFER
-        #undef CREATE_R_BUFFER
-        #undef CREATE_MA_BUFFER
-        #undef CHECK_SHAPES
-        #undef BUFFER_NAME
-
-
-    } /* end of namespace data */
-} /* end of namespace hysop */
-
-#endif /* end of include guard: HYSOP_MULTI_ARRAY_EXT_H */
-
-#endif /* end of MULTI_ARRAY include guard */
diff --git a/src/hysop++/src/data/multi_array/multi_array_impl.h b/src/hysop++/src/data/multi_array/multi_array_impl.h
deleted file mode 100644
index efb1b1c3bce475a56527d8734cb59ea2b4c47440..0000000000000000000000000000000000000000
--- a/src/hysop++/src/data/multi_array/multi_array_impl.h
+++ /dev/null
@@ -1,192 +0,0 @@
-
-#ifndef HYSOP_MULTI_ARRAY_H
-#include "data/multi_array/multi_array.h"
-#else 
-
-#ifndef HYSOP_MULTI_ARRAY_IMPL_H
-#define HYSOP_MULTI_ARRAY_IMPL_H
-
-namespace hysop {
-    namespace data {
-
-        /* class hysop::data::multi_array */
-        template <typename T, std::size_t Dim, typename Allocator>
-            class multi_array : public boost_multi_array<T,Dim,Allocator> {
-                static_assert(Dim>0, "Dim cannot be zero !");
-
-                private:
-                    using super = boost_multi_array<T,Dim,Allocator>;
-                public:
-                    PUBLIC_CLASS_TYPES()
-
-                public:
-                    multi_array(const extents_gen<Dim>& extents = extents_gen<Dim>());
-                    multi_array(const typename Shape<Dim>::type& shape);
-
-                    multi_array(const multi_array& other);
-                    multi_array(multi_array&& other);
-
-                    explicit multi_array(const array_ref& other);
-                    explicit multi_array(const array_view& other);
-                    explicit multi_array(const const_array_ref& other);
-                    explicit multi_array(const const_array_view& other);
-                   
-                    explicit multi_array(const boost_multi_array<T,Dim,Allocator>& other);
-                    explicit multi_array(const boost_multi_array_ref<T,Dim>& other);
-                    explicit multi_array(const boost_multi_array_view<T,Dim>& other);
-                    explicit multi_array(const boost_const_multi_array_ref<T,Dim>& other);
-                    explicit multi_array(const boost_const_multi_array_view<T,Dim>& other);
-                    explicit multi_array(boost_multi_array<T,Dim,Allocator>&& other);
-
-                    multi_array& operator=(const multi_array& other);
-                    multi_array& operator=(const array_ref& ref);
-                    multi_array& operator=(const array_view& view);
-                    multi_array& operator=(const const_array_ref& ref);
-                    multi_array& operator=(const const_array_view& view);
-                    multi_array& operator=(multi_array&& other);
-
-                    operator array_ref();
-                    operator const_array_ref() const;
-
-                public:
-                    PUBLIC_CONST_REF_INTERFACE(SINGLE_ARG(multi_array<T,Dim,Allocator>))
-                    PUBLIC_NON_CONST_REF_INTERFACE(SINGLE_ARG(multi_array<T,Dim,Allocator>))
-        
-                    multi_array& reshape(const typename Shape<Dim>::type& shape);
-
-                protected:
-                    static extents_gen<Dim> shapeToExtents(const typename Shape<Dim>::type &shape);
-            };
-
-
-        /* Implementation */
-        
-        /* constructors */
-        template <typename T, std::size_t Dim, typename Allocator> 
-        multi_array<T,Dim,Allocator>::multi_array(const extents_gen<Dim>& extents):
-            boost_multi_array<T,Dim,Allocator>(extents) {}
-                    
-        template <typename T, std::size_t Dim, typename Allocator> 
-        multi_array<T,Dim,Allocator>::multi_array(const typename Shape<Dim>::type& shape):
-            boost_multi_array<T,Dim,Allocator>(shapeToExtents(shape)) {}
-        
-        template <typename T, std::size_t Dim, typename Allocator> 
-        multi_array<T,Dim,Allocator>::multi_array(const multi_array& other):
-            boost_multi_array<T,Dim,Allocator>(static_cast<const boost_multi_array<T,Dim,Allocator>&>(other)) {}
-        
-        template <typename T, std::size_t Dim, typename Allocator> 
-        multi_array<T,Dim,Allocator>::multi_array(const array_view& other):
-            boost_multi_array<T,Dim,Allocator>(static_cast<const boost_multi_array_view<T,Dim>&>(other)) {}
-        
-        template <typename T, std::size_t Dim, typename Allocator> 
-        multi_array<T,Dim,Allocator>::multi_array(const const_array_view& other):
-            boost_multi_array<T,Dim,Allocator>(static_cast<const boost_const_multi_array_view<T,Dim>&>(other)) {}
-        
-        template <typename T, std::size_t Dim, typename Allocator> 
-        multi_array<T,Dim,Allocator>::multi_array(const array_ref& other):
-            boost_multi_array<T,Dim,Allocator>(static_cast<const boost_multi_array_ref<T,Dim>&>(other)) {}
-        
-        template <typename T, std::size_t Dim, typename Allocator> 
-        multi_array<T,Dim,Allocator>::multi_array(const const_array_ref& other):
-            boost_multi_array<T,Dim,Allocator>(static_cast<const boost_const_multi_array_ref<T,Dim>&>(other)) {}
-        
-        template <typename T, std::size_t Dim, typename Allocator> 
-        multi_array<T,Dim,Allocator>::multi_array(multi_array&& other):
-            boost_multi_array<T,Dim,Allocator>(static_cast<boost_multi_array<T,Dim,Allocator>&&>(other)) {}
-        
-        template <typename T, std::size_t Dim, typename Allocator> 
-        multi_array<T,Dim,Allocator>::multi_array(const boost_multi_array_view<T,Dim>& other):
-            boost_multi_array<T,Dim,Allocator>(other) {}
-        
-        template <typename T, std::size_t Dim, typename Allocator> 
-        multi_array<T,Dim,Allocator>::multi_array(const boost_const_multi_array_view<T,Dim>& other):
-            boost_multi_array<T,Dim,Allocator>(other) {}
-        
-        template <typename T, std::size_t Dim, typename Allocator> 
-        multi_array<T,Dim,Allocator>::multi_array(const boost_multi_array_ref<T,Dim>& other):
-            boost_multi_array<T,Dim,Allocator>(other) {}
-        
-        template <typename T, std::size_t Dim, typename Allocator> 
-        multi_array<T,Dim,Allocator>::multi_array(const boost_const_multi_array_ref<T,Dim>& other):
-            boost_multi_array<T,Dim,Allocator>(other) {}
-        
-        template <typename T, std::size_t Dim, typename Allocator> 
-        multi_array<T,Dim,Allocator>::multi_array(boost_multi_array<T,Dim,Allocator>&& other):
-            boost_multi_array<T,Dim,Allocator>(other) {}
-
-        
-        /* operator = */
-        /* cast obligatory to avoid shape() function aliasing */
-        template <typename T, std::size_t Dim, typename Allocator> 
-            multi_array<T,Dim,Allocator>& multi_array<T,Dim,Allocator>::operator=(const multi_array<T,Dim,Allocator>& other) {
-                this->reshape(other.shape());
-                super::operator=(dynamic_cast<const boost_multi_array<T,Dim,Allocator>&>(other));
-                return *this;
-            }
-        template <typename T, std::size_t Dim, typename Allocator> 
-            multi_array<T,Dim,Allocator>& multi_array<T,Dim,Allocator>::operator=(const array_view& other) {
-                this->reshape(other.shape());
-                super::operator=(dynamic_cast<const boost_multi_array_view<T,Dim>&>(other));
-                return *this;
-            }
-        template <typename T, std::size_t Dim, typename Allocator> 
-            multi_array<T,Dim,Allocator>& multi_array<T,Dim,Allocator>::operator=(const const_array_view& other) {
-                this->reshape(other.shape());
-                super::operator=(dynamic_cast<const boost_const_multi_array_view<T,Dim>&>(other));
-                return *this;
-            }
-        template <typename T, std::size_t Dim, typename Allocator> 
-            multi_array<T,Dim,Allocator>& multi_array<T,Dim,Allocator>::operator=(const array_ref& other) {
-                this->reshape(other.shape());
-                super::operator=(dynamic_cast<const boost_multi_array_ref<T,Dim>&>(other));
-                return *this;
-            }
-        template <typename T, std::size_t Dim, typename Allocator> 
-            multi_array<T,Dim,Allocator>& multi_array<T,Dim,Allocator>::operator=(const const_array_ref& other) {
-                this->reshape(other.shape());
-                super::operator=(dynamic_cast<const boost_const_multi_array_ref<T,Dim>&>(other));
-                return *this;
-            }
-        template <typename T, std::size_t Dim, typename Allocator> 
-            multi_array<T,Dim,Allocator>& multi_array<T,Dim,Allocator>::operator=(multi_array&& other) {
-                super::operator=(other);
-                return *this;
-            }
-
-        /* casting operators */
-        template <typename T, std::size_t Dim, typename Allocator> 
-            multi_array<T,Dim,Allocator>::operator multi_array_ref<T,Dim>() {
-                return static_cast<boost_multi_array_ref<T,Dim>>(*this);
-            }
-        template <typename T, std::size_t Dim, typename Allocator> 
-            multi_array<T,Dim,Allocator>::operator const_multi_array_ref<T,Dim>() const {
-                return static_cast<boost_const_multi_array_ref<T,Dim>>(*this);
-            }
-
-        /* static members */
-        template <typename T, std::size_t Dim, typename Allocator> 
-        typename multi_array<T,Dim,Allocator>::template extents_gen<Dim> multi_array<T,Dim,Allocator>::shapeToExtents(const typename Shape<Dim>::type &shape) {
-            return utils::buildExtents(shape);
-        }
-
-        /* multiarray const & non const reference implementation */
-        CONST_REF_IMPLEMENTATION(SINGLE_ARG(multi_array<T,Dim,Allocator>), SINGLE_ARG(template <typename T, std::size_t Dim, typename Allocator>)) 
-        NON_CONST_REF_IMPLEMENTATION(SINGLE_ARG(multi_array<T,Dim,Allocator>), SINGLE_ARG(template <typename T, std::size_t Dim, typename Allocator>))
-        
-        /* multi array specific */
-        template <typename T, std::size_t Dim, typename Allocator> 
-            multi_array<T,Dim,Allocator>& multi_array<T,Dim,Allocator>::reshape(const typename Shape<Dim>::type& shape) { 
-                boost::array<int,Dim> extents;
-                for (std::size_t d = 0; d < Dim; d++)
-                    extents[d] = static_cast<int>(shape[d]);
-                this->resize(extents);
-                return *this;
-            }
-
-
-    } /* end of namespace data */ 
-} /* end of namespace hysop */
-
-#endif /* end of include guard: HYSOP_MULTI_ARRAY_IMPL_H */
-
-#endif /* end of MULTI_ARRAY include guard */
diff --git a/src/hysop++/src/data/multi_array/multi_array_ref.h b/src/hysop++/src/data/multi_array/multi_array_ref.h
deleted file mode 100644
index 80418513624fb1c76b1ea725b4aed41fa06a390f..0000000000000000000000000000000000000000
--- a/src/hysop++/src/data/multi_array/multi_array_ref.h
+++ /dev/null
@@ -1,70 +0,0 @@
-
-#ifndef HYSOP_MULTI_ARRAY_H
-#include "data/multi_array/multi_array.h"
-#else 
-
-#ifndef HYSOP_MULTI_ARRAY_REF_H
-#define HYSOP_MULTI_ARRAY_REF_H
-
-namespace hysop {
-    namespace data {
-
-        /* class hysop::data::multi_array */
-        template <typename T, std::size_t Dim>
-            class multi_array_ref : public boost_multi_array_ref<T,Dim> {
-                static_assert(Dim>0, "Dim cannot be zero !");
-
-                private:
-                    using super = boost_multi_array_ref<T,Dim>;
-                public:
-                    PUBLIC_CLASS_TYPES()
-
-                public:
-                    multi_array_ref(T* base=nullptr, const extents_gen<Dim>& ranges = hysop::utils::buildExtents(std::array<std::size_t,Dim>{0}));
-                    multi_array_ref(const multi_array_ref<T,Dim>& ref) = default;
-                    multi_array_ref& operator=(const multi_array_ref<T,Dim>& other) = default;
-                    
-                    multi_array_ref(const boost_multi_array_ref<T,Dim>& ref);
-                    multi_array_ref& operator=(const boost_multi_array_ref<T,Dim>& other);
-  
-
-                    operator const_array_ref() const;
-
-                public:
-                    PUBLIC_CONST_REF_INTERFACE(SINGLE_ARG(multi_array_ref<T,Dim>))
-                    PUBLIC_NON_CONST_REF_INTERFACE(SINGLE_ARG(multi_array_ref<T,Dim>))
-                };
-
-
-        /* Implementation */
-        template <typename T, std::size_t Dim>
-        multi_array_ref<T,Dim>::multi_array_ref(T* base, const extents_gen<Dim>& ranges):
-            super(base,ranges) {
-        }
-                    
-        template <typename T, std::size_t Dim>
-        multi_array_ref<T,Dim>::multi_array_ref(const boost_multi_array_ref<T,Dim>& ref) :
-            super(ref) {
-        }
-       
-        template <typename T, std::size_t Dim>
-        multi_array_ref<T,Dim>& multi_array_ref<T,Dim>::operator=(const boost_multi_array_ref<T,Dim>& other) {
-            super::operator=(other);
-            return *this;
-        }
-                    
-        template <typename T, std::size_t Dim>
-        multi_array_ref<T,Dim>::operator const_array_ref() const {
-            return static_cast<boost_const_multi_array_ref<T,Dim>>(*this);
-        }
-
-        CONST_REF_IMPLEMENTATION(SINGLE_ARG(multi_array_ref<T,Dim>), SINGLE_ARG(template <typename T, std::size_t Dim>))
-        NON_CONST_REF_IMPLEMENTATION(SINGLE_ARG(multi_array_ref<T,Dim>), SINGLE_ARG(template <typename T, std::size_t Dim>))
-        
-    } /* end of namespace data */ 
-} /* end of namespace hysop */
-
-
-#endif /* end of include guard: HYSOP_MULTI_ARRAY_REF_H */
-
-#endif /* end of MULTI_ARRAY include guard */
diff --git a/src/hysop++/src/data/multi_array/multi_array_view.h b/src/hysop++/src/data/multi_array/multi_array_view.h
deleted file mode 100644
index d2931c9b0d95af8ad7208e1e3f6801efdb42d532..0000000000000000000000000000000000000000
--- a/src/hysop++/src/data/multi_array/multi_array_view.h
+++ /dev/null
@@ -1,72 +0,0 @@
-
-#ifndef HYSOP_MULTI_ARRAY_H
-#include "data/multi_array/multi_array.h"
-#else 
-
-#ifndef HYSOP_MULTI_ARRAY_VIEW_H
-#define HYSOP_MULTI_ARRAY_VIEW_H
-
-namespace hysop {
-    namespace data {
-
-        /* class hysop::data::multi_array */
-        template <typename T, std::size_t Dim>
-            class multi_array_view : public boost_multi_array_view<T,Dim> {
-                static_assert(Dim>0, "Dim cannot be zero !");
-
-                private:
-                    using super = boost_multi_array_view<T,Dim>;
-                public:
-                    PUBLIC_CLASS_TYPES()
-
-                public:
-                    multi_array_view(const multi_array_view<T,Dim>& view) = default;
-                    multi_array_view& operator=(const multi_array_view<T,Dim>& other) = default;
-                    
-                    multi_array_view(const boost_multi_array_view<T,Dim>& view);
-                    multi_array_view& operator=(const boost_multi_array_view<T,Dim>& other);
-
-                    operator const_array_view() const;
-
-                public:
-                    PUBLIC_CONST_VIEW_INTERFACE(SINGLE_ARG(multi_array_view<T,Dim>))
-                    PUBLIC_NON_CONST_VIEW_INTERFACE(SINGLE_ARG(multi_array_view<T,Dim>))
-                };
-
-
-        /* Implementation */
-
-
-// remove anoying boost warning 
-#if defined(__GNUG__) and !defined(__clang__)
-#pragma GCC diagnostic push 
-#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
-#endif
-        template <typename T, std::size_t Dim>
-        multi_array_view<T,Dim>::multi_array_view(const boost_multi_array_view<T,Dim>& view) :
-            super(view) {
-        }
-#if defined(__GNUG__) and !defined(__clang__)
-#pragma GCC diagnostic pop
-#endif
-       
-        template <typename T, std::size_t Dim>
-        multi_array_view<T,Dim>& multi_array_view<T,Dim>::operator=(const boost_multi_array_view<T,Dim>& other) {
-            super::operator=(other);
-            return *this;
-        }
-                    
-        template <typename T, std::size_t Dim>
-        multi_array_view<T,Dim>::operator const_array_view() const {
-            return static_cast<boost_const_multi_array_view<T,Dim>>(*this);
-        }
-
-        CONST_VIEW_IMPLEMENTATION(SINGLE_ARG(multi_array_view<T,Dim>), SINGLE_ARG(template <typename T, std::size_t Dim>))
-        NON_CONST_VIEW_IMPLEMENTATION(SINGLE_ARG(multi_array_view<T,Dim>), SINGLE_ARG(template <typename T, std::size_t Dim>))
-        
-    } /* end of namespace data */ 
-} /* end of namespace hysop */
-
-#endif /* end of include guard: HYSOP_MULTI_ARRAY_VIEW_H */
-
-#endif /* end of MULTI_ARRAY include guard */
diff --git a/src/hysop++/src/detail/index_seq.h b/src/hysop++/src/detail/index_seq.h
deleted file mode 100644
index e324244ab5e3567b2e0733535a381968ffe54711..0000000000000000000000000000000000000000
--- a/src/hysop++/src/detail/index_seq.h
+++ /dev/null
@@ -1,42 +0,0 @@
-
-#ifndef HYSOP_INDEX_SEQ_H
-#define HYSOP_INDEX_SEQ_H
-
-namespace hysop {
-    namespace detail {
-
-        template <int...>
-            struct index_seq {};
-        
-        template <int k, std::size_t d, int... I>
-            struct constant_seq_impl {
-                typedef typename constant_seq_impl<k,d-1,k,I...>::type type;
-            };
-        
-        template <int k, int... I>
-            struct constant_seq_impl<k,0,I...> {
-                typedef index_seq<I...> type;
-            };
-
-        template <std::size_t count, int step, int current, int... I>
-            struct index_seq_impl {
-                typedef typename index_seq_impl<count-1,step,current+step,I...,current>::type type;
-            };
-        template <int step, int current, int... I>
-            struct index_seq_impl<0,step,current,I...> {
-                typedef index_seq<I...> type;
-            };
-        
-        
-        template <std::size_t count, int i0=0, int step=1>
-            using index_seq_gen = typename index_seq_impl<count,step,i0>::type;
-        
-        template <int constant, std::size_t count>
-            using constant_seq_gen = typename constant_seq_impl<constant,count>::type;
-    }
-}
-
-
-
-#endif /* end of include guard: HYSOP_INDEX_SEQ_H */
-
diff --git a/src/hysop++/src/domain/boundary.cpp b/src/hysop++/src/domain/boundary.cpp
deleted file mode 100644
index 4a3f862af508f93048a8f94bbc7c4b5c77a98832..0000000000000000000000000000000000000000
--- a/src/hysop++/src/domain/boundary.cpp
+++ /dev/null
@@ -1,26 +0,0 @@
-
-#include "domain/boundary.h"
-
-namespace hysop {
-    namespace domain {
-
-        static const char* boundary_strings[6] = {
-            "NONE",
-            "HOMOGENEOUS_DIRICHLET",
-            "HOMOGENEOUS_NEUMANN",
-            "DIRICHLET",
-            "NEUMANN",
-            "PERIODIC"
-        };
-
-        const char* toStringBoundary(Boundary bd) {
-            return boundary_strings[static_cast<int>(bd)+1];
-        }
-
-        std::ostream& operator<<(std::ostream& os, const Boundary& bd) {
-            os << toStringBoundary(bd);
-            return os;
-        }
-
-    }
-}
diff --git a/src/hysop++/src/domain/boundary.h b/src/hysop++/src/domain/boundary.h
deleted file mode 100644
index 9320085e00b5ccb3145f3c8cff58fab8c2b58db1..0000000000000000000000000000000000000000
--- a/src/hysop++/src/domain/boundary.h
+++ /dev/null
@@ -1,23 +0,0 @@
-#ifndef HYSOP_BOUNDARY_H
-#define HYSOP_BOUNDARY_H
-
-#include <iostream>
-
-namespace hysop {
-    namespace domain {
-
-        enum class Boundary : int {
-            NONE                  = -1,
-            HOMOGENEOUS_DIRICHLET = 0,
-            HOMOGENEOUS_NEUMANN   = 1,
-            DIRICHLET             = 2,
-            NEUMANN               = 3,
-            PERIODIC              = 4
-        };
-
-        const char* toStringBoundary(Boundary bd);
-        std::ostream& operator<<(std::ostream& os, const Boundary& bd);
-    }
-}
-
-#endif /* end of include guard: HYSOP_BOUNDARY_H */
diff --git a/src/hysop++/src/domain/domain.h b/src/hysop++/src/domain/domain.h
deleted file mode 100644
index 70d860eceaae35d5f5ac2dccadf1a0271458ce6b..0000000000000000000000000000000000000000
--- a/src/hysop++/src/domain/domain.h
+++ /dev/null
@@ -1,154 +0,0 @@
-
-#ifndef HYSOP_DOMAIN_H
-#define HYSOP_DOMAIN_H
-
-#include "data/multi_array/multi_array.h"
-#include "utils/types.h"
-#include "utils/utils.h"
-#include "fft/fftDomainConfiguration.h"
-
-namespace hysop {
-    namespace domain {
-        
-        template <typename T, std::size_t Dim>
-            class Domain;
-        
-        template <typename T, std::size_t Dim>
-            std::ostream& operator<< (std::ostream& os, const Domain<T,Dim>& domain);
-
-        template <typename T, std::size_t Dim>
-            class Domain {
-                public:
-                    using DomainSize    = std::array<T, Dim>;
-                    using SpaceStep     = std::array<T, Dim>;
-                    using SpaceVariable = std::array<T, Dim>;
-
-                public:
-                    Domain() :
-                        m_shape{0}, m_dataShape{0}, m_leftDataOffset{0},
-                        m_domainConfig(), m_domainSize{0}, m_spaceStep{0}, m_data() {}
-
-                    Domain(const typename Shape<Dim>::type& p_shape, const DomainConfiguration<Dim>& p_domainConfig, const DomainSize& p_domainSize) :
-                        m_shape{0}, m_dataShape{0}, m_leftDataOffset{0},
-                        m_domainConfig(p_domainConfig), m_domainSize(p_domainSize), 
-                        m_spaceStep{0}, m_data() {
-                            this->reshape(p_shape);
-                        }
-    
-                    Domain(Domain<T,Dim>&& other)                        = default;
-                    Domain<T,Dim>& operator=(const Domain<T,Dim>& other) = default; 
-                    Domain<T,Dim>& operator=(Domain<T,Dim>&& other)      = default;
-
-                    Domain<T,Dim>& reshape(const typename Shape<Dim>::type& p_domainShape) {
-                        m_shape = p_domainShape;
-                        m_domainConfig.getShapeConfiguration(m_shape, m_dataShape, m_leftDataOffset);
-                        m_data.reshape(m_dataShape);
-                        this->computeSpaceStep();
-                        return *this;
-                    }
-
-                    Domain<T,Dim>& resize(const DomainSize& p_size) {
-                        m_domainSize = p_size;
-                        this->computeSpaceStep();
-                        return *this;
-                    }
-
-                    Domain<T,Dim>& resetDomainConfiguration(const DomainConfiguration<Dim>& p_domainConfig) {
-                        m_domainConfig = p_domainConfig;
-                        this->reshape(m_shape);
-                        return *this;
-                    }
-
-                    Domain<T,Dim>& print(const std::string &p_name) {
-                        m_data.print(p_name);
-                        return *this;
-                    }
-                    
-                    fft::FftDomainConfiguration<Dim> fftDomainConfiguration() const { 
-                        return fft::FftDomainConfiguration<Dim>(m_domainConfig); 
-                    }
-
-                    const typename Shape<Dim>::type&              shape()          const { return m_shape; }
-                    const typename Shape<Dim>::type&              dataShape()      const { return m_dataShape; }
-                    const SpaceStep&               spaceStep()      const { return m_spaceStep; }
-                    const DomainSize&              domainSize()     const { return m_domainSize; }
-                    const typename Offset<Dim>::type&             leftDataOffset() const { return m_leftDataOffset; }
-                    const DomainConfiguration<Dim> boundaryConfig() const { return m_domainConfig; }
-
-                    const hysop::multi_array<T,Dim>&    data() const { return m_data; }
-                          hysop::multi_array_ref<T,Dim> data()       { return m_data; } 
-
-                    
-                    /* Apply f(X, fargs...) on the whole domain where X = [x_0, x_1, ..., x_{Dim-1}] is the space variable */
-                    /* The result of the Functor f should be convertible to domain real data type T                        */
-                    template <typename Functor, typename... Args>
-                    Domain<T,Dim>& apply(const Functor& f,  Args&&... fargs);
-    
-                    T distance_L1(const Domain<T,Dim> &other) {
-                        assert(this->dataShape() == other.dataShape());
-                        return hysop::data::distance_L1<T,Dim>(this->data(), other.data());
-                    }
-                    T distance_L2(const Domain<T,Dim> &other) {
-                        assert(this->dataShape() == other.dataShape());
-                        return hysop::data::distance_L2<T,Dim>(this->data(), other.data());
-                    }
-                    T distance_Linf(const Domain<T,Dim> &other) {
-                        assert(this->dataShape() == other.dataShape());
-                        return hysop::data::distance_Linf<T,Dim>(this->data(), other.data());
-                    }
-                    std::tuple<T,T,T> distance(const Domain<T,Dim>& other) {
-                        return std::tuple<T,T,T>(distance_L1(other), distance_L2(other), distance_Linf(other));
-                    }
-
-                protected:
-                    void computeSpaceStep() {
-                        for (std::size_t d = 0; d < Dim; d++) {
-                            //std::size_t N = ((m_domainConfig[d].first==domain::Boundary::PERIODIC && !m_domainConfig.includePeriodicBoundaries()) ? m_shape[d]-1 : m_shape[d]-1);
-                            m_spaceStep[d] = m_domainSize[d] / (m_shape[d]-1);
-                        }
-                    }
-
-                protected:
-                    typename Shape<Dim>::type  m_shape, m_dataShape;
-                    typename Offset<Dim>::type m_leftDataOffset;
-
-                    DomainConfiguration<Dim> m_domainConfig;
-                    DomainSize m_domainSize;
-                    SpaceStep  m_spaceStep;
-
-                    hysop::multi_array<T, Dim> m_data;
-            };
-                    
-        /* Apply f(X, args...) on the whole domain where X = [x_0, x_1, ..., x_{Dim-1}] is the space variable */
-        template <typename T, std::size_t Dim>
-        template <typename Functor, typename... Args>
-            Domain<T,Dim>& Domain<T,Dim>::apply(const Functor& f,  Args&&... fargs) {
-                hysop::Index<Dim> idx(m_dataShape);
-                std::array<T,Dim> X{0};
-                T* data = m_data.origin();
-                for (std::size_t k = 0; k < m_data.num_elements(); k++) {
-                    data[k] = static_cast<T>(f(X, std::forward<Args>(fargs)...));
-                    ++idx;
-                    for (std::size_t d = 0; d < Dim; d++)
-                        X[d] = (idx[d]+m_leftDataOffset[d])*m_spaceStep[d];
-                }
-                return *this;
-            }
-        
-        template <typename T, std::size_t Dim>
-            std::ostream& operator<< (std::ostream& os, const Domain<T,Dim>& domain) {
-                os << "== Domain ==" << std::endl;
-                os << "\tShape     : " << domain.shape() << std::endl;
-                os << "\tSize      : " << domain.domainSize() << std::endl;
-                os << "\tSpaceStep : " << domain.spaceStep() << std::endl;
-                os << domain.boundaryConfig();
-                os << "\tLeftDataOffset: " << domain.leftDataOffset() << std::endl;
-                os << "\tDataShape     : " << domain.dataShape() << std::endl;
-                return os;
-            }
-
-    }
-}
-
-#endif /* end of include guard: HYSOP_DOMAIN_H */
-
diff --git a/src/hysop++/src/domain/domainConfiguration.h b/src/hysop++/src/domain/domainConfiguration.h
deleted file mode 100644
index 3f1c7ded73e54e653af9f971fa7ba256e4ba5eee..0000000000000000000000000000000000000000
--- a/src/hysop++/src/domain/domainConfiguration.h
+++ /dev/null
@@ -1,100 +0,0 @@
-
-#ifndef HYSOP_DOMAIN_CONFIGURATION_H
-#define HYSOP_DOMAIN_CONFIGURATION_H
-
-#include <array>
-#include <stdexcept>
-
-#include "utils/types.h"
-#include "domain/boundary.h"
-#include "detail/index_seq.h"
-
-namespace hysop {
-    namespace domain {
-        
-        template <std::size_t Dim>
-            class DomainConfiguration;
-
-        template <std::size_t Dim>
-            std::ostream& operator<<(std::ostream& os, const DomainConfiguration<Dim>& config);
-        
-        template <std::size_t Dim>
-            class DomainConfiguration {
-
-                public:
-                    typedef std::pair<Boundary,Boundary> BoundaryPair;
-                    typedef std::array<BoundaryPair,Dim> BoundaryArray;
-                public: 
-                    DomainConfiguration(const BoundaryArray& p_boundaries = defaultDomainBoundaries(), 
-                            //bool p_includeDirichletBoundaries = true,
-                            bool p_includePeriodicBoundaries  = true) :
-                        m_boundaries(p_boundaries),
-                        //m_includeDirichletBoundaries(p_includeDirichletBoundaries),
-                        m_includePeriodicBoundaries(p_includePeriodicBoundaries) {
-                            checkBoundaries();
-                        }
-                
-                    const BoundaryArray& boundaries() const { return m_boundaries;                 }
-                    //bool includeDirichletBoundaries() const { return m_includeDirichletBoundaries; }
-                    bool includePeriodicBoundaries()  const { return m_includePeriodicBoundaries;  }
-
-                    const BoundaryPair& operator[](std::size_t k) const { return m_boundaries[k]; }
-
-                    void getShapeConfiguration(const typename Shape<Dim>::type &p_fullShape, typename Shape<Dim>::type &p_realShape, typename Offset<Dim>::type &p_leftOffset) const {
-                        for (std::size_t d = 0; d < Dim; d++) {
-                            const BoundaryPair& pair = m_boundaries[d];
-                            //bool hasDirichletLeftOffset  = (pair.first  == DIRICHLET || pair.first  == HOMOGENEOUS_DIRICHLET);
-                            //bool hasDirichletRightOffset = (pair.second == DIRICHLET || pair.second == HOMOGENEOUS_DIRICHLET);
-                            //std::size_t dirichletLeftOffset  = hasDirichletLeftOffset  && !this->includeDirichletBoundaries();
-                            //std::size_t dirichletRightOffset = hasDirichletRightOffset && !this->includeDirichletBoundaries();
-                            bool hasPeriodicRightOffset  = (pair.second == Boundary::PERIODIC);
-                            std::size_t periodicRightOffset  = hasPeriodicRightOffset  && !this->includePeriodicBoundaries();
-                            std::size_t leftOffset  = 0;
-                            std::size_t rightOffset = periodicRightOffset;
-                            if(p_fullShape[d] <= (leftOffset + rightOffset))
-                                throw std::runtime_error("Domain shape is to small on axe " + std::to_string(d) + " for prescribed boundaries !");
-                            p_leftOffset[d] = std::ptrdiff_t(leftOffset);
-                            p_realShape[d]  = p_fullShape[d] - leftOffset - rightOffset;
-                        }
-                    }
-
-                protected:
-                    void checkBoundaries() const {
-                        for (std::size_t d = 0; d < Dim; d++) {
-                            const BoundaryPair& pair = m_boundaries[d];
-                            if((pair.first == Boundary::PERIODIC) ^ (pair.second == Boundary::PERIODIC))
-                                throw std::runtime_error("Bad boundaries configuration on axe " + std::to_string(d) + " !");
-                        }
-                    }
-
-                    #ifndef SWIG
-                        template <int... I>
-                        static const std::array<BoundaryPair, Dim> defaultDomainBoundariesImpl(hysop::detail::index_seq<I...>) {
-                            const BoundaryPair defaultVal[1] = { std::make_pair(Boundary::NONE,Boundary::NONE) };
-                            return { defaultVal[I]..., };
-                        }
-                        static const std::array<BoundaryPair, Dim> defaultDomainBoundaries() {
-                            return defaultDomainBoundariesImpl(hysop::detail::constant_seq_gen<0,Dim>());
-                        }
-                    #endif 
-
-                protected:
-                    BoundaryArray  m_boundaries;
-                    bool m_includePeriodicBoundaries;
-                    //bool m_includeDirichletBoundaries;
-            };
-        
-        template <std::size_t Dim>
-            std::ostream& operator<<(std::ostream& os, const DomainConfiguration<Dim>& config) {
-                os << "== DomainConfiguration<Dim=" << std::to_string(Dim) << "> ==" << std::endl;
-                for (std::size_t d = 0; d < Dim; d++)
-                    os << "\taxe[" << d << "]: " << config[d].first << "/" << config[d].second << std::endl;
-                //os << "\tDirichlet boundaries included ? " << std::boolalpha << config.includeDirichletBoundaries() << std::endl;
-                os << "\tPeriodic  boundaries included ? " << std::boolalpha << config.includePeriodicBoundaries()  << std::endl;
-                return os;
-            }
-
-    }
-}
-
-#endif /* end of include guard: HYSOP_DOMAIN_CONFIGURATION_H */
diff --git a/src/hysop++/src/fft/extension.cpp b/src/hysop++/src/fft/extension.cpp
deleted file mode 100644
index 6f2a67e5eba4540b4b1b2367a881f410ca88e6c4..0000000000000000000000000000000000000000
--- a/src/hysop++/src/fft/extension.cpp
+++ /dev/null
@@ -1,24 +0,0 @@
-
-#include "fft/extension.h"
-
-namespace hysop {
-    namespace fft {
-
-        static const char* extension_strings[4] {
-            "NONE",
-            "EVEN",
-            "ODD",
-            "PERIODIC"
-        };
-
-        const char* toStringExtension(Extension ext) {
-            return extension_strings[static_cast<int>(ext)+1];
-        }
-
-        std::ostream& operator<<(std::ostream& os, const Extension& ext) {
-            os << toStringExtension(ext);
-            return os;
-        }
-
-    }
-}
diff --git a/src/hysop++/src/fft/extension.h b/src/hysop++/src/fft/extension.h
deleted file mode 100644
index 5aaba36f2a03c55fbda30439a3d9c7ead4a23519..0000000000000000000000000000000000000000
--- a/src/hysop++/src/fft/extension.h
+++ /dev/null
@@ -1,23 +0,0 @@
-
-#ifndef FFT_EXTENSION_H
-#define FFT_EXTENSION_H
-
-#include <ostream>
-
-namespace hysop {
-    namespace fft {
-
-        enum class Extension : int {
-            NONE=-1,
-            EVEN=0,
-            ODD=1,
-            PERIODIC=2
-        };
-
-        const char* toStringExtension(Extension ext);
-        std::ostream& operator<<(std::ostream& os, const Extension& ext);
-    }
-}
-
-
-#endif /* end of include guard: FFT_EXTENSION_H */
diff --git a/src/hysop++/src/fft/fftDomainConfiguration.h b/src/hysop++/src/fft/fftDomainConfiguration.h
deleted file mode 100644
index 57f014d63c414e638f76af03eb15795d48ad2222..0000000000000000000000000000000000000000
--- a/src/hysop++/src/fft/fftDomainConfiguration.h
+++ /dev/null
@@ -1,114 +0,0 @@
-
-#ifndef HYSOP_FFTDOMAINCONFIGURATION_H
-#define HYSOP_FFTDOMAINCONFIGURATION_H
-
-#include "utils/defines.h"
-#include "domain/domainConfiguration.h"
-#include "fft/extension.h"
-
-namespace hysop {
-    namespace fft {
-
-        template <std::size_t Dim>
-            class FftDomainConfiguration;
-
-        template <std::size_t Dim>
-            std::ostream& operator<<(std::ostream& os, const FftDomainConfiguration<Dim>& config);
-
-        template <std::size_t Dim>
-        class FftDomainConfiguration {
-
-                public:
-                    typedef typename domain::DomainConfiguration<Dim>::BoundaryPair  BoundaryPair;
-                    typedef typename domain::DomainConfiguration<Dim>::BoundaryArray BoundaryArray;
-                    typedef std::pair<fft::Extension,fft::Extension>                 ExtensionPair;
-                    typedef std::array<ExtensionPair,Dim>                            ExtensionArray;
-                public: 
-                    FftDomainConfiguration(const ExtensionArray& p_extensions, bool p_includePeriodicBoundaries) :
-                        m_extensions(p_extensions), m_includePeriodicBoundaries(p_includePeriodicBoundaries) {
-                        }
-
-                    FftDomainConfiguration(const domain::DomainConfiguration<Dim>& p_domain):
-                        m_extensions(boundariesToExtensions(p_domain.boundaries())), 
-                        m_includePeriodicBoundaries(p_domain.includePeriodicBoundaries()) {
-                        }
-                    
-                    const ExtensionArray& extensions() const { return m_extensions; }
-                    bool   includePeriodicBoundaries() const { return m_includePeriodicBoundaries; }
-
-                    ExtensionPair operator[](std::size_t k) const {
-                        return m_extensions[k];
-                    }
-
-                    domain::DomainConfiguration<Dim> boundariesConfiguration() const {
-                        return domain::DomainConfiguration<Dim>(fftExtensionsToBoundaries(m_extensions), m_includePeriodicBoundaries);
-                    }
-
-                    static BoundaryArray fftExtensionsToBoundaries(const ExtensionArray& extArray) {
-                        BoundaryArray bdArray;
-                        for(std::size_t d=0; d<Dim; d++) {
-                            const ExtensionPair& extPair = extArray[d];
-                            bdArray[d] = std::make_pair(fftExtensionToBoundary(extPair.first), fftExtensionToBoundary(extPair.second));
-                        }
-                        return bdArray;
-                    }
-                    
-                    static ExtensionArray boundariesToExtensions(const BoundaryArray& bdArray) {
-                        ExtensionArray extArray;
-                        for(std::size_t d=0; d<Dim; d++) {
-                            const BoundaryPair& bdPair = bdArray[d];
-                            extArray[d] = std::make_pair(boundaryToExtension(bdPair.first), boundaryToExtension(bdPair.second));
-                        }
-                        return extArray;
-                    }
-
-                    static domain::Boundary fftExtensionToBoundary(fft::Extension ext) {
-                        switch(ext) {
-                            case(fft::Extension::PERIODIC):
-                                return domain::Boundary::PERIODIC;
-                            case(fft::Extension::EVEN):
-                                return domain::Boundary::HOMOGENEOUS_DIRICHLET;
-                            case(fft::Extension::ODD):
-                                return domain::Boundary::HOMOGENEOUS_NEUMANN;
-                            case(fft::Extension::NONE):
-                                return domain::Boundary::NONE;
-                            default:
-                                NOT_IMPLEMENTED_YET;
-                        }
-                    }
-                    
-                    static fft::Extension boundaryToExtension(domain::Boundary bd) {
-                        switch(bd) {
-                            case(domain::Boundary::PERIODIC):
-                                return fft::Extension::PERIODIC;
-                            case(domain::Boundary::HOMOGENEOUS_DIRICHLET):
-                                return fft::Extension::EVEN;
-                            case(domain::Boundary::HOMOGENEOUS_NEUMANN):
-                                return fft::Extension::ODD;
-                            case(domain::Boundary::NONE):
-                                return fft::Extension::NONE;
-                                //throw std::runtime_error("Cannot build a FftDomainConfiguration based on a boundary of type 'NONE' !");
-                            default:
-                                NOT_IMPLEMENTED_YET;
-                        }
-                    }
-                
-                protected:
-                    ExtensionArray  m_extensions;
-                    bool m_includePeriodicBoundaries;
-            };
-
-        template <std::size_t Dim>
-            std::ostream& operator<<(std::ostream& os, const FftDomainConfiguration<Dim>& config) {
-                os << "== FftDomainConfiguration<Dim=" << std::to_string(Dim) << "> ==" << std::endl;
-                for (std::size_t d = 0; d < Dim; d++)
-                    os << "\taxe[" << d << "]: " << config[d].first << "/" << config[d].second;
-                os << "\tPeriodic  boundaries included ?" << std::boolalpha << config.includePeriodicBoundaries()  << std::endl;
-                return os;
-            }
-
-    }
-}
-
-#endif /* end of include guard: HYSOP_FFTDOMAINCONFIGURATION_H */
-
diff --git a/src/hysop++/src/fft/fftw3.cpp b/src/hysop++/src/fft/fftw3.cpp
deleted file mode 100644
index 25a877c87af6be0aa0c7a0c7930e5ce63bda3dde..0000000000000000000000000000000000000000
--- a/src/hysop++/src/fft/fftw3.cpp
+++ /dev/null
@@ -1,40 +0,0 @@
-
-#include "fft/fftw3.h"
-
-namespace hysop {
-    namespace fft {
-        template struct Fftw3<float>;
-        template struct Fftw3<double>;
-        template struct Fftw3<long double>;
-        #ifdef HAS_QUADMATHS
-            template struct Fftw3<__float128>;
-        #endif
-    }
-}
-
-/* needed for swig to avoid undefined reference link error */
-#if !defined(FFTW_HAS_FFTW3F_THREADS) || !defined(FFTW_HAS_FFTW3F_OMP)
-    int  fftwf_init_threads()          { return 0; }
-    void fftwf_plan_with_nthreads(int) {}
-    void fftwf_cleanup_threads()       {}
-#endif
-
-#if !defined(FFTW_HAS_FFTW3D_THREADS) || !defined(FFTW_HAS_FFTW3D_OMP)
-    int  fftw_init_threads()          { return 0; }
-    void fftw_plan_with_nthreads(int) {}
-    void fftw_cleanup_threads()       {}
-#endif
-
-#if !defined(FFTW_HAS_FFTW3L_THREADS) || !defined(FFTW_HAS_FFTW3L_OMP)
-    int  fftwl_init_threads()          { return 0; }
-    void fftwl_plan_with_nthreads(int) {}
-    void fftwl_cleanup_threads()       {}
-#endif
-
-#ifdef HAS_QUADMATHS
-    #if !defined(FFTW_HAS_FFTW3Q_THREADS) || !defined(FFTW_HAS_FFTW3Q_OMP)
-        int  fftwq_init_threads()          { return 0; }
-        void fftwq_plan_with_nthreads(int) {}
-        void fftwq_cleanup_threads()       {}
-    #endif
-#endif
diff --git a/src/hysop++/src/fft/fftw3.h b/src/hysop++/src/fft/fftw3.h
deleted file mode 100644
index e71a137c559c0c0d8260bf6324b5e1b195934e01..0000000000000000000000000000000000000000
--- a/src/hysop++/src/fft/fftw3.h
+++ /dev/null
@@ -1,534 +0,0 @@
-
-#ifndef HYSOP_FFTW3_H
-#define HYSOP_FFTW3_H
-
-#include <complex>
-#include <stdexcept>
-#include <fftw3.h>
-
-#ifdef HAS_QUADMATHS
-#include "maths/quad_maths.h"
-#endif
-
-/*                                                         */
-/* fftw3 c++ wrapper based on original macros in <fftw3.h> */
-/*                                                         */
-
-/* macros normally already defined in <fftw3.h> */
-#ifndef FFTW_CONCAT
-#define FFTW_CONCAT(prefix, name) prefix ## name
-#endif
-#ifndef FFTW_MANGLE_FLOAT
-#define FFTW_MANGLE_FLOAT(name)       FFTW_CONCAT(fftwf_, name)
-#endif
-#ifndef FFTW_MANGLE_LONG_DOUBLE
-#define FFTW_MANGLE_LONG_DOUBLE(name) FFTW_CONCAT(fftwl_, name)
-#endif
-#ifndef FFTW_MANGLE_QUAD
-#define FFTW_MANGLE_QUAD(name)        FFTW_CONCAT(fftwq_, name)
-#endif
-/***********************************************/
-
-#undef FFTW_MANGLE_DOUBLE
-#define FFTW_MANGLE_DOUBLE(name)      FFTW_CONCAT(::fftw_, name)
-
-/* prefix for function wrappers inside the class */
-#define FFTW_MANGLE_CLASS(name)      FFTW_CONCAT(fftw_, name)
-
-/* macro used to generate a full template specialisation of class Fftw3 for each type */
-#define FFTW_DEFINE_CXX_API(X, Y, REAL, has_thread_support)                                                                                   \
-template <>                                                                                                                                   \
-    struct Fftw3<REAL> {                                                                                                                      \
-        typedef REAL R;                                                                                                                       \
-        typedef Y(complex) C;                                                                                                                 \
-        typedef Y(plan) plan;                                                                                                                 \
-        typedef Y(iodim) iodim;                                                                                                               \
-        typedef Y(iodim64) iodim64;                                                                                                           \
-        typedef Y(r2r_kind) r2r_kind;                                                                                                         \
-        typedef Y(read_char_func) read_char_func;                                                                                             \
-        typedef Y(write_char_func) write_char_func;                                                                                           \
-        typedef std::complex<REAL> stdC;                                                                                                      \
-                                                                                                                                              \
-        void X(execute)(const plan p) const {                                                                                                 \
-            Y(execute)(p);                                                                                                                    \
-        }                                                                                                                                     \
-                                                                                                                                              \
-        plan X(plan_dft)(int rank, const int *n,                                                                                              \
-                C *in, C *out, int sign, unsigned int flags) const {                                                                          \
-            return Y(plan_dft)(rank, n, in, out, sign, flags);                                                                                \
-        }                                                                                                                                     \
-                                                                                                                                              \
-        plan X(plan_dft_1d)(int n, C *in, C *out, int sign,                                                                                   \
-                unsigned int flags) const {                                                                                                   \
-            return Y(plan_dft_1d)(n, in, out, sign, flags);                                                                                   \
-        }                                                                                                                                     \
-        plan X(plan_dft_2d)(int n0, int n1,                                                                                                   \
-                C *in, C *out, int sign, unsigned int flags) const {                                                                          \
-            return Y(plan_dft_2d)(n0, n1, in, out, sign, flags);                                                                              \
-        }                                                                                                                                     \
-        plan X(plan_dft_3d)(int n0, int n1, int n2,                                                                                           \
-                C *in, C *out, int sign, unsigned int flags) const {                                                                          \
-            return Y(plan_dft_3d)(n0, n1, n2, in, out, sign, flags);                                                                          \
-        }                                                                                                                                     \
-                                                                                                                                              \
-        plan X(plan_many_dft)(int rank, const int *n,                                                                                         \
-                int howmany,                                                                                                                  \
-                C *in, const int *inembed,                                                                                                    \
-                int istride, int idist,                                                                                                       \
-                C *out, const int *onembed,                                                                                                   \
-                int ostride, int odist,                                                                                                       \
-                int sign, unsigned int flags) const {                                                                                         \
-            return Y(plan_many_dft)(rank, n, howmany, in, inembed, istride, idist, out, onembed, ostride, odist, sign, flags);                \
-        }                                                                                                                                     \
-                                                                                                                                              \
-        plan X(plan_guru_dft)(int rank, const iodim *dims,                                                                                    \
-                int howmany_rank,                                                                                                             \
-                const iodim *howmany_dims,                                                                                                    \
-                C *in, C *out,                                                                                                                \
-                int sign, unsigned int flags) const {                                                                                         \
-            return Y(plan_guru_dft)(rank, dims, howmany_rank, howmany_dims, in, out, sign, flags);                                            \
-        }                                                                                                                                     \
-        plan X(plan_guru_split_dft)(int rank, const iodim *dims,                                                                              \
-                int howmany_rank,                                                                                                             \
-                const iodim *howmany_dims,                                                                                                    \
-                R *ri, R *ii, R *ro, R *io,                                                                                                   \
-                unsigned int flags) const {                                                                                                   \
-            return Y(plan_guru_split_dft)(rank, dims, howmany_rank, howmany_dims, ri, ii, ro, io, flags);                                     \
-        }                                                                                                                                     \
-                                                                                                                                              \
-        plan X(plan_guru64_dft)(int rank,                                                                                                     \
-                const iodim64 *dims,                                                                                                          \
-                int howmany_rank,                                                                                                             \
-                const iodim64 *howmany_dims,                                                                                                  \
-                C *in, C *out,                                                                                                                \
-                int sign, unsigned int flags) const {                                                                                         \
-            return Y(plan_guru64_dft)(rank, dims, howmany_rank, howmany_dims, in, out, sign, flags);                                          \
-        }                                                                                                                                     \
-        plan X(plan_guru64_split_dft)(int rank,                                                                                               \
-                const iodim64 *dims,                                                                                                          \
-                int howmany_rank,                                                                                                             \
-                const iodim64 *howmany_dims,                                                                                                  \
-                R *ri, R *ii, R *ro, R *io,                                                                                                   \
-                unsigned int flags) const {                                                                                                   \
-            return Y(plan_guru64_split_dft)(rank, dims, howmany_rank, howmany_dims, ri, ii, ro, io, flags);                                   \
-        }                                                                                                                                     \
-                                                                                                                                              \
-        void X(execute_dft)(const plan p, C *in, C *out) const {                                                                              \
-            Y(execute_dft)(p, in, out);                                                                                                       \
-        }                                                                                                                                     \
-        void X(execute_split_dft)(const plan p, R *ri, R *ii, R *ro, R *io) const {                                                           \
-            Y(execute_split_dft)(p, ri, ii, ro, io);                                                                                          \
-        }                                                                                                                                     \
-                                                                                                                                              \
-        plan X(plan_many_dft_r2c)(int rank, const int *n,                                                                                     \
-                int howmany,                                                                                                                  \
-                R *in, const int *inembed,                                                                                                    \
-                int istride, int idist,                                                                                                       \
-                C *out, const int *onembed,                                                                                                   \
-                int ostride, int odist,                                                                                                       \
-                unsigned int flags) const {                                                                                                   \
-            return Y(plan_many_dft_r2c)(rank, n, howmany, in, inembed, istride, idist, out, onembed, ostride, odist, flags);                  \
-        }                                                                                                                                     \
-                                                                                                                                              \
-        plan X(plan_dft_r2c)(int rank, const int *n,                                                                                          \
-                R *in, C *out, unsigned int flags) const {                                                                                    \
-            return Y(plan_dft_r2c)(rank, n, in, out, flags);                                                                                  \
-        }                                                                                                                                     \
-                                                                                                                                              \
-        plan X(plan_dft_r2c_1d)(int n,R *in,C *out,unsigned int flags) const {                                                                \
-            return Y(plan_dft_r2c_1d)(n,in,out,flags);                                                                                        \
-        }                                                                                                                                     \
-        plan X(plan_dft_r2c_2d)(int n0, int n1,                                                                                               \
-                R *in, C *out, unsigned int flags) const {                                                                                    \
-            return Y(plan_dft_r2c_2d)(n0, n1, in, out, flags);                                                                                \
-        }                                                                                                                                     \
-        plan X(plan_dft_r2c_3d)(int n0, int n1,                                                                                               \
-                int n2,                                                                                                                       \
-                R *in, C *out, unsigned int flags) const {                                                                                    \
-            return Y(plan_dft_r2c_3d)(n0, n1, n2, in, out, flags);                                                                            \
-        }                                                                                                                                     \
-                                                                                                                                              \
-                                                                                                                                              \
-        plan X(plan_many_dft_c2r)(int rank, const int *n,                                                                                     \
-                int howmany,                                                                                                                  \
-                C *in, const int *inembed,                                                                                                    \
-                int istride, int idist,                                                                                                       \
-                R *out, const int *onembed,                                                                                                   \
-                int ostride, int odist,                                                                                                       \
-                unsigned int flags) const {                                                                                                   \
-            return Y(plan_many_dft_c2r)(rank, n, howmany, in, inembed, istride, idist, out, onembed, ostride, odist, flags);                  \
-        }                                                                                                                                     \
-                                                                                                                                              \
-        plan X(plan_dft_c2r)(int rank, const int *n,                                                                                          \
-                C *in, R *out, unsigned int flags) const {                                                                                    \
-            return Y(plan_dft_c2r)(rank, n, in, out, flags);                                                                                  \
-        }                                                                                                                                     \
-                                                                                                                                              \
-        plan X(plan_dft_c2r_1d)(int n,C *in,R *out,unsigned int flags) const {                                                                \
-            return Y(plan_dft_c2r_1d)(n,in,out,flags);                                                                                        \
-        }                                                                                                                                     \
-        plan X(plan_dft_c2r_2d)(int n0, int n1,                                                                                               \
-                C *in, R *out, unsigned int flags) const {                                                                                    \
-            return Y(plan_dft_c2r_2d)(n0, n1, in, out, flags);                                                                                \
-        }                                                                                                                                     \
-        plan X(plan_dft_c2r_3d)(int n0, int n1,                                                                                               \
-                int n2,                                                                                                                       \
-                C *in, R *out, unsigned int flags) const {                                                                                    \
-            return Y(plan_dft_c2r_3d)(n0, n1, n2, in, out, flags);                                                                            \
-        }                                                                                                                                     \
-                                                                                                                                              \
-        plan X(plan_guru_dft_r2c)(int rank, const iodim *dims,                                                                                \
-                int howmany_rank,                                                                                                             \
-                const iodim *howmany_dims,                                                                                                    \
-                R *in, C *out,                                                                                                                \
-                unsigned int flags) const {                                                                                                   \
-            return Y(plan_guru_dft_r2c)(rank, dims, howmany_rank, howmany_dims, in, out, flags);                                              \
-        }                                                                                                                                     \
-        plan X(plan_guru_dft_c2r)(int rank, const iodim *dims,                                                                                \
-                int howmany_rank,                                                                                                             \
-                const iodim *howmany_dims,                                                                                                    \
-                C *in, R *out,                                                                                                                \
-                unsigned int flags) const {                                                                                                   \
-            return Y(plan_guru_dft_c2r)(rank, dims, howmany_rank, howmany_dims, in, out, flags);                                              \
-        }                                                                                                                                     \
-                                                                                                                                              \
-        plan X(plan_guru_split_dft_r2c)(                                                                                                      \
-                int rank, const iodim *dims,                                                                                                  \
-                int howmany_rank,                                                                                                             \
-                const iodim *howmany_dims,                                                                                                    \
-                R *in, R *ro, R *io,                                                                                                          \
-                unsigned int flags) const {                                                                                                   \
-            return Y(plan_guru_split_dft_r2c)( rank, dims, howmany_rank, howmany_dims, in, ro, io, flags);                                    \
-        }                                                                                                                                     \
-        plan X(plan_guru_split_dft_c2r)(                                                                                                      \
-                int rank, const iodim *dims,                                                                                                  \
-                int howmany_rank,                                                                                                             \
-                const iodim *howmany_dims,                                                                                                    \
-                R *ri, R *ii, R *out,                                                                                                         \
-                unsigned int flags) const {                                                                                                   \
-            return Y(plan_guru_split_dft_c2r)( rank, dims, howmany_rank, howmany_dims, ri, ii, out, flags);                                   \
-        }                                                                                                                                     \
-                                                                                                                                              \
-        plan X(plan_guru64_dft_r2c)(int rank,                                                                                                 \
-                const iodim64 *dims,                                                                                                          \
-                int howmany_rank,                                                                                                             \
-                const iodim64 *howmany_dims,                                                                                                  \
-                R *in, C *out,                                                                                                                \
-                unsigned int flags) const {                                                                                                   \
-            return Y(plan_guru64_dft_r2c)(rank, dims, howmany_rank, howmany_dims, in, out, flags);                                            \
-        }                                                                                                                                     \
-        plan X(plan_guru64_dft_c2r)(int rank,                                                                                                 \
-                const iodim64 *dims,                                                                                                          \
-                int howmany_rank,                                                                                                             \
-                const iodim64 *howmany_dims,                                                                                                  \
-                C *in, R *out,                                                                                                                \
-                unsigned int flags) const {                                                                                                   \
-            return Y(plan_guru64_dft_c2r)(rank, dims, howmany_rank, howmany_dims, in, out, flags);                                            \
-        }                                                                                                                                     \
-                                                                                                                                              \
-        plan X(plan_guru64_split_dft_r2c)(                                                                                                    \
-                int rank, const iodim64 *dims,                                                                                                \
-                int howmany_rank,                                                                                                             \
-                const iodim64 *howmany_dims,                                                                                                  \
-                R *in, R *ro, R *io,                                                                                                          \
-                unsigned int flags) const {                                                                                                   \
-            return Y(plan_guru64_split_dft_r2c)( rank, dims, howmany_rank, howmany_dims, in, ro, io, flags);                                  \
-        }                                                                                                                                     \
-        plan X(plan_guru64_split_dft_c2r)(                                                                                                    \
-                int rank, const iodim64 *dims,                                                                                                \
-                int howmany_rank,                                                                                                             \
-                const iodim64 *howmany_dims,                                                                                                  \
-                R *ri, R *ii, R *out,                                                                                                         \
-                unsigned int flags) const {                                                                                                   \
-            return Y(plan_guru64_split_dft_c2r)( rank, dims, howmany_rank, howmany_dims, ri, ii, out, flags);                                 \
-        }                                                                                                                                     \
-                                                                                                                                              \
-        void X(execute_dft_r2c)(const plan p, R *in, C *out) const {                                                                          \
-            Y(execute_dft_r2c)(p, in, out);                                                                                                   \
-        }                                                                                                                                     \
-        void X(execute_dft_c2r)(const plan p, C *in, R *out) const {                                                                          \
-            Y(execute_dft_c2r)(p, in, out);                                                                                                   \
-        }                                                                                                                                     \
-                                                                                                                                              \
-        void X(execute_split_dft_r2c)(const plan p,                                                                                           \
-                R *in, R *ro, R *io) const {                                                                                                  \
-            return Y(execute_split_dft_r2c)(p, in, ro, io);                                                                                   \
-        }                                                                                                                                     \
-        void X(execute_split_dft_c2r)(const plan p,                                                                                           \
-                R *ri, R *ii, R *out) const {                                                                                                 \
-            return Y(execute_split_dft_c2r)(p, ri, ii, out);                                                                                  \
-        }                                                                                                                                     \
-                                                                                                                                              \
-        plan X(plan_many_r2r)(int rank, const int *n,                                                                                         \
-                int howmany,                                                                                                                  \
-                R *in, const int *inembed,                                                                                                    \
-                int istride, int idist,                                                                                                       \
-                R *out, const int *onembed,                                                                                                   \
-                int ostride, int odist,                                                                                                       \
-                const r2r_kind *kind, unsigned int flags) const {                                                                             \
-            return Y(plan_many_r2r)(rank, n, howmany, in, inembed, istride, idist, out, onembed, ostride, odist, kind, flags);                \
-        }                                                                                                                                     \
-                                                                                                                                              \
-        plan X(plan_r2r)(int rank, const int *n, R *in, R *out,                                                                               \
-                const r2r_kind *kind, unsigned int flags) const {                                                                             \
-            return Y(plan_r2r)(rank, n, in, out, kind, flags);                                                                                \
-        }                                                                                                                                     \
-                                                                                                                                              \
-        plan X(plan_r2r_1d)(int n, R *in, R *out,                                                                                             \
-                r2r_kind kind, unsigned int flags) const {                                                                                    \
-            return Y(plan_r2r_1d)(n, in, out, kind, flags);                                                                                   \
-        }                                                                                                                                     \
-        plan X(plan_r2r_2d)(int n0, int n1, R *in, R *out,                                                                                    \
-                r2r_kind kind0, r2r_kind kind1,                                                                                               \
-                unsigned int flags) const {                                                                                                   \
-            return Y(plan_r2r_2d)(n0, n1, in, out, kind0, kind1, flags);                                                                      \
-        }                                                                                                                                     \
-        plan X(plan_r2r_3d)(int n0, int n1, int n2,                                                                                           \
-                R *in, R *out, r2r_kind kind0,                                                                                                \
-                r2r_kind kind1, r2r_kind kind2,                                                                                               \
-                unsigned int flags) const {                                                                                                   \
-            return Y(plan_r2r_3d)(n0, n1, n2, in, out, kind0, kind1, kind2, flags);                                                           \
-        }                                                                                                                                     \
-                                                                                                                                              \
-        plan X(plan_guru_r2r)(int rank, const iodim *dims,                                                                                    \
-                int howmany_rank,                                                                                                             \
-                const iodim *howmany_dims,                                                                                                    \
-                R *in, R *out,                                                                                                                \
-                const r2r_kind *kind, unsigned int flags) const {                                                                             \
-            return Y(plan_guru_r2r)(rank, dims, howmany_rank, howmany_dims, in, out, kind, flags);                                            \
-        }                                                                                                                                     \
-                                                                                                                                              \
-        plan X(plan_guru64_r2r)(int rank, const iodim64 *dims,                                                                                \
-                int howmany_rank,                                                                                                             \
-                const iodim64 *howmany_dims,                                                                                                  \
-                R *in, R *out,                                                                                                                \
-                const r2r_kind *kind, unsigned int flags) const {                                                                             \
-            return Y(plan_guru64_r2r)(rank, dims, howmany_rank, howmany_dims, in, out, kind, flags);                                          \
-        }                                                                                                                                     \
-                                                                                                                                              \
-        void X(execute_r2r)(const plan p, R *in, R *out) const {                                                                              \
-            Y(execute_r2r)(p, in, out);                                                                                                       \
-        }                                                                                                                                     \
-                                                                                                                                              \
-        void X(destroy_plan)(plan p) const {                                                                                                  \
-            Y(destroy_plan)(p);                                                                                                               \
-        }                                                                                                                                     \
-        void X(forget_wisdom)() const {                                                                                                       \
-            Y(forget_wisdom)();                                                                                                               \
-        }                                                                                                                                     \
-        void X(cleanup)() const {                                                                                                             \
-            Y(cleanup)();                                                                                                                     \
-        }                                                                                                                                     \
-                                                                                                                                              \
-        void X(set_timelimit)(double t) const {                                                                                               \
-            Y(set_timelimit)(t);                                                                                                              \
-        }                                                                                                                                     \
-                                                                                                                                              \
-        template <typename T = void>                                                                                                          \
-        typename std::enable_if<has_thread_support, T>::type                                                                                  \
-        X(plan_with_nthreads)(int nthreads) const {                                                                                           \
-            Y(plan_with_nthreads)(nthreads);                                                                                                  \
-        }                                                                                                                                     \
-        template <typename T = int>                                                                                                           \
-        typename std::enable_if<has_thread_support, T>::type                                                                                  \
-        X(init_threads)() const {                                                                                                             \
-            return Y(init_threads)();                                                                                                         \
-        }                                                                                                                                     \
-        template <typename T=void>                                                                                                            \
-        typename std::enable_if<has_thread_support, T>::type                                                                                  \
-        X(cleanup_threads)() const {                                                                                                          \
-            Y(cleanup_threads)();                                                                                                             \
-        }                                                                                                                                     \
-                                                                                                                                              \
-        int X(export_wisdom_to_filename)(const char *filename) const {                                                                        \
-            return Y(export_wisdom_to_filename)(filename);                                                                                    \
-        }                                                                                                                                     \
-        void X(export_wisdom_to_file)(FILE *output_file) const {                                                                              \
-            Y(export_wisdom_to_file)(output_file);                                                                                            \
-        }                                                                                                                                     \
-        char *X(export_wisdom_to_string)() const {                                                                                            \
-            return Y(export_wisdom_to_string)();                                                                                              \
-        }                                                                                                                                     \
-        void X(export_wisdom)(write_char_func write_char,                                                                                     \
-                void *data) const {                                                                                                           \
-            return Y(export_wisdom)(write_char, data);                                                                                        \
-        }                                                                                                                                     \
-        int X(import_system_wisdom)() const {                                                                                                 \
-            return Y(import_system_wisdom)();                                                                                                 \
-        }                                                                                                                                     \
-        int X(import_wisdom_from_filename)(const char *filename) const {                                                                      \
-            return Y(import_wisdom_from_filename)(filename);                                                                                  \
-        }                                                                                                                                     \
-        int X(import_wisdom_from_file)(FILE *input_file) const {                                                                              \
-            return Y(import_wisdom_from_file)(input_file);                                                                                    \
-        }                                                                                                                                     \
-        int X(import_wisdom_from_string)(const char *input_string) const {                                                                    \
-            return Y(import_wisdom_from_string)(input_string);                                                                                \
-        }                                                                                                                                     \
-        int X(import_wisdom)(read_char_func read_char, void *data) const {                                                                    \
-            return Y(import_wisdom)(read_char, data);                                                                                         \
-        }                                                                                                                                     \
-                                                                                                                                              \
-        void X(fprint_plan)(const plan p, FILE *output_file) const {                                                                          \
-            Y(fprint_plan)(p, output_file);                                                                                                   \
-        }                                                                                                                                     \
-        void X(print_plan)(const plan p) const {                                                                                              \
-            Y(print_plan)(p);                                                                                                                 \
-        }                                                                                                                                     \
-                                                                                                                                              \
-        void *X(malloc)(size_t n) const {                                                                                                     \
-            return Y(malloc)(n);                                                                                                              \
-        }                                                                                                                                     \
-        R *X(alloc_real)(size_t n) const {                                                                                                    \
-            return Y(alloc_real)(n);                                                                                                          \
-        }                                                                                                                                     \
-        C *X(alloc_complex)(size_t n) const {                                                                                                 \
-            return Y(alloc_complex)(n);                                                                                                       \
-        }                                                                                                                                     \
-        void X(free)(void *p) const {                                                                                                         \
-            Y(free)(p);                                                                                                                       \
-        }                                                                                                                                     \
-                                                                                                                                              \
-        void X(flops)(const plan p,                                                                                                           \
-                double *add, double *mul, double *fmas) const {                                                                               \
-            return Y(flops)(p, add, mul, fmas);                                                                                               \
-        }                                                                                                                                     \
-        double X(estimate_cost)(const plan p) const {                                                                                         \
-            return Y(estimate_cost)(p);                                                                                                       \
-        }                                                                                                                                     \
-        double X(cost)(const plan p) const {                                                                                                  \
-            return Y(cost)(p);                                                                                                                \
-        }                                                                                                                                     \
-    };
-
-
-/* Constants */
-namespace hysop {
-    namespace fft {
-        /* float */
-        #ifdef FFTW_HAS_FFTW3F
-            static constexpr bool fftw_has_float_support = true;
-            #ifdef FFTW_HAS_FFTW3F_MPI
-                static constexpr bool fftw_has_float_mpi_support = true;
-            #else 
-                static constexpr bool fftw_has_float_mpi_support = false;
-            #endif
-            #if defined(FFTW_HAS_FFTW3F_THREADS) || defined(FFTW_HAS_FFTW3F_OMP)
-                static constexpr bool fftw_has_float_thread_support = true;
-            #else
-                static constexpr bool fftw_has_float_thread_support = false;
-            #endif 
-        #else
-            static constexpr bool fftw_has_float_support        = false;
-            static constexpr bool fftw_has_float_thread_support = false;
-            static constexpr bool fftw_has_float_mpi_support    = false;
-        #endif
-
-        /* double */
-        #ifdef FFTW_HAS_FFTW3D
-            static constexpr bool fftw_has_double_support = true;
-            #ifdef FFTW_HAS_FFTW3D_MPI
-                static constexpr bool fftw_has_double_mpi_support = true;
-            #else 
-                static constexpr bool fftw_has_double_mpi_support = false;
-            #endif
-            #if defined(FFTW_HAS_FFTW3D_THREADS) || defined(FFTW_HAS_FFTW3D_OMP)
-                static constexpr bool fftw_has_double_thread_support = true;
-            #else
-                static constexpr bool fftw_has_double_thread_support = false;
-            #endif 
-        #else
-            static constexpr bool fftw_has_double_support        = false;
-            static constexpr bool fftw_has_double_thread_support = false;
-            static constexpr bool fftw_has_double_mpi_support    = false;
-        #endif
-        
-        /* long double */
-        #ifdef FFTW_HAS_FFTW3L
-            static constexpr bool fftw_has_long_double_support = true;
-            #ifdef FFTW_HAS_FFTW3L_MPI
-                static constexpr bool fftw_has_long_double_mpi_support = true;
-            #else 
-                static constexpr bool fftw_has_long_double_mpi_support = false;
-            #endif
-            #if defined(FFTW_HAS_FFTW3L_THREADS) || defined(FFTW_HAS_FFTW3L_OMP)
-                static constexpr bool fftw_has_long_double_thread_support = true;
-            #else
-                static constexpr bool fftw_has_long_double_thread_support = false;
-            #endif 
-        #else
-            static constexpr bool fftw_has_long_double_support        = false;
-            static constexpr bool fftw_has_long_double_thread_support = false;
-            static constexpr bool fftw_has_long_double_mpi_support    = false;
-        #endif
-        
-        /* __float128  */
-        #ifdef FFTW_HAS_FFTW3Q
-            static constexpr bool fftw_has_quad_float_support = true;
-            #ifdef FFTW_HAS_FFTW3Q_MPI
-                static constexpr bool fftw_has_quad_float_mpi_support = true;
-            #else 
-                static constexpr bool fftw_has_quad_float_mpi_support = false;
-            #endif
-            #if defined(FFTW_HAS_FFTW3Q_THREADS) || defined(FFTW_HAS_FFTW3Q_OMP)
-                static constexpr bool fftw_has_quad_float_thread_support = true;
-            #else
-                static constexpr bool fftw_has_quad_float_thread_support = false;
-            #endif 
-        #else
-            static constexpr bool fftw_has_quad_float_support        = false;
-            static constexpr bool fftw_has_quad_float_thread_support = false;
-            static constexpr bool fftw_has_quad_float_mpi_support    = false;
-        #endif
-    }
-}
-
-/* Wrappers */
-namespace hysop {
-    namespace fft {
-
-        template <typename T>
-        struct Fftw3 {
-                Fftw3() { 
-                    throw std::runtime_error(
-                            "Can only use Fftw3 wrapper with types {float, double, long double, __float128} ! "
-                            "Note: __float128 type is enabled only if HAS_QUADMATHS is defined."
-                            );
-                }
-            };
-        template <typename T> struct is_fftw_supported_type          { static constexpr bool value = false; };
-        template <typename T> struct is_fftw_supported_complex_type  { static constexpr bool value = false; };
-        
-        
-        /* Generate Ffftw<> template specialisations */
-        FFTW_DEFINE_CXX_API(FFTW_MANGLE_CLASS, FFTW_MANGLE_FLOAT, float, hysop::fft::fftw_has_float_thread_support)
-        FFTW_DEFINE_CXX_API(FFTW_MANGLE_CLASS, FFTW_MANGLE_DOUBLE, double, hysop::fft::fftw_has_double_thread_support)
-        FFTW_DEFINE_CXX_API(FFTW_MANGLE_CLASS, FFTW_MANGLE_LONG_DOUBLE, long double, hysop::fft::fftw_has_long_double_thread_support)
-
-        template <> struct is_fftw_supported_type<float>       { static constexpr bool value = true;  };
-        template <> struct is_fftw_supported_type<double>      { static constexpr bool value = true;  };
-        template <> struct is_fftw_supported_type<long double> { static constexpr bool value = true;  };
-        
-        template <> struct is_fftw_supported_complex_type<std::complex<float>>       { static constexpr bool value = true;  };
-        template <> struct is_fftw_supported_complex_type<std::complex<double>>      { static constexpr bool value = true;  };
-        template <> struct is_fftw_supported_complex_type<std::complex<long double>> { static constexpr bool value = true;  };
-        template <> struct is_fftw_supported_complex_type<fftwf_complex>             { static constexpr bool value = true;  };
-        template <> struct is_fftw_supported_complex_type<fftw_complex>              { static constexpr bool value = true;  };
-        template <> struct is_fftw_supported_complex_type<fftwl_complex>             { static constexpr bool value = true;  };
-
-#ifdef HAS_QUADMATHS
-        FFTW_DEFINE_CXX_API(FFTW_MANGLE_CLASS, FFTW_MANGLE_QUAD, __float128, hysop::fft::fftw_has_quad_float_thread_support)
-        template <> struct is_fftw_supported_type<__float128>                        { static constexpr bool value = true;  };
-        template <> struct is_fftw_supported_complex_type<std::complex<__float128>>  { static constexpr bool value = true;  };
-        template <> struct is_fftw_supported_complex_type<fftwq_complex>             { static constexpr bool value = true;  };
-#endif
-
-        
-    }
-}
-
-#undef FFTW_DEFINE_CXX_API
-#undef FFTW_MANGLE_CLASS
-
-#endif /* end of include guard: HYSOP_FFTW3_H */
-
diff --git a/src/hysop++/src/fft/fftwComplex.h b/src/hysop++/src/fft/fftwComplex.h
deleted file mode 100644
index a28954a5f5af170f11ed2eb0a957fa95440cbfc9..0000000000000000000000000000000000000000
--- a/src/hysop++/src/fft/fftwComplex.h
+++ /dev/null
@@ -1,87 +0,0 @@
-
-#ifndef FFTWCOMPLEX_H
-#define FFTWCOMPLEX_H
-
-#include <type_traits>
-#include <complex>
-#include <fft/fftw3.h>
-
-namespace hysop {
-    namespace fft {
-
-        template <typename T> struct fftw_complex_type {
-            typedef void value_type;
-            typedef void fftw_type; 
-            typedef void std_type; 
-        };
-
-        template <> struct fftw_complex_type<long double> { 
-            typedef long double               value_type;
-            typedef fftwl_complex             fftw_type; 
-            typedef std::complex<long double> std_type; 
-        };
-        template <> struct fftw_complex_type<double> { 
-            typedef double               value_type;
-            typedef fftw_complex         fftw_type; 
-            typedef std::complex<double> std_type; 
-        };
-        template <> struct fftw_complex_type<float> { 
-            typedef float               value_type;
-            typedef fftwf_complex       fftw_type; 
-            typedef std::complex<float> std_type; 
-        };
-        
-        template <> struct fftw_complex_type<std::complex<long double>> { 
-            typedef long double               value_type;
-            typedef fftwl_complex             fftw_type; 
-            typedef std::complex<long double> std_type; 
-        };
-        template <> struct fftw_complex_type<std::complex<double>> { 
-            typedef double               value_type;
-            typedef fftw_complex         fftw_type; 
-            typedef std::complex<double> std_type; 
-        };
-        template <> struct fftw_complex_type<std::complex<float>> { 
-            typedef float               value_type;
-            typedef fftwf_complex       fftw_type; 
-            typedef std::complex<float> std_type; 
-        };
-        
-        template <> struct fftw_complex_type<fftwl_complex> { 
-            typedef long double               value_type;
-            typedef fftwl_complex             fftw_type; 
-            typedef std::complex<long double> std_type; 
-        };
-        template <> struct fftw_complex_type<fftw_complex> { 
-            typedef double               value_type;
-            typedef fftw_complex         fftw_type; 
-            typedef std::complex<double> std_type; 
-        };
-        template <> struct fftw_complex_type<fftwf_complex> { 
-            typedef float               value_type;
-            typedef fftwf_complex       fftw_type; 
-            typedef std::complex<float> std_type; 
-        };
-
-#ifdef HAS_QUADMATHS
-        template <> struct fftw_complex_type<__float128> { 
-            typedef __float128               value_type;
-            typedef fftwq_complex            fftw_type; 
-            typedef std::complex<__float128> std_type; 
-        };
-        template <> struct fftw_complex_type<std::complex<__float128>> { 
-            typedef __float128               value_type;
-            typedef fftwq_complex            fftw_type; 
-            typedef std::complex<__float128> std_type; 
-        };
-        template <> struct fftw_complex_type<fftwq_complex> { 
-            typedef __float128               value_type;
-            typedef fftwq_complex            fftw_type; 
-            typedef std::complex<__float128> std_type; 
-        };
-#endif
-
-    }
-}
-
-#endif /* end of include guard: FFTWCOMPLEX_H */
diff --git a/src/hysop++/src/fft/planner.h b/src/hysop++/src/fft/planner.h
deleted file mode 100644
index 75b8ca8520afe85a33ee0c72547b2f5758cc51a2..0000000000000000000000000000000000000000
--- a/src/hysop++/src/fft/planner.h
+++ /dev/null
@@ -1,747 +0,0 @@
-
-#ifndef HYSOP_PLANNER_H
-#define HYSOP_PLANNER_H
-
-#include <list>
-#include <cassert>
-#include <functional>
-
-#include "fft/fftw3.h"
-#include "fft/transform.h"
-#include "fft/fftDomainConfiguration.h"
-#include "domain/domain.h"
-#include "utils/default.h"
-
-namespace hysop {
-    namespace fft {
-                    
-        enum FftTransformType : int {
-            FFT_NONE=-1,
-            FFT_R2R,
-            FFT_R2C
-        };
-        
-        template <typename T, std::size_t Dim>
-            class Planner;
-
-        template <typename T, std::size_t Dim>
-            std::ostream& operator<<(std::ostream& os, const Planner<T,Dim>& planner);
-
-        template <typename T, std::size_t Dim>
-            class Planner : private Fftw3<T> {
-                static_assert(hysop::fft::is_fftw_supported_type<T>::value, 
-                        "Planner data type is not currently supported by fftw !");
-
-                public:
-                    using         real = typename Fftw3<T>::R;
-                    using fftw_complex = typename Fftw3<T>::C;
-                    using  std_complex = typename Fftw3<T>::stdC;
-                    using    fftw_plan = typename Fftw3<T>::plan;
-                    using   fftw_iodim = typename Fftw3<T>::iodim;
-                    
-                protected:
-                    using RealArray = hysop::multi_array<T,Dim>;
-                    using TransformArray = std::array<fft::Transform,Dim>;
-
-                public:
-                    Planner(std::complex<T> p_fixedAxeWaveNumbers    = std::complex<T>(1,0),
-                            std::complex<T> p_fixedAxeWaveNumberPows = std::complex<T>(1,0));
-                    Planner(const Planner& other) = delete; /* cannot copy fftw plans to prevent inter instance plan destroying */
-                    Planner(Planner&& other)      = default;
-                    virtual ~Planner();
-
-                    Planner& operator=(const Planner& planner) = delete;
-                    Planner& operator=(Planner&& planner)      = default;
-                
-                    /* Plans a forward and a backward DCT/DST/FFT on each axe of the domain.                                */
-                    /* An axe with a NONE/NONE fft fomain configuration or order=0 is not transformed.                      */
-                    /* Input and output can be the same but in case of inplace transform, a temporary buffer is created     */
-                    /* during planning.                                                                                     */
-                    /* If transforms include a complex transform (FFT) or if the transform is inplace, additional data may  */
-                    /* be allocated inside the class.                                                                       */ 
-                    /* fftw_flags are used to pass flags to FFTW.                                                           */  
-                    /* Return true if planning was successfull AND if there is any transform, else return false.            */
-                    /* Note: To get max performance use hysop::_default::fft_allocator<T> with hysop::multi_array           */
-                    bool plan(hysop::const_multi_array_ref<T,Dim> input_rdata, 
-                              hysop::multi_array_ref<T,Dim> output_rdata,
-                              const fft::FftDomainConfiguration<Dim>& inputFftDomainConfig,
-                              const std::array<int,Dim>& order,
-                              const std::array<T  ,Dim>& domainSize,
-                              unsigned int fftw_flags,
-                              bool includeOutputPeriodicBoundaries=false, bool mirrorOutputPeriodicBoundaries=false);
-
-                    void executeForwardTransform();
-                    void executeBackwardTransform();
-
-                    FftTransformType transformType() const;
-
-                    T normalisationFactor() const;
-                    const std::array<T,Dim> signs() const;
-                    const std::array<std::vector<std::complex<T>>,Dim>& waveNumbers() const;
-                    const std::array<std::vector<std::complex<T>>,Dim>& waveNumbersPows() const;
-
-                    hysop::multi_array_view<T,Dim>              transformedRealData();    /* view because possibly non contiguous */
-                    hysop::multi_array_ref<std::complex<T>,Dim> transformedComplexData(); /* ref  because contiguous (allocated inside class) */
-                    
-                    /* Get planned transform description */
-                    const std::string& toString() const;
-
-                    /* Modify untransformed axe generated wave numbers to simplify algorithm implementations based on the planner */
-                    Planner<T,Dim>& setFixedAxeWaveNumbers(std::complex<T> p_fixedAxeWaveNumber); 
-                    Planner<T,Dim>& setFixedAxeWaveNumberPows(std::complex<T> p_fixedAxeWaveNumberPow); 
-
-                protected:
-                    void checkDomainCompatibility(const fft::FftDomainConfiguration<Dim>& domainConfig) const;
-                    void checkExtensionCompatibility(const fft::Extension &lhs, const fft::Extension &rhs) const;
-
-                    fft::Transform findTransform(const std::pair<Extension,Extension>& ed) const;
-
-                    void reset();
-                        
-                    template <typename Transfo>
-                    void toStreamTransform(std::ostream& os, const Transfo& tr, 
-                            int rank, int howmany_rank, 
-                            const fftw_iodim* dims, const fftw_iodim *howmany_dims,
-                            int input_data_offset=0, int output_data_offset=0);
-
-                protected:
-                    std::complex<T> m_fixedAxeWaveNumbers, m_fixedAxeWaveNumberPows;
-
-                    hysop::multi_array_ref<T,Dim> m_realBuffer;
-                    hysop::multi_array<std_complex,Dim,hysop::_default::fft_allocator<std_complex>>   m_complexBuffer;
-                    typename hysop::multi_array<T,Dim>::template index_gen<Dim,Dim>  m_transformedRealBufferView;
-
-                    std::list<fftw_plan> m_forward_R2R_plans,  m_R2C_plans;
-                    std::list<fftw_plan> m_backward_R2R_plans, m_C2R_plans;
-
-                    FftTransformType m_transformType;
-                    std::vector<std::size_t> m_periodicAxes;
-
-                    std::array<T,Dim> m_signs;
-                    std::array<std::vector<std_complex>,Dim> m_waveNumbers;
-                    std::array<std::vector<std_complex>,Dim> m_waveNumbersPows;
-                    T m_normalisationFactor;
-
-                    bool m_mirrorOutputPeriodicBoundaries;
-                    std::string m_plannedTransformStr;
-            };
-        
-        template <typename T, std::size_t Dim>
-            Planner<T,Dim>::Planner(std::complex<T> p_fixedAxeWaveNumbers, std::complex<T> p_fixedAxeWaveNumberPows):
-                m_fixedAxeWaveNumbers(p_fixedAxeWaveNumbers),
-                m_fixedAxeWaveNumberPows(p_fixedAxeWaveNumberPows),
-                m_realBuffer(), m_complexBuffer(),
-                m_forward_R2R_plans(), m_R2C_plans(),
-                m_backward_R2R_plans(), m_C2R_plans(),
-                m_transformType(), 
-                m_signs(), m_waveNumbers(), m_waveNumbersPows(), m_normalisationFactor(),
-                m_mirrorOutputPeriodicBoundaries(),
-                m_plannedTransformStr() {
-                    reset();
-            }
-        
-        template <typename T, std::size_t Dim>
-            Planner<T,Dim>::~Planner() {
-                reset();
-            }
-
-        template <typename T, std::size_t Dim>
-            void Planner<T,Dim>::reset() {
-                new (&m_realBuffer) multi_array_ref<T,Dim>();
-                m_complexBuffer.reshape(typename Shape<Dim>::type{0});
-                for(fftw_plan plan : m_forward_R2R_plans)
-                    this->fftw_destroy_plan(plan);
-                for(fftw_plan plan : m_backward_R2R_plans)
-                    this->fftw_destroy_plan(plan);
-                for(fftw_plan plan : m_C2R_plans)
-                    this->fftw_destroy_plan(plan);
-                for(fftw_plan plan : m_R2C_plans)
-                    this->fftw_destroy_plan(plan);
-                m_forward_R2R_plans.clear();
-                m_R2C_plans.clear();
-                m_backward_R2R_plans.clear();
-                m_C2R_plans.clear();
-                m_periodicAxes.clear();
-                m_transformType = FftTransformType::FFT_NONE;
-                for (std::size_t d = 0; d < Dim; d++) {
-                    m_waveNumbers[d].clear(); 
-                    m_waveNumbersPows[d].clear();
-                }
-                m_signs.fill(0);
-                m_normalisationFactor = T(0);
-                m_mirrorOutputPeriodicBoundaries = false;
-                m_plannedTransformStr = "** No planned transforms **";
-        }
-                    
-        template <typename T, std::size_t Dim>
-            void Planner<T,Dim>::checkDomainCompatibility(const fft::FftDomainConfiguration<Dim>& domainConfig) const {
-                for (std::size_t d = 0; d < Dim; d++)
-                    checkExtensionCompatibility(domainConfig[d].first, domainConfig[d].second);
-            }
-
-        template <typename T, std::size_t Dim>
-            void Planner<T,Dim>::checkExtensionCompatibility(const fft::Extension &lhs, const fft::Extension &rhs) const {
-                if ((lhs==fft::Extension::PERIODIC) ^ (rhs==fft::Extension::PERIODIC)) 
-                    throw std::runtime_error("Planner error: Periodic domain extensions are not compatible !");
-                if ((lhs==fft::Extension::NONE) ^ (rhs==fft::Extension::NONE))
-                    throw std::runtime_error("Planner error: None domain extensions are not compatible !");
-            }
-
-        template <typename T, std::size_t Dim>
-            bool Planner<T,Dim>::plan(hysop::const_multi_array_ref<T,Dim> input_rdata, 
-                                      hysop::multi_array_ref<T,Dim> output_rdata,
-                    const fft::FftDomainConfiguration<Dim>& inputFftDomainConfig,
-                    const std::array<int,Dim>& order,
-                    const std::array<T  ,Dim>& domainSize,
-                                  unsigned int fftw_flags, 
-                    const bool includeOutputPeriodicBoundaries,
-                    const bool mirrorOutputPeriodicBoundaries) {
-
-                this->checkDomainCompatibility(inputFftDomainConfig);
-                this->reset();
-
-                bool inplace = (input_rdata.data() == output_rdata.data());
-                hysop::multi_array<T,Dim> input_rdata_buffer;
-                if(inplace)
-                    input_rdata_buffer = input_rdata;
-
-                m_mirrorOutputPeriodicBoundaries = includeOutputPeriodicBoundaries && mirrorOutputPeriodicBoundaries;
-                
-                TransformArray forwardTransforms, backwardTransforms;
-                typename Shape<Dim>::type  realBufferShape, complexBufferShape;
-                std::array<int,Dim> forward_transform_size, backward_transform_size, complex_transform_size;
-                std::array<int,Dim> forward_input_offset,  forward_output_offset;
-                std::array<int,Dim> backward_input_offset, backward_output_offset;
-                std::array<bool,Dim> oddOrder, axeTransformed;
-                
-                bool hasRealTransforms=false, hasComplexTransforms=false;
-                int lastComplexTransformAxe=-1;
-                std::stringstream ss;
-
-                ss << "=== Planner ===" << std::endl;
-                ss << "** input **" << std::endl;
-                ss << "\tInputDataDim: " << input_rdata.shape() << std::endl;
-                ss << "\tDomainConfig: " << inputFftDomainConfig;
-                ss << "\tOrder       : " << order << std::endl;
-                ss << "\tDomainSize  : " << domainSize << std::endl;
-                ss << "\tipb/mpb     : " << std::boolalpha << includeOutputPeriodicBoundaries << "/" 
-                    << std::boolalpha << mirrorOutputPeriodicBoundaries << std::endl;
-
-                for (std::size_t d = 0; d < Dim; d++) {
-                    const      FftDomainConfiguration<Dim> &inputConfig = inputFftDomainConfig;
-                    const auto &inputExtensions = inputConfig.extensions()[d]; 
-                    const bool axeIsPeriodic = (inputExtensions.first==Extension::PERIODIC);
-                    const bool axeIsOddOrder = (order[d]%2!=0);
-                    const bool axeIsTransformed = !(order[d]==0 || inputExtensions.first == fft::Extension::NONE);
-                    const std::size_t inputSize = input_rdata.shape()[d] 
-                        - std::size_t(axeIsPeriodic && inputConfig.includePeriodicBoundaries());
-                    
-                    const fft::Transform tr = this->findTransform(inputExtensions);
-
-                    forwardTransforms[d] = tr;
-                    if(axeIsOddOrder)
-                        backwardTransforms[d] = tr.conjugateInverseTransform();
-                    else
-                        backwardTransforms[d] = tr.inverseTransform();
-                       
-                    realBufferShape[d]         = inputSize + 
-                        std::size_t((inputExtensions.second==Extension::PERIODIC) && includeOutputPeriodicBoundaries);
-                    complexBufferShape[d]      = inputSize;
-                    forward_transform_size[d]  = inputSize;
-                    backward_transform_size[d] = inputSize;
-                    complex_transform_size[d]  = inputSize;
-
-                    forward_input_offset[d]   = 0;
-                    forward_output_offset[d]  = 0;
-                    backward_input_offset[d]  = 0;
-                    backward_output_offset[d] = 0;
-
-                    oddOrder[d]       = axeIsOddOrder;
-                    axeTransformed[d] = axeIsTransformed;
-
-                    if(axeIsPeriodic)
-                        m_periodicAxes.push_back(d);
-                    
-                    if(!axeIsTransformed) {
-                        continue;
-                    }
-                    else {
-                        if(tr.isR2R()) {
-                            hasRealTransforms = true;
-                            const std::size_t firstExtOdd   = (inputExtensions.first  == Extension::ODD);
-                            const std::size_t secondExtOdd  = (inputExtensions.second == Extension::ODD);
-                            const std::size_t firstExtEven  = (inputExtensions.first  == Extension::EVEN);
-                            const std::size_t secondExtEven = (inputExtensions.second == Extension::EVEN);
-
-                            forward_transform_size[d]  -= (firstExtOdd + secondExtOdd);
-                            forward_input_offset[d]   = std::ptrdiff_t(firstExtOdd);
-                            forward_output_offset[d]  = std::ptrdiff_t(firstExtOdd);
-                            
-                            complexBufferShape[d]     = forward_transform_size[d];
-                            complex_transform_size[d] = forward_transform_size[d];
-                            
-                            if(axeIsOddOrder) {
-                                backward_transform_size[d] -= (firstExtEven + secondExtEven);
-                                backward_input_offset[d] = std::ptrdiff_t( (tr.kind == FFTW_RODFT01) 
-                                        || ((tr.kind != FFTW_REDFT01) && firstExtEven) );
-                                backward_output_offset[d] = std::ptrdiff_t(firstExtEven);
-                            }
-                            else {
-                                backward_transform_size[d] = forward_transform_size[d];
-                                backward_input_offset[d]   = forward_output_offset[d]; 
-                                backward_output_offset[d]  = forward_input_offset[d];
-                            }
-                        }
-                        else {
-                            hasComplexTransforms = true;
-                            lastComplexTransformAxe = d;
-                        }
-                    }
-                }
-                
-                
-                /* Allocate complex buffer only if necessary, return if no transforms */
-                if(hasComplexTransforms) {
-                    m_transformType = FFT_R2C;
-                    
-                    assert(lastComplexTransformAxe >= 0);
-                    complexBufferShape[lastComplexTransformAxe] = complexBufferShape[lastComplexTransformAxe]/2 + 1;
-                    m_complexBuffer.reshape(complexBufferShape);
-                }
-                else if(hasRealTransforms) {
-                    m_transformType = FFT_R2R;
-                }
-                else {
-                    m_transformType = FFT_NONE;
-                    if(!inplace)
-                        std::copy(input_rdata.begin(), input_rdata.end(), output_rdata.begin());
-                    this->reset();
-                    ss << m_plannedTransformStr << std::endl;
-                        m_plannedTransformStr = ss.str();
-                    return false;
-                }
-                
-                /* Check if output buffer shape match */
-                {
-                    if(output_rdata.shape() != realBufferShape)
-                        throw std::runtime_error("Output buffer shape should match the planned one !");
-                    new (&m_realBuffer) multi_array_ref<T,Dim>(output_rdata);
-                }
-                
-                /* Compute normalisation factor and wave numbers */
-                m_normalisationFactor = T(1);
-                for(std::size_t d=0; d<Dim; d++) {
-                    const fft::Transform tr = forwardTransforms[d];
-                    const std::size_t N = (tr.isR2R() ? realBufferShape[d] : complexBufferShape[d]);
-                    const std::size_t K = (tr.isR2R() ? forward_transform_size[d] : complexBufferShape[d]);
-                    const std::size_t S = (tr.isR2R() ? forward_transform_size[d] : complex_transform_size[d]);
-                    const real L = domainSize[d];
-                    T sign = T(1);
-                    if(!axeTransformed[d]) {
-                        m_waveNumbers[d].resize(N);
-                        m_waveNumbersPows[d].resize(N);
-                        m_normalisationFactor*=T(1);
-                        for (std::size_t k = 0; k < N; k++) {
-                            m_waveNumbers[d][k]     = m_fixedAxeWaveNumbers;
-                            m_waveNumbersPows[d][k] = m_fixedAxeWaveNumberPows;
-                        }
-                    }
-                    else {
-                        m_waveNumbers[d].resize(K);
-                        m_waveNumbersPows[d].resize(K);
-                        m_normalisationFactor*= tr.normalisation<T>(S);
-                        if(tr.isR2R()) {
-                            const std::size_t sign_offset = (tr.basefunc()==fft::BaseFunc::COS ? 1 : 0);
-                            sign *= std::pow(T(-1),(order[d]+sign_offset)/2);
-                        }
-                        for (std::size_t k = 0; k < K; k++) {
-                            m_waveNumbers[d][k] = tr.omega<T>(k,K,L,d==std::size_t(lastComplexTransformAxe));
-                            m_waveNumbersPows[d][k] = sign*std::pow(m_waveNumbers[d][k], order[d]);
-                        }
-                    }
-                    m_signs[d] = sign;
-                }
-            
-                /* Build planned transforms description string */
-                ss << "** configuration **"                                                  << std::endl;
-                ss << "\thasRealTransforms    : " << std::boolalpha << hasRealTransforms     << std::endl;
-                ss << "\thasComplexTransforms : " << std::boolalpha << hasComplexTransforms  << std::endl;
-                ss << "\tTransformed axes     : " << std::noboolalpha << axeTransformed      << std::endl;
-                ss << "\tOddOrder             : " << std::noboolalpha << oddOrder            << std::endl;
-                ss << "\tForward  transforms  : " << forwardTransforms                       << std::endl;
-                ss << "\tBackward transforms  : " << backwardTransforms                      << std::endl;
-                ss << "\tLast cplx trans. axe : " << lastComplexTransformAxe                 << std::endl;
-
-                ss << "** buffer shape **"                                                   << std::endl;
-                ss << "\tReal    buffer shape  : " << realBufferShape                        << std::endl;
-                ss << "\tComplex buffer shape  : " << complexBufferShape                     << std::endl;
-
-                ss << "** transform size and offsets **"                                     << std::endl;
-                ss << "\tForward  transf. size : " << forward_transform_size                 << std::endl;
-                ss << "\tForward  input  offset: " << forward_input_offset                   << std::endl;
-                ss << "\tForward  output offset: " << forward_output_offset                  << std::endl;
-                ss << "\tComplex  transf. size : " << complex_transform_size                 << std::endl;
-                ss << "\tBackward transf. size : " << backward_transform_size                << std::endl;
-                ss << "\tBackward input  offset: " << backward_input_offset                  << std::endl;
-                ss << "\tBackward output offset: " << backward_output_offset                 << std::endl;
-
-                ss << "** normalisation & wave numbers **" << std::endl;
-                ss << "\tNormalisation: " << m_normalisationFactor << std::endl;
-                ss << "\tSigns: " << m_signs << std::endl;
-                ss << "\t--Wave numbers--"<< std::endl;
-                for (std::size_t k = 0; k < Dim; k++)
-                    ss << "\t\taxe" << k << ": " << m_waveNumbers[k] << std::endl;
-                ss << "\t--wave numbers powers--"<< std::endl;
-                for (std::size_t k = 0; k < Dim; k++)
-                    ss << "\t\taxe" << k << ": " << m_waveNumbersPows[k] << std::endl;
-                
-                
-                /* Compute complex plans */
-                if(hasComplexTransforms) {
-                    ss << "** complex transforms detail **" << std::endl;
-                    fftw_plan R2C_plan=NULL, C2R_plan=NULL;
-                    const int rank = lastComplexTransformAxe+1;
-                    const int howmany_rank = Dim;
-                    fftw_iodim dims[Dim];
-                    fftw_iodim howmany_dims[Dim];
-                    int forwardInputOffset=0, backwardOutputOffset=0;
-                    {
-                        int local_io_stride = 1;
-                        for(int d=Dim-1; d>=0; d--) {
-                            forwardInputOffset   += forward_output_offset[d]*local_io_stride;
-                            backwardOutputOffset += forward_output_offset[d]*local_io_stride;
-                            local_io_stride *= realBufferShape[d];
-                        }
-                    }
-                    {
-                        int input_stride=1, output_stride=1;
-                        for(int d=Dim-1, dd=rank; d>=0; d--) {
-                            const bool isR2C            = forwardTransforms[d].isR2C();
-                            const bool isAxeTransformed = axeTransformed[d];
-                            if(isR2C && isAxeTransformed) {
-                                dims[d]         = fftw_iodim{complex_transform_size[d], input_stride, output_stride};
-                                howmany_dims[d] = fftw_iodim{                        1, input_stride, output_stride};
-                            }
-                            else {
-                                dims[d]         = fftw_iodim{                        1, input_stride, output_stride};
-                                howmany_dims[d] = fftw_iodim{forward_transform_size[d], input_stride, output_stride};
-                            }
-                            input_stride  *= realBufferShape[d];
-                            output_stride *= complexBufferShape[d];
-                        }
-                        R2C_plan = this->fftw_plan_guru_dft_r2c(
-                                rank, dims, 
-                                howmany_rank, howmany_dims,
-                                m_realBuffer.rdata()+forwardInputOffset, m_complexBuffer.fftw_cdata(),
-                                fftw_flags);
-                        this->toStreamTransform(ss, "FFTW_FORWARD", rank, howmany_rank, dims, howmany_dims,
-                                forwardInputOffset, 0);
-                        if(!R2C_plan) {
-                            ss << "=> R2C plan creation FAILED !" << std::endl;
-                            m_plannedTransformStr = ss.str();
-                            return false;
-                        }
-                    }
-                    {
-                        int input_stride=1, output_stride=1;
-                        for(int d=Dim-1, dd=rank; d>=0; d--) {
-                            const bool isR2C            = forwardTransforms[d].isR2C();
-                            const bool isAxeTransformed = axeTransformed[d];
-                            if(isR2C && isAxeTransformed) {
-                                dims[d]         = fftw_iodim{complex_transform_size[d], input_stride, output_stride};
-                                howmany_dims[d] = fftw_iodim{                        1, input_stride, output_stride};
-                            }
-                            else {
-                                dims[d]         = fftw_iodim{                         1, input_stride, output_stride};
-                                howmany_dims[d] = fftw_iodim{ forward_transform_size[d], input_stride, output_stride};
-                            }
-                            input_stride  *= complexBufferShape[d];
-                            output_stride *= realBufferShape[d];
-                        }
-                        C2R_plan = this->fftw_plan_guru_dft_c2r(
-                                rank, dims, 
-                                howmany_rank, howmany_dims,
-                                m_complexBuffer.fftw_cdata(), m_realBuffer.rdata()+backwardOutputOffset,
-                                fftw_flags);
-                        this->toStreamTransform(ss, "FFTW_BACKWARD", rank, howmany_rank, dims, howmany_dims,
-                                0, backwardOutputOffset);
-                        if(!C2R_plan) {
-                            ss << "=> C2R plan creation FAILED !" << std::endl;
-                            m_plannedTransformStr = ss.str();
-                            return false;
-                        }
-                    }
-                    m_R2C_plans.push_back(R2C_plan);
-                    m_C2R_plans.push_back(C2R_plan);
-                }
-                
-                /* Compute real plans */
-                if(hasRealTransforms) {
-                    ss << "** real transforms detail **" << std::endl;
-                    const int rank = 1;
-                    const int howmany_rank = Dim;
-                    fftw_r2r_kind kind[rank];
-                    fftw_iodim    dims[rank];
-                    fftw_iodim howmany_dims[howmany_rank];
-                    int io_stride = 1;
-                    for(int k=Dim-1; k>=0; k--) {
-                        const fft::Transform& ftr  = forwardTransforms[k];
-                        const fft::Transform& btr  = backwardTransforms[k];
-                        const bool isR2R            = ftr.isR2R() && btr.isR2R();
-                        const bool isAxeTransformed = axeTransformed[k];
-                        //int forwardInputOffset=0, forwardOutputOffset=0, backwardInputOffset=0, backwardOutputOffset=0;
-                        if(isR2R && isAxeTransformed) {
-                            ss << "\tTRANSFORM (" << std::to_string(k) << "):" << std::endl;
-                            fftw_plan forward_plan=NULL, backward_plan=NULL;
-                            int local_io_stride = 1;
-                            for(int d=Dim-1; d>=0; d--) {
-                                //howmany_dims[d] = fftw_iodim{ (d==k ? 1 : forward_transform_size[d]), local_io_stride, local_io_stride };
-                                //forwardInputOffset   += forward_input_offset[d]  *local_io_stride;
-                                //forwardOutputOffset  += forward_output_offset[d] *local_io_stride;
-                                //backwardInputOffset  += backward_input_offset[d] *local_io_stride;
-                                //backwardOutputOffset += backward_output_offset[d]*local_io_stride;
-                                howmany_dims[d] = fftw_iodim{ (d==k ? 1 : int(realBufferShape[d])), local_io_stride, local_io_stride };
-                                local_io_stride *= realBufferShape[d];
-                            }
-                            {
-                                kind[0] = static_cast<fftw_r2r_kind>(ftr.kind);
-                                dims[0] = { fftw_iodim{forward_transform_size[k], io_stride, io_stride} };
-                                forward_plan = this->fftw_plan_guru_r2r(
-                                        rank, dims, 
-                                        howmany_rank, howmany_dims,
-                                        //m_realBuffer.rdata()+forwardInputOffset,
-                                        //m_realBuffer.rdata()+forwardOutputOffset,
-                                        m_realBuffer.rdata()+forward_input_offset[k]*io_stride,
-                                        m_realBuffer.rdata()+forward_output_offset[k]*io_stride,
-                                        kind, fftw_flags);
-                                //this->toStreamTransform(ss, ftr, rank, howmany_rank, dims, howmany_dims, 
-                                        //forwardInputOffset, forwardOutputOffset);
-                                this->toStreamTransform(ss, ftr, rank, howmany_rank, dims, howmany_dims, 
-                                        forward_input_offset[k]*io_stride, forward_output_offset[k]*io_stride);
-                                if(!forward_plan) {
-                                    ss << "=> Forward R2R plan creation FAILED !" << std::endl;
-                                    m_plannedTransformStr = ss.str();
-                                    return false;
-                                }
-                            }
-                            {
-                                kind[0] = static_cast<fftw_r2r_kind>(btr.kind);
-                                dims[0] = { fftw_iodim{backward_transform_size[k], io_stride, io_stride} };
-                                backward_plan = this->fftw_plan_guru_r2r(
-                                        rank, dims, 
-                                        howmany_rank, howmany_dims,
-                                        //m_realBuffer.rdata()+backwardInputOffset,
-                                        //m_realBuffer.rdata()+backwardOutputOffset,
-                                        m_realBuffer.rdata()+backward_input_offset[k]*io_stride,
-                                        m_realBuffer.rdata()+backward_output_offset[k]*io_stride,
-                                        kind, fftw_flags);
-                                //this->toStreamTransform(ss, btr, rank, howmany_rank, dims, howmany_dims, 
-                                        //backwardInputOffset, backwardOutputOffset);
-                                this->toStreamTransform(ss, btr, rank, howmany_rank, dims, howmany_dims, 
-                                        backward_input_offset[k]*io_stride, backward_output_offset[k]*io_stride);
-                                if(!backward_plan) {
-                                    ss << "=> Backward R2R plan creation FAILED !" << std::endl;
-                                    m_plannedTransformStr = ss.str();
-                                    return false;
-                                }
-                            }
-                            m_forward_R2R_plans.push_back(forward_plan);
-                            m_backward_R2R_plans.push_back(backward_plan);
-                        }
-                        io_stride *= realBufferShape[k];
-                    }
-                }
-                   
-                /* Copy input data into the buffer */
-                if(inputFftDomainConfig.includePeriodicBoundaries() ^ includeOutputPeriodicBoundaries) {
-                    NOT_IMPLEMENTED_YET;
-                }
-                else {
-                    if(inplace) 
-                        std::copy(input_rdata_buffer.begin(), input_rdata_buffer.end(), m_realBuffer.begin());
-                    else
-                        std::copy(input_rdata.begin()       , input_rdata.end()       , m_realBuffer.begin());
-                }
-               
-                /* Create real buffer subview */
-                ss << "** real buffer view **" << std::endl;
-                {
-                    ss << "\tview = index_gen";
-                    std::array<boost::multi_array_types::index_range,Dim> ranges;
-                    for (std::size_t d = 0; d < Dim; d++) {
-                        const int offset = forward_output_offset[d];
-                        ranges[d] = boost::multi_array_types::index_range(
-                                offset,
-                                offset+forward_transform_size[d]);
-                                
-                        ss << "[range(" << ranges[d].start() << "," << ranges[d].finish() << ")]";
-                    }
-                    ss << std::endl;
-                    m_transformedRealBufferView = hysop::utils::buildIndices<Dim>(ranges);
-                }
-                
-                ss << "===============" << std::endl;
-                m_plannedTransformStr = ss.str();
-
-                return true;
-            }
-
-        template <typename T, std::size_t Dim>
-            void Planner<T,Dim>::executeForwardTransform() {
-                for(auto& plan : m_forward_R2R_plans) 
-                    this->fftw_execute(plan);
-                for(auto& plan : m_R2C_plans) 
-                    this->fftw_execute(plan);
-            }
-
-        template <typename T, std::size_t Dim>
-            void Planner<T,Dim>::executeBackwardTransform() {
-                for(auto& plan : m_C2R_plans) 
-                    this->fftw_execute(plan);
-                for(auto plan = m_backward_R2R_plans.rbegin(); plan!=m_backward_R2R_plans.rend(); ++plan) 
-                    this->fftw_execute(*plan);
-    
-                if(m_mirrorOutputPeriodicBoundaries) {
-                    const typename Shape<Dim>::type rshape = m_realBuffer.shape();
-                    T *data = m_realBuffer.rdata();
-                    for(const int axe : m_periodicAxes) {
-                        const int N = rshape[axe];
-                        const int num_elem = m_realBuffer.num_elements()/N;
-                        std::array<int,Dim> ids{0};
-                        int id = 0;
-                        int offset=1;
-                        {
-                            for(int d=Dim-1; d>axe; d--) offset*= rshape[d];
-                            offset *= (N-1);
-                        }
-                        for(int i=0; i<num_elem; i++) {
-                            data[id+offset]=data[id];
-                            for (int d=Dim-1; d>=0; d--) {
-                                if(d==axe)
-                                    continue;
-                                ids[d]++;
-                                if(ids[d]==int(rshape[d]))
-                                    ids[d]=0;
-                                else
-                                    break;
-                            }
-                            id = ids[0];
-                            for (std::size_t d=1; d < Dim; d++)
-                                id = id*rshape[d] + ids[d];
-                        }
-                    }
-                }
-            }
-                    
-        template <typename T, std::size_t Dim>
-        FftTransformType Planner<T,Dim>::transformType() const {
-            return m_transformType;
-        }
-
-        template <typename T, std::size_t Dim>
-            T Planner<T,Dim>::normalisationFactor() const {
-                return m_normalisationFactor;
-            }
-
-        template <typename T, std::size_t Dim>
-        const std::array<T,Dim> Planner<T,Dim>::signs() const {
-            return m_signs;
-        }
-
-        template <typename T, std::size_t Dim>
-            const std::array<std::vector<std::complex<T>>,Dim>& Planner<T,Dim>::waveNumbers() const {
-                return m_waveNumbers;
-            }
-                    
-        template <typename T, std::size_t Dim>
-        const std::array<std::vector<std::complex<T>>,Dim>& Planner<T,Dim>::waveNumbersPows() const {
-            return m_waveNumbersPows;
-        }
-
-        template <typename T, std::size_t Dim>
-            hysop::multi_array_view<T,Dim> Planner<T,Dim>::transformedRealData() { 
-                if(m_transformType==FFT_R2C)
-                    throw std::runtime_error("Requesting planner real data but planned transform is real to complex !");
-                else if(m_transformType==FFT_NONE)
-                    throw std::runtime_error("Requesting planner real data but there was no successfull planned transforms !");
-                return m_realBuffer[m_transformedRealBufferView]; 
-            }
-
-        template <typename T, std::size_t Dim>
-            hysop::multi_array_ref<std::complex<T>,Dim> Planner<T,Dim>::transformedComplexData() {
-                if(m_transformType==FFT_R2R)
-                    throw std::runtime_error("Requesting planner complex data but planned transform is real to real !");
-                else if(m_transformType==FFT_NONE)
-                    throw std::runtime_error("Requesting planner real data but there was no successfull planned transforms !");
-                return m_complexBuffer;
-            }
-
-        template <typename T, std::size_t Dim>
-            fft::Transform Planner<T,Dim>::findTransform(const std::pair<Extension,Extension>& ed) const {
-                if(ed.first == Extension::EVEN) {
-                    if(ed.second == Extension::EVEN)
-                        return fft::Transform(FFTW_REDFT00);
-                    else
-                        return fft::Transform(FFTW_REDFT01);
-                }
-                else if(ed.first == Extension::ODD) {
-                    if(ed.second == Extension::EVEN)
-                        return fft::Transform(FFTW_RODFT01);
-                    else
-                        return fft::Transform(FFTW_RODFT00);
-                }
-                else {
-                    return fft::Transform(FFTW_FORWARD);
-                }
-            }
-                    
-        template <typename T, std::size_t Dim>
-        template <typename Transfo>
-        void Planner<T,Dim>::toStreamTransform(std::ostream& os, const Transfo& tr, 
-                int rank, int howmany_rank, 
-                const fftw_iodim* dims, const fftw_iodim* howmany_dims,
-                const int input_data_offset, const int output_data_offset) {
-            os << "\t  --" << tr << "--" << std::endl;
-            os << "\t\tdims[" << rank << "] = {" << std::endl;
-            for (int i = 0; i < rank-1; i++) 
-                os << "\t\t  " << dims[i] << "," << std::endl;
-            os << "\t\t  " << dims[rank-1] << std::endl;
-            os << "\t\t};" << std::endl;
-            os << "\t\thowmany[" << howmany_rank << "] = {" << std::endl;
-            for (int i = 0; i < howmany_rank-1; i++) 
-                os << "\t\t  " << howmany_dims[i] << "," << std::endl;
-            os << "\t\t  " << howmany_dims[howmany_rank-1] << std::endl;
-            os << "\t\t};" << std::endl;
-            os << "\t\tinput  data offset: " << input_data_offset << std::endl;
-            os << "\t\toutput data offset: " << output_data_offset << std::endl;
-        }
-                    
-        /* Get planned transform description */
-        template <typename T, std::size_t Dim>
-            const std::string& Planner<T,Dim>::toString() const {
-                return m_plannedTransformStr;
-        }
-        
-        template <typename T, std::size_t Dim>
-            std::ostream& operator<<(std::ostream& os, const Planner<T,Dim>& planner) {
-                os << planner.toString();
-                return os;
-        }
-                    
-        template <typename T, std::size_t Dim>
-        Planner<T,Dim>& Planner<T,Dim>::setFixedAxeWaveNumbers(std::complex<T> p_fixedAxeWaveNumber) {
-            m_fixedAxeWaveNumbers = p_fixedAxeWaveNumber;
-            return *this;
-        }
-        
-        template <typename T, std::size_t Dim>
-        Planner<T,Dim>& Planner<T,Dim>::setFixedAxeWaveNumberPows(std::complex<T> p_fixedAxeWaveNumberPow) {
-            m_fixedAxeWaveNumberPows = p_fixedAxeWaveNumberPow;
-            return *this;
-        }
-
-    } /* end of namespace fft */
-} /* end of namespace hysop */
-
-
-#endif /* end of include guard: HYSOP_PLANNER_H */
diff --git a/src/hysop++/src/fft/transform.cpp b/src/hysop++/src/fft/transform.cpp
deleted file mode 100644
index 53531952f165ca6ed415d5b6fc32deec18a31930..0000000000000000000000000000000000000000
--- a/src/hysop++/src/fft/transform.cpp
+++ /dev/null
@@ -1,14 +0,0 @@
-
-#include "fft/transform.h"
-
-namespace hysop {
-    namespace fft {
-
-    std::ostream& operator<<(std::ostream& os, const Transform &tr) {
-        os << tr.toString();
-        return os;
-    }
-   
-    }
-}
-
diff --git a/src/hysop++/src/fft/transform.h b/src/hysop++/src/fft/transform.h
deleted file mode 100644
index 7a6e36a3b21f7836281adefb03b2b2c3a68dd2c7..0000000000000000000000000000000000000000
--- a/src/hysop++/src/fft/transform.h
+++ /dev/null
@@ -1,188 +0,0 @@
-
-#ifndef FFTTRANSFORM_H
-#define FFTTRANSFORM_H
-
-#include <complex>
-#include <iostream> 
-#include <fftw3.h>
-
-#include "utils/constants.h"
-
-namespace hysop {
-    namespace fft {
-
-        struct Transform;
-        std::ostream& operator<<(std::ostream& os, const Transform &tr);
-
-        enum BaseFunc {
-            CEXP,
-            SIN,
-            COS
-        };
-
-        struct Transform {
-            public:
-                int kind;
-
-                Transform(int p_kind = 0):
-                    kind(p_kind) {}
-    
-                bool isR2C() const {
-                    return kind == FFTW_FORWARD;
-                }
-                bool isR2R() const {
-                    return !this->isR2C();
-                }
-
-                BaseFunc basefunc() const {
-                    switch(kind) {
-                        case(FFTW_REDFT00):
-                        case(FFTW_REDFT01):
-                        case(FFTW_REDFT10):
-                        case(FFTW_REDFT11):
-                            return COS;
-                        case(FFTW_RODFT00):
-                        case(FFTW_RODFT01):
-                        case(FFTW_RODFT10):
-                        case(FFTW_RODFT11):
-                            return SIN;
-                        case(FFTW_R2HC):
-                        case(FFTW_HC2R):
-                        case(FFTW_FORWARD):
-                            return CEXP;
-                        default:
-                            throw std::runtime_error("Unknown transform !");
-                    }
-                }
-
-                template <typename T>
-                std::complex<T> omega(std::size_t k, std::size_t N, T L = T(1), bool lastDim=false) const {
-                    using namespace hysop::constants;
-                    switch(kind) {
-                        case(FFTW_FORWARD):
-                            if(lastDim) {
-                                return std::complex<T>(T(0),T(2)*pi*T(k)/L);
-                            }
-                            else {
-                                T kk;
-                                if(k <= N/2 -1)
-                                    kk = T(k);
-                                else if(k==N/2)
-                                    kk = T(0);
-                                else 
-                                    kk = T(k)-T(N);
-                                return std::complex<T>(T(0),T(2)*pi*kk/L);
-                            }
-                        case(FFTW_REDFT00):
-                            return std::complex<T>(pi*T(k)/L, T(0));
-                        case(FFTW_RODFT00): /* offset +1 */
-                            return std::complex<T>(pi*T(k+1)/L, T(0));
-                        case(FFTW_REDFT01):
-                            return std::complex<T>(pi*(T(k)+T(0.5))/L, T(0));
-                        case(FFTW_RODFT01): /* -0.5 + 1 offset = +0.5 */
-                            return std::complex<T>(pi*(T(k)+T(0.5))/L, T(0));
-                        default:
-                            throw std::runtime_error("Not implemented yet !");
-                    }
-                }
-                
-                template <typename T>
-                    T normalisation(std::size_t n) const {
-                    switch(kind) {
-                        case(FFTW_FORWARD):
-                            return T(n);
-                        case(FFTW_RODFT00):
-                            return T(2*(n+1));
-                        case(FFTW_REDFT00):
-                            return T(2*(n-1));
-                        case(FFTW_REDFT01):
-                        case(FFTW_REDFT10):
-                        case(FFTW_REDFT11):
-                        case(FFTW_RODFT01):
-                        case(FFTW_RODFT10):
-                        case(FFTW_RODFT11):
-                            return T(2*n);
-                        default:
-                            return T(n);
-                    }
-                }
-
-                int inverseTransform() const {
-                    switch(kind) {
-                        case(FFTW_REDFT00):
-                        case(FFTW_RODFT00):
-                            return kind;
-                        case(FFTW_REDFT01):
-                            return FFTW_REDFT10;
-                        case(FFTW_REDFT10):
-                            return FFTW_REDFT01;
-                        case(FFTW_RODFT01):
-                            return FFTW_RODFT10;
-                        case(FFTW_RODFT10):
-                            return FFTW_RODFT01;
-                        case(FFTW_R2HC):
-                            return FFTW_HC2R;
-                        case(FFTW_HC2R):
-                            return FFTW_R2HC;
-                        case(FFTW_FORWARD):
-                            return FFTW_BACKWARD;
-                        default:
-                            throw std::runtime_error("Unknown transform !");
-                    }
-                }
-
-                int conjugateInverseTransform() const {
-                    switch(kind) {
-                        case(FFTW_REDFT00):
-                            return FFTW_RODFT00;
-                        case(FFTW_REDFT01):
-                            return FFTW_RODFT10;
-                        case(FFTW_REDFT10):
-                            return FFTW_RODFT01;
-                        case(FFTW_RODFT00):
-                            return FFTW_REDFT00;
-                        case(FFTW_RODFT01):
-                            return FFTW_REDFT10;
-                        case(FFTW_RODFT10):
-                            return FFTW_REDFT01;
-                        default:
-                            return this->inverseTransform();
-                    }
-                }
-
-                std::string toString() const {
-                    switch(kind) {
-                        case(FFTW_REDFT00):
-                            return "FFTW_REDFT00";
-                        case(FFTW_RODFT00):
-                            return "FFTW_RODFT00";
-                        case(FFTW_REDFT01):
-                            return "FFTW_REDFT01";
-                        case(FFTW_REDFT11):
-                            return "FFTW_REDFT11";
-                        case(FFTW_REDFT10):
-                            return "FFTW_REDFT10";
-                        case(FFTW_RODFT01):
-                            return "FFTW_RODFT01";
-                        case(FFTW_RODFT10):
-                            return "FFTW_RODFT10";
-                        case(FFTW_RODFT11):
-                            return "FFTW_RODFT11";
-                        case(FFTW_R2HC):
-                            return "FFTW_R2HC";
-                        //case(FFTW_HC2R):
-                            //return "FFTW_HC2R";
-                        case(FFTW_BACKWARD):
-                            return "FFTW_BACKWARD";
-                        case(FFTW_FORWARD):
-                            return "FFTW_FORWARD";
-                        default:
-                            return "FFTW_TRANSFORM_KIND_STRING_NOT_FOUND";
-                    }
-                }
-        };
-
-    }
-}
-
-#endif /* end of include guard: FFTTRANSFORM_H */
diff --git a/src/hysop++/src/maths/polynomial.h b/src/hysop++/src/maths/polynomial.h
deleted file mode 100644
index 5373e9eced5c07b8bb3c152cc9fcd66daae301e0..0000000000000000000000000000000000000000
--- a/src/hysop++/src/maths/polynomial.h
+++ /dev/null
@@ -1,428 +0,0 @@
-
-#ifndef HYSOP_POLYNOMIAL_H
-#define HYSOP_POLYNOMIAL_H
-
-#include <array>
-#include "data/multi_array/multi_array.h"
-
-namespace hysop {
-    namespace maths {
-
-        /* Polynomials in dimension Dim with coefficients of type T */
-        /* Basic polynomial operations are provided                 */
-        /* TODO: Implement fast Nlog(N) multiplication by FFT       */
-        /* TODO: Implement polynomial division                      */
-        template <typename T, std::size_t Dim>
-            class Polynomial;
-
-        template <typename T, std::size_t Dim>
-            std::ostream& operator<<(std::ostream& os, const Polynomial<T,Dim>& poly);
-
-        template <typename T, std::size_t Dim>
-            class Polynomial {
-                public:
-                    /* constructors, destructors & operator= */
-                    Polynomial()                                      = default;
-                    Polynomial(const Polynomial&  p_other)            = default;
-                    Polynomial(      Polynomial&& p_other)            = default;
-                    Polynomial& operator=(const Polynomial& p_other)  = default;
-                    Polynomial& operator=(      Polynomial&& p_other) = default;
-                    ~Polynomial()                                     = default;
-
-                    Polynomial(const std::array<std::size_t,Dim>& p_order);
-                    explicit Polynomial(const hysop::multi_array<T,Dim>&  p_coeffs);
-                    explicit Polynomial(      hysop::multi_array<T,Dim>&& p_coeffs);
-
-                    template <typename U>
-                    explicit Polynomial(const Polynomial<U,Dim>& p_other);
-                    
-                    /* accessors */
-                    const hysop::multi_array<T,Dim>&    coefficients() const;
-                          hysop::multi_array<T,Dim>&    coefficients();
-                    const std::array<std::size_t,Dim>&  order()        const;
-                          std::array<std::size_t,Dim>   shape()        const; /* == order + 1 */
-
-                    /* mutators */
-                    Polynomial& reshape(const std::array<std::size_t,Dim>& p_shape);
-                    Polynomial& setOrder(const std::array<std::size_t,Dim>& p_order);
-            
-                    Polynomial& applyToCoefficients(const std::function<void(T&)>& func);
-                    Polynomial& applyToCoefficients(const std::function<void(T&, const Index<Dim>&)>& func);
-                    
-                    /* apply func(T&, const Index<Dim>&, farg0, fargs...) to all coefficients */
-                    template <typename Functor, typename Arg0, typename... Args> 
-                        Polynomial& applyToCoefficients(const Functor& func, Arg0&& farg0, Args&&... fargs);
-                    
-                    /* elementwise access to coefficients */
-                    const T& operator[](const Index<Dim> &p_id) const;
-                          T& operator[](const Index<Dim> &p_id);
-                    const T& operator[](std::size_t k) const;
-                          T& operator[](std::size_t k);
-
-                    /* polynomial function evaluation with arbitrary type */
-                    template <typename U1, typename... U, typename R = typename std::common_type<T,U1,U...>::type> 
-                        R operator()(const U1& x1, const U&... xs) const;
-                    template <typename U, typename R=typename std::common_type<T,U>::type> 
-                        R operator()(const std::array<U,Dim> &x) const;
-                    template <typename U, typename R=typename std::common_type<T,U>::type> 
-                        R operator()(const U* x) const;
-
-                    /* basic elementwise operations */
-                    Polynomial& operator+=(const Polynomial& p_other);
-                    Polynomial& operator-=(const Polynomial& p_other);
-
-                    Polynomial& operator*=(const T& p_val);
-                    Polynomial& operator/=(const T& p_val);
-                    Polynomial& operator%=(const T& p_val);
-                    
-                    /* polynomial multiplication and division */
-                    Polynomial& operator*=(const Polynomial& p_other);
-                    Polynomial& operator/=(const Polynomial& p_other);
-                
-                    /* integral and derivatives */
-                    Polynomial& integrate    (std::size_t dim, int order);
-                    Polynomial& differentiate(std::size_t dim, int order);
-
-                    template <typename I> typename std::enable_if<std::is_integral<I>::value, Polynomial&>::type                        
-                        differentiate(const std::array<I,Dim>& order);
-                    template <typename I> typename std::enable_if<std::is_integral<I>::value, Polynomial&>::type                        
-                        integrate(const std::array<I,Dim>& order);
-
-                    template <typename I> typename std::enable_if<std::is_integral<I>::value, Polynomial&>::type
-                    operator >>=(const std::array<I,Dim>& order);
-                    template <typename I> typename std::enable_if<std::is_integral<I>::value, Polynomial&>::type
-                    operator <<=(const std::array<I,Dim>& order);
-
-                    /* comparisson operators - uses near equality if T is a floating point type */
-                    bool operator==(const Polynomial& other);
-                    bool operator!=(const Polynomial& other);
-
-                    /* misc */
-                    std::string toString(unsigned int p_precision=2, unsigned int p_width=6) const;
-
-                protected:
-                    /* misc */
-                    template <std::size_t D>
-                    std::string toStringImpl(const T& p_coeff, unsigned int p_precision, unsigned int p_width, bool p_begin, bool p_end) const {
-                        std::stringstream ss;
-                        ss << (p_begin ? "" : " ") << std::fixed << std::showpos << std::setprecision(p_precision) << p_coeff;
-                        return ss.str();
-                    }
-                    template <std::size_t D, typename ArrayView, std::size_t K=Dim-D>
-                        std::string toStringImpl(const ArrayView& p_view, unsigned int p_precision, unsigned int p_width, 
-                                bool=false, bool=false) const {
-                            static const char varNames[3] = { 'z','y','x' };
-                            static const int offset = (Dim==1 ? 2 : (Dim==2 ? 1 : (Dim==3 ? 0 : -1)));
-                            static const char delimiters[3][2] = { {'[',']'},
-                                                                   {'{','}'},
-                                                                   {'(',')'} };
-
-
-                            std::string str;
-                            for (std::ptrdiff_t k=m_coeffs.shape()[K]-1; k>=0; k--) {
-                                std::string localStr = toStringImpl<D-1>(
-                                        p_view[k], p_precision, p_width, k==std::ptrdiff_t(m_coeffs.shape()[K]-1), k==0);
-                                if(localStr!="") {
-                                    if(D>1)
-                                        str += delimiters[D%3][0];
-                                    str += localStr;
-                                    if(D>1)
-                                        str += delimiters[D%3][1];
-                                    
-                                    std::string varName;
-                                    if(Dim<=3)
-                                        varName = varNames[K+offset];
-                                    else 
-                                        varName = "x_"+std::to_string(D);
-                                    if(k==0)
-                                        ;
-                                    else if(k==1)
-                                        str += varName;
-                                    else
-                                        str += varName + "^" + std::to_string(k);
-                                    if(k>0 && D>1)
-                                        str += " + ";
-                                }
-                            }
-                            return str;
-                    }
-
-                    /* static members */
-                    static std::array<std::size_t,Dim> orderFromShape(const std::array<std::size_t,Dim>& p_shape);
-                    static std::array<std::size_t,Dim> shapeFromOrder(const std::array<std::size_t,Dim>& p_order);
-               
-                public:
-                    template <typename X>
-                        struct PolynomIndex : public Index<Dim> {
-
-                            public:
-                                using typename Index<Dim>::Dimension;
-                                using typename Index<Dim>::Indices;
-                            public:
-                                template <typename DimArray=typename Index<Dim>::Dimension, typename IndexArray=typename Index<Dim>::Indices>
-                                PolynomIndex(std::array<X,Dim> p_spaceVar,
-                                        const DimArray&   p_dim = Dimension{0}, 
-                                        const IndexArray& p_ids = Indices{0});
-                                const std::array<X,Dim>& spaceVariable() const; /* returns {X[0],...,X[Dim-1]} */
-                                                               X value() const; /* returns X[0]^id[0] * X1^id[1] * ... * X[Dim-1]^id[Dim-1]*/
-                            protected:
-                                void initialize();
-                                virtual void onIndexChange   (std::size_t p_pos, std::ptrdiff_t p_offset) final override;
-                                virtual void onIndexOverflow (std::size_t p_pos) final override;
-
-                            protected:
-                                const std::array<X,Dim> m_spaceVar;
-                                std::array<X,Dim> m_powers; 
-                                X m_value;
-                        };
-
-                protected:
-                    hysop::multi_array<T,Dim>   m_coeffs;
-                    std::array<std::size_t,Dim> m_order;
-            };
-
-        /* unary operations */
-        template <typename T, std::size_t Dim>
-            Polynomial<T,Dim> operator+(const Polynomial<T,Dim>& poly);
-        template <typename T, std::size_t Dim>
-            Polynomial<T,Dim> operator-(const Polynomial<T,Dim>& poly);
-
-        /* basic operations */
-        template <typename T1, typename T2, std::size_t Dim, typename T = typename std::common_type<T1,T2>::type>
-            Polynomial<T,Dim> operator+(const Polynomial<T1,Dim>& lhs, const Polynomial<T2,Dim>& rhs);
-        template <typename T1, typename T2, std::size_t Dim, typename T = typename std::common_type<T1,T2>::type>
-            Polynomial<T,Dim> operator-(const Polynomial<T1,Dim>& lhs, const Polynomial<T2,Dim>& rhs);
-        template <typename T1, typename T2, std::size_t Dim, typename T = typename std::common_type<T1,T2>::type>
-            Polynomial<T,Dim> operator*(const Polynomial<T1,Dim>& lhs, const Polynomial<T2,Dim>& rhs);
-        template <typename T1, typename T2, std::size_t Dim, typename T = typename std::common_type<T1,T2>::type>
-            Polynomial<T,Dim> operator/(const Polynomial<T1,Dim>& lhs, const Polynomial<T2,Dim>& rhs);
-
-
-        /* tensor product of polynomials */
-        template <typename T1, typename T2, std::size_t Dim1, std::size_t Dim2, 
-                  typename T=typename std::common_type<T1,T2>::type, std::size_t Dim=Dim1+Dim2>
-            Polynomial<T,Dim> operator|(const Polynomial<T1,Dim1>& lhs, const Polynomial<T2,Dim2>& rhs);
-        
-
-        /* integral and derivatives */
-        template <typename T, std::size_t Dim, typename I>
-            typename std::enable_if<std::is_integral<I>::value, Polynomial<T,Dim>>::type
-            operator<<(const Polynomial<T,Dim>& lhs, const std::array<I,Dim>& k);
-        template <typename T, std::size_t Dim, typename I>
-            typename std::enable_if<std::is_integral<I>::value, Polynomial<T,Dim>>::type
-            operator>>(const Polynomial<T,Dim>& lhs, const std::array<I,Dim>& k);
-
-        template <typename T, std::size_t Dim, typename I>
-            typename std::enable_if<std::is_integral<I>::value, Polynomial<T,Dim>>::type
-            operator<<(Polynomial<T,Dim>&& lhs, const std::array<I,Dim>& k);
-        template <typename T, std::size_t Dim, typename I>
-            typename std::enable_if<std::is_integral<I>::value, Polynomial<T,Dim>>::type
-            operator>>(Polynomial<T,Dim>&& lhs, const std::array<I,Dim>& k);
-        
-
-
-          /********************/
-         /** IMPLEMENTATION **/
-        /********************/
-        
-        /* static members */
-        template <typename T, std::size_t Dim>
-            std::array<std::size_t,Dim> Polynomial<T,Dim>::orderFromShape(const std::array<std::size_t,Dim>& p_shape) {
-                std::array<std::size_t,Dim> order;
-                for (std::size_t d = 0; d < Dim; d++)
-                    order[d] = p_shape[d]-1;
-                return order;
-            }
-
-        template <typename T, std::size_t Dim>
-            std::array<std::size_t,Dim> Polynomial<T,Dim>::shapeFromOrder(const std::array<std::size_t,Dim>& p_order) {
-                std::array<std::size_t,Dim> shape;
-                for (std::size_t d = 0; d < Dim; d++)
-                    shape[d] = p_order[d]+1;
-                return shape;
-            }
-
-        /* constructors, destructors & operator= */
-        template <typename T, std::size_t Dim>
-        Polynomial<T,Dim>::Polynomial(const std::array<std::size_t,Dim>& p_shape) :
-            m_coeffs(), m_order() {
-                this->reshape(p_shape);
-        }
-        
-        template <typename T, std::size_t Dim>
-            Polynomial<T,Dim>::Polynomial(const hysop::multi_array<T,Dim>&  p_coeffs) :
-                m_coeffs(p_coeffs), m_order(orderFromShape(m_coeffs.shape())) {
-        }
-        
-        template <typename T, std::size_t Dim>
-            Polynomial<T,Dim>::Polynomial(hysop::multi_array<T,Dim>&& p_coeffs) :
-                m_coeffs(std::move(p_coeffs)), m_order(orderFromShape(m_coeffs.shape())) {
-        }
-                    
-        template <typename T, std::size_t Dim>
-        template <typename U>
-            Polynomial<T,Dim>::Polynomial(const Polynomial<U,Dim>& p_other) {
-                this->reshape(p_other.shape());
-                for (std::size_t k=0; k < m_coeffs.num_elements(); k++)
-                    m_coeffs.data()[k] = static_cast<T>(p_other.data()[k]);
-        }
-                    
-        /* accessors */
-        template <typename T, std::size_t Dim>
-        const hysop::multi_array<T,Dim>& Polynomial<T,Dim>::coefficients() const {
-            return m_coeffs;
-        }
-        template <typename T, std::size_t Dim>
-        hysop::multi_array<T,Dim>& Polynomial<T,Dim>::coefficients() {
-            return m_coeffs;
-        }
-        template <typename T, std::size_t Dim>
-        const std::array<std::size_t,Dim>&  Polynomial<T,Dim>::order() const {
-            return m_order;
-        }
-        template <typename T, std::size_t Dim>
-        std::array<std::size_t,Dim> Polynomial<T,Dim>::shape() const {
-            return m_coeffs.shape();
-        }
-                    
-        /* mutators */
-        template <typename T, std::size_t Dim>
-        Polynomial<T,Dim>& Polynomial<T,Dim>::reshape(const std::array<std::size_t,Dim>& p_shape) {
-            m_order = orderFromShape(p_shape);
-            m_coeffs.reshape(p_shape);
-            return *this;
-        }
-        
-        template <typename T, std::size_t Dim>
-        Polynomial<T,Dim>& Polynomial<T,Dim>::setOrder(const std::array<std::size_t,Dim>& p_order) {
-            m_order = p_order;
-            m_coeffs.reshape(shapeFromOrder(p_order));
-            return *this;
-        }
-            
-
-        template <typename T, std::size_t Dim>
-        Polynomial<T,Dim>& Polynomial<T,Dim>::applyToCoefficients(const std::function<void(T&)>& func) {
-            m_coeffs.apply(func);
-            return *this;
-        }
-        template <typename T, std::size_t Dim>
-        Polynomial<T,Dim>& Polynomial<T,Dim>::applyToCoefficients(const std::function<void(T&, const Index<Dim>&)>& func) {
-            m_coeffs.apply(func);
-            return *this;
-        }
-
-        /* apply func(T&, const Index<Dim>&, farg0, fargs...) on all coefficients */
-        template <typename T, std::size_t Dim>
-        template <typename Functor, typename Arg0, typename... Args> 
-        Polynomial<T,Dim>& Polynomial<T,Dim>::applyToCoefficients(const Functor& func, Arg0&& farg0, Args&&... fargs) {
-            m_coeffs.apply(func, farg0, fargs...);
-            return *this;
-        }
-
-        /* access to coefficients */
-        template <typename T, std::size_t Dim>
-        const T& Polynomial<T,Dim>::operator[](std::size_t k) const {
-            return m_coeffs.data()[k];
-        }
-        template <typename T, std::size_t Dim>
-        T& Polynomial<T,Dim>::operator[](std::size_t k) {
-            return m_coeffs.data()[k];
-        }
-        template <typename T, std::size_t Dim>
-        const T& Polynomial<T,Dim>::operator[](const Index<Dim> &p_id) const {
-            return m_coeffs.data()[p_id.id()];
-        }
-
-        template <typename T, std::size_t Dim>
-        T& Polynomial<T,Dim>::operator[](const Index<Dim> &p_id) {
-            return m_coeffs.data()[p_id.id()];
-        }
-
-        /* polynomial evaluation */
-        template <typename T, std::size_t Dim>
-        template <typename U1, typename... U, typename R> 
-            R Polynomial<T,Dim>::operator()(const U1& x1, const U&... xs) const {
-                return this->operator()(std::array<R,Dim>{x1,xs...});
-        }
-        template <typename T, std::size_t Dim>
-        template <typename U, typename R>
-            R Polynomial<T,Dim>::operator()(const U* p_x) const {
-                return this->operator()(std::array<U,Dim>(p_x));
-        }
-        template <typename T, std::size_t Dim>
-        template <typename U, typename R>
-            R Polynomial<T,Dim>::operator()(const std::array<U,Dim> &p_x) const {
-                /* compute result */
-                R res = R(0);
-                const T* coeffs = m_coeffs.data();
-                PolynomIndex<U> idx(p_x, this->shape());
-                while(!idx.atMaxId()) {
-                    res += coeffs[idx()]*idx.value();
-                    ++idx;
-                }
-                return res;
-        }
-                    
-        template <typename T, std::size_t Dim> 
-        std::string Polynomial<T,Dim>::toString(unsigned int p_precision, unsigned int p_width) const {
-            return toStringImpl<Dim>(m_coeffs, p_precision, p_width); 
-        }
-        
-        /* struct PolynomIndex */
-        template <typename T, std::size_t Dim> 
-            template <typename X> 
-                template <typename DimArray, typename IndexArray>
-            Polynomial<T,Dim>::PolynomIndex<X>::
-            PolynomIndex(std::array<X,Dim> p_spaceVar, const DimArray& p_dim, const IndexArray& p_ids):
-                Index<Dim>(p_dim, p_ids), m_spaceVar(p_spaceVar), m_powers{0}, m_value(0) {
-                    this->initialize();
-            }
-        template <typename T, std::size_t Dim> 
-            template <typename X> 
-            void Polynomial<T,Dim>::PolynomIndex<X>::initialize() {
-                m_value = X(1);
-                for (std::size_t d=0; d<Dim; d++) {
-                    X power = std::pow(m_spaceVar[d],this->operator[](d));
-                    m_powers[d] = power;
-                    m_value *= power; 
-                }
-            }
-        template <typename T, std::size_t Dim> 
-            template <typename X> 
-            const std::array<X,Dim>& Polynomial<T,Dim>::PolynomIndex<X>::spaceVariable() const {
-                return m_spaceVar;
-            }
-        template <typename T, std::size_t Dim> 
-            template <typename X> 
-            X Polynomial<T,Dim>::PolynomIndex<X>::value() const {
-                return m_value;
-            }
-        template <typename T, std::size_t Dim> 
-            template <typename X> 
-            void Polynomial<T,Dim>::PolynomIndex<X>::onIndexChange(std::size_t p_pos, std::ptrdiff_t p_offset) {
-                assert(p_offset == 1);
-                m_powers[p_pos] = m_powers[p_pos]*m_spaceVar[p_pos];
-                m_value *= m_spaceVar[p_pos];
-            }
-        template <typename T, std::size_t Dim> 
-            template <typename X> 
-            void Polynomial<T,Dim>::PolynomIndex<X>::onIndexOverflow (std::size_t p_pos) {
-                m_powers[p_pos] = X(1);
-                m_value = X(1);
-                for (std::size_t d=0; d < Dim; d++) 
-                    m_value *= m_powers[d];
-            }
-        
-        template <typename T, std::size_t Dim>
-            std::ostream& operator<<(std::ostream& os, const Polynomial<T,Dim>& poly) {
-                os << poly.toString();
-                return os;
-            }
-
-    } /* end of namespace maths */
-} /* end of namesapce hysop */
-
-
-#endif /* end of include guard: HYSOP_POLYNOMIAL_H */
-
diff --git a/src/hysop++/src/maths/quad_maths.h b/src/hysop++/src/maths/quad_maths.h
deleted file mode 100644
index 97ae1fe0bac81780893db112e28c1741bf141f39..0000000000000000000000000000000000000000
--- a/src/hysop++/src/maths/quad_maths.h
+++ /dev/null
@@ -1,299 +0,0 @@
-
-#ifdef HAS_QUADMATHS
-
-#ifndef HYSOP_QUAD_MATHS_H
-#define HYSOP_QUAD_MATHS_H
-
-#include <cfloat>
-#include <cmath>
-#include <limits>
-#include <quadmath.h>
-
-#include <iostream>
-#include <iomanip>
-
-/* missing gcc defines */
-#define FLT128_RADIX FLT_RADIX
-#define FLT128_HAS_DENORM      true
-#define FLT128_HAS_INFINITY    true
-#define FLT128_HAS_QUIET_NAN   true
-
-namespace std {
-
-    template<> struct numeric_limits<__float128> {
-        static constexpr bool is_specialized = true;
-
-        static constexpr __float128 min() { return FLT128_MIN; }
-        static constexpr __float128 max() { return FLT128_MAX; }
-        static constexpr __float128 lowest() noexcept { return -FLT128_MAX; }
-
-        static constexpr int digits = FLT128_MANT_DIG;
-        static constexpr int digits10 = FLT128_DIG;
-        static constexpr int max_digits10 = (2 + (FLT128_MANT_DIG) * 643L / 2136);
-
-        static constexpr bool is_signed = true;
-        static constexpr bool is_integer = false;
-        static constexpr bool is_exact = false;
-        static constexpr int radix = FLT128_RADIX;
-
-        static constexpr __float128 epsilon()      { return FLT128_EPSILON; }
-        static constexpr __float128 round_error()  { return 0.5; }
-
-        static constexpr int min_exponent = FLT128_MIN_EXP;
-        static constexpr int min_exponent10 = FLT128_MIN_10_EXP;
-        static constexpr int max_exponent = FLT128_MAX_EXP;
-        static constexpr int max_exponent10 = FLT128_MAX_10_EXP;
-
-        static constexpr bool has_infinity = FLT128_HAS_INFINITY;
-        static constexpr bool has_quiet_NaN = FLT128_HAS_QUIET_NAN;
-        static constexpr bool has_signaling_NaN = has_quiet_NaN;
-        static constexpr float_denorm_style has_denorm = bool(FLT128_HAS_DENORM) ? denorm_present : denorm_absent;
-        static constexpr bool has_denorm_loss = std::numeric_limits<float>::has_denorm_loss;
-
-        static constexpr __float128 infinity()      { return std::numeric_limits<float>::infinity(); }
-        static constexpr __float128 quiet_NaN()     { return std::numeric_limits<float>::quiet_NaN(); }
-        static constexpr __float128 signaling_NaN() { return std::numeric_limits<float>::signaling_NaN(); }
-        static constexpr __float128 denorm_min()    { return FLT128_DENORM_MIN; }
-
-        static constexpr bool is_iec559 = has_infinity && has_quiet_NaN && has_denorm == denorm_present;
-        static constexpr bool is_bounded = true;
-        static constexpr bool is_modulo = false;
-
-        static constexpr bool traps = false;
-        static constexpr bool tinyness_before = false;
-        static constexpr float_round_style round_style = round_to_nearest;
-    };
-
-    inline int fpclassify(__float128 arg) { return std::fpclassify(static_cast<long double>(arg)); }
-
-    inline __float128 abs(__float128 x) { return cabsq(__complex128{x,0.0Q}); }
-    inline __float128 acos(__float128 x) { return acosq(x); }
-    inline __float128 acosh(__float128 x) { return acoshq(x); }
-    inline __float128 asin(__float128 x) { return asinq(x); }
-    inline __float128 asinh(__float128 x) { return asinhq(x); }
-    inline __float128 atan(__float128 x) { return atanq(x); }
-    inline __float128 atanh(__float128 x) { return atanhq(x); }
-    inline __float128 cbrt(__float128 x) { return cbrtq(x); }
-    inline __float128 ceil(__float128 x) { return ceilq(x); }
-    inline __float128 cosh(__float128 x) { return coshq(x); }
-    inline __float128 cos(__float128 x) { return cosq(x); }
-    inline __float128 erf(__float128 x) { return erfq(x); }
-    inline __float128 erfc(__float128 x) { return erfcq(x); }
-    inline __float128 exp(__float128 x) { return expq(x); }
-    inline __float128 expm1(__float128 x) { return expm1q(x); }
-    inline __float128 fabs(__float128 x) { return fabsq(x); }
-    inline int        finite(__float128 x) { return finiteq(x); }
-    inline __float128 floor(__float128 x) { return floorq(x); }
-    inline __float128 frexp(__float128 x, int* p) { return frexpq(x,p); }
-    inline int        isinf(__float128 x) { return isinfq(x); }
-    inline int        ilogb(__float128 x) { return ilogbq(x); }
-    inline int        isnan(__float128 x) { return isnanq(x); }
-    inline __float128 j0(__float128 x) { return j0q(x); }
-    inline __float128 j1(__float128 x) { return j1q(x); }
-    inline __float128 jn(int i, __float128 x) { return jnq(i,x); }
-    inline __float128 ldexp(__float128 x, int i) { return ldexpq(x,i); }
-    inline __float128 lgamma(__float128 x) { return lgammaq(x); }
-    inline long long int llrint(__float128 x) { return llrintq(x); }
-    inline long long int llround(__float128 x) { return llroundq(x); }
-    inline __float128 log(__float128 x) { return logq(x); }
-    inline __float128 log10(__float128 x) { return log10q(x); }
-    inline __float128 log2(__float128 x) { return log2q(x); }
-    inline __float128 log1p(__float128 x) { return log1pq(x); }
-    inline long int   lrint(__float128 x) { return lrintq(x); }
-    inline long int   lround(__float128 x) { return lroundq(x); }
-    inline __float128 nearbyint(__float128 x) { return nearbyintq(x); }
-    inline __float128 pow(__float128 x, __float128 y) { return powq(x,y); }
-    inline __float128 rint(__float128 x) { return rintq(x); }
-    inline __float128 round(__float128 x) { return roundq(x); }
-    inline __float128 scalbln(__float128 x, long int li) { return scalblnq(x,li); }
-    inline __float128 scalbn(__float128 x, int i) { return scalbnq(x,i); }
-    inline int        signbit(__float128 x) { return signbitq(x); }
-    inline __float128 sinh(__float128 x) { return sinhq(x); }
-    inline __float128 sin(__float128 x) { return sinq(x); }
-    inline __float128 sqrt(__float128 x) { return sqrtq(x); }
-    inline __float128 tan(__float128 x) { return tanq(x); }
-    inline __float128 tanh(__float128 x) { return tanhq(x); }
-    inline __float128 tgamma(__float128 x) { return tgammaq(x); }
-    inline __float128 trunc(__float128 x) { return truncq(x); }
-    inline __float128 y0(__float128 x) { return y0q(x); }
-    inline __float128 y1(__float128 x) { return y1q(x); }
-    inline __float128 yn(int i, __float128 x) { return ynq(i,x); }
-
-
-    /* Prototypes for complex functions */
-    inline __float128 abs(__complex128 x) { return cabsq(x); }
-    inline __float128 arg(__complex128 x) { return cargq(x); }
-    inline __float128 imag(__complex128 x) { return cimagq(x); }
-    inline __float128 real(__complex128 x) { return crealq(x); }
-    inline __complex128 acos(__complex128 x) { return cacosq(x); }
-    inline __complex128 acosh(__complex128 x) { return cacoshq(x); }
-    inline __complex128 asin(__complex128 x) { return casinq(x); }
-    inline __complex128 asinh(__complex128 x) { return casinhq(x); }
-    inline __complex128 atan(__complex128 x) { return catanq(x); }
-    inline __complex128 atanh(__complex128 x) { return catanhq(x); }
-    inline __complex128 cos(__complex128 x) { return ccosq(x); }
-    inline __complex128 cosh(__complex128 x) { return ccoshq(x); }
-    inline __complex128 exp(__complex128 x) { return cexpq(x); }
-    inline __complex128 expi(__float128 x) { return cexpiq(x); }
-    inline __complex128 log10(__complex128 x) { return clog10q(x); }
-    inline __complex128 conj(__complex128 x) { return conjq(x); }
-    inline __complex128 pow(__complex128 x, __complex128 y) { return cpowq(x,y); }
-    inline __complex128 proj(__complex128 x) { return cprojq(x); }
-    inline __complex128 sin(__complex128 x) { return csinq(x); }
-    inline __complex128 sinh(__complex128 x) { return csinhq(x); }
-    inline __complex128 sqrt(__complex128 x) { return csqrtq(x); }
-    inline __complex128 tan(__complex128 x) { return ctanq(x); }
-    inline __complex128 tanh(__complex128 x) { return ctanhq(x); }
-
-    inline std::ostream& operator<<(std::ostream& os, __float128 x) {
-        const int prec = os.precision();
-
-        if(prec==0) {
-            os << static_cast<long long int>(x);
-        }
-        else { 
-            char buf[128];
-            const std::string format = "%+-#"+std::to_string(prec)+".*Qe";
-            const int n = quadmath_snprintf(buf,128,format.c_str(),prec,x);
-            if(n>127) {
-                char *str = new char[n+1];
-                if (str)
-                    quadmath_snprintf (str,n+1,format.c_str(),prec,x);
-                os << str;
-                delete(str);
-            }
-            else {
-                os << buf;
-            }
-        }
-
-        return os;
-    }
-}
-
-#include <complex>
-
-namespace std {
-
-    inline __float128 abs(std::complex<__float128> x) { 
-        __complex128 X{x.real(),x.imag()};
-        return cabsq(X);
-    }
-    inline __float128 arg(std::complex<__float128> x) {
-        __complex128 X{x.real(),x.imag()};
-        return cargq(X);
-    }
-    inline __float128 imag(std::complex<__float128> x) { 
-        __complex128 X{x.real(),x.imag()};
-        return cimagq(X);
-    }
-    inline __float128 real(std::complex<__float128> x) { 
-        __complex128 X{x.real(),x.imag()};
-        return crealq(X);
-    }
-    inline std::complex<__float128> acos(std::complex<__float128> x) { 
-        __complex128 X{x.real(),x.imag()};
-        X = cacosq(X);
-        return std::complex<__float128>(__real__ X, __imag__ X);
-    }
-    inline std::complex<__float128> acosh(std::complex<__float128> x) { 
-        __complex128 X{x.real(),x.imag()};
-        X = cacoshq(X);
-        return std::complex<__float128>(__real__ X, __imag__ X);
-    }
-    inline std::complex<__float128> asin(std::complex<__float128> x) { 
-        __complex128 X{x.real(),x.imag()};
-        X = casinq(X);
-        return std::complex<__float128>(__real__ X, __imag__ X);
-    }
-    inline std::complex<__float128> asinh(std::complex<__float128> x) { 
-        __complex128 X{x.real(),x.imag()};
-        X = casinhq(X);
-        return std::complex<__float128>(__real__ X, __imag__ X);
-    }
-    inline std::complex<__float128> atan(std::complex<__float128> x) { 
-        __complex128 X{x.real(),x.imag()};
-        X = catanq(X);
-        return std::complex<__float128>(__real__ X, __imag__ X);
-    }
-    inline std::complex<__float128> atanh(std::complex<__float128> x) { 
-        __complex128 X{x.real(),x.imag()};
-        X = catanhq(X);
-        return std::complex<__float128>(__real__ X, __imag__ X);
-    }
-    inline std::complex<__float128> cos(std::complex<__float128> x) { 
-        __complex128 X{x.real(),x.imag()};
-        X = ccosq(X);
-        return std::complex<__float128>(__real__ X, __imag__ X);
-    }
-    inline std::complex<__float128> cosh(std::complex<__float128> x) { 
-        __complex128 X{x.real(),x.imag()};
-        X = ccoshq(X);
-        return std::complex<__float128>(__real__ X, __imag__ X);
-    }
-    inline std::complex<__float128> exp(std::complex<__float128> x) { 
-        __complex128 X{x.real(),x.imag()};
-        X = cexpq(X);
-        return std::complex<__float128>(__real__ X, __imag__ X);
-    }
-    //inline std::complex<__float128> expi(__float128 x) { 
-        //__complex128 X = cexpiq(x);
-        //return std::complex<__float128>(__real__ X, __imag__ X);
-    //}
-    inline std::complex<__float128> log10(std::complex<__float128> x) { 
-        __complex128 X{x.real(),x.imag()};
-        X = clog10q(X);
-        return std::complex<__float128>(__real__ X, __imag__ X);
-    }
-    inline std::complex<__float128> conj(std::complex<__float128> x) { 
-        __complex128 X{x.real(),x.imag()};
-        X = conjq(X);
-        return std::complex<__float128>(__real__ X, __imag__ X);
-    }
-    inline std::complex<__float128> pow(std::complex<__float128> x, std::complex<__float128> y) {
-        __complex128 X{x.real(),x.imag()};
-        __complex128 Y{y.real(),y.imag()};
-        X = cpowq(X,Y);
-        return std::complex<__float128>(__real__ X, __imag__ X);
-    }
-    inline std::complex<__float128> proj(std::complex<__float128> x) { 
-        __complex128 X{x.real(),x.imag()};
-        X = cprojq(X);
-        return std::complex<__float128>(__real__ X, __imag__ X);
-    }
-    inline std::complex<__float128> sin(std::complex<__float128> x) { 
-        __complex128 X{x.real(),x.imag()};
-        X = csinq(X);
-        return std::complex<__float128>(__real__ X, __imag__ X);
-    }
-    inline std::complex<__float128> sinh(std::complex<__float128> x) { 
-        __complex128 X{x.real(),x.imag()};
-        X = csinhq(X);
-        return std::complex<__float128>(__real__ X, __imag__ X);
-    }
-    inline std::complex<__float128> sqrt(std::complex<__float128> x) { 
-        __complex128 X{x.real(),x.imag()};
-        X = csqrtq(X);
-        return std::complex<__float128>(__real__ X, __imag__ X);
-    }
-    inline std::complex<__float128> tan(std::complex<__float128> x) { 
-        __complex128 X{x.real(),x.imag()};
-        X = ctanq(X);
-        return std::complex<__float128>(__real__ X, __imag__ X);
-    }
-    inline std::complex<__float128> tanh(std::complex<__float128> x) { 
-        __complex128 X{x.real(),x.imag()};
-        X = ctanhq(X);
-        return std::complex<__float128>(__real__ X, __imag__ X);
-    }
-
-    inline std::complex< __float128 > pow(std::complex< __float128> x , __float128 y) {
-        __float128 R = powq(std::abs(x), y);
-        __float128 phi = atanq(x.imag()/x.real());
-        return std::complex<__float128 >(R*cosq(y*phi), R*sinq(y*phi));
-    }
-}
-
-#endif /* end of include guard: HYSOP_QUAD_MATHS_H */
-
-#endif
diff --git a/src/hysop++/src/solver/diffSolver.h b/src/hysop++/src/solver/diffSolver.h
deleted file mode 100644
index 7b6908803e5a6af6e4db89638b211049d3f9e957..0000000000000000000000000000000000000000
--- a/src/hysop++/src/solver/diffSolver.h
+++ /dev/null
@@ -1,29 +0,0 @@
-
-#ifndef HYSOP_DIFFSOLVER_H
-#define HYSOP_DIFFSOLVER_H
-
-#include "data/multi_array/multi_array.h"
-
-namespace hysop {
-    namespace solver {
-        template <typename T, std::size_t Dim>
-            class DiffSolver {
-
-                public:
-                    virtual void apply(hysop::const_multi_array_ref<T,Dim> input,
-                            hysop::multi_array_ref<T,Dim> output,
-                            const std::array<int,Dim> &order) const = 0;
-
-                    void operator()(hysop::const_multi_array_ref<T,Dim> input, 
-                                    hysop::multi_array_ref<T,Dim> output, 
-                            const std::array<int,Dim> &order) {
-                        this->apply(input,output,order);
-                    }
-
-            };
-
-    } /* end of namespace solver */
-} /* end of namespace hysop */
-
-#endif /* end of include guard: HYSOP_DIFFSOLVER_H */
-
diff --git a/src/hysop++/src/solver/fftDiffSolver.h b/src/hysop++/src/solver/fftDiffSolver.h
deleted file mode 100644
index d9951e7a76358a280ecf0ae2480da6d3c2078ada..0000000000000000000000000000000000000000
--- a/src/hysop++/src/solver/fftDiffSolver.h
+++ /dev/null
@@ -1,191 +0,0 @@
-
-#ifndef HYSOP_FFTDIFFSOLVER_H
-#define HYSOP_FFTDIFFSOLVER_H
-
-#include "solver/diffSolver.h"
-#include "fft/planner.h"
-#include "data/accumulatorIndex.h"
-
-namespace hysop {
-    namespace solver {
-
-        template <typename T, std::size_t Dim>
-            class FftDiffSolver : public DiffSolver<T,Dim>  {
-
-                private:
-                    using super = DiffSolver<T,Dim>;
-                public:
-                    FftDiffSolver()                              = default;
-                    FftDiffSolver(const FftDiffSolver&  p_other) = default;
-                    FftDiffSolver(      FftDiffSolver&& p_other) = default;
-                    ~FftDiffSolver()                             = default;
-
-                    FftDiffSolver& operator=(const FftDiffSolver&  p_other) = default;
-                    FftDiffSolver& operator=(      FftDiffSolver&& p_other) = default;
-
-                    FftDiffSolver(const std::array<T,Dim> &p_domainSize, const fft::FftDomainConfiguration<Dim> &p_inputFftConfig,
-                            unsigned int p_fftFlags = FFTW_MEASURE,
-                            bool p_includeOutputPeriodicBds=false, bool p_mirrorOutputPeriodicBds=false);
-
-                    /* Mutators */
-                    FftDiffSolver& setDomainSize(const std::array<T,Dim>& p_domainSize);
-                    FftDiffSolver& setFftDomainConfiguration(const fft::FftDomainConfiguration<Dim>& p_fftConfig);
-
-                    FftDiffSolver& setFftFlags(unsigned int p_flags);
-                    FftDiffSolver& appendFftFlags(unsigned int p_flags);
-
-                    FftDiffSolver& includeOutputPeriodicBoundaries(bool p_val = true);
-                    FftDiffSolver& excludeOutputPeriodicBoundaries();
-
-                    FftDiffSolver& enableOutputPeriodicBoundariesMirroring(bool p_val = true);
-                    FftDiffSolver& disableOutputPeriodicBoundariesMirroring();
-
-                    /* Accessors */
-                    std::array<T,Dim>                domainSize() const;
-                    fft::FftDomainConfiguration<Dim> inputFftConfig() const;
-
-                    unsigned int fftFlags() const;
-                    bool includeOutputPeriodicBds() const;
-                    bool mirrorOutputPeriodicBds()  const;
-
-                    /* Apply operator */
-                    virtual void apply(hysop::const_multi_array_ref<T,Dim> p_input,
-                                       hysop::multi_array_ref<T,Dim> p_output,
-                                       const std::array<int,Dim> &p_order) const final override;
-
-                protected:
-                    std::array<T,Dim>           m_domainSize;
-                    fft::FftDomainConfiguration<Dim> m_inputFftConfig;
-                    unsigned int m_fftFlags;
-                    bool m_includeOutputPeriodicBds, m_mirrorOutputPeriodicBds;
-            };
-                    
-        
-
-        /* Implementation */
-
-        template <typename T, std::size_t Dim>
-        FftDiffSolver<T,Dim>::FftDiffSolver(const std::array<T,Dim> &p_domainSize, const fft::FftDomainConfiguration<Dim> &p_inputFftConfig,
-                unsigned int p_fftFlags,
-                bool p_includeOutputPeriodicBds, bool p_mirrorOutputPeriodicBds):
-                m_domainSize(p_domainSize), m_inputFftConfig(p_inputFftConfig), 
-                m_fftFlags(p_fftFlags),
-                m_includeOutputPeriodicBds(p_includeOutputPeriodicBds), m_mirrorOutputPeriodicBds(p_mirrorOutputPeriodicBds) {
-        }
-
-
-        /* Mutators */
-        template <typename T, std::size_t Dim>
-            FftDiffSolver<T,Dim>& FftDiffSolver<T,Dim>::setDomainSize(const std::array<T,Dim>& p_domainSize) {
-                m_domainSize = p_domainSize;
-                return *this;
-            }
-        template <typename T, std::size_t Dim>
-            FftDiffSolver<T,Dim>& FftDiffSolver<T,Dim>::setFftDomainConfiguration(const fft::FftDomainConfiguration<Dim>& p_fftConfig) {
-                m_inputFftConfig = p_fftConfig;
-                return *this;
-            }
-        template <typename T, std::size_t Dim>
-            FftDiffSolver<T,Dim>& FftDiffSolver<T,Dim>::includeOutputPeriodicBoundaries(bool p_val) {
-                m_includeOutputPeriodicBds = p_val;
-                return *this;
-            }
-        template <typename T, std::size_t Dim>
-            FftDiffSolver<T,Dim>& FftDiffSolver<T,Dim>::excludeOutputPeriodicBoundaries() {
-                return this->includeOutputPeriodicBoundaries(false);
-            }
-        template <typename T, std::size_t Dim>
-            FftDiffSolver<T,Dim>& FftDiffSolver<T,Dim>::setFftFlags(unsigned int p_flags) {
-                m_fftFlags = p_flags;
-                return *this;
-            }
-        template <typename T, std::size_t Dim>
-            FftDiffSolver<T,Dim>& FftDiffSolver<T,Dim>::appendFftFlags(unsigned int p_flags) {
-                m_fftFlags |= p_flags;
-                return *this;
-            }
-        template <typename T, std::size_t Dim>
-            FftDiffSolver<T,Dim>& FftDiffSolver<T,Dim>::enableOutputPeriodicBoundariesMirroring(bool p_val) {
-                m_mirrorOutputPeriodicBds = p_val;
-                return *this;
-            }
-        template <typename T, std::size_t Dim>
-            FftDiffSolver<T,Dim>& FftDiffSolver<T,Dim>::disableOutputPeriodicBoundariesMirroring() {
-                return this->enableOutputPeriodicBoundariesMirroring(false);
-            }
-
-        /* Accessors */
-        template <typename T, std::size_t Dim>
-            std::array<T,Dim> FftDiffSolver<T,Dim>::domainSize() const {
-                return m_domainSize;
-            }
-        template <typename T, std::size_t Dim>
-            fft::FftDomainConfiguration<Dim> FftDiffSolver<T,Dim>::inputFftConfig() const {
-                return m_inputFftConfig;
-            }
-        template <typename T, std::size_t Dim>
-            unsigned int FftDiffSolver<T,Dim>::fftFlags() const {
-                return m_fftFlags;
-            }
-        template <typename T, std::size_t Dim>
-            bool FftDiffSolver<T,Dim>::includeOutputPeriodicBds() const {
-                return m_includeOutputPeriodicBds;
-            }
-        template <typename T, std::size_t Dim>
-            bool FftDiffSolver<T,Dim>::mirrorOutputPeriodicBds()  const {
-                return m_mirrorOutputPeriodicBds;
-            }
-
-        /* Apply operator */
-        template <typename T, std::size_t Dim>
-        void FftDiffSolver<T,Dim>::apply(
-                hysop::const_multi_array_ref<T,Dim> p_input,
-                hysop::multi_array_ref<T,Dim> p_output,
-                const std::array<int,Dim> &p_order) const {
-
-            fft::Planner<T,Dim> planner;
-            bool success = planner.plan(p_input, p_output, m_inputFftConfig, p_order, m_domainSize, m_fftFlags, 
-                                m_includeOutputPeriodicBds, m_mirrorOutputPeriodicBds);
-            if(!success) 
-                throw std::runtime_error("Failed to plan transforms !");
-            
-            planner.executeForwardTransform();
-            {
-                AccumulatorIndex<std::complex<T>,Dim> idx;
-                idx.setAccumulatorSource(planner.waveNumbersPows()).setAccumulatorFunction(std::multiplies<std::complex<T>>());
-
-                if(planner.transformType() == fft::FftTransformType::FFT_R2R) {
-                    multi_array_view<T,Dim> view = planner.transformedRealData();
-                    //view.print("PRE-RDATA");
-
-                    idx.reset(view.shape());
-                    while(!idx.atMaxId()) {
-                        view(idx.ids()) *= idx.accumulatedVal().real()/planner.normalisationFactor();
-                        ++idx;
-                    }
-                    
-                    //view.print("POST-RDATA");
-                }
-                else if(planner.transformType() == fft::FftTransformType::FFT_R2C) {
-                    multi_array_ref<std::complex<T>,Dim> ref = planner.transformedComplexData();
-                    std::complex<T> *data = ref.data();
-                    //ref.print("PRE-CDATA");
-
-                    idx.reset(ref.shape());
-                    while(!idx.atMaxId()) {
-                        data[idx()] *= idx.accumulatedVal()/planner.normalisationFactor();
-                        ++idx;
-                    }
-                    
-                    //ref.print("POST-CDATA");
-                }
-            }
-            planner.executeBackwardTransform();
-        }
-
-    } /* end of namespace solver */
-} /* end of namespace hysop */
-
-
-
-#endif /* end of include guard: HYSOP_FFTDIFFSOLVER_H */
diff --git a/src/hysop++/src/solver/fftPoissonSolver.h b/src/hysop++/src/solver/fftPoissonSolver.h
deleted file mode 100644
index 5e9ac41c019be13e10763f40a40c74912001b6d4..0000000000000000000000000000000000000000
--- a/src/hysop++/src/solver/fftPoissonSolver.h
+++ /dev/null
@@ -1,206 +0,0 @@
-
-#ifndef HYSOP_FFTPOISSONSOLVER_H
-#define HYSOP_FFTPOISSONSOLVER_H
-
-#include <cmath>
-
-#include "maths/quad_maths.h"
-#include "solver/poissonSolver.h"
-#include "fft/planner.h"
-#include "data/accumulatorIndex.h"
-
-namespace hysop {
-    namespace solver {
-
-        template <typename T, std::size_t Dim>
-            class FftPoissonSolver : public PoissonSolver<T,Dim>  {
-
-                private:
-                    using super = PoissonSolver<T,Dim>;
-                public:
-                    FftPoissonSolver()                                 = default;
-                    FftPoissonSolver(const FftPoissonSolver&  p_other) = default;
-                    FftPoissonSolver(      FftPoissonSolver&& p_other) = default;
-                    ~FftPoissonSolver()                                = default;
-
-                    FftPoissonSolver& operator=(const FftPoissonSolver&  p_other) = default;
-                    FftPoissonSolver& operator=(      FftPoissonSolver&& p_other) = default;
-
-                    FftPoissonSolver(const std::array<T,Dim> &p_domainSize, const domain::DomainConfiguration<Dim> &p_domainConfig,
-                            unsigned int p_fftFlags = FFTW_MEASURE,
-                            bool p_includeOutputPeriodicBds=false, bool p_mirrorOutputPeriodicBds=false);
-
-                    /* Mutators */
-                    FftPoissonSolver& setDomainSize(const std::array<T,Dim>& p_domainSize);
-                    FftPoissonSolver& setInputDomainConfiguration(const domain::DomainConfiguration<Dim>& p_domainConfig);
-
-                    FftPoissonSolver& setFftFlags(unsigned int p_flags);
-                    FftPoissonSolver& appendFftFlags(unsigned int p_flags);
-
-                    FftPoissonSolver& includeOutputPeriodicBoundaries(bool p_val = true);
-                    FftPoissonSolver& excludeOutputPeriodicBoundaries();
-
-                    FftPoissonSolver& enableOutputPeriodicBoundariesMirroring(bool p_val = true);
-                    FftPoissonSolver& disableOutputPeriodicBoundariesMirroring();
-
-                    /* Accessors */
-                    std::array<T,Dim>                domainSize() const;
-                    domain::DomainConfiguration<Dim> inputDomainConfig() const;
-
-                    unsigned int fftFlags() const;
-                    bool includeOutputPeriodicBds() const;
-                    bool mirrorOutputPeriodicBds()  const;
-
-                    /* Apply operator */
-                    virtual void apply(hysop::const_multi_array_ref<T,Dim> p_input,
-                                       hysop::multi_array_ref<T,Dim> p_output) const final override;
-
-                protected:
-                    std::array<T,Dim>           m_domainSize;
-                    domain::DomainConfiguration<Dim> m_inputDomainConfig;
-                    unsigned int m_fftFlags;
-                    bool m_includeOutputPeriodicBds, m_mirrorOutputPeriodicBds;
-            };
-                    
-        
-
-        /* Implementation */
-
-        template <typename T, std::size_t Dim>
-        FftPoissonSolver<T,Dim>::FftPoissonSolver(const std::array<T,Dim> &p_domainSize, const domain::DomainConfiguration<Dim> &p_inputDomainConfig,
-                unsigned int p_fftFlags,
-                bool p_includeOutputPeriodicBds, bool p_mirrorOutputPeriodicBds):
-                m_domainSize(p_domainSize), m_inputDomainConfig(p_inputDomainConfig), 
-                m_fftFlags(p_fftFlags),
-                m_includeOutputPeriodicBds(p_includeOutputPeriodicBds), m_mirrorOutputPeriodicBds(p_mirrorOutputPeriodicBds) {
-        }
-
-
-        /* Mutators */
-        template <typename T, std::size_t Dim>
-            FftPoissonSolver<T,Dim>& FftPoissonSolver<T,Dim>::setDomainSize(const std::array<T,Dim>& p_domainSize) {
-                m_domainSize = p_domainSize;
-                return *this;
-            }
-        template <typename T, std::size_t Dim>
-            FftPoissonSolver<T,Dim>& FftPoissonSolver<T,Dim>::setInputDomainConfiguration(const domain::DomainConfiguration<Dim>& p_inputDomainConfig) {
-                m_inputDomainConfig = p_inputDomainConfig;
-                return *this;
-            }
-        template <typename T, std::size_t Dim>
-            FftPoissonSolver<T,Dim>& FftPoissonSolver<T,Dim>::includeOutputPeriodicBoundaries(bool p_val) {
-                m_includeOutputPeriodicBds = p_val;
-                return *this;
-            }
-        template <typename T, std::size_t Dim>
-            FftPoissonSolver<T,Dim>& FftPoissonSolver<T,Dim>::excludeOutputPeriodicBoundaries() {
-                return this->includeOutputPeriodicBoundaries(false);
-            }
-        template <typename T, std::size_t Dim>
-            FftPoissonSolver<T,Dim>& FftPoissonSolver<T,Dim>::setFftFlags(unsigned int p_flags) {
-                m_fftFlags = p_flags;
-                return *this;
-            }
-        template <typename T, std::size_t Dim>
-            FftPoissonSolver<T,Dim>& FftPoissonSolver<T,Dim>::appendFftFlags(unsigned int p_flags) {
-                m_fftFlags |= p_flags;
-                return *this;
-            }
-        template <typename T, std::size_t Dim>
-            FftPoissonSolver<T,Dim>& FftPoissonSolver<T,Dim>::enableOutputPeriodicBoundariesMirroring(bool p_val) {
-                m_mirrorOutputPeriodicBds = p_val;
-                return *this;
-            }
-        template <typename T, std::size_t Dim>
-            FftPoissonSolver<T,Dim>& FftPoissonSolver<T,Dim>::disableOutputPeriodicBoundariesMirroring() {
-                return this->enableOutputPeriodicBoundariesMirroring(false);
-            }
-
-        /* Accessors */
-        template <typename T, std::size_t Dim>
-            std::array<T,Dim> FftPoissonSolver<T,Dim>::domainSize() const {
-                return m_domainSize;
-            }
-        template <typename T, std::size_t Dim>
-            domain::DomainConfiguration<Dim> FftPoissonSolver<T,Dim>::inputDomainConfig() const {
-                return m_inputDomainConfig;
-            }
-        template <typename T, std::size_t Dim>
-            unsigned int FftPoissonSolver<T,Dim>::fftFlags() const {
-                return m_fftFlags;
-            }
-        template <typename T, std::size_t Dim>
-            bool FftPoissonSolver<T,Dim>::includeOutputPeriodicBds() const {
-                return m_includeOutputPeriodicBds;
-            }
-        template <typename T, std::size_t Dim>
-            bool FftPoissonSolver<T,Dim>::mirrorOutputPeriodicBds()  const {
-                return m_mirrorOutputPeriodicBds;
-            }
-
-        /* Apply operator */
-        template <typename T, std::size_t Dim>
-        void FftPoissonSolver<T,Dim>::apply(
-                hysop::const_multi_array_ref<T,Dim> p_input,
-                hysop::multi_array_ref<T,Dim> p_output) const {
-
-
-            fft::Planner<T,Dim> planner;
-            planner.setFixedAxeWaveNumberPows(std::complex<T>(0,0));
-            {
-                std::array<int, Dim> order;
-                for (std::size_t d=0; d<Dim; d++)
-                    order[d] = (m_inputDomainConfig[d].first == domain::Boundary::NONE ? 0 : 2);
-                bool success = planner.plan(p_input, p_output, m_inputDomainConfig, order, m_domainSize, m_fftFlags, 
-                                    m_includeOutputPeriodicBds, m_mirrorOutputPeriodicBds);
-                if(!success) 
-                    throw std::runtime_error("Failed to plan transforms !");
-            }
-            const T normalisationFactor = planner.normalisationFactor();
-            //std::cout << planner << std::endl;
-
-            planner.executeForwardTransform();
-            {
-                AccumulatorIndex<std::complex<T>,Dim> idx;
-                idx.setAccumulatorSource(planner.waveNumbersPows()).setAccumulatorFunction(std::plus<std::complex<T>>());
-
-                if(planner.transformType() == fft::FftTransformType::FFT_R2R) {
-                    multi_array_view<T,Dim> view = planner.transformedRealData();
-                    //view.print("PRE-RDATA");
-
-                    idx.reset(view.shape());
-                    while(!idx.atMaxId()) {
-                        T filter = idx.accumulatedVal().real(); 
-                        filter = (std::fpclassify(filter)==FP_ZERO ? T(0) : (T(1)/filter)*(T(1)/normalisationFactor));  
-                        view(idx.ids()) *= filter;
-                        ++idx;
-                    }
-                    
-                    //view.print("POST-RDATA");
-                }
-                else if(planner.transformType() == fft::FftTransformType::FFT_R2C) {
-                    multi_array_ref<std::complex<T>,Dim> ref = planner.transformedComplexData();
-                    std::complex<T> *data = ref.data();
-                    //ref.print("PRE-CDATA");
-
-                    idx.reset(ref.shape());
-                    while(!idx.atMaxId()) {
-                        std::complex<T> filter = idx.accumulatedVal(); 
-                        filter = ((std::fpclassify(filter.real())==FP_ZERO) && (std::fpclassify(filter.imag())==FP_ZERO) ? 
-                                std::complex<T>(0,0) : (T(1)/filter)*(T(1)/normalisationFactor));
-                        data[idx()] *= filter;
-                        ++idx;
-                    }
-                    
-                    //ref.print("POST-CDATA");
-                }
-            }
-            planner.executeBackwardTransform();
-        }
-
-    } /* end of namespace solver */
-} /* end of namespace hysop */
-
-
-
-#endif /* end of include guard: HYSOP_FFTPOISSONSOLVER_H */
diff --git a/src/hysop++/src/solver/poissonSolver.h b/src/hysop++/src/solver/poissonSolver.h
deleted file mode 100644
index 61b16440a835de7d3e55d6e9cfa298313b1050a3..0000000000000000000000000000000000000000
--- a/src/hysop++/src/solver/poissonSolver.h
+++ /dev/null
@@ -1,27 +0,0 @@
-
-#ifndef HYSOP_POISSONSOLVER_H
-#define HYSOP_POISSONSOLVER_H
-
-#include "data/multi_array/multi_array.h"
-
-namespace hysop {
-    namespace solver {
-        template <typename T, std::size_t Dim>
-            class PoissonSolver {
-
-                public:
-                    virtual void apply(hysop::const_multi_array_ref<T,Dim> input,
-                            hysop::multi_array_ref<T,Dim> output) const = 0;
-
-                    void operator()(hysop::const_multi_array_ref<T,Dim> input, 
-                                    hysop::multi_array_ref<T,Dim> output) {
-                        this->apply(input,output);
-                    }
-
-            };
-
-    } /* end of namespace solver */
-} /* end of namespace hysop */
-
-#endif /* end of include guard: HYSOP_POISSONSOLVER_H */
-
diff --git a/src/hysop++/src/utils/constants.h b/src/hysop++/src/utils/constants.h
deleted file mode 100644
index 585ed629e11ec6c9ef537f4f92dc35926cd208cd..0000000000000000000000000000000000000000
--- a/src/hysop++/src/utils/constants.h
+++ /dev/null
@@ -1,22 +0,0 @@
-
-#ifndef CONSTANTS_H
-#define CONSTANTS_H
-
-#include <cmath>
-#include "maths/quad_maths.h"
-#include "utils/types.h"
-
-namespace hysop {
-    namespace constants {
-        static constexpr hysop::types::complex I = hysop::types::complex(0,1);
-        static constexpr hysop::types::complex Z = hysop::types::complex(0,0);
-
-#ifdef HAS_QUADMATHS
-        static const __float128 pi = acosq(-1.0Q);
-#else
-        static const long double pi = acosl(-1.0L);
-#endif
-    }
-}
-
-#endif /* end of include guard: CONSTANTS_H */
diff --git a/src/hysop++/src/utils/default.h b/src/hysop++/src/utils/default.h
deleted file mode 100644
index efeacfb8db0b5197dcdd51fbaa637a45c56f22d1..0000000000000000000000000000000000000000
--- a/src/hysop++/src/utils/default.h
+++ /dev/null
@@ -1,20 +0,0 @@
-
-#ifndef HYSOP_DEFAULT_H
-#define HYSOP_DEFAULT_H
-
-#include "data/memory/minimalAllocator.h"
-#include "data/memory/fftwAllocator.h"
-
-namespace hysop {
-    namespace _default {
-        
-        template <typename T>
-        using allocator = hysop::data::memory::MinimalAllocator<T>;
-
-        template <typename T>
-        using fft_allocator = hysop::data::memory::FftwAllocator<T>;
- 
-    } /* end of namespace _default */
-} /* end of namespace hysop */
-
-#endif /* end of include guard: HYSOP_DEFAULT_H */
diff --git a/src/hysop++/src/utils/defines.h b/src/hysop++/src/utils/defines.h
deleted file mode 100644
index b456d75fa8c4400040642eeb3af4029f195efcc1..0000000000000000000000000000000000000000
--- a/src/hysop++/src/utils/defines.h
+++ /dev/null
@@ -1,46 +0,0 @@
-
-#ifndef HYSOP_DEFINES_H
-#define HYSOP_DEFINES_H
-
-#include <stdexcept>
-#include <string>
-
-#if __cplusplus >= 201103L
-#define HAS_CXX11
-#endif
-
-#if __cplusplus >= 201402L
-#define HAS_CXX14
-#endif
-
-#define CAT(X,Y) CAT2(X,Y)
-#define CAT2(X,Y) X##Y
-#define CAT_2 CAT
-#define CAT_3(X,Y,Z) CAT(X,CAT(Y,Z))
-#define CAT_4(A,X,Y,Z) CAT(A,CAT_3(X,Y,Z))
-#define CAT_5(A,B,X,Y,Z) CAT_3(A,B,CAT_3(X,Y,Z))
-
-#define NOT_IMPLEMENTED_YET {                                                                                                    \
-    throw std::runtime_error("Function not implemented yet in " + std::string(__FILE__) + ":" + std::to_string(__LINE__) + "."); \
-}
-
-//Linux console colors
-#define RESET   "\033[0m"
-#define BLACK   "\033[30m"      /* Black */
-#define RED     "\033[31m"      /* Red */
-#define GREEN   "\033[32m"      /* Green */
-#define YELLOW  "\033[33m"      /* Yellow */
-#define BLUE    "\033[34m"      /* Blue */
-#define MAGENTA "\033[35m"      /* Magenta */
-#define CYAN    "\033[36m"      /* Cyan */
-#define WHITE   "\033[37m"      /* White */
-#define BOLDBLACK   "\033[1m\033[30m"      /* Bold Black */
-#define BOLDRED     "\033[1m\033[31m"      /* Bold Red */
-#define BOLDGREEN   "\033[1m\033[32m"      /* Bold Green */
-#define BOLDYELLOW  "\033[1m\033[33m"      /* Bold Yellow */
-#define BOLDBLUE    "\033[1m\033[34m"      /* Bold Blue */
-#define BOLDMAGENTA "\033[1m\033[35m"      /* Bold Magenta */
-#define BOLDCYAN    "\033[1m\033[36m"      /* Bold Cyan */
-#define BOLDWHITE   "\033[1m\033[37m"      /* Bold White */
-
-#endif /* end of include guard: HYSOP_DEFINES_H */
diff --git a/src/hysop++/src/utils/types.h b/src/hysop++/src/utils/types.h
deleted file mode 100644
index ccbd3c345f831eed98142209e606a41b0eed3230..0000000000000000000000000000000000000000
--- a/src/hysop++/src/utils/types.h
+++ /dev/null
@@ -1,63 +0,0 @@
-
-#ifndef HYSOP_TYPES_H
-#define HYSOP_TYPES_H
-
-#include <complex>
-#include <array>
-
-#include "utils/utils.h"
-#include "utils/default.h"
-
-namespace hysop {
-    
-    /* forward declare external types */
-    namespace data {
-        template <typename T, std::size_t Dim, typename Allocator>
-            class multi_array;
-        template <typename T, std::size_t Dim>
-            class multi_array_ref;
-        template <typename T, std::size_t Dim>
-            class multi_array_view;
-        template <typename T, std::size_t Dim>
-            class const_multi_array_view;
-        template <typename T, std::size_t Dim>
-            class const_multi_array_ref;
-    } /* end of namespace data */
-
-
-    namespace types {
-        typedef double             real;  
-        typedef std::complex<real> complex;
-    } /* end of namespace types */
-    
-    
-/* expose the folowwing types to namespace hysop */
-
-/* swig does not support alias templates... */
-    template <std::size_t Dim> 
-        struct Shape { 
-            typedef std::array<std::size_t, Dim> type; 
-        };
-    template <std::size_t Dim> 
-        struct Offset { 
-            typedef std::array<std::ptrdiff_t, Dim> type; 
-        };
-    
-    template <typename T, std::size_t Dim, typename Allocator = hysop::_default::allocator<T>>
-    using multi_array = hysop::data::multi_array<T,Dim,Allocator>;
-
-    template <typename T, std::size_t Dim>
-    using multi_array_view = hysop::data::multi_array_view<T,Dim>;
-    
-    template <typename T, std::size_t Dim>
-    using const_multi_array_view = hysop::data::const_multi_array_view<T,Dim>;
-    
-    template <typename T, std::size_t Dim>
-    using multi_array_ref = hysop::data::multi_array_ref<T,Dim>;
-    
-    template <typename T, std::size_t Dim>
-    using const_multi_array_ref = hysop::data::const_multi_array_ref<T,Dim>;
-
-} /* end of namespace hysop */
-
-#endif /* end of include guard: HYSOP_TYPES_H */
diff --git a/src/hysop++/src/utils/utils.cpp b/src/hysop++/src/utils/utils.cpp
deleted file mode 100644
index 501243272d4a6c4e9b71cf1681c6895dc224df63..0000000000000000000000000000000000000000
--- a/src/hysop++/src/utils/utils.cpp
+++ /dev/null
@@ -1,17 +0,0 @@
-
-#include "utils/utils.h"
-
-namespace std {
-    std::ostream& operator<<(std::ostream& os, const fftw_iodim& iodim) {
-        os << "[n=" << iodim.n << ", is=" << iodim.is << ", os=" << iodim.os << "]";
-        return os;
-    }
-}
-
-namespace hysop {
-    namespace utils {
-
-    }
-}
-
-
diff --git a/src/hysop++/src/utils/utils.h b/src/hysop++/src/utils/utils.h
deleted file mode 100644
index 4eaf5fcac6abbfbf622b7ad081f9d3c0c1bf529b..0000000000000000000000000000000000000000
--- a/src/hysop++/src/utils/utils.h
+++ /dev/null
@@ -1,197 +0,0 @@
-
-#ifndef HYSOP_UTILS_H
-#define HYSOP_UTILS_H
-
-#include <fftw3.h>
-#include <array>
-#include <vector>
-#include <tuple>
-#include <iostream>
-#include <limits>
-
-#include "maths/quad_maths.h"
-#include "detail/index_seq.h"
-#include <boost/multi_array.hpp>
-
-namespace hysop {
-    namespace utils {
-       
-        template <typename... T>
-        void printTuple(std::ostream& os, const std::tuple<T...>& tuple);
-
-        template <typename T>
-            bool areEqual(const T &lhs, const T &rhs);
-        template <typename T>
-            bool areNotEqual(const T &lhs, const T &rhs);
-       
-
-        /* boost related utilities */
-        template <std::size_t NumDims>
-        boost::detail::multi_array::index_gen<NumDims, NumDims> buildView();
-
-        template <std::size_t NumDims>
-        boost::detail::multi_array::index_gen<NumDims, NumDims> buildIndices(
-                const std::array<boost::multi_array_types::index_range, NumDims> &p_ranges);
-
-        template <std::size_t NumRanges>
-        boost::detail::multi_array::extent_gen<NumRanges> buildExtents(
-                const std::array<std::size_t, NumRanges> &p_shape);
-
-
-        /* Implementation */
-
-        template <typename Tuple, int... I>
-            void printTupleImpl(std::ostream& os, const Tuple& tuple, hysop::detail::index_seq<I...>) {
-                const int dummy[sizeof...(I)] = { (os << std::get<I>(tuple) << ",", 0)... };
-                os << std::get<sizeof...(I)>(tuple);
-            }
-        template <typename... T>
-            void printTuple(std::ostream& os, const std::tuple<T...>& tuple) {
-                os << "(";
-                printTupleImpl(os,tuple, hysop::detail::index_seq_gen<sizeof...(T)-1>());
-                os << ")";
-            }
-        
-        template <typename T, typename>
-            bool areEqualImpl(const T& lhs, const T& rhs) {
-                return lhs == rhs;
-            }
-        template <typename T, typename std::enable_if<std::is_floating_point<T>::value, int>::type* = nullptr>
-            bool areEqualImpl(const T& lhs, const T& rhs) {
-                return (std::abs(rhs - lhs) <= std::numeric_limits<T>::epsilon() * std::max(std::abs(lhs), std::abs(rhs)));
-            }
-        
-        template <typename T>
-            bool areEqual(const T &lhs, const T &rhs) {
-                return areEqualImpl<T>(lhs,rhs);
-            }
-        template <typename T>
-            bool areNotEqual(const T &lhs, const T &rhs) {
-                return !areEqualImpl<T>(lhs,rhs);
-            }
-        
-        
-        /* boost related utilities */
-        template <std::size_t NumDims>
-        struct BuildViewImpl {
-            static_assert(NumDims >= 1, "NumDims cannot be < 1");
-            boost::detail::multi_array::index_gen<NumDims, NumDims> build() const {
-                return BuildViewImpl<NumDims-1>().build()[boost::multi_array_types::index_range()];
-            }
-        };
-        template <>
-        struct BuildViewImpl<1> {
-            boost::detail::multi_array::index_gen<1,1> build() const {
-                return boost::multi_array_types::index_gen()[boost::multi_array_types::index_range()];
-            }
-        };
-        template <std::size_t NumDims>
-        boost::detail::multi_array::index_gen<NumDims, NumDims> buildView() {
-            return BuildViewImpl<NumDims>().build();
-        }
-
-        template <std::size_t NumDims, std::size_t K=NumDims>
-            struct BuildIndicesImpl {
-                static_assert(NumDims >= 1, "NumDims cannot be < 1");
-                const std::array<boost::multi_array_types::index_range, NumDims> &m_ranges;       
-                BuildIndicesImpl(const std::array<boost::multi_array_types::index_range, NumDims> &p_ranges): m_ranges(p_ranges) {} 
-                boost::detail::multi_array::index_gen<K,K> build() const {
-                    return BuildIndicesImpl<NumDims,K-1>(m_ranges).build()[m_ranges[K-1]];
-                }
-        };
-        template <std::size_t NumDims>
-            struct BuildIndicesImpl<NumDims,1> {
-                const std::array<boost::multi_array_types::index_range, NumDims> &m_ranges;       
-                BuildIndicesImpl(const std::array<boost::multi_array_types::index_range, NumDims> &p_ranges): m_ranges(p_ranges) {} 
-                boost::detail::multi_array::index_gen<1,1> build() const {
-                    return boost::multi_array_types::index_gen()[m_ranges[0]];
-                }
-        };
-        template <std::size_t NumDims>
-        boost::detail::multi_array::index_gen<NumDims, NumDims> buildIndices(
-                const std::array<boost::multi_array_types::index_range, NumDims> &p_ranges) {
-            return BuildIndicesImpl<NumDims>(p_ranges).build();
-        }
-        
-        template <std::size_t NumRanges, std::size_t K=NumRanges>
-            struct BuildExtentImpl {
-                static_assert(NumRanges >= 1, "NumDims cannot be < 1");
-                const std::array<std::size_t,NumRanges>& m_shape;
-                BuildExtentImpl(const std::array<std::size_t, NumRanges>& p_shape): m_shape(p_shape) {}
-                boost::detail::multi_array::extent_gen<K> build() const {
-                    return BuildExtentImpl<NumRanges,K-1>(m_shape).build()[m_shape[K-1]];
-                }
-            };
-        template <std::size_t NumRanges>
-            struct BuildExtentImpl<NumRanges,1> {
-                const std::array<std::size_t,NumRanges>& m_shape;
-                BuildExtentImpl(const std::array<std::size_t, NumRanges>& p_shape): m_shape(p_shape) {}
-                boost::detail::multi_array::extent_gen<1> build() const {
-                    return boost::multi_array_types::extent_gen()[m_shape[0]];
-                }
-            };
-        template <std::size_t NumRanges>
-        boost::detail::multi_array::extent_gen<NumRanges> buildExtents(
-                const std::array<std::size_t, NumRanges> &p_shape) {
-            return BuildExtentImpl<NumRanges>(p_shape).build();
-        }
-    }
-}
-
-
-/* quick and dirty fix to allow non namespace dependant operators << for std containers */
-namespace std {
-    
-    template <typename T, std::size_t Dim> 
-        std::ostream& operator<<(std::ostream& os, const std::array<T,Dim>& array);
-    template <typename T, std::size_t Dim> 
-        std::ostream& operator<<(std::ostream& os, const boost::array<T,Dim>& array);
-
-    template <typename T>
-        std::ostream& operator<<(std::ostream& os, const std::vector<T>& vector);
-    
-    template <typename... T>
-        std::ostream& operator<<(std::ostream& os, const std::tuple<T...>& tuple);
-
-    std::ostream& operator<<(std::ostream& os, const fftw_iodim& iodim);
-
-
-
-    /* Implementation */
-    template <typename T, std::size_t Dim>
-        std::ostream& operator<<(std::ostream& os, const std::array<T,Dim>& array) {
-            os << "[";
-            for (std::size_t i = 0; i < Dim-1; i++) 
-                os << array[i] << ",";
-            os << array[Dim-1];
-            os << "]";
-            return os;
-        }
-    template <typename T, std::size_t Dim>
-        std::ostream& operator<<(std::ostream& os, const boost::array<T,Dim>& array) {
-            os << "[";
-            for (std::size_t i = 0; i < Dim-1; i++) 
-                os << array[i] << ",";
-            os << array[Dim-1];
-            os << "]";
-            return os;
-        }
-    template <typename T>
-        std::ostream& operator<<(std::ostream& os, const std::vector<T>& vector) {
-            os << "[";
-            if(!vector.empty()) {
-                for (std::size_t i = 0; i < vector.size()-1; i++) 
-                    os << vector[i] << ",";
-                os << vector[vector.size()-1];
-            }
-            os << "]";
-            return os;
-        }
-    template <typename... T>
-        std::ostream& operator<<(std::ostream& os, const std::tuple<T...>& tuple) {
-            hysop::utils::printTuple(os,tuple);
-            return os;
-        }
-}
-
-#endif /* end of include guard: HYSOP_UTILS_H */
diff --git a/src/hysop++/tests/CMakeLists.txt b/src/hysop++/tests/CMakeLists.txt
deleted file mode 100644
index 30668e041d732cb6b3820658669454cf265c05c5..0000000000000000000000000000000000000000
--- a/src/hysop++/tests/CMakeLists.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-
-include("${CMAKE_SOURCE_DIR}/cmake/GoogleTestHelper.cmake")
-
-add_subdirectory("testPolynoms")
-add_subdirectory("testPlanner")
-add_subdirectory("testDiffSolver")
-add_subdirectory("testPoissonSolver")
diff --git a/src/hysop++/tests/testDiffSolver/CMakeLists.txt b/src/hysop++/tests/testDiffSolver/CMakeLists.txt
deleted file mode 100644
index 6f3ddcdc849f9243ac45c15c400bebb3e95fef06..0000000000000000000000000000000000000000
--- a/src/hysop++/tests/testDiffSolver/CMakeLists.txt
+++ /dev/null
@@ -1,14 +0,0 @@
-
-file(GLOB CPP_SRCS *.cpp)
-set(SRCS ${CPP_SRCS})
-
-get_filename_component(test_name ${CMAKE_CURRENT_SOURCE_DIR} NAME)
-add_definitions(${CXX_EXTRA_DEFINES})
-add_executable(${test_name} ${SRCS})
-add_dependencies(${test_name} ${HYSOP_CXX_LIBRARY_DEP})
-
-target_link_libraries(${test_name} ${HYSOP_CXX_LIBRARY})
-target_link_libraries(${test_name} ${GTEST_LIBRARIES} ${CXX_EXT_LIBS})
-
-add_test("${test_name}" "${test_name}")
-
diff --git a/src/hysop++/tests/testDiffSolver/main.cpp b/src/hysop++/tests/testDiffSolver/main.cpp
deleted file mode 100644
index b6141085e8080256953f5555450500ea27948a0c..0000000000000000000000000000000000000000
--- a/src/hysop++/tests/testDiffSolver/main.cpp
+++ /dev/null
@@ -1,10 +0,0 @@
-
-#include "gtest/gtest.h"
-
-int main(int argc, char **argv)
-{
-    ::testing::InitGoogleTest(&argc, argv);
-    int ret = RUN_ALL_TESTS();
-
-    return ret;
-}
diff --git a/src/hysop++/tests/testDiffSolver/testDiffSolver.cpp b/src/hysop++/tests/testDiffSolver/testDiffSolver.cpp
deleted file mode 100644
index 55fec62f5f9ef391cd444d730a730833451168e2..0000000000000000000000000000000000000000
--- a/src/hysop++/tests/testDiffSolver/testDiffSolver.cpp
+++ /dev/null
@@ -1,243 +0,0 @@
-
-#include "testDiffSolver.h"
-
-#include <cstdlib>
-
-#include "domain/domain.h"
-#include "solver/fftDiffSolver.h"
-#include "data/multi_array/multi_array.h"
-#include "utils/constants.h"
-#include "fft/extension.h"
-
-using namespace hysop;
-using namespace hysop::domain;
-
-static constexpr std::size_t nExtensions = 4 ;
-static constexpr std::size_t nExtensionsPair = 7 ;
-static constexpr fft::Extension ext[nExtensions] = 
-{ fft::Extension::NONE, fft::Extension::ODD, fft::Extension::EVEN, fft::Extension::PERIODIC };
-static constexpr std::pair<fft::Extension,fft::Extension> pext[nExtensionsPair] {
-        std::make_pair(ext[3],ext[3]), //periodic-periodic
-        std::make_pair(ext[3],ext[3]), //periodic-periodic
-        std::make_pair(ext[2],ext[1]), //even-odd
-        std::make_pair(ext[1],ext[2]), //odd-even
-        std::make_pair(ext[2],ext[2]), //even-even
-        std::make_pair(ext[1],ext[1]), //odd-odd
-        std::make_pair(ext[0],ext[0]), //none-none
-};
-
-#ifdef HAS_QUADMATHS
-    static constexpr __float128  freqs[6] = { 1.0Q, 1.0Q, 0.75Q, 0.75Q, 0.50Q, 0.50Q };
-#else
-    static constexpr long double freqs[6] = { 1.0L, 1.0L, 0.75L, 0.75L, 0.50L, 0.50L };
-#endif
-
-template <typename T>
-std::function<T(T)> func(std::size_t k) {
-    switch(k) {
-        case 0: return [=](T x) {return std::cos(T(freqs[0])*x);};
-        case 1: return [=](T x) {return std::sin(T(freqs[1])*x);};
-        case 2: return [=](T x) {return std::cos(T(freqs[2])*x);};
-        case 3: return [=](T x) {return std::sin(T(freqs[3])*x);};
-        case 4: return [=](T x) {return std::cos(T(freqs[4])*x);};
-        case 5: return [=](T x) {return std::sin(T(freqs[5])*x);};
-        default: return[=](T x) { return T(1); };
-    }
-}
-
-template <typename T>
-std::function<T(T)> derivative(std::size_t k, int order) {
-    bool even = (k%2==0);
-    std::size_t p, offset;
-    T sign, coeff;
-    if(k>5) {
-        if(order != 0)
-            throw std::runtime_error("Non zero order !");
-        return func<T>(k);
-    }
-    else if(even) { /* cos func */
-        p      = (order%2==0 ? k : k+1);
-        sign  = std::pow(T(-1),(order+1)/2);
-        coeff = std::pow(freqs[k], order);
-    }
-    else { /* sin func */
-        p     = (order%2==0 ? k : k-1); 
-        sign  = std::pow(T(-1),order/2);
-        coeff = std::pow(freqs[k], order);
-    }
-    return [=](T x) { return sign*coeff*(func<T>(p)(x)); };
-}
-    
-template <typename T, std::size_t Dim, bool verbose=false> 
-void test(std::size_t p_maxOrder, bool includePeriodicBds=false) {
-    typename Shape<Dim>::type shape;
-    typename Domain<T,Dim>::DomainSize domainSize;
-    Domain<T,Dim> ref, inBuffer, outBuffer;
-
-    Domain<T,Dim>& in  = inBuffer;
-    Domain<T,Dim>& out = outBuffer;
-
-    std::array<int,Dim> order;
-
-    shape.fill(8);
-    domainSize.fill(2*hysop::constants::pi);
-
-    T eps = std::numeric_limits<T>::epsilon();
-    const std::size_t N = std::accumulate(shape.begin(), shape.end(), 1, std::multiplies<std::size_t>());
-    
-    ref.resize(domainSize).reshape(shape);
-    in  = ref;
-    out = ref;
-   
-    typename Shape<Dim>::type maxOrder, testCases;
-    maxOrder.fill(p_maxOrder+1);
-    testCases.fill(nExtensionsPair);
-    Index<Dim> orderId(maxOrder);
-    Index<Dim> testCaseId;
-    std::size_t testCaseCount;
-    while(!(++orderId).atMaxId()) {
-        std::cout << "  ::Order::" << orderId.ids() << (verbose ? "\n" : "");
-    
-        std::array<T,3> meanDists;
-        meanDists.fill(0);
-        testCaseId.reset(testCases);
-        testCaseCount = testCaseId.maxId();
-        while(!testCaseId.atMaxId()) { 
-            std::copy(orderId.ids().begin(),orderId.ids().end(), order.begin());
-
-            /* generate transform configuration */
-            std::array<std::pair<fft::Extension,fft::Extension>, Dim> extConfig;
-            for (std::size_t k=0; k<Dim; k++) {
-                std::size_t id = testCaseId[k];
-                extConfig[k] = pext[id];
-                if(pext[id].first==fft::Extension::NONE)
-                    order[k] = 0;
-            }
-            fft::FftDomainConfiguration<Dim> domainConfig(extConfig, includePeriodicBds);
-            
-            const std::size_t orderSum = std::accumulate(order.begin(), order.end(), 0);
-            if(orderSum == 0) {
-                testCaseCount--;
-                ++testCaseId;
-                continue;
-            }
-            T orderPow = std::pow(T(10),T(orderSum));
-            if(std::is_same<T,long double>::value) /* just in case long doubles are not hardware supported... */
-                orderPow *= 1e3;
-            const auto criteria = std::make_tuple(orderPow*eps*N,orderPow*eps*sqrt(N),2*orderPow*eps);
-
-            const auto f = [&](const typename Domain<T,Dim>::SpaceVariable &x) { 
-                T val = func<T>(testCaseId[0])(x[0]);
-                for (std::size_t d=1; d < Dim; d++)
-                    val *= func<T>(testCaseId[d])(x[d]);
-                return val;
-            };
-            const auto d = [&](const typename Domain<T,Dim>::SpaceVariable &x) { 
-                T val = derivative<T>(testCaseId[0],order[0])(x[0]);
-                for (std::size_t d=1; d < Dim; d++)
-                    val *= derivative<T>(testCaseId[d],order[d])(x[d]);
-                return val;
-            };
-            {
-                ref.resetDomainConfiguration(domainConfig.boundariesConfiguration());
-                in  = ref;
-                out = ref;
-
-                in.apply(f);
-                ref.apply(d);
-                out.data().apply([](T& v){ v=T(0);});
-            }
-
-            solver::FftDiffSolver<T,Dim> solver(domainSize, domainConfig, FFTW_MEASURE, includePeriodicBds, includePeriodicBds);
-            solver.apply(in.data(), out.data(), order);
-
-            std::stringstream ss;
-            ss << "[";
-            for (std::size_t k=0; k<Dim-1; k++) 
-                ss << extConfig[k].first << "/" << extConfig[k].second << ",";
-            ss << extConfig[Dim-1].first << "/" << extConfig[Dim-1].second;
-            ss << "]";
-
-            const auto dist = out.distance(ref);
-            const bool pass =      (std::get<0>(dist) < std::get<0>(criteria)) 
-                && (std::get<1>(dist) < std::get<1>(criteria))
-                && (std::get<2>(dist) < std::get<2>(criteria));
-
-            if((pass && verbose) || !pass) {
-                std::cout << (pass ? GREEN : RED);
-                std::cout << "\t" << std::setw(Dim*15) << ss.str() << " => " << (pass ? "OK" : "KO") 
-                    << "  " << RESET << std::scientific << std::setprecision(2) << dist << std::endl;
-            }
-            if(!pass) {
-                //in.print("IN");
-                //ref.print("REF");
-                //out.print("OUT");
-                std::cout << "Test failed => Criteria was: " << criteria << std::endl;
-            }
-
-            meanDists[0] += std::get<0>(dist);
-            meanDists[1] += std::get<1>(dist);
-            meanDists[2] += std::get<2>(dist);
-            
-            EXPECT_TRUE(pass);
-
-            ++testCaseId;
-        }
-        for (std::size_t k = 0; k < 3; k++)
-            meanDists[k] /= T(testCaseCount);
-        std::cout << "=> mean distances over " << std::scientific << std::setprecision(1) << std::setw(4)
-            << testCaseCount << " testcases: " << meanDists;
-        for (std::size_t k = 0; k < 3; k++)
-            meanDists[k] = std::round(meanDists[k]/eps);
-        std::cout << " ~= " <<  std::fixed << std::setprecision(0) << meanDists << " eps" << std::endl; 
-    }
-}
-    
-#ifdef FFTW_HAS_FFTW3F
-TEST_F(DiffSolverTest, FloatDerivatives) {
-    std::cout << std::endl;
-    std::cout << "== TEST 1D - float       ==" << std::endl;
-    test<float,1,false>(5);
-    std::cout << "== TEST 2D - float       ==" << std::endl;
-    test<float,2,false>(3);
-    std::cout << "== TEST 3D - float       ==" << std::endl;
-    test<float,3,false>(1);
-}
-#endif
-
-#ifdef FFTW_HAS_FFTW3D
-TEST_F(DiffSolverTest, DoubleDerivatives) {
-    std::cout << std::endl;
-    std::cout << "== TEST 1D - double       ==" << std::endl;
-    test<double,1,false>(5);
-    std::cout << "== TEST 2D - double       ==" << std::endl;
-    test<double,2,false>(3);
-    std::cout << "== TEST 3D - double       ==" << std::endl;
-    test<double,3,false>(1);
-}
-#endif
-    
-#ifdef FFTW_HAS_FFTW3L
-TEST_F(DiffSolverTest, LongDoubleDerivatives) {
-    std::cout << std::endl;
-    std::cout << "== TEST 1D - long double       ==" << std::endl;
-    test<long double,1,false>(5);
-    std::cout << "== TEST 2D - long double       ==" << std::endl;
-    test<long double,2,false>(3);
-    std::cout << "== TEST 3D - long double       ==" << std::endl;
-    test<long double,3,false>(1);
-}
-#endif
-
-#ifdef FFTW_HAS_FFTW3Q
-TEST_F(DiffSolverTest, QuadFloatDerivatives) {
-    std::cout << std::endl;
-    std::cout << "== TEST 1D - __float128       ==" << std::endl;
-    test<__float128,1,false>(5);
-    std::cout << "== TEST 2D - __float128       ==" << std::endl;
-    test<__float128,2,false>(3);
-    std::cout << "== TEST 3D - __float128       ==" << std::endl;
-    test<__float128,3,false>(1);
-}
-#endif
-
diff --git a/src/hysop++/tests/testDiffSolver/testDiffSolver.h b/src/hysop++/tests/testDiffSolver/testDiffSolver.h
deleted file mode 100644
index b51090915dfbe0efbd9999a50b53103ca6a24ebf..0000000000000000000000000000000000000000
--- a/src/hysop++/tests/testDiffSolver/testDiffSolver.h
+++ /dev/null
@@ -1,14 +0,0 @@
-
-#include "gtest/gtest.h"
-#include "solver/fftDiffSolver.h"
-#include "fft/extension.h"
-
-using namespace hysop;
-
-class DiffSolverTest : public ::testing::Test {
-    protected:
-        DiffSolverTest() {}
-        void SetUp()  {}
-        void TearDown() {}
-        virtual ~DiffSolverTest() {}
-};
diff --git a/src/hysop++/tests/testPlanner/CMakeLists.txt b/src/hysop++/tests/testPlanner/CMakeLists.txt
deleted file mode 100644
index 6f3ddcdc849f9243ac45c15c400bebb3e95fef06..0000000000000000000000000000000000000000
--- a/src/hysop++/tests/testPlanner/CMakeLists.txt
+++ /dev/null
@@ -1,14 +0,0 @@
-
-file(GLOB CPP_SRCS *.cpp)
-set(SRCS ${CPP_SRCS})
-
-get_filename_component(test_name ${CMAKE_CURRENT_SOURCE_DIR} NAME)
-add_definitions(${CXX_EXTRA_DEFINES})
-add_executable(${test_name} ${SRCS})
-add_dependencies(${test_name} ${HYSOP_CXX_LIBRARY_DEP})
-
-target_link_libraries(${test_name} ${HYSOP_CXX_LIBRARY})
-target_link_libraries(${test_name} ${GTEST_LIBRARIES} ${CXX_EXT_LIBS})
-
-add_test("${test_name}" "${test_name}")
-
diff --git a/src/hysop++/tests/testPlanner/main.cpp b/src/hysop++/tests/testPlanner/main.cpp
deleted file mode 100644
index b6141085e8080256953f5555450500ea27948a0c..0000000000000000000000000000000000000000
--- a/src/hysop++/tests/testPlanner/main.cpp
+++ /dev/null
@@ -1,10 +0,0 @@
-
-#include "gtest/gtest.h"
-
-int main(int argc, char **argv)
-{
-    ::testing::InitGoogleTest(&argc, argv);
-    int ret = RUN_ALL_TESTS();
-
-    return ret;
-}
diff --git a/src/hysop++/tests/testPlanner/testPlanner.cpp b/src/hysop++/tests/testPlanner/testPlanner.cpp
deleted file mode 100644
index aab308e9867099bc340d1db9f577029d033f45e8..0000000000000000000000000000000000000000
--- a/src/hysop++/tests/testPlanner/testPlanner.cpp
+++ /dev/null
@@ -1,200 +0,0 @@
-
-#include "maths/quad_maths.h"
-#include "testPlanner.h"
-
-#include "data/multi_array/multi_array.h"
-#include "domain/domain.h"
-#include "utils/constants.h"
-#include "fft/planner.h"
-#include "fft/extension.h"
-
-
-using namespace hysop;
-using namespace hysop::domain;
-
-static constexpr std::size_t nExtensions = 4 ;
-static constexpr std::size_t nExtensionsPair = 6 ;
-static constexpr fft::Extension ext[nExtensions] = 
-{ fft::Extension::NONE, fft::Extension::ODD, fft::Extension::EVEN, fft::Extension::PERIODIC };
-static constexpr std::pair<fft::Extension,fft::Extension> pext[nExtensionsPair] {
-    std::make_pair(ext[0],ext[0]), //none-none
-        std::make_pair(ext[1],ext[1]), //odd-odd
-        std::make_pair(ext[1],ext[2]), //odd-even
-        std::make_pair(ext[1],ext[2]), //even-odd
-        std::make_pair(ext[2],ext[2]), //even-even
-        std::make_pair(ext[3],ext[3]), //periodic-periodic
-};
-
-template <typename T, std::size_t Dim, bool verbose=false> 
-void test(bool inplace, bool includePeriodicBds);
-
-#ifdef FFTW_HAS_FFTW3F
-TEST_F(PlannerTest, InplaceFloatTransforms) {
-    std::cout << std::endl; 
-    std::cout << "== TEST 1D - float       ==\t";
-    test<float,1>(false,true);
-    std::cout << "== TEST 2D - float       ==\t";
-    test<float,2>(false,true);
-    std::cout << "== TEST 3D - float       ==\t";
-    test<float,3>(false,true);
-}
-#endif
-
-#ifdef FFTW_HAS_FFTW3D
-TEST_F(PlannerTest, InplaceDoubleTransforms) {
-    std::cout << std::endl; 
-    std::cout << "== TEST 1D - double      ==\t";
-    test<double,1>(true,true);
-    std::cout << "== TEST 2D - double      ==\t";
-    test<double,2>(true,true);
-    std::cout << "== TEST 3D - double      ==\t";
-    test<double,3>(true,true);
-}
-#endif
-    
-#ifdef FFTW_HAS_FFTW3L
-TEST_F(PlannerTest, InplaceLongDoubleTransforms) {
-    std::cout << std::endl; 
-    std::cout << "== TEST 1D - long double ==\t";
-    test<long double,1>(false,false);
-    std::cout << "== TEST 2D - long double ==\t";
-    test<long double,2>(false,false);
-    std::cout << "== TEST 3D - long double ==\t";
-    test<long double,3>(false,false);
-}
-#endif
-
-#ifdef FFTW_HAS_FFTW3Q
-TEST_F(PlannerTest, InplaceQuadDoubleTransforms) {
-    std::cout << std::endl; 
-    std::cout << "== TEST 1D - __float128 ==\t";
-    test<__float128,1>(false,false);
-    std::cout << "== TEST 2D - __float128 ==\t";
-    test<__float128,2>(false,false);
-    std::cout << "== TEST 3D - __float128 ==\t";
-    test<__float128,3>(false,false);
-}
-#endif
-
-template <typename T, std::size_t Dim, bool verbose> 
-void test(bool inplace, bool includePeriodicBds) {
-    typename Shape<Dim>::type shape;
-    typename Domain<T,Dim>::DomainSize domainSize;
-    Domain<T,Dim> ref, inBuffer, outBuffer;
-
-    Domain<T,Dim>& in  = inBuffer;
-    Domain<T,Dim>& out = (inplace ? inBuffer : outBuffer);
-
-    fft::Planner<T,Dim> planner;
-    std::array<int,Dim> order;
-
-    const std::size_t nPoints = 16;
-    shape.fill(nPoints);
-    domainSize.fill(1.0);
-    order.fill(2);
-
-    const T eps = std::numeric_limits<T>::epsilon();
-    const std::size_t N = std::accumulate(shape.begin(), shape.end(), 1, std::multiplies<std::size_t>());
-    const auto criteria = std::make_tuple(50*eps*N,sqrt(50)*eps*N,700*eps);
-    
-    ref.resize(domainSize).reshape(shape);
-    in  = ref;
-    out = ref;
-   
-    typename Shape<Dim>::type testCases;
-    testCases.fill(nExtensionsPair);
-    Index<Dim> testCaseId(testCases);
-    std::array<T,3> meanDists{0};
-    while(!testCaseId.atMaxId()) { 
-        /* generate transform configuration */
-        std::array<std::pair<fft::Extension,fft::Extension>, Dim> extConfig;
-        for (std::size_t k=0; k<Dim; k++) {
-            std::size_t id = testCaseId[k];
-            extConfig[k] = pext[id];
-        }
-        fft::FftDomainConfiguration<Dim> domainConfig(extConfig, includePeriodicBds);
-    
-        const auto f = [&](T &val, const hysop::Index<Dim>& idx) { 
-            val = static_cast<T>(rand())/static_cast<T>(RAND_MAX);
-            for (std::size_t d=0; d<Dim; d++) {
-                if(idx[d]==0) {
-                    if(extConfig[d].first == fft::Extension::ODD) {
-                        val=T(0); 
-                        return;
-                    }
-                    else if(extConfig[d].first == fft::Extension::PERIODIC)
-                        val=T(0.42);
-                }
-                else if(std::size_t(idx[d]) == idx.dim()[d]-1) {
-                    if(extConfig[d].second == fft::Extension::ODD) {
-                        val=T(0);
-                        return;
-                    }
-                    else if(extConfig[d].second == fft::Extension::PERIODIC && includePeriodicBds)
-                        val=T(0.42);
-                }
-            }
-        };
-        
-        if(includePeriodicBds)
-            ref.resetDomainConfiguration(domainConfig.boundariesConfiguration()); 
-        
-        /* fill reference and copy into input buffer */
-        ref.data().apply(f);
-        in = ref;
-       
-        /* plan transforms and check if planning succeeded */
-        bool status = planner.plan(in.data(), out.data(), domainConfig, order, domainSize, FFTW_MEASURE, 
-                    includePeriodicBds, includePeriodicBds);
-        assert(status || testCaseId()==0);
-    
-        /* execute forward and backward inplace transforms */
-        planner.executeForwardTransform();
-        {
-            if(planner.transformType() == fft::FftTransformType::FFT_R2C)
-                planner.transformedComplexData().apply([&](std::complex<T>& val) { val /= planner.normalisationFactor(); }); 
-            else if(planner.transformType() == fft::FftTransformType::FFT_R2R)
-                planner.transformedRealData().apply([&](T& val) { val /= planner.normalisationFactor(); }); 
-        }
-        planner.executeBackwardTransform();
-        
-        std::stringstream ss;
-        ss << "[";
-        for (std::size_t k=0; k<Dim-1; k++) 
-            ss << extConfig[k].first << "/" << extConfig[k].second << ",";
-        ss << extConfig[Dim-1].first << "/" << extConfig[Dim-1].second;
-        ss << "]";
-
-        const auto dist = out.distance(ref);
-        const bool pass =      (std::get<0>(dist) < std::get<0>(criteria)) 
-                            && (std::get<1>(dist) < std::get<1>(criteria))
-                            && (std::get<2>(dist) < std::get<2>(criteria));
-
-        if((pass && verbose) || !pass) {
-            std::cout << (pass ? GREEN : RED);
-            std::cout << "\t" << std::setw(Dim*15) << ss.str() << " => " << (pass ? "OK" : "KO") 
-                << "  " << RESET << std::scientific << std::setprecision(2) << dist << std::endl;
-        }
-        if(!pass) {
-            if(!inplace)
-                in.print("IN");
-            ref.print("REF");
-            out.print("OUT");
-            std::cout << planner << std::endl;
-            exit(EXIT_FAILURE);
-        }
-
-        meanDists[0] += std::get<0>(dist);
-        meanDists[1] += std::get<1>(dist);
-        meanDists[2] += std::get<2>(dist);
-
-        ++testCaseId;
-    }
-    for (std::size_t k = 0; k < 3; k++)
-        meanDists[k] /= T(testCaseId.maxId());
-    std::cout << "Mean distances over " << std::scientific << std::setprecision(1) << std::setw(4)
-         << testCaseId.maxId() << " testcases: " << meanDists;
-    for (std::size_t k = 0; k < 3; k++)
-        meanDists[k] = std::round(meanDists[k]/eps);
-    std::cout << " ~= " << std::fixed << std::setprecision(0) << meanDists << " eps" << std::endl; 
-}
diff --git a/src/hysop++/tests/testPlanner/testPlanner.h b/src/hysop++/tests/testPlanner/testPlanner.h
deleted file mode 100644
index 5be17aa72b6456d5c6979fe7b6ad3b1dc132c7f1..0000000000000000000000000000000000000000
--- a/src/hysop++/tests/testPlanner/testPlanner.h
+++ /dev/null
@@ -1,20 +0,0 @@
-
-#include "gtest/gtest.h"
-#include "fft/planner.h"
-#include "fft/extension.h"
-
-using T = double;
-constexpr std::size_t Dim = 1;
-    
-using namespace hysop;
-
-class PlannerTest : public ::testing::Test {
-    protected:
-        PlannerTest() {}
-        void SetUp()  {}
-        void TearDown() {}
-        virtual ~PlannerTest() {}
-
-    public:
-        fft::Planner<T,Dim> planner;
-};
diff --git a/src/hysop++/tests/testPoissonSolver/CMakeLists.txt b/src/hysop++/tests/testPoissonSolver/CMakeLists.txt
deleted file mode 100644
index 6f3ddcdc849f9243ac45c15c400bebb3e95fef06..0000000000000000000000000000000000000000
--- a/src/hysop++/tests/testPoissonSolver/CMakeLists.txt
+++ /dev/null
@@ -1,14 +0,0 @@
-
-file(GLOB CPP_SRCS *.cpp)
-set(SRCS ${CPP_SRCS})
-
-get_filename_component(test_name ${CMAKE_CURRENT_SOURCE_DIR} NAME)
-add_definitions(${CXX_EXTRA_DEFINES})
-add_executable(${test_name} ${SRCS})
-add_dependencies(${test_name} ${HYSOP_CXX_LIBRARY_DEP})
-
-target_link_libraries(${test_name} ${HYSOP_CXX_LIBRARY})
-target_link_libraries(${test_name} ${GTEST_LIBRARIES} ${CXX_EXT_LIBS})
-
-add_test("${test_name}" "${test_name}")
-
diff --git a/src/hysop++/tests/testPoissonSolver/main.cpp b/src/hysop++/tests/testPoissonSolver/main.cpp
deleted file mode 100644
index b6141085e8080256953f5555450500ea27948a0c..0000000000000000000000000000000000000000
--- a/src/hysop++/tests/testPoissonSolver/main.cpp
+++ /dev/null
@@ -1,10 +0,0 @@
-
-#include "gtest/gtest.h"
-
-int main(int argc, char **argv)
-{
-    ::testing::InitGoogleTest(&argc, argv);
-    int ret = RUN_ALL_TESTS();
-
-    return ret;
-}
diff --git a/src/hysop++/tests/testPoissonSolver/testPoissonSolver.cpp b/src/hysop++/tests/testPoissonSolver/testPoissonSolver.cpp
deleted file mode 100644
index 58d87d5440e617a18f9ba6abda18fef3a85b736e..0000000000000000000000000000000000000000
--- a/src/hysop++/tests/testPoissonSolver/testPoissonSolver.cpp
+++ /dev/null
@@ -1,208 +0,0 @@
-
-#include "testPoissonSolver.h"
-
-#include <cstdlib>
-
-#include "domain/domain.h"
-#include "solver/fftPoissonSolver.h"
-#include "data/multi_array/multi_array.h"
-#include "utils/constants.h"
-#include "domain/boundary.h"
-
-using namespace hysop;
-using namespace hysop::domain;
-
-static constexpr std::size_t nBoundaries = 4;
-static constexpr std::size_t nBoundaryPairs = 7;
-static constexpr domain::Boundary bds[nBoundaries] = 
-{ domain::Boundary::NONE, domain::Boundary::HOMOGENEOUS_NEUMANN, domain::Boundary::HOMOGENEOUS_DIRICHLET, domain::Boundary::PERIODIC };
-static constexpr std::pair<domain::Boundary,domain::Boundary> pbds[nBoundaryPairs] {
-        std::make_pair(bds[3],bds[3]), //periodic-periodic
-        std::make_pair(bds[3],bds[3]), //periodic-periodic
-        std::make_pair(bds[2],bds[1]), //even-odd
-        std::make_pair(bds[1],bds[2]), //odd-even
-        std::make_pair(bds[2],bds[2]), //even-even
-        std::make_pair(bds[1],bds[1]), //odd-odd
-        std::make_pair(bds[0],bds[0]), //none-none
-};
-    
-#ifdef HAS_QUADMATHS
-    static constexpr __float128  freqs[6] = { 1.0Q, 1.0Q, 0.75Q, 0.75Q, 0.50Q, 0.50Q };
-#else
-    static constexpr long double freqs[6] = { 1.0L, 1.0L, 0.75L, 0.75L, 0.50L, 0.50L };
-#endif
-
-template <typename T>
-std::function<T(T)> func(std::size_t k) {
-    switch(k) {
-        case 0: return [=](T x) {return std::cos(T(freqs[0])*x);};
-        case 1: return [=](T x) {return std::sin(T(freqs[1])*x);};
-        case 2: return [=](T x) {return std::cos(T(freqs[2])*x);};
-        case 3: return [=](T x) {return std::sin(T(freqs[3])*x);};
-        case 4: return [=](T x) {return std::cos(T(freqs[4])*x);};
-        case 5: return [=](T x) {return std::sin(T(freqs[5])*x);};
-        default: return[=](T x) { return T(1); };
-    }
-}
-
-template <typename T, std::size_t Dim, bool verbose=false> 
-void test(bool includePeriodicBds=false) {
-    typename Shape<Dim>::type shape;
-    typename Domain<T,Dim>::DomainSize domainSize;
-    Domain<T,Dim> ref, inBuffer, outBuffer;
-
-    Domain<T,Dim>& in  = inBuffer;
-    Domain<T,Dim>& out = outBuffer;
-
-    shape.fill(16);
-    domainSize.fill(2*hysop::constants::pi);
-
-    T eps = std::numeric_limits<T>::epsilon();
-    const std::size_t N = std::accumulate(shape.begin(), shape.end(), 1, std::multiplies<std::size_t>());
-    
-    ref.resize(domainSize).reshape(shape);
-    in  = ref;
-    out = ref;
-   
-    typename Shape<Dim>::type testCases;
-    testCases.fill(nBoundaryPairs);
-    Index<Dim> testCaseId(testCases);
-    std::array<T,3> meanDists{0};
-    std::size_t testCaseCount = testCaseId.maxId()-1;
-
-    if(verbose)
-        std::cout << std::endl;
-    
-    while(testCaseId() != testCaseId.maxId()-1) { 
-
-        /* generate transform configuration */
-        std::size_t orderSum = 0;
-        std::array<std::pair<domain::Boundary,domain::Boundary>, Dim> bdsConfig;
-        T W2sum = T(0);
-        for (std::size_t k=0; k<Dim; k++) {
-            std::size_t id = testCaseId[k];
-            bdsConfig[k] = pbds[id];
-            if(bdsConfig[k].first != domain::Boundary::NONE) {
-                W2sum += freqs[id]*freqs[id];
-                orderSum+=2;
-            }
-        }
-        domain::DomainConfiguration<Dim> domainConfig(bdsConfig, includePeriodicBds);
-        
-        T orderPow = std::pow(T(10),T(orderSum));
-        if(std::is_same<T,long double>::value) /* just in case long doubles are not hardware supported... */
-            orderPow *= 1e3;
-        const auto criteria = std::make_tuple(orderPow*eps*N,orderPow*eps*sqrt(N),2*orderPow*eps);
-
-        const auto phi = [&](const typename Domain<T,Dim>::SpaceVariable &x) { 
-            T val = func<T>(testCaseId[0])(x[0]);
-            for (std::size_t d=1; d < Dim; d++)
-                val *= func<T>(testCaseId[d])(x[d]);
-            return val;
-        };
-        const auto f = [&](const typename Domain<T,Dim>::SpaceVariable &x) { 
-            return -W2sum*phi(x);
-        };
-
-        {
-            ref.resetDomainConfiguration(domainConfig);
-            in  = ref;
-            out = ref;
-
-            in.apply(f);
-            ref.apply(phi);
-            out.data().apply([](T& v){ v=T(0);});
-        }
-
-        solver::FftPoissonSolver<T,Dim> solver(domainSize, domainConfig, FFTW_MEASURE, includePeriodicBds, includePeriodicBds);
-        solver.apply(in.data(), out.data());
-
-        std::stringstream ss;
-        ss << "[";
-        for (std::size_t k=0; k<Dim-1; k++) 
-            ss << bdsConfig[k].first << "/" << bdsConfig[k].second << ",";
-        ss << bdsConfig[Dim-1].first << "/" << bdsConfig[Dim-1].second;
-        ss << "]";
-
-        const auto dist = out.distance(ref);
-        const bool pass =      (std::get<0>(dist) < std::get<0>(criteria)) 
-            && (std::get<1>(dist) < std::get<1>(criteria))
-            && (std::get<2>(dist) < std::get<2>(criteria));
-
-        if((pass && verbose) || !pass) {
-            std::cout << (pass ? GREEN : RED);
-            std::cout << "\t" << std::setw(Dim*15) << ss.str() << " => " << (pass ? "OK" : "KO") 
-                << "  " << RESET << std::scientific << std::setprecision(2) << dist << std::endl;
-        }
-        if(!pass) {
-            //in.print("IN");
-            //ref.print("REF");
-            //out.print("OUT");
-            std::cout << "\t\tTest Failed... Criteria was " << criteria << "." << std::endl;
-        }
-        EXPECT_TRUE(pass);
-
-        meanDists[0] += std::get<0>(dist);
-        meanDists[1] += std::get<1>(dist);
-        meanDists[2] += std::get<2>(dist);
-
-        ++testCaseId;
-    }
-
-        for (std::size_t k = 0; k < 3; k++)
-            meanDists[k] /= T(testCaseCount);
-        std::cout << "\t=> mean distances over " << std::scientific << std::setprecision(1) << std::setw(4)
-            << testCaseCount << " testcases: " << meanDists;
-        for (std::size_t k = 0; k < 3; k++)
-            meanDists[k] = std::round(meanDists[k]/eps);
-        std::cout << " ~= " <<  std::fixed << std::setprecision(0) << meanDists << " eps" << std::endl; 
-}
-
-   
-#ifdef FFTW_HAS_FFTW3F
-TEST_F(PoissonSolverTest, FloatPoissonSolver) {
-    std::cout << std::endl;
-    std::cout << "== TEST 1D - float       ==";
-    test<float,1,false>(true);
-    std::cout << "== TEST 2D - float       ==";
-    test<float,2,false>();
-    std::cout << "== TEST 3D - float       ==";
-    test<float,3,false>();
-}
-#endif
-
-#ifdef FFTW_HAS_FFTW3D
-TEST_F(PoissonSolverTest, DoublePoissonSolver) {
-    std::cout << std::endl;
-    std::cout << "== TEST 1D - double      ==";
-    test<double,1,false>();
-    std::cout << "== TEST 2D - double      ==";
-    test<double,2,false>();
-    std::cout << "== TEST 3D - double      ==";
-    test<double,3,false>();
-}
-#endif
-    
-#ifdef FFTW_HAS_FFTW3L
-TEST_F(PoissonSolverTest, LongDoublePoissonSolver) {
-    std::cout << std::endl;
-    std::cout << "== TEST 1D - long double ==";
-    test<long double,1,false>();
-    std::cout << "== TEST 2D - long double ==";
-    test<long double,2,false>();
-    std::cout << "== TEST 3D - long double ==";
-    test<long double,3,false>();
-}
-#endif
-
-#ifdef FFTW_HAS_FFTW3Q
-TEST_F(PoissonSolverTest, QuadFloatPoissonSolver) {
-    std::cout << std::endl;
-    std::cout << "== TEST 1D - __float128 ==";
-    test<__float128,1,false>();
-    std::cout << "== TEST 2D - __float128 ==";
-    test<__float128,2,false>();
-    std::cout << "== TEST 3D - __float128 ==";
-    test<__float128,3,false>();
-}
-#endif
diff --git a/src/hysop++/tests/testPoissonSolver/testPoissonSolver.h b/src/hysop++/tests/testPoissonSolver/testPoissonSolver.h
deleted file mode 100644
index a5115abeffa845b87c5cee1463b86d711f707552..0000000000000000000000000000000000000000
--- a/src/hysop++/tests/testPoissonSolver/testPoissonSolver.h
+++ /dev/null
@@ -1,14 +0,0 @@
-
-#include "gtest/gtest.h"
-#include "solver/fftPoissonSolver.h"
-#include "fft/extension.h"
-
-using namespace hysop;
-
-class PoissonSolverTest : public ::testing::Test {
-    protected:
-        PoissonSolverTest() {}
-        void SetUp()  {}
-        void TearDown() {}
-        virtual ~PoissonSolverTest() {}
-};
diff --git a/src/hysop++/tests/testPolynoms/CMakeLists.txt b/src/hysop++/tests/testPolynoms/CMakeLists.txt
deleted file mode 100644
index ae4b35dd2c7c115b8e73a278ec049d0bd4336748..0000000000000000000000000000000000000000
--- a/src/hysop++/tests/testPolynoms/CMakeLists.txt
+++ /dev/null
@@ -1,14 +0,0 @@
-
-file(GLOB CPP_SRCS *.cpp)
-set(SRCS ${CPP_SRCS})
-
-get_filename_component(test_name ${CMAKE_CURRENT_SOURCE_DIR} NAME)
-add_definitions(${CXX_EXTRA_DEFINES})
-add_executable(${test_name} ${SRCS})
-add_dependencies(${test_name} ${HYSOP_CXX_LIBRARY_DEP})
-
-target_link_libraries(${test_name} ${HYSOP_LIBRARY})
-target_link_libraries(${test_name} ${GTEST_LIBRARIES} ${CXX_EXT_LIBS})
-
-add_test("${test_name}" "${test_name}")
-
diff --git a/src/hysop++/tests/testPolynoms/main.cpp b/src/hysop++/tests/testPolynoms/main.cpp
deleted file mode 100644
index b6141085e8080256953f5555450500ea27948a0c..0000000000000000000000000000000000000000
--- a/src/hysop++/tests/testPolynoms/main.cpp
+++ /dev/null
@@ -1,10 +0,0 @@
-
-#include "gtest/gtest.h"
-
-int main(int argc, char **argv)
-{
-    ::testing::InitGoogleTest(&argc, argv);
-    int ret = RUN_ALL_TESTS();
-
-    return ret;
-}
diff --git a/src/hysop++/tests/testPolynoms/testPolynoms.cpp b/src/hysop++/tests/testPolynoms/testPolynoms.cpp
deleted file mode 100644
index 0849af6a44be51f954392dcb5932bcfd435cf7fd..0000000000000000000000000000000000000000
--- a/src/hysop++/tests/testPolynoms/testPolynoms.cpp
+++ /dev/null
@@ -1,115 +0,0 @@
-
-#include "testPolynoms.h"
-
-using namespace hysop::maths;
-
-template <typename T, std::size_t Dim>
-void evalTest(std::size_t p_size, std::size_t p_samples);
-    
-TEST_F(PolynomialTest, EvalTest1D) {
-    const std::size_t order=10;
-    const std::size_t samples=4096;
-
-    evalTest<bool,1>(order,samples); 
-
-    evalTest<char,1>(order,samples); 
-    evalTest<short,1>(order,samples); 
-    evalTest<int,1>(order,samples); 
-    evalTest<long int,1>(order,samples); 
-    evalTest<long long int,1>(order,samples); 
-
-    evalTest<unsigned char,1>(order,samples); 
-    evalTest<unsigned short,1>(order,samples); 
-    evalTest<unsigned int,1>(order,samples); 
-    evalTest<unsigned long int,1>(order,samples); 
-    evalTest<unsigned long long int,1>(order,samples); 
-
-    evalTest<float,1>(order,samples); 
-    evalTest<double,1>(order,samples); 
-    evalTest<long double,1>(order,samples); 
-#ifdef HAS_QUADMATHS
-    evalTest<__float128,1>(order,samples); 
-#endif
-
-    evalTest<std::size_t,1>(order,samples);
-    evalTest<std::ptrdiff_t,1>(order,samples);
-}
-
-TEST_F(PolynomialTest, EvalTest2D) {
-    const std::size_t order=10;
-    const std::size_t samples=32;
-
-    evalTest<float,2>(order,samples); 
-    evalTest<double,2>(order,samples); 
-    evalTest<long double,2>(order,samples); 
-#ifdef HAS_QUADMATHS
-    evalTest<__float128,2>(order,samples); 
-#endif
-}
-
-TEST_F(PolynomialTest, EvalTest3D) {
-    const std::size_t order=10;
-    const std::size_t samples=4;
-
-    evalTest<float,3>(order,samples); 
-    evalTest<double,3>(order,samples); 
-    evalTest<long double,3>(order,samples); 
-#ifdef HAS_QUADMATHS
-    evalTest<__float128,3>(order,samples); 
-#endif
-}
-
-TEST_F(PolynomialTest, EvalTest4D) {
-    const std::size_t order=10;
-    const std::size_t samples=2;
-
-    evalTest<float,4>(order,samples); 
-    evalTest<double,4>(order,samples); 
-    evalTest<long double,4>(order,samples); 
-#ifdef HAS_QUADMATHS
-    evalTest<__float128,4>(order,samples); 
-#endif
-}
-
-template <typename T, std::size_t Dim>
-void evalTest(const std::size_t p_size, const std::size_t p_samples) {
-    Polynomial<T,Dim> P;
-    Index<Dim> polyIdx;
-    { 
-        typename Shape<Dim>::type polyShape;
-        polyShape.fill(p_size);
-        P.reshape(polyShape).applyToCoefficients([](T& ak, const Index<Dim>& idx){ 
-                ak = T(idx())/T(idx.maxId());
-        });
-    }
-
-    std::array<T,Dim> X;
-    T dX;
-    {
-        const T a = T(0);
-        const T b = T(1);
-        dX = (b-a)/(p_samples-1);   
-    }
-    
-    typename Shape<Dim>::type sampleShape;
-    sampleShape.fill(p_samples);
-    Index<Dim> sampleIdx(sampleShape);
-    while(!sampleIdx.atMaxId()) {
-        for (std::size_t d=0; d < Dim; d++)
-            X[d] = sampleIdx[d]*dX;
-        T lhs, rhs;
-        lhs = P(X); 
-        rhs = T(0);
-        polyIdx.reset(P.shape());
-        while(!polyIdx.atMaxId()) {
-            T val = T(1);
-            for (std::size_t d=0; d<Dim; d++)
-                val *= std::pow(X[d],polyIdx[d]);
-            rhs += T(polyIdx())/T(polyIdx.maxId())*val;
-            ++polyIdx;
-        }
-        ASSERT_LE(std::abs(rhs-lhs),std::pow(10,Dim)*std::numeric_limits<T>::epsilon());
-        ++sampleIdx;
-    }
-}
-
diff --git a/src/hysop++/tests/testPolynoms/testPolynoms.h b/src/hysop++/tests/testPolynoms/testPolynoms.h
deleted file mode 100644
index d7dd0bd6adbae83eca2cf652100ebd2ce71e31ef..0000000000000000000000000000000000000000
--- a/src/hysop++/tests/testPolynoms/testPolynoms.h
+++ /dev/null
@@ -1,13 +0,0 @@
-
-#include "gtest/gtest.h"
-#include "maths/polynomial.h"
-
-using namespace hysop;
-
-class PolynomialTest : public ::testing::Test {
-    protected:
-        PolynomialTest() {}
-        void SetUp()  {}
-        void TearDown() {}
-        virtual ~PolynomialTest() {}
-};
diff --git a/swig/cpp2hysop.i b/swig/cpp2hysop.i
deleted file mode 100644
index 7b1e2deb2418189642c7011528076310973abe8a..0000000000000000000000000000000000000000
--- a/swig/cpp2hysop.i
+++ /dev/null
@@ -1,7 +0,0 @@
-// -*- C++ -*-
-%module cpp2hysop
-
-%include start.i
-
-%include hysop++/hysop++.i
-
diff --git a/swig/hysop++/domain.i b/swig/hysop++/domain.i
deleted file mode 100644
index 8e996563f8bc60b02add5146c4e94cec4061a7f6..0000000000000000000000000000000000000000
--- a/swig/hysop++/domain.i
+++ /dev/null
@@ -1,38 +0,0 @@
-%module domain
-
-// -*- C++ -*-
-%{
-  #include "domain/boundary.h"
-  #include "domain/domainConfiguration.h"
-%}
-
-/* domain boundary */
-%ignore hysop::domain::operator<<;
-%include "domain/boundary.h"
-
-
-/* domain configuration */
-%ignore hysop::domain::operator<<;
-%ignore hysop::domain::DomainConfiguration::operator[];
-%include "domain/domainConfiguration.h"
-
-%define INSTANTIATE_DOMAIN_CONFIG_IMPL(CLASS_NAME,CLASS_TEMPLATE) 
-    %template(CLASS_NAME) CLASS_TEMPLATE; 
-    %extend CLASS_TEMPLATE {
-        CLASS_TEMPLATE::BoundaryPair __get_item__(unsigned int k) const {
-            return $self->operator[](k);
-        }
-        const char* __str__() const {
-            std::stringstream ss;
-            ss << *($self);
-            return ss.str().c_str(); 
-        }
-    }
-%enddef
-
-%define INSTANTIATE_DOMAIN_CONFIG(INT) 
-    INSTANTIATE_DOMAIN_CONFIG_IMPL(DomainConfiguration ## INT ## D, hysop::domain::DomainConfiguration<INT>)
-%enddef
-
-%formacro(INSTANTIATE_DOMAIN_CONFIG, INSTANTIATED_DIMENSIONS)
-
diff --git a/swig/hysop++/fft.i b/swig/hysop++/fft.i
deleted file mode 100644
index dd62b768631dd89b95e11a8fff4d76102ae13351..0000000000000000000000000000000000000000
--- a/swig/hysop++/fft.i
+++ /dev/null
@@ -1,94 +0,0 @@
-
-%module fft
-
-// -*- C++ -*-
-%{
-  #include "fftw3.h"
-  #include "fft/fftw3.h"
-  #include "fft/extension.h"
-  #include "fft/transform.h"
-  #include "fft/fftDomainConfiguration.h"
-%}
-
-/* fftw3.h */
-#if !defined(FFTW_HAS_FFTW3F_THREADS) || !defined(FFTW_HAS_FFTW3F_OMP)
-    %ignore fftwf_init_threads();
-    %ignore fftwf_plan_with_nthreads(int);
-    %ignore fftwf_make_planner_thread_safe();
-    %ignore fftwf_cleanup_threads();
-#endif
-
-#if !defined(FFTW_HAS_FFTW3D_THREADS) || !defined(FFTW_HAS_FFTW3D_OMP)
-    %ignore fftw_init_threads();
-    %ignore fftw_plan_with_nthreads(int);
-    %ignore fftw_make_planner_thread_safe();
-    %ignore fftw_cleanup_threads();
-#endif
-
-#if !defined(FFTW_HAS_FFTW3L_THREADS) || !defined(FFTW_HAS_FFTW3L_OMP)
-    %ignore fftwl_init_threads();
-    %ignore fftwl_plan_with_nthreads(int);
-    %ignore fftwl_make_planner_thread_safe();
-    %ignore fftwl_cleanup_threads();
-#endif
-
-#if !defined(FFTW_HAS_FFTW3Q_THREADS) || !defined(FFTW_HAS_FFTW3Q_OMP)
-    %ignore fftwq_init_threads();
-    %ignore fftwq_plan_with_nthreads(int);
-    %ignore fftwq_make_planner_thread_safe();
-    %ignore fftwq_cleanup_threads();
-#endif
-
-%rename(is_) fftw_iodim_do_not_use_me::is;
-%rename(is_) fftw_iodim64_do_not_use_me::is;
-%include "fftw3.h"
-
-/* fftw3 c++ wrappers */
-%include "fft/fftw3.h"
-%template(Fftw3f) hysop::fft::Fftw3<float>;
-%template(Fftw3d) hysop::fft::Fftw3<double>;
-%template(Fftw3l) hysop::fft::Fftw3<long double>;
-#ifdef HAS_QUADMATHS
-    %template(Fftw3q) hysop::fft::Fftw3<__float128>;
-#endif
-
-/* fft transforms */
-%ignore hysop::fft::operator<<;
-%include "fft/transform.h"
-%extend hysop::fft::Transform {
-    const char* __str__() const { 
-        return $self->toString().c_str(); 
-    }
-}
-
-
-/* fft extensions */
-%ignore hysop::fft::operator<<;
-%include "fft/extension.h"
-
-
-/* fft domain configuration */
-%ignore hysop::fft::operator<<;
-%ignore hysop::fft::FftDomainConfiguration::operator[];
-%include "fft/fftDomainConfiguration.h"
-
-%define INSTANTIATE_FFT_DOMAIN_CONFIG_IMPL(CLASS_NAME,CLASS_TEMPLATE...) 
-    %template(CLASS_NAME) CLASS_TEMPLATE; 
-    %extend CLASS_TEMPLATE {
-        CLASS_TEMPLATE::ExtensionPair __get_item__(unsigned int k) const {
-            return $self->operator[](k);
-        }
-        const char* __str__() const {
-            std::stringstream ss;
-            ss << *($self);
-            return ss.str().c_str();
-        }
-    }
-%enddef
-
-%define INSTANTIATE_FFT_DOMAIN_CONFIG(INT) 
-    INSTANTIATE_FFT_DOMAIN_CONFIG_IMPL(FftDomainConfiguration ## INT ## D, hysop::fft::FftDomainConfiguration<INT>)
-%enddef
-
-%formacro(INSTANTIATE_FFT_DOMAIN_CONFIG, INSTANTIATED_DIMENSIONS)
-
diff --git a/swig/hysop++/hysop++.i b/swig/hysop++/hysop++.i
deleted file mode 100644
index a681efa69e7bfb6395da0f94a606665a65ccc0d8..0000000000000000000000000000000000000000
--- a/swig/hysop++/hysop++.i
+++ /dev/null
@@ -1,5 +0,0 @@
-
-/* hysop++ */
-%include "utils.i"
-%include "domain.i"
-%include "fft.i"
diff --git a/swig/hysop++/utils.i b/swig/hysop++/utils.i
deleted file mode 100644
index 541179ea9f6d5da9971426e67f47376eb6c1ed68..0000000000000000000000000000000000000000
--- a/swig/hysop++/utils.i
+++ /dev/null
@@ -1,29 +0,0 @@
-
-%module utils
-
-// -*- C++ -*-
-%{
-  #include "utils/types.h"
-  #include "utils/constants.h"
-%}
-
-/* types */
-%warnfilter(342) hysop::multi_array;
-%warnfilter(342) hysop::multi_array_view;
-%warnfilter(342) hysop::const_multi_array_view;
-%warnfilter(342) hysop::multi_array_ref;
-%warnfilter(342) hysop::const_multi_array_ref;
-%ignore hysop::multi_array;
-%ignore hysop::multi_array_view;
-%ignore hysop::const_multi_array_view;
-%ignore hysop::multi_array_ref;
-%ignore hysop::const_multi_array_ref;
-%include "utils/types.h"
-
-%define INSTANTIATE_SHAPE(INT)  %template(Shape  ## INT ## D) hysop::Shape<INT>;   %enddef
-%define INSTANTIATE_OFFSET(INT) %template(Offset ## INT ## D) hysop::Offset<INT>;  %enddef
-%formacro(INSTANTIATE_SHAPE,  INSTANTIATED_DIMENSIONS)
-%formacro(INSTANTIATE_OFFSET, INSTANTIATED_DIMENSIONS)
-
-/* constants */
-%include "utils/constants.h"
diff --git a/swig/numpy.i b/swig/numpy.i
deleted file mode 100644
index d3eb0c2181c419b73aeb396b757db13570da4a5b..0000000000000000000000000000000000000000
--- a/swig/numpy.i
+++ /dev/null
@@ -1,3085 +0,0 @@
-/* -*- C -*-  (not really, but good for syntax highlighting) */
-#ifdef SWIGPYTHON
-
-%{
-#ifndef SWIG_FILE_WITH_INIT
-#define NO_IMPORT_ARRAY
-#endif
-#include "stdio.h"
-#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
-#include <numpy/arrayobject.h>
-%}
-
-/**********************************************************************/
-
-%fragment("NumPy_Backward_Compatibility", "header")
-{
-%#if NPY_API_VERSION < 0x00000007
-%#define NPY_ARRAY_DEFAULT NPY_DEFAULT
-%#define NPY_ARRAY_FARRAY  NPY_FARRAY
-%#define NPY_ARRAY_F_CONTIGUOUS NPY_F_CONTIGUOUS
-%#endif
-}
-
-/**********************************************************************/
-
-/* The following code originally appeared in
- * enthought/kiva/agg/src/numeric.i written by Eric Jones.  It was
- * translated from C++ to C by John Hunter.  Bill Spotz has modified
- * it to fix some minor bugs, upgrade from Numeric to numpy (all
- * versions), add some comments and functionality, and convert from
- * direct code insertion to SWIG fragments.
- */
-
-%fragment("NumPy_Macros", "header")
-{
-/* Macros to extract array attributes.
- */
-%#if NPY_API_VERSION < 0x00000007
-%#define is_array(a)            ((a) && PyArray_Check((PyArrayObject*)a))
-%#define array_type(a)          (int)(PyArray_TYPE((PyArrayObject*)a))
-%#define array_numdims(a)       (((PyArrayObject*)a)->nd)
-%#define array_dimensions(a)    (((PyArrayObject*)a)->dimensions)
-%#define array_size(a,i)        (((PyArrayObject*)a)->dimensions[i])
-%#define array_strides(a)       (((PyArrayObject*)a)->strides)
-%#define array_stride(a,i)      (((PyArrayObject*)a)->strides[i])
-%#define array_data(a)          (((PyArrayObject*)a)->data)
-%#define array_descr(a)         (((PyArrayObject*)a)->descr)
-%#define array_flags(a)         (((PyArrayObject*)a)->flags)
-%#define array_enableflags(a,f) (((PyArrayObject*)a)->flags) = f
-%#else
-%#define is_array(a)            ((a) && PyArray_Check(a))
-%#define array_type(a)          PyArray_TYPE((PyArrayObject*)a)
-%#define array_numdims(a)       PyArray_NDIM((PyArrayObject*)a)
-%#define array_dimensions(a)    PyArray_DIMS((PyArrayObject*)a)
-%#define array_strides(a)       PyArray_STRIDES((PyArrayObject*)a)
-%#define array_stride(a,i)      PyArray_STRIDE((PyArrayObject*)a,i)
-%#define array_size(a,i)        PyArray_DIM((PyArrayObject*)a,i)
-%#define array_data(a)          PyArray_DATA((PyArrayObject*)a)
-%#define array_descr(a)         PyArray_DESCR((PyArrayObject*)a)
-%#define array_flags(a)         PyArray_FLAGS((PyArrayObject*)a)
-%#define array_enableflags(a,f) PyArray_ENABLEFLAGS((PyArrayObject*)a,f)
-%#endif
-%#define array_is_contiguous(a) (PyArray_ISCONTIGUOUS((PyArrayObject*)a))
-%#define array_is_native(a)     (PyArray_ISNOTSWAPPED((PyArrayObject*)a))
-%#define array_is_fortran(a)    (PyArray_ISFORTRAN((PyArrayObject*)a))
-}
-
-/**********************************************************************/
-
-%fragment("NumPy_Utilities",
-          "header")
-{
-  /* Given a PyObject, return a string describing its type.
-   */
-  const char* pytype_string(PyObject* py_obj)
-  {
-    if (py_obj == NULL          ) return "C NULL value";
-    if (py_obj == Py_None       ) return "Python None" ;
-    if (PyCallable_Check(py_obj)) return "callable"    ;
-    if (PyString_Check(  py_obj)) return "string"      ;
-    if (PyInt_Check(     py_obj)) return "int"         ;
-    if (PyFloat_Check(   py_obj)) return "float"       ;
-    if (PyDict_Check(    py_obj)) return "dict"        ;
-    if (PyList_Check(    py_obj)) return "list"        ;
-    if (PyTuple_Check(   py_obj)) return "tuple"       ;
-%#if PY_MAJOR_VERSION < 3
-    if (PyFile_Check(    py_obj)) return "file"        ;
-    if (PyModule_Check(  py_obj)) return "module"      ;
-    if (PyInstance_Check(py_obj)) return "instance"    ;
-%#endif
-
-    return "unkown type";
-  }
-
-  /* Given a NumPy typecode, return a string describing the type.
-   */
-  const char* typecode_string(int typecode)
-  {
-    static const char* type_names[25] = {"bool",
-                                         "byte",
-                                         "unsigned byte",
-                                         "short",
-                                         "unsigned short",
-                                         "int",
-                                         "unsigned int",
-                                         "long",
-                                         "unsigned long",
-                                         "long long",
-                                         "unsigned long long",
-                                         "float",
-                                         "double",
-                                         "long double",
-                                         "complex float",
-                                         "complex double",
-                                         "complex long double",
-                                         "object",
-                                         "string",
-                                         "unicode",
-                                         "void",
-                                         "ntypes",
-                                         "notype",
-                                         "char",
-                                         "unknown"};
-    return typecode < 24 ? type_names[typecode] : type_names[24];
-  }
-
-  /* Make sure input has correct numpy type.  This now just calls
-     PyArray_EquivTypenums().
-   */
-  int type_match(int actual_type,
-                 int desired_type)
-  {
-    return PyArray_EquivTypenums(actual_type, desired_type);
-  }
-
-%#ifdef SWIGPY_USE_CAPSULE
-  void free_cap(PyObject * cap)
-  {
-    void* array = (void*) PyCapsule_GetPointer(cap,SWIGPY_CAPSULE_NAME);
-    if (array != NULL) free(array);
-  }
-%#endif
-
-
-}
-
-/**********************************************************************/
-
-%fragment("NumPy_Object_to_Array",
-          "header",
-          fragment="NumPy_Backward_Compatibility",
-          fragment="NumPy_Macros",
-          fragment="NumPy_Utilities")
-{
-  /* Given a PyObject pointer, cast it to a PyArrayObject pointer if
-   * legal.  If not, set the python error string appropriately and
-   * return NULL.
-   */
-  PyArrayObject* obj_to_array_no_conversion(PyObject* input,
-                                            int        typecode)
-  {
-    PyArrayObject* ary = NULL;
-    if (is_array(input) && (typecode == NPY_NOTYPE ||
-                            PyArray_EquivTypenums(array_type(input), typecode)))
-    {
-      ary = (PyArrayObject*) input;
-    }
-    else if is_array(input)
-    {
-      const char* desired_type = typecode_string(typecode);
-      const char* actual_type  = typecode_string(array_type(input));
-      PyErr_Format(PyExc_TypeError,
-                   "Array of type '%s' required.  Array of type '%s' given",
-                   desired_type, actual_type);
-      ary = NULL;
-    }
-    else
-    {
-      const char* desired_type = typecode_string(typecode);
-      const char* actual_type  = pytype_string(input);
-      PyErr_Format(PyExc_TypeError,
-                   "Array of type '%s' required.  A '%s' was given",
-                   desired_type,
-                   actual_type);
-      ary = NULL;
-    }
-    return ary;
-  }
-
-  /* Convert the given PyObject to a NumPy array with the given
-   * typecode.  On success, return a valid PyArrayObject* with the
-   * correct type.  On failure, the python error string will be set and
-   * the routine returns NULL.
-   */
-  PyArrayObject* obj_to_array_allow_conversion(PyObject* input,
-                                               int       typecode,
-                                               int*      is_new_object)
-  {
-    PyArrayObject* ary = NULL;
-    PyObject*      py_obj;
-    if (is_array(input) && (typecode == NPY_NOTYPE ||
-                            PyArray_EquivTypenums(array_type(input),typecode)))
-    {
-      ary = (PyArrayObject*) input;
-      *is_new_object = 0;
-    }
-    else
-    {
-      py_obj = PyArray_FROMANY(input, typecode, 0, 0, NPY_ARRAY_DEFAULT);
-      /* If NULL, PyArray_FromObject will have set python error value.*/
-      ary = (PyArrayObject*) py_obj;
-      *is_new_object = 1;
-    }
-    return ary;
-  }
-
-  /* Given a PyArrayObject, check to see if it is contiguous.  If so,
-   * return the input pointer and flag it as not a new object.  If it is
-   * not contiguous, create a new PyArrayObject using the original data,
-   * flag it as a new object and return the pointer.
-   */
-  PyArrayObject* make_contiguous(PyArrayObject* ary,
-                                 int*           is_new_object,
-                                 int            min_dims,
-                                 int            max_dims)
-  {
-    PyArrayObject* result;
-    if (array_is_contiguous(ary))
-    {
-      result = ary;
-      *is_new_object = 0;
-    }
-    else
-    {
-      result = (PyArrayObject*) PyArray_ContiguousFromObject((PyObject*)ary,
-                                                              array_type(ary),
-                                                              min_dims,
-                                                              max_dims);
-      *is_new_object = 1;
-    }
-    return result;
-  }
-
-  /* Given a PyArrayObject, check to see if it is Fortran-contiguous.
-   * If so, return the input pointer, but do not flag it as not a new
-   * object.  If it is not Fortran-contiguous, create a new
-   * PyArrayObject using the original data, flag it as a new object
-   * and return the pointer.
-   */
-  PyArrayObject* make_fortran(PyArrayObject* ary,
-                              int*           is_new_object)
-  {
-    PyArrayObject* result;
-    if (array_is_fortran(ary))
-    {
-      result = ary;
-      *is_new_object = 0;
-    }
-    else
-    {
-      Py_INCREF(array_descr(ary));
-      result = (PyArrayObject*) PyArray_FromArray(ary,
-                                                  array_descr(ary),
-                                                  NPY_ARRAY_F_CONTIGUOUS);
-      *is_new_object = 1;
-    }
-    return result;
-  }
-
-  /* Convert a given PyObject to a contiguous PyArrayObject of the
-   * specified type.  If the input object is not a contiguous
-   * PyArrayObject, a new one will be created and the new object flag
-   * will be set.
-   */
-  PyArrayObject* obj_to_array_contiguous_allow_conversion(PyObject* input,
-                                                          int       typecode,
-                                                          int*      is_new_object)
-  {
-    int is_new1 = 0;
-    int is_new2 = 0;
-    PyArrayObject* ary2;
-    PyArrayObject* ary1 = obj_to_array_allow_conversion(input,
-                                                        typecode,
-                                                        &is_new1);
-    if (ary1)
-    {
-      ary2 = make_contiguous(ary1, &is_new2, 0, 0);
-      if ( is_new1 && is_new2)
-      {
-        Py_DECREF(ary1);
-      }
-      ary1 = ary2;
-    }
-    *is_new_object = is_new1 || is_new2;
-    return ary1;
-  }
-
-  /* Convert a given PyObject to a Fortran-ordered PyArrayObject of the
-   * specified type.  If the input object is not a Fortran-ordered
-   * PyArrayObject, a new one will be created and the new object flag
-   * will be set.
-   */
-  PyArrayObject* obj_to_array_fortran_allow_conversion(PyObject* input,
-                                                       int       typecode,
-                                                       int*      is_new_object)
-  {
-    int is_new1 = 0;
-    int is_new2 = 0;
-    PyArrayObject* ary2;
-    PyArrayObject* ary1 = obj_to_array_allow_conversion(input,
-                                                        typecode,
-                                                        &is_new1);
-    if (ary1)
-    {
-      ary2 = make_fortran(ary1, &is_new2);
-      if (is_new1 && is_new2)
-      {
-        Py_DECREF(ary1);
-      }
-      ary1 = ary2;
-    }
-    *is_new_object = is_new1 || is_new2;
-    return ary1;
-  }
-} /* end fragment */
-
-/**********************************************************************/
-
-%fragment("NumPy_Array_Requirements",
-          "header",
-          fragment="NumPy_Backward_Compatibility",
-          fragment="NumPy_Macros")
-{
-  /* Test whether a python object is contiguous.  If array is
-   * contiguous, return 1.  Otherwise, set the python error string and
-   * return 0.
-   */
-  int require_contiguous(PyArrayObject* ary)
-  {
-    int contiguous = 1;
-    if (!array_is_contiguous(ary))
-    {
-      PyErr_SetString(PyExc_TypeError,
-                      "Array must be contiguous.  A non-contiguous array was given");
-      contiguous = 0;
-    }
-    return contiguous;
-  }
-
-  /* Require that a numpy array is not byte-swapped.  If the array is
-   * not byte-swapped, return 1.  Otherwise, set the python error string
-   * and return 0.
-   */
-  int require_native(PyArrayObject* ary)
-  {
-    int native = 1;
-    if (!array_is_native(ary))
-    {
-      PyErr_SetString(PyExc_TypeError,
-                      "Array must have native byteorder.  "
-                      "A byte-swapped array was given");
-      native = 0;
-    }
-    return native;
-  }
-
-  /* Require the given PyArrayObject to have a specified number of
-   * dimensions.  If the array has the specified number of dimensions,
-   * return 1.  Otherwise, set the python error string and return 0.
-   */
-  int require_dimensions(PyArrayObject* ary,
-                         int            exact_dimensions)
-  {
-    int success = 1;
-    if (array_numdims(ary) != exact_dimensions)
-    {
-      PyErr_Format(PyExc_TypeError,
-                   "Array must have %d dimensions.  Given array has %d dimensions",
-                   exact_dimensions,
-                   array_numdims(ary));
-      success = 0;
-    }
-    return success;
-  }
-
-  /* Require the given PyArrayObject to have one of a list of specified
-   * number of dimensions.  If the array has one of the specified number
-   * of dimensions, return 1.  Otherwise, set the python error string
-   * and return 0.
-   */
-  int require_dimensions_n(PyArrayObject* ary,
-                           int*           exact_dimensions,
-                           int            n)
-  {
-    int success = 0;
-    int i = 0;
-    char dims_str[255] = "";
-    char s[255];
-    for (i = 0; i < n && !success; i++)
-    {
-      if (array_numdims(ary) == exact_dimensions[i])
-      {
-        success = 1;
-      }
-    }
-    if (!success)
-    {
-      for (i = 0; i < n-1; i++)
-      {
-        sprintf(s, "%d, ", exact_dimensions[i]);
-        strcat(dims_str,s);
-      }
-      sprintf(s, " or %d", exact_dimensions[n-1]);
-      strcat(dims_str,s);
-      PyErr_Format(PyExc_TypeError,
-                   "Array must have %s dimensions.  Given array has %d dimensions",
-                   dims_str,
-                   array_numdims(ary));
-    }
-    return success;
-  }
-
-  /* Require the given PyArrayObject to have a specified shape.  If the
-   * array has the specified shape, return 1.  Otherwise, set the python
-   * error string and return 0.
-   */
-  int require_size(PyArrayObject* ary,
-                   npy_intp*      size,
-                   int            n)
-  {
-    int i = 0;
-    int success = 1;
-    int len = 0;
-    char desired_dims[255] = "[";
-    char s[255];
-    char actual_dims[255] = "[";
-    for(i=0; i < n;i++)
-    {
-      if (size[i] != -1 &&  size[i] != array_size(ary,i))
-      {
-        success = 0;
-      }
-    }
-    if (!success)
-    {
-      for (i = 0; i < n; i++)
-      {
-        if (size[i] == -1)
-        {
-          sprintf(s, "*,");
-        }
-        else
-        {
-          sprintf(s, "%ld,", (long int)size[i]);
-        }
-        strcat(desired_dims,s);
-      }
-      len = strlen(desired_dims);
-      desired_dims[len-1] = ']';
-      for (i = 0; i < n; i++)
-      {
-        sprintf(s, "%ld,", (long int)array_size(ary,i));
-        strcat(actual_dims,s);
-      }
-      len = strlen(actual_dims);
-      actual_dims[len-1] = ']';
-      PyErr_Format(PyExc_TypeError,
-                   "Array must have shape of %s.  Given array has shape of %s",
-                   desired_dims,
-                   actual_dims);
-    }
-    return success;
-  }
-
-  /* Require the given PyArrayObject to to be Fortran ordered.  If the
-   * the PyArrayObject is already Fortran ordered, do nothing.  Else,
-   * set the Fortran ordering flag and recompute the strides.
-   */
-  int require_fortran(PyArrayObject* ary)
-  {
-    int success = 1;
-    int nd = array_numdims(ary);
-    int i = 0;
-    npy_intp * strides = array_strides(ary);
-    if (array_is_fortran(ary)) return success;
-    /* Set the Fortran ordered flag */
-    array_enableflags(ary,NPY_ARRAY_FARRAY);
-    /* Recompute the strides */
-    strides[0] = strides[nd-1];
-    for (i=1; i < nd; ++i)
-      strides[i] = strides[i-1] * array_size(ary,i-1);
-    return success;
-  }
-}
-
-/* Combine all NumPy fragments into one for convenience */
-%fragment("NumPy_Fragments",
-          "header",
-          fragment="NumPy_Backward_Compatibility",
-          fragment="NumPy_Macros",
-          fragment="NumPy_Utilities",
-          fragment="NumPy_Object_to_Array",
-          fragment="NumPy_Array_Requirements")
-{
-}
-
-/* End John Hunter translation (with modifications by Bill Spotz)
- */
-
-/* %numpy_typemaps() macro
- *
- * This macro defines a family of 74 typemaps that allow C arguments
- * of the form
- *
- *    1. (DATA_TYPE IN_ARRAY1[ANY])
- *    2. (DATA_TYPE* IN_ARRAY1, DIM_TYPE DIM1)
- *    3. (DIM_TYPE DIM1, DATA_TYPE* IN_ARRAY1)
- *
- *    4. (DATA_TYPE IN_ARRAY2[ANY][ANY])
- *    5. (DATA_TYPE* IN_ARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2)
- *    6. (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* IN_ARRAY2)
- *    7. (DATA_TYPE* IN_FARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2)
- *    8. (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* IN_FARRAY2)
- *
- *    9. (DATA_TYPE IN_ARRAY3[ANY][ANY][ANY])
- *   10. (DATA_TYPE* IN_ARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3)
- *   11. (DATA_TYPE** IN_ARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3)
- *   12. (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* IN_ARRAY3)
- *   13. (DATA_TYPE* IN_FARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3)
- *   14. (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* IN_FARRAY3)
- *
- *   15. (DATA_TYPE IN_ARRAY4[ANY][ANY][ANY][ANY])
- *   16. (DATA_TYPE* IN_ARRAY4, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DIM_TYPE DIM4)
- *   17. (DATA_TYPE** IN_ARRAY4, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DIM_TYPE DIM4)
- *   18. (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, , DIM_TYPE DIM4, DATA_TYPE* IN_ARRAY4)
- *   19. (DATA_TYPE* IN_FARRAY4, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DIM_TYPE DIM4)
- *   20. (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DIM_TYPE DIM4, DATA_TYPE* IN_FARRAY4)
- *
- *   21. (DATA_TYPE INPLACE_ARRAY1[ANY])
- *   22. (DATA_TYPE* INPLACE_ARRAY1, DIM_TYPE DIM1)
- *   23. (DIM_TYPE DIM1, DATA_TYPE* INPLACE_ARRAY1)
- *
- *   24. (DATA_TYPE INPLACE_ARRAY2[ANY][ANY])
- *   25. (DATA_TYPE* INPLACE_ARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2)
- *   26. (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* INPLACE_ARRAY2)
- *   27. (DATA_TYPE* INPLACE_FARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2)
- *   28. (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* INPLACE_FARRAY2)
- *
- *   29. (DATA_TYPE INPLACE_ARRAY3[ANY][ANY][ANY])
- *   30. (DATA_TYPE* INPLACE_ARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3)
- *   31. (DATA_TYPE** INPLACE_ARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3)
- *   32. (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* INPLACE_ARRAY3)
- *   33. (DATA_TYPE* INPLACE_FARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3)
- *   34. (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* INPLACE_FARRAY3)
- *
- *   35. (DATA_TYPE INPLACE_ARRAY4[ANY][ANY][ANY][ANY])
- *   36. (DATA_TYPE* INPLACE_ARRAY4, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DIM_TYPE DIM4)
- *   37. (DATA_TYPE** INPLACE_ARRAY4, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DIM_TYPE DIM4)
- *   38. (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DIM_TYPE DIM4, DATA_TYPE* INPLACE_ARRAY4)
- *   39. (DATA_TYPE* INPLACE_FARRAY4, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DIM_TYPE DIM4)
- *   40. (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DIM_TYPE DIM4, DATA_TYPE* INPLACE_FARRAY4)
- *
- *   41. (DATA_TYPE ARGOUT_ARRAY1[ANY])
- *   42. (DATA_TYPE* ARGOUT_ARRAY1, DIM_TYPE DIM1)
- *   43. (DIM_TYPE DIM1, DATA_TYPE* ARGOUT_ARRAY1)
- *
- *   44. (DATA_TYPE ARGOUT_ARRAY2[ANY][ANY])
- *
- *   45. (DATA_TYPE ARGOUT_ARRAY3[ANY][ANY][ANY])
- *
- *   46. (DATA_TYPE ARGOUT_ARRAY4[ANY][ANY][ANY][ANY])
- *
- *   47. (DATA_TYPE** ARGOUTVIEW_ARRAY1, DIM_TYPE* DIM1)
- *   48. (DIM_TYPE* DIM1, DATA_TYPE** ARGOUTVIEW_ARRAY1)
- *
- *   49. (DATA_TYPE** ARGOUTVIEW_ARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2)
- *   50. (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEW_ARRAY2)
- *   51. (DATA_TYPE** ARGOUTVIEW_FARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2)
- *   52. (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEW_FARRAY2)
- *
- *   53. (DATA_TYPE** ARGOUTVIEW_ARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3)
- *   54. (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DATA_TYPE** ARGOUTVIEW_ARRAY3)
- *   55. (DATA_TYPE** ARGOUTVIEW_FARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3)
- *   56. (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DATA_TYPE** ARGOUTVIEW_FARRAY3)
- *
- *   57. (DATA_TYPE** ARGOUTVIEW_ARRAY4, DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DIM_TYPE* DIM4)
- *   58. (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DIM_TYPE* DIM4, DATA_TYPE** ARGOUTVIEW_ARRAY4)
- *   59. (DATA_TYPE** ARGOUTVIEW_FARRAY4, DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DIM_TYPE* DIM4)
- *   60. (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DIM_TYPE* DIM4, DATA_TYPE** ARGOUTVIEW_FARRAY4)
- *
- *   61. (DATA_TYPE** ARGOUTVIEWM_ARRAY1, DIM_TYPE* DIM1)
- *   62. (DIM_TYPE* DIM1, DATA_TYPE** ARGOUTVIEWM_ARRAY1)
- *
- *   63. (DATA_TYPE** ARGOUTVIEWM_ARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2)
- *   64. (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEWM_ARRAY2)
- *   65. (DATA_TYPE** ARGOUTVIEWM_FARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2)
- *   66. (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEWM_FARRAY2)
- *
- *   67. (DATA_TYPE** ARGOUTVIEWM_ARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3)
- *   68. (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DATA_TYPE** ARGOUTVIEWM_ARRAY3)
- *   69. (DATA_TYPE** ARGOUTVIEWM_FARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3)
- *   70. (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DATA_TYPE** ARGOUTVIEWM_FARRAY3)
- *
- *   71. (DATA_TYPE** ARGOUTVIEWM_ARRAY4, DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DIM_TYPE* DIM4)
- *   72. (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DIM_TYPE* DIM4, DATA_TYPE** ARGOUTVIEWM_ARRAY4)
- *   73. (DATA_TYPE** ARGOUTVIEWM_FARRAY4, DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DIM_TYPE* DIM4)
- *   74. (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DIM_TYPE* DIM4, DATA_TYPE** ARGOUTVIEWM_FARRAY4)
- *
- * where "DATA_TYPE" is any type supported by the NumPy module, and
- * "DIM_TYPE" is any int-like type suitable for specifying dimensions.
- * The difference between "ARRAY" typemaps and "FARRAY" typemaps is
- * that the "FARRAY" typemaps expect Fortran ordering of
- * multidimensional arrays.  In python, the dimensions will not need
- * to be specified (except for the "DATA_TYPE* ARGOUT_ARRAY1"
- * typemaps).  The IN_ARRAYs can be a numpy array or any sequence that
- * can be converted to a numpy array of the specified type.  The
- * INPLACE_ARRAYs must be numpy arrays of the appropriate type.  The
- * ARGOUT_ARRAYs will be returned as new numpy arrays of the
- * appropriate type.
- *
- * These typemaps can be applied to existing functions using the
- * %apply directive.  For example:
- *
- *     %apply (double* IN_ARRAY1, int DIM1) {(double* series, int length)};
- *     double prod(double* series, int length);
- *
- *     %apply (int DIM1, int DIM2, double* INPLACE_ARRAY2)
- *           {(int rows, int cols, double* matrix        )};
- *     void floor(int rows, int cols, double* matrix, double f);
- *
- *     %apply (double IN_ARRAY3[ANY][ANY][ANY])
- *           {(double tensor[2][2][2]         )};
- *     %apply (double ARGOUT_ARRAY3[ANY][ANY][ANY])
- *           {(double low[2][2][2]                )};
- *     %apply (double ARGOUT_ARRAY3[ANY][ANY][ANY])
- *           {(double upp[2][2][2]                )};
- *     void luSplit(double tensor[2][2][2],
- *                  double low[2][2][2],
- *                  double upp[2][2][2]    );
- *
- * or directly with
- *
- *     double prod(double* IN_ARRAY1, int DIM1);
- *
- *     void floor(int DIM1, int DIM2, double* INPLACE_ARRAY2, double f);
- *
- *     void luSplit(double IN_ARRAY3[ANY][ANY][ANY],
- *                  double ARGOUT_ARRAY3[ANY][ANY][ANY],
- *                  double ARGOUT_ARRAY3[ANY][ANY][ANY]);
- */
-
-%define %numpy_typemaps(DATA_TYPE, DATA_TYPECODE, DIM_TYPE)
-
-/************************/
-/* Input Array Typemaps */
-/************************/
-
-/* Typemap suite for (DATA_TYPE IN_ARRAY1[ANY])
- */
-%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY,
-           fragment="NumPy_Macros")
-  (DATA_TYPE IN_ARRAY1[ANY])
-{
-  $1 = is_array($input) || PySequence_Check($input);
-}
-%typemap(in,
-         fragment="NumPy_Fragments")
-  (DATA_TYPE IN_ARRAY1[ANY])
-  (PyArrayObject* array=NULL, int is_new_object=0)
-{
-  npy_intp size[1] = { $1_dim0 };
-  array = obj_to_array_contiguous_allow_conversion($input,
-                                                   DATA_TYPECODE,
-                                                   &is_new_object);
-  if (!array || !require_dimensions(array, 1) ||
-      !require_size(array, size, 1)) SWIG_fail;
-  $1 = ($1_ltype) array_data(array);
-}
-%typemap(freearg)
-  (DATA_TYPE IN_ARRAY1[ANY])
-{
-  if (is_new_object$argnum && array$argnum)
-    { Py_DECREF(array$argnum); }
-}
-
-/* Typemap suite for (DATA_TYPE* IN_ARRAY1, DIM_TYPE DIM1)
- */
-%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY,
-           fragment="NumPy_Macros")
-  (DATA_TYPE* IN_ARRAY1, DIM_TYPE DIM1)
-{
-  $1 = is_array($input) || PySequence_Check($input);
-}
-%typemap(in,
-         fragment="NumPy_Fragments")
-  (DATA_TYPE* IN_ARRAY1, DIM_TYPE DIM1)
-  (PyArrayObject* array=NULL, int is_new_object=0)
-{
-  npy_intp size[1] = { -1 };
-  array = obj_to_array_contiguous_allow_conversion($input,
-                                                   DATA_TYPECODE,
-                                                   &is_new_object);
-  if (!array || !require_dimensions(array, 1) ||
-      !require_size(array, size, 1)) SWIG_fail;
-  $1 = (DATA_TYPE*) array_data(array);
-  $2 = (DIM_TYPE) array_size(array,0);
-}
-%typemap(freearg)
-  (DATA_TYPE* IN_ARRAY1, DIM_TYPE DIM1)
-{
-  if (is_new_object$argnum && array$argnum)
-    { Py_DECREF(array$argnum); }
-}
-
-/* Typemap suite for (DIM_TYPE DIM1, DATA_TYPE* IN_ARRAY1)
- */
-%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY,
-           fragment="NumPy_Macros")
-  (DIM_TYPE DIM1, DATA_TYPE* IN_ARRAY1)
-{
-  $1 = is_array($input) || PySequence_Check($input);
-}
-%typemap(in,
-         fragment="NumPy_Fragments")
-  (DIM_TYPE DIM1, DATA_TYPE* IN_ARRAY1)
-  (PyArrayObject* array=NULL, int is_new_object=0)
-{
-  npy_intp size[1] = {-1};
-  array = obj_to_array_contiguous_allow_conversion($input,
-                                                   DATA_TYPECODE,
-                                                   &is_new_object);
-  if (!array || !require_dimensions(array, 1) ||
-      !require_size(array, size, 1)) SWIG_fail;
-  $1 = (DIM_TYPE) array_size(array,0);
-  $2 = (DATA_TYPE*) array_data(array);
-}
-%typemap(freearg)
-  (DIM_TYPE DIM1, DATA_TYPE* IN_ARRAY1)
-{
-  if (is_new_object$argnum && array$argnum)
-    { Py_DECREF(array$argnum); }
-}
-
-/* Typemap suite for (DATA_TYPE IN_ARRAY2[ANY][ANY])
- */
-%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY,
-           fragment="NumPy_Macros")
-  (DATA_TYPE IN_ARRAY2[ANY][ANY])
-{
-  $1 = is_array($input) || PySequence_Check($input);
-}
-%typemap(in,
-         fragment="NumPy_Fragments")
-  (DATA_TYPE IN_ARRAY2[ANY][ANY])
-  (PyArrayObject* array=NULL, int is_new_object=0)
-{
-  npy_intp size[2] = { $1_dim0, $1_dim1 };
-  array = obj_to_array_contiguous_allow_conversion($input,
-                                                   DATA_TYPECODE,
-                                                   &is_new_object);
-  if (!array || !require_dimensions(array, 2) ||
-      !require_size(array, size, 2)) SWIG_fail;
-  $1 = ($1_ltype) array_data(array);
-}
-%typemap(freearg)
-  (DATA_TYPE IN_ARRAY2[ANY][ANY])
-{
-  if (is_new_object$argnum && array$argnum)
-    { Py_DECREF(array$argnum); }
-}
-
-/* Typemap suite for (DATA_TYPE* IN_ARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2)
- */
-%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY,
-           fragment="NumPy_Macros")
-  (DATA_TYPE* IN_ARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2)
-{
-  $1 = is_array($input) || PySequence_Check($input);
-}
-%typemap(in,
-         fragment="NumPy_Fragments")
-  (DATA_TYPE* IN_ARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2)
-  (PyArrayObject* array=NULL, int is_new_object=0)
-{
-  npy_intp size[2] = { -1, -1 };
-  array = obj_to_array_contiguous_allow_conversion($input, DATA_TYPECODE,
-                                                   &is_new_object);
-  if (!array || !require_dimensions(array, 2) ||
-      !require_size(array, size, 2)) SWIG_fail;
-  $1 = (DATA_TYPE*) array_data(array);
-  $2 = (DIM_TYPE) array_size(array,0);
-  $3 = (DIM_TYPE) array_size(array,1);
-}
-%typemap(freearg)
-  (DATA_TYPE* IN_ARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2)
-{
-  if (is_new_object$argnum && array$argnum)
-    { Py_DECREF(array$argnum); }
-}
-
-/* Typemap suite for (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* IN_ARRAY2)
- */
-%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY,
-           fragment="NumPy_Macros")
-  (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* IN_ARRAY2)
-{
-  $1 = is_array($input) || PySequence_Check($input);
-}
-%typemap(in,
-         fragment="NumPy_Fragments")
-  (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* IN_ARRAY2)
-  (PyArrayObject* array=NULL, int is_new_object=0)
-{
-  npy_intp size[2] = { -1, -1 };
-  array = obj_to_array_contiguous_allow_conversion($input,
-                                                   DATA_TYPECODE,
-                                                   &is_new_object);
-  if (!array || !require_dimensions(array, 2) ||
-      !require_size(array, size, 2)) SWIG_fail;
-  $1 = (DIM_TYPE) array_size(array,0);
-  $2 = (DIM_TYPE) array_size(array,1);
-  $3 = (DATA_TYPE*) array_data(array);
-}
-%typemap(freearg)
-  (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* IN_ARRAY2)
-{
-  if (is_new_object$argnum && array$argnum)
-    { Py_DECREF(array$argnum); }
-}
-
-/* Typemap suite for (DATA_TYPE* IN_FARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2)
- */
-%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY,
-           fragment="NumPy_Macros")
-  (DATA_TYPE* IN_FARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2)
-{
-  $1 = is_array($input) || PySequence_Check($input);
-}
-%typemap(in,
-         fragment="NumPy_Fragments")
-  (DATA_TYPE* IN_FARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2)
-  (PyArrayObject* array=NULL, int is_new_object=0)
-{
-  npy_intp size[2] = { -1, -1 };
-  array = obj_to_array_fortran_allow_conversion($input,
-                                                DATA_TYPECODE,
-                                                &is_new_object);
-  if (!array || !require_dimensions(array, 2) ||
-      !require_size(array, size, 2) || !require_fortran(array)) SWIG_fail;
-  $1 = (DATA_TYPE*) array_data(array);
-  $2 = (DIM_TYPE) array_size(array,0);
-  $3 = (DIM_TYPE) array_size(array,1);
-}
-%typemap(freearg)
-  (DATA_TYPE* IN_FARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2)
-{
-  if (is_new_object$argnum && array$argnum)
-    { Py_DECREF(array$argnum); }
-}
-
-/* Typemap suite for (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* IN_FARRAY2)
- */
-%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY,
-           fragment="NumPy_Macros")
-  (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* IN_FARRAY2)
-{
-  $1 = is_array($input) || PySequence_Check($input);
-}
-%typemap(in,
-         fragment="NumPy_Fragments")
-  (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* IN_FARRAY2)
-  (PyArrayObject* array=NULL, int is_new_object=0)
-{
-  npy_intp size[2] = { -1, -1 };
-  array = obj_to_array_contiguous_allow_conversion($input,
-                                                   DATA_TYPECODE,
-                                                   &is_new_object);
-  if (!array || !require_dimensions(array, 2) ||
-      !require_size(array, size, 2) || !require_fortran(array)) SWIG_fail;
-  $1 = (DIM_TYPE) array_size(array,0);
-  $2 = (DIM_TYPE) array_size(array,1);
-  $3 = (DATA_TYPE*) array_data(array);
-}
-%typemap(freearg)
-  (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* IN_FARRAY2)
-{
-  if (is_new_object$argnum && array$argnum)
-    { Py_DECREF(array$argnum); }
-}
-
-/* Typemap suite for (DATA_TYPE IN_ARRAY3[ANY][ANY][ANY])
- */
-%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY,
-           fragment="NumPy_Macros")
-  (DATA_TYPE IN_ARRAY3[ANY][ANY][ANY])
-{
-  $1 = is_array($input) || PySequence_Check($input);
-}
-%typemap(in,
-         fragment="NumPy_Fragments")
-  (DATA_TYPE IN_ARRAY3[ANY][ANY][ANY])
-  (PyArrayObject* array=NULL, int is_new_object=0)
-{
-  npy_intp size[3] = { $1_dim0, $1_dim1, $1_dim2 };
-  array = obj_to_array_contiguous_allow_conversion($input,
-                                                   DATA_TYPECODE,
-                                                   &is_new_object);
-  if (!array || !require_dimensions(array, 3) ||
-      !require_size(array, size, 3)) SWIG_fail;
-  $1 = ($1_ltype) array_data(array);
-}
-%typemap(freearg)
-  (DATA_TYPE IN_ARRAY3[ANY][ANY][ANY])
-{
-  if (is_new_object$argnum && array$argnum)
-    { Py_DECREF(array$argnum); }
-}
-
-/* Typemap suite for (DATA_TYPE* IN_ARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2,
- *                    DIM_TYPE DIM3)
- */
-%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY,
-           fragment="NumPy_Macros")
-  (DATA_TYPE* IN_ARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3)
-{
-  $1 = is_array($input) || PySequence_Check($input);
-}
-%typemap(in,
-         fragment="NumPy_Fragments")
-  (DATA_TYPE* IN_ARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3)
-  (PyArrayObject* array=NULL, int is_new_object=0)
-{
-  npy_intp size[3] = { -1, -1, -1 };
-  array = obj_to_array_contiguous_allow_conversion($input, DATA_TYPECODE,
-                                                   &is_new_object);
-  if (!array || !require_dimensions(array, 3) ||
-      !require_size(array, size, 3)) SWIG_fail;
-  $1 = (DATA_TYPE*) array_data(array);
-  $2 = (DIM_TYPE) array_size(array,0);
-  $3 = (DIM_TYPE) array_size(array,1);
-  $4 = (DIM_TYPE) array_size(array,2);
-}
-%typemap(freearg)
-  (DATA_TYPE* IN_ARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3)
-{
-  if (is_new_object$argnum && array$argnum)
-    { Py_DECREF(array$argnum); }
-}
-
-/* Typemap suite for (DATA_TYPE** IN_ARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2,
- *                    DIM_TYPE DIM3)
- */
-%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY,
-           fragment="NumPy_Macros")
-  (DATA_TYPE** IN_ARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3)
-{
-  /* for now, only concerned with lists */
-  $1 = PySequence_Check($input);
-}
-%typemap(in,
-         fragment="NumPy_Fragments")
-  (DATA_TYPE** IN_ARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3)
-  (DATA_TYPE** array=NULL, PyArrayObject** object_array=NULL, int* is_new_object_array=NULL)
-{
-  npy_intp size[2] = { -1, -1 };
-  PyArrayObject* temp_array;
-  Py_ssize_t i;
-  int is_new_object = 0;
-
-  /* length of the list */
-  $2 = PyList_Size($input);
-
-  /* the arrays */
-  array = (DATA_TYPE **)malloc($2*sizeof(DATA_TYPE *));
-  object_array = (PyArrayObject **)calloc($2,sizeof(PyArrayObject *));
-  is_new_object_array = (int *)calloc($2,sizeof(int));
-
-  if (array == NULL || object_array == NULL || is_new_object_array == NULL)
-  {
-    SWIG_fail;
-  }
-
-  for (i=0; i<$2; i++)
-  {
-    temp_array = obj_to_array_contiguous_allow_conversion(PySequence_GetItem($input,i), DATA_TYPECODE, &is_new_object);
-
-    /* the new array must be stored so that it can be destroyed in freearg */
-    object_array[i] = temp_array;
-    is_new_object_array[i] = is_new_object;
-
-    if (!temp_array || !require_dimensions(temp_array, 2)) SWIG_fail;
-
-    /* store the size of the first array in the list, then use that for comparison. */
-    if (i == 0)
-    {
-      size[0] = array_size(temp_array,0);
-      size[1] = array_size(temp_array,1);
-    }
-
-    if (!require_size(temp_array, size, 2)) SWIG_fail;
-
-    array[i] = (DATA_TYPE*) array_data(temp_array);
-  }
-
-  $1 = (DATA_TYPE**) array;
-  $3 = (DIM_TYPE) size[0];
-  $4 = (DIM_TYPE) size[1];
-}
-%typemap(freearg)
-  (DATA_TYPE** IN_ARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3)
-{
-  Py_ssize_t i;
-
-  if (array$argnum!=NULL) free(array$argnum);
-
-  /*freeing the individual arrays if needed */
-  if (object_array$argnum!=NULL)
-  {
-    if (is_new_object_array$argnum!=NULL)
-    {
-      for (i=0; i<$2; i++)
-      {
-        if (object_array$argnum[i] != NULL && is_new_object_array$argnum[i])
-        { Py_DECREF(object_array$argnum[i]); }
-      }
-      free(is_new_object_array$argnum);
-    }
-    free(object_array$argnum);
-  }
-}
-
-/* Typemap suite for (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3,
- *                    DATA_TYPE* IN_ARRAY3)
- */
-%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY,
-           fragment="NumPy_Macros")
-  (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* IN_ARRAY3)
-{
-  $1 = is_array($input) || PySequence_Check($input);
-}
-%typemap(in,
-         fragment="NumPy_Fragments")
-  (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* IN_ARRAY3)
-  (PyArrayObject* array=NULL, int is_new_object=0)
-{
-  npy_intp size[3] = { -1, -1, -1 };
-  array = obj_to_array_contiguous_allow_conversion($input, DATA_TYPECODE,
-                                                   &is_new_object);
-  if (!array || !require_dimensions(array, 3) ||
-      !require_size(array, size, 3)) SWIG_fail;
-  $1 = (DIM_TYPE) array_size(array,0);
-  $2 = (DIM_TYPE) array_size(array,1);
-  $3 = (DIM_TYPE) array_size(array,2);
-  $4 = (DATA_TYPE*) array_data(array);
-}
-%typemap(freearg)
-  (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* IN_ARRAY3)
-{
-  if (is_new_object$argnum && array$argnum)
-    { Py_DECREF(array$argnum); }
-}
-
-/* Typemap suite for (DATA_TYPE* IN_FARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2,
- *                    DIM_TYPE DIM3)
- */
-%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY,
-           fragment="NumPy_Macros")
-  (DATA_TYPE* IN_FARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3)
-{
-  $1 = is_array($input) || PySequence_Check($input);
-}
-%typemap(in,
-         fragment="NumPy_Fragments")
-  (DATA_TYPE* IN_FARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3)
-  (PyArrayObject* array=NULL, int is_new_object=0)
-{
-  npy_intp size[3] = { -1, -1, -1 };
-  array = obj_to_array_fortran_allow_conversion($input, DATA_TYPECODE,
-                                                &is_new_object);
-  if (!array || !require_dimensions(array, 3) ||
-      !require_size(array, size, 3) | !require_fortran(array)) SWIG_fail;
-  $1 = (DATA_TYPE*) array_data(array);
-  $2 = (DIM_TYPE) array_size(array,0);
-  $3 = (DIM_TYPE) array_size(array,1);
-  $4 = (DIM_TYPE) array_size(array,2);
-}
-%typemap(freearg)
-  (DATA_TYPE* IN_FARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3)
-{
-  if (is_new_object$argnum && array$argnum)
-    { Py_DECREF(array$argnum); }
-}
-
-/* Typemap suite for (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3,
- *                    DATA_TYPE* IN_FARRAY3)
- */
-%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY,
-           fragment="NumPy_Macros")
-  (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* IN_FARRAY3)
-{
-  $1 = is_array($input) || PySequence_Check($input);
-}
-%typemap(in,
-         fragment="NumPy_Fragments")
-  (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* IN_FARRAY3)
-  (PyArrayObject* array=NULL, int is_new_object=0)
-{
-  npy_intp size[3] = { -1, -1, -1 };
-  array = obj_to_array_contiguous_allow_conversion($input,
-                                                   DATA_TYPECODE,
-                                                   &is_new_object);
-  if (!array || !require_dimensions(array, 3) ||
-      !require_size(array, size, 3) || !require_fortran(array)) SWIG_fail;
-  $1 = (DIM_TYPE) array_size(array,0);
-  $2 = (DIM_TYPE) array_size(array,1);
-  $3 = (DIM_TYPE) array_size(array,2);
-  $4 = (DATA_TYPE*) array_data(array);
-}
-%typemap(freearg)
-  (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* IN_FARRAY3)
-{
-  if (is_new_object$argnum && array$argnum)
-    { Py_DECREF(array$argnum); }
-}
-
-/* Typemap suite for (DATA_TYPE IN_ARRAY4[ANY][ANY][ANY][ANY])
- */
-%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY,
-           fragment="NumPy_Macros")
-  (DATA_TYPE IN_ARRAY4[ANY][ANY][ANY][ANY])
-{
-  $1 = is_array($input) || PySequence_Check($input);
-}
-%typemap(in,
-         fragment="NumPy_Fragments")
-  (DATA_TYPE IN_ARRAY4[ANY][ANY][ANY][ANY])
-  (PyArrayObject* array=NULL, int is_new_object=0)
-{
-  npy_intp size[4] = { $1_dim0, $1_dim1, $1_dim2 , $1_dim3};
-  array = obj_to_array_contiguous_allow_conversion($input, DATA_TYPECODE,
-                                                   &is_new_object);
-  if (!array || !require_dimensions(array, 4) ||
-      !require_size(array, size, 4)) SWIG_fail;
-  $1 = ($1_ltype) array_data(array);
-}
-%typemap(freearg)
-  (DATA_TYPE IN_ARRAY4[ANY][ANY][ANY][ANY])
-{
-  if (is_new_object$argnum && array$argnum)
-    { Py_DECREF(array$argnum); }
-}
-
-/* Typemap suite for (DATA_TYPE* IN_ARRAY4, DIM_TYPE DIM1, DIM_TYPE DIM2,
- *                    DIM_TYPE DIM3, DIM_TYPE DIM4)
- */
-%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY,
-           fragment="NumPy_Macros")
-  (DATA_TYPE* IN_ARRAY4, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DIM_TYPE DIM4)
-{
-  $1 = is_array($input) || PySequence_Check($input);
-}
-%typemap(in,
-         fragment="NumPy_Fragments")
-  (DATA_TYPE* IN_ARRAY4, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DIM_TYPE DIM4)
-  (PyArrayObject* array=NULL, int is_new_object=0)
-{
-  npy_intp size[4] = { -1, -1, -1, -1 };
-  array = obj_to_array_contiguous_allow_conversion($input, DATA_TYPECODE,
-                                                   &is_new_object);
-  if (!array || !require_dimensions(array, 4) ||
-      !require_size(array, size, 4)) SWIG_fail;
-  $1 = (DATA_TYPE*) array_data(array);
-  $2 = (DIM_TYPE) array_size(array,0);
-  $3 = (DIM_TYPE) array_size(array,1);
-  $4 = (DIM_TYPE) array_size(array,2);
-  $5 = (DIM_TYPE) array_size(array,3);
-}
-%typemap(freearg)
-  (DATA_TYPE* IN_ARRAY4, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DIM_TYPE DIM4)
-{
-  if (is_new_object$argnum && array$argnum)
-    { Py_DECREF(array$argnum); }
-}
-
-/* Typemap suite for (DATA_TYPE** IN_ARRAY4, DIM_TYPE DIM1, DIM_TYPE DIM2,
- *                    DIM_TYPE DIM3, DIM_TYPE DIM4)
- */
-%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY,
-           fragment="NumPy_Macros")
-  (DATA_TYPE** IN_ARRAY4, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DIM_TYPE DIM4)
-{
-  /* for now, only concerned with lists */
-  $1 = PySequence_Check($input);
-}
-%typemap(in,
-         fragment="NumPy_Fragments")
-  (DATA_TYPE** IN_ARRAY4, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DIM_TYPE DIM4)
-  (DATA_TYPE** array=NULL, PyArrayObject** object_array=NULL, int* is_new_object_array=NULL)
-{
-  npy_intp size[3] = { -1, -1, -1 };
-  PyArrayObject* temp_array;
-  Py_ssize_t i;
-  int is_new_object = 0;
-
-  /* length of the list */
-  $2 = PyList_Size($input);
-
-  /* the arrays */
-  array = (DATA_TYPE **)malloc($2*sizeof(DATA_TYPE *));
-  object_array = (PyArrayObject **)calloc($2,sizeof(PyArrayObject *));
-  is_new_object_array = (int *)calloc($2,sizeof(int));
-
-  if (array == NULL || object_array == NULL || is_new_object_array == NULL)
-  {
-    SWIG_fail;
-  }
-
-  for (i=0; i<$2; i++)
-  {
-    temp_array = obj_to_array_contiguous_allow_conversion(PySequence_GetItem($input,i), DATA_TYPECODE, &is_new_object);
-
-    /* the new array must be stored so that it can be destroyed in freearg */
-    object_array[i] = temp_array;
-    is_new_object_array[i] = is_new_object;
-
-    if (!temp_array || !require_dimensions(temp_array, 3)) SWIG_fail;
-
-    /* store the size of the first array in the list, then use that for comparison. */
-    if (i == 0)
-    {
-      size[0] = array_size(temp_array,0);
-      size[1] = array_size(temp_array,1);
-      size[2] = array_size(temp_array,2);
-    }
-
-    if (!require_size(temp_array, size, 3)) SWIG_fail;
-
-    array[i] = (DATA_TYPE*) array_data(temp_array);
-  }
-
-  $1 = (DATA_TYPE**) array;
-  $3 = (DIM_TYPE) size[0];
-  $4 = (DIM_TYPE) size[1];
-  $5 = (DIM_TYPE) size[2];
-}
-%typemap(freearg)
-  (DATA_TYPE** IN_ARRAY4, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DIM_TYPE DIM4)
-{
-  Py_ssize_t i;
-
-  if (array$argnum!=NULL) free(array$argnum);
-
-  /*freeing the individual arrays if needed */
-  if (object_array$argnum!=NULL)
-  {
-    if (is_new_object_array$argnum!=NULL)
-    {
-      for (i=0; i<$2; i++)
-      {
-        if (object_array$argnum[i] != NULL && is_new_object_array$argnum[i])
-        { Py_DECREF(object_array$argnum[i]); }
-      }
-      free(is_new_object_array$argnum);
-    }
-    free(object_array$argnum);
-  }
-}
-
-/* Typemap suite for (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DIM_TYPE DIM4,
- *                    DATA_TYPE* IN_ARRAY4)
- */
-%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY,
-           fragment="NumPy_Macros")
-  (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DIM_TYPE DIM4, DATA_TYPE* IN_ARRAY4)
-{
-  $1 = is_array($input) || PySequence_Check($input);
-}
-%typemap(in,
-         fragment="NumPy_Fragments")
-  (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DIM_TYPE DIM4, DATA_TYPE* IN_ARRAY4)
-  (PyArrayObject* array=NULL, int is_new_object=0)
-{
-  npy_intp size[4] = { -1, -1, -1 , -1};
-  array = obj_to_array_contiguous_allow_conversion($input, DATA_TYPECODE,
-                                                   &is_new_object);
-  if (!array || !require_dimensions(array, 4) ||
-      !require_size(array, size, 4)) SWIG_fail;
-  $1 = (DIM_TYPE) array_size(array,0);
-  $2 = (DIM_TYPE) array_size(array,1);
-  $3 = (DIM_TYPE) array_size(array,2);
-  $4 = (DIM_TYPE) array_size(array,3);
-  $5 = (DATA_TYPE*) array_data(array);
-}
-%typemap(freearg)
-  (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DIM_TYPE DIM4, DATA_TYPE* IN_ARRAY4)
-{
-  if (is_new_object$argnum && array$argnum)
-    { Py_DECREF(array$argnum); }
-}
-
-/* Typemap suite for (DATA_TYPE* IN_FARRAY4, DIM_TYPE DIM1, DIM_TYPE DIM2,
- *                    DIM_TYPE DIM3, DIM_TYPE DIM4)
- */
-%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY,
-           fragment="NumPy_Macros")
-  (DATA_TYPE* IN_FARRAY4, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DIM_TYPE DIM4)
-{
-  $1 = is_array($input) || PySequence_Check($input);
-}
-%typemap(in,
-         fragment="NumPy_Fragments")
-  (DATA_TYPE* IN_FARRAY4, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DIM_TYPE DIM4)
-  (PyArrayObject* array=NULL, int is_new_object=0)
-{
-  npy_intp size[4] = { -1, -1, -1, -1 };
-  array = obj_to_array_fortran_allow_conversion($input, DATA_TYPECODE,
-                                                &is_new_object);
-  if (!array || !require_dimensions(array, 4) ||
-      !require_size(array, size, 4) | !require_fortran(array)) SWIG_fail;
-  $1 = (DATA_TYPE*) array_data(array);
-  $2 = (DIM_TYPE) array_size(array,0);
-  $3 = (DIM_TYPE) array_size(array,1);
-  $4 = (DIM_TYPE) array_size(array,2);
-  $5 = (DIM_TYPE) array_size(array,3);
-}
-%typemap(freearg)
-  (DATA_TYPE* IN_FARRAY4, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DIM_TYPE DIM4)
-{
-  if (is_new_object$argnum && array$argnum)
-    { Py_DECREF(array$argnum); }
-}
-
-/* Typemap suite for (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DIM_TYPE DIM4,
- *                    DATA_TYPE* IN_FARRAY4)
- */
-%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY,
-           fragment="NumPy_Macros")
-  (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DIM_TYPE DIM4, DATA_TYPE* IN_FARRAY4)
-{
-  $1 = is_array($input) || PySequence_Check($input);
-}
-%typemap(in,
-         fragment="NumPy_Fragments")
-  (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DIM_TYPE DIM4, DATA_TYPE* IN_FARRAY4)
-  (PyArrayObject* array=NULL, int is_new_object=0)
-{
-  npy_intp size[4] = { -1, -1, -1 , -1 };
-  array = obj_to_array_contiguous_allow_conversion($input, DATA_TYPECODE,
-                                                   &is_new_object);
-  if (!array || !require_dimensions(array, 4) ||
-      !require_size(array, size, 4) || !require_fortran(array)) SWIG_fail;
-  $1 = (DIM_TYPE) array_size(array,0);
-  $2 = (DIM_TYPE) array_size(array,1);
-  $3 = (DIM_TYPE) array_size(array,2);
-  $4 = (DIM_TYPE) array_size(array,3);
-  $5 = (DATA_TYPE*) array_data(array);
-}
-%typemap(freearg)
-  (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DIM_TYPE DIM4, DATA_TYPE* IN_FARRAY4)
-{
-  if (is_new_object$argnum && array$argnum)
-    { Py_DECREF(array$argnum); }
-}
-
-/***************************/
-/* In-Place Array Typemaps */
-/***************************/
-
-/* Typemap suite for (DATA_TYPE INPLACE_ARRAY1[ANY])
- */
-%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY,
-           fragment="NumPy_Macros")
-  (DATA_TYPE INPLACE_ARRAY1[ANY])
-{
-  $1 = is_array($input) && PyArray_EquivTypenums(array_type($input),
-                                                 DATA_TYPECODE);
-}
-%typemap(in,
-         fragment="NumPy_Fragments")
-  (DATA_TYPE INPLACE_ARRAY1[ANY])
-  (PyArrayObject* array=NULL)
-{
-  npy_intp size[1] = { $1_dim0 };
-  array = obj_to_array_no_conversion($input, DATA_TYPECODE);
-  if (!array || !require_dimensions(array,1) || !require_size(array, size, 1) ||
-      !require_contiguous(array) || !require_native(array)) SWIG_fail;
-  $1 = ($1_ltype) array_data(array);
-}
-
-/* Typemap suite for (DATA_TYPE* INPLACE_ARRAY1, DIM_TYPE DIM1)
- */
-%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY,
-           fragment="NumPy_Macros")
-  (DATA_TYPE* INPLACE_ARRAY1, DIM_TYPE DIM1)
-{
-  $1 = is_array($input) && PyArray_EquivTypenums(array_type($input),
-                                                 DATA_TYPECODE);
-}
-%typemap(in,
-         fragment="NumPy_Fragments")
-  (DATA_TYPE* INPLACE_ARRAY1, DIM_TYPE DIM1)
-  (PyArrayObject* array=NULL, int i=1)
-{
-  array = obj_to_array_no_conversion($input, DATA_TYPECODE);
-  if (!array || !require_dimensions(array,1) || !require_contiguous(array)
-      || !require_native(array)) SWIG_fail;
-  $1 = (DATA_TYPE*) array_data(array);
-  $2 = 1;
-  for (i=0; i < array_numdims(array); ++i) $2 *= array_size(array,i);
-}
-
-/* Typemap suite for (DIM_TYPE DIM1, DATA_TYPE* INPLACE_ARRAY1)
- */
-%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY,
-           fragment="NumPy_Macros")
-  (DIM_TYPE DIM1, DATA_TYPE* INPLACE_ARRAY1)
-{
-  $1 = is_array($input) && PyArray_EquivTypenums(array_type($input),
-                                                 DATA_TYPECODE);
-}
-%typemap(in,
-         fragment="NumPy_Fragments")
-  (DIM_TYPE DIM1, DATA_TYPE* INPLACE_ARRAY1)
-  (PyArrayObject* array=NULL, int i=0)
-{
-  array = obj_to_array_no_conversion($input, DATA_TYPECODE);
-  if (!array || !require_dimensions(array,1) || !require_contiguous(array)
-      || !require_native(array)) SWIG_fail;
-  $1 = 1;
-  for (i=0; i < array_numdims(array); ++i) $1 *= array_size(array,i);
-  $2 = (DATA_TYPE*) array_data(array);
-}
-
-/* Typemap suite for (DATA_TYPE INPLACE_ARRAY2[ANY][ANY])
- */
-%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY,
-           fragment="NumPy_Macros")
-  (DATA_TYPE INPLACE_ARRAY2[ANY][ANY])
-{
-  $1 = is_array($input) && PyArray_EquivTypenums(array_type($input),
-                                                 DATA_TYPECODE);
-}
-%typemap(in,
-         fragment="NumPy_Fragments")
-  (DATA_TYPE INPLACE_ARRAY2[ANY][ANY])
-  (PyArrayObject* array=NULL)
-{
-  npy_intp size[2] = { $1_dim0, $1_dim1 };
-  array = obj_to_array_no_conversion($input, DATA_TYPECODE);
-  if (!array || !require_dimensions(array,2) || !require_size(array, size, 2) ||
-      !require_contiguous(array) || !require_native(array)) SWIG_fail;
-  $1 = ($1_ltype) array_data(array);
-}
-
-/* Typemap suite for (DATA_TYPE* INPLACE_ARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2)
- */
-%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY,
-           fragment="NumPy_Macros")
-  (DATA_TYPE* INPLACE_ARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2)
-{
-  $1 = is_array($input) && PyArray_EquivTypenums(array_type($input),
-                                                 DATA_TYPECODE);
-}
-%typemap(in,
-         fragment="NumPy_Fragments")
-  (DATA_TYPE* INPLACE_ARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2)
-  (PyArrayObject* array=NULL)
-{
-  array = obj_to_array_no_conversion($input, DATA_TYPECODE);
-  if (!array || !require_dimensions(array,2) || !require_contiguous(array)
-      || !require_native(array)) SWIG_fail;
-  $1 = (DATA_TYPE*) array_data(array);
-  $2 = (DIM_TYPE) array_size(array,0);
-  $3 = (DIM_TYPE) array_size(array,1);
-}
-
-/* Typemap suite for (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* INPLACE_ARRAY2)
- */
-%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY,
-           fragment="NumPy_Macros")
-  (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* INPLACE_ARRAY2)
-{
-  $1 = is_array($input) && PyArray_EquivTypenums(array_type($input),
-                                                 DATA_TYPECODE);
-}
-%typemap(in,
-         fragment="NumPy_Fragments")
-  (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* INPLACE_ARRAY2)
-  (PyArrayObject* array=NULL)
-{
-  array = obj_to_array_no_conversion($input, DATA_TYPECODE);
-  if (!array || !require_dimensions(array,2) || !require_contiguous(array) ||
-      !require_native(array)) SWIG_fail;
-  $1 = (DIM_TYPE) array_size(array,0);
-  $2 = (DIM_TYPE) array_size(array,1);
-  $3 = (DATA_TYPE*) array_data(array);
-}
-
-/* Typemap suite for (DATA_TYPE* INPLACE_FARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2)
- */
-%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY,
-           fragment="NumPy_Macros")
-  (DATA_TYPE* INPLACE_FARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2)
-{
-  $1 = is_array($input) && PyArray_EquivTypenums(array_type($input),
-                                                 DATA_TYPECODE);
-}
-%typemap(in,
-         fragment="NumPy_Fragments")
-  (DATA_TYPE* INPLACE_FARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2)
-  (PyArrayObject* array=NULL)
-{
-  array = obj_to_array_no_conversion($input, DATA_TYPECODE);
-  if (!array || !require_dimensions(array,2) || !require_contiguous(array)
-      || !require_native(array) || !require_fortran(array)) SWIG_fail;
-  $1 = (DATA_TYPE*) array_data(array);
-  $2 = (DIM_TYPE) array_size(array,0);
-  $3 = (DIM_TYPE) array_size(array,1);
-}
-
-/* Typemap suite for (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* INPLACE_FARRAY2)
- */
-%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY,
-           fragment="NumPy_Macros")
-  (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* INPLACE_FARRAY2)
-{
-  $1 = is_array($input) && PyArray_EquivTypenums(array_type($input),
-                                                 DATA_TYPECODE);
-}
-%typemap(in,
-         fragment="NumPy_Fragments")
-  (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* INPLACE_FARRAY2)
-  (PyArrayObject* array=NULL)
-{
-  array = obj_to_array_no_conversion($input, DATA_TYPECODE);
-  if (!array || !require_dimensions(array,2) || !require_contiguous(array) ||
-      !require_native(array) || !require_fortran(array)) SWIG_fail;
-  $1 = (DIM_TYPE) array_size(array,0);
-  $2 = (DIM_TYPE) array_size(array,1);
-  $3 = (DATA_TYPE*) array_data(array);
-}
-
-/* Typemap suite for (DATA_TYPE INPLACE_ARRAY3[ANY][ANY][ANY])
- */
-%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY,
-           fragment="NumPy_Macros")
-  (DATA_TYPE INPLACE_ARRAY3[ANY][ANY][ANY])
-{
-  $1 = is_array($input) && PyArray_EquivTypenums(array_type($input),
-                                                 DATA_TYPECODE);
-}
-%typemap(in,
-         fragment="NumPy_Fragments")
-  (DATA_TYPE INPLACE_ARRAY3[ANY][ANY][ANY])
-  (PyArrayObject* array=NULL)
-{
-  npy_intp size[3] = { $1_dim0, $1_dim1, $1_dim2 };
-  array = obj_to_array_no_conversion($input, DATA_TYPECODE);
-  if (!array || !require_dimensions(array,3) || !require_size(array, size, 3) ||
-      !require_contiguous(array) || !require_native(array)) SWIG_fail;
-  $1 = ($1_ltype) array_data(array);
-}
-
-/* Typemap suite for (DATA_TYPE* INPLACE_ARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2,
- *                    DIM_TYPE DIM3)
- */
-%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY,
-           fragment="NumPy_Macros")
-  (DATA_TYPE* INPLACE_ARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3)
-{
-  $1 = is_array($input) && PyArray_EquivTypenums(array_type($input),
-                                                 DATA_TYPECODE);
-}
-%typemap(in,
-         fragment="NumPy_Fragments")
-  (DATA_TYPE* INPLACE_ARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3)
-  (PyArrayObject* array=NULL)
-{
-  array = obj_to_array_no_conversion($input, DATA_TYPECODE);
-  if (!array || !require_dimensions(array,3) || !require_contiguous(array) ||
-      !require_native(array)) SWIG_fail;
-  $1 = (DATA_TYPE*) array_data(array);
-  $2 = (DIM_TYPE) array_size(array,0);
-  $3 = (DIM_TYPE) array_size(array,1);
-  $4 = (DIM_TYPE) array_size(array,2);
-}
-
-/* Typemap suite for (DATA_TYPE** INPLACE_ARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2,
- *                    DIM_TYPE DIM3)
- */
-%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY,
-           fragment="NumPy_Macros")
-  (DATA_TYPE** INPLACE_ARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3)
-{
-  $1 = PySequence_Check($input);
-}
-%typemap(in,
-         fragment="NumPy_Fragments")
-  (DATA_TYPE** INPLACE_ARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3)
-  (DATA_TYPE** array=NULL, PyArrayObject** object_array=NULL)
-{
-  npy_intp size[2] = { -1, -1 };
-  PyArrayObject* temp_array;
-  Py_ssize_t i;
-
-  /* length of the list */
-  $2 = PyList_Size($input);
-
-  /* the arrays */
-  array = (DATA_TYPE **)malloc($2*sizeof(DATA_TYPE *));
-  object_array = (PyArrayObject **)calloc($2,sizeof(PyArrayObject *));
-
-  if (array == NULL || object_array == NULL)
-  {
-    SWIG_fail;
-  }
-
-  for (i=0; i<$2; i++)
-  {
-    temp_array = obj_to_array_no_conversion(PySequence_GetItem($input,i), DATA_TYPECODE);
-
-    /* the new array must be stored so that it can be destroyed in freearg */
-    object_array[i] = temp_array;
-
-    if ( !temp_array || !require_dimensions(temp_array, 2) ||
-      !require_contiguous(temp_array) ||
-      !require_native(temp_array) ||
-      !PyArray_EquivTypenums(array_type(temp_array), DATA_TYPECODE)
-    ) SWIG_fail;
-
-    /* store the size of the first array in the list, then use that for comparison. */
-    if (i == 0)
-    {
-      size[0] = array_size(temp_array,0);
-      size[1] = array_size(temp_array,1);
-    }
-
-    if (!require_size(temp_array, size, 2)) SWIG_fail;
-
-    array[i] = (DATA_TYPE*) array_data(temp_array);
-  }
-
-  $1 = (DATA_TYPE**) array;
-  $3 = (DIM_TYPE) size[0];
-  $4 = (DIM_TYPE) size[1];
-}
-%typemap(freearg)
-  (DATA_TYPE** INPLACE_ARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3)
-{
-  if (array$argnum!=NULL) free(array$argnum);
-  if (object_array$argnum!=NULL) free(object_array$argnum);
-}
-
-/* Typemap suite for (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3,
- *                    DATA_TYPE* INPLACE_ARRAY3)
- */
-%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY,
-           fragment="NumPy_Macros")
-  (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* INPLACE_ARRAY3)
-{
-  $1 = is_array($input) && PyArray_EquivTypenums(array_type($input),
-                                                 DATA_TYPECODE);
-}
-%typemap(in,
-         fragment="NumPy_Fragments")
-  (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* INPLACE_ARRAY3)
-  (PyArrayObject* array=NULL)
-{
-  array = obj_to_array_no_conversion($input, DATA_TYPECODE);
-  if (!array || !require_dimensions(array,3) || !require_contiguous(array)
-      || !require_native(array)) SWIG_fail;
-  $1 = (DIM_TYPE) array_size(array,0);
-  $2 = (DIM_TYPE) array_size(array,1);
-  $3 = (DIM_TYPE) array_size(array,2);
-  $4 = (DATA_TYPE*) array_data(array);
-}
-
-/* Typemap suite for (DATA_TYPE* INPLACE_FARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2,
- *                    DIM_TYPE DIM3)
- */
-%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY,
-           fragment="NumPy_Macros")
-  (DATA_TYPE* INPLACE_FARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3)
-{
-  $1 = is_array($input) && PyArray_EquivTypenums(array_type($input),
-                                                 DATA_TYPECODE);
-}
-%typemap(in,
-         fragment="NumPy_Fragments")
-  (DATA_TYPE* INPLACE_FARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3)
-  (PyArrayObject* array=NULL)
-{
-  array = obj_to_array_no_conversion($input, DATA_TYPECODE);
-  if (!array || !require_dimensions(array,3) || !require_contiguous(array) ||
-      !require_native(array) || !require_fortran(array)) SWIG_fail;
-  $1 = (DATA_TYPE*) array_data(array);
-  $2 = (DIM_TYPE) array_size(array,0);
-  $3 = (DIM_TYPE) array_size(array,1);
-  $4 = (DIM_TYPE) array_size(array,2);
-}
-
-/* Typemap suite for (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3,
- *                    DATA_TYPE* INPLACE_FARRAY3)
- */
-%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY,
-           fragment="NumPy_Macros")
-  (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* INPLACE_FARRAY3)
-{
-  $1 = is_array($input) && PyArray_EquivTypenums(array_type($input),
-                                                 DATA_TYPECODE);
-}
-%typemap(in,
-         fragment="NumPy_Fragments")
-  (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* INPLACE_FARRAY3)
-  (PyArrayObject* array=NULL)
-{
-  array = obj_to_array_no_conversion($input, DATA_TYPECODE);
-  if (!array || !require_dimensions(array,3) || !require_contiguous(array)
-      || !require_native(array) || !require_fortran(array)) SWIG_fail;
-  $1 = (DIM_TYPE) array_size(array,0);
-  $2 = (DIM_TYPE) array_size(array,1);
-  $3 = (DIM_TYPE) array_size(array,2);
-  $4 = (DATA_TYPE*) array_data(array);
-}
-
-/* Typemap suite for (DATA_TYPE INPLACE_ARRAY4[ANY][ANY][ANY][ANY])
- */
-%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY,
-           fragment="NumPy_Macros")
-  (DATA_TYPE INPLACE_ARRAY4[ANY][ANY][ANY][ANY])
-{
-  $1 = is_array($input) && PyArray_EquivTypenums(array_type($input),
-                                                 DATA_TYPECODE);
-}
-%typemap(in,
-         fragment="NumPy_Fragments")
-  (DATA_TYPE INPLACE_ARRAY4[ANY][ANY][ANY][ANY])
-  (PyArrayObject* array=NULL)
-{
-  npy_intp size[4] = { $1_dim0, $1_dim1, $1_dim2 , $1_dim3 };
-  array = obj_to_array_no_conversion($input, DATA_TYPECODE);
-  if (!array || !require_dimensions(array,4) || !require_size(array, size, 4) ||
-      !require_contiguous(array) || !require_native(array)) SWIG_fail;
-  $1 = ($1_ltype) array_data(array);
-}
-
-/* Typemap suite for (DATA_TYPE* INPLACE_ARRAY4, DIM_TYPE DIM1, DIM_TYPE DIM2,
- *                    DIM_TYPE DIM3, DIM_TYPE DIM4)
- */
-%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY,
-           fragment="NumPy_Macros")
-  (DATA_TYPE* INPLACE_ARRAY4, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DIM_TYPE DIM4)
-{
-  $1 = is_array($input) && PyArray_EquivTypenums(array_type($input),
-                                                 DATA_TYPECODE);
-}
-%typemap(in,
-         fragment="NumPy_Fragments")
-  (DATA_TYPE* INPLACE_ARRAY4, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DIM_TYPE DIM4)
-  (PyArrayObject* array=NULL)
-{
-  array = obj_to_array_no_conversion($input, DATA_TYPECODE);
-  if (!array || !require_dimensions(array,4) || !require_contiguous(array) ||
-      !require_native(array)) SWIG_fail;
-  $1 = (DATA_TYPE*) array_data(array);
-  $2 = (DIM_TYPE) array_size(array,0);
-  $3 = (DIM_TYPE) array_size(array,1);
-  $4 = (DIM_TYPE) array_size(array,2);
-  $5 = (DIM_TYPE) array_size(array,3);
-}
-
-/* Typemap suite for (DATA_TYPE** INPLACE_ARRAY4, DIM_TYPE DIM1, DIM_TYPE DIM2,
- *                    DIM_TYPE DIM3, DIM_TYPE DIM4)
- */
-%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY,
-           fragment="NumPy_Macros")
-  (DATA_TYPE** INPLACE_ARRAY4, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DIM_TYPE DIM4)
-{
-  $1 = PySequence_Check($input);
-}
-%typemap(in,
-         fragment="NumPy_Fragments")
-  (DATA_TYPE** INPLACE_ARRAY4, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DIM_TYPE DIM4)
-  (DATA_TYPE** array=NULL, PyArrayObject** object_array=NULL)
-{
-  npy_intp size[3] = { -1, -1, -1 };
-  PyArrayObject* temp_array;
-  Py_ssize_t i;
-
-  /* length of the list */
-  $2 = PyList_Size($input);
-
-  /* the arrays */
-  array = (DATA_TYPE **)malloc($2*sizeof(DATA_TYPE *));
-  object_array = (PyArrayObject **)calloc($2,sizeof(PyArrayObject *));
-
-  if (array == NULL || object_array == NULL)
-  {
-    SWIG_fail;
-  }
-
-  for (i=0; i<$2; i++)
-  {
-    temp_array = obj_to_array_no_conversion(PySequence_GetItem($input,i), DATA_TYPECODE);
-
-    /* the new array must be stored so that it can be destroyed in freearg */
-    object_array[i] = temp_array;
-
-    if ( !temp_array || !require_dimensions(temp_array, 3) ||
-      !require_contiguous(temp_array) ||
-      !require_native(temp_array) ||
-      !PyArray_EquivTypenums(array_type(temp_array), DATA_TYPECODE)
-    ) SWIG_fail;
-
-    /* store the size of the first array in the list, then use that for comparison. */
-    if (i == 0)
-    {
-      size[0] = array_size(temp_array,0);
-      size[1] = array_size(temp_array,1);
-      size[2] = array_size(temp_array,2);
-    }
-
-    if (!require_size(temp_array, size, 3)) SWIG_fail;
-
-    array[i] = (DATA_TYPE*) array_data(temp_array);
-  }
-
-  $1 = (DATA_TYPE**) array;
-  $3 = (DIM_TYPE) size[0];
-  $4 = (DIM_TYPE) size[1];
-  $5 = (DIM_TYPE) size[2];
-}
-%typemap(freearg)
-  (DATA_TYPE** INPLACE_ARRAY4, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DIM_TYPE DIM4)
-{
-  if (array$argnum!=NULL) free(array$argnum);
-  if (object_array$argnum!=NULL) free(object_array$argnum);
-}
-
-/* Typemap suite for (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DIM_TYPE DIM4,
- *                    DATA_TYPE* INPLACE_ARRAY4)
- */
-%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY,
-           fragment="NumPy_Macros")
-  (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DIM_TYPE DIM4, DATA_TYPE* INPLACE_ARRAY4)
-{
-  $1 = is_array($input) && PyArray_EquivTypenums(array_type($input),
-                                                 DATA_TYPECODE);
-}
-%typemap(in,
-         fragment="NumPy_Fragments")
-  (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DIM_TYPE DIM4, DATA_TYPE* INPLACE_ARRAY4)
-  (PyArrayObject* array=NULL)
-{
-  array = obj_to_array_no_conversion($input, DATA_TYPECODE);
-  if (!array || !require_dimensions(array,4) || !require_contiguous(array)
-      || !require_native(array)) SWIG_fail;
-  $1 = (DIM_TYPE) array_size(array,0);
-  $2 = (DIM_TYPE) array_size(array,1);
-  $3 = (DIM_TYPE) array_size(array,2);
-  $4 = (DIM_TYPE) array_size(array,3);
-  $5 = (DATA_TYPE*) array_data(array);
-}
-
-/* Typemap suite for (DATA_TYPE* INPLACE_FARRAY4, DIM_TYPE DIM1, DIM_TYPE DIM2,
- *                    DIM_TYPE DIM3, DIM_TYPE DIM4)
- */
-%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY,
-           fragment="NumPy_Macros")
-  (DATA_TYPE* INPLACE_FARRAY4, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DIM_TYPE DIM4)
-{
-  $1 = is_array($input) && PyArray_EquivTypenums(array_type($input),
-                                                 DATA_TYPECODE);
-}
-%typemap(in,
-         fragment="NumPy_Fragments")
-  (DATA_TYPE* INPLACE_FARRAY4, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DIM_TYPE DIM4)
-  (PyArrayObject* array=NULL)
-{
-  array = obj_to_array_no_conversion($input, DATA_TYPECODE);
-  if (!array || !require_dimensions(array,4) || !require_contiguous(array) ||
-      !require_native(array) || !require_fortran(array)) SWIG_fail;
-  $1 = (DATA_TYPE*) array_data(array);
-  $2 = (DIM_TYPE) array_size(array,0);
-  $3 = (DIM_TYPE) array_size(array,1);
-  $4 = (DIM_TYPE) array_size(array,2);
-  $5 = (DIM_TYPE) array_size(array,3);
-}
-
-/* Typemap suite for (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3,
- *                    DATA_TYPE* INPLACE_FARRAY4)
- */
-%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY,
-           fragment="NumPy_Macros")
-  (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DIM_TYPE DIM4, DATA_TYPE* INPLACE_FARRAY4)
-{
-  $1 = is_array($input) && PyArray_EquivTypenums(array_type($input),
-                                                 DATA_TYPECODE);
-}
-%typemap(in,
-         fragment="NumPy_Fragments")
-  (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DIM_TYPE DIM4, DATA_TYPE* INPLACE_FARRAY4)
-  (PyArrayObject* array=NULL)
-{
-  array = obj_to_array_no_conversion($input, DATA_TYPECODE);
-  if (!array || !require_dimensions(array,4) || !require_contiguous(array)
-      || !require_native(array) || !require_fortran(array)) SWIG_fail;
-  $1 = (DIM_TYPE) array_size(array,0);
-  $2 = (DIM_TYPE) array_size(array,1);
-  $3 = (DIM_TYPE) array_size(array,2);
-  $4 = (DIM_TYPE) array_size(array,3);
-  $5 = (DATA_TYPE*) array_data(array);
-}
-
-/*************************/
-/* Argout Array Typemaps */
-/*************************/
-
-/* Typemap suite for (DATA_TYPE ARGOUT_ARRAY1[ANY])
- */
-%typemap(in,numinputs=0,
-         fragment="NumPy_Backward_Compatibility,NumPy_Macros")
-  (DATA_TYPE ARGOUT_ARRAY1[ANY])
-  (PyObject* array = NULL)
-{
-  npy_intp dims[1] = { $1_dim0 };
-  array = PyArray_SimpleNew(1, dims, DATA_TYPECODE);
-  if (!array) SWIG_fail;
-  $1 = ($1_ltype) array_data(array);
-}
-%typemap(argout)
-  (DATA_TYPE ARGOUT_ARRAY1[ANY])
-{
-  $result = SWIG_Python_AppendOutput($result,(PyObject*)array$argnum);
-}
-
-/* Typemap suite for (DATA_TYPE* ARGOUT_ARRAY1, DIM_TYPE DIM1)
- */
-%typemap(in,numinputs=1,
-         fragment="NumPy_Fragments")
-  (DATA_TYPE* ARGOUT_ARRAY1, DIM_TYPE DIM1)
-  (PyObject* array = NULL)
-{
-  npy_intp dims[1];
-  if (!PyInt_Check($input))
-  {
-    const char* typestring = pytype_string($input);
-    PyErr_Format(PyExc_TypeError,
-                 "Int dimension expected.  '%s' given.",
-                 typestring);
-    SWIG_fail;
-  }
-  $2 = (DIM_TYPE) PyInt_AsLong($input);
-  dims[0] = (npy_intp) $2;
-  array = PyArray_SimpleNew(1, dims, DATA_TYPECODE);
-  if (!array) SWIG_fail;
-  $1 = (DATA_TYPE*) array_data(array);
-}
-%typemap(argout)
-  (DATA_TYPE* ARGOUT_ARRAY1, DIM_TYPE DIM1)
-{
-  $result = SWIG_Python_AppendOutput($result,(PyObject*)array$argnum);
-}
-
-/* Typemap suite for (DIM_TYPE DIM1, DATA_TYPE* ARGOUT_ARRAY1)
- */
-%typemap(in,numinputs=1,
-         fragment="NumPy_Fragments")
-  (DIM_TYPE DIM1, DATA_TYPE* ARGOUT_ARRAY1)
-  (PyObject* array = NULL)
-{
-  npy_intp dims[1];
-  if (!PyInt_Check($input))
-  {
-    const char* typestring = pytype_string($input);
-    PyErr_Format(PyExc_TypeError,
-                 "Int dimension expected.  '%s' given.",
-                 typestring);
-    SWIG_fail;
-  }
-  $1 = (DIM_TYPE) PyInt_AsLong($input);
-  dims[0] = (npy_intp) $1;
-  array = PyArray_SimpleNew(1, dims, DATA_TYPECODE);
-  if (!array) SWIG_fail;
-  $2 = (DATA_TYPE*) array_data(array);
-}
-%typemap(argout)
-  (DIM_TYPE DIM1, DATA_TYPE* ARGOUT_ARRAY1)
-{
-  $result = SWIG_Python_AppendOutput($result,(PyObject*)array$argnum);
-}
-
-/* Typemap suite for (DATA_TYPE ARGOUT_ARRAY2[ANY][ANY])
- */
-%typemap(in,numinputs=0,
-         fragment="NumPy_Backward_Compatibility,NumPy_Macros")
-  (DATA_TYPE ARGOUT_ARRAY2[ANY][ANY])
-  (PyObject* array = NULL)
-{
-  npy_intp dims[2] = { $1_dim0, $1_dim1 };
-  array = PyArray_SimpleNew(2, dims, DATA_TYPECODE);
-  if (!array) SWIG_fail;
-  $1 = ($1_ltype) array_data(array);
-}
-%typemap(argout)
-  (DATA_TYPE ARGOUT_ARRAY2[ANY][ANY])
-{
-  $result = SWIG_Python_AppendOutput($result,(PyObject*)array$argnum);
-}
-
-/* Typemap suite for (DATA_TYPE ARGOUT_ARRAY3[ANY][ANY][ANY])
- */
-%typemap(in,numinputs=0,
-         fragment="NumPy_Backward_Compatibility,NumPy_Macros")
-  (DATA_TYPE ARGOUT_ARRAY3[ANY][ANY][ANY])
-  (PyObject* array = NULL)
-{
-  npy_intp dims[3] = { $1_dim0, $1_dim1, $1_dim2 };
-  array = PyArray_SimpleNew(3, dims, DATA_TYPECODE);
-  if (!array) SWIG_fail;
-  $1 = ($1_ltype) array_data(array);
-}
-%typemap(argout)
-  (DATA_TYPE ARGOUT_ARRAY3[ANY][ANY][ANY])
-{
-  $result = SWIG_Python_AppendOutput($result,(PyObject*)array$argnum);
-}
-
-/* Typemap suite for (DATA_TYPE ARGOUT_ARRAY4[ANY][ANY][ANY][ANY])
- */
-%typemap(in,numinputs=0,
-         fragment="NumPy_Backward_Compatibility,NumPy_Macros")
-  (DATA_TYPE ARGOUT_ARRAY4[ANY][ANY][ANY][ANY])
-  (PyObject* array = NULL)
-{
-  npy_intp dims[4] = { $1_dim0, $1_dim1, $1_dim2, $1_dim3 };
-  array = PyArray_SimpleNew(4, dims, DATA_TYPECODE);
-  if (!array) SWIG_fail;
-  $1 = ($1_ltype) array_data(array);
-}
-%typemap(argout)
-  (DATA_TYPE ARGOUT_ARRAY4[ANY][ANY][ANY][ANY])
-{
-  $result = SWIG_Python_AppendOutput($result,(PyObject*)array$argnum);
-}
-
-/*****************************/
-/* Argoutview Array Typemaps */
-/*****************************/
-
-/* Typemap suite for (DATA_TYPE** ARGOUTVIEW_ARRAY1, DIM_TYPE* DIM1)
- */
-%typemap(in,numinputs=0)
-  (DATA_TYPE** ARGOUTVIEW_ARRAY1, DIM_TYPE* DIM1    )
-  (DATA_TYPE*  data_temp = NULL , DIM_TYPE  dim_temp)
-{
-  $1 = &data_temp;
-  $2 = &dim_temp;
-}
-%typemap(argout,
-         fragment="NumPy_Backward_Compatibility")
-  (DATA_TYPE** ARGOUTVIEW_ARRAY1, DIM_TYPE* DIM1)
-{
-  npy_intp dims[1] = { *$2 };
-  PyObject* obj = PyArray_SimpleNewFromData(1, dims, DATA_TYPECODE, (void*)(*$1));
-  PyArrayObject* array = (PyArrayObject*) obj;
-
-  if (!array) SWIG_fail;
-  $result = SWIG_Python_AppendOutput($result,obj);
-}
-
-/* Typemap suite for (DIM_TYPE* DIM1, DATA_TYPE** ARGOUTVIEW_ARRAY1)
- */
-%typemap(in,numinputs=0)
-  (DIM_TYPE* DIM1    , DATA_TYPE** ARGOUTVIEW_ARRAY1)
-  (DIM_TYPE  dim_temp, DATA_TYPE*  data_temp = NULL )
-{
-  $1 = &dim_temp;
-  $2 = &data_temp;
-}
-%typemap(argout,
-         fragment="NumPy_Backward_Compatibility")
-  (DIM_TYPE* DIM1, DATA_TYPE** ARGOUTVIEW_ARRAY1)
-{
-  npy_intp dims[1] = { *$1 };
-  PyObject* obj = PyArray_SimpleNewFromData(1, dims, DATA_TYPECODE, (void*)(*$2));
-  PyArrayObject* array = (PyArrayObject*) obj;
-
-  if (!array) SWIG_fail;
-  $result = SWIG_Python_AppendOutput($result,obj);
-}
-
-/* Typemap suite for (DATA_TYPE** ARGOUTVIEW_ARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2)
- */
-%typemap(in,numinputs=0)
-  (DATA_TYPE** ARGOUTVIEW_ARRAY2, DIM_TYPE* DIM1     , DIM_TYPE* DIM2     )
-  (DATA_TYPE*  data_temp = NULL , DIM_TYPE  dim1_temp, DIM_TYPE  dim2_temp)
-{
-  $1 = &data_temp;
-  $2 = &dim1_temp;
-  $3 = &dim2_temp;
-}
-%typemap(argout,
-         fragment="NumPy_Backward_Compatibility")
-  (DATA_TYPE** ARGOUTVIEW_ARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2)
-{
-  npy_intp dims[2] = { *$2, *$3 };
-  PyObject* obj = PyArray_SimpleNewFromData(2, dims, DATA_TYPECODE, (void*)(*$1));
-  PyArrayObject* array = (PyArrayObject*) obj;
-
-  if (!array) SWIG_fail;
-  $result = SWIG_Python_AppendOutput($result,obj);
-}
-
-/* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEW_ARRAY2)
- */
-%typemap(in,numinputs=0)
-  (DIM_TYPE* DIM1     , DIM_TYPE* DIM2     , DATA_TYPE** ARGOUTVIEW_ARRAY2)
-  (DIM_TYPE  dim1_temp, DIM_TYPE  dim2_temp, DATA_TYPE*  data_temp = NULL )
-{
-  $1 = &dim1_temp;
-  $2 = &dim2_temp;
-  $3 = &data_temp;
-}
-%typemap(argout,
-         fragment="NumPy_Backward_Compatibility")
-  (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEW_ARRAY2)
-{
-  npy_intp dims[2] = { *$1, *$2 };
-  PyObject* obj = PyArray_SimpleNewFromData(2, dims, DATA_TYPECODE, (void*)(*$3));
-  PyArrayObject* array = (PyArrayObject*) obj;
-
-  if (!array) SWIG_fail;
-  $result = SWIG_Python_AppendOutput($result,obj);
-}
-
-/* Typemap suite for (DATA_TYPE** ARGOUTVIEW_FARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2)
- */
-%typemap(in,numinputs=0)
-  (DATA_TYPE** ARGOUTVIEW_FARRAY2, DIM_TYPE* DIM1     , DIM_TYPE* DIM2     )
-  (DATA_TYPE*  data_temp = NULL  , DIM_TYPE  dim1_temp, DIM_TYPE  dim2_temp)
-{
-  $1 = &data_temp;
-  $2 = &dim1_temp;
-  $3 = &dim2_temp;
-}
-%typemap(argout,
-         fragment="NumPy_Backward_Compatibility,NumPy_Array_Requirements")
-  (DATA_TYPE** ARGOUTVIEW_FARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2)
-{
-  npy_intp dims[2] = { *$2, *$3 };
-  PyObject* obj = PyArray_SimpleNewFromData(2, dims, DATA_TYPECODE, (void*)(*$1));
-  PyArrayObject* array = (PyArrayObject*) obj;
-
-  if (!array || !require_fortran(array)) SWIG_fail;
-  $result = SWIG_Python_AppendOutput($result,obj);
-}
-
-/* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEW_FARRAY2)
- */
-%typemap(in,numinputs=0)
-  (DIM_TYPE* DIM1     , DIM_TYPE* DIM2     , DATA_TYPE** ARGOUTVIEW_FARRAY2)
-  (DIM_TYPE  dim1_temp, DIM_TYPE  dim2_temp, DATA_TYPE*  data_temp = NULL  )
-{
-  $1 = &dim1_temp;
-  $2 = &dim2_temp;
-  $3 = &data_temp;
-}
-%typemap(argout,
-         fragment="NumPy_Backward_Compatibility,NumPy_Array_Requirements")
-  (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEW_FARRAY2)
-{
-  npy_intp dims[2] = { *$1, *$2 };
-  PyObject* obj = PyArray_SimpleNewFromData(2, dims, DATA_TYPECODE, (void*)(*$3));
-  PyArrayObject* array = (PyArrayObject*) obj;
-
-  if (!array || !require_fortran(array)) SWIG_fail;
-  $result = SWIG_Python_AppendOutput($result,obj);
-}
-
-/* Typemap suite for (DATA_TYPE** ARGOUTVIEW_ARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2,
-                      DIM_TYPE* DIM3)
- */
-%typemap(in,numinputs=0)
-  (DATA_TYPE** ARGOUTVIEW_ARRAY3, DIM_TYPE* DIM1    , DIM_TYPE* DIM2    , DIM_TYPE* DIM3    )
-  (DATA_TYPE* data_temp = NULL  , DIM_TYPE dim1_temp, DIM_TYPE dim2_temp, DIM_TYPE dim3_temp)
-{
-  $1 = &data_temp;
-  $2 = &dim1_temp;
-  $3 = &dim2_temp;
-  $4 = &dim3_temp;
-}
-%typemap(argout,
-         fragment="NumPy_Backward_Compatibility")
-  (DATA_TYPE** ARGOUTVIEW_ARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3)
-{
-  npy_intp dims[3] = { *$2, *$3, *$4 };
-  PyObject* obj = PyArray_SimpleNewFromData(3, dims, DATA_TYPECODE, (void*)(*$1));
-  PyArrayObject* array = (PyArrayObject*) obj;
-
-  if (!array) SWIG_fail;
-  $result = SWIG_Python_AppendOutput($result,obj);
-}
-
-/* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3,
-                      DATA_TYPE** ARGOUTVIEW_ARRAY3)
- */
-%typemap(in,numinputs=0)
-  (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DATA_TYPE** ARGOUTVIEW_ARRAY3)
-  (DIM_TYPE dim1_temp, DIM_TYPE dim2_temp, DIM_TYPE dim3_temp, DATA_TYPE* data_temp = NULL)
-{
-  $1 = &dim1_temp;
-  $2 = &dim2_temp;
-  $3 = &dim3_temp;
-  $4 = &data_temp;
-}
-%typemap(argout,
-         fragment="NumPy_Backward_Compatibility")
-  (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DATA_TYPE** ARGOUTVIEW_ARRAY3)
-{
-  npy_intp dims[3] = { *$1, *$2, *$3 };
-  PyObject* obj = PyArray_SimpleNewFromData(3, dims, DATA_TYPECODE, (void*)(*$4));
-  PyArrayObject* array = (PyArrayObject*) obj;
-
-  if (!array) SWIG_fail;
-  $result = SWIG_Python_AppendOutput($result,obj);
-}
-
-/* Typemap suite for (DATA_TYPE** ARGOUTVIEW_FARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2,
-                      DIM_TYPE* DIM3)
- */
-%typemap(in,numinputs=0)
-  (DATA_TYPE** ARGOUTVIEW_FARRAY3, DIM_TYPE* DIM1    , DIM_TYPE* DIM2    , DIM_TYPE* DIM3    )
-  (DATA_TYPE* data_temp = NULL   , DIM_TYPE dim1_temp, DIM_TYPE dim2_temp, DIM_TYPE dim3_temp)
-{
-  $1 = &data_temp;
-  $2 = &dim1_temp;
-  $3 = &dim2_temp;
-  $4 = &dim3_temp;
-}
-%typemap(argout,
-         fragment="NumPy_Backward_Compatibility,NumPy_Array_Requirements")
-  (DATA_TYPE** ARGOUTVIEW_FARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3)
-{
-  npy_intp dims[3] = { *$2, *$3, *$4 };
-  PyObject* obj = PyArray_SimpleNewFromData(3, dims, DATA_TYPECODE, (void*)(*$1));
-  PyArrayObject* array = (PyArrayObject*) obj;
-
-  if (!array || !require_fortran(array)) SWIG_fail;
-  $result = SWIG_Python_AppendOutput($result,obj);
-}
-
-/* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3,
-                      DATA_TYPE** ARGOUTVIEW_FARRAY3)
- */
-%typemap(in,numinputs=0)
-  (DIM_TYPE* DIM1    , DIM_TYPE* DIM2    , DIM_TYPE* DIM3    , DATA_TYPE** ARGOUTVIEW_FARRAY3)
-  (DIM_TYPE dim1_temp, DIM_TYPE dim2_temp, DIM_TYPE dim3_temp, DATA_TYPE* data_temp = NULL   )
-{
-  $1 = &dim1_temp;
-  $2 = &dim2_temp;
-  $3 = &dim3_temp;
-  $4 = &data_temp;
-}
-%typemap(argout,
-         fragment="NumPy_Backward_Compatibility,NumPy_Array_Requirements")
-  (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DATA_TYPE** ARGOUTVIEW_FARRAY3)
-{
-  npy_intp dims[3] = { *$1, *$2, *$3 };
-  PyObject* obj = PyArray_SimpleNewFromData(3, dims, DATA_TYPECODE, (void*)(*$4));
-  PyArrayObject* array = (PyArrayObject*) obj;
-
-  if (!array || !require_fortran(array)) SWIG_fail;
-  $result = SWIG_Python_AppendOutput($result,obj);
-}
-
-/* Typemap suite for (DATA_TYPE** ARGOUTVIEW_ARRAY4, DIM_TYPE* DIM1, DIM_TYPE* DIM2,
-                      DIM_TYPE* DIM3, DIM_TYPE* DIM4)
- */
-%typemap(in,numinputs=0)
-  (DATA_TYPE** ARGOUTVIEW_ARRAY4, DIM_TYPE* DIM1    , DIM_TYPE* DIM2    , DIM_TYPE* DIM3    , DIM_TYPE* DIM4    )
-  (DATA_TYPE* data_temp = NULL  , DIM_TYPE dim1_temp, DIM_TYPE dim2_temp, DIM_TYPE dim3_temp, DIM_TYPE dim4_temp)
-{
-  $1 = &data_temp;
-  $2 = &dim1_temp;
-  $3 = &dim2_temp;
-  $4 = &dim3_temp;
-  $5 = &dim4_temp;
-}
-%typemap(argout,
-         fragment="NumPy_Backward_Compatibility")
-  (DATA_TYPE** ARGOUTVIEW_ARRAY4, DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DIM_TYPE* DIM4)
-{
-  npy_intp dims[4] = { *$2, *$3, *$4 , *$5 };
-  PyObject* obj = PyArray_SimpleNewFromData(4, dims, DATA_TYPECODE, (void*)(*$1));
-  PyArrayObject* array = (PyArrayObject*) obj;
-
-  if (!array) SWIG_fail;
-  $result = SWIG_Python_AppendOutput($result,obj);
-}
-
-/* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DIM_TYPE* DIM4,
-                      DATA_TYPE** ARGOUTVIEW_ARRAY4)
- */
-%typemap(in,numinputs=0)
-  (DIM_TYPE* DIM1    , DIM_TYPE* DIM2    , DIM_TYPE* DIM3    , DIM_TYPE* DIM4    , DATA_TYPE** ARGOUTVIEW_ARRAY4)
-  (DIM_TYPE dim1_temp, DIM_TYPE dim2_temp, DIM_TYPE dim3_temp, DIM_TYPE dim4_temp, DATA_TYPE* data_temp = NULL  )
-{
-  $1 = &dim1_temp;
-  $2 = &dim2_temp;
-  $3 = &dim3_temp;
-  $4 = &dim4_temp;
-  $5 = &data_temp;
-}
-%typemap(argout,
-         fragment="NumPy_Backward_Compatibility")
-  (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DIM_TYPE* DIM4, DATA_TYPE** ARGOUTVIEW_ARRAY4)
-{
-  npy_intp dims[4] = { *$1, *$2, *$3 , *$4 };
-  PyObject* obj = PyArray_SimpleNewFromData(4, dims, DATA_TYPECODE, (void*)(*$5));
-  PyArrayObject* array = (PyArrayObject*) obj;
-
-  if (!array) SWIG_fail;
-  $result = SWIG_Python_AppendOutput($result,obj);
-}
-
-/* Typemap suite for (DATA_TYPE** ARGOUTVIEW_FARRAY4, DIM_TYPE* DIM1, DIM_TYPE* DIM2,
-                      DIM_TYPE* DIM3, DIM_TYPE* DIM4)
- */
-%typemap(in,numinputs=0)
-  (DATA_TYPE** ARGOUTVIEW_FARRAY4, DIM_TYPE* DIM1    , DIM_TYPE* DIM2    , DIM_TYPE* DIM3    , DIM_TYPE* DIM4    )
-  (DATA_TYPE* data_temp = NULL   , DIM_TYPE dim1_temp, DIM_TYPE dim2_temp, DIM_TYPE dim3_temp, DIM_TYPE dim4_temp)
-{
-  $1 = &data_temp;
-  $2 = &dim1_temp;
-  $3 = &dim2_temp;
-  $4 = &dim3_temp;
-  $5 = &dim4_temp;
-}
-%typemap(argout,
-         fragment="NumPy_Backward_Compatibility,NumPy_Array_Requirements")
-  (DATA_TYPE** ARGOUTVIEW_FARRAY4, DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DIM_TYPE* DIM4)
-{
-  npy_intp dims[4] = { *$2, *$3, *$4 , *$5 };
-  PyObject* obj = PyArray_SimpleNewFromData(4, dims, DATA_TYPECODE, (void*)(*$1));
-  PyArrayObject* array = (PyArrayObject*) obj;
-
-  if (!array || !require_fortran(array)) SWIG_fail;
-  $result = SWIG_Python_AppendOutput($result,obj);
-}
-
-/* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DIM_TYPE* DIM4,
-                      DATA_TYPE** ARGOUTVIEW_FARRAY4)
- */
-%typemap(in,numinputs=0)
-  (DIM_TYPE* DIM1    , DIM_TYPE* DIM2    , DIM_TYPE* DIM3    , DIM_TYPE* DIM4    , DATA_TYPE** ARGOUTVIEW_FARRAY4)
-  (DIM_TYPE dim1_temp, DIM_TYPE dim2_temp, DIM_TYPE dim3_temp, DIM_TYPE dim4_temp, DATA_TYPE* data_temp = NULL   )
-{
-  $1 = &dim1_temp;
-  $2 = &dim2_temp;
-  $3 = &dim3_temp;
-  $4 = &dim4_temp;
-  $5 = &data_temp;
-}
-%typemap(argout,
-         fragment="NumPy_Backward_Compatibility,NumPy_Array_Requirements")
-  (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DIM_TYPE* DIM4, DATA_TYPE** ARGOUTVIEW_FARRAY4)
-{
-  npy_intp dims[4] = { *$1, *$2, *$3 , *$4 };
-  PyObject* obj = PyArray_SimpleNewFromData(4, dims, DATA_TYPECODE, (void*)(*$5));
-  PyArrayObject* array = (PyArrayObject*) obj;
-
-  if (!array || !require_fortran(array)) SWIG_fail;
-  $result = SWIG_Python_AppendOutput($result,obj);
-}
-
-/*************************************/
-/* Managed Argoutview Array Typemaps */
-/*************************************/
-
-/* Typemap suite for (DATA_TYPE** ARGOUTVIEWM_ARRAY1, DIM_TYPE* DIM1)
- */
-%typemap(in,numinputs=0)
-  (DATA_TYPE** ARGOUTVIEWM_ARRAY1, DIM_TYPE* DIM1    )
-  (DATA_TYPE*  data_temp = NULL  , DIM_TYPE  dim_temp)
-{
-  $1 = &data_temp;
-  $2 = &dim_temp;
-}
-%typemap(argout,
-         fragment="NumPy_Backward_Compatibility")
-  (DATA_TYPE** ARGOUTVIEWM_ARRAY1, DIM_TYPE* DIM1)
-{
-  npy_intp dims[1] = { *$2 };
-  PyObject* obj = PyArray_SimpleNewFromData(1, dims, DATA_TYPECODE, (void*)(*$1));
-  PyArrayObject* array = (PyArrayObject*) obj;
-
-  if (!array) SWIG_fail;
-
-%#ifdef SWIGPY_USE_CAPSULE
-    PyObject* cap = PyCapsule_New((void*)(*$1), SWIGPY_CAPSULE_NAME, free_cap);
-%#else
-    PyObject* cap = PyCObject_FromVoidPtr((void*)(*$1), free);
-%#endif
-
-%#if NPY_API_VERSION < 0x00000007
-  PyArray_BASE(array) = cap;
-%#else
-  PyArray_SetBaseObject(array,cap);
-%#endif
-
-  $result = SWIG_Python_AppendOutput($result,obj);
-}
-
-/* Typemap suite for (DIM_TYPE* DIM1, DATA_TYPE** ARGOUTVIEWM_ARRAY1)
- */
-%typemap(in,numinputs=0)
-  (DIM_TYPE* DIM1    , DATA_TYPE** ARGOUTVIEWM_ARRAY1)
-  (DIM_TYPE  dim_temp, DATA_TYPE*  data_temp = NULL  )
-{
-  $1 = &dim_temp;
-  $2 = &data_temp;
-}
-%typemap(argout,
-         fragment="NumPy_Backward_Compatibility")
-  (DIM_TYPE* DIM1, DATA_TYPE** ARGOUTVIEWM_ARRAY1)
-{
-  npy_intp dims[1] = { *$1 };
-  PyObject* obj = PyArray_SimpleNewFromData(1, dims, DATA_TYPECODE, (void*)(*$2));
-  PyArrayObject* array = (PyArrayObject*) obj;
-
-  if (!array) SWIG_fail;
-
-%#ifdef SWIGPY_USE_CAPSULE
-    PyObject* cap = PyCapsule_New((void*)(*$1), SWIGPY_CAPSULE_NAME, free_cap);
-%#else
-    PyObject* cap = PyCObject_FromVoidPtr((void*)(*$1), free);
-%#endif
-
-%#if NPY_API_VERSION < 0x00000007
-  PyArray_BASE(array) = cap;
-%#else
-  PyArray_SetBaseObject(array,cap);
-%#endif
-
-  $result = SWIG_Python_AppendOutput($result,obj);
-}
-
-/* Typemap suite for (DATA_TYPE** ARGOUTVIEWM_ARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2)
- */
-%typemap(in,numinputs=0)
-  (DATA_TYPE** ARGOUTVIEWM_ARRAY2, DIM_TYPE* DIM1     , DIM_TYPE* DIM2     )
-  (DATA_TYPE*  data_temp = NULL  , DIM_TYPE  dim1_temp, DIM_TYPE  dim2_temp)
-{
-  $1 = &data_temp;
-  $2 = &dim1_temp;
-  $3 = &dim2_temp;
-}
-%typemap(argout,
-         fragment="NumPy_Backward_Compatibility")
-  (DATA_TYPE** ARGOUTVIEWM_ARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2)
-{
-  npy_intp dims[2] = { *$2, *$3 };
-  PyObject* obj = PyArray_SimpleNewFromData(2, dims, DATA_TYPECODE, (void*)(*$1));
-  PyArrayObject* array = (PyArrayObject*) obj;
-
-  if (!array) SWIG_fail;
-
-%#ifdef SWIGPY_USE_CAPSULE
-    PyObject* cap = PyCapsule_New((void*)(*$1), SWIGPY_CAPSULE_NAME, free_cap);
-%#else
-    PyObject* cap = PyCObject_FromVoidPtr((void*)(*$1), free);
-%#endif
-
-%#if NPY_API_VERSION < 0x00000007
-  PyArray_BASE(array) = cap;
-%#else
-  PyArray_SetBaseObject(array,cap);
-%#endif
-
-  $result = SWIG_Python_AppendOutput($result,obj);
-}
-
-/* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEWM_ARRAY2)
- */
-%typemap(in,numinputs=0)
-  (DIM_TYPE* DIM1     , DIM_TYPE* DIM2     , DATA_TYPE** ARGOUTVIEWM_ARRAY2)
-  (DIM_TYPE  dim1_temp, DIM_TYPE  dim2_temp, DATA_TYPE*  data_temp = NULL  )
-{
-  $1 = &dim1_temp;
-  $2 = &dim2_temp;
-  $3 = &data_temp;
-}
-%typemap(argout,
-         fragment="NumPy_Backward_Compatibility")
-  (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEWM_ARRAY2)
-{
-  npy_intp dims[2] = { *$1, *$2 };
-  PyObject* obj = PyArray_SimpleNewFromData(2, dims, DATA_TYPECODE, (void*)(*$3));
-  PyArrayObject* array = (PyArrayObject*) obj;
-
-  if (!array) SWIG_fail;
-
-%#ifdef SWIGPY_USE_CAPSULE
-    PyObject* cap = PyCapsule_New((void*)(*$1), SWIGPY_CAPSULE_NAME, free_cap);
-%#else
-    PyObject* cap = PyCObject_FromVoidPtr((void*)(*$1), free);
-%#endif
-
-%#if NPY_API_VERSION < 0x00000007
-  PyArray_BASE(array) = cap;
-%#else
-  PyArray_SetBaseObject(array,cap);
-%#endif
-
-  $result = SWIG_Python_AppendOutput($result,obj);
-}
-
-/* Typemap suite for (DATA_TYPE** ARGOUTVIEWM_FARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2)
- */
-%typemap(in,numinputs=0)
-  (DATA_TYPE** ARGOUTVIEWM_FARRAY2, DIM_TYPE* DIM1     , DIM_TYPE* DIM2     )
-  (DATA_TYPE*  data_temp = NULL   , DIM_TYPE  dim1_temp, DIM_TYPE  dim2_temp)
-{
-  $1 = &data_temp;
-  $2 = &dim1_temp;
-  $3 = &dim2_temp;
-}
-%typemap(argout,
-         fragment="NumPy_Backward_Compatibility,NumPy_Array_Requirements")
-  (DATA_TYPE** ARGOUTVIEWM_FARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2)
-{
-  npy_intp dims[2] = { *$2, *$3 };
-  PyObject* obj = PyArray_SimpleNewFromData(2, dims, DATA_TYPECODE, (void*)(*$1));
-  PyArrayObject* array = (PyArrayObject*) obj;
-
-  if (!array || !require_fortran(array)) SWIG_fail;
-
-%#ifdef SWIGPY_USE_CAPSULE
-    PyObject* cap = PyCapsule_New((void*)(*$1), SWIGPY_CAPSULE_NAME, free_cap);
-%#else
-    PyObject* cap = PyCObject_FromVoidPtr((void*)(*$1), free);
-%#endif
-
-%#if NPY_API_VERSION < 0x00000007
-  PyArray_BASE(array) = cap;
-%#else
-  PyArray_SetBaseObject(array,cap);
-%#endif
-
-  $result = SWIG_Python_AppendOutput($result,obj);
-}
-
-/* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEWM_FARRAY2)
- */
-%typemap(in,numinputs=0)
-  (DIM_TYPE* DIM1     , DIM_TYPE* DIM2     , DATA_TYPE** ARGOUTVIEWM_FARRAY2)
-  (DIM_TYPE  dim1_temp, DIM_TYPE  dim2_temp, DATA_TYPE*  data_temp = NULL   )
-{
-  $1 = &dim1_temp;
-  $2 = &dim2_temp;
-  $3 = &data_temp;
-}
-%typemap(argout,
-         fragment="NumPy_Backward_Compatibility,NumPy_Array_Requirements")
-  (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEWM_FARRAY2)
-{
-  npy_intp dims[2] = { *$1, *$2 };
-  PyObject* obj = PyArray_SimpleNewFromData(2, dims, DATA_TYPECODE, (void*)(*$3));
-  PyArrayObject* array = (PyArrayObject*) obj;
-
-  if (!array || !require_fortran(array)) SWIG_fail;
-
-%#ifdef SWIGPY_USE_CAPSULE
-    PyObject* cap = PyCapsule_New((void*)(*$1), SWIGPY_CAPSULE_NAME, free_cap);
-%#else
-    PyObject* cap = PyCObject_FromVoidPtr((void*)(*$1), free);
-%#endif
-
-%#if NPY_API_VERSION < 0x00000007
-  PyArray_BASE(array) = cap;
-%#else
-  PyArray_SetBaseObject(array,cap);
-%#endif
-
-  $result = SWIG_Python_AppendOutput($result,obj);
-}
-
-/* Typemap suite for (DATA_TYPE** ARGOUTVIEWM_ARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2,
-                      DIM_TYPE* DIM3)
- */
-%typemap(in,numinputs=0)
-  (DATA_TYPE** ARGOUTVIEWM_ARRAY3, DIM_TYPE* DIM1    , DIM_TYPE* DIM2    , DIM_TYPE* DIM3    )
-  (DATA_TYPE* data_temp = NULL   , DIM_TYPE dim1_temp, DIM_TYPE dim2_temp, DIM_TYPE dim3_temp)
-{
-  $1 = &data_temp;
-  $2 = &dim1_temp;
-  $3 = &dim2_temp;
-  $4 = &dim3_temp;
-}
-%typemap(argout,
-         fragment="NumPy_Backward_Compatibility")
-  (DATA_TYPE** ARGOUTVIEWM_ARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3)
-{
-  npy_intp dims[3] = { *$2, *$3, *$4 };
-  PyObject* obj = PyArray_SimpleNewFromData(3, dims, DATA_TYPECODE, (void*)(*$1));
-  PyArrayObject* array = (PyArrayObject*) obj;
-
-  if (!array) SWIG_fail;
-
-%#ifdef SWIGPY_USE_CAPSULE
-    PyObject* cap = PyCapsule_New((void*)(*$1), SWIGPY_CAPSULE_NAME, free_cap);
-%#else
-    PyObject* cap = PyCObject_FromVoidPtr((void*)(*$1), free);
-%#endif
-
-%#if NPY_API_VERSION < 0x00000007
-  PyArray_BASE(array) = cap;
-%#else
-  PyArray_SetBaseObject(array,cap);
-%#endif
-
-  $result = SWIG_Python_AppendOutput($result,obj);
-}
-
-/* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3,
-                      DATA_TYPE** ARGOUTVIEWM_ARRAY3)
- */
-%typemap(in,numinputs=0)
-  (DIM_TYPE* DIM1    , DIM_TYPE* DIM2    , DIM_TYPE* DIM3    , DATA_TYPE** ARGOUTVIEWM_ARRAY3)
-  (DIM_TYPE dim1_temp, DIM_TYPE dim2_temp, DIM_TYPE dim3_temp, DATA_TYPE* data_temp = NULL   )
-{
-  $1 = &dim1_temp;
-  $2 = &dim2_temp;
-  $3 = &dim3_temp;
-  $4 = &data_temp;
-}
-%typemap(argout,
-         fragment="NumPy_Backward_Compatibility")
-  (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DATA_TYPE** ARGOUTVIEWM_ARRAY3)
-{
-  npy_intp dims[3] = { *$1, *$2, *$3 };
-  PyObject* obj= PyArray_SimpleNewFromData(3, dims, DATA_TYPECODE, (void*)(*$4));
-  PyArrayObject* array = (PyArrayObject*) obj;
-
-  if (!array) SWIG_fail;
-
-%#ifdef SWIGPY_USE_CAPSULE
-    PyObject* cap = PyCapsule_New((void*)(*$1), SWIGPY_CAPSULE_NAME, free_cap);
-%#else
-    PyObject* cap = PyCObject_FromVoidPtr((void*)(*$1), free);
-%#endif
-
-%#if NPY_API_VERSION < 0x00000007
-  PyArray_BASE(array) = cap;
-%#else
-  PyArray_SetBaseObject(array,cap);
-%#endif
-
-  $result = SWIG_Python_AppendOutput($result,obj);
-}
-
-/* Typemap suite for (DATA_TYPE** ARGOUTVIEWM_FARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2,
-                      DIM_TYPE* DIM3)
- */
-%typemap(in,numinputs=0)
-  (DATA_TYPE** ARGOUTVIEWM_FARRAY3, DIM_TYPE* DIM1    , DIM_TYPE* DIM2    , DIM_TYPE* DIM3    )
-  (DATA_TYPE* data_temp = NULL    , DIM_TYPE dim1_temp, DIM_TYPE dim2_temp, DIM_TYPE dim3_temp)
-{
-  $1 = &data_temp;
-  $2 = &dim1_temp;
-  $3 = &dim2_temp;
-  $4 = &dim3_temp;
-}
-%typemap(argout,
-         fragment="NumPy_Backward_Compatibility,NumPy_Array_Requirements")
-  (DATA_TYPE** ARGOUTVIEWM_FARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3)
-{
-  npy_intp dims[3] = { *$2, *$3, *$4 };
-  PyObject* obj = PyArray_SimpleNewFromData(3, dims, DATA_TYPECODE, (void*)(*$1));
-  PyArrayObject* array = (PyArrayObject*) obj;
-
-  if (!array || !require_fortran(array)) SWIG_fail;
-
-%#ifdef SWIGPY_USE_CAPSULE
-    PyObject* cap = PyCapsule_New((void*)(*$1), SWIGPY_CAPSULE_NAME, free_cap);
-%#else
-    PyObject* cap = PyCObject_FromVoidPtr((void*)(*$1), free);
-%#endif
-
-%#if NPY_API_VERSION < 0x00000007
-  PyArray_BASE(array) = cap;
-%#else
-  PyArray_SetBaseObject(array,cap);
-%#endif
-
-  $result = SWIG_Python_AppendOutput($result,obj);
-}
-
-/* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3,
-                      DATA_TYPE** ARGOUTVIEWM_FARRAY3)
- */
-%typemap(in,numinputs=0)
-  (DIM_TYPE* DIM1    , DIM_TYPE* DIM2    , DIM_TYPE* DIM3    , DATA_TYPE** ARGOUTVIEWM_FARRAY3)
-  (DIM_TYPE dim1_temp, DIM_TYPE dim2_temp, DIM_TYPE dim3_temp, DATA_TYPE* data_temp = NULL    )
-{
-  $1 = &dim1_temp;
-  $2 = &dim2_temp;
-  $3 = &dim3_temp;
-  $4 = &data_temp;
-}
-%typemap(argout,
-         fragment="NumPy_Backward_Compatibility,NumPy_Array_Requirements")
-  (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DATA_TYPE** ARGOUTVIEWM_FARRAY3)
-{
-  npy_intp dims[3] = { *$1, *$2, *$3 };
-  PyObject* obj = PyArray_SimpleNewFromData(3, dims, DATA_TYPECODE, (void*)(*$4));
-  PyArrayObject* array = (PyArrayObject*) obj;
-
-  if (!array || !require_fortran(array)) SWIG_fail;
-
-%#ifdef SWIGPY_USE_CAPSULE
-    PyObject* cap = PyCapsule_New((void*)(*$1), SWIGPY_CAPSULE_NAME, free_cap);
-%#else
-    PyObject* cap = PyCObject_FromVoidPtr((void*)(*$1), free);
-%#endif
-
-%#if NPY_API_VERSION < 0x00000007
-  PyArray_BASE(array) = cap;
-%#else
-  PyArray_SetBaseObject(array,cap);
-%#endif
-
-  $result = SWIG_Python_AppendOutput($result,obj);
-}
-
-/* Typemap suite for (DATA_TYPE** ARGOUTVIEWM_ARRAY4, DIM_TYPE* DIM1, DIM_TYPE* DIM2,
-                      DIM_TYPE* DIM3, DIM_TYPE* DIM4)
- */
-%typemap(in,numinputs=0)
-  (DATA_TYPE** ARGOUTVIEWM_ARRAY4, DIM_TYPE* DIM1    , DIM_TYPE* DIM2    , DIM_TYPE* DIM3    , DIM_TYPE* DIM4    )
-  (DATA_TYPE* data_temp = NULL   , DIM_TYPE dim1_temp, DIM_TYPE dim2_temp, DIM_TYPE dim3_temp, DIM_TYPE dim4_temp)
-{
-  $1 = &data_temp;
-  $2 = &dim1_temp;
-  $3 = &dim2_temp;
-  $4 = &dim3_temp;
-  $5 = &dim4_temp;
-}
-%typemap(argout,
-         fragment="NumPy_Backward_Compatibility")
-  (DATA_TYPE** ARGOUTVIEWM_ARRAY4, DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DIM_TYPE* DIM4)
-{
-  npy_intp dims[4] = { *$2, *$3, *$4 , *$5 };
-  PyObject* obj = PyArray_SimpleNewFromData(4, dims, DATA_TYPECODE, (void*)(*$1));
-  PyArrayObject* array = (PyArrayObject*) obj;
-
-  if (!array) SWIG_fail;
-
-%#ifdef SWIGPY_USE_CAPSULE
-    PyObject* cap = PyCapsule_New((void*)(*$1), SWIGPY_CAPSULE_NAME, free_cap);
-%#else
-    PyObject* cap = PyCObject_FromVoidPtr((void*)(*$1), free);
-%#endif
-
-%#if NPY_API_VERSION < 0x00000007
-  PyArray_BASE(array) = cap;
-%#else
-  PyArray_SetBaseObject(array,cap);
-%#endif
-
-  $result = SWIG_Python_AppendOutput($result,obj);
-}
-
-/* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DIM_TYPE* DIM4,
-                      DATA_TYPE** ARGOUTVIEWM_ARRAY4)
- */
-%typemap(in,numinputs=0)
-  (DIM_TYPE* DIM1    , DIM_TYPE* DIM2    , DIM_TYPE* DIM3    , DIM_TYPE* DIM4    , DATA_TYPE** ARGOUTVIEWM_ARRAY4)
-  (DIM_TYPE dim1_temp, DIM_TYPE dim2_temp, DIM_TYPE dim3_temp, DIM_TYPE dim4_temp, DATA_TYPE* data_temp = NULL   )
-{
-  $1 = &dim1_temp;
-  $2 = &dim2_temp;
-  $3 = &dim3_temp;
-  $4 = &dim4_temp;
-  $5 = &data_temp;
-}
-%typemap(argout,
-         fragment="NumPy_Backward_Compatibility")
-  (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DIM_TYPE* DIM4, DATA_TYPE** ARGOUTVIEWM_ARRAY4)
-{
-  npy_intp dims[4] = { *$1, *$2, *$3 , *$4 };
-  PyObject* obj = PyArray_SimpleNewFromData(4, dims, DATA_TYPECODE, (void*)(*$5));
-  PyArrayObject* array = (PyArrayObject*) obj;
-
-  if (!array) SWIG_fail;
-
-%#ifdef SWIGPY_USE_CAPSULE
-    PyObject* cap = PyCapsule_New((void*)(*$1), SWIGPY_CAPSULE_NAME, free_cap);
-%#else
-    PyObject* cap = PyCObject_FromVoidPtr((void*)(*$1), free);
-%#endif
-
-%#if NPY_API_VERSION < 0x00000007
-  PyArray_BASE(array) = cap;
-%#else
-  PyArray_SetBaseObject(array,cap);
-%#endif
-
-  $result = SWIG_Python_AppendOutput($result,obj);
-}
-
-/* Typemap suite for (DATA_TYPE** ARGOUTVIEWM_FARRAY4, DIM_TYPE* DIM1, DIM_TYPE* DIM2,
-                      DIM_TYPE* DIM3, DIM_TYPE* DIM4)
- */
-%typemap(in,numinputs=0)
-  (DATA_TYPE** ARGOUTVIEWM_FARRAY4, DIM_TYPE* DIM1    , DIM_TYPE* DIM2    , DIM_TYPE* DIM3    , DIM_TYPE* DIM4    )
-  (DATA_TYPE* data_temp = NULL    , DIM_TYPE dim1_temp, DIM_TYPE dim2_temp, DIM_TYPE dim3_temp, DIM_TYPE dim4_temp)
-{
-  $1 = &data_temp;
-  $2 = &dim1_temp;
-  $3 = &dim2_temp;
-  $4 = &dim3_temp;
-  $5 = &dim4_temp;
-}
-%typemap(argout,
-         fragment="NumPy_Backward_Compatibility,NumPy_Array_Requirements")
-  (DATA_TYPE** ARGOUTVIEWM_FARRAY4, DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3)
-{
-  npy_intp dims[4] = { *$2, *$3, *$4 , *$5 };
-  PyObject* obj = PyArray_SimpleNewFromData(4, dims, DATA_TYPECODE, (void*)(*$1));
-  PyArrayObject* array = (PyArrayObject*) obj;
-
-  if (!array || !require_fortran(array)) SWIG_fail;
-
-%#ifdef SWIGPY_USE_CAPSULE
-    PyObject* cap = PyCapsule_New((void*)(*$1), SWIGPY_CAPSULE_NAME, free_cap);
-%#else
-    PyObject* cap = PyCObject_FromVoidPtr((void*)(*$1), free);
-%#endif
-
-%#if NPY_API_VERSION < 0x00000007
-  PyArray_BASE(array) = cap;
-%#else
-  PyArray_SetBaseObject(array,cap);
-%#endif
-
-  $result = SWIG_Python_AppendOutput($result,obj);
-}
-
-/* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DIM_TYPE* DIM4,
-                      DATA_TYPE** ARGOUTVIEWM_FARRAY4)
- */
-%typemap(in,numinputs=0)
-  (DIM_TYPE* DIM1    , DIM_TYPE* DIM2    , DIM_TYPE* DIM3    , DIM_TYPE* DIM4    , DATA_TYPE** ARGOUTVIEWM_FARRAY4)
-  (DIM_TYPE dim1_temp, DIM_TYPE dim2_temp, DIM_TYPE dim3_temp, DIM_TYPE dim4_temp, DATA_TYPE* data_temp = NULL    )
-{
-  $1 = &dim1_temp;
-  $2 = &dim2_temp;
-  $3 = &dim3_temp;
-  $4 = &dim4_temp;
-  $5 = &data_temp;
-}
-%typemap(argout,
-         fragment="NumPy_Backward_Compatibility,NumPy_Array_Requirements")
-  (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DIM_TYPE* DIM4, DATA_TYPE** ARGOUTVIEWM_FARRAY4)
-{
-  npy_intp dims[4] = { *$1, *$2, *$3 , *$4 };
-  PyObject* obj = PyArray_SimpleNewFromData(4, dims, DATA_TYPECODE, (void*)(*$5));
-  PyArrayObject* array = (PyArrayObject*) obj;
-
-  if (!array || !require_fortran(array)) SWIG_fail;
-
-%#ifdef SWIGPY_USE_CAPSULE
-    PyObject* cap = PyCapsule_New((void*)(*$1), SWIGPY_CAPSULE_NAME, free_cap);
-%#else
-    PyObject* cap = PyCObject_FromVoidPtr((void*)(*$1), free);
-%#endif
-
-%#if NPY_API_VERSION < 0x00000007
-  PyArray_BASE(array) = cap;
-%#else
-  PyArray_SetBaseObject(array,cap);
-%#endif
-
-  $result = SWIG_Python_AppendOutput($result,obj);
-}
-
-/* Typemap suite for (DATA_TYPE** ARGOUTVIEWM_ARRAY4, DIM_TYPE* DIM1, DIM_TYPE* DIM2,
-                      DIM_TYPE* DIM3, DIM_TYPE* DIM4)
- */
-%typemap(in,numinputs=0)
-  (DATA_TYPE** ARGOUTVIEWM_ARRAY4, DIM_TYPE* DIM1    , DIM_TYPE* DIM2    , DIM_TYPE* DIM3    , DIM_TYPE* DIM4    )
-  (DATA_TYPE* data_temp = NULL   , DIM_TYPE dim1_temp, DIM_TYPE dim2_temp, DIM_TYPE dim3_temp, DIM_TYPE dim4_temp)
-{
-  $1 = &data_temp;
-  $2 = &dim1_temp;
-  $3 = &dim2_temp;
-  $4 = &dim3_temp;
-  $5 = &dim4_temp;
-}
-%typemap(argout,
-         fragment="NumPy_Backward_Compatibility")
-  (DATA_TYPE** ARGOUTVIEWM_ARRAY4, DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DIM_TYPE* DIM4)
-{
-  npy_intp dims[4] = { *$2, *$3, *$4 , *$5 };
-  PyObject* obj = PyArray_SimpleNewFromData(4, dims, DATA_TYPECODE, (void*)(*$1));
-  PyArrayObject* array = (PyArrayObject*) obj;
-
-  if (!array) SWIG_fail;
-
-%#ifdef SWIGPY_USE_CAPSULE
-    PyObject* cap = PyCapsule_New((void*)(*$1), SWIGPY_CAPSULE_NAME, free_cap);
-%#else
-    PyObject* cap = PyCObject_FromVoidPtr((void*)(*$1), free);
-%#endif
-
-%#if NPY_API_VERSION < 0x00000007
-  PyArray_BASE(array) = cap;
-%#else
-  PyArray_SetBaseObject(array,cap);
-%#endif
-
-  $result = SWIG_Python_AppendOutput($result,obj);
-}
-
-/* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DIM_TYPE* DIM4,
-                      DATA_TYPE** ARGOUTVIEWM_ARRAY4)
- */
-%typemap(in,numinputs=0)
-  (DIM_TYPE* DIM1    , DIM_TYPE* DIM2    , DIM_TYPE* DIM3    , DIM_TYPE* DIM4    , DATA_TYPE** ARGOUTVIEWM_ARRAY4)
-  (DIM_TYPE dim1_temp, DIM_TYPE dim2_temp, DIM_TYPE dim3_temp, DIM_TYPE dim4_temp, DATA_TYPE* data_temp = NULL   )
-{
-  $1 = &dim1_temp;
-  $2 = &dim2_temp;
-  $3 = &dim3_temp;
-  $4 = &dim4_temp;
-  $5 = &data_temp;
-}
-%typemap(argout,
-         fragment="NumPy_Backward_Compatibility")
-  (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DIM_TYPE* DIM4, DATA_TYPE** ARGOUTVIEWM_ARRAY4)
-{
-  npy_intp dims[4] = { *$1, *$2, *$3 , *$4 };
-  PyObject* obj = PyArray_SimpleNewFromData(4, dims, DATA_TYPECODE, (void*)(*$5));
-  PyArrayObject* array = (PyArrayObject*) obj;
-
-  if (!array) SWIG_fail;
-
-%#ifdef SWIGPY_USE_CAPSULE
-    PyObject* cap = PyCapsule_New((void*)(*$1), SWIGPY_CAPSULE_NAME, free_cap);
-%#else
-    PyObject* cap = PyCObject_FromVoidPtr((void*)(*$1), free);
-%#endif
-
-%#if NPY_API_VERSION < 0x00000007
-  PyArray_BASE(array) = cap;
-%#else
-  PyArray_SetBaseObject(array,cap);
-%#endif
-
-  $result = SWIG_Python_AppendOutput($result,obj);
-}
-
-/* Typemap suite for (DATA_TYPE** ARGOUTVIEWM_FARRAY4, DIM_TYPE* DIM1, DIM_TYPE* DIM2,
-                      DIM_TYPE* DIM3, DIM_TYPE* DIM4)
- */
-%typemap(in,numinputs=0)
-  (DATA_TYPE** ARGOUTVIEWM_FARRAY4, DIM_TYPE* DIM1    , DIM_TYPE* DIM2    , DIM_TYPE* DIM3    , DIM_TYPE* DIM4    )
-  (DATA_TYPE* data_temp = NULL    , DIM_TYPE dim1_temp, DIM_TYPE dim2_temp, DIM_TYPE dim3_temp, DIM_TYPE dim4_temp)
-{
-  $1 = &data_temp;
-  $2 = &dim1_temp;
-  $3 = &dim2_temp;
-  $4 = &dim3_temp;
-  $5 = &dim4_temp;
-}
-%typemap(argout,
-         fragment="NumPy_Backward_Compatibility,NumPy_Array_Requirements")
-  (DATA_TYPE** ARGOUTVIEWM_FARRAY4, DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DIM_TYPE* DIM4)
-{
-  npy_intp dims[4] = { *$2, *$3, *$4 , *$5 };
-  PyObject* obj = PyArray_SimpleNewFromData(4, dims, DATA_TYPECODE, (void*)(*$1));
-  PyArrayObject* array = (PyArrayObject*) obj;
-
-  if (!array || !require_fortran(array)) SWIG_fail;
-
-%#ifdef SWIGPY_USE_CAPSULE
-    PyObject* cap = PyCapsule_New((void*)(*$1), SWIGPY_CAPSULE_NAME, free_cap);
-%#else
-    PyObject* cap = PyCObject_FromVoidPtr((void*)(*$1), free);
-%#endif
-
-%#if NPY_API_VERSION < 0x00000007
-  PyArray_BASE(array) = cap;
-%#else
-  PyArray_SetBaseObject(array,cap);
-%#endif
-
-  $result = SWIG_Python_AppendOutput($result,obj);
-}
-
-/* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DIM_TYPE* DIM4,
-                      DATA_TYPE** ARGOUTVIEWM_FARRAY4)
- */
-%typemap(in,numinputs=0)
-  (DIM_TYPE* DIM1    , DIM_TYPE* DIM2    , DIM_TYPE* DIM3    , DIM_TYPE* DIM4    , DATA_TYPE** ARGOUTVIEWM_FARRAY4)
-  (DIM_TYPE dim1_temp, DIM_TYPE dim2_temp, DIM_TYPE dim3_temp, DIM_TYPE dim4_temp, DATA_TYPE* data_temp = NULL    )
-{
-  $1 = &dim1_temp;
-  $2 = &dim2_temp;
-  $3 = &dim3_temp;
-  $4 = &dim4_temp;
-  $5 = &data_temp;
-}
-%typemap(argout,
-         fragment="NumPy_Backward_Compatibility,NumPy_Array_Requirements")
-  (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DIM_TYPE* DIM4, DATA_TYPE** ARGOUTVIEWM_FARRAY4)
-{
-  npy_intp dims[4] = { *$1, *$2, *$3 , *$4 };
-  PyObject* obj = PyArray_SimpleNewFromData(4, dims, DATA_TYPECODE, (void*)(*$5));
-  PyArrayObject* array = (PyArrayObject*) obj;
-
-  if (!array || !require_fortran(array)) SWIG_fail;
-
-%#ifdef SWIGPY_USE_CAPSULE
-    PyObject* cap = PyCapsule_New((void*)(*$1), SWIGPY_CAPSULE_NAME, free_cap);
-%#else
-    PyObject* cap = PyCObject_FromVoidPtr((void*)(*$1), free);
-%#endif
-
-%#if NPY_API_VERSION < 0x00000007
-  PyArray_BASE(array) = cap;
-%#else
-  PyArray_SetBaseObject(array,cap);
-%#endif
-
-  $result = SWIG_Python_AppendOutput($result,obj);
-}
-
-%enddef    /* %numpy_typemaps() macro */
-/* *************************************************************** */
-
-/* Concrete instances of the %numpy_typemaps() macro: Each invocation
- * below applies all of the typemaps above to the specified data type.
- */
-%numpy_typemaps(signed char       , NPY_BYTE     , int)
-%numpy_typemaps(unsigned char     , NPY_UBYTE    , int)
-%numpy_typemaps(short             , NPY_SHORT    , int)
-%numpy_typemaps(unsigned short    , NPY_USHORT   , int)
-%numpy_typemaps(int               , NPY_INT      , int)
-%numpy_typemaps(unsigned int      , NPY_UINT     , int)
-%numpy_typemaps(long              , NPY_LONG     , int)
-%numpy_typemaps(unsigned long     , NPY_ULONG    , int)
-%numpy_typemaps(long long         , NPY_LONGLONG , int)
-%numpy_typemaps(unsigned long long, NPY_ULONGLONG, int)
-%numpy_typemaps(float             , NPY_FLOAT    , int)
-%numpy_typemaps(double            , NPY_DOUBLE   , int)
-
-/* ***************************************************************
- * The follow macro expansion does not work, because C++ bool is 4
- * bytes and NPY_BOOL is 1 byte
- *
- *    %numpy_typemaps(bool, NPY_BOOL, int)
- */
-
-/* ***************************************************************
- * On my Mac, I get the following warning for this macro expansion:
- * 'swig/python detected a memory leak of type 'long double *', no destructor found.'
- *
- *    %numpy_typemaps(long double, NPY_LONGDOUBLE, int)
- */
-
-/* ***************************************************************
- * Swig complains about a syntax error for the following macro
- * expansions:
- *
- *    %numpy_typemaps(complex float,  NPY_CFLOAT , int)
- *
- *    %numpy_typemaps(complex double, NPY_CDOUBLE, int)
- *
- *    %numpy_typemaps(complex long double, NPY_CLONGDOUBLE, int)
- */
-
-#endif /* SWIGPYTHON */
diff --git a/swig/start.i b/swig/start.i
deleted file mode 100644
index 24aa1a26ac2a1bdaf4b36b05d8394eb989aac14e..0000000000000000000000000000000000000000
--- a/swig/start.i
+++ /dev/null
@@ -1,14 +0,0 @@
-%{
-#define SWIG_FILE_WITH_INIT
-#include <assert.h>
-%}
-// numpy macros
-%include numpy.i 	
-
-%init %{
-  import_array();
-%}
-
-
-//usefull macros used to instanciate templates
-%define INSTANTIATED_DIMENSIONS 1,2,3,4,5,6,7 %enddef
diff --git a/trashed_examples/FlowAroundHemisphere.py b/trashed_examples/FlowAroundHemisphere.py
deleted file mode 100644
index 5f47c02a9c03fc94388ca0d755ba9d99d0929cd9..0000000000000000000000000000000000000000
--- a/trashed_examples/FlowAroundHemisphere.py
+++ /dev/null
@@ -1,419 +0,0 @@
-#!/usr/bin/python
-
-"""
-Taylor Green 3D : see paper van Rees 2011.
-
-All parameters are set and defined in python module dataTG.
-
-"""
-
-from hysop import Box
-from hysop.f2py import fftw2py
-import numpy as np
-import cPickle
-from scitools.NumPyDB import NumPyDB_cPickle as hysopPickle
-from hysop.fields.continuous import Field
-from hysop.fields.variable_parameter import VariableParameter
-from hysop.mpi.topology import Cartesian
-from hysop.operator.advection import Advection
-from hysop.operator.stretching import Stretching
-from hysop.operator.absorption_BC import AbsorptionBC
-from hysop.operator.poisson import Poisson
-from hysop.operator.diffusion import Diffusion
-from hysop.operator.adapt_timestep import AdaptTimeStep
-from hysop.operator.redistribute_intra import RedistributeIntra
-from hysop.operator.hdf_io import HDF_Writer
-from hysop.operator.energy_enstrophy import EnergyEnstrophy
-from hysop.operator.profiles import Profiles
-from hysop.problem.simulation import Simulation
-from hysop.methods_keys import Scales, TimeIntegrator, Interpolation,\
-    Remesh, Support, Splitting, dtCrit, SpaceDiscretisation
-from hysop.numerics.integrators.runge_kutta2 import RK2 as RK2
-from hysop.numerics.integrators.runge_kutta3 import RK3 as RK3
-from hysop.numerics.integrators.runge_kutta4 import RK4 as RK4
-from hysop.numerics.finite_differences import FD_C_4, FD_C_2
-from hysop.numerics.interpolation import Linear
-from hysop.numerics.remeshing import L6_4 as rmsh
-import hysop.tools.io_utils as io
-import hysop.tools.numpywrappers as npw
-from hysop.mpi import main_rank, MPI
-from hysop.tools.parameters import Discretization, IOParams
-
-print " ========= Start Navier-Stokes 3D (Taylor Green benchmark) ========="
-
-# ====== pi constant and trigonometric functions ======
-pi = np.pi
-cos = np.cos
-sin = np.sin
-
-# ====== Flow constants =======
-uinf = 1.0
-VISCOSITY = 1. / 300.
-
-# ======= Domain =======
-dim = 3
-Nx = 129
-Ny = Nz = 65
-#Nx = 257
-#Ny = Nz = 129
-#Nx = 513
-#Ny = Nz = 257
-#Nx = 1025
-#Ny = Nz = 513
-g = 2
-boxlength = npw.asrealarray([10.24, 5.12, 5.12])
-boxorigin = npw.asrealarray([-2.0, -2.56, -2.56])
-box = Box(length=boxlength, origin=boxorigin)
-
-# A global discretization with ghost points
-d3dg = Discretization([Nx, Ny, Nz], [g, g, g])
-# A global discretization, without ghost points
-d3d = Discretization([Nx, Ny, Nz])
-
-# ====== Sphere inside the domain ======
-RADIUS = 0.5
-pos = [0., 0., 0.]
-from hysop.domain.subsets import Sphere, HemiSphere
-sphere = HemiSphere(origin=pos, radius=RADIUS, parent=box)
-
-
-# ======= Function to compute initial velocity  =======
-def computeVel(res, x, y, z, t):
-    res[0][...] = uinf
-    res[1][...] = 0.
-    res[2][...] = 0.
-    return res
-
-
-# ======= Function to compute initial vorticity =======
-def computeVort(res, x, y, z, t):
-    res[0][...] = 0.
-    res[1][...] = 0.
-    res[2][...] = 0.
-    return res
-
-#  ====== Time-dependant required-flowrate (Variable Parameter) ======
-def computeFlowrate(simu):
-    # === Time-dependant flow rate ===
-    t = simu.tk
-    Tstart = 3.0
-    flowrate = np.zeros(3)
-    flowrate[0] = uinf * box.length[1] * box.length[2]
-    if t >= Tstart and t <= Tstart + 1.0:
-        flowrate[1] = sin(pi * (t - Tstart)) * \
-                      box.length[1] * box.length[2]
-    # === Constant flow rate ===
-    #    flowrate = np.zeros(3)
-    #    flowrate[0] = uinf * box.length[1] * box.length[2]
-    return flowrate
-
-
-# ======= Fields =======
-velo = Field(domain=box, formula=computeVel,
-             name='Velocity', is_vector=True)
-vorti = Field(domain=box, formula=computeVort,
-              name='Vorticity', is_vector=True)
-
-# ========= Simulation setup =========
-simu = Simulation(start=0.0, end=75.0, time_step=0.0125, max_iter=10000000)
-
-
-# Adaptative timestep method : dt = min(values(dtCrit))
-# where dtCrit is a list of criterions on which the computation
-# of the adaptative time step is based
-# ex : dtCrit = ['gradU', 'cfl', 'stretch'], means :
-# dt = min (dtAdv, dtCfl, dtStretch), where dtAdv is equal to LCFL / |gradU|
-# For dtAdv, the possible choices are the following:
-# 'vort' (infinite norm of vorticity) : dtAdv = LCFL / |vort|
-# 'gradU' (infinite norm of velocity gradient), dtAdv = LCFL / |gradU|
-# 'deform' (infinite norm of deformation tensor),
-# dtAdv = LCFL / (0.5(gradU + gradU^T))
-op = {}
-iop = IOParams("time_step")
-# Default topology (i.e. 3D, with ghosts)
-topo_with_ghosts = box.create_topology(d3dg)
-
-
-op['dtAdapt'] = AdaptTimeStep(velo, vorti, simulation=simu,
-                              discretization=topo_with_ghosts,
-                              method={TimeIntegrator: RK3,
-                                      SpaceDiscretisation: FD_C_4,
-                                      dtCrit: ['gradU', 'stretch', 'cfl']},
-                              io_params=iop,
-                              lcfl=0.125,
-                              cfl=0.5)
-
-op['advection'] = Advection(velo, vorti,
-                            discretization=d3d,
-                            method={Scales: 'p_M6',
-                                    Splitting: 'classic'}
-                            )
-
-op['stretching'] = Stretching(velo, vorti,
-                              discretization=topo_with_ghosts)
-
-op['diffusion'] = Diffusion(viscosity=VISCOSITY, vorticity=vorti,
-                            discretization=d3d)
-
-rate = VariableParameter(formula=computeFlowrate)
-op['poisson'] = Poisson(velo, vorti, discretization=d3d, flowrate=rate)
-
-# ===== Discretization of computational operators ======
-for ope in op.values():
-    ope.discretize()
-
-topofft = op['poisson'].discreteFields[vorti].topology
-topoadvec = op['advection'].discreteFields[vorti].topology
-
-# =====  Smooth vorticity absorption at the outlet =====
-op['vort_absorption'] = AbsorptionBC(velo, vorti, discretization=topofft, 
-                                     req_flowrate=rate, 
-                                     x_coords_absorp=[7.24, 8.24])
-#                                     x_coords_absorp=[1.56, 2.56])
-op['vort_absorption'].discretize()
-
-# =====  Penalization of the vorticity on a sphere inside the domain =====
-from hysop.operator.penalization import PenalizeVorticity
-op['penalVort'] = PenalizeVorticity(velocity=velo, vorticity=vorti,
-                                    discretization=topo_with_ghosts,
-                                    obstacles=[sphere], coeff=1e8,
-                                    method={SpaceDiscretisation: FD_C_4})
-op['penalVort'].discretize()
-
-# ==== Operators to map data between the different computational operators ===
-# (i.e. between topologies)
-distr = {}
-distr['fft2str'] = RedistributeIntra(source=op['poisson'],
-                                     target=op['stretching'],
-                                     variables=[velo, vorti])
-distr['str2fft'] = RedistributeIntra(source=op['stretching'],
-                                     target=op['poisson'],
-                                     variables=[velo, vorti])
-distr['fft2advec'] = RedistributeIntra(source=op['poisson'],
-                                       target=op['advection'],
-                                       variables=[velo, vorti])
-distr['advec2fft'] = RedistributeIntra(source=op['advection'],
-                                       target=op['poisson'],
-                                       variables=[velo, vorti])
-# ========= Monitoring operators =========
-monitors = {}
-#iop = IOParams('fields', frequency=100)
-#monitors['writer'] = HDF_Writer(variables={velo: topofft, vorti: topofft},
-#                                io_params=iop)
-
-io_ener = IOParams('energy_enstrophy')
-monitors['energy'] = EnergyEnstrophy(velo, vorti, discretization=topofft,
-                                     io_params=io_ener, is_normalized=False)
-
-rk = 0
-if (0.0 in topofft.mesh.coords[2]):
-    rk = main_rank
-io_prof = IOParams('profile_Y_axis', frequency=10, io_leader=rk)
-monitors['profile'] = Profiles(velo, vorti, discretization=topofft,
-                               io_params=io_prof, prof_coords=[0.0, 0.0], 
-                               direction=1, beginMeanComput=0.1)
-
-from hysop.domain.control_box import ControlBox
-from hysop.operator.drag_and_lift import MomentumForces, NocaForces
-ref_step = topo_with_ghosts.mesh.space_step
-cbpos = npw.zeros(dim)
-cblength = npw.zeros(dim)
-cbpos[...] = boxorigin[...]
-cbpos +=  15 * ref_step
-cblength[...] = boxlength[...]
-cblength -= 30 * ref_step
-cb = ControlBox(parent=box, origin=cbpos, length=cblength)
-coeffForce = 1. / (0.5 * uinf ** 2 * pi * RADIUS ** 2)
-
-io_forces=IOParams('drag_and_lift_NocaII')
-#monitors['forcesNoca'] = NocaForces(velo, vorti, 
-#                                    discretization=topo_with_ghosts,
-#                                    nu=VISCOSITY, 
-#                                    volume_of_control=cb,
-#                                    normalization=coeffForce,
-#                                    obstacles=[sphere], 
-#                                    io_params=io_forces)
-
-io_forcesPenal=IOParams('drag_and_lift_Mom')
-monitors['forcesMom'] = MomentumForces(velocity=velo, 
-                                       discretization=topo_with_ghosts,
-                                       normalization=coeffForce,
-                                       obstacles=[sphere], 
-                                       penalisation_coeff=[1e8],
-                                       io_params=io_forcesPenal)
-
-#io_forcesPenal=IOParams('drag_and_lift_penal')
-#monitors['forcesPenal'] = DragAndLiftPenal(velo, vorti, coeffForce,
-#                                           discretization=topofft,
-#                                           obstacles=[sphere], factor=[1e8],
-#                                           io_params=io_forcesPenal)
-
-step_dir = ref_step[0]
-io_sliceXY = IOParams('sliceXY', frequency=20)
-thickSliceXY = ControlBox(parent=box, origin=[-2.0, -2.56, -2.0 * step_dir], 
-                          length=[10.24- step_dir, 5.12- step_dir, 4.0 * step_dir])
-#thickSliceXY = ControlBox(parent=box, origin=[-2.56, -2.56, -2.0 * step_dir], 
-#                          length=[5.12 - step_dir, 5.12 - step_dir, 4.0 * step_dir])
-monitors['writerSliceXY'] = HDF_Writer(variables={velo: topofft, vorti: topofft},
-                                      io_params=io_sliceXY, subset=thickSliceXY, 
-                                      xmfalways=True)
-
-io_sliceXZ = IOParams('sliceXZ', frequency=400)
-thickSliceXZ = ControlBox(parent=box, origin=[-2.0, -2.0 * step_dir, -2.56], 
-                          length=[10.24- step_dir, 4.0 * step_dir, 5.12- step_dir])
-monitors['writerSliceXZ'] = HDF_Writer(variables={velo: topofft, vorti: topofft},
-                                       io_params=io_sliceXZ, subset=thickSliceXZ, 
-                                       xmfalways=True)
-
-io_subBox = IOParams('subBox', frequency=2000)
-subBox = ControlBox(parent=box, origin=[-0.7, -2.0, -2.0], length=[8.0, 4.0, 4.0])
-monitors['writerSubBox'] = HDF_Writer(variables={velo: topofft, vorti: topofft},
-                                      io_params=io_subBox, subset=subBox, 
-                                      xmfalways=True)
-
-# ========= Setup for all declared operators/monitors =========
-time_setup = MPI.Wtime()
-for ope in op.values():
-    ope.setup()
-for ope in distr.values():
-    ope.setup()
-
-for monit in monitors.values():
-    monit.discretize()
-for monit in monitors.values():
-    monit.setup()
-
-print '[', main_rank, '] total time for setup:', MPI.Wtime() - time_setup
-
-# ========= Fields initialization =========
-# - initialize velo + vort on topostr
-# - penalize vorticity
-# - redistribute topostr --> topofft
-
-time_init = MPI.Wtime()
-ind = sphere.discretize(topofft)
-def initFields():
-    velo.initialize(topo=topo_with_ghosts)
-    vorti.initialize(topo=topo_with_ghosts)
-    op['penalVort'].apply(simu)
-    distr['str2fft'].apply(simu)
-    distr['str2fft'].wait()
-
-initFields()
-print '[', main_rank, '] total time for init :', MPI.Wtime() - time_init
-
-fullseq = []
-
-def run(sequence):
-    op['vort_absorption'].apply(simu)
-    op['poisson'].apply(simu)               # Poisson + correction
-    monitors['forcesMom'].apply(simu)     # Forces Heloise
-    distr['fft2str'].apply(simu)
-    distr['fft2str'].wait()
-    op['penalVort'].apply(simu)             # Vorticity penalization
-#    distr['str2fft'].apply(simu)
-#    distr['str2fft'].wait()
-#    op['poisson'].apply(simu)
-#    distr['fft2str'].apply(simu)
-#    distr['fft2str'].wait()
-    op['stretching'].apply(simu)            # Stretching
-#    monitors['forcesNoca'].apply(simu)          # Forces Noca
-    distr['str2fft'].apply(simu)
-    distr['str2fft'].wait()
-    op['diffusion'].apply(simu)             # Diffusion
-    distr['fft2advec'].apply(simu)
-    distr['fft2advec'].wait()
-    op['advection'].apply(simu)             # Advection (scales)
-    distr['advec2fft'].apply(simu)
-    distr['advec2fft'].wait()
-    monitors['writerSliceXY'].apply(simu)
-#    monitors['writerSliceXZ'].apply(simu)
-#    monitors['writerSubBox'].apply(simu)
-    monitors['energy'].apply(simu)          # Energy/enstrophy
-    monitors['profile'].apply(simu)         # Profile
-    distr['fft2str'].apply(simu)
-    distr['fft2str'].wait()
-    op['dtAdapt'].apply(simu)               # Update timestep
-    op['dtAdapt'].wait()
-
-# ==== Serialize the simulation data of the problem to a "restart" file ====
-def dump(filename):
-    """
-    Serialize some data of the problem to file
-    (only data required for a proper restart, namely fields in self.input
-    and simulation).
-    @param filename : prefix for output file. Real name = filename_rk_N,
-    N being current process number. If None use default value from problem
-    parameters (self.filename)
-    """
-    if filename is not None:
-        filedump = filename + '_rk_' + str(main_rank)
-    db = open(filedump, 'wb')
-    cPickle.dump(simu, db)
-
-# ====== Load the simulation data of the problem from a "restart" file ======
-def restart(filename):
-    """
-    Load serialized data to restart from a previous state.
-    self.input variables and simulation are loaded.
-    @param  filename : prefix for downloaded file.
-    Real name = filename_rk_N, N being current process number.
-    If None use default value from problem
-    parameters (self.filename)
-    """
-    if filename is not None:
-        filedump = filename + '_rk_' + str(main_rank)
-    db = open(filedump, 'r')
-    simu = cPickle.load(db)
-    simu.start = simu.time - simu.time_step
-    ite = simu.current_iteration
-    simu.initialize()
-    simu.current_iteration = ite
-    print 'simu', simu
-    print ("load ...", filename)
-    return simu
-
-seq = fullseq
-
-simu.initialize()
-doDump = False
-doRestart = False
-dumpFreq = 10
-io_default=IOParams('restart')
-dump_filename = io.Writer(io_params=io_default).filename
-#===== Restart (if needed) =====
-if doRestart:
-    simu = restart(dump_filename)
-    iop_vel = IOParams('velo_00000.h5')
-    velo.hdf_load(topofft, io_params=iop_vel)
-    iop_vort = IOParams('vorti_00000.h5')
-    vorti.hdf_load(topofft, io_params=iop_vort)
-    # Set up for monitors and redistribute
-    for ope in distr.values():
-        ope.setup()
-    for monit in monitors.values():
-        monit.setup()
-
-# ======= Time loop =======
-time_run = MPI.Wtime()
-while not simu.isOver:
-    if topofft.rank == 0:
-        simu.printState()
-    run(seq)
-    simu.advance()
-    testdump = simu.current_iteration % dumpFreq is 0
-    if doDump and testdump:
-        print 'dump ...'
-        dump(dump_filename)
-        iop_vel = IOParams('velo')
-        velo.hdf_dump(topofft, io_params=iop_vel)
-        iop_vort = IOParams('vorti')
-        vorti.hdf_dump(topofft, io_params=iop_vort)
-print '[', main_rank, '] total time for run :', MPI.Wtime() - time_run
-
-# ======= Finalize =======
-fftw2py.clean_fftw_solver(box.dimension)
-for ope in distr.values():
-    ope.finalize()
-for monit in monitors.values():
-    monit.finalize()
diff --git a/trashed_examples/FlowAroundSphere_DNS.py b/trashed_examples/FlowAroundSphere_DNS.py
deleted file mode 100644
index 69808ec092186f2ef002fcb59961d56564d49ddc..0000000000000000000000000000000000000000
--- a/trashed_examples/FlowAroundSphere_DNS.py
+++ /dev/null
@@ -1,334 +0,0 @@
-#!/usr/bin/python
-
-"""
-Taylor Green 3D : see paper van Rees 2011.
-
-All parameters are set and defined in python module dataTG.
-
-"""
-
-from hysop import Box, IOParams, IO
-from hysop.f2hysop import fftw2py
-import numpy as np
-import cPickle
-#from scitools.NumPyDB import NumPyDB_cPickle as hysopPickle
-from hysop.fields.continuous import Field
-from hysop.fields.variable_parameter import VariableParameter
-from hysop.mpi.topology import Cartesian
-from hysop.operator.advection import Advection
-from hysop.operator.stretching import Stretching
-from hysop.operator.absorption_BC import AbsorptionBC
-from hysop.operator.poisson import Poisson
-from hysop.operator.diffusion import Diffusion
-from hysop.operator.adapt_timestep import AdaptTimeStep
-from hysop.operator.redistribute_intra import RedistributeIntra
-from hysop.operator.hdf_io import HDF_Writer
-from hysop.operator.energy_enstrophy import EnergyEnstrophy
-from hysop.operator.profiles import Profiles
-from hysop.operator.residual import Residual
-from hysop.problem.simulation import Simulation
-from hysop.methods_keys import Scales, TimeIntegrator, Interpolation,\
-    Remesh, Support, Splitting, dtCrit, SpaceDiscretisation
-from hysop.numerics.integrators.runge_kutta2 import RK2 as RK2
-from hysop.numerics.integrators.runge_kutta3 import RK3 as RK3
-from hysop.numerics.integrators.runge_kutta4 import RK4 as RK4
-from hysop.numerics.finite_differences import FD_C_4, FD_C_2
-from hysop.numerics.interpolation import Linear
-from hysop.numerics.remeshing import L6_4 as rmsh
-from hysop.tools.profiler import Profiler, FProfiler
-import hysop.tools.io_utils as io
-import hysop.tools.numpywrappers as npw
-from hysop.mpi import main_rank, MPI
-from hysop.tools.parameters import Discretization
-
-print " ========= Start Navier-Stokes 3D (Taylor Green benchmark) ========="
-
-# ====== pi constant and trigonometric functions ======
-pi = np.pi
-cos = np.cos
-sin = np.sin
-
-# ====== Flow constants =======
-uinf = 1.0
-VISCOSITY = 1. / 300.
-
-# ======= Domain =======
-dim = 3
-#-------- choose resolution -----
-#Nz = 129
-Nz = 129
-#Nz = 513
-Ny = Nx = 129
-#Ny = Nx = 257
-#--------------------------------
-g = 2
-
-boxorigin = npw.asrealarray([-2.56, -2.56, -2.56])
-#-------- chose domain length  --------
-boxlength = npw.asrealarray([5.12, 5.12, 5.12])
-#boxlength = npw.asrealarray([5.12, 5.12, 10.24])
-#boxlength = npw.asrealarray([5.12, 5.12, 20.48])
-#-------------------------------------
-box = Box(length=boxlength, origin=boxorigin)
-
-# A global discretization with ghost points
-d3dg = Discretization([Nx, Ny, Nz], [g, g, g])
-# A global discretization, without ghost points
-d3d = Discretization([Nx, Ny, Nz])
-
-# ====== Sphere inside the domain ======
-RADIUS = 0.5
-pos = [0., 0., 0.]
-from hysop.domain.subsets import Sphere, HemiSphere
-sphere = Sphere(origin=pos, radius=RADIUS, parent=box)
-
-
-# ======= Function to compute initial velocity  =======
-def computeVel(res, x, y, z, t):
-    res[0][...] = uinf
-    res[1][...] = 0.
-    res[2][...] = 0.
-    return res
-
-
-# ======= Function to compute initial vorticity =======
-def computeVort(res, x, y, z, t):
-    res[0][...] = 0.
-    res[1][...] = 0.
-    res[2][...] = 0.
-    return res
-
-#  ====== Time-dependant required-flowrate (Variable Parameter) ======
-def computeFlowrate(simu):
-    # === Time-dependant flow rate ===
-    t = simu.tk
-    Tstart = 3.0
-    flowrate = np.zeros(3)
-    flowrate[0] = uinf * box.length[1] * box.length[2]
-    if t >= Tstart and t <= Tstart + 1.0:
-        flowrate[1] = sin(pi * (t - Tstart)) * \
-                      box.length[1] * box.length[2]
-    # === Constant flow rate ===
-#    flowrate = np.zeros(3)
-#    flowrate[0] = uinf * box.length[1] * box.length[2]
-    return flowrate
-
-
-# ======= Fields =======
-velo = Field(domain=box, formula=computeVel,
-             name='Velocity', is_vector=True)
-vorti = Field(domain=box, formula=computeVort,
-              name='Vorticity', is_vector=True)
-
-# ========= Simulation setup =========
-simu = Simulation(start=0.0, end=300.0, time_step=0.0125, max_iter=100)
-
-
-# Adaptative timestep method : dt = min(values(dtCrit))
-# where dtCrit is a list of criterions on which the computation
-# of the adaptative time step is based
-# ex : dtCrit = ['gradU', 'cfl', 'stretch'], means :
-# dt = min (dtAdv, dtCfl, dtStretch), where dtAdv is equal to LCFL / |gradU|
-# For dtAdv, the possible choices are the following:
-# 'vort' (infinite norm of vorticity) : dtAdv = LCFL / |vort|
-# 'gradU' (infinite norm of velocity gradient), dtAdv = LCFL / |gradU|
-# 'deform' (infinite norm of deformation tensor),
-# dtAdv = LCFL / (0.5(gradU + gradU^T))
-op = {}
-iop = IOParams("time_step", fileformat=IO.ASCII)
-# Default topology (i.e. 3D, with ghosts)
-topo_with_ghosts = box.create_topology(d3dg)
-
-
-op['dtAdapt'] = AdaptTimeStep(velo, vorti, simulation=simu,
-                              discretization=topo_with_ghosts,
-                              method={TimeIntegrator: RK3,
-                                      SpaceDiscretisation: FD_C_4,
-                                      dtCrit: ['gradU', 'stretch']},
-                              io_params=iop,
-                              lcfl=0.125,
-                              cfl=0.5)
-
-op['advection'] = Advection(velo, vorti,
-                            discretization=d3d,
-                            method={Scales: 'p_M6',
-                                    Splitting: 'classic'}
-                            )
-
-op['stretching'] = Stretching(velo, vorti,
-                              discretization=topo_with_ghosts)
-
-op['diffusion'] = Diffusion(viscosity=VISCOSITY, vorticity=vorti,
-                            discretization=d3d)
-
-rate = VariableParameter(formula=computeFlowrate)
-op['poisson'] = Poisson(velo, vorti, discretization=d3d, flowrate=rate)
-
-# ===== Discretization of computational operators ======
-for ope in op.values():
-    ope.discretize()
-
-topofft = op['poisson'].discreteFields[vorti].topology
-topoadvec = op['advection'].discreteFields[vorti].topology
-
-# =====  Smooth vorticity absorption at the outlet =====
-op['vort_absorption'] = AbsorptionBC(velo, vorti, discretization=topofft, 
-                                     req_flowrate=rate,
-                                     x_coords_absorp=[1.56, 2.56])
-op['vort_absorption'].discretize()
-
-# =====  Penalization of the vorticity on a sphere inside the domain =====
-from hysop.operator.penalization import PenalizeVorticity
-op['penalVort'] = PenalizeVorticity(velocity=velo, vorticity=vorti,
-                                    discretization=topo_with_ghosts,
-                                    obstacles=[sphere], coeff=1e8,
-                                    method={SpaceDiscretisation: FD_C_4})
-op['penalVort'].discretize()
-
-# ==== Operators to map data between the different computational operators ===
-# (i.e. between topologies)
-distr = {}
-distr['fft2str'] = RedistributeIntra(source=op['poisson'],
-                                     target=op['stretching'],
-                                     variables=[velo, vorti])
-distr['str2fft'] = RedistributeIntra(source=op['stretching'],
-                                     target=op['poisson'],
-                                     variables=[velo, vorti])
-distr['fft2advec'] = RedistributeIntra(source=op['poisson'],
-                                       target=op['advection'],
-                                       variables=[velo, vorti])
-distr['advec2fft'] = RedistributeIntra(source=op['advection'],
-                                       target=op['poisson'],
-                                       variables=[velo, vorti])
-
-# ========= Setup for all declared operators =========
-time_setup = MPI.Wtime()
-for ope in op.values():
-    ope.setup()
-for ope in distr.values():
-    ope.setup()
-
-
-print '[', main_rank, '] total time for setup:', MPI.Wtime() - time_setup
-
-# ========= Fields initialization =========
-# - initialize velo + vort on topostr
-# - penalize vorticity
-# - redistribute topostr --> topofft
-
-time_init = MPI.Wtime()
-ind = sphere.discretize(topofft)
-def initFields():
-    velo.initialize(topo=topo_with_ghosts)
-    vorti.initialize(topo=topo_with_ghosts)
-    op['penalVort'].apply(simu)
-    distr['str2fft'].apply(simu)
-    distr['str2fft'].wait()
-
-initFields()
-print '[', main_rank, '] total time for init :', MPI.Wtime() - time_init
-
-fullseq = []
-
-def run(sequence):
-    op['vort_absorption'].apply(simu)
-    op['poisson'].apply(simu)               # Poisson + correction
-    distr['fft2str'].apply(simu)
-    distr['fft2str'].wait()
-    op['penalVort'].apply(simu)             # Vorticity penalization
-    op['stretching'].apply(simu)            # Stretching
-    distr['str2fft'].apply(simu)
-    distr['str2fft'].wait()
-    op['diffusion'].apply(simu)             # Diffusion
-    distr['fft2advec'].apply(simu)
-    distr['fft2advec'].wait()
-    op['advection'].apply(simu)             # Advection (scales)
-    distr['advec2fft'].apply(simu)
-    distr['advec2fft'].wait()
-    distr['fft2str'].apply(simu)
-    distr['fft2str'].wait()
-    op['dtAdapt'].apply(simu)               # Update timestep
-    op['dtAdapt'].wait()
-
-# ==== Serialize the simulation data of the problem to a "restart" file ====
-def dump(filename):
-    """
-    Serialize some data of the problem to file
-    (only data required for a proper restart, namely fields in self.input
-    and simulation).
-    @param filename : prefix for output file. Real name = filename_rk_N,
-    N being current process number. If None use default value from problem
-    parameters (self.filename)
-    """
-    if filename is not None:
-        filedump = filename + '_rk_' + str(main_rank)
-    db = open(filedump, 'wb')
-    cPickle.dump(simu, db)
-
-# ====== Load the simulation data of the problem from a "restart" file ======
-def restart(filename):
-    """
-    Load serialized data to restart from a previous state.
-    self.input variables and simulation are loaded.
-    @param  filename : prefix for downloaded file.
-    Real name = filename_rk_N, N being current process number.
-    If None use default value from problem
-    parameters (self.filename)
-    """
-    if filename is not None:
-        filedump = filename + '_rk_' + str(main_rank)
-    db = open(filedump, 'r')
-    simu = cPickle.load(db)
-    simu.start = simu.time - simu.time_step
-    ite = simu.current_iteration
-    simu.initialize()
-    simu.current_iteration = ite
-    print 'simu', simu
-    print ("load ...", filename)
-    return simu
-
-seq = fullseq
-
-simu.initialize()
-doDump = False
-doRestart = False
-dumpFreq = 8000
-io_default = IOParams('restart', fileformat=IO.ASCII)
-dump_filename = io_default.filename
-print dump_filename
-#===== Restart (if needed) =====
-if doRestart:
-    simu = restart(dump_filename)
-    iop_vel = IOParams('velo_00000.h5')
-    velo.hdf_load(topofft, io_params=iop_vel)
-    iop_vort = IOParams('vorti_00000.h5')
-    vorti.hdf_load(topofft, io_params=iop_vort)
-    # Set up for redistribute
-    for ope in distr.values():
-        ope.setup()
-
-# ======= Time loop =======
-total_time = FProfiler("Total")
-solve_time = FProfiler("SolveTime")
-cttime = MPI.Wtime()
-while not simu.isOver:
-    ctime = MPI.Wtime()
-    #    if topofft.rank == 0:
-    #        simu.printState()
-    run(seq)
-    solve_time += MPI.Wtime() - ctime # Mesure le temps d execution d une iteration complete
-    simu.advance()
-simu.finalize()
-total_time += MPI.Wtime() - cttime # Mesure le temps total de la boucle (ne devrait pas etre tres different de 'solve_time')
-
-prof = Profiler(None, box.comm_task)
-prof += solve_time
-prof += total_time
-for ope in op.values():
-    ope.finalize()
-    ope.get_profiling_info() # permet a l operateur de collecter les infos de son operateur discret
-    prof += ope.profiler # on ajoute les info de op a l objet prof
-for v in (velo, vorti):
-    prof += v.profiler  # On recupere aussi les info des variables
-prof.summarize() # on resume le profile fait des moyennes a travers les procesus
-print str(prof) # On affiche les resultats, on peut faire aussi prof.write()
diff --git a/trashed_examples/FlowAroundSphere_linearized.py b/trashed_examples/FlowAroundSphere_linearized.py
deleted file mode 100644
index 26d0004e2f82d2b52c145c2e323f58c8831a7fc2..0000000000000000000000000000000000000000
--- a/trashed_examples/FlowAroundSphere_linearized.py
+++ /dev/null
@@ -1,503 +0,0 @@
-#!/usr/bin/python
-
-"""
-Taylor Green 3D : see paper van Rees 2011.
-
-All parameters are set and defined in python module dataTG.
-
-"""
-
-from hysop import Box
-from hysop.f2py import fftw2py
-import numpy as np
-import cPickle
-#from scitools.NumPyDB import NumPyDB_cPickle as hysopPickle
-from hysop.fields.continuous import Field
-from hysop.fields.variable_parameter import VariableParameter
-from hysop.mpi.topology import Cartesian
-from hysop.operator.advection import Advection
-from hysop.operator.stretching import Stretching, \
-    StretchingLinearized
-from hysop.operator.dissip_filter import DissipFilter
-from hysop.operator.absorption_BC import AbsorptionBC
-from hysop.operator.poisson import Poisson
-from hysop.operator.diffusion import Diffusion
-from hysop.operator.adapt_timestep import AdaptTimeStep
-from hysop.operator.redistribute_intra import RedistributeIntra
-from hysop.operator.hdf_io import HDF_Writer
-from hysop.operator.energy_enstrophy import EnergyEnstrophy
-from hysop.operator.profiles import Profiles
-from hysop.operator.monitoringPoints import MonitoringPoints
-from hysop.operator.residual import Residual
-from hysop.problem.simulation import Simulation
-from hysop.methods_keys import Scales, TimeIntegrator, Interpolation,\
-    Remesh, Support, Splitting, dtCrit, SpaceDiscretisation
-from hysop.numerics.integrators.runge_kutta2 import RK2 as RK2
-from hysop.numerics.integrators.runge_kutta3 import RK3 as RK3
-from hysop.numerics.integrators.runge_kutta4 import RK4 as RK4
-from hysop.numerics.finite_differences import FD_C_4, FD_C_2, Filter_C_4
-from hysop.numerics.interpolation import Linear
-from hysop.numerics.remeshing import L6_4 as rmsh
-import hysop.tools.io_utils as io
-import hysop.tools.numpywrappers as npw
-from hysop.mpi import main_rank, MPI
-from hysop.tools.parameters import Discretization, IOParams
-
-print " ========= Start Navier-Stokes 3D (Taylor Green benchmark) ========="
-
-# ====== pi constant and trigonometric functions ======
-pi = np.pi
-cos = np.cos
-sin = np.sin
-
-# ====== Flow constants =======
-VISCOSITY = 1. / 300.
-
-# ======= Domain =======
-dim = 3
-#Nx = 129
-#Ny = Nz = 65
-Nx = 257
-Ny = Nz = 129
-#Nx = 513
-#Ny = Nz = 257
-#Nx = 1025
-#Ny = Nz = 513
-g = 2
-boxlength = npw.asrealarray([10.24, 5.12, 5.12])
-boxorigin = npw.asrealarray([-2.0, -2.56, -2.56])
-box = Box(length=boxlength, origin=boxorigin)
-
-# A global discretization with ghost points
-d3dg = Discretization([Nx, Ny, Nz], [g, g, g])
-# A global discretization, without ghost points
-d3d = Discretization([Nx, Ny, Nz])
-
-# ====== Sphere inside the domain ======
-RADIUS = 0.5
-pos = [0., 0., 0.]
-from hysop.domain.subsets import Sphere, HemiSphere
-sphere = Sphere(origin=pos, radius=RADIUS, parent=box)
-
-
-# ======= Function to set initial velocity =======
-def setZeroVel(res, x, y, z, t):
-    res[0][...] = 0.
-    res[1][...] = 0.
-    res[2][...] = 0.
-    return res
-
-
-# ======= Function to set initial vorticity =======
-def setZeroVort(res, x, y, z, t):
-    res[0][...] = 0.
-    res[1][...] = 0.
-    res[2][...] = 0.
-    return res
-
-#  ====== Time-dependant required-flowrate (Variable Parameter) ======
-def computeFlowrate(simu):
-    # === Time-dependant flow rate ===
-#    t = simu.tk
-#    Tstart = 3.0
-#    flowrate = np.zeros(3)
-#    flowrate[0] = uinf * box.length[1] * box.length[2]
-#    if t >= Tstart and t <= Tstart + 1.0:
-#        flowrate[1] = sin(pi * (t - Tstart)) * \
-#                      box.length[1] * box.length[2]
-    # === Constant flow rate ===
-    flowrate = np.zeros(3)
-#    flowrate[0] = uinf * box.length[1] * box.length[2]
-    return flowrate
-
-
-# ======= Fields =======
-# Base flow
-veloBF = Field(domain=box, formula=setZeroVel,
-               name='Velocity', is_vector=True)
-vortiBF = Field(domain=box, formula=setZeroVort,
-                name='Vorticity', is_vector=True)
-# Small perturbation whose stability is analyzed
-velo = Field(domain=box, formula=setZeroVel,
-               name='Velocity_fluc', is_vector=True)
-vorti = Field(domain=box, formula=setZeroVort,
-                name='Vorticity_fluc', is_vector=True)
-
-# ========= Simulation setup =========
-simu = Simulation(start=0.0, end=2000.0, time_step=0.005, max_iter=10000000)
-
-
-# Adaptative timestep method : dt = min(values(dtCrit))
-# where dtCrit is a list of criterions on which the computation
-# of the adaptative time step is based
-# ex : dtCrit = ['gradU', 'cfl', 'stretch'], means :
-# dt = min (dtAdv, dtCfl, dtStretch), where dtAdv is equal to LCFL / |gradU|
-# For dtAdv, the possible choices are the following:
-# 'vort' (infinite norm of vorticity) : dtAdv = LCFL / |vort|
-# 'gradU' (infinite norm of velocity gradient), dtAdv = LCFL / |gradU|
-# 'deform' (infinite norm of deformation tensor),
-# dtAdv = LCFL / (0.5(gradU + gradU^T))
-op = {}
-iop = IOParams("time_step")
-# Default topology (i.e. 3D, with ghosts)
-topo_with_ghosts = box.create_topology(d3dg)
-
-
-op['dtAdapt'] = AdaptTimeStep(velo, vorti, simulation=simu,
-                              discretization=topo_with_ghosts,
-                              method={TimeIntegrator: RK3,
-                                      SpaceDiscretisation: FD_C_4,
-                                      dtCrit: ['gradU', 'stretch']},
-                              io_params=iop,
-                              lcfl=0.125,
-                              cfl=0.5)
-
-op['advectionBridge'] = Advection(veloBF, vortiBF,
-                                  discretization=d3d,
-                                  method={Scales: 'p_M6',
-                                  Splitting: 'classic'}
-                                  )
-
-op['advection'] = Advection(veloBF, vorti,
-                            discretization=d3d,
-                            method={Scales: 'p_M6',
-                            Splitting: 'classic'}
-                            )
-
-op['stretchingLin'] = StretchingLinearized(velocity=velo,
-                                           vorticity=vorti,
-                                           velocity_BF=veloBF,
-                                           vorticity_BF=vortiBF,
-                                           discretization=topo_with_ghosts)
-
-
-op['artifDissip'] = DissipFilter(velo, vorti,
-                                 discretization=topo_with_ghosts,
-                                 method={SpaceDiscretisation: Filter_C_4})
-
-op['diffusion'] = Diffusion(viscosity=VISCOSITY, vorticity=vorti,
-                            discretization=d3d)
-
-rate = VariableParameter(formula=computeFlowrate)
-op['poisson'] = Poisson(velo, vorti, discretization=d3d, flowrate=rate)#, projection=1)
-
-op['poissonProj'] = Poisson(velo, vorti, discretization=d3d,
-                            flowrate=rate, projection=1)
-
-# ===== Discretization of computational operators ======
-for ope in op.values():
-    ope.discretize()
-
-topofft = op['poisson'].discreteFields[vorti].topology
-topoadvec = op['advection'].discreteFields[vorti].topology
-
-# =====  Smooth vorticity absorption at the outlet =====
-op['vort_absorption'] = AbsorptionBC(velo, vorti, discretization=topofft, 
-                                     req_flowrate=rate, 
-                                     x_coords_absorp=[7.24, 8.24])
-#                                     x_coords_absorp=[1.56, 2.56])
-op['vort_absorption'].discretize()
-
-# =====  Penalization of the vorticity on a sphere inside the domain =====
-from hysop.operator.penalization import PenalizeVorticity
-op['penalVort'] = PenalizeVorticity(velocity=velo, vorticity=vorti,
-                                    discretization=topo_with_ghosts,
-                                    obstacles=[sphere], coeff=1e8,
-                                    method={SpaceDiscretisation: FD_C_4})
-op['penalVort'].discretize()
-
-# ==== Operators to map data between the different computational operators ===
-# (i.e. between topologies)
-distr = {}
-distr['advec2str'] = RedistributeIntra(source=op['advectionBridge'],
-                                       target=op['stretchingLin'],
-                                       variables=[veloBF, vortiBF])
-
-distr['fft2str'] = RedistributeIntra(source=op['poisson'],
-                                     target=op['stretchingLin'],
-                                     variables=[velo, vorti])
-
-distr['str2fft'] = RedistributeIntra(source=op['stretchingLin'],
-                                     target=op['poisson'],
-                                     variables=[velo, vorti])
-
-# ========= Monitoring operators =========
-monitors = {}
-#iop = IOParams('fields', frequency=100)
-#monitors['writer'] = HDF_Writer(variables={velo: topofft, vorti: topofft},
-#                                io_params=iop)
-
-io_ener = IOParams('energy_enstrophy')
-monitors['energy'] = EnergyEnstrophy(velo, vorti, discretization=topofft,
-                                     io_params=io_ener, is_normalized=False)
-
-rk = 0
-if (0.0 in topofft.mesh.coords[2]):
-    rk = main_rank
-io_prof = IOParams('profile_X_axis', frequency=8400, io_leader=rk)
-monitors['profile'] = Profiles(velo, vorti, discretization=topofft,
-                               io_params=io_prof, prof_coords=[0.0, 0.0],
-                               direction=0, beginMeanComput=25.0)
-
-coordsMonit = [1.0, 0.0, 0.0]
-rk = 0
-if (coordsMonit[2] in topo_with_ghosts.mesh.coords[2]):
-    rk = main_rank
-print 'rk ... ', rk
-io_monit = IOParams('monit', frequency=1, io_leader=rk)
-monitors['monit'] = MonitoringPoints(velo, vorti,
-                                     discretization=topo_with_ghosts,
-                                     io_params=io_monit,
-                                     monitPt_coords=coordsMonit)
-
-io_resi = IOParams('residual')
-monitors['residual'] = Residual(vorti, discretization=topofft,
-                                io_params=io_resi)
-
-from hysop.domain.control_box import ControlBox
-from hysop.operator.drag_and_lift import MomentumForces, NocaForces
-ref_step = topo_with_ghosts.mesh.space_step
-#cbpos = npw.zeros(dim)
-#cblength = npw.zeros(dim)
-#cbpos[...] = boxorigin[...]
-#cbpos +=  15 * ref_step
-#cblength[...] = boxlength[...]
-#cblength -= 30 * ref_step
-#cb = ControlBox(parent=box, origin=cbpos, length=cblength)
-#coeffForce = 1. / (0.5 * uinf ** 2 * pi * RADIUS ** 2)
-
-#io_forces=IOParams('drag_and_lift_NocaII')
-#monitors['forcesNoca'] = NocaForces(velo, vorti,
-#                                    discretization=topo_with_ghosts,
-#                                    nu=VISCOSITY, 
-#                                    volume_of_control=cb,
-#                                    normalization=coeffForce,
-#                                    obstacles=[sphere], 
-#                                    io_params=io_forces)
-
-#io_forcesPenal=IOParams('drag_and_lift_Mom')
-#monitors['forcesMom'] = MomentumForces(velocity=velo, 
-#                                       discretization=topo_with_ghosts,
-#                                       normalization=coeffForce,
-#                                       obstacles=[sphere], 
-#                                       penalisation_coeff=[1e8],
-#                                       io_params=io_forcesPenal)
-
-#io_forcesPenal=IOParams('drag_and_lift_penal')
-#monitors['forcesPenal'] = DragAndLiftPenal(velo, vorti, coeffForce,
-#                                           discretization=topofft,
-#                                           obstacles=[sphere], factor=[1e8],
-#                                           io_params=io_forcesPenal)
-
-step_dir = ref_step[0]
-io_sliceXY = IOParams('sliceXY', frequency=1)
-thickSliceXY = ControlBox(parent=box, origin=[-2.0, -2.56, -2.0 * step_dir], 
-                          length=[10.24- step_dir, 5.12- step_dir, 4.0 * step_dir])
-#thickSliceXY = ControlBox(parent=box, origin=[-2.56, -2.56, -2.0 * step_dir], 
-#                          length=[5.12 - step_dir, 5.12 - step_dir, 4.0 * step_dir])
-monitors['writerSliceXY'] = HDF_Writer(variables={velo: topofft, vorti: topofft},
-                                      io_params=io_sliceXY, subset=thickSliceXY, 
-                                      xmfalways=True)
-
-io_sliceXZ = IOParams('sliceXZ', frequency=1)
-thickSliceXZ = ControlBox(parent=box, origin=[-2.0, -2.0 * step_dir, -2.56], 
-                          length=[10.24- step_dir, 4.0 * step_dir, 5.12- step_dir])
-monitors['writerSliceXZ'] = HDF_Writer(variables={velo: topofft, vorti: topofft},
-                                       io_params=io_sliceXZ, subset=thickSliceXZ, 
-                                       xmfalways=True)
-
-io_subBox = IOParams('subBox', frequency=5000)
-subBox = ControlBox(parent=box, origin=[-0.7, -2.0, -2.0], length=[8.0, 4.0, 4.0])
-monitors['writerSubBox'] = HDF_Writer(variables={velo: topofft, vorti: topofft},
-                                      io_params=io_subBox, subset=subBox, 
-                                      xmfalways=True)
-
-# ========= Setup for all declared operators/monitors =========
-time_setup = MPI.Wtime()
-for ope in op.values():
-    ope.setup()
-for ope in distr.values():
-    ope.setup()
-
-for monit in monitors.values():
-    monit.discretize()
-for monit in monitors.values():
-    monit.setup()
-
-print '[', main_rank, '] total time for setup:', MPI.Wtime() - time_setup
-
-# ========= Fields initialization =========
-# - initialize (veloBF, vortiBF) on topofft
-# - initialize (veloBF, vortiBF) and (velo, vort) on topostr
-# - penalize vorti from velo on topostr
-# - redistribute topostr --> topofft
-
-time_init = MPI.Wtime()
-ind = sphere.discretize(topofft)
-def initFields():
-    veloBF.initialize(topo=topofft)
-    vortiBF.initialize(topo=topofft)
-    veloBF.initialize(topo=topo_with_ghosts)
-    vortiBF.initialize(topo=topo_with_ghosts)
-    velo.initialize(topo=topo_with_ghosts)
-    vorti.initialize(topo=topo_with_ghosts)
-    op['penalVort'].apply(simu)
-    distr['str2fft'].apply(simu)
-    distr['str2fft'].wait()
-
-# /!\ CAREFULL /!\ : INITIALIZATION COMMENTED OUT !!!!!!!!!!!!!!!!!
-#initFields()
-print '[', main_rank, '] total time for init :', MPI.Wtime() - time_init
-
-fullseq = []
-
-def run(sequence):
-    print 'norm vort perturb 1                     :', vorti.norm(topofft)
-    op['vort_absorption'].apply(simu)
-    print 'norm vort perturb 2 (apres absorption  ):', vorti.norm(topofft)
-    op['poisson'].apply(simu)               # Poisson + correction
-    print 'norm vort perturb 3 (apres Poisson     ):', vorti.norm(topofft)
-    #    monitors['forcesMom'].apply(simu)       # Forces Heloise
-    distr['fft2str'].apply(simu)
-    distr['fft2str'].wait()
-    op['penalVort'].apply(simu)             # Vorticity penalization
-    print 'norm vort perturb 4 (apres penalisation):', vorti.norm(topo_with_ghosts)
-    op['stretchingLin'].apply(simu)         # Stretching linearized
-    print 'norm vort perturb 5 (apres stretching  ):', vorti.norm(topo_with_ghosts)
-    op['artifDissip'].apply(simu)           # Dissipation filter
-    print 'norm vort perturb 6 (apres dissp filter):', vorti.norm(topo_with_ghosts)
-    distr['str2fft'].apply(simu)
-    distr['str2fft'].wait()
-    op['diffusion'].apply(simu)             # Diffusion
-    print 'norm vort perturb 7 (apres diffusion   ):', vorti.norm(topofft)
-    op['advection'].apply(simu)      # Advection
-    print 'norm vort perturb 8 (apres advection   ):', vorti.norm(topofft)
-
-#    monitors['writerSliceXY'].apply(simu)
-#    monitors['writerSliceXZ'].apply(simu)
-#    monitors['writerSubBox'].apply(simu)
-    monitors['energy'].apply(simu)          # Energy/enstrophy
-    monitors['profile'].apply(simu)         # Profile
-    monitors['residual'].apply(simu)        # Vorticity residual
-    distr['fft2str'].apply(simu)
-    distr['fft2str'].wait()
-    monitors['monit'].apply(simu)           # Monitoring points in the wake
-#    op['dtAdapt'].apply(simu)               # Update timestep
-#    op['dtAdapt'].wait()
-
-# ==== Serialize the simulation data of the problem to a "restart" file ====
-def dump(filename):
-    """
-    Serialize some data of the problem to file
-    (only data required for a proper restart, namely fields in self.input
-    and simulation).
-    @param filename : prefix for output file. Real name = filename_rk_N,
-    N being current process number. If None use default value from problem
-    parameters (self.filename)
-    """
-    if filename is not None:
-        filedump = filename + '_rk_' + str(main_rank)
-    db = open(filedump, 'wb')
-    cPickle.dump(simu, db)
-
-# ====== Load the simulation data of the problem from a "restart" file ======
-def restart(filename):
-    """
-    Load serialized data to restart from a previous state.
-    self.input variables and simulation are loaded.
-    @param  filename : prefix for downloaded file.
-    Real name = filename_rk_N, N being current process number.
-    If None use default value from problem
-    parameters (self.filename)
-    """
-    if filename is not None:
-        filedump = filename + '_rk_' + str(main_rank)
-    db = open(filedump, 'r')
-    simu = cPickle.load(db)
-#    simu.start = simu.time - simu.time_step
-#    ite = simu.current_iteration
-    simu.start = 0.0
-    simu.time_step = 0.005
-    simu.initialize()
-#    simu.current_iteration = ite
-    print 'simu', simu
-    print ("load ...", filename)
-    return simu
-
-seq = fullseq
-
-simu.initialize()
-doDump = True
-doRestart = True
-dumpFreq = 8000
-io_default=IOParams('restart')
-dump_filename = io.Writer(io_params=io_default).filename
-#===== Restart (if needed) =====
-if doRestart:
-    # Fields initialization
-    veloBF.initialize(topo=topofft)
-    vortiBF.initialize(topo=topofft)
-    veloBF.initialize(topo=topo_with_ghosts)
-    vortiBF.initialize(topo=topo_with_ghosts)
-    velo.initialize(topo=topofft)
-    vorti.initialize(topo=topofft)
-    # Load data from dumped files --> base flow
-    simu = restart(dump_filename)
-    iop_vel = IOParams('velo_00000.h5')
-    veloBF.hdf_load(topofft, io_params=iop_vel)
-    iop_vort = IOParams('vorti_00000.h5')
-    vortiBF.hdf_load(topofft, io_params=iop_vort)
-    # Set up for monitors and redistribute
-    for ope in distr.values():
-        ope.setup()
-    for monit in monitors.values():
-        monit.setup()
-    # vortBF projection + Poisson
-    op['poissonProj'].apply(simu)
-    # Initialize velo and vorti to a small perturbation
-    # equal to baseFlow * 1e-8
-    # (cf Florian Guiho's thesis p.30 eq. (2.13))
-    for d in xrange(dim):
-        velo.discreteFields[topofft].data[d][...] = \
-            veloBF.discreteFields[topofft].data[d][...].copy()
-        velo.discreteFields[topofft].data[d][...] *= 1e-8
-        vorti.discreteFields[topofft].data[d][...] = \
-            vortiBF.discreteFields[topofft].data[d][...].copy()
-        vorti.discreteFields[topofft].data[d][...] *= 1e-8
-    # Redistribute veloBF and vortBF on topoGhosts
-    distr['advec2str'].apply(simu)
-    distr['advec2str'].wait()
-
-    print '======= INITIALIZATION ======='
-    print 'vortiBF', vortiBF.norm(topofft)
-#    print 'vortiBF norm Ghosts', vortiBF.norm(topo_with_ghosts)
-    print 'vorti', vorti.norm(topofft)
-
-    print 'veloBF', veloBF.norm(topofft)
-#    print 'veloBF norm Ghosts', veloBF.norm(topo_with_ghosts)
-    print 'velo', velo.norm(topofft)
-
-
-# ======= Time loop =======
-time_run = MPI.Wtime()
-while not simu.isOver:
-    if topofft.rank == 0:
-        simu.printState()
-    run(seq)
-    simu.advance()
-    testdump = simu.current_iteration % dumpFreq is 0
-    if doDump and testdump:
-        print 'dump ...'
-        dump(dump_filename)
-        iop_vel = IOParams('veloFluc')
-        velo.hdf_dump(topofft, io_params=iop_vel)
-        iop_vort = IOParams('vortiFluc')
-        vorti.hdf_dump(topofft, io_params=iop_vort)
-print '[', main_rank, '] total time for run :', MPI.Wtime() - time_run
-
-# ======= Finalize =======
-fftw2py.clean_fftw_solver(box.dimension)
-for ope in distr.values():
-    ope.finalize()
-for monit in monitors.values():
-    monit.finalize()
diff --git a/trashed_examples/FlowAroundSphere_pressure.py b/trashed_examples/FlowAroundSphere_pressure.py
deleted file mode 100644
index 4bc96784963b7aab30dcc85e44a1753f1305ea77..0000000000000000000000000000000000000000
--- a/trashed_examples/FlowAroundSphere_pressure.py
+++ /dev/null
@@ -1,434 +0,0 @@
-#!/usr/bin/python
-
-"""
-Taylor Green 3D : see paper van Rees 2011.
-
-All parameters are set and defined in python module dataTG.
-
-"""
-
-from hysop import Box
-from hysop.f2py import fftw2py
-import numpy as np
-from hysop.fields.continuous import Field
-from hysop.fields.variable_parameter import VariableParameter
-from hysop.mpi.topology import Cartesian
-from hysop.operator.advection import Advection
-from hysop.operator.stretching import Stretching
-from hysop.operator.absorption_BC import AbsorptionBC
-from hysop.operator.poisson import Poisson
-from hysop.operator.diffusion import Diffusion
-from hysop.operator.adapt_timestep import AdaptTimeStep
-from hysop.operator.differential import DivAdvection
-from hysop.operator.redistribute_intra import RedistributeIntra
-from hysop.operator.hdf_io import HDF_Writer
-from hysop.operator.energy_enstrophy import EnergyEnstrophy
-from hysop.problem.simulation import Simulation
-from hysop.methods_keys import Scales, TimeIntegrator, Interpolation,\
-    Remesh, Support, Splitting, dtCrit, SpaceDiscretisation, \
-    GhostUpdate, Formulation
-from hysop.numerics.integrators.runge_kutta2 import RK2 as RK2
-from hysop.numerics.integrators.runge_kutta3 import RK3 as RK3
-from hysop.numerics.integrators.runge_kutta4 import RK4 as RK4
-from hysop.numerics.finite_differences import FD_C_4, FD_C_2
-from hysop.numerics.interpolation import Linear
-from hysop.numerics.remeshing import L6_4 as rmsh
-import hysop.tools.io_utils as io
-import hysop.tools.numpywrappers as npw
-from hysop.mpi import main_rank, MPI
-from hysop.tools.parameters import Discretization, IOParams
-
-print " ========= Start Navier-Stokes 3D (Taylor Green benchmark) ========="
-
-# ====== pi constant and trigonometric functions ======
-pi = np.pi
-cos = np.cos
-sin = np.sin
-
-# ====== Flow constants =======
-uinf = 1.0
-VISCOSITY = 1. / 300.
-
-# ======= Domain =======
-dim = 3
-#Nx = 513
-#Ny = Nz = 257
-Nx = 257
-Ny = Nz = 129
-g = 2
-boxlength = npw.asrealarray([10.24, 5.12, 5.12])
-boxorigin = npw.asrealarray([-2.0, -2.56, -2.56])
-box = Box(length=boxlength, origin=boxorigin)
-
-# A global discretization with ghost points
-d3dg = Discretization([Nx, Ny, Nz], [g, g, g])
-# A global discretization, without ghost points
-d3d = Discretization([Nx, Ny, Nz])
-
-# ====== Sphere inside the domain ======
-RADIUS = 0.5
-pos = [0., 0., 0.]
-from hysop.domain.subsets.sphere import Sphere, HemiSphere
-#sphere = Sphere(origin=pos, radius=RADIUS, parent=box)
-sphere = HemiSphere(origin=pos, radius=RADIUS, parent=box)
-
-
-# ======= Function to compute initial velocity  =======
-def computeVel(res, x, y, z, t):
-    res[0][...] = uinf
-    res[1][...] = 0.
-    res[2][...] = 0.
-    return res
-
-
-# ======= Function to compute initial vorticity =======
-def computeVort(res, x, y, z, t):
-    res[0][...] = 0.
-    res[1][...] = 0.
-    res[2][...] = 0.
-    return res
-
-# =======  Function to compute initial pressure ======= 
-def computePressure(res, x, y, z, t):
-    res[0][...] = 0.
-    return res
-
-#  ====== Time-dependant required-flowrate (Variable Parameter) ======
-def computeFlowrate(simu):
-    # === Time-dependant flow rate ===
-    t = simu.tk
-    Tstart = 3.0
-    flowrate = np.zeros(3)
-    flowrate[0] = uinf * box.length[1] * box.length[2]
-    if t >= Tstart and t <= Tstart + 1.0:
-        flowrate[1] = sin(pi * (t - Tstart)) * \
-                      box.length[1] * box.length[2]
-    # === Constant flow rate ===
-    #    flowrate = np.zeros(3)
-    #    flowrate[0] = uinf * box.length[1] * box.length[2]
-    return flowrate
-
-
-# ======= Fields =======
-velo = Field(domain=box, formula=computeVel,
-             name='Velocity', is_vector=True)
-vorti = Field(domain=box, formula=computeVort,
-              name='Vorticity', is_vector=True)
-pressure = Field(domain=box, formula=computePressure,
-                 name='Pressure')
-
-# ========= Simulation setup =========
-simu = Simulation(start=0.0, end=80.0, time_step=0.0125, max_iter=10000000)
-
-
-# Adaptative timestep method : dt = min(values(dtCrit))
-# where dtCrit is a list of criterions on which the computation
-# of the adaptative time step is based
-# ex : dtCrit = ['gradU', 'cfl', 'stretch'], means :
-# dt = min (dtAdv, dtCfl, dtStretch), where dtAdv is equal to LCFL / |gradU|
-# For dtAdv, the possible choices are the following:
-# 'vort' (infinite norm of vorticity) : dtAdv = LCFL / |vort|
-# 'gradU' (infinite norm of velocity gradient), dtAdv = LCFL / |gradU|
-# 'deform' (infinite norm of deformation tensor),
-# dtAdv = LCFL / (0.5(gradU + gradU^T))
-op = {}
-iop = IOParams("time_step")
-# Default topology (i.e. 3D, with ghosts)
-topo_with_ghosts = box.create_topology(d3dg)
-
-
-op['dtAdapt'] = AdaptTimeStep(velo, vorti, simulation=simu,
-                              discretization=topo_with_ghosts,
-                              method={TimeIntegrator: RK3,
-                                      SpaceDiscretisation: FD_C_4,
-                                      dtCrit: ['gradU', 'stretch']},
-                              io_params=iop,
-                              lcfl=0.125,
-                              cfl=0.5)
-
-op['advection'] = Advection(velo, vorti,
-                            discretization=d3d,
-                            method={Scales: 'p_M6',
-                                    Splitting: 'classic'}
-                            )
-
-op['stretching'] = Stretching(velo, vorti,
-                              discretization=topo_with_ghosts)
-
-op['diffusion'] = Diffusion(viscosity=VISCOSITY, vorticity=vorti,
-                            discretization=d3d)
-
-rate = VariableParameter(formula=computeFlowrate)
-op['poisson'] = Poisson(velo, vorti, discretization=d3d, flowrate=rate)
-
-op['rhsPrsPoisson'] = DivAdvection(velo, pressure,
-                                   discretization=topo_with_ghosts)
-
-op['poissonPressure'] = Poisson(pressure, pressure, discretization=d3d,
-                                method={SpaceDiscretisation: 'fftw',
-                                        GhostUpdate: True,
-                                        Formulation: 'pressure'})
-
-# ===== Discretization of computational operators ======
-for ope in op.values():
-    ope.discretize()
-
-topofft = op['poisson'].discreteFields[vorti].topology
-topoadvec = op['advection'].discreteFields[vorti].topology
-
-# =====  Smooth vorticity absorption at the outlet =====
-op['vort_absorption'] = AbsorptionBC(velo, vorti, discretization=topofft, 
-                                     req_flowrate=rate, 
-                                     x_coords_absorp=[7.24, 8.24])
-#                                     x_coords_absorp=[1.56, 2.56])
-op['vort_absorption'].discretize()
-
-# =====  Penalization of the vorticity on a sphere inside the domain =====
-from hysop.operator.penalize_vorticity import PenalizeVorticity
-op['penalVort'] = PenalizeVorticity(velocity=velo, vorticity=vorti,
-                                    discretization=topo_with_ghosts,
-                                    obstacles=[sphere], coeff=1e8,
-                                    method={SpaceDiscretisation: FD_C_4})
-op['penalVort'].discretize()
-
-
-# ==== Operators to map data between the different computational operators ===
-# (i.e. between topologies)
-distr = {}
-distr['fft2str'] = RedistributeIntra(source=op['poisson'],
-                                     target=op['stretching'],
-                                     variables=[velo, vorti])
-distr['str2fft'] = RedistributeIntra(source=op['stretching'],
-                                     target=op['poisson'],
-                                     variables=[velo, vorti])
-distr['fft2advec'] = RedistributeIntra(source=op['poisson'],
-                                       target=op['advection'],
-                                       variables=[velo, vorti])
-distr['advec2fft'] = RedistributeIntra(source=op['advection'],
-                                       target=op['poisson'],
-                                       variables=[velo, vorti])
-distr['str2prs'] = RedistributeIntra(source=op['rhsPrsPoisson'],
-                                       target=op['poissonPressure'],
-                                       variables=[pressure])
-# ========= Monitoring operators =========
-monitors = {}
-iop = IOParams('fields', frequency=100)
-monitors['writer'] = HDF_Writer(variables={velo: topofft, vorti: topofft},
-                                io_params=iop)
-
-io_ener = IOParams('energy_enstrophy')
-monitors['energy'] = EnergyEnstrophy(velo, vorti, discretization=topofft,
-                                     io_params=io_ener, is_normalized=False)
-
-from hysop.domain.subsets.control_box import ControlBox
-from hysop.operator.drag_and_lift import MomentumForces, NocaForces
-ref_step = topo_with_ghosts.mesh.space_step
-cbpos = npw.zeros(dim)
-cblength = npw.zeros(dim)
-cbpos[...] = boxorigin[...]
-cbpos +=  15 * ref_step
-cblength[...] = boxlength[...]
-cblength -= 30 * ref_step
-cb = ControlBox(parent=box, origin=cbpos, length=cblength)
-coeffForce = 1. / (0.5 * uinf ** 2 * pi * RADIUS ** 2)
-
-io_forces=IOParams('drag_and_lift_NocaII')
-#monitors['forcesNoca'] = NocaForces(velo, vorti, 
-#                                    discretization=topo_with_ghosts,
-#                                    nu=VISCOSITY, 
-#                                    volume_of_control=cb,
-#                                    normalization=coeffForce,
-#                                    obstacles=[sphere], 
-#                                    io_params=io_forces)
-
-io_forcesPenal=IOParams('drag_and_lift_Mom')
-monitors['forcesMom'] = MomentumForces(velocity=velo, 
-                                       discretization=topo_with_ghosts,
-                                       normalization=coeffForce,
-                                       obstacles=[sphere], 
-                                       penalisation_coeff=[1e8],
-                                       io_params=io_forcesPenal)
-
-#io_forcesPenal=IOParams('drag_and_lift_penal')
-#monitors['forcesPenal'] = DragAndLiftPenal(velo, vorti, coeffForce,
-#                                           discretization=topofft,
-#                                           obstacles=[sphere], factor=[1e8],
-#                                           io_params=io_forcesPenal)
-
-step_dir = ref_step[0]
-io_sliceXY = IOParams('sliceXY', frequency=5)
-thickSliceXY = ControlBox(parent=box, origin=[-2.0, -2.56, -2.0 * step_dir], 
-                          length=[10.24- step_dir, 5.12- step_dir, 4.0 * step_dir])
-#thickSliceXY = ControlBox(parent=box, origin=[-2.56, -2.56, -2.0 * step_dir], 
-#                          length=[5.12 - step_dir, 5.12 - step_dir, 4.0 * step_dir])
-monitors['writerSliceXY'] = HDF_Writer(variables={velo: topofft, vorti: topofft},
-                                      io_params=io_sliceXY, subset=thickSliceXY, 
-                                      xmfalways=True)
-
-#io_sliceXZ = IOParams('sliceXZ', frequency=2000)
-#thickSliceXZ = ControlBox(box, origin=[-2.0, -2.0 * step_dir, -2.56], 
-#                          lengths=[10.24, 4.0 * step_dir, 5.12])
-#monitors['writerSliceXZ'] = HDF_Writer(variables={velo: topofft, vorti: topofft},
-#                                       io_params=io_sliceXZ, subset=thickSliceXZ, 
-#                                       xmfalways=True)
-
-io_slicePrs = IOParams('slicePrs', frequency=5)
-thickSlicePrs = ControlBox(parent=box, origin=[-2.0, -2.56, -2.0 * step_dir], 
-                           length=[10.24- step_dir, 5.12- step_dir, 4.0 * step_dir])
-monitors['writerSlicePrs'] = HDF_Writer(variables={pressure: topofft},
-                                        io_params=io_slicePrs, subset=thickSlicePrs, 
-                                        xmfalways=True)
-
-#io_subBox = IOParams('subBox', frequency=2000)
-#subBox = ControlBox(box, origin=[-0.7, -2.0, -2.0], lengths=[8.0, 4.0, 4.0])
-#monitors['writerSubBox'] = HDF_Writer(variables={velo: topofft, vorti: topofft},
-#                                      io_params=io_subBox, subset=subBox, 
-#                                      xmfalways=True)
-
-# ========= Setup for all declared operators/monitors =========
-time_setup = MPI.Wtime()
-for ope in op.values():
-    ope.setup()
-for ope in distr.values():
-    ope.setup()
-for monit in monitors.values():
-    monit.discretize()
-for monit in monitors.values():
-    monit.setup()
-
-print '[', main_rank, '] total time for setup:', MPI.Wtime() - time_setup
-
-# ========= Fields initialization =========
-# - initialize velo + vort on topostr
-# - penalize vorticity
-# - redistribute topostr --> topofft
-
-time_init = MPI.Wtime()
-ind = sphere.discretize(topofft)
-def initFields():
-    velo.initialize(topo=topo_with_ghosts)
-    vorti.initialize(topo=topo_with_ghosts)
-    pressure.initialize(topo=topo_with_ghosts)
-    op['penalVort'].apply(simu)
-    distr['str2fft'].apply(simu)
-    distr['str2fft'].wait()
-
-initFields()
-print '[', main_rank, '] total time for init :', MPI.Wtime() - time_init
-
-fullseq = []
-
-def run(sequence):
-    op['vort_absorption'].apply(simu)
-    op['poisson'].apply(simu)               # Poisson + correction
-    monitors['forcesMom'].apply(simu)       # Forces Heloise
-    distr['fft2str'].apply(simu)
-    distr['fft2str'].wait()
-    op['penalVort'].apply(simu)             # Vorticity penalization
-#    distr['str2fft'].apply(simu)
-#    distr['str2fft'].wait()
-#    op['poisson'].apply(simu)
-#    distr['fft2str'].apply(simu)
-#    distr['fft2str'].wait()
-    op['stretching'].apply(simu)            # Stretching
-    op['rhsPrsPoisson'].apply(simu)         # RHS computation in Pressure Poisson eq
-#    monitors['forcesNoca'].apply(simu)      # Forces Noca
-    distr['str2fft'].apply(simu)
-    distr['str2fft'].wait()
-    distr['str2prs'].apply(simu)
-    distr['str2prs'].wait()
-    op['diffusion'].apply(simu)             # Diffusion
-    op['poissonPressure'].apply(simu)       # Pressure Poisson
-    distr['fft2advec'].apply(simu)
-    distr['fft2advec'].wait()
-    op['advection'].apply(simu)             # Advection (scales)
-    distr['advec2fft'].apply(simu)
-    distr['advec2fft'].wait()
-    monitors['writerSliceXY'].apply(simu)
-#    monitors['writerSliceXZ'].apply(simu)
-    monitors['writerSlicePrs'].apply(simu)
-#    monitors['writerSubBox'].apply(simu)
-    monitors['energy'].apply(simu)          # Energy/enstrophy
-    distr['fft2str'].apply(simu)
-    distr['fft2str'].wait()
-    op['dtAdapt'].apply(simu)               # Update timestep
-    op['dtAdapt'].wait()
-
-# ==== Serialize some data of the problem to a "restart" file ====
-# def dump(filename):
-#      """
-#      Serialize some data of the problem to file
-#      (only data required for a proper restart, namely fields in self.input
-#      and simulation).
-#      @param filename : prefix for output file. Real name = filename_rk_N,
-#      N being current process number. If None use default value from problem
-#      parameters (self.filename)
-#      """
-#      if filename is not None:
-#          filename = filename
-#         filedump = filename + '_rk_' + str(main_rank)
-#     db = parmesPickle(filedump, mode='store')
-#     db.dump(simu, 'simulation')
-#     velo.dump(filename, mode='append')
-#     vorti.dump(filename, mode='append')
-
-# ## ====== Load some data of the problem from a "restart" file ======
-# def restart(filename):
-#     """
-#     Load serialized data to restart from a previous state.
-#     self.input variables and simulation are loaded.
-#     @param  filename : prefix for downloaded file.
-#     Real name = filename_rk_N, N being current process number.
-#     If None use default value from problem
-#     parameters (self.filename)
-#     """
-#     if filename is not None:
-#         filename = filename
-#         filedump = filename + '_rk_' + str(main_rank)
-#     db = parmesPickle(filedump, mode='load')
-#     simu = db.load('simulation')[0]
-#     simu.start = simu.time - simu.time_step
-#     ite = simu.current_iteration
-#     simu.initialize()
-#     simu.current_iteration = ite
-#     print 'simu', simu
-#     print ("load ...", filename)
-#     velo.load(filename)
-#     vorti.load(filename)
-#     return simu
-
-seq = fullseq
-
-simu.initialize()
-#doDump = False
-#doRestart = False
-#dumpFreq = 5000
-#io_default={"filename":'restart'}
-#dump_filename = io.Writer(params=io_default).filename
-#===== Restart (if needed) =====
-# if doRestart:
-#     simu = restart(dump_filename)
-#     # Set up for monitors and redistribute
-#     for ope in distr.values():
-#         ope.setUp()
-#     for monit in monitors.values():
-#         monit.setUp()
-
-# ======= Time loop =======
-time_run = MPI.Wtime()
-while not simu.isOver:
-    if topofft.rank == 0:
-        simu.printState()
-    run(seq)
-    simu.advance()
-# #     testdump = simu.current_iteration % dumpFreq is 0
-# #     if doDump and testdump:
-# #         dump(dump_filename)
-print '[', main_rank, '] total time for run :', MPI.Wtime() - time_run
-
-# ======= Finalize =======
-fftw2py.clean_fftw_solver(box.dimension)
-for ope in distr.values():
-    ope.finalize()
-for monit in monitors.values():
-    monit.finalize()
diff --git a/trashed_examples/LevelSet2D/levelSet2D.cl b/trashed_examples/LevelSet2D/levelSet2D.cl
deleted file mode 100644
index 9d41f627f668b2385a2acb4f1ffa429721c97570..0000000000000000000000000000000000000000
--- a/trashed_examples/LevelSet2D/levelSet2D.cl
+++ /dev/null
@@ -1,39 +0,0 @@
-__kernel void initScalar(__global float* scalar,
-			 float4 minPos,
-			 float4 size,
-			 float t)
-{
-  uint gidX = get_global_id(0);
-  uint gidY = get_global_id(1);
-  uint i;
-  float2 pos, center=(float2)(0.5, 0.75);
-  for(i=gidX; i<NB_X; i+=WI_NB)
-    {
-      pos = minPos.xy + (float2)(i*size.x, gidY*size.y);
-      scalar[i+gidY*NB_X] = ((distance(pos, center)<0.15) ? 1.0 : 0.0);
-    }
-}
-
-#define PI (float) acos(-1.0)
-__kernel void initVelocity(__global float* veloX,__global float* veloY,
-			       float4 minPos,
-			       float4 size,
-			       float t)
-{
-  uint gidX = get_global_id(0);
-  uint gidY = get_global_id(1);
-  uint i;
-  float pix, piy;
-  /** Time dependant field */
-  float time_term = cos(t*PI/3.0);
-  /** Constant field
-      float time_term = cos(t*PI/3.0); */
-
-  piy = (minPos.y + gidY*size.y) * PI;
-  for(i=gidX; i<V_NB_X; i+=V_WI_NB)
-    {
-      pix = (minPos.x + i*size.x) * PI;
-      veloX[i+gidY*(V_NB_X)] = -sin(pix) * sin(pix) * sin(piy * 2) * time_term;
-      veloY[i*V_NB_Y+gidY] = sin(piy) *sin(piy) * sin(pix * 2) * time_term;
-    }
-}
diff --git a/trashed_examples/LevelSet2D/levelSet2D.py b/trashed_examples/LevelSet2D/levelSet2D.py
deleted file mode 100644
index 12ca3ecdbf06ed472a7f22b61f28d72732b3536f..0000000000000000000000000000000000000000
--- a/trashed_examples/LevelSet2D/levelSet2D.py
+++ /dev/null
@@ -1,124 +0,0 @@
-#!/usr/bin/env python
-import hysop
-import hysop.gpu
-hysop.gpu.CL_PROFILE = True
-from hysop.constants import np, HDF5
-from hysop.mpi.main_var import MPI
-from hysop.tools.parameters import Discretization, IOParams
-from hysop.methods_keys import TimeIntegrator, Interpolation, Remesh,\
-    Support, Splitting, MultiScale, MultiScale, ExtraArgs
-from hysop.numerics.integrators.runge_kutta2 import RK2 as RK
-from hysop.numerics.remeshing import L2_1, L4_2, L2_1, L4_4, L8_4, L6_4, Linear
-from hysop.operator.advection import Advection
-from hysop.operator.analytic import Analytic
-from hysop.operator.custom import CustomMonitor
-from hysop.operator.hdf_io import HDF_Writer
-from hysop.gpu.gpu_transfer import DataTransfer
-from hysop.problem.problem import Problem
-sin, cos, pi, sqrt = np.sin, np.cos, np.pi, np.sqrt
-
-
-def vitesse(res, x, y, t=0.):
-    res[0][...] = -np.sin(x * np.pi) ** 2 * np.sin(y * np.pi * 2) * \
-        np.cos(t * np.pi / 3.)
-    res[1][...] = np.sin(y * np.pi) ** 2 * np.sin(x * np.pi * 2) * \
-        np.cos(t * np.pi / 3.)
-    return res
-
-
-def scalaire(res, x, y, t=0.):
-    rr = np.sqrt((x - 0.5) ** 2 + (y - 0.75) ** 2)
-    res[0][...] = 0.
-    res[0][rr < 0.15] = 1.
-    return res
-
-
-def volume(_simu, var):
-    return np.sum(var[0].data[0] > 0.5) * \
-        np.prod(var[0].topology.mesh.space_step)
-
-
-# Parameters
-dim = 2
-boxLength = [1., 1.]
-boxMin = [0., 0.]
-nbElem_v = [1025] * dim
-nbElem_s = [1025] * dim
-dv = Discretization(nbElem_v)
-ds = Discretization(nbElem_s)
-
-time_step = 0.35 / (2. * np.pi)
-finalTime = 12. - time_step
-outputModulo = 20
-simu = hysop.Simulation(start=0.0, end=finalTime,
-                        time_step=time_step, max_iter=100000)
-
-# Domain
-box = hysop.Box(length=boxLength, origin=boxMin)
-
-# Fields
-scal = hysop.Field(domain=box, name='Scalar')  # formula=scalaire
-velo = hysop.Field(domain=box, name='Velocity',
-                   is_vector=True)  # formula=scalaire
-# Operators
-advec_method = {TimeIntegrator: RK,
-                Interpolation: Linear,
-                Remesh: L2_1,
-                Support: 'gpu_2k',
-                Splitting: 'o2',
-                MultiScale: Linear}
-advec_method[ExtraArgs] = {'user_src': ['./levelSet2D.cl']}
-# advec_method = {Scales: 'p_64', MultiScale: 'L4_4'}
-
-advec = Advection(velo,
-                  discretization=dv,
-                  variables={scal: ds},
-                  method=advec_method)
-advec.discretize()
-
-# Get topologies from operator
-topo_v = advec.advec_dir[0].discreteFields[velo].topology
-topo_s = advec.advec_dir[0].discreteFields[scal].topology
-
-velocity = Analytic(variables={velo: topo_v},
-                    method={Support: 'gpu'})
-
-volume_m = CustomMonitor(function=volume, res_shape=1,
-                         variables={scal: topo_s},
-                         io_params=IOParams(filename="volume.dat"))
-p = HDF_Writer(variables={scal: topo_s},
-               io_params=IOParams(frequency=outputModulo,
-                                  filename="levelset",
-                                  fileformat=HDF5))
-# ToHost frequency is set to 1 since the volume computation
-# is performed at each iteration.
-toHost = DataTransfer(source=topo_s, target=p,
-                      variables={scal: topo_s},
-                      freq=1,
-                      run_till=[p, volume_m])
-
-pb = Problem([velocity, advec, toHost, p, volume_m],
-             simu, dumpFreq=-1)
-
-pb.setup()
-scal.initialize(topo=topo_s)
-velo.initialize(topo=topo_v)
-
-scal.discreteFields[topo_s].toHost()
-scal.discreteFields[topo_s].wait()
-volume_m.apply(simu)
-p.apply(simu)
-
-ctime = MPI.Wtime()
-pb.solve()
-print MPI.Wtime() - ctime
-
-simu.finalize()
-scal.discreteFields[topo_s].toHost()
-scal.discreteFields[topo_s].wait()
-p.apply(simu)
-volume_m.apply(simu)
-
-pb.finalize()
-
-print pb.profiler
diff --git a/trashed_examples/LevelSet3D/levelSet3D.cl b/trashed_examples/LevelSet3D/levelSet3D.cl
deleted file mode 100644
index 2b2960be5f7936bf1cc6db9e4084601974d531b3..0000000000000000000000000000000000000000
--- a/trashed_examples/LevelSet3D/levelSet3D.cl
+++ /dev/null
@@ -1,47 +0,0 @@
-__kernel void initScalar(__global float* scalar,
-			 float4 minPos,
-			 float4 size,
-			 float t)
-{
-  uint gidX = get_global_id(0);
-  uint gidY = get_global_id(1);
-  uint gidZ = get_global_id(2);
-  uint i;
-  float4 pos, center=(float4)(0.35, 0.35, 0.35, 0.0);
-  for(i=gidX; i<NB_X; i+=WI_NB)
-    {
-      pos = (float4)(i*size.x + minPos.x, gidY*size.y + minPos.y, gidZ*size.z + minPos.z, 0.0);
-      scalar[i+gidY*NB_X+gidZ*NB_X*NB_Y] = ((distance(pos, center)<0.15) ? 1.0 : 0.0);
-    }
-}
-
-#define PI (float) acos(-1.0)
-__kernel void initVelocity(__global float* veloX,
-			   __global float* veloY,
-			   __global float* veloZ,
-			   float4 minPos,
-			   float4 size,
-			   float t)
-{
-  uint gidX = get_global_id(0);
-  uint gidY = get_global_id(1);
-  uint gidZ = get_global_id(2);
-  uint i;
-  float pix,piy,piz;
-  /** Time dependant field */
-  float time_term = cos(t*PI/3.0);
-  /** Constant field
-      float time_term = cos(t*PI/3.0); */
-  piy = (minPos.y + gidY*size.y) * PI;
-  piz = (minPos.z + gidZ*size.z) * PI;
-
-  for(i=gidX; i<V_NB_X; i+=V_WI_NB)
-    {
-      pix = (minPos.x + i*size.x) * PI;
-
-      veloX[i + gidY*V_NB_X + gidZ*V_NB_X*V_NB_Y] = 2.0 * sin(pix)*sin(pix)*sin(2.0*piy)*sin(2.0*piz)*time_term;
-      veloY[gidY + i*V_NB_Y + gidZ*V_NB_Y*V_NB_X] = -sin(2.0*pix)*sin(piy)*sin(piy)*sin(2.0*piz)*time_term;
-      veloZ[gidZ + i*V_NB_Z + gidY*V_NB_Z*V_NB_X] = -sin(2.0*pix)*sin(2.0*piy)*sin(piz)*sin(piz)*time_term;
-    }
-}
-
diff --git a/trashed_examples/LevelSet3D/levelSet3D.py b/trashed_examples/LevelSet3D/levelSet3D.py
deleted file mode 100644
index ef4b982ed43346ca34f9816921c242ebeb1750cb..0000000000000000000000000000000000000000
--- a/trashed_examples/LevelSet3D/levelSet3D.py
+++ /dev/null
@@ -1,136 +0,0 @@
-#!/usr/bin/env python
-import hysop
-import hysop.gpu
-hysop.gpu.CL_PROFILE = True
-from hysop.constants import np, HDF5
-from hysop.mpi.main_var import MPI, main_size, main_rank, main_comm
-from hysop.tools.parameters import Discretization, IOParams
-from hysop.methods_keys import TimeIntegrator, Interpolation, Remesh,\
-    Support, Splitting, MultiScale, MultiScale, ExtraArgs
-from hysop.numerics.integrators.runge_kutta2 import RK2 as RK
-from hysop.numerics.remeshing import L6_4 as remesh_formula
-from hysop.numerics.remeshing import Linear
-from hysop.operator.advection import Advection
-from hysop.operator.analytic import Analytic
-from hysop.operators import Custom
-from hysop.operator.hdf_io import HDF_Writer
-from hysop.gpu.gpu_transfer import DataTransfer
-from hysop.problem.problem import Problem
-from hysop.problem.simulation import Simulation
-sin, cos, pi, sqrt = np.sin, np.cos, np.pi, np.sqrt
-
-
-def scalaire(res, x, y, z, t=0.):
-    r = sqrt((x - 0.35) ** 2 + (y - 0.35) ** 2 + (z - 0.35) ** 2)
-    res[0][...] = 0.
-    res[0][r < 0.15] = 1.
-    return res
-
-
-def vitesse(res, x, y, z, t=0.):
-    res[0][...] = 2. * sin(pi*x)**2*sin(2.*pi*y)*sin(2.*pi*z)*cos(t*pi/3.)
-    res[1][...] = -sin(2.*pi*x)*sin(pi*y)**2*sin(2.*pi*z)*cos(t*pi/3.)
-    res[2][...] = -sin(2.*pi*x)*sin(2.*pi*y)*sin(pi*z)**2*cos(t*pi/3.)
-    return res
-
-
-def volume(_simu, var, **kwds):
-    v_loc = np.sum(var[0].data[0] > 0.5) * np.prod(
-        var[0].topology.mesh.space_step)
-    return main_comm.allreduce(sendobj=v_loc, op=MPI.SUM)
-
-
-# Parameters
-dim = 3
-boxLength = [1., ] * dim
-boxMin = [0., ] * dim
-nbElem_v = [129] * dim
-nbElem_s = [129] * dim
-if nbElem_v[0] == nbElem_s[0]:
-    dv = Discretization(nbElem_v)
-else:
-    # In multi-scale, we need 1 ghost point for velocity
-    dv = Discretization(nbElem_v, ghosts=[1, ] * dim)
-ds = Discretization(nbElem_s)
-
-time_step = 0.35 / (4. * np.pi)
-finalTime = 3.
-outputModulo = 10
-simu = Simulation(start=0.0, end=finalTime,
-                  time_step=time_step, max_iter=120)
-
-# Domain
-box = hysop.Box(length=boxLength, origin=boxMin)
-
-# Fields
-scal = hysop.Field(domain=box, name='Scalar') # , formula=scalaire)
-velo = hysop.Field(domain=box, name='Velocity',
-                   is_vector=True) #, formula=vitesse)
-# Operators
-advec_method = {TimeIntegrator: RK,
-                Interpolation: Linear,
-                Remesh: remesh_formula,
-                Support: 'gpu_1k',
-                Splitting: 'o2',
-                MultiScale: Linear}
-if main_size == 1:
-    advec_method[ExtraArgs] = {'user_src': ['./levelSet3D.cl']}
-else:
-    # Multi-GPU advection must know a max dt and velocity to
-    # compute communication buffers
-    advec_method[ExtraArgs] = {'user_src': ['./levelSet3D.cl'],
-                               'device_id': main_rank % 2,
-                               'max_dt': simu.time_step,
-                               'max_velocity': [2, 1, 1]}
-
-advec = Advection(velo,
-                  discretization=dv,
-                  variables={scal: ds},
-                  method=advec_method)
-advec.discretize()
-
-# Get topologies from operator
-topo_v = advec.advec_dir[0].discreteFields[velo].topology
-topo_s = advec.advec_dir[0].discreteFields[scal].topology
-
-velocity = Analytic(variables={velo: topo_v},
-                    method={Support: 'gpu'})
-
-volume_m = Custom(function=volume, diagnostics_shape=(1, 2),
-                  in_fields=[scal], variables={scal: topo_s},
-                  io_params=IOParams(filename="volume.dat"))
-p = HDF_Writer(variables={scal: topo_s},
-               io_params=IOParams(frequency=outputModulo,
-                                  filename="levelset",
-                                  fileformat=HDF5))
-# ToHost frequency is set to 1 since the volume computation
-# is performed at each iteration.
-toHost = DataTransfer(source=topo_s, target=p,
-                      variables={scal: topo_s},
-                      freq=1,
-                      run_till=[p, volume_m])
-
-pb = Problem([velocity, advec, toHost, p, volume_m],
-             simu, dumpFreq=-1)
-
-pb.setup()
-scal.initialize(topo=topo_s)
-velo.initialize(topo=topo_v)
-
-scal.discreteFields[topo_s].toHost()
-scal.discreteFields[topo_s].wait()
-volume_m.apply(simu)
-p.apply(simu)
-
-ctime = MPI.Wtime()
-pb.solve()
-print MPI.Wtime() - ctime
-
-simu.finalize()
-scal.discreteFields[topo_s].toHost()
-scal.discreteFields[topo_s].wait()
-p.apply(simu)
-volume_m.apply(simu)
-
-pb.finalize()
-print pb.profiler
diff --git a/trashed_examples/LevelSet3D/levelSet3D_Scales.py b/trashed_examples/LevelSet3D/levelSet3D_Scales.py
deleted file mode 100644
index 4572efca65c05ea6a821d5ec7c7fc02ed89eef70..0000000000000000000000000000000000000000
--- a/trashed_examples/LevelSet3D/levelSet3D_Scales.py
+++ /dev/null
@@ -1,97 +0,0 @@
-#!/usr/bin/env python
-import hysop
-from hysop.constants import np, HDF5
-from hysop.mpi.main_var import MPI, main_comm
-from hysop.tools.parameters import Discretization, IOParams
-from hysop.methods_keys import Scales, MultiScale
-from hysop.operator.advection import Advection
-from hysop.operator.analytic import Analytic
-from hysop.operators import Custom
-from hysop.operator.hdf_io import HDF_Writer
-from hysop.problem.problem import Problem
-from hysop.problem.simulation import Simulation
-sin, cos, pi, sqrt = np.sin, np.cos, np.pi, np.sqrt
-
-
-def scalaire(res, x, y, z, t=0.):
-    r = sqrt((x - 0.35) ** 2 + (y - 0.35) ** 2 + (z - 0.35) ** 2)
-    res[0][...] = 0.
-    res[0][r < 0.15] = 1.
-    return res
-
-
-def vitesse(res, x, y, z, t=0.):
-    res[0][...] = 2. * sin(pi*x)**2*sin(2.*pi*y)*sin(2.*pi*z)*cos(t*pi/3.)
-    res[1][...] = -sin(2.*pi*x)*sin(pi*y)**2*sin(2.*pi*z)*cos(t*pi/3.)
-    res[2][...] = -sin(2.*pi*x)*sin(2.*pi*y)*sin(pi*z)**2*cos(t*pi/3.)
-    return res
-
-
-def volume(_simu, var, **kwds):
-    v_loc = np.sum(var[0].data[0] > 0.5) * np.prod(
-        var[0].topology.mesh.space_step)
-    return main_comm.allreduce(sendobj=v_loc, op=MPI.SUM)
-
-
-# Parameters
-dim = 3
-boxLength = [1., ] * dim
-boxMin = [0., ] * dim
-nbElem_v = [65] * dim
-nbElem_s = [65] * dim
-dv = Discretization(nbElem_v)
-ds = Discretization(nbElem_s)
-
-time_step = 0.35 / (4. * np.pi)
-finalTime = 3.
-outputModulo = 10
-simu = Simulation(start=0.0, end=finalTime,
-                  time_step=time_step, max_iter=120)
-
-# Domain
-box = hysop.Box(length=boxLength, origin=boxMin)
-
-# Fields
-scal = hysop.Field(domain=box, name='Scalar', formula=scalaire)
-velo = hysop.Field(domain=box, name='Velocity',
-                   is_vector=True, formula=vitesse)
-# Operators
-advec = Advection(velo,
-                  discretization=dv,
-                  variables={scal: ds},
-                  method={Scales: 'p_64', MultiScale: 'L4_4'})
-advec.discretize()
-
-# Get topologies from operator
-topo_v = advec.discreteFields[velo].topology
-topo_s = advec.discreteFields[scal].topology
-
-velocity = Analytic(variables={velo: topo_v})
-
-volume_m = Custom(function=volume, res_shape=1,
-                  in_fields=[scal], discretization=topo_s,
-                  io_params=IOParams(filename="volume.dat"))
-p = HDF_Writer(variables={scal: topo_s},
-               io_params=IOParams(frequency=outputModulo,
-                                  filename="levelset",
-                                  fileformat=HDF5))
-pb = Problem([velocity, advec, p, volume_m],
-             simu, dumpFreq=-1)
-
-pb.setup()
-scal.initialize(topo=topo_s)
-velo.initialize(topo=topo_v)
-
-volume_m.apply(simu)
-p.apply(simu)
-
-ctime = MPI.Wtime()
-pb.solve()
-print MPI.Wtime() - ctime
-
-simu.finalize()
-p.apply(simu)
-volume_m.apply(simu)
-
-pb.finalize()
-print pb.profiler
diff --git a/trashed_examples/LevelSet3D/levelSet3D_Scales_MultiScale.py b/trashed_examples/LevelSet3D/levelSet3D_Scales_MultiScale.py
deleted file mode 100644
index 8b970f960006b1dade145fbb5d8d4bb85a06f456..0000000000000000000000000000000000000000
--- a/trashed_examples/LevelSet3D/levelSet3D_Scales_MultiScale.py
+++ /dev/null
@@ -1,98 +0,0 @@
-#!/usr/bin/env python
-import hysop
-from hysop.constants import np, HDF5
-from hysop.mpi.main_var import MPI, main_comm
-from hysop.tools.parameters import Discretization, IOParams
-from hysop.methods_keys import Scales, MultiScale
-from hysop.operator.advection import Advection
-from hysop.operator.analytic import Analytic
-from hysop.operators import Custom
-from hysop.operator.hdf_io import HDF_Writer
-from hysop.problem.problem import Problem
-from hysop.problem.simulation import Simulation
-sin, cos, pi, sqrt = np.sin, np.cos, np.pi, np.sqrt
-
-
-def scalaire(res, x, y, z, t=0.):
-    r = sqrt((x - 0.35) ** 2 + (y - 0.35) ** 2 + (z - 0.35) ** 2)
-    res[0][...] = 0.
-    res[0][r < 0.15] = 1.
-    return res
-
-
-def vitesse(res, x, y, z, t=0.):
-    res[0][...] = 2. * sin(pi*x)**2*sin(2.*pi*y)*sin(2.*pi*z)*cos(t*pi/3.)
-    res[1][...] = -sin(2.*pi*x)*sin(pi*y)**2*sin(2.*pi*z)*cos(t*pi/3.)
-    res[2][...] = -sin(2.*pi*x)*sin(2.*pi*y)*sin(pi*z)**2*cos(t*pi/3.)
-    return res
-
-
-def volume(_simu, var, **kwds):
-    v_loc = np.sum(var[0].data[0] > 0.5) * np.prod(
-        var[0].topology.mesh.space_step)
-    return main_comm.allreduce(sendobj=v_loc, op=MPI.SUM)
-
-
-# Parameters
-dim = 3
-boxLength = [1., ] * dim
-boxMin = [0., ] * dim
-nbElem_v = [65] * dim
-nbElem_s = [129] * dim
-dv = Discretization(nbElem_v)
-ds = Discretization(nbElem_s)
-
-time_step = 0.35 / (4. * np.pi)
-finalTime = 3.
-outputModulo = 10
-simu = Simulation(start=0.0, end=finalTime,
-                  time_step=time_step, max_iter=120)
-
-# Domain
-box = hysop.Box(length=boxLength, origin=boxMin)
-
-# Get topologies from operator
-topo_v = box.create_topology(dv, dim=2)
-topo_s = box.create_topology(ds, dim=2)
-
-
-# Fields
-scal = hysop.Field(domain=box, name='Scalar', formula=scalaire)
-velo = hysop.Field(domain=box, name='Velocity',
-                   is_vector=True, formula=vitesse)
-# Operators
-advec = Advection(velo,
-                  discretization=topo_v,
-                  variables={scal: topo_s},
-                  method={Scales: 'p_64', MultiScale: 'L4_4'})
-advec.discretize()
-
-velocity = Analytic(variables={velo: topo_v})
-
-volume_m = Custom(function=volume, diagnostics_shape=(1, 2),
-                  in_fields=[scal], variables={scal: topo_s},
-                  io_params=IOParams(filename="volume.dat"))
-p = HDF_Writer(variables={scal: topo_s},
-               io_params=IOParams(frequency=outputModulo,
-                                  filename="levelset",
-                                  fileformat=HDF5))
-pb = Problem([velocity, advec, p, volume_m],
-             simu, dumpFreq=-1)
-
-pb.setup()
-scal.initialize(topo=topo_s)
-velo.initialize(topo=topo_v)
-
-volume_m.apply(simu)
-p.apply(simu)
-
-ctime = MPI.Wtime()
-pb.solve()
-print MPI.Wtime() - ctime
-
-simu.finalize()
-p.apply(simu)
-volume_m.apply(simu)
-
-pb.finalize()
-print pb.profiler
diff --git a/trashed_examples/LevelSet3D/levelSet3D_gpu.py b/trashed_examples/LevelSet3D/levelSet3D_gpu.py
deleted file mode 100755
index e51895da6366910a6085b1fd090d6a696582473c..0000000000000000000000000000000000000000
--- a/trashed_examples/LevelSet3D/levelSet3D_gpu.py
+++ /dev/null
@@ -1,128 +0,0 @@
-#!/usr/bin/env python
-import hysop
-# import hysop.gpu
-# hysop.gpu.CL_PROFILE = True
-from hysop.constants import np, HDF5
-from hysop.mpi.main_var import MPI, main_size, main_rank, main_comm
-from hysop.tools.parameters import Discretization, IOParams
-from hysop.methods_keys import TimeIntegrator, Interpolation, Remesh,\
-    Support, Splitting, MultiScale, MultiScale, ExtraArgs
-from hysop.numerics.integrators.runge_kutta2 import RK2 as RK
-from hysop.numerics.remeshing import L6_4 as remesh_formula
-from hysop.numerics.remeshing import Linear
-from hysop.operator.advection import Advection
-from hysop.operator.analytic import Analytic
-from hysop.operators import Custom
-from hysop.operator.hdf_io import HDF_Writer
-from hysop.gpu.gpu_transfer import DataTransfer
-from hysop.problem.problem import Problem
-from hysop.problem.simulation import Simulation
-sin, cos, pi, sqrt = np.sin, np.cos, np.pi, np.sqrt
-
-
-def scalaire(res, x, y, z, t=0.):
-    r = sqrt((x - 0.35) ** 2 + (y - 0.35) ** 2 + (z - 0.35) ** 2)
-    res[0][...] = 0.
-    res[0][r < 0.15] = 1.
-    return res
-
-
-def vitesse(res, x, y, z, t=0.):
-    res[0][...] = 2. * sin(pi*x)**2*sin(2.*pi*y)*sin(2.*pi*z)*cos(t*pi/3.)
-    res[1][...] = -sin(2.*pi*x)*sin(pi*y)**2*sin(2.*pi*z)*cos(t*pi/3.)
-    res[2][...] = -sin(2.*pi*x)*sin(2.*pi*y)*sin(pi*z)**2*cos(t*pi/3.)
-    return res
-
-
-def volume(_simu, var, **kwds):
-    v_loc = np.sum(var[0].data[0] > 0.5) * np.prod(
-        var[0].topology.mesh.space_step)
-    return main_comm.allreduce(sendobj=v_loc, op=MPI.SUM)
-
-
-# Parameters
-dim = 3
-boxLength = [1., ] * dim
-boxMin = [0., ] * dim
-nbElem_v = [129] * dim
-nbElem_s = [129] * dim
-dv = Discretization(nbElem_v)
-ds = Discretization(nbElem_s)
-
-time_step = 0.35 / (4. * np.pi)
-finalTime = 3.
-outputModulo = 10
-simu = Simulation(start=0.0, end=finalTime,
-                  time_step=time_step, max_iter=120)
-
-# Domain
-box = hysop.Box(length=boxLength, origin=boxMin)
-
-# Fields
-scal = hysop.Field(domain=box, name='Scalar', formula=scalaire)
-velo = hysop.Field(domain=box, name='Velocity',
-                   is_vector=True, formula=vitesse)
-# Operators
-advec_method = {TimeIntegrator: RK,
-                Interpolation: Linear,
-                Remesh: remesh_formula,
-                Support: 'gpu_1k',
-                Splitting: 'o2',
-                MultiScale: Linear}
-if main_size > 1:
-    # Multi-GPU advection must know a max dt and velocity to
-    # compute communication buffers
-    advec_method[ExtraArgs] = {'device_id': main_rank % 2,
-                               'max_dt': simu.time_step,
-                               'max_velocity': [2, 1, 1]}
-
-advec = Advection(velo,
-                  discretization=dv,
-                  variables={scal: ds},
-                  method=advec_method)
-advec.discretize()
-
-# Get topologies from operator
-topo_v = advec.advec_dir[0].discreteFields[velo].topology
-topo_s = advec.advec_dir[0].discreteFields[scal].topology
-
-velocity = Analytic(variables={velo: topo_v})
-
-volume_m = Custom(function=volume, diagnostics_shape=1,
-                  in_fields=[scal], variables={scal: topo_s},
-                  io_params=IOParams(filename="volume.dat"))
-p = HDF_Writer(variables={scal: topo_s},
-               io_params=IOParams(frequency=outputModulo,
-                                  filename="levelset",
-                                  fileformat=HDF5))
-# ToHost frequency is set to 1 since the volume computation
-# is performed at each iteration.
-toHost = DataTransfer(source=topo_s, target=p,
-                      variables={scal: topo_s},
-                      freq=1,
-                      run_till=[p, volume_m])
-
-pb = Problem([velocity, advec, toHost, p, volume_m],
-             simu, dumpFreq=-1)
-
-pb.setup()
-scal.initialize(topo=topo_s)
-velo.initialize(topo=topo_v)
-
-scal.discreteFields[topo_s].toHost()
-scal.discreteFields[topo_s].wait()
-volume_m.apply(simu)
-p.apply(simu)
-
-ctime = MPI.Wtime()
-pb.solve()
-print MPI.Wtime() - ctime
-
-simu.finalize()
-scal.discreteFields[topo_s].toHost()
-scal.discreteFields[topo_s].wait()
-p.apply(simu)
-volume_m.apply(simu)
-
-pb.finalize()
-print pb.profiler
diff --git a/trashed_examples/LevelSet3D/levelSet3D_gpu_MultiScale.py b/trashed_examples/LevelSet3D/levelSet3D_gpu_MultiScale.py
deleted file mode 100644
index 3d05e5019213b42d16b5135878ac931f47d1840d..0000000000000000000000000000000000000000
--- a/trashed_examples/LevelSet3D/levelSet3D_gpu_MultiScale.py
+++ /dev/null
@@ -1,129 +0,0 @@
-#!/usr/bin/env python
-import hysop
-# import hysop.gpu
-# hysop.gpu.CL_PROFILE = True
-from hysop.constants import np, HDF5
-from hysop.mpi.main_var import MPI, main_size, main_rank, main_comm
-from hysop.tools.parameters import Discretization, IOParams
-from hysop.methods_keys import TimeIntegrator, Interpolation, Remesh,\
-    Support, Splitting, MultiScale, MultiScale, ExtraArgs
-from hysop.numerics.integrators.runge_kutta2 import RK2 as RK
-from hysop.numerics.remeshing import L6_4 as remesh_formula
-from hysop.numerics.remeshing import Linear
-from hysop.operator.advection import Advection
-from hysop.operator.analytic import Analytic
-from hysop.operators import Custom
-from hysop.operator.hdf_io import HDF_Writer
-from hysop.gpu.gpu_transfer import DataTransfer
-from hysop.problem.problem import Problem
-from hysop.problem.simulation import Simulation
-sin, cos, pi, sqrt = np.sin, np.cos, np.pi, np.sqrt
-
-
-def scalaire(res, x, y, z, t=0.):
-    r = sqrt((x - 0.35) ** 2 + (y - 0.35) ** 2 + (z - 0.35) ** 2)
-    res[0][...] = 0.
-    res[0][r < 0.15] = 1.
-    return res
-
-
-def vitesse(res, x, y, z, t=0.):
-    res[0][...] = 2. * sin(pi*x)**2*sin(2.*pi*y)*sin(2.*pi*z)*cos(t*pi/3.)
-    res[1][...] = -sin(2.*pi*x)*sin(pi*y)**2*sin(2.*pi*z)*cos(t*pi/3.)
-    res[2][...] = -sin(2.*pi*x)*sin(2.*pi*y)*sin(pi*z)**2*cos(t*pi/3.)
-    return res
-
-
-def volume(_simu, var, **kwds):
-    v_loc = np.sum(var[0].data[0] > 0.5) * np.prod(
-        var[0].topology.mesh.space_step)
-    return main_comm.allreduce(sendobj=v_loc, op=MPI.SUM)
-
-
-# Parameters
-dim = 3
-boxLength = [1., ] * dim
-boxMin = [0., ] * dim
-nbElem_v = [65] * dim
-nbElem_s = [129] * dim
-# In multi-scale, we need 1 ghost point for velocity
-dv = Discretization(nbElem_v, ghosts=[1, ] * dim)
-ds = Discretization(nbElem_s)
-
-time_step = 0.35 / (4. * np.pi)
-finalTime = 3.
-outputModulo = 10
-simu = Simulation(start=0.0, end=finalTime,
-                  time_step=time_step, max_iter=120)
-
-# Domain
-box = hysop.Box(length=boxLength, origin=boxMin)
-
-# Fields
-scal = hysop.Field(domain=box, name='Scalar', formula=scalaire)
-velo = hysop.Field(domain=box, name='Velocity',
-                   is_vector=True, formula=vitesse)
-# Operators
-advec_method = {TimeIntegrator: RK,
-                Interpolation: Linear,
-                Remesh: remesh_formula,
-                Support: 'gpu_1k',
-                Splitting: 'o2',
-                MultiScale: Linear}
-if main_size > 1:
-    # Multi-GPU advection must know a max dt and velocity to
-    # compute communication buffers
-    advec_method[ExtraArgs] = {'device_id': main_rank % 2,
-                               'max_dt': simu.time_step,
-                               'max_velocity': [2, 1, 1]}
-
-advec = Advection(velo,
-                  discretization=dv,
-                  variables={scal: ds},
-                  method=advec_method)
-advec.discretize()
-
-# Get topologies from operator
-topo_v = advec.advec_dir[0].discreteFields[velo].topology
-topo_s = advec.advec_dir[0].discreteFields[scal].topology
-
-velocity = Analytic(variables={velo: topo_v})
-
-volume_m = Custom(function=volume, diagnostics_shape=(1, 2),
-                  in_fields=[scal], variables={scal: topo_s},
-                  io_params=IOParams(filename="volume.dat"))
-p = HDF_Writer(variables={scal: topo_s},
-               io_params=IOParams(frequency=outputModulo,
-                                  filename="levelset",
-                                  fileformat=HDF5))
-# ToHost frequency is set to 1 since the volume computation
-# is performed at each iteration.
-toHost = DataTransfer(source=topo_s, target=p,
-                      variables={scal: topo_s},
-                      freq=1,
-                      run_till=[p, volume_m])
-
-pb = Problem([velocity, advec, toHost, p, volume_m],
-             simu, dumpFreq=-1)
-
-pb.setup()
-scal.initialize(topo=topo_s)
-velo.initialize(topo=topo_v)
-
-scal.discreteFields[topo_s].toHost()
-scal.discreteFields[topo_s].wait()
-volume_m.apply(simu)
-p.apply(simu)
-
-ctime = MPI.Wtime()
-pb.solve()
-print MPI.Wtime() - ctime
-
-simu.finalize()
-scal.discreteFields[topo_s].toHost()
-scal.discreteFields[topo_s].wait()
-p.apply(simu)
-volume_m.apply(simu)
-
-pb.finalize()
-print pb.profiler
diff --git a/trashed_examples/LevelSet3D/levelSet3D_only_gpu.py b/trashed_examples/LevelSet3D/levelSet3D_only_gpu.py
deleted file mode 100644
index a163a5fd25e6216c551015af7378c953c4672cce..0000000000000000000000000000000000000000
--- a/trashed_examples/LevelSet3D/levelSet3D_only_gpu.py
+++ /dev/null
@@ -1,118 +0,0 @@
-#!/usr/bin/env python
-import hysop
-# import hysop.gpu
-# hysop.gpu.CL_PROFILE = True
-from hysop.constants import np, HDF5
-from hysop.mpi.main_var import MPI, main_size, main_rank, main_comm
-from hysop.tools.parameters import Discretization, IOParams
-from hysop.methods_keys import TimeIntegrator, Interpolation, Remesh,\
-    Support, Splitting, MultiScale, MultiScale, ExtraArgs
-from hysop.numerics.integrators.runge_kutta2 import RK2 as RK
-from hysop.numerics.remeshing import L6_4 as remesh_formula
-from hysop.numerics.remeshing import Linear
-from hysop.operator.advection import Advection
-from hysop.operator.analytic import Analytic
-from hysop.operators import Custom
-from hysop.operator.hdf_io import HDF_Writer
-from hysop.gpu.gpu_transfer import DataTransfer
-from hysop.problem.problem import Problem
-from hysop.problem.simulation import Simulation
-sin, cos, pi, sqrt = np.sin, np.cos, np.pi, np.sqrt
-
-
-def volume(_simu, var, **kwds):
-    v_loc = np.sum(var[0].data[0] > 0.5) * np.prod(
-        var[0].topology.mesh.space_step)
-    return main_comm.allreduce(sendobj=v_loc, op=MPI.SUM)
-
-
-# Parameters
-dim = 3
-boxLength = [1., ] * dim
-boxMin = [0., ] * dim
-nbElem_v = [129] * dim
-nbElem_s = [129] * dim
-dv = Discretization(nbElem_v)
-ds = Discretization(nbElem_s)
-
-time_step = 0.35 / (4. * np.pi)
-finalTime = 3.
-outputModulo = 10
-simu = Simulation(start=0.0, end=finalTime,
-                  time_step=time_step, max_iter=120)
-
-# Domain
-box = hysop.Box(length=boxLength, origin=boxMin)
-
-# Fields
-scal = hysop.Field(domain=box, name='Scalar')
-velo = hysop.Field(domain=box, name='Velocity', is_vector=True)
-# Operators
-advec_method = {TimeIntegrator: RK,
-                Interpolation: Linear,
-                Remesh: remesh_formula,
-                Support: 'gpu_1k',
-                Splitting: 'o2',
-                MultiScale: Linear}
-if main_size == 1:
-    advec_method[ExtraArgs] = {'user_src': ['./levelSet3D.cl']}
-else:
-    # Multi-GPU advection must know a max dt and velocity to
-    # compute communication buffers
-    advec_method[ExtraArgs] = {'user_src': ['./levelSet3D.cl'],
-                               'device_id': main_rank % 2,
-                               'max_dt': simu.time_step,
-                               'max_velocity': [2, 1, 1],
-                               'velocity_only_on_device': True}
-
-advec = Advection(velo,
-                  discretization=dv,
-                  variables={scal: ds},
-                  method=advec_method)
-advec.discretize()
-
-# Get topologies from operator
-topo_v = advec.advec_dir[0].discreteFields[velo].topology
-topo_s = advec.advec_dir[0].discreteFields[scal].topology
-
-velocity = Analytic(variables={velo: topo_v},
-                    method={Support: 'gpu'})
-
-volume_m = Custom(function=volume, diagnostics_shape=(1, 2),
-                  in_fields=[scal], variables={scal: topo_s},
-                  io_params=IOParams(filename="volume.dat"))
-p = HDF_Writer(variables={scal: topo_s},
-               io_params=IOParams(frequency=outputModulo,
-                                  filename="levelset",
-                                  fileformat=HDF5))
-# ToHost frequency is set to 1 since the volume computation
-# is performed at each iteration.
-toHost = DataTransfer(source=topo_s, target=p,
-                      variables={scal: topo_s},
-                      freq=1,
-                      run_till=[p, volume_m])
-
-pb = Problem([velocity, advec, toHost, p, volume_m],
-             simu, dumpFreq=-1)
-
-pb.setup()
-scal.initialize(topo=topo_s)
-velo.initialize(topo=topo_v)
-
-scal.discreteFields[topo_s].toHost()
-scal.discreteFields[topo_s].wait()
-volume_m.apply(simu)
-p.apply(simu)
-
-ctime = MPI.Wtime()
-pb.solve()
-print MPI.Wtime() - ctime
-
-simu.finalize()
-scal.discreteFields[topo_s].toHost()
-scal.discreteFields[topo_s].wait()
-p.apply(simu)
-volume_m.apply(simu)
-
-pb.finalize()
-print pb.profiler
diff --git a/trashed_examples/LevelSet3D/levelSet3D_only_gpu_MultiScale.py b/trashed_examples/LevelSet3D/levelSet3D_only_gpu_MultiScale.py
deleted file mode 100644
index 0726d2328d3ba3fdbb281acce52108d4e5c5b79d..0000000000000000000000000000000000000000
--- a/trashed_examples/LevelSet3D/levelSet3D_only_gpu_MultiScale.py
+++ /dev/null
@@ -1,120 +0,0 @@
-#!/usr/bin/env python
-import hysop
-# import hysop.gpu
-# hysop.gpu.CL_PROFILE = True
-from hysop.constants import np, HDF5
-from hysop.mpi.main_var import MPI, main_size, main_rank, main_comm
-from hysop.tools.parameters import Discretization, IOParams
-from hysop.methods_keys import TimeIntegrator, Interpolation, Remesh,\
-    Support, Splitting, MultiScale, MultiScale, ExtraArgs
-from hysop.numerics.integrators.runge_kutta2 import RK2 as RK
-from hysop.numerics.remeshing import L6_4 as remesh_formula
-from hysop.numerics.remeshing import Linear
-from hysop.operator.advection import Advection
-from hysop.operator.analytic import Analytic
-from hysop.operators import Custom
-from hysop.operator.hdf_io import HDF_Writer
-from hysop.gpu.gpu_transfer import DataTransfer
-from hysop.problem.problem import Problem
-from hysop.problem.simulation import Simulation
-sin, cos, pi, sqrt = np.sin, np.cos, np.pi, np.sqrt
-
-
-def volume(simu, var, **kwds):
-    v_loc = np.sum(var[0].data[0] > 0.5) * np.prod(
-        var[0].topology.mesh.space_step)
-    return main_comm.allreduce(sendobj=v_loc, op=MPI.SUM)
-
-
-# Parameters
-dim = 3
-boxLength = [1., ] * dim
-boxMin = [0., ] * dim
-nbElem_v = [65] * dim
-nbElem_s = [129] * dim
-# In multi-scale, we need 1 ghost point for velocity
-dv = Discretization(nbElem_v, ghosts=[1, ] * dim)
-ds = Discretization(nbElem_s)
-
-time_step = 0.35 / (4. * np.pi)
-finalTime = 3.
-outputModulo = 10
-simu = Simulation(start=0.0, end=finalTime,
-                  time_step=time_step, max_iter=120)
-
-# Domain
-box = hysop.Box(length=boxLength, origin=boxMin)
-
-# Fields
-scal = hysop.Field(domain=box, name='Scalar')
-velo = hysop.Field(domain=box, name='Velocity', is_vector=True)
-# Operators
-advec_method = {TimeIntegrator: RK,
-                Interpolation: Linear,
-                Remesh: remesh_formula,
-                Support: 'gpu_1k',
-                Splitting: 'o2',
-                MultiScale: Linear}
-
-if main_size == 1:
-    advec_method[ExtraArgs] = {'user_src': ['./levelSet3D.cl']}
-else:
-    # Multi-GPU advection must know a max dt and velocity to
-    # compute communication buffers
-    advec_method[ExtraArgs] = {'user_src': ['./levelSet3D.cl'],
-                               'device_id': main_rank % 2,
-                               'max_dt': simu.time_step,
-                               'max_velocity': [2, 1, 1],
-                               'velocity_only_on_device': True}
-
-advec = Advection(velo,
-                  discretization=dv,
-                  variables={scal: ds},
-                  method=advec_method)
-advec.discretize()
-
-# Get topologies from operator
-topo_v = advec.advec_dir[0].discreteFields[velo].topology
-topo_s = advec.advec_dir[0].discreteFields[scal].topology
-
-velocity = Analytic(variables={velo: topo_v},
-                    method={Support: 'gpu'})
-
-volume_m = Custom(function=volume, diagnostics_shape=(1, 2),
-                  in_fields=[scal], variables={scal: topo_s},
-                  io_params=IOParams(filename="volume.dat"))
-p = HDF_Writer(variables={scal: topo_s},
-               io_params=IOParams(frequency=outputModulo,
-                                  filename="levelset",
-                                  fileformat=HDF5))
-# ToHost frequency is set to 1 since the volume computation
-# is performed at each iteration.
-toHost = DataTransfer(source=topo_s, target=p,
-                      variables={scal: topo_s},
-                      freq=1,
-                      run_till=[p, volume_m])
-
-pb = Problem([velocity, advec, toHost, p, volume_m],
-             simu, dumpFreq=-1)
-
-pb.setup()
-scal.initialize(topo=topo_s)
-velo.initialize(topo=topo_v)
-
-scal.discreteFields[topo_s].toHost()
-scal.discreteFields[topo_s].wait()
-volume_m.apply(simu)
-p.apply(simu)
-
-ctime = MPI.Wtime()
-pb.solve()
-print MPI.Wtime() - ctime
-
-simu.finalize()
-scal.discreteFields[topo_s].toHost()
-scal.discreteFields[topo_s].wait()
-p.apply(simu)
-volume_m.apply(simu)
-
-pb.finalize()
-print pb.profiler
diff --git a/trashed_examples/LevelSet3D/levelSet3D_python.py b/trashed_examples/LevelSet3D/levelSet3D_python.py
deleted file mode 100644
index 3a3d071d9243ca0eb4ce620e73978329100a8b63..0000000000000000000000000000000000000000
--- a/trashed_examples/LevelSet3D/levelSet3D_python.py
+++ /dev/null
@@ -1,140 +0,0 @@
-#!/usr/bin/env python
-import hysop
-import hysop.gpu
-hysop.gpu.CL_PROFILE = True
-from hysop.constants import np, HDF5
-from hysop.mpi.main_var import MPI, main_size, main_rank, main_comm
-from hysop.tools.parameters import Discretization, IOParams
-from hysop.methods_keys import TimeIntegrator, Interpolation, Remesh,\
-    Support, Splitting, MultiScale, MultiScale
-from hysop.numerics.integrators.runge_kutta2 import RK2 as RK
-from hysop.numerics.remeshing import L2_1 as remesh_formula
-from hysop.numerics.interpolation import Linear
-from hysop.operator.advection import Advection
-from hysop.operator.analytic import Analytic
-from hysop.operators import Custom
-from hysop.operator.hdf_io import HDF_Writer
-from hysop.gpu.gpu_transfer import DataTransfer
-from hysop.problem.problem import Problem
-from hysop.problem.simulation import Simulation
-sin, cos, pi, sqrt = np.sin, np.cos, np.pi, np.sqrt
-
-
-def scalaire(res, x, y, z, t=0.):
-    r = sqrt((x - 0.35) ** 2 + (y - 0.35) ** 2 + (z - 0.35) ** 2)
-    res[0][...] = 0.
-    res[0][r < 0.15] = 1.
-    return res
-
-
-def vitesse(res, x, y, z, t=0.):
-    res[0][...] = 2. * sin(pi*x)**2*sin(2.*pi*y)*sin(2.*pi*z)*cos(t*pi/3.)
-    res[1][...] = -sin(2.*pi*x)*sin(pi*y)**2*sin(2.*pi*z)*cos(t*pi/3.)
-    res[2][...] = -sin(2.*pi*x)*sin(2.*pi*y)*sin(pi*z)**2*cos(t*pi/3.)
-    return res
-
-
-def volume(_simu, var, **kwds):
-    v_loc = np.sum(var[0].data[0] > 0.5) * np.prod(
-        var[0].topology.mesh.space_step)
-    return main_comm.allreduce(sendobj=v_loc, op=MPI.SUM)
-
-
-# Parameters
-dim = 3
-boxLength = [1., ] * dim
-boxMin = [0., ] * dim
-nbElem_v = [65] * dim
-nbElem_s = [65] * dim
-if nbElem_v[0] == nbElem_s[0]:
-    dv = Discretization(nbElem_v)
-else:
-    # In multi-scale, we need 1 ghost point for velocity
-    dv = Discretization(nbElem_v, ghosts=[1, ] * dim)
-ds = Discretization(nbElem_s)
-
-time_step = 0.35 / (4. * np.pi)
-finalTime = 3.
-outputModulo = 10
-simu = Simulation(start=0.0, end=finalTime,
-                  time_step=time_step, max_iter=120)
-
-# Domain
-box = hysop.Box(length=boxLength, origin=boxMin)
-
-# Fields
-scal = hysop.Field(domain=box, name='Scalar', formula=scalaire)
-velo = hysop.Field(domain=box, name='Velocity',
-                   is_vector=True, formula=vitesse)
-# Operators
-advec_method = {TimeIntegrator: RK,
-                Interpolation: Linear,
-                Remesh: remesh_formula,
-                Support: '',
-                Splitting: 'o2',
-                MultiScale: Linear}
-
-#if main_size == 1:
-advec = Advection(velo,
-                  discretization=dv,
-                  variables={scal: ds},
-                  method=advec_method)
-    #user_src=['./levelSet3D.cl'])
-#else:
-    # # Multi-GPU advection must know a max dt and velocity to
-    # # compute communication buffers
-    # advec = Advection(velo,
-    #                   discretization=dv,
-    #                   variables={scal: ds},
-    #                   method=advec_method,
-    #                   user_src=['./levelSet3D.cl'],
-    #                   device_id=main_rank % 2,
-    #                   max_dt=simu.time_step,
-    #                   max_velocity=[2, 1, 1])
-advec.discretize()
-
-# Get topologies from operator
-topo_v = advec.advec_dir[0].discreteFields[velo].topology
-topo_s = advec.advec_dir[0].discreteFields[scal].topology
-
-velocity = Analytic(variables={velo: topo_v})
-                    #method={Support: 'gpu'})
-
-volume_m = Custom(function=volume, diagnostics_shape=(1, 2),
-                  in_fields=[scal], variables={scal: topo_s},
-                  io_params=IOParams(filename="volume.dat"))
-p = HDF_Writer(variables={scal: topo_s},
-               io_params=IOParams(frequency=outputModulo,
-                                  filename="levelset",
-                                  fileformat=HDF5))
-# ToHost frequency is set to 1 since the volume computation
-# is performed at each iteration.
-# toHost = DataTransfer(source=topo_s, target=p,
-#                       variables={scal: topo_s},
-#                       freq=1,
-#                       run_till=[p, volume_m])
-
-pb = Problem([velocity, advec, p, volume_m],
-             simu, dumpFreq=-1)
-
-pb.setup()
-scal.initialize(topo=topo_s)
-velo.initialize(topo=topo_v)
-
-#scal.discreteFields[topo_s].toHost()
-#scal.discreteFields[topo_s].wait()
-volume_m.apply(simu)
-p.apply(simu)
-
-ctime = MPI.Wtime()
-pb.solve()
-print MPI.Wtime() - ctime
-
-simu.finalize()
-#scal.discreteFields[topo_s].toHost()
-#scal.discreteFields[topo_s].wait()
-p.apply(simu)
-volume_m.apply(simu)
-
-pb.finalize()
-print pb.profiler
diff --git a/trashed_examples/Multiphase/NS_planeJet_hybrid_MS_MP.py b/trashed_examples/Multiphase/NS_planeJet_hybrid_MS_MP.py
deleted file mode 100644
index bcf74505fc3fb023a20fe7a8f10f2a043b913dad..0000000000000000000000000000000000000000
--- a/trashed_examples/Multiphase/NS_planeJet_hybrid_MS_MP.py
+++ /dev/null
@@ -1,699 +0,0 @@
-#!/usr/bin/env python
-# Scripts arguments:
-# 1. Flow resolution
-# 2. Scalar resolution
-# 3. Dictionary for devices id: (mpi rank: device id)
-# 4. Is the initial condition is perturbed
-# 5. Is data output
-# 6. Flow density
-# 7. Jet density
-# mpirun -np 10 python ./NS_planeJet_hybrid_MS_MP.py "[129,129,129]" "[257,257,257]" "{0:0,5:1}" "True" "True" "1" "3"
-import sys
-USER_NB_ELEM_UW = eval(sys.argv[1])
-USER_NB_ELEM_S = eval(sys.argv[2])
-USER_RANK_DEVICE_ID = eval(sys.argv[3])
-RANDOM_INIT = eval(sys.argv[4])
-IS_OUTPUT = eval(sys.argv[5])
-FLOW_DENSITY = eval(sys.argv[6])
-JET_DENSITY = eval(sys.argv[7])
-import hysop
-import hysop.gpu
-#hysop.gpu.CL_PROFILE = True
-from hysop.constants import np, HDF5, ASCII, HYSOP_MPI_REAL
-from hysop.mpi.main_var import MPI, main_size, main_rank, main_comm
-from hysop.tools.parameters import MPIParams, Discretization, IOParams
-from hysop.problem.simulation import Simulation
-from hysop.fields.variable_parameter import VariableParameter
-from hysop.methods_keys import TimeIntegrator, Interpolation, Remesh,\
-    Support, Splitting, MultiScale, MultiScale, SpaceDiscretisation, \
-    GhostUpdate, Scales, dtCrit, ExtraArgs
-from hysop.numerics.integrators.runge_kutta2 import RK2 as RK
-from hysop.numerics.integrators.runge_kutta3 import RK3 as RK3
-from hysop.numerics.finite_differences import FD_C_4
-from hysop.numerics.remeshing import L6_4 as remesh_formula
-from hysop.numerics.remeshing import L2_1
-from hysop.numerics.remeshing import Linear
-from hysop.operator.advection import Advection
-from hysop.operator.diffusion import Diffusion
-from hysop.operator.stretching import Stretching
-from hysop.operator.poisson import Poisson
-from hysop.operator.differential import Curl
-from hysop.operator.adapt_timestep import AdaptTimeStep
-from hysop.operators import Custom
-from hysop.operator.redistribute_inter import RedistributeInter
-from hysop.operator.redistribute_intra import RedistributeIntra
-from hysop.gpu.gpu_transfer import DataTransfer
-from hysop.domain.subsets import SubBox
-from hysop.operator.hdf_io import HDF_Writer
-from hysop.operator.energy_enstrophy import EnergyEnstrophy
-import hysop.tools.numpywrappers as npw
-from hysop.tools.profiler import Profiler, FProfiler
-from hysop.tools.io_utils import IO
-from hysop.operator.baroclinic_from_rhs import BaroclinicFromRHS
-from hysop.operator.multiresolution_filter import MultiresolutionFilter
-from hysop.operator.multiphase_gradp import MultiphaseGradP
-from hysop.operator.multiphase_baroclinic_rhs import MultiphaseBaroclinicRHS
-from hysop.operator.spectrum import Spectrum
-IO.set_default_path('/scratch_p/jmetancelin/PlaneJet_F{0}_J{1}'.format(FLOW_DENSITY,JET_DENSITY))
-
-pi = np.pi
-cos = np.cos
-sin = np.sin
-exp = np.exp
-abs = np.abs
-tanh = np.tanh
-
-
-TASK_UW = 1
-TASK_SCALAR = 2
-PROC_TASKS = [TASK_UW, ] * main_size
-for p in USER_RANK_DEVICE_ID:
-    PROC_TASKS[p] = TASK_SCALAR
-try:
-    DEVICE_ID = USER_RANK_DEVICE_ID[main_rank]
-except KeyError:
-    DEVICE_ID = None
-out_freq = 10
-# Physical parameters:
-# Flow viscosity
-VISCOSITY = 1e-4
-# Schmidt number
-SC = ((1. * USER_NB_ELEM_S[0] - 1.)/(1. * USER_NB_ELEM_UW[0] - 1.))**2
-# Scalar diffusivity
-DIFF_COEFF_SCAL = VISCOSITY / SC
-
-width = 0.01
-ampl3 = 0.3
-ampl = 0.05
-ampl2 = 0.05
-
-
-ctime = MPI.Wtime()
-setup_time = FProfiler("Setup")
-
-# Domain
-box = hysop.Box(length=[1., 1., 1.], origin=[0., 0., 0.],
-                proc_tasks=PROC_TASKS)
-mpi_params = MPIParams(comm=box.comm_task, task_id=PROC_TASKS[main_rank])
-mpi_params_S = MPIParams(comm=box.comm_task, task_id=TASK_SCALAR)
-mpi_params_UW = MPIParams(comm=box.comm_task, task_id=TASK_UW)
-
-
-def computeVel(res, x, y, z, t):
-    yy = abs(y - 0.5)
-    aux = (0.1 - 2. * yy) / (4. * width)
-    strg1 = exp(-abs(aux ** 2))
-    strg2 = exp(-abs(aux ** 2))
-    strg3 = exp(-abs(aux ** 2))
-    if RANDOM_INIT:
-        from create_random_arrays import random_init
-        randX, randY, randZ = random_init(res[0].shape, box.comm_task)
-        strg1 = exp(-abs(aux ** 2)) * randX
-        strg2 = exp(-abs(aux ** 2)) * randY
-        strg3 = exp(-abs(aux ** 2)) * randZ
-    else:
-        strg1 = 0.
-        strg2 = 0.
-        strg3 = 0.
-    res[0][...] = 0.5 * (1. + tanh(aux))
-    res[0][...] *= (1. + ampl3 * sin(8. * pi * x))
-    res[0][...] *= (1. + ampl * strg1)
-    res[1][...] = ampl * strg2
-    res[2][...] = ampl * strg3
-    return res
-
-
-
-def initScal(res, x, y, z, t):
-    yy = abs(y - 0.5)
-    aux = (0.1 - 2. * yy) / (4. * width)
-    res[0][...] = 0.5 * (1. + tanh(aux))
-    res[0][...] *= (1. + ampl3 * sin(8. * pi * x))
-    return res
-
-temp_maxvelo = npw.zeros((3, ))
-maxvelo_values = npw.zeros((3, ))
-
-
-def calc_maxvelo(simu, v):
-    temp_maxvelo[0] = np.max(np.abs(v[0].data[0]))
-    temp_maxvelo[1] = np.max(np.abs(v[0].data[1]))
-    temp_maxvelo[2] = np.max(np.abs(v[0].data[2]))
-    v[0].topology.comm.Allreduce(sendbuf=[temp_maxvelo, 3, HYSOP_MPI_REAL],
-                                 recvbuf=[maxvelo_values, 3, HYSOP_MPI_REAL],
-                                 op=MPI.MAX)
-    return maxvelo_values
-
-
-# Fields
-velo = hysop.Field(domain=box, formula=computeVel,
-                   name='Velocity', is_vector=True)
-vorti = hysop.Field(domain=box,
-                    name='Vorticity', is_vector=True)
-scal = hysop.Field(domain=box, formula=initScal,
-                   name='Scalar', is_vector=False)
-gradp = hysop.Field(domain=box,
-                    name='GradP', is_vector=True)
-baroclinic_rhs = hysop.Field(domain=box,
-                             name='B_rhs', is_vector=True)
-
-
-data = {'dt': 0.01}
-dt = VariableParameter(data)
-simu = Simulation(start=0.0, end=5., time_step=0.01, max_iter=1000)
-
-# Flow discretizations:
-d_F_0g = Discretization(USER_NB_ELEM_UW)
-d_F_2g = Discretization(USER_NB_ELEM_UW, [2, ] * 3)
-# Scalar discretization
-d_S_0g = Discretization(USER_NB_ELEM_S)
-# Velocity discretization for scalar advection
-if USER_NB_ELEM_UW[0] == USER_NB_ELEM_S[0]:
-    d_F_1g = Discretization(USER_NB_ELEM_UW)
-else:
-    d_F_1g = Discretization(USER_NB_ELEM_UW, [1, ] * 3)
-
-# Topologies
-topo_S_0g_1d = None
-topo_F_1g_1d = None
-topo_F_0g_1d = None
-topo_F_2g_1d = None
-topo_F_0g_2d = None
-topo_F_0g_2d = box.create_topology(
-    d_F_0g, dim=2, mpi_params=mpi_params)
-topo_F_2g_1d = box.create_topology(
-    d_F_2g, dim=1, mpi_params=mpi_params)
-topo_F_0g_1d = box.create_topology(
-    d_F_0g, dim=1, mpi_params=mpi_params)
-topo_F_1g_1d = box.create_topology(
-    d_F_1g, dim=1, mpi_params=mpi_params)
-topo_S_0g_1d = box.create_topology(
-    d_S_0g, dim=1, mpi_params=mpi_params)
-
-
-# Operators
-# GPU operators
-advec_scal_method = {TimeIntegrator: RK,
-                     Interpolation: Linear,
-                     Remesh: remesh_formula,
-                     Support: 'gpu_2k',
-                     Splitting: 'o2',
-                     MultiScale: Linear}
-if PROC_TASKS.count(TASK_SCALAR) == 1:
-    advec_scal_method[ExtraArgs] = {'device_id': DEVICE_ID,
-                                    'device_type': 'gpu'}
-else:
-    advec_scal_method[ExtraArgs] = {'max_velocity': [1.2, 0.7, 0.7],
-                                    'max_dt': 0.012,
-                                    'device_id': DEVICE_ID,
-                                    'device_type': 'gpu',
-                                    'velocity_only_on_device': False}
-advec_scal = Advection(velo,
-                       discretization=topo_F_1g_1d,
-                       variables={scal: topo_S_0g_1d},
-                       mpi_params=mpi_params_S,
-                       method=advec_scal_method)
-# diffusion_scal = Diffusion(viscosity=DIFF_COEFF_SCAL,
-#                            vorticity=scal,
-#                            discretization=topo_S_0g_1d,
-#                            mpi_params=mpi_params_S,
-#                            method={Support: 'gpu',
-#                                    SpaceDiscretisation: 'fd',
-#                                    ExtraArgs: {'device_id': DEVICE_ID,
-#                                                'device_type': 'gpu'}})
-# diffusion_scal.name += '_(Scalar)'
-# tanh(x) is in [-1:1] and -1 stand for the flow.
-# We want [FLOW_DENSITY:JET_DENSITY] if FLOW_DENSITY<JET_DENSITY
-# We want [JET_DENSITY:FLOW_DENSITY] if FLOW_DENSITY>JET_DENSITY
-if FLOW_DENSITY < JET_DENSITY:
-    scal_to_rho = '{0:f}*(0.5*tanh(100.0*x-50.0)+0.5)+{1:f}'.format(
-        abs(FLOW_DENSITY - JET_DENSITY),
-        min(FLOW_DENSITY, JET_DENSITY))
-else:
-    scal_to_rho = '{0:f}*(-0.5*tanh(100.0*x-50.0)+0.5)+{1:f}'.format(
-        abs(FLOW_DENSITY - JET_DENSITY),
-        min(FLOW_DENSITY, JET_DENSITY))
-print
-baroclinic_rhs_op = MultiphaseBaroclinicRHS(
-    baroclinic_rhs, scal, gradp,
-    variables={baroclinic_rhs: topo_S_0g_1d,
-               gradp: topo_F_2g_1d,
-               scal: topo_S_0g_1d},
-    method={Support: 'gpu',
-            SpaceDiscretisation: FD_C_4,
-            ExtraArgs: {'density_func': scal_to_rho,
-                        'device_id': DEVICE_ID,
-                        'device_type': 'gpu'}},
-    mpi_params=mpi_params_S)
-filter_scal = MultiresolutionFilter(
-    d_in=topo_S_0g_1d, d_out=topo_F_2g_1d,
-    variables={baroclinic_rhs: topo_F_2g_1d},
-    method={Remesh: L2_1,
-            Support: 'gpu',
-            ExtraArgs: {'device_id': DEVICE_ID,
-                        'device_type': 'gpu'}},
-    mpi_params=mpi_params_S)
-gradp_op = MultiphaseGradP(velocity=velo, gradp=gradp, viscosity=VISCOSITY,
-                           discretization=topo_F_2g_1d,
-                           mpi_params=mpi_params_S,
-                           method={SpaceDiscretisation: FD_C_4,
-                                   ExtraArgs: {'gravity': [0., 0., 0.]}})
-
-# CPU operators
-advec = Advection(velo,
-                  discretization=topo_F_0g_2d,
-                  variables={vorti: topo_F_0g_2d},
-                  mpi_params=mpi_params_UW,
-                  method={Scales: 'p_64', MultiScale: 'L4_4'})
-stretch = Stretching(velo, vorti, discretization=topo_F_2g_1d,
-                     mpi_params=mpi_params_UW)
-baroclinic = BaroclinicFromRHS(vorti, baroclinic_rhs,
-                               discretization=topo_F_0g_1d,
-                               mpi_params=mpi_params_UW)
-diffusion = Diffusion(variables={vorti: topo_F_0g_1d},
-                      viscosity=VISCOSITY,
-                      mpi_params=mpi_params_UW)
-poisson = Poisson(velo, vorti, discretization=topo_F_0g_1d,
-                  mpi_params=mpi_params_UW)
-c = Curl(velo, vorti, discretization=topo_F_0g_1d,
-         method={SpaceDiscretisation: 'fftw', GhostUpdate: True},
-         mpi_params=mpi_params_UW)
-#dt_output = None
-#if IS_OUTPUT:
-dt_output = IOParams(frequency=1, filename='dt.dat', fileformat=ASCII)
-dt_adapt = AdaptTimeStep(velo, vorti,
-                         simulation=simu,
-                         time_range=[0, np.infty],
-                         discretization=topo_F_2g_1d,
-                         method={TimeIntegrator: RK3,
-                                 SpaceDiscretisation: FD_C_4,
-                                 dtCrit: ['gradU', 'cfl', ]},
-                         lcfl=0.15,
-                         cfl=1.5,
-                         io_params=dt_output,
-                         mpi_params=mpi_params_UW)
-
-# Operators discretizations
-if box.is_on_task(TASK_SCALAR):
-    for op in (advec_scal, gradp_op, filter_scal, baroclinic_rhs_op):  # , diffusion_scal):
-        op.discretize()
-if box.is_on_task(TASK_UW):
-    for op in (advec, stretch, diffusion, poisson, c, dt_adapt, baroclinic):
-        op.discretize()
-
-if IS_OUTPUT:
-    # Defining subdomains
-    L_V = 1. - 1. / (1. * USER_NB_ELEM_UW[0])
-    L_S = 1. - 1. / (1. * USER_NB_ELEM_S[0])
-    XY_plane_v = SubBox(origin=[0., 0., 0.5], length=[L_V, L_V, 0.],
-                        parent=box)
-    XZ_plane_v = SubBox(origin=[0., 0.5, 0.], length=[L_V, 0., L_V],
-                        parent=box)
-    XY_plane_s = SubBox(origin=[0., 0., 0.5], length=[L_S, L_S, 0.],
-                        parent=box)
-    XZ_plane_s = SubBox(origin=[0., 0.5, 0.], length=[L_S, 0., L_S],
-                        parent=box)
-    # Defining output operators
-    p_velo = HDF_Writer(variables={velo: topo_F_0g_1d},
-                        mpi_params=mpi_params_UW,
-                        io_params=IOParams(frequency=out_freq,
-                                           filename='flow',
-                                           fileformat=HDF5))
-    p_velo_xy = HDF_Writer(variables={velo: topo_F_0g_1d,
-                                      vorti: topo_F_0g_1d},
-                           var_names={velo: 'Velocity', vorti: 'Vorticity'},
-                           subset=XY_plane_v,
-                           mpi_params=mpi_params_UW,
-                           io_params=IOParams(frequency=out_freq,
-                                              filename='flow_XY',
-                                              fileformat=HDF5))
-    p_velo_xz = HDF_Writer(variables={velo: topo_F_0g_1d,
-                                      vorti: topo_F_0g_1d},
-                           var_names={velo: 'Velocity', vorti: 'Vorticity'},
-                           subset=XZ_plane_v,
-                           mpi_params=mpi_params_UW,
-                           io_params=IOParams(frequency=out_freq,
-                                              filename='flow_XZ',
-                                              fileformat=HDF5))
-    p_gradp_xy = HDF_Writer(variables={gradp: topo_F_2g_1d},
-                           var_names={gradp: 'gradp'},
-                           subset=XY_plane_s,
-                           mpi_params=mpi_params_S,
-                           io_params=IOParams(frequency=out_freq,
-                                              filename='gradp_XY',
-                                              fileformat=HDF5))
-    # p_rhs_xy = HDF_Writer(variables={baroclinic_rhs: topo_S_0g_1d},
-    #                        var_names={baroclinic_rhs: 'RHS'},
-    #                        subset=XY_plane_s,
-    #                        mpi_params=mpi_params_S,
-    #                        io_params=IOParams(frequency=out_freq,
-    #                                           filename='rhs_XY',
-    #                                           fileformat=HDF5))
-    p_scal_xy = HDF_Writer(variables={scal: topo_S_0g_1d},
-                           var_names={scal: 'Scalar'},
-                           subset=XY_plane_s,
-                           mpi_params=mpi_params_S,
-                           io_params=IOParams(frequency=out_freq,
-                                              filename='scal_XY',
-                                              fileformat=HDF5))
-    p_scal_xz = HDF_Writer(variables={scal: topo_S_0g_1d},
-                           var_names={scal: 'Scalar'},
-                           subset=XZ_plane_s,
-                           mpi_params=mpi_params_S,
-                           io_params=IOParams(frequency=out_freq,
-                                              filename='scal_XZ',
-                                              fileformat=HDF5))
-    p_scal_xy.name += '_(Scalar)'
-    p_scal_xz.name += '_(Scalar)'
-    energy = EnergyEnstrophy(velocity=velo,
-                             vorticity=vorti,
-                             discretization=topo_F_0g_1d,
-                             mpi_params=mpi_params_UW,
-                             io_params=IOParams(frequency=1,
-                                                filename='energy.dat',
-                                                fileformat=ASCII))
-    maxvelo = Custom(in_fields=[velo],
-                     variables={velo: topo_F_0g_1d},
-                     function=calc_maxvelo,
-                     diagnostics_shape=(1, 4),
-                     mpi_params=mpi_params_UW,
-                     io_params=IOParams(frequency=1,
-                                        filename='maxvelo.dat',
-                                        fileformat=ASCII))
-    if box.is_on_task(TASK_UW):
-        for op in (p_velo, p_velo_xy, p_velo_xz, energy, maxvelo):
-            op.discretize()
-    if box.is_on_task(TASK_SCALAR):
-        for op in (p_scal_xy, p_scal_xz, p_gradp_xy):
-            op.discretize()
-
-# Redistribute operators
-# CPU redistributes
-RF_vorti_0g2d_2g1d = RedistributeIntra(variables=[vorti],  # W_C_2d_to_2G
-                                  source=advec, target=stretch,
-                                  mpi_params=mpi_params_UW)
-RF_vorti_0g2d_2g1d.name += '_toG_W'
-RF_velo_0g1d_2g1d = RedistributeIntra(variables=[velo],  # U_C_1d_to_C_2d_2G
-                                 source=poisson, target=stretch,
-                                 run_till=[stretch, dt_adapt],
-                                 mpi_params=mpi_params_UW)
-RF_velo_0g1d_2g1d.name += '_toG_V'
-toDev_velo_1g1d = DataTransfer(source=topo_F_1g_1d, target=advec_scal,
-                     variables={velo: topo_F_1g_1d},
-                     run_till=[advec_scal, gradp_op],
-                     mpi_params=mpi_params_S)
-toDev_velo_1g1d.name += '_ToDev_V'
-RS_velo_0g1d_1g1d = RedistributeIntra(variables=[velo],  # U_C_1d_to_C_2d_2G
-                                    source=topo_F_0g_1d, target=topo_F_1g_1d,
-                                    run_till=[toDev_velo_1g1d],
-                                    mpi_params=mpi_params_S)
-RS_velo_0g1d_1g1d.name += '_GPUtoG_V'
-RF_vorti_0g1d_2g1d = RedistributeIntra(variables=[vorti],  # W_C_1d_to_C_2d_2G
-                                 source=diffusion, target=dt_adapt,
-                                 mpi_params=mpi_params_UW)
-RF_vorti_0g1d_2g1d.name += '_toG_W'
-RF_velo_0g1d_0g2d = RedistributeIntra(variables=[velo], # U_C_1d_to_C_2d
-                                  source=poisson, target=advec,
-                                  mpi_params=mpi_params_UW)
-RF_velo_0g1d_0g2d.name += '_toScales_V'
-RF_vorti_0g1d_0g2d = RedistributeIntra(variables=[vorti],  # W_C_1d_to_C_2d
-                                   source=poisson, target=advec,
-                                   mpi_params=mpi_params_UW)
-RF_vorti_0g1d_0g2d.name += '_toScales_W'
-RF_vorti_2g1d_0g1d = RedistributeIntra(variables=[vorti],   # W_C_2d_2G_to_1d
-                                    source=baroclinic,  #stretch,
-                                    target=diffusion,
-                                    mpi_params=mpi_params_UW)
-RF_vorti_2g1d_0g1d.name += '_FromG_W'
-
-F0g1d_to_S0g1d_velo = RedistributeInter(variables=[velo],
-                              parent=main_comm,
-                              source=topo_F_0g_1d, target=topo_F_0g_1d,
-                              source_id=TASK_UW, target_id=TASK_SCALAR,
-                              run_till=[gradp_op, RS_velo_0g1d_1g1d])
-S2g1d_to_F2g1d_rhs = RedistributeInter(variables=[baroclinic_rhs],
-                                   parent=main_comm,
-                                   source=topo_F_2g_1d, target=topo_F_0g_1d,
-                                   source_id=TASK_SCALAR, target_id=TASK_UW,
-                                   run_till=[baroclinic])
-S2g1d_to_F2g1d_rhs.name += '_rhs_GPU_to_CPU'
-toHost_rhs_2g1d = DataTransfer(source=topo_F_2g_1d,
-                         target=S2g1d_to_F2g1d_rhs,
-                         variables={baroclinic_rhs: topo_F_2g_1d},
-                         mpi_params=mpi_params_S)
-toHost_rhs_2g1d.name += '_ToHost_RHS'
-toDev_gradp_2g1d = DataTransfer(source=topo_F_2g_1d, target=baroclinic_rhs_op,
-                          variables={gradp: topo_F_2g_1d},
-                          mpi_params=mpi_params_S)
-toDev_gradp_2g1d.name += '_ToDev_GradP'
-if IS_OUTPUT:
-    toHost_scal_0g1d = DataTransfer(source=topo_S_0g_1d, target=p_scal_xy,
-                          variables={scal: topo_S_0g_1d},
-                          mpi_params=mpi_params_S,
-                          freq=out_freq,
-                          run_till=[p_scal_xz, ])
-    toHost_scal_0g1d.name += '_ToHost_S'
-
-# Operators setup
-if box.is_on_task(TASK_SCALAR):
-    for op in (advec_scal, gradp_op, baroclinic_rhs_op, filter_scal):  # , diffusion_scal):
-        op.setup()
-    if IS_OUTPUT:
-        for op in (p_gradp_xy, p_scal_xy, p_scal_xz, toHost_scal_0g1d):
-            op.setup()
-    for op in (toDev_velo_1g1d, toHost_rhs_2g1d, toDev_gradp_2g1d, RS_velo_0g1d_1g1d):
-        op.setup()
-if box.is_on_task(TASK_UW):
-    for op in (advec, stretch, diffusion, poisson, c, dt_adapt, baroclinic):
-        op.setup()
-    if IS_OUTPUT:
-        for op in (p_velo, p_velo_xy, p_velo_xz, energy, maxvelo):
-            op.setup()
-    for op in (RF_vorti_0g2d_2g1d, RF_vorti_2g1d_0g1d, RF_velo_0g1d_2g1d, RF_vorti_0g1d_2g1d,
-               RF_velo_0g1d_0g2d, RF_vorti_0g1d_0g2d):
-        op.setup()
-# Wait for all operators setup before setup the intra-comm redistribute
-main_comm.Barrier()
-F0g1d_to_S0g1d_velo.setup()
-S2g1d_to_F2g1d_rhs.setup()
-
-# Operators list
-if IS_OUTPUT:
-    operators_list = [RS_velo_0g1d_1g1d, toDev_velo_1g1d,  ## Scal
-                      gradp_op, p_gradp_xy, toDev_gradp_2g1d,  ## Scal
-                      advec, RF_vorti_0g2d_2g1d, stretch,  ## Flow
-                      advec_scal,# diffusion_scal,  ## Scal
-                      toHost_scal_0g1d, p_scal_xy, p_scal_xz,  ## Scal
-                      baroclinic_rhs_op, filter_scal, toHost_rhs_2g1d,  ## Scal
-                      S2g1d_to_F2g1d_rhs,  ## Scal->Flow
-                      baroclinic,  ## Flow
-                      diffusion, poisson,  ## Flow
-                      F0g1d_to_S0g1d_velo,  ## Flow->Scal
-                      p_velo, p_velo_xy, p_velo_xz, energy, maxvelo,  ## Flow
-                      RF_velo_0g1d_2g1d, RF_vorti_0g1d_2g1d,  ## Flow
-                      RF_velo_0g1d_0g2d, RF_vorti_0g1d_0g2d,  ## Flow
-                      dt_adapt,  ## Flow
-    ]
-else:
-    raise RuntimeError('')
-    # operators_list = [F0g1d_to_S2g1d_velo,
-    #                   toDev, advec_scal, diffusion_scal,
-    #                   advec, RF_vorti_0g2d_2g1d, stretch,
-    #                   RF_vorti_2g1d_0g1d, diffusion, poisson,
-    #                   RF_velo_0g1d_2g1d, RF_velo_0g1d_0g2d, RF_vorti_0g1d_0g2d, dt_adapt]
-
-# Fields initializations
-if box.is_on_task(TASK_SCALAR):
-    scal.initialize(topo=topo_S_0g_1d)
-    advec_dirX = advec_scal.advec_dir[0].discrete_op
-    gpu_scal = advec_dirX.fields_on_grid[0]
-    gpu_pscal = advec_dirX.fields_on_part[gpu_scal][0]
-    #diffusion_scal.discrete_op.set_field_tmp(gpu_pscal)
-    if IS_OUTPUT:
-        p_scal_xy.apply(simu)
-        p_scal_xz.apply(simu)
-if box.is_on_task(TASK_UW):
-    velo.initialize(topo=topo_F_0g_1d)
-    c.apply(simu)
-    poisson.apply(simu)
-    RF_velo_0g1d_2g1d.apply(simu)
-    RF_velo_0g1d_0g2d.apply(simu)
-    RF_vorti_0g1d_0g2d.apply(simu)
-    RF_vorti_0g1d_2g1d.apply(simu)
-    if IS_OUTPUT:
-        p_velo.apply(simu)
-        p_velo_xy.apply(simu)
-        p_velo_xz.apply(simu)
-F0g1d_to_S0g1d_velo.apply(simu)
-F0g1d_to_S0g1d_velo.wait()
-if box.is_on_task(TASK_SCALAR):
-    RS_velo_0g1d_1g1d.apply(simu)
-    RS_velo_0g1d_1g1d.wait()
-    toDev_velo_1g1d.apply(simu)
-    toDev_velo_1g1d.wait()
-    gradp_op.initialize_velocity()
-simu.initialize()
-setup_time += MPI.Wtime() - ctime
-main_comm.Barrier()
-
-# Solve
-total_time = FProfiler("Total")
-solve_time = FProfiler("Solve")
-cttime = MPI.Wtime()
-while not simu.isOver:
-    ctime = MPI.Wtime()
-    if main_rank == 0:
-        simu.printState()
-    for op in operators_list:
-        if box.is_on_task(op.task_id()):
-            op.apply(simu)
-    if box.is_on_task(TASK_SCALAR):
-        # Wait gpu operations on scalar
-        advec_scal.advec_dir[0].discreteFields[scal].wait()
-    solve_time += MPI.Wtime() - ctime
-    dt_adapt.wait()
-    # Synchronize threads
-    main_comm.Barrier()
-    simu.advance()
-
-main_comm.Barrier()
-nb_ite = simu.current_iteration
-total_time += MPI.Wtime() - cttime
-simu.finalize()
-
-
-# if IS_OUTPUT:
-#     if box.is_on_task(TASK_SCALAR):
-#         p_scal_xy.apply(simu)
-#         p_scal_xz.apply(simu)
-#     if box.is_on_task(TASK_UW):
-#         p_velo.apply(simu)
-#         p_velo_xy.apply(simu)
-#         p_velo_xz.apply(simu)
-
-
-# prof = Profiler(None, box.comm_task)
-# prof += setup_time
-# prof += solve_time
-# for op in operators_list:
-#     if box.is_on_task(op.task_id()):
-#         op.finalize()
-#         prof += op.profiler
-# for v in (velo, vorti, scal):
-#     prof += v.profiler
-# prof.summarize()
-
-# if box.is_on_task(TASK_SCALAR):
-#     prof.write(prefix=' '.join([str(s-1) for s in USER_NB_ELEM_UW]) +
-#                ' ' + str(nb_ite) +
-#                ' ' + str(PROC_TASKS[main_rank]) +
-#                ' ' + str(PROC_TASKS.count(PROC_TASKS[main_rank])),
-#                hprefix='Nx Ny Nz Nite Task Np')
-# main_comm.Barrier()
-# if box.is_on_task(TASK_UW):
-#     prof.write(prefix=' '.join([str(s-1) for s in USER_NB_ELEM_UW]) +
-#                ' ' + str(nb_ite) +
-#                ' ' + str(PROC_TASKS[main_rank]) +
-#                ' ' + str(PROC_TASKS.count(PROC_TASKS[main_rank])),
-#                hprefix='Nx Ny Nz Nite Task Np')
-
-
-# if main_rank < 2:
-#     print prof
-# for i in xrange(main_size):
-#     if i == main_rank:
-#         print prof
-#     main_comm.Barrier()
-# main_comm.Barrier()
-
-velo.finalize()
-if box.is_on_task(TASK_SCALAR):
-    scal.finalize()
-if box.is_on_task(TASK_UW):
-    vorti.finalize()
-main_comm.Barrier()
-
-
-# def extract_time(prof, key):
-#     if key is None:
-#         return prof.t
-#     if prof is None:
-#         return 0.0
-#     return prof.profiler[key].t
-
-# times = npw.asrealarray([np.sum([extract_time(pf, k) for pf, k in l]) for l in (
-#             ((total_time, None), ),
-#             ((solve_time, None), ),
-#             ((dt_adapt, 'apply'), ),
-#             ((advec, 'apply'), ),
-#             ((stretch, 'apply'), ),
-#             ((diffusion, 'apply'), ),
-#             ((poisson, 'apply'), ),
-#             ((advec_scal.advec_dir[0], 'apply'), ),
-#             ((advec_scal.advec_dir[0].discrete_op, 'OpenCL_copy'),
-#              (advec_scal.advec_dir[0].discrete_op, 'OpenCL_transpose_xy'),
-#              (advec_scal.advec_dir[0].discrete_op, 'OpenCL_advection_and_remeshing'), ),
-#             ((advec_scal.advec_dir[1], 'apply'), ),
-#             ((advec_scal.advec_dir[1].discrete_op, 'OpenCL_transpose_xy'),
-#              (advec_scal.advec_dir[1].discrete_op, 'OpenCL_transpose_xz'),
-#              (advec_scal.advec_dir[1].discrete_op, 'OpenCL_advection_and_remeshing'),
-#              (advec_scal.advec_dir[1].discrete_op, 'OpenCL_buff_advec_and_remesh_l'),
-#              (advec_scal.advec_dir[1].discrete_op, 'OpenCL_buff_advec_and_remesh'),
-#              (advec_scal.advec_dir[1].discrete_op, 'OpenCL_buff_advec_and_remesh_r'), ),
-#             ((advec_scal.advec_dir[1].discrete_op, 'comm_gpu_advec_set'),
-#              (advec_scal.advec_dir[1].discrete_op, 'comm_cpu_advec'),
-#              (advec_scal.advec_dir[1].discrete_op, 'comm_cpu_advec_get'),
-#              (advec_scal.advec_dir[1].discrete_op, 'comm_gpu_remesh_get'),
-#              (advec_scal.advec_dir[1].discrete_op, 'comm_gpu_remesh_set_loc'),
-#              (advec_scal.advec_dir[1].discrete_op, 'comm_gpu_remesh_get_loc'),
-#              (advec_scal.advec_dir[1].discrete_op, 'comm_cpu_remesh'), ),
-#             ((advec_scal.advec_dir[2], 'apply'), ),
-#             ((advec_scal.advec_dir[2].discrete_op, 'OpenCL_transpose_xz'),
-#              (advec_scal.advec_dir[2].discrete_op, 'OpenCL_advection_and_remeshing'),
-#              (advec_scal.advec_dir[2].discrete_op, 'OpenCL_buff_advec_and_remesh_l'),
-#              (advec_scal.advec_dir[2].discrete_op, 'OpenCL_buff_advec_and_remesh'),
-#              (advec_scal.advec_dir[2].discrete_op, 'OpenCL_buff_advec_and_remesh_r'), ),
-#             ((advec_scal.advec_dir[2].discrete_op, 'comm_gpu_advec_set'),
-#              (advec_scal.advec_dir[2].discrete_op, 'comm_cpu_advec'),
-#              (advec_scal.advec_dir[2].discrete_op, 'comm_cpu_advec_get'),
-#              (advec_scal.advec_dir[2].discrete_op, 'comm_gpu_remesh_get'),
-#              (advec_scal.advec_dir[2].discrete_op, 'comm_gpu_remesh_set_loc'),
-#              (advec_scal.advec_dir[2].discrete_op, 'comm_gpu_remesh_get_loc'),
-#              (advec_scal.advec_dir[2].discrete_op, 'comm_cpu_remesh'), ),
-#             # ((diffusion_scal, 'apply'), ),
-#             # ((diffusion_scal.discrete_op, 'OpenCL_enqueue_copy'),
-#             #  (diffusion_scal.discrete_op, 'OpenCL_diffusion'), ),
-#             # ((diffusion_scal.discrete_op, 'comm_diffusion'), ),
-#             ((RF_vorti_0g2d_2g1d, 'apply'),
-#              (RF_vorti_2g1d_0g1d, 'apply'),
-#              (RF_velo_0g1d_0g2d, 'apply'),
-#              (RF_vorti_0g1d_0g2d, 'apply'),
-#              (F0g1d_to_S2g1d_velo, 'apply'),
-#              (toDev_velo_1g1d, 'apply'), ),
-#             )])
-# tl = "Total Solve Dt Avection(w) Stretching Diffusion(w) Poisson "
-# tl += "Advection(s)_X Advection(s)_CL_X "
-# tl += "Advection(s)_Y Advection(s)_CL_Y Advection_Comm_Y "
-# tl += "Advection(s)_Z Advection(s)_CL_Z Advection_Comm_Z "
-# tl += "Diffusion(s) Diffusion(s)_CL Diffusion(s)_Comm Comm"
-
-
-# times_task = npw.zeros_like(times)
-# box.comm_task.Reduce([times, times.size, HYSOP_MPI_REAL],
-#                      [times_task, times.size, HYSOP_MPI_REAL])
-# times_task /= (1.0 * nb_ite * PROC_TASKS.count(PROC_TASKS[main_rank]))
-# if PROC_TASKS[main_rank] == TASK_UW:
-#     p_l = ["FLOW", ]
-# if PROC_TASKS[main_rank] == TASK_SCALAR:
-#     p_l = ["SCALAR", ]
-# p_l += [PROC_TASKS[main_rank], nb_ite] + USER_NB_ELEM_UW + USER_NB_ELEM_S
-# p_l += [PROC_TASKS.count(PROC_TASKS[main_rank]), ]
-# pp_l = "Label Task Ite UW_X UW_Y UW_Z S_X S_Y S_Z Np"
-# for i in xrange(main_size):
-#     if main_rank == i:
-#         if i == 0:
-#             print pp_l + ' ' + tl
-#         if box.comm_task.Get_rank() == 0:
-#             print ' '.join([str(e) for e in p_l]) + \
-#                 ' ' + ' '.join([str(e) for e in times_task])
-#     main_comm.Barrier()
diff --git a/trashed_examples/Multiphase/RTI.py b/trashed_examples/Multiphase/RTI.py
deleted file mode 100644
index 2e23829a6b00e086a4fbe07a68629b54074ce483..0000000000000000000000000000000000000000
--- a/trashed_examples/Multiphase/RTI.py
+++ /dev/null
@@ -1,542 +0,0 @@
-#!/usr/bin/env python
-# Rayleigh-Taylor Instability
-# Scripts arguments:
-# 1. Flow resolution
-# 2. Scalar resolution
-# 3. Dictionary for devices id: (mpi rank: device id)
-# 4. Is data output
-# 5. Low density
-# 6. High density
-# mpirun -np 10 python ./RTI_new.py "[129,129,129]" "[257,257,257]" "{0:0,5:1}" "True" "1" "3"
-import sys
-USER_NB_ELEM_UW = eval(sys.argv[1])
-USER_NB_ELEM_S = eval(sys.argv[2])
-USER_RANK_DEVICE_ID = eval(sys.argv[3])
-IS_OUTPUT = eval(sys.argv[4])
-LOW_DENSITY = eval(sys.argv[5])
-HIGH_DENSITY = eval(sys.argv[6])
-import hysop
-# import hysop.gpu
-# hysop.gpu.CL_PROFILE = True
-from hysop.constants import np, HDF5, ASCII, HYSOP_MPI_REAL
-from hysop.mpi.main_var import MPI, main_size, main_rank, main_comm
-from hysop.tools.parameters import MPIParams, Discretization, IOParams
-from hysop.problem.simulation import Simulation
-from hysop.fields.variable_parameter import VariableParameter
-from hysop.methods_keys import TimeIntegrator, Interpolation, Remesh,\
-    Support, Splitting, MultiScale, MultiScale, SpaceDiscretisation, \
-    GhostUpdate, Scales, dtCrit, ExtraArgs
-from hysop.numerics.integrators.runge_kutta2 import RK2 as RK
-from hysop.numerics.integrators.runge_kutta3 import RK3 as RK3
-from hysop.numerics.finite_differences import FD_C_4
-from hysop.numerics.remeshing import L6_4 as remesh_formula
-from hysop.numerics.remeshing import L2_1
-from hysop.numerics.remeshing import Linear
-from hysop.operator.advection import Advection
-from hysop.operator.diffusion import Diffusion
-from hysop.operator.stretching import Stretching
-from hysop.operator.poisson import Poisson
-from hysop.operator.baroclinic_from_rhs import BaroclinicFromRHS
-from hysop.operator.differential import Curl
-from hysop.operator.multiresolution_filter import MultiresolutionFilter
-from hysop.operator.multiphase_gradp import MultiphaseGradP
-from hysop.operator.multiphase_baroclinic_rhs import MultiphaseBaroclinicRHS
-from hysop.operator.adapt_timestep import AdaptTimeStep
-from hysop.operators import Custom
-from hysop.operator.redistribute_inter import RedistributeInter
-from hysop.operator.redistribute_intra import RedistributeIntra
-from hysop.gpu.gpu_transfer import DataTransfer
-from hysop.domain.subsets import SubBox
-from hysop.operator.hdf_io import HDF_Writer
-from hysop.operator.energy_enstrophy import EnergyEnstrophy
-import hysop.tools.numpywrappers as npw
-from hysop.tools.io_utils import IO
-from hysop.operator.spectrum import Spectrum
-pi = np.pi
-cos = np.cos
-sin = np.sin
-exp = np.exp
-abs = np.abs
-tanh = np.tanh
-
-TASK_UW = 1
-TASK_SCALAR = 2
-PROC_TASKS = [TASK_UW, ] * main_size
-for p in USER_RANK_DEVICE_ID:
-    PROC_TASKS[p] = TASK_SCALAR
-try:
-    DEVICE_ID = USER_RANK_DEVICE_ID[main_rank]
-except KeyError:
-    DEVICE_ID = None
-print USER_RANK_DEVICE_ID
-out_freq = 10
-# Physical parameters:
-# Flow viscosity
-VISCOSITY = 5e-5
-# Schmidt number
-SC = ((1. * USER_NB_ELEM_S[0] - 1.)/(1. * USER_NB_ELEM_UW[0] - 1.))**2
-# Scalar diffusivity
-DIFF_COEFF_SCAL = VISCOSITY / SC
-MAX_DT = 0.05  # ok with 0.05
-MAX_CFL = 1.5  # ok with 1.5 (lcfl bound)
-MAX_LCFL = 0.15 # ok with 0.2
-#MAX_CFL_VEC = [1.5, 0.6, 0.5]
-
-
-width = 0.01
-ampl3 = 0.3
-ampl = 0.05
-ampl2 = 0.05
-
-# NS
-# CPU ::  Velo(C_2d), Vorti(C_2d) > Advec > Vorti(C_2d)
-# CPU ::  Velo(C_2d,2G), Vorti(C_2d,2G) > Stretching > Vorti(C_2d,2G)
-# CPU ::  Vorti(C_2d,2G) > Penalisation > Vorti(C_2d,2G)
-# CPU ::  Vorti(C_1d) > Diffusion > Vorti(C_1d)
-# CPU ::  Vorti(C_1d) > Poisson > Velo(C_1d)
-# CPU ::  Velo(C_2d,2G), Vorti(C_2d,2G) > Dt
-# Scal
-# GPU ::  Velo(C_2d,1G), Scal(F_2d) > Advec > Scal(F_2d)
-# Baroclinic
-# GPU ::  Velo(C_2d,2G) > GradP > gradp(C_2d,2G)
-# GPU ::  Rho(F_2d), gradp(C_2d,2G) > BaroclinicRHS > rhs(F_2d)
-# GPU ::  rhs(F_2d) > Filter > rhs(C_2d,2G)
-# CPU ::  rhs(C_2d,2G), Vorti(C_2d,2G) > Baroclinic > Vorti(C_2d,2G)
-
-ctime = MPI.Wtime()
-
-# Domain
-box = hysop.Box(length=[1., 1., 1.], origin=[0., 0., 0.],
-                proc_tasks=PROC_TASKS)
-mpi_params = MPIParams(comm=box.comm_task, task_id=PROC_TASKS[main_rank])
-mpi_params_S = MPIParams(comm=box.comm_task, task_id=TASK_SCALAR)
-mpi_params_UW = MPIParams(comm=box.comm_task, task_id=TASK_UW)
-
-SIGMA=1e-3
-NOISE=1e-4
-
-def computeVelo(res, x, y, z, t):
-    zz = exp(-(z-0.5) * (z-0.5) / SIGMA) * NOISE
-    from create_random_arrays import random_init
-    randX, randY, randZ = random_init(res[0].shape, box.comm_task)
-    res[0][...] = zz * randX
-    res[1][...] = zz * randY
-    res[2][...] = zz * randZ
-    return res
-
-def initScal(res, x, y, z, t):
-    res[0][...] = z
-    return res
-
-
-temp_maxvelo = npw.zeros((3, ))
-maxvelo_values = npw.zeros((3, ))
-
-
-def calc_maxvelo(simu, v):
-    temp_maxvelo[0] = np.max(np.abs(v[0].data[0]))
-    temp_maxvelo[1] = np.max(np.abs(v[0].data[1]))
-    temp_maxvelo[2] = np.max(np.abs(v[0].data[2]))
-    v[0].topology.comm.Allreduce(sendbuf=[temp_maxvelo, 3, HYSOP_MPI_REAL],
-                                 recvbuf=[maxvelo_values, 3, HYSOP_MPI_REAL],
-                                 op=MPI.MAX)
-    return maxvelo_values
-
-
-# Fields
-velo = hysop.Field(domain=box, formula=computeVelo,
-                   name='Velocity', is_vector=True)
-vorti = hysop.Field(domain=box,
-                    name='Vorticity', is_vector=True)
-scal = hysop.Field(domain=box, formula=initScal,
-                   name='Scalar', is_vector=False)
-gradp = hysop.Field(domain=box,
-                    name='GradP', is_vector=True)
-baroclinic_rhs = hysop.Field(domain=box,
-                             name='B_rhs', is_vector=True)
-
-data = {'dt': 0.001}
-dt = VariableParameter(data)
-simu = Simulation(start=0.0, end=2.5, time_step=0.001, max_iter=10000)
-
-# Flow discretizations:
-d_C = Discretization(USER_NB_ELEM_UW)
-d_C_2G = Discretization(USER_NB_ELEM_UW, [2, ] * 3)
-# Scalar discretization
-d_F = Discretization(USER_NB_ELEM_S)
-
-# Topologies
-topo_C_2d = None
-topo_C_2d_2G = None
-topo_C_1d = None
-topo_F_2d = None
-topo_C_2d_2G = box.create_topology(
-    d_C_2G, dim=1, mpi_params=mpi_params)
-topo_C_2d = box.create_topology(
-    d_C, dim=2, mpi_params=mpi_params)
-topo_C_1d = box.create_topology(
-    d_C, dim=1, mpi_params=mpi_params)
-topo_F_2d = box.create_topology(
-    d_F, dim=1, mpi_params=mpi_params)
-###### WARNING This topo is 1D to be fftw compliant.
-
-
-# Operators
-# GPU operators
-advec_scal_method = {TimeIntegrator: RK,
-                     Interpolation: Linear,
-                     Remesh: remesh_formula,
-                     Support: 'gpu_1k',
-                     Splitting: 'o2',
-                     MultiScale: Linear}
-if PROC_TASKS.count(TASK_SCALAR) == 1:
-    advec_scal_method[ExtraArgs] = {'device_id': DEVICE_ID,
-                                    'device_type': 'gpu'}
-else:
-    advec_scal_method[ExtraArgs] = {'max_cfl': MAX_CFL,
-                                    'max_velocity': [2.5, 2.5, 3.5], # for rho=1<->3
-                                    'max_dt': MAX_DT,
-                                    'device_id': DEVICE_ID,
-                                    'device_type': 'gpu'}
-advec_scal = Advection(velo,
-                       discretization=topo_C_2d_2G,
-                       variables={scal: topo_F_2d},
-                       mpi_params=mpi_params_S,
-                       method=advec_scal_method)
-diffusion_scal = Diffusion(viscosity=DIFF_COEFF_SCAL,
-                           vorticity=scal,
-                           discretization=topo_F_2d,
-                           mpi_params=mpi_params_S,
-                           method={Support: 'gpu',
-                                   SpaceDiscretisation: 'fd',
-                                   ExtraArgs: {'device_id': DEVICE_ID,
-                                               'device_type': 'gpu'}})
-diffusion_scal.name += '_(Scalar)'
-# tanh(x) is in [-1:1] and -1 stand for the flow.
-# We want [LOW_DENSITY:HIGH_DENSITY]
-scal_to_rho = '{0:f}*({1}*tanh(100.0*x-50.0)+0.5)+{2:f}'.format(
-    abs(LOW_DENSITY - HIGH_DENSITY),
-    0.5 if LOW_DENSITY < HIGH_DENSITY else -0.5,
-    min(LOW_DENSITY, HIGH_DENSITY))
-baroclinic_rhs_op = MultiphaseBaroclinicRHS(
-    baroclinic_rhs, scal, gradp,
-    variables={baroclinic_rhs: topo_F_2d,
-               gradp: topo_C_2d_2G,
-               scal: topo_F_2d},
-    method={Support: 'gpu',
-            SpaceDiscretisation: FD_C_4,
-            ExtraArgs: {'density_func': scal_to_rho,
-                        'device_id': DEVICE_ID,
-                        'device_type': 'gpu'}},
-    mpi_params=mpi_params_S)
-filter_scal = MultiresolutionFilter(
-    d_in=topo_F_2d, d_out=topo_C_2d_2G,
-    variables={baroclinic_rhs: topo_C_2d_2G},
-    method={Remesh: L2_1,
-            Support: 'gpu',
-            ExtraArgs: {'device_id': DEVICE_ID,
-                        'device_type': 'gpu'}},
-    mpi_params=mpi_params_S)
-gradp_op = MultiphaseGradP(velocity=velo, gradp=gradp, viscosity=VISCOSITY,
-                           discretization=topo_C_2d_2G,
-                           mpi_params=mpi_params_S,
-                           method={SpaceDiscretisation: FD_C_4,
-                                   ExtraArgs: {'gravity': [0., 0., -1.]}})
-
-# CPU operators
-advec = Advection(velo,
-                  discretization=topo_C_2d,
-                  variables={vorti: topo_C_2d},
-                  mpi_params=mpi_params_UW,
-                  method={Scales: 'p_64', MultiScale: 'L4_4'})
-stretch = Stretching(velo, vorti, discretization=topo_C_2d_2G,
-                     mpi_params=mpi_params_UW)
-baroclinic = BaroclinicFromRHS(vorti, baroclinic_rhs,
-                               discretization=topo_C_2d_2G,
-                               mpi_params=mpi_params_UW)
-diffusion = Diffusion(variables={vorti: topo_C_1d},
-                      viscosity=VISCOSITY,
-                      mpi_params=mpi_params_UW)
-poisson = Poisson(velo, vorti, discretization=topo_C_1d,
-                  mpi_params=mpi_params_UW)
-c = Curl(velo, vorti, discretization=topo_C_1d,
-         method={SpaceDiscretisation: 'fftw', GhostUpdate: True},
-         mpi_params=mpi_params_UW)
-#dt_output = None
-#if IS_OUTPUT:
-dt_output = IOParams(frequency=1, filename='dt.dat', fileformat=ASCII)
-dt_adapt = AdaptTimeStep(velo, vorti,
-                         simulation=simu,
-                         time_range=[10, np.infty],
-                         discretization=topo_C_2d_2G,
-                         method={TimeIntegrator: RK3,
-                                 SpaceDiscretisation: FD_C_4,
-                                 dtCrit: ['gradU', 'cfl']},
-                         lcfl=MAX_LCFL, maxdt=MAX_DT,
-                         cfl=MAX_CFL,
-                         io_params=dt_output,
-                         mpi_params=mpi_params_UW)
-
-# Operators discretizations
-if box.is_on_task(TASK_SCALAR):
-    for op in (advec_scal, gradp_op, filter_scal,
-               baroclinic_rhs_op, diffusion_scal):
-        op.discretize()
-if box.is_on_task(TASK_UW):
-    for op in (advec, stretch, diffusion, poisson, c, dt_adapt, baroclinic):
-        op.discretize()
-
-if IS_OUTPUT:
-    # Defining subdomains
-    L_Vx = 1. - 1. / (1. * USER_NB_ELEM_UW[0])
-    L_Sx = 1. - 1. / (1. * USER_NB_ELEM_S[0])
-    L_Vy = 1. - 1. / (1. * USER_NB_ELEM_UW[1])
-    L_Sy = 1. - 1. / (1. * USER_NB_ELEM_S[1])
-    L_Vz = 1. - 1. / (1. * USER_NB_ELEM_UW[2])
-    L_Sz = 1. - 1. / (1. * USER_NB_ELEM_S[2])
-    XY_plane_v = SubBox(origin=[0., 0., 1.], length=[L_Vx, L_Vy, 0.],
-                        parent=box)
-    XZ_plane_v = SubBox(origin=[0., 0.5, 0.], length=[L_Vx, 0., L_Vz],
-                        parent=box)
-    YZ_plane_s = SubBox(origin=[0.5, 0., 0.], length=[0., L_Sy, L_Sz],
-                        parent=box)
-    XZ_plane_s = SubBox(origin=[0., 0.5, 0.], length=[L_Sx, 0., L_Sz],
-                        parent=box)
-    # Defining output operators
-    p_velo = HDF_Writer(variables={velo: topo_C_1d, vorti: topo_C_1d},
-                        mpi_params=mpi_params_UW,
-                        io_params=IOParams(frequency=out_freq,
-                                           filename='flow',
-                                           fileformat=HDF5))
-    p_scal_yz = HDF_Writer(variables={scal: topo_F_2d},
-                           var_names={scal: 'Scalar'},
-                           subset=YZ_plane_s,
-                           mpi_params=mpi_params_S,
-                           io_params=IOParams(frequency=out_freq,
-                                              filename='scal_YZ',
-                                              fileformat=HDF5))
-    p_scal_xz = HDF_Writer(variables={scal: topo_F_2d},
-                           var_names={scal: 'Scalar'},
-                           subset=XZ_plane_s,
-                           mpi_params=mpi_params_S,
-                           io_params=IOParams(frequency=out_freq,
-                                              filename='scal_XZ',
-                                              fileformat=HDF5))
-    p_scal = HDF_Writer(variables={scal: topo_F_2d},
-                        var_names={scal: 'Scalar'},
-                        mpi_params=mpi_params_S,
-                        io_params=IOParams(frequency=out_freq,
-                                           filename='scal',
-                                           fileformat=HDF5))
-    p_scal_yz.name += '_(Scalar)'
-    p_scal_xz.name += '_(Scalar)'
-    p_scal.name += '_(Scalar)'
-    energy = EnergyEnstrophy(velocity=velo,
-                             vorticity=vorti,
-                             discretization=topo_C_1d,
-                             mpi_params=mpi_params_UW,
-                             io_params=IOParams(frequency=1,
-                                                filename='energy.dat',
-                                                fileformat=ASCII))
-    maxvelo = Custom(in_fields=[velo],
-                     variables={velo: topo_C_1d},
-                     function=calc_maxvelo,
-                     diagnostics_shape=(1, 4),
-                     mpi_params=mpi_params_UW,
-                     io_params=IOParams(frequency=1,
-                                        filename='maxvelo.dat',
-                                        fileformat=ASCII))
-    if box.is_on_task(TASK_UW):
-        for op in (p_velo, energy, maxvelo):
-            op.discretize()
-    if box.is_on_task(TASK_SCALAR):
-        for op in (p_scal_yz, p_scal_xz, p_scal):
-            op.discretize()
-
-# Redistribute operators
-# CPU redistributes
-W_C_2d_to_2G = RedistributeIntra(variables=[vorti],
-                                 source=advec, target=stretch,
-                                 mpi_params=mpi_params_UW)
-W_C_2d_to_2G.name += '_W_C_2d_to_2G'
-W_C_2d_2G_to_1d = RedistributeIntra(variables=[vorti],
-                                    source=baroclinic,  # stretch,
-                                    target=diffusion,
-                                    mpi_params=mpi_params_UW)
-W_C_2d_2G_to_1d.name += '_W_C_2d_2G_to_1d'
-W_C_1d_to_C_2d_2G = RedistributeIntra(variables=[vorti],
-                                      source=diffusion, target=dt_adapt,
-                                      mpi_params=mpi_params_UW)
-W_C_1d_to_C_2d_2G.name += '_W_C_1d_to_C_2d_2G'
-W_C_1d_to_C_2d = RedistributeIntra(variables=[vorti],
-                                   source=diffusion, target=advec,
-                                   mpi_params=mpi_params_UW)
-W_C_1d_to_C_2d.name += "_W_C_1d_to_C_2d"
-U_C_1d_to_C_2d_2G = RedistributeIntra(variables=[velo],
-                                      source=poisson, target=stretch,
-                                      run_till=[stretch, dt_adapt],
-                                      mpi_params=mpi_params_UW)
-U_C_1d_to_C_2d_2G.name += '_U_C_1d_to_C_2d_2G'
-U_C_1d_to_C_2d = RedistributeIntra(variables=[velo],
-                                   source=poisson, target=advec,
-                                   mpi_params=mpi_params_UW)
-U_C_1d_to_C_2d.name += '_U_C_1d_to_C_2d'
-toDevU = DataTransfer(source=topo_C_2d_2G, target=advec_scal,
-                      variables={velo: topo_C_2d_2G},
-                      run_till=[advec_scal, gradp_op],
-                      mpi_params=mpi_params_S)
-toDevU.name += '_ToDev_U'
-U_CPU_to_GPU = RedistributeInter(variables=[velo],
-                                 parent=main_comm,
-                                 source=topo_C_1d, target=topo_C_2d_2G,
-                                 source_id=TASK_UW, target_id=TASK_SCALAR,
-                                 run_till=[toDevU])
-rhs_GPU_to_CPU = RedistributeInter(variables=[baroclinic_rhs],
-                                   parent=main_comm,
-                                   source=topo_C_2d_2G, target=topo_C_2d_2G,
-                                   source_id=TASK_SCALAR, target_id=TASK_UW,
-                                   run_till=[baroclinic])
-rhs_GPU_to_CPU.name += '_rhs_GPU_to_CPU'  # ok
-toHostRHS = DataTransfer(source=topo_C_2d_2G,
-                         target=rhs_GPU_to_CPU,
-                         variables={baroclinic_rhs: topo_C_2d_2G},
-                         mpi_params=mpi_params_S,)
-toHostRHS.name += '_ToHost_RHS'
-toDevGradP = DataTransfer(source=topo_C_2d_2G, target=baroclinic_rhs_op,
-                          variables={gradp: topo_C_2d_2G},
-                          mpi_params=mpi_params_S)
-toDevGradP.name += '_ToDev_GradP'
-
-if IS_OUTPUT:
-    toHostS = DataTransfer(source=topo_F_2d, target=p_scal_yz,
-                           variables={scal: topo_F_2d},
-                           mpi_params=mpi_params_S,
-                           freq=out_freq,
-                           run_till=[p_scal_xz, p_scal_yz, p_scal])
-    toHostS.name += '_ToHost_S'
-
-# Operators setup
-if box.is_on_task(TASK_SCALAR):
-    for op in (advec_scal, gradp_op,
-               baroclinic_rhs_op, filter_scal, diffusion_scal):
-        op.setup()
-    if IS_OUTPUT:
-        for op in (p_scal_yz, p_scal_xz, p_scal, toHostS):
-            op.setup()
-    for op in (toDevU, toHostRHS, toDevGradP):
-        op.setup()
-if box.is_on_task(TASK_UW):
-    for op in (advec, stretch, diffusion, poisson, c, dt_adapt, baroclinic):
-        op.setup()
-    if IS_OUTPUT:
-        for op in (p_velo, energy, maxvelo):
-            op.setup()
-    for op in (W_C_2d_to_2G, W_C_2d_2G_to_1d,
-               W_C_1d_to_C_2d, W_C_1d_to_C_2d_2G,
-               U_C_1d_to_C_2d, U_C_1d_to_C_2d_2G):
-        op.setup()
-# Wait for all operators setup before setup the intra-comm redistribute
-main_comm.Barrier()
-U_CPU_to_GPU.setup()
-rhs_GPU_to_CPU.setup()
-
-# Operators list
-if IS_OUTPUT:
-    operators_list = [toDevU, gradp_op, toDevGradP,
-                      advec_scal, diffusion_scal,
-                      toHostS, p_scal_yz, p_scal_xz, p_scal,
-                      baroclinic_rhs_op, filter_scal,
-                      toHostRHS, rhs_GPU_to_CPU,
-                      advec, W_C_2d_to_2G, stretch, baroclinic,
-                      W_C_2d_2G_to_1d, diffusion, poisson,
-                      p_velo, energy, maxvelo,
-                      U_CPU_to_GPU,
-                      U_C_1d_to_C_2d, U_C_1d_to_C_2d_2G,
-                      W_C_1d_to_C_2d, W_C_1d_to_C_2d_2G, dt_adapt]
-else:
-    operators_list = [toDevU, advec_scal, diffusion_scal,
-                      advec, W_C_2d_to_2G, stretch,
-                      W_C_2d_2G_to_1d, diffusion, poisson,
-                      energy, maxvelo,
-                      U_CPU_to_GPU,
-                      U_C_1d_to_C_2d, U_C_1d_to_C_2d_2G,
-                      W_C_1d_to_C_2d, W_C_1d_to_C_2d_2G, dt_adapt]
-
-# Fields initializations
-if box.is_on_task(TASK_SCALAR):
-    scal.initialize(topo=topo_F_2d)
-    advec_dirX = advec_scal.advec_dir[0].discrete_op
-    gpu_scal = advec_dirX.fields_on_grid[0]
-    gpu_pscal = advec_dirX.fields_on_part[gpu_scal][0]
-    diffusion_scal.discrete_op.set_field_tmp(gpu_pscal)
-    if IS_OUTPUT:
-        p_scal_yz.apply(simu)
-        p_scal_xz.apply(simu)
-        p_scal.apply(simu)
-if box.is_on_task(TASK_UW):
-    velo.initialize(topo=topo_C_1d)
-    c.apply(simu)
-    poisson.apply(simu)
-    U_C_1d_to_C_2d.apply(simu)
-    U_C_1d_to_C_2d_2G.apply(simu)
-    W_C_1d_to_C_2d.apply(simu)
-    W_C_1d_to_C_2d_2G.apply(simu)
-    if IS_OUTPUT:
-        p_velo.apply(simu)
-        # p_velo_xy.apply(simu)
-        # p_velo_xz.apply(simu)
-U_CPU_to_GPU.apply(simu)
-U_CPU_to_GPU.wait()
-if box.is_on_task(TASK_SCALAR):
-    toDevU.apply(simu)
-    toDevU.wait()
-    gradp_op.initialize_velocity()
-
-simu.initialize()
-setup_time = MPI.Wtime() - ctime
-main_comm.Barrier()
-
-# Solve
-solve_time = 0.
-while not simu.isOver:
-    ctime = MPI.Wtime()
-    if main_rank == 0:
-        simu.printState()
-    for op in operators_list:
-        if box.is_on_task(op.task_id()):
-            op.apply(simu)
-    if box.is_on_task(TASK_SCALAR):
-        # Wait gpu operations on scalar
-        advec_scal.advec_dir[0].discreteFields[scal].wait()
-    solve_time += MPI.Wtime() - ctime
-    dt_adapt.wait()
-    # Synchronize threads
-    main_comm.Barrier()
-    simu.advance()
-
-U_CPU_to_GPU.wait()
-main_comm.Barrier()
-nb_ite = simu.current_iteration
-simu.finalize()
-
-
-if IS_OUTPUT:
-    if box.is_on_task(TASK_SCALAR):
-        p_scal_yz.apply(simu)
-        p_scal_xz.apply(simu)
-        p_scal.apply(simu)
-    if box.is_on_task(TASK_UW):
-        p_velo.apply(simu)
-        # p_velo_xy.apply(simu)
-        # p_velo_xz.apply(simu)
-
-for op in operators_list:
-    if box.is_on_task(op.task_id()):
-        op.finalize()
-
-velo.finalize()
-if box.is_on_task(TASK_SCALAR):
-    scal.finalize()
-if box.is_on_task(TASK_UW):
-    vorti.finalize()
-main_comm.Barrier()
diff --git a/trashed_examples/Multiphase/create_random_arrays.py b/trashed_examples/Multiphase/create_random_arrays.py
deleted file mode 100644
index cb68fc081cc057b807251de4de8231bb6ecdf601..0000000000000000000000000000000000000000
--- a/trashed_examples/Multiphase/create_random_arrays.py
+++ /dev/null
@@ -1,39 +0,0 @@
-import os
-import numpy as np
-from hysop.constants import HYSOP_REAL, ORDER
-
-
-def random_init(shape, mpi_comm):
-    # Create a folder to store all random arrays
-    d = 'rand_init'
-    if mpi_comm.Get_rank() == 0:
-        if not os.path.exists(d):
-            os.makedirs(d)
-    mpi_comm.Barrier()
-    file_name = "{0}_{1}_{2}".format(*shape)
-    file_name += "_{0}p_{1}.dat".format(mpi_comm.Get_size(),
-                                        mpi_comm.Get_rank())
-    try:
-        randX = np.asarray(
-            np.reshape(np.fromfile(os.path.join(d, 'randX_' + file_name),
-                                   dtype=HYSOP_REAL), shape),
-            dtype=HYSOP_REAL, order=ORDER)
-        randY = np.asarray(
-            np.reshape(np.fromfile(os.path.join(d, 'randY_' + file_name),
-                                   dtype=HYSOP_REAL), shape),
-            dtype=HYSOP_REAL, order=ORDER)
-        randZ = np.asarray(
-            np.reshape(np.fromfile(os.path.join(d, 'randZ_' + file_name),
-                                   dtype=HYSOP_REAL), shape),
-            dtype=HYSOP_REAL, order=ORDER)
-    except IOError:
-        randX = np.asarray(np.random.random(shape),
-                           dtype=HYSOP_REAL, order=ORDER) - 0.5
-        randY = np.asarray(np.random.random(shape),
-                           dtype=HYSOP_REAL, order=ORDER) - 0.5
-        randZ = np.asarray(np.random.random(shape),
-                           dtype=HYSOP_REAL, order=ORDER) - 0.5
-        randX.tofile(os.path.join(d, 'randX_' + file_name))
-        randY.tofile(os.path.join(d, 'randY_' + file_name))
-        randZ.tofile(os.path.join(d, 'randZ_' + file_name))
-    return randX, randY, randZ
diff --git a/trashed_examples/NSDebug_faux2D.py b/trashed_examples/NSDebug_faux2D.py
deleted file mode 100755
index d69c4a7a943ca232c3ea0fbc24499256e784dbfd..0000000000000000000000000000000000000000
--- a/trashed_examples/NSDebug_faux2D.py
+++ /dev/null
@@ -1,439 +0,0 @@
-#!/usr/bin/python
-
-"""
-Navier Stokes 3D : flow past bluff bodies (using penalization).
-
-All parameters are set and defined in python module dataNS_bb.
-
-"""
-
-import parmepy as pp
-from parmepy.f2py import fftw2py
-import numpy as np
-from parmepy.problem.simulation import Simulation
-from parmepy.domain.obstacle.controlBox import ControlBox
-from parmepy.methods_keys import Scales, TimeIntegrator, Interpolation,\
-    Remesh, Support, Splitting, dtCrit, SpaceDiscretisation, GhostUpdate
-from parmepy.numerics.integrators.runge_kutta2 import RK2 as RK2
-from parmepy.numerics.integrators.runge_kutta3 import RK3 as RK3
-from parmepy.numerics.integrators.runge_kutta4 import RK4 as RK4
-from parmepy.numerics.finite_differences import FD_C_4, FD_C_2
-from parmepy.numerics.interpolation import Linear
-from parmepy.numerics.remeshing import L6_4 as rmsh
-import parmepy.tools.numpywrappers as npw
-from parmepy.constants import HDF5
-import parmepy.tools.io_utils as io
-
-
-## ----------- A 3d problem -----------
-print " ========= Start Navier-Stokes 3D (Flow past bluff bodies) ========="
-
-## pi constant
-pi = np.pi
-cos = np.cos
-sin = np.sin
-
-# ====== Flow constants =======
-uinf = 1.0
-VISCOSITY = 1. / 300.
-
-# ========= Geometry of the domain =========
-dim = 3
-boxlength = npw.realarray([6.0, 6.0, 0.08])
-boxorigin = npw.realarray([-2.0, -3.0, -0.04])
-#boxlength = npw.realarray([5.12, 5.12, 5.12])
-#boxorigin = npw.realarray([-2.56, -2.56, -2.56])
-# The domain
-box = pp.Box(dim, length=boxlength, origin=boxorigin)
-
-# Sphere inside the domain
-RADIUS = 0.5
-obst_pos = [0., 0., 0.]
-from parmepy.domain.obstacle.sphere import Sphere, HemiSphere
-from parmepy.domain.obstacle.cylinder import Cylinder, HemiCylinder
-#sphere = Sphere(box, position=obst_pos, radius=RADIUS)
-sphere = Cylinder(box, position=obst_pos, radius=RADIUS)
-
-# ========= Discretisation parameters =========
-nbElem = [301, 301, 5]
-#nbElem = [65, 65, 65]
-
-# ========= Vector Fields and variable parameters =========
-
-# Function to compute initial velocity
-def computeVel(res, x, y, z, t):
-    res[0][...] = uinf
-    res[1][...] = 0.
-    res[2][...] = 0.
-    return res
-
-# Function to compute initial vorticity
-def computeVort(res, x, y, z, t):
-    res[0][...] = 0.
-    res[1][...] = 0.
-    res[2][...] = 0.
-    return res
-
-from parmepy import VariableParameter, Field
-velo = Field(domain=box, formula=computeVel,
-             name='Velocity', is_vector=True)
-vorti = Field(domain=box, formula=computeVort,
-              name='Vorticity', is_vector=True)
-dt = VariableParameter(data=0.0125, name='dt')
-
-# ========= Operators that describe the problem =========
-op = {}
-
-# Adaptative timestep method : dt = min(values(dtCrit))
-# where dtCrit is a list of criterions on which the computation 
-# of the adaptative time step is based
-# ex : dtCrit = ['gradU', 'cfl', 'stretch'], means :
-# dt = min (dtAdv, dtCfl, dtStretch), where dtAdv is equal to LCFL / |gradU|
-# For dtAdv, the possible choices are the following:
-# 'vort' (infinite norm of vorticity) : dtAdv = LCFL / |vort|
-# 'gradU' (infinite norm of velocity gradient), dtAdv = LCFL / |gradU|
-# 'deform' (infinite norm of deformation tensor),
-# dtAdv = LCFL / (0.5(gradU + gradU^T))
-# The operator is defined on a Cartesian topology with ghost points, that will
-# be dedicated to operators using finite differences. 
-from parmepy.operator.adapt_timestep import AdaptTimeStep
-from parmepy.mpi.topology import Cartesian
-NBGHOSTS = 2
-ghosts = np.ones((box.dimension)) * NBGHOSTS
-topostr = Cartesian(box, box.dimension, nbElem, ghosts=ghosts)
-
-op['dtAdapt'] = AdaptTimeStep(velo, vorti,
-                              resolutions={velo: nbElem,
-                                           vorti: nbElem},
-                              dt_adapt=dt,
-                              method={TimeIntegrator: RK3, 
-                                      SpaceDiscretisation: FD_C_4, 
-                                      dtCrit: ['vort', 'stretch']},
-                              topo=topostr,
-                              io_params={},
-                              lcfl=0.125,
-                              cfl=0.5)
-
-from parmepy.operator.advection import Advection
-op['advection'] = Advection(velo, vorti,
-                            resolutions={velo: nbElem,
-                                         vorti: nbElem},
-                            method={Scales: 'p_M4', 
-                                    Splitting: 'classic'}) # Scales advection
-
-from parmepy.operator.stretching import Stretching
-op['stretching'] = Stretching(velo, vorti,
-                              resolutions={velo: nbElem, vorti: nbElem},
-                              topo=topostr)
-
-from parmepy.operator.diffusion import Diffusion
-op['diffusion'] = Diffusion(vorti, resolution=nbElem, viscosity=VISCOSITY)
-
-from parmepy.operator.poisson import Poisson
-op['poisson'] = Poisson(velo, vorti, resolutions={velo: nbElem, vorti: nbElem})
-
-from parmepy.operator.differential import Curl
-op['curl'] = Curl(velo, vorti, resolutions={velo: nbElem, vorti: nbElem},
-                  method={SpaceDiscretisation: FD_C_4})
-
-# Discretisation of computational operators and link
-# to the associated topologies.
-for ope in op.values():
-    ope.discretize()
-topofft = op['poisson'].discreteFields[op['poisson'].vorticity].topology
-topocurl = op['curl'].discreteFields[op['curl'].invar].topology
-topoadvec = op['advection'].discreteFields[op['advection'].velocity].topology
-topostr = op['stretching'].discreteFields[op['stretching'].velocity].topology
-
-# Penalization of the velocity on a sphere inside the domain.
-# We enforce penalization on the same topology as for fft operators.
-from parmepy.operator.penalization import Penalization
-op['penalization'] = Penalization(velo, [sphere], coeff=[1e8],
-                                  topo=topofft, resolutions={velo: nbElem})
-op['penalization'].discretize()
-
-# Kill the vorticity at the inlet.
-# We enforce penalization on the same topology as for fft operators.
-ref_step_fft = topofft.mesh.space_step
-ibpos = npw.zeros(dim)
-iblength = npw.zeros(dim)
-ibpos[...] = boxorigin[...]
-iblength[...] = boxlength[...]
-iblength[0] = 10 * ref_step_fft[0]
-inlet_band = ControlBox(box, ibpos, iblength)
-op['kill_vort'] = Penalization(vorti, [inlet_band], coeff=[1e10],
-                               topo=topofft, resolutions={vorti: nbElem})
-op['kill_vort'].discretize()
-
-# Correction of the velocity, used as post-process for Poisson solver,
-# based on a fixed flowrate through the entrance of the domain.
-# Time-dependant required-flowrate (Variable Parameter)
-def computeFlowrate(simu):
-    # === Time-dependant flow rate ===
-    t = simu.tk
-    Tstart = 3.0
-    flowrate = np.zeros(3)
-    flowrate[0] = uinf * box.length[1] * box.length[2]
-    if t >= Tstart and t <= Tstart + 1.0:
-        flowrate[1] = sin(pi * (t - Tstart)) * \
-                      box.length[1] * box.length[2]
-    # === Constant flow rate ===
-    #    flowrate = np.zeros(3)
-    #    flowrate[0] = uinf * box.length[1] * box.length[2]
-    return flowrate
-req_flowrate = VariableParameter(formula=computeFlowrate)
-
-frate = npw.zeros(3)
-frate[0] = uinf * box.length[1] * box.length[2]
-req_flowrate = VariableParameter(data=frate, name='flowRate')
-
-from parmepy.operator.velocity_correction import VelocityCorrection
-op['correction'] = VelocityCorrection(velo, vorti,
-                                      resolutions={velo: nbElem,
-                                                   vorti: nbElem},
-                                      req_flowrate=req_flowrate, topo=topofft)
-op['correction'].discretize()
-
-# ========= Bridges between the different topologies =========
-from parmepy.operator.redistribute import Redistribute
-distr = {}
-distr['fft2curl'] = Redistribute([velo], op['penalization'], op['curl'])
-distr['fft2advec'] = Redistribute([velo, vorti],
-                                   op['poisson'], op['advection'])
-distr['curl2fft'] = Redistribute([vorti], op['curl'], op['poisson'])
-distr['adv2str'] = Redistribute([velo, vorti],
-                                 op['advection'], op['stretching'])
-distr['str2diff'] = Redistribute([vorti], op['stretching'], op['diffusion'])
-distr['curl2adv'] = Redistribute([velo, vorti], op['curl'], op['advection'])
-distr['curl2str'] = Redistribute([velo, vorti], op['curl'], op['stretching'])
-distr['fft2str'] = Redistribute([velo, vorti], op['poisson'], op['stretching'])
-distr['str2curl'] = Redistribute([velo], op['stretching'], op['curl'])
-for ope in distr.values():
-    ope.discretize()
-
-# ========= Monitoring operators =========
-
-from parmepy.operator.monitors import Printer, Energy_enstrophy, DragAndLift,\
-    Reprojection_criterion
-monitors = {}
-monitors['printerFFT'] = Printer(variables=[velo, vorti], topo=topofft,
-                                 frequency=100, formattype=HDF5, prefix='fields',
-                                 xmfalways=True)
-
-monitors['printerCurl'] = Printer(variables=[velo, vorti], topo=topocurl,
-                                  formattype=HDF5, prefix='curl_io')
-
-monitors['printerAdvec'] = Printer(variables=[velo, vorti], topo=topofft,
-                                   formattype=HDF5, prefix='advec_io',
-                                   xmfalways=True)
-
-monitors['energy'] = Energy_enstrophy(velo, vorti, topo=topofft,
-                                      io_params={},
-                                      viscosity=VISCOSITY, isNormalized=False)
-
-reprojection_constant = 0.04
-reprojection_rate = 1
-monitors["reprojection"] = Reprojection_criterion(vorti, reprojection_constant,
-                                                  reprojection_rate, topofft,
-                                                  io_params={})
-# Add reprojection for poisson operator
-#op["poisson"].activateProjection(monitors["reprojection"])
-
-# Compute and save drag and lift on a
-# 3D Control box
-ref_step = topostr.mesh.space_step
-cbpos = npw.zeros(dim)
-cblength = npw.zeros(dim)
-cbpos[...] = boxorigin[...]
-cbpos[0] +=  10 * ref_step[0]
-cblength[...] = boxlength[...]
-cblength[0] -= 20 * ref_step[0]
-cb = ControlBox(box, cbpos, cblength)
-coeffForce = 1. / (0.5 * uinf ** 2 * pi * RADIUS ** 2)
-
-monitors['forces'] = DragAndLift(velo, vorti, VISCOSITY, coeffForce,
-                                 topostr, cb, obstacles=[sphere], io_params={})
-
-step_dir = ref_step[0]
-#thickSliceXY = ControlBox(box, origin=[-2.56, -2.56, -2.0 * step_dir], 
-#                           lengths=[5.12, 5.12, 4.0 * step_dir])
-#monitors['printerSliceXY'] = Printer(variables=[velo, vorti], topo=topofft,
-#                                    frequency=1, formattype=HDF5, prefix='sliceXY', 
-#                                    subset=thickSliceXY, xmfalways=True)
-
-#thickSliceXZ = ControlBox(box, origin=[-2.0, -2.0 * step_dir, -2.56], 
-#                         lengths=[10.24, 4.0 * step_dir, 5.12])
-#monitors['printerSliceXZ'] = Printer(variables=[velo, vorti], topo=topofft,
-#                                     frequency=100, formattype=HDF5, prefix='sliceXZ', 
-#                                     subset=thickSliceXZ, xmfalways=True)
-
-#subBox = ControlBox(box, origin=[-0.7, -1.25, -1.25], lengths=[7.0, 2.5, 2.5])
-#monitors['printerSubBox'] = Printer(variables=[velo, vorti], topo=topofft,
-#                                    frequency=100, formattype=HDF5, prefix='subBox', 
-#                                    subset=subBox, xmfalways=True)
-
-# ========= Setup for all declared operators =========
-for ope in op.values():
-    ope.setUp()
-for ope in distr.values():
-    ope.setUp()
-# Set up for monitors
-for monit in monitors.values():
-    monit.setUp()
-
-allops = dict(op.items() + distr.items() + monitors.items())
-
-# ========= Simulation setup =========
-simu = Simulation(start=0.0, end=100., time_step=0.0125, max_iter=1000000)
-#simu = Simulation(start=0.0, end=75., time_step=dt['dt'], max_iter=6)
-
-ind = sphere.discretize(topofft)
-vdfft = velo.discreteFields[topofft].data
-wdfft = vorti.discreteFields[topofft]
-
-## ========= Fields initialization =========
-# Initialisation, mode 1:
-# - compute velocity on topofft
-# - penalize
-# - compute vorticity with curl
-def initFields_mode1():
-    # The velocity is initialized on the fftw topology
-    # penalized and distributed on curl topo
-    velo.initialize(topo=topofft)
-    op['penalization'].apply(simu)
-    distr['fft2curl'].apply(simu)
-    distr['fft2curl'].wait()
-    # Vorticity is initialized after penalization of velocity
-    op['curl'].apply(simu)
-    distr['curl2adv'].apply(simu)
-    distr['curl2adv'].wait()
-
-## Call initialization
-initFields_mode1()
-
-##### Check initialize process results #####
-norm_vel_fft = velo.norm(topofft)
-norm_vel_curl = velo.norm(topocurl)
-norm_vort_fft = vorti.norm(topofft)
-norm_vort_curl = vorti.norm(topocurl)
-
-#assert np.allclose(norm_vort_curl, norm_vort_fft)
-assert np.allclose(norm_vel_curl, norm_vel_fft)
-
-# Print initial state
-for mon in monitors.values():
-    mon.apply(simu)
-
-## Note Franck : tests init ok, same values on both topologies, for v and w.
-# === After init ===
-#
-# v init on topocurl (which may be equal to topofft)
-#
-# === Definition of the 'one-step' sequence of operators ===
-#
-#  1 - w = curl(v), topocurl
-#  2 - w = advection(v,w), topo-advec
-#  3 - w = stretching(v,w), topostr
-#  4 - w = diffusion(w), topofft
-#  5 - v = poisson(w), topofft
-#  6 - v = correction(v, w), topofft
-#  7 - v = penalization(v), topofft
-#
-#  Required bridges :
-# 1 --> 2 : (v,w) on topo-advec
-# 2 --> 3 : (v,w) on topostr
-# 3 --> 4 : w on topofft
-# 7 --> 1 : v on topocurl
-
-#fullseq = ['advection',  # in: v,w out: w, on topoadvec
-#            'adv2str', 'stretching',  # in: v,w out: w on topostr
-#            # in: w, out: v, w on topofft
-#            'str2diff', 'diffusion', 'reprojection', 'poisson', 'correction',
-#            'penalization',
-#            'fft2curl', 'curl', 'forces',
-#            'curl2adv', 'energy', 'profiles', 'printerAdvec',
-#            ]
-
-fullseq = ['stretching',
-           'str2diff', 'diffusion', 'poisson', 'correction',
-           'penalization',
-           'fft2curl', 'curl',
-           'curl2fft', 'poisson', 'correction',
-           'fft2adv',
-           'advection',
-           #'printerSliceXY', #'printerSliceXZ', 'printerSubBox',
-           #'energy',
-           'adv2str',
-           'forces',
-           #'dtAdapt'
-           ]
-
-cb2 = op['correction'].cb
-#cb.discretize(topo=topofft)
-def pseudo_norm(topo, var):
-    sloc = npw.zeros(3)
-    gloc = npw.zeros(3)
-    sl = topo.mesh.compute_index
-    for i in xrange(3):
-        sloc[i] = np.sum((var[i][sl]))
-        # sfft[i] = la.norm((vfft[i][slfft]))
-    topo.comm.Allreduce(sloc, gloc)
-    return gloc
-    
-def run(sequence):
-    ## for name in sequence:
-    ##     assert allops.keys().count(name) > 0 or distr.keys().count(name) > 0\
-    ##         or monitors.keys().count(name) > 0, 'unknow key:' + name
-    ##     allops[name].apply(simu)
-    op['advection'].apply(simu)
-#    distr['adv2str'].apply(simu)
-#    distr['adv2str'].wait()    
-#    op['stretching'].apply(simu)
-#    distr['str2diff'].apply(simu)
-#    distr['str2diff'].wait()
-    op['diffusion'].apply(simu)
-    op['kill_vort'].apply(simu)
-    op['poisson'].apply(simu)
-    op['correction'].apply(simu)
-    op['penalization'].apply(simu)
-    distr['fft2curl'].apply(simu)
-    distr['fft2curl'].wait()
-    op['curl'].apply(simu)
-    distr['curl2fft'].apply(simu)
-    distr['curl2fft'].wait()
-    op['poisson'].apply(simu)
-    op['correction'].apply(simu)
-    monitors['printerFFT'].apply(simu)
-    monitors['energy'].apply(simu)
-    distr['fft2advec'].apply(simu)
-    distr['fft2str'].apply(simu)
-    distr['fft2str'].wait()
-    monitors['forces'].apply(simu)
-    distr['fft2advec'].wait()
-
-seq = fullseq
-
-simu.initialize()
-
-while not simu.isOver:
-    if topocurl.rank == 0:
-        simu.printState()
-    run(seq)
-    simu.advance()
-
-
-## print 'total time (rank):', MPI.Wtime() - time, '(', topo.rank, ')'
-
-# === Finalize for all declared operators ===
-
-# Note FP : bug in fft finalize. To be checked ...
-# --> clean_fftw must be called only once
-#for ope in op.values():
-#    ope.finalize()
-## Clean memory buffers
-
-fftw2py.clean_fftw_solver(box.dimension)
-for ope in distr.values():
-    ope.finalize()
-for monit in monitors.values():
-    monit.finalize()
diff --git a/trashed_examples/NS_Richtmyer_Meshkov.py b/trashed_examples/NS_Richtmyer_Meshkov.py
deleted file mode 100755
index e6fa38c76ca09fc7a3d7d357c23dc1169bf48782..0000000000000000000000000000000000000000
--- a/trashed_examples/NS_Richtmyer_Meshkov.py
+++ /dev/null
@@ -1,341 +0,0 @@
-#!/usr/bin/python
-#import sys
-#sys.path.insert(0,'/scratch/mimeau/install-parmes3/')
-import parmepy as pp
-from parmepy.f2py import fftw2py
-import numpy as np
-from parmepy.fields.continuous import Field
-from parmepy.variables.variables import Variables
-from parmepy.mpi.topology import Cartesian
-from parmepy.domain.obstacle.sphere import Sphere
-from parmepy.operator.advection import Advection
-from parmepy.operator.stretching import Stretching
-from parmepy.operator.poisson import Poisson
-from parmepy.operator.velocity_correction import VelocityCorrection
-from parmepy.operator.diffusion import Diffusion
-from parmepy.operator.differential import Curl
-from parmepy.operator.redistribute import Redistribute
-from parmepy.operator.adapt_timestep import AdaptTimeStep
-from parmepy.operator.baroclinic import Baroclinic
-from parmepy.operator.density import DensityColor
-from parmepy.domain.obstacle.planes import PlaneBoundaries
-from parmepy.operator.penalization import Penalization
-from parmepy.problem.navier_stokes import NSProblem
-from parmepy.operator.monitors.printer import Printer
-from parmepy.operator.monitors.energy_enstrophy import Energy_enstrophy
-from parmepy.operator.monitors.compute_forces import Forces
-from parmepy.problem.simulation import Simulation
-from parmepy.constants import VTK, HDF5
-import math as m
-from dataNS_RMI import dim, nb, NBGHOSTS, ADVECTION_METHOD, \
-    BAROCLINIC_METHOD, VISCOSITY, \
-    OUTPUT_FREQ, FILENAME_ENER, PROJ, LCFL, CFL, CURL_METHOD,\
-    TIMESTEP_METHOD, RHO_BOTT, RHO_TOP
-
-## ----------- A 3d problem -----------
-print " ========= Start test for Navier-Stokes 3D (Rayleigh-Taylor instability)========="
-
-## constants
-pi = np.pi
-cos = np.cos
-sin = np.sin
-exp = np.exp
-abs = np.abs
-tanh = np.tanh
-
-## Parameters
-dim = 3
-densityVal = [RHO_BOTT, RHO_TOP]
-
-## Domain
-#box = pp.Box(dim, length=[0.5, 2.0, 0.125], 
-#             origin=[-0.25, -1.0, -0.0625])
-box = pp.Box(dim, length=[3.0, 3.0, 0.125], 
-             origin=[-1.5, -1.5, -0.0625])
-#box = pp.Box(dim, length=[3.0, 3.0, 0.0625], 
-#             origin=[0., 0., 0.])
-
-## Global resolution
-#nbElem = [nb] * dim
-nbElem = [289, 289, 13] 
-#nbElem = [97, 289, 97] 
-#nbElem = [101, 401, 25] 
-
-
-uinf = 1.0
-
-## Fields declaration
-def computeVel(res, x, y, z, t):
-#    res[0][...] = 0.
-#    res[1][...] = 0.
-#    res[2][...] = 0.
-
-# -------------- Cond papier Arepo --------------
-#    res[0][...] = 0.
-#    res[1][...] = 0.025 * (1 - cos(4.0 * pi * x)) #* (1.0 - cos(4.0 * pi * z / 3.0))
-#    res[2][...] = 0.
-
-# -------------- Cond gaussienne --------------
-#    res[0][...] = 0.
-#    res[1][...] = 0.01 * sin(2.0 * pi * x / 0.5) * np.exp(-(y-0.)**2/(0.4**2))
-    Lx = box.length[0]
-    sigm = 0.4
-    coef = 0.02
-    middle = box.origin + box.length / 2.0
-    res[0][...] = -(2.0 * y / sigm ** 2) * coef * (cos(2.0 * 3.0 * pi * x / Lx) / (2.0 * 3.0 * pi / Lx)) \
-                   * np.exp(-(y - middle[1]) ** 2 / (sigm ** 2))
-#    res[0][...] = -(2.0 * y / sigm ** 2) * coef * cos(2.0 * 3.0 * pi * x / Lx) \
-#                   * np.exp(-(y - middle[1]) ** 2 / (sigm ** 2))
-    res[1][...] = coef * sin(2.0 * 3.0 * pi * x / Lx) * np.exp(-(y - middle[1]) ** 2/(sigm ** 2))
-    res[2][...] = 0.
-
-# -----------Conditions CEA (Jerome Griffond) --------
-#    ampli = 0.0027935
-#    lamb = 0.05933
-#    compr = 0.767
-#    width = 0.0025152
-#    xd = compr * width / np.sqrt(pi)
-#    xk = 2.0 * pi / lamb
-#    dadt = 52.29 * xk * ampli
-#    v0 = 2.0 * dadt
-#    fctG = np.zeros_like(y)
-#    fctH = np.zeros_like(y)
-#    for i in xrange(np.shape(y)[0]):
-#        fctG[0,i,0] = v0 * 0.25 * m.exp((0.5 * xd * xk) ** 2) \
-#                      * ((1.0 - m.erf(0.5 * xd * xk - y[0,i,0] / xd)) \
-#                      * m.exp(-xk * y[0,i,0]) + (1.0 - m.erf(0.5 * xd * xk + y[0,i,0] / xd)) \
-#                      * m.exp(xk * y[0,i,0]))
-#        fctH[0,i,0] = v0 * 0.25 * m.exp((0.5 * xd * xk) ** 2) \
-#                      * ((1.0 - m.erf(0.5 * xd * xk - y[0,i,0] / xd)) \
-#                      * m.exp(-xk * y[0,i,0]) - (1.0 - m.erf(0.5 * xd * xk + y[0,i,0] / xd)) \
-#                      * m.exp(xk * x[0,i,0]))
-#    res[0][...] = fctH * sin(xk * x)
-#    res[1][...] = fctG * cos(xk * x)
-#    res[2][...] = 0.
-
-# -----------Conditions Sheu/Yu (Commun. Comput. Phys.) --------
-#    res[0][...] = 0.
-#    res[1][...] = 2.0 + 0.1 * cos(2.0 * pi * x)
-#    res[2][...] = 0.
-    return res
-
-def computeVort(res, x, y, z, t):
-    res[0][...] = 0.
-    res[1][...] = 0.
-    res[2][...] = 0.
-# -----------Conditions CEA (Jerome Griffond) --------
-#    ampli = 0.0027935
-#    lamb = 0.05933
-#    compr = 0.767
-#    width = 0.0025152
-#    xd = compr * width / np.sqrt(pi)
-#    xk = 2.0 * pi / lamb
-#    dadt = 52.29 * xk * ampli
-#    v0 = 2.0 * dadt
-#    res[0][...] = 0.
-#    res[1][...] = v0 * np.exp(-(y / xd) ** 2) / (xd * np.sqrt(pi))
-#    res[2][...] = 0.
-    return res
-
-def computeDensity(res, x, y, z, t):
-#    perturb = 0.2 * np.sin(2.0 * pi * y * z/ 5.9333)
-#    d = (3.0 + perturb + 0.5 - x)/(2.0 * 0.5)
-#    c = 1.0
-#    res[0][...] = densityVal[1]
-#    ind0 = np.where(np.logical_and(d > 0., d < 1.0))
-#    res[0][ind0] = densityVal[0] + (densityVal[1] - densityVal[0]) * 0.5
-#    ind1 = np.where(d >= 1.0)
-#    res[0][ind1] = densityVal[0]
-
-# -------------- Cond papier Arepo --------------
-#    res[0][...] = densityVal[0]
-#    ind = np.where(np.logical_and(np.logical_and(y >= 0.75, x>=0.), z>=0.))
-#    res[0][ind] = densityVal[1]
-
-# -----------Conditions CEA (Jerome Griffond) : lissage --------
-#    compr = 0.767
-#    width = 0.0025152
-#    origin = box.origin
-#    middle = box.origin + box.length / 2.0
-##    xd = compr * width / np.sqrt(pi)
-#    xd = 0.02
-
-#    ind_rhoBott = np.where(np.logical_and(np.logical_and(y< middle[1] - xd, x>=origin[0]), z>=origin[2]))
-#    ind_rhoTop = np.where(np.logical_and(np.logical_and(y> middle[1] + xd, x>=origin[0]), z>=origin[2]))
-
-#    res[0][ind_rhoBott] = densityVal[0]
-#    res[0][ind_rhoTop] = densityVal[1]
-#    Mrho = 0.5 * (densityVal[1] + densityVal[0])
-#    Drho = 0.5 * (densityVal[1] - densityVal[0])
-#    for j in xrange(np.shape(y)[1]):
-#        if (y[0,j,0]<= middle[1] + xd) and (y[0,j,0]>= middle[1] - xd):
-#            y[0,j,0] = m.erf((y[0,j,0] - middle[1]) / xd)
-#            res[0][:,j,:] = Mrho + Drho * y[0,j,0]
-
-# -----------Conditions GHC (log(rho))--------
-    origin = box.origin
-    middle = box.origin + box.length / 2.0
-    ind_rhoTop = np.where(np.logical_and(np.logical_and(y> middle[1], x>=origin[0]), z>=origin[2]))
-
-    res[0][...] = m.log(densityVal[0])
-    res[0][ind_rhoTop] = m.log(densityVal[1])
-    return res
-
-## Fields
-velo = pp.Field(domain=box, formula=computeVel, 
-                name='Velocity', is_vector=True)
-vorti = pp.Field(domain=box, formula=computeVort, 
-                 name='Vorticity', is_vector=True)
-rho = pp.Field(domain=box, formula=computeDensity,
-                 name='Density', is_vector=False)
-
-## Usual Cartesian topology definition
-ghosts = np.ones((box.dimension)) * NBGHOSTS
-topo = Cartesian(box, box.dimension, nbElem,
-                 ghosts=ghosts)
-
-## Operators
-advec = Advection(velo, vorti,
-                  resolutions={velo: nbElem,
-                               vorti: nbElem},
-                  method=ADVECTION_METHOD)
-
-advecScalar = Advection(velo, rho,
-                        resolutions={velo: nbElem,
-                                     rho: nbElem},
-                        method=ADVECTION_METHOD)
-
-stretch = Stretching(velo, vorti,
-                     resolutions={velo: nbElem,
-                                  vorti: nbElem},
-                     topo=topo)
-
-baroclinic = Baroclinic(velo, vorti, rho,
-                        viscosity=VISCOSITY,
-                        resolutions={velo: nbElem,
-                                     vorti: nbElem,
-                                     rho: nbElem},
-                        topo=topo,
-                        method=BAROCLINIC_METHOD,
-                        formula=computeVel)
-
-poisson = Poisson(velo, vorti,
-                  resolutions={velo: nbElem,
-                               vorti: nbElem},
-                  projection=PROJ)
-
-#-----------------------------
-# Topology without ghost points
-poisson.discretize()
-topofft = poisson.discreteFields[poisson.vorticity].topology
-#-----------------------------
-
-diffusion = Diffusion(vorti,
-                      resolution=nbElem,
-                      topo=topofft,
-                      viscosity=VISCOSITY)
-
-densityColor = DensityColor(rho,
-                            resolutions={rho: nbElem},
-                            topo=topofft,
-                            densityVal=densityVal)
-
-curl = Curl(velo, vorti,
-            resolutions={velo: nbElem,
-                         vorti: nbElem},
-            method=CURL_METHOD,
-            topo=topofft)
-
-#bc = PlaneBoundaries(box, 1, thickness=0.2)
-
-#penal = Penalization(velo, [bc],
-#                     coeff=[1e8],
-#                     topo=topofft,
-#                     resolutions={velo: nbElem})
-
-## Bridges between the different topologies in order to
-## redistribute data.
-# 0 - Poisson to Curl_FD and Curl_FD to advection
-distrCurlAdv = Redistribute([vorti, velo], curl, advec)
-# 1 -Advection to stretching
-distrPoiStr_velo = Redistribute([velo], poisson, stretch)
-distrAdvStr_vorti = Redistribute([vorti], advec, stretch)
-# 3 - Stretching to Diffusion
-distrStrDiff = Redistribute([vorti], stretch, diffusion)
-# 4 - Advection_Scalar to Baroclinic and Diff to Baroclinic
-distrAdvBaro = Redistribute([rho], advecScalar, baroclinic)
-distrDiffBaro = Redistribute([vorti], diffusion, baroclinic)
-# 5 - Baroclinic to Poisson
-distrBaroPoiss = Redistribute([vorti], baroclinic, poisson)
-
-## Simulation
-simu = Simulation(start=0.0,
-                  end=250., time_step=0.005,
-                  max_iter=1000000)
-
-#  Define the problem to solve
-pb = NSProblem(operators=[distrPoiStr_velo,
-                          advecScalar,  #
-                          advec,        #
-                          distrAdvStr_vorti, 
-                          stretch,      #
-                          distrStrDiff,
-                          diffusion,    #
-                          distrAdvBaro,
-                          distrDiffBaro,
-                          baroclinic,   #
-                          distrBaroPoiss,
-#                          densityColor, #
-                          poisson],     #
-               simulation=simu, dumpFreq=-1)
-
-
-## Setting solver to Problem (only operators for computational tasks)
-pb.pre_setUp()
-
-printer = Printer(variables=[velo, vorti, rho],
-                  topo=topofft,
-                  frequency=50,
-                  prefix='./res/NS_RMI',
-                  formattype=VTK)
-
-energy = Energy_enstrophy(velo, vorti,
-                          topo=topofft,
-                          viscosity=VISCOSITY,
-                          isNormalized=False,
-                          frequency=OUTPUT_FREQ,
-                          filename=FILENAME_ENER,
-                          safeOutput=True)
-
-## Add monitors and setting solver to Problem
-vorti.setTopoInit(topofft)
-velo.setTopoInit(topofft)
-rho.setTopoInit(topofft)
-pb.addMonitors([energy, printer])
-pb.setUp()
-
-# setUp for temporary operators
-curl.discretize()
-curl.setUp()
-#poisson.setUp()
-#distrCurlAdv.setUp()
-
-# compute vorticity from initial velocity field
-#poisson.apply(simu)
-curl.apply(simu)
-#distrCurlAdv.apply(simu)
-
-def run():
-## Solve problem
-    pb.solve()
-
-## end of time loop ##
-
-from parmepy.mpi import MPI
-print "Start computation ..."
-time = MPI.Wtime()
-run()
-print 'total time (rank):', MPI.Wtime() - time, '(', topo.rank, ')'
-
-## Clean memory buffers
-fftw2py.clean_fftw_solver(box.dimension)
diff --git a/trashed_examples/NS_bluff_bodies.py b/trashed_examples/NS_bluff_bodies.py
deleted file mode 100755
index 9b74f4cfef70a56e9c407662d5a19c7545769763..0000000000000000000000000000000000000000
--- a/trashed_examples/NS_bluff_bodies.py
+++ /dev/null
@@ -1,273 +0,0 @@
-#!/usr/bin/python
-
-"""
-Navier Stokes 3D : flow past bluff bodies (using penalization).
-
-All parameters are set and defined in python module dataNS_bb.
-
-"""
-
-import parmepy as pp
-from parmepy.f2py import fftw2py
-import numpy as np
-from parmepy.fields.continuous import Field
-from parmepy.variables.variables import Variables
-from parmepy.mpi.topology import Cartesian
-from parmepy.domain.obstacle.sphere import Sphere
-from parmepy.operator.advection import Advection
-from parmepy.operator.stretching import Stretching
-from parmepy.operator.poisson import Poisson
-from parmepy.operator.velocity_correction import VelocityCorrection
-from parmepy.operator.diffusion import Diffusion
-from parmepy.operator.penalization import Penalization
-from parmepy.operator.differential import Curl
-from parmepy.operator.redistribute import Redistribute
-from parmepy.operator.adapt_timestep import AdaptTimeStep
-from parmepy.problem.navier_stokes import NSProblem
-from parmepy.operator.monitors.printer import Printer
-from parmepy.operator.monitors.energy_enstrophy import Energy_enstrophy
-from parmepy.operator.monitors.compute_forces import Forces
-from parmepy.problem.simulation import Simulation
-from parmepy.constants import VTK
-from dataNS_bb import dim, NBGHOSTS, ADVECTION_METHOD, VISCOSITY, \
-    OUTPUT_FREQ, FILENAME, PROJ, LCFL, CFL, CURL_METHOD, \
-    TIMESTEP_METHOD, OBST_Ox, OBST_Oy, OBST_Oz, RADIUS
-from parmepy.domain.obstacle.planes import PlaneBoundaries
-
-## ----------- A 3d problem -----------
-print " ========= Start Navier-Stokes 3D (Flow past bluff bodies) ========="
-
-## pi constant
-pi = np.pi
-cos = np.cos
-sin = np.sin
-
-## Domain
-#box = pp.Box(dim, length=[1., 1., 1.], origin=[0., 0., 0.])
-box = pp.Box(dim, length=[2.0 * pi, pi,  pi], origin=[-pi, -pi, -pi])
-#box = pp.Box(dim, length=[12., 10., 10.], origin=[-4., -5., -5.])
-#box = pp.Box(dim, length=[2.0 * pi, 2.0 * pi, 2.0 * pi])
-
-## Global resolution
-#nbElem = [nb] * dim
-nbElem = [65, 33, 33]
-
-# Upstream flow velocity
-uinf = 1.0
-
-# Function to compute potential flow past a sphere
-def computeVel(res, x, y, z, t):
-    module = np.sqrt((x - OBST_Ox) * (x - OBST_Ox) +
-                     (y - OBST_Oy) * (y - OBST_Oy) +
-                     (z - OBST_Oz) * (z - OBST_Oz))
-    
-#    print 'module', module, np.shape(module)
-    for i in range (np.shape(module)[0]):
-        for j in range (np.shape(module)[1]):
-            for k in range (np.shape(module)[2]):
-                if module[i,j,k]==0.0:
-                    module[i,j,k] = RADIUS
-    res[0][...] = - ((uinf * x) / module) * (1 - (RADIUS ** 3 / module ** 3))
-    res[1][...] = ((uinf * y) / module) * (1 + (RADIUS ** 3 / (2. * module ** 2)))
-    res[2][...] = 0.
-#    res[0][...] = uinf
-#    res[1][...] = 0.
-#    res[2][...] = 0.
-    return res
-
-# Function to compute initial vorticity
-def computeVort(res, x, y, z, t):
-    res[0][...] = 0.
-    res[1][...] = 0.
-    res[2][...] = 0.
-    return res
-
-## Function to compute TG velocity
-#def computeVel(res, x, y, z, t):
-#    res[0][...] = sin(x) * cos(y) * cos(z)
-#    res[1][...] = - cos(x) * sin(y) * cos(z)
-#    res[2][...] = 0.
-#    return res
-
-## Function to compute reference vorticity
-#def computeVort(res, x, y, z, t):
-#    res[0][...] = - cos(x) * sin(y) * sin(z)
-#    res[1][...] = - sin(x) * cos(y) * sin(z)
-#    res[2][...] = 2. * sin(x) * sin(y) * cos(z)
-#    return res
-
-## Fields
-velo = Field(domain=box, formula=computeVel,
-             name='Velocity', is_vector=True)
-vorti = Field(domain=box, formula=computeVort,
-              name='Vorticity', is_vector=True)
-
-## Variables
-dt_adapt = Variables(domain=box, name='adaptative time step', 
-                     data=[0.01])
-
-## Usual Cartesian topology definition
-# At the moment we use two (or three?) topologies :
-# - "topo" for Stretching and all operators based on finite differences.
-#    --> ghost layer = 2
-# - topo from Advection operator for all the other operators.
-#    --> no ghost layer
-# - topo from fftw for Poisson and Diffusion.
-# Todo : check compat between scales and fft operators topologies.
-ghosts = np.ones((box.dimension)) * NBGHOSTS
-topo = Cartesian(box, box.dimension, nbElem,
-                 ghosts=ghosts)
-
-## Obstacles (sphere + up and down plates)
-sphere = Sphere(box, position=[OBST_Ox, OBST_Oy, OBST_Oz],
-                radius=RADIUS)
-
-#plates = Plates(box, normal_dir=2, epsilon=0.005)
-
-## Tools Operators
-dtAdapt = AdaptTimeStep(velo, vorti,
-                        resolutions={velo: nbElem,
-                                     vorti: nbElem},
-                        dt_adapt=dt_adapt,
-                        method=TIMESTEP_METHOD,
-                        topo=topo,
-                        lcfl=LCFL,
-                        cfl=CFL)
-
-## Operators
-advec = Advection(velo, vorti,
-                  resolutions={velo: nbElem,
-                               vorti: nbElem},
-                  method=ADVECTION_METHOD
-                  )
-
-stretch = Stretching(velo, vorti,
-                     resolutions={velo: nbElem,
-                                  vorti: nbElem},
-                     topo=topo
-                     )
-
-diffusion = Diffusion(vorti,
-                      resolution=nbElem,
-                      viscosity=VISCOSITY
-                     )
-
-poisson = Poisson(velo, vorti,
-                  resolutions={velo: nbElem,
-                               vorti: nbElem},
-                  projection=PROJ)
-
-curl = Curl(velo, vorti,
-            resolutions={velo: nbElem,
-                         vorti: nbElem},
-            method=CURL_METHOD)
-#            topo=topo)
-
-## Bridges between the different topologies in order to
-## redistribute data.
-# 1 - Curl_fft to stretching (velocity only)
-distrCurlStr_velo = Redistribute([velo], curl, stretch)
-distrAdvStr_velo = Redistribute([velo], advec, stretch)
-# 2 - Advection to stretching (vorticity only)
-distrAdvStr_vorti = Redistribute([vorti], advec, stretch)
-# 3 - Stretching to Diffusion
-distrStrDiff = Redistribute([vorti], stretch, diffusion)
-# + - Brigdes required if Curl op. is solved using a FD method
-distrPoissCurl = Redistribute([vorti, velo], poisson, curl)
-distrCurlAdv = Redistribute([vorti, velo], curl, advec)
-
-## Simulation with fixed time step
-simu = Simulation(start=0.0,
-                  end=10., time_step=0.005,
-                  max_iter=1000000)
-
-## Define the problem to solve
-
-#pb = NSProblem(operators=[distrPoissCurl,
-#                          curl,
-#                          distrCurlAdv,
-#                          advec,             # <--------------------:
-#                          distrAdvStr_vorti, 
-#                          stretch, 
-#                          distrStrDiff,
-#                          diffusion, 
-#                          poisson],
-#               simulation=simu, dumpFreq=-1)
-
-pb = NSProblem(operators=[curl,
-                          distrCurlAdv,
-                          distrCurlStr_velo, # Redistribute while advec
-                          advec,             # <--------------------:
-                          distrAdvStr_vorti, 
-                          stretch, 
-                          distrStrDiff,
-                          diffusion, 
-                          poisson],
-               simulation=simu, dumpFreq=-1)
-
-## Setting solver to Problem (only operators for computational tasks)
-pb.pre_setUp()
-
-## Topology without ghost points
-topofft = poisson.discreteFields[poisson.vorticity].topology
-
-## Add operators related to the problem and depending on FFT topology
-correction = VelocityCorrection(velo, vorti,
-                                resolutions={velo: nbElem,
-                                             vorti: nbElem},
-                                uinf=uinf,
-                                topo=topofft)
-
-bc = PlaneBoundaries(box, 2, thickness=0.1)
-
-penal = Penalization(velo, [sphere, bc],
-                     coeff=[1e8],
-                     topo=topofft,
-                     resolutions={velo: nbElem})
-
-## Diagnostics/monitors related to the problem
-
-forces = Forces(velo, vorti, topo, 
-                obstacle=sphere, boxMin= [0.2, 0.2, 0.2], 
-                boxMax= [0.8, 0.8, 0.8], Reynolds=300., uinf=uinf,
-                frequency=1, prefix='./res/Noca_sphere.dat')
-
-printer = Printer(variables=[velo, vorti],
-                  topo=topofft,
-                  frequency=100,
-                  prefix='./res/NS_sphere',
-                  formattype=VTK)
-
-energy = Energy_enstrophy(velo, vorti,
-                          topo=topofft,
-                          viscosity=VISCOSITY,
-                          isNormalized=True,
-                          frequency=OUTPUT_FREQ,
-                          prefix=FILENAME)
-
-## Add monitors and setting solver to Problem
-vorti.setTopoInit(topofft)
-velo.setTopoInit(topofft)
-pb.addMonitors([correction, penal, energy, printer])
-penal.discretize()
-correction.discretize()
-correction.setUp()
-penal.setUp()
-pb.setUp()
-
-penal.apply(simu)
-
-def run():
-## Solve problem
-    pb.solve()
-
-## end of time loop ##
-
-from parmepy.mpi import MPI
-print "Start computation ..."
-time = MPI.Wtime()
-run()
-print 'total time (rank):', MPI.Wtime() - time, '(', topo.rank, ')'
-
-## Clean memory buffers
-fftw2py.clean_fftw_solver(box.dimension)
diff --git a/trashed_examples/PassiveControl_Hemisphere.py b/trashed_examples/PassiveControl_Hemisphere.py
deleted file mode 100644
index 1480dfa4c52a2b1027e1ebd04efc891bb1ca81ac..0000000000000000000000000000000000000000
--- a/trashed_examples/PassiveControl_Hemisphere.py
+++ /dev/null
@@ -1,407 +0,0 @@
-#!/usr/bin/python
-
-"""
-Taylor Green 3D : see paper van Rees 2011.
-
-All parameters are set and defined in python module dataTG.
-
-"""
-
-from hysop import Box
-from hysop.f2py import fftw2py
-import numpy as np
-from hysop.fields.continuous import Field
-from hysop.fields.variable_parameter import VariableParameter
-from hysop.mpi.topology import Cartesian
-from hysop.operator.advection import Advection
-from hysop.operator.stretching import Stretching
-from hysop.operator.absorption_BC import AbsorptionBC
-from hysop.operator.poisson import Poisson
-from hysop.operator.diffusion import Diffusion
-from hysop.operator.adapt_timestep import AdaptTimeStep
-from hysop.operator.redistribute_intra import RedistributeIntra
-from hysop.operator.hdf_io import HDF_Writer
-from hysop.operator.energy_enstrophy import EnergyEnstrophy
-from hysop.problem.simulation import Simulation
-from hysop.methods_keys import Scales, TimeIntegrator, Interpolation,\
-    Remesh, Support, Splitting, dtCrit, SpaceDiscretisation
-from hysop.numerics.integrators.runge_kutta2 import RK2 as RK2
-from hysop.numerics.integrators.runge_kutta3 import RK3 as RK3
-from hysop.numerics.integrators.runge_kutta4 import RK4 as RK4
-from hysop.numerics.finite_differences import FD_C_4, FD_C_2
-from hysop.numerics.interpolation import Linear
-from hysop.numerics.remeshing import L6_4 as rmsh
-import hysop.tools.io_utils as io
-import hysop.tools.numpywrappers as npw
-from hysop.mpi import main_rank, MPI
-from hysop.tools.parameters import Discretization, IOParams
-
-print " ========= Start Navier-Stokes 3D (Taylor Green benchmark) ========="
-
-# ====== pi constant and trigonometric functions ======
-pi = np.pi
-cos = np.cos
-sin = np.sin
-
-# ====== Flow constants =======
-uinf = 1.0
-VISCOSITY = 1. / 300.
-
-# ======= Domain =======
-dim = 3
-#Nx = 513
-#Ny = Nz = 257
-Nx = 257
-Ny = Nz = 129
-g = 2
-boxlength = npw.asrealarray([10.24, 5.12, 5.12])
-boxorigin = npw.asrealarray([-2.0, -2.56, -2.56])
-box = Box(length=boxlength, origin=boxorigin)
-
-# A global discretization with ghost points
-d3dg = Discretization([Nx, Ny, Nz], [g, g, g])
-# A global discretization, without ghost points
-d3d = Discretization([Nx, Ny, Nz])
-
-# ====== Sphere inside the domain ======
-RADIUS = 0.5
-pos = [0., 0., 0.]
-from hysop.domain.subsets.sphere import Sphere, HemiSphere
-from hysop.domain.subsets.porous import BiPole, QuadriPole,\
-    Ring, RingPole, Porous
-ringPole = RingPole(parent=box, source=HemiSphere, origin=pos,
-                    layers=[0.1, 0.4], ring_width=0.1)
-#ringPole = HemiSphere(origin=pos, radius=RADIUS, parent=box)
-
-
-# ======= Function to compute initial velocity  =======
-def computeVel(res, x, y, z, t):
-    res[0][...] = uinf
-    res[1][...] = 0.
-    res[2][...] = 0.
-    return res
-
-
-# ======= Function to compute initial vorticity =======
-def computeVort(res, x, y, z, t):
-    res[0][...] = 0.
-    res[1][...] = 0.
-    res[2][...] = 0.
-    return res
-
-#  ====== Time-dependant required-flowrate (Variable Parameter) ======
-def computeFlowrate(simu):
-    # === Time-dependant flow rate ===
-    t = simu.tk
-    Tstart = 3.0
-    flowrate = np.zeros(3)
-    flowrate[0] = uinf * box.length[1] * box.length[2]
-    if t >= Tstart and t <= Tstart + 1.0:
-        flowrate[1] = sin(pi * (t - Tstart)) * \
-                      box.length[1] * box.length[2]
-    # === Constant flow rate ===
-    #    flowrate = np.zeros(3)
-    #    flowrate[0] = uinf * box.length[1] * box.length[2]
-    return flowrate
-
-
-# ======= Fields =======
-velo = Field(domain=box, formula=computeVel,
-             name='Velocity', is_vector=True)
-vorti = Field(domain=box, formula=computeVort,
-              name='Vorticity', is_vector=True)
-
-
-# ========= Simulation setup =========
-simu = Simulation(start=0.0, end=80.0, time_step=0.0125, max_iter=10000000)
-
-
-# Adaptative timestep method : dt = min(values(dtCrit))
-# where dtCrit is a list of criterions on which the computation
-# of the adaptative time step is based
-# ex : dtCrit = ['gradU', 'cfl', 'stretch'], means :
-# dt = min (dtAdv, dtCfl, dtStretch), where dtAdv is equal to LCFL / |gradU|
-# For dtAdv, the possible choices are the following:
-# 'vort' (infinite norm of vorticity) : dtAdv = LCFL / |vort|
-# 'gradU' (infinite norm of velocity gradient), dtAdv = LCFL / |gradU|
-# 'deform' (infinite norm of deformation tensor),
-# dtAdv = LCFL / (0.5(gradU + gradU^T))
-op = {}
-iop = IOParams("time_step")
-# Default topology (i.e. 3D, with ghosts)
-topo_with_ghosts = box.create_topology(d3dg)
-
-
-op['dtAdapt'] = AdaptTimeStep(velo, vorti, simulation=simu,
-                              discretization=topo_with_ghosts,
-                              method={TimeIntegrator: RK3,
-                                      SpaceDiscretisation: FD_C_4,
-                                      dtCrit: ['gradU', 'stretch']},
-                              io_params=iop,
-                              lcfl=0.125,
-                              cfl=0.5)
-
-op['advection'] = Advection(velo, vorti,
-                            discretization=d3d,
-                            method={Scales: 'p_M6',
-                                    Splitting: 'classic'}
-                            )
-
-op['stretching'] = Stretching(velo, vorti,
-                              discretization=topo_with_ghosts)
-
-op['diffusion'] = Diffusion(viscosity=VISCOSITY, vorticity=vorti,
-                            discretization=d3d)
-
-rate = VariableParameter(formula=computeFlowrate)
-op['poisson'] = Poisson(velo, vorti, discretization=d3d, flowrate=rate)
-
-# ===== Discretization of computational operators ======
-for ope in op.values():
-    ope.discretize()
-
-topofft = op['poisson'].discreteFields[vorti].topology
-topoadvec = op['advection'].discreteFields[vorti].topology
-
-# =====  Smooth vorticity absorption at the outlet =====
-op['vort_absorption'] = AbsorptionBC(velo, vorti, discretization=topofft, 
-                                     req_flowrate=rate, 
-                                     x_coords_absorp=[7.24, 8.24])
-#                                     x_coords_absorp=[1.56, 2.56])
-op['vort_absorption'].discretize()
-
-# =====  Penalization of the vorticity on a controlled obstacle inside the domain =====
-from hysop.operator.penalize_vorticity import PenalizeVorticity
-op['penalVort'] = PenalizeVorticity(velocity=velo, vorticity=vorti,
-                                    discretization=topo_with_ghosts,
-#                                    obstacles=[ringPole], coeff=1e8,
-                                    obstacles={ringPole: [1e8, 1e8, 1]},
-                                    method={SpaceDiscretisation: FD_C_4})
-op['penalVort'].discretize()
-
-
-# ==== Operators to map data between the different computational operators ===
-# (i.e. between topologies)
-distr = {}
-distr['fft2str'] = RedistributeIntra(source=op['poisson'],
-                                     target=op['stretching'],
-                                     variables=[velo, vorti])
-distr['str2fft'] = RedistributeIntra(source=op['stretching'],
-                                     target=op['poisson'],
-                                     variables=[velo, vorti])
-distr['fft2advec'] = RedistributeIntra(source=op['poisson'],
-                                       target=op['advection'],
-                                       variables=[velo, vorti])
-distr['advec2fft'] = RedistributeIntra(source=op['advection'],
-                                       target=op['poisson'],
-                                       variables=[velo, vorti])
-# ========= Monitoring operators =========
-monitors = {}
-iop = IOParams('fields', frequency=100)
-monitors['writer'] = HDF_Writer(variables={velo: topofft, vorti: topofft},
-                                io_params=iop)
-
-io_ener = IOParams('energy_enstrophy')
-monitors['energy'] = EnergyEnstrophy(velo, vorti, discretization=topofft,
-                                     io_params=io_ener, is_normalized=False)
-
-from hysop.domain.subsets.control_box import ControlBox
-from hysop.operator.drag_and_lift import MomentumForces, NocaForces
-ref_step = topo_with_ghosts.mesh.space_step
-cbpos = npw.zeros(dim)
-cblength = npw.zeros(dim)
-cbpos[...] = boxorigin[...]
-cbpos +=  15 * ref_step
-cblength[...] = boxlength[...]
-cblength -= 30 * ref_step
-cb = ControlBox(parent=box, origin=cbpos, length=cblength)
-coeffForce = 1. / (0.5 * uinf ** 2 * pi * RADIUS ** 2)
-
-io_forces=IOParams('drag_and_lift_NocaII')
-#monitors['forcesNoca'] = NocaForces(velo, vorti, 
-#                                    discretization=topo_with_ghosts,
-#                                    nu=VISCOSITY, 
-#                                    volume_of_control=cb,
-#                                    normalization=coeffForce,
-#                                    obstacles=[ringPole], 
-#                                    io_params=io_forces)
-
-io_forcesPenal=IOParams('drag_and_lift_Mom_1')
-monitors['forcesMom'] = MomentumForces(velocity=velo, 
-                                       discretization=topo_with_ghosts,
-                                       normalization=coeffForce,
-                                       obstacles=[ringPole], 
-                                       penalisation_coeff=[1e8, 1e8, 1],
-#                                       penalisation_coeff=[1e8],
-                                       io_params=io_forcesPenal)
-
-#io_forcesPenal=IOParams('drag_and_lift_penal')
-#monitors['forcesPenal'] = DragAndLiftPenal(velo, vorti, coeffForce,
-#                                           discretization=topofft,
-#                                           obstacles=[ringPole], factor=[1e8],
-#                                           io_params=io_forcesPenal)
-
-step_dir = ref_step[0]
-io_sliceXY = IOParams('sliceXY', frequency=2000)
-thickSliceXY = ControlBox(parent=box, origin=[-2.0, -2.56, -2.0 * step_dir], 
-                          length=[10.24- step_dir, 5.12- step_dir, 4.0 * step_dir])
-#thickSliceXY = ControlBox(parent=box, origin=[-2.56, -2.56, -2.0 * step_dir], 
-#                          length=[5.12 - step_dir, 5.12 - step_dir, 4.0 * step_dir])
-monitors['writerSliceXY'] = HDF_Writer(variables={velo: topofft, vorti: topofft},
-                                      io_params=io_sliceXY, subset=thickSliceXY, 
-                                      xmfalways=True)
-
-#io_sliceXZ = IOParams('sliceXZ', frequency=2000)
-#thickSliceXZ = ControlBox(box, origin=[-2.0, -2.0 * step_dir, -2.56], 
-#                          lengths=[10.24, 4.0 * step_dir, 5.12])
-#monitors['writerSliceXZ'] = HDF_Writer(variables={velo: topofft, vorti: topofft},
-#                                       io_params=io_sliceXZ, subset=thickSliceXZ, 
-#                                       xmfalways=True)
-
-#io_subBox = IOParams('subBox', frequency=2000)
-#subBox = ControlBox(box, origin=[-0.7, -2.0, -2.0], lengths=[8.0, 4.0, 4.0])
-#monitors['writerSubBox'] = HDF_Writer(variables={velo: topofft, vorti: topofft},
-#                                      io_params=io_subBox, subset=subBox, 
-#                                      xmfalways=True)
-
-# ========= Setup for all declared operators/monitors =========
-time_setup = MPI.Wtime()
-for ope in op.values():
-    ope.setup()
-for ope in distr.values():
-    ope.setup()
-for monit in monitors.values():
-    monit.discretize()
-for monit in monitors.values():
-    monit.setup()
-
-print '[', main_rank, '] total time for setup:', MPI.Wtime() - time_setup
-
-# ========= Fields initialization =========
-# - initialize velo + vort on topostr
-# - penalize vorticity
-# - redistribute topostr --> topofft
-
-time_init = MPI.Wtime()
-ind = ringPole.discretize(topofft)
-def initFields():
-    velo.initialize(topo=topo_with_ghosts)
-    vorti.initialize(topo=topo_with_ghosts)
-    op['penalVort'].apply(simu)
-    distr['str2fft'].apply(simu)
-    distr['str2fft'].wait()
-
-initFields()
-print '[', main_rank, '] total time for init :', MPI.Wtime() - time_init
-
-fullseq = []
-
-def run(sequence):
-    op['vort_absorption'].apply(simu)
-    op['poisson'].apply(simu)               # Poisson + correction
-    monitors['forcesMom'].apply(simu)     # Forces Heloise
-    distr['fft2str'].apply(simu)
-    distr['fft2str'].wait()
-    op['penalVort'].apply(simu)             # Vorticity penalization
-#    distr['str2fft'].apply(simu)
-#    distr['str2fft'].wait()
-#    op['poisson'].apply(simu)
-#    distr['fft2str'].apply(simu)
-#    distr['fft2str'].wait()
-    op['stretching'].apply(simu)            # Stretching
-#    monitors['forcesNoca'].apply(simu)          # Forces Noca
-    distr['str2fft'].apply(simu)
-    distr['str2fft'].wait()
-    op['diffusion'].apply(simu)             # Diffusion
-    distr['fft2advec'].apply(simu)
-    distr['fft2advec'].wait()
-    op['advection'].apply(simu)             # Advection (scales)
-    distr['advec2fft'].apply(simu)
-    distr['advec2fft'].wait()
-    monitors['writerSliceXY'].apply(simu)
-#    monitors['writerSliceXZ'].apply(simu)
-#    monitors['writerSubBox'].apply(simu)
-    monitors['energy'].apply(simu)          # Energy/enstrophy
-    distr['fft2str'].apply(simu)
-    distr['fft2str'].wait()
-    op['dtAdapt'].apply(simu)               # Update timestep
-    op['dtAdapt'].wait()
-
-# ==== Serialize some data of the problem to a "restart" file ====
-# def dump(filename):
-#      """
-#      Serialize some data of the problem to file
-#      (only data required for a proper restart, namely fields in self.input
-#      and simulation).
-#      @param filename : prefix for output file. Real name = filename_rk_N,
-#      N being current process number. If None use default value from problem
-#      parameters (self.filename)
-#      """
-#      if filename is not None:
-#          filename = filename
-#         filedump = filename + '_rk_' + str(main_rank)
-#     db = parmesPickle(filedump, mode='store')
-#     db.dump(simu, 'simulation')
-#     velo.dump(filename, mode='append')
-#     vorti.dump(filename, mode='append')
-
-# ## ====== Load some data of the problem from a "restart" file ======
-# def restart(filename):
-#     """
-#     Load serialized data to restart from a previous state.
-#     self.input variables and simulation are loaded.
-#     @param  filename : prefix for downloaded file.
-#     Real name = filename_rk_N, N being current process number.
-#     If None use default value from problem
-#     parameters (self.filename)
-#     """
-#     if filename is not None:
-#         filename = filename
-#         filedump = filename + '_rk_' + str(main_rank)
-#     db = parmesPickle(filedump, mode='load')
-#     simu = db.load('simulation')[0]
-#     simu.start = simu.time - simu.time_step
-#     ite = simu.current_iteration
-#     simu.initialize()
-#     simu.current_iteration = ite
-#     print 'simu', simu
-#     print ("load ...", filename)
-#     velo.load(filename)
-#     vorti.load(filename)
-#     return simu
-
-seq = fullseq
-
-simu.initialize()
-#doDump = False
-#doRestart = False
-#dumpFreq = 5000
-#io_default={"filename":'restart'}
-#dump_filename = io.Writer(params=io_default).filename
-#===== Restart (if needed) =====
-# if doRestart:
-#     simu = restart(dump_filename)
-#     # Set up for monitors and redistribute
-#     for ope in distr.values():
-#         ope.setUp()
-#     for monit in monitors.values():
-#         monit.setUp()
-
-# ======= Time loop =======
-time_run = MPI.Wtime()
-while not simu.isOver:
-    if topofft.rank == 0:
-        simu.printState()
-    run(seq)
-    simu.advance()
-# #     testdump = simu.current_iteration % dumpFreq is 0
-# #     if doDump and testdump:
-# #         dump(dump_filename)
-print '[', main_rank, '] total time for run :', MPI.Wtime() - time_run
-
-# ======= Finalize =======
-fftw2py.clean_fftw_solver(box.dimension)
-for ope in distr.values():
-    ope.finalize()
-for monit in monitors.values():
-    monit.finalize()
diff --git a/trashed_examples/Plane_jet/NS_planeJet_hybrid_MS.py b/trashed_examples/Plane_jet/NS_planeJet_hybrid_MS.py
deleted file mode 100644
index 6f65a4f347897448a95269ecbcb04f6a8ae99cc9..0000000000000000000000000000000000000000
--- a/trashed_examples/Plane_jet/NS_planeJet_hybrid_MS.py
+++ /dev/null
@@ -1,463 +0,0 @@
-#!/usr/bin/env python
-# Scripts arguments:
-# 1. Flow resolution
-# 2. Scalar resolution
-# 3. Dictionary for devices id: (mpi rank: device id)
-# 4. Is the initial condition is perturbed
-# 5. Is data output
-import sys
-USER_NB_ELEM_UW = eval(sys.argv[1])
-USER_NB_ELEM_S = eval(sys.argv[2])
-USER_RANK_DEVICE_ID = eval(sys.argv[3])
-RANDOM_INIT = eval(sys.argv[4])
-IS_OUTPUT = eval(sys.argv[5])
-import hysop
-# import hysop.gpu
-# hysop.gpu.CL_PROFILE = True
-from hysop.constants import np, HDF5, ASCII, HYSOP_MPI_REAL
-from hysop.mpi.main_var import MPI, main_size, main_rank, main_comm
-from hysop.tools.parameters import MPIParams, Discretization, IOParams
-from hysop.problem.simulation import Simulation
-from hysop.fields.variable_parameter import VariableParameter
-from hysop.methods_keys import TimeIntegrator, Interpolation, Remesh,\
-    Support, Splitting, MultiScale, MultiScale, SpaceDiscretisation, \
-    GhostUpdate, Scales, dtCrit, ExtraArgs
-from hysop.numerics.integrators.runge_kutta2 import RK2 as RK
-from hysop.numerics.integrators.runge_kutta3 import RK3 as RK3
-from hysop.numerics.finite_differences import FD_C_4
-from hysop.numerics.remeshing import L6_4 as remesh_formula
-from hysop.numerics.remeshing import Linear
-from hysop.operator.advection import Advection
-from hysop.operator.diffusion import Diffusion
-from hysop.operator.stretching import Stretching
-from hysop.operator.poisson import Poisson
-from hysop.operator.differential import Curl
-from hysop.operator.adapt_timestep import AdaptTimeStep
-from hysop.operators import Custom
-from hysop.operator.redistribute_inter import RedistributeInter
-from hysop.operator.redistribute_intra import RedistributeIntra
-from hysop.gpu.gpu_transfer import DataTransfer
-from hysop.domain.subsets.boxes import SubBox
-from hysop.operator.hdf_io import HDF_Writer
-from hysop.operator.energy_enstrophy import EnergyEnstrophy
-import hysop.tools.numpywrappers as npw
-pi = np.pi
-cos = np.cos
-sin = np.sin
-exp = np.exp
-abs = np.abs
-tanh = np.tanh
-
-
-TASK_UW = 1
-TASK_SCALAR = 2
-PROC_TASKS = [TASK_UW, ] * main_size
-for p in USER_RANK_DEVICE_ID:
-    PROC_TASKS[p] = TASK_SCALAR
-try:
-    DEVICE_ID = USER_RANK_DEVICE_ID[main_rank]
-except KeyError:
-    DEVICE_ID = None
-out_freq = 200
-# Physical parameters:
-# Flow viscosity
-VISCOSITY = 1e-4
-# Schmidt number
-SC = ((1. * USER_NB_ELEM_S[0] - 1.)/(1. * USER_NB_ELEM_UW[0] - 1.))**2
-# Scalar diffusivity
-DIFF_COEFF_SCAL = VISCOSITY / SC
-
-width = 0.01
-ampl3 = 0.3
-ampl = 0.05
-ampl2 = 0.05
-
-
-ctime = MPI.Wtime()
-
-# Domain
-box = hysop.Box(length=[1., 1., 1.], origin=[0., 0., 0.],
-                proc_tasks=PROC_TASKS)
-mpi_params = MPIParams(comm=box.comm_task, task_id=PROC_TASKS[main_rank])
-mpi_params_S = MPIParams(comm=box.comm_task, task_id=TASK_SCALAR)
-mpi_params_UW = MPIParams(comm=box.comm_task, task_id=TASK_UW)
-
-
-def computeVel(res, x, y, z, t):
-    yy = abs(y - 0.5)
-    aux = (0.1 - 2. * yy) / (4. * width)
-    strg1 = exp(-abs(aux ** 2))
-    strg2 = exp(-abs(aux ** 2))
-    strg3 = exp(-abs(aux ** 2))
-    if RANDOM_INIT:
-        from create_random_arrays import random_init
-        randX, randY, randZ = random_init(res[0].shape, box.comm_task)
-        strg1 = exp(-abs(aux ** 2)) * randX
-        strg2 = exp(-abs(aux ** 2)) * randY
-        strg3 = exp(-abs(aux ** 2)) * randZ
-    else:
-        strg1 = 0.
-        strg2 = 0.
-        strg3 = 0.
-    res[0][...] = 0.5 * (1. + tanh(aux))
-    res[0][...] *= (1. + ampl3 * sin(8. * pi * x))
-    res[0][...] *= (1. + ampl * strg1)
-    res[1][...] = ampl * strg2
-    res[2][...] = ampl * strg3
-    return res
-
-
-def initScal(res, x, y, z, t):
-    yy = abs(y - 0.5)
-    aux = (0.1 - 2. * yy) / (4. * width)
-    res[0][...] = 0.5 * (1. + tanh(aux))
-    res[0][...] *= (1. + ampl3 * sin(8. * pi * x))
-    return res
-
-
-temp_maxvelo = npw.zeros((3, ))
-maxvelo_values = npw.zeros((3, ))
-
-
-def calc_maxvelo(simu, v):
-    temp_maxvelo[0] = np.max(np.abs(v[0].data[0]))
-    temp_maxvelo[1] = np.max(np.abs(v[0].data[1]))
-    temp_maxvelo[2] = np.max(np.abs(v[0].data[2]))
-    v[0].topology.comm.Allreduce(sendbuf=[temp_maxvelo, 3, HYSOP_MPI_REAL],
-                                 recvbuf=[maxvelo_values, 3, HYSOP_MPI_REAL],
-                                 op=MPI.MAX)
-    return maxvelo_values
-
-
-# Fields
-velo = hysop.Field(domain=box, formula=computeVel,
-                   name='Velocity', is_vector=True)
-vorti = hysop.Field(domain=box,
-                    name='Vorticity', is_vector=True)
-scal = hysop.Field(domain=box, formula=initScal,
-                   name='Scalar', is_vector=False)
-
-data = {'dt': 0.001}
-dt = VariableParameter(data)
-simu = Simulation(start=0.0, end=5., time_step=0.001, max_iter=10000)
-
-# Flow discretizations:
-d_uw = Discretization(USER_NB_ELEM_UW)
-d_uw_ghosts = Discretization(USER_NB_ELEM_UW, [2, ] * 3)
-# Scalar discretization
-d_s = Discretization(USER_NB_ELEM_S)
-# Velocity discretization for scalar advection
-if USER_NB_ELEM_UW[0] == USER_NB_ELEM_S[0]:
-    d_uw_for_s = Discretization(USER_NB_ELEM_UW)
-else:
-    d_uw_for_s = Discretization(USER_NB_ELEM_UW, [1, ] * 3)
-
-# Topologies
-topo_GPU_scal = None
-topo_GPU_velo = None
-topo_CPU_velo_fft = None
-topo_CPU_velo_ghosts = None
-topo_CPU_velo_scales = None
-if box.isOnTask(TASK_UW):
-    topo_CPU_velo_scales = box.create_topology(
-        d_uw, dim=2, mpi_params=mpi_params_UW)
-if box.isOnTask(TASK_SCALAR):
-    topo_GPU_velo = box.create_topology(
-        d_uw_for_s, dim=1, mpi_params=mpi_params_S)
-    topo_GPU_scal = box.create_topology(
-        d_s, dim=1, mpi_params=mpi_params_S)
-
-# Operators
-# GPU operators
-advec_scal_method = {TimeIntegrator: RK,
-                     Interpolation: Linear,
-                     Remesh: remesh_formula,
-                     Support: 'gpu_1k',
-                     Splitting: 'o2',
-                     MultiScale: Linear}
-if PROC_TASKS.count(TASK_SCALAR) > 1:
-    advec_scal_method[ExtraArgs] = {'device_id': DEVICE_ID,
-                                    'device_type': 'gpu'}
-else:
-    advec_scal_method[ExtraArgs] = {'max_velocity': [1.2, 0.7, 0.7],
-                                    'max_dt': 0.012,
-                                    'device_id': DEVICE_ID,
-                                    'device_type': 'gpu'}
-advec_scal = Advection(velo,
-                       discretization=topo_GPU_velo,
-                       variables={scal: topo_GPU_scal},
-                       mpi_params=mpi_params_S,
-                       method=advec_scal_method)
-diffusion_scal = Diffusion(viscosity=DIFF_COEFF_SCAL,
-                           vorticity=scal,
-                           discretization=topo_GPU_scal,
-                           mpi_params=mpi_params_S,
-                           method={Support: 'gpu',
-                                   SpaceDiscretisation: 'fd',
-                                   ExtraArgs: {'device_id': DEVICE_ID,
-                                               'device_type': 'gpu'}})
-diffusion_scal.name += '_(Scalar)'
-
-# CPU operators
-advec = Advection(velo,
-                  discretization=topo_CPU_velo_scales,
-                  variables={vorti: topo_CPU_velo_scales},
-                  mpi_params=mpi_params_UW,
-                  method={Scales: 'p_64', MultiScale: 'L4_4'})
-stretch = Stretching(velo, vorti, discretization=d_uw_ghosts,
-                     mpi_params=mpi_params_UW)
-diffusion = Diffusion(variables={vorti: d_uw},
-                      viscosity=VISCOSITY,
-                      mpi_params=mpi_params_UW)
-poisson = Poisson(velo, vorti, discretization=d_uw,
-                  mpi_params=mpi_params_UW)
-c = Curl(velo, vorti, discretization=d_uw,
-         method={SpaceDiscretisation: 'fftw', GhostUpdate: True},
-         mpi_params=mpi_params_UW)
-#dt_output = None
-#if IS_OUTPUT:
-dt_output = IOParams(frequency=1, filename='dt.dat', fileformat=ASCII)
-dt_adapt = AdaptTimeStep(velo, vorti,
-                         simulation=simu,
-                         time_range=[0, np.infty],
-                         discretization=d_uw_ghosts,
-                         method={TimeIntegrator: RK3,
-                                 SpaceDiscretisation: FD_C_4,
-                                 dtCrit: ['gradU', 'cfl']},
-                         lcfl=0.15,
-                         cfl=1.5,
-                         io_params=dt_output,
-                         mpi_params=mpi_params_UW)
-
-# Operators discretizations
-if box.isOnTask(TASK_SCALAR):
-    for op in (advec_scal, diffusion_scal):
-        op.discretize()
-if box.isOnTask(TASK_UW):
-    for op in (advec, stretch, diffusion, poisson, c, dt_adapt):
-        op.discretize()
-
-# Get some topologies
-if box.isOnTask(TASK_UW):
-    topo_CPU_velo_fft = poisson.discreteFields[velo].topology
-    topo_CPU_velo_ghosts = stretch.discreteFields[velo].topology
-
-if IS_OUTPUT:
-    # Defining subdomains
-    L_V = 1. - 1. / (1. * USER_NB_ELEM_UW[0])
-    L_S = 1. - 1. / (1. * USER_NB_ELEM_S[0])
-    XY_plane_v = SubBox(origin=[0., 0., 0.5], length=[L_V, L_V, 0.],
-                        parent=box)
-    XZ_plane_v = SubBox(origin=[0., 0.5, 0.], length=[L_V, 0., L_V],
-                        parent=box)
-    XY_plane_s = SubBox(origin=[0., 0., 0.5], length=[L_S, L_S, 0.],
-                        parent=box)
-    XZ_plane_s = SubBox(origin=[0., 0.5, 0.], length=[L_S, 0., L_S],
-                        parent=box)
-    # Defining output operators
-    p_velo = HDF_Writer(variables={velo: topo_CPU_velo_fft},
-                        mpi_params=mpi_params_UW,
-                        io_params=IOParams(frequency=out_freq,
-                                           filename='flow',
-                                           fileformat=HDF5))
-    p_velo_xy = HDF_Writer(variables={velo: topo_CPU_velo_fft,
-                                      vorti: topo_CPU_velo_fft},
-                           var_names={velo: 'Velocity', vorti: 'Vorticity'},
-                           subset=XY_plane_v,
-                           mpi_params=mpi_params_UW,
-                           io_params=IOParams(frequency=out_freq,
-                                              filename='flow_XY',
-                                              fileformat=HDF5))
-    p_velo_xz = HDF_Writer(variables={velo: topo_CPU_velo_fft,
-                                      vorti: topo_CPU_velo_fft},
-                           var_names={velo: 'Velocity', vorti: 'Vorticity'},
-                           subset=XZ_plane_v,
-                           mpi_params=mpi_params_UW,
-                           io_params=IOParams(frequency=out_freq,
-                                              filename='flow_XZ',
-                                              fileformat=HDF5))
-    p_scal_xy = HDF_Writer(variables={scal: topo_GPU_scal},
-                           var_names={scal: 'Scalar'},
-                           subset=XY_plane_s,
-                           mpi_params=mpi_params_S,
-                           io_params=IOParams(frequency=out_freq,
-                                              filename='scal_XY',
-                                              fileformat=HDF5))
-    p_scal_xz = HDF_Writer(variables={scal: topo_GPU_scal},
-                           var_names={scal: 'Scalar'},
-                           subset=XZ_plane_s,
-                           mpi_params=mpi_params_S,
-                           io_params=IOParams(frequency=out_freq,
-                                              filename='scal_XZ',
-                                              fileformat=HDF5))
-    p_scal_xy.name += '_(Scalar)'
-    p_scal_xz.name += '_(Scalar)'
-    energy = EnergyEnstrophy(velocity=velo,
-                             vorticity=vorti,
-                             discretization=topo_CPU_velo_fft,
-                             mpi_params=mpi_params_UW,
-                             io_params=IOParams(frequency=1,
-                                                filename='energy.dat',
-                                                fileformat=ASCII))
-    maxvelo = Custom(in_fields=[velo],
-                     variables={velo: topo_CPU_velo_scales},
-                     function=calc_maxvelo,
-                     diagnostics_shape=(1, 4),
-                     mpi_params=mpi_params_UW,
-                     io_params=IOParams(frequency=1,
-                                        filename='maxvelo.dat',
-                                        fileformat=ASCII))
-    if box.isOnTask(TASK_UW):
-        for op in (p_velo, p_velo_xy, p_velo_xz, energy, maxvelo):
-            op.discretize()
-    if box.isOnTask(TASK_SCALAR):
-        for op in (p_scal_xy, p_scal_xz):
-            op.discretize()
-
-# Redistribute operators
-# CPU redistributes
-toGhost_vorti = RedistributeIntra(variables=[vorti],
-                                  source=advec, target=stretch,
-                                  mpi_params=mpi_params_UW)
-toGhost_vorti.name += '_toG_W'
-toGhost_velo = RedistributeIntra(variables=[velo],
-                                 source=poisson, target=stretch,
-                                 run_till=[stretch, dt_adapt],
-                                 mpi_params=mpi_params_UW)
-toGhost_velo.name += '_toG_V'
-toScales_velo = RedistributeIntra(variables=[velo],
-                                  source=poisson, target=advec,
-                                  mpi_params=mpi_params_UW)
-toScales_velo.name += '_toScales_V'
-toScales_vorti = RedistributeIntra(variables=[vorti],
-                                   source=poisson, target=advec,
-                                   mpi_params=mpi_params_UW)
-toScales_vorti.name += '_toScales_W'
-fromGhost_vorti = RedistributeIntra(variables=[vorti],
-                                    source=stretch, target=diffusion,
-                                    mpi_params=mpi_params_UW)
-fromGhost_vorti.name += '_FromG_W'
-toDev = DataTransfer(source=topo_GPU_velo, target=advec_scal,
-                     variables={velo: topo_GPU_velo},
-                     mpi_params=mpi_params_S)
-toDev.name += '_ToDev_V'
-
-redistrib = RedistributeInter(variables=[velo],
-                              parent=main_comm,
-                              source=topo_CPU_velo_fft, target=topo_GPU_velo,
-                              source_id=TASK_UW, target_id=TASK_SCALAR,
-                              run_till=[toDev])
-if IS_OUTPUT:
-    toHost = DataTransfer(source=topo_GPU_scal, target=p_scal_xy,
-                          variables={scal: topo_GPU_scal},
-                          mpi_params=mpi_params_S,
-                          freq=out_freq,
-                          run_till=[p_scal_xz, ])
-    toHost.name += '_ToHost_S'
-
-# Operators setup
-if box.isOnTask(TASK_SCALAR):
-    for op in (advec_scal, diffusion_scal):
-        op.setup()
-    if IS_OUTPUT:
-        for op in (p_scal_xy, p_scal_xz, toHost):
-            op.setup()
-    for op in (toDev, ):
-        op.setup()
-if box.isOnTask(TASK_UW):
-    for op in (advec, stretch, diffusion, poisson, c, dt_adapt):
-        op.setup()
-    if IS_OUTPUT:
-        for op in (p_velo, p_velo_xy, p_velo_xz, energy, maxvelo):
-            op.setup()
-    for op in (toGhost_vorti, fromGhost_vorti, toGhost_velo,
-               toScales_velo, toScales_vorti):
-        op.setup()
-# Wait for all operators setup before setup the intra-comm redistribute
-main_comm.Barrier()
-redistrib.setup()
-
-# Operators list
-if IS_OUTPUT:
-    operators_list = [redistrib,
-                      toDev, advec_scal, diffusion_scal,
-                      toHost, p_scal_xy, p_scal_xz,
-                      advec, toGhost_vorti, stretch,
-                      fromGhost_vorti, diffusion, poisson,
-                      p_velo, p_velo_xy, p_velo_xz, energy, maxvelo,
-                      toGhost_velo, toScales_velo, toScales_vorti, dt_adapt]
-else:
-    operators_list = [redistrib,
-                      toDev, advec_scal, diffusion_scal,
-                      advec, toGhost_vorti, stretch,
-                      fromGhost_vorti, diffusion, poisson,
-                      toGhost_velo, toScales_velo, toScales_vorti, dt_adapt]
-
-# Fields initializations
-if box.isOnTask(TASK_SCALAR):
-    scal.initialize(topo=topo_GPU_scal)
-    advec_dirX = advec_scal.advec_dir[0].discrete_op
-    gpu_scal = advec_dirX.fields_on_grid[0]
-    gpu_pscal = advec_dirX.fields_on_part[gpu_scal][0]
-    diffusion_scal.discrete_op.set_field_tmp(gpu_pscal)
-    if IS_OUTPUT:
-        p_scal_xy.apply(simu)
-        p_scal_xz.apply(simu)
-if box.isOnTask(TASK_UW):
-    velo.initialize(topo=topo_CPU_velo_fft)
-    c.apply(simu)
-    poisson.apply(simu)
-    toGhost_velo.apply(simu)
-    toScales_velo.apply(simu)
-    toScales_vorti.apply(simu)
-    if IS_OUTPUT:
-        p_velo.apply(simu)
-        p_velo_xy.apply(simu)
-        p_velo_xz.apply(simu)
-
-simu.initialize()
-setup_time = MPI.Wtime() - ctime
-main_comm.Barrier()
-
-# Solve
-solve_time = 0.
-while not simu.isOver:
-    ctime = MPI.Wtime()
-    if main_rank == 0:
-        simu.printState()
-    for op in operators_list:
-        if box.isOnTask(op.task_id()):
-            op.apply(simu)
-    if box.isOnTask(TASK_SCALAR):
-        # Wait gpu operations on scalar
-        advec_scal.advec_dir[0].discreteFields[scal].wait()
-    solve_time += MPI.Wtime() - ctime
-    dt_adapt.wait()
-    # Synchronize threads
-    main_comm.Barrier()
-    simu.advance()
-
-main_comm.Barrier()
-nb_ite = simu.current_iteration
-simu.finalize()
-
-
-if IS_OUTPUT:
-    if box.isOnTask(TASK_SCALAR):
-        p_scal_xy.apply(simu)
-        p_scal_xz.apply(simu)
-    if box.isOnTask(TASK_UW):
-        p_velo.apply(simu)
-        p_velo_xy.apply(simu)
-        p_velo_xz.apply(simu)
-
-for op in operators_list:
-    if box.isOnTask(op.task_id()):
-        op.finalize()
-
-velo.finalize()
-if box.isOnTask(TASK_SCALAR):
-    scal.finalize()
-if box.isOnTask(TASK_UW):
-    vorti.finalize()
-main_comm.Barrier()
diff --git a/trashed_examples/Plane_jet/create_random_arrays.py b/trashed_examples/Plane_jet/create_random_arrays.py
deleted file mode 100644
index de7322631e99cba53ce5e92b837ad3074bf21349..0000000000000000000000000000000000000000
--- a/trashed_examples/Plane_jet/create_random_arrays.py
+++ /dev/null
@@ -1,39 +0,0 @@
-import os
-import numpy as np
-from hysop.constants import HYSOP_REAL, ORDER
-
-
-def random_init(shape, mpi_comm):
-    # Create a folder to store all random arrays
-    d = 'rand_init'
-    if mpi_comm.Get_rank() == 0:
-        if not os.path.exists(d):
-            os.makedirs(d)
-    mpi_comm.Barrier()
-    file_name = "{0}_{1}_{2}".format(*shape)
-    file_name += "_{0}p_{1}.dat".format(mpi_comm.Get_size(),
-                                        mpi_comm.Get_rank())
-    try:
-        randX = np.asarray(
-            np.reshape(np.fromfile(os.path.join(d, 'randX_' + file_name)),
-                       shape),
-            dtype=HYSOP_REAL, order=ORDER)
-        randY = np.asarray(
-            np.reshape(np.fromfile(os.path.join(d, 'randY_' + file_name)),
-                       shape),
-            dtype=HYSOP_REAL, order=ORDER)
-        randZ = np.asarray(
-            np.reshape(np.fromfile(os.path.join(d, 'randZ_' + file_name)),
-                       shape),
-            dtype=HYSOP_REAL, order=ORDER)
-    except IOError:
-        randX = np.asarray(np.random.random(shape),
-                           dtype=HYSOP_REAL, order=ORDER) - 0.5
-        randY = np.asarray(np.random.random(shape),
-                           dtype=HYSOP_REAL, order=ORDER) - 0.5
-        randZ = np.asarray(np.random.random(shape),
-                           dtype=HYSOP_REAL, order=ORDER) - 0.5
-        randX.tofile(os.path.join(d, 'randX_' + file_name))
-        randY.tofile(os.path.join(d, 'randY_' + file_name))
-        randZ.tofile(os.path.join(d, 'randZ_' + file_name))
-    return randX, randY, randZ
diff --git a/trashed_examples/RMI/RMI_hybrid.py b/trashed_examples/RMI/RMI_hybrid.py
deleted file mode 100644
index b62eeab10eea67bd5a25fe0fa242940a9fb1bb3c..0000000000000000000000000000000000000000
--- a/trashed_examples/RMI/RMI_hybrid.py
+++ /dev/null
@@ -1,499 +0,0 @@
-#!/usr/bin/env python
-# Scripts arguments:
-# 1. Flow resolution
-# 2. Scalar resolution
-# 3. Dictionary for devices id: (mpi rank: device id)
-# 4. Is the initial condition is perturbed
-# 5. Is data output
-import sys
-msg = """
-This example computes a two-phase flow.
-Argments must be given in command line. For example:
-mpirun -np 9 python RMI_hybrid.py "[129,129,257]" "[129,129,257]" "{0:0}" "True" "True"
-"""
-assert len(sys.argv) == 6, msg
-USER_NB_ELEM_UW = eval(sys.argv[1])
-USER_NB_ELEM_S = eval(sys.argv[2])
-USER_RANK_DEVICE_ID = eval(sys.argv[3])
-RANDOM_INIT = eval(sys.argv[4])
-IS_OUTPUT = eval(sys.argv[5])
-import hysop
-# import hysop.gpu
-# hysop.gpu.CL_PROFILE = True
-from hysop.constants import np, HDF5, ASCII, HYSOP_MPI_REAL
-from hysop.mpi.main_var import MPI, main_size, main_rank, main_comm
-from hysop.tools.parameters import MPIParams, Discretization, IOParams
-from hysop.problem.simulation import Simulation
-from hysop.fields.variable_parameter import VariableParameter
-from hysop.methods_keys import TimeIntegrator, Interpolation, Remesh,\
-    Support, Splitting, MultiScale, MultiScale, SpaceDiscretisation, \
-    Scales, dtCrit, ExtraArgs
-from hysop.numerics.integrators.runge_kutta2 import RK2 as RK
-from hysop.numerics.integrators.runge_kutta3 import RK3 as RK3
-from hysop.numerics.finite_differences import FD_C_4
-from hysop.numerics.remeshing import L6_4 as remesh_formula
-from hysop.numerics.remeshing import Linear
-from hysop.operator.advection import Advection
-from hysop.operator.diffusion import Diffusion
-from hysop.operator.stretching import Stretching
-from hysop.operator.baroclinic import Baroclinic
-from hysop.operator.penalization import PenalizeVorticity
-from hysop.operator.poisson import Poisson
-from hysop.operator.adapt_timestep import AdaptTimeStep
-from hysop.operators import Custom
-from hysop.operator.redistribute_inter import RedistributeInter
-from hysop.operator.redistribute_intra import RedistributeIntra
-from hysop.gpu.gpu_transfer import DataTransfer
-from hysop.domain.subsets import SubBox
-from hysop.operator.hdf_io import HDF_Writer
-from hysop.operator.energy_enstrophy import EnergyEnstrophy
-import hysop.tools.numpywrappers as npw
-from hysop.tools.profiler import Profiler, FProfiler
-#from hysop.tools.io_utils import IO
-#IO.set_default_path("/scratch_p/jmetancelin")
-pi = np.pi
-cos = np.cos
-sin = np.sin
-exp = np.exp
-abs = np.abs
-tanh = np.tanh
-
-assert main_size > 1, "This example must run with at least 2 process"
-TASK_UW = 1
-TASK_SCALAR = 2
-PROC_TASKS = [TASK_UW, ] * main_size
-for p in USER_RANK_DEVICE_ID:
-    PROC_TASKS[p] = TASK_SCALAR
-try:
-    DEVICE_ID = USER_RANK_DEVICE_ID[main_rank]
-except KeyError:
-    DEVICE_ID = None
-out_freq = 10
-# Physical parameters:
-# Flow viscosity
-VISCOSITY = 1e-4
-AMPLITUDE = 0.02
-PERIOD = pi
-SIGMA = 0.1
-NOISE = 1.
-
-
-def computeVel(res, x, y, z, t):
-    zz = exp(-z * z / SIGMA)
-    res[0][...] = z * cos(PERIOD * x) * zz
-    coef = -2. * AMPLITUDE / (PERIOD * SIGMA)
-    res[0][...] *= coef
-    res[1][...] = 0.
-    res[2][...] = sin(PERIOD * x) * zz
-    res[2][...] *= AMPLITUDE
-    if RANDOM_INIT:
-        from create_random_arrays import random_init
-        randX, randY, randZ = random_init(res[0].shape, box.comm_task)
-        res[0][...] *= (1. + zz * randX * NOISE)
-        res[1][...] *= (1. + zz * randY * NOISE)
-        res[2][...] *= (1. + zz * randZ * NOISE)
-    return res
-
-
-def computeVort(res, x, y, z, t):
-    zz = exp(-z * z / SIGMA)
-    coeff = 4.*AMPLITUDE*z*z/(PERIOD*SIGMA*SIGMA)
-    res[0][...] = 0.
-    res[1][...] = coeff * zz * cos(PERIOD * x)
-    res[1][...] -= 2.*AMPLITUDE * zz * cos(PERIOD * x)/(PERIOD*SIGMA)
-    res[1][...] -= AMPLITUDE*PERIOD * zz * cos(PERIOD * x)
-    res[2][...] = 0.
-    if RANDOM_INIT:
-        from create_random_arrays import random_init
-        randX, randY, randZ = random_init(res[0].shape, box.comm_task)
-        res[0][...] *= (1. + zz * randX * NOISE)
-        res[1][...] *= (1. + zz * randY * NOISE)
-        res[2][...] *= (1. + zz * randZ * NOISE)
-    return res
-
-def initScal(res, x, y, z, t):
-    res[0][...] = z
-    return res
-
-def phitorho(res, a):
-    res[0][...] = np.tanh(a * 10.)
-    res[0][...] += 2.
-    return res
-
-def initRho(res, x, y, z, t):
-    return phitorho(res, z)
-
-
-def func_scal_to_rho(simu, f_in, f_out):
-    phitorho(f_out[0].data, f_in[0].data[0])
-
-
-temp_maxvelo = npw.zeros((3, ))
-
-
-def calc_maxvelo(simu, v, w=None, maxvelo_values):
-    temp_maxvelo[0] = np.max(np.abs(v[0].data[0]))
-    temp_maxvelo[1] = np.max(np.abs(v[0].data[1]))
-    temp_maxvelo[2] = np.max(np.abs(v[0].data[2]))
-    v[0].topology.comm.Allreduce(sendbuf=[temp_maxvelo, 3, HYSOP_MPI_REAL],
-                                 recvbuf=[maxvelo_values, 3, HYSOP_MPI_REAL],
-                                 op=MPI.MAX)
-
-ctime = MPI.Wtime()
-# Domain
-box = hysop.Box(length=[2., 2., 4.], origin=[-1., -1., -2.],
-                proc_tasks=PROC_TASKS)
-bc_b = SubBox(length=[2., 2., 0.1], origin=[-1., -1., -2.], parent=box)
-bc_t = SubBox(length=[2., 2., 0.1], origin=[-1., -1., 1.9], parent=box)
-mpi_params    = MPIParams(comm=box.comm_task, task_id=PROC_TASKS[main_rank])
-mpi_params_S  = MPIParams(comm=box.comm_task, task_id=TASK_SCALAR)
-mpi_params_UW = MPIParams(comm=box.comm_task, task_id=TASK_UW)
-
-
-# Fields
-velo = hysop.Field(domain=box, formula=computeVel,
-                   name='Velocity', is_vector=True)
-vorti = hysop.Field(domain=box, formula=computeVort,
-                    name='Vorticity', is_vector=True)
-scal = hysop.Field(domain=box, formula=initScal,
-                   name='Scalar', is_vector=False)
-rho = hysop.Field(domain=box, formula=initRho,
-                  name='Rho', is_vector=False)
-
-data = {'dt': 0.001}
-dt = VariableParameter(data)
-simu = Simulation(start=0.0, end=100., time_step=0.001, max_iter=1000000)
-
-# Flow discretizations:
-d_uw = Discretization(USER_NB_ELEM_UW)
-d_uw_ghosts = Discretization(USER_NB_ELEM_UW, [2, ] * 3)
-# Scalar discretization
-d_s = Discretization(USER_NB_ELEM_S)
-# Velocity discretization for scalar advection
-if USER_NB_ELEM_UW[0] == USER_NB_ELEM_S[0]:
-    d_uw_for_s = Discretization(USER_NB_ELEM_UW)
-else:
-    d_uw_for_s = Discretization(USER_NB_ELEM_UW, [1, ] * 3)
-
-# Topologies
-topo_GPU_scal = None
-topo_GPU_velo = None
-topo_CPU_velo_fft = None
-topo_CPU_velo_ghosts = None
-topo_CPU_velo_scales = None
-if box.is_on_task(TASK_UW):
-    topo_CPU_velo_scales = box.create_topology(
-        d_uw, dim=2, mpi_params=mpi_params_UW)
-if box.is_on_task(TASK_SCALAR):
-    topo_GPU_velo = box.create_topology(
-        d_uw_for_s, dim=1, mpi_params=mpi_params_S)
-    topo_GPU_scal = box.create_topology(
-        d_s, dim=1, mpi_params=mpi_params_S)
-
-# Operators
-# GPU operators
-advec_scal_method = {TimeIntegrator: RK,
-                     Interpolation: Linear,
-                     Remesh: remesh_formula,
-                     Support: 'gpu_1k',
-                     Splitting: 'o2',
-                     MultiScale: Linear}
-if PROC_TASKS.count(TASK_SCALAR) > 1:
-    advec_scal_method[ExtraArgs] = {'device_id': DEVICE_ID,
-                                    'device_type': 'gpu'}
-else:
-    advec_scal_method[ExtraArgs] = {'max_velocity': [1.2, 0.7, 0.7],
-                                    'max_dt': 0.012,
-                                    'device_id': DEVICE_ID,
-                                    'device_type': 'gpu'}
-advec_scal = Advection(velo,
-                       discretization=topo_GPU_velo,
-                       variables={scal: topo_GPU_scal},
-                       mpi_params=mpi_params_S,
-                       method=advec_scal_method)
-
-# CPU operators
-advec = Advection(velo,
-                  discretization=topo_CPU_velo_scales,
-                  variables={vorti: topo_CPU_velo_scales},
-                  mpi_params=mpi_params_UW,
-                  method={Scales: 'p_64', MultiScale: 'L4_4'})
-stretch = Stretching(velo, vorti, discretization=d_uw_ghosts,
-                     mpi_params=mpi_params_UW)
-scal_to_rho = Custom(in_fields=[scal, ], out_fields=[rho, ],
-                     function=func_scal_to_rho,
-                     variables={scal: d_uw_ghosts,
-                                rho: d_uw_ghosts},
-                     mpi_params=mpi_params_UW)
-penal = PenalizeVorticity(velocity=velo, vorticity=vorti,
-                          discretization=d_uw_ghosts,
-                          obstacles=[bc_t, bc_b], coeff=1e8,
-                          method={SpaceDiscretisation: FD_C_4})
-baroclinic = Baroclinic(velo, vorti, rho, VISCOSITY,
-                        discretization=d_uw_ghosts,
-                        mpi_params=mpi_params_UW)
-diffusion = Diffusion(variables={vorti: d_uw},
-                      viscosity=VISCOSITY,
-                      mpi_params=mpi_params_UW)
-poisson = Poisson(velo, vorti, discretization=d_uw,
-                  mpi_params=mpi_params_UW)
-dt_output = IOParams(frequency=1, filename='dt.dat', fileformat=ASCII)
-dt_adapt = AdaptTimeStep(velo, vorti,
-                         simulation=simu,
-                         time_range=[10, np.infty],
-                         discretization=d_uw_ghosts,
-                         method={TimeIntegrator: RK3,
-                                 SpaceDiscretisation: FD_C_4,
-                                 dtCrit: ['gradU']},
-                         lcfl=0.1,
-                         io_params=dt_output,
-                         mpi_params=mpi_params_UW)
-
-# Operators discretizations
-if box.is_on_task(TASK_SCALAR):
-    for op in (advec_scal, ):
-        op.discretize()
-if box.is_on_task(TASK_UW):
-    for op in (advec, stretch, scal_to_rho, baroclinic, penal,
-               diffusion, poisson, dt_adapt):
-        op.discretize()
-
-# Get some topologies
-if box.is_on_task(TASK_UW):
-    topo_CPU_velo_fft = poisson.discreteFields[velo].topology
-    topo_CPU_velo_ghosts = stretch.discreteFields[velo].topology
-
-if IS_OUTPUT:
-    # Defining output operators
-    p_velo = HDF_Writer(variables={velo: topo_CPU_velo_fft,
-                                   vorti: topo_CPU_velo_fft},
-                        mpi_params=mpi_params_UW,
-                        io_params=IOParams(frequency=out_freq,
-                                           filename='flow',
-                                           fileformat=HDF5))
-    p_rho = HDF_Writer(variables={rho: topo_CPU_velo_ghosts},
-                       mpi_params=mpi_params_UW,
-                       io_params=IOParams(frequency=out_freq,
-                                          filename='rho',
-                                          fileformat=HDF5))
-    p_scal = HDF_Writer(variables={scal: topo_GPU_scal},
-                        var_names={scal: 'Scalar'},
-                        mpi_params=mpi_params_S,
-                        io_params=IOParams(frequency=out_freq,
-                                           filename='scal',
-                                           fileformat=HDF5))
-    p_scal.name += '_(Scalar)'
-    energy = EnergyEnstrophy(velocity=velo,
-                             vorticity=vorti,
-                             discretization=topo_CPU_velo_fft,
-                             mpi_params=mpi_params_UW,
-                             io_params=IOParams(frequency=1,
-                                                filename='energy.dat',
-                                                fileformat=ASCII))
-    maxvelo = Custom(in_fields=[velo],
-                     variables={velo: topo_CPU_velo_scales},
-                     function=calc_maxvelo,
-                     diagnostics_shape=(1, 4),
-                     mpi_params=mpi_params_UW,
-                     io_params=IOParams(frequency=1,
-                                        filename='maxvelo.dat',
-                                        fileformat=ASCII))
-    if box.is_on_task(TASK_UW):
-        for op in (p_velo, p_rho, energy, maxvelo):
-            op.discretize()
-    if box.is_on_task(TASK_SCALAR):
-        for op in (p_scal, ):
-            op.discretize()
-
-# Redistribute operators
-# CPU redistributes
-advecToGhost_vorti = RedistributeIntra(variables=[vorti],
-                                       source=advec, target=stretch,
-                                       mpi_params=mpi_params_UW)
-advecToGhost_vorti.name += '_advec2Ghost_W'
-fftToGhost_velo = RedistributeIntra(variables=[velo],
-                                    source=poisson, target=stretch,
-                                    run_till=[stretch, dt_adapt],
-                                    mpi_params=mpi_params_UW)
-fftToGhost_velo.name += '_advec2Ghost_U'
-fftToGhost_vorti = RedistributeIntra(variables=[vorti],
-                                     source=poisson, target=stretch,
-                                     run_till=[stretch, dt_adapt],
-                                     mpi_params=mpi_params_UW)
-fftToGhost_vorti.name += '_fft2Ghost_W'
-fftToScales_velo = RedistributeIntra(variables=[velo],
-                                     source=poisson, target=advec,
-                                     mpi_params=mpi_params_UW)
-fftToScales_velo.name += '_fft2Scales_U'
-fftToScales_vorti = RedistributeIntra(variables=[vorti],
-                                      source=poisson, target=advec,
-                                      mpi_params=mpi_params_UW)
-fftToScales_vorti.name += '_fft2Scales_W'
-ghostToFFT_vorti = RedistributeIntra(variables=[vorti],
-                                     source=baroclinic, target=diffusion,
-                                     mpi_params=mpi_params_UW)
-ghostToFFT_vorti.name += '_ghost2FFT_W'
-ghostToFFT_velo = RedistributeIntra(variables=[velo],
-                                    source=penal, target=poisson,
-                                    mpi_params=mpi_params_UW)
-ghostToFFT_velo.name += '_ghost2FFT_U'
-toDev = DataTransfer(source=topo_GPU_velo, target=advec_scal,
-                     variables={velo: topo_GPU_velo},
-                     mpi_params=mpi_params_S)
-toDev.name += '_ToDev_U'
-
-redistrib = RedistributeInter(variables=[velo],
-                              parent=main_comm,
-                              source=topo_CPU_velo_fft, target=topo_GPU_velo,
-                              source_id=TASK_UW, target_id=TASK_SCALAR,
-                              run_till=[toDev])
-redistrib.name += '_CPU2GPU_U'
-redistrib_scal = RedistributeInter(variables=[scal],
-                                   parent=main_comm,
-                                   source=topo_GPU_velo,
-                                   target=topo_CPU_velo_ghosts,
-                                   source_id=TASK_SCALAR, target_id=TASK_UW,
-                                   run_till=[scal_to_rho])
-redistrib_scal.name += '_GPU2CPU_Scal'
-toHost = DataTransfer(source=topo_GPU_scal, target=p_scal,
-                      variables={scal: topo_GPU_scal},
-                      mpi_params=mpi_params_S,
-                      run_till=[redistrib_scal, ])
-toHost.name += '_ToHost_S'
-
-# Operators setup
-if box.is_on_task(TASK_SCALAR):
-    for op in (advec_scal, ):
-        op.setup()
-    if IS_OUTPUT:
-        for op in (p_scal, toHost):
-            op.setup()
-    for op in (toDev, ):
-        op.setup()
-if box.is_on_task(TASK_UW):
-    for op in (advec, stretch, scal_to_rho, baroclinic, penal,
-               diffusion, poisson, dt_adapt):
-        op.setup()
-    if IS_OUTPUT:
-        for op in (p_velo, p_rho, energy, maxvelo):
-            op.setup()
-    for op in (advecToGhost_vorti, fftToGhost_velo,
-               fftToGhost_vorti, fftToScales_velo,
-               fftToScales_vorti, ghostToFFT_vorti,
-               ghostToFFT_velo):
-        op.setup()
-# Wait for all operators setup before setup the intra-comm redistribute
-main_comm.Barrier()
-redistrib.setup()
-redistrib_scal.setup()
-
-# Operators list
-if IS_OUTPUT:
-    operators_list = [redistrib,
-                      toDev, advec_scal,
-                      toHost, p_scal,
-                      advec, advecToGhost_vorti, stretch, baroclinic,
-                      redistrib_scal,
-                      scal_to_rho, penal,
-                      ghostToFFT_velo, ghostToFFT_vorti,
-                      diffusion, poisson,
-                      p_velo, p_rho, energy, maxvelo,
-                      fftToGhost_velo, fftToScales_velo,
-                      fftToScales_vorti, dt_adapt]
-else:
-    operators_list = [redistrib,
-                      toDev, advec_scal,
-                      toHost,
-                      advec, advecToGhost_vorti, stretch, baroclinic,
-                      redistrib_scal,
-                      scal_to_rho, penal,
-                      ghostToFFT_velo, ghostToFFT_vorti,
-                      diffusion, poisson,
-                      fftToGhost_velo, fftToScales_velo,
-                      fftToScales_vorti, dt_adapt]
-
-
-# Fields initializations
-if box.is_on_task(TASK_SCALAR):
-    scal.initialize(topo=topo_GPU_scal)
-    advec_dirX = advec_scal.advec_dir[0].discrete_op
-    gpu_scal = advec_dirX.fields_on_grid[0]
-    gpu_pscal = advec_dirX.fields_on_part[gpu_scal][0]
-    if IS_OUTPUT:
-        p_scal.apply(simu)
-if box.is_on_task(TASK_UW):
-    velo.initialize(topo=topo_CPU_velo_ghosts)
-    vorti.initialize(topo=topo_CPU_velo_ghosts)
-    rho.initialize(topo=topo_CPU_velo_ghosts)
-    bc_b.discretize(topo_CPU_velo_ghosts)
-    bc_t.discretize(topo_CPU_velo_ghosts)
-    penal.apply(simu)
-    ghostToFFT_vorti.apply(simu)
-    ghostToFFT_vorti.wait()
-    ghostToFFT_velo.apply(simu)
-    ghostToFFT_velo.wait()
-    fftToScales_vorti.apply(simu)
-    fftToScales_vorti.wait()
-    fftToScales_velo.apply(simu)
-    fftToScales_velo.wait()
-    baroclinic.initialize_velocity()
-    if IS_OUTPUT:
-        p_velo.apply(simu)
-
-simu.initialize()
-setup_time = MPI.Wtime() - ctime
-main_comm.Barrier()
-
-# Solve
-solve_time = FProfiler("Solve")
-while not simu.isOver:
-    ctime = MPI.Wtime()
-    if main_rank == 0:
-        simu.printState()
-    for op in operators_list:
-        if box.is_on_task(op.task_id()):
-            op.apply(simu)
-    if box.is_on_task(TASK_SCALAR):
-        # Wait gpu operations on scalar
-        advec_scal.advec_dir[0].discreteFields[scal].wait()
-    solve_time += MPI.Wtime() - ctime
-    dt_adapt.wait()
-    # Synchronize threads
-    main_comm.Barrier()
-    simu.advance()
-
-main_comm.Barrier()
-nb_ite = simu.current_iteration
-simu.finalize()
-
-
-if IS_OUTPUT:
-    if box.is_on_task(TASK_SCALAR) and \
-       simu.time != p_scal._last_written_time:
-        p_scal.apply(simu)
-    if box.is_on_task(TASK_UW) and \
-       simu.time != p_velo._last_written_time:
-        p_velo.apply(simu)
-
-prof = Profiler(None, box.comm_task)
-prof += solve_time
-for op in operators_list:
-    if box.is_on_task(op.task_id()):
-        op.finalize()
-        prof += op.profiler
-for v in (velo, vorti, rho, scal):
-    prof += v.profiler
-prof.summarize()
-
-for i in xrange(main_size):
-    if i == main_rank:
-        print "Solving Time:", solve_time
-        print prof
-    main_comm.Barrier()
-
-velo.finalize()
-if box.is_on_task(TASK_SCALAR):
-    scal.finalize()
-if box.is_on_task(TASK_UW):
-    vorti.finalize()
-main_comm.Barrier()
diff --git a/trashed_examples/RMI/create_random_arrays.py b/trashed_examples/RMI/create_random_arrays.py
deleted file mode 100644
index cb68fc081cc057b807251de4de8231bb6ecdf601..0000000000000000000000000000000000000000
--- a/trashed_examples/RMI/create_random_arrays.py
+++ /dev/null
@@ -1,39 +0,0 @@
-import os
-import numpy as np
-from hysop.constants import HYSOP_REAL, ORDER
-
-
-def random_init(shape, mpi_comm):
-    # Create a folder to store all random arrays
-    d = 'rand_init'
-    if mpi_comm.Get_rank() == 0:
-        if not os.path.exists(d):
-            os.makedirs(d)
-    mpi_comm.Barrier()
-    file_name = "{0}_{1}_{2}".format(*shape)
-    file_name += "_{0}p_{1}.dat".format(mpi_comm.Get_size(),
-                                        mpi_comm.Get_rank())
-    try:
-        randX = np.asarray(
-            np.reshape(np.fromfile(os.path.join(d, 'randX_' + file_name),
-                                   dtype=HYSOP_REAL), shape),
-            dtype=HYSOP_REAL, order=ORDER)
-        randY = np.asarray(
-            np.reshape(np.fromfile(os.path.join(d, 'randY_' + file_name),
-                                   dtype=HYSOP_REAL), shape),
-            dtype=HYSOP_REAL, order=ORDER)
-        randZ = np.asarray(
-            np.reshape(np.fromfile(os.path.join(d, 'randZ_' + file_name),
-                                   dtype=HYSOP_REAL), shape),
-            dtype=HYSOP_REAL, order=ORDER)
-    except IOError:
-        randX = np.asarray(np.random.random(shape),
-                           dtype=HYSOP_REAL, order=ORDER) - 0.5
-        randY = np.asarray(np.random.random(shape),
-                           dtype=HYSOP_REAL, order=ORDER) - 0.5
-        randZ = np.asarray(np.random.random(shape),
-                           dtype=HYSOP_REAL, order=ORDER) - 0.5
-        randX.tofile(os.path.join(d, 'randX_' + file_name))
-        randY.tofile(os.path.join(d, 'randY_' + file_name))
-        randZ.tofile(os.path.join(d, 'randZ_' + file_name))
-    return randX, randY, randZ
diff --git a/trashed_examples/TaylorGreen/TaylorGreen3D.py b/trashed_examples/TaylorGreen/TaylorGreen3D.py
deleted file mode 100755
index 93684a350cff22237d9be777b37c5476fbf48a52..0000000000000000000000000000000000000000
--- a/trashed_examples/TaylorGreen/TaylorGreen3D.py
+++ /dev/null
@@ -1,260 +0,0 @@
-#!/usr/bin/python
-
-"""
-Taylor Green 3D : see paper van Rees 2011.
-
-All parameters are set and defined in python module dataTG.
-
-"""
-
-import parmepy as pp
-from parmepy.f2py import fftw2py
-import numpy as np
-from parmepy.fields.continuous import Field
-from parmepy.fields.variable_parameter import VariableParameter
-from parmepy.mpi.topology import Cartesian
-from parmepy.operator.advection import Advection
-from parmepy.operator.stretching import Stretching
-from parmepy.operator.poisson import Poisson
-from parmepy.operator.diffusion import Diffusion
-from parmepy.operator.adapt_timestep import AdaptTimeStep
-from parmepy.operator.redistribute import Redistribute
-from parmepy.problem.navier_stokes import NSProblem
-from parmepy.operator.monitors.printer import Printer
-from parmepy.operator.monitors.energy_enstrophy import Energy_enstrophy
-from parmepy.operator.monitors.reprojection_criterion import Reprojection_criterion
-from parmepy.problem.simulation import Simulation
-from parmepy.constants import VTK, HDF5
-from parmepy.methods_keys import Scales, TimeIntegrator, Interpolation,\
-    Remesh, Support, Splitting, dtCrit, SpaceDiscretisation
-from parmepy.numerics.integrators.runge_kutta2 import RK2 as RK2
-from parmepy.numerics.integrators.runge_kutta3 import RK3 as RK3
-from parmepy.numerics.integrators.runge_kutta4 import RK4 as RK4
-from parmepy.numerics.finite_differences import FD_C_4, FD_C_2
-from parmepy.numerics.interpolation import Linear
-from parmepy.numerics.remeshing import L6_4 as rmsh
-
-## ----------- A 3d problem -----------
-print " ========= Start Navier-Stokes 3D (Taylor Green benchmark) ========="
-## pi constant
-pi = np.pi
-cos = np.cos
-sin = np.sin
-
-## constants
-VISCOSITY = 1. / 1600.
-
-## Domain
-dim = 3
-box = pp.Box(dim, length=[2.0 * pi, 2.0 * pi, 2.0 * pi])
-
-## Global resolution
-nb = 33
-nbElem = [nb] * dim
-
-## Function to compute TG velocity
-def computeVel(res, x, y, z, t):
-    res[0][...] = sin(x) * cos(y) * cos(z)
-    res[1][...] = - cos(x) * sin(y) * cos(z)
-    res[2][...] = 0.
-    return res
-
-
-## Function to compute reference vorticity
-def computeVort(res, x, y, z, t):
-    res[0][...] = - cos(x) * sin(y) * sin(z)
-    res[1][...] = - sin(x) * cos(y) * sin(z)
-    res[2][...] = 2. * sin(x) * sin(y) * cos(z)
-    return res
-
-## Fields
-velo = Field(domain=box, formula=computeVel,
-             name='Velocity', is_vector=True)
-vorti = Field(domain=box, formula=computeVort,
-              name='Vorticity', is_vector=True)
-
-## Parameter Variable (adaptative timestep)
-data = {'dt': 0.01}
-dt_adapt = VariableParameter(data)
-
-## Usual Cartesian topology definition
-# At the moment we use two (or three?) topologies :
-# - "topo" for Stretching and all operators based on finite differences.
-#    --> ghost layer = 2
-# - topo from Advection operator for all the other operators.
-#    --> no ghost layer
-# - topo from fftw for Poisson and Diffusion.
-# Todo : check compat between scales and fft operators topologies.
-NBGHOSTS = 2
-ghosts = [NBGHOSTS] * box.dimension
-topo = Cartesian(box, box.dimension, nbElem, ghosts=ghosts)
-
-## Tools Operators
-# Adaptative timestep method : dt = min(values(dtCrit))
-# where dtCrit is a list of criterions on which the computation 
-# of the adaptative time step is based
-# ex : dtCrit = ['gradU', 'cfl', 'stretch'], means :
-# dt = min (dtAdv, dtCfl, dtStretch), where dtAdv is equal to LCFL / |gradU|
-# For dtAdv, the possible choices are the following:
-# 'vort' (infinite norm of vorticity) : dtAdv = LCFL / |vort|
-# 'gradU' (infinite norm of velocity gradient), dtAdv = LCFL / |gradU|
-# 'deform' (infinite norm of deformation tensor), dtAdv = LCFL / (0.5(gradU + gradU^T))
-dtAdapt = AdaptTimeStep(velo, vorti,
-                        resolutions={velo: nbElem,
-                                     vorti: nbElem},
-                        dt_adapt=dt_adapt,
-                        method={TimeIntegrator: RK3, 
-                                SpaceDiscretisation: FD_C_4, 
-                                dtCrit: ['deform', 'cfl', 'stretch']},
-                        topo=topo,
-                        io_params={},
-                        lcfl=0.125,
-                        cfl=0.5)
-
-## Navier Stokes Operators
-advec = Advection(velo, vorti,
-                  resolutions={velo: nbElem,
-                               vorti: nbElem},
-                  method={Scales: 'p_M6', Splitting: 'classic'} # Scales advection
-#                  method={TimeIntegrator: RK2,
-#                          Interpolation: Linear,
-#                          Remesh: rmsh,
-#                          Support: '',
-#                          Splitting: 'o2_FullHalf'} # pure Python advection
-                  )
-
-stretch = Stretching(velo, vorti,
-                     resolutions={velo: nbElem,
-                                  vorti: nbElem},
-                     topo=topo
-                     )
-
-diffusion = Diffusion(vorti,
-                      resolution=nbElem,
-                      viscosity=VISCOSITY
-                     )
-
-poisson = Poisson(velo, vorti,
-                  resolutions={velo: nbElem,
-                               vorti: nbElem}
-                 )
-
-#-----------------------------
-# Topology without ghost points
-poisson.discretize()
-topofft = poisson.discreteFields[poisson.vorticity].topology
-#-----------------------------
-
-# Bridges between the different topologies in order to
-# redistribute data.
-# 1 -Advection to stretching
-distrAdvStr_vorti = Redistribute([vorti], advec, stretch)
-distrPoiStr_velo = Redistribute([velo], poisson, stretch)
-# 2 - Stretching to Poisson/Diffusion
-distrStrDiff = Redistribute([vorti], stretch, diffusion)
-# 3 - Poisson to TimeStep
-distrPoissTimeStep = Redistribute([velo, vorti], poisson, dtAdapt)
-
-## Simulation
-simu = Simulation(start=0.0,
-                  end=10.0, 
-                  time_step=dt_adapt['dt'],
-                  max_iter=1000000)
-
-#  Define the problem to solve
-## With adaptative time step
-pb = NSProblem(operators=[distrPoiStr_velo, 
-                          advec, 
-                          distrAdvStr_vorti,
-                          stretch,
-                          distrStrDiff,
-                          diffusion,
-                          poisson,
-                          distrPoissTimeStep,
-                          dtAdapt],
-               simulation=simu, dumpFreq=-1)
-
-## With fixed time step
-#pb = NSProblem(operators=[distrPoiStr_velo, 
-#                          advec,
-#                          distrAdvStr_vorti,
-#                          stretch,
-#                          distrStrDiff,
-#                          diffusion,
-#                          poisson],
-#               simulation=simu, dumpFreq=-1)
-
-## Diagnostics related to the problem
-printer = Printer(variables=[velo, vorti],
-                  topo=topofft,
-                  frequency=100,
-                  formattype=HDF5,
-                  prefix='TG_io',
-                  xmfalways=True)
-
-energy = Energy_enstrophy(velo, vorti,
-                          viscosity=VISCOSITY,
-                          isNormalized=True ,
-                          topo=topofft,
-                          io_params={})
-
-reproj = Reprojection_criterion(vorti, 
-                                reproj_cst=0.04, 
-                                reprojRate=1,
-                                topo=topofft, 
-                                checkCriterion=False,
-                                io_params={})
-
-## Add the reprojection of vorticity field (i.e div(vort)=0) for poisson operator
-poisson.activateProjection(reproj)
-
-## Setting solver to Problem (only operators for computational tasks)
-pb.pre_setUp()
-
-## Initializations on required topos + add monitors to problem + setUp
-vorti.setTopoInit(topofft)
-velo.setTopoInit(topofft)
-pb.addMonitors([energy, printer])
-pb.setUp()
-
-printer.apply(simu)
-## Time loop
-def run():
-# =====Automatic problem launch=====
-    pb.solve()
-# =====Manually problem launch=====
-#    print "\n\n Start solving ..."
-#    simu.initialize()
-#    while not simu.isOver:
-#        distrPoiStr_velo.apply(simu)
-#        subSimu = Simulation(start=simu.tk, end=10., time_step=simu.time_step/2.0,
-#                             max_iter=2)
-#        subSimu.initialize()
-#        print '=============='
-#        simu.printState()
-#        while not subSimu.isOver:
-#            subSimu.printState()
-#            advec.apply(subSimu)
-#            poisson.apply(subSimu)
-#            subSimu.advance()
-#        distrAdvStr_vorti.apply(simu)
-#        stretch.apply(simu)
-#        distrStrDiff.apply(simu)
-#        diffusion.apply(simu)
-#        poisson.apply(simu)
-#        distrPoissTimeStep.apply(simu)
-#        dtAdapt.apply(simu)
-#        energy.apply(simu)
-##        reproj.apply(simu)
-#        simu.advance()
-
-## Solve problem
-from parmepy.mpi import MPI
-print "Start computation ..."
-time = MPI.Wtime()
-run()
-pb.finalize()
-print 'total time (rank):', MPI.Wtime() - time, '(', topo.rank, ')'
-
-## Clean memory buffers
-#fftw2py.clean_fftw_solver(box.dimension)
diff --git a/trashed_examples/TaylorGreen/TaylorGreen3D_GPU.py b/trashed_examples/TaylorGreen/TaylorGreen3D_GPU.py
deleted file mode 100644
index d10a69eb59e10efcd2d42f850ed6573364ed625d..0000000000000000000000000000000000000000
--- a/trashed_examples/TaylorGreen/TaylorGreen3D_GPU.py
+++ /dev/null
@@ -1,257 +0,0 @@
-#!/usr/bin/python
-
-"""
-Taylor Green 3D : see paper van Rees 2011.
-
-All parameters are set and defined in python module dataTG.
-
-"""
-
-import parmepy as pp
-from parmepy.f2py import fftw2py
-import numpy as np
-from parmepy.fields.continuous import Field
-from parmepy.variables.variables import Variables
-from parmepy.mpi.topology import Cartesian
-from parmepy.operator.advection import Advection
-from parmepy.operator.stretching import Stretching
-from parmepy.operator.adapt_timestep import AdaptTimeStep
-from parmepy.operator.poisson import Poisson
-from parmepy.operator.diffusion import Diffusion
-from parmepy.operator.redistribute import Redistribute
-from parmepy.problem.navier_stokes import NSProblem
-from parmepy.operator.monitors.energy_enstrophy import Energy_enstrophy
-from parmepy.operator.monitors.reprojection_criterion import Reprojection_criterion
-from parmepy.methods_keys import TimeIntegrator, Interpolation, Remesh,\
-    Support, Splitting, dtCrit, SpaceDiscretisation
-from parmepy.numerics.integrators.runge_kutta2 import RK2
-from parmepy.numerics.integrators.runge_kutta3 import RK3
-from parmepy.numerics.finite_differences import FD_C_4, FD_C_2
-from parmepy.numerics.interpolation import Linear
-from parmepy.numerics.remeshing import L4_2 as rmsh
-from parmepy.methods_keys import Scales
-from parmepy.problem.simulation import Simulation
-
-# problem dimension
-dim = 3
-# resolution
-nb = 65
-# number of ghosts in usual cartesian topo
-NBGHOSTS = 2
-TIMESTEP_METHOD = {TimeIntegrator: RK3, SpaceDiscretisation: FD_C_4,
-                   dtCrit: 'deform'}
-# Lagrangian CFL for adaptative timestep
-LCFL = 0.125
-# CFL (if CFL is None, no CFL condition is taken into account
-# in the computation of adaptative timesteps)
-CFL = 0.5
-# Advection method
-#ADVECTION_METHOD = {Scales: 'p_M6'
-ADVECTION_METHOD = {TimeIntegrator: RK2,
-                    Interpolation: Linear,
-                    Remesh: rmsh,
-                    Support: 'gpu_1k',
-                    Splitting: 'o2_FullHalf'}
-#
-VISCOSITY = 1. / 1600.
-# reprojection criterion
-REPROJ_CST = 0.04
-# Vorticity projection (list of 3 elements):
-# - the 1st element determines if the reprojection is needed
-# - the 2nd element gives the reprojection frequency (nber of iterations)
-# if the 1st element is True
-# - the 3rd element indicates if the reprojection frequency given before
-# can be reduced to satisfy the reprojection criterion
-# (governed by the reprojection constant given above)
-# ex1 : PROJ = [True, 10, False] means that the reprojection
-# of vorticty will strictly be applied every 10 iterations,
-# regardless of reprojection criterion
-# ex2 : PROJ = [True, 25, True] means that the reprojection
-# of vorticty will be applied by default every 25 iterations
-# AND also when the reprojection criterion won't be satisfied
-PROJ = [False, 1, False]
-
-# reprojection method
-REPROJ_METHOD = {SpaceDiscretisation: FD_C_4}
-
-# post treatment ...
-OUTPUT_FREQ = 1
-FILENAME = './res/energy_GPU.dat'
-
-## ----------- A 3d problem -----------
-print " ========= Start Navier-Stokes 3D (Taylor Green benchmark) ========="
-## pi constant
-pi = np.pi
-cos = np.cos
-sin = np.sin
-## Domain
-box = pp.Box(dim, length=[2.0 * pi, 2.0 * pi, 2.0 * pi])
-
-## Global resolution
-nbElem = [nb] * dim
-
-
-## Function to compute TG velocity
-def computeVel(res, x, y, z, t):
-    res[0][...] = sin(x) * cos(y) * cos(z)
-    res[1][...] = - cos(x) * sin(y) * cos(z)
-    res[2][...] = 0.
-    return res
-
-
-## Function to compute reference vorticity
-def computeVort(res, x, y, z, t):
-    res[0][...] = - cos(x) * sin(y) * sin(z)
-    res[1][...] = - sin(x) * cos(y) * sin(z)
-    res[2][...] = 2. * sin(x) * sin(y) * cos(z)
-    return res
-
-## Fields
-velo = Field(domain=box, formula=computeVel,
-             name='Velocity', is_vector=True)
-vorti = Field(domain=box, formula=computeVort,
-              name='Vorticity', is_vector=True)
-
-## Variables
-dt_adapt = Variables(domain=box, name='adaptative time step',
-                     data=[0.01])
-
-need_reproj = Variables(domain=box, name='vorticity projection frequency',
-                        data=PROJ)
-
-## Usual Cartesian topology definition
-# At the moment we use two (or three?) topologies :
-# - "topo" for Stretching and all operators based on finite differences.
-#    --> ghost layer = 2
-# - topo from Advection operator for all the other operators.
-#    --> no ghost layer
-# - topo from fftw for Poisson and Diffusion.
-# Todo : check compat between scales and fft operators topologies.
-ghosts = [NBGHOSTS] * box.dimension
-topo = Cartesian(box, box.dimension, nbElem,
-                 ghosts=ghosts)
-
-## Tools Operators
-dtAdapt = AdaptTimeStep(velo, vorti,
-                        resolutions={velo: nbElem,
-                                     vorti: nbElem},
-                        dt_adapt=dt_adapt,
-                        method=TIMESTEP_METHOD,
-                        topo=topo,
-                        lcfl=LCFL,
-                        cfl=CFL)
-
-## Operators
-advec = Advection(velo, vorti,
-                  resolutions={velo: nbElem,
-                               vorti: nbElem},
-                  method=ADVECTION_METHOD,
-                  )
-advec.discretize()
-
-stretch = Stretching(velo, vorti,
-                     resolutions={velo: nbElem,
-                                  vorti: nbElem},
-                     topo=topo
-                     )
-
-diffusion = Diffusion(vorti,
-                      resolution=nbElem,
-                      viscosity=VISCOSITY
-                      )
-
-poisson = Poisson(velo, vorti,
-                  resolutions={velo: nbElem,
-                               vorti: nbElem},
-                  projection=need_reproj)
-
-# Bridges between the different topologies in order to
-# redistribute data.
-# 1 -Advection to stretching
-# 1.1 -Advection output is vorticity on gpu
-vorti_advec_stretch = Redistribute([vorti], advec, stretch)
-# 1.2 -Streching input is already uptodate on cpu from Poisson output
-velo_poisson_stretch = Redistribute([velo], poisson, stretch)
-# 2 -Stretching to Poisson/Diffusion
-vorti_stretch_diffusion = Redistribute([vorti], stretch, diffusion)
-# 3 -Advection input to GPU from Poisson/Diffusion
-vorti_diff_advec = Redistribute([vorti], diffusion, advec)
-velo_diff_advec_X = Redistribute([velo], poisson, advec.advecDir[0],
-                                 component=0)
-velo_diff_advec_Y = Redistribute([velo], poisson, advec.advecDir[1],
-                                 component=1)
-velo_diff_advec_Z = Redistribute([velo], poisson, advec.advecDir[2],
-                                 component=2)
-# 4 - Poisson to TimeStep
-distrPoissTimeStep = Redistribute([velo, vorti], poisson, dtAdapt)
-
-## Simulation
-simu = Simulation(start=0.0,
-                  end=10., time_step=dt_adapt,
-                  max_iter=1000000)
-##  Define the problem to solve
-pb = NSProblem(operators=[velo_poisson_stretch,
-                          advec,
-                          vorti_advec_stretch,
-                          stretch,
-                          vorti_stretch_diffusion,
-                          diffusion,
-                          vorti_diff_advec,
-                          poisson,
-                          distrPoissTimeStep,
-                          velo_diff_advec_X,
-                          velo_diff_advec_Y,
-                          velo_diff_advec_Z,
-                          dtAdapt
-                          ],
-               simulation=simu, dumpFreq=-1)
-
-## Setting solver to Problem (only operators for computational tasks)
-pb.pre_setUp()
-## Diagnostics related to the problem
-topofft = poisson.discreteFields[poisson.vorticity].topology
-energy = Energy_enstrophy(velo, vorti,
-                          topo=topofft,
-                          viscosity=VISCOSITY,
-                          frequency=OUTPUT_FREQ,
-                          prefix=FILENAME)
-
-reproj = Reprojection_criterion(vorti,
-                                need_reproj=need_reproj,
-                                reproj_cst=REPROJ_CST,
-                                topo=topo,
-                                frequency=OUTPUT_FREQ,
-                                method=REPROJ_METHOD,
-                                prefix=None)
-
-## printer = Printer(variables=[velo, vorti],
-##                   topo=topo,
-##                   frequency=500,
-##                   prefix='./res/TG_',
-##                   ext='.vtk')
-vorti.setTopoInit(topofft)
-velo.setTopoInit(topofft)
-pb.addMonitors([energy])
-pb.setUp()
-advec.advecDir[0].discreteOperator.velocity.wait()
-velo_diff_advec_Z.apply()
-
-# from parmepy.tools.problem2dot import toDot
-# toDot(pb,filename='graph.dot')
-
-def run():
-    pb.solve()
-
-## Solve problem
-from parmepy.mpi import MPI
-print "Start computation ..."
-time = MPI.Wtime()
-run()
-print 'total time (rank):', MPI.Wtime() - time, '(', topo.rank, ')'
-
-pb.finalize()
-
-# advec.finalize()
-# print pb.timer
-# ## Clean memory buffers
-fftw2py.clean_fftw_solver(box.dimension)
diff --git a/trashed_examples/TaylorGreen/TaylorGreen3D_debug.py b/trashed_examples/TaylorGreen/TaylorGreen3D_debug.py
deleted file mode 100644
index 70152cb8ec41f399e4fa4c32167317b9005f6664..0000000000000000000000000000000000000000
--- a/trashed_examples/TaylorGreen/TaylorGreen3D_debug.py
+++ /dev/null
@@ -1,277 +0,0 @@
-#!/usr/bin/python
-
-"""
-Taylor Green 3D : see paper van Rees 2011.
-
-All parameters are set and defined in python module dataTG.
-
-"""
-
-from hysop import Box
-from hysop.f2py import fftw2py
-import numpy as np
-from hysop.fields.continuous import Field
-from hysop.operator.advection import Advection
-from hysop.operator.stretching import Stretching
-from hysop.operator.poisson import Poisson
-from hysop.operator.diffusion import Diffusion
-from hysop.operator.adapt_timestep import AdaptTimeStep
-from hysop.operator.redistribute_intra import RedistributeIntra
-from hysop.operator.hdf_io import HDF_Writer
-from hysop.operator.energy_enstrophy import EnergyEnstrophy
-from hysop.problem.simulation import Simulation
-from hysop.methods_keys import Scales, TimeIntegrator,\
-    Splitting, dtCrit, SpaceDiscretisation
-from hysop.numerics.integrators.runge_kutta3 import RK3 as RK3
-from hysop.numerics.finite_differences import FD_C_4
-from hysop.mpi import main_rank, MPI
-from hysop.tools.parameters import Discretization, IOParams
-
-print " ========= Start Navier-Stokes 3D (Taylor Green benchmark) ========="
-
-# ====== pi constant and trigonometric functions ======
-pi = np.pi
-cos = np.cos
-sin = np.sin
-
-VISCOSITY = 1. / 1600.
-# ======= Domain =======
-dim = 3
-Nx = Ny = Nz = 257
-g = 2
-box = Box(length=[2.0 * pi, 2.0 * pi, 2.0 * pi])
-
-# A global discretization with ghost points
-d3dg = Discretization([Nx, Ny, Nz], [g, g, g])
-# A global discretization, without ghost points
-d3d = Discretization([Nx, Ny, Nz])
-# Default topology (i.e. 3D, with ghosts)
-topo_with_ghosts = box.create_topology(d3dg)
-
-# ======= Function to compute TG velocity =======
-def computeVel(res, x, y, z, t):
-    res[0][...] = sin(x) * cos(y) * cos(z)
-    res[1][...] = - cos(x) * sin(y) * cos(z)
-    res[2][...] = 0.
-    return res
-
-# ======= Function to compute reference vorticity =======
-def computeVort(res, x, y, z, t):
-    res[0][...] = - cos(x) * sin(y) * sin(z)
-    res[1][...] = - sin(x) * cos(y) * sin(z)
-    res[2][...] = 2. * sin(x) * sin(y) * cos(z)
-    return res
-
-# ======= Fields =======
-velo = Field(domain=box, formula=computeVel,
-             name='Velocity', is_vector=True)
-vorti = Field(domain=box, formula=computeVort,
-              name='Vorticity', is_vector=True)
-
-
-# ========= Simulation setup =========
-simu = Simulation(start=0.0, end=10.0, time_step=0.0125, max_iter=10000000)
-
-# Adaptative timestep method : dt = min(values(dtCrit))
-# where dtCrit is a list of criterions on which the computation
-# of the adaptative time step is based
-# ex : dtCrit = ['gradU', 'cfl', 'stretch'], means :
-# dt = min (dtAdv, dtCfl, dtStretch), where dtAdv is equal to LCFL / |gradU|
-# For dtAdv, the possible choices are the following:
-# 'vort' (infinite norm of vorticity) : dtAdv = LCFL / |vort|
-# 'gradU' (infinite norm of velocity gradient), dtAdv = LCFL / |gradU|
-# 'deform' (infinite norm of deformation tensor),
-# dtAdv = LCFL / (0.5(gradU + gradU^T))
-op = {}
-iop = IOParams("time_step")
-
-op['dtAdapt'] = AdaptTimeStep(velo, vorti, simulation=simu,
-                              discretization=topo_with_ghosts,
-                              method={TimeIntegrator: RK3,
-                                      SpaceDiscretisation: FD_C_4,
-                                      dtCrit: ['deform', 'cfl', 'stretch']},
-                              io_params=iop,
-                              lcfl=0.125,
-                              cfl=0.5)
-
-op['advection'] = Advection(velo, vorti,
-                            discretization=d3d,
-                            method={Scales: 'p_M6',
-                                    Splitting: 'classic'}
-                            )
-
-op['stretching'] = Stretching(velo, vorti,
-                              discretization=topo_with_ghosts)
-
-op['diffusion'] = Diffusion(viscosity=VISCOSITY, vorticity=vorti,
-                            discretization=d3d)
-
-op['poisson'] = Poisson(velo, vorti, discretization=d3d, projection=1)
-
-# ===== Discretization of computational operators ======
-for ope in op.values():
-    ope.discretize()
-
-topofft = op['poisson'].discreteFields[vorti].topology
-topoadvec = op['advection'].discreteFields[vorti].topology
-
-# ==== Operators to map data between the different computational operators ===
-# (i.e. between topologies)
-distr = {}
-distr['adv2str'] = RedistributeIntra(source=op['advection'],
-                                     target=op['stretching'],
-                                     variables=[velo, vorti])
-distr['str2adv'] = RedistributeIntra(source=op['stretching'],
-                                     target=op['advection'],
-                                     variables=[velo, vorti])
-distr['fft2str'] = RedistributeIntra(source=op['poisson'],
-                                     target=op['stretching'],
-                                     variables=[velo])
-distr['fft2str2'] = RedistributeIntra(source=op['poisson'],
-                                     target=op['stretching'],
-                                     variables=[velo, vorti])
-distr['str2fft'] = RedistributeIntra(source=op['stretching'],
-                                     target=op['diffusion'],
-                                     variables=[vorti])
-# ## ========= Monitoring operators =========
-
-iop = IOParams('TG_io', frequency=200)
-writer = HDF_Writer(variables={velo: topofft, vorti: topofft},
-                               io_params=iop)
-
-io_ener=IOParams('energy_enstrophy')
-energy = EnergyEnstrophy(velo, vorti, discretization=topofft,
-                         io_params=io_ener)
-writer.discretize()
-energy.discretize()
-
-# ========= Setup for all declared operators =========
-time_setup = MPI.Wtime()
-for ope in op.values():
-    ope.setup()
-for ope in distr.values():
-    ope.setup()
-writer.setup()
-energy.setup()
-
-print '[', main_rank, '] total time for setup:', MPI.Wtime() - time_setup
-
-# ========= Fields initialization =========
-# - initialize velo + vort on topostr
-# - penalize vorticity
-# - redistribute topostr --> topofft
-
-time_init = MPI.Wtime()
-
-
-def initFields_mode1():
-    velo.initialize(topo=topoadvec)
-    vorti.initialize(topo=topoadvec)
-
-
-initFields_mode1()
-print '[', main_rank, '] total time for init :', MPI.Wtime() - time_init
-
-fullseq = []
-
-
-def run(sequence):
-    op['advection'].apply(simu)
-#    print 'enstrophy 1'
-#    energy.apply(simu)
-    distr['adv2str'].apply(simu)
-    distr['adv2str'].wait()
-    op['stretching'].apply(simu)
-    distr['str2fft'].apply(simu)
-    distr['str2fft'].wait()
-#    print 'enstrophy 2'
-#    energy.apply(simu)
-    op['diffusion'].apply(simu)
-    op['poisson'].apply(simu)
-    energy.apply(simu)
-    writer.apply(simu)
-    distr['fft2str2'].apply(simu)
-    distr['fft2str2'].wait()
-    op['dtAdapt'].apply(simu)
-    op['dtAdapt'].wait()
-    distr['str2adv'].apply(simu)
-    distr['str2adv'].wait()
-
-# ==== Serialize some data of the problem to a "restart" file ====
-# def dump(filename):
-#      """
-#      Serialize some data of the problem to file
-#      (only data required for a proper restart, namely fields in self.input
-#      and simulation).
-#      @param filename : prefix for output file. Real name = filename_rk_N,
-#      N being current process number. If None use default value from problem
-#      parameters (self.filename)
-#      """
-#      if filename is not None:
-#          filename = filename
-#         filedump = filename + '_rk_' + str(main_rank)
-#     db = parmesPickle(filedump, mode='store')
-#     db.dump(simu, 'simulation')
-#     velo.dump(filename, mode='append')
-#     vorti.dump(filename, mode='append')
-
-# ## ====== Load some data of the problem from a "restart" file ======
-# def restart(filename):
-#     """
-#     Load serialized data to restart from a previous state.
-#     self.input variables and simulation are loaded.
-#     @param  filename : prefix for downloaded file.
-#     Real name = filename_rk_N, N being current process number.
-#     If None use default value from problem
-#     parameters (self.filename)
-#     """
-#     if filename is not None:
-#         filename = filename
-#         filedump = filename + '_rk_' + str(main_rank)
-#     db = parmesPickle(filedump, mode='load')
-#     simu = db.load('simulation')[0]
-#     simu.start = simu.time - simu.time_step
-#     ite = simu.current_iteration
-#     simu.initialize()
-#     simu.current_iteration = ite
-#     print 'simu', simu
-#     print ("load ...", filename)
-#     velo.load(filename)
-#     vorti.load(filename)
-#     return simu
-
-seq = fullseq
-
-simu.initialize()
-#doDump = False
-#doRestart = False
-#dumpFreq = 5000
-#io_default={"filename":'restart'}
-#dump_filename = io.Writer(params=io_default).filename
-#===== Restart (if needed) =====
-# if doRestart:
-#     simu = restart(dump_filename)
-#     # Set up for monitors and redistribute
-#     for ope in distr.values():
-#         ope.setUp()
-#     for monit in monitors.values():
-#         monit.setUp()
-
-# ======= Time loop =======
-time_run = MPI.Wtime()
-while not simu.isOver:
-    if topofft.rank == 0:
-        simu.printState()
-    run(seq)
-    simu.advance()
-# #     testdump = simu.current_iteration % dumpFreq is 0
-# #     if doDump and testdump:
-# #         dump(dump_filename)
-print '[', main_rank, '] total time for run :', MPI.Wtime() - time_run
-
-# ======= Finalize =======
-fftw2py.clean_fftw_solver(box.dimension)
-for ope in distr.values():
-    ope.finalize()
-writer.finalize()
-energy.finalize()
diff --git a/trashed_examples/TaylorGreen/TaylorGreen3D_debug_filter.py b/trashed_examples/TaylorGreen/TaylorGreen3D_debug_filter.py
deleted file mode 100644
index 723db975e2a8024f53488f36cb1ed1e6851371fd..0000000000000000000000000000000000000000
--- a/trashed_examples/TaylorGreen/TaylorGreen3D_debug_filter.py
+++ /dev/null
@@ -1,299 +0,0 @@
-#!/usr/bin/python
-
-"""
-Taylor Green 3D : see paper van Rees 2011.
-
-All parameters are set and defined in python module dataTG.
-
-"""
-
-from hysop import Box
-from hysop.f2py import fftw2py
-import numpy as np
-from hysop.fields.continuous import Field
-from hysop.operator.advection import Advection
-from hysop.operator.stretching import Stretching
-from hysop.operator.poisson import Poisson
-from hysop.operator.filtering import Filtering
-from hysop.operator.diffusion import Diffusion
-from hysop.operator.adapt_timestep import AdaptTimeStep
-from hysop.operator.redistribute_intra import RedistributeIntra
-from hysop.operator.hdf_io import HDF_Writer
-from hysop.operator.energy_enstrophy import EnergyEnstrophy
-from hysop.problem.simulation import Simulation
-from hysop.methods_keys import Scales, TimeIntegrator,\
-    Splitting, dtCrit, SpaceDiscretisation
-from hysop.numerics.integrators.runge_kutta3 import RK3 as RK3
-from hysop.numerics.finite_differences import FD_C_4
-from hysop.mpi import main_rank, MPI
-from hysop.tools.parameters import Discretization, IOParams
-
-print " ========= Start Navier-Stokes 3D (Taylor Green benchmark) ========="
-
-# ====== pi constant and trigonometric functions ======
-pi = np.pi
-cos = np.cos
-sin = np.sin
-
-VISCOSITY = 1. / 1600.
-# ======= Domain =======
-dim = 3
-Nx = Ny = Nz = 129
-g = 2
-box = Box(length=[2.0 * pi, 2.0 * pi, 2.0 * pi])
-
-# A global discretization with ghost points
-d3dg = Discretization([Nx, Ny, Nz], [g, g, g])
-# A global discretization, without ghost points
-d3d = Discretization([Nx, Ny, Nz])
-# Default topology (i.e. 3D, with ghosts)
-topo_with_ghosts = box.create_topology(d3dg)
-
-# ======= Function to compute TG velocity =======
-def computeVel(res, x, y, z, t):
-    res[0][...] = sin(x) * cos(y) * cos(z)
-    res[1][...] = - cos(x) * sin(y) * cos(z)
-    res[2][...] = 0.
-    return res
-
-# ======= Function to compute reference vorticity =======
-def computeVort(res, x, y, z, t):
-    res[0][...] = - cos(x) * sin(y) * sin(z)
-    res[1][...] = - sin(x) * cos(y) * sin(z)
-    res[2][...] = 2. * sin(x) * sin(y) * cos(z)
-    return res
-
-# ======= Function to compute reference vorticity =======
-def computeVortFilt(res, x, y, z, t):
-    res[0][...] = 0.
-    res[1][...] = 0.
-    res[2][...] = 0.
-    return res
-
-# ======= Fields =======
-velo = Field(domain=box, formula=computeVel,
-             name='Velocity', is_vector=True)
-vorti = Field(domain=box, formula=computeVort,
-              name='Vorticity', is_vector=True)
-vortiFiltered = Field(domain=box, formula=computeVortFilt,
-                      name='VorticityFiltered', is_vector=True)
-
-
-# ========= Simulation setup =========
-simu = Simulation(start=0.0, end=10.0, time_step=0.0125, max_iter=10000000)
-
-# Adaptative timestep method : dt = min(values(dtCrit))
-# where dtCrit is a list of criterions on which the computation
-# of the adaptative time step is based
-# ex : dtCrit = ['gradU', 'cfl', 'stretch'], means :
-# dt = min (dtAdv, dtCfl, dtStretch), where dtAdv is equal to LCFL / |gradU|
-# For dtAdv, the possible choices are the following:
-# 'vort' (infinite norm of vorticity) : dtAdv = LCFL / |vort|
-# 'gradU' (infinite norm of velocity gradient), dtAdv = LCFL / |gradU|
-# 'deform' (infinite norm of deformation tensor),
-# dtAdv = LCFL / (0.5(gradU + gradU^T))
-op = {}
-iop = IOParams("time_step")
-
-op['dtAdapt'] = AdaptTimeStep(velo, vorti, simulation=simu,
-                              discretization=topo_with_ghosts,
-                              method={TimeIntegrator: RK3,
-                                      SpaceDiscretisation: FD_C_4,
-                                      dtCrit: ['deform', 'cfl', 'stretch']},
-                              io_params=iop,
-                              lcfl=0.125,
-                              cfl=0.5)
-
-op['advection'] = Advection(velo, vorti,
-                            discretization=d3d,
-                            method={Scales: 'p_M6',
-                                    Splitting: 'classic'}
-                            )
-
-op['stretching'] = Stretching(velo, vorti,
-                              discretization=topo_with_ghosts)
-
-op['diffusion'] = Diffusion(viscosity=VISCOSITY, vorticity=vorti,
-                            discretization=d3d)
-
-op['poisson'] = Poisson(velo, vorti, discretization=d3d, projection=1)
-
-op['filtering'] = Filtering(vorti, vortiFiltered, discretization=d3d)
-
-# ===== Discretization of computational operators ======
-for ope in op.values():
-    ope.discretize()
-
-topofft = op['poisson'].discreteFields[vorti].topology
-topoadvec = op['advection'].discreteFields[vorti].topology
-
-# ==== Operators to map data between the different computational operators ===
-# (i.e. between topologies)
-distr = {}
-distr['adv2str'] = RedistributeIntra(source=op['advection'],
-                                     target=op['stretching'],
-                                     variables=[velo, vorti])
-distr['str2adv'] = RedistributeIntra(source=op['stretching'],
-                                     target=op['advection'],
-                                     variables=[velo, vorti])
-distr['fft2str'] = RedistributeIntra(source=op['poisson'],
-                                     target=op['stretching'],
-                                     variables=[velo])
-distr['fft2str2'] = RedistributeIntra(source=op['poisson'],
-                                     target=op['stretching'],
-                                     variables=[velo, vorti])
-distr['str2fft'] = RedistributeIntra(source=op['stretching'],
-                                     target=op['diffusion'],
-                                     variables=[vorti])
-# ## ========= Monitoring operators =========
-
-iop = IOParams('TG_io', frequency=100)
-writer = HDF_Writer(variables={velo: topofft, vorti: topofft},
-                               io_params=iop)
-
-io_ener=IOParams('energy_enstrophy')
-energy = EnergyEnstrophy(velo, vorti, discretization=topofft,
-                         io_params=io_ener)
-
-io_enerFilt=IOParams('energy_enstrophy_filt')
-energyFilt = EnergyEnstrophy(velo, vortiFiltered, discretization=topofft,
-                             io_params=io_enerFilt)
-
-writer.discretize()
-energy.discretize()
-energyFilt.discretize()
-
-# ========= Setup for all declared operators =========
-time_setup = MPI.Wtime()
-for ope in op.values():
-    ope.setup()
-for ope in distr.values():
-    ope.setup()
-writer.setup()
-energy.setup()
-energyFilt.setup()
-
-print '[', main_rank, '] total time for setup:', MPI.Wtime() - time_setup
-
-# ========= Fields initialization =========
-# - initialize velo + vort on topostr
-# - penalize vorticity
-# - redistribute topostr --> topofft
-
-time_init = MPI.Wtime()
-
-
-def initFields_mode1():
-    velo.initialize(topo=topoadvec)
-    vorti.initialize(topo=topoadvec)
-    vortiFiltered.initialize(topo=topofft)
-
-
-initFields_mode1()
-print '[', main_rank, '] total time for init :', MPI.Wtime() - time_init
-
-fullseq = []
-
-
-def run(sequence):
-    op['advection'].apply(simu)
-#    print 'enstrophy 1'
-#    energy.apply(simu)
-    distr['adv2str'].apply(simu)
-    distr['adv2str'].wait()
-    op['stretching'].apply(simu)
-    distr['str2fft'].apply(simu)
-    distr['str2fft'].wait()
-#    print 'enstrophy 2'
-#    energy.apply(simu)
-    op['diffusion'].apply(simu)
-    op['poisson'].apply(simu)
-    op['filtering'].apply(simu)
-    energy.apply(simu)
-    energyFilt.apply(simu)
-    distr['fft2str2'].apply(simu)
-    distr['fft2str2'].wait()
-    op['dtAdapt'].apply(simu)
-    op['dtAdapt'].wait()
-    distr['str2adv'].apply(simu)
-    distr['str2adv'].wait()
-
-# ==== Serialize some data of the problem to a "restart" file ====
-# def dump(filename):
-#      """
-#      Serialize some data of the problem to file
-#      (only data required for a proper restart, namely fields in self.input
-#      and simulation).
-#      @param filename : prefix for output file. Real name = filename_rk_N,
-#      N being current process number. If None use default value from problem
-#      parameters (self.filename)
-#      """
-#      if filename is not None:
-#          filename = filename
-#         filedump = filename + '_rk_' + str(main_rank)
-#     db = parmesPickle(filedump, mode='store')
-#     db.dump(simu, 'simulation')
-#     velo.dump(filename, mode='append')
-#     vorti.dump(filename, mode='append')
-
-# ## ====== Load some data of the problem from a "restart" file ======
-# def restart(filename):
-#     """
-#     Load serialized data to restart from a previous state.
-#     self.input variables and simulation are loaded.
-#     @param  filename : prefix for downloaded file.
-#     Real name = filename_rk_N, N being current process number.
-#     If None use default value from problem
-#     parameters (self.filename)
-#     """
-#     if filename is not None:
-#         filename = filename
-#         filedump = filename + '_rk_' + str(main_rank)
-#     db = parmesPickle(filedump, mode='load')
-#     simu = db.load('simulation')[0]
-#     simu.start = simu.time - simu.time_step
-#     ite = simu.current_iteration
-#     simu.initialize()
-#     simu.current_iteration = ite
-#     print 'simu', simu
-#     print ("load ...", filename)
-#     velo.load(filename)
-#     vorti.load(filename)
-#     return simu
-
-seq = fullseq
-
-simu.initialize()
-#doDump = False
-#doRestart = False
-#dumpFreq = 5000
-#io_default={"filename":'restart'}
-#dump_filename = io.Writer(params=io_default).filename
-#===== Restart (if needed) =====
-# if doRestart:
-#     simu = restart(dump_filename)
-#     # Set up for monitors and redistribute
-#     for ope in distr.values():
-#         ope.setUp()
-#     for monit in monitors.values():
-#         monit.setUp()
-
-# ======= Time loop =======
-time_run = MPI.Wtime()
-while not simu.isOver:
-    if topofft.rank == 0:
-        simu.printState()
-    run(seq)
-    simu.advance()
-# #     testdump = simu.current_iteration % dumpFreq is 0
-# #     if doDump and testdump:
-# #         dump(dump_filename)
-print '[', main_rank, '] total time for run :', MPI.Wtime() - time_run
-
-# ======= Finalize =======
-fftw2py.clean_fftw_solver(box.dimension)
-for ope in distr.values():
-    ope.finalize()
-writer.finalize()
-energy.finalize()
-energyFilt.finalize()
diff --git a/trashed_examples/VortexRing3D.py b/trashed_examples/VortexRing3D.py
deleted file mode 100755
index e34900bb0a59d11d37bdbe930ab3fc580cc3aced..0000000000000000000000000000000000000000
--- a/trashed_examples/VortexRing3D.py
+++ /dev/null
@@ -1,135 +0,0 @@
-#!/usr/bin/python
-#import sys
-#sys.path.insert(0,'/scratch/mimeau/install-parmes3/')
-import parmepy as pp
-from parmepy.f2py import fftw2py
-import numpy as np
-import math as m
-from parmepy.mpi.topology import Cartesian
-from parmepy.operator.advection import Advection
-from parmepy.operator.stretching import Stretching
-from parmepy.operator.poisson import Poisson
-from parmepy.operator.diffusion import Diffusion
-from parmepy.operator.redistribute import Redistribute
-from parmepy.problem.navier_stokes import NSProblem
-from parmepy.operator.monitors.printer import Printer
-from parmepy.operator.monitors.energy_enstrophy import Energy_enstrophy
-from dataTG import dim, nb, NBGHOSTS, ADVECTION_METHOD, VISCOSITY, \
-    WITH_PROJ, OUTPUT_FREQ, FILENAME, simu
-
-
-## ----------- A 3d problem -----------
-print " ========= Start Navier-Stokes 3D (Vortex Ring benchmark) ========="
-
-## pi constant
-pi = m.pi
-
-## Domain
-box = pp.Box(dim, length=[2.0 * pi, 2.0 * pi, 2.0 * pi])
-
-## Global resolution
-nbElem = [nb] * dim
-
-
-## Function to compute TG velocity
-def computeVel(x, y, z):
-    vx = 0.
-    vy = 0.
-    vz = 1.
-    return vx, vy, vz
-
-
-## Function to compute reference vorticity
-def computeVort(x, y, z):
-    xc = 6. / 2.
-    yc = 6. / 2.
-    zc = 6. / 6.
-    R = 1.5
-    sigma = R / 3.
-    Gamma = 0.75
-    dist = m.sqrt((x - xc) ** 2 + (y - yc) ** 2)
-    s2 = (z - zc) ** 2 + (dist - R) ** 2
-    wx = 0.
-    wy = 0.
-    wz = 0.
-    if (dist != 0.):
-        cosTheta = (x - xc) / dist
-        sinTheta = (y - yc) / dist
-        wTheta = Gamma / (pi * sigma ** 2) * m.exp(-(s2 / sigma ** 2))
-        wx = - wTheta * sinTheta
-        wy = wTheta * cosTheta
-        wz = 0.
-    return wx, wy, wz
-
-## Fields
-velo = pp.Field(domain=box, formula=computeVel,
-                name='Velocity', is_vector=True)
-vorti = pp.Field(domain=box, formula=computeVort,
-                 name='Vorticity', is_vector=True)
-
-## Usual Cartesian topology definition
-ghosts = np.ones((box.dimension)) * NBGHOSTS
-topo = Cartesian(box, box.dimension, nbElem,
-                 ghosts=ghosts)
-
-## Operators
-advec = Advection(velo, vorti,
-                  resolutions={velo: nbElem,
-                               vorti: nbElem},
-                  method=ADVECTION_METHOD
-                  )
-
-stretch = Stretching(velo, vorti,
-                     resolutions={velo: nbElem,
-                                  vorti: nbElem},
-                     topo=topo
-                     )
-
-diffusion = Diffusion(vorti,
-                      resolution=nbElem,
-                      viscosity=1.0e-6
-                     )
-
-poisson = Poisson(velo, vorti,
-                  resolutions={velo: nbElem,
-                               vorti: nbElem},
-                  projection=WITH_PROJ)
-
-## Diagnostics related to the problem
-
-energy = Energy_enstrophy(velo, vorti,
-                          topo=topo,
-                          viscosity=VISCOSITY,
-                          frequency=OUTPUT_FREQ,
-                          prefix=FILENAME)
-
-printer = Printer(fields=[vorti, velo],
-                  frequency=100,
-                  prefix='./res/vort_ring_',
-                  ext='.vtk')
-
-distrAdvStr = Redistribute([vorti, velo], advec, stretch)
-#distrStrPoiss = Redistribute([vorti, velo], stretch, poisson)
-
-distrStrAdv = Redistribute([vorti, velo], stretch, advec)
-
-## Define the problem to solve
-
-pb = NSProblem(operators=[advec, distrAdvStr, stretch, distrStrPoiss,
-                          diffusion, poisson],
-               simulation=simu, monitors=[printer], dumpFreq=-1)
-
-## Setting solver to Problem
-pb.setUp()
-
-print 'all topologies', box.topologies
-
-
-## Solve problem
-#poisson.apply(simu)
-pb.solve()
-
-## end of time loop ##
-
-# Clean memory buffers
-fftw2py.clean_fftw_solver(box.dimension)
diff --git a/trashed_examples/cpu_plane_jet/cpu_planejet.py b/trashed_examples/cpu_plane_jet/cpu_planejet.py
deleted file mode 100644
index 4ba17a6a7034f75e3dbc53642b093aa11dd87daf..0000000000000000000000000000000000000000
--- a/trashed_examples/cpu_plane_jet/cpu_planejet.py
+++ /dev/null
@@ -1,103 +0,0 @@
-#!/usr/bin/env python
-
-import hysop
-
-from hysop.methods_keys import TimeIntegrator, Interpolation, Remesh,\
-    Support, Splitting, MultiScale, MultiScale, SpaceDiscretisation, \
-    GhostUpdate, Scales, dtCrit, ExtraArgs
-
-from hysop.problem.simulation        import Simulation
-from hysop.fields.variable_parameter import VariableParameter
-
-from hysop.numerics.odesolvers import RK2 as RK2
-from hysop.numerics.odesolvers import RK3 as RK3
-from hysop.numerics.odesolvers import RK4 as RK4
-
-from hysop.numerics.finite_differences import FD_C_4
-from hysop.numerics.remeshing import L6_4 as remesh_formula
-from hysop.numerics.remeshing import Linear
-
-from hysop.operator.advection import Advection
-from hysop.operator.diffusion import Diffusion
-from hysop.operator.stretching import Stretching
-from hysop.operator.poisson import Poisson
-from hysop.operator.adapt_timestep import AdaptTimeStep
-
-from hysop.operator.redistribute_inter import RedistributeInter
-from hysop.operator.redistribute_intra import RedistributeIntra
-
-from hysop.gpu.gpu_transfer import DataTransfer
-from hysop.operator.hdf_io  import HDF_Writer
-
-import utilities as utils
-
-
-## Simulation parameters
-dim = 3
-N   = 1<<8 + 1
-nb_elems = [N]*dim
-
-## Physical parameters:
-# Flow viscosity
-viscosity = 1e-4
-
-## I/O parameters
-dt_output    = IOParams(frequency=out_freq, filename='cpu_dt.dat',    fileformat=ASCII)
-velo_output  = IOParams(frequency=out_freq, filename='cpu_velo.dat',  fileformat=HDF5)
-vorti_output = IOParams(frequency=out_freq, filename='cpu_vorti.dat', fileformat=HDF5)
-
-
-# Domain
-box = hysop.Box(length=[1.0]*dim, 
-                origin=[0.0]*dim,
-                bc=PERIODIC)
-
-# Fields
-velo  = hysop.Field(domain=box, formula=utils.initialize_velocity,
-                   name='Velocity',  is_vector=True)
-vorti = hysop.Field(domain=box,
-                   name='Vorticity', is_vector=True)
-
-# Discretizations
-d_uw = Discretization(nb_elems)
-
-# Topologies
-topo_uw = box.create_topology(d_uw, dim=1)
-
-# Adaptative timestep simulation
-dt   = VariableParameter({'dt': 0.001})
-simu = Simulation(start=0.0, end=5.0, time_step=0.001, max_iter=10000)
-
-
-## Operators
-ops = {}
-
-# CPU operators
-ops['advec_w'] = Advection(velo,
-                     discretization=topo_uw,
-                     variables={vorti: topo_uw},
-                     method={Scales: 'p_64', MultiScale: 'L4_4'})
-ops['stretching'] = Stretching(velo, vorti, discretization=d_uw)
-ops['diffusion']  = Diffusion(variables={vorti: d_uw}, viscosity=viscosity)
-ops['poisson']    = Poisson(velo, vorti, discretization=d_uw)
-
-
-# other operators
-ops['dt_adapt'] = AdaptTimeStep(velo, vorti,
-                         simulation=simu,
-                         time_range=[0, np.infty],
-                         discretization=d_uw,
-                         method={TimeIntegrator: RK3,
-                                 SpaceDiscretisation: FD_C_4,
-                                 dtCrit: ['gradU', 'cfl']},
-                         lcfl=0.15,
-                         cfl=1.5,
-                         io_params=dt_output)
-
-ops['velo_io']  = HDF_Writer(variables={velo : topo_uw}, io_params=velo_output)
-ops['vorti_io'] = HDF_Writer(variables={vorti: topo_uw}, io_params=vorti_output)
-
-for op in ops:
-    op.discretize()
-
-
diff --git a/trashed_examples/cpu_plane_jet/create_random_arrays.py b/trashed_examples/cpu_plane_jet/create_random_arrays.py
deleted file mode 100644
index de7322631e99cba53ce5e92b837ad3074bf21349..0000000000000000000000000000000000000000
--- a/trashed_examples/cpu_plane_jet/create_random_arrays.py
+++ /dev/null
@@ -1,39 +0,0 @@
-import os
-import numpy as np
-from hysop.constants import HYSOP_REAL, ORDER
-
-
-def random_init(shape, mpi_comm):
-    # Create a folder to store all random arrays
-    d = 'rand_init'
-    if mpi_comm.Get_rank() == 0:
-        if not os.path.exists(d):
-            os.makedirs(d)
-    mpi_comm.Barrier()
-    file_name = "{0}_{1}_{2}".format(*shape)
-    file_name += "_{0}p_{1}.dat".format(mpi_comm.Get_size(),
-                                        mpi_comm.Get_rank())
-    try:
-        randX = np.asarray(
-            np.reshape(np.fromfile(os.path.join(d, 'randX_' + file_name)),
-                       shape),
-            dtype=HYSOP_REAL, order=ORDER)
-        randY = np.asarray(
-            np.reshape(np.fromfile(os.path.join(d, 'randY_' + file_name)),
-                       shape),
-            dtype=HYSOP_REAL, order=ORDER)
-        randZ = np.asarray(
-            np.reshape(np.fromfile(os.path.join(d, 'randZ_' + file_name)),
-                       shape),
-            dtype=HYSOP_REAL, order=ORDER)
-    except IOError:
-        randX = np.asarray(np.random.random(shape),
-                           dtype=HYSOP_REAL, order=ORDER) - 0.5
-        randY = np.asarray(np.random.random(shape),
-                           dtype=HYSOP_REAL, order=ORDER) - 0.5
-        randZ = np.asarray(np.random.random(shape),
-                           dtype=HYSOP_REAL, order=ORDER) - 0.5
-        randX.tofile(os.path.join(d, 'randX_' + file_name))
-        randY.tofile(os.path.join(d, 'randY_' + file_name))
-        randZ.tofile(os.path.join(d, 'randZ_' + file_name))
-    return randX, randY, randZ
diff --git a/trashed_examples/cpu_plane_jet/utilities.py b/trashed_examples/cpu_plane_jet/utilities.py
deleted file mode 100644
index f09774fe55ee70b011436fc66e40577a4b1e474c..0000000000000000000000000000000000000000
--- a/trashed_examples/cpu_plane_jet/utilities.py
+++ /dev/null
@@ -1,27 +0,0 @@
-
-import numpy as np
-
-width = 0.01
-ampl1 = 0.05
-ampl3 = 0.3
-
-
-def initialize_velocity(res, x, y, z, t, rand_init=False):
-    yy  = np.abs(y - 0.5)
-    aux = (0.1 - 2.0 * yy) / (4.0 * width)
-    strg = np.exp(np.abs(aux**2))
-    if rand_init:
-        from create_random_arrays import random_init
-        randX, randY, randZ = random_init(res[0].shape, box.comm_task)
-        strg1 = strg * randX
-        strg2 = strg * randY
-        strg3 = strg * randZ
-        res[0][...] = 0.5 * (1.0 + np.tanh(aux)) * (1.0 + ampl3 * np.sin(8.0 * np.pi * x)) * (1.0 + ampl1 * strg1)
-        res[1][...] = ampl1 * strg2
-        res[2][...] = ampl1 * strg3
-    else:
-        res[0][...] = 0.5 * (1.0 + np.tanh(aux)) * (1.0 + ampl3 * np.sin(8.0 * np.pi * x))
-        res[1][...] = 0.0
-        res[2][...] = 0.0
-    return res
-
diff --git a/trashed_examples/dataNS_RMI.py b/trashed_examples/dataNS_RMI.py
deleted file mode 100644
index 11594177a8ba4dc1c82f1cc5ee02ba9186ccd706..0000000000000000000000000000000000000000
--- a/trashed_examples/dataNS_RMI.py
+++ /dev/null
@@ -1,59 +0,0 @@
-from hysop.methods_keys import Scales, TimeIntegrator, Interpolation,\
-    Remesh, Support, Splitting, dtCrit, SpaceDiscretisation, GhostUpdate
-from hysop.numerics.integrators.runge_kutta2 import RK2 as RK2
-from hysop.numerics.integrators.runge_kutta3 import RK3 as RK3
-from hysop.numerics.integrators.runge_kutta4 import RK4 as RK4
-from hysop.numerics.finite_differences import FD_C_4, FD_C_2
-from hysop.numerics.interpolation import Linear
-from hysop.f2hysop import fftw2py
-from hysop.numerics.remeshing import L4_2 as rmsh
-
-# problem dimension
-dim = 3
-# resolution
-nb = 33
-# number of ghosts in usual cartesian topo
-NBGHOSTS = 2
-# Adaptative timestep method
-TIMESTEP_METHOD = {TimeIntegrator: RK3, SpaceDiscretisation: FD_C_4, 
-                   dtCrit: 'deform'}
-# Lagrangian CFL for adaptative timestep
-LCFL = 0.125
-# CFL (if CFL is None, no CFL condition is taken into account 
-# in the computation of adaptative timesteps)
-CFL = 0.5
-# Advection method
-ADVECTION_METHOD = {Scales: 'p_M4', Splitting: 'classic'}
-#ADVECTION_METHOD = {TimeIntegrator: RK2,
-#                    Interpolation: Linear,
-#                    Remesh: rmsh,
-#                    Support: '',
-#                    Splitting: 'o1'}
-# Baroclinic Term method
-BAROCLINIC_METHOD = {SpaceDiscretisation: FD_C_4, GhostUpdate: True}
-# Curl method
-#CURL_METHOD = {SpaceDiscretisation: FD_C_4, GhostUpdate: True}
-CURL_METHOD = {SpaceDiscretisation: fftw2py, GhostUpdate: False}
-# Flow
-VISCOSITY = 0.001
-RHO_BOTT = 1.0 #2.28 
-RHO_TOP = 3.0 #12.04
-# reprojection criterion
-REPROJ_CST = 0.04
-# Vorticity projection (list of 3 elements): 
-# - the 1st element determines if the reprojection is needed
-# - the 2nd element gives the reprojection frequency (nber of iterations) 
-# if the 1st element is True 
-# - the 3rd element indicates if the reprojection frequency given before 
-# can be reduced to satisfy the reprojection criterion 
-# (governed by the reprojection constant given above)
-# ex1 : PROJ = [True, 10, False] means that the reprojection 
-# of vorticty is strictly be applied every 10 iterations, 
-# regardless of reprojection criterion
-# ex2 : PROJ = [True, 25, True] means that the reprojection 
-# of vorticty is be applied by default every 25 iterations 
-# AND also when the reprojection criterion is not satisfied
-PROJ = [True, 1, False]
-
-# reprojection method
-REPROJ_METHOD = {SpaceDiscretisation: FD_C_4}
diff --git a/trashed_examples/demo_2D.cl b/trashed_examples/demo_2D.cl
deleted file mode 100644
index 0ed50f55b9d177451c79f9dc80f265548ba68f66..0000000000000000000000000000000000000000
--- a/trashed_examples/demo_2D.cl
+++ /dev/null
@@ -1,19 +0,0 @@
-#define PI (float) acos(-1.0)
-
-__kernel void analyticVelocity(__global float* veloX,__global float* veloY,
-			       float4 minPos,
-			       float4 size,
-			       float t)
-{
-  uint gidX = get_global_id(0);
-  uint gidY = get_global_id(1);
-  uint i;
-  float pix, piy, time_term = cos(t*PI/3.0);
-  piy = (minPos.y + gidY*size.y) * PI;
-  for(i=gidX; i<NB_X; i+=WI_NB)
-    {
-      pix = (minPos.x + i*size.x) * PI;
-      veloX[i+gidY*(NB_X)] =  -sin(pix) * sin(pix) * sin(piy * 2) * time_term;
-      veloY[i*NB_Y+gidY] = sin(piy) *sin(piy) * sin(pix * 2) * time_term;
-    }
-}
diff --git a/trashed_examples/demo_2D_real-time.py b/trashed_examples/demo_2D_real-time.py
deleted file mode 100755
index 504fa21772db5e01e4407e276c7904a289df1bbf..0000000000000000000000000000000000000000
--- a/trashed_examples/demo_2D_real-time.py
+++ /dev/null
@@ -1,100 +0,0 @@
-#!/usr/bin/env python
-import hysop
-#hysop.__VERBOSE__ = True
-from hysop.domain.box import Box
-from hysop.gpu import PARMES_REAL_GPU, PARMES_DOUBLE_GPU
-#hysop.gpu.CL_PROFILE = True
-
-from hysop.fields.continuous import Field
-from hysop.operator.advection import Advection
-from hysop.problem.transport import TransportProblem
-from hysop.operator.analytic import Analytic
-from hysop.gpu.QtRendering import QtOpenGLRendering
-from hysop.problem.simulation import Simulation
-import math
-import numpy as np
-
-norm2 = lambda x, y: x * x + y * y
-norm1 = lambda x, y: abs(x) + abs(y)
-norminf = lambda x, y: max(abs(x), abs(y))
-
-
-def initScalar(x, y):
-    return math.exp(-(norm2(x - 0.5, y - 0.75) / 0.0225) ** 6) \
-        + 0.75 * math.exp(-(norm2(x - 0.75, y - 0.25) / 0.0225) ** 6) \
-        + 0.5 * math.exp(-(norm1(x - 0.4, y - 0.4) / 0.1) ** 6) \
-        + 0.25 * math.exp(-(norminf(x - 0.6, y - 0.5) / 0.08) ** 6)
-    # if norm2(x - 0.5, y - 0.75) < 0.0225:
-    #     return  0.25
-    # elif norm2(x - 0.75, y - 0.25) < 0.0225:
-    #     return 0.5
-    # elif norm1(x - 0.4, y - 0.4) < 0.1:
-    #     return 0.75
-    # elif norminf(x - 0.6, y - 0.5) < 0.08:
-    #     return 1.
-    # else:
-    #     return 0.
-
-
-def vitesse(x, y, t=0):
-    vx = -math.sin(x * math.pi) ** 2 * math.sin(y * math.pi * 2.) * \
-        math.cos(t * math.pi / 3.)
-    vy = math.sin(y * math.pi) ** 2 * math.sin(x * math.pi * 2.) * \
-        math.cos(t * math.pi / 3.)
-    return vx, vy
-
-
-dim = 2
-boxLength = [1., 1.]
-boxMin = [0., 0.]
-nbElem = [513, 513]
-
-time_step = 0.075
-finalTime = 3.0 + time_step
-simu = Simulation(start=0.0, end=finalTime, time_step=time_step)
-
-## Domain
-box = Box(dim, length=boxLength, origin=boxMin)
-
-## Fields
-scal = Field(domain=box, name='Scalar', formula=initScalar)
-velo = Field(domain=box, name='Velocity', is_vector=True)
-
-## Operators
-advec = Advection(velo, scal,
-                  resolutions={velo: nbElem,
-                               scal: nbElem},
-                  method='gpu_1k_m6prime',
-                  #src=['./demo_2D.cl'],
-                  precision=PARMES_REAL_GPU,
-                  splittingConfig='o2'
-                  )
-velocity = Analytic(velo,  # formula=vitesse,
-                    resolutions={velo: nbElem},
-                    method='gpu',
-                    src=['./demo_2D.cl'],
-                    precision=PARMES_REAL_GPU,
-                    )
-render = QtOpenGLRendering(scal)
-
-# Problem
-pb = TransportProblem([velocity, advec], simu,
-                      monitors=[render])
-
-# Setting solver to Problem
-pb.setUp()
-
-# We copy the first discretisation of scalar
-scalar_initial = np.copy(scal.discreteFields.values()[0].data)
-
-## Solve problem
-pb.solve()
-
-scal_disc = scal.discreteFields.values()[0]
-scal_disc.toHost()
-print 'Erreur : ', np.max(np.abs(scalar_initial -
-                                 scal_disc.data)) / np.max(scalar_initial)
-print np.linalg.norm(scalar_initial - scal_disc.data, ord=2) / \
-    np.linalg.norm(scalar_initial, ord=2)
-
-pb.finalize()
diff --git a/trashed_examples/demo_hybrid.py b/trashed_examples/demo_hybrid.py
deleted file mode 100644
index 004ecbea962236578c6dd70be139ec8bf2824ab7..0000000000000000000000000000000000000000
--- a/trashed_examples/demo_hybrid.py
+++ /dev/null
@@ -1,73 +0,0 @@
-"""
-Task parallelism example.
-
-One process is dedicated to file outputs and other are computing the data.
-"""
-import hysop as pp
-from hysop.constants import np, HDF5
-from hysop.mpi.main_var import main_size, main_rank, main_comm
-from hysop.mpi.topology import Cartesian
-from hysop.fields.continuous import Field
-from hysop.operator.analytic import Analytic
-from hysop.operator.redistribute_intercomm import RedistributeIntercomm
-from hysop.operator.monitors.printer import Printer
-from hysop.problem.problem_tasks import ProblemTasks
-from hysop.problem.simulation import Simulation
-from hysop.tools.problem2dot import toDot
-pi = np.pi
-cos = np.cos
-sin = np.sin
-
-
-def func(res, x, y, z, t=0.):
-    res[0][...] = 2. * sin(pi*x)**2*sin(2.*pi*y)*sin(2.*pi*z)*cos(t*pi/3.)
-    res[1][...] = -sin(2.*pi*x)*sin(pi*y)**2*sin(2.*pi*z)*cos(t*pi/3.)
-    res[2][...] = -sin(2.*pi*x)*sin(2.*pi*y)*sin(pi*z)**2*cos(t*pi/3.)
-    return res
-
-assert main_size > 1
-
-nb_elem = [65, ] * 3
-b = pp.Box()
-proc_tasks = [0,] * main_size
-proc_tasks[0] = 1
-comm_s = main_comm.Split(color=proc_tasks[main_rank], key=main_rank)
-
-
-if proc_tasks[main_rank] == 1:
-    topo = Cartesian(b, dim=1, globalMeshResolution=nb_elem,
-                     cutdir=[True, False, False],
-                     comm=comm_s
-                     )
-elif proc_tasks[main_rank] == 0:
-    topo = Cartesian(b, dim=2, globalMeshResolution=nb_elem,
-                     cutdir=[False, True, True],
-                     comm=comm_s
-                     )
-
-field = Field(domain=b, name="Test_Vec",
-              is_vector=True, formula=func)
-
-op = Analytic([field], {field: nb_elem}, topo=topo, task_id=0)
-p = Printer(variables=[field],
-            topo=topo,
-            frequency=1,
-            prefix='./demo_task/',
-            formattype=HDF5, task_id=1)
-
-red = RedistributeIntercomm(field, topo, op, p, proc_tasks, main_comm,
-                                component=None)
-
-simu = Simulation(start=0.0, end=3., time_step=0.15, max_iter=200)
-
-
-pb = ProblemTasks([op, red, p], simu, proc_tasks)
-pb.pre_setUp()
-
-# As 1 process is only a printer, one need explicit discretize and
-# set the initialization topology for the field.
-field.discretize(topo)
-field.setTopoInit(topo)
-pb.setUp()
-pb.solve()
-pb.finalize()
diff --git a/trashed_examples/demo_mpi.py b/trashed_examples/demo_mpi.py
deleted file mode 100644
index 0eb595f36a17f352c741086f24fe90af653f150d..0000000000000000000000000000000000000000
--- a/trashed_examples/demo_mpi.py
+++ /dev/null
@@ -1,316 +0,0 @@
-"""Testing mpi multiple topologies transfers"""
-
-from hysop.constants import HYSOP_MPI_REAL, ORDERMPI, HYSOP_REAL
-import hysop.tools.numpywrappers as npw
-from hysop.fields.continuous import Field
-from hysop.mpi.main_var import main_rank, main_size, main_comm, MPI
-from hysop.mpi.topology import Cartesian
-from hysop.operator.analytic import Analytic
-from hysop.operator.redistribute import Redistribute
-from hysop.operator.redistribute_intercomm import RedistributeIntercomm
-import numpy as np
-
-if HYSOP_REAL is np.float32:
-    atol = 1.e-4
-else:
-    atol = 1e-10
-
-def func(res, x, y, z, t=0.):
-    res[0][...] = x
-    res[1][...] = y ** 2
-    res[2][...] = x + y
-    return res
-
-
-def test_topologies(domain, topo_from, topo_to, init_formula):
-    """
-    Testing the mpi transfers between the two topologies.
-    It initialises a field on topo_from and one Analytic operator on each
-    topology. A Redistribute operator is created to transfer the data.
-    """
-    field = Field(domain=domain, name="Test_Vec",
-              is_vector=True, formula=init_formula)
-
-    if not topo_from.isNew:
-        topo_from = topo_to
-    if not topo_to.isNew:
-        topo_to = topo_from
-
-    op_from = Analytic([field], {field: nb_elem}, topo=topo_from)
-    op_to = Analytic([field], {field: nb_elem}, topo=topo_to)
-    op_from.discretize()
-    op_to.discretize()
-    op_from.setUp()
-    op_to.setUp()
-
-    ope = Redistribute([field], op_from, op_to)
-    ope.discretize()
-    ope.setUp()
-
-    field.initialize(topo=topo_from)
-    ope.apply()
-    ope.wait()
-
-    assert np.allclose(field.norm(topo_from),
-                                    field.norm(topo_to),
-                                    atol=atol), \
-        "[{0}] Data differs: norm, topo_from - topo_to = {1}".format(
-            main_rank, field.norm(topo_from) - field.norm(topo_to))
-
-
-nb_elem = [17, ] * 3
-b = pp.Box()
-
-## CASE 1:
-## Testing for two topologies with the same MPI size but different dimensions
-if main_rank == 0:
-    print "Testing CASE 1 ...",
-    b.reset()
-topo_q = Cartesian(b, dim=2, globalMeshResolution=[17, 17, 17],
-                   cutdir=[False, True, True]
-                   )
-topo_p = Cartesian(b, dim=1, globalMeshResolution=[17, 17, 17],
-                   cutdir=[True, False, False]
-                   )
-if not topo_q.isNew:
-    if main_rank == 0:
-        print "Reuse topology"
-    topo_q = topo_p
-if not topo_p.isNew:
-    if main_rank == 0:
-        print "Reuse topology"
-    topo_p = topo_q
-test_topologies(b, topo_p, topo_q, func)
-test_topologies(b, topo_q, topo_p, func)
-if main_rank == 0:
-    print "Ok"
-main_comm.Barrier()
-
-
-## CASE 2:
-## Testing for two topologies with based on a MPI sub-communicator of same
-## size. Only half of total processes are working.
-if main_size % 2 == 0:
-    if main_rank == 0:
-        print "Testing CASE 2 ...",
-    b.reset()
-    proc_tasks = [0,] * (main_size/2) + [1,] * (main_size/2)
-    comm_s = main_comm.Split(color=proc_tasks[main_rank], key=main_rank)
-    topo_q = Cartesian(b, dim=2, globalMeshResolution=[17, 17, 17],
-                       cutdir=[False, True, True],
-                       comm=comm_s
-                       )
-    topo_p = Cartesian(b, dim=1, globalMeshResolution=[17, 17, 17],
-                       cutdir=[True, False, False],
-                       comm=comm_s
-                       )
-    if not topo_q.isNew:
-        if main_rank == 0:
-            print "Reuse topology"
-        topo_q = topo_p
-    if not topo_p.isNew:
-        if main_rank == 0:
-            print "Reuse topology"
-        topo_p = topo_q
-    if proc_tasks[main_rank] == 0:
-        test_topologies(b, topo_p, topo_q, func)
-        test_topologies(b, topo_q, topo_p, func)
-    if main_rank == 0:
-        print "Ok"
-else:
-    if main_rank == 0:
-        print "CASE 2 is not tested (require an even process number)"
-main_comm.Barrier()
-
-## CASE 3 (small case):
-## Testing for two topologies with based on a MPI sub-communicator of different
-## size. Only half of total processes are working. Initialisation is performed
-## by each processus in using his topology.
-if main_size > 2:
-    if main_rank == 0:
-        print "Testing CASE 3 ...",
-    b.reset()
-    proc_tasks = [0,] * main_size
-    proc_tasks[0] = 1
-    comm_s = main_comm.Split(color=proc_tasks[main_rank], key=main_rank)
-    topo = Cartesian(b, dim=2, globalMeshResolution=[17, 17, 17],
-                       cutdir=[False, True, True],
-                       comm=comm_s
-                       )
-    field = Field(domain=b, name="Test_Vec",
-                  is_vector=True, formula=func)
-    field.discretize(topo)
-    field.initialize(topo=topo)
-    norms = npw.zeros((main_size, 3))
-
-    norms[main_rank,:] = field.norm(topo)
-    for i in xrange(main_size):
-        norms[i,:] = main_comm.bcast(norms[i,:], root=i)
-    for i in xrange(main_size):
-        assert np.allclose(norms[i,:], norms[main_rank,:],
-                                        atol=atol)
-    if main_rank == 0:
-        print " Ok"
-else:
-    if main_rank == 0:
-        print "CASE 3 is not tested (require process number > 2)"
-
-
-## CASE 4:
-## Extending CASE 3: the field is initialized by process of only one
-## task id.
-## 'From' topology is 1D and 'To' is 2D
-if main_size > 2:
-    if main_rank == 0:
-        print "Testing CASE 4 ...",
-    b.reset()
-    proc_tasks = [0,] * main_size
-    proc_tasks[0:2] = [1, 1]
-    comm_s = main_comm.Split(color=proc_tasks[main_rank], key=main_rank)
-    if proc_tasks[main_rank] == 1:
-        topo = Cartesian(b, dim=1, globalMeshResolution=[17, 17, 17],
-                         cutdir=[True, False, False],
-                         comm=comm_s
-                         )
-    elif proc_tasks[main_rank] == 0:
-        topo = Cartesian(b, dim=2, globalMeshResolution=[17, 17, 17],
-                         cutdir=[False, True, True],
-                         comm=comm_s
-                         )
-    field = Field(domain=b, name="Test_Vec",
-                  is_vector=True, formula=func)
-    field_r = Field(domain=b, name="Test_Vec_R",
-                    is_vector=True, formula=func)
-
-    field.discretize(topo)
-    field_r.discretize(topo)
-    if proc_tasks[main_rank] == 1:
-        field.initialize(topo=topo)
-    if proc_tasks[main_rank] == 0:
-        field_r.initialize(topo=topo)
-
-    op_from = Analytic([field], {field: [17, 17, 17]}, topo=topo, task_id=1)
-    op_to = Analytic([field], {field: [17, 17, 17]}, topo=topo, task_id=0)
-
-    red = RedistributeIntercomm(field, topo, op_from, op_to,
-                                proc_tasks, main_comm,
-                                component=None)
-    red_r = RedistributeIntercomm(field_r, topo, op_to, op_from,
-                                  proc_tasks, main_comm,
-                                  component=None)
-    red.discretize()
-    red_r.discretize()
-    red.setUp()
-    red_r.setUp()
-
-    red.apply()
-    red_r.apply()
-
-    norms = npw.zeros((main_size, 3))
-    norms[main_rank, :] = field.norm(topo)
-    for i in xrange(main_size):
-        norms[i, :] = main_comm.bcast(norms[i, :], root=i)
-    for i in xrange(main_size):
-        assert np.allclose(norms[i, :], norms[main_rank, :],
-                                        atol=atol)
-    norms[main_rank, :] = field_r.norm(topo)
-    for i in xrange(main_size):
-        norms[i, :] = main_comm.bcast(norms[i, :], root=i)
-    for i in xrange(main_size):
-        assert np.allclose(norms[i, :], norms[main_rank, :],
-                                        atol=atol)
-
-    if main_rank == 0:
-        print " Ok"
-else:
-    if main_rank == 0:
-        print "CASE 4 is not tested (require process number > 2)"
-
-## CASE 5:
-## Extending CASE 4: Using topology with ghosts
-if main_size > 2:
-    if main_rank == 0:
-        print "Testing CASE 5 ...",
-    b.reset()
-    proc_tasks = [0,] * main_size
-    proc_tasks[0:2] = [1, 1]
-    comm_s = main_comm.Split(color=proc_tasks[main_rank], key=main_rank)
-    if proc_tasks[main_rank] == 1:
-        topo = Cartesian(b, dim=1, globalMeshResolution=[17, 17, 17],
-                         cutdir=[True, False, False], ghosts=[2, 0, 1],
-                         comm=comm_s
-                         )
-    elif proc_tasks[main_rank] == 0:
-        topo = Cartesian(b, dim=2, globalMeshResolution=[17, 17, 17],
-                         cutdir=[False, True, True], ghosts=[2, 0, 1],
-                         comm=comm_s
-                         )
-    field = Field(domain=b, name="Test_Vec",
-                  is_vector=True, formula=func)
-    field_r = Field(domain=b, name="Test_Vec_R",
-                    is_vector=True, formula=func)
-
-    field.discretize(topo)
-    field_r.discretize(topo)
-    if proc_tasks[main_rank] == 1:
-        field.initialize(topo=topo)
-    if proc_tasks[main_rank] == 0:
-        field_r.initialize(topo=topo)
-
-    op_from = Analytic([field], {field: [17, 17, 17]}, topo=topo, task_id=1)
-    op_to = Analytic([field], {field: [17, 17, 17]}, topo=topo, task_id=0)
-
-    red = RedistributeIntercomm(field, topo, op_from, op_to,
-                                proc_tasks, main_comm,
-                                component=None)
-    red_r = RedistributeIntercomm(field_r, topo, op_to, op_from,
-                                  proc_tasks, main_comm,
-                                  component=None)
-    red.discretize()
-    red_r.discretize()
-    red.setUp()
-    red_r.setUp()
-
-    red.apply()
-    red_r.apply()
-
-    norms = npw.zeros((main_size, 3))
-    norms[main_rank, :] = field.norm(topo)
-    for i in xrange(main_size):
-        norms[i, :] = main_comm.bcast(norms[i, :], root=i)
-    for i in xrange(main_size):
-        assert np.allclose(norms[i, :], norms[main_rank, :],
-                                        atol=atol)
-    norms[main_rank, :] = field_r.norm(topo)
-    for i in xrange(main_size):
-        norms[i, :] = main_comm.bcast(norms[i, :], root=i)
-    for i in xrange(main_size):
-        assert np.allclose(norms[i, :], norms[main_rank, :],
-                                        atol=atol)
-
-    # Assert that ghosts have not been exchanged
-    if proc_tasks[main_rank] == 0:
-        assert np.allclose(field.discreteFields[topo].data[0][0:2, :, :], 0.,
-                           atol=atol)
-        assert np.allclose(field.discreteFields[topo].data[0][-2:, :, :], 0.,
-                           atol=atol)
-        assert np.allclose(field.discreteFields[topo].data[0][:, :, 0:1], 0.,
-                           atol=atol)
-        assert np.allclose(field.discreteFields[topo].data[0][:, :, -1:], 0.,
-                           atol=atol)
-    if proc_tasks[main_rank] == 1:
-        assert np.allclose(field_r.discreteFields[topo].data[0][0:2, :, :], 0.,
-                           atol=atol)
-        assert np.allclose(field_r.discreteFields[topo].data[0][-2:, :, :], 0.,
-                           atol=atol)
-        assert np.allclose(field_r.discreteFields[topo].data[0][:, :, 0:1], 0.,
-                           atol=atol)
-        assert np.allclose(field_r.discreteFields[topo].data[0][:, :, -1:], 0.,
-                           atol=atol)
-
-    if main_rank == 0:
-        print " Ok"
-
-else:
-    if main_rank == 0:
-        print "CASE 5 is not tested (require process number > 2)"
diff --git a/trashed_examples/howto_integrators.py b/trashed_examples/howto_integrators.py
deleted file mode 100644
index b082f18e68ac2edf013b8a0830f0a65efd8aa337..0000000000000000000000000000000000000000
--- a/trashed_examples/howto_integrators.py
+++ /dev/null
@@ -1,654 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# This file gives some examples on how parmepy integrators must
-# be used.
-# 
-#
-# \todo : complete it with 2D and 3D cases.
-# 
-
-from parmepy.methods import Euler, RK2, RK3, RK4
-from parmepy.constants import WITH_GUESS
-from parmepy.tools import numpywrappers as npw
-import math
-import numpy as np
-pi = math.pi
-sin = np.sin
-cos = np.cos
-import matplotlib.pyplot as plt
-from parmepy.constants import MPI
-from parmepy.mpi.topology import Cartesian
-
-# Grid resolution for tests
-nb = 5
-# Initial time
-start = 0.
-# Final time
-end = 0.5
-# Time step
-#dt = 1e-3
-# time sequence
-#time = npu.seq(start, end, dt)
-#nbSteps = time.size
-import math as m
-import parmepy as pp
-pi = m.pi
-
-# A set of tests and reference functions
-
-def func1D(t, y, sol):
-    sol[0][...] = -y[0]
-    return sol
-
-
-def func2D(t, y, sol):
-    sol[0][...] = y[1]
-    sol[1][...] = -y[0]
-    return sol
-
-### odespy utilities, for comparisons ... ###
-try:
-    import odespy as ode
-
-except ImportError, e:
-    msg = 'This tutorial needs odespy package.'
-    msg += 'Please check https://github.com/hplgit/odespy for details.'
-    print msg
-
-
-def funcODESPY(y, t):
-    return -y
-
-
-def func2DODESPY(y, t):
-    return [y[1], -y[0]]
-
-
-def rhs(y, w):
-    w[0][...] = y[0]
-    w[1][...] = 2 * y[1]
-    return w
-
-shapey = (500, 500, 500)
-
-
-# No work, no guess, no result.
-def solve0(y):
-    nb = 2
-    work = [npw.zeros(shapey), npw.zeros(shapey)]
-    #print "solve ...", id(y[0]), " ", id(work[0])
-    work[:nb] = rhs(y, work[:nb])
-    #print "post rhs  ...", id(y[0]), " ", id(work[0])
-    [np.add(y[i], work[i] * 0.1, work[i])
-     for i in xrange(nb)]
-    #print "post op  ...", id(y[0]), " ", id(work[0])
-    return work[:nb]
-
-nb = 2
-
-
-# work, no guess, no result
-def solve(y, work):
-    work[:nb] = rhs(y, work[:nb])
-    [np.add(y[i], work[i] * 0.1, work[i])
-     for i in xrange(nb)]
-    return work[:nb]
-
-
-def solve2(y, work):
-    [np.add(y[i], work[i] * 0.1, work[i])
-     for i in xrange(nb)]
-    return work[:nb]
-
-
-def solve3(y, work):
-    [np.add(y[i], work[i] * 0.1, work[i + 2])
-     for i in xrange(nb)]
-    return work[nb:nb * 2]
-
-
-def solve4(y, work, res):
-    [np.add(y[i], work[i] * 0.1, res[i])
-     for i in xrange(nb)]
-    #res[0][...] = work[0][...]
-    #res[1][...] = work[1][...]
-    return res
-
-
-def solve6(y, work, res):
-    [np.add(y[i], work[i] * 0.1, res[i])
-     for i in xrange(nb)]
-    #res[0][...] = work[0][...]
-    #res[1][...] = work[1][...]
-    #    return res
-
-## Deepcopy for res : bad perf, change res address.
-## def solve5(y, work, res):
-##     nb = 2
-##     #print "solve2 ...", id(y[0]), " ", id(work[0])
-##     [np.add(y[i], work[i] * 0.1, work[i])
-##      for i in xrange(nb)]
-##     #print "post op  ...", id(y[0]), " ", id(work[0])
-##     res = copy.deepcopy(work[:nb])
-##     return res
-
-
-### No inplace computation ###
-# OK
-def cas_basic():
-    y = [npw.ones(shapey), npw.ones(shapey) * 1.2]
-    id0 = id(y[0])
-    start = MPI.Wtime()
-    sol = solve0(y)
-    print "basic time : ",  MPI.Wtime() - start
-    print sol[0].mean() + sol[1].mean()
-    print id0, id(y[0])
-
-
-# OK
-def cas_work():
-    y = [npw.ones(shapey), npw.ones(shapey) * 1.2]
-    id0 = id(y[0])
-    work = [npw.zeros(shapey), npw.zeros(shapey)]
-    start = MPI.Wtime()
-    sol = solve(y, work)
-    print "work time", MPI.Wtime() - start
-    print sol[0].mean() + sol[1].mean()
-    print id0, id(y[0])
-
-
-# Ok, little better than cas_work
-def cas_work2():
-    y = [npw.ones(shapey), npw.ones(shapey) * 1.2]
-    id0 = id(y[0])
-    work = [npw.zeros(shapey), npw.zeros(shapey)]
-    print id(work[0])
-    start = MPI.Wtime()
-    work = solve(y, work)
-    print "work 2 time", MPI.Wtime() - start
-    print id(work[0])
-    print work[0].mean() + work[1].mean()
-    print id0, id(y[0])
-
-
-# Ok
-def cas_guess():
-    y = [npw.ones(shapey), npw.ones(shapey) * 1.2]
-    id0 = id(y[0])
-    work = [npw.zeros(shapey), npw.zeros(shapey)]
-    start = MPI.Wtime()
-    work[:2] = rhs(y, work[:2])
-    sol = solve2(y, work)
-    print "guess time", MPI.Wtime() - start
-    print sol[0].mean() + sol[1].mean()
-    print id0, id(y[0])
-
-
-#  Ok, same as cas_guess
-def cas_guess2():
-    y = [npw.ones(shapey), npw.ones(shapey) * 1.2]
-    id0 = id(y[0])
-    work = [npw.zeros(shapey), npw.zeros(shapey)]
-    print id(work[0])
-    start = MPI.Wtime()
-    work[:2] = rhs(y, work[:2])
-    work = solve2(y, work)
-    print "guess2 time", MPI.Wtime() - start
-    print id(work[0])
-    print work[0].mean() + work[1].mean()
-    print id0, id(y[0])
-
-
-# Ok, same as  guess and guess2
-def cas_guess3():
-    y = [npw.ones(shapey), npw.ones(shapey) * 1.2]
-    id0 = id(y[0])
-    #res = [npw.ones(shapey), npw.ones(shapey) * 1.2]
-    work = [npw.zeros(shapey), npw.zeros(shapey),
-            npw.zeros(shapey), npw.zeros(shapey)]
-    #  res[0], res[1]]
-    res = work[2:4]
-    print id(work[2])
-    start = MPI.Wtime()
-    work[:2] = rhs(y, work[:2])
-    res = solve3(y, work)
-    print "guess3 time", MPI.Wtime() - start
-    print id(work[2])
-    print res[0].mean() + res[1].mean()
-    print id0, id(y[0])
-
-
-# Ok, poor perf.
-def cas_guess7():
-    y = [npw.ones(shapey), npw.ones(shapey) * 1.2]
-    id0 = id(y[0])
-    work = [npw.zeros(shapey), npw.zeros(shapey)]
-    res = [npw.zeros(shapey), npw.zeros(shapey)]
-    print id(res[0])
-    start = MPI.Wtime()
-    work[:2] = rhs(y, work[:2])
-    res = solve4(y, work, res)
-    print "guess7 time", MPI.Wtime() - start
-    print id(res[0])
-    print res[0].mean() + res[1].mean()
-    print id0, id(y[0])
-
-# Ok but poor performances.
-## def cas_guess8():
-##     y = [npw.ones(shapey), npw.ones(shapey) * 1.2]
-##     id0 = id(y[0])
-##     work = [npw.zeros(shapey), npw.zeros(shapey)]
-##     work[:2] = rhs(y, work[:2])
-##     res = [npw.zeros(shapey), npw.zeros(shapey)]
-##     #print "main ...", id(y[0]), " ", id(work[0])
-##     print id(res[0])
-##     start = MPI.Wtime()
-##     res = solve5(y, work, res)
-##     print "guess8 time", MPI.Wtime() - start
-##     #print "post ...", id(y[0]), " ", id(work[0])
-##     print id(res[0])
-##     print res[0].mean() + res[1].mean()
-##     print id0, id(y[0])
-
-
-### Inplace computation ###
-# Ok ... but y looses its id
-def cas_basic_inplace():
-    y = [npw.ones(shapey), npw.ones(shapey) * 1.2]
-    id0 = id(y[0])
-    #print "main ...", id(y[0])
-    start = MPI.Wtime()
-    y = solve0(y)
-    print "basic/inplace time : ",  MPI.Wtime() - start
-    #print "post ...", id(y[0])
-    print y[0].mean() + y[1].mean()
-    print id0, id(y[0])
-
-
-# ok
-def cas_basic3():
-    y = [npw.ones(shapey), npw.ones(shapey) * 1.2]
-    id0 = id(y[0])
-    start = MPI.Wtime()
-    #    print "main ...", id(y[0])
-    y[0][...], y[1][...] = solve0(y)
-    print "basic3 time : ",  MPI.Wtime() - start
-    print "basic3 ...", id(y[0])
-    print y[0].mean() + y[1].mean()
-    print id0, id(y[0])
-
-
-# Ok ... but y looses its id
-def cas_work_inplace():
-    y = [npw.ones(shapey), npw.ones(shapey) * 1.2]
-    id0 = id(y[0])
-    work = [npw.zeros(shapey), npw.zeros(shapey)]
-    start = MPI.Wtime()
-    #print "main ...", id(y[0]), " ", id(work[0])
-    y = solve(y, work)
-    print "work/inplace time", MPI.Wtime() - start
-    print y[0].mean() + y[1].mean()
-    print id0, id(y[0])
-
-
-# Ok. Best perf.
-def cas_guess_inplace():
-    y = [npw.ones(shapey), npw.ones(shapey) * 1.2]
-    id0 = id(y[0])
-    work = [npw.zeros(shapey), npw.zeros(shapey), y[0], y[1]]
-    start = MPI.Wtime()
-    work[:2] = rhs(y, work[:2])
-    y = solve3(y, work)
-    print "guess/inplace time", MPI.Wtime() - start
-    print y[0].mean() + y[1].mean()
-    print id0, id(y[0]), id(work[0])
-
-
-# Ok. Poor perf.
-def cas_guess6():
-    y = [npw.ones(shapey), npw.ones(shapey) * 1.2]
-    id0 = id(y[0])
-    work = [npw.zeros(shapey), npw.zeros(shapey)]
-    start = MPI.Wtime()
-    work[:2] = rhs(y, work[:2])
-    sol = solve2(y, work)
-    y[0][...] = sol[0][...]
-    y[1][...] = sol[1][...]
-    print "guess6 time", MPI.Wtime() - start
-    print id(sol[0])
-    print y[0].mean() + y[1].mean()
-    print id0, id(y[0])
-
-
-# Ok, seems to be the more efficient for inplace computation.
-def cas_guess10():
-    y = [npw.ones(shapey), npw.ones(shapey) * 1.2]
-    id0 = id(y[0])
-    work = [npw.zeros(shapey), npw.zeros(shapey), y[0], y[1]]
-    start = MPI.Wtime()
-    work[:2] = rhs(y, work[:2])
-    y = solve3(y, work)
-    print "guess10 time", MPI.Wtime() - start
-    print y[0].mean() + y[1].mean()
-    print id0, id(y[0])
-
-# No : change y address
-## def cas_guess9():
-##     y = [npw.ones(shapey), npw.ones(shapey) * 1.2]
-##     id0 = id(y[0])
-##     work = [npw.zeros(shapey), npw.zeros(shapey)]
-##     work[:2] = rhs(y, work[:2])
-##     #res = [npw.zeros(shapey), npw.zeros(shapey)]
-##     #print "main ...", id(y[0]), " ", id(work[0])
-##     start = MPI.Wtime()
-##     y = solve5(y, work, y)
-##     print "guess9 time", MPI.Wtime() - start
-##     #print "post ...", id(y[0]), " ", id(work[0])
-##     print y[0].mean() + y[1].mean()
-##     print id0, id(y[0])
-
-
-# Ok. Same as 11.
-def cas_guess11():
-    y = [npw.ones(shapey), npw.ones(shapey) * 1.2]
-    id0 = id(y[0])
-    work = [npw.zeros(shapey), npw.zeros(shapey)]
-    start = MPI.Wtime()
-    work[:2] = rhs(y, work[:2])
-    y = solve4(y, work, y)
-    print "guess11 time", MPI.Wtime() - start
-    print y[0].mean() + y[1].mean()
-    print id0, id(y[0])
-
-
-# Ok. Same as 11.
-def cas_guess12():
-    y = [npw.ones(shapey), npw.ones(shapey) * 1.2]
-    id0 = id(y[0])
-    work = [npw.zeros(shapey), npw.zeros(shapey)]
-    start = MPI.Wtime()
-    work[:2] = rhs(y, work[:2])
-    solve4(y, work, y)
-    print "guess11 time", MPI.Wtime() - start
-    print y[0].mean() + y[1].mean()
-    print id0, id(y[0])
-
-
-# Ok. Same as 11.
-def cas_guess13():
-    y = [npw.ones(shapey), npw.ones(shapey) * 1.2]
-    id0 = id(y[0])
-    work = [npw.zeros(shapey), npw.zeros(shapey)]
-    start = MPI.Wtime()
-    work[:2] = rhs(y, work[:2])
-    solve6(y, work, y)
-    print "guess13 time", MPI.Wtime() - start
-    print y[0].mean() + y[1].mean()
-    print id0, id(y[0])
-## cas_basic()
-## print '------------'
-## cas_work()
-## print '------------'
-## cas_work2()
-## print '------------'
-## cas_guess()
-## print '------------'
-## cas_guess2()
-## print '------------'
-## cas_guess3()
-## print '------------'
-## cas_guess7()
-## print '------------'
-
-
-## cas_basic_inplace()
-## print '------------'
-## cas_basic3()
-## print '------------'
-## cas_work_inplace()
-## print '------------'
-## cas_guess_inplace()
-## print '------------'
-## cas_guess6()
-## print '------------'
-## cas_guess10()
-## print '------------'
-## cas_guess11()
-## print '------------'
-## cas_guess12()
-## print '------------'
-## cas_guess13()
-## print '------------'
-
-
-def odespy1D(solver, nbSteps):
-    """
-    Integration with odespy
-    """
-    time_points = np.linspace(start, end, nbSteps)
-    dtt = time_points[1] - time_points[0]
-    u, t = solver.solve(time_points)
-    err = 0
-    err = (np.abs(u[:] - np.exp(-time_points))).max()
-    return dtt, err
-
-
-def odespy2D(solver, nbSteps):
-    """
-    Integration with odespy for solver.neq > 1
-    """
-    time_points = np.linspace(start, end, nbSteps)
-    dtt = time_points[1] - time_points[0]
-    u, t = solver.solve(time_points)
-    neq = solver.neq
-    result = [0] * neq
-    result[0] = (np.abs(u[:, 0] - np.cos(time_points))).max()
-    result[1] = (np.abs(u[:, 1] + np.sin(time_points))).max()
-    result.insert(0, dtt)
-    return result
-
-
-# -- 1D cases --
-def integrate(integ, nbSteps):
-    """
-    Integration with parmepy
-    """
-    t = start
-    time_points = np.linspace(start, end, nbSteps)
-    dtt = time_points[1] - time_points[0]
-    y = [npw.ones(nb) * math.exp(-start)]
-    #res = [npw.zeros(nb)]
-    # work = None
-    i = 1
-    ref = npw.zeros((nbSteps, nb))
-    ref[0, :] = y[0][:]
-    while i < nbSteps:
-        y = integ(t, y, dtt)
-        #y[0][...] = res[0]
-        ref[i, :] = y[0][:]
-        t += dtt
-        i += 1
-    err = 0.0
-    for i in xrange(nb):
-        err = max(err, (np.abs(ref[:, i] - np.exp(-time_points))).max())
-    return dtt, err
-
-
-def test_1D_0():
-
-    nbdt = 15
-    nbSteps = 100
-    errE = npw.ones(nbdt)
-    dtE = npw.ones(nbdt)
-    err2 = npw.ones(nbdt)
-    dt2 = npw.ones(nbdt)
-    err3 = npw.ones(nbdt)
-    dt3 = npw.ones(nbdt)
-    err4 = npw.ones(nbdt)
-    dt4 = npw.ones(nbdt)
-    errodespy = npw.ones(nbdt)
-    dtodespy = npw.ones(nbdt)
-    err2odespy = npw.ones(nbdt)
-    dt2odespy = npw.ones(nbdt)
-    err3odespy = npw.ones(nbdt)
-    dt3odespy = npw.ones(nbdt)
-    err4odespy = npw.ones(nbdt)
-    dt4odespy = npw.ones(nbdt)
-    for i in xrange(nbdt):
-        # Euler
-        dtE[i], errE[i] = integrate(Euler(1, func1D), nbSteps)
-        solver = ode.Euler(funcODESPY)
-        solver.set_initial_condition([math.exp(-start)])
-        dtodespy[i], errodespy[i] = odespy1D(solver, nbSteps)
-        # RK2
-        dt2[i], err2[i] = integrate(RK2(1, func1D), nbSteps)
-        solver = ode.RK2(funcODESPY)
-        solver.set_initial_condition([math.exp(-start)])
-        dt2odespy[i], err2odespy[i] = odespy1D(solver, nbSteps)
-        # RK3
-        dt3[i], err3[i] = integrate(RK3(1, func1D), nbSteps)
-        solver = ode.RK3(funcODESPY)
-        solver.set_initial_condition([math.exp(-start)])
-        dt3odespy[i], err3odespy[i] = odespy1D(solver, nbSteps)
-        # RK4
-        dt4[i], err4[i] = integrate(RK4(1, func1D), nbSteps)
-        solver = ode.RK4(funcODESPY)
-        solver.set_initial_condition([math.exp(-start)])
-        dt4odespy[i], err4odespy[i] = odespy1D(solver, nbSteps)
-        nbSteps += 10
-
-    assert (dtE == dtodespy).all()
-    assert (dtE == dt2odespy).all()
-    assert (dtE == dt3odespy).all()
-    assert (dtE == dt4odespy).all()
-    assert (dtE == dt2).all()
-    assert (dtE == dt3).all()
-    assert (dtE == dt4).all()
-
-    x = np.log10(1. / dtE)
-    plt.figure(1)
-    plt.plot(x, np.log10(errE), x, np.log10(err2),
-             x, np.log10(err3), x, np.log10(err4),
-             x, np.log10(errodespy))
-
-    print " ==== EULER ===="
-    print "Erreur Euler :", errE
-    print "Erreur Euler (odespy) :", errodespy
-    print "dt : ", dtE
-    print "Pente euler:",\
-        (np.log10(errE)[-1] - np.log10(errE)[0]) / (x[-1] - x[0])
-    print "Pente odespy: ",\
-        (np.log10(errodespy)[-1] - np.log10(errodespy)[0]) / (x[-1] - x[0])
-    print " ==== RK2 ===="
-    print "Erreur RK2 :", err2
-    print "Erreur RK2 (odespy) :", err2odespy
-    print "dt : ", dt2
-    print "Pente RK2:",\
-        (np.log10(err2)[-1] - np.log10(err2)[0]) / (x[-1] - x[0])
-    print "Pente odespy (RK2): ",\
-        (np.log10(err2odespy)[-1] - np.log10(err2odespy)[0]) / (x[-1] - x[0])
-    print " ==== RK3 ===="
-    print "Erreur RK3 :", err3
-    print "Erreur RK3 (odespy) :", err3odespy
-    print "dt : ", dt3
-    print "Pente RK3:",\
-        (np.log10(err3)[-1] - np.log10(err3)[0]) / (x[-1] - x[0])
-    print "Pente odespy (RK3): ",\
-        (np.log10(err3odespy)[-1] - np.log10(err3odespy)[0]) / (x[-1] - x[0])
-    print " ==== RK4 ===="
-    print "Erreur RK4 :", err4
-    print "Erreur RK4 (odespy) :", err4odespy
-    print "dt : ", dt4
-    print "Pente RK4:",\
-        (np.log10(err4)[-1] - np.log10(err4)[0]) / (x[-1] - x[0])
-    print "Pente odespy (RK4): ",\
-        (np.log10(err4odespy)[-1] - np.log10(err4odespy)[0]) / (x[-1] - x[0])
-
-
-# -- 2D cases --
-def integrate2D(integ, nbSteps):
-    """
-    Integration with parmepy
-    """
-    t = start
-    time_points = np.linspace(start, end, nbSteps)
-    dtt = time_points[1] - time_points[0]
-    y = [npw.ones((nb, nb)), npw.zeros((nb, nb))]
-#    res = [npw.zeros((nb, nb)), npw.zeros((nb, nb))]
-    i = 1
-    refX = npw.zeros((nbSteps, nb, nb))
-    refY = npw.zeros((nbSteps, nb, nb))
-    refX[0, :, :] = y[0][...]
-    refY[0, :, :] = y[1][...]
-    work = integ.work
-    #  [npw.ones((nb, nb)), npw.zeros((nb, nb)), npw.zeros((nb, nb)),
-    #  npw.ones((nb, nb)), npw.zeros((nb, nb)), npw.zeros((nb, nb))]
-
-    while i < nbSteps:
-        work[:2] = func2D(t, y, work[:2])
-        y = integ(t, y, dtt, result=y)
-        refX[i, :, :] = y[0][...]
-        refY[i, :, :] = y[1][...]
-        t += dtt
-        i += 1
-    errX = 0.0
-    errY = 0.0
-    for i in xrange(nb):
-        for j in xrange(nb):
-            errX = max(errX, (np.abs(refX[:, i, j]
-                                     - np.cos(time_points))).max())
-            errY = max(errY, (np.abs(refY[:, i, j]
-                                     + np.sin(time_points))).max())
-
-    return dtt, errX, errY
-
-
-def test_2D_0(method, odemethod, optim):
-    nbdt = 15
-    ghosts = [0, 0]
-    box2 = pp.Box(2, length=[2.0 * pi, 2 * pi], origin=[0., 0.])
-    topo2 = Cartesian(box2, 1, [nb + 1, nb + 1], ghosts=ghosts)
-    nbSteps = 100
-    errEx = npw.ones(nbdt)
-    errEy = npw.ones(nbdt)
-    dtE = npw.ones(nbdt)
-    errxodespy = npw.ones(nbdt)
-    erryodespy = npw.ones(nbdt)
-    dtodespy = npw.ones(nbdt)
-    lwork = method.getWorkLengths(2)
-    work = [npw.zeros((nb, nb)) for i in xrange(lwork)]
-    for i in xrange(nbdt):
-        # Euler
-        dtE[i], errEx[i], errEy[i] = integrate2D(method(2, work, topo2, func2D,
-                                                        optim=optim),
-                                                 nbSteps)
-        solver = odemethod(func2DODESPY)
-        solver.set_initial_condition([1, 0])
-        dtodespy[i], errxodespy[i], erryodespy[i] = odespy2D(solver, nbSteps)
-        nbSteps += 10
-
-    assert (dtE == dtodespy).all()
-
-    x = np.log10(1. / dtE)
-    plt.figure(2)
-    plt.plot(x, np.log10(errEx), x, np.log10(errEy),
-             x, np.log10(errxodespy), x, np.log10(errxodespy))
-    name = str(method).rpartition('.')[-1][0:-2]
-    print " ==== ", name, " ===="
-    print "Erreur :", errEx, errEy
-    print "Erreur (odespy) :", errxodespy, erryodespy
-    print "dt : ", dtE
-    print "Pente :",\
-        (np.log10(errEx)[-1] - np.log10(errEx)[0]) / (x[-1] - x[0])
-    print "Pente :",\
-        (np.log10(errEy)[-1] - np.log10(errEy)[0]) / (x[-1] - x[0])
-    print "Pente odespy: ",\
-        (np.log10(errxodespy)[-1] - np.log10(errxodespy)[0]) / (x[-1] - x[0])
-    print "Pente odespy: ",\
-        (np.log10(erryodespy)[-1] - np.log10(erryodespy)[0]) / (x[-1] - x[0])
-
-print "\n====================== 1D ======================\n"
-#test_1D_0()
-print "\n====================== 2D ======================\n"
-optim = WITH_GUESS
-test_2D_0(RK4, ode.RK4, optim)
diff --git a/trashed_examples/mainJM.py b/trashed_examples/mainJM.py
deleted file mode 100755
index 0c9c48522e5a47a512923c79792b78832f88afbf..0000000000000000000000000000000000000000
--- a/trashed_examples/mainJM.py
+++ /dev/null
@@ -1,79 +0,0 @@
-#!/usr/bin/env python
-
-import time
-from parmepy.domain.box import Box
-from parmepy.fields.continuous import Field
-from parmepy.operator.advection import Advection
-from parmepy.problem.transport import TransportProblem
-from parmepy.gpu import PARMES_REAL_GPU, PARMES_DOUBLE_GPU
-
-
-def vitesse(x, y, z):
-    vx = 1. + x
-    vy = - x * y
-    vz = x * y * z + 10.
-    return vx, vy, vz
-
-
-def scalaire(x, y, z):
-    if x < 0.5 and y < 0.5 and z < 0.5:
-        return 1.
-    else:
-        return 0.
-
-
-def run():
-    # Parameters
-    nb = 65
-    nbElem = (nb, nb, nb)
-    time_step = 0.02
-    finalTime = 1.
-    outputFilePrefix = './res/RK2_'
-    outputModulo = 0
-
-    t0 = time.time()
-
-    ## Domain
-    box = Box(3, length=[1., 1., 1.], origin=[0., 0., 0.])
-
-    ## Fields
-    scal = Field(domain=box, name='Scalar')
-    velo = Field(domain=box, name='Velocity', is_vector=True)
-    #scal = pp.AnalyticalField(domain=box, name='Scalar')
-    #velo = pp.AnalyticalField(domain=box, formula=vitesse, name='Velocity', is_vector=True)
-
-    ## Operators
-    advec = Advection(velo, scal,
-                      resolutions={velo: nbElem,
-                                   scal: nbElem},
-                      #method='gpu_1k_m4prime',
-                      #method='gpu_1k_m6prime',
-                      #method='gpu_1k_m8prime',
-                      method='gpu_2k_m4prime',
-                      #method='gpu_2k_m6prime',
-                      #method='gpu_2k_m8prime',
-                      #method='scales'
-                      src=['./levelSet3D.cl'],
-                      precision=PARMES_REAL_GPU,
-                      #precision=PARMES_REAL_GPU,
-                      )
-
-    ##Problem
-    pb = TransportProblem([advec])
-
-    ## Setting solver to Problem
-    pb.setUp(finalTime, time_step)
-
-    t1 = time.time()
-    ## Solve problem
-    timings = pb.solve()
-    tf = time.time()
-
-    print "\n"
-    print "Total time : ", tf - t0, "sec (CPU)"
-    print "Init time : ", t1 - t0, "sec (CPU)"
-    print "Solving time : ", tf - t1, "sec (CPU)"
-
-
-if __name__ == "__main__":
-    run()
diff --git a/trashed_examples/mainJM_kernels.cl b/trashed_examples/mainJM_kernels.cl
deleted file mode 100644
index a45672ba4f0cb0a30e5c2292ce4b7ae8cf9e228d..0000000000000000000000000000000000000000
--- a/trashed_examples/mainJM_kernels.cl
+++ /dev/null
@@ -1,55 +0,0 @@
-
-__kernel void initScalar(__global float* values,
-			 __private const float4 min,
-			 __private const float4 size)
-{
-  __private uint ind,ix,iy,iz,nbx,nby,nbz;
-  __private float px,py,pz,s;
-
-  ix = get_global_id(0);
-  iy = get_global_id(1);
-  iz = get_global_id(2);
-  nbx = get_global_size(0);
-  nby = get_global_size(1);
-  nbz = get_global_size(2);
-
-  px = min.x + (float)(ix)*size.x;
-  py = min.y + (float)(iy)*size.y;
-  pz = min.z + (float)(iz)*size.z;
-
-  if ((px < 0.5f) && (py < 0.5f) && (pz < 0.5f))
-    s = 1.0f;
-  else
-    s = 0.0f;
-  // Write
-  ind = iz*nby*nbx + iy*nbx + ix;
-  values[ind] = s;
-}
-
-// velocity field
-__kernel void initVelocity(__global float* values_x,
-			   __global float* values_y,
-			   __global float* values_z,
-			   __private const float4 min,
-			   __private const float4 size
-			   )
-{
-  __private uint ix,iy,iz,nbx,nby,nbz, ind;
-
-  ix = get_global_id(0);
-  iy = get_global_id(1);
-  iz = get_global_id(2);
-  nbx = get_global_size(0);
-  nby = get_global_size(1);
-  nbz = get_global_size(2);
-
-
-  // Write x component
-  ind = iz*nby*nbx + iy*nbx + ix;
-  values_x[ind] = 1.0f;
-  // Write y component
-  values_y[ind] = 1.0f;
-  // Write z component
-  values_z[ind] = 1.0f;
-
-}
diff --git a/trashed_examples/poisson2d.py b/trashed_examples/poisson2d.py
deleted file mode 100755
index d90ccfd3f7246baf1722ae1e5286d97ec3fe6881..0000000000000000000000000000000000000000
--- a/trashed_examples/poisson2d.py
+++ /dev/null
@@ -1,89 +0,0 @@
-#!/usr/bin/python
-
-import parmepy as pp
-import parmepy.f2py
-import numpy as np
-import mpi4py.MPI as MPI
-import math
-import numpy.fft as fft
-
-pi = math.pi
-ppfft = parmepy.f2py.fftw2py
-
-rank = MPI.COMM_WORLD.Get_rank()
-print "Mpi process number ", rank
-
-# ----------- A 2d problem -----------
-print " ========= Start test for 2D poisson problem ========="
-
-# Physical Domain description
-Lx = Ly = 2 * pi
-myDomain2d = pp.Box(dimension=2, length=[Lx, Ly], origin=[0., 0.])
-resolution2d = np.asarray((129, 65))
-ncells = resolution2d - 1
-hx = Lx / ncells[0]
-hy = Ly / ncells[1]
-x = np.arange(ncells[0], dtype='float64') * hx
-y = np.arange(ncells[1], dtype='float64') * hy
-Y, X = np.meshgrid(y, x)
-
-############ REF ##############
-omegaRef = np.zeros((ncells), dtype='complex128', order='Fortran')
-omegaRef[:, :] = 4 * pi ** 2 * np.cos(2 * pi / Lx * X) * \
-    np.sin(2. * pi / Ly * Y) * (1. / Lx ** 2 + 1. / Ly ** 2)
-refx1 = 2. * pi / Ly * np.cos(2 * pi / Lx * X) * np.cos(2 * pi / Ly * Y)
-refy1 = 2. * pi / Lx * np.sin(2 * pi / Lx * X) * np.sin(2 * pi / Ly * Y)
-kx = np.arange(ncells[0], dtype='complex128')
-ky = np.arange(ncells[1], dtype='complex128')
-midIndX = math.ceil(ncells[0] / 2.)
-midIndY = math.ceil(ncells[1] / 2.)
-kx[-midIndX + 1:] = -kx[midIndX - 1:0:-1]
-ky[-midIndY + 1:] = -ky[midIndY - 1:0:-1]
-kx = 2 * pi * kx / Lx
-ky = 2 * pi * ky / Ly
-
-kxX, kyY = np.meshgrid(ky, kx)
-coeff_tmp = kxX ** 2 + kyY ** 2
-coeff = coeff_tmp.copy()
-coeff[1:, :] = 1j / coeff_tmp[1:, :]
-coeff[0, 1:] = 1j / coeff_tmp[0, 1:]
-coeff[0, 0] = 0.0
-res = fft.fft2(omegaRef)
-
-tvx = (coeff.copy() * ky) * res
-tvy = -(coeff.copy().T * kx).T * res
-
-velx = fft.ifft2(tvx)
-vely = fft.ifft2(tvy)
-
-print "test 0", np.allclose(velx, refx1)
-print "test 0", np.allclose(vely, refy1)
-
-##########################
-
-localres, localoffset = ppfft.init_fftw_solver(resolution2d,
-                                               myDomain2d.length)
-
-print "topo locale :", localres, localoffset
-
-
-x = np.arange(localoffset[0], localres[0] + localoffset[0],
-              dtype='float64') * hx
-y = np.arange(localoffset[1], localres[1] + localoffset[1],
-              dtype='float64') * hy
-Y, X = np.meshgrid(y, x)
-
-omega = np.zeros((localres), order='Fortran', dtype='float64')
-vx = np.zeros((localres), order='Fortran', dtype='float64')
-vy = np.zeros((localres), order='Fortran', dtype='float64')
-omega[:, :] = 4 * pi ** 2 * np.cos(2 * pi / Lx * X) * \
-    np.sin(2. * pi / Ly * Y) * (1. / Lx ** 2 + 1. / Ly ** 2)
-refx = 2. * pi / Ly * np.cos(2 * pi / Lx * X) * np.cos(2 * pi / Ly * Y)
-refy = 2. * pi / Lx * np.sin(2 * pi / Lx * X) * np.sin(2 * pi / Ly * Y)
-
-vx, vy = ppfft.solve_poisson_2d(omega, vx, vy)
-
-print np.allclose(refx, vx)
-print np.allclose(refy, vy)
-
-ppfft.clean_fftw_solver(myDomain2d.dimension)
diff --git a/trashed_examples/poisson3d.py b/trashed_examples/poisson3d.py
deleted file mode 100755
index 9e2e85b27b1b7ee2cdc28b06d45c9e22641c44f6..0000000000000000000000000000000000000000
--- a/trashed_examples/poisson3d.py
+++ /dev/null
@@ -1,108 +0,0 @@
-#!/usr/bin/python
-import parmepy as pp
-import parmepy.f2py
-import numpy as np
-import mpi4py.MPI as MPI
-import math as m
-from parmepy.mpi.topology import Cartesian
-
-#from numpy import linalg as LA
-
-pi = m.pi
-ppfft = parmepy.f2py.fftw2py
-
-rank = MPI.COMM_WORLD.Get_rank()
-print "Mpi process number ", rank
-
-# ----------- A 3d problem -----------
-print " ========= Start test for 3D poisson problem ========="
-
-# Physical Domain description
-Lx = Ly = Lz = 2 * pi
-myDomain3d = pp.Box(dimension=3, length=[Lx, Ly, Lz], origin=[0., 0., 0.])
-resolution3d = np.asarray((65, 65, 65))
-ncells = resolution3d - 1
-hx = Lx / ncells[0]
-hy = Ly / ncells[1]
-hz = Lz / ncells[2]
-print "INIT ...."
-localres, localoffset = ppfft.init_fftw_solver(resolution3d,
-                                               myDomain3d.length)
-
-from parmepy.mpi import main_size
-topodim = np.ones((3))
-topodim[-1] = main_size
-print topodim
-print "make topo ..."
-topoPoisson = Cartesian.withPrecomputedResolution(myDomain3d, topodim, resolution3d,
-                                                  localres=localres, offset=localoffset)
-
-
-
-for topo in myDomain3d.topologies.values():
-    print topo
-
-#print "topo locale :", localres, localoffset
-
-#assert np.equal(localres, topoPoisson.localGridResolution).all()
-#assert np.equal(localoffset,topoPoisson.G_start).all()
-
-exit
-
-x = np.arange(localoffset[0], localres[0] + localoffset[0],
-              dtype='float64') * hx
-y = np.arange(localoffset[1], localres[1] + localoffset[1],
-              dtype='float64') * hy
-z = np.arange(localoffset[2], localres[2] + localoffset[2],
-              dtype='float64') * hz
-
-############ REF ##############
-omega_x = np.zeros((localres), dtype='float64', order='Fortran')
-omega_y = np.zeros((localres), dtype='float64', order='Fortran')
-omega_z = np.zeros((localres), dtype='float64', order='Fortran')
-vx = np.zeros((localres), dtype='float64', order='Fortran')
-vy = np.zeros((localres), dtype='float64', order='Fortran')
-vz = np.zeros((localres), dtype='float64', order='Fortran')
-ref_x = np.zeros((localres), dtype='float64', order='Fortran')
-ref_y = np.zeros((localres), dtype='float64', order='Fortran')
-ref_z = np.zeros((localres), dtype='float64', order='Fortran')
-
-cden = 4 * pi ** 2 * (Ly ** 2 * Lz ** 2 + Lx ** 2 * Lz ** 2 +
-                      Lx ** 2 * Ly ** 2) / (Lx ** 2 * Ly ** 2 * Lz ** 2)
-cx = 2 * pi / Lx
-cy = 2 * pi / Ly
-cz = 2 * pi / Lz
-
-# Initialize vorticity (This init should be improved or done if C or Fortran)
-for k in range(localres[2]):
-    for j in range(localres[1]):
-        for i in range(localres[0]):
-            omega_x[i, j, k] = cden * (m.sin(cx * x[i]) *
-                                       m.sin(cy * y[j]) * m.cos(cz * z[k]))
-            omega_y[i, j, k] = cden * (m.cos(cx * x[i]) *
-                                       m.sin(cy * y[j]) * m.sin(cz * z[k]))
-            omega_z[i, j, k] = cden * (m.cos(cx * x[i]) *
-                                       m.cos(cy * y[j]) * m.sin(cz * z[k]))
-            ref_x[i, j, k] = -cy * (m.cos(cx * x[i]) * m.sin(cy * y[j])
-                                    * m.sin(cz * z[k])) - \
-                cz * (m.cos(cx * x[i]) * m.sin(cy * y[j]) * m.cos(cz * z[k]))
-
-            ref_y[i, j, k] = -cz * (m.sin(cx * x[i]) * m.sin(cy * y[j])
-                                    * m.sin(cz * z[k])) + \
-                cx * (m.sin(cx * x[i]) * m.cos(cy * y[j]) * m.sin(cz * z[k]))
-
-            ref_z[i, j, k] = -cx * (m.sin(cx * x[i]) * m.sin(cy * y[j])
-                                    * m.sin(cz * z[k])) - \
-                cy * (m.sin(cx * x[i]) * m.cos(cy * y[j]) * m.cos(cz * z[k]))
-
-##########################
-
-print 'start fftw solver ...'
-start = MPI.Wtime()
-vx, vy, vz = ppfft.solve_poisson_3d(omega_x, omega_y, omega_z, vx, vy, vz)
-print "elasped time =", MPI.Wtime() - start
-print np.allclose(ref_x, vx)
-print np.allclose(ref_y, vy)
-print np.allclose(ref_z, vz)
-
-ppfft.clean_fftw_solver(myDomain3d.dimension)
diff --git a/trashed_examples/postNSBluff.py b/trashed_examples/postNSBluff.py
deleted file mode 100644
index 1ca239dea031b916005a453605e7a609167a862c..0000000000000000000000000000000000000000
--- a/trashed_examples/postNSBluff.py
+++ /dev/null
@@ -1,67 +0,0 @@
-import scitools.filetable as ft
-import matplotlib.pyplot as plt
-
-
-def plotEnergyEnstrophy(filename):
-    ff = open(filename)
-    data = ft.read(ff)
-    time = data[:, 0]
-    energy = data[:, 1]
-    enstrophy = data[:, 2]
-    plt.subplot(211)
-    plt.plot(time, energy, '+-', label='energy')
-    plt.xlabel('time')
-    plt.subplot(212)
-    plt.plot(time, enstrophy, '+-', label='enstrophy')
-    plt.xlabel('time')
-    plt.legend()
-    plt.suptitle('Results from simu in:' + filename)
-    plt.show()
-
-
-def plotDragAndLift(filename):
-    ff = open(filename)
-    data = ft.read(ff)
-    time = data[:, 0]
-    drag = data[:, 1]
-    lift = data[:, 2:]
-    plt.subplot(211)
-    plt.plot(time, drag, '+-', label='drag')
-    plt.xlabel('time')
-    plt.subplot(212)
-    for i in xrange(lift.shape[1]):
-        plt.plot(time, lift[:, i], '+-', label='lift' + str(i))
-    plt.xlabel('time')
-    plt.legend()
-    plt.suptitle('Results from simu in:' + filename)
-    plt.show()
-
-
-def plotReprojection(filename):
-    ff = open(filename)
-    data = ft.read(ff)
-    time = data[:, 0]
-    crit = data[:, 1]
-    counter = data[:, 2]
-    d1 = data[:, 3]
-    d2 = data[:, 4]
-    plt.subplot(221)
-    plt.plot(time, crit, '+-', label='criterion')
-    plt.xlabel('time')
-    plt.subplot(222)
-    plt.plot(time, d1, '+-', label='d1')
-    plt.xlabel('time')
-    plt.subplot(223)
-    plt.plot(time, d2, '+-', label='d2')
-    plt.xlabel('time')
-    plt.legend()
-    plt.suptitle('Results from simu in:' + filename)
-    plt.show()
-
-plt.ioff()
-direc = 'NSDebug'
-nprocs = 8
-fileroot = direc + '/p' + str(nprocs) + '/'
-plotEnergyEnstrophy(fileroot + 'energy_enstrophy')
-plotDragAndLift(fileroot + 'drag_and_lift')
-
diff --git a/trashed_examples/postTaylor.py b/trashed_examples/postTaylor.py
deleted file mode 100644
index c97150ef8ad95a6eb74c78e5a8dc9bdcc818dd07..0000000000000000000000000000000000000000
--- a/trashed_examples/postTaylor.py
+++ /dev/null
@@ -1,37 +0,0 @@
-import scitools.filetable as ft
-import matplotlib.pyplot as plt
-import glob
-
-## file1 = open("resp1/energy.dat")
-## ener = ft.read(file1)
-## time = ener[:, 0]
-## energy = ener[:, 1]
-## enstrophy = ener[:, 2]
-## #ratio = ener[:,3]
-## #dedt = ener[:,4]
-## #nuS = ener[:,5]
-## #nuES = ener[:,6]
-VISCOSITY = 1. / 1600.
-## nuS_calc = VISCOSITY * enstrophy[2:]
-import numpy as np
-# multiproc #
-filepath = './res_'
-plt.ioff()
-plt.figure(1)
-
-error = []
-
-filelist = glob.glob(filepath + '*')
-for f in filelist:
-    filename = open(f + '/ener')
-    data = ft.read(filename)
-    time = data[:, 0]
-    energy = data[:, 1]
-    enstrophy = data[:, 2]
-    plt.plot(time, energy, '+-', label=f)
-    error.append(np.sum(energy))
-
-plt.legend()
-plt.show()
-
-print error
diff --git a/trashed_examples/testControlBox.py b/trashed_examples/testControlBox.py
deleted file mode 100644
index 083a5e615c5887381e11eab4fe38b05625e7be37..0000000000000000000000000000000000000000
--- a/trashed_examples/testControlBox.py
+++ /dev/null
@@ -1,224 +0,0 @@
-import numpy as np
-from parmepy.domain.obstacle.controlBox import ControlBox
-import parmepy as pp
-import parmepy.mpi as mpi
-from parmepy.domain.obstacle.planes import SubSpace, SubPlane
-from parmepy.domain.obstacle.sphere import Sphere
-
-from parmepy.operator.monitors.printer import Printer
-from parmepy.problem.simulation import Simulation
-from parmepy.constants import HDF5
-from parmepy.operator.monitors.compute_forces import DragAndLift
-pi = np.pi
-cos = np.cos
-sin = np.sin
-
-
-nb = 129
-
-Lx = Ly = Lz = 2
-dom = pp.Box(dimension=3, length=[Lx, Ly, Lz], origin=[-1., -1., -1.])
-dom2 = pp.Box(dimension=2, length=[Lx, Ly], origin=[-1., -1.])
-resol3D = [nb, nb, nb]
-resol2D = [nb, nb]
-
-
-## Function to compute TG velocity
-def computeVel(res, x, y, z, t):
-    res[0][...] = sin(x) * cos(y) * cos(z)
-    res[1][...] = - cos(x) * sin(y) * cos(z)
-    res[2][...] = 0.
-    return res
-
-
-## Function to compute reference vorticity
-def computeVort(res, x, y, z, t):
-    res[0][...] = cos(x) * sin(y) * sin(z)
-    res[1][...] = sin(x) * cos(y) * sin(z)
-    res[2][...] = 2. * sin(x) * sin(y) * cos(z)
-    return res
-
-# 2D Field
-scal2 = pp.Field(domain=dom2)
-
-# 3D Field
-scal3 = pp.Field(domain=dom, name='s1')
-sc3 = pp.Field(domain=dom, name='s2')
-velo = pp.Field(domain=dom, formula=computeVel, is_vector=True, name='v1')
-vorti = pp.Field(domain=dom, formula=computeVort, is_vector=True, name='w1')
-boxl = np.asarray([0.8, .8, .7])
-boxl = np.asarray([1., 1., 1.])
-boxpos = np.asarray([-0.5, -0.5, -0.5])
-
-# 2D control box
-
-#cb2 = ControlBox(dom2, boxpos[:2], boxl[:2])
-ng = 2
-topo2 = mpi.topology.Cartesian(dom2, 2, resol2D, ghosts=[ng, ng])
-
-# 3D Control box
-cb1 = ControlBox(dom, boxpos, boxl)
-cb2 = ControlBox(dom, boxpos, boxl)
-topo3 = mpi.topology.Cartesian(dom, 3, resol3D, ghosts=[ng, ng, ng])
-
-# init fields
-scal2.discretize(topo2)
-scal3.discretize(topo3)
-sc3.discretize(topo3)
-velo.discretize(topo3)
-vorti.discretize(topo3)
-velo.initialize(topo=topo3)
-vorti.initialize(topo=topo3)
-pref2 = './res2_' + str(topo2.size) + '/cb'
-pref3 = './res3_' + str(topo2.size) + '/cb'
-printer2D = Printer([scal2], topo2, prefix=pref2, frequency=1)
-printer2D.setUp()
-#printer2HDF5 = Printer([scal2], topo2, frequency=1, formattype=HDF5)
-#printer2HDF5.setUp()
-
-sd2 = scal2.discreteFields[topo2].data
-sd3 = scal3.discreteFields[topo3].data
-sd4 = sc3.discreteFields[topo3].data
-wd = vorti.discreteFields[topo3].data
-printer3D = Printer([scal3], topo3, prefix=pref3, frequency=1)
-printer3D.setUp()
-#printerHDF5 = Printer([scal3], topo3, frequency=1, formattype=HDF5)
-#printerHDF5.setUp()
-simulation = Simulation()
-
-
-sd2[0][...] = 1.
-sd3[0][...] = 1.
-sd4[0][...] = 1.
-
-#printer3D.apply(simulation)
-
-topo2.comm.barrier()
-
-#ib2 = cb2.discretize(topo2)
-
-ib3 = cb1.discretize(topo3)
-ib33 = cb2.discretize(topo3)
-slice3 = cb1.slices[topo3]
-
-topo2.comm.barrier()
-sl2 = []
-sl3 = []
-
-#for s in cb2.upperS:
-##     sl2.append(s)
-## for s in cb2.lowerS:
-##     sl2.append(s)
-for s in cb1.upperS:
-    sl3.append(s)
-for s in cb1.lowerS:
-    sl3.append(s)
-
-## Subspaces
-#subsp = SubSpace(dom2, [0, 1], [-0.8, -0.8], [0.5, 0.5])
-#subsp3 = SubSpace(dom, [0, 1, 0], [-0.8, -0.8, -0.8], [0.5, 0.5, 1.0])
-#ind = subsp3.discretize(topo3)
-
-## Subplanes
-
-#sp2 = SubPlane(dom2, [0, 1], [-0.8, -0.8], [0.5, 0.5])
-#sp3 = SubPlane(dom, [0, 1, 0], [-0.75, -0.75, -0.75], [0.5, 0.5, 1.0])
-#ind = sp3.discretize(topo3)
-
-
-integ = cb1.integrate(scal3, topo3)
-integ2 = cb1.integrate(scal3, topo3, useSlice=False)
-integ3 = cb2.integrate(scal3, topo3, useSlice=False)
-
-if topo3.rank == 0:
-    print "integ = ", integ, integ2, integ3
-
-#print topo3.rank, cb.slices
-
-#sd3[0][ib3] = 0.0
-#sd4[0][slice3] = 0.0
-
-#printer3D.apply(simulation)
-cc = topo3.mesh.coords
-
-## for s in sl3:
-##  #   print s.slices
-##     normal = s.normal
-##     ind = np.where(s.normal != 0)[0]
-##     resup = cb.integrateOnSurface(scal3, topo3, normalDir=ind, up=True)
-##     resdown = cb.integrateOnSurface(scal3, topo3, normalDir=ind, up=False)
-##     print topo3.rank, resup, resdown
-##     resup = cb.integrateOnSurface(scal3, topo3, normalDir=ind, up=True, useSlice=False)
-##     resdown = cb.integrateOnSurface(scal3, topo3, normalDir=ind, up=False, useSlice=False)
-##     print 'v2', topo3.rank, resup, resdown
-##     #print topo3.rank, ind, s.slices[topo3]
-##     #print topo3.rank, ind, cc[2].flat[s.slices[topo3][2]]
-
-    
-##     ## if topo3.rank == 0:
-##     ##     print 'int ...', topo3.rank, resup, resdown
-##     #sd3[0][s.slices[topo3]] = 0.
-
-## integ = cb2.integrate(scal2, topo2)
-
-## print "integ = ", integ
-
-## for s in sl2:
-##     normal = s.normal
-##     ind = np.where(s.normal != 0)[0]
-##     resup = cb2.integrateOnSurface(scal2, topo2, normalDir=ind, up=True)
-##     resdown = cb2.integrateOnSurface(scal2, topo2, normalDir=ind, up=False)
-
-##     if topo2.rank == 0:
-##         print 'int ...', topo2.rank, resup, resdown
-sphere = Sphere(dom, position=[0., 0., 0.], radius=0.3)
-sphere.discretize(topo3)
-
-## wd[0][sphere.ind[topo3][0]] *= 1e7
-#wd[1][sphere.ind[topo3][0]] = 1e6
-#print 
-## wd[2][sphere.ind[topo3][0]] *= 1e5
-
-print wd[1].max()
-print wd[1][sphere.ind[topo3]].max()
-nu = 0.3
-dr = DragAndLift(velo, vorti, nu, topo3, cb1, filename=pref3 + 'forces.dat')
-dr2 = DragAndLift(velo, vorti, nu, topo3, cb2, obstacles=[sphere])
-dr.discretize()
-dr2.discretize()
-## #import parmepy.tools.numpywrappers as npw
-## #res = npw.zeros(3)
-## #res = dr._integrateOnBox(res)
-## #print 'forces loc ...', res
-
-## #resok= topo3.comm.allreduce(res)
-
-#print cb.coords[topo3]
-
-for i in xrange(10):
-    dr.apply(simulation)
-    simulation.advance()
-    
-dr2.apply(simulation)
-sd3[0][...] = 0.0
-sd3[0][cb1.slices[topo3]] = 1.
-sd3[0][sphere.ind[topo3]] = 2.
-
-printer3D.apply(simulation)
-
-print 'forces 1 ...', dr.force
-#simulation.advance()
-#wd[0][...] +=12.3
-#dr.apply(simulation)
-
-print 'forces 2...', dr2.force
-
-## #
-## printer3D.apply(simulation)
-## printer2D.apply(simulation)
-## #printerHDF5.apply(simulation)
-
-## print topo3.rank, cb.mesh[topo3]
-
-## print 'full', topo3.rank, topo3.mesh
-## print topo3.mesh.iCompute
diff --git a/trashed_examples/testCurl.py b/trashed_examples/testCurl.py
deleted file mode 100644
index 0a08866abc350b39154f6411dea6a20914b08b81..0000000000000000000000000000000000000000
--- a/trashed_examples/testCurl.py
+++ /dev/null
@@ -1,122 +0,0 @@
-import numpy as np
-from parmepy.domain.obstacle.controlBox import ControlBox
-import parmepy as pp
-import parmepy.mpi as mpi
-from parmepy.domain.obstacle.planes import SubSpace, SubPlane
-from parmepy.operator.monitors.printer import Printer
-from parmepy.problem.simulation import Simulation
-from parmepy.constants import HDF5
-import numpy as np
-## pi constant
-pi = np.pi
-cos = np.cos
-sin = np.sin
-
-nb = 129
-
-Lx = Ly = Lz = 2 * pi
-dom = pp.Box(dimension=3, length=[Lx, Ly, Lz], origin=[-pi, -pi, -pi])
-resol3D = [nb, nb, nb]
-
-from parmepy.methods_keys import Scales, TimeIntegrator, Interpolation,\
-    Remesh, Support, Splitting, dtCrit, SpaceDiscretisation, GhostUpdate
-from parmepy.methods import FD_C_4
-
-## Function to compute TG velocity
-def computeVel(res, x, y, z, t):
-    res[0][...] = sin(x) * cos(y) * cos(z)
-    res[1][...] = - cos(x) * sin(y) * cos(z)
-    res[2][...] = 0.
-    return res
-
-
-## Function to compute reference vorticity
-def computeVort(res, x, y, z, t):
-    res[0][...] = - cos(x) * sin(y) * sin(z)
-    res[1][...] = - sin(x) * cos(y) * sin(z)
-    res[2][...] = 2. * sin(x) * sin(y) * cos(z)
-    return res
-
-velo = pp.Field(domain=dom, formula=computeVel, name='Velocity', is_vector=True)
-velo2 = pp.Field(domain=dom, formula=computeVel, name='Veloty2', is_vector=True)
-vortifft = pp.Field(domain=dom, name='vortifft', is_vector=True)
-vortiFD = pp.Field(domain=dom, is_vector=True)
-vortiref = pp.Field(domain=dom, name='vortiref', formula=computeVort, is_vector=True)
-vortiref2 = pp.Field(domain=dom, name='vortiref2', formula=computeVort, is_vector=True)
-
-from parmepy.operator.differential import Curl
-from parmepy.f2py import fftw2py
-topo3 = mpi.topology.Cartesian(dom, 3, resol3D, ghosts=[2, 2, 2])
-
-curlfft = Curl(velo, vortifft, resolutions={velo: resol3D, vortifft: resol3D},
-               method={SpaceDiscretisation: fftw2py, GhostUpdate: False})
-
-curlFD = Curl(velo2, vortiFD, resolutions={velo2: resol3D, vortiFD: resol3D},
-              method={SpaceDiscretisation: FD_C_4, GhostUpdate: True},
-              topo=topo3)
-
-curlfft.discretize()
-curlFD.discretize()
-
-topofft = curlfft.discreteFields[curlfft.outvar].topology
-vortiref.discretize(topofft)
-vortiref2.discretize(topo3)
-velo.initialize(topo=topofft)
-velo2.initialize(topo=topo3)
-vortiref.initialize(topo=topofft)
-vortiref2.initialize(topo=topo3)
-curlfft.setUp()
-curlFD.setUp()
-
-wdfft = vortifft.discreteFields[topofft].data
-wFD = vortiFD.discreteFields[topo3].data
-wREF1 = vortiref.discreteFields[topofft].data
-wREF2 = vortiref2.discreteFields[topo3].data
-
-vd1 = velo.discreteFields[topofft].data
-vd2 = velo2.discreteFields[topo3].data
-
-from parmepy.constants import VTK
-printer = Printer(variables=[vortiref, vortifft],
-                  topo=topofft,
-                  frequency=1,
-                  prefix='./testCurl',
-                  formattype=VTK)
-printer.setUp()
-printer2 = Printer(variables=[vortiFD, vortiref2],
-                   topo=topo3,
-                   frequency=1,
-                   prefix='./testCurl2',
-                   formattype=VTK)
-
-printer2.setUp()
-simulation = Simulation()
-
-curlfft.apply(simulation)
-curlFD.apply(simulation)
-
-printer.apply(simulation)
-printer2.apply(simulation)
-
-ind = topo3.mesh.compute_index
-indfft = topofft.mesh.compute_index
-
-
-for i in xrange(3):
-    print vortifft.norm()
-    print vortiFD.norm()
-    print vortiref.norm()
-    print vortiref2.norm()
-    ## print np.allclose(wdfft[i][...], wFD[i][ind])
-    ## print np.allclose(wdfft[i][...], wREF1[i])
-    ## print np.allclose(wFD[i][ind], wREF2[i][ind])
-    ## print np.allclose(wREF1[i][indfft], wREF2[i][ind])
-    ## print np.sum(wdfft[i][...] - wFD[i][ind])
-    ## print 'max :', np.abs((wdfft[i][indfft] - wFD[i][ind])).max()
-    ## print 'max fft/ref1 :', np.abs((wdfft[i] - wREF1[i])).max()
-    ## print 'max fft/ref11 :', np.abs((wdfft[i][indfft] - wREF1[i][indfft])).max()
-    ## print 'max fft/ref11 :', np.abs((wdfft[i][...] - wREF1[i][...])).max()
-    ## print 'max fft/ref2 :', np.abs((wdfft[i][indfft] - wREF2[i][ind])).max()
-    ## print 'max FD/ref:', np.abs((wFD[i][ind] - wREF2[i][ind])).max()
-    ## print 'max ref/ref:', np.abs((wREF1[i][indfft] - wREF2[i][ind])).max()
-
diff --git a/trashed_examples/testDiffusion.py b/trashed_examples/testDiffusion.py
deleted file mode 100755
index 5e485e004d9252b92ae1527735c1f594fa52de68..0000000000000000000000000000000000000000
--- a/trashed_examples/testDiffusion.py
+++ /dev/null
@@ -1,104 +0,0 @@
-#!/usr/bin/python
-import parmepy as pp
-from parmepy.operator.diffusion import Diffusion
-from parmepy.operator.poisson import Poisson
-from parmepy.operator.monitors.energy_enstrophy import Energy_enstrophy
-from parmepy.problem.simulation import Simulation
-import numpy as np
-import math
-pi = math.pi
-sin = np.sin
-cos = np.cos
-
-## Physical Domain description
-dim = 3
-LL = 2 * pi * np.ones((dim))
-dom = pp.Box(dimension=dim, length=LL)
-resol = [65, 65, 65]
-
-
-## Fields
-def computeVel(x, y, z):
-#    # --------Taylor Green------------
-    vx = np.sin(x) * np.cos(y) * np.cos(z)
-    vy = - np.cos(x) * np.sin(y) * np.cos(z)
-    vz = 0.
-    return vx, vy, vz
-
-velocity = pp.Field(domain=dom, formula=computeVel,
-                    is_vector=True, name='Velocity')
-
-# formula to compute initial vorticity field
-coeff = 4 * pi ** 2 * (LL[1] ** 2 * LL[2] ** 2 + LL[0] ** 2 * LL[2] ** 2 +
-                       LL[0] ** 2 * LL[1] ** 2) / (LL[0] ** 2 * LL[1] ** 2
-                                                   * LL[2] ** 2)
-
-cc = 2 * pi / LL
-
-
-def computeVort(x, y, z):
-    # --------Taylor Green------------
-    wx = - np.cos(x) * np.sin(y) * np.sin(z)
-    wy = - np.sin(x) * np.cos(y) * np.sin(z)
-    wz = 2. * np.sin(x) * np.sin(y) * np.cos(z)
-    return wx, wy, wz
-
-vorticity = pp.Field(domain=dom, formula=computeVort,
-                     name='Vorticity', is_vector=True)
-
-
-## Definition of the Diffusion and Poisson operators
-output = 'energy.dat'
-nu = 1e-3
-finalTime = 1.0
-dt = 0.01
-restartTime = 0.5
-filenameVel = 'dump_velo_T=_'
-filenameVel += str(restartTime)
-
-filenameVort = 'dump_vort_T=_'
-filenameVort += str(restartTime)
-
-diffusion = Diffusion(vorticity, resolution=resol, viscosity=nu)
-
-poisson = Poisson(velocity, vorticity, resolutions={velocity: resol,
-                                                    vorticity: resol})
-
-## Definition of monitor
-monitor = Energy_enstrophy(velocity, vorticity,
-                           resolutions={velocity: resol,
-                                        vorticity: resol},
-                           viscosity=nu,
-                           frequency=1,
-                           prefix=output)
-
-## Definition of simulation parameters
-tini = restartTime
-niter = (finalTime - tini) / dt
-simu = Simulation(start=tini, end=finalTime, nbiter=niter)
-
-diffusion.setUp()
-poisson.setUp()
-monitor.setUp()
-
-velocity.initialize()
-vorticity.initialize()
-print "pre ", vorticity.norm()
-velocity.load('dump_velo_T=_0.5')
-vorticity.load('dump_vort_T=_0.5')
-
-while simu.time <= finalTime:
-    print 'time, niter     : ', simu.time, simu.current_iteration
-#    if (simu.current_iteration  == 50):
-#        print 'dump fields ...'
-#        velocity.dump(filenameVel)
-#        vorticity.dump(filenameVort)
-#        print "norm restart ", vorticity.norm()
-    diffusion.apply(simu)
-    poisson.apply(simu)
-    monitor.apply(simu)
-    simu.time += simu.time_step
-    simu.current_iteration += 1
-
-
-print "post ", vorticity.norm()
diff --git a/trashed_examples/testInit.py b/trashed_examples/testInit.py
deleted file mode 100644
index fb38ac00b85232daf499374cf0a6acfcd98d7849..0000000000000000000000000000000000000000
--- a/trashed_examples/testInit.py
+++ /dev/null
@@ -1,94 +0,0 @@
-import numpy as np
-from parmepy.domain.obstacle.controlBox import ControlBox
-import parmepy as pp
-import parmepy.mpi as mpi
-from parmepy.domain.obstacle.planes import SubSpace, SubPlane
-from parmepy.domain.obstacle.sphere import Sphere
-
-from parmepy.operator.monitors.printer import Printer
-from parmepy.problem.simulation import Simulation
-from parmepy.constants import HDF5
-from parmepy.operator.monitors.compute_forces import DragAndLift
-pi = np.pi
-cos = np.cos
-sin = np.sin
-
-
-nb = 33
-
-Lx = Ly = Lz = 2
-dom = pp.Box(dimension=3, length=[Lx, 2*Ly, 2*Lz], origin=[-1., 0., 3.])
-dom2 = pp.Box(dimension=2, length=[Lx, Ly], origin=[-1., -1.])
-resol3D = [17, 33, 45]
-resol2D = [nb, nb]
-
-
-## Function to compute TG velocity
-def computeVel(res, x, y, z, t):
-    res[0][...] = x # sin(x) * cos(y) * cos(z)
-    res[1][...] = cos(y) #)- cos(x) * sin(y) * cos(z)
-    res[2][...] = 0.
-    return res
-
-
-## Function to compute reference vorticity
-def computeVort(res, x, y, z, t):
-    res[0][...] = cos(x) * sin(y) * sin(z)
-    res[1][...] = sin(x) * cos(y) * sin(z)
-    res[2][...] = 2. * sin(x) * sin(y) * cos(z)
-    return res
-
-# 2D Field
-scal2 = pp.Field(domain=dom2)
-
-# 3D Field
-scal3 = pp.Field(domain=dom, name='s1')
-sc3 = pp.Field(domain=dom, name='s2')
-velo = pp.Field(domain=dom, formula=computeVel, is_vector=True, name='v1')
-vorti = pp.Field(domain=dom, formula=computeVort, is_vector=True, name='w1')
-
-ng = 2
-simulation = Simulation()
-
-from parmepy.operator.poisson import Poisson
-poisson = Poisson(velo, vorti, resolutions={velo: resol3D,
-                                            vorti: resol3D})
-
-poisson.discretize()
-
-topofft = poisson.discreteFields[poisson.vorticity].topology
-
-topo3 = mpi.topology.Cartesian(dom, 3, resol3D, ghosts=[ng, ng, ng])
-
-pref3 = './res3_' + str(topo3.size) + '/v'
-
-# init fields
-scal3.discretize(topo3)
-sc3.discretize(topo3)
-velo.discretize(topo3)
-vorti.discretize(topo3)
-vorti.initialize(topo=topo3)
-sd3 = scal3.discreteFields[topo3].data
-wd = vorti.discreteFields[topo3].data
-
-printer3D = Printer([velo], topo3, prefix=pref3, frequency=1)
-printer3D.setUp()
-printerHDF5 = Printer([velo], topo3, frequency=1, prefix=pref3,
-                      formattype=HDF5)
-printerHDF5.setUp()
-preffft = './res3_fft' + str(topofft.size) + '/v'
-printerFFT = Printer([velo], topofft, prefix=preffft, frequency=1)
-printerFFT.setUp()
-
-velo.initialize(topo=topofft)
-velo.initialize(topo=topo3)
-
-printerFFT.apply(simulation)
-printer3D.apply(simulation)
-printerHDF5.apply(simulation)
-vd = velo.discreteFields[topo3].data
-vdfft = velo.discreteFields[topofft].data
-
-
-#for i in xrange(3):
-#    print np.allclose(vd[i], vdfft[i])
diff --git a/trashed_examples/testOperator.py b/trashed_examples/testOperator.py
deleted file mode 100644
index a2597f8e9693ef1b5a871fc9f05c6c95bbf94b12..0000000000000000000000000000000000000000
--- a/trashed_examples/testOperator.py
+++ /dev/null
@@ -1,63 +0,0 @@
-## Tests of the creation and initialisation of parmepy operators.
-## This is to be include later into some 'non-regression' tests.
-import parmepy as pp
-import math
-from parmepy.fields.continuous import Field
-from parmepy.operator.analytic import Analytic
-from parmepy.operator.stretching import Stretching
-from parmepy.operator.advection import Advection
-from parmepy.operator.poisson import Poisson
-from parmepy.operator.redistribute import Redistribute
-pi = math.pi
-
-
-def vitesse(x, y, z, t, dt, ite):
-    vx = x
-    vy = y
-    vz = z
-    return vx, vy, vz
-
-
-###
-# Physical Domain description
-Lx = Ly = Lz = 2 * pi
-dom = pp.Box(dimension=3, length=[Lx, Ly, Lz])
-resol1 = [9, 9, 9]
-resol2 = [5, 5, 5]
-
-## Fields
-scal = Field(domain=dom, name='Scalar')
-velo = Field(domain=dom, name='Velocity', is_vector=True)
-vort = Field(domain=dom, name='Vorticity', is_vector=True)
-
-
-
-## Analytic operator
-vel = Analytic(velo, formula=vitesse,
-               resolutions={velo: resol1})
-vel.setUp()
-
-## Advection operator
-advec = Advection(velo, scal, resolutions={velo: resol1, scal: resol2},
-                  method='scales')
-advec.setUp()
-
-## Poisson operator
-poisson = Poisson(velo, vort, resolutions={velo: resol2, vort: resol2})
-poisson.setUp()
-
-## Stretching operator
-stretch = Stretching(velo, vort, resolutions={velo: resol2, vort: resol2})
-stretch.setUp()
-
-## Data redistribution for velocity between Stretching and Advection
-redistr = Redistribute(velo, stretch, advec)
-redistr.setUp()
-
-
-
-print scal
-print velo
-
-print velo.discreteFields.values()[0]
-print velo.discreteFields.keys()[0]
diff --git a/trashed_examples/testPenal.py b/trashed_examples/testPenal.py
deleted file mode 100644
index f8c481a2fb494d81d1b0ad8c45fd5121601c779c..0000000000000000000000000000000000000000
--- a/trashed_examples/testPenal.py
+++ /dev/null
@@ -1,45 +0,0 @@
-import numpy as np
-import parmepy as pp
-import math
-from parmepy.fields.continuous import Field
-from parmepy.operator.analytic import Analytic
-from parmepy.domain.obstacle.sphere import Sphere
-pi = math.pi
-from parmepy.operator.penalization import Penalization
-
-
-def vitesse(x, y, z):
-    return x, y, z
-
-nb = 9
-
-Lx = Ly = Lz = 2
-dom = pp.Box(dimension=3, length=[Lx, Ly, Lz], origin=[-1., -1., -1.])
-resol3D = [nb, nb, nb]
-
-# Fields
-scal = Field(domain=dom, name='Scalar')
-op = Analytic(scal, formula=vitesse, resolutions={scal: resol3D})
-
-op.setUp()
-
-topo = dom.topologies[0]
-coords = topo.mesh.coords
-
-ff = scal.discreteFields[topo].data
-
-ff[...]=128
-
-sphere = Sphere(dom, position=[0., 0., 0.], radius=0.5)
-ind = sphere.discretize(topo)
-
-penal = Penalization(scal, sphere, coeff=1e6,
-                     resolutions={scal: resol3D})
-
-penal.setUp()
-
-print scal.norm()
-
-penal.apply(0.1)
-
-print scal.norm()
diff --git a/trashed_examples/testPenalization.py b/trashed_examples/testPenalization.py
deleted file mode 100644
index d56a998fb24e87f5704d699729d7f41e1d432be1..0000000000000000000000000000000000000000
--- a/trashed_examples/testPenalization.py
+++ /dev/null
@@ -1,87 +0,0 @@
-import parmepy as pp
-import math
-from parmepy.fields.continuous import Field
-from parmepy.operator.analytic import Analytic
-from parmepy.domain.obstacle.sphere import Sphere, HemiSphere
-from parmepy.domain.obstacle.cylinder2d import Cylinder2D, SemiCylinder2D
-from parmepy.domain.obstacle.plates import Plates
-pi = math.pi
-from parmepy.operator.penalization import Penalization
-from parmepy.operator.monitors.printer import Printer
-from parmepy.problem.simulation import Simulation
-
-
-def vitesse(x, y, z):
-    return x
-
-
-def vitesse2(x, y):
-    return x
-
-nb = 33
-
-Lx = Ly = Lz = 2
-dom = pp.Box(dimension=3, length=[Lx, Ly, Lz], origin=[-1., -1., -1.])
-dom2 = pp.Box(dimension=2, length=[Lx, Ly], origin=[-1., -1.])
-resol3D = [nb, nb, nb]
-resol2D = [nb, nb]
-
-# Fields
-scal = Field(domain=dom, name='Scalar')
-op = Analytic(scal, formula=vitesse, resolutions={scal: resol3D})
-scal2 = Field(domain=dom2, name='Scalar')
-op2 = Analytic(scal2, formula=vitesse2, resolutions={scal2: resol2D})
-velo = Field(domain=dom2, name='Velo', is_vector=True)
-
-op.setUp()
-op2.setUp()
-topo = dom.topologies[0]
-coords = topo.mesh.coords
-
-ff = scal.discreteFields[topo].data[0]
-
-
-ff[...] = 128
-
-topo2 = dom2.topologies.values()[0]
-
-ff2 = scal2.discreteFields[topo2].data[0]
-ff2[...] = 128
-sphere = Sphere(dom, position=[0., 0., 0.], radius=0.5, porousLayers=[0.13])
-
-hsphere = HemiSphere(dom, position=[0., 0., 0.],
-                     radius=0.5, porousLayers=[0.13])
-
-plates = Plates(dom, normal_dir=0, epsilon=0.1)
-plates2 = Plates(dom2, normal_dir=1, epsilon=0.1)
-
-cyl = Cylinder2D(dom2, position=[0., 0.], radius=0.5, porousLayers=[0.13])
-hcyl = SemiCylinder2D(dom2, position=[0., 0.], radius=0.5, porousLayers=[0.13])
-
-ind = hsphere.discretize(topo)
-
-penal = Penalization(scal, [hsphere, plates], coeff=[1e6, 10],
-                     resolutions={velo: resol3D})
-penal2 = Penalization(velo, [hcyl, plates2], coeff=[1e6, 10],
-                      resolutions={scal2: resol2D})
-
-penal.setUp()
-penal2.setUp()
-
-velod = velo.discreteFields[topo2]
-velod[0] = 128
-velod[1] = 12
-#velod[2] = 4.3
-
-
-printer = Printer(fields=[scal], frequency=1)
-printer.setUp()
-print scal.norm()
-simulation = Simulation()
-penal.apply(simulation)
-penal2.apply(simulation)
-printer.apply(simulation)
-print "print ..."
-print scal.norm()
-
-velo.dump('sc')
diff --git a/trashed_examples/testPoisson.py b/trashed_examples/testPoisson.py
deleted file mode 100755
index 4dc58b34b4e2412428c2714248c707d3e38d8db5..0000000000000000000000000000000000000000
--- a/trashed_examples/testPoisson.py
+++ /dev/null
@@ -1,107 +0,0 @@
-#!/usr/bin/python
-import parmepy as pp
-from parmepy.operator.poisson import Poisson
-from parmepy.operator.analytic import Analytic
-from parmepy.problem.problem import Problem
-from parmepy.problem.navier_stokes import NSProblem
-from parmepy.problem.simulation import Simulation
-from parmepy.operator.monitors.printer import Printer
-
-import numpy as np
-import math
-pi = math.pi
-sin = np.sin
-cos = np.cos
-
-## Physical Domain description
-dim = 3
-LL = 2 * pi * np.ones((dim))
-dom = pp.Box(dimension=dim, length=LL)
-resol = [65, 65, 65]
-
-## Fields
-velocity = pp.Field(domain=dom, is_vector=True, name='Velocity')
-
-# formula to compute initial vorticity field
-coeff = 4 * pi ** 2 * (LL[1] ** 2 * LL[2] ** 2 + LL[0] ** 2 * LL[2] ** 2 +
-                       LL[0] ** 2 * LL[1] ** 2) / (LL[0] ** 2 * LL[1] ** 2
-                                                   * LL[2] ** 2)
-
-cc = 2 * pi / LL
-
-
-def computeVort(x, y, z):
-    wx = coeff * sin(x * cc[0]) * sin(y * cc[1]) * cos(z * cc[2])
-    wy = coeff * cos(x * cc[0]) * sin(y * cc[1]) * sin(z * cc[2])
-    wz = coeff * cos(x * cc[0]) * cos(y * cc[1]) * sin(z * cc[2])
-    return wx, wy, wz
-
-vorticity = pp.Field(domain=dom, formula=computeVort,
-                     name='Vorticity', is_vector=True)
-
-
-# ref. field
-def computeRef(x, y, z, t):
-    refx = -2. * pi / LL[1] * \
-        (cos(x * cc[0]) * sin(y * cc[1]) * sin(z * cc[2])) \
-        - 2. * pi / LL[2] * (cos(x * cc[0]) * sin(y * cc[1]) * cos(z * cc[2]))
-
-    refy = -2. * pi / LL[2] * \
-        (sin(x * cc[0]) * sin(y * cc[1]) * sin(z * cc[2])) \
-        + 2. * pi / LL[0] * (sin(x * cc[0]) * cos(y * cc[1]) * sin(z * cc[2]))
-
-    refz = -2. * pi / LL[0] * \
-        (sin(x * cc[0]) * sin(y * cc[1]) * sin(z * cc[2])) \
-        - 2. * pi / LL[1] * (sin(x * cc[0]) * cos(y * cc[1]) * cos(z * cc[2]))
-
-    return refx, refy, refz
-
-
-## Definition of the Poisson operator
-poisson = Poisson(velocity, vorticity,
-                  resolutions={velocity: resol, vorticity: resol})
-
-## Analytic operator to compute the reference field
-ref = pp.Field(domain=dom, name='reference', is_vector=True)
-refOp = Analytic(ref, formula=computeRef,
-                 resolutions={ref: resol})
-
-time_step = 0.25
-simu = Simulation()
-#simu.max_iter = simu.max_iter - 2
-refOp.discretize()
-poisson.discretize()
-refOp.setUp()
-poisson.setUp()
-refOp.apply(simu)
-
-#pb = NSProblem([refOp, poisson], simu,
-#               monitors=[Printer(fields=[velocity],
-#                                 frequency=1)],
-#               dumpFreq=2, name='toto')
-
-#pb.setUp()
-
-#pb.solve()
-
-## refOp.apply(0, 0, 0)
-
-vorticity.initialize()
-## print vorticity.norm()
-poisson.apply(simu)
-## print velocity.norm()
-## print ref.norm()
-
-
-refD = ref.discreteFields.values()[0]
-vd = velocity.discreteFields.values()[0]
-
-assert np.allclose(ref.norm(), velocity.norm())
-for i in range(dom.dimension):
-    assert np.allclose(vd[i], refD[i])
-
-print velocity.norm()
-
-poisson.finalize()
-
-#print poisson
diff --git a/trashed_examples/testScales.py b/trashed_examples/testScales.py
deleted file mode 100755
index 34b47da963997e6695eafc9f5cb3023f19565119..0000000000000000000000000000000000000000
--- a/trashed_examples/testScales.py
+++ /dev/null
@@ -1,95 +0,0 @@
-#!/usr/bin/python
-import parmepy as pp
-import parmepy.f2py
-scales = parmepy.f2py.scales2py
-import numpy as np
-import mpi4py.MPI as MPI
-import math
-
-pi = math.pi
-
-comm = MPI.COMM_WORLD
-rank = comm.Get_rank()
-print "start simulation for process number", rank
-
-pbDim = 3
-
-# Number of MPI processus
-nbProcs = comm.Get_size()
-
-# Physical Domain description
-Lx = Ly = Lz = 2 * pi
-myDomain3d = pp.Box(dimension=3, length=[Lx, Ly, Lz], origin=[0., 0., 0.])
-nbCells = np.asarray((16, 16, 32))
-
-# ===== Initialize the particular solver =====
-# First choose the number of mpi process in each dir
-#topodims = MPI.Compute_dims(nbProcs, 3)
-if nbProcs > 1:
-    topodims = [1, 2, nbProcs / 2]
-else:
-    topodims = [1, 1, 1]
-
-# It must be such that nbCells is a multiple of topodims
-# and such that nbCells/topodims is a multiple of 2, 4 or 5.
-localres, localoffset, stab_coeff = \
-    scales.init_advection_solver(nbCells, myDomain3d.length,
-                                 topodims, order='p_O2')
-
-if(rank == 0):
-    print "Start initialisation ..."
-
-# ===== Create and initialize Fields =====
-#scal3D = np.zeros((localres), dtype='float64', order='Fortran')
-scal3D = np.zeros((localres), dtype='float64')
-
-r02 = ((myDomain3d.length / 10.).min()) ** 2  # ??
-
-# Coordinates of the lowest point of the current subdomain
-step = myDomain3d.length / nbCells
-coord0 = localoffset * localres * step
-vz = np.zeros((localres), dtype='float64', order='Fortran')
-vx = np.zeros((localres), dtype='float64', order='Fortran')  # vz.copy()
-vy = np.zeros((localres), dtype='float64', order='Fortran')  # vz.copy()
-period = 1.0
-coef = 2. * pi / period
-
-for k in range(localres[2]):
-    rz = (step[2] * k + coord0[2]) ** 2
-    for j in range(localres[1]):
-        ry = (step[1] * j + coord0[1]) ** 2
-        for i in range(localres[0]):
-            rx = (step[0] * i + coord0[0]) ** 2
-            rr = rx + ry + rz
-            if(rr < r02):
-                scal3D[i, j, k] = (1 - rr / r02) ** 4
-            vx[i, j, k] = coef * (0.5 - coord0[1] - step[1] * j)
-            vy[i, j, k] = coef * (step[0] * i + coord0[0] - 0.5)
-
-goodScal = scal3D.copy()
-goodVelo = vx.copy()
-
-time_step = 0.01
-finalTime = 2. * period
-currentTime = 0.0
-
-comm.Barrier()
-
-if(rank == 0):
-    print "End of initialisation ..."
-
-#while currentTime<finalTime:
-
-# Simulation
-t0 = MPI.Wtime()
-scal3D = scales.solve_advection(time_step, vx, vy, vz, scal3D)
-
-print "elapsed time on processus ", rank, ": ", MPI.Wtime() - t0
-
-currentTime += time_step
-
-
-
-
-
-
diff --git a/trashed_examples/testTopoBridge.py b/trashed_examples/testTopoBridge.py
deleted file mode 100644
index 87ad488d0372b7a4c405c5437aeeda762c509bc1..0000000000000000000000000000000000000000
--- a/trashed_examples/testTopoBridge.py
+++ /dev/null
@@ -1,87 +0,0 @@
-## This is an example of usage for a bridge between two operators.
-## Todo : move this to non-regr. tests.
-import parmepy as pp
-import math
-from parmepy.fields.continuous import Field
-from parmepy.operator.analytic import Analytic
-from parmepy.operator.stretching import Stretching
-from parmepy.operator.poisson import Poisson
-from parmepy.operator.redistribute import Redistribute
-from parmepy.mpi import main_rank, main_comm
-pi = math.pi
-
-
-def vitesse(x, y, z, t, dt, ite):
-    vx = x
-    vy = y
-    vz = z
-    return vx, vy, vz
-
-
-# Physical Domain description
-Lx = Ly = Lz = 2 * pi
-dom = pp.Box(dimension=3, length=[Lx, Ly, Lz])
-resol1 = [9, 9, 9]
-resol2 = [5, 5, 5]
-
-## Fields
-scal = Field(domain=dom, name='Scalar')
-velo = Field(domain=dom, name='Velocity', is_vector=True)
-vort = Field(domain=dom, name='Vorticity', is_vector=True)
-
-## First operator
-velocity = Analytic(velo, formula=vitesse,
-                    resolutions={velo: resol1})
-
-## Second operator
-op2 = Analytic(velo, formula=vitesse,
-               resolutions={velo: resol2})
-
-
-op3 = Stretching(velo, vort, resolutions={velo: resol2, vort: resol2})
-op4 = Poisson(velo, vort, resolutions={velo: resol2, vort: resol2})
-op4.setUp()
-
-distr = Redistribute(velo, op2, op4)
-distr2 = Redistribute(velo, op4,  op3)
-
-#velocity.setUp()
-op2.setUp()
-op3.setUp()
-distr2.setUp()
-distr.setUp()
-
-## print main_rank, "send ...",  distr2.bridges[velo].sendTo
-## print main_rank, "recv ...", distr2.bridges[velo].recvFrom
-
-
-#for v in velo.discreteFields.values():
-#    if main_rank == 3:
-#        print v.topology
-
-
-## for topo in velo.discreteFields.keys():
-##     print velo.discreteFields[topo].data
-
-op2.apply(1, 1, 1)
-
-#op2.apply(0, 0, 0)
-
-for topo in velo.discreteFields.keys():
-    a = velo.norm(topo)
-    if main_rank == 0:
-        print "pre :", a
-
-## #print " compute ..."
-distr.apply()
-
-distr.wait()
-
-distr2.apply()
-distr2.wait()
-for topo in velo.discreteFields.keys():
-    a = velo.norm(topo)
-    if main_rank == 0:
-        print "post :", a
-
-
diff --git a/trashed_examples/testVisu.py b/trashed_examples/testVisu.py
deleted file mode 100755
index 579efe8133d2a015071b4951c3549cb4578a17c0..0000000000000000000000000000000000000000
--- a/trashed_examples/testVisu.py
+++ /dev/null
@@ -1,134 +0,0 @@
-#
-# Example of field creation/discretisation and output
-# to test post-treatment (vtk, hdf5 ...)
-#
-# Usage :
-# python testVisu.py
-# or
-# mpirun -np NB python testVisu.py
-# NB = number of mpi processes
-
-
-import parmepy as pp
-import math
-from parmepy.fields.continuous import Field
-from parmepy.domain.obstacle.sphere import Sphere
-from parmepy.operator.analytic import Analytic
-from parmepy.domain.obstacle.planes import SubPlane
-pi = math.pi
-from parmepy.operator.penalization import Penalization
-from parmepy.operator.monitors.printer import Printer
-from parmepy.problem.simulation import Simulation
-import numpy as np
-cos = np.cos
-sin = np.sin
-nb = 5
-Lx = Ly = Lz = 2
-
-
-## ======== 3D ======== ##
-
-# Domain (box-shaped)
-dom = pp.Box(dimension=3, length=[Lx, Ly, Lz], origin=[-1., -1., -1.])
-
-# global resolution for the grid
-resol3D = [nb, nb, nb]
-
-# Continuous fields (a scalar and a vector)
-
-
-# Example of function to init the scalar field
-def func3D(res, x, y, z, t):
-    res[0][...] = 1.#cos(t * x) + sin(y) + z
-    return res
-
-
-scal3D = Field(domain=dom, formula=func3D, name='Scal')
-scalRef = Field(domain=dom, formula=func3D, name='Ref')
-resolution3D = np.ones(3) * nb
-ghosts = np.zeros(3) + 2
-from parmepy.mpi.topology import Cartesian
-topo = Cartesian(dom, dom.dimension, resolution3D, ghosts=ghosts)
-
-scalRef.discretize(topo)
-scalRef.initialize(topo=topo)
-hh = topo.mesh.space_step[2]
-planes = SubPlane(dom, normal=[0, 0, 1], point=[0, -2*hh, 2*hh],
-                  lengths=[0.8, 0.8, 0.5])
-
-op = Analytic(scal3D, resolutions={scal3D: resolution3D},
-              topo=topo)
-op.discretize()
-op.setUp()
-simu = Simulation(start=0.0, end=2., nbIter=100, max_iter=1000000)
-op.apply(simu)
-
-sc3D = scal3D.discretization(topo)
-scRef = scalRef.discretization(topo)
-
-assert np.allclose(sc3D.data[0], scRef.data[0])
-## # Monitor : print scal3D to vtk file.
-from parmepy.constants import HDF5
-printer = Printer(variables=[scal3D], topo=topo,
-                  formattype=HDF5, prefix='testSlice', subset=planes)
-printerRef = Printer(variables=[scalRef], topo=topo,
-                     formattype=HDF5, prefix='testRef')
-printer.setUp()
-printerRef.setUp()
-printerRef.apply(simu)
-
-simu.initialize()
-
-while not simu.isOver:
-    op.apply(simu)
-#    printer.apply(simu)
-    printerRef.apply(simu)
-    simu.advance()
-   # simu.isOver=True
-printerRef.finalize()
-sc3D.data[0][...] = 0.0
-
-## Example to postprocess some hdf5 files and save only a required section : 
-
-import glob
-import parmepy.tools.io_utils as io
-filepath = io.io.defaultPath()
-prefix = '/testRef'
-# The list of all file with testRef_...
-filelist = glob.glob(filepath + prefix + '*.h5')
-print filepath + prefix
-filelist.sort()
-print filelist
-# for each file in dir ...
-# 1 - Dowload the full-grid hdf file and use it to fill in a field
-# First iter:
-from parmepy.operator.monitors.reader import Reader
-import os
-name = os.path.basename(filelist[0])
-name = name.rsplit('.')[0]
-
-reader0 = Reader(variables=[scal3D], topo=topo, prefix=name)
-reader0.setUp()
-reader0.apply()
-printer.apply(simu)
-simu.initialize()
-for pref in filelist[1:]:
-    name = os.path.basename(pref)
-    name = name.rsplit('.')[0]
-    reader = Reader(variables=[scal3D], topo=topo, prefix=name)
-    reader.setUp()
-    simu.printState()
-        #print 'pre', scal3D.norm(topo)
-    reader.apply()
-    # 2 - Use the predifined printer on planes to save data
-    printer.apply(simu)
-    simu.advance()
-##     #print scalRef.norm(topo)
-##     #print scal3D.norm(topo)
-##     #assert np.allclose(sc3D.data[0][topo.mesh.compute_index], scRef.data[0][topo.mesh.compute_index])
-
-
-velo = Field(domain=dom, name='Velocity', is_vector=True)
-vorti = Field(domain=dom, name='vty', is_vector=True)
-reader = Reader(variables=[velo, vorti], topo=topo, prefix='curl_io_00000')
-