From 2f15cf3a630a6ca2526553409f8f0895ff449bfd Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Franck=20P=C3=A9rignon?= <franck.perignon@imag.fr>
Date: Thu, 30 Jan 2014 17:26:52 +0000
Subject: [PATCH] Add drag/lift computation and control box; fix bugs in
 HDF5/VTK output and field initialization; modify FD and diff operators

---
 CodingRules.org                               |   17 +
 Examples/NSDebug.py                           |   25 +-
 Examples/NS_bluff_bodies.py                   |    9 +-
 Examples/postTaylor.py                        |   51 +-
 Examples/testCurl.py                          |    8 +-
 HySoP/CMakeLists.txt                          |    2 +-
 HySoP/DoxyConf/parmes.doxyfile.in             | 2334 ++++++++++-------
 HySoP/hysop/constants.py                      |   15 +
 HySoP/hysop/domain/box.py                     |   24 +-
 HySoP/hysop/domain/obstacle/controlBox.py     |  257 ++
 HySoP/hysop/domain/obstacle/cylinder.py       |  109 -
 HySoP/hysop/domain/obstacle/cylinder2d.py     |  129 -
 HySoP/hysop/domain/obstacle/disk.py           |   74 +
 HySoP/hysop/domain/obstacle/hemisphere.py     |  124 -
 HySoP/hysop/domain/obstacle/obstacle.py       |   20 +-
 HySoP/hysop/domain/obstacle/planes.py         |  320 +++
 HySoP/hysop/domain/obstacle/plates.py         |   63 -
 HySoP/hysop/domain/obstacle/sphere.py         |   18 +-
 HySoP/hysop/domain/tests/test_obstacle.py     |  246 +-
 HySoP/hysop/f2py/fftw2py.f90                  |    2 +-
 HySoP/hysop/fields/continuous.py              |   17 +-
 HySoP/hysop/mpi/mesh.py                       |   32 +-
 .../hysop/numerics/differential_operations.py |   62 +-
 HySoP/hysop/numerics/finite_differences.py    |  447 ++--
 HySoP/hysop/numerics/tests/test_diffOp.py     |    5 +-
 HySoP/hysop/operator/advection.py             |    1 -
 HySoP/hysop/operator/advection_dir.py         |   12 +-
 HySoP/hysop/operator/analytic.py              |    5 +-
 HySoP/hysop/operator/continuous.py            |   18 +-
 HySoP/hysop/operator/differential.py          |   58 +-
 HySoP/hysop/operator/discrete/discrete.py     |   13 +-
 HySoP/hysop/operator/discrete/penalization.py |    2 +-
 HySoP/hysop/operator/discrete/stretching.py   |   29 +-
 HySoP/hysop/operator/energy_enstrophy.py      |   11 -
 .../hysop/operator/monitors/compute_forces.py |  258 +-
 HySoP/hysop/operator/monitors/printer.py      |  141 +-
 HySoP/hysop/operator/penalization.py          |    4 +-
 HySoP/hysop/operator/poisson.py               |    5 -
 HySoP/hysop/operator/stretching.py            |   13 +-
 .../tests/ref_scal2D_PenalSphere_rk_0.dat     |  Bin 0 -> 8335 bytes
 .../tests/ref_scal2D_PenalSphere_rk_0.map     |    1 +
 .../tests/ref_scal3D_PenalSphere_rk_0.dat     |  Bin 0 -> 262289 bytes
 .../tests/ref_scal3D_PenalSphere_rk_0.map     |    1 +
 HySoP/hysop/operator/tests/test_analytic.py   |   16 +-
 .../hysop/operator/tests/test_penalization.py |   83 +-
 HySoP/hysop/tools/numpywrappers.py            |   57 +-
 HySoP/src/client_data.f90                     |    2 +-
 47 files changed, 3202 insertions(+), 1938 deletions(-)
 create mode 100644 HySoP/hysop/domain/obstacle/controlBox.py
 delete mode 100644 HySoP/hysop/domain/obstacle/cylinder.py
 delete mode 100644 HySoP/hysop/domain/obstacle/cylinder2d.py
 create mode 100644 HySoP/hysop/domain/obstacle/disk.py
 delete mode 100644 HySoP/hysop/domain/obstacle/hemisphere.py
 create mode 100644 HySoP/hysop/domain/obstacle/planes.py
 delete mode 100644 HySoP/hysop/domain/obstacle/plates.py
 create mode 100644 HySoP/hysop/operator/tests/ref_scal2D_PenalSphere_rk_0.dat
 create mode 100644 HySoP/hysop/operator/tests/ref_scal2D_PenalSphere_rk_0.map
 create mode 100644 HySoP/hysop/operator/tests/ref_scal3D_PenalSphere_rk_0.dat
 create mode 100644 HySoP/hysop/operator/tests/ref_scal3D_PenalSphere_rk_0.map

diff --git a/CodingRules.org b/CodingRules.org
index 12a50935d..de7f3033d 100644
--- a/CodingRules.org
+++ b/CodingRules.org
@@ -25,6 +25,23 @@ This file provides a list of coding rules for Parmes, parmepy that MUST be appli
     
 ** use distutils (__init__.py ...)
    see http://docs.python.org/distutils/setupscript.html
+** default arguments in functions:
+   Because of warning in pylint/pyflake "dangerous default argument", 
+   do not use arg = [] or {} but None.
+   Example :
+   bad:
+   def __init__(self, var=[]):
+       self.var = var
+
+   good:
+   def __init__(self, var=None):
+       if var is None:
+           var = []
+       self.var = var
+
+   For detailed explanations, see http://eli.thegreenplace.net/2009/01/16/python-insight-beware-of-mutable-default-values-for-arguments/
+   
+   
 * Test and examples
   Three directories :
 ** test in Parmes => unitary tests 
diff --git a/Examples/NSDebug.py b/Examples/NSDebug.py
index 6881f0f91..6d70a253c 100755
--- a/Examples/NSDebug.py
+++ b/Examples/NSDebug.py
@@ -29,9 +29,11 @@ from parmepy.operator.monitors.energy_enstrophy import Energy_enstrophy
 from parmepy.operator.monitors.compute_forces import Forces
 from parmepy.problem.simulation import Simulation
 from parmepy.constants import VTK
+from parmepy.domain.obstacle.planes import PlaneBoundaries
 from dataNS_bb import dim, nb, NBGHOSTS, ADVECTION_METHOD, VISCOSITY, \
     OUTPUT_FREQ, FILENAME, PROJ, LCFL, CFL, CURL_METHOD, \
     TIMESTEP_METHOD, OBST_Ox, OBST_Oy, OBST_Oz, RADIUS
+import os
 
 ## ----------- A 3d problem -----------
 print " ========= Start Navier-Stokes 3D (Flow past bluff bodies) ========="
@@ -119,7 +121,7 @@ topo = Cartesian(box, box.dimension, nbElem,
 sphere = Sphere(box, position=[OBST_Ox, OBST_Oy, OBST_Oz],
                 radius=RADIUS)
 
-#plates = Plates(box, normal_dir=2, epsilon=0.005)
+bc = PlaneBoundaries(box, 2, thickness=0.1)
 
 ## Operators
 advec = Advection(velo, vorti,
@@ -164,7 +166,7 @@ velo.setTopoInit(topofft)
 velo.initialize()
 ind = sphere.discretize(topofft)
 vd = velo.discreteFields[topofft].data
-penal = Penalization(velo, [sphere],
+penal = Penalization(velo, [sphere, bc],
                      coeff=[1e8],
                      topo=topofft,
                      resolutions={velo: nbElem})
@@ -192,19 +194,23 @@ distrCurlAdv = Redistribute([vorti, velo], curl, advec)
 
 ## Diagnostics/monitors related to the problem
 
+outputdir = 'res_' + str(topofft.size) + 'procs'
+pref = outputdir + '/vwfft'
+
 printerFFT = Printer(variables=[velo, vorti],
                      topo=topofft,
                      frequency=1,
-                     prefix='./res/vwfft',
+                     prefix=pref,
                      formattype=VTK)
 printerFFT.setUp()
+prefE = outputdir + '/ener'
 
 energy = Energy_enstrophy(velo, vorti,
                           topo=topofft,
                           viscosity=VISCOSITY,
                           isNormalized=False,
-                          frequency=OUTPUT_FREQ,
-                          prefix=FILENAME)
+                          frequency=1,
+                          prefix=prefE)
 
 ## Simulation with fixed time step
 simu = Simulation(tinit=0.0,
@@ -232,6 +238,7 @@ distrCurlAdv.setUp()
 
 energy.setUp()
 
+
 ## Initialization of velocity on topofft
 def run():
     penal.apply(simu)
@@ -244,14 +251,14 @@ def run():
         print "start curl"
     curl.apply(simu)
 
-    printerFFT.apply(simu)
+    #printerFFT.apply(simu)
     # From topo fft to topo advec
     if topofft.rank == 0:
         print "start d0"
     distrCurlAdv.apply(simu)
     if topofft.rank == 0:
         print "start advec"
-    #advec.apply(simu)
+    advec.apply(simu)
 
     # From topo adv to topo stretch
     if topofft.rank == 0:
@@ -285,11 +292,13 @@ def run():
     printerFFT.apply(simu)
 
 
-while not simu.isOver:
+#while not simu.isOver:
+for i in xrange(200):
     simu.printState()
     run()
     simu.advance()
 
+
 ## print 'total time (rank):', MPI.Wtime() - time, '(', topo.rank, ')'
 
 ## Clean memory buffers
diff --git a/Examples/NS_bluff_bodies.py b/Examples/NS_bluff_bodies.py
index 4fa4de483..c9af9a64b 100755
--- a/Examples/NS_bluff_bodies.py
+++ b/Examples/NS_bluff_bodies.py
@@ -32,6 +32,7 @@ from parmepy.constants import VTK
 from dataNS_bb import dim, nb, NBGHOSTS, ADVECTION_METHOD, VISCOSITY, \
     OUTPUT_FREQ, FILENAME, PROJ, LCFL, CFL, CURL_METHOD, \
     TIMESTEP_METHOD, OBST_Ox, OBST_Oy, OBST_Oz, RADIUS
+from parmepy.domain.obstacle.planes import PlaneBoundaries
 
 ## ----------- A 3d problem -----------
 print " ========= Start Navier-Stokes 3D (Flow past bluff bodies) ========="
@@ -43,13 +44,13 @@ sin = np.sin
 
 ## Domain
 #box = pp.Box(dim, length=[1., 1., 1.], origin=[0., 0., 0.])
-box = pp.Box(dim, length=[4.0 * pi, 2.0 * pi, 2.0 * pi], origin=[-pi, -pi, -pi])
+box = pp.Box(dim, length=[2.0 * pi, pi,  pi], origin=[-pi, -pi, -pi])
 #box = pp.Box(dim, length=[12., 10., 10.], origin=[-4., -5., -5.])
 #box = pp.Box(dim, length=[2.0 * pi, 2.0 * pi, 2.0 * pi])
 
 ## Global resolution
 #nbElem = [nb] * dim
-nbElem = [129, 65, 65]
+nbElem = [65, 33, 33]
 
 # Upstream flow velocity
 uinf = 1.0
@@ -216,7 +217,9 @@ correction = VelocityCorrection(velo, vorti,
                                 uinf=uinf,
                                 topo=topofft)
 
-penal = Penalization(velo, [sphere],
+bc = PlaneBoundaries(box, 2, thickness=0.1)
+
+penal = Penalization(velo, [sphere, bc],
                      coeff=[1e8],
                      topo=topofft,
                      resolutions={velo: nbElem})
diff --git a/Examples/postTaylor.py b/Examples/postTaylor.py
index 2eedcdba0..c97150ef8 100644
--- a/Examples/postTaylor.py
+++ b/Examples/postTaylor.py
@@ -1,5 +1,6 @@
 import scitools.filetable as ft
 import matplotlib.pyplot as plt
+import glob
 
 ## file1 = open("resp1/energy.dat")
 ## ener = ft.read(file1)
@@ -12,45 +13,25 @@ import matplotlib.pyplot as plt
 ## #nuES = ener[:,6]
 VISCOSITY = 1. / 1600.
 ## nuS_calc = VISCOSITY * enstrophy[2:]
-
+import numpy as np
 # multiproc #
-file2 = open("resp2_RK2/energy.dat")
-ener2 = ft.read(file2)
-time2 = ener2[:, 0]
-energy2 = ener2[:, 1]
-enstrophy2 = ener2[:, 2]
-nuS_calc2 = enstrophy2[:] * VISCOSITY
-
+filepath = './res_'
 plt.ioff()
 plt.figure(1)
 
-#plt.plot(time, nuS_calc)
-plt.plot(time2, enstrophy2, 'o--')
-plt.show()
-
+error = []
 
-## size = energy.shape[0]
-## import numpy as np
-## em2 = np.zeros(size - 2)
-## em1 = np.zeros(size - 2)
-## em2[:] = energy[:-2]
-## em1[:] = energy[1:-1]
-## dt = np.zeros(size - 1)
-## dt[:] = time[1:] - time[:-1]
-## en = energy[2:]
+filelist = glob.glob(filepath + '*')
+for f in filelist:
+    filename = open(f + '/ener')
+    data = ft.read(filename)
+    time = data[:, 0]
+    energy = data[:, 1]
+    enstrophy = data[:, 2]
+    plt.plot(time, energy, '+-', label=f)
+    error.append(np.sum(energy))
 
-## dedt_calc = - (3. * en[:] - 4. * em1[:] + em2[:]) / (2. * dt[1:])
-## nuEff = dedt_calc / enstrophy[2:]
-## ratio_calc = nuEff * 1600
-## nuES_calc = nuEff * enstrophy[2:]
-
-
-## size = energy2.shape[0]
-## em22 = np.zeros(size - 2)
-## em12 = np.zeros(size - 2)
-## em22[:] = energy2[:-2]
-## em12[:] = energy2[1:-1]
-## dt2 = np.zeros(size - 1)
-## dt2[:] = time2[1:] - time2[:-1]
-## en2 = energy2[2:]
+plt.legend()
+plt.show()
 
+print error
diff --git a/Examples/testCurl.py b/Examples/testCurl.py
index b7e300c14..c47d31232 100644
--- a/Examples/testCurl.py
+++ b/Examples/testCurl.py
@@ -61,10 +61,10 @@ curlFD.discretize()
 topofft = curlfft.discreteFields[curlfft.outvar].topology
 vortiref.discretize(topofft)
 vortiref2.discretize(topo3)
-velo.initialize(topofft)
-velo2.initialize(topo3)
-vortiref.initialize(topofft)
-vortiref2.initialize(topo3)
+velo.initialize(topo=topofft)
+velo2.initialize(topo=topo3)
+vortiref.initialize(topo=topofft)
+vortiref2.initialize(topo=topo3)
 curlfft.setUp()
 curlFD.setUp()
 
diff --git a/HySoP/CMakeLists.txt b/HySoP/CMakeLists.txt
index 2bc40fef6..4682220ff 100644
--- a/HySoP/CMakeLists.txt
+++ b/HySoP/CMakeLists.txt
@@ -188,7 +188,7 @@ add_custom_target(python-cleaninstall COMMAND rm -rf ${CMAKE_INSTALL_PREFIX}*
   COMMENT "remove parmepy package and its dependencies")
 
 # Target to clean sources (remove .pyc files) and build dir.
-file(GLOB_RECURSE PYCFILES "${CMAKE_SOURCE_DIR}/*.pyc"})
+file(GLOB_RECURSE PYCFILES "${CMAKE_SOURCE_DIR}/*.pyc")
 add_custom_target(pyclean COMMAND rm -f ${PYCFILES}
   COMMAND make clean
   COMMAND rm -rf ${CMAKE_BINARY_DIR}/build  ${CMAKE_BINARY_DIR}/DoxygenGeneratedDoc
diff --git a/HySoP/DoxyConf/parmes.doxyfile.in b/HySoP/DoxyConf/parmes.doxyfile.in
index 64f0f003c..4e5a89fb3 100644
--- a/HySoP/DoxyConf/parmes.doxyfile.in
+++ b/HySoP/DoxyConf/parmes.doxyfile.in
@@ -1,110 +1,119 @@
-# Doxyfile 1.8.2
+# Doxyfile 1.8.5
 
 # This file describes the settings to be used by the documentation system
-# doxygen (www.doxygen.org) for a project
+# doxygen (www.doxygen.org) for a project.
 #
-# All text after a hash (#) is considered a comment and will be ignored
+# All text after a double hash (##) is considered a comment and is placed in
+# front of the TAG it is preceding.
+#
+# All text after a single hash (#) is considered a comment and will be ignored.
 # The format is:
-#       TAG = value [value, ...]
-# For lists items can also be appended using:
-#       TAG += value [value, ...]
-# Values that contain spaces should be placed between quotes (" ")
+# TAG = value [value, ...]
+# For lists, items can also be appended using:
+# TAG += value [value, ...]
+# Values that contain spaces should be placed between quotes (\" \").
 
 #---------------------------------------------------------------------------
 # Project related configuration options
 #---------------------------------------------------------------------------
 
 # This tag specifies the encoding used for all characters in the config file
-# that follow. The default is UTF-8 which is also the encoding used for all
-# text before the first occurrence of this tag. Doxygen uses libiconv (or the
-# iconv built into libc) for the transcoding. See
-# http://www.gnu.org/software/libiconv for the list of possible encodings.
+# that follow. The default is UTF-8 which is also the encoding used for all text
+# before the first occurrence of this tag. Doxygen uses libiconv (or the iconv
+# built into libc) for the transcoding. See http://www.gnu.org/software/libiconv
+# for the list of possible encodings.
+# The default value is: UTF-8.
 
 DOXYFILE_ENCODING      = UTF-8
 
-# The PROJECT_NAME tag is a single word (or sequence of words) that should
-# identify the project. Note that if you do not use Doxywizard you need
-# to put quotes around the project name if it contains spaces.
+# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by
+# double-quotes, unless you are using Doxywizard) that should identify the
+# project for which the documentation is generated. This name is used in the
+# title of most generated pages and in a few other places.
+# The default value is: My Project.
 
 PROJECT_NAME           = @PROJECT_NAME@
 
-# The PROJECT_NUMBER tag can be used to enter a project or revision number.
-# This could be handy for archiving the generated documentation or
-# if some version control system is used.
+# The PROJECT_NUMBER tag can be used to enter a project or revision number. This
+# could be handy for archiving the generated documentation or if some version
+# control system is used.
 
 PROJECT_NUMBER         = @PACKAGE_VERSION@
 
 # Using the PROJECT_BRIEF tag one can provide an optional one line description
-# for a project that appears at the top of each page and should give viewer
-# a quick idea about the purpose of the project. Keep the description short.
+# for a project that appears at the top of each page and should give viewer a
+# quick idea about the purpose of the project. Keep the description short.
 
 PROJECT_BRIEF          = "Particle Methods simulation on hybrid architectures"
 
-# With the PROJECT_LOGO tag one can specify an logo or icon that is
-# included in the documentation. The maximum height of the logo should not
-# exceed 55 pixels and the maximum width should not exceed 200 pixels.
-# Doxygen will copy the logo to the output directory.
+# With the PROJECT_LOGO tag one can specify an logo or icon that is included in
+# the documentation. The maximum height of the logo should not exceed 55 pixels
+# and the maximum width should not exceed 200 pixels. Doxygen will copy the logo
+# to the output directory.
 
 PROJECT_LOGO           =
 
-# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute)
-# base path where the generated documentation will be put.
-# If a relative path is entered, it will be relative to the location
-# where doxygen was started. If left blank the current directory will be used.
+# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path
+# into which the generated documentation will be written. If a relative path is
+# entered, it will be relative to the location where doxygen was started. If
+# left blank the current directory will be used.
 
 OUTPUT_DIRECTORY       = @CMAKE_BINARY_DIR@/DoxygenGeneratedDoc
 
-# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create
-# 4096 sub-directories (in 2 levels) under the output directory of each output
-# format and will distribute the generated files over these directories.
-# Enabling this option can be useful when feeding doxygen a huge amount of
-# source files, where putting all generated files in the same directory would
-# otherwise cause performance problems for the file system.
+# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create 4096 sub-
+# directories (in 2 levels) under the output directory of each output format and
+# will distribute the generated files over these directories. Enabling this
+# option can be useful when feeding doxygen a huge amount of source files, where
+# putting all generated files in the same directory would otherwise causes
+# performance problems for the file system.
+# The default value is: NO.
 
 CREATE_SUBDIRS         = YES
 
 # The OUTPUT_LANGUAGE tag is used to specify the language in which all
 # documentation generated by doxygen is written. Doxygen will use this
 # information to generate all constant output in the proper language.
-# The default language is English, other supported languages are:
-# Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional,
-# Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German,
-# Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English
-# messages), Korean, Korean-en, Lithuanian, Norwegian, Macedonian, Persian,
-# Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrillic, Slovak,
-# Slovene, Spanish, Swedish, Ukrainian, and Vietnamese.
+# Possible values are: Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-
+# Traditional, Croatian, Czech, Danish, Dutch, English, Esperanto, Farsi,
+# Finnish, French, German, Greek, Hungarian, Italian, Japanese, Japanese-en,
+# Korean, Korean-en, Latvian, Norwegian, Macedonian, Persian, Polish,
+# Portuguese, Romanian, Russian, Serbian, Slovak, Slovene, Spanish, Swedish,
+# Turkish, Ukrainian and Vietnamese.
+# The default value is: English.
 
 OUTPUT_LANGUAGE        = English
 
-# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will
-# include brief member descriptions after the members that are listed in
-# the file and class documentation (similar to JavaDoc).
-# Set to NO to disable this.
+# If the BRIEF_MEMBER_DESC tag is set to YES doxygen will include brief member
+# descriptions after the members that are listed in the file and class
+# documentation (similar to Javadoc). Set to NO to disable this.
+# The default value is: YES.
 
 BRIEF_MEMBER_DESC      = YES
 
-# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend
-# the brief description of a member or function before the detailed description.
-# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
+# If the REPEAT_BRIEF tag is set to YES doxygen will prepend the brief
+# description of a member or function before the detailed description
+#
+# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
 # brief descriptions will be completely suppressed.
+# The default value is: YES.
 
 REPEAT_BRIEF           = YES
 
-# This tag implements a quasi-intelligent brief description abbreviator
-# that is used to form the text in various listings. Each string
-# in this list, if found as the leading text of the brief description, will be
-# stripped from the text and the result after processing the whole list, is
-# used as the annotated text. Otherwise, the brief description is used as-is.
-# If left blank, the following values are used ("$name" is automatically
-# replaced with the name of the entity): "The $name class" "The $name widget"
-# "The $name file" "is" "provides" "specifies" "contains"
-# "represents" "a" "an" "the"
+# This tag implements a quasi-intelligent brief description abbreviator that is
+# used to form the text in various listings. Each string in this list, if found
+# as the leading text of the brief description, will be stripped from the text
+# and the result, after processing the whole list, is used as the annotated
+# text. Otherwise, the brief description is used as-is. If left blank, the
+# following values are used ($name is automatically replaced with the name of
+# the entity):The $name class, The $name widget, The $name file, is, provides,
+# specifies, contains, represents, a, an and the.
 
 ABBREVIATE_BRIEF       =
 
 # If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
-# Doxygen will generate a detailed section even if there is only a brief
+# doxygen will generate a detailed section even if there is only a brief
 # description.
+# The default value is: NO.
 
 ALWAYS_DETAILED_SEC    = NO
 
@@ -112,179 +121,204 @@ ALWAYS_DETAILED_SEC    = NO
 # inherited members of a class in the documentation of that class as if those
 # members were ordinary class members. Constructors, destructors and assignment
 # operators of the base classes will not be shown.
+# The default value is: NO.
 
 INLINE_INHERITED_MEMB  = NO
 
-# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full
-# path before files name in the file list and in the header files. If set
-# to NO the shortest path that makes the file name unique will be used.
+# If the FULL_PATH_NAMES tag is set to YES doxygen will prepend the full path
+# before files name in the file list and in the header files. If set to NO the
+# shortest path that makes the file name unique will be used
+# The default value is: YES.
 
 FULL_PATH_NAMES        = NO
 
-# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag
-# can be used to strip a user-defined part of the path. Stripping is
-# only done if one of the specified strings matches the left-hand part of
-# the path. The tag can be used to show relative paths in the file list.
-# If left blank the directory from which doxygen is run is used as the
-# path to strip. Note that you specify absolute paths here, but also
-# relative paths, which will be relative from the directory where doxygen is
-# started.
+# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path.
+# Stripping is only done if one of the specified strings matches the left-hand
+# part of the path. The tag can be used to show relative paths in the file list.
+# If left blank the directory from which doxygen is run is used as the path to
+# strip.
+#
+# Note that you can specify absolute paths here, but also relative paths, which
+# will be relative from the directory where doxygen is started.
+# This tag requires that the tag FULL_PATH_NAMES is set to YES.
 
 STRIP_FROM_PATH        =
 
-# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of
-# the path mentioned in the documentation of a class, which tells
-# the reader which header file to include in order to use a class.
-# If left blank only the name of the header file containing the class
-# definition is used. Otherwise one should specify the include paths that
-# are normally passed to the compiler using the -I flag.
+# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the
+# path mentioned in the documentation of a class, which tells the reader which
+# header file to include in order to use a class. If left blank only the name of
+# the header file containing the class definition is used. Otherwise one should
+# specify the list of include paths that are normally passed to the compiler
+# using the -I flag.
 
 STRIP_FROM_INC_PATH    =
 
-# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter
-# (but less readable) file names. This can be useful if your file system
-# doesn't support long names like on DOS, Mac, or CD-ROM.
+# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but
+# less readable) file names. This can be useful is your file systems doesn't
+# support long names like on DOS, Mac, or CD-ROM.
+# The default value is: NO.
 
 SHORT_NAMES            = NO
 
-# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen
-# will interpret the first line (until the first dot) of a JavaDoc-style
-# comment as the brief description. If set to NO, the JavaDoc
-# comments will behave just like regular Qt-style comments
-# (thus requiring an explicit @brief command for a brief description.)
+# If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the
+# first line (until the first dot) of a Javadoc-style comment as the brief
+# description. If set to NO, the Javadoc-style will behave just like regular Qt-
+# style comments (thus requiring an explicit @brief command for a brief
+# description.)
+# The default value is: NO.
 
 JAVADOC_AUTOBRIEF      = YES
 
-# If the QT_AUTOBRIEF tag is set to YES then Doxygen will
-# interpret the first line (until the first dot) of a Qt-style
-# comment as the brief description. If set to NO, the comments
-# will behave just like regular Qt-style comments (thus requiring
-# an explicit \brief command for a brief description.)
+# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first
+# line (until the first dot) of a Qt-style comment as the brief description. If
+# set to NO, the Qt-style will behave just like regular Qt-style comments (thus
+# requiring an explicit \brief command for a brief description.)
+# The default value is: NO.
 
 QT_AUTOBRIEF           = NO
 
-# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen
-# treat a multi-line C++ special comment block (i.e. a block of //! or ///
-# comments) as a brief description. This used to be the default behaviour.
-# The new default is to treat a multi-line C++ comment block as a detailed
-# description. Set this tag to YES if you prefer the old behaviour instead.
+# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a
+# multi-line C++ special comment block (i.e. a block of //! or /// comments) as
+# a brief description. This used to be the default behavior. The new default is
+# to treat a multi-line C++ comment block as a detailed description. Set this
+# tag to YES if you prefer the old behavior instead.
+#
+# Note that setting this tag to YES also means that rational rose comments are
+# not recognized any more.
+# The default value is: NO.
 
 MULTILINE_CPP_IS_BRIEF = NO
 
-# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented
-# member inherits the documentation from any documented member that it
-# re-implements.
+# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the
+# documentation from any documented member that it re-implements.
+# The default value is: YES.
 
 INHERIT_DOCS           = YES
 
-# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce
-# a new page for each member. If set to NO, the documentation of a member will
-# be part of the file/class/namespace that contains it.
+# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce a
+# new page for each member. If set to NO, the documentation of a member will be
+# part of the file/class/namespace that contains it.
+# The default value is: NO.
 
 SEPARATE_MEMBER_PAGES  = NO
 
-# The TAB_SIZE tag can be used to set the number of spaces in a tab.
-# Doxygen uses this value to replace tabs by spaces in code fragments.
+# The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen
+# uses this value to replace tabs by spaces in code fragments.
+# Minimum value: 1, maximum value: 16, default value: 4.
 
 TAB_SIZE               = 8
 
-# This tag can be used to specify a number of aliases that acts
-# as commands in the documentation. An alias has the form "name=value".
-# For example adding "sideeffect=\par Side Effects:\n" will allow you to
-# put the command \sideeffect (or @sideeffect) in the documentation, which
-# will result in a user-defined paragraph with heading "Side Effects:".
-# You can put \n's in the value part of an alias to insert newlines.
+# This tag can be used to specify a number of aliases that act as commands in
+# the documentation. An alias has the form:
+# name=value
+# For example adding
+# "sideeffect=@par Side Effects:\n"
+# will allow you to put the command \sideeffect (or @sideeffect) in the
+# documentation, which will result in a user-defined paragraph with heading
+# "Side Effects:". You can put \n's in the value part of an alias to insert
+# newlines.
 
 ALIASES                =
 
 # This tag can be used to specify a number of word-keyword mappings (TCL only).
-# A mapping has the form "name=value". For example adding
-# "class=itcl::class" will allow you to use the command class in the
-# itcl::class meaning.
+# A mapping has the form "name=value". For example adding "class=itcl::class"
+# will allow you to use the command class in the itcl::class meaning.
 
 TCL_SUBST              =
 
-# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C
-# sources only. Doxygen will then generate output that is more tailored for C.
-# For instance, some of the names that are used will be different. The list
-# of all members will be omitted, etc.
+# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources
+# only. Doxygen will then generate output that is more tailored for C. For
+# instance, some of the names that are used will be different. The list of all
+# members will be omitted, etc.
+# The default value is: NO.
 
 OPTIMIZE_OUTPUT_FOR_C  = NO
 
-# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java
-# sources only. Doxygen will then generate output that is more tailored for
-# Java. For instance, namespaces will be presented as packages, qualified
-# scopes will look different, etc.
+# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or
+# Python sources only. Doxygen will then generate output that is more tailored
+# for that language. For instance, namespaces will be presented as packages,
+# qualified scopes will look different, etc.
+# The default value is: NO.
 
 OPTIMIZE_OUTPUT_JAVA   = YES
 
 # Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran
-# sources only. Doxygen will then generate output that is more tailored for
-# Fortran.
+# sources. Doxygen will then generate output that is tailored for Fortran.
+# The default value is: NO.
 
 OPTIMIZE_FOR_FORTRAN   = NO
 
 # Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL
-# sources. Doxygen will then generate output that is tailored for
-# VHDL.
+# sources. Doxygen will then generate output that is tailored for VHDL.
+# The default value is: NO.
 
 OPTIMIZE_OUTPUT_VHDL   = NO
 
 # Doxygen selects the parser to use depending on the extension of the files it
 # parses. With this tag you can assign which parser to use for a given
 # extension. Doxygen has a built-in mapping, but you can override or extend it
-# using this tag. The format is ext=language, where ext is a file extension,
-# and language is one of the parsers supported by doxygen: IDL, Java,
-# Javascript, CSharp, C, C++, D, PHP, Objective-C, Python, Fortran, VHDL, C,
-# C++. For instance to make doxygen treat .inc files as Fortran files (default
-# is PHP), and .f files as C (default is Fortran), use: inc=Fortran f=C. Note
-# that for custom extensions you also need to set FILE_PATTERNS otherwise the
-# files are not read by doxygen.
+# using this tag. The format is ext=language, where ext is a file extension, and
+# language is one of the parsers supported by doxygen: IDL, Java, Javascript,
+# C#, C, C++, D, PHP, Objective-C, Python, Fortran, VHDL. For instance to make
+# doxygen treat .inc files as Fortran files (default is PHP), and .f files as C
+# (default is Fortran), use: inc=Fortran f=C.
+#
+# Note: For files without extension you can use no_extension as a placeholder.
+#
+# Note that for custom extensions you also need to set FILE_PATTERNS otherwise
+# the files are not read by doxygen.
 
 EXTENSION_MAPPING      =
 
-# If MARKDOWN_SUPPORT is enabled (the default) then doxygen pre-processes all
-# comments according to the Markdown format, which allows for more readable
+# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments
+# according to the Markdown format, which allows for more readable
 # documentation. See http://daringfireball.net/projects/markdown/ for details.
-# The output of markdown processing is further processed by doxygen, so you
-# can mix doxygen, HTML, and XML commands with Markdown formatting.
-# Disable only in case of backward compatibilities issues.
+# The output of markdown processing is further processed by doxygen, so you can
+# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in
+# case of backward compatibilities issues.
+# The default value is: YES.
 
 MARKDOWN_SUPPORT       = NO
 
-# When enabled doxygen tries to link words that correspond to documented classes,
-# or namespaces to their corresponding documentation. Such a link can be
-# prevented in individual cases by by putting a % sign in front of the word or
-# globally by setting AUTOLINK_SUPPORT to NO.
+# When enabled doxygen tries to link words that correspond to documented
+# classes, or namespaces to their corresponding documentation. Such a link can
+# be prevented in individual cases by putting a % sign in front of the word
+# or globally by setting AUTOLINK_SUPPORT to NO.
+# The default value is: YES.
 
 AUTOLINK_SUPPORT       = YES
 
 # If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
-# to include (a tag file for) the STL sources as input, then you should
-# set this tag to YES in order to let doxygen match functions declarations and
-# definitions whose arguments contain STL classes (e.g. func(std::string); v.s.
-# func(std::string) {}). This also makes the inheritance and collaboration
+# to include (a tag file for) the STL sources as input, then you should set this
+# tag to YES in order to let doxygen match functions declarations and
+# definitions whose arguments contain STL classes (e.g. func(std::string);
+# versus func(std::string) {}). This also makes the inheritance and collaboration
 # diagrams that involve STL classes more complete and accurate.
+# The default value is: NO.
 
 BUILTIN_STL_SUPPORT    = NO
 
 # If you use Microsoft's C++/CLI language, you should set this option to YES to
 # enable parsing support.
+# The default value is: NO.
 
 CPP_CLI_SUPPORT        = NO
 
-# Set the SIP_SUPPORT tag to YES if your project consists of sip sources only.
-# Doxygen will parse them like normal C++ but will assume all classes use public
-# instead of private inheritance when no explicit protection keyword is present.
+# Set the SIP_SUPPORT tag to YES if your project consists of sip (see:
+# http://www.riverbankcomputing.co.uk/software/sip/intro) sources only. Doxygen
+# will parse them like normal C++ but will assume all classes use public instead
+# of private inheritance when no explicit protection keyword is present.
+# The default value is: NO.
 
 SIP_SUPPORT            = NO
 
 # For Microsoft's IDL there are propget and propput attributes to indicate
-# getter and setter methods for a property. Setting this option to YES (the
-# default) will make doxygen replace the get and set methods by a property in
-# the documentation. This will only work if the methods are indeed getting or
-# setting a simple type. If this is not the case, or you want to show the
-# methods anyway, you should set this option to NO.
+# getter and setter methods for a property. Setting this option to YES will make
+# doxygen replace the get and set methods by a property in the documentation.
+# This will only work if the methods are indeed getting or setting a simple
+# type. If this is not the case, or you want to show the methods anyway, you
+# should set this option to NO.
+# The default value is: YES.
 
 IDL_PROPERTY_SUPPORT   = YES
 
@@ -292,67 +326,61 @@ IDL_PROPERTY_SUPPORT   = YES
 # tag is set to YES, then doxygen will reuse the documentation of the first
 # member in the group (if any) for the other members of the group. By default
 # all members of a group must be documented explicitly.
+# The default value is: NO.
 
 DISTRIBUTE_GROUP_DOC   = NO
 
-# Set the SUBGROUPING tag to YES (the default) to allow class member groups of
-# the same type (for instance a group of public functions) to be put as a
-# subgroup of that type (e.g. under the Public Functions section). Set it to
-# NO to prevent subgrouping. Alternatively, this can be done per class using
-# the \nosubgrouping command.
+# Set the SUBGROUPING tag to YES to allow class member groups of the same type
+# (for instance a group of public functions) to be put as a subgroup of that
+# type (e.g. under the Public Functions section). Set it to NO to prevent
+# subgrouping. Alternatively, this can be done per class using the
+# \nosubgrouping command.
+# The default value is: YES.
 
 SUBGROUPING            = YES
 
-# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and
-# unions are shown inside the group in which they are included (e.g. using
-# @ingroup) instead of on a separate page (for HTML and Man pages) or
-# section (for LaTeX and RTF).
+# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions
+# are shown inside the group in which they are included (e.g. using \ingroup)
+# instead of on a separate page (for HTML and Man pages) or section (for LaTeX
+# and RTF).
+#
+# Note that this feature does not work in combination with
+# SEPARATE_MEMBER_PAGES.
+# The default value is: NO.
 
 INLINE_GROUPED_CLASSES = NO
 
-# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and
-# unions with only public data fields will be shown inline in the documentation
-# of the scope in which they are defined (i.e. file, namespace, or group
-# documentation), provided this scope is documented. If set to NO (the default),
-# structs, classes, and unions are shown on a separate page (for HTML and Man
-# pages) or section (for LaTeX and RTF).
+# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions
+# with only public data fields or simple typedef fields will be shown inline in
+# the documentation of the scope in which they are defined (i.e. file,
+# namespace, or group documentation), provided this scope is documented. If set
+# to NO, structs, classes, and unions are shown on a separate page (for HTML and
+# Man pages) or section (for LaTeX and RTF).
+# The default value is: NO.
 
 INLINE_SIMPLE_STRUCTS  = NO
 
-# When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum
-# is documented as struct, union, or enum with the name of the typedef. So
+# When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or
+# enum is documented as struct, union, or enum with the name of the typedef. So
 # typedef struct TypeS {} TypeT, will appear in the documentation as a struct
 # with name TypeT. When disabled the typedef will appear as a member of a file,
-# namespace, or class. And the struct will be named TypeS. This can typically
-# be useful for C code in case the coding convention dictates that all compound
+# namespace, or class. And the struct will be named TypeS. This can typically be
+# useful for C code in case the coding convention dictates that all compound
 # types are typedef'ed and only the typedef is referenced, never the tag name.
+# The default value is: NO.
 
 TYPEDEF_HIDES_STRUCT   = NO
 
-# The SYMBOL_CACHE_SIZE determines the size of the internal cache use to
-# determine which symbols to keep in memory and which to flush to disk.
-# When the cache is full, less often used symbols will be written to disk.
-# For small to medium size projects (<1000 input files) the default value is
-# probably good enough. For larger projects a too small cache size can cause
-# doxygen to be busy swapping symbols to and from disk most of the time
-# causing a significant performance penalty.
-# If the system has enough physical memory increasing the cache will improve the
-# performance by keeping more symbols in memory. Note that the value works on
-# a logarithmic scale so increasing the size by one will roughly double the
-# memory usage. The cache size is given by this formula:
-# 2^(16+SYMBOL_CACHE_SIZE). The valid range is 0..9, the default is 0,
-# corresponding to a cache size of 2^16 = 65536 symbols.
-
-SYMBOL_CACHE_SIZE      = 0
-
-# Similar to the SYMBOL_CACHE_SIZE the size of the symbol lookup cache can be
-# set using LOOKUP_CACHE_SIZE. This cache is used to resolve symbols given
-# their name and scope. Since this can be an expensive process and often the
-# same symbol appear multiple times in the code, doxygen keeps a cache of
-# pre-resolved symbols. If the cache is too small doxygen will become slower.
-# If the cache is too large, memory is wasted. The cache size is given by this
-# formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range is 0..9, the default is 0,
-# corresponding to a cache size of 2^16 = 65536 symbols.
+# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This
+# cache is used to resolve symbols given their name and scope. Since this can be
+# an expensive process and often the same symbol appears multiple times in the
+# code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small
+# doxygen will become slower. If the cache is too large, memory is wasted. The
+# cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range
+# is 0..9, the default is 0, corresponding to a cache size of 2^16=65536
+# symbols. At the end of a run doxygen will report the cache usage and suggest
+# the optimal cache size from a speed point of view.
+# Minimum value: 0, maximum value: 9, default value: 0.
 
 LOOKUP_CACHE_SIZE      = 0
 
@@ -361,344 +389,387 @@ LOOKUP_CACHE_SIZE      = 0
 #---------------------------------------------------------------------------
 
 # If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in
-# documentation are documented, even if no documentation was available.
-# Private class members and static file members will be hidden unless
-# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES
+# documentation are documented, even if no documentation was available. Private
+# class members and static file members will be hidden unless the
+# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES.
+# Note: This will also disable the warnings about undocumented members that are
+# normally produced when WARNINGS is set to YES.
+# The default value is: NO.
 
 EXTRACT_ALL            = NO
 
-# If the EXTRACT_PRIVATE tag is set to YES all private members of a class
-# will be included in the documentation.
+# If the EXTRACT_PRIVATE tag is set to YES all private members of a class will
+# be included in the documentation.
+# The default value is: NO.
 
 EXTRACT_PRIVATE        = NO
 
 # If the EXTRACT_PACKAGE tag is set to YES all members with package or internal
 # scope will be included in the documentation.
+# The default value is: NO.
 
 EXTRACT_PACKAGE        = YES
 
-# If the EXTRACT_STATIC tag is set to YES all static members of a file
-# will be included in the documentation.
+# If the EXTRACT_STATIC tag is set to YES all static members of a file will be
+# included in the documentation.
+# The default value is: NO.
 
 EXTRACT_STATIC         = YES
 
-# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs)
-# defined locally in source files will be included in the documentation.
-# If set to NO only classes defined in header files are included.
+# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) defined
+# locally in source files will be included in the documentation. If set to NO
+# only classes defined in header files are included. Does not have any effect
+# for Java sources.
+# The default value is: YES.
 
 EXTRACT_LOCAL_CLASSES  = YES
 
-# This flag is only useful for Objective-C code. When set to YES local
-# methods, which are defined in the implementation section but not in
-# the interface are included in the documentation.
-# If set to NO (the default) only methods in the interface are included.
+# This flag is only useful for Objective-C code. When set to YES local methods,
+# which are defined in the implementation section but not in the interface are
+# included in the documentation. If set to NO only methods in the interface are
+# included.
+# The default value is: NO.
 
 EXTRACT_LOCAL_METHODS  = YES
 
 # If this flag is set to YES, the members of anonymous namespaces will be
 # extracted and appear in the documentation as a namespace called
-# 'anonymous_namespace{file}', where file will be replaced with the base
-# name of the file that contains the anonymous namespace. By default
-# anonymous namespaces are hidden.
+# 'anonymous_namespace{file}', where file will be replaced with the base name of
+# the file that contains the anonymous namespace. By default anonymous namespaces
+# are hidden.
+# The default value is: NO.
 
 EXTRACT_ANON_NSPACES   = NO
 
-# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all
-# undocumented members of documented classes, files or namespaces.
-# If set to NO (the default) these members will be included in the
-# various overviews, but no documentation section is generated.
-# This option has no effect if EXTRACT_ALL is enabled.
+# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all
+# undocumented members inside documented classes or files. If set to NO these
+# members will be included in the various overviews, but no documentation
+# section is generated. This option has no effect if EXTRACT_ALL is enabled.
+# The default value is: NO.
 
 HIDE_UNDOC_MEMBERS     = YES
 
-# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all
-# undocumented classes that are normally visible in the class hierarchy.
-# If set to NO (the default) these classes will be included in the various
-# overviews. This option has no effect if EXTRACT_ALL is enabled.
+# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all
+# undocumented classes that are normally visible in the class hierarchy. If set
+# to NO these classes will be included in the various overviews. This option has
+# no effect if EXTRACT_ALL is enabled.
+# The default value is: NO.
 
 HIDE_UNDOC_CLASSES     = YES
 
-# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all
-# friend (class|struct|union) declarations.
-# If set to NO (the default) these declarations will be included in the
-# documentation.
+# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend
+# (class|struct|union) declarations. If set to NO these declarations will be
+# included in the documentation.
+# The default value is: NO.
 
 HIDE_FRIEND_COMPOUNDS  = NO
 
-# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any
-# documentation blocks found inside the body of a function.
-# If set to NO (the default) these blocks will be appended to the
-# function's detailed documentation block.
+# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any
+# documentation blocks found inside the body of a function. If set to NO these
+# blocks will be appended to the function's detailed documentation block.
+# The default value is: NO.
 
 HIDE_IN_BODY_DOCS      = NO
 
-# The INTERNAL_DOCS tag determines if documentation
-# that is typed after a \internal command is included. If the tag is set
-# to NO (the default) then the documentation will be excluded.
-# Set it to YES to include the internal documentation.
+# The INTERNAL_DOCS tag determines if documentation that is typed after a
+# \internal command is included. If the tag is set to NO then the documentation
+# will be excluded. Set it to YES to include the internal documentation.
+# The default value is: NO.
 
 INTERNAL_DOCS          = NO
 
-# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate
-# file names in lower-case letters. If set to YES upper-case letters are also
+# If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file
+# names in lower-case letters. If set to YES upper-case letters are also
 # allowed. This is useful if you have classes or files whose names only differ
 # in case and if your file system supports case sensitive file names. Windows
 # and Mac users are advised to set this option to NO.
+# The default value is: system dependent.
 
 CASE_SENSE_NAMES       = NO
 
-# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen
-# will show members with their full class and namespace scopes in the
-# documentation. If set to YES the scope will be hidden.
+# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with
+# their full class and namespace scopes in the documentation. If set to YES the
+# scope will be hidden.
+# The default value is: NO.
 
 HIDE_SCOPE_NAMES       = NO
 
-# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen
-# will put a list of the files that are included by a file in the documentation
-# of that file.
+# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of
+# the files that are included by a file in the documentation of that file.
+# The default value is: YES.
 
 SHOW_INCLUDE_FILES     = YES
 
-# If the FORCE_LOCAL_INCLUDES tag is set to YES then Doxygen
-# will list include files with double quotes in the documentation
-# rather than with sharp brackets.
+# If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include
+# files with double quotes in the documentation rather than with sharp brackets.
+# The default value is: NO.
 
 FORCE_LOCAL_INCLUDES   = NO
 
-# If the INLINE_INFO tag is set to YES (the default) then a tag [inline]
-# is inserted in the documentation for inline members.
+# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the
+# documentation for inline members.
+# The default value is: YES.
 
 INLINE_INFO            = YES
 
-# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen
-# will sort the (detailed) documentation of file and class members
-# alphabetically by member name. If set to NO the members will appear in
-# declaration order.
+# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the
+# (detailed) documentation of file and class members alphabetically by member
+# name. If set to NO the members will appear in declaration order.
+# The default value is: YES.
 
 SORT_MEMBER_DOCS       = NO
 
-# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the
-# brief documentation of file, namespace and class members alphabetically
-# by member name. If set to NO (the default) the members will appear in
-# declaration order.
+# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief
+# descriptions of file, namespace and class members alphabetically by member
+# name. If set to NO the members will appear in declaration order.
+# The default value is: NO.
 
 SORT_BRIEF_DOCS        = NO
 
-# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen
-# will sort the (brief and detailed) documentation of class members so that
-# constructors and destructors are listed first. If set to NO (the default)
-# the constructors will appear in the respective orders defined by
-# SORT_MEMBER_DOCS and SORT_BRIEF_DOCS.
-# This tag will be ignored for brief docs if SORT_BRIEF_DOCS is set to NO
-# and ignored for detailed docs if SORT_MEMBER_DOCS is set to NO.
+# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the
+# (brief and detailed) documentation of class members so that constructors and
+# destructors are listed first. If set to NO the constructors will appear in the
+# respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS.
+# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief
+# member documentation.
+# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting
+# detailed member documentation.
+# The default value is: NO.
 
 SORT_MEMBERS_CTORS_1ST = YES
 
-# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the
-# hierarchy of group names into alphabetical order. If set to NO (the default)
-# the group names will appear in their defined order.
+# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy
+# of group names into alphabetical order. If set to NO the group names will
+# appear in their defined order.
+# The default value is: NO.
 
 SORT_GROUP_NAMES       = NO
 
-# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be
-# sorted by fully-qualified names, including namespaces. If set to
-# NO (the default), the class list will be sorted only by class name,
-# not including the namespace part.
+# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by
+# fully-qualified names, including namespaces. If set to NO, the class list will
+# be sorted only by class name, not including the namespace part.
 # Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
-# Note: This option applies only to the class list, not to the
-# alphabetical list.
+# Note: This option applies only to the class list, not to the alphabetical
+# list.
+# The default value is: NO.
 
 SORT_BY_SCOPE_NAME     = YES
 
-# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to
-# do proper type resolution of all parameters of a function it will reject a
-# match between the prototype and the implementation of a member function even
-# if there is only one candidate or it is obvious which candidate to choose
-# by doing a simple string match. By disabling STRICT_PROTO_MATCHING doxygen
-# will still accept a match between prototype and implementation in such cases.
+# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper
+# type resolution of all parameters of a function it will reject a match between
+# the prototype and the implementation of a member function even if there is
+# only one candidate or it is obvious which candidate to choose by doing a
+# simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still
+# accept a match between prototype and implementation in such cases.
+# The default value is: NO.
 
 STRICT_PROTO_MATCHING  = NO
 
-# The GENERATE_TODOLIST tag can be used to enable (YES) or
-# disable (NO) the todo list. This list is created by putting \todo
-# commands in the documentation.
+# The GENERATE_TODOLIST tag can be used to enable (YES) or disable (NO) the
+# todo list. This list is created by putting \todo commands in the
+# documentation.
+# The default value is: YES.
 
 GENERATE_TODOLIST      = YES
 
-# The GENERATE_TESTLIST tag can be used to enable (YES) or
-# disable (NO) the test list. This list is created by putting \test
-# commands in the documentation.
+# The GENERATE_TESTLIST tag can be used to enable (YES) or disable (NO) the
+# test list. This list is created by putting \test commands in the
+# documentation.
+# The default value is: YES.
 
 GENERATE_TESTLIST      = YES
 
-# The GENERATE_BUGLIST tag can be used to enable (YES) or
-# disable (NO) the bug list. This list is created by putting \bug
-# commands in the documentation.
+# The GENERATE_BUGLIST tag can be used to enable (YES) or disable (NO) the bug
+# list. This list is created by putting \bug commands in the documentation.
+# The default value is: YES.
 
 GENERATE_BUGLIST       = YES
 
-# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or
-# disable (NO) the deprecated list. This list is created by putting
-# \deprecated commands in the documentation.
+# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or disable (NO)
+# the deprecated list. This list is created by putting \deprecated commands in
+# the documentation.
+# The default value is: YES.
 
 GENERATE_DEPRECATEDLIST= YES
 
-# The ENABLED_SECTIONS tag can be used to enable conditional
-# documentation sections, marked by \if sectionname ... \endif.
+# The ENABLED_SECTIONS tag can be used to enable conditional documentation
+# sections, marked by \if <section_label> ... \endif and \cond <section_label>
+# ... \endcond blocks.
 
 ENABLED_SECTIONS       =
 
-# The MAX_INITIALIZER_LINES tag determines the maximum number of lines
-# the initial value of a variable or macro consists of for it to appear in
-# the documentation. If the initializer consists of more lines than specified
-# here it will be hidden. Use a value of 0 to hide initializers completely.
-# The appearance of the initializer of individual variables and macros in the
-# documentation can be controlled using \showinitializer or \hideinitializer
-# command in the documentation regardless of this setting.
+# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the
+# initial value of a variable or macro / define can have for it to appear in the
+# documentation. If the initializer consists of more lines than specified here
+# it will be hidden. Use a value of 0 to hide initializers completely. The
+# appearance of the value of individual variables and macros / defines can be
+# controlled using \showinitializer or \hideinitializer command in the
+# documentation regardless of this setting.
+# Minimum value: 0, maximum value: 10000, default value: 30.
 
 MAX_INITIALIZER_LINES  = 29
 
-# Set the SHOW_USED_FILES tag to NO to disable the list of files generated
-# at the bottom of the documentation of classes and structs. If set to YES the
-# list will mention the files that were used to generate the documentation.
+# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at
+# the bottom of the documentation of classes and structs. If set to YES the list
+# will mention the files that were used to generate the documentation.
+# The default value is: YES.
 
 SHOW_USED_FILES        = YES
 
-# Set the SHOW_FILES tag to NO to disable the generation of the Files page.
-# This will remove the Files entry from the Quick Index and from the
-# Folder Tree View (if specified). The default is YES.
+# Set the SHOW_FILES tag to NO to disable the generation of the Files page. This
+# will remove the Files entry from the Quick Index and from the Folder Tree View
+# (if specified).
+# The default value is: YES.
 
 SHOW_FILES             = YES
 
-# Set the SHOW_NAMESPACES tag to NO to disable the generation of the
-# Namespaces page.  This will remove the Namespaces entry from the Quick Index
-# and from the Folder Tree View (if specified). The default is YES.
+# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces
+# page. This will remove the Namespaces entry from the Quick Index and from the
+# Folder Tree View (if specified).
+# The default value is: YES.
 
 SHOW_NAMESPACES        = NO
 
 # The FILE_VERSION_FILTER tag can be used to specify a program or script that
 # doxygen should invoke to get the current version for each file (typically from
 # the version control system). Doxygen will invoke the program by executing (via
-# popen()) the command <command> <input-file>, where <command> is the value of
-# the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file
-# provided by doxygen. Whatever the program writes to standard output
-# is used as the file version. See the manual for examples.
+# popen()) the command command input-file, where command is the value of the
+# FILE_VERSION_FILTER tag, and input-file is the name of an input file provided
+# by doxygen. Whatever the program writes to standard output is used as the file
+# version. For an example see the documentation.
 
 FILE_VERSION_FILTER    =
 
 # The LAYOUT_FILE tag can be used to specify a layout file which will be parsed
 # by doxygen. The layout file controls the global structure of the generated
 # output files in an output format independent way. To create the layout file
-# that represents doxygen's defaults, run doxygen with the -l option.
-# You can optionally specify a file name after the option, if omitted
-# DoxygenLayout.xml will be used as the name of the layout file.
+# that represents doxygen's defaults, run doxygen with the -l option. You can
+# optionally specify a file name after the option, if omitted DoxygenLayout.xml
+# will be used as the name of the layout file.
+#
+# Note that if you run doxygen from a directory containing a file called
+# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE
+# tag is left empty.
 
 LAYOUT_FILE            =
 
-# The CITE_BIB_FILES tag can be used to specify one or more bib files
-# containing the references data. This must be a list of .bib files. The
-# .bib extension is automatically appended if omitted. Using this command
-# requires the bibtex tool to be installed. See also
-# http://en.wikipedia.org/wiki/BibTeX for more info. For LaTeX the style
-# of the bibliography can be controlled using LATEX_BIB_STYLE. To use this
-# feature you need bibtex and perl available in the search path.
+# The CITE_BIB_FILES tag can be used to specify one or more bib files containing
+# the reference definitions. This must be a list of .bib files. The .bib
+# extension is automatically appended if omitted. This requires the bibtex tool
+# to be installed. See also http://en.wikipedia.org/wiki/BibTeX for more info.
+# For LaTeX the style of the bibliography can be controlled using
+# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the
+# search path. Do not use file names with spaces, bibtex cannot handle them. See
+# also \cite for info how to create references.
 
 CITE_BIB_FILES         =
 
 #---------------------------------------------------------------------------
-# configuration options related to warning and progress messages
+# Configuration options related to warning and progress messages
 #---------------------------------------------------------------------------
 
-# The QUIET tag can be used to turn on/off the messages that are generated
-# by doxygen. Possible values are YES and NO. If left blank NO is used.
+# The QUIET tag can be used to turn on/off the messages that are generated to
+# standard output by doxygen. If QUIET is set to YES this implies that the
+# messages are off.
+# The default value is: NO.
 
 QUIET                  = NO
 
 # The WARNINGS tag can be used to turn on/off the warning messages that are
-# generated by doxygen. Possible values are YES and NO. If left blank
-# NO is used.
+# generated to standard error (stderr) by doxygen. If WARNINGS is set to YES
+# this implies that the warnings are on.
+#
+# Tip: Turn warnings on while writing the documentation.
+# The default value is: YES.
 
 WARNINGS               = YES
 
-# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings
-# for undocumented members. If EXTRACT_ALL is set to YES then this flag will
-# automatically be disabled.
+# If the WARN_IF_UNDOCUMENTED tag is set to YES, then doxygen will generate
+# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag
+# will automatically be disabled.
+# The default value is: YES.
 
 WARN_IF_UNDOCUMENTED   = YES
 
-# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for
-# potential errors in the documentation, such as not documenting some
-# parameters in a documented function, or documenting parameters that
-# don't exist or using markup commands wrongly.
+# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for
+# potential errors in the documentation, such as not documenting some parameters
+# in a documented function, or documenting parameters that don't exist or using
+# markup commands wrongly.
+# The default value is: YES.
 
 WARN_IF_DOC_ERROR      = YES
 
-# The WARN_NO_PARAMDOC option can be enabled to get warnings for
-# functions that are documented, but have no documentation for their parameters
-# or return value. If set to NO (the default) doxygen will only warn about
-# wrong or incomplete parameter documentation, but not about the absence of
-# documentation.
+# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that
+# are documented, but have no documentation for their parameters or return
+# value. If set to NO doxygen will only warn about wrong or incomplete parameter
+# documentation, but not about the absence of documentation.
+# The default value is: NO.
 
 WARN_NO_PARAMDOC       = YES
 
-# The WARN_FORMAT tag determines the format of the warning messages that
-# doxygen can produce. The string should contain the $file, $line, and $text
-# tags, which will be replaced by the file and line number from which the
-# warning originated and the warning text. Optionally the format may contain
-# $version, which will be replaced by the version of the file (if it could
-# be obtained via FILE_VERSION_FILTER)
+# The WARN_FORMAT tag determines the format of the warning messages that doxygen
+# can produce. The string should contain the $file, $line, and $text tags, which
+# will be replaced by the file and line number from which the warning originated
+# and the warning text. Optionally the format may contain $version, which will
+# be replaced by the version of the file (if it could be obtained via
+# FILE_VERSION_FILTER)
+# The default value is: $file:$line: $text.
 
 WARN_FORMAT            = "$file:$line: $text"
 
-# The WARN_LOGFILE tag can be used to specify a file to which warning
-# and error messages should be written. If left blank the output is written
-# to stderr.
+# The WARN_LOGFILE tag can be used to specify a file to which warning and error
+# messages should be written. If left blank the output is written to standard
+# error (stderr).
 
 WARN_LOGFILE           =
 
 #---------------------------------------------------------------------------
-# configuration options related to the input files
+# Configuration options related to the input files
 #---------------------------------------------------------------------------
 
-# The INPUT tag can be used to specify the files and/or directories that contain
-# documented source files. You may enter file names like "myfile.cpp" or
-# directories like "/usr/src/myproject". Separate the files or directories
-# with spaces.
+# The INPUT tag is used to specify the files and/or directories that contain
+# documented source files. You may enter file names like myfile.cpp or
+# directories like /usr/src/myproject. Separate the files or directories with
+# spaces.
+# Note: If this tag is empty the current directory is searched.
 
 INPUT                  = @CMAKE_SOURCE_DIR@/parmepy \
                          @CMAKE_SOURCE_DIR@/DoxyConf/mainpage.doxygen \
                          @CMAKE_SOURCE_DIR@/src/fftw
 
 # This tag can be used to specify the character encoding of the source files
-# that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is
-# also the default input encoding. Doxygen uses libiconv (or the iconv built
-# into libc) for the transcoding. See http://www.gnu.org/software/libiconv for
-# the list of possible encodings.
+# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses
+# libiconv (or the iconv built into libc) for the transcoding. See the libiconv
+# documentation (see: http://www.gnu.org/software/libiconv) for the list of
+# possible encodings.
+# The default value is: UTF-8.
 
 INPUT_ENCODING         = UTF-8
 
 # If the value of the INPUT tag contains directories, you can use the
-# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
-# and *.h) to filter out the source-files in the directories. If left
-# blank the following patterns are tested:
-# *.c *.cc *.cxx *.cpp *.c++ *.d *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh
-# *.hxx *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.dox *.py
-# *.f90 *.f *.for *.vhd *.vhdl
+# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and
+# *.h) to filter out the source-files in the directories. If left blank the
+# following patterns are tested: *.c, *.cc, *.cxx, *.cpp, *.c++, *.java, *.ii,
+# *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, *.hh, *.hxx, *.hpp,
+# *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, *.m, *.markdown,
+# *.md, *.mm, *.dox, *.py, *.f90, *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf,
+# *.qsf, *.as and *.js.
 
 FILE_PATTERNS          = *.doxygen \
                          *.py \
                          *.cl \
                          *.f90
 
-# The RECURSIVE tag can be used to turn specify whether or not subdirectories
-# should be searched for input files as well. Possible values are YES and NO.
-# If left blank NO is used.
+# The RECURSIVE tag can be used to specify whether or not subdirectories should
+# be searched for input files as well.
+# The default value is: NO.
 
 RECURSIVE              = YES
 
 # The EXCLUDE tag can be used to specify files and/or directories that should be
 # excluded from the INPUT source files. This way you can easily exclude a
 # subdirectory from a directory tree whose root is specified with the INPUT tag.
+#
 # Note that relative paths are relative to the directory from which doxygen is
 # run.
 
@@ -707,14 +778,16 @@ EXCLUDE                =
 # The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
 # directories that are symbolic links (a Unix file system feature) are excluded
 # from the input.
+# The default value is: NO.
 
 EXCLUDE_SYMLINKS       = NO
 
 # If the value of the INPUT tag contains directories, you can use the
 # EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
-# certain files from those directories. Note that the wildcards are matched
-# against the file with absolute path, so to exclude all test directories
-# for example use the pattern */test/*
+# certain files from those directories.
+#
+# Note that the wildcards are matched against the file with absolute path, so to
+# exclude all test directories for example use the pattern */test/*
 
 EXCLUDE_PATTERNS       = */.svn/* \
                          */tests/*
@@ -724,758 +797,1099 @@ EXCLUDE_PATTERNS       = */.svn/* \
 # output. The symbol name can be a fully qualified name, a word, or if the
 # wildcard * is used, a substring. Examples: ANamespace, AClass,
 # AClass::ANamespace, ANamespace::*Test
+#
+# Note that the wildcards are matched against the file with absolute path, so to
+# exclude all test directories use the pattern */test/*
 
 EXCLUDE_SYMBOLS        =
 
-# The EXAMPLE_PATH tag can be used to specify one or more files or
-# directories that contain example code fragments that are included (see
-# the \include command).
+# The EXAMPLE_PATH tag can be used to specify one or more files or directories
+# that contain example code fragments that are included (see the \include
+# command).
 
 EXAMPLE_PATH           =
 
 # If the value of the EXAMPLE_PATH tag contains directories, you can use the
-# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
-# and *.h) to filter out the source-files in the directories. If left
-# blank all files are included.
+# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and
+# *.h) to filter out the source-files in the directories. If left blank all
+# files are included.
 
 EXAMPLE_PATTERNS       = *
 
 # If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
-# searched for input files to be used with the \include or \dontinclude
-# commands irrespective of the value of the RECURSIVE tag.
-# Possible values are YES and NO. If left blank NO is used.
+# searched for input files to be used with the \include or \dontinclude commands
+# irrespective of the value of the RECURSIVE tag.
+# The default value is: NO.
 
 EXAMPLE_RECURSIVE      = NO
 
-# The IMAGE_PATH tag can be used to specify one or more files or
-# directories that contain image that are included in the documentation (see
-# the \image command).
+# The IMAGE_PATH tag can be used to specify one or more files or directories
+# that contain images that are to be included in the documentation (see the
+# \image command).
 
 IMAGE_PATH             =
 
 # The INPUT_FILTER tag can be used to specify a program that doxygen should
 # invoke to filter for each input file. Doxygen will invoke the filter program
-# by executing (via popen()) the command <filter> <input-file>, where <filter>
-# is the value of the INPUT_FILTER tag, and <input-file> is the name of an
-# input file. Doxygen will then use the output that the filter program writes
-# to standard output.  If FILTER_PATTERNS is specified, this tag will be
-# ignored.
+# by executing (via popen()) the command:
+#
+# <filter> <input-file>
+#
+# where <filter> is the value of the INPUT_FILTER tag, and <input-file> is the
+# name of an input file. Doxygen will then use the output that the filter
+# program writes to standard output. If FILTER_PATTERNS is specified, this tag
+# will be ignored.
+#
+# Note that the filter must not add or remove lines; it is applied before the
+# code is scanned, but not when the output code is generated. If lines are added
+# or removed, the anchors will not be placed correctly.
 
 INPUT_FILTER           =
 
 # The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
-# basis.  Doxygen will compare the file name with each pattern and apply the
-# filter if there is a match.  The filters are a list of the form:
-# pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further
-# info on how filters are used. If FILTER_PATTERNS is empty or if
-# non of the patterns match the file name, INPUT_FILTER is applied.
+# basis. Doxygen will compare the file name with each pattern and apply the
+# filter if there is a match. The filters are a list of the form: pattern=filter
+# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how
+# filters are used. If the FILTER_PATTERNS tag is empty or if none of the
+# patterns match the file name, INPUT_FILTER is applied.
 
 FILTER_PATTERNS        = *.py=/usr/local/bin/doxypy.py
 
 # If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
-# INPUT_FILTER) will be used to filter the input files when producing source
-# files to browse (i.e. when SOURCE_BROWSER is set to YES).
+# INPUT_FILTER) will also be used to filter the input files that are used for
+# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES).
+# The default value is: NO.
 
 FILTER_SOURCE_FILES    = YES
 
 # The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file
-# pattern. A pattern will override the setting for FILTER_PATTERN (if any)
-# and it is also possible to disable source filtering for a specific pattern
-# using *.ext= (so without naming a filter). This option only has effect when
-# FILTER_SOURCE_FILES is enabled.
+# pattern. A pattern will override the setting for FILTER_PATTERN (if any) and
+# it is also possible to disable source filtering for a specific pattern using
+# *.ext= (so without naming a filter).
+# This tag requires that the tag FILTER_SOURCE_FILES is set to YES.
 
 FILTER_SOURCE_PATTERNS =
 
+# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that
+# is part of the input, its contents will be placed on the main page
+# (index.html). This can be useful if you have a project on for instance GitHub
+# and want to reuse the introduction page also for the doxygen output.
+
+USE_MDFILE_AS_MAINPAGE =
+
 #---------------------------------------------------------------------------
-# configuration options related to source browsing
+# Configuration options related to source browsing
 #---------------------------------------------------------------------------
 
-# If the SOURCE_BROWSER tag is set to YES then a list of source files will
-# be generated. Documented entities will be cross-referenced with these sources.
-# Note: To get rid of all source code in the generated output, make sure also
-# VERBATIM_HEADERS is set to NO.
+# If the SOURCE_BROWSER tag is set to YES then a list of source files will be
+# generated. Documented entities will be cross-referenced with these sources.
+#
+# Note: To get rid of all source code in the generated output, make sure that
+# also VERBATIM_HEADERS is set to NO.
+# The default value is: NO.
 
 SOURCE_BROWSER         = NO
 
-# Setting the INLINE_SOURCES tag to YES will include the body
-# of functions and classes directly in the documentation.
+# Setting the INLINE_SOURCES tag to YES will include the body of functions,
+# classes and enums directly into the documentation.
+# The default value is: NO.
 
 INLINE_SOURCES         = NO
 
-# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct
-# doxygen to hide any special comment blocks from generated source code
-# fragments. Normal C, C++ and Fortran comments will always remain visible.
+# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any
+# special comment blocks from generated source code fragments. Normal C, C++ and
+# Fortran comments will always remain visible.
+# The default value is: YES.
 
 STRIP_CODE_COMMENTS    = YES
 
-# If the REFERENCED_BY_RELATION tag is set to YES
-# then for each documented function all documented
-# functions referencing it will be listed.
+# If the REFERENCED_BY_RELATION tag is set to YES then for each documented
+# function all documented functions referencing it will be listed.
+# The default value is: NO.
 
 REFERENCED_BY_RELATION = NO
 
-# If the REFERENCES_RELATION tag is set to YES
-# then for each documented function all documented entities
-# called/used by that function will be listed.
+# If the REFERENCES_RELATION tag is set to YES then for each documented function
+# all documented entities called/used by that function will be listed.
+# The default value is: NO.
 
 REFERENCES_RELATION    = NO
 
-# If the REFERENCES_LINK_SOURCE tag is set to YES (the default)
-# and SOURCE_BROWSER tag is set to YES, then the hyperlinks from
-# functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will
-# link to the source code.  Otherwise they will link to the documentation.
+# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set
+# to YES, then the hyperlinks from functions in REFERENCES_RELATION and
+# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will
+# link to the documentation.
+# The default value is: YES.
 
 REFERENCES_LINK_SOURCE = YES
 
-# If the USE_HTAGS tag is set to YES then the references to source code
-# will point to the HTML generated by the htags(1) tool instead of doxygen
-# built-in source browser. The htags tool is part of GNU's global source
-# tagging system (see http://www.gnu.org/software/global/global.html). You
-# will need version 4.8.6 or higher.
+# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the
+# source code will show a tooltip with additional information such as prototype,
+# brief description and links to the definition and documentation. Since this
+# will make the HTML file larger and loading of large files a bit slower, you
+# can opt to disable this feature.
+# The default value is: YES.
+# This tag requires that the tag SOURCE_BROWSER is set to YES.
+
+SOURCE_TOOLTIPS        = YES
+
+# If the USE_HTAGS tag is set to YES then the references to source code will
+# point to the HTML generated by the htags(1) tool instead of doxygen built-in
+# source browser. The htags tool is part of GNU's global source tagging system
+# (see http://www.gnu.org/software/global/global.html). You will need version
+# 4.8.6 or higher.
+#
+# To use it do the following:
+# - Install the latest version of global
+# - Enable SOURCE_BROWSER and USE_HTAGS in the config file
+# - Make sure the INPUT points to the root of the source tree
+# - Run doxygen as normal
+#
+# Doxygen will invoke htags (and that will in turn invoke gtags), so these
+# tools must be available from the command line (i.e. in the search path).
+#
+# The result: instead of the source browser generated by doxygen, the links to
+# source code will now point to the output of htags.
+# The default value is: NO.
+# This tag requires that the tag SOURCE_BROWSER is set to YES.
 
 USE_HTAGS              = NO
 
-# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen
-# will generate a verbatim copy of the header file for each class for
-# which an include is specified. Set to NO to disable this.
+# If the VERBATIM_HEADERS tag is set to YES then doxygen will generate a
+# verbatim copy of the header file for each class for which an include is
+# specified. Set to NO to disable this.
+# See also: Section \class.
+# The default value is: YES.
 
 VERBATIM_HEADERS       = YES
 
+# If the CLANG_ASSISTED_PARSING tag is set to YES, then doxygen will use the
+# clang parser (see: http://clang.llvm.org/) for more accurate parsing at the
+# cost of reduced performance. This can be particularly helpful with template
+# rich C++ code for which doxygen's built-in parser lacks the necessary type
+# information.
+# Note: The availability of this option depends on whether or not doxygen was
+# compiled with the --with-libclang option.
+# The default value is: NO.
+
+CLANG_ASSISTED_PARSING = NO
+
+# If clang assisted parsing is enabled you can provide the compiler with command
+# line options that you would normally use when invoking the compiler. Note that
+# the include paths will already be set by doxygen for the files and directories
+# specified with INPUT and INCLUDE_PATH.
+# This tag requires that the tag CLANG_ASSISTED_PARSING is set to YES.
+
+CLANG_OPTIONS          =
+
 #---------------------------------------------------------------------------
-# configuration options related to the alphabetical class index
+# Configuration options related to the alphabetical class index
 #---------------------------------------------------------------------------
 
-# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index
-# of all compounds will be generated. Enable this if the project
-# contains a lot of classes, structs, unions or interfaces.
+# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all
+# compounds will be generated. Enable this if the project contains a lot of
+# classes, structs, unions or interfaces.
+# The default value is: YES.
 
 ALPHABETICAL_INDEX     = YES
 
-# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then
-# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns
-# in which this list will be split (can be a number in the range [1..20])
+# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in
+# which the alphabetical index list will be split.
+# Minimum value: 1, maximum value: 20, default value: 5.
+# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
 
 COLS_IN_ALPHA_INDEX    = 5
 
-# In case all classes in a project start with a common prefix, all
-# classes will be put under the same header in the alphabetical index.
-# The IGNORE_PREFIX tag can be used to specify one or more prefixes that
-# should be ignored while generating the index headers.
+# In case all classes in a project start with a common prefix, all classes will
+# be put under the same header in the alphabetical index. The IGNORE_PREFIX tag
+# can be used to specify a prefix (or a list of prefixes) that should be ignored
+# while generating the index headers.
+# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
 
 IGNORE_PREFIX          =
 
 #---------------------------------------------------------------------------
-# configuration options related to the HTML output
+# Configuration options related to the HTML output
 #---------------------------------------------------------------------------
 
-# If the GENERATE_HTML tag is set to YES (the default) Doxygen will
-# generate HTML output.
+# If the GENERATE_HTML tag is set to YES doxygen will generate HTML output
+# The default value is: YES.
 
 GENERATE_HTML          = YES
 
-# The HTML_OUTPUT tag is used to specify where the HTML docs will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be
-# put in front of it. If left blank `html' will be used as the default path.
+# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: html.
+# This tag requires that the tag GENERATE_HTML is set to YES.
 
 HTML_OUTPUT            = html
 
-# The HTML_FILE_EXTENSION tag can be used to specify the file extension for
-# each generated HTML page (for example: .htm,.php,.asp). If it is left blank
-# doxygen will generate files with .html extension.
+# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each
+# generated HTML page (for example: .htm, .php, .asp).
+# The default value is: .html.
+# This tag requires that the tag GENERATE_HTML is set to YES.
 
 HTML_FILE_EXTENSION    = .html
 
-# The HTML_HEADER tag can be used to specify a personal HTML header for
-# each generated HTML page. If it is left blank doxygen will generate a
-# standard header. Note that when using a custom header you are responsible
-# for the proper inclusion of any scripts and style sheets that doxygen
-# needs, which is dependent on the configuration options used.
-# It is advised to generate a default header using "doxygen -w html
-# header.html footer.html stylesheet.css YourConfigFile" and then modify
-# that header. Note that the header is subject to change so you typically
-# have to redo this when upgrading to a newer version of doxygen or when
-# changing the value of configuration settings such as GENERATE_TREEVIEW!
+# The HTML_HEADER tag can be used to specify a user-defined HTML header file for
+# each generated HTML page. If the tag is left blank doxygen will generate a
+# standard header.
+#
+# To get valid HTML the header file that includes any scripts and style sheets
+# that doxygen needs, which is dependent on the configuration options used (e.g.
+# the setting GENERATE_TREEVIEW). It is highly recommended to start with a
+# default header using
+# doxygen -w html new_header.html new_footer.html new_stylesheet.css
+# YourConfigFile
+# and then modify the file new_header.html. See also section "Doxygen usage"
+# for information on how to generate the default header that doxygen normally
+# uses.
+# Note: The header is subject to change so you typically have to regenerate the
+# default header when upgrading to a newer version of doxygen. For a description
+# of the possible markers and block names see the documentation.
+# This tag requires that the tag GENERATE_HTML is set to YES.
 
 HTML_HEADER            =
 
-# The HTML_FOOTER tag can be used to specify a personal HTML footer for
-# each generated HTML page. If it is left blank doxygen will generate a
-# standard footer.
+# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each
+# generated HTML page. If the tag is left blank doxygen will generate a standard
+# footer. See HTML_HEADER for more information on how to generate a default
+# footer and what special commands can be used inside the footer. See also
+# section "Doxygen usage" for information on how to generate the default footer
+# that doxygen normally uses.
+# This tag requires that the tag GENERATE_HTML is set to YES.
 
 HTML_FOOTER            =
 
-# The HTML_STYLESHEET tag can be used to specify a user-defined cascading
-# style sheet that is used by each HTML page. It can be used to
-# fine-tune the look of the HTML output. If left blank doxygen will
-# generate a default style sheet. Note that it is recommended to use
-# HTML_EXTRA_STYLESHEET instead of this one, as it is more robust and this
-# tag will in the future become obsolete.
+# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style
+# sheet that is used by each HTML page. It can be used to fine-tune the look of
+# the HTML output. If left blank doxygen will generate a default style sheet.
+# See also section "Doxygen usage" for information on how to generate the style
+# sheet that doxygen normally uses.
+# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as
+# it is more robust and this tag (HTML_STYLESHEET) will in the future become
+# obsolete.
+# This tag requires that the tag GENERATE_HTML is set to YES.
 
 HTML_STYLESHEET        =
 
-# The HTML_EXTRA_STYLESHEET tag can be used to specify an additional
-# user-defined cascading style sheet that is included after the standard
-# style sheets created by doxygen. Using this option one can overrule
-# certain style aspects. This is preferred over using HTML_STYLESHEET
-# since it does not replace the standard style sheet and is therefor more
-# robust against future updates. Doxygen will copy the style sheet file to
-# the output directory.
+# The HTML_EXTRA_STYLESHEET tag can be used to specify an additional user-
+# defined cascading style sheet that is included after the standard style sheets
+# created by doxygen. Using this option one can overrule certain style aspects.
+# This is preferred over using HTML_STYLESHEET since it does not replace the
+# standard style sheet and is therefore more robust against future updates.
+# Doxygen will copy the style sheet file to the output directory. For an example
+# see the documentation.
+# This tag requires that the tag GENERATE_HTML is set to YES.
 
 HTML_EXTRA_STYLESHEET  =
 
 # The HTML_EXTRA_FILES tag can be used to specify one or more extra images or
 # other source files which should be copied to the HTML output directory. Note
 # that these files will be copied to the base HTML output directory. Use the
-# $relpath$ marker in the HTML_HEADER and/or HTML_FOOTER files to load these
-# files. In the HTML_STYLESHEET file, use the file name only. Also note that
-# the files will be copied as-is; there are no commands or markers available.
+# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these
+# files. In the HTML_STYLESHEET file, use the file name only. Also note that the
+# files will be copied as-is; there are no commands or markers available.
+# This tag requires that the tag GENERATE_HTML is set to YES.
 
 HTML_EXTRA_FILES       =
 
-# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output.
-# Doxygen will adjust the colors in the style sheet and background images
-# according to this color. Hue is specified as an angle on a colorwheel,
-# see http://en.wikipedia.org/wiki/Hue for more information.
-# For instance the value 0 represents red, 60 is yellow, 120 is green,
-# 180 is cyan, 240 is blue, 300 purple, and 360 is red again.
-# The allowed range is 0 to 359.
+# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen
+# will adjust the colors in the stylesheet and background images according to
+# this color. Hue is specified as an angle on a colorwheel, see
+# http://en.wikipedia.org/wiki/Hue for more information. For instance the value
+# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300
+# purple, and 360 is red again.
+# Minimum value: 0, maximum value: 359, default value: 220.
+# This tag requires that the tag GENERATE_HTML is set to YES.
 
 HTML_COLORSTYLE_HUE    = 115
 
-# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of
-# the colors in the HTML output. For a value of 0 the output will use
-# grayscales only. A value of 255 will produce the most vivid colors.
+# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors
+# in the HTML output. For a value of 0 the output will use grayscales only. A
+# value of 255 will produce the most vivid colors.
+# Minimum value: 0, maximum value: 255, default value: 100.
+# This tag requires that the tag GENERATE_HTML is set to YES.
 
 HTML_COLORSTYLE_SAT    = 115
 
-# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to
-# the luminance component of the colors in the HTML output. Values below
-# 100 gradually make the output lighter, whereas values above 100 make
-# the output darker. The value divided by 100 is the actual gamma applied,
-# so 80 represents a gamma of 0.8, The value 220 represents a gamma of 2.2,
-# and 100 does not change the gamma.
+# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the
+# luminance component of the colors in the HTML output. Values below 100
+# gradually make the output lighter, whereas values above 100 make the output
+# darker. The value divided by 100 is the actual gamma applied, so 80 represents
+# a gamma of 0.8, The value 220 represents a gamma of 2.2, and 100 does not
+# change the gamma.
+# Minimum value: 40, maximum value: 240, default value: 80.
+# This tag requires that the tag GENERATE_HTML is set to YES.
 
 HTML_COLORSTYLE_GAMMA  = 124
 
 # If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
-# page will contain the date and time when the page was generated. Setting
-# this to NO can help when comparing the output of multiple runs.
+# page will contain the date and time when the page was generated. Setting this
+# to NO can help when comparing the output of multiple runs.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
 
 HTML_TIMESTAMP         = YES
 
 # If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
 # documentation will contain sections that can be hidden and shown after the
 # page has loaded.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
 
 HTML_DYNAMIC_SECTIONS  = NO
 
-# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of
-# entries shown in the various tree structured indices initially; the user
-# can expand and collapse entries dynamically later on. Doxygen will expand
-# the tree to such a level that at most the specified number of entries are
-# visible (unless a fully collapsed tree already exceeds this amount).
-# So setting the number of entries 1 will produce a full collapsed tree by
-# default. 0 is a special value representing an infinite number of entries
-# and will result in a full expanded tree by default.
+# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries
+# shown in the various tree structured indices initially; the user can expand
+# and collapse entries dynamically later on. Doxygen will expand the tree to
+# such a level that at most the specified number of entries are visible (unless
+# a fully collapsed tree already exceeds this amount). So setting the number of
+# entries 1 will produce a full collapsed tree by default. 0 is a special value
+# representing an infinite number of entries and will result in a full expanded
+# tree by default.
+# Minimum value: 0, maximum value: 9999, default value: 100.
+# This tag requires that the tag GENERATE_HTML is set to YES.
 
 HTML_INDEX_NUM_ENTRIES = 100
 
-# If the GENERATE_DOCSET tag is set to YES, additional index files
-# will be generated that can be used as input for Apple's Xcode 3
-# integrated development environment, introduced with OSX 10.5 (Leopard).
-# To create a documentation set, doxygen will generate a Makefile in the
-# HTML output directory. Running make will produce the docset in that
-# directory and running "make install" will install the docset in
-# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find
-# it at startup.
-# See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html
+# If the GENERATE_DOCSET tag is set to YES, additional index files will be
+# generated that can be used as input for Apple's Xcode 3 integrated development
+# environment (see: http://developer.apple.com/tools/xcode/), introduced with
+# OSX 10.5 (Leopard). To create a documentation set, doxygen will generate a
+# Makefile in the HTML output directory. Running make will produce the docset in
+# that directory and running make install will install the docset in
+# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at
+# startup. See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html
 # for more information.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
 
 GENERATE_DOCSET        = NO
 
-# When GENERATE_DOCSET tag is set to YES, this tag determines the name of the
-# feed. A documentation feed provides an umbrella under which multiple
-# documentation sets from a single provider (such as a company or product suite)
-# can be grouped.
+# This tag determines the name of the docset feed. A documentation feed provides
+# an umbrella under which multiple documentation sets from a single provider
+# (such as a company or product suite) can be grouped.
+# The default value is: Doxygen generated docs.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
 
 DOCSET_FEEDNAME        = "Doxygen generated docs"
 
-# When GENERATE_DOCSET tag is set to YES, this tag specifies a string that
-# should uniquely identify the documentation set bundle. This should be a
-# reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen
-# will append .docset to the name.
+# This tag specifies a string that should uniquely identify the documentation
+# set bundle. This should be a reverse domain-name style string, e.g.
+# com.mycompany.MyDocSet. Doxygen will append .docset to the name.
+# The default value is: org.doxygen.Project.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
 
 DOCSET_BUNDLE_ID       = org.doxygen.Project
 
-# When GENERATE_PUBLISHER_ID tag specifies a string that should uniquely
-# identify the documentation publisher. This should be a reverse domain-name
-# style string, e.g. com.mycompany.MyDocSet.documentation.
+# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify
+# the documentation publisher. This should be a reverse domain-name style
+# string, e.g. com.mycompany.MyDocSet.documentation.
+# The default value is: org.doxygen.Publisher.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
 
 DOCSET_PUBLISHER_ID    = org.doxygen.Publisher
 
-# The GENERATE_PUBLISHER_NAME tag identifies the documentation publisher.
+# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher.
+# The default value is: Publisher.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
 
 DOCSET_PUBLISHER_NAME  = Publisher
 
-# If the GENERATE_HTMLHELP tag is set to YES, additional index files
-# will be generated that can be used as input for tools like the
-# Microsoft HTML help workshop to generate a compiled HTML help file (.chm)
-# of the generated HTML documentation.
+# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three
+# additional HTML index files: index.hhp, index.hhc, and index.hhk. The
+# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop
+# (see: http://www.microsoft.com/en-us/download/details.aspx?id=21138) on
+# Windows.
+#
+# The HTML Help Workshop contains a compiler that can convert all HTML output
+# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML
+# files are now used as the Windows 98 help format, and will replace the old
+# Windows help format (.hlp) on all Windows platforms in the future. Compressed
+# HTML files also contain an index, a table of contents, and you can search for
+# words in the documentation. The HTML workshop also contains a viewer for
+# compressed HTML files.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
 
 GENERATE_HTMLHELP      = NO
 
-# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can
-# be used to specify the file name of the resulting .chm file. You
-# can add a path in front of the file if the result should not be
+# The CHM_FILE tag can be used to specify the file name of the resulting .chm
+# file. You can add a path in front of the file if the result should not be
 # written to the html output directory.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
 
 CHM_FILE               =
 
-# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can
-# be used to specify the location (absolute path including file name) of
-# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run
-# the HTML help compiler on the generated index.hhp.
+# The HHC_LOCATION tag can be used to specify the location (absolute path
+# including file name) of the HTML help compiler ( hhc.exe). If non-empty
+# doxygen will try to run the HTML help compiler on the generated index.hhp.
+# The file has to be specified with full path.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
 
 HHC_LOCATION           =
 
-# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag
-# controls if a separate .chi index file is generated (YES) or that
-# it should be included in the master .chm file (NO).
+# The GENERATE_CHI flag controls if a separate .chi index file is generated (
+# YES) or that it should be included in the master .chm file ( NO).
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
 
 GENERATE_CHI           = NO
 
-# If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING
-# is used to encode HtmlHelp index (hhk), content (hhc) and project file
-# content.
+# The CHM_INDEX_ENCODING is used to encode HtmlHelp index ( hhk), content ( hhc)
+# and project file content.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
 
 CHM_INDEX_ENCODING     =
 
-# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag
-# controls whether a binary table of contents is generated (YES) or a
-# normal table of contents (NO) in the .chm file.
+# The BINARY_TOC flag controls whether a binary table of contents is generated (
+# YES) or a normal table of contents ( NO) in the .chm file.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
 
 BINARY_TOC             = NO
 
-# The TOC_EXPAND flag can be set to YES to add extra items for group members
-# to the contents of the HTML help documentation and to the tree view.
+# The TOC_EXPAND flag can be set to YES to add extra items for group members to
+# the table of contents of the HTML help documentation and to the tree view.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
 
 TOC_EXPAND             = NO
 
 # If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and
-# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated
-# that can be used as input for Qt's qhelpgenerator to generate a
-# Qt Compressed Help (.qch) of the generated HTML documentation.
+# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that
+# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help
+# (.qch) of the generated HTML documentation.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
 
 GENERATE_QHP           = NO
 
-# If the QHG_LOCATION tag is specified, the QCH_FILE tag can
-# be used to specify the file name of the resulting .qch file.
-# The path specified is relative to the HTML output folder.
+# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify
+# the file name of the resulting .qch file. The path specified is relative to
+# the HTML output folder.
+# This tag requires that the tag GENERATE_QHP is set to YES.
 
 QCH_FILE               =
 
-# The QHP_NAMESPACE tag specifies the namespace to use when generating
-# Qt Help Project output. For more information please see
-# http://doc.trolltech.com/qthelpproject.html#namespace
+# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help
+# Project output. For more information please see Qt Help Project / Namespace
+# (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#namespace).
+# The default value is: org.doxygen.Project.
+# This tag requires that the tag GENERATE_QHP is set to YES.
 
 QHP_NAMESPACE          = org.doxygen.Project
 
-# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating
-# Qt Help Project output. For more information please see
-# http://doc.trolltech.com/qthelpproject.html#virtual-folders
+# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt
+# Help Project output. For more information please see Qt Help Project / Virtual
+# Folders (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#virtual-
+# folders).
+# The default value is: doc.
+# This tag requires that the tag GENERATE_QHP is set to YES.
 
 QHP_VIRTUAL_FOLDER     = doc
 
-# If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to
-# add. For more information please see
-# http://doc.trolltech.com/qthelpproject.html#custom-filters
+# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom
+# filter to add. For more information please see Qt Help Project / Custom
+# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom-
+# filters).
+# This tag requires that the tag GENERATE_QHP is set to YES.
 
 QHP_CUST_FILTER_NAME   =
 
-# The QHP_CUST_FILT_ATTRS tag specifies the list of the attributes of the
-# custom filter to add. For more information please see
-# <a href="http://doc.trolltech.com/qthelpproject.html#custom-filters">
-# Qt Help Project / Custom Filters</a>.
+# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the
+# custom filter to add. For more information please see Qt Help Project / Custom
+# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom-
+# filters).
+# This tag requires that the tag GENERATE_QHP is set to YES.
 
 QHP_CUST_FILTER_ATTRS  =
 
 # The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
-# project's
-# filter section matches.
-# <a href="http://doc.trolltech.com/qthelpproject.html#filter-attributes">
-# Qt Help Project / Filter Attributes</a>.
+# project's filter section matches. Qt Help Project / Filter Attributes (see:
+# http://qt-project.org/doc/qt-4.8/qthelpproject.html#filter-attributes).
+# This tag requires that the tag GENERATE_QHP is set to YES.
 
 QHP_SECT_FILTER_ATTRS  =
 
-# If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can
-# be used to specify the location of Qt's qhelpgenerator.
-# If non-empty doxygen will try to run qhelpgenerator on the generated
-# .qhp file.
+# The QHG_LOCATION tag can be used to specify the location of Qt's
+# qhelpgenerator. If non-empty doxygen will try to run qhelpgenerator on the
+# generated .qhp file.
+# This tag requires that the tag GENERATE_QHP is set to YES.
 
 QHG_LOCATION           =
 
-# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files
-# will be generated, which together with the HTML files, form an Eclipse help
-# plugin. To install this plugin and make it available under the help contents
-# menu in Eclipse, the contents of the directory containing the HTML and XML
-# files needs to be copied into the plugins directory of eclipse. The name of
-# the directory within the plugins directory should be the same as
-# the ECLIPSE_DOC_ID value. After copying Eclipse needs to be restarted before
-# the help appears.
+# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be
+# generated, which together with the HTML files form an Eclipse help plugin. To
+# install this plugin and make it available under the help contents menu in
+# Eclipse, the contents of the directory containing the HTML and XML files needs
+# to be copied into the plugins directory of eclipse. The name of the directory
+# within the plugins directory should be the same as the ECLIPSE_DOC_ID value.
+# After copying Eclipse needs to be restarted before the help appears.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
 
 GENERATE_ECLIPSEHELP   = NO
 
-# A unique identifier for the eclipse help plugin. When installing the plugin
-# the directory name containing the HTML and XML files should also have
-# this name.
+# A unique identifier for the Eclipse help plugin. When installing the plugin
+# the directory name containing the HTML and XML files should also have this
+# name. Each documentation set should have its own identifier.
+# The default value is: org.doxygen.Project.
+# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES.
 
 ECLIPSE_DOC_ID         = org.doxygen.Project
 
-# The DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs)
-# at top of each HTML page. The value NO (the default) enables the index and
-# the value YES disables it. Since the tabs have the same information as the
-# navigation tree you can set this option to NO if you already set
-# GENERATE_TREEVIEW to YES.
+# If you want full control over the layout of the generated HTML pages it might
+# be necessary to disable the index and replace it with your own. The
+# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top
+# of each HTML page. A value of NO enables the index and the value YES disables
+# it. Since the tabs in the index contain the same information as the navigation
+# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
 
 DISABLE_INDEX          = NO
 
 # The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
-# structure should be generated to display hierarchical information.
-# If the tag value is set to YES, a side panel will be generated
-# containing a tree-like index structure (just like the one that
-# is generated for HTML Help). For this to work a browser that supports
-# JavaScript, DHTML, CSS and frames is required (i.e. any modern browser).
-# Windows users are probably better off using the HTML help feature.
-# Since the tree basically has the same information as the tab index you
-# could consider to set DISABLE_INDEX to NO when enabling this option.
+# structure should be generated to display hierarchical information. If the tag
+# value is set to YES, a side panel will be generated containing a tree-like
+# index structure (just like the one that is generated for HTML Help). For this
+# to work a browser that supports JavaScript, DHTML, CSS and frames is required
+# (i.e. any modern browser). Windows users are probably better off using the
+# HTML help feature. Via custom stylesheets (see HTML_EXTRA_STYLESHEET) one can
+# further fine-tune the look of the index. As an example, the default style
+# sheet generated by doxygen has an example that shows how to put an image at
+# the root of the tree instead of the PROJECT_NAME. Since the tree basically has
+# the same information as the tab index, you could consider setting
+# DISABLE_INDEX to YES when enabling this option.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
 
 GENERATE_TREEVIEW      = NO
 
-# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values
-# (range [0,1..20]) that doxygen will group on one line in the generated HTML
-# documentation. Note that a value of 0 will completely suppress the enum
-# values from appearing in the overview section.
+# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that
+# doxygen will group on one line in the generated HTML documentation.
+#
+# Note that a value of 0 will completely suppress the enum values from appearing
+# in the overview section.
+# Minimum value: 0, maximum value: 20, default value: 4.
+# This tag requires that the tag GENERATE_HTML is set to YES.
 
 ENUM_VALUES_PER_LINE   = 4
 
-# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be
-# used to set the initial width (in pixels) of the frame in which the tree
-# is shown.
+# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used
+# to set the initial width (in pixels) of the frame in which the tree is shown.
+# Minimum value: 0, maximum value: 1500, default value: 250.
+# This tag requires that the tag GENERATE_HTML is set to YES.
 
 TREEVIEW_WIDTH         = 250
 
-# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open
-# links to external symbols imported via tag files in a separate window.
+# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open links to
+# external symbols imported via tag files in a separate window.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
 
 EXT_LINKS_IN_WINDOW    = NO
 
-# Use this tag to change the font size of Latex formulas included
-# as images in the HTML documentation. The default is 10. Note that
-# when you change the font size after a successful doxygen run you need
-# to manually remove any form_*.png images from the HTML output directory
-# to force them to be regenerated.
+# Use this tag to change the font size of LaTeX formulas included as images in
+# the HTML documentation. When you change the font size after a successful
+# doxygen run you need to manually remove any form_*.png images from the HTML
+# output directory to force them to be regenerated.
+# Minimum value: 8, maximum value: 50, default value: 10.
+# This tag requires that the tag GENERATE_HTML is set to YES.
 
 FORMULA_FONTSIZE       = 10
 
 # Use the FORMULA_TRANSPARENT tag to determine whether or not the images
-# generated for formulas are transparent PNGs. Transparent PNGs are
-# not supported properly for IE 6.0, but are supported on all modern browsers.
-# Note that when changing this option you need to delete any form_*.png files
-# in the HTML output before the changes have effect.
+# generated for formulas are transparent PNGs. Transparent PNGs are not
+# supported properly for IE 6.0, but are supported on all modern browsers.
+#
+# Note that when changing this option you need to delete any form_*.png files in
+# the HTML output directory before the changes have effect.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
 
 FORMULA_TRANSPARENT    = YES
 
-# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax
-# (see http://www.mathjax.org) which uses client side Javascript for the
-# rendering instead of using prerendered bitmaps. Use this if you do not
-# have LaTeX installed or if you want to formulas look prettier in the HTML
-# output. When enabled you may also need to install MathJax separately and
-# configure the path to it using the MATHJAX_RELPATH option.
+# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see
+# http://www.mathjax.org) which uses client side Javascript for the rendering
+# instead of using prerendered bitmaps. Use this if you do not have LaTeX
+# installed or if you want formulas to look prettier in the HTML output. When
+# enabled you may also need to install MathJax separately and configure the path
+# to it using the MATHJAX_RELPATH option.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
 
 USE_MATHJAX            = YES
 
-# When MathJax is enabled you need to specify the location relative to the
-# HTML output directory using the MATHJAX_RELPATH option. The destination
-# directory should contain the MathJax.js script. For instance, if the mathjax
-# directory is located at the same level as the HTML output directory, then
-# MATHJAX_RELPATH should be ../mathjax. The default value points to
-# the MathJax Content Delivery Network so you can quickly see the result without
-# installing MathJax.  However, it is strongly recommended to install a local
-# copy of MathJax from http://www.mathjax.org before deployment.
+# When MathJax is enabled you can set the default output format to be used for
+# the MathJax output. See the MathJax site (see:
+# http://docs.mathjax.org/en/latest/output.html) for more details.
+# Possible values are: HTML-CSS (which is slower, but has the best
+# compatibility), NativeMML (i.e. MathML) and SVG.
+# The default value is: HTML-CSS.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_FORMAT         = HTML-CSS
+
+# When MathJax is enabled you need to specify the location relative to the HTML
+# output directory using the MATHJAX_RELPATH option. The destination directory
+# should contain the MathJax.js script. For instance, if the mathjax directory
+# is located at the same level as the HTML output directory, then
+# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax
+# Content Delivery Network so you can quickly see the result without installing
+# MathJax. However, it is strongly recommended to install a local copy of
+# MathJax from http://www.mathjax.org before deployment.
+# The default value is: http://cdn.mathjax.org/mathjax/latest.
+# This tag requires that the tag USE_MATHJAX is set to YES.
 
 MATHJAX_RELPATH        = http://www.mathjax.org/mathjax
 
-# The MATHJAX_EXTENSIONS tag can be used to specify one or MathJax extension
-# names that should be enabled during MathJax rendering.
+# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax
+# extension names that should be enabled during MathJax rendering. For example
+# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols
+# This tag requires that the tag USE_MATHJAX is set to YES.
 
 MATHJAX_EXTENSIONS     =
 
-# When the SEARCHENGINE tag is enabled doxygen will generate a search box
-# for the HTML output. The underlying search engine uses javascript
-# and DHTML and should work on any modern browser. Note that when using
-# HTML help (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets
-# (GENERATE_DOCSET) there is already a search function so this one should
-# typically be disabled. For large projects the javascript based search engine
-# can be slow, then enabling SERVER_BASED_SEARCH may provide a better solution.
+# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces
+# of code that will be used on startup of the MathJax code. See the MathJax site
+# (see: http://docs.mathjax.org/en/latest/output.html) for more details. For an
+# example see the documentation.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_CODEFILE       =
+
+# When the SEARCHENGINE tag is enabled doxygen will generate a search box for
+# the HTML output. The underlying search engine uses javascript and DHTML and
+# should work on any modern browser. Note that when using HTML help
+# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET)
+# there is already a search function so this one should typically be disabled.
+# For large projects the javascript based search engine can be slow, then
+# enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to
+# search using the keyboard; to jump to the search box use <access key> + S
+# (what the <access key> is depends on the OS and browser, but it is typically
+# <CTRL>, <ALT>/<option>, or both). Inside the search box use the <cursor down
+# key> to jump into the search results window, the results can be navigated
+# using the <cursor keys>. Press <Enter> to select an item or <escape> to cancel
+# the search. The filter options can be selected when the cursor is inside the
+# search box by pressing <Shift>+<cursor down>. Also here use the <cursor keys>
+# to select a filter and <Enter> or <escape> to activate or cancel the filter
+# option.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
 
 SEARCHENGINE           = YES
 
 # When the SERVER_BASED_SEARCH tag is enabled the search engine will be
-# implemented using a PHP enabled web server instead of at the web client
-# using Javascript. Doxygen will generate the search PHP script and index
-# file to put on the web server. The advantage of the server
-# based approach is that it scales better to large projects and allows
-# full text search. The disadvantages are that it is more difficult to setup
-# and does not have live searching capabilities.
+# implemented using a web server instead of a web client using Javascript. There
+# are two flavours of web server based searching depending on the
+# EXTERNAL_SEARCH setting. When disabled, doxygen will generate a PHP script for
+# searching and an index file used by the script. When EXTERNAL_SEARCH is
+# enabled the indexing and searching needs to be provided by external tools. See
+# the section "External Indexing and Searching" for details.
+# The default value is: NO.
+# This tag requires that the tag SEARCHENGINE is set to YES.
 
 SERVER_BASED_SEARCH    = NO
 
+# When EXTERNAL_SEARCH tag is enabled doxygen will no longer generate the PHP
+# script for searching. Instead the search results are written to an XML file
+# which needs to be processed by an external indexer. Doxygen will invoke an
+# external search engine pointed to by the SEARCHENGINE_URL option to obtain the
+# search results.
+#
+# Doxygen ships with an example indexer ( doxyindexer) and search engine
+# (doxysearch.cgi) which are based on the open source search engine library
+# Xapian (see: http://xapian.org/).
+#
+# See the section "External Indexing and Searching" for details.
+# The default value is: NO.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+EXTERNAL_SEARCH        = NO
+
+# The SEARCHENGINE_URL should point to a search engine hosted by a web server
+# which will return the search results when EXTERNAL_SEARCH is enabled.
+#
+# Doxygen ships with an example indexer ( doxyindexer) and search engine
+# (doxysearch.cgi) which are based on the open source search engine library
+# Xapian (see: http://xapian.org/). See the section "External Indexing and
+# Searching" for details.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+SEARCHENGINE_URL       =
+
+# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the unindexed
+# search data is written to a file for indexing by an external tool. With the
+# SEARCHDATA_FILE tag the name of this file can be specified.
+# The default file is: searchdata.xml.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+SEARCHDATA_FILE        = searchdata.xml
+
+# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the
+# EXTERNAL_SEARCH_ID tag can be used as an identifier for the project. This is
+# useful in combination with EXTRA_SEARCH_MAPPINGS to search through multiple
+# projects and redirect the results back to the right project.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+EXTERNAL_SEARCH_ID     =
+
+# The EXTRA_SEARCH_MAPPINGS tag can be used to enable searching through doxygen
+# projects other than the one defined by this configuration file, but that are
+# all added to the same external search index. Each project needs to have a
+# unique id set via EXTERNAL_SEARCH_ID. The search mapping then maps the id of
+# the project to a relative location where the documentation can be found. The
+# format is:
+# EXTRA_SEARCH_MAPPINGS = tagname1=loc1 tagname2=loc2 ...
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+EXTRA_SEARCH_MAPPINGS  =
+
 #---------------------------------------------------------------------------
-# configuration options related to the LaTeX output
+# Configuration options related to the LaTeX output
 #---------------------------------------------------------------------------
 
-# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will
-# generate Latex output.
+# If the GENERATE_LATEX tag is set to YES doxygen will generate LaTeX output.
+# The default value is: YES.
 
 GENERATE_LATEX         = NO
 
-# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be
-# put in front of it. If left blank `latex' will be used as the default path.
+# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: latex.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
 
 LATEX_OUTPUT           = latex
 
 # The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
-# invoked. If left blank `latex' will be used as the default command name.
-# Note that when enabling USE_PDFLATEX this option is only used for
-# generating bitmaps for formulas in the HTML output, but not in the
-# Makefile that is written to the output directory.
+# invoked.
+#
+# Note that when enabling USE_PDFLATEX this option is only used for generating
+# bitmaps for formulas in the HTML output, but not in the Makefile that is
+# written to the output directory.
+# The default file is: latex.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
 
 LATEX_CMD_NAME         = latex
 
-# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to
-# generate index for LaTeX. If left blank `makeindex' will be used as the
-# default command name.
+# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to generate
+# index for LaTeX.
+# The default file is: makeindex.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
 
 MAKEINDEX_CMD_NAME     = makeindex
 
-# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact
-# LaTeX documents. This may be useful for small projects and may help to
-# save some trees in general.
+# If the COMPACT_LATEX tag is set to YES doxygen generates more compact LaTeX
+# documents. This may be useful for small projects and may help to save some
+# trees in general.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
 
 COMPACT_LATEX          = YES
 
-# The PAPER_TYPE tag can be used to set the paper type that is used
-# by the printer. Possible values are: a4, letter, legal and
-# executive. If left blank a4wide will be used.
+# The PAPER_TYPE tag can be used to set the paper type that is used by the
+# printer.
+# Possible values are: a4 (210 x 297 mm), letter (8.5 x 11 inches), legal (8.5 x
+# 14 inches) and executive (7.25 x 10.5 inches).
+# The default value is: a4.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
 
 PAPER_TYPE             = a4
 
-# The EXTRA_PACKAGES tag can be to specify one or more names of LaTeX
-# packages that should be included in the LaTeX output.
+# The EXTRA_PACKAGES tag can be used to specify one or more LaTeX package names
+# that should be included in the LaTeX output. To get the times font for
+# instance you can specify
+# EXTRA_PACKAGES=times
+# If left blank no extra packages will be included.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
 
 EXTRA_PACKAGES         =
 
-# The LATEX_HEADER tag can be used to specify a personal LaTeX header for
-# the generated latex document. The header should contain everything until
-# the first chapter. If it is left blank doxygen will generate a
-# standard header. Notice: only use this tag if you know what you are doing!
+# The LATEX_HEADER tag can be used to specify a personal LaTeX header for the
+# generated LaTeX document. The header should contain everything until the first
+# chapter. If it is left blank doxygen will generate a standard header. See
+# section "Doxygen usage" for information on how to let doxygen write the
+# default header to a separate file.
+#
+# Note: Only use a user-defined header if you know what you are doing! The
+# following commands have a special meaning inside the header: $title,
+# $datetime, $date, $doxygenversion, $projectname, $projectnumber. Doxygen will
+# replace them by respectively the title of the page, the current date and time,
+# only the current date, the version number of doxygen, the project name (see
+# PROJECT_NAME), or the project number (see PROJECT_NUMBER).
+# This tag requires that the tag GENERATE_LATEX is set to YES.
 
 LATEX_HEADER           =
 
-# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for
-# the generated latex document. The footer should contain everything after
-# the last chapter. If it is left blank doxygen will generate a
-# standard footer. Notice: only use this tag if you know what you are doing!
+# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for the
+# generated LaTeX document. The footer should contain everything after the last
+# chapter. If it is left blank doxygen will generate a standard footer.
+#
+# Note: Only use a user-defined footer if you know what you are doing!
+# This tag requires that the tag GENERATE_LATEX is set to YES.
 
 LATEX_FOOTER           =
 
-# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated
-# is prepared for conversion to pdf (using ps2pdf). The pdf file will
-# contain links (just like the HTML output) instead of page references
-# This makes the output suitable for online browsing using a pdf viewer.
+# The LATEX_EXTRA_FILES tag can be used to specify one or more extra images or
+# other source files which should be copied to the LATEX_OUTPUT output
+# directory. Note that the files will be copied as-is; there are no commands or
+# markers available.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_EXTRA_FILES      =
+
+# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated is
+# prepared for conversion to PDF (using ps2pdf or pdflatex). The PDF file will
+# contain links (just like the HTML output) instead of page references. This
+# makes the output suitable for online browsing using a PDF viewer.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
 
 PDF_HYPERLINKS         = YES
 
-# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of
-# plain latex in the generated Makefile. Set this option to YES to get a
+# If the LATEX_PDFLATEX tag is set to YES, doxygen will use pdflatex to generate
+# the PDF file directly from the LaTeX files. Set this option to YES to get a
 # higher quality PDF documentation.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
 
 USE_PDFLATEX           = YES
 
-# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode.
-# command to the generated LaTeX files. This will instruct LaTeX to keep
-# running if errors occur, instead of asking the user for help.
-# This option is also used when generating formulas in HTML.
+# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \batchmode
+# command to the generated LaTeX files. This will instruct LaTeX to keep running
+# if errors occur, instead of asking the user for help. This option is also used
+# when generating formulas in HTML.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
 
 LATEX_BATCHMODE        = NO
 
-# If LATEX_HIDE_INDICES is set to YES then doxygen will not
-# include the index chapters (such as File Index, Compound Index, etc.)
-# in the output.
+# If the LATEX_HIDE_INDICES tag is set to YES then doxygen will not include the
+# index chapters (such as File Index, Compound Index, etc.) in the output.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
 
 LATEX_HIDE_INDICES     = NO
 
-# If LATEX_SOURCE_CODE is set to YES then doxygen will include
-# source code with syntax highlighting in the LaTeX output.
-# Note that which sources are shown also depends on other settings
-# such as SOURCE_BROWSER.
+# If the LATEX_SOURCE_CODE tag is set to YES then doxygen will include source
+# code with syntax highlighting in the LaTeX output.
+#
+# Note that which sources are shown also depends on other settings such as
+# SOURCE_BROWSER.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
 
 LATEX_SOURCE_CODE      = NO
 
 # The LATEX_BIB_STYLE tag can be used to specify the style to use for the
-# bibliography, e.g. plainnat, or ieeetr. The default style is "plain". See
-# http://en.wikipedia.org/wiki/BibTeX for more info.
+# bibliography, e.g. plainnat, or ieeetr. See
+# http://en.wikipedia.org/wiki/BibTeX and \cite for more info.
+# The default value is: plain.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
 
 LATEX_BIB_STYLE        = plain
 
 #---------------------------------------------------------------------------
-# configuration options related to the RTF output
+# Configuration options related to the RTF output
 #---------------------------------------------------------------------------
 
-# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output
-# The RTF output is optimized for Word 97 and may not look very pretty with
-# other RTF readers or editors.
+# If the GENERATE_RTF tag is set to YES doxygen will generate RTF output. The
+# RTF output is optimized for Word 97 and may not look too pretty with other RTF
+# readers/editors.
+# The default value is: NO.
 
 GENERATE_RTF           = NO
 
-# The RTF_OUTPUT tag is used to specify where the RTF docs will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be
-# put in front of it. If left blank `rtf' will be used as the default path.
+# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: rtf.
+# This tag requires that the tag GENERATE_RTF is set to YES.
 
 RTF_OUTPUT             = rtf
 
-# If the COMPACT_RTF tag is set to YES Doxygen generates more compact
-# RTF documents. This may be useful for small projects and may help to
-# save some trees in general.
+# If the COMPACT_RTF tag is set to YES doxygen generates more compact RTF
+# documents. This may be useful for small projects and may help to save some
+# trees in general.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_RTF is set to YES.
 
 COMPACT_RTF            = NO
 
-# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated
-# will contain hyperlink fields. The RTF file will
-# contain links (just like the HTML output) instead of page references.
-# This makes the output suitable for online browsing using WORD or other
-# programs which support those fields.
-# Note: wordpad (write) and others do not support links.
+# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated will
+# contain hyperlink fields. The RTF file will contain links (just like the HTML
+# output) instead of page references. This makes the output suitable for online
+# browsing using Word or some other Word compatible readers that support those
+# fields.
+#
+# Note: WordPad (write) and others do not support links.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_RTF is set to YES.
 
 RTF_HYPERLINKS         = NO
 
-# Load style sheet definitions from file. Syntax is similar to doxygen's
-# config file, i.e. a series of assignments. You only have to provide
-# replacements, missing definitions are set to their default value.
+# Load stylesheet definitions from file. Syntax is similar to doxygen's config
+# file, i.e. a series of assignments. You only have to provide replacements,
+# missing definitions are set to their default value.
+#
+# See also section "Doxygen usage" for information on how to generate the
+# default style sheet that doxygen normally uses.
+# This tag requires that the tag GENERATE_RTF is set to YES.
 
 RTF_STYLESHEET_FILE    =
 
-# Set optional variables used in the generation of an rtf document.
-# Syntax is similar to doxygen's config file.
+# Set optional variables used in the generation of an RTF document. Syntax is
+# similar to doxygen's config file. A template extensions file can be generated
+# using doxygen -e rtf extensionFile.
+# This tag requires that the tag GENERATE_RTF is set to YES.
 
 RTF_EXTENSIONS_FILE    =
 
 #---------------------------------------------------------------------------
-# configuration options related to the man page output
+# Configuration options related to the man page output
 #---------------------------------------------------------------------------
 
-# If the GENERATE_MAN tag is set to YES (the default) Doxygen will
-# generate man pages
+# If the GENERATE_MAN tag is set to YES doxygen will generate man pages for
+# classes and files.
+# The default value is: NO.
 
 GENERATE_MAN           = NO
 
-# The MAN_OUTPUT tag is used to specify where the man pages will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be
-# put in front of it. If left blank `man' will be used as the default path.
+# The MAN_OUTPUT tag is used to specify where the man pages will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it. A directory man3 will be created inside the directory specified by
+# MAN_OUTPUT.
+# The default directory is: man.
+# This tag requires that the tag GENERATE_MAN is set to YES.
 
 MAN_OUTPUT             = man
 
-# The MAN_EXTENSION tag determines the extension that is added to
-# the generated man pages (default is the subroutine's section .3)
+# The MAN_EXTENSION tag determines the extension that is added to the generated
+# man pages. In case the manual section does not start with a number, the number
+# 3 is prepended. The dot (.) at the beginning of the MAN_EXTENSION tag is
+# optional.
+# The default value is: .3.
+# This tag requires that the tag GENERATE_MAN is set to YES.
 
 MAN_EXTENSION          = .3
 
-# If the MAN_LINKS tag is set to YES and Doxygen generates man output,
-# then it will generate one additional man file for each entity
-# documented in the real man page(s). These additional files
-# only source the real man page, but without them the man command
-# would be unable to find the correct page. The default is NO.
+# If the MAN_LINKS tag is set to YES and doxygen generates man output, then it
+# will generate one additional man file for each entity documented in the real
+# man page(s). These additional files only source the real man page, but without
+# them the man command would be unable to find the correct page.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_MAN is set to YES.
 
 MAN_LINKS              = NO
 
 #---------------------------------------------------------------------------
-# configuration options related to the XML output
+# Configuration options related to the XML output
 #---------------------------------------------------------------------------
 
-# If the GENERATE_XML tag is set to YES Doxygen will
-# generate an XML file that captures the structure of
-# the code including all documentation.
+# If the GENERATE_XML tag is set to YES doxygen will generate an XML file that
+# captures the structure of the code including all documentation.
+# The default value is: NO.
 
 GENERATE_XML           = NO
 
-# The XML_OUTPUT tag is used to specify where the XML pages will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be
-# put in front of it. If left blank `xml' will be used as the default path.
+# The XML_OUTPUT tag is used to specify where the XML pages will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: xml.
+# This tag requires that the tag GENERATE_XML is set to YES.
 
 XML_OUTPUT             = xml
 
-# The XML_SCHEMA tag can be used to specify an XML schema,
-# which can be used by a validating XML parser to check the
-# syntax of the XML files.
+# The XML_SCHEMA tag can be used to specify an XML schema, which can be used by a
+# validating XML parser to check the syntax of the XML files.
+# This tag requires that the tag GENERATE_XML is set to YES.
 
 XML_SCHEMA             =
 
-# The XML_DTD tag can be used to specify an XML DTD,
-# which can be used by a validating XML parser to check the
-# syntax of the XML files.
+# The XML_DTD tag can be used to specify an XML DTD, which can be used by a
+# validating XML parser to check the syntax of the XML files.
+# This tag requires that the tag GENERATE_XML is set to YES.
 
 XML_DTD                =
 
-# If the XML_PROGRAMLISTING tag is set to YES Doxygen will
-# dump the program listings (including syntax highlighting
-# and cross-referencing information) to the XML output. Note that
-# enabling this will significantly increase the size of the XML output.
+# If the XML_PROGRAMLISTING tag is set to YES doxygen will dump the program
+# listings (including syntax highlighting and cross-referencing information) to
+# the XML output. Note that enabling this will significantly increase the size
+# of the XML output.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_XML is set to YES.
 
 XML_PROGRAMLISTING     = YES
 
 #---------------------------------------------------------------------------
-# configuration options for the AutoGen Definitions output
+# Configuration options related to the DOCBOOK output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_DOCBOOK tag is set to YES doxygen will generate Docbook files
+# that can be used to generate PDF.
+# The default value is: NO.
+
+GENERATE_DOCBOOK       = NO
+
+# The DOCBOOK_OUTPUT tag is used to specify where the Docbook pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be put in
+# front of it.
+# The default directory is: docbook.
+# This tag requires that the tag GENERATE_DOCBOOK is set to YES.
+
+DOCBOOK_OUTPUT         = docbook
+
+#---------------------------------------------------------------------------
+# Configuration options for the AutoGen Definitions output
 #---------------------------------------------------------------------------
 
-# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will
-# generate an AutoGen Definitions (see autogen.sf.net) file
-# that captures the structure of the code including all
-# documentation. Note that this feature is still experimental
-# and incomplete at the moment.
+# If the GENERATE_AUTOGEN_DEF tag is set to YES doxygen will generate an AutoGen
+# Definitions (see http://autogen.sf.net) file that captures the structure of
+# the code including all documentation. Note that this feature is still
+# experimental and incomplete at the moment.
+# The default value is: NO.
 
 GENERATE_AUTOGEN_DEF   = NO
 
 #---------------------------------------------------------------------------
-# configuration options related to the Perl module output
+# Configuration options related to the Perl module output
 #---------------------------------------------------------------------------
 
-# If the GENERATE_PERLMOD tag is set to YES Doxygen will
-# generate a Perl module file that captures the structure of
-# the code including all documentation. Note that this
-# feature is still experimental and incomplete at the
-# moment.
+# If the GENERATE_PERLMOD tag is set to YES doxygen will generate a Perl module
+# file that captures the structure of the code including all documentation.
+#
+# Note that this feature is still experimental and incomplete at the moment.
+# The default value is: NO.
 
 GENERATE_PERLMOD       = NO
 
-# If the PERLMOD_LATEX tag is set to YES Doxygen will generate
-# the necessary Makefile rules, Perl scripts and LaTeX code to be able
-# to generate PDF and DVI output from the Perl module output.
+# If the PERLMOD_LATEX tag is set to YES doxygen will generate the necessary
+# Makefile rules, Perl scripts and LaTeX code to be able to generate PDF and DVI
+# output from the Perl module output.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_PERLMOD is set to YES.
 
 PERLMOD_LATEX          = NO
 
-# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be
-# nicely formatted so it can be parsed by a human reader.  This is useful
-# if you want to understand what is going on.  On the other hand, if this
-# tag is set to NO the size of the Perl module output will be much smaller
-# and Perl will parse it just the same.
+# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be nicely
+# formatted so it can be parsed by a human reader. This is useful if you want to
+# understand what is going on. On the other hand, if this tag is set to NO the
+# size of the Perl module output will be much smaller and Perl will parse it
+# just the same.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_PERLMOD is set to YES.
 
 PERLMOD_PRETTY         = YES
 
-# The names of the make variables in the generated doxyrules.make file
-# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX.
-# This is useful so different doxyrules.make files included by the same
-# Makefile don't overwrite each other's variables.
+# The names of the make variables in the generated doxyrules.make file are
+# prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. This is useful
+# so different doxyrules.make files included by the same Makefile don't
+# overwrite each other's variables.
+# This tag requires that the tag GENERATE_PERLMOD is set to YES.
 
 PERLMOD_MAKEVAR_PREFIX =
 
@@ -1483,104 +1897,128 @@ PERLMOD_MAKEVAR_PREFIX =
 # Configuration options related to the preprocessor
 #---------------------------------------------------------------------------
 
-# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will
-# evaluate all C-preprocessor directives found in the sources and include
-# files.
+# If the ENABLE_PREPROCESSING tag is set to YES doxygen will evaluate all
+# C-preprocessor directives found in the sources and include files.
+# The default value is: YES.
 
 ENABLE_PREPROCESSING   = YES
 
-# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro
-# names in the source code. If set to NO (the default) only conditional
-# compilation will be performed. Macro expansion can be done in a controlled
-# way by setting EXPAND_ONLY_PREDEF to YES.
+# If the MACRO_EXPANSION tag is set to YES doxygen will expand all macro names
+# in the source code. If set to NO only conditional compilation will be
+# performed. Macro expansion can be done in a controlled way by setting
+# EXPAND_ONLY_PREDEF to YES.
+# The default value is: NO.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
 
 MACRO_EXPANSION        = NO
 
-# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES
-# then the macro expansion is limited to the macros specified with the
-# PREDEFINED and EXPAND_AS_DEFINED tags.
+# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES then
+# the macro expansion is limited to the macros specified with the PREDEFINED and
+# EXPAND_AS_DEFINED tags.
+# The default value is: NO.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
 
 EXPAND_ONLY_PREDEF     = NO
 
-# If the SEARCH_INCLUDES tag is set to YES (the default) the includes files
-# pointed to by INCLUDE_PATH will be searched when a #include is found.
+# If the SEARCH_INCLUDES tag is set to YES the includes files in the
+# INCLUDE_PATH will be searched if a #include is found.
+# The default value is: YES.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
 
 SEARCH_INCLUDES        = YES
 
 # The INCLUDE_PATH tag can be used to specify one or more directories that
-# contain include files that are not input files but should be processed by
-# the preprocessor.
+# contain include files that are not input files but should be processed by the
+# preprocessor.
+# This tag requires that the tag SEARCH_INCLUDES is set to YES.
 
 INCLUDE_PATH           =
 
 # You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
 # patterns (like *.h and *.hpp) to filter out the header-files in the
-# directories. If left blank, the patterns specified with FILE_PATTERNS will
-# be used.
+# directories. If left blank, the patterns specified with FILE_PATTERNS will be
+# used.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
 
 INCLUDE_FILE_PATTERNS  =
 
-# The PREDEFINED tag can be used to specify one or more macro names that
-# are defined before the preprocessor is started (similar to the -D option of
-# gcc). The argument of the tag is a list of macros of the form: name
-# or name=definition (no spaces). If the definition and the = are
-# omitted =1 is assumed. To prevent a macro definition from being
-# undefined via #undef or recursively expanded use the := operator
-# instead of the = operator.
+# The PREDEFINED tag can be used to specify one or more macro names that are
+# defined before the preprocessor is started (similar to the -D option of e.g.
+# gcc). The argument of the tag is a list of macros of the form: name or
+# name=definition (no spaces). If the definition and the "=" are omitted, "=1"
+# is assumed. To prevent a macro definition from being undefined via #undef or
+# recursively expanded use the := operator instead of the = operator.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
 
 PREDEFINED             =
 
-# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then
-# this tag can be used to specify a list of macro names that should be expanded.
-# The macro definition that is found in the sources will be used.
-# Use the PREDEFINED tag if you want to use a different macro definition that
-# overrules the definition found in the source code.
+# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then this
+# tag can be used to specify a list of macro names that should be expanded. The
+# macro definition that is found in the sources will be used. Use the PREDEFINED
+# tag if you want to use a different macro definition that overrules the
+# definition found in the source code.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
 
 EXPAND_AS_DEFINED      =
 
-# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then
-# doxygen's preprocessor will remove all references to function-like macros
-# that are alone on a line, have an all uppercase name, and do not end with a
-# semicolon, because these will confuse the parser if not removed.
+# If the SKIP_FUNCTION_MACROS tag is set to YES then doxygen's preprocessor will
+# remove all references to function-like macros that are alone on a line, have an
+# all uppercase name, and do not end with a semicolon. Such function macros are
+# typically used for boiler-plate code, and will confuse the parser if not
+# removed.
+# The default value is: YES.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
 
 SKIP_FUNCTION_MACROS   = YES
 
 #---------------------------------------------------------------------------
-# Configuration::additions related to external references
+# Configuration options related to external references
 #---------------------------------------------------------------------------
 
-# The TAGFILES option can be used to specify one or more tagfiles. For each
-# tag file the location of the external documentation should be added. The
-# format of a tag file without this location is as follows:
-#   TAGFILES = file1 file2 ...
+# The TAGFILES tag can be used to specify one or more tag files. For each tag
+# file the location of the external documentation should be added. The format of
+# a tag file without this location is as follows:
+# TAGFILES = file1 file2 ...
 # Adding location for the tag files is done as follows:
-#   TAGFILES = file1=loc1 "file2 = loc2" ...
-# where "loc1" and "loc2" can be relative or absolute paths
-# or URLs. Note that each tag file must have a unique name (where the name does
-# NOT include the path). If a tag file is not located in the directory in which
-# doxygen is run, you must also specify the path to the tagfile here.
+# TAGFILES = file1=loc1 "file2 = loc2" ...
+# where loc1 and loc2 can be relative or absolute paths or URLs. See the
+# section "Linking to external documentation" for more information about the use
+# of tag files.
+# Note: Each tag file must have an unique name (where the name does NOT include
+# the path). If a tag file is not located in the directory in which doxygen is
+# run, you must also specify the path to the tagfile here.
 
 TAGFILES               =
 
-# When a file name is specified after GENERATE_TAGFILE, doxygen will create
-# a tag file that is based on the input files it reads.
+# When a file name is specified after GENERATE_TAGFILE, doxygen will create a
+# tag file that is based on the input files it reads. See section "Linking to
+# external documentation" for more information about the usage of tag files.
 
 GENERATE_TAGFILE       =
 
-# If the ALLEXTERNALS tag is set to YES all external classes will be listed
-# in the class index. If set to NO only the inherited external classes
-# will be listed.
+# If the ALLEXTERNALS tag is set to YES all external classes will be listed in the
+# class index. If set to NO only the inherited external classes will be listed.
+# The default value is: NO.
 
 ALLEXTERNALS           = NO
 
-# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed
-# in the modules index. If set to NO, only the current project's groups will
-# be listed.
+# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed in
+# the modules index. If set to NO, only the current project's groups will be
+# listed.
+# The default value is: YES.
 
 EXTERNAL_GROUPS        = YES
 
+# If the EXTERNAL_PAGES tag is set to YES all external pages will be listed in
+# the related pages index. If set to NO, only the current project's pages will
+# be listed.
+# The default value is: YES.
+
+EXTERNAL_PAGES         = YES
+
 # The PERL_PATH should be the absolute path and name of the perl script
-# interpreter (i.e. the result of `which perl').
+# interpreter (i.e. the result of 'which perl').
+# The default file (with absolute path) is: /usr/bin/perl.
 
 PERL_PATH              = /usr/bin/perl
 
@@ -1588,222 +2026,280 @@ PERL_PATH              = /usr/bin/perl
 # Configuration options related to the dot tool
 #---------------------------------------------------------------------------
 
-# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will
-# generate a inheritance diagram (in HTML, RTF and LaTeX) for classes with base
-# or super classes. Setting the tag to NO turns the diagrams off. Note that
-# this option also works with HAVE_DOT disabled, but it is recommended to
-# install and use dot, since it yields more powerful graphs.
+# If the CLASS_DIAGRAMS tag is set to YES doxygen will generate a class diagram
+# (in HTML and LaTeX) for classes with base or super classes. Setting the tag to
+# NO turns the diagrams off. Note that this option also works with HAVE_DOT
+# disabled, but it is recommended to install and use dot, since it yields more
+# powerful graphs.
+# The default value is: YES.
 
 CLASS_DIAGRAMS         = YES
 
 # You can define message sequence charts within doxygen comments using the \msc
-# command. Doxygen will then run the mscgen tool (see
-# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the
+# command. Doxygen will then run the mscgen tool (see:
+# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the
 # documentation. The MSCGEN_PATH tag allows you to specify the directory where
 # the mscgen tool resides. If left empty the tool is assumed to be found in the
 # default search path.
 
 MSCGEN_PATH            =
 
-# If set to YES, the inheritance and collaboration graphs will hide
-# inheritance and usage relations if the target is undocumented
-# or is not a class.
+# If set to YES, the inheritance and collaboration graphs will hide inheritance
+# and usage relations if the target is undocumented or is not a class.
+# The default value is: YES.
 
 HIDE_UNDOC_RELATIONS   = YES
 
 # If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
-# available from the path. This tool is part of Graphviz, a graph visualization
-# toolkit from AT&T and Lucent Bell Labs. The other options in this section
-# have no effect if this option is set to NO (the default)
+# available from the path. This tool is part of Graphviz (see:
+# http://www.graphviz.org/), a graph visualization toolkit from AT&T and Lucent
+# Bell Labs. The other options in this section have no effect if this option is
+# set to NO.
+# The default value is: NO.
 
 HAVE_DOT               = YES
 
-# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is
-# allowed to run in parallel. When set to 0 (the default) doxygen will
-# base this on the number of processors available in the system. You can set it
-# explicitly to a value larger than 0 to get control over the balance
-# between CPU load and processing speed.
+# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is allowed
+# to run in parallel. When set to 0 doxygen will base this on the number of
+# processors available in the system. You can set it explicitly to a value
+# larger than 0 to get control over the balance between CPU load and processing
+# speed.
+# Minimum value: 0, maximum value: 32, default value: 0.
+# This tag requires that the tag HAVE_DOT is set to YES.
 
 DOT_NUM_THREADS        = 0
 
-# By default doxygen will use the Helvetica font for all dot files that
-# doxygen generates. When you want a differently looking font you can specify
-# the font name using DOT_FONTNAME. You need to make sure dot is able to find
-# the font, which can be done by putting it in a standard location or by setting
-# the DOTFONTPATH environment variable or by setting DOT_FONTPATH to the
-# directory containing the font.
+# When you want a differently looking font in the dot files that doxygen
+# generates you can specify the font name using DOT_FONTNAME. You need to make
+# sure dot is able to find the font, which can be done by putting it in a
+# standard location or by setting the DOTFONTPATH environment variable or by
+# setting DOT_FONTPATH to the directory containing the font.
+# The default value is: Helvetica.
+# This tag requires that the tag HAVE_DOT is set to YES.
 
 DOT_FONTNAME           = Helvetica
 
-# The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs.
-# The default size is 10pt.
+# The DOT_FONTSIZE tag can be used to set the size (in points) of the font of
+# dot graphs.
+# Minimum value: 4, maximum value: 24, default value: 10.
+# This tag requires that the tag HAVE_DOT is set to YES.
 
 DOT_FONTSIZE           = 10
 
-# By default doxygen will tell dot to use the Helvetica font.
-# If you specify a different font using DOT_FONTNAME you can use DOT_FONTPATH to
-# set the path where dot can find it.
+# By default doxygen will tell dot to use the default font as specified with
+# DOT_FONTNAME. If you specify a different font using DOT_FONTNAME you can set
+# the path where dot can find it using this tag.
+# This tag requires that the tag HAVE_DOT is set to YES.
 
 DOT_FONTPATH           =
 
-# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen
-# will generate a graph for each documented class showing the direct and
-# indirect inheritance relations. Setting this tag to YES will force the
-# CLASS_DIAGRAMS tag to NO.
+# If the CLASS_GRAPH tag is set to YES then doxygen will generate a graph for
+# each documented class showing the direct and indirect inheritance relations.
+# Setting this tag to YES will force the CLASS_DIAGRAMS tag to NO.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
 
 CLASS_GRAPH            = YES
 
-# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen
-# will generate a graph for each documented class showing the direct and
-# indirect implementation dependencies (inheritance, containment, and
-# class references variables) of the class with other documented classes.
+# If the COLLABORATION_GRAPH tag is set to YES then doxygen will generate a
+# graph for each documented class showing the direct and indirect implementation
+# dependencies (inheritance, containment, and class references variables) of the
+# class with other documented classes.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
 
 COLLABORATION_GRAPH    = YES
 
-# If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen
-# will generate a graph for groups, showing the direct groups dependencies
+# If the GROUP_GRAPHS tag is set to YES then doxygen will generate a graph for
+# groups, showing the direct groups dependencies.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
 
 GROUP_GRAPHS           = YES
 
 # If the UML_LOOK tag is set to YES doxygen will generate inheritance and
 # collaboration diagrams in a style similar to the OMG's Unified Modeling
 # Language.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
 
 UML_LOOK               = YES
 
-# If the UML_LOOK tag is enabled, the fields and methods are shown inside
-# the class node. If there are many fields or methods and many nodes the
-# graph may become too big to be useful. The UML_LIMIT_NUM_FIELDS
-# threshold limits the number of items for each type to make the size more
-# managable. Set this to 0 for no limit. Note that the threshold may be
-# exceeded by 50% before the limit is enforced.
+# If the UML_LOOK tag is enabled, the fields and methods are shown inside the
+# class node. If there are many fields or methods and many nodes the graph may
+# become too big to be useful. The UML_LIMIT_NUM_FIELDS threshold limits the
+# number of items for each type to make the size more manageable. Set this to 0
+# for no limit. Note that the threshold may be exceeded by 50% before the limit
+# is enforced. So when you set the threshold to 10, up to 15 fields may appear,
+# but if the number exceeds 15, the total amount of fields shown is limited to
+# 10.
+# Minimum value: 0, maximum value: 100, default value: 10.
+# This tag requires that the tag HAVE_DOT is set to YES.
 
 UML_LIMIT_NUM_FIELDS   = 0
 
-# If set to YES, the inheritance and collaboration graphs will show the
-# relations between templates and their instances.
+# If the TEMPLATE_RELATIONS tag is set to YES then the inheritance and
+# collaboration graphs will show the relations between templates and their
+# instances.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
 
 TEMPLATE_RELATIONS     = NO
 
-# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT
-# tags are set to YES then doxygen will generate a graph for each documented
-# file showing the direct and indirect include dependencies of the file with
-# other documented files.
+# If the INCLUDE_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are set to
+# YES then doxygen will generate a graph for each documented file showing the
+# direct and indirect include dependencies of the file with other documented
+# files.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
 
 INCLUDE_GRAPH          = YES
 
-# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and
-# HAVE_DOT tags are set to YES then doxygen will generate a graph for each
-# documented header file showing the documented files that directly or
-# indirectly include this file.
+# If the INCLUDED_BY_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are
+# set to YES then doxygen will generate a graph for each documented file showing
+# the direct and indirect include dependencies of the file with other documented
+# files.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
 
 INCLUDED_BY_GRAPH      = YES
 
-# If the CALL_GRAPH and HAVE_DOT options are set to YES then
-# doxygen will generate a call dependency graph for every global function
-# or class method. Note that enabling this option will significantly increase
-# the time of a run. So in most cases it will be better to enable call graphs
-# for selected functions only using the \callgraph command.
+# If the CALL_GRAPH tag is set to YES then doxygen will generate a call
+# dependency graph for every global function or class method.
+#
+# Note that enabling this option will significantly increase the time of a run.
+# So in most cases it will be better to enable call graphs for selected
+# functions only using the \callgraph command.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
 
 CALL_GRAPH             = YES
 
-# If the CALLER_GRAPH and HAVE_DOT tags are set to YES then
-# doxygen will generate a caller dependency graph for every global function
-# or class method. Note that enabling this option will significantly increase
-# the time of a run. So in most cases it will be better to enable caller
-# graphs for selected functions only using the \callergraph command.
+# If the CALLER_GRAPH tag is set to YES then doxygen will generate a caller
+# dependency graph for every global function or class method.
+#
+# Note that enabling this option will significantly increase the time of a run.
+# So in most cases it will be better to enable caller graphs for selected
+# functions only using the \callergraph command.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
 
 CALLER_GRAPH           = YES
 
-# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen
-# will generate a graphical hierarchy of all classes instead of a textual one.
+# If the GRAPHICAL_HIERARCHY tag is set to YES then doxygen will show a
+# graphical hierarchy of all classes instead of a textual one.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
 
 GRAPHICAL_HIERARCHY    = YES
 
-# If the DIRECTORY_GRAPH and HAVE_DOT tags are set to YES
-# then doxygen will show the dependencies a directory has on other directories
-# in a graphical way. The dependency relations are determined by the #include
-# relations between the files in the directories.
+# If the DIRECTORY_GRAPH tag is set to YES then doxygen will show the
+# dependencies a directory has on other directories in a graphical way. The
+# dependency relations are determined by the #include relations between the
+# files in the directories.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
 
 DIRECTORY_GRAPH        = YES
 
 # The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
-# generated by dot. Possible values are svg, png, jpg, or gif.
-# If left blank png will be used. If you choose svg you need to set
-# HTML_FILE_EXTENSION to xhtml in order to make the SVG files
-# visible in IE 9+ (other browsers do not have this requirement).
+# generated by dot.
+# Note: If you choose svg you need to set HTML_FILE_EXTENSION to xhtml in order
+# to make the SVG files visible in IE 9+ (other browsers do not have this
+# requirement).
+# Possible values are: png, jpg, gif and svg.
+# The default value is: png.
+# This tag requires that the tag HAVE_DOT is set to YES.
 
 DOT_IMAGE_FORMAT       = png
 
 # If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to
 # enable generation of interactive SVG images that allow zooming and panning.
-# Note that this requires a modern browser other than Internet Explorer.
-# Tested and working are Firefox, Chrome, Safari, and Opera. For IE 9+ you
-# need to set HTML_FILE_EXTENSION to xhtml in order to make the SVG files
-# visible. Older versions of IE do not have SVG support.
+#
+# Note that this requires a modern browser other than Internet Explorer. Tested
+# and working are Firefox, Chrome, Safari, and Opera.
+# Note: For IE 9+ you need to set HTML_FILE_EXTENSION to xhtml in order to make
+# the SVG files visible. Older versions of IE do not have SVG support.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
 
 INTERACTIVE_SVG        = NO
 
-# The tag DOT_PATH can be used to specify the path where the dot tool can be
+# The DOT_PATH tag can be used to specify the path where the dot tool can be
 # found. If left blank, it is assumed the dot tool can be found in the path.
+# This tag requires that the tag HAVE_DOT is set to YES.
 
 DOT_PATH               = @DOXYGEN_DOT_PATH@
 
 # The DOTFILE_DIRS tag can be used to specify one or more directories that
-# contain dot files that are included in the documentation (see the
-# \dotfile command).
+# contain dot files that are included in the documentation (see the \dotfile
+# command).
+# This tag requires that the tag HAVE_DOT is set to YES.
 
 DOTFILE_DIRS           =
 
 # The MSCFILE_DIRS tag can be used to specify one or more directories that
-# contain msc files that are included in the documentation (see the
-# \mscfile command).
+# contain msc files that are included in the documentation (see the \mscfile
+# command).
 
 MSCFILE_DIRS           =
 
-# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of
-# nodes that will be shown in the graph. If the number of nodes in a graph
-# becomes larger than this value, doxygen will truncate the graph, which is
-# visualized by representing a node as a red box. Note that doxygen if the
-# number of direct children of the root node in a graph is already larger than
-# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note
-# that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH.
+# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of nodes
+# that will be shown in the graph. If the number of nodes in a graph becomes
+# larger than this value, doxygen will truncate the graph, which is visualized
+# by representing a node as a red box. Note that doxygen if the number of direct
+# children of the root node in a graph is already larger than
+# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note that
+# the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH.
+# Minimum value: 0, maximum value: 10000, default value: 50.
+# This tag requires that the tag HAVE_DOT is set to YES.
 
 DOT_GRAPH_MAX_NODES    = 4
 
-# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the
-# graphs generated by dot. A depth value of 3 means that only nodes reachable
-# from the root by following a path via at most 3 edges will be shown. Nodes
-# that lay further from the root node will be omitted. Note that setting this
-# option to 1 or 2 may greatly reduce the computation time needed for large
-# code bases. Also note that the size of a graph can be further restricted by
+# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the graphs
+# generated by dot. A depth value of 3 means that only nodes reachable from the
+# root by following a path via at most 3 edges will be shown. Nodes that lay
+# further from the root node will be omitted. Note that setting this option to 1
+# or 2 may greatly reduce the computation time needed for large code bases. Also
+# note that the size of a graph can be further restricted by
 # DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction.
+# Minimum value: 0, maximum value: 1000, default value: 0.
+# This tag requires that the tag HAVE_DOT is set to YES.
 
 MAX_DOT_GRAPH_DEPTH    = 0
 
 # Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
-# background. This is disabled by default, because dot on Windows does not
-# seem to support this out of the box. Warning: Depending on the platform used,
-# enabling this option may lead to badly anti-aliased labels on the edges of
-# a graph (i.e. they become hard to read).
+# background. This is disabled by default, because dot on Windows does not seem
+# to support this out of the box.
+#
+# Warning: Depending on the platform used, enabling this option may lead to
+# badly anti-aliased labels on the edges of a graph (i.e. they become hard to
+# read).
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
 
 DOT_TRANSPARENT        = NO
 
 # Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output
 # files in one run (i.e. multiple -o and -T options on the command line). This
-# makes dot run faster, but since only newer versions of dot (>1.8.10)
-# support this, this feature is disabled by default.
+# makes dot run faster, but since only newer versions of dot (>1.8.10) support
+# this, this feature is disabled by default.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
 
 DOT_MULTI_TARGETS      = NO
 
-# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will
-# generate a legend page explaining the meaning of the various boxes and
-# arrows in the dot generated graphs.
+# If the GENERATE_LEGEND tag is set to YES doxygen will generate a legend page
+# explaining the meaning of the various boxes and arrows in the dot generated
+# graphs.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
 
 GENERATE_LEGEND        = YES
 
-# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will
-# remove the intermediate dot files that are used to generate
-# the various graphs.
+# If the DOT_CLEANUP tag is set to YES doxygen will remove the intermediate dot
+# files that are used to generate the various graphs.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
 
 DOT_CLEANUP            = YES
diff --git a/HySoP/hysop/constants.py b/HySoP/hysop/constants.py
index 5f1b68e31..c92cc42db 100644
--- a/HySoP/hysop/constants.py
+++ b/HySoP/hysop/constants.py
@@ -27,6 +27,13 @@ PARMES_INTEGER = np.int64
 PARMES_MPI_REAL = MPI.DOUBLE
 ## default array layout (fortran or C convention)
 ORDER = 'F'
+# to check array ordering with :
+# assert tab.flags.f_contiguous is CHECK_F_CONT
+if ORDER is 'F':
+    CHECK_F_CONT = True
+else:
+    CHECK_F_CONT = False
+
 ## Default array layout for MPI
 ORDERMPI = MPI.ORDER_F
 ## label for x direction
@@ -54,6 +61,14 @@ WITH_GUESS = 1
 ## y is different from result arg.
 NOALIAS = 2
 
+# File format types for output
+## VTK output for printers
+VTK = 0
+## HDF5 output for printers
+HDF5 = 1
+## ascii text output
+DATA = 2
+
 
 #define debug decorator:
 def debugdecorator(f):
diff --git a/HySoP/hysop/domain/box.py b/HySoP/hysop/domain/box.py
index 668353f92..3cb0d050c 100644
--- a/HySoP/hysop/domain/box.py
+++ b/HySoP/hysop/domain/box.py
@@ -3,7 +3,7 @@
 Box-shaped domains definition.
 
 """
-from domain import Domain
+from parmepy.domain.domain import Domain
 from parmepy.constants import np, PARMES_REAL, PARMES_INTEGER, PERIODIC, \
     debug
 
@@ -16,18 +16,15 @@ class Box(Domain):
     """
 
     @debug
-    def __init__(self, dimension=3, length=[1.0, 1.0, 1.0],
-                 origin=[0., 0., 0.]):
+    def __init__(self, dimension=3, length=None, origin=None):
         """
         Create a Periodic Box from a dimension, length and origin.
         Parameters dimensions must coincide. Raise ValueError
         in case of inconsistent parameters dimensions.
 
-        By defaults, it creates a \f$[0;1]^3\f$ Box.
-
         @param dimension : Box dimension. Default: 3
-        @param length : Box length. Default [1.0, 1.0, 1.0]
-        @param origin : Box minimum position. Default [0., 0., 0.]
+        @param length : Box length. Default [1.0, ...]
+        @param origin : Box minimum position. Default [0., ...]
         \code
         >>> import parmepy as pp
         >>> import numpy as np
@@ -37,10 +34,14 @@ class Box(Domain):
 
         \endcode
         """
-        if not (dimension == len(length) and dimension == len(origin)):
-            raise ValueError("Box parameters inconsistents dimensions")
+        ## Space dimension
+        self.dimension = dimension
         Domain.__init__(self, dimension)
         ##  Box length.
+        if length is None:
+            length = [1.0] * self.dimension
+        if origin is None:
+            origin = [0.] * self.dimension
         self.length = np.asarray(length, dtype=PARMES_REAL)
         ##  Box origin
         self.origin = np.asarray(origin, dtype=PARMES_REAL)
@@ -61,8 +62,3 @@ class Box(Domain):
         s += "   origin : " + str(self.origin) + ", maxPosition :" \
              + str(self.max) + ", lengths :" + str(self.length) + "."
         return s
-
-if __name__ == "__main__":
-    print __doc__
-    print "- Provided class : Box."
-    print Box.__doc__
diff --git a/HySoP/hysop/domain/obstacle/controlBox.py b/HySoP/hysop/domain/obstacle/controlBox.py
new file mode 100644
index 000000000..fb29cb9c0
--- /dev/null
+++ b/HySoP/hysop/domain/obstacle/controlBox.py
@@ -0,0 +1,257 @@
+"""
+@file controlBox.py
+Define a sub-domain with a box-liked shape.
+"""
+from parmepy.domain.obstacle.obstacle import Obstacle
+from parmepy.domain.obstacle.planes import SubSpace, SubPlane
+from parmepy.mpi.mesh import SubMesh
+import numpy as np
+import parmepy.tools.numpywrappers as npw
+
+
+class ControlBox(Obstacle):
+    """
+    Build a sub-domain, box-shaped
+    ==> define set of indices inside this domain (ind member)
+    and set of indices belonging to surfaces of this domain (slices members).
+    Useful to define control volume to perform integration.
+    See for example parmepy.operator.monitor.forces
+    """
+
+    def __init__(self, domain, origin, lengths, vd=0):
+        """
+        Build the volume of control
+        @param domain : parmepy.domain of definition
+        @param origin : coordinates of the lowest point in the sub-domain
+        @param lengths : lengths of box sides.
+        @param vd : velocity of the center of mass of the domain (rigid body),
+        default = 0.
+        """
+        Obstacle.__init__(self, domain, vd=vd)
+
+        self._dim = self.domain.dimension
+        ## Lowest point of the box
+        self.origin = npw.realarray(origin)
+        ## Box's sides dimension
+        self.lengths = npw.realarray(lengths)
+        ## Dictionary of local meshes, such that
+        ## mesh[topo] is the restriction of topo.mesh
+        ## to the current control box.
+        self.mesh = {}
+        self.upper = None
+        self.lower = None
+        self.upperS = None
+        self.lowerS = None
+        self.slices = {}
+        self.indReduced = {}
+        self._boxCreated = False
+        ## Check if the defined box contains points
+        ## for a given topology. self.isEmpty[topo] = False
+        ## if some grid points are inside the box on
+        ## the current processor for topo discretization.
+        self.isEmpty = {}
+        ## Dict of local coordinates for a given topology
+        self.coords = {}
+
+    def createVolumeAndSides(self, spaceStep):
+        """
+        @param[in] spaceStep : array of size self._dim, space step size in each direction
+        This value will be used to compute a tolerance and detect
+        points inside the box.
+        """
+        # Build Half-spaces indices list in all directions
+        normalUp = np.identity(self._dim)
+        normalDown = np.identity(self._dim) * -1.
+        pointsUp = npw.zeros((self._dim, self._dim))
+        # Control box will be used for integration, so we remove the
+        # last point in the grid.
+        boxlengths = self.lengths - spaceStep
+        tol = spaceStep * 0.5
+
+        for i in xrange(self._dim):
+            pointsUp[:, i] = self.origin
+        pointsUp.flat[::self._dim + 1] += self.lengths
+        # -- Control volume : union of two halfspaces --
+        if self.upper is None:
+            self.upper = [SubSpace(self.domain, normal=normalUp[:, i],
+                                   point=pointsUp[:, i],
+                                   lengths=boxlengths,
+                                   epsilon=tol[i])
+                          for i in xrange(self._dim)]
+        if self.lower is None:
+            self.lower = [SubSpace(self.domain, normal=normalDown[:, i],
+                                   point=self.origin, lengths=boxlengths,
+                                   epsilon=tol[i])
+                          for i in xrange(self._dim)]
+
+        # Create objects to describe the sides of the box
+        if self.upperS is None:
+            self.upperS = [SubPlane(self.domain, normal=normalUp[:, i],
+                                    point=pointsUp[:, i],
+                                    lengths=boxlengths,
+                                    epsilon=tol[i])
+                           for i in xrange(self._dim)]
+
+        if self.lowerS is None:
+            self.lowerS = [SubPlane(self.domain, normal=normalDown[:, i],
+                                    point=self.origin, lengths=boxlengths,
+                                    epsilon=tol[i])
+                           for i in xrange(self._dim)]
+        self._boxCreated = True
+
+    def discretize(self, topo):
+        """
+        Discretize the box volume and its surfaces.
+        @param topo : the topology that describes the discretization.
+        """
+        # Check if already done. If so, this function has no effect.
+        if topo not in self.ind.keys():
+            spaceStep = topo.mesh.space_step
+            # -- Control volume : union of two halfspaces --
+            if not self._boxCreated:
+                self.createVolumeAndSides(spaceStep)
+
+            # Discretize all volume and surfaces of
+            # the box for topo
+            for i in xrange(self._dim):
+                self.lower[i].discretize(topo)
+                self.upper[i].discretize(topo)
+                self.lowerS[i].discretize(topo)
+                self.upperS[i].discretize(topo)
+
+            # 1 -- Compute list of indices inside the box,
+            #  for topo --> ind[topo]
+            self.ind[topo] = []
+
+            self.ind[topo].append(np.logical_and(self.upper[0].ind[topo][0],
+                                                 self.lower[0].ind[topo][0]))
+            for i in xrange(1, self._dim):
+                cond = np.logical_and(self.upper[i].ind[topo][0],
+                                      self.lower[i].ind[topo][0])
+                self.ind[topo][0] = np.logical_and(self.ind[topo][0], cond)
+
+            ind = np.where(self.ind[topo][0])
+
+            # 2 -- Convert ind[topo] (array of bool) to slices
+            # which may be more convenient for computations
+            # --> slices[topo]
+            # + mesh[topo], a parmepy.mpi.SubMesh, useful
+            # to get local coordinates and so on
+            if ind[0].size == 0:
+                self.slices[topo] = [slice(0, 0) for i in xrange(self._dim)]
+                self.mesh[topo] = None
+                self.isEmpty[topo] = True
+            else:
+                self.isEmpty[topo] = False
+                ic = topo.mesh.iCompute
+                lstart = [ind[i].min() if ind[i].size > 0 else None
+                          for i in xrange(self._dim)]
+                lstart = npw.integerarray([max(lstart[i], ic[i].start)
+                                          for i in xrange(self._dim)])
+                end = [ind[i].max() for i in xrange(self._dim)]
+                end = npw.integerarray([min(end[i], ic[i].stop - 1)
+                                       for i in xrange(self._dim)])
+                # slice(start,end) --> end not included, so +1
+                end += 1
+                resol = end - lstart + 2 * topo.ghosts
+                gstart = lstart + topo.mesh.global_start - topo.ghosts
+                self.mesh[topo] = SubMesh(topo, gstart, resol)
+                self.slices[topo] = [slice(lstart[i], end[i])
+                                     for i in xrange(self._dim)]
+                coords = []
+                for i in xrange(self._dim):
+                    cc = topo.mesh.coords[i].flat[self.slices[topo][i]]
+                    coords.append(cc)
+                coords = tuple(coords)
+                self.coords[topo] = np.ix_(*coords)
+
+                # --> self.ind[topo][0] components are True
+                # for points inside the volume
+                # --> self.slices[topo] represent the same thing
+                # but using slices of numpy arrays.
+                # Usage (vd being a numpy array discretized
+                # on the whole domain, cb a control box):
+                # # Set values to all points inside the control box
+                # vd[cb.ind[topo][0]] = ...
+                # # Get a sub-array of vd representing the control box
+                # # and use it
+                # result[...] = vd[cb.slices] + ...
+                # The important difference between slices and ind is:
+                # 1 - vd[ind] returns a 1D array whatever vd shape is.
+                # 2 - vd[slices] return an array of the same dim as vd,
+                # with shape given by slices.
+
+            return self.ind[topo]
+
+    def sub(self, obstacle, topo):
+        """
+        Remove all points corresponding to the input obstacle from
+        the current control box
+        """
+        if topo not in self.ind.keys():
+            obstacle.discretize(topo)
+            self.discretize(topo)
+            self.indReduced[topo] = []
+            # Warning : obstacle may have several layers
+            cond = obstacle.ind[topo][0]
+            for i in xrange(1, len(obstacle.ind[topo])):
+                cond = npw.asarray(np.logical_or(cond, obstacle.ind[topo][i]))
+            cond = np.logical_not(cond)
+            self.indReduced[topo].append(np.logical_and(self.ind[topo][0],
+                                                        cond))
+        return self.indReduced[topo][-1]
+
+    def integrate_on_proc(self, field, topo, useSlice=True, component=0):
+        """
+        integrate field on the box
+        """
+        if useSlice:
+            cond = self.slices[topo]
+        else:
+            iC = topo.mesh.iCompute
+            cond = self.ind[topo][0][iC]
+        dvol = npw.prod(topo.mesh.space_step)
+        result = npw.sum(field.discretize(topo)[component][cond])
+        result *= dvol
+        return result
+
+    def integrate(self, field, topo, useSlice=True,
+                  component=0, root=0, mpiall=True):
+        res = self.integrate_on_proc(field, topo, useSlice, component)
+        if mpiall:
+            return topo.comm.allreduce(res)
+        else:
+            return topo.comm.reduce(res, root=root)
+
+    def integrateOnSurface(self, field, topo, normalDir=0, up=True,
+                           useSlice=True, component=0, root=0, mpiall=True):
+        """
+        integrate field on top (if up is True) or down surface
+        normal to a direction
+        """
+        res = self.integrateOnSurf_proc(field, topo, normalDir, up, useSlice,
+                                        component)
+        if mpiall:
+            return topo.comm.allreduce(res)
+        else:
+            return topo.comm.reduce(res, root=root)
+
+    def integrateOnSurf_proc(self, field, topo, normalDir=0,
+                             up=True, useSlice=True, component=0):
+        """
+        integrate field on top and down surfaces normal to a direction
+        """
+        if up:
+            surf = self.upperS[normalDir]
+        else:
+            surf = self.lowerS[normalDir]
+        if useSlice:
+            cond = surf.slices[topo]
+        else:
+            iC = topo.mesh.iCompute
+            cond = surf.ind[topo][0][iC]
+        dirs = np.logical_not(np.arange(self._dim) == normalDir)
+        dS = npw.prod(topo.mesh.space_step[dirs])
+        result = npw.sum(field.discretize(topo)[component][cond])
+        result *= dS
+        return result
diff --git a/HySoP/hysop/domain/obstacle/cylinder.py b/HySoP/hysop/domain/obstacle/cylinder.py
deleted file mode 100644
index a46339856..000000000
--- a/HySoP/hysop/domain/obstacle/cylinder.py
+++ /dev/null
@@ -1,109 +0,0 @@
-"""
-Cylinder obstacle description
-"""
-from parmepy.domain.obstacle.obstacle import Obstacle
-import math
-import numpy as np
-
-
-class Cylinder(Obstacle2D):
-    """
-    Discrete obstacles representation.
-    """
-
-    def __init__(self, obst, radius=0.2):
-        """
-        Creates the cylinder obsctacle and y-boundaries and 
-        returns 3 arrays corresponding to the characteristic 
-        functions of three different porous media.
-
-        @param obstacle2D : Two dimensional obstacle description.
-        @param radius : Cylinder radius
-        """
-        ## 2D parent obstacle
-        self.obst = obst
-        ## Radius of the cylinder
-        self.radius = radius
-        ## Characteristic function (array) for y-boundaries
-        self.chiBoundary = None
-        ## Characteristic function (array) for the solid
-        self.chiSolid = None
-        ## Characteristic function (array) for the porous area
-        self.chiPorous = None
-        print 'obstacle =', self.obst.obstacleName
-
-    def setUp(self, topology):
-        """
-        Compute the characteristic functions associated 
-        to y-boundaries and cylinder
-        """
-        ## Temporary arrays
-        chiBoundary_i = []
-        chiBoundary_j = []
-        chiSolid_i = []
-        chiSolid_j = []
-        chiPorous_i = []
-        chiPorous_j = []
-
-        step = topology.mesh.space_step
-        ghosts = topology.ghosts
-        local_start = topology.mesh.local_start
-        local_end = topology.mesh.local_end
-        coord_start = topology.mesh.origin + (ghosts * step)
-        coord_end = topology.mesh.end - (ghosts * step)
-        layerMin = coord_start[1] + self.obst.zlayer
-        layerMax = coord_end[1] - self.obst.zlayer
-        if not (self.obst.porousLayerThickn <= self.radius):
-            raise ValueError("Error, porous layer thickness" +
-                             "is higher than cylinder radius.")
-        radiusMinuslayer = self.radius- self.obst.porousLayerThickn
-
-        print 'step, ghosts, local_start, local_end, zlayer', \
-              step, ghosts, local_start, local_end, self.obst.zlayer
-        print 'start, end, layerMin, layerMax, radiusMinuslayer', \
-              coord_start, coord_end, layerMin, layerMax, radiusMinuslayer
-
-        for j in xrange (local_start[1], local_end[1] + 1):
-            cy = coord_start[1] + j * step[1]
-            for i in xrange (local_start[0], local_end[0] + 1):
-                if (cy >= layerMax or cy <= layerMin):
-                # we are in the y-layer boundary:
-                    chiBoundary_i.append(i)
-                    chiBoundary_j.append(j)
-                else :
-                    cx = coord_start[0] + i * step[0]
-                    dist = np.sqrt((cx - self.obst.center[0]) ** 2 +
-                                   (cy - self.obst.center[1]) ** 2)
-                    if (radiusMinuslayer < dist 
-                        and dist <= self.radius + 1E-12 
-                        and self.obst.porousLayerThickn != 0.):
-                        # we are in the porous region of the cylinder:
-                        chiPorous_i.append(i)
-                        chiPorous_j.append(j)
-                    if (dist <= radiusMinuslayer + 1E-12):
-                        # we are in the solid region of the cylinder:
-                        chiSolid_i.append(i)
-                        chiSolid_j.append(j)
-
-        ## Characteristic function of penalized boundaries
-        chiBoundary_i = np.asarray(chiBoundary_i, dtype=PARMES_INTEGER)
-        chiBoundary_j = np.asarray(chiBoundary_j, dtype=PARMES_INTEGER)
-        self.chiBoundary = tuple([chiBoundary_i, chiBoundary_j])
-
-        ## Characteristic function of solid areas
-        chiSolid_i = np.asarray(chiSolid_i, dtype=PARMES_INTEGER)
-        chiSolid_j = np.asarray(chiSolid_j, dtype=PARMES_INTEGER)
-        self.chiSolid = tuple([chiSolid_i, chiSolid_j])
-
-        ## Characteristic function of porous areas
-        chiPorous_i = np.asarray(chiPorous_i, dtype=PARMES_INTEGER)
-        chiPorous_j = np.asarray(chiPorous_j, dtype=PARMES_INTEGER)
-        self.chiPorous = tuple([chiPorous_i, chiPorous_j])
-
-    def __str__(self):
-        """ToString method"""
-        return "Cylinder"
-
-if __name__ == "__main__" :
-    print "This module defines the following classe:"
-    print "Cylinder: ", Cylinder.__doc__
diff --git a/HySoP/hysop/domain/obstacle/cylinder2d.py b/HySoP/hysop/domain/obstacle/cylinder2d.py
deleted file mode 100644
index 49fc29415..000000000
--- a/HySoP/hysop/domain/obstacle/cylinder2d.py
+++ /dev/null
@@ -1,129 +0,0 @@
-"""
-@file cylinder2d.py
-Rigid disk.
-"""
-from parmepy.domain.obstacle.sphere import Sphere, HemiSphere
-import math
-import numpy as np
-
-
-class Cylinder2D(Sphere):
-    """
-    Disk in a 2D domain.
-    """
-
-    def __init__(self, domain, position, radius=1.0, vd=0.0, porousLayers=[]):
-        """
-        Description of a disk in a domain.
-        @param domain : the physical domain that contains the sphere.
-        @param position : position of the center
-        @param radius : sphere radius, default = 1
-        @param porousLayers : a list of thicknesses
-        for successive porous layers
-        radius is the inside sphere radius and thicknesses are given from
-        inside layer to outside one.
-        @param vd : velocity of the disk (considered as a rigid body),
-        default = 0.
-        """
-        Sphere.__init__(self, domain, position, radius, vd, porousLayers)
-        assert self.domain.dimension == 2
-
-        def dist(x, y, R):
-            """
-            """
-            return math.sqrt((x - self.position[0]) ** 2
-                             + (y - self.position[1]) ** 2) - R
-        self.chi = [np.vectorize(dist)]
-        ## List of thicknesses for porous layers
-
-    ## def discretize(self, topo):
-    ##     # first check if we have already compute indices for
-    ##     # this topology
-    ##     if topo not in self.ind.keys():
-    ##         currentRadius = self.radius
-    ##         self.ind[topo] = []
-    ##         # for each indicator function
-    ##         for thickness in self.layers:
-    ##             # apply indicator function on topo local mesh
-    ##             args = (currentRadius,)
-    ##             condA = self.chi[0](*(topo.mesh.coords + args)) <= 0
-    ##             args = (currentRadius - thickness,)
-    ##             condB = self.chi[0](*(topo.mesh.coords + args)) > 0
-    ##             self.ind[topo].append(np.logical_and(condA, condB))
-    ##             # update current radius
-    ##             currentRadius = currentRadius - thickness
-    ##         # and finally the 'internal' sphere
-    ##         args = (currentRadius,)
-    ##         self.ind[topo].append(self.chi[0](*(topo.mesh.coords + args)) <= 0)
-    ##     return self.ind[topo]
-
-    def __str__(self):
-        s = '2D Cylinder of radius ' + str(self.radius)
-        s += ' and center position ' + str(self.position)
-        return s
-
-
-class SemiCylinder2D(HemiSphere):
-    """
-    Half disk in a 2D domain.
-    """
-    def __init__(self, domain, position, radius=1.0, vd=0.0, porousLayers=[]):
-        """
-        Constructor for the semi-disk.
-        @param domain : the physical domain that contains the sphere.
-        @param position : position of the center
-        @param radius : sphere radius, default = 1
-        (if box ...)
-        @param vd : velocity of the disk (considered as a rigid body),
-        default = 0.
-        """
-        HemiSphere.__init__(self, domain, position, radius, vd, porousLayers)
-        assert self.domain.dimension == 2
-
-        def dist(x, y, R):
-            """
-            """
-            return math.sqrt((x - self.position[0]) ** 2
-                             + (y - self.position[1]) ** 2) - R
-        self.chi = [np.vectorize(dist)]
-
-        def LeftBox(x, y):
-            return x - self.position[0]
-
-        self.LeftBox = np.vectorize(LeftBox)
-
-    ## def discretize(self, topo):
-    ##     # first check if we have already compute indices for
-    ##     # this topology
-    ##     if topo not in self.ind.keys():
-    ##         currentRadius = self.radius
-    ##         self.ind[topo] = []
-    ##         # First cond : we must be in the left half plane
-    ##         cond0 = self.HalfPlane(*(topo.mesh.coords)) <= 0
-    ##         # for each indicator function
-    ##         for thickness in self.layers:
-    ##             # apply indicator function on topo local mesh
-    ##             args = (currentRadius,)
-    ##             condA = self.chi[0](*(topo.mesh.coords + args)) <= 0
-    ##             args = (currentRadius - thickness,)
-    ##             condB = self.chi[0](*(topo.mesh.coords + args)) > 0
-    ##             np.logical_and(condA, condB, condA)
-    ##             np.logical_and(condA, cond0, condA)
-    ##             self.ind[topo].append(condA)
-    ##             # update current radius
-    ##             currentRadius = currentRadius - thickness
-    ##         # and finally the 'internal' sphere
-    ##         args = (currentRadius,)
-    ##         condA = self.chi[0](*(topo.mesh.coords + args)) <= 0
-    ##         self.ind[topo].append(np.logical_and(condA, cond0))
-    ##     return self.ind[topo]
-
-    def __str__(self):
-        s = '2D semi-cylinder of radius ' + str(self.radius)
-        s += ' and center position ' + str(self.position)
-        return s
-
-if __name__ == "__main__":
-    print "This module defines the following classe:"
-    print "Disk: ", Cylinder2D.__doc__
-    print "Half-disk: ", SemiCylinder2D.__doc__
diff --git a/HySoP/hysop/domain/obstacle/disk.py b/HySoP/hysop/domain/obstacle/disk.py
new file mode 100644
index 000000000..5f27b13cd
--- /dev/null
+++ b/HySoP/hysop/domain/obstacle/disk.py
@@ -0,0 +1,74 @@
+"""
+@file disk.py
+Rigid disk (2D)
+"""
+from parmepy.domain.obstacle.sphere import Sphere, HemiSphere
+import numpy as np
+import parmepy.tools.numpywrappers as npw
+
+
+class Disk(Sphere):
+    """
+    Disk in a 2D domain.
+    """
+
+    def __init__(self, domain, position, radius=1.0, vd=0.0, porousLayers=[]):
+        """
+        Description of a disk in a domain.
+        @param domain : the physical domain that contains the disk.
+        @param position : position of the center
+        @param radius : disk radius, default = 1
+        @param porousLayers : a list of thicknesses
+        for successive porous layers
+        radius is the inside sphere radius and thicknesses are given from
+        inside layer to outside one.
+        @param vd : velocity of the disk (considered as a rigid body),
+        default = 0.
+        """
+        Sphere.__init__(self, domain, position, radius, vd, porousLayers)
+        assert self.domain.dimension == 2
+
+        def dist(x, y, R):
+            return npw.asarray(np.sqrt((x - self.position[0]) ** 2
+                                       + (y - self.position[1]) ** 2) - R)
+        self.chi = [dist]
+
+    def __str__(self):
+        s = 'Disk of radius ' + str(self.radius)
+        s += ' and center position ' + str(self.position)
+        return s
+
+
+class HalfDisk(HemiSphere):
+    """
+    Half disk in a 2D domain.
+    """
+    def __init__(self, domain, position, radius=1.0, vd=0.0, porousLayers=[]):
+        """
+        Constructor for the semi-disk.
+        @param domain : the physical domain that contains the sphere.
+        @param position : position of the center
+        @param radius : sphere radius, default = 1
+        (if box ...)
+        @param vd : velocity of the disk (considered as a rigid body),
+        default = 0.
+        """
+        HemiSphere.__init__(self, domain, position, radius, vd, porousLayers)
+        assert self.domain.dimension == 2
+
+        def dist(x, y, R):
+            """
+            """
+            return npw.asarray(np.sqrt((x - self.position[0]) ** 2
+                                       + (y - self.position[1]) ** 2) - R)
+        self.chi = [dist]
+
+        def LeftBox(x, y):
+            return x - self.position[0]
+
+        self.LeftBox = LeftBox
+
+    def __str__(self):
+        s = 'Half-disk of radius ' + str(self.radius)
+        s += ' and center position ' + str(self.position)
+        return s
diff --git a/HySoP/hysop/domain/obstacle/hemisphere.py b/HySoP/hysop/domain/obstacle/hemisphere.py
deleted file mode 100644
index 7bed176c2..000000000
--- a/HySoP/hysop/domain/obstacle/hemisphere.py
+++ /dev/null
@@ -1,124 +0,0 @@
-"""
-@file sphere.py
-Spherical sub-domain
-Hemisphere obstacle description
-"""
-from parmepy.constants import np
-from parmepy.domain.obstacle.sphere import Sphere
-
-
-class Hemisphere(Sphere):
-    """
-    Discrete obstacles representation.
-    """
-
-    def __init__(self, obst, radius=0.2):
-        """
-        Creates the hemisphere obsctacle and z-boundaries and 
-        returns 3 arrays corresponding to the characteristic 
-        functions of three different porous media.
-
-        @param obst : Three dimensional parent obstacle.
-        @param radius : Hemisphere radius
-        """
-        ## 3D parent obstacle
-        self.obst = obst
-        ## Radius of the hemisphere
-        self.radius = radius
-        ## Characteristic function (array) for z-boundaries
-        self.chiBoundary = None
-        ## Characteristic function (array) for the solid
-        self.chiSolid = None
-        ## Characteristic function (array) for the porous area
-        self.chiPorous = None
-        print 'obstacle =', self.obst.obstacleName
-
-    def setUp(self, topology):
-        """
-        Compute the characteristic functions associated 
-        to z-boundaries and hemisphere
-        """
-        ## Temporary arrays
-        chiBoundary_i = []
-        chiBoundary_j = []
-        chiBoundary_k = []
-        chiSolid_i = []
-        chiSolid_j = []
-        chiSolid_k = []
-        chiPorous_i = []
-        chiPorous_j = []
-        chiPorous_k = []
-
-        step = topology.mesh.space_step
-        ghosts = topology.ghosts
-        local_start = topology.mesh.local_start
-        local_end = topology.mesh.local_end
-        coord_start = topology.mesh.origin + (ghosts * step)
-        coord_end = topology.mesh.end - (ghosts * step)
-        layerMin = coord_start[2] + self.obst.zlayer
-        layerMax = coord_end[2] - self.obst.zlayer
-        if not (self.obst.porousLayerThickn <= self.radius):
-            raise ValueError("Error, porous layer thickness" +
-                             "is higher than hemisphere radius.")
-        radiusMinuslayer = self.radius- self.obst.porousLayerThickn
-
-        print 'step, ghosts, local_start, local_end, zlayer', \
-              step, ghosts, local_start, local_end, self.obst.zlayer
-        print 'start, end, layerMin, layerMax, radiusMinuslayer', \
-              coord_start, coord_end, layerMin, layerMax, radiusMinuslayer
-
-        for k in xrange (local_start[2], local_end[2] + 1):
-            cz = coord_start[2] + k * step[2]
-            for j in xrange (local_start[1], local_end[1] + 1):
-                cy = coord_start[1] + j * step[1]
-                for i in xrange (local_start[0], local_end[0] + 1):
-                    if (cz >= layerMax or cz <= layerMin):
-                    # we are in the z-layer boundary:
-                        chiBoundary_i.append(i)
-                        chiBoundary_j.append(j)
-                        chiBoundary_k.append(k)
-                    else :
-                        cx = coord_start[0] + i * step[0]
-                        dist = np.sqrt((cx - self.obst.center[0]) ** 2 +
-                               (cy - self.obst.center[1]) ** 2 +
-                               (cz - self.obst.center[2]) ** 2)
-                        if (radiusMinuslayer < dist 
-                            and dist <= self.radius + 1E-12
-                            and cx <= self.obst.center[0]
-                            and self.obst.porousLayerThickn != 0.):
-                            # we are in the porous region of the hemisphere:
-                            chiPorous_i.append(i)
-                            chiPorous_j.append(j)
-                            chiPorous_k.append(k)
-                        if (dist <= radiusMinuslayer + 1E-12
-                            and cx <= self.obst.center[0]):
-                            # we are in the solid region of the hemisphere:
-                            chiSolid_i.append(i)
-                            chiSolid_j.append(j)
-                            chiSolid_k.append(k)
-
-        ## Characteristic function of penalized boundaries
-        chiBoundary_i = np.asarray(chiBoundary_i)
-        chiBoundary_j = np.asarray(chiBoundary_j)
-        chiBoundary_k = np.asarray(chiBoundary_k)
-        self.chiBoundary = tuple([chiBoundary_i, chiBoundary_j, chiBoundary_k])
-
-        ## Characteristic function of solid areas
-        chiSolid_i = np.asarray(chiSolid_i)
-        chiSolid_j = np.asarray(chiSolid_j)
-        chiSolid_k = np.asarray(chiSolid_k)
-        self.chiSolid = tuple([chiSolid_i, chiSolid_j, chiSolid_k])
-
-        ## Characteristic function of porous areas
-        chiPorous_i = np.asarray(chiPorous_i)
-        chiPorous_j = np.asarray(chiPorous_j)
-        chiPorous_k = np.asarray(chiPorous_k)
-        self.chiPorous = tuple([chiPorous_i, chiPorous_j, chiPorous_k])
-
-    def __str__(self):
-        """ToString method"""
-        return "Hemisphere"
-
-if __name__ == "__main__" :
-    print "This module defines the following classe:"
-    print "Hemisphere: ", Hemisphere.__doc__
diff --git a/HySoP/hysop/domain/obstacle/obstacle.py b/HySoP/hysop/domain/obstacle/obstacle.py
index e764524d5..d5094a759 100644
--- a/HySoP/hysop/domain/obstacle/obstacle.py
+++ b/HySoP/hysop/domain/obstacle/obstacle.py
@@ -1,6 +1,7 @@
 """@file obstacle.py
 
-General interface for physical obstacle description.
+General interface to define a new geometry
+inside a domain (sphere, control box ...)
 """
 import numpy as np
 
@@ -10,7 +11,6 @@ class Obstacle(object):
     An obstacle is the geometrical description of
     a physical sub-domain.
     """
-
     def __init__(self, domain, formula=None, vd=0.0):
         """ Constructor
         @param domain : the domain that contains this obstacle.
@@ -37,8 +37,11 @@ class Obstacle(object):
                 self.chi = [np.vectorize(formula)]
         ## A dictionnary of lists of indices ...
         ## ind[topo][i] represents the set of points of the domain
-        ## discretized with topo that are in area defined with chi[i].
+        ## discretized with topo that are in the area defined with chi[i].
         self.ind = {}
+        ## Velocity of the center of mass of the obstacle
+        ## (considered as a rigid body)
+        self.vd = vd
 
     def discretize(self, topo):
         """
@@ -79,14 +82,3 @@ class Obstacle(object):
                 self.ind[topo].append(self.chi[i](*topo.mesh.coords) <= 0)
 
         return self.ind[topo]
-
-    def __str__(self):
-        s = str(self.domain.dimension) + 'D obstacle defined with '
-        s += 'the function : ' + str(self.chi_function.func_name)
-        return s
-
-if __name__ == "__main__":
-    print __doc__
-    print "- Provided class : Obstacle (abstract)."
-    print Obstacle.__doc__
-
diff --git a/HySoP/hysop/domain/obstacle/planes.py b/HySoP/hysop/domain/obstacle/planes.py
new file mode 100644
index 000000000..6791bb23f
--- /dev/null
+++ b/HySoP/hysop/domain/obstacle/planes.py
@@ -0,0 +1,320 @@
+"""
+@file planes.py
+Plate-like sub-domains at boundaries, normal
+to a given direction.
+"""
+from parmepy.domain.obstacle.obstacle import Obstacle
+import numpy as np
+import parmepy.tools.numpywrappers as npw
+
+
+class HalfSpace(Obstacle):
+    """
+    Divide domain into two sub-spaces, on each side of a plane
+    defined by its normal and a point.
+    Indices of this.ind describe the half-space below the plane,
+    'normal' being the outward normal of the plane.
+    """
+
+    def __init__(self, domain, normal, point, epsilon=1e-2, vd=0.0):
+        """
+        Half space defined by the points of the domain on one side
+        of a plane.
+        @param domain : the physical domain that contains the plane
+        @param normal : outward normal
+        @param point : coordinates of a point of the plane.
+        @param epsilon : tolerance
+        @param vd : velocity of obstacle (considered as a rigid body),
+        default = 0.
+        """
+        Obstacle.__init__(self, domain, vd=vd)
+        assert epsilon > 0.0, 'Tolerance value must be positive'
+        ## Tolerance used to consider that points at the boundary are
+        ## in the subspace. Good choice may be grid space_step / 2.
+        self.epsilon = epsilon
+        ## Direction of the normal to the plate (0:x, 1:y, 2:z))
+        ## normal is the 'outer' normal of the 'in' subspace.
+        self.normal = npw.integerarray(normal)
+        self.point = point
+
+        def Outside(*coords):
+            return sum([(coords[i] - self.point[i]) * self.normal[i]
+                        for i in xrange(self.domain.dimension)])
+
+        ## Test function for half-space.
+        ## Positive value if outside subdomain else negative
+        self.chi = [Outside]
+        self.slices = {}
+
+    def discretize(self, topo):
+        # first check if we have already compute indices for
+        # this topology
+
+        if topo not in self.ind.keys():
+            self.ind[topo] = []
+            # apply indicator function on topo local mesh
+            cond = npw.asarray(self.chi[0](*topo.mesh.coords) <= self.epsilon)
+            self.ind[topo].append(cond)
+
+        return self.ind[topo]
+
+    def __str__(self):
+        s = 'Plane normal to vector' + str(self.normal)
+        s += ' going through point ' + str(self.point)
+        return s
+
+
+class Plane(HalfSpace):
+    """
+    A plane in the domain, defined by its normal and a point.
+    Indices of plane.ind describe the points belonging to the plane.
+    """
+    def discretize(self, topo):
+        # first check if we have already compute indices for
+        # this topology
+
+        if topo not in self.ind.keys():
+            self.ind[topo] = []
+            # apply indicator function on topo local mesh
+            cond = npw.abs(self.chi[0](*topo.mesh.coords)) < self.epsilon
+            self.ind[topo].append(cond)
+
+            # assert that the plane is a real surface, i.e.
+            # only one value for coords[normalDir].
+            # The expr is a bit tricky but it works ...
+            ndir = np.where(self.normal != 0)[0][0]
+            assert assertSubPlane(ndir, self.ind[topo][0], *topo.mesh.coords),\
+                'Your plane is not a surface but a volume.\
+                Please reduce epsilon value.'
+
+        return self.ind[topo]
+
+
+class SubSpace(HalfSpace):
+    """
+    Define a rectangular space in a plane normal to one
+    coord. axis and the subspace below this surface.
+    'Below' = direction opposite to the outward normal of the plane
+    (input param)
+    """
+    def __init__(self, domain, normal, point, lengths, epsilon=1e-2, vd=0.0):
+        """
+        @param domain : the physical domain that contains the space
+        @param normal : outward normal
+        @param point : coordinates of a point of the plane.
+        @param lengths : lengths of the subplane
+        @param epsilon : tolerance
+        @param vd : velocity of the obstacle (considered as a rigid body),
+        default = 0.
+        """
+        HalfSpace.__init__(self, domain, normal, point, epsilon, vd)
+
+        def dist(cdir, val, *coords):
+            return coords[cdir] - val
+
+        self.dist = dist
+        self.origin = npw.realarray(point)
+        self.max = self.origin + npw.realarray(lengths)
+        ndir = np.where(self.normal != 0)[0][0]
+        if normal[ndir] > 0:
+            self.max[ndir] = self.origin[ndir]
+        elif normal[ndir] < 0:
+            self.max[ndir] = self.domain.max[ndir]
+        # Only implemented for planes orthogonal to coord. axes
+        assert len(self.normal[self.normal == 0]) == self.domain.dimension - 1
+        self.coords = {}
+        ## Check if some grid points are present inside the current object
+        ## for the current mpi proc. If not, isEmpty = True.
+        self.isEmpty = {}
+
+    def discretize(self, topo):
+        # first check if we have already compute indices for
+        # this topology
+        condMax = [0] * self.domain.dimension
+        condMin = [0] * self.domain.dimension
+        if topo not in self.ind.keys():
+            self.ind[topo] = []
+            # apply indicator function on topo local mesh
+            coords = topo.mesh.coords
+            cond = npw.asarray(self.chi[0](*coords) < self.epsilon)
+            indices = np.where(self.normal == 0)[0]
+
+            for i in indices:
+                condMax[i] = self.dist(i, self.max[i], *coords) < self.epsilon
+                condMin[i] = self.dist(i, self.origin[i], *coords) > - self.epsilon
+                condMin[i] = np.logical_and(condMax[i], condMin[i])
+                cond = npw.asarray(np.logical_and(cond, condMin[i]))
+
+            self.ind[topo].append(cond)
+
+        return self.ind[topo]
+
+
+class SubPlane(SubSpace):
+    """
+    Define a rectangular surface in a plane normal to one
+    coord. axis.
+    """
+    def discretize(self, topo):
+        # first check if we have already compute indices for
+        # this topology
+        dim = self.domain.dimension
+        condMax = [0] * dim
+        condMin = [0] * dim
+        if topo not in self.ind.keys():
+            self.ind[topo] = []
+            # apply indicator function on topo local mesh
+            coords = topo.mesh.coords
+            cond = npw.abs(self.chi[0](*coords)) < self.epsilon
+            indices = np.where(self.normal == 0)[0]
+            for i in indices:
+                condMax[i] = self.dist(i, self.max[i], *coords) < self.epsilon
+                condMin[i] = self.dist(i, self.origin[i], *coords) > -self.epsilon
+                condMin[i] = np.logical_and(condMax[i], condMin[i])
+                cond = npw.asarray(np.logical_and(cond, condMin[i]))
+
+            self.ind[topo].append(cond)
+            ilist = np.where(cond)
+            if ilist[0].size == 0:
+                self.slices[topo] = [slice(0, 0) for i in xrange(dim)]
+                self.isEmpty[topo] = True
+            else:
+                self.isEmpty[topo] = False
+                start = [ilist[i].min() for i in xrange(dim)]
+                # Ghost points must not be included into surf. points
+                ic = topo.mesh.iCompute
+                start = [max(start[i], ic[i].start) for i in xrange(dim)]
+                end = [ilist[i].max() for i in xrange(dim)]
+                end = npw.integerarray([min(end[i], ic[i].stop - 1)
+                                        for i in xrange(dim)])
+                end += 1
+                ndir = np.where(self.normal != 0)[0][0]
+                end[ndir] = start[ndir] + 1
+                self.slices[topo] = [slice(start[i], end[i])
+                                     for i in xrange(dim)]
+                assert assertSubPlane(ndir, self.ind[topo][0],
+                                      *topo.mesh.coords),\
+                    'Your plane is not a surface but a volume.\
+                Please reduce epsilon value.'
+            subcoords = []
+            # !! Warning : slices will be used for integration,
+            # so the last point in each dir is not included.
+            # Same thing for coords.
+            for i in xrange(dim):
+                subcoords.append(coords[i].flat[self.slices[topo][i]])
+            subcoords = tuple(subcoords)
+            self.coords[topo] = np.ix_(*subcoords)
+        return self.ind[topo]
+
+
+class PlaneBoundaries(Obstacle):
+    """
+    Defines top and down (meaning for min and max value in
+    a given direction) planes at boundaries.
+    All points in the spaces above the top plane and below the down plane
+    will be included in the PlaneBoundaries list of indices.
+    Thickness of the top/down areas is given as an input param.
+    Example for z dir:
+    \f$ \{x,y,z\} \ for \ z_{max} - \epsilon \leq z \leq z_{max} + \epsilon
+    \ or \ z_{min} - \epsilon \leq z \leq z_{min}\f$
+    """
+
+    def __init__(self, domain, normal_dir, thickness=0.1):
+        """
+        Define top and down boundary planes of the domain.
+        @param domain : the physical domain that contains the sphere.
+        @param thickness : thickness of boundary areas
+        @param normal_dir : direction (0:x, 1:y, 2:z) normal to
+        the boundary planes.
+        """
+        Obstacle.__init__(self, domain, vd=0.0)
+        assert thickness > 0.0, 'Plate thickness must be positive'
+        ## Thickness/2
+        self.thickness = thickness
+        ## Direction of the normal to the plate (0:x, 1:y, 2:z))
+        normalUp = np.zeros((self.domain.dimension))
+        normalUp[normal_dir] = -1
+        pointUp = npw.zeros((self.domain.dimension))
+        pointUp[normal_dir] = domain.max[normal_dir] - thickness
+        self.upper = HalfSpace(domain, normal=normalUp, point=pointUp,
+                               epsilon=1e-3)
+        normalDown = np.zeros((self.domain.dimension))
+        normalDown[normal_dir] = 1
+        pointDown = npw.zeros((self.domain.dimension))
+        pointDown[normal_dir] = domain.origin[normal_dir] + thickness
+        self.lower = HalfSpace(domain, normal=normalDown, point=pointDown,
+                               epsilon=1e-3)
+
+    def discretize(self, topo):
+        # first check if we have already compute indices for
+        # this topology
+
+        self.lower.discretize(topo)
+        self.upper.discretize(topo)
+        if topo not in self.ind.keys():
+            # Warning FP : ind[topo] must be a list to be coherent
+            # with sphere definition, where porous layers are allowed.
+            # todo if required : add porous layers for planes.
+            self.ind[topo] = []
+            self.ind[topo].append(np.logical_or(self.upper.ind[topo][0],
+                                                self.lower.ind[topo][0]))
+
+        return self.ind[topo]
+
+
+def assertSubPlane(ndir, ind, *coords):
+    dim = len(coords)
+    if dim == 2:
+        return assertline(ndir, ind, *coords)
+    elif dim == 3:
+        return assertsurface(ndir, ind, *coords)
+
+
+def assertsurface(nd, ind, *coords):
+
+    dim = len(coords)
+    shape = np.zeros(dim, dtype=np.int32)
+    shape[:] = [coords[i].shape[i] for i in xrange(dim)]
+    cshape = coords[nd].shape
+    if nd == 0:
+        return max([a.max() - a.min()
+                    for a in [coords[nd][ind[:, i, j]]
+                              for i in xrange(shape[1])
+                              for j in xrange(shape[2])
+                              if coords[nd][ind[:, i, j]].size
+                              > 0]] + [0]) == 0.
+    elif nd == 1:
+        return max([a.max() - a.min()
+                    for a in [coords[nd][ind[i, :, j].reshape(cshape)]
+                              for i in xrange(shape[0])
+                              for j in xrange(shape[2])
+                              if coords[nd][ind[i, :, j].reshape(cshape)].size
+                              > 0]] + [0]) == 0.
+
+    else:
+        return max([a.max() - a.min()
+                    for a in [coords[nd][ind[i, j, :].reshape(cshape)]
+                              for i in xrange(shape[0])
+                              for j in xrange(shape[2])
+                              if coords[nd][ind[i, j, :].reshape(cshape)].size
+                              > 0]] + [0]) == 0.
+
+
+def assertline(nd, ind, *coords):
+
+    dim = len(coords)
+    shape = np.zeros(dim, dtype=np.int32)
+    shape[:] = [coords[i].shape[i] for i in xrange(dim)]
+    cshape = coords[nd].shape
+    if nd == 0:
+        return max([a.max() - a.min()
+                    for a in [coords[nd][ind[:, i]]
+                              for i in xrange(shape[1])
+                              if coords[nd][ind[:, i]].size
+                              > 0]] + [0]) == 0.
+    elif nd == 1:
+        return max([a.max() - a.min()
+                    for a in [coords[nd][ind[i, :].reshape(cshape)]
+                              for i in xrange(shape[0])
+                              if coords[nd][ind[i, :].reshape(cshape)].size
+                              > 0]] + [0]) == 0.
diff --git a/HySoP/hysop/domain/obstacle/plates.py b/HySoP/hysop/domain/obstacle/plates.py
deleted file mode 100644
index 79632643e..000000000
--- a/HySoP/hysop/domain/obstacle/plates.py
+++ /dev/null
@@ -1,63 +0,0 @@
-"""
-@file sphere.py
-Plate-like sub-domains.
-"""
-from parmepy.domain.obstacle.obstacle import Obstacle
-import numpy as np
-
-
-class Plates(Obstacle):
-    """
-    Top and down (z-axis) plate-like sub-domains.
-    Defines two areas on the min and max z position of
-    the main domain.
-    zplates:
-    \f$ \{x,y,z\} \ for \ z_{max} - \epsilon \leq z \leq z_{max} + \epsilon
-    \ or \ z_{min} - \epsilon \leq z \leq z_{min}\f$
-    """
-
-    def __init__(self, domain, normal_dir, epsilon=0.1, vd=0.0):
-        """
-        Description of a sphere in a domain.
-        @param domain : the physical domain that contains the sphere.
-        @param epsilon : thickness/2 of the plates
-        @param vd : velocity of obstacle (considered as a rigid body),
-        default = 0.
-        """
-        Obstacle.__init__(self, domain, vd=vd)
-        assert epsilon > 0.0, 'Plate thickness must be positive'
-        ## Thickness/2
-        self.epsilon = epsilon
-        ## Direction of the normal to the plate (0:x, 1:y, 2:z))
-        self.normal = normal_dir
-        assert self.normal < domain.dimension and self.normal >= 0
-
-        def Upper(*args):
-            return domain.max[self.normal] - self.epsilon - args[self.normal]
-
-        def Lower(*args):
-            return args[self.normal] - self.epsilon - \
-                domain.origin[self.normal]
-
-        self.Upper = np.vectorize(Upper)
-        self.Lower = np.vectorize(Lower)
-
-    def discretize(self, topo):
-        # first check if we have already compute indices for
-        # this topology
-
-        if topo not in self.ind.keys():
-            self.ind[topo] = []
-            # apply indicator function on topo local mesh
-            condUpper = self.Upper(*topo.mesh.coords) <= 0
-            condLower = self.Lower(*topo.mesh.coords) <= 0
-            self.ind[topo].append(np.logical_or(condUpper, condLower))
-
-        return self.ind[topo]
-
-    def __str__(self):
-        """ToString method"""
-        s = 'Upper and lower (z-axis) plane areas of thickness '
-        s += str(2 * self.epsilon) + '.'
-        return s
-
diff --git a/HySoP/hysop/domain/obstacle/sphere.py b/HySoP/hysop/domain/obstacle/sphere.py
index ae2420223..5d14340a7 100644
--- a/HySoP/hysop/domain/obstacle/sphere.py
+++ b/HySoP/hysop/domain/obstacle/sphere.py
@@ -3,8 +3,8 @@
 Spherical or hemispherical sub-domain.
 """
 from parmepy.domain.obstacle.obstacle import Obstacle
-import math
 import numpy as np
+import parmepy.tools.numpywrappers as npw
 
 
 class Sphere(Obstacle):
@@ -35,11 +35,11 @@ class Sphere(Obstacle):
         def dist(x, y, z, R):
             """
             """
-            return math.sqrt((x - self.position[0]) ** 2
-                             + (y - self.position[1]) ** 2
-                             + (z - self.position[2]) ** 2) - R
+            return npw.asarray(np.sqrt((x - self.position[0]) ** 2
+                                       + (y - self.position[1]) ** 2
+                                       + (z - self.position[2]) ** 2) - R)
 
-        self.chi = [np.vectorize(dist)]
+        self.chi = [dist]
         ## List of thicknesses for porous layers
         self.layers = porousLayers
 
@@ -98,7 +98,7 @@ class HemiSphere(Sphere):
 
         def LeftBox(x, y, z):
             return x - self.position[0]
-        self.LeftBox = np.vectorize(LeftBox)
+        self.LeftBox = LeftBox
 
     def discretize(self, topo):
         # first check if we have already compute indices for
@@ -122,6 +122,7 @@ class HemiSphere(Sphere):
                 condB = self.chi[0](*(topo.mesh.coords + args)) <= 0
                 np.logical_and(condA, condB, condA)
                 np.logical_and(condA, cond0, condA)
+                condA = npw.asarray(condA)
                 self.ind[topo].append(condA)
                 # update current radius
                 currentRadius = currentRadius + thickness
@@ -132,8 +133,3 @@ class HemiSphere(Sphere):
         s = 'hemisphere of radius ' + str(self.radius)
         s += ' and center position ' + str(self.position)
         return s
-
-if __name__ == "__main__":
-    print "This module defines the following classes:"
-    print "Sphere: ", Sphere.__doc__
-    print "HemiSphere: ", HemiSphere.__doc__
diff --git a/HySoP/hysop/domain/tests/test_obstacle.py b/HySoP/hysop/domain/tests/test_obstacle.py
index d1363806e..c40c2a37e 100644
--- a/HySoP/hysop/domain/tests/test_obstacle.py
+++ b/HySoP/hysop/domain/tests/test_obstacle.py
@@ -1,5 +1,247 @@
 """
 Testing parmepy.domain.obstacle.Obstacle
 """
-#import parmepy as pp
-#from parmepy.constants import np
+import parmepy as pp
+from parmepy.fields.continuous import Field
+from parmepy.mpi.topology import Cartesian
+from parmepy.domain.obstacle.sphere import Sphere, HemiSphere
+from parmepy.domain.obstacle.disk import Disk, HalfDisk
+from parmepy.domain.obstacle.planes import HalfSpace, Plane, SubSpace,\
+    SubPlane, PlaneBoundaries
+from parmepy.domain.obstacle.controlBox import ControlBox
+import numpy as np
+from parmepy.constants import CHECK_F_CONT
+
+
+nb = 129
+Lx = Ly = Lz = 2
+dom = pp.Box(dimension=3, length=[Lx, Ly, Lz], origin=[-1., -1., -1.])
+dom2D = pp.Box(dimension=2, length=[Lx, Ly], origin=[-1., -1.])
+resol3D = [nb, nb, nb]
+resol2D = [nb, nb]
+scal = Field(domain=dom)
+scal2D = Field(domain=dom2D)
+topo = Cartesian(dom, 3, resol3D)
+topo2D = Cartesian(dom2D, 2, resol2D)
+coords = topo.mesh.coords
+coords2D = topo2D.mesh.coords
+scald = scal.discretize(topo).data[0]
+scald2D = scal2D.discretize(topo2D).data[0]
+h3d = topo.mesh.space_step
+h2d = topo2D.mesh.space_step
+dvol = np.prod(h3d)
+ds = np.prod(h2d)
+import math
+pi = math.pi
+tol = 1e-3
+lengths = np.asarray([20 * h3d[0], 22 * h3d[1], 31 * h3d[2]])
+rlengths = lengths + h3d
+rlengths2d = lengths[:2] + h2d
+scald[:] = 1.
+scald2D[:] = 1.
+
+
+def testSphere():
+    scald[:] = 1.
+    rad = 0.3
+    sphere = Sphere(dom, position=[0., 0., 0.],
+                    radius=rad, porousLayers=[0.13])
+
+    sphere.discretize(topo)
+    ind = sphere.ind[topo][0]
+    (ix, iy, iz) = topo.mesh.indices([-0.2, 0, 0.2])
+    assert ind[ix, iy, iz]
+    (ix, iy, iz) = topo.mesh.indices([0.5, 0.1, 0.2])
+    assert not ind[ix, iy, iz]
+
+
+def testHemiSphere():
+    scald[:] = 1.
+    rad = 0.3
+    sphere = HemiSphere(dom, position=[0., 0., 0.],
+                        radius=rad, porousLayers=[0.13])
+
+    sphere.discretize(topo)
+    ind = sphere.ind[topo][0]
+    (ix, iy, iz) = topo.mesh.indices([-0.3, 0., 0.])
+    assert ind[ix, iy, iz]
+    (ix, iy, iz) = topo.mesh.indices([0.3, 0., 0.])
+    assert not ind[ix, iy, iz]
+    assert ind.flags.f_contiguous is CHECK_F_CONT
+
+
+def testDisk():
+    scald2D[:] = 1.
+    rad = 0.3
+    sphere = Disk(dom2D, position=[0., 0.],
+                  radius=rad, porousLayers=[0.13])
+
+    sphere.discretize(topo2D)
+    ind = sphere.ind[topo2D][0]
+    (ix, iy) = topo2D.mesh.indices([-0.2, 0.])
+    assert ind[ix, iy]
+    assert ind.flags.f_contiguous is CHECK_F_CONT
+
+
+def testHalfDisk():
+    scald2D[:] = 1.
+    rad = 0.3
+    sphere = HalfDisk(dom2D, position=[0., 0.],
+                      radius=rad, porousLayers=[0.13])
+
+    sphere.discretize(topo2D)
+    ind = sphere.ind[topo2D][0]
+    (ix, iy) = topo2D.mesh.indices([-0.2, 0.])
+    assert ind[ix, iy]
+    assert ind.flags.f_contiguous is CHECK_F_CONT
+
+
+def testHalfSpace2D():
+    hsp = HalfSpace(dom2D, [1, 1], [0., 0.])
+    hsp.discretize(topo2D)
+    ind = hsp.ind[topo2D][0]
+    (ix, iy) = topo2D.mesh.indices([-0.8, 0.5])
+    assert ind[ix, iy]
+    assert ind.flags.f_contiguous is CHECK_F_CONT
+
+
+def testHalfSpace3D():
+    hsp = HalfSpace(dom, [1, 1, 1], [0., 0., 0.])
+    hsp.discretize(topo)
+    ind = hsp.ind[topo][0]
+    (ix, iy, iz) = topo.mesh.indices([-0.8, 0.5, -0.5])
+    assert ind[ix, iy, iz]
+    assert ind.flags.f_contiguous is CHECK_F_CONT
+
+
+def testPlane2D():
+    plane = Plane(dom2D, [1, 1], [0., 0.])
+    plane.discretize(topo2D)
+    ind = plane.ind[topo2D][0]
+    (ix, iy) = topo2D.mesh.indices([-0.5, 0.5])
+    assert ind[ix, iy]
+    assert ind.flags.f_contiguous is CHECK_F_CONT
+
+
+def testPlane3D():
+    plane = Plane(dom, [1, 1, 1], [0., 0., 0.])
+    plane.discretize(topo)
+    ind = plane.ind[topo][0]
+    (ix, iy, iz) = topo.mesh.indices([-0.3, 0.5, -0.2])
+    assert ind[ix, iy, iz]
+    assert ind.flags.f_contiguous is CHECK_F_CONT
+
+
+def testSubSpace2D():
+    ssp = SubSpace(dom2D, [1, 0.], [0., 0.], lengths[:2])
+    ssp.discretize(topo2D)
+    ind = ssp.ind[topo2D][0]
+    (ix, iy) = topo2D.mesh.indices([-0.5, 0.2])
+    assert ind[ix, iy]
+
+
+def testSubSpace3D():
+    ssp = SubSpace(dom, [0, 1, 0], [0., 0., 0.], lengths)
+    ssp.discretize(topo)
+    ind = ssp.ind[topo][0]
+    (ix, iy, iz) = topo.mesh.indices([0.3, -0.1, 0.2])
+    assert ind[ix, iy, iz]
+    assert ind.flags.f_contiguous is CHECK_F_CONT
+
+
+def testSubPlane2D():
+    ssp = SubPlane(dom2D, [1, 0], [0., 0.], lengths[:2])
+    ssp.discretize(topo2D)
+    ind = ssp.ind[topo2D][0]
+    ll = np.sum(scald2D[ind]) * h2d[1]
+    rll = rlengths2d[1]
+    assert abs(ll - rll) < tol
+    assert ind.flags.f_contiguous is CHECK_F_CONT
+
+
+def testSubPlane3D():
+    ssp = SubPlane(dom, [0, 1, 0], [0., 0., 0.], lengths)
+    ssp.discretize(topo)
+    ind = ssp.ind[topo][0]
+    surf = np.sum(scald[ind]) * ds
+    rsurf = rlengths[0] * rlengths[2]
+    assert abs(surf - rsurf) < tol
+    assert ind.flags.f_contiguous is CHECK_F_CONT
+
+
+def testPlaneBC2D():
+    bc = PlaneBoundaries(dom2D, 1, thickness=0.2)
+    bc.discretize(topo2D)
+    ind = bc.ind[topo2D][0]
+    (ix, iy) = topo2D.mesh.indices([-0.5, - Ly * 0.5])
+    assert ind[ix, iy]
+    (ix, iy) = topo2D.mesh.indices([-0.5, Ly * 0.5 - 2 * h2d[1]])
+    assert ind[ix, iy]
+    assert ind.flags.f_contiguous is CHECK_F_CONT
+
+
+def testPlaneBC3D():
+    bc = PlaneBoundaries(dom, 1, thickness=0.2)
+    bc.discretize(topo)
+    ind = bc.ind[topo][0]
+    (ix, iy, iz) = topo.mesh.indices([-0.5, -Ly * 0.5, 0.3])
+    assert ind[ix, iy, iz]
+    (ix, iy, iz) = topo.mesh.indices([-0.5, Ly * 0.5 - 2 * h2d[1], 0.3])
+    assert ind[ix, iy, iz]
+    assert ind.flags.f_contiguous is CHECK_F_CONT
+
+
+def testControlBox2D():
+    lx = 10 * h3d[0]
+    ly = 22 * h3d[1]
+
+    cb = ControlBox(dom2D, [-0.5, -0.5], [lx, ly])
+    cb.discretize(topo2D)
+    surf = cb.integrate(scal2D, topo2D)
+    rsurf = lx * ly
+    assert abs(surf - rsurf) < tol
+    assert cb.ind[topo2D][0].flags.f_contiguous is CHECK_F_CONT
+
+
+def testControlBox3D():
+    ll = np.asarray([0] * 3)
+    ll[0] = 10 * h3d[0]
+    ll[1] = 22 * h3d[1]
+    ll[2] = 51 * h3d[2]
+
+    cb = ControlBox(dom, [0.5, -0.5, -0.5], ll)
+    cb.discretize(topo)
+    vol = cb.integrate(scal, topo)
+    rvol = np.prod(ll)
+    assert abs(rvol - vol) < tol
+
+    vol = cb.integrate(scal, topo, useSlice=False)
+    assert abs(rvol - vol) < tol
+    ind = np.asarray([0, 1, 2])
+    for i in xrange(3):
+        surfUp = cb.integrateOnSurface(scal, topo, normalDir=i, up=True)
+        surfDown = cb.integrateOnSurface(scal, topo, normalDir=i, up=False)
+        j = np.where(ind != i)
+        sref = np.prod(ll[j])
+        assert abs(surfUp - sref) < tol
+        assert abs(surfDown - sref) < tol
+
+    assert cb.ind[topo][0].flags.f_contiguous is CHECK_F_CONT
+
+
+def testControlBoxSphere():
+    lx = 1.5
+    ly = 1.5
+    lz = 1.5
+    rad = 0.2
+    cb = ControlBox(dom, [-0.75, -0.75, -0.75], [lx, ly, lz])
+    layer = 2 * h3d[0]
+    sphere = Sphere(dom, position=[0., 0., 0.],
+                    radius=rad, porousLayers=[layer])
+    cb.sub(sphere, topo)
+    ind = cb.indReduced[topo][0]
+    (ix, iy, iz) = topo.mesh.indices([0.1, 0.0, 0.])
+    assert not ind[ix, iy, iz]
+    (ix, iy, iz) = topo.mesh.indices([0.3, 0.0, 0.])
+    assert ind[ix, iy, iz]
+    assert ind.flags.f_contiguous is CHECK_F_CONT
diff --git a/HySoP/hysop/f2py/fftw2py.f90 b/HySoP/hysop/f2py/fftw2py.f90
index 35dac8fc3..edf1c4a99 100755
--- a/HySoP/hysop/f2py/fftw2py.f90
+++ b/HySoP/hysop/f2py/fftw2py.f90
@@ -250,7 +250,7 @@ contains
     call filter_curl_3d()
     
     call c2r_3d(omega_x,omega_y,omega_z, ghosts_vort)
-
+    
   end subroutine solve_curl_3d
 
 end module fftw2py
diff --git a/HySoP/hysop/fields/continuous.py b/HySoP/hysop/fields/continuous.py
index 79cedbfc3..46e33e45e 100644
--- a/HySoP/hysop/fields/continuous.py
+++ b/HySoP/hysop/fields/continuous.py
@@ -5,7 +5,6 @@ Continuous variable description.
 """
 from parmepy.constants import debug
 from parmepy.fields.discrete import DiscreteField
-#from parmepy.fields.vector import VectorField
 from parmepy.mpi import main_rank
 
 
@@ -123,18 +122,20 @@ class Field(object):
         self.doVectorize = doVectorize
 
     @debug
-    def initialize(self, currentTime=0.):
+    def initialize(self, currentTime=0., topo=None):
         """
         Initialize all the discrete fields associated to this continuous field.
         using the formula set during construction or with setFormula method.
         If formula is not set, field values are set to zero.
-        @param currentTime : current time (default set to 0.0)
+        @param[in] currentTime current time
+        @param[in] topo a parmepy.mpi.Cartesian topology on which
+         the field must be initialized
         """
-        if self.topoInit is None:
-            self.topoInit = self.discreteFields.values()[0].topology
-        self.discreteFields[self.topoInit].initialize(
-            self._formula, self.doVectorize, currentTime,
-            *self.extraParameters)
+        if topo is None:
+            topo = self.topoInit
+        df = self.discretization(topo)
+        df.initialize(self._formula, self.doVectorize, currentTime,
+                      *self.extraParameters)
 
     def value(self, *pos):
         """
diff --git a/HySoP/hysop/mpi/mesh.py b/HySoP/hysop/mpi/mesh.py
index c585d7bd9..a0e7c63d7 100644
--- a/HySoP/hysop/mpi/mesh.py
+++ b/HySoP/hysop/mpi/mesh.py
@@ -4,6 +4,7 @@ Cartesian mesh class for local mesh.
 
 """
 from parmepy.constants import np, PARMES_INTEGER, debug
+import parmepy.tools.numpywrappers as npw
 
 
 class SubMesh(object):
@@ -39,28 +40,28 @@ class SubMesh(object):
         Proc no2:
         resolution: (8,)
         global start/end : 4/7
-        local start : 2/5
+        local start/end : 2/5
         \endcode
         """
         ## Topology that creates (and owns) this mesh
         self._topology = topo
-        ## Local resolution of this mesh, including ghost points
-        self.resolution = resolution
+        ## Local resolution of this mesh, INCLUDING ghost points
+        self.resolution = np.asarray(resolution, dtype=PARMES_INTEGER)
         ## Dimension of the mesh
         self.dim = self.resolution.size
         ## index of the lowest "computed" point of this mesh
         ## (in each dir) in the global mesh
-        self.global_start = g_start
+        self.global_start = np.asarray(g_start, dtype=PARMES_INTEGER)
         ## index of the upper point (in each dir), global mesh
         self.global_end = self.global_start + self.resolution - 1\
             - 2 * topo.ghosts
         ## Mesh step size
-        self.space_step = topo.domain.length / \
-            (topo.globalMeshResolution - 1)
+        self.space_step = npw.asarray(topo.domain.length /
+                                      (topo.globalMeshResolution - 1))
         ## Mesh local indices, only for "computed" points
         ## (i.e. excluding ghosts)
         self.local_start = topo.ghosts.copy()
-        self.local_end = self.resolution.copy() - topo.ghosts[:] - 1
+        self.local_end = self.resolution - topo.ghosts - 1
         ## List of indices for computational points (i.e. without ghosts)
         ## usage : field[iCompute] returns the array field for all indices
         ## excluding ghost points.
@@ -76,7 +77,7 @@ class SubMesh(object):
         ## and if origin is on ghosts or "real" points.
         self.origin = topo.domain.origin.copy()
         self.origin[:] += self.space_step[:] * \
-            (self.global_start[:].astype(PARMES_INTEGER) - topo.ghosts[:])
+            (self.global_start[:] - topo.ghosts[:])
 
         self.end = self.origin + self.space_step * (self.resolution - 1)
         if self.dim == 1:
@@ -97,6 +98,21 @@ class SubMesh(object):
                              self.resolution[2])[np.newaxis, np.newaxis, :]
             self.coords = tuple([cx, cy, cz])
 
+    def indices(self, tab):
+        """
+        returns indices of the point of coordinates (close to) tab = x, y, z
+        If (x, y, z) is not a grid point, it returns the closest grid point.
+        """
+        ind = []
+        for d in xrange(self.dim):
+            cond = np.where(abs(self.coords[d] - tab[d])
+                            < (self.space_step[d] * 0.5))
+            if cond[0].size > 0:
+                ind.append(cond[d][0])
+            else:
+                return False
+        return tuple(ind)
+
     def __str__(self):
         """ Sub mesh display """
         s = 'Coords (topo):' + str(self._topology.proc_coords[:])
diff --git a/HySoP/hysop/numerics/differential_operations.py b/HySoP/hysop/numerics/differential_operations.py
index 28c9ded86..f335b6b37 100755
--- a/HySoP/hysop/numerics/differential_operations.py
+++ b/HySoP/hysop/numerics/differential_operations.py
@@ -6,7 +6,7 @@ Library of functions used to perform classical vector calculus
 """
 from parmepy.constants import debug, XDIR, YDIR, ZDIR
 from abc import ABCMeta, abstractmethod
-from parmepy.numerics.finite_differences import FD_C_4, FD_C_2
+from parmepy.numerics.finite_differences import FD_C_4, FD2_C_2, FD_C_2
 import numpy as np
 
 
@@ -20,13 +20,15 @@ class DifferentialOperation(object):
     @debug
     @abstractmethod
     def __init__(self, topo, work):
+        if work is None:
+            work = []
         self._work = work
         for wk in self._work:
             assert wk.shape == tuple(topo.mesh.resolution)
         self._dim = topo.domain.dimension
 
     @staticmethod
-    def getWorkLengths(nb_components=None, domain_dim=None):
+    def getWorkLengths(nb_components=None, domain_dim=None, fd_method=None):
         """
         Compute the number of required work arrays for this method.
         @param nb_components : number of components of the
@@ -39,7 +41,7 @@ class DifferentialOperation(object):
 
 class Curl(DifferentialOperation):
     """
-    Computes \f$ \nabla \ times (f) \f$, f being a vector field.
+    Computes \f$ \nabla \times V \f$, V being a vector field.
     """
     def __init__(self, topo, work, method=FD_C_4):
 
@@ -68,7 +70,7 @@ class Curl(DifferentialOperation):
         self.fd_scheme.computeIndices(self._indices)
 
     @staticmethod
-    def getWorkLengths(nb_components=None, domain_dim=None):
+    def getWorkLengths(nb_components=None, domain_dim=None, fd_method=None):
         return 1
 
     def __call__(self, variable, result):
@@ -136,9 +138,9 @@ class DivV(DifferentialOperation):
             self.fd_scheme = FD_C_4(topo.mesh.space_step)
             # connect to finite differences function ...
             if fd_optim is 'CAA':
-                self.fcall = self.FDCentral4
-            else:
                 self.fcall = self.FDCentral4_CAA
+            else:
+                self.fcall = self.FDCentral4
             # check ghosts ... must be updated when
             # other fd schemes will be implemented.
             assert (topo.ghosts >= 2).all(),\
@@ -152,14 +154,14 @@ class DivV(DifferentialOperation):
         self.fd_scheme.computeIndices(self._indices)
 
     @staticmethod
-    def getWorkLengths(nb_components=None, domain_dim=None, fd_optim=None):
+    def getWorkLengths(nb_components=None, domain_dim=None, fd_method=None):
         """
         fd_method = FD_C_4 --> call fd_compute
         fd_method = FD_C_4_CAA --> call fd.compute_and_add
         """
         if domain_dim == 1 or nb_components == 1:
             return 1
-        elif fd_optim is not None:
+        elif fd_method is not None:
             return 1
         else:
             return 2
@@ -229,15 +231,15 @@ class DivV(DifferentialOperation):
         return result
 
 
-class DivT(DifferentialOperation):
+class DivWV(DifferentialOperation):
     """
     Computes \f$ \nabla.(W.Vx, W.Vy, W.Vz) \f$,
-    \f$w\f$ and V some vector fields.
+    \f$W\f$ and V some vector fields.
 
     Methods :
     1 - FD_C_4 (default) : 4th order, centered finite differences,
     based on fd.compute.
-    init : func = DivT(topo, work)
+    init : func = DivV(topo, work)
     call : result = func(var1, var2, result)
     If fd_optim == CAA, based on fd.compute_and_add.
 
@@ -282,6 +284,34 @@ class DivT(DifferentialOperation):
         return result
 
 
+class Laplacian(DifferentialOperation):
+    """
+    Computes  \f$ \Delta \rho, \rho \f$ a scalar field.
+    """
+    def __init__(self, topo, work=None, method=FD2_C_2):
+
+        DifferentialOperation.__init__(self, topo, work)
+        if method is FD2_C_2:
+            # - 2nd ordered FD,
+            self.fd_scheme = FD2_C_2(topo.mesh.space_step)
+        else:
+            raise ValueError("FD scheme Not yet implemented")
+        self._indices = topo.mesh.iCompute
+        self.fd_scheme.computeIndices(self._indices)
+        # dimension of the fields
+        self._dim = topo.domain.dimension
+
+    @staticmethod
+    def getWorkLengths(nb_components=None, domain_dim=None, fd_method=None):
+        return 0
+
+    def __call__(self, var, result):
+        self.fd_scheme.compute(var, 0, result)
+        for cdir in xrange(1, self._dim):
+            self.fd_scheme.compute_and_add(var, cdir, result)
+        return result
+
+
 class DivStressTensor3D(DifferentialOperation):
     """
     Computes the 3D stress tensor T defined by 
@@ -313,6 +343,10 @@ class DivStressTensor3D(DifferentialOperation):
             self.fcall = self.FDCentral2
             self.fd_scheme = FD_C_2((topo.mesh.space_step))
 
+    @staticmethod
+    def getWorkLengths(nb_components=None, domain_dim=None, fd_method=None):
+        return 0
+
     def __call__(self, var, result, ind):
         assert result[0].shape == var[0].shape
         return self.fcall(var, result, ind)
@@ -381,7 +415,7 @@ class DivStressTensor3D(DifferentialOperation):
 
 class GradS(DifferentialOperation):
     """
-    Computes \f$ \nabla(\rho) \f$, \f$ \rho\f$ a scalar field
+    Computes \f$ \nabla\rho \f$, \f$ \rho\f$ a scalar field
     """
     def __init__(self, topo, method=FD_C_4):
 
@@ -419,7 +453,7 @@ class GradS(DifferentialOperation):
 
 class GradV(DifferentialOperation):
     """
-    Computes \f$ [\nabla(V)] \f$, with
+    Computes \f$ \nabla V \f$, with
     \f$V\f$ a vector field.
     """
     def __init__(self, topo, method=FD_C_4):
@@ -472,7 +506,7 @@ class GradVxW(DifferentialOperation):
         self.fd_scheme.computeIndices(self._indices)
 
     @staticmethod
-    def getWorkLengths(nb_components=None, domain_dim=None):
+    def getWorkLengths(nb_components=None, domain_dim=None, fd_method=None):
         """
         Compute the number of required work arrays for this method.
         @param nb_components : number of components of the
diff --git a/HySoP/hysop/numerics/finite_differences.py b/HySoP/hysop/numerics/finite_differences.py
index e1245fc2e..a260da267 100644
--- a/HySoP/hysop/numerics/finite_differences.py
+++ b/HySoP/hysop/numerics/finite_differences.py
@@ -3,39 +3,48 @@
 Finite difference schemes description
 """
 from abc import ABCMeta, abstractmethod
-from parmepy.constants import debug, PARMES_REAL
+from parmepy.constants import debug
+import parmepy.tools.numpywrappers as npw
 import numpy as np
 
 
 class FiniteDifference(object):
-    __metaclass__ = ABCMeta
 
-    @debug
-    def __new__(cls, *args, **kw):
-        return object.__new__(cls, *args, **kw)
+    """
+    Describe and apply a finite difference scheme to compute
+    1st or second derivative of a variable saved in a numpy array.
+    usage :\n
+    1 - Initialization
+    - declare the scheme (by choosing the required FD class)
+    and initialize it with a step from domain discretisation description
 
-    @abstractmethod
-    def __init__(self, *args):
-        """
-        Build a finite difference scheme
-        """
+    \code
+    >>> step = topo.mesh_space_step
+    >>> scheme = FD_C_4(step)
+    \endcode
 
+    - Compute scheme indices, based on topology grid points :
 
-class FD_C_4(FiniteDifference):
-    """
-    Centered scheme, 4th order.
-
-    usage :
-    # a step from domain discretisation description
-    step = topo.mesh_space_step
-    # declare scheme
-    scheme = FD_C_4(step)
-    # init scheme using local grid points indices
-    scheme.computeIndices(topo.mesh.iCompute)
-    # Then compute : result = fd(tab)
-    scheme.compute(tab, dir, result)
-    # or : result = result + fd(tab)
-    scheme.compute_and_add()
+    \code
+    >>> scheme.computeIndices(topo.mesh.iCompute)
+    \endcode
+
+    2 - Computation
+
+    For a given numpy array (obviously discretized on the topo
+    used to compute indices), to compute
+    \f$ result = \frac{\partial tab}{\partial dir}\f$ :
+
+    \code
+    >>> scheme.compute(tab, dir, result)
+    \endcode
+
+    or :
+    \f$ result = result + \frac{\partial tab}{\partial dir}\f$
+
+    \code
+    >>> scheme.compute_and_add(tab, dir, result)
+    \endcode
 
     Notes FP :
     Compute method is much more performant than compute and add
@@ -45,126 +54,122 @@ class FD_C_4(FiniteDifference):
     See Global_tests/testPerfAndMemForFD_and_div.py for perf results.
     """
 
+    __metaclass__ = ABCMeta
+
+    @debug
+    def __new__(cls, *args, **kw):
+        return object.__new__(cls, *args, **kw)
+
+    @abstractmethod
     def __init__(self, step):
         """
-        Constructor
-        @param step : a numpy array with grid step size in each dir.
+        Build a finite difference scheme
+        @param step : resolution of the mesh
+        (list or numpy array of int)
         """
+        ## List of slices representing the mesh on which fd scheme is applied
+        self.indices = []
+        self._step = np.asarray(step)
         #  dim of the field on which scheme will be applied
         # (i.e dim of the domain)
-        step = np.asarray(step)
         self._dim = step.size
+
+    @abstractmethod
+    def computeIndices(self, indices):
+        """
+        @param indices : a list of slices (see for example
+        parmepy.mpi.mesh.Mesh iCompute) that represent the local
+        mesh on which finite-differences will be applied.
+        """
+
+    @abstractmethod
+    def compute(self, tab, cdir, result):
+        """
+        @param[in] tab : a numpy array
+        @param[in] cdir : direction of differentiation
+        @param[in, out] result : numpy array to save result
+
+        Apply finite different scheme on the variable
+        in numpy array var, for direction cdir and save
+        the result in ... result.
+        """
+
+    @abstractmethod
+    def compute_and_add(self, tab, cdir, result):
+        """
+        @param[in] tab : a numpy array
+        @param[in] cdir : direction of differentiation
+        @param[in, out] result : numpy array to save result
+
+        Apply finite different scheme on the variable
+        in numpy array var, for direction cdir and add
+        the result into result tab.
+        Note tab == result is allowed.
+
+        """
+
+
+class FD_C_2(FiniteDifference):
+    """
+    1st derivative, centered scheme, 2nd order.
+
+    """
+
+    def __init__(self, step):
+        FiniteDifference.__init__(self, step)
+
         # list of indices for index - 1
         self._m1 = []
-        # for index - 2
-        self._m2 = []
         # index + 1
         self._a1 = []
-        # index + 2
-        self._a2 = []
-        # FD scheme coefficients
-        self._coeff = np.asarray(1. / (12. * step), dtype=PARMES_REAL)
+        self._coeff = npw.asarray(1. / (2. * self._step))
 
     def computeIndices(self, indices):
 
         self.indices = indices
         self._m1 = []
-        self._m2 = []
         self._a1 = []
-        self._a2 = []
         for dim in xrange(self._dim):
             self._m1.append(list(self.indices))
             self._m1[dim][dim] = slice(self.indices[dim].start - 1,
                                        self.indices[dim].stop - 1,
                                        self.indices[dim].step)
-            self._m2.append(list(self.indices))
-            self._m2[dim][dim] = slice(self.indices[dim].start - 2,
-                                       self.indices[dim].stop - 2,
-                                       self.indices[dim].step)
             self._a1.append(list(self.indices))
             self._a1[dim][dim] = slice(self.indices[dim].start + 1,
                                        self.indices[dim].stop + 1,
                                        self.indices[dim].step)
-            self._a2.append(list(self.indices))
-            self._a2[dim][dim] = slice(self.indices[dim].start + 2,
-                                       self.indices[dim].stop + 2,
-                                       self.indices[dim].step)
-        #return self._m2, self._m1, self._a1, self._a2
 
     def compute(self, tab, cdir, result):
-        """
-        Compute \f$ \frac{\partial tab()}{\partial cdir} \f$ using fd scheme,
-        The result is saved in input/ouput parameter result.
-        @param tab : a numpy array
-        @param cdir : direction of differentiation
-        @param result : input/output numpy array to save the resulting data.
-        """
         assert result is not tab
         assert result.__class__ is np.ndarray
         assert tab.__class__ is np.ndarray
+        # FD scheme coefficient
+
         result[self.indices] = tab[self._a1[cdir]]
         result[self.indices] -= tab[self._m1[cdir]]
-        result[self.indices] *= 8
-        result[self.indices] += tab[self._m2[cdir]]
-        result[self.indices] -= tab[self._a2[cdir]]
         result[self.indices] *= self._coeff[cdir]
 
     def compute_and_add(self, tab, cdir, result):
-        """
-        Compute \f$ \frac{\partial tab()}{\partial cdir} \f$ using fd scheme,
-        The result is ADDED to input/ouput parameter result.
-        @param tab : a numpy array
-        @param cdir : direction of differentiation
-        @param result : input/output numpy array to save the resulting data.
-
-        Note FP : tab == result is allowed.
-        """
         assert result.__class__ is np.ndarray
         assert tab.__class__ is np.ndarray
-        result[self.indices] += self._coeff[cdir] * (8 *
-                                                     (tab[self._a1[cdir]] -
-                                                      tab[self._m1[cdir]]) +
-                                                     tab[self._m2[cdir]] -
-                                                     tab[self._a2[cdir]])
+        # FD scheme coefficient
+        result[self.indices] += self._coeff[cdir] * (tab[self._a1[cdir]] -
+                                                     tab[self._m1[cdir]])
 
-class FD_C_2(FiniteDifference):
+
+class FD2_C_2(FiniteDifference):
     """
-    Centered scheme, 2tnd order.
-
-    usage :
-    # a step from domain discretisation description
-    step = topo.mesh_space_step
-    # declare scheme
-    scheme = FD_C_2(step)
-    # init scheme using local grid points indices
-    scheme.computeIndices(topo.mesh.iCompute)
-    # Then compute : result = fd(tab)
-    scheme.compute(tab, dir, result)
-    # or : result = result + fd(tab)
-    scheme.compute_and_add()
+    Second derivative, centered scheme, 2nd order.
     """
 
     def __init__(self, step):
-        """
-        Constructor
-        @param step : a numpy array with grid step size in each dir.
-        """
-        #  dim of the field on which scheme will be applied
-        # (i.e dim of the domain)
-        self._step = np.asarray(step)
-        self._dim = step.size
+        FiniteDifference.__init__(self, step)
+
         # list of indices for index - 1
         self._m1 = []
         # index + 1
         self._a1 = []
-        # index1 - 1, index2 - 1
-        self._m1m1 = []
-        # index1 - 1, index2 + 1
-        self._m1a1 = []
-        # index1 + 1, index2 + 1
-        self._a1m1 = []
-        # index1 + 1, index2 + 1
-        self._a1a1 = []
+        self._coeff = npw.asarray(1. / (self._step * self._step))
 
     def computeIndices(self, indices):
 
@@ -181,200 +186,88 @@ class FD_C_2(FiniteDifference):
                                        self.indices[dim].stop + 1,
                                        self.indices[dim].step)
 
-    def computeIndices_crossed(self, indices):
-
-        self.indices = indices
-        self._m1m1 = []
-        self._m1a1 = []
-        self._a1m1 = []
-        self._a1a1 = []
-
-        for dim in xrange(self._dim):
-            if self._dim == 2:
-                cdir1 = 0
-                cdir2 = 1
-            elif self._dim == 3:
-                if dim==0 :
-                    cdir1 = 0
-                    cdir2 = 1
-                elif dim==1 :
-                    cdir1 = 1
-                    cdir2 = 2
-                else :
-                    cdir1 = 0
-                    cdir2 = 2
-            else:
-                raise ValueError("crossed derivatives are" +\
-                                 " incompatible with 1D problems")
-            self._m1m1.append(list(self.indices))
-            self._m1m1[dim][cdir1] = slice(self.indices[cdir1].start - 1,
-                                           self.indices[cdir1].stop - 1,
-                                           self.indices[cdir1].step)
-            self._m1m1[dim][cdir2] = slice(self.indices[cdir2].start - 1,
-                                           self.indices[cdir2].stop - 1,
-                                           self.indices[cdir2].step)
-
-            self._m1a1.append(list(self.indices))
-            self._m1a1[dim][cdir1] = slice(self.indices[cdir1].start - 1,
-                                           self.indices[cdir1].stop - 1,
-                                           self.indices[cdir1].step)
-            self._m1a1[dim][cdir2] = slice(self.indices[cdir2].start + 1,
-                                           self.indices[cdir2].stop + 1,
-                                           self.indices[cdir2].step)
-
-            self._a1m1.append(list(self.indices))
-            self._a1m1[dim][cdir1] = slice(self.indices[cdir1].start + 1,
-                                           self.indices[cdir1].stop + 1,
-                                           self.indices[cdir1].step)
-            self._a1m1[dim][cdir2] = slice(self.indices[cdir2].start - 1,
-                                           self.indices[cdir2].stop - 1,
-                                           self.indices[cdir2].step)
-
-            self._a1a1.append(list(self.indices))
-            self._a1a1[dim][cdir1] = slice(self.indices[cdir1].start + 1,
-                                           self.indices[cdir1].stop + 1,
-                                           self.indices[cdir1].step)
-            self._a1a1[dim][cdir2] = slice(self.indices[cdir2].start + 1,
-                                           self.indices[cdir2].stop + 1,
-                                           self.indices[cdir2].step)
-
-    def compute_1st_deriv(self, tab, cdir, result):
-        """
-        Compute \f$ \frac{\partial tab()}{\partial cdir} \f$ 
-        using 2nd order fd scheme.
-        The result is saved in input/ouput parameter result.
-        @param tab : a numpy array
-        @param cdir : direction of differentiation
-        @param result : input/output numpy array to save the resulting data.
-        """
-        assert result is not tab
-        assert result.__class__ is np.ndarray
-        assert tab.__class__ is np.ndarray
-        # FD scheme coefficient
-        coeff = np.asarray(1. / (2. * self._step), dtype=PARMES_REAL)
-        result[self.indices] = tab[self._a1[cdir]]
-        result[self.indices] -= tab[self._m1[cdir]]
-        result[self.indices] *= coeff[cdir]
-
-    def compute_2nd_deriv(self, tab, cdir, result):
-        """
-        Compute \f$ \frac{\partial^2 tab()}{\partial cdir^2} \f$ 
-        using 2nd order fd scheme.
-        The result is saved in input/ouput parameter result.
-        @param tab : a numpy array
-        @param cdir : direction of differentiation
-        @param result : input/output numpy array to save the resulting data.
-        """
+    def compute(self, tab, cdir, result):
         assert result is not tab
         assert result.__class__ is np.ndarray
         assert tab.__class__ is np.ndarray
         # FD scheme coefficient
-        coeff = np.asarray(1. / (self._step * self._step), dtype=PARMES_REAL)
         result[self.indices] = tab[self.indices]
         result[self.indices] *= -2
         result[self.indices] += tab[self._a1[cdir]]
         result[self.indices] += tab[self._m1[cdir]]
-        result[self.indices] *= coeff[cdir]
+        result[self.indices] *= self._coeff[cdir]
 
-    def compute_2nd_crossed_deriv(self, tab, cdir1, cdir2, result):
-        """
-        Compute \f$ \frac{\partial^2 tab()}{\partial cdir1 cdir2 \f$ 
-        using 2nd order fd scheme.
-        The result is saved in input/ouput parameter result.
-        @param tab : a numpy array
-        @param cdir : direction of differentiation
-        @param result : input/output numpy array to save the resulting data.
-        """
-        assert result is not tab
-        assert result.__class__ is np.ndarray
-        assert tab.__class__ is np.ndarray
-        if (cdir1==0 and cdir2==1) or (cdir1==1 and cdir2==0):
-            cdir = 0
-        elif (cdir1==1 and cdir2==2) or (cdir1==2 and cdir2==1):
-            cdir = 1
-        else :
-            cdir = 2
-
-        result[self.indices] = tab[self._m1m1[cdir]]
-        result[self.indices] += tab[self._a1a1[cdir]]
-        result[self.indices] -= tab[self._a1m1[cdir]]
-        result[self.indices] -= tab[self._m1a1[cdir]]
-        result[self.indices] *= 1. / (4. * self._step[cdir1] * self._step[cdir2])
-
-    def compute_and_add_1st_deriv(self, tab, cdir, result):
-        """
-        Compute \f$ \frac{\partial tab()}{\partial cdir} \f$ 
-        using 2nd order fd scheme.
-        The result is ADDED to input/ouput parameter result.
-        @param tab : a numpy array
-        @param cdir : direction of differentiation
-        @param result : input/output numpy array to save the resulting data.
-
-        Note FP : tab == result is allowed.
-        """
+    def compute_and_add(self, tab, cdir, result):
         assert result.__class__ is np.ndarray
         assert tab.__class__ is np.ndarray
         # FD scheme coefficient
-        coeff = np.asarray(1. / (2. * self._step), dtype=PARMES_REAL)
-        result[self.indices] += coeff[cdir] * (tab[self._a1[cdir]] -
-                                               tab[self._m1[cdir]])
 
-    def compute_and_mult_1st_deriv(self, tab, cdir, result):
-        """
-        Compute \f$ \frac{\partial tab()}{\partial cdir} \f$ 
-        using 2nd order fd scheme.
-        The result is MULTIPLIED to input/ouput parameter result.
-        @param tab : a numpy array
-        @param cdir : direction of differentiation
-        @param result : input/output numpy array to save the resulting data.
-
-        Note FP : tab == result is allowed.
-        """
-        assert result.__class__ is np.ndarray
-        assert tab.__class__ is np.ndarray
-        # FD scheme coefficient
-        coeff = np.asarray(1. / (2. * self._step), dtype=PARMES_REAL)
-        result[self.indices] *= coeff[cdir] * (tab[self._a1[cdir]] -
-                                               tab[self._m1[cdir]])
+        result[self.indices] += self._coeff[cdir] * (-2 * tab[self.indices] +
+                                                     tab[self._m1[cdir]] +
+                                                     tab[self._a1[cdir]])
 
-    def compute_and_add_2nd_deriv(self, tab, cdir, result):
-        """
-        Compute \f$ \frac{\partial^2 tab()}{\partial cdir^2} \f$ 
-        using 2nd order fd scheme.
-        The result is ADDED to input/ouput parameter result.
-        @param tab : a numpy array
-        @param cdir : direction of differentiation
-        @param result : input/output numpy array to save the resulting data.
-
-        Note FP : tab == result is allowed.
-        """
+
+class FD_C_4(FiniteDifference):
+    """
+    1st derivative, centered scheme, 4th order.
+    """
+
+    def __init__(self, step):
+        FiniteDifference.__init__(self, step)
+
+        # list of indices for index - 1
+        self._m1 = []
+        # for index - 2
+        self._m2 = []
+        # index + 1
+        self._a1 = []
+        # index + 2
+        self._a2 = []
+        # FD scheme coefficients
+        self._coeff = npw.asarray(1. / (12. * step))
+
+    def computeIndices(self, indices):
+
+        self.indices = indices
+        self._m1 = []
+        self._m2 = []
+        self._a1 = []
+        self._a2 = []
+        for dim in xrange(self._dim):
+            self._m1.append(list(self.indices))
+            self._m1[dim][dim] = slice(self.indices[dim].start - 1,
+                                       self.indices[dim].stop - 1,
+                                       self.indices[dim].step)
+            self._m2.append(list(self.indices))
+            self._m2[dim][dim] = slice(self.indices[dim].start - 2,
+                                       self.indices[dim].stop - 2,
+                                       self.indices[dim].step)
+            self._a1.append(list(self.indices))
+            self._a1[dim][dim] = slice(self.indices[dim].start + 1,
+                                       self.indices[dim].stop + 1,
+                                       self.indices[dim].step)
+            self._a2.append(list(self.indices))
+            self._a2[dim][dim] = slice(self.indices[dim].start + 2,
+                                       self.indices[dim].stop + 2,
+                                       self.indices[dim].step)
+
+    def compute(self, tab, cdir, result):
+        assert result is not tab
         assert result.__class__ is np.ndarray
         assert tab.__class__ is np.ndarray
-        # FD scheme coefficient
-        coeff = np.asarray(1. / (self._step * self._step), dtype=PARMES_REAL)
-        result[self.indices] += coeff[cdir] * (-2 * tab[self.indices] +
-                                               tab[self._m1[cdir]] +
-                                               tab[self._a1[cdir]])
+        result[self.indices] = tab[self._a1[cdir]]
+        result[self.indices] -= tab[self._m1[cdir]]
+        result[self.indices] *= 8
+        result[self.indices] += tab[self._m2[cdir]]
+        result[self.indices] -= tab[self._a2[cdir]]
+        result[self.indices] *= self._coeff[cdir]
 
-    def compute_and_add_2nd_crossed_deriv(self, tab, cdir1, cdir2, result):
-        """
-        Compute \f$ \frac{\partial^2 tab()}{\partial cdir1 cdir2 \f$ 
-        using 2nd order fd scheme.
-        The result is ADDED to input/ouput parameter result.
-        @param tab : a numpy array
-        @param cdir : direction of differentiation
-        @param result : input/output numpy array to save the resulting data.
-
-        Note FP : tab == result is allowed.
-        """
+    def compute_and_add(self, tab, cdir, result):
         assert result.__class__ is np.ndarray
         assert tab.__class__ is np.ndarray
-        if (cdir1==0 and cdir2==1) or (cdir1==1 and cdir2==0):
-            cdir = 0
-        elif (cdir1==1 and cdir2==2) or (cdir1==2 and cdir2==1):
-            cdir = 1
-        else :
-            cdir = 2
-        result[self.indices] += 1. / (4. * self._step[cdir1] * self._step[cdir2]) * \
-                                (tab[self._m1m1[cdir]] + tab[self._a1a1[cdir]] -
-                                 tab[self._a1m1[cdir]] - tab[self._m1a1[cdir]])
+        result[self.indices] += self._coeff[cdir] * (8 *
+                                                     (tab[self._a1[cdir]] -
+                                                      tab[self._m1[cdir]]) +
+                                                     tab[self._m2[cdir]] -
+                                                     tab[self._a2[cdir]])
+
diff --git a/HySoP/hysop/numerics/tests/test_diffOp.py b/HySoP/hysop/numerics/tests/test_diffOp.py
index 812d55174..ecfa40007 100755
--- a/HySoP/hysop/numerics/tests/test_diffOp.py
+++ b/HySoP/hysop/numerics/tests/test_diffOp.py
@@ -92,9 +92,9 @@ def testDivWV():
     refd = ref.discreteFields[topo]
     memshape = vd.data[0].shape
     # Div operator
-    lwork = diffop.DivT.getWorkLengths()
+    lwork = diffop.DivV.getWorkLengths()
     work = [npw.zeros(memshape) for i in xrange(lwork)]
-    divOp = diffop.DivT(topo, work)
+    divOp = diffop.DivV(topo, work)
     result = [npw.zeros(memshape) for i in xrange(3)]
     result = divOp(vd.data, wd.data, result)
 
@@ -126,6 +126,7 @@ def testGradVxW():
     for i in xrange(3):
         assert np.allclose(refd[i][ind], result[i][ind], rtol=errX)
 
+
 def testDivStressTensor():
     # Topologies
 #    topo1G = Cartesian(box, box.dimension, nbElem,
diff --git a/HySoP/hysop/operator/advection.py b/HySoP/hysop/operator/advection.py
index f63429fc4..024063ca6 100644
--- a/HySoP/hysop/operator/advection.py
+++ b/HySoP/hysop/operator/advection.py
@@ -10,7 +10,6 @@ from parmepy.methods_keys import Scales, TimeIntegrator, Interpolation,\
 from parmepy.numerics.integrators.runge_kutta2 import RK2
 from parmepy.numerics.interpolation import Linear
 from parmepy.numerics.remeshing import L2_1
-from parmepy.tools.timers import Timer
 from parmepy.fields.continuous import Field
 from parmepy.mpi import main_size, main_rank
 from parmepy.operator.redistribute import Redistribute
diff --git a/HySoP/hysop/operator/advection_dir.py b/HySoP/hysop/operator/advection_dir.py
index 0eb74e570..79581f799 100644
--- a/HySoP/hysop/operator/advection_dir.py
+++ b/HySoP/hysop/operator/advection_dir.py
@@ -139,15 +139,13 @@ class AdvectionDir(Operator):
         advectedDiscreteFields = [self.discreteFields[v]
                                   for v in self.variables
                                   if not v is self.velocity]
-
-        particles_advectedDiscreteFields = [
-            v.discretize(advectedDiscreteFields[0].topology)
-            for v in self.particle_fields]
+        topo = advectedDiscreteFields[0].topology
+        particles_advectedDiscreteFields = [v.discretize(topo)
+                                            for v in self.particle_fields]
         particles_positionsDiscreteField = None
         if self.particle_positions is not None:
-            particles_positionsDiscreteField = \
-                self.particle_positions.discretize(
-                advectedDiscreteFields[0].topology)
+            particles_positionsDiscreteField =\
+                self.particle_positions.discretize(topo)
 
         if self.method[Support].find('gpu_2k') >= 0:
             from parmepy.gpu.gpu_particle_advection_2k \
diff --git a/HySoP/hysop/operator/analytic.py b/HySoP/hysop/operator/analytic.py
index 6bf536336..fed8f12cc 100644
--- a/HySoP/hysop/operator/analytic.py
+++ b/HySoP/hysop/operator/analytic.py
@@ -68,7 +68,7 @@ class Analytic(Operator):
         self._isUpToDate = True
 
     @debug
-    def apply(self, simulation):
+    def apply(self, simulation=None):
         assert simulation is not None, \
             "Missing simulation value for computation."
 
@@ -79,9 +79,6 @@ class Analytic(Operator):
         for v in self.variables:
             v.initialize(simulation.time)
 
-    def addRedistributeRequirement(self, red):
-        self._redistributeRequirement.append(red)
-
     @debug
     def finalize(self):
         """
diff --git a/HySoP/hysop/operator/continuous.py b/HySoP/hysop/operator/continuous.py
index 1ed3a5558..4f56a4734 100644
--- a/HySoP/hysop/operator/continuous.py
+++ b/HySoP/hysop/operator/continuous.py
@@ -5,7 +5,6 @@ Interface common to all continuous operators.
 """
 from abc import ABCMeta, abstractmethod
 from parmepy.constants import debug
-from parmepy.mpi import main_rank
 import numpy as np
 from parmepy.tools.timers import Timer
 
@@ -32,8 +31,8 @@ class Operator(object):
 
     @debug
     @abstractmethod
-    def __init__(self, variables, method={},
-                 topo=None, ghosts=None, name_suffix=''):
+    def __init__(self, variables, method=None, topo=None, ghosts=None,
+                 name_suffix=''):
         """
         Build the operator.
         The only required parameter is a list of variables.
@@ -72,6 +71,8 @@ class Operator(object):
         ## variable velocity.
         self.discreteFields = {}
         ## The method used to discretize the operator.
+        if method is None:
+            method = {}
         self.method = method
         ## Number of points in the ghost layer
         if ghosts is not None:
@@ -102,7 +103,11 @@ class Operator(object):
         """
         return 0, 0
 
-    def setWorks(self, rwork=[], iwork=[]):
+    def setWorks(self, rwork=None, iwork=None):
+        if rwork is None:
+            rwork = []
+        if iwork is None:
+            iwork = []
         self.discreteOperator.setWorks(rwork, iwork)
 
     @abstractmethod
@@ -181,8 +186,3 @@ class EmptyOperator(object):
 
     def apply(self, *args, **kwargs):
         pass
-
-if __name__ == "__main__":
-    print __doc__
-    print "- Provided class : Operator"
-    print Operator.__doc__
diff --git a/HySoP/hysop/operator/differential.py b/HySoP/hysop/operator/differential.py
index f6b6c3c29..012be6791 100644
--- a/HySoP/hysop/operator/differential.py
+++ b/HySoP/hysop/operator/differential.py
@@ -22,9 +22,10 @@ class Differential(Operator):
 
     @debug
     def __init__(self, invar, outvar, resolutions,
-                 method={SpaceDiscretisation: FD_C_4, GhostUpdate: True},
-                 topo=None, ghosts=None):
+                 method=None, topo=None, ghosts=None):
 
+        if method is None:
+            method = {SpaceDiscretisation: FD_C_4, GhostUpdate: True}
         Operator.__init__(self, [invar, outvar], method, topo=topo,
                           ghosts=ghosts)
         ## input variable
@@ -37,7 +38,7 @@ class Differential(Operator):
         if self.method[SpaceDiscretisation] is fftw2py:
             self.resolution = self.resolutions[self.outvar]
             assert self.resolution == self.resolutions[self.invar],\
-                'Poisson error : for fftw, all variables must have\
+                'for fftw method, all variables must have\
                 the same global resolution.'
         self.output = [outvar]
         self.input = [invar]
@@ -45,14 +46,15 @@ class Differential(Operator):
     def discretize(self):
         if self.method[SpaceDiscretisation] is FD_C_4:
             nbGhosts = 2
-#        else:
-#            raise ValueError("Unknown method for space discretization of the\
-#                stretching operator.")
+        elif self.method[SpaceDiscretisation] is fftw2py:
+            nbGhosts = 0
+        else:
+            raise ValueError("Unknown method for space discretization of the\
+                differential operator.")
 
         # get (or create) the topology
         if self._predefinedTopo is not None:
-            if self.method[SpaceDiscretisation] is FD_C_4:
-                assert (self._predefinedTopo.ghosts >= nbGhosts).all()
+            assert (self._predefinedTopo.ghosts >= nbGhosts).all()
             topo = self._predefinedTopo
             for v in self.variables:
                 self.discreteFields[v] = v.discretize(topo)
@@ -67,42 +69,30 @@ class Differential(Operator):
                 topodims = np.ones((self.domain.dimension))
                 topodims[-1] = main_size
                 #variables discretization
-                if self.ghosts is not None:
-                    raise AttributeError("Ghosts points not yet\
-                    implemented for curl FFT operator.")
                 for v in self.variables:
-                    topo = self.domain.getOrCreateTopology(self.domain.dimension,
-                                                           self.resolution, 
-                                                           topodims,
-                                                           precomputed=True,
-                                                           offset=localoffset,
-                                                           localres=localres,
-                                                           ghosts=self.ghosts)
+                    topo = self.domain.getOrCreateTopology(
+                        self.domain.dimension,
+                        self.resolution, topodims, precomputed=True,
+                        offset=localoffset, localres=localres,
+                        ghosts=self.ghosts)
                     self.discreteFields[v] = v.discretize(topo)
-
             else:
-                # same topo for all variables
-                if self.ghosts is not None:
-                    self.ghosts[self.ghosts < nbGhosts] = nbGhosts
-                else:
-                    self.ghosts = np.ones(self.domain.dimension) * nbGhosts
-
                 # default topology constructor, with global resolution
                 # and domain
+                dim = self.domain.dimension
+                if self.ghosts is None:
+                    self.ghosts = np.asarray([nbGhosts] * dim)
                 for v in self.variables:
-                    topo = self.domain.getOrCreateTopology(self.domain.dimension,
-                                                           self.resolutions[v],
-                                                           ghosts=self.ghosts)
+                    topo = self.domain.getOrCreateTopology(
+                        dim, self.resolutions[v],
+                        ghosts=self.ghosts)
                     self.discreteFields[v] = v.discretize(topo)
 
         assert self.discreteFields[self.invar].topology == \
             self.discreteFields[self.outvar].topology, \
             'Operator not yet implemented for multiple resolutions.'
 
-#    def apply(self, simulation=None):
-#        # computation ...
-#        self.discreteOperator.apply(simulation)
-
+
 class Curl(Differential):
     """
     Computes \f$ outVar = \nabla inVar \f$
@@ -126,10 +116,6 @@ class Curl(Differential):
         return Curl_d.getWorkLengths()
 
     def setWorks(self, rwork=None, iwork=None):
-        if rwork is None:
-            rwork = []
-        if iwork is None:
-            iwork = []
         self.discreteOperator.setWorks(rwork, iwork)
 
 
diff --git a/HySoP/hysop/operator/discrete/discrete.py b/HySoP/hysop/operator/discrete/discrete.py
index f3f2caa18..4d34a475d 100644
--- a/HySoP/hysop/operator/discrete/discrete.py
+++ b/HySoP/hysop/operator/discrete/discrete.py
@@ -23,7 +23,7 @@ class DiscreteOperator(object):
 
     @debug
     @abstractmethod
-    def __init__(self, variables, method={}):
+    def __init__(self, variables, method=None):
         """
         Create an empty discrete operator.
         """
@@ -37,6 +37,8 @@ class DiscreteOperator(object):
         ## Output variables
         self.output = []
         ## Operator numerical method.
+        if method is None:
+            method = {}
         self.method = method
         self.name = self.__class__.__name__
         ## Object to store computational times of lower level functions
@@ -50,6 +52,9 @@ class DiscreteOperator(object):
         self.requirements = []
         self._apply_timer = ManualFunctionTimer('apply_function')
         self.timer.addFunctionTimer(self._apply_timer)
+        # Local (optional) work arrays. Set with setWorks function
+        self._rwork = None
+        self._iwork = None
 
     @staticmethod
     def getWorkLengths(nb_components=None, domain_dim=None):
@@ -63,11 +68,15 @@ class DiscreteOperator(object):
         """
         return 0, 0
 
-    def setWorks(self, rwork=[], iwork=[]):
+    def setWorks(self, rwork=None, iwork=None):
 
         # Set work arrays for real and int.
         # Warning : no copy! We must have pointer
         # links between work and self.work arrays.
+        if rwork is None:
+            rwork = []
+        if iwork is None:
+            iwork = []
         self._rwork = rwork
         self._iwork = iwork
         self.hasExternalWork = True
diff --git a/HySoP/hysop/operator/discrete/penalization.py b/HySoP/hysop/operator/discrete/penalization.py
index 2fe57de87..5d3db9b7c 100644
--- a/HySoP/hysop/operator/discrete/penalization.py
+++ b/HySoP/hysop/operator/discrete/penalization.py
@@ -29,7 +29,7 @@ class Penalization_d(DiscreteOperator):
         ## Penalization parameter
         self.factor = np.asarray(factor)
         ## Obstacle
-        if  isinstance(obstacles, list):
+        if isinstance(obstacles, list):
             self.obstacles = obstacles
         else:
             self.obstacles = [obstacles]
diff --git a/HySoP/hysop/operator/discrete/stretching.py b/HySoP/hysop/operator/discrete/stretching.py
index 6186cbe02..be7845162 100755
--- a/HySoP/hysop/operator/discrete/stretching.py
+++ b/HySoP/hysop/operator/discrete/stretching.py
@@ -33,18 +33,19 @@ class Stretching(DiscreteOperator):
     __metaclass__ = ABCMeta
 
     @debug
-    def __init__(self, velocity, vorticity,
-                 method={TimeIntegrator: RK3, SpaceDiscretisation: FD_C_4}):
+    def __init__(self, velocity, vorticity, method=None):
         """
         @param velocity : discrete field
         @param vorticity : discrete field
         @param method : numerical method for space/time discretizations
+        Default = {TimeIntegrator: RK3, SpaceDiscretisation: FD_C_4}
         """
         ## velocity discrete field
         self.velocity = velocity
         ## vorticity discrete field
         self.vorticity = vorticity
-
+        if method is None:
+            method = {TimeIntegrator: RK3, SpaceDiscretisation: FD_C_4}
         DiscreteOperator.__init__(self, [self.velocity, self.vorticity],
                                   method=method)
 
@@ -74,13 +75,15 @@ class Stretching(DiscreteOperator):
         self._synchronize = UpdateGhosts(self.velocity.topology,
                                          self.velocity.nbComponents
                                          + self.vorticity.nbComponents)
+        ## Formulation used to compute stretching (default = DivWV)
+        self.formulation = diff_op.DivWV
 
     def setUp(self):
 
         memshape = self.velocity.data[0].shape
         # work list length for time-integrator
         self._work_length_ti = self.method[TimeIntegrator].getWorkLengths(3)
-        # work list length for DivT operation.
+        # work list length for DivWV operation.
         self._work_length_str = self.formulation.getWorkLengths()
         if not self.hasExternalWork:
             self._worklength = self._work_length_str + self._work_length_ti
@@ -107,16 +110,15 @@ class Conservative(Stretching):
     Discretisation of the following problem :
     \f{eqnarray*} \frac{\partial\omega}{\partial t} = \nabla.(\omega:v) \f}
     """
-    def __init__(self, velocity, vorticity,
-                 method={TimeIntegrator: RK3, SpaceDiscretisation: FD_C_4}):
+    def __init__(self, velocity, vorticity, method=None):
         Stretching.__init__(self, velocity, vorticity, method)
-        self.formulation = diff_op.DivT
+        self.formulation = diff_op.DivWV
 
     @staticmethod
     def getWorkLengths(timeIntegrator):
         # Stretching only in 3D
         rwork_length = timeIntegrator.getWorkLengths(3)
-        rwork_length += diff_op.DivT.getWorkLengths()
+        rwork_length += diff_op.DivWV.getWorkLengths()
         return rwork_length, 0
 
     def setUp(self):
@@ -136,7 +138,7 @@ class Conservative(Stretching):
                                         optim=WITH_GUESS)
         self._isUpToDate = True
 
-    def apply(self, simulation):
+    def apply(self, simulation=None):
         if simulation is None:
             raise ValueError("Missing simulation value for computation.")
 
@@ -163,13 +165,12 @@ class Conservative(Stretching):
 
 
 class GradUW(Stretching):
-    """
-    Discretisation of the following problem :
+    """ Discretisation of the following problem:
+    
     \f{eqnarray*} \frac{\partial\omega}{\partial t}=[\nabla(v)][\omega]\f}
     """
 
-    def __init__(self, velocity, vorticity,
-                 method={TimeIntegrator: RK3, SpaceDiscretisation: FD_C_4}):
+    def __init__(self, velocity, vorticity, method=None):
         Stretching.__init__(self, velocity, vorticity, method)
         self.formulation = diff_op.GradVxW
 
@@ -200,7 +201,7 @@ class GradUW(Stretching):
                                         optim=WITH_GUESS)
         self._isUpToDate = True
 
-    def apply(self, simulation):
+    def apply(self, simulation=None):
         # Calling for requirements completion
         DiscreteOperator.apply(self, simulation)
         ctime = MPI.Wtime()
diff --git a/HySoP/hysop/operator/energy_enstrophy.py b/HySoP/hysop/operator/energy_enstrophy.py
index e527e3874..6126c8d14 100644
--- a/HySoP/hysop/operator/energy_enstrophy.py
+++ b/HySoP/hysop/operator/energy_enstrophy.py
@@ -6,7 +6,6 @@ Compute Energy and Enstrophy
 import numpy as np
 from parmepy.constants import debug, XDIR  # , PARMES_MPI_REAL
 from parmepy.operator.monitors.monitoring import Monitoring
-from parmepy.mpi import MPI
 from parmepy.tools.timers import timed_function
 import parmepy.tools.numpywrappers as npw
 
@@ -187,18 +186,8 @@ class Energy_enstrophy(Monitoring):
             ##                                  timeDerivativeEnergy,
             ##                                  nuEnstrophy,
             ##                                  nu_effEnstrophy))
-        #print 'monitor time : ', MPI.Wtime() - time, self._topo.rank
 
     def finalize(self):
         pass 
 #        if self._topo.rank == 0:
 #            self.f.close()
-
-#    def __str__(self):
-#        s = "Energy_enstrophy. "
-#        return s
-
-if __name__ == "__main__":
-    print __doc__
-    print "- Provided class : Energy_enstrophy"
-    print Energy_enstrophy.__doc__
diff --git a/HySoP/hysop/operator/monitors/compute_forces.py b/HySoP/hysop/operator/monitors/compute_forces.py
index 74b9cba1b..28fd33b9a 100644
--- a/HySoP/hysop/operator/monitors/compute_forces.py
+++ b/HySoP/hysop/operator/monitors/compute_forces.py
@@ -3,18 +3,264 @@
 @file compute_forces.py
 Compute forces
 """
-from parmepy.constants import debug, np, PARMES_REAL, ORDER, \
-PARMES_INTEGER, PI, XDIR, YDIR, ZDIR
-from parmepy.mpi.topology import Cartesian
+from parmepy.constants import debug, np, ORDER, \
+    PARMES_INTEGER, PI, XDIR, YDIR, ZDIR
 from parmepy.numerics.updateGhosts import UpdateGhosts
-from parmepy.numerics.differential_operations import DivStressTensor3D
-from parmepy.numerics.finite_differences import FD_C_4, FD_C_2
+from parmepy.numerics.differential_operations import Laplacian,\
+    DivStressTensor3D
+from parmepy.numerics.finite_differences import FD_C_2
 from parmepy.operator.monitors.monitoring import Monitoring
 from parmepy.mpi import main_comm, main_rank, MPI
 from parmepy.tools.timers import timed_function
 import parmepy.tools.numpywrappers as npw
 
 
+class DragAndLift(Monitoring):
+    """
+    Compute drag and lift using Noca's formula.
+    See Noca99 or Plouhmans, 2002, Journal of Computational Physics
+    The present class implements formula (52) of Plouhmans2002.
+    Integral inside the obstacle is not taken into account.
+    """
+    def __init__(self, velocity, vorticity, topo, volumeOfControl,
+                 obstacles=None, frequency=1):
+        """
+        @param velocity field
+        @param vorticity field@
+        @param the topology on which forces will be computed
+        @param a volume of control
+        (parmepy.domain.obstacle.controlBox.ControlBox object)
+        @param frequency : output rate
+        """
+        Monitoring.__init__(self, [velocity, vorticity], topo, frequency)
+        self.velocity = velocity
+        self.vorticity = vorticity
+        self._voc = volumeOfControl
+        self.topo = self._predefinedTopo
+        self._dim = self.domain.dimension
+        msg = 'Force computation undefined for domain of dimension 1.'
+        assert self._dim > 1, msg
+        self._step = np.asarray(self.topo.mesh.space_step)
+        self._dvol = np.prod(self._step)
+        self._work = npw.zeros(self.topo.mesh.resolution)
+        if obstacles is None:
+            obstacles = []
+        ## A  list of obstacles (rigid bodies) in the control box
+        self.obstacles = obstacles
+        # Local buffers, used for time-derivative computation
+        self._previous = npw.zeros(self._dim)
+        self._buffer = npw.zeros(self._dim)
+        ## The computed forces
+        self.force = npw.zeros(self._dim)
+        # Coef in the Noca formula
+        self._coeff = 1. / (self._dim - 1)
+        # function to compute the laplacian of a
+        # scalar field. Default fd scheme. (See Laplacian)
+        self._laplacian = Laplacian(topo)
+        # function used to compute first derivative of
+        # a scalar field in a given direction.
+        # Default = FD_C_2. Todo : set this as an input method value.
+        self._fd_scheme = FD_C_2(topo.mesh.space_step)
+        # Set how reduction will be performed
+        # Default = reduction over all process.
+        # \todo : add param to choose this option
+        self.mpi_sum = self._mpi_allsum
+        # Ghost points synchronizer
+        self._synchronize = None
+        # discrete velocity field
+        self.vd = None
+        # discrete vorticity field
+        self.wd = None
+
+    def _mpi_allsum(self):
+        """
+        Performs MPI reduction (sum result value over all process)
+        All process get the result of the sum.
+        """
+        self.force = self.topo.comm.allreduce(self.force)
+
+    def _mpi_sum(self, root=0):
+        """
+        Performs MPI reduction (sum result value over all process)
+        Result send only to 'root' process.
+        @param root : number of the process which get the result.
+        """
+        self.force = self.topo.comm.reduce(self.force, root=root)
+
+    def discretize(self):
+        """
+        """
+        for v in self.variables:
+            # the discrete fields
+            self.discreteFields[v] = v.discretize(self.topo)
+        self.vd = self.discreteFields[self.velocity]
+        self.wd = self.discreteFields[self.vorticity]
+        self._voc.discretize(self.topo)
+        for obs in self.obstacles:
+            obs.discretize(self.topo)
+        # prepare ghost points synchro for velocity and vorticity used
+        # in fd schemes
+        self._synchronize = UpdateGhosts(self.topo,
+                                         self.vd.nbComponents
+                                         + self.wd.nbComponents)
+
+    def apply(self, simulation=None):
+        """
+        Perform integrals on volume and surfaces of the control box
+        @param parmepy.problem.simulation : object describing
+        simulation parameters
+        """
+        assert simulation is not None,\
+            "Simulation parameter is required for DragAndLift apply."
+        dt = simulation.timeStep
+
+        # Synchro of ghost points is required for fd schemes
+        self._synchronize(self.vd.data + self.wd.data)
+
+        # -- Integration over the volume of control --
+        # -1/(N-1) . d/dt int(x ^ w)
+        if self._voc.isEmpty[self.topo]:
+            self._buffer[...] = 0.0
+        else:
+            self._buffer = self._integrateOnBox(self._buffer)
+            self.force[...] = -1. / dt * self._coeff * (self._buffer
+                                                        - self._previous)
+        # Update previous for next time step ...
+        self._previous[...] = self._buffer[...]
+        # -- Integrals on surfaces --
+        for s in self._voc.upperS:
+            self._buffer = self._integrateOnSurface(s, self._buffer)
+            self.force += self._buffer
+        for s in self._voc.lowerS:
+            self._buffer = self._integrateOnSurface(s, self._buffer)
+            self.force += self._buffer
+
+        # Reduce results over all MPI process in topo
+        self.mpi_sum()
+
+        return self.force
+
+    def _integrateOnSurface(self, surf, res):
+
+        res[...] = 0.0
+
+        if surf.isEmpty[self.topo]:
+            return res
+
+        # Get normal of the surface
+        normal = surf.normal
+        # Get indices of the surface
+        sl = surf.slices[self.topo]
+        coords = surf.coords[self.topo]
+        vdata = self.vd.data
+        wdata = self.wd.data
+        # i1 : normal dir
+        # i2 : other dirs
+        i1 = np.where(normal)[0][0]
+        i2 = np.where(normal == 0)[0]
+        dsurf = np.prod(self.topo.mesh.space_step[i2])
+        # Indices used for cross-product
+        j1 = [YDIR, ZDIR, XDIR]
+        j2 = [ZDIR, XDIR, YDIR]
+
+        # i1 component
+        res[i1] = normal[i1] * 0.5 * np.sum((-vdata[i1][sl] ** 2
+                                             + sum([vdata[j][sl] ** 2
+                                                    for j in i2])))
+        # other components
+        for j in i2:
+            res[j] = -normal[i1] * np.sum(vdata[i1][sl] * vdata[j][sl])
+
+        # Second part of integral on surface ...
+        buff = npw.zeros(vdata[0][sl].shape)
+        for j in i2:
+            buff[...] = vdata[j1[j]][sl] * wdata[j2[j]][sl]\
+                - vdata[j2[j]][sl] * wdata[j1[j]][sl]
+            res[i1] -= self._coeff * normal[i1] * np.sum(coords[j] * buff)
+            res[j] -= self._coeff * normal[i1] * coords[i1] * np.sum(buff)
+
+        # Last part
+        # Update fd schemes so to compute laplacian and other derivatives
+        # only on the surface (i.e. for liste of indices in sl)
+        self._laplacian.fd_scheme.computeIndices(sl)
+        for j in i2:
+            self._work[...] = self._laplacian(vdata[j], self._work)
+            res[i1] += self._coeff * normal[i1] * np.sum(coords[j]
+                                                         * self._work[sl])
+            res[j] -= self._coeff * normal[i1] * coords[i1] * \
+                np.sum(self._work[sl])
+        self._fd_scheme.computeIndices(sl)
+        self._fd_scheme.compute(vdata[i1], i1, self._work)
+        res[i1] += normal[i1] * np.sum(self._work[sl])
+        for j in i2:
+            self._fd_scheme.compute(vdata[i1], j, self._work)
+            res[j] += normal[i1] * np.sum(self._work[sl])
+
+        res *= dsurf
+        return res
+
+    def _integrateOnBox(self, res):
+        assert self._dim == 3, 'Not defined for dim < 3'
+        coords = self._voc.coords[self.topo]
+        wdata = self.wd.data
+        i1 = [YDIR, ZDIR, XDIR]
+        i2 = [ZDIR, XDIR, YDIR]
+        direc = 0
+        sl = self._voc.slices[self.topo]
+        for (i, j) in zip(i1, i2):
+            self._work[sl] = coords[i] * wdata[j][sl]
+            self._work[sl] -= coords[j] * wdata[i][sl]
+            for obs in self.obstacles:
+                for inds in obs.ind[self.topo]:
+                    self._work[inds] = 0.0
+            res[direc] = np.sum(self._work[sl])
+            direc += 1
+        res *= self._dvol
+        return res
+
+    def _integrateOnBox2(self, res):
+        assert self._dim == 3, 'Not defined for dim < 3'
+        coords = self.topo.mesh.coords
+        wdata = self.wd.data
+        i1 = [YDIR, ZDIR, XDIR]
+        i2 = [ZDIR, XDIR, YDIR]
+        direc = 0
+        ind = self._voc.ind[self.topo][0]
+        ilist = np.where(ind)
+        nb = len(ilist[0])
+        ind = self._voc.ind[self.topo][0]
+        for (i, j) in zip(i1, i2):
+            self._work.flat[:nb] = coords[i].flat[ilist[i]] * wdata[j][ind]\
+                - coords[j].flat[ilist[j]] * wdata[i][ind]
+            res[direc] = np.sum(self._work.flat[:nb])
+            direc += 1
+        res *= self._dvol
+        return res
+
+    def _integrateOnBoxLoop(self, res):
+        """
+        Integrate over the control box using python loops.
+        ---> wrong way, seems to be really slower although
+        it costs less in memory.
+        Used only for tests (timing).
+        """
+        assert self._dim == 3, 'Not defined for dim < 3'
+        coords = self.topo.mesh.coords
+        ind = self._voc.ind[self.topo][0]
+        ilist = np.where(ind)
+        wdata = self.wd.data
+        for(ix, iy, iz) in zip(ilist[0], ilist[YDIR], ilist[ZDIR]):
+            res[XDIR] += coords[YDIR][0, iy, 0] * wdata[ZDIR][ix, iy, iz]\
+                - coords[ZDIR][0, 0, iz] * wdata[YDIR][ix, iy, iz]
+            res[YDIR] += coords[ZDIR][0, 0, iz] * wdata[XDIR][ix, iy, iz]\
+                - coords[XDIR][ix, 0, 0] * wdata[ZDIR][ix, iy, iz]
+            res[ZDIR] += coords[XDIR][ix, 0, 0] * wdata[YDIR][ix, iy, iz]\
+                - coords[YDIR][0, iy, 0] * wdata[XDIR][ix, iy, iz]
+
+        res *= self._dvol
+        return res
+
+
 class Forces(Monitoring):
     """
     Compute the forces according the Noca s formula
@@ -264,7 +510,7 @@ class Forces(Monitoring):
             localForce = self.integrateOnBox(localForce, vort, self.chi_box, dt)
 
             # Reduction operation
-            if (main_rank == 0):
+            if main_rank == 0:
                 comm = main_comm
                 comm.Reduce([localForce, MPI.DOUBLE], [force, MPI.DOUBLE], 
                             op=MPI.SUM, root=0)
diff --git a/HySoP/hysop/operator/monitors/printer.py b/HySoP/hysop/operator/monitors/printer.py
index 9411499fb..70b2cbc83 100644
--- a/HySoP/hysop/operator/monitors/printer.py
+++ b/HySoP/hysop/operator/monitors/printer.py
@@ -3,8 +3,10 @@
 
 Classes for handling ouputs.
 """
-from parmepy.constants import np, S_DIR, PARMES_REAL, debug
+from parmepy.constants import np, S_DIR, PARMES_REAL, debug, VTK, HDF5, DATA
 from parmepy.operator.monitors.monitoring import Monitoring
+import parmepy.tools.numpywrappers as npw
+import os
 try:
     import evtk.hl as evtk
 except ImportError as evtk_error:
@@ -13,7 +15,6 @@ try:
     import h5py
 except ImportError as h5py_error:
     h5py = None
-from parmepy.mpi import main_rank, MPI, main_size
 from parmepy.tools.timers import timed_function
 
 
@@ -23,8 +24,8 @@ class Printer(Monitoring):
 
     Performs outputs in VTK images.
     """
-
-    def __init__(self, variables, topo, frequency=0, prefix=None, ext=None):
+    def __init__(self, variables, topo, frequency=0,
+                 prefix=None, formattype=None):
         """
         Create a results printer for given fields, filename
         prefix (relative path) and an output frequency.
@@ -40,20 +41,21 @@ class Printer(Monitoring):
             self.prefix = './out_'
         else:
             self.prefix = prefix
-        ## Extension for filename
-        if ext is None:
-            self.ext = '.vtk'
+
+        ## Default output type
+        if formattype is None:
+            self.formattype = VTK
         else:
-            self.ext = ext
-        if self.ext == '.vtk' and evtk is None:
-            print "You set a printer with '.vtk' extension and evtk module ",
-            print "is not present. You must specify another extension",
-            print " ('.dat' or '.h5')"
+            self.formattype = formattype
+        if self.formattype == VTK and evtk is None:
+            print "You set a printer with VTK as format and evtk module ",
+            print "is not present. You must specify another format for output",
+            print " (DATA or HDF5)"
             raise evtk_error
-        if self.ext == '.h5' and h5py is None:
-            print "You set a printer with '.h5' extension and h5py module ",
+        if self.formattype == HDF5 and h5py is None:
+            print "You set a printer with HDF5 as format and h5py module ",
             print "is not present. You must specify another extension",
-            print " ('.dat' or '.vtk')"
+            print " (DATA or VTK)"
             raise h5py_error
 
         self.input = self.variables
@@ -71,6 +73,15 @@ class Printer(Monitoring):
         self.topo = self._predefinedTopo
         self._xmf = ""
 
+        # Create output dir if required
+        if self.topo.rank == 0:
+            d = os.path.dirname(self.prefix)
+            if not os.path.exists(d):
+                os.makedirs(d)
+        # Force synchro to be sure that all output dirs
+        # have been created.
+        self.topo.comm.barrier()
+
     def setUp(self):
         """
         Print initial state
@@ -146,22 +157,26 @@ class Printer(Monitoring):
             except AttributeError:
                 pass
         # Set output file name
-        filename = self.prefix + str(main_rank)
-        filename += '_' + "iter_{0:03d}".format(ite) + self.ext
+        filename = self.prefix + str(self.topo.rank)
+        filename += '_' + "iter_{0:03d}".format(ite)
 
         ## VTK output \todo: Need fix in 2D, getting an IOError.
-        if self.ext == '.vtk':
-            orig = tuple([self.topo.mesh.origin[i]
-                          for i in xrange(self.topo.mesh.dim)])
+        if self.formattype == VTK:
+            orig = [0.] * 3
+            dim = self.topo.mesh.dim
+            orig[:dim] = [self.topo.mesh.origin[i] for i in xrange(dim)]
+            #orig = tuple(orig)
             coords = self.topo.mesh.coords
             #ind = self.topo.mesh.local_start
             ## orig = tuple([self.topo.mesh.coords[i].flatten()[ind[i]]
             ##               for i in xrange(self.topo.mesh.dim)])
-            spacing = tuple(self.topo.mesh.space_step)
+            spacing = [0] * 3
+            spacing[:dim] = [self.topo.mesh.space_step[i] for i in xrange(dim)]
             evtk.imageToVTK(filename, origin=orig, spacing=spacing,
                             pointData=self._build_vtk_dict())
-        elif self.ext == '.h5':
-            filename = self.prefix + "_iter_{0:03d}".format(ite) + self.ext
+        elif self.formattype == HDF5:
+            filename = self.prefix + str(self.topo.size)
+            filename += "procs_iter_{0:03d}".format(ite) + '.h5'
             # Write the h5 file
             # (force np.float64, ParaView seems to not be able to read float32)
             # Writing compressed hdf5 files (gzip compression seems the best)
@@ -169,40 +184,36 @@ class Printer(Monitoring):
             # mpi rank.
             # TODO: Why gzip compression not working in parallel ??
             # Remark: h5py must be build with --mpi option
-            if main_size == 1:
+            if self.topo.size == 1:
                 f = h5py.File(filename, "w")
                 compression = 'gzip'
             else:
                 f = h5py.File(filename, 'w',
-                              driver='mpio', comm=MPI.COMM_WORLD)
+                              driver='mpio', comm=self.topo.comm)
                 compression = None
+
+            g_start = self.topo.mesh.global_start
+            g_end = self.topo.mesh.global_end + 1
+            sl = tuple([slice(g_start[i], g_end[i])
+                        for i in xrange(self.domain.dimension)])
+            datasetNames = []
             for field in self.variables:
                 df = field.discreteFields[self.topo]
-                if field.isVector:
-                    for d in xrange(df.nbComponents):
+                for d in xrange(df.nbComponents):
                     # creating datasets for the vector field
-                        ds = f.create_dataset(df.name + S_DIR[d],
-                                              self.topo.globalMeshResolution,
-                                              dtype=np.float64,
-                                              compression=compression)
-                        ds[...] = np.asarray(df.data[d], dtype=np.float64)
-                else:
-                    # creating dataset for the scalar field
-                    ds = f.create_dataset(df.name,
-                                          self.topo.globalMeshResolution,
+                    currentName = df.name + S_DIR[d]
+                    datasetNames.append(currentName)
+                    ds = f.create_dataset(currentName,
+                                          self.topo.globalMeshResolution - 1,
                                           dtype=np.float64,
                                           compression=compression)
-                    #ds[...] = np.asarray(df.data[0], dtype=np.float64)
                     # In parallel, each proc must write in the proper part
                     # Of the dataset (of site global resolution)
-                    # assume s is global_start and r the local resolution
-                    # we should write :
-                    s = self.topo.mesh.global_start
-                    e = self.topo.mesh.global_end + 1
-                    ds[s[0]:e[0], s[1]:e[1], s[2]:e[2]] = \
-                        np.asarray(df.data[0], dtype=np.float64)
+                    ds[sl] = np.asarray(df.data[d][self.topo.mesh.iCompute],
+                                        dtype=np.float64)
+
             f.close()
-            if main_rank == 0:
+            if self.topo.rank == 0:
                 # Write the xmf file driving all h5 files.
                 # Writing only one file
                 # We have a temporal list of Grid => a single xmf file
@@ -210,8 +221,8 @@ class Printer(Monitoring):
                 # difficult to compile and to use"
                 #  [Advanced HDF5 & XDMF - Groupe Calcul]
                 self._xmf += _TemporalGridXMF(
-                    self.topo, self.variables, ite, t, filename)
-                f = open(self.prefix + '.xmf', 'w')
+                    self.topo, datasetNames, ite, t, filename)
+                f = open(self.prefix + str(self.topo.size) + 'procs.xmf', 'w')
                 f.write("<?xml version=\"1.0\" ?>\n")
                 f.write("<!DOCTYPE Xdmf SYSTEM \"Xdmf.dtd\">\n")
                 f.write("<Xdmf Version=\"2.0\">\n")
@@ -224,7 +235,8 @@ class Printer(Monitoring):
                 f.write("</Xdmf>\n")
                 f.close()
 
-        elif self.ext == '.dat':
+        elif self.formattype == DATA:
+            filename += '.dat'
             f = open(filename, 'w')
             shape = self.topo.mesh.resolution
             coords = self.topo.mesh.coords
@@ -291,7 +303,7 @@ def _listFormat(l):
         '(', '').replace(')', '')
 
 
-def _TemporalGridXMF(topo, var, ite, t, filename):
+def _TemporalGridXMF(topo, datasetNames, ite, t, filename):
     g = ""
     if topo.mesh.dim == 2:
         topoType = "2DCORECTMesh"
@@ -315,26 +327,17 @@ def _TemporalGridXMF(topo, var, ite, t, filename):
     g += "     " + _listFormat(topo.mesh.space_step) + "\n"
     g += "     </DataItem>\n"
     g += "    </Geometry>\n"
-    for field in var:
-        for df in [field.discreteFields.values()[0]]:
-            for d in xrange(df.nbComponents):
-                if df.isVector:
-                    g += "    <Attribute Name=\""
-                    g += df.name + S_DIR[d] + "\""
-                else:
-                    g += "    <Attribute Name=\""
-                    g += df.name + "\""
-                g += " AttributeType=\"Scalar\" Center=\"Node\">\n"
-                g += "     <DataItem Dimensions=\""
-                g += _listFormat(topo.globalMeshResolution - 1) + " \""
-                g += " NumberType=\"Float\" Precision=\"4\" Format=\"HDF\""
-                g += " Compression=\"Raw\">\n"  #
-                g += "      " + filename.split('/')[-1]
-                if df.isVector:
-                    g += ":/" + df.name + S_DIR[d]
-                else:
-                    g += ":/" + df.name
-                g += "\n     </DataItem>\n"
-                g += "    </Attribute>\n"
+    for name in datasetNames:
+        g += "    <Attribute Name=\""
+        g += name + "\""
+        g += " AttributeType=\"Scalar\" Center=\"Node\">\n"
+        g += "     <DataItem Dimensions=\""
+        g += _listFormat(topo.globalMeshResolution - 1) + " \""
+        g += " NumberType=\"Float\" Precision=\"8\" Format=\"HDF\""
+        g += " Compression=\"Raw\">\n"  #
+        g += "      " + filename.split('/')[-1]
+        g += ":/" + name
+        g += "\n     </DataItem>\n"
+        g += "    </Attribute>\n"
     g += "   </Grid>\n"
     return g
diff --git a/HySoP/hysop/operator/penalization.py b/HySoP/hysop/operator/penalization.py
index 6dba452e3..d48d02d5d 100644
--- a/HySoP/hysop/operator/penalization.py
+++ b/HySoP/hysop/operator/penalization.py
@@ -33,12 +33,12 @@ class Penalization(Operator):
         @param[in] resolutions :  list of resolutions (one per variable)
 
         """
-        Operator.__init__(self, variables, method, topo=topo, ghosts=ghosts)
+        Operator.__init__(self, variables, method={}, topo=topo, ghosts=ghosts)
 
         ## domain where penalization must be applied.
         ## A parmepy.domain.obstacle .
         ## Obstacle
-        if  isinstance(obstacles, list):
+        if isinstance(obstacles, list):
             self.obstacles = obstacles
         else:
             self.obstacles = [obstacles]
diff --git a/HySoP/hysop/operator/poisson.py b/HySoP/hysop/operator/poisson.py
index af1ab709b..4dfd69689 100644
--- a/HySoP/hysop/operator/poisson.py
+++ b/HySoP/hysop/operator/poisson.py
@@ -105,8 +105,3 @@ class Poisson(Operator):
                                            **self.config)
         self.discreteOperator.setUp()
         self._isUpToDate = True
-
-if __name__ == "__main__":
-    print __doc__
-    print "- Provided class : Poisson"
-    print Poisson.__doc__
diff --git a/HySoP/hysop/operator/stretching.py b/HySoP/hysop/operator/stretching.py
index c35659b51..cffb0527f 100755
--- a/HySoP/hysop/operator/stretching.py
+++ b/HySoP/hysop/operator/stretching.py
@@ -22,8 +22,7 @@ class Stretching(Operator):
 
     @debug
     def __init__(self, velocity, vorticity, resolutions,
-                 method={TimeIntegrator: RK3, Formulation: Conservative,
-                         SpaceDiscretisation: FD_C_4}, topo=None, ghosts=None):
+                 method=None, topo=None, ghosts=None):
         """
         Create a Stretching operator from given
         velocity and vorticity variables.
@@ -48,6 +47,9 @@ class Stretching(Operator):
         ## Grid resolution for each variable (dictionnary)
         self.resolutions = resolutions
         ## Numerical methods for time and space discretization
+        if method is None:
+            method = {TimeIntegrator: RK3, Formulation: Conservative,
+                      SpaceDiscretisation: FD_C_4}
         self.method = method
         assert Formulation in self.method.keys()
         assert SpaceDiscretisation in self.method.keys()
@@ -96,7 +98,11 @@ class Stretching(Operator):
         """
         return formulation.getWorkLengths(timeIntegrator)
 
-    def setWorks(self, rwork=[], iwork=[]):
+    def setWorks(self, rwork=None, iwork=None):
+        if rwork is None:
+            rwork = []
+        if iwork is None:
+            iwork = []
         self.discreteOperator.setWorks(rwork, iwork)
 
     @debug
@@ -114,4 +120,3 @@ class Stretching(Operator):
     def apply(self, simulation=None):
         # computation ...
         self.discreteOperator.apply(simulation)
-
diff --git a/HySoP/hysop/operator/tests/ref_scal2D_PenalSphere_rk_0.dat b/HySoP/hysop/operator/tests/ref_scal2D_PenalSphere_rk_0.dat
new file mode 100644
index 0000000000000000000000000000000000000000..71b8e2779d3eb5e3c83f0ac74d41b70173e13232
GIT binary patch
literal 8335
zcmeHKyH3L}6ir%S@DFs2#Nw%fg)$(~g#j#eKy-{CO^bv?9|_KoU*PZf9tKEg?{Xw7
zNn^Vya)#dC<mS9$M{St+@hV$x@?pGA7sF-tF5jXwjdJ#!F5>m-L#A0QS<>U%F}9j-
zFDHF2JUR7e@-aE#!sj9of!qzvWxiRk<djc)uh+s8elc{$MStqwOeT|upW^uYC=h1%
z7nkhG3%uR&GTwf1)#Xqo;MRd#2W}m>ePHfAFbL(>-5?xe{crmY{q5TyOD;|QjlS9W
z=<4&f>3v<xqe?&dY+9>u51oTbUsIoT=T&@%$^rG%_0e_?d2Rbge(Kx$s(U~2VYQV5
z^48U@dk%Ft|78O9f4As<u66P7`BQId9S}a%x;jnIkshkH{1cyPe=2<9$7;$y;Ub?(
zpXu|cgY=oM$ZOiC3LkZt_D%T6OT9{`?(@WlmGs%F$gkU{3J-a0`9&VoLA^?M+w+8v
z6?NI}Q4jX}`bHe=ZTqa^Lw!{}<x(|=*Q<1N<vQx#x9^G{T{=`b=)!k6Wdi2@2f2J*
OzHWUT|NfG*X!r{!ZabF%

literal 0
HcmV?d00001

diff --git a/HySoP/hysop/operator/tests/ref_scal2D_PenalSphere_rk_0.map b/HySoP/hysop/operator/tests/ref_scal2D_PenalSphere_rk_0.map
new file mode 100644
index 000000000..445b43089
--- /dev/null
+++ b/HySoP/hysop/operator/tests/ref_scal2D_PenalSphere_rk_0.map
@@ -0,0 +1 @@
+0		 ScalarRef
diff --git a/HySoP/hysop/operator/tests/ref_scal3D_PenalSphere_rk_0.dat b/HySoP/hysop/operator/tests/ref_scal3D_PenalSphere_rk_0.dat
new file mode 100644
index 0000000000000000000000000000000000000000..966c2eb21c1777e837db35013449d8b45ef91fe2
GIT binary patch
literal 262289
zcmeI1&8{6+5ryp}Fz^dxip1oOESL}mNMynQuH*qC!-ynKkdVla&CS5Cz}xXW48Tn&
zMOsNwwRi8I>T^Fw%2svPu3D>h_vdqtKYsP}<yYT+`OTZ>FTZ~M#q%$}{oBJ|KYjiB
z(>Krl{Q8TRUw`%Yhu7b}e0cWioxAVSo_+QCceh`?dw2ij;iIRYJ^b<2dw2Iw@9uwg
z{P*y`w9g;jeDlS#S3kM?=$*g(`u@rN)3<3qy}N(+qo==l@#4iF{{8mPfB*Nk#%I6(
z#V?=z>B*D#o_zP`lMh*s2`~qq1D*rk12_N&-~b$e18@KizyUY_2jBo4fCF#<4!{98
z00-az9DoCG01m(bH~<IW033h=Z~zX#0XP5$-~b$e18@KizyUY_2jBo4fCF#<4!{98
z00-az9DoCG01m(bH~<IW033h=Z~zX#0XP5$-~b$e18@KizyUY_2jBo4fCF#<4!{98
z00-az9DoCG01m(bH~<IW033h=Z~zX#0XP5$-~b$e18@KizyUY_2jBo4fCF#<4!{98
z00-az9DoCG01m(bH~<IW033h=Z~zX#0XP5$-~b$e18@KizyUY_2jBo4fCF#<4!{98
z00-az9DoCG01m(bH~<IW033h=Z~zX#0XP5$-~b$e18@KizyUY_2jBo4fCF#<4!{98
z00-az9DoCG01m(bH~<IW033h=Z~zX#0XP5$-~b$e18@Kiz=7N4!22IR{PTD3fBfNN
zd(X|cu8(yNkIgrJj|to^*EnC!m*<=J8xFt$H~<IW033h=Z~zX#0XP5$-~b$e18@Ki
zzyUb$csQW{-si=)#s$x3bp7gci2P;mUv&;ye^DO(j|p6llhL_m?{ogwJbyd4>^W<m
z?Z1^({dw`N_{GODf$MRbbK~52Zh3Fv033h=Z~zX#0XP5$-~b$e18@KizyUbW&w<DG
zyRYXrx_))eME<h(uQrFQuhzV)%%Rm+Hov<6tdFyKRp-#^>w|wi`}qgI`p0iu_uY#B
zR^OIi)xYU7ougN)Uws|z+v-*Kt@=#&*;VUaE$;PyHJ@3%-aNAV$vY;{|NV>kF+ZLk
z-XAys2jBo4fCF#<4!{9800-az9DoCJIq+D1|MGmcUccJ&*y=BvUwsZ)A7}Hb&LQ$;
z|K1s`QJt&#kNkS~y*X6#@9DJbp1<h2?f2FybMW-p^~k4p-}ZYe{*MXN`Mm0#lXt$+
zb+2BPb5|XnUb`OowfmOut=hLr?CBkSy^3E?$7sJ+&+Aw9y6HZ<R`u7b%bQ1UJ@y?F
zsPp$A=fQdKJn}xm0XP5$-~b$e18@KizyUY_2R`}mvHbqw`E0#@HEy)}%jQ>~L)M4=
z&aZ2Bf3-P8y47bkj_OvKgQqj=Gs^So<NuhzzR#83xkTsLd*AD~>pAfB^!khTdHwKz
z+wVcObJyq1(-%F@+5297)v+t~^x5^SPtCLa-nz@L*Vogj*VU)Rn&-t{|0>r#-Dj^?
z@t4(c*01f)_Pa`+`O@_&`q6nzVBhC%=Fj|j{&@f3033h=Z~zX#0XP5$;6P0d==c0=
zeAV;J`fJUrK98&)`@K`w?EY$Vh;*yZY#i0CG6zp*)@PLG)!*&k(-ryYz3=tgH3v^m
zufJ%Y*RP8INdKt&ItTn86IlQGSI=S8`9^iLZ<X^mU6KCY`&M69{O@`GZSHy=7WbaB
z?z4KjTL0av>e5*KWMi$P@owMRU-fyh=ikfM>)N+kta?Yq)qH04^!m@%C-0cR`p-Yi
zow@Ve@!r7!H~<IW033h=Z~zX#fgk5U_IvQH%rEOV%Dc)uA|IpfSDQ!V%lo}Ax*pZ7
zG6zp*<S%>QtH0a7rz`8LHP7p}YYwWX)lYA}_Fe5?`&-=Wr*)t8({<Gxw6DdjK6>-5
zpRV{nCNTf=x6a+--t%nTXZ>{L{MVen%~|K)dXByM+Sl}~UR8(1y}qJ-R$tfEzO|p~
zu{iRRy>In&RUNxx)unOPr{-xLjd%I=`l`NJvDd%a^-=z-`Rdh^okwqd_8k+L|M`zO
zGiRPN-ZMA=2jBo4fCF#<4!{AA1KIC=n&;K4eqDWxim&clf8X-rXkOLk;OX`JwO;q?
z@1BFFtJPmN-|M$)4yq^X<7}SxUF~1{TYT16cE9!0b=4fSZ&vr{Jk^2!V*>L(hw0o$
zpKG?B>fro2{~gc2J%`!n_H3T@({<JJZ+*1?Szp=x+Sl}~UR8(1Ss$%=R$tfEzO|p~
zvAETrmv8lSRUNxy)u*xNPp|7fjd%Nu`m5eian!f!{a*i7{h;fZ!2HkI%$Yg!objH)
z0XP5$-~b$e190Fv9O(Vtr~9t<-mCsmarHTPKK1vVZ0yymHV02<)@PLG)!#h_Pgmrp
z_rBL}^&C7My}nxadHq+-LHoDrAC<2<@PABT{^tXo`>6A5t)n_Pf6kxhpZ9+@2b4P&
zxALnu-}>n~+qc%&{ucN8iuPH3T~*iWSoK&O`N`h5db+Om8TC_r7H55Gp6%B)(y{x#
z>KzsD_SM?|vHEVE6Zyvk=6`Ns&dizTjQ0!<zyUY_2jBo4*vSF=esrtPq4_(zUS&RK
zeP;Jpn@6Nuf7i*zQQh5h@bqQ<wdQ&KcFjTcwEF4I*S@R$Yk!M-{j~10e!8xjgZ6FJ
zKPq2!;QyGw{Ll3|_fhBBT1R!vK7Z|JajWi8`PN_8)pHo-r?t*%-!@n6+p2q1zUnZ2
zvsdjq%1>(@)iK+*)z`ijx9acBw|=^szE!LC@AcEVPy4R&t9?|5#jSp_`Bq<7)wMcS
zJsM~IX`a^6c(u={pX!?xM>?w9ALYM_pV2yOKBMbW@0h^+??KF&IrE(Hp1}b)00-az
z9DoBE4%p{@U2pCAQGdPhRp!#_&wlS{-B)cMk$&^px<++(&%x8z>NA_~_1iTE)syvc
zHc$Jm_OJadKI<#H-}>pgY7W{rt9x{w>cIaofq9?9b>0?_KG$qL>#ysq=Ro^pb<WOH
zU90?C-`W1W`qp39S^llA_Ve`3zOFh<-|SWU&i3imR~@r`TYc?ov8QwNb?c|A>07mG
z|It3ZdfIoDU+tqhEcSGty>9h&Rb8uN)uVB*Z>^*8YM)U*)fdG!mncu~M>?w9SKYmF
z6+feNcykz4pE}0`=6w%hzRZ{Bi}wo-zyUY_2jBo4=;45U54hFm$NZgLuQH#rKC}C)
z%_GunzjL&%QQh5h@btC%%;tOjcFjTcWPO~?)4r?yYk!N+`pWLNe!8xjgZ9np9-XH;
z@PABT{^xO>`{;Ac)>9p`&tLmloYgxz&-&}SdJdy~W$Uf>ZFAMWS-qq4REO!Cy=vdl
zzOwaH$86tLU;A2|)qOV4`sr%=R;}9qtgq~T?Yqja_E8-cXMMEhS$$nq*XmgHXx!@4
z^0kh}t9?fORNt&P(oyC9DF0RbjMicE8C{op#{}ko4`R;Dndgl63=Y5nH~<IW033h=
z-*dpe-<(~q_FkR!>+P>T2T!;Cp3}N|^{UOm)7k3R%lGQ<o`a{$^KtgN*KhS4JRN8K
zW%qmiSIt5DXLXOxQyus}Ch+~=zq96{b02-K*?OvDmS3%_eY1K;=cx|WIV;w_qkU!T
zsg7BGwXXKf>K&b@I#lPZSo@Clm93{bX8F~++SlT&?z4H;Pgm7BJJ$XdpY`YMxB9xy
z_8HYzJr;XDTCZC@T_atq@2ftGTm4$T?bmg+uhxF5cU0V}V|V^z^}T!U^gkvr|9cQ~
zX3jikyk~F#4!{9800-az9QYvzdcOx=^?mB~8||w;k4V4$t`n_MooaLN^hQ3j_r3bN
z=iuqe`fAPd`t6#7>S^`Uo3DLW``7*!_xfqwXZ>_tH3#k6s()0z>cIaofggVU7(ExA
z`>6A5t)n_d`}FE*-&WnD@>PeYcl34bJIYUM9n~?~r&mw=THLC?H{bf{>gk<zUHkX?
zY2BxNXZefjst$`={bcj4zOIqZ+4ohC#aUlbp4HQJwy)Ous!!v{r`^|m8n^0Nov(UF
z#jAac>euUkRDbFm6PW+Gm^m|No-^JvH~<IW033h=Z~zX#0pmdSd!XfCea-rf@~$$6
z$j7Mr)#efTvfs5@*QjolIe0o-{d)Oc{oVdOU7nA#*S&tb=Ae4c`pfRuzN`Ife~YvF
z&*oV_U02OP`&xX~S9ZVk(-r^61m=I<)VW)nJ-@Sg)=yW?f6e*ZoOS+Z&o#SW`<kBB
ztLm^g>!UT#>g&4NxAs##7PtEI@~xh(s$*BIx-|BDMc1{C#=HD_eO2GA*y~^I`Y8X^
zeD&(d&ZD<J`;H0B|NO_CnKREB?-?9`18@KizyUY_2jBo4_;C(ozxQh1Rp+TbM#a_V
zk@e~Q?w8$PZ4Qxc&u?@+s#|3ap3cZ$_P$qtw|`Gp)>mtu*KgMxJUy+xviV-WD*m%R
z&gSVH@PABT{pU|ThqLFJ-LHMCoWJSH`e@Cw`nuwO&+~6{*YmKr^?bd2tEa2=-@U3X
zjXht{b*-cEZr|Ep^?9-9+soJM+P7M)dPl|8d}j6Z`p?!U@0h^)&p*tax%1rd-oXJl
z00-az9DoCG01m(bIPm{C;QbzOYtN16v-SGb=F;jfn_qnnSs%Cd`)B0a^BY}{>Q;Fk
zJe`rh?0v63{*MXl`@H3yOZHq_^Spk$o&!%$tFLUn*AM@<{T@_1cYWSGec9(a%Jb^0
zj$N^*&#p)Qy!*D_TX*^O`g%I`daqxr<Mpp{-E@v#tN6?680A0P?<#rTT&|)YoyP?B
zeeP!d%%A6v_YV%h0XP5$-~b$e18@Kiz=11p;IaJv;rVR6ezo_y)n7Kh`W&)8&gNB}
zL*&c*-CwUqb*|=L{bgfs4%Pg7I_-Maujbi)Z>=&1PoG^^U)k99dn^8r3Do)A@10Zj
zyfx3OSLNJQho{%Bt3NNcd~emhRbo%?=<8MddOAk?wR&E^s@F~T*|n;_UR~Zidh4<8
zm_VJs2RRSUgXfX=5e~osH~<IW033h=Z~zX#0XP5$9y<pf+wWhV-{|_)pOeU6_Wsr8
zkoDD?ca=G``pV{4_n-A~Hm~X&T7B8Sf41(s75}ZiEx)RN(`7nGuU5bMI@-6@tL|I%
zneMZz*1cNX>;Gy#vwFRGWcQPIOn~pvJO?}nya#Xq4!{9800-az9DoCG01m(bH~<IW
z033h=bvWSt?yJ{t?Y&ih+4$;n$oh=(t~!UvSN1;sj|tS_SoS=!=Nsi^`&G+}d}Z%f
z^VO;+>r?Yu`&7?Ye_mYO2l|c)T#v_`8|TJz%X<q4-~b$e18@KizyUY_2jBo4fCF#<
z4!{9800-az9DoC}Ibi?(HM>u>`sQ2LYC31@G5@psQ2&?!JmWdwIp95j18@KizyUY_
z2jBo4fCF#<4!{9800-az9DoCG01m(bH~<IW033h=Z~zX#0XP5$-~b$e18@KizyUY_
z2jBo4fCF#<4!{9800-az9DoCG01m(bH~<IW033h=Z~zX#0XP5$-~b$e18@KizyUY_
z2jBo4fCF#<4!{9800-az9DoCG01m(bH~<IW033h=Z~zX#0XP5$-~b$e18@KizyUY_
z2jBo4fCF#<4!{9800-az9DoCG01m(bH~<IW033h=Z~zX#0XP5$-~b$e18@KizyUY_
z2jBo4fCF#<4!{9800-az9DoCG01m(bH~<IW033h=Z~zX#0XP5$-~b$e18@KizyUY_
P2jIY!Iq>k=r_cWblI>f`

literal 0
HcmV?d00001

diff --git a/HySoP/hysop/operator/tests/ref_scal3D_PenalSphere_rk_0.map b/HySoP/hysop/operator/tests/ref_scal3D_PenalSphere_rk_0.map
new file mode 100644
index 000000000..445b43089
--- /dev/null
+++ b/HySoP/hysop/operator/tests/ref_scal3D_PenalSphere_rk_0.map
@@ -0,0 +1 @@
+0		 ScalarRef
diff --git a/HySoP/hysop/operator/tests/test_analytic.py b/HySoP/hysop/operator/tests/test_analytic.py
index b4de312d1..b71cacc92 100644
--- a/HySoP/hysop/operator/tests/test_analytic.py
+++ b/HySoP/hysop/operator/tests/test_analytic.py
@@ -93,7 +93,7 @@ def test_analytical_field_1():
     assert np.allclose(cafd[0], refd.data[0])
     assert id(cafd.data[0]) == ids
     time = 3.0
-    caf.initialize(time)
+    caf.initialize(currentTime=time)
     refd.data = func_scal_1(refd.data, *(coords + (time,)))
     assert np.allclose(cafd[0], refd.data[0])
     assert id(cafd.data[0]) == ids
@@ -116,7 +116,7 @@ def test_analytical_field_2():
     assert np.allclose(cafd[0], refd.data[0])
     assert id(cafd.data[0]) == ids
     time = 3.0
-    caf.initialize(time)
+    caf.initialize(currentTime=time)
     refd.data = func_scal_1(refd.data, *(coords + (time,)))
     assert np.allclose(cafd[0], refd.data[0])
     assert id(cafd.data[0]) == ids
@@ -143,7 +143,7 @@ def test_analytical_field_3():
         assert np.allclose(cafd[i], refd.data[i])
         assert id(cafd.data[i]) == ids[i]
     time = 3.0
-    caf.initialize(time)
+    caf.initialize(currentTime=time)
     refd.data = func_vec_1(refd.data, *(coords + (time,)))
     for i in xrange(caf.nbComponents):
         assert np.allclose(cafd[i], refd.data[i])
@@ -171,7 +171,7 @@ def test_analytical_field_4():
         assert np.allclose(cafd[i], refd.data[i])
         assert id(cafd.data[i]) == ids[i]
     time = 3.0
-    caf.initialize(time)
+    caf.initialize(currentTime=time)
     refd.data = func_vec_1(refd.data, *(coords + (time,)))
     for i in xrange(caf.nbComponents):
         assert np.allclose(cafd[i], refd.data[i])
@@ -201,7 +201,7 @@ def test_analytical_field_5():
         assert np.allclose(cafd[i], refd.data[i])
         assert id(cafd.data[i]) == ids[i]
     time = 3.0
-    caf.initialize(time)
+    caf.initialize(currentTime=time)
     refd.data = func_vec_3(refd.data, *(coords + (time, theta)))
     for i in xrange(caf.nbComponents):
         assert np.allclose(cafd[i], refd.data[i])
@@ -231,7 +231,7 @@ def test_analytical_field_6():
         assert np.allclose(cafd[i], refd.data[i])
         assert id(cafd.data[i]) == ids[i]
     time = 3.0
-    caf.initialize(time)
+    caf.initialize(currentTime=time)
     refd.data = func_vec_3(refd.data, *(coords + (time, theta)))
     for i in xrange(caf.nbComponents):
         assert np.allclose(cafd[i], refd.data[i])
@@ -263,7 +263,7 @@ def test_analytical_field_7():
         assert np.allclose(cafd[i], refd.data[i])
         assert id(cafd.data[i]) == ids[i]
     time = 3.0
-    caf.initialize(time)
+    caf.initialize(currentTime=time)
     refd.data = func_vec_5(refd.data, *(coords + (time, theta)))
     for i in xrange(caf.nbComponents):
         assert np.allclose(cafd[i], refd.data[i])
@@ -294,7 +294,7 @@ def test_analytical_field_8():
         assert np.allclose(cafd[i], refd.data[i])
         assert id(cafd.data[i]) == ids[i]
     time = 3.0
-    caf.initialize(time)
+    caf.initialize(currentTime=time)
     refd.data = func_vec_6(refd.data, *(coords + (time, theta)))
     for i in xrange(caf.nbComponents):
         assert np.allclose(cafd[i], refd.data[i])
diff --git a/HySoP/hysop/operator/tests/test_penalization.py b/HySoP/hysop/operator/tests/test_penalization.py
index 04421545e..239821a08 100644
--- a/HySoP/hysop/operator/tests/test_penalization.py
+++ b/HySoP/hysop/operator/tests/test_penalization.py
@@ -1,10 +1,10 @@
 # -*- coding: utf-8 -*-
 import parmepy as pp
-from parmepy.domain.obstacle.cylinder2d import SemiCylinder2D
+from parmepy.domain.obstacle.disk import HalfDisk, Disk
 from parmepy.operator.penalization import Penalization
 from parmepy.fields.continuous import Field
 from parmepy.problem.simulation import Simulation
-from parmepy.domain.obstacle.plates import Plates
+from parmepy.domain.obstacle.planes import PlaneBoundaries
 
 
 def computeVel(x, y, ):
@@ -16,15 +16,18 @@ def computeScal(x, y, t):
 
 
 def testPenalScal2D():
-
+    """
+    Penalization in 2D, obstacles = semi-cylinder (disk indeed ...)
+    and a plate, field=scalar.
+    """
     nb = 33
     Lx = Ly = 2
     dom = pp.Box(dimension=2, length=[Lx, Ly], origin=[-1., -1.])
     resol = [nb, nb]
     scal = Field(domain=dom, name='Scalar')
-    hcyl = SemiCylinder2D(dom, position=[0., 0.], radius=0.5,
-                          porousLayers=[0.13])
-    plates = Plates(dom, normal_dir=1, epsilon=0.1)
+    hcyl = HalfDisk(dom, position=[0., 0.], radius=0.5,
+                    porousLayers=[0.13])
+    plates = PlaneBoundaries(dom, normal_dir=1, thickness=0.1)
     penal = Penalization(scal, [hcyl, plates], coeff=[1e6, 10],
                          resolutions={scal: resol})
     penal.discretize()
@@ -40,6 +43,10 @@ def testPenalScal2D():
 
 
 def testPenalScal3D():
+    """
+    Penalization in 3D, obstacles = hemi-sphere and plates.
+    and a plate.
+    """
     from parmepy.domain.obstacle.sphere import HemiSphere
 
     nb = 33
@@ -50,7 +57,7 @@ def testPenalScal3D():
     hsphere = HemiSphere(dom, position=[0., 0., 0.],
                          radius=0.5, porousLayers=[0.13])
 
-    plates = Plates(dom, normal_dir=0, epsilon=0.1)
+    plates = PlaneBoundaries(dom, normal_dir=0, thickness=0.1)
     penal = Penalization(scal, [hsphere, plates], coeff=[1e6, 10],
                          resolutions={scal: resol})
     penal.discretize()
@@ -65,6 +72,60 @@ def testPenalScal3D():
     assert scalRef.norm() == scal.norm()
 
 
+def testPenalScal2D_2():
+    """
+    Penalization in 2D, obstacles = cylinder and plate.
+    Field=scalar.
+    """
+    nb = 33
+    Lx = Ly = 2
+    dom = pp.Box(dimension=2, length=[Lx, Ly], origin=[-1., -1.])
+    resol = [nb, nb]
+    scal = Field(domain=dom, name='Scalar')
+    cyl = Disk(dom, position=[0., 0.], radius=0.5,
+               porousLayers=[0.13])
+    penal = Penalization(scal, [cyl], coeff=[1e6, 10],
+                         resolutions={scal: resol})
+    penal.discretize()
+    penal.setUp()
+    topo = scal.discreteFields.keys()[0]
+    scd = scal.discreteFields[topo]
+    scd[0] = 128
+    penal.apply(Simulation())
+    scalRef = Field(domain=dom, name='ScalarRef')
+    scalRef.discretize(topo)
+    scalRef.load('ref_scal2D_PenalSphere', fieldname='ScalarRef')
+    assert scalRef.norm() == scal.norm()
+
+
+def testPenalScal3D_2():
+    """
+    Penalization in 3D, obstacles = Sphere.
+    """
+    from parmepy.domain.obstacle.sphere import Sphere
+
+    nb = 33
+    Lx = Ly = Lz = 2
+    dom = pp.Box(dimension=3, length=[Lx, Ly, Lz], origin=[-1., -1., -1.])
+    resol = [nb, nb, nb]
+    scal = Field(domain=dom, name='Scalar')
+    sphere = Sphere(dom, position=[0., 0., 0.],
+                    radius=0.5, porousLayers=[0.13])
+
+    penal = Penalization(scal, [sphere], coeff=[1e6, 10],
+                         resolutions={scal: resol})
+    penal.discretize()
+    penal.setUp()
+    topo = scal.discreteFields.keys()[0]
+    scd = scal.discreteFields[topo]
+    scd[0] = 128
+    penal.apply(Simulation())
+    scalRef = Field(domain=dom, name='ScalarRef')
+    scd = scalRef.discretize(topo)
+    scalRef.load('ref_scal3D_PenalSphere', fieldname='ScalarRef')
+    assert scalRef.norm() == scal.norm()
+
+
 def testPenalVec2D():
 
     nb = 33
@@ -72,9 +133,9 @@ def testPenalVec2D():
     dom = pp.Box(dimension=2, length=[Lx, Ly], origin=[-1., -1.])
     resol = [nb, nb]
     velo = Field(domain=dom, name='Velo', isVector=True)
-    hcyl = SemiCylinder2D(dom, position=[0., 0.], radius=0.5,
-                          porousLayers=[0.13])
-    plates = Plates(dom, normal_dir=1, epsilon=0.1)
+    hcyl = HalfDisk(dom, position=[0., 0.], radius=0.5,
+                    porousLayers=[0.13])
+    plates = PlaneBoundaries(dom, normal_dir=1, thickness=0.1)
     penal = Penalization(velo, [hcyl, plates], coeff=[1e6, 10],
                          resolutions={velo: resol})
     penal.discretize()
@@ -101,7 +162,7 @@ def testPenalVec3D():
     hsphere = HemiSphere(dom, position=[0., 0., 0.],
                          radius=0.5, porousLayers=[0.13])
 
-    plates = Plates(dom, normal_dir=0, epsilon=0.1)
+    plates = PlaneBoundaries(dom, normal_dir=0, thickness=0.1)
     penal = Penalization(velo, [hsphere, plates], coeff=[1e6, 10],
                          resolutions={velo: resol})
     penal.discretize()
diff --git a/HySoP/hysop/tools/numpywrappers.py b/HySoP/hysop/tools/numpywrappers.py
index 7a65f9680..11d8011ba 100644
--- a/HySoP/hysop/tools/numpywrappers.py
+++ b/HySoP/hysop/tools/numpywrappers.py
@@ -4,7 +4,7 @@
 
 Tools to build numpy arrays based on parmepy setup (float type ...)
 """
-from parmepy.constants import PARMES_REAL, ORDER
+from parmepy.constants import PARMES_REAL, ORDER, PARMES_INTEGER, PARMES_INDEX
 import numpy as np
 
 
@@ -45,3 +45,58 @@ def copy(tab):
     @return a copy of tab with the same ordering type (fortran or C) as tab.
     """
     return tab.copy(order='A')
+
+
+def asarray(tab):
+    """
+    @param tab : a numpy array
+    @return a numpy array in the Parmes
+    predefined ordering type (fortran or C).
+    """
+    return np.asarray(tab, order=ORDER, dtype=tab.dtype)
+
+
+def realarray(tab):
+    """
+    @param tab : a numpy array
+    @return a numpy array in the Parmes of real,
+    precision set by PARMES_REAL,
+    predefined ordering type (fortran or C)
+    """
+    return np.asarray(tab, order=ORDER, dtype=PARMES_REAL)
+
+
+def indexarray(tab):
+    """
+    return an array of int, int type define by PARMES_INDEX
+    """
+    return np.asarray(tab, order=ORDER, dtype=PARMES_INDEX)
+
+
+def integerarray(tab):
+    """
+    return an array of int, int type define by PARMES_INDEX
+    """
+    return np.asarray(tab, order=ORDER, dtype=PARMES_INTEGER)
+
+
+def abs(tab):
+    """
+    @param tab : a numpy array
+    @return a numpy array in the Parmes
+    predefined ordering type (fortran or C), equal to abs(tab)
+    """
+    return np.abs(tab, order=ORDER, dtype=tab.dtype)
+
+
+def sum(tab, dtype=PARMES_REAL):
+    """
+    """
+    return np.sum(tab, dtype=dtype)
+
+
+def prod(tab, dtype=PARMES_REAL):
+    """
+    """
+    return np.prod(tab, dtype=dtype)
+
diff --git a/HySoP/src/client_data.f90 b/HySoP/src/client_data.f90
index 407862880..1c6944ad3 100755
--- a/HySoP/src/client_data.f90
+++ b/HySoP/src/client_data.f90
@@ -26,7 +26,7 @@ module client_data
   !> to activate (or not) screen output
   logical,parameter :: verbose = .True.
   !> i (sqrt(-1) ...)
-  complex(C_DOUBLE_COMPLEX),parameter :: Icmplx = cmplx(0._mk,1._mk)
+  complex(C_DOUBLE_COMPLEX), parameter :: Icmplx = cmplx(0._mk,1._mk, kind=mk)
   !> tolerance used to compute error
   real(mk), parameter :: tolerance = 1e-12
   
-- 
GitLab