diff --git a/hysop/backend/device/opencl/opencl_tools.py b/hysop/backend/device/opencl/opencl_tools.py
index 2be6854855f6c8be265a429d21eaf5ef8b380552..155e8609a0acb7fcf8bb8aa62032631500854c0a 100644
--- a/hysop/backend/device/opencl/opencl_tools.py
+++ b/hysop/backend/device/opencl/opencl_tools.py
@@ -15,16 +15,16 @@
 from hysop.deps import sys, os, re, itertools, hashlib, pickle, gzip, hashlib
 
 from hysop import __VERBOSE__, __KERNEL_DEBUG__, \
-                  __DEFAULT_PLATFORM_ID__, __DEFAULT_DEVICE_ID__
+    __DEFAULT_PLATFORM_ID__, __DEFAULT_DEVICE_ID__
 from hysop import vprint
 
 from hysop.backend.device.opencl import cl, __OPENCL_PROFILE__
-from hysop.constants        import np, Precision, DeviceType, HYSOP_REAL
-from hysop.core.mpi         import MPI, main_comm
+from hysop.constants import np, Precision, DeviceType, HYSOP_REAL
+from hysop.core.mpi import MPI
 from hysop.tools.parameters import MPIParams
-from hysop.tools.io_utils   import IO
+from hysop.tools.io_utils import IO
 from hysop.tools.decorators import static_vars
-from hysop.tools.types      import check_instance, to_tuple, first_not_None
+from hysop.tools.types import check_instance, to_tuple, first_not_None
 
 
 class KernelError(Exception):
@@ -32,7 +32,7 @@ class KernelError(Exception):
     Custom exception for kernel errors.
     """
     def __init__(msg, err):
-        super(KernelError,self).__init__(msg)
+        super(KernelError, self).__init__(msg)
         self.msg = msg
         self.err = err
 
@@ -91,12 +91,12 @@ def convert_device_type(device_type):
     check_instance(device_type, DeviceType)
 
     conversion = {
-            DeviceType.ALL:         cl.device_type.ALL,
-            DeviceType.ACCELERATOR: cl.device_type.ACCELERATOR,
-            DeviceType.CPU:         cl.device_type.CPU,
-            DeviceType.GPU:         cl.device_type.GPU,
-            #DeviceType.CUSTOM:     cl.device_type.CUSTOM,
-            DeviceType.DEFAULT:     cl.device_type.DEFAULT,
+        DeviceType.ALL:         cl.device_type.ALL,
+        DeviceType.ACCELERATOR: cl.device_type.ACCELERATOR,
+        DeviceType.CPU:         cl.device_type.CPU,
+        DeviceType.GPU:         cl.device_type.GPU,
+        # DeviceType.CUSTOM:     cl.device_type.CUSTOM,
+        DeviceType.DEFAULT:     cl.device_type.DEFAULT,
     }
 
     if device_type not in conversion.keys():
@@ -105,6 +105,7 @@ def convert_device_type(device_type):
 
     return conversion[device_type]
 
+
 def convert_precision(precision):
     """
     Converts a hysop precision to corresponding numpy dtype.
@@ -114,20 +115,20 @@ def convert_precision(precision):
     check_instance(precision, Precision)
 
     if precision == Precision.SAME:
-        msg='Cannot convert Precision.SAME to numpy dtype.'
+        msg = 'Cannot convert Precision.SAME to numpy dtype.'
         raise ValueError(msg)
     if precision == Precision.QUAD:
-        msg= 'Numpy does not support the 128-bit IEEE quad precision data type.'
+        msg = 'Numpy does not support the 128-bit IEEE quad precision data type.'
         raise RuntimeError(msg)
 
-    #TODO when long double will be supported check if device has np.float96 or np.float128 long doubles
+    # TODO when long double will be supported check if device has np.float96 or np.float128 long doubles
     # (ie padded to 3*32bits or 2*64bits)
     conversion = {
-            Precision.DEFAULT:     HYSOP_REAL,
-            Precision.LONG_DOUBLE: np.longdouble,
-            Precision.DOUBLE:      np.float64,
-            Precision.FLOAT:       np.float32,
-            Precision.HALF:        np.float16,
+        Precision.DEFAULT:     HYSOP_REAL,
+        Precision.LONG_DOUBLE: np.longdouble,
+        Precision.DOUBLE:      np.float64,
+        Precision.FLOAT:       np.float32,
+        Precision.HALF:        np.float16,
     }
 
     if precision not in conversion.keys():
@@ -136,19 +137,21 @@ def convert_precision(precision):
 
     return conversion[precision]
 
+
 @static_vars(opencl_environments=dict())
-def get_device_number(platform_id = None):
+def get_device_number(platform_id=None):
     platform_id = first_not_None(platform_id, __DEFAULT_PLATFORM_ID__)
     platform = get_platform(platform_id, strict=True)
     return len(platform.get_devices())
 
+
 @static_vars(opencl_environments=dict())
 def get_or_create_opencl_env(mpi_params,
-        platform_id = None,
-        device_id   = None,
-        device_type = None,
-        gl_sharing=False,
-        **kargs):
+                             platform_id=None,
+                             device_id=None,
+                             device_type=None,
+                             gl_sharing=False,
+                             **kargs):
     """
     Create or an OpenClEnvironment from given parameters if it does not already exists.
     All environements are kept alive (cached) in a dictionary local to this
@@ -156,7 +159,7 @@ def get_or_create_opencl_env(mpi_params,
     """
 
     platform_id = first_not_None(platform_id, __DEFAULT_PLATFORM_ID__)
-    device_id   = first_not_None(device_id,   __DEFAULT_DEVICE_ID__)
+    device_id = first_not_None(device_id,   __DEFAULT_DEVICE_ID__)
     device_type = first_not_None(device_type, DeviceType.ALL)
 
     check_instance(mpi_params, MPIParams)
@@ -173,8 +176,8 @@ def get_or_create_opencl_env(mpi_params,
 
     from hysop.backend.device.opencl.opencl_env import OpenClEnvironment
     env = OpenClEnvironment(platform_id=platform_id, device_id=device_id,
-            device_type=device_type, gl_sharing=gl_sharing, mpi_params=mpi_params,
-            **kargs)
+                            device_type=device_type, gl_sharing=gl_sharing, mpi_params=mpi_params,
+                            **kargs)
 
     opencl_envs[key] = env
 
@@ -197,7 +200,6 @@ def create_queue(ctx, props=None):
     return queue
 
 
-
 def get_work_items(resolution, vector_width=1):
     """Set the optimal work-item number and OpenCL space index.
 
@@ -254,7 +256,6 @@ def get_work_items(resolution, vector_width=1):
     return workItemNumber, gwi, lwi
 
 
-
 def get_platform(platform_id, strict):
     """Returns an OpenCL platform
     platform_id : int
@@ -270,17 +271,18 @@ def get_platform(platform_id, strict):
     except IndexError:
         plist = cl.get_platforms()
         platform = plist[0]
-        msg= ' Incorrect platform_id : {}'.format(platform_id)
-        msg+=' Only {} are available.'.format(len(plist))
+        msg = ' Incorrect platform_id : {}'.format(platform_id)
+        msg += ' Only {} are available.'.format(len(plist))
         if strict:
             msg += '\n FATAL ERROR: Strict platform_id condition violated.\n'
             print(msg)
             raise
         else:
-            msg+=' --> getting default platform {}.'.format(platform.name)
+            msg += ' --> getting default platform {}.'.format(platform.name)
             vprint(msg)
     return platform
 
+
 def get_device(platform, device_id, device_type, strict):
     """Returns an OpenCL device
 
@@ -306,7 +308,7 @@ def get_device(platform, device_id, device_type, strict):
         else:
             device = platform.get_devices()[device_id]
     except IndexError:
-        msg  = '\nIncorrect device_id {}'.format(device_id)
+        msg = '\nIncorrect device_id {}'.format(device_id)
         msg += '\nThere is only {} devices available.'.format(len(platform.get_devices()))
         if strict:
             msg += '\nFATAL ERROR: Strict device_id condition violated.\n'
@@ -317,7 +319,7 @@ def get_device(platform, device_id, device_type, strict):
             vprint(msg)
             device = platform.get_devices()[0]
     except:
-        msg  = '\nCould not get a device of type {}'.format(device_type)
+        msg = '\nCould not get a device of type {}'.format(device_type)
         if strict:
             msg += '\nFATAL ERROR: Strict device_type condition violated.\n'
             vprint(msg)
@@ -371,8 +373,6 @@ def get_context(devices, gl_sharing):
     return ctx
 
 
-
-
 def parse_opencl_file(f, n=8, nb_remesh_components=1):
     """Parse a file containing OpenCL sources.
 
diff --git a/hysop/domain/domain.py b/hysop/domain/domain.py
index 5daa7ec03588215046fafdc2ba0eb60383de6799..faf00b76e4a449f26aa6e9e6ca24e1111b32a8c2 100644
--- a/hysop/domain/domain.py
+++ b/hysop/domain/domain.py
@@ -14,6 +14,7 @@ from hysop.tools.numpywrappers import npw
 from hysop.symbolic.frame import SymbolicFrame
 from hysop.deps import hashlib, np
 
+
 class DomainView(TaggedObjectView):
     """Abstract base class for views on domains. """
     __metaclass__ = ABCMeta
@@ -39,6 +40,7 @@ class DomainView(TaggedObjectView):
     def _get_domain(self):
         """Return the domain on which the view is on."""
         return self._domain
+
     def _get_topology_state(self):
         """Return the topology state altering this domain view."""
         return self._topology_state
@@ -46,12 +48,15 @@ class DomainView(TaggedObjectView):
     def _get_dim(self):
         """Return the dimension of the domain."""
         return self._domain._dim
+
     def _get_parent_comm(self):
         """Return the parent communicator used to create this domain."""
         return self._domain._parent_comm
+
     def _get_parent_rank(self):
         """Return the rank of the process in the parent communicator."""
         return self._domain._parent_rank
+
     def _get_task_comm(self):
         """
         Return the communicator that owns the current process.
@@ -59,9 +64,11 @@ class DomainView(TaggedObjectView):
         the parent communicator by colors (proc_tasks).
         """
         return self._domain._task_comm
+
     def _get_task_rank(self):
         """Return the rank of the process in the task communicator."""
         return self._domain._task_rank
+
     def _get_machine_comm(self):
         """
         Return the communicator that owns the current process.
@@ -69,18 +76,22 @@ class DomainView(TaggedObjectView):
         the parent communicator by machine name.
         """
         return self._domain._machine_comm
+
     def _get_machine_rank(self):
         """Return the rank of the process in the machine communicator."""
         return self._domain._machine_rank
+
     def _get_proc_tasks(self):
         """Return mapping between mpi process rank and task identifier."""
         return self._domain._proc_tasks
+
     def _get_registered_topologies(self):
         """
         Return the dictionary of all topologies already built on this domain,
         with topology ids as keys and :class:`~hysop.topology.topology.Topology` as values.
         """
         return self._domain._registered_topologies
+
     def _get_frame(self):
         """Get symbolic frame associated to this domain."""
         return self._domain._frame
@@ -88,7 +99,7 @@ class DomainView(TaggedObjectView):
     def task_on_proc(self, parent_rank):
         """Get task identifier for a given mpi process (parent communicator rank)."""
         if parent_rank >= len(self._domain._proc_tasks):
-            msg='Unknown rank {} in parent communicator.'.format(parent_rank)
+            msg = 'Unknown rank {} in parent communicator.'.format(parent_rank)
             raise ValueError(msg)
         return self._domain._proc_tasks[parent_rank]
 
@@ -100,11 +111,11 @@ class DomainView(TaggedObjectView):
         """Test if the current process corresponds to param task."""
         if isinstance(params, MPIParams):
             task_id = params.task_id
-        elif isinstance(params, (int,long,npw.integer)):
+        elif isinstance(params, (int, long, npw.integer)):
             task_id = params
         else:
-            msg='Could not extract task_id from type {}.'
-            msg=msg.format(type(params))
+            msg = 'Could not extract task_id from type {}.'
+            msg = msg.format(type(params))
             raise TypeError(msg)
         return (self.current_task() == task_id)
 
@@ -127,14 +138,14 @@ class DomainView(TaggedObjectView):
     def __eq__(self, other):
         if not isinstance(other, DomainView):
             return NotImplemented
-        eq =  (self._domain is other._domain)
+        eq = (self._domain is other._domain)
         eq &= (self._topology_state == other._topology_state)
         return eq
 
     def __ne__(self, other):
         if not isinstance(other, DomainView):
             return NotImplemented
-        eq =  (self._domain is other._domain)
+        eq = (self._domain is other._domain)
         eq &= (self._topology_state == other._topology_state)
         return not eq
 
@@ -211,7 +222,7 @@ class Domain(RegisteredObject):
 
         dim = int(dim)
         parent_comm = parent_comm or main_comm
-        proc_tasks = proc_tasks or [HYSOP_DEFAULT_TASK_ID,] * parent_comm.Get_size()
+        proc_tasks = proc_tasks or [HYSOP_DEFAULT_TASK_ID, ] * parent_comm.Get_size()
         proc_tasks = npw.asdimarray(proc_tasks)
         assert proc_tasks.size == parent_comm.Get_size()
 
@@ -222,8 +233,8 @@ class Domain(RegisteredObject):
         check_instance(parent_comm, MPI.Intracomm)
         check_instance(proc_tasks, npw.ndarray, dtype=HYSOP_DIM)
 
-        obj = super(Domain,cls).__new__(cls, dim=dim, parent_comm=parent_comm,
-                proc_tasks=proc_tasks, tag_prefix='d', **kwds)
+        obj = super(Domain, cls).__new__(cls, dim=dim, parent_comm=parent_comm,
+                                         proc_tasks=proc_tasks, tag_prefix='d', **kwds)
 
         if not obj.obj_initialized:
             obj.__initialize(dim, parent_comm, proc_tasks)
@@ -235,7 +246,7 @@ class Domain(RegisteredObject):
         parent_rank = parent_comm.Get_rank()
         parent_size = parent_comm.Get_size()
 
-        if proc_tasks is main_comm:
+        if len(set(proc_tasks)) == 1:
             task_comm = parent_comm.Dup()
         else:
             assert len(proc_tasks) == parent_size
@@ -247,7 +258,8 @@ class Domain(RegisteredObject):
         # Build a per-machine communicator in order to get a rank on local machines
         # Split accoring to machine name hashed and converted to integer (strings generally differs only from a single character)
         machine_comm = parent_comm.Split(
-            color=np.int32(int(hashlib.md5(MPI.Get_processor_name()).hexdigest(),16)%np.iinfo(np.int32).max),
+            color=np.int32(int(hashlib.md5(MPI.Get_processor_name()).hexdigest(), 16) %
+                           np.iinfo(np.int32).max),
             key=parent_rank)
         machine_rank = machine_comm.Get_rank()
 
diff --git a/hysop/fields/field_requirements.py b/hysop/fields/field_requirements.py
index 6752c5d80cfbab96a57bf0067af23c2c6df91340..fa71bb10a76c4d08d6272a709a4479445201d870 100644
--- a/hysop/fields/field_requirements.py
+++ b/hysop/fields/field_requirements.py
@@ -15,7 +15,8 @@ from hysop.fields.discrete_field import DiscreteScalarField
 #   0: no debug logs
 #   1: topo creation summary for each field
 #   2: topo creation details for all discrete field requirements
-TOPO_CREATION_DEBUG_LEVEL=0
+TOPO_CREATION_DEBUG_LEVEL = 0
+
 
 def gprint(*args, **kwds):
     level = kwds.pop('level', 2)
@@ -33,33 +34,33 @@ class DiscreteFieldRequirements(object):
     _registered_requirements = set()
 
     def __init__(self, operator, variables, field,
-            min_ghosts=None,
-            max_ghosts=None,
-            can_split=None,
-            memory_order=None,
-            axes=None,
-            _register=True,
-            **kwds):
+                 min_ghosts=None,
+                 max_ghosts=None,
+                 can_split=None,
+                 memory_order=None,
+                 axes=None,
+                 _register=True,
+                 **kwds):
 
         if _register:
             key = (id(operator), id(variables), id(field))
             if key in self._registered_requirements:
-                msg='Operator {} has already registered requirements for field {} '
-                msg+='to variables id {}.'
-                msg=msg.format(operator.name, field.name, id(variables))
+                msg = 'Operator {} has already registered requirements for field {} '
+                msg += 'to variables id {}.'
+                msg = msg.format(operator.name, field.name, id(variables))
                 raise RuntimeError(msg)
             else:
                 if __DEBUG__:
-                    msg='Operator {} registered requirements of field {} to variables id {}.'
-                    msg=msg.format(operator.name, field.name, id(variables))
+                    msg = 'Operator {} registered requirements of field {} to variables id {}.'
+                    msg = msg.format(operator.name, field.name, id(variables))
                     print(msg)
                 self._registered_requirements.update(key)
 
         super(DiscreteFieldRequirements, self).__init__(**kwds)
         check_instance(field, ScalarField)
         check_instance(operator, ComputationalGraphNode, allow_none=(not _register))
-        check_instance(variables, dict, keys=ScalarField, 
-                values=(Topology,TopologyDescriptor), allow_none=(not _register))
+        check_instance(variables, dict, keys=ScalarField,
+                       values=(Topology, TopologyDescriptor), allow_none=(not _register))
 
         self._operator = operator
         self._field = field
@@ -82,24 +83,24 @@ class DiscreteFieldRequirements(object):
 
     def copy(self):
         return DiscreteFieldRequirements(operator=self._operator,
-                variables=self._variables, field=self._field,
-                min_ghosts = self._min_ghosts,
-                max_ghosts = self._max_ghosts,
-                can_split  = self._can_split,
-                memory_order = self._memory_order,
-                axes       = self._axes)
+                                         variables=self._variables, field=self._field,
+                                         min_ghosts=self._min_ghosts,
+                                         max_ghosts=self._max_ghosts,
+                                         can_split=self._can_split,
+                                         memory_order=self._memory_order,
+                                         axes=self._axes)
 
     def is_default(self):
         return (self == self._default())
 
     def _default(self):
         return DiscreteFieldRequirements(self._operator, self._variables, self._field,
-                _register=False)
+                                         _register=False)
 
     def __eq__(self, other):
-        eq  = (self.operator  is other.operator)
+        eq = (self.operator is other.operator)
         eq &= (self.variables is other.variables)
-        eq &= (self.field     is other.field)
+        eq &= (self.field is other.field)
         eq &= (self.min_ghosts == other.min_ghosts).all()
         eq &= (self.max_ghosts == other.max_ghosts).all()
         eq &= (self.can_split == other.can_split).all()
@@ -109,8 +110,8 @@ class DiscreteFieldRequirements(object):
 
     def __hash__(self):
         return id(self.operator) ^ id(self.variables) ^ id(self.field) ^ \
-                hash((to_tuple(self.min_ghosts), to_tuple(self.max_ghosts),
-                    self.memory_order, self.tstates))
+            hash((to_tuple(self.min_ghosts), to_tuple(self.max_ghosts),
+                  self.memory_order, self.tstates))
 
     def ghost_str(self, array):
         inf = u'+\u221e'
@@ -120,16 +121,17 @@ class DiscreteFieldRequirements(object):
     def __str__(self):
         return u'{:15s}  {:>10s}<=ghosts<{:<10s}  memory_order={}  can_split={}  tstates={}'.format(
                u'{}::{}'.format(getattr(self.operator, 'name', u'UnknownOperator'),
-                                getattr(self.field, 'name', u'UnknownField')), 
+                                getattr(self.field, 'name', u'UnknownField')),
                self.ghost_str(self.min_ghosts),
                self.ghost_str(self.max_ghosts+1),
                self.memory_order,
-               u''+str(self.can_split.view(np.int8)), 
-               u'[{}]'.format(u','.join(u''+str(ts) for ts in self.tstates)) \
-                       if self.tstates else u'ANY').encode('utf-8')
+               u''+str(self.can_split.view(np.int8)),
+               u'[{}]'.format(u','.join(u''+str(ts) for ts in self.tstates))
+            if self.tstates else u'ANY').encode('utf-8')
 
     def get_axes(self):
         return self._axes
+
     def set_axes(self, axes):
         check_instance(axes, tuple, values=tuple, allow_none=True)
         self._axes = axes
@@ -143,77 +145,85 @@ class DiscreteFieldRequirements(object):
 
     def get_memory_order(self):
         return self._memory_order
+
     def set_memory_order(self, memory_order):
         check_instance(memory_order, MemoryOrdering, allow_none=True)
         if (memory_order is None):
-           memory_order = MemoryOrdering.ANY
-        assert memory_order in (MemoryOrdering.C_CONTIGUOUS, 
+            memory_order = MemoryOrdering.ANY
+        assert memory_order in (MemoryOrdering.C_CONTIGUOUS,
                                 MemoryOrdering.F_CONTIGUOUS,
                                 MemoryOrdering.ANY), memory_order
         self._memory_order = memory_order
 
     def get_min_ghosts(self):
         return self._min_ghosts
+
     def set_min_ghosts(self, min_ghosts):
         self._min_ghosts = np.asarray(to_list(min_ghosts)
-                if (min_ghosts is not None) else [0]*self.workdim)
+                                      if (min_ghosts is not None) else [0]*self.workdim)
         assert self.min_ghosts.size == self.workdim
 
     def get_max_ghosts(self):
         return self._max_ghosts
+
     def set_max_ghosts(self, max_ghosts):
         self._max_ghosts = np.asarray(to_list(max_ghosts)
-                if (max_ghosts is not None) else [np.inf]*self.workdim)
+                                      if (max_ghosts is not None) else [np.inf]*self.workdim)
         assert self.max_ghosts.size == self.workdim
 
     def get_can_split(self):
         return self._can_split
+
     def set_can_split(self, can_split):
-        self._can_split  = np.asarray(to_list(can_split)
-                if (can_split is not None) else [1]*self.workdim, dtype=np.bool_)
-        assert self.can_split.size  == self.workdim
+        self._can_split = np.asarray(to_list(can_split)
+                                     if (can_split is not None) else [1]*self.workdim, dtype=np.bool_)
+        assert self.can_split.size == self.workdim
 
     def get_work_dim(self):
         return self._work_dim
+
     def get_operator(self):
         return self._operator
+
     def get_field(self):
         return self._field
+
     def get_variables(self):
         return self._variables
+
     def get_topology_descriptor(self):
         return self._topology_descriptor
 
-    can_split  = property(get_can_split, set_can_split)
+    can_split = property(get_can_split, set_can_split)
     min_ghosts = property(get_min_ghosts, set_min_ghosts)
     max_ghosts = property(get_max_ghosts, set_max_ghosts)
-    axes       = property(get_axes, set_axes)
-    tstates    = property(get_tstates)
+    axes = property(get_axes, set_axes)
+    tstates = property(get_tstates)
     memory_order = property(get_memory_order, set_memory_order)
 
-    workdim   = property(get_work_dim)
-    operator  = property(get_operator)
-    field     = property(get_field)
+    workdim = property(get_work_dim)
+    operator = property(get_operator)
+    field = property(get_field)
     variables = property(get_variables)
     topology_descriptor = property(get_topology_descriptor)
 
     def is_compatible_with(self, other, i=None):
         assert self.field == other.field, 'field mismatch.'
         if isinstance(other, DiscreteFieldRequirements):
-            others=set([other])
+            others = set([other])
         elif isinstance(other, MultiFieldRequirements):
             if self.topology_descriptor in other.requirements.keys():
-                others=other.requirements[self.topology_descriptor]
+                others = other.requirements[self.topology_descriptor]
             else:
                 return True
         else:
-            msg='Unknown type {}.'.format(other.__class__)
+            msg = 'Unknown type {}.'.format(other.__class__)
             raise TypeError(msg)
 
         for other in others:
             assert self.workdim == other.workdim, 'workdim mismatch.'
             assert self.topology_descriptor == other.topology_descriptor, \
-                    'topology_descriptor mismatch.'
+                'topology_descriptor mismatch.'
             if (self.field.lboundaries != other.field.lboundaries).any():
                 if (i is not None):
                     gprint("      => lboundaries mismatch with subgroup {}".format(i))
@@ -231,7 +241,7 @@ class DiscreteFieldRequirements(object):
                     gprint("      => ghosts incompatibility with subgroup {}".format(i))
                 return False
 
-            multiprocess = (main_size>1)
+            multiprocess = (main_size > 1)
             if multiprocess and not (other.can_split * self.can_split).any():
                 if (i is not None):
                     gprint("      => splitting incompatibility with subgroup {}".format(i))
@@ -256,30 +266,30 @@ class DiscreteFieldRequirements(object):
         topology = topology or self.variables[self.field]
         check_instance(topology, Topology)
         if topology.domain.dim != self.field.dim:
-            msg='{} Dimension mismatch between field and topology.\n field={}d, topology={}d.'
-            msg=msg.format(self._header, self.field.dim, topology.domain.dim)
+            msg = '{} Dimension mismatch between field and topology.\n field={}d, topology={}d.'
+            msg = msg.format(self._header, self.field.dim, topology.domain.dim)
             raise RuntimeError(msg)
         if (topology.grid_resolution != self.topology_descriptor.grid_resolution).any():
-            msg='{} Grid resolution mismatch between requirement and topology.\n '
-            msg+=' requirement={}\n topology={}'
-            msg=msg.format(self._header,
-                    self.topology_descriptor.grid_resolution,
-                    topology.grid_resolution)
+            msg = '{} Grid resolution mismatch between requirement and topology.\n '
+            msg += ' requirement={}\n topology={}'
+            msg = msg.format(self._header,
+                             self.topology_descriptor.grid_resolution,
+                             topology.grid_resolution)
             raise RuntimeError(msg)
         if (topology.global_resolution != self.topology_descriptor.global_resolution).any():
-            msg='{} Global resolution mismatch between requirement and topology.\n '
-            msg+=' requirement={}\n topology={}'
-            msg=msg.format(self._header,
-                    self.topology_descriptor.global_resolution,
-                    topology.global_resolution)
+            msg = '{} Global resolution mismatch between requirement and topology.\n '
+            msg += ' requirement={}\n topology={}'
+            msg = msg.format(self._header,
+                             self.topology_descriptor.global_resolution,
+                             topology.global_resolution)
             raise RuntimeError(msg)
         if (topology.ghosts < self.min_ghosts).any():
-            msg='{} min ghosts constraint was not met.\n min={}, actual={}.'
-            msg=msg.format(self._header, self.min_ghosts, topology.ghosts)
+            msg = '{} min ghosts constraint was not met.\n min={}, actual={}.'
+            msg = msg.format(self._header, self.min_ghosts, topology.ghosts)
             raise RuntimeError(msg)
         if (topology.ghosts > self.max_ghosts).any():
-            msg='{} max ghosts constraint was not met.\n max={}, actual={}.'
-            msg=msg.format(self._header, self.max_ghosts, topology.ghosts)
+            msg = '{} max ghosts constraint was not met.\n max={}, actual={}.'
+            msg = msg.format(self._header, self.max_ghosts, topology.ghosts)
             raise RuntimeError(msg)
 
     def check_discrete_topology_state(self, state):
@@ -288,16 +298,16 @@ class DiscreteFieldRequirements(object):
         if (self.memory_order is not None) and \
            (self.memory_order is not MemoryOrdering.ANY) and \
            (self.memory_order != state.memory_order):
-            msg='{} memory_order mismatch between requirement and topology state.\n reqs={}, state={}.'
-            msg=msg.format(self._header, self.memory_order, state.memory_order)
+            msg = '{} memory_order mismatch between requirement and topology state.\n reqs={}, state={}.'
+            msg = msg.format(self._header, self.memory_order, state.memory_order)
             raise RuntimeError(msg)
         if (self.tstates is not None) and \
                 (state.tstate not in self.tstates):
-            msg='{} Transposition state mismatch between requirement and topology state.\n'
-            msg+=' reqs=[{}], state={}.'
-            msg=msg.format(self._header,
-                    ','.join([str(x) for x in self.tstates]),
-                    state.tstate)
+            msg = '{} Transposition state mismatch between requirement and topology state.\n'
+            msg += ' reqs=[{}], state={}.'
+            msg = msg.format(self._header,
+                             ','.join([str(x) for x in self.tstates]),
+                             state.tstate)
             raise RuntimeError(msg)
 
     def check_state(self, dfield):
@@ -312,12 +322,11 @@ class DiscreteFieldRequirements(object):
         """
         assert isinstance(topology, Topology)
         assert not isinstance(self.variables[self.field], Topology) \
-                or (self.variables[self.field] == topology)
+            or (self.variables[self.field] == topology)
         self.check_topology(topology)
         self.variables[self.field] = topology
 
 
-
 class MultiFieldRequirements(object):
     __slots__ = ('field', 'requirements', 'built', 'common_can_split')
 
@@ -328,7 +337,7 @@ class MultiFieldRequirements(object):
         self.common_can_split = None
 
     def copy(self):
-        requirements = { k:v.copy() for (k,v) in self.requirements.items() }
+        requirements = {k: v.copy() for (k, v) in self.requirements.items()}
         obj = MultiFieldRequirements(field=self.field)
         obj.built = self.built
         obj.requirements = requirements
@@ -350,10 +359,10 @@ class MultiFieldRequirements(object):
             if (update_req is None):
                 continue
             if isinstance(update_req, MultiFieldRequirements):
-                tds  = update_req.requirements.keys()
+                tds = update_req.requirements.keys()
                 reqs = update_req.requirements.values()
             else:
-                tds  = [update_req.topology_descriptor]
+                tds = [update_req.topology_descriptor]
                 reqs = [[update_req]]
 
             for td, req in zip(tds, reqs):
@@ -365,17 +374,17 @@ class MultiFieldRequirements(object):
             return
 
         gprint("  1) SPLITTING REQUIREMENTS IN COMPATIBLE SUBGROUPS:")
-        multi_process = (main_size>1)
+        multi_process = (self.requirements.keys()[0].mpi_params.size > 1)
         splitted_reqs = self._split(multi_process)
 
-
         gprint("  2) DETERMINING COMMON CARTESIAN TOPOLOGY SPLITTING AXES (if possible):")
         can_split = 1
         for (i, compatible_reqs) in enumerate(splitted_reqs):
             subgroup_can_split = compatible_reqs.common_can_split
             can_split *= subgroup_can_split
             gprint('    *subgroup{}.can_split = {}'.format(i, subgroup_can_split))
-        gprint('     => Global available split directions for field {} are {}'.format(self.field.name, can_split))
+        gprint('     => Global available split directions for field {} are {}'.format(
+            self.field.name, can_split))
         if can_split.any():
             gprint('     => Enforcing this configuration for Cartesian topology creation.')
             for compatible_reqs in splitted_reqs:
@@ -383,7 +392,6 @@ class MultiFieldRequirements(object):
         else:
             gprint('     => No common splitting axes found between all subgroups.')
 
-        
         gprint("  3) BUILDING TOPOLOGIES:")
         all_topologies = set()
         for (i, compatible_reqs) in enumerate(splitted_reqs):
@@ -404,7 +412,7 @@ class MultiFieldRequirements(object):
     def all_compatible(self):
         for topology_descriptor in self.requirements:
             requirements = self.requirements[topology_descriptor]
-            assert len(requirements)>0
+            assert len(requirements) > 0
             for req0, req1 in it.combinations(requirements, 2):
                 if not req0.is_compatible_with(req1):
                     return False
@@ -416,13 +424,14 @@ class MultiFieldRequirements(object):
             for req in sorted(lreq, key=lambda x: str(x)):
                 gprint("    *Requirement {}".format(req))
                 ok = False
-                for (i,multi_reqs) in enumerate(sub_field_requirements):
+                for (i, multi_reqs) in enumerate(sub_field_requirements):
                     if req.is_compatible_with(multi_reqs, i):
                         multi_reqs.update(req)
-                        ok=True
+                        ok = True
                         break
                 if not ok:
-                    gprint("      => this requirement is not compatible with any existing requirement group, creating a new one (subgroup {}).".format(len(sub_field_requirements)))
+                    gprint("      => this requirement is not compatible with any existing requirement group, creating a new one (subgroup {}).".format(
+                        len(sub_field_requirements)))
                     new_group = MultiFieldRequirements(self.field)
                     new_group.update(req)
                     sub_field_requirements.append(new_group)
@@ -436,7 +445,7 @@ class MultiFieldRequirements(object):
                 can_split = npw.integer_ones(shape=(dim,))
                 for req in reqs:
                     if isinstance(req.topology_descriptor, Topology):
-                        can_split *= (req.topology_descriptor.proc_shape>1)
+                        can_split *= (req.topology_descriptor.proc_shape > 1)
                     else:
                         can_split *= req.can_split
                 assert (not multi_process) or can_split.any()
@@ -457,7 +466,7 @@ class MultiFieldRequirements(object):
                 known_topologies = set()
             unknown_topologies = set()
 
-            ghosts    = npw.integer_zeros(shape=(dim,))
+            ghosts = npw.integer_zeros(shape=(dim,))
             can_split = npw.integer_ones(shape=(dim,))
 
             for req in reqs:
@@ -470,9 +479,10 @@ class MultiFieldRequirements(object):
                     unknown_topologies.add(req)
 
             for req in unknown_topologies:
-                gprint('       >choose or create topology from {} existing topologies:'.format(len(known_topologies)), end='')
+                gprint('       >choose or create topology from {} existing topologies:'.format(
+                    len(known_topologies)), end='')
                 topo = req.topology_descriptor.choose_or_create_topology(known_topologies,
-                        ghosts=ghosts, cutdirs=self.common_can_split)
+                                                                         ghosts=ghosts, cutdirs=self.common_can_split)
                 if (topo in known_topologies):
                     gprint(" choosed existing topology {}.".format(topo.pretty_tag))
                 else:
@@ -480,32 +490,34 @@ class MultiFieldRequirements(object):
                     known_topologies.add(topo)
                 req.set_and_check_topology(topo)
             all_topologies.update(known_topologies)
-        
+
         return all_topologies
 
+
 class OperatorFieldRequirements(object):
 
     __slots__ = ('_input_field_requirements', '_output_field_requirements')
 
     def __init__(self, input_field_requirements=None,
-                       output_field_requirements=None,
-                       **kwds):
+                 output_field_requirements=None,
+                 **kwds):
         super(OperatorFieldRequirements, self).__init__(**kwds)
-        
-        check_instance(input_field_requirements, dict, keys=ScalarField, 
-                values=MultiFieldRequirements, allow_none=True)
+
+        check_instance(input_field_requirements, dict, keys=ScalarField,
+                       values=MultiFieldRequirements, allow_none=True)
         self._input_field_requirements = input_field_requirements or dict()
-        
-        check_instance(output_field_requirements, dict, keys=ScalarField, 
-                values=MultiFieldRequirements, allow_none=True)
+
+        check_instance(output_field_requirements, dict, keys=ScalarField,
+                       values=MultiFieldRequirements, allow_none=True)
         self._output_field_requirements = output_field_requirements or dict()
 
     def get_input_field_requirements(self):
         return self._input_field_requirements
+
     def get_output_field_requirements(self):
         return self._output_field_requirements
 
-    input_field_requirements  = property(get_input_field_requirements)
+    input_field_requirements = property(get_input_field_requirements)
     output_field_requirements = property(get_output_field_requirements)
 
     def update(self, requirements):
@@ -514,9 +526,9 @@ class OperatorFieldRequirements(object):
         self.update_outputs(requirements._output_field_requirements)
 
     def update_inputs(self, input_field_requirements):
-        check_instance(input_field_requirements, dict, keys=ScalarField, 
-                values=(DiscreteFieldRequirements,MultiFieldRequirements,type(None)))
-        for ifield,ireqs in input_field_requirements.iteritems():
+        check_instance(input_field_requirements, dict, keys=ScalarField,
+                       values=(DiscreteFieldRequirements, MultiFieldRequirements, type(None)))
+        for ifield, ireqs in input_field_requirements.iteritems():
             if (ireqs is not None):
                 ireqs = ireqs.copy()
             if not isinstance(ireqs, MultiFieldRequirements):
@@ -529,9 +541,9 @@ class OperatorFieldRequirements(object):
                 self._input_field_requirements[ifield] = ireqs
 
     def update_outputs(self, output_field_requirements):
-        check_instance(output_field_requirements, dict, keys=ScalarField, 
-                values=(DiscreteFieldRequirements,MultiFieldRequirements))
-        for ofield,oreqs in output_field_requirements.iteritems():
+        check_instance(output_field_requirements, dict, keys=ScalarField,
+                       values=(DiscreteFieldRequirements, MultiFieldRequirements))
+        for ofield, oreqs in output_field_requirements.iteritems():
             oreqs = oreqs.copy()
             if not isinstance(oreqs, MultiFieldRequirements):
                 _oreqs = oreqs
@@ -547,9 +559,9 @@ class OperatorFieldRequirements(object):
         """
         Iterates over (field, topology_descriptor, field_requirement) for all input requirements.
         """
-        for (field,freqs) in self.input_field_requirements.iteritems():
+        for (field, freqs) in self.input_field_requirements.iteritems():
             freqs = freqs.requirements
-            for (td,reqs) in freqs.iteritems():
+            for (td, reqs) in freqs.iteritems():
                 for req in reqs:
                     yield field, td, req
 
@@ -557,9 +569,9 @@ class OperatorFieldRequirements(object):
         """
         Iterates over (field, topology_descriptor, field_requirement) for all output requirements.
         """
-        for (field,freqs) in self.output_field_requirements.iteritems():
+        for (field, freqs) in self.output_field_requirements.iteritems():
             freqs = freqs.requirements
-            for (td,reqs) in freqs.iteritems():
+            for (td, reqs) in freqs.iteritems():
                 for req in reqs:
                     yield (field, td, req)
 
@@ -584,31 +596,32 @@ class OperatorFieldRequirements(object):
         """
         check_instance(field, ScalarField)
         if field not in field_requirements:
-            msg='No requirements found for field {}.'.format(field.name)
+            msg = 'No requirements found for field {}.'.format(field.name)
             raise AttributeError(msg)
         freqs = field_requirements[field].requirements
-        if len(freqs.keys())>1:
-            msg='Multiple topology descriptors are present for field {}.'.format(field.name)
+        if len(freqs.keys()) > 1:
+            msg = 'Multiple topology descriptors are present for field {}.'.format(field.name)
             raise RuntimeError(msg)
-        if len(freqs.keys())==0:
-            msg='No topology descriptors are present for field {}.'.format(field.name)
+        if len(freqs.keys()) == 0:
+            msg = 'No topology descriptors are present for field {}.'.format(field.name)
             raise RuntimeError(msg)
         td = freqs.keys()[0]
         reqs = freqs[td]
-        if len(reqs)>1:
-            msg='Multiple requirements are present for field {}.'.format(field.name)
+        if len(reqs) > 1:
+            msg = 'Multiple requirements are present for field {}.'.format(field.name)
             raise RuntimeError(msg)
         return (td, next(iter(reqs)))
 
     def get_input_requirement(self, field):
         return self._get_requirement(field, self._input_field_requirements)
+
     def get_output_requirement(self, field):
         return self._get_requirement(field, self._output_field_requirements)
 
     @debug
     def build_topologies(self):
-        fields = set(self._input_field_requirements.keys() 
-                  + self._output_field_requirements.keys())
+        fields = set(self._input_field_requirements.keys()
+                     + self._output_field_requirements.keys())
         # enforce deterministic iteration
         for field in sorted(fields, key=lambda x: '{}::{}'.format(x.name, x.short_description())):
             reqs = MultiFieldRequirements(field)
diff --git a/hysop/operator/base/spectral_operator.py b/hysop/operator/base/spectral_operator.py
index a886da3484aeffd22bd097c5143bb040e13533e0..d3d3396b37d7c5ea436f53cfa63144711736b9e9 100644
--- a/hysop/operator/base/spectral_operator.py
+++ b/hysop/operator/base/spectral_operator.py
@@ -1,27 +1,27 @@
-
-import warnings, math, os
+import warnings
+import math
+import os
 import sympy as sm
 import numpy as np
 
-from hysop.constants         import BoundaryCondition, BoundaryExtension, TransformType, \
-                                    MemoryOrdering, TranspositionState, Backend, \
-                                    SpectralTransformAction, Implementation
-from hysop.tools.misc        import compute_nbytes
-from hysop.tools.types       import check_instance, to_tuple, first_not_None, to_set
-from hysop.tools.decorators  import debug
-from hysop.tools.units       import bytes2str
+from hysop.constants import BoundaryCondition, BoundaryExtension, TransformType, \
+    MemoryOrdering, TranspositionState, Backend, \
+    SpectralTransformAction, Implementation
+from hysop.tools.misc import compute_nbytes
+from hysop.tools.types import check_instance, to_tuple, first_not_None, to_set
+from hysop.tools.decorators import debug
+from hysop.tools.units import bytes2str
 from hysop.tools.numerics import is_fp, is_complex, complex_to_float_dtype, \
-                                 float_to_complex_dtype, determine_fp_types
+    float_to_complex_dtype, determine_fp_types
 from hysop.tools.io_utils import IOParams
 from hysop.tools.spectral_utils import SpectralTransformUtils as STU, EnergyPlotter, EnergyDumper
-from hysop.core.mpi import main_rank
 from hysop.core.arrays.array_backend import ArrayBackend
 from hysop.core.arrays.array import Array
 from hysop.core.memory.memory_request import MemoryRequest, OperatorMemoryRequests
 from hysop.core.graph.graph import not_initialized as _not_initialized, \
-                                   initialized     as _initialized,     \
-                                   discretized     as _discretized,     \
-                                   ready           as _ready
+    initialized as _initialized,     \
+    discretized as _discretized,     \
+    ready as _ready
 from hysop.core.graph.computational_node_frontend import ComputationalGraphNodeFrontend
 from hysop.topology.cartesian_descriptor import CartesianTopologyDescriptors
 from hysop.parameters.buffer_parameter import BufferParameter
@@ -30,21 +30,21 @@ from hysop.symbolic.array import SymbolicArray
 from hysop.symbolic.spectral import WaveNumber, SpectralTransform, AppliedSpectralTransform
 from hysop.numerics.fft.fft import FFTI, simd_alignment, is_byte_aligned, HysopFFTWarning
 
+
 class SpectralComputationalGraphNodeFrontend(ComputationalGraphNodeFrontend):
-        
+
     def __init__(self, implementation, **kwds):
         impl, extra_kwds = self.get_actual_implementation(implementation=implementation, **kwds)
         for k in extra_kwds.keys():
             assert k not in kwds
         kwds.update(extra_kwds)
         super(SpectralComputationalGraphNodeFrontend, self).__init__(
-                implementation=impl, **kwds)
+            implementation=impl, **kwds)
 
-    
     @classmethod
-    def get_actual_implementation(cls, implementation, 
-            enforce_implementation=True, cl_env=None,
-            **kwds):
+    def get_actual_implementation(cls, implementation,
+                                  enforce_implementation=True, cl_env=None,
+                                  **kwds):
         """
         Parameters
         ----------
@@ -69,26 +69,26 @@ class SpectralComputationalGraphNodeFrontend(ComputationalGraphNodeFrontend):
         Notes
         -----
         clFFT (gpyFFT) support for OpenCL CPU devices is a bit neglected.
-        This function allows to override the implementation target from 
+        This function allows to override the implementation target from
         OPENCL to PYTHON when a CPU OpenCL environment is given as input.
 
-        By default, the CPU FFT target is FFTW (pyFFTW) which has much 
+        By default, the CPU FFT target is FFTW (pyFFTW) which has much
         better support (multithreaded fftw + multithreaded numba).
-        
-        OpenCL buffers are mapped to host memory with enqueue_map_buffer 
-        (this makes the assumption thal all OpenCL buffers have been allocated 
+
+        OpenCL buffers are mapped to host memory with enqueue_map_buffer
+        (this makes the assumption that all OpenCL buffers have been allocated
         with zero-copy capability in the target OpenCL platform).
         """
         implementation = first_not_None(implementation, cls.default_implementation())
         assert implementation in cls.implementations()
-        extra_kwds = { 'enable_opencl_host_buffer_mapping': False }
+        extra_kwds = {'enable_opencl_host_buffer_mapping': False}
         if (enforce_implementation):
             return (implementation, extra_kwds)
         if (implementation == Implementation.OPENCL):
             if (cl_env is None):
-                msg='enforce_implementation was set to False, '
-                msg+='implementation is OPENCL, but no cl_env was passed '
-                msg+='to check if the device is of type CPU.'
+                msg = 'enforce_implementation was set to False, '
+                msg += 'implementation is OPENCL, but no cl_env was passed '
+                msg += 'to check if the device is of type CPU.'
                 raise RuntimeError(msg)
             from hysop.backend.device.opencl import cl
             if (cl_env.device.type == cl.device_type.CPU):
@@ -97,30 +97,29 @@ class SpectralComputationalGraphNodeFrontend(ComputationalGraphNodeFrontend):
                     from hysop.backend.host.host_operator import HostOperator, OpenClMappable
                     op_cls = cls.implementations()[Implementation.PYTHON]
                     if not issubclass(op_cls, HostOperator):
-                        msg='Operator {} is not a HostOperator.'
-                        msg=msg.format(op_cls)
+                        msg = 'Operator {} is not a HostOperator.'
+                        msg = msg.format(op_cls)
                         raise TypeError(msg)
                     if not issubclass(op_cls, OpenClMappable):
-                        msg='Operator {} does not support host to device opencl buffer mapping.'
-                        msg=msg.format(op_cls)
+                        msg = 'Operator {} does not support host to device opencl buffer mapping.'
+                        msg = msg.format(op_cls)
                         raise TypeError(msg)
-                    assert Backend.HOST   in op_cls.supported_backends()
+                    assert Backend.HOST in op_cls.supported_backends()
                     assert Backend.OPENCL in op_cls.supported_backends()
                     return (Implementation.PYTHON, extra_kwds)
         return (implementation, extra_kwds)
 
 
-
 class SpectralOperatorBase(object):
     """
     Common implementation interface for spectral based operators.
     """
-    
-    min_fft_alignment = simd_alignment #FFTW SIMD.
-    
+
+    min_fft_alignment = simd_alignment  # FFTW SIMD.
+
     @debug
-    def __init__(self, fft_interface=None, fft_interface_kwds=None, 
-                        **kwds):
+    def __init__(self, fft_interface=None, fft_interface_kwds=None,
+                 **kwds):
         """
         Initialize a spectral operator base.
         kwds: dict
@@ -131,21 +130,20 @@ class SpectralOperatorBase(object):
         check_instance(fft_interface, FFTI, allow_none=True)
         check_instance(fft_interface_kwds, dict, allow_none=True)
 
-        self.transform_groups = {} # dict[tag] -> SpectralTransformGroup
-        
+        self.transform_groups = {}  # dict[tag] -> SpectralTransformGroup
+
         # those values will be deleted at discretization
         self._fft_interface = fft_interface
         self._fft_interface_kwds = fft_interface_kwds
 
-
     @property
     def backend(self):
-        msg='FFT array backend depends on the transform group. Please use op.transform_group[key].backend instead.'
+        msg = 'FFT array backend depends on the transform group. Please use op.transform_group[key].backend instead.'
         raise AttributeError(msg)
 
     @property
     def FFTI(self):
-        msg='FFT interface depends on the transform group. Please use op.transform_group[key].FFTI instead.'
+        msg = 'FFT interface depends on the transform group. Please use op.transform_group[key].FFTI instead.'
         raise AttributeError(msg)
 
     def new_transform_group(self, tag=None, mem_tag=None):
@@ -155,9 +153,9 @@ class SpectralOperatorBase(object):
         backward field transforms as well as symbolic expressions and
         wave_numbers symbols.
         """
-        n    = len(self.transform_groups)
-        tag  = first_not_None(tag, 'transform_group_{}'.format(n))
-        msg  = 'Tag "{}" has already been registered.'
+        n = len(self.transform_groups)
+        tag = first_not_None(tag, 'transform_group_{}'.format(n))
+        msg = 'Tag "{}" has already been registered.'
         assert (tag not in self.transform_groups), msg.format(tag)
         trg = SpectralTransformGroup(op=self, tag=tag, mem_tag=mem_tag)
         self.transform_groups[tag] = trg
@@ -174,36 +172,35 @@ class SpectralOperatorBase(object):
         super(SpectralOperatorBase, self).initialize(**kwds)
         for tg in self.transform_groups.values():
             tg.initialize(**kwds)
-    
+
     def get_field_requirements(self):
         requirements = super(SpectralOperatorBase, self).get_field_requirements()
-        
+
         for is_input, (field, td, req) in requirements.iter_requirements():
             req.memory_order = MemoryOrdering.C_CONTIGUOUS
             req.axes = (TranspositionState[field.dim].default_axes(),)
             can_split = req.can_split
-            can_split[-1]  = False
+            can_split[-1] = False
             can_split[:-1] = True
             req.can_split = can_split
         return requirements
-    
+
     @debug
     def get_node_requirements(self):
         node_reqs = super(SpectralOperatorBase, self).get_node_requirements()
         node_reqs.enforce_unique_topology_shape = True
         return node_reqs
-    
-    
+
     def discretize(self, **kwds):
         super(SpectralOperatorBase, self).discretize(**kwds)
-        
+
         comm = self.mpi_params.comm
         size = self.mpi_params.size
-        if (size>1):
-            msg= '\n[FATAL ERROR] Spectral operators do not support the MPI interface yet.'
-            msg+='\nPlease use the Fortran FFTW interface if possible or '
-            msg+='use another discretization method for operator {}.\n'
-            msg=msg.format(self.node_tag)
+        if (size > 1):
+            msg = '\n[FATAL ERROR] Spectral operators do not support the MPI interface yet.'
+            msg += '\nPlease use the Fortran FFTW interface if possible or '
+            msg += 'use another discretization method for operator {}.\n'
+            msg = msg.format(self.node_tag)
             print msg
             raise NotImplementedError
 
@@ -214,14 +211,14 @@ class SpectralOperatorBase(object):
                           **kwds)
         del self._fft_interface
         del self._fft_interface_kwds
-    
+
     def get_mem_requests(self, **kwds):
         memory_requests = {}
         for tg in self.transform_groups.values():
-            for (k,v) in tg.get_mem_requests(**kwds).iteritems():
-                check_instance(k, str) # temporary buffer name
-                check_instance(v, int) # nbytes
-                K = (k,tg.backend)
+            for (k, v) in tg.get_mem_requests(**kwds).iteritems():
+                check_instance(k, str)  # temporary buffer name
+                check_instance(v, int)  # nbytes
+                K = (k, tg.backend)
                 if K in memory_requests:
                     memory_requests[K] = max(memory_requests[K], v)
                 else:
@@ -230,16 +227,15 @@ class SpectralOperatorBase(object):
 
     def get_work_properties(self, **kwds):
         requests = super(SpectralOperatorBase, self).get_work_properties(**kwds)
-        for ((k,backend),v) in self.get_mem_requests(**kwds).iteritems():
+        for ((k, backend), v) in self.get_mem_requests(**kwds).iteritems():
             check_instance(k, str)
             check_instance(v, (int, long))
-            if (v>0):
-                mrequest = MemoryRequest(backend=backend, size=v, 
-                                            alignment=self.min_fft_alignment) 
+            if (v > 0):
+                mrequest = MemoryRequest(backend=backend, size=v,
+                                         alignment=self.min_fft_alignment)
                 requests.push_mem_request(request_identifier=k, mem_request=mrequest)
         return requests
 
-
     def setup(self, work):
         self.allocate_tmp_fields(work)
         for tg in self.transform_groups.values():
@@ -247,25 +243,24 @@ class SpectralOperatorBase(object):
         super(SpectralOperatorBase, self).setup(work=work)
 
 
-
 class SpectralTransformGroup(object):
     """
-    Build and check a FFT transform group.    
-    
-    This object tells the planner to build a full forward transform for all given 
-    forward_fields. The planner will also build backward transforms for all specified 
+    Build and check a FFT transform group.
+
+    This object tells the planner to build a full forward transform for all given
+    forward_fields. The planner will also build backward transforms for all specified
     backward_fields.
-        
+
     The object will also automatically build per-axis wavenumbers up to certain powers,
     extracted from user provided sympy expressions.
 
-    Finally boundary condition (ie. transform type) compability will be checked by 
+    Finally boundary condition (i.e. transform type) compatibility will be checked by
     using user provided sympy expressions.
-        
+
     Calling a forward transform ensures that forward source field is read-only
     and not destroyed.
     """
-    DEBUG=False
+    DEBUG = False
 
     def __init__(self, op, tag, mem_tag, **kwds):
         """
@@ -276,7 +271,7 @@ class SpectralTransformGroup(object):
         tag: str
             A tag to identify this transform group.
             Each tag can only be registered once in a SpectralOperatorBase instance.
-        
+
         Attributes:
         -----------
         tag: str
@@ -287,7 +282,7 @@ class SpectralTransformGroup(object):
             Forward fields to be planned for transform, according to Field boundary conditions.
         backward_fields: list of backward SpectralTransform
             Backward fields to be planned for transform, according to Field boundary conditions.
-        
+
         Notes
         -----
         All forward_fields and backward_fields have to live on the same domain and
@@ -308,21 +303,24 @@ class SpectralTransformGroup(object):
         self._wave_numbers = set()
         self._indexed_wave_numbers = {}
         self._expressions = ()
-        
+
         self._discrete_wave_numbers = None
 
     def indexed_wavenumbers(self, *wave_numbers):
         return tuple(self._indexed_wave_numbers[Wi] for Wi in wave_numbers)
-    
+
     @property
     def op(self):
         return self._op
+
     @property
     def tag(self):
         return self._tag
+
     @property
     def mem_tag(self):
         return self._mem_tag
+
     @property
     def name(self):
         return self._tag
@@ -330,9 +328,11 @@ class SpectralTransformGroup(object):
     @property
     def initialized(self):
         return self._op.initialized
+
     @property
     def discretized(self):
         return self._op.discretized
+
     @property
     def ready(self):
         return self._op.ready
@@ -340,26 +340,29 @@ class SpectralTransformGroup(object):
     @property
     def forward_fields(self):
         return map(lambda x: x[0], self._forward_transforms.keys())
+
     @property
     def backward_fields(self):
         return map(lambda x: x[0], self._backward_transforms.keys())
+
     @property
     def forward_transforms(self):
         return self._forward_transforms
+
     @property
     def backward_transforms(self):
         return self._backward_transforms
-    
+
     @_not_initialized
-    def initialize(self, 
-                    fft_granularity=None, 
-                    fft_concurrent_plans=1, 
-                    fft_plan_workload=1,
-                    **kwds):
+    def initialize(self,
+                   fft_granularity=None,
+                   fft_concurrent_plans=1,
+                   fft_plan_workload=1,
+                   **kwds):
         """
         Should be called after all require_forward_transform and require_backward_transform
         calls.
-        
+
         Parameters
         ----------
         fft_granularity: int, optional
@@ -369,37 +372,37 @@ class SpectralTransformGroup(object):
               3:   iterate over 3d blocks  (slices of dimension 3)
               n-1: iterate over hyperplans (slices of dimension n-1)
               n :  no iteration, the plan will handle the whole domain.
-            Contiguous buffers with sufficient alignement are allocated. 
+            Contiguous buffers with sufficient alignment are allocated.
             Default value is: 1 in 1D else n-1 (ie. hyperplans)
         fft_plan_workload: int, optional, defaults to 1
-            The number of blocks of dimension fft_granularity that a 
+            The number of blocks of dimension fft_granularity that a
             single plan will handle at once. Default is one block.
         fft_concurrent_plans: int, optional, defaults to 1
-            Number of concurrent plans. 
+            Number of concurrent plans.
             Should be 1 for HOST based FFT interfaces.
             Should be at least 3 for DEVICE based FFT interface if the device
             has two async copy engine (copy, transform, copy).
         """
-        (domain, dim) = self.check_fields(self.forward_fields, self.backward_fields) 
+        (domain, dim) = self.check_fields(self.forward_fields, self.backward_fields)
 
-        fft_granularity = first_not_None(fft_granularity, max(1,dim-1))
+        fft_granularity = first_not_None(fft_granularity, max(1, dim-1))
         check_instance(fft_granularity, int, minval=1, maxval=dim)
         check_instance(fft_concurrent_plans, int, minval=1)
         check_instance(fft_plan_workload, int, minval=1)
 
-        self._fft_granularity      = fft_granularity
+        self._fft_granularity = fft_granularity
         self._fft_concurrent_plans = fft_concurrent_plans
-        self._fft_plan_workload    = fft_plan_workload
+        self._fft_plan_workload = fft_plan_workload
 
         self._domain = domain
         self._dim = dim
-    
+
     @_initialized
-    def discretize(self, fft_interface, fft_interface_kwds, 
-            enable_opencl_host_buffer_mapping, **kwds):
+    def discretize(self, fft_interface, fft_interface_kwds,
+                   enable_opencl_host_buffer_mapping, **kwds):
         backends = set()
         grid_resolutions = set()
-        compute_axes   = set()
+        compute_axes = set()
         compute_shapes = set()
         compute_dtypes = set()
         for fwd in self.forward_transforms.values():
@@ -418,87 +421,88 @@ class SpectralTransformGroup(object):
             compute_dtypes.add(bwd.input_dtype)
 
         def format_error(data):
-            return '\n  *'+ '\n  *'.join(str(x) for x in data)
-        msg='Fields do not live on the same backend:'+format_error(backends)
-        assert len(backends)==1, msg
-        msg='Fields grid size mismatch:'+format_error(grid_resolutions)
-        assert len(grid_resolutions)==1, msg
-        assert len(compute_axes)==1,   'Fields axes mismatch:'+format_error(compute_axes)
-        assert len(compute_shapes)==1, 'Fields shape mismatch:'+format_error(compute_shapes)
-        assert len(compute_dtypes)==1, 'Fields data type mismatch.'+format_error(compute_dtypes)
-
-        backend         = next(iter(backends))
+            return '\n  *' + '\n  *'.join(str(x) for x in data)
+        msg = 'Fields do not live on the same backend:'+format_error(backends)
+        assert len(backends) == 1, msg
+        msg = 'Fields grid size mismatch:'+format_error(grid_resolutions)
+        assert len(grid_resolutions) == 1, msg
+        assert len(compute_axes) == 1,   'Fields axes mismatch:'+format_error(compute_axes)
+        assert len(compute_shapes) == 1, 'Fields shape mismatch:'+format_error(compute_shapes)
+        assert len(compute_dtypes) == 1, 'Fields data type mismatch.'+format_error(compute_dtypes)
+
+        backend = next(iter(backends))
         grid_resolution = next(iter(grid_resolutions))
-        compute_axes    = next(iter(compute_axes))
-        compute_shape   = next(iter(compute_shapes))
-        compute_dtype   = next(iter(compute_dtypes))
-        
+        compute_axes = next(iter(compute_axes))
+        compute_shape = next(iter(compute_shapes))
+        compute_dtype = next(iter(compute_dtypes))
+
         if enable_opencl_host_buffer_mapping:
-            msg='Trying to enable opencl device to host buffer mapping on {} target.'
+            msg = 'Trying to enable opencl device to host buffer mapping on {} target.'
             assert (backend.kind is Backend.OPENCL), msg.format(backend.kind)
 
         if (fft_interface is None):
             fft_interface_kwds = first_not_None(fft_interface_kwds, {})
-            fft_interface = FFTI.default_interface_from_backend(backend, 
-                    enable_opencl_host_buffer_mapping=enable_opencl_host_buffer_mapping,
-                    **fft_interface_kwds)
+            fft_interface = FFTI.default_interface_from_backend(backend,
+                                                                enable_opencl_host_buffer_mapping=enable_opencl_host_buffer_mapping,
+                                                                **fft_interface_kwds)
         else:
             assert not interface_kwds, 'FFT interface has already been built.'
-        
+
         check_instance(fft_interface, FFTI)
-        fft_interface.check_backend(backend, 
-                enable_opencl_host_buffer_mapping=enable_opencl_host_buffer_mapping)
+        fft_interface.check_backend(backend,
+                                    enable_opencl_host_buffer_mapping=enable_opencl_host_buffer_mapping)
 
         buffer_backend = backend
-        host_backend   = backend.host_array_backend
-        backend        = fft_interface.backend
-        
+        host_backend = backend.host_array_backend
+        backend = fft_interface.backend
+
         discrete_wave_numbers = {}
         for wn in self._wave_numbers:
             (idx, freqs, nd_freqs) = self.build_wave_number(self._domain, grid_resolution,
-                                                          backend, wn,
-                                                          compute_dtype, compute_axes, compute_shape)
-            self._indexed_wave_numbers[wn].indexed_object.to_backend(backend.kind).bind_memory_object(freqs)
+                                                            backend, wn,
+                                                            compute_dtype, compute_axes, compute_shape)
+            self._indexed_wave_numbers[wn].indexed_object.to_backend(
+                backend.kind).bind_memory_object(freqs)
             self._indexed_wave_numbers[wn].index.bind_axes(compute_axes)
             discrete_wave_numbers[wn] = (idx, freqs, nd_freqs)
         self._discrete_wave_numbers = discrete_wave_numbers
 
         self.buffer_backend = buffer_backend
-        self.host_backend   = host_backend
-        self.backend        = backend
-        self.FFTI           = fft_interface
-        
+        self.host_backend = host_backend
+        self.backend = backend
+        self.FFTI = fft_interface
+
         self.grid_resolution = grid_resolution
-        self.compute_axes  = compute_axes
+        self.compute_axes = compute_axes
         self.compute_shape = compute_shape
         self.compute_dtype = compute_dtype
-       
+
     @classmethod
     def build_wave_number(cls, domain, grid_resolution,
-                            backend, wave_number, 
-                            compute_dtype, compute_axes, 
-                            compute_resolution):
+                          backend, wave_number,
+                          compute_dtype, compute_axes,
+                          compute_resolution):
 
-        dim    = domain.dim
+        dim = domain.dim
         length = domain.length
 
         ftype, ctype = determine_fp_types(compute_dtype)
-        
-        axis      = wave_number.axis
+
+        axis = wave_number.axis
         transform = wave_number.transform
-        exponent  = wave_number.exponent
-        
+        exponent = wave_number.exponent
+
         idx = compute_axes.index(axis)
 
         L = domain.length[axis]
         N = grid_resolution[axis]
-        
+
         freqs = STU.compute_wave_numbers(transform=transform, N=N, L=L, ftype=ftype)
         freqs = freqs**exponent
         if STU.is_R2R(transform):
             sign_offset = STU.is_cosine(transform)
             freqs *= (-1)**((exponent+sign_offset)//2)
-        
+
         assert exponent != 0, 'exponent cannot be zero.'
         assert exponent > 0, 'negative powers not implemented yet.'
         if is_complex(freqs.dtype) and (exponent % 2 == 0):
@@ -509,14 +513,14 @@ class SpectralTransformGroup(object):
         backend_freqs[...] = freqs
         freqs = backend_freqs
 
-        nd_shape = [1,]*dim
+        nd_shape = [1, ]*dim
         nd_shape[idx] = freqs.size
         nd_shape = tuple(nd_shape)
         nd_freqs = freqs.reshape(nd_shape)
 
         if cls.DEBUG:
             print
-            print 'BUILD WAVENUMBER' 
+            print 'BUILD WAVENUMBER'
             print 'backend:       {}'.format(backend.kind)
             print 'grid_shape:    {}'.format(grid_resolution)
             print 'length:        {}'.format(length)
@@ -539,40 +543,40 @@ class SpectralTransformGroup(object):
             print '----'
 
         return (idx, freqs, nd_freqs)
-    
+
     @_discretized
     def get_mem_requests(self, **kwds):
         memory_requests = {}
         for fwd in self.forward_transforms.values():
             mem_requests = fwd.get_mem_requests(**kwds)
-            check_instance(mem_requests, dict, keys=str, values=(int,long))
-            for (k,v) in mem_requests.iteritems():
+            check_instance(mem_requests, dict, keys=str, values=(int, long))
+            for (k, v) in mem_requests.iteritems():
                 if k in memory_requests:
                     memory_requests[k] = max(memory_requests[k], v)
                 else:
                     memory_requests[k] = v
         for bwd in self.backward_transforms.values():
             mem_requests = bwd.get_mem_requests(**kwds)
-            check_instance(mem_requests, dict, keys=str, values=(int,long))
-            for (k,v) in mem_requests.iteritems():
+            check_instance(mem_requests, dict, keys=str, values=(int, long))
+            for (k, v) in mem_requests.iteritems():
                 if k in memory_requests:
                     memory_requests[k] = max(memory_requests[k], v)
                 else:
                     memory_requests[k] = v
         return memory_requests
-    
+
     @_discretized
     def setup(self, work):
         for fwd in self.forward_transforms.values():
-            fwd.setup(work=work)            
+            fwd.setup(work=work)
         for bwd in self.backward_transforms.values():
-            bwd.setup(work=work)            
+            bwd.setup(work=work)
 
     @_not_initialized
     def require_forward_transform(self, field, axes=None, transform_tag=None,
-                                    custom_output_buffer=None, action=None, 
-                                    dump_energy=None, plot_energy=None, 
-                                    **kwds):
+                                  custom_output_buffer=None, action=None,
+                                  dump_energy=None, plot_energy=None,
+                                  **kwds):
         """
         Tells this SpectralTransformGroup to build a forward SpectralTransform
         on given field. Only specified axes are transformed.
@@ -581,8 +585,8 @@ class SpectralTransformGroup(object):
             Periodic:               Periodic extension
             Homogeneous Dirichlet:  Odd extension
             Homogeneous Neumann:    Even extension
-        
-        This leads to 5 possible transforms for each axis (periodic-periodic, even-even, 
+
+        This leads to 5 possible transforms for each axis (periodic-periodic, even-even,
         odd-odd, even-odd, odd-even).
 
         Forward transforms used for each axis per extension pair:
@@ -593,15 +597,15 @@ class SpectralTransformGroup(object):
             *Neumann-Neumann     (EVEN-EVEN): DCT-I
 
         This method will return the SpectralTransform object associated to field.
-        
+
         Parameters
         ----------
-        field: ScalarField 
+        field: ScalarField
             The source field to be transformed.
         axes: array-like of integers
             The axes to be transformed.
         transform_tag: str
-            Extra tag to register the forward transform (a single scalar field can be 
+            Extra tag to register the forward transform (a single scalar field can be
             transformed multiple times). Default tag is 'default'.
         custom_output_buffer: None or str, optional
             Force this transform to output in one of the two common transform group buffers.
@@ -612,7 +616,7 @@ class SpectralTransformGroup(object):
             FFT operators to save one buffer for the last forward transform.
             Specifying 'auto' will tell the planner to choose either 'B0' or 'B1'.
         action: BackwardTransfromAction, optional
-            Defaults to SpectralTransformAction.OVERWRITE which will overwrite the 
+            Defaults to SpectralTransformAction.OVERWRITE which will overwrite the
             compute slices of the output buffer.
             SpectralTransformAction.ACCUMULATE will sum the current content of the buffer
             with the result of the forward transform.
@@ -632,7 +636,7 @@ class SpectralTransformGroup(object):
 
 
             dump_energy        plot_energy      result
-                 None                 None         nothing  
+                 None                 None         nothing
                  iop0                  0           energy is computed and dumped every iop0.frequency iterations
                   0                   iop1         energy is computed and dumped every iop1.frequency iterations
                  iop0                 iop1         energy is computed every iop1.frequency and iop2.frequency iterations
@@ -640,7 +644,7 @@ class SpectralTransformGroup(object):
                                                              plotted  every iop1.frequency
 
             About frequency:
-                if (frequency<0)  no dump 
+                if (frequency<0)  no dump
                 if (frequency==0) dump at time of interests and last iteration
                 if (frequency>=0) dump at time of interests, last iteration and every freq iterations
 
@@ -652,50 +656,52 @@ class SpectralTransformGroup(object):
         check_instance(transform_tag, str)
         check_instance(action, SpectralTransformAction)
         transforms = SpectralTransform(field=field, axes=axes, forward=True)
-        msg='Field {} with axes {} and transform_tag "{}" has already been registered for forward transform.'
+        msg = 'Field {} with axes {} and transform_tag "{}" has already been registered for forward transform.'
         if field.is_tensor:
             planned_transforms = field.new_empty_array()
             for (idx, f) in field.nd_iter():
-                assert (f,axes,transform_tag) not in self._forward_transforms, msg.format(f.name, axes, transform_tag)
+                assert (f, axes, transform_tag) not in self._forward_transforms, msg.format(
+                    f.name, axes, transform_tag)
                 assert f in self._op.input_fields
                 assert f is transforms[idx].field
                 assert transforms[idx].is_forward
                 planned_transforms[idx] = PlannedSpectralTransform(transform_group=self,
-                                                    tag=self.tag + '_' + transform_tag + '_' + f.name,
-                                                    symbolic_transform=transforms[idx],
-                                                    custom_output_buffer=custom_output_buffer, 
-                                                    action=action, 
-                                                    dump_energy=dump_energy, 
-                                                    plot_energy=plot_energy,
-                                                    **kwds)
-                self._forward_transforms[(f,axes,transform_tag)] = planned_transforms[idx]
+                                                                   tag=self.tag + '_' + transform_tag + '_' + f.name,
+                                                                   symbolic_transform=transforms[idx],
+                                                                   custom_output_buffer=custom_output_buffer,
+                                                                   action=action,
+                                                                   dump_energy=dump_energy,
+                                                                   plot_energy=plot_energy,
+                                                                   **kwds)
+                self._forward_transforms[(f, axes, transform_tag)] = planned_transforms[idx]
         else:
-            assert (field,axes,transform_tag) not in self._forward_transforms, msg.format(field.name, axes, transform_tag)
+            assert (field, axes, transform_tag) not in self._forward_transforms, msg.format(
+                field.name, axes, transform_tag)
             assert field in self._op.input_fields
             assert field is transforms.field
             assert transforms.is_forward
             planned_transforms = PlannedSpectralTransform(transform_group=self,
-                                                    tag=self.tag + '_' + transform_tag + '_' + field.name,
-                                                    symbolic_transform=transforms,
-                                                    custom_output_buffer=custom_output_buffer,
-                                                    action=action, 
-                                                    dump_energy=dump_energy, 
-                                                    plot_energy=plot_energy,
-                                                    **kwds)
-            self._forward_transforms[(field,axes,transform_tag)] = planned_transforms
+                                                          tag=self.tag + '_' + transform_tag + '_' + field.name,
+                                                          symbolic_transform=transforms,
+                                                          custom_output_buffer=custom_output_buffer,
+                                                          action=action,
+                                                          dump_energy=dump_energy,
+                                                          plot_energy=plot_energy,
+                                                          **kwds)
+            self._forward_transforms[(field, axes, transform_tag)] = planned_transforms
         return planned_transforms
-    
+
     @_not_initialized
     def require_backward_transform(self, field, axes=None, transform_tag=None,
-                                        custom_input_buffer=None,
-                                        matching_forward_transform=None,
-                                        action=None, 
-                                        dump_energy=None, plot_energy=None,
-                                        **kwds):
+                                   custom_input_buffer=None,
+                                   matching_forward_transform=None,
+                                   action=None,
+                                   dump_energy=None, plot_energy=None,
+                                   **kwds):
         """
         Same as require_forward_transform but for backward transforms.
         This corresponds to the following backward transform mappings:
-        
+
         if order[axis] is 0:
             *no transform -> no transform
         else, if order[axis] is even:
@@ -712,7 +718,7 @@ class SpectralTransformGroup(object):
             *DCT-III -> DST-II
             *DST-I   -> DCT-I
             *DST-III -> DCT-II
-        
+
         For backward transforms, boundary compatibility for output_fields is thus the following:
             if axis is even:
                 Boundary should be exactly the same on the axis.
@@ -724,15 +730,15 @@ class SpectralTransformGroup(object):
                 *(Dirichlet-Dirichlet)   ODD-ODD -> EVEN-EVEN (Neumann-Neumann)
 
         Order and boundary conditions are decuded from field.
-        
+
         Parameters
         ----------
-        field: ScalarField 
+        field: ScalarField
             The target field where the result of the inverse transform will be stored.
         axes: array-like of integers
             The axes to be transformed.
         transform_tag: str
-            Extra tag to register the backward transform (a single scalar field can be 
+            Extra tag to register the backward transform (a single scalar field can be
             transformed multiple times). Default tag is 'default'.
         custom_input_buffer: None or str or F, optional
             Force this transform to take as input one of the two common transform group buffers.
@@ -744,7 +750,7 @@ class SpectralTransformGroup(object):
             Specifying 'auto' will tell the planner to use the matching
             transform output buffer.
         action: BackwardTransfromAction, optional
-            Defaults to SpectralTransformAction.OVERWRITE which will overwrite the 
+            Defaults to SpectralTransformAction.OVERWRITE which will overwrite the
             compute slices of the given output field.
             SpectralTransformAction.ACCUMULATE will sum the current content of the field
             with the result of the backward transform.
@@ -765,7 +771,7 @@ class SpectralTransformGroup(object):
 
 
             dump_energy       plot_energy      result
-                 None             None         nothing  
+                 None             None         nothing
                  iop0              0           energy is computed and dumped every iop0.frequency iterations
                   0               iop1         energy is computed and dumped every iop1.frequency iterations
                  iop0             iop1         energy is computed every iop1.frequency and iop2.frequency iterations
@@ -773,7 +779,7 @@ class SpectralTransformGroup(object):
                                                              plotted  every iop1.frequency
 
             About frequency:
-                if (frequency<0)  no dump 
+                if (frequency<0)  no dump
                 if (frequency==0) dump at time of interests and last iteration
                 if (frequency>=0) dump at time of interests, last iteration and every freq iterations
 
@@ -784,39 +790,41 @@ class SpectralTransformGroup(object):
         check_instance(transform_tag, str)
         check_instance(action, SpectralTransformAction)
         transforms = SpectralTransform(field=field, axes=axes, forward=False)
-        msg='Field {} with axes {} and transform_tag "{}" has already been registered for backward transform.'
+        msg = 'Field {} with axes {} and transform_tag "{}" has already been registered for backward transform.'
         if field.is_tensor:
             planned_transforms = field.new_empty_array()
             for (idx, f) in field.nd_iter():
-                assert (f,axes,transform_tag) not in self._backward_transforms, msg.format(f.name, axes, transform_tag)
+                assert (f, axes, transform_tag) not in self._backward_transforms, msg.format(
+                    f.name, axes, transform_tag)
                 assert f in self._op.output_fields
                 assert not transforms[idx].is_forward
                 planned_transforms[idx] = PlannedSpectralTransform(transform_group=self,
-                                                    tag=self.tag + '_' + transform_tag + '_' + f.name,
-                                                    symbolic_transform=transforms[idx],
-                                                    custom_input_buffer=custom_input_buffer,
-                                                    matching_forward_transform=matching_forward_transform,
-                                                    action=action,
-                                                    dump_energy=dump_energy, 
-                                                    plot_energy=plot_energy,
-                                                    **kwds)
-                self._backward_transforms[(f,axes,transform_tag)] = planned_transforms[idx]
+                                                                   tag=self.tag + '_' + transform_tag + '_' + f.name,
+                                                                   symbolic_transform=transforms[idx],
+                                                                   custom_input_buffer=custom_input_buffer,
+                                                                   matching_forward_transform=matching_forward_transform,
+                                                                   action=action,
+                                                                   dump_energy=dump_energy,
+                                                                   plot_energy=plot_energy,
+                                                                   **kwds)
+                self._backward_transforms[(f, axes, transform_tag)] = planned_transforms[idx]
         else:
-            assert (field,axes,transform_tag) not in self._backward_transforms, msg.format(field.name, axes, transform_tag)
+            assert (field, axes, transform_tag) not in self._backward_transforms, msg.format(
+                field.name, axes, transform_tag)
             assert field in self._op.output_fields
             assert not transforms.is_forward
             planned_transforms = PlannedSpectralTransform(transform_group=self,
-                                                    tag=self.tag + '_' + transform_tag + '_' + field.name,
-                                                    symbolic_transform=transforms,
-                                                    custom_input_buffer=custom_input_buffer,
-                                                    matching_forward_transform=matching_forward_transform,
-                                                    action=action,
-                                                    dump_energy=dump_energy, 
-                                                    plot_energy=plot_energy,
-                                                    **kwds)
-            self._backward_transforms[(field,axes,transform_tag)] = planned_transforms
+                                                          tag=self.tag + '_' + transform_tag + '_' + field.name,
+                                                          symbolic_transform=transforms,
+                                                          custom_input_buffer=custom_input_buffer,
+                                                          matching_forward_transform=matching_forward_transform,
+                                                          action=action,
+                                                          dump_energy=dump_energy,
+                                                          plot_energy=plot_energy,
+                                                          **kwds)
+            self._backward_transforms[(field, axes, transform_tag)] = planned_transforms
         return planned_transforms
-    
+
     @property
     def output_parameters(self):
         parameters = set()
@@ -829,10 +837,10 @@ class SpectralTransformGroup(object):
         assert self.discretized
         discrete_wave_numbers = self._discrete_wave_numbers
         if (discrete_wave_numbers is None):
-            msg='discrete_wave_numbers has not been set yet.'
+            msg = 'discrete_wave_numbers has not been set yet.'
             raise AttributeError(msg)
         return self._discrete_wave_numbers
-    
+
     @_not_initialized
     def push_expressions(self, *exprs):
         exprs_wave_numbers = set()
@@ -852,42 +860,40 @@ class SpectralTransformGroup(object):
                 print '  wave_numbers: {}'.format(wn)
 
         return tuple(exprs_wave_numbers)
-            
-        
+
     @classmethod
     def check_fields(cls, forward_fields, backward_fields):
         all_fields = tuple(set(forward_fields+backward_fields))
         if not all_fields:
-            msg='At least one field is required.'
+            msg = 'At least one field is required.'
             raise ValueError(msg)
         domain = cls.determine_domain(*all_fields)
-        dim    = domain.dim
+        dim = domain.dim
         return (domain, dim)
 
-    @classmethod 
+    @classmethod
     def determine_domain(cls, *fields):
         domain = fields[0].domain
         for field in fields[1:]:
             if (field.domain is not domain):
-                msg='Domain mismatch between fields:\n{}\nvs.\n{}\n'
-                msg=msg.format(domain, field.domain)
+                msg = 'Domain mismatch between fields:\n{}\nvs.\n{}\n'
+                msg = msg.format(domain, field.domain)
                 raise ValueError(msg)
         return domain
 
 
-
 class PlannedSpectralTransform(object):
     """
     A planned spectral transform is an AppliedSpectralTransform wrapper.
     This object will be handled by the transform planner.
     """
-    DEBUG=False
+    DEBUG = False
 
     def __init__(self, transform_group, tag, symbolic_transform, action,
-            custom_input_buffer=None, custom_output_buffer=None,
-            matching_forward_transform=None, 
-            dump_energy=None, plot_energy=None,compute_energy_frequencies=None):
-        
+                 custom_input_buffer=None, custom_output_buffer=None,
+                 matching_forward_transform=None,
+                 dump_energy=None, plot_energy=None, compute_energy_frequencies=None):
+
         check_instance(transform_group, SpectralTransformGroup)
         check_instance(transform_group.op, SpectralOperatorBase)
         check_instance(tag, str)
@@ -895,48 +901,48 @@ class PlannedSpectralTransform(object):
         check_instance(action, SpectralTransformAction)
         check_instance(dump_energy, IOParams, allow_none=True)
         check_instance(plot_energy, IOParams, allow_none=True)
-        assert custom_input_buffer  in (None, 'B0', 'B1', 'auto'), custom_input_buffer
+        assert custom_input_buffer in (None, 'B0', 'B1', 'auto'), custom_input_buffer
         assert custom_output_buffer in (None, 'B0', 'B1', 'auto'), custom_output_buffer
 
-        field      = symbolic_transform.field
+        field = symbolic_transform.field
         is_forward = symbolic_transform.is_forward
-        
+
         self._transform_group = transform_group
         self._tag = tag
         self._symbol = symbolic_transform
         self._queue = None
-        self._custom_input_buffer  = custom_input_buffer
+        self._custom_input_buffer = custom_input_buffer
         self._custom_output_buffer = custom_output_buffer
         self._matching_forward_transform = matching_forward_transform
         self._action = action
 
-        self._do_dump_energy = (dump_energy is not None) and (dump_energy.frequency>=0)
-        self._do_plot_energy = (plot_energy is not None) and (plot_energy.frequency>=0)
-        
+        self._do_dump_energy = (dump_energy is not None) and (dump_energy.frequency >= 0)
+        self._do_plot_energy = (plot_energy is not None) and (plot_energy.frequency >= 0)
+
         compute_energy_frequencies = to_set(first_not_None(compute_energy_frequencies, set()))
         if self._do_dump_energy:
             compute_energy_frequencies.add(dump_energy.frequency)
         if self._do_plot_energy:
             compute_energy_frequencies.add(plot_energy.frequency)
-        compute_energy_frequencies = set(filter(lambda f: f>=0, compute_energy_frequencies))
-        do_compute_energy = (len(compute_energy_frequencies)>0)
-        
+        compute_energy_frequencies = set(filter(lambda f: f >= 0, compute_energy_frequencies))
+        do_compute_energy = (len(compute_energy_frequencies) > 0)
+
         self._do_compute_energy = do_compute_energy
         self._compute_energy_frequencies = compute_energy_frequencies
         self._plot_energy_ioparams = plot_energy
         self._dump_energy_ioparams = dump_energy
 
         if self._do_compute_energy:
-            ename  = 'E{}_{}'.format('f' if is_forward else 'b', field.name)
+            ename = 'E{}_{}'.format('f' if is_forward else 'b', field.name)
             pename = 'E{}_{}'.format('f' if is_forward else 'b', field.pretty_name)
             vename = 'E{}_{}'.format('f' if is_forward else 'b', field.var_name)
             self._energy_parameter = BufferParameter(name=ename, pretty_name=pename, var_name=vename,
-                                                        shape=None, dtype=None, initial_value=None)
+                                                     shape=None, dtype=None, initial_value=None)
         else:
             self._energy_parameter = None
-        self._energy_dumper  = None
+        self._energy_dumper = None
         self._energy_plotter = None
-        
+
         if is_forward:
             msg = "Cannot specify 'custom_input_buffer' for a forward transform."
             assert (custom_input_buffer is None), msg
@@ -946,75 +952,76 @@ class PlannedSpectralTransform(object):
             msg = "Cannot specify 'custom_output_buffer' for a backward transform."
             assert (self._custom_output_buffer is None), msg
             if (self._custom_input_buffer == 'auto'):
-                msg="Using 'auto' as 'custom_output_buffer' of a backward transform implies "
-                msg+="to specify a 'matching_forward_transform' to choose the buffer from."
+                msg = "Using 'auto' as 'custom_output_buffer' of a backward transform implies "
+                msg += "to specify a 'matching_forward_transform' to choose the buffer from."
                 assert (matching_forward_transform is not None), msg
                 assert isinstance(matching_forward_transform, PlannedSpectralTransform), msg
                 assert matching_forward_transform.is_forward, msg
             else:
-                msg="Using 'custom_output_buffer' different than 'auto' for a backward "
-                msg+="transform implies to set 'matching_forward_transform' to None."
+                msg = "Using 'custom_output_buffer' different than 'auto' for a backward "
+                msg += "transform implies to set 'matching_forward_transform' to None."
                 assert (matching_forward_transform is None), msg
 
         # reorder transforms in execution order (contiguous axe first)
-        transforms = self.s.transforms[::-1] 
+        transforms = self.s.transforms[::-1]
 
-        if len(transforms)!=field.dim:
-            msg='Number of transforms does not match field dimension.'
+        if len(transforms) != field.dim:
+            msg = 'Number of transforms does not match field dimension.'
             raise ValueError(msg)
 
         if all((tr is TransformType.NONE) for tr in transforms):
-            msg='All transforms are of type NONE.'
+            msg = 'All transforms are of type NONE.'
             raise ValueError(msg)
-        
+
         if is_forward:
-            input_dtype  = field.dtype
+            input_dtype = field.dtype
             output_dtype = STU.determine_output_dtype(
-                    field.dtype, *transforms)
+                field.dtype, *transforms)
         else:
-            input_dtype  = STU.determine_input_dtype(
-                    field.dtype, *transforms)
+            input_dtype = STU.determine_input_dtype(
+                field.dtype, *transforms)
             output_dtype = field.dtype
 
-        self._input_dtype  = np.dtype(input_dtype)
+        self._input_dtype = np.dtype(input_dtype)
         self._output_dtype = np.dtype(output_dtype)
 
-        self._input_shape  = None
+        self._input_shape = None
         self._output_shape = None
 
-        self._input_buffer  = None
+        self._input_buffer = None
         self._output_buffer = None
 
         self._dfield = None
-        self._input_symbolic_arrays  = set()
+        self._input_symbolic_arrays = set()
         self._output_symbolic_arrays = set()
         self._ready = False
 
     @property
     def output_parameters(self):
         return {self._energy_parameter} - {None}
-    
+
     def input_symbolic_array(self, name, **kwds):
         """Create a symbolic array that will be bound to input transform array."""
         assert ('memory_object' not in kwds)
         assert ('dim' not in kwds)
-        obj = SymbolicArray(name=name, memory_object=None, 
-                dim=self.field.dim, **kwds)
+        obj = SymbolicArray(name=name, memory_object=None,
+                            dim=self.field.dim, **kwds)
         self._input_symbolic_arrays.add(obj)
         return obj
-    
+
     def output_symbolic_array(self, name, **kwds):
         """Create a symbolic array that will be bound to output transform array."""
         assert ('memory_object' not in kwds)
         assert ('dim' not in kwds)
-        obj = SymbolicArray(name=name, memory_object=None, 
-                dim=self.field.dim, **kwds)
+        obj = SymbolicArray(name=name, memory_object=None,
+                            dim=self.field.dim, **kwds)
         self._output_symbolic_arrays.add(obj)
         return obj
 
     @property
     def transform_group(self):
         return self._transform_group
+
     @property
     def op(self):
         return self._transform_group.op
@@ -1022,6 +1029,7 @@ class PlannedSpectralTransform(object):
     @property
     def tag(self):
         return self._tag
+
     @property
     def name(self):
         return self._tag
@@ -1029,19 +1037,23 @@ class PlannedSpectralTransform(object):
     @property
     def symbol(self):
         return self._symbol
+
     @property
     def s(self):
         return self._symbol
-    
+
     @property
     def field(self):
         return self._symbol.field
+
     @property
     def is_forward(self):
         return self._symbol.is_forward
+
     @property
     def is_backward(self):
         return not self.is_forward
+
     @property
     def transforms(self):
         return self._symbol.transforms
@@ -1049,24 +1061,25 @@ class PlannedSpectralTransform(object):
     @property
     def input_dtype(self):
         return self._input_dtype
+
     @property
     def output_dtype(self):
         return self._output_dtype
 
-    
     @property
     def backend(self):
         assert self.discretized
         backend = self._backend
         if (backend is None):
-            msg='backend has not been set yet.'
+            msg = 'backend has not been set yet.'
             raise AttributeError(msg)
         return backend
+
     @property
     def dfield(self):
         assert self.discretized
         if (self._dfield is None):
-            msg='dfield has not been set.'
+            msg = 'dfield has not been set.'
             raise AttributeError(msg)
         return self._dfield
 
@@ -1074,114 +1087,120 @@ class PlannedSpectralTransform(object):
     def input_shape(self):
         assert self.discretized
         if (self._input_shape is None):
-            msg='input_shape has not been set.'
+            msg = 'input_shape has not been set.'
             raise AttributeError(msg)
         return self._input_shape
+
     @property
     def output_shape(self):
         assert self.discretized
         if (self._output_shape is None):
-            msg='output_shape has not been set.'
+            msg = 'output_shape has not been set.'
             raise AttributeError(msg)
         return self._output_shape
-    
+
     @property
     def input_transform_shape(self):
         assert self.discretized
         if (self._input_transform_shape is None):
-            msg='input_transform_shape has not been set.'
+            msg = 'input_transform_shape has not been set.'
             raise AttributeError(msg)
         return self._input_transform_shape
+
     @property
     def output_transform_shape(self):
         assert self.discretized
         if (self._output_transform_shape is None):
-            msg='output_transform_shape has not been set.'
+            msg = 'output_transform_shape has not been set.'
             raise AttributeError(msg)
         return self._output_transform_shape
-    
+
     @property
     def input_axes(self):
         assert self.discretized
         if (self._input_axes is None):
-            msg='input_axes has not been set.'
+            msg = 'input_axes has not been set.'
             raise AttributeError(msg)
         return self._input_axes
+
     @property
     def output_axes(self):
         assert self.discretized
         if (self._output_axes is None):
-            msg='output_axes has not been set.'
+            msg = 'output_axes has not been set.'
             raise AttributeError(msg)
         return self._output_axes
-    
+
     @property
     def input_slices(self):
         assert self.discretized
         buf = self._input_slices
         if (buf is None):
-            msg='input_slices has not been set yet.'
+            msg = 'input_slices has not been set yet.'
             raise AttributeError(msg)
         return buf
+
     @property
     def output_slices(self):
         assert self.discretized
         buf = self._output_slices
         if (buf is None):
-            msg='output_slices has not been set yet.'
+            msg = 'output_slices has not been set yet.'
             raise AttributeError(msg)
         return buf
-    
+
     @property
     def input_buffer(self):
         assert self.discretized
         buf = self._input_buffer
         if (buf is None):
-            msg='input_buffer has not been set yet.'
+            msg = 'input_buffer has not been set yet.'
             raise AttributeError(msg)
         return buf
+
     @property
     def output_buffer(self):
         assert self.discretized
         buf = self._output_buffer
         if (buf is None):
-            msg='output_buffer has not been set yet.'
+            msg = 'output_buffer has not been set yet.'
             raise AttributeError(msg)
         return buf
-    
+
     @property
     def full_input_buffer(self):
         assert self.discretized
         buf = self._full_input_buffer
         if (buf is None):
-            msg='full_input_buffer has not been set yet.'
+            msg = 'full_input_buffer has not been set yet.'
             raise AttributeError(msg)
         return buf
+
     @property
     def full_output_buffer(self):
         assert self.discretized
         buf = self._full_output_buffer
         if (buf is None):
-            msg='full_output_buffer has not been set yet.'
+            msg = 'full_output_buffer has not been set yet.'
             raise AttributeError(msg)
         return buf
-    
+
     @property
     def initialized(self):
         return self.op.initialized
+
     @property
     def discretized(self):
         return self.op.discretized
+
     @property
     def ready(self):
         return self._ready
 
-
     @_not_initialized
     def initialize(self, **kwds):
         pass
-    
-    
+
     @_initialized
     def discretize(self, **kwds):
         is_forward = self.is_forward
@@ -1191,65 +1210,66 @@ class PlannedSpectralTransform(object):
 
         if is_forward:
             (dfield, transform_info, transpose_info, transform_offsets) = \
-                    self._discretize_forward(field_axes, **kwds)
+                self._discretize_forward(field_axes, **kwds)
             assert transpose_info[0][1] == field_axes
         else:
             (dfield, transform_info, transpose_info, transform_offsets) = \
-                    self._discretize_backward(field_axes, **kwds)
+                self._discretize_backward(field_axes, **kwds)
             assert transpose_info[-1][2] == field_axes
-        assert dfield.dim==len(transform_info)==len(transpose_info)==dim
-        assert transform_info[0][2][1]  == self._input_dtype
+        assert dfield.dim == len(transform_info) == len(transpose_info) == dim
+        assert transform_info[0][2][1] == self._input_dtype
         assert transform_info[-1][3][1] == self._output_dtype
-        
+
         # filter out untransformed axes
         tidx = tuple(filter(lambda i: not STU.is_none(transform_info[i][1]),  xrange(dim)))
         assert tidx, 'Could not determine any transformed axe.'
         ntransforms = len(tidx)
         transform_info = tuple(map(transform_info.__getitem__, tidx))
         transpose_info = tuple(map(transpose_info.__getitem__, tidx))
-        assert len(transform_info)==len(transpose_info)==ntransforms
+        assert len(transform_info) == len(transpose_info) == ntransforms
 
         # determine input and output shapes
-        input_axes  = transpose_info[0][1]
+        input_axes = transpose_info[0][1]
         output_axes = transpose_info[-1][2]
         if is_forward:
-            assert (field_axes==input_axes), (field_axes, input_axes)
-            input_transform_shape  = transpose_info[0][3]
+            assert (field_axes == input_axes), (field_axes, input_axes)
+            input_transform_shape = transpose_info[0][3]
             output_transform_shape = transform_info[-1][3][0]
-            input_shape,  input_slices, _  = \
-                    self.determine_buffer_shape(input_transform_shape, False,
-                                                    transform_offsets, input_axes)
+            input_shape,  input_slices, _ = \
+                self.determine_buffer_shape(input_transform_shape, False,
+                                            transform_offsets, input_axes)
             output_shape, output_slices, zfos = \
-                    self.determine_buffer_shape(output_transform_shape, True,
-                                                    transform_offsets, output_axes)
+                self.determine_buffer_shape(output_transform_shape, True,
+                                            transform_offsets, output_axes)
             # We have a situation where we should impose zeros:
             #  1) output transform ghosts (when there are transform sizes mismatch DXT-I variants)
             zero_fill_output_slices = zfos
         else:
-            assert (field_axes==output_axes), (field_axes, output_axes)
-            input_transform_shape  = transform_info[0][2][0]
+            assert (field_axes == output_axes), (field_axes, output_axes)
+            input_transform_shape = transform_info[0][2][0]
             output_transform_shape = transpose_info[-1][4]
 
-            input_shape,  input_slices, _  = \
-                    self.determine_buffer_shape(input_transform_shape, True,
-                                                    transform_offsets, input_axes)
+            input_shape,  input_slices, _ = \
+                self.determine_buffer_shape(input_transform_shape, True,
+                                            transform_offsets, input_axes)
             output_shape, output_slices, zfos = \
-                    self.determine_buffer_shape(output_transform_shape, False,
-                                                    transform_offsets, output_axes)
+                self.determine_buffer_shape(output_transform_shape, False,
+                                            transform_offsets, output_axes)
             # We have a situation where we should impose zeros:
-            #  1) impose homogeneous dirichlet conditions on output 
+            #  1) impose homogeneous dirichlet conditions on output
             #      (implicit 0's are not part of the transform output).
             zero_fill_output_slices = zfos
-        
-        axes  = (output_axes if is_forward else input_axes)
+
+        axes = (output_axes if is_forward else input_axes)
         ptransforms = tuple(self.transforms[i] for i in axes)
         self._permuted_transforms = ptransforms
-        
+
         if self._do_compute_energy:
-            shape = (output_shape  if is_forward else input_shape)
+            shape = (output_shape if is_forward else input_shape)
             #view  = (output_slices if is_forward else input_slices)
             assert len(shape) == ntransforms
-            shape = tuple(Si-2 if sum(transform_offsets[i])==2 else Si  for i,Si in zip(axes, shape))
+            shape = tuple(Si-2 if sum(transform_offsets[i]) == 2 else Si
+                          for i, Si in zip(axes, shape))
             K2 = ()
             for (tr, Ni) in zip(ptransforms, shape):
                 Ki = Ni//2 if STU.is_C2C(tr) else Ni-1
@@ -1261,75 +1281,76 @@ class PlannedSpectralTransform(object):
             else:
                 mutexes_nbytes = 0
             self._max_wavenumber = max_wavenumber
-            self._energy_nbytes  = energy_nbytes
+            self._energy_nbytes = energy_nbytes
             self._mutexes_nbytes = mutexes_nbytes
-            
+
             Ep = self._energy_parameter
             Ep.reallocate_buffer(shape=(max_wavenumber+1,), dtype=dfield.dtype)
-            
-            fname = fname='{}{}'.format(dfield.name, '_in' if is_forward else '_out')
-            
+
+        fname = '{}{}'.format(dfield.name, '_in' if is_forward else '_out')
+
             # build txt dumper
             if self._do_dump_energy:
                 diop = self._dump_energy_ioparams
                 assert (diop is not None)
-                self._energy_dumper = EnergyDumper(energy_parameter=Ep, 
-                        io_params=self._dump_energy_ioparams, fname=fname)
+                self._energy_dumper = EnergyDumper(energy_parameter=Ep,
+                                                   io_params=self._dump_energy_ioparams, fname=fname)
 
             # build plotter if required
             if self._do_plot_energy:
                 piop = self._plot_energy_ioparams
                 assert (piop is not None)
-                pname = u'{}.{}.{}'.format(self.op.__class__.__name__, 
-                        'forward'if is_forward else 'backward',
-                         dfield.pretty_name.decode('utf-8'))
-                energy_parameters = { pname: self._energy_parameter}
+                pname = u'{}.{}.{}'.format(self.op.__class__.__name__,
+                                           'forward' if is_forward else 'backward',
+                                           dfield.pretty_name.decode('utf-8'))
+                energy_parameters = {pname: self._energy_parameter}
                 self._energy_plotter = EnergyPlotter(energy_parameters=energy_parameters,
-                                                        io_params=self._plot_energy_ioparams,
-                                                        fname=fname)
+                                                     io_params=self._plot_energy_ioparams,
+                                                     fname=fname)
         else:
             self._max_wavenumber = None
-            self._energy_nbytes  = None
+            self._energy_nbytes = None
             self._mutexes_nbytes = None
 
         self._dfield = dfield
         self._transform_info = transform_info
         self._transpose_info = transpose_info
-        self._ntransforms    = ntransforms
-        
-        self._input_axes     = input_axes
-        self._input_shape    = input_shape
-        self._input_slices   = input_slices
+        self._ntransforms = ntransforms
+
+        self._input_axes = input_axes
+        self._input_shape = input_shape
+        self._input_slices = input_slices
         self._input_transform_shape = input_transform_shape
 
-        self._output_axes    = output_axes
-        self._output_shape   = output_shape
-        self._output_slices  = output_slices
+        self._output_axes = output_axes
+        self._output_shape = output_shape
+        self._output_slices = output_slices
         self._output_transform_shape = output_transform_shape
-        
+
         self._zero_fill_output_slices = zero_fill_output_slices
 
         self._backend = dfield.backend
 
         if self.DEBUG:
             def axis_format(info):
-                prefix='\n'+' '*4
-                ss=''
-                for (i,data) in enumerate(info):
-                    ss+=prefix+'{}/ '.format(i)+str(data)
+                prefix = '\n'+' '*4
+                ss = ''
+                for (i, data) in enumerate(info):
+                    ss += prefix+'{}/ '.format(i)+str(data)
                 return ss
+
             def slc_format(slices):
                 if (slices is None):
                     return 'NONE'
                 else:
-                    prefix='\n'+' '*4
-                    ss=''
+                    prefix = '\n'+' '*4
+                    ss = ''
                     for slc in slices:
-                        ss+=prefix+str(slc)
+                        ss += prefix+str(slc)
                     return ss
             print '\n\n== SPECTRAL PLANNING INFO OF FIELD {} =='.format(dfield.pretty_name)
-            print 'transform direction:     {}'.format('FORWARD' if self.is_forward 
-                                                  else 'BACKWARD')
+            print 'transform direction:     {}'.format('FORWARD' if self.is_forward
+                                                       else 'BACKWARD')
             print 'transforms:              {}'.format(self.transforms)
             print ':CARTESIAN INFO:'
             print 'cart shape:              {}'.format(dfield.topology.cart_shape)
@@ -1354,50 +1375,51 @@ class PlannedSpectralTransform(object):
             print ':ZERO FILL:'
             print 'zero_fill_output_slices: {}'.format(slc_format(self._zero_fill_output_slices))
 
-
-    
     def get_mapped_input_buffer(self):
         return self.get_mapped_full_input_buffer()[self.input_slices]
+
     def get_mapped_output_buffer(self):
         return self.get_mapped_full_output_buffer()[self.output_slices]
+
     def get_mapped_full_input_buffer(self):
         dfield = self._dfield
-        if (self.is_forward 
-             and dfield.backend.kind == Backend.OPENCL
-             and self.transform_group._op.enable_opencl_host_buffer_mapping):
+        if (self.is_forward
+            and dfield.backend.kind == Backend.OPENCL
+                and self.transform_group._op.enable_opencl_host_buffer_mapping):
             return self.transform_group._op.get_mapped_object(dfield)[dfield.compute_slices]
         else:
             return self.full_input_buffer
+
     def get_mapped_full_output_buffer(self):
         dfield = self._dfield
         if (self.is_backward
-             and dfield.backend.kind == Backend.OPENCL
-             and self.transform_group._op.enable_opencl_host_buffer_mapping):
+            and dfield.backend.kind == Backend.OPENCL
+                and self.transform_group._op.enable_opencl_host_buffer_mapping):
             return self.transform_group._op.get_mapped_object(dfield)[dfield.compute_slices]
         else:
             return self.full_output_buffer
 
-    def determine_buffer_shape(cls, transform_shape, target_is_buffer, offsets, axes): 
+    def determine_buffer_shape(cls, transform_shape, target_is_buffer, offsets, axes):
         offsets = tuple(offsets[ai] for ai in axes)
         slices = []
         shape = []
         zero_fill_slices = []
         dim = len(axes)
-        for i,((lo,ro),si) in enumerate(zip(offsets, transform_shape)):
-            if (lo^ro) and target_is_buffer:
+        for i, ((lo, ro), si) in enumerate(zip(offsets, transform_shape)):
+            if (lo ^ ro) and target_is_buffer:
                 Si = si
                 slc = slice(0, si)
             else:
                 Si = si+lo+ro
                 slc = slice(lo, Si-ro)
-                if (lo>0):
-                    zfill = [slice(None,None,None)]*dim
-                    zfill[i] = slice(0,lo)
+                if (lo > 0):
+                    zfill = [slice(None, None, None)]*dim
+                    zfill[i] = slice(0, lo)
                     zfill = tuple(zfill)
                     zero_fill_slices.append(zfill)
-                if (ro>0):
-                    zfill = [slice(None,None,None)]*dim
-                    zfill[i] = slice(Si-ro,Si)
+                if (ro > 0):
+                    zfill = [slice(None, None, None)]*dim
+                    zfill[i] = slice(Si-ro, Si)
                     zfill = tuple(zfill)
                     zero_fill_slices.append(zfill)
             shape.append(Si)
@@ -1406,196 +1428,194 @@ class PlannedSpectralTransform(object):
 
     def configure_input_buffer(self, buf):
         input_dtype, input_shape = self.input_dtype, self.input_shape
-        buf_nbytes   = compute_nbytes(buf.shape, buf.dtype)
-        input_nbytes = compute_nbytes(input_shape, input_dtype) 
+        buf_nbytes = compute_nbytes(buf.shape, buf.dtype)
+        input_nbytes = compute_nbytes(input_shape, input_dtype)
         assert buf_nbytes >= input_nbytes, (buf_nbytes, input_nbytes)
-        if (buf.shape!=input_shape) or (buf.dtype!=input_dtype):
-            buf = buf.view(dtype=np.int8)[:input_nbytes].view(dtype=input_dtype).reshape(input_shape)
+        if (buf.shape != input_shape) or (buf.dtype != input_dtype):
+            buf = buf.view(dtype=np.int8)[:input_nbytes].view(
+                dtype=input_dtype).reshape(input_shape)
         if isinstance(buf, Array):
             buf = buf.handle
         input_buffer = buf[self.input_slices]
         assert input_buffer.shape == self.input_transform_shape
         self._full_input_buffer = buf
-        self._input_buffer      = input_buffer
+        self._input_buffer = input_buffer
         for symbol in self._input_symbolic_arrays:
             symbol.to_backend(self.backend.kind).bind_memory_object(buf)
         return input_buffer
 
-
     def configure_output_buffer(self, buf):
         output_dtype, output_shape = self.output_dtype, self.output_shape
-        buf_nbytes    = compute_nbytes(buf.shape, buf.dtype)
-        output_nbytes = compute_nbytes(output_shape, output_dtype) 
+        buf_nbytes = compute_nbytes(buf.shape, buf.dtype)
+        output_nbytes = compute_nbytes(output_shape, output_dtype)
         assert buf_nbytes >= output_nbytes, (buf_nbytes, output_nbytes)
-        if (buf.shape!=output_shape) or (buf.dtype!=output_dtype):
-            buf = buf.view(dtype=np.int8)[:output_nbytes].view(dtype=output_dtype).reshape(output_shape)
+        if (buf.shape != output_shape) or (buf.dtype != output_dtype):
+            buf = buf.view(dtype=np.int8)[:output_nbytes].view(
+                dtype=output_dtype).reshape(output_shape)
         if isinstance(buf, Array):
             buf = buf.handle
         output_buffer = buf[self.output_slices]
         assert output_buffer.shape == self.output_transform_shape
         self._full_output_buffer = buf
-        self._output_buffer      = output_buffer
+        self._output_buffer = output_buffer
         for symbol in self._output_symbolic_arrays:
             symbol.to_backend(self.backend.kind).bind_memory_object(buf)
         return output_buffer
 
     def _discretize_forward(self, field_axes, **kwds):
         dfield = self.op.input_discrete_fields[self.field]
-       
-        grid_resolution  = dfield.mesh.grid_resolution
+
+        grid_resolution = dfield.mesh.grid_resolution
         local_resolution = dfield.compute_resolution
 
         input_dtype = dfield.dtype
-        dim         = dfield.dim
-        
-        forward_transforms  = self.transforms[::-1]
+        dim = dfield.dim
+
+        forward_transforms = self.transforms[::-1]
         backward_transforms = STU.get_inverse_transforms(*forward_transforms)
-        
+
         (resolution, transform_offsets) = \
-                STU.get_transform_resolution(local_resolution, *forward_transforms)
+            STU.get_transform_resolution(local_resolution, *forward_transforms)
 
         local_transform_info = self._determine_transform_info(forward_transforms,
                                                               resolution, input_dtype)
-        local_transpose_info = self._determine_transpose_info(field_axes, 
+        local_transpose_info = self._determine_transpose_info(field_axes,
                                                               local_transform_info)
-        
-        local_transform_info = self._permute_transform_info(local_transform_info, 
+
+        local_transform_info = self._permute_transform_info(local_transform_info,
                                                             local_transpose_info)
 
         transform_info = local_transform_info
         transpose_info = local_transpose_info
 
-        return (dfield, transform_info, transpose_info, 
+        return (dfield, transform_info, transpose_info,
                 transform_offsets)
 
-
     def _discretize_backward(self, field_axes, **kwds):
-        
-        forward_transforms  = self.transforms[::-1]
+
+        forward_transforms = self.transforms[::-1]
         backward_transforms = STU.get_inverse_transforms(*forward_transforms)
 
         def reverse_transform_info(transform_info):
             transform_info = list(transform_info)
-            for (i,d) in enumerate(transform_info):
+            for (i, d) in enumerate(transform_info):
                 d = list(d)
                 d[1] = forward_transforms[i]
-                d2,d3 = d[2:4]
-                d[2:4] = d3,d2
+                d2, d3 = d[2:4]
+                d[2:4] = d3, d2
                 transform_info[i] = tuple(d)
             transform_info = tuple(transform_info)
             return transform_info[::-1]
 
         def reverse_transpose_info(transpose_info):
             transpose_info = list(transpose_info)
-            for (i,d) in enumerate(transpose_info):
+            for (i, d) in enumerate(transpose_info):
                 if (d[0] is not None):
                     d = list(d)
-                    d1,d2,d3,d4 = d[1:5]
-                    d[1:5] = d2,d1,d4,d3
+                    d1, d2, d3, d4 = d[1:5]
+                    d[1:5] = d2, d1, d4, d3
                     d[0] = tuple(d[1].index(ai) for ai in d[2])
                     d = tuple(d)
                 else:
                     # no permutation
-                    assert d[1]==d[2]
-                    assert d[3]==d[4]
+                    assert d[1] == d[2]
+                    assert d[3] == d[4]
                 transpose_info[i] = d
             return transpose_info[::-1]
 
         dfield = self.op.output_discrete_fields[self.field]
 
-        grid_resolution  = dfield.mesh.grid_resolution
+        grid_resolution = dfield.mesh.grid_resolution
         local_resolution = dfield.compute_resolution
 
         output_dtype = dfield.dtype
-        dim          = dfield.dim
-        
+        dim = dfield.dim
+
         (resolution, transform_offsets) = \
-                STU.get_transform_resolution(local_resolution, *backward_transforms)
+            STU.get_transform_resolution(local_resolution, *backward_transforms)
 
-        local_backward_transform_info = self._determine_transform_info(backward_transforms, 
-                                                        resolution, output_dtype)
-        local_backward_transpose_info = self._determine_transpose_info(field_axes, 
-                                                        local_backward_transform_info)
+        local_backward_transform_info = self._determine_transform_info(backward_transforms,
+                                                                       resolution, output_dtype)
+        local_backward_transpose_info = self._determine_transpose_info(field_axes,
+                                                                       local_backward_transform_info)
         local_backward_transform_info = self._permute_transform_info(
-                                            local_backward_transform_info, 
-                                            local_backward_transpose_info)
-        
-        local_forward_transform_info  = reverse_transform_info(local_backward_transform_info)
-        local_forward_transpose_info  = reverse_transpose_info(local_backward_transpose_info)
-        
+            local_backward_transform_info,
+            local_backward_transpose_info)
+
+        local_forward_transform_info = reverse_transform_info(local_backward_transform_info)
+        local_forward_transpose_info = reverse_transpose_info(local_backward_transpose_info)
+
         transform_info = local_forward_transform_info
         transpose_info = local_forward_transpose_info
 
-        return (dfield, transform_info, transpose_info, 
-                    transform_offsets)
-        
-    
+        return (dfield, transform_info, transpose_info,
+                transform_offsets)
+
     @classmethod
     def _determine_transform_info(cls, transforms, src_shape, src_dtype):
         transform_info = []
         dim = len(transforms)
         dst_shape, dst_dtype = src_shape, src_dtype
-        dst_view = [slice(0,si) for si in src_shape]
-        for (i,tr) in enumerate(transforms):
+        dst_view = [slice(0, si) for si in src_shape]
+        for (i, tr) in enumerate(transforms):
             axis = i
             src_shape = dst_shape
             src_dtype = dst_dtype
-            src_view  = dst_view
+            src_view = dst_view
             if STU.is_none(tr):
                 pass
             elif STU.is_backward(tr):
-                msg='{} is not a forward transform.'
-                msg=msg.format(tr)
+                msg = '{} is not a forward transform.'
+                msg = msg.format(tr)
                 raise ValueError(msg)
             elif STU.is_R2R(tr):
-                msg='Expected a floating point data type but got {}.'.format(src_dtype)
+                msg = 'Expected a floating point data type but got {}.'.format(src_dtype)
                 assert is_fp(src_dtype), msg
                 # data type and shape does not change
             elif STU.is_R2C(tr):
-                msg='Expected a floating point data type but got {}.'.format(src_dtype)
+                msg = 'Expected a floating point data type but got {}.'.format(src_dtype)
                 assert is_fp(src_dtype), msg
-                dst_shape = list(src_shape) 
+                dst_shape = list(src_shape)
                 dst_shape[dim-axis-1] = dst_shape[dim-axis-1]//2 + 1
                 dst_shape = tuple(dst_shape)
                 dst_dtype = float_to_complex_dtype(src_dtype)
             elif STU.is_C2C(tr):
-                msg='Expected a complex data type but got {}.'.format(src_dtype)
+                msg = 'Expected a complex data type but got {}.'.format(src_dtype)
                 assert is_complex(src_dtype), msg
                 # data type and shape does not change
             else:
-                msg='Unknown transform type {}.'.format(tr)
+                msg = 'Unknown transform type {}.'.format(tr)
                 raise ValueError(msg)
-            
-            
-            (lo,ro) = STU.get_transform_offsets(tr) 
-            src_view = src_view[:] 
+
+            (lo, ro) = STU.get_transform_offsets(tr)
+            src_view = src_view[:]
             src_view[dim-axis-1] = slice(lo, src_shape[dim-axis-1]-ro)
-            
+
             dst_view = src_view[:]
             dst_view[dim-axis-1] = slice(lo, dst_shape[dim-axis-1]-ro)
 
             src_dtype = np.dtype(src_dtype)
             dst_dtype = np.dtype(dst_dtype)
 
-            data = (axis, tr, (src_shape, src_dtype, tuple(src_view)), 
-                              (dst_shape, dst_dtype, tuple(dst_view)))
+            data = (axis, tr, (src_shape, src_dtype, tuple(src_view)),
+                    (dst_shape, dst_dtype, tuple(dst_view)))
             transform_info.append(data)
         transform_info = tuple(transform_info)
         return transform_info
-    
+
     @classmethod
     def _determine_transpose_info(cls, src_axes, transform_info):
         transpose_info = []
         dim = len(src_axes)
-        for (axis, tr, (src_shape, src_dtype, src_view), 
-                       (dst_shape, dst_dtype, dst_view)) in transform_info:
+        for (axis, tr, (src_shape, src_dtype, src_view),
+             (dst_shape, dst_dtype, dst_view)) in transform_info:
             dst_axis = dim - 1 - axis
             if (not STU.is_none(tr)) and (dst_axis != src_axes[-1]):
                 idx = src_axes.index(dst_axis)
                 dst_axes = list(src_axes)
                 dst_axes[idx] = src_axes[-1]
-                dst_axes[-1]  = dst_axis
-                dst_axes    = tuple(dst_axes)
+                dst_axes[-1] = dst_axis
+                dst_axes = tuple(dst_axes)
                 permutation = tuple(src_axes.index(ai) for ai in dst_axes)
             else:
                 dst_axes = src_axes
@@ -1610,32 +1630,32 @@ class PlannedSpectralTransform(object):
             src_axes = dst_axes
         transpose_info = tuple(transpose_info)
         return transpose_info
-   
+
     @classmethod
     def _permute_transform_info(cls, transform_info, transpose_info):
-        assert len(transform_info)==len(transpose_info)
+        assert len(transform_info) == len(transpose_info)
         transform_info = list(transform_info)
-        for i,(transpose, transform) in enumerate(zip(transpose_info, transform_info)):
+        for i, (transpose, transform) in enumerate(zip(transpose_info, transform_info)):
             (_, _, dst_axes, _, transpose_out_shape) = transpose
-            (_1,_2,(src_shape,_3,src_view), (dst_shape,_4,dst_view)) = transform
+            (_1, _2, (src_shape, _3, src_view), (dst_shape, _4, dst_view)) = transform
             permuted_src_shape = tuple(src_shape[ai] for ai in dst_axes)
-            permuted_src_view  = tuple(src_view[ai]  for ai in dst_axes)
+            permuted_src_view = tuple(src_view[ai] for ai in dst_axes)
             permuted_dst_shape = tuple(dst_shape[ai] for ai in dst_axes)
-            permuted_dst_view  = tuple(dst_view[ai]  for ai in dst_axes)
+            permuted_dst_view = tuple(dst_view[ai] for ai in dst_axes)
             assert (permuted_src_shape == transpose_out_shape)
-            transform = (_1,_2,(permuted_src_shape,_3,permuted_src_view), 
-                               (permuted_dst_shape,_4,permuted_dst_view))
+            transform = (_1, _2, (permuted_src_shape, _3, permuted_src_view),
+                         (permuted_dst_shape, _4, permuted_dst_view))
             transform_info[i] = transform
         transform_info = tuple(transform_info)
         return transform_info
-    
+
     @_discretized
     def get_mem_requests(self, **kwds):
 
         # first we need to find out src and dst buffers for transforms (B0 and B1)
         nbytes = 0
-        for (_, _, (src_shape, src_dtype, src_view), 
-                   (dst_shape, dst_dtype, dst_view)) in self._transform_info:
+        for (_, _, (src_shape, src_dtype, src_view),
+             (dst_shape, dst_dtype, dst_view)) in self._transform_info:
             nbytes = max(nbytes, compute_nbytes(src_shape, src_dtype))
             nbytes = max(nbytes, compute_nbytes(dst_shape, dst_dtype))
         nbytes = max(nbytes, compute_nbytes(self.input_shape, self.input_dtype))
@@ -1645,19 +1665,21 @@ class PlannedSpectralTransform(object):
         # we can only do it by creating temporary plans prior to setup
         # with temporary buffers.
         tmp_nbytes = 0
-        tg  = self.transform_group
-        src = tg.FFTI.backend.empty(shape=(nbytes,), dtype=np.uint8, min_alignment=tg.op.min_fft_alignment) 
-        dst = tg.FFTI.backend.empty(shape=(nbytes,), dtype=np.uint8, min_alignment=tg.op.min_fft_alignment) 
+        tg = self.transform_group
+        src = tg.FFTI.backend.empty(shape=(nbytes,), dtype=np.uint8,
+                                    min_alignment=tg.op.min_fft_alignment)
+        dst = tg.FFTI.backend.empty(shape=(nbytes,), dtype=np.uint8,
+                                    min_alignment=tg.op.min_fft_alignment)
         queue = tg.FFTI.new_queue(tg=tg, name='tmp_queue')
-        for (_, tr, (src_shape, src_dtype, src_view), 
-                    (dst_shape, dst_dtype, dst_view)) in self._transform_info:
+        for (_, tr, (src_shape, src_dtype, src_view),
+             (dst_shape, dst_dtype, dst_view)) in self._transform_info:
             src_nbytes = compute_nbytes(src_shape, src_dtype)
             dst_nbytes = compute_nbytes(dst_shape, dst_dtype)
             b0 = src[:src_nbytes].view(dtype=src_dtype).reshape(src_shape)
             b1 = dst[:dst_nbytes].view(dtype=dst_dtype).reshape(dst_shape)
-            fft_plan = tg.FFTI.get_transform(tr)(a=b0.handle, out=b1.handle, 
-                                                  axis=self.field.dim-1,
-                                                  verbose=False)
+            fft_plan = tg.FFTI.get_transform(tr)(a=b0.handle, out=b1.handle,
+                                                 axis=self.field.dim-1,
+                                                 verbose=False)
             fft_plan.setup(queue=queue)
             tmp_nbytes = max(tmp_nbytes, fft_plan.required_buffer_size)
 
@@ -1665,56 +1687,55 @@ class PlannedSpectralTransform(object):
         del dst
 
         if (tmp_nbytes > nbytes):
-            msg='Planner claims to need more than buffer bytes as temporary buffer:'
-            msg+='\n  *Buffer bytes: {}'.format(bytes2str(nbytes))
-            msg+='\n  *Tmp    bytes: {}'.format(bytes2str(tmp_nbytes))
+            msg = 'Planner claims to need more than buffer bytes as temporary buffer:'
+            msg += '\n  *Buffer bytes: {}'.format(bytes2str(nbytes))
+            msg += '\n  *Tmp    bytes: {}'.format(bytes2str(tmp_nbytes))
             warnings.warn(msg, HysopFFTWarning)
 
-        backend   = self.transform_group.backend
-        mem_tag   = self.transform_group.mem_tag
+        backend = self.transform_group.backend
+        mem_tag = self.transform_group.mem_tag
         field_tag = self.dfield.name
-        kind      = backend.kind
-        
-        B0_tag   = '{}_{}_B0'.format(mem_tag, kind)
-        B1_tag   = '{}_{}_B1'.format(mem_tag, kind)
-        TMP_tag  = '{}_{}_TMP'.format(mem_tag, kind)
+        kind = backend.kind
+
+        B0_tag = '{}_{}_B0'.format(mem_tag, kind)
+        B1_tag = '{}_{}_B1'.format(mem_tag, kind)
+        TMP_tag = '{}_{}_TMP'.format(mem_tag, kind)
         ENERGY_tag = '{}_{}_ENERGY'.format(mem_tag, kind)
         MUTEXES_tag = '{}_{}_MUTEXES'.format(mem_tag, kind)
         self.B0_tag, self.B1_tag, self.TMP_tag, self.ENERGY_tag, self.MUTEXES_tag = B0_tag, B1_tag, TMP_tag, ENERGY_tag, MUTEXES_tag
 
-        requests =  {B0_tag:   nbytes,
-                     B1_tag:   nbytes,
-                     TMP_tag:  tmp_nbytes}
+        requests = {B0_tag:   nbytes,
+                    B1_tag:   nbytes,
+                    TMP_tag:  tmp_nbytes}
 
-        if (self._energy_nbytes>0):
-            requests[ENERGY_tag]  = self._energy_nbytes
-        if (self._mutexes_nbytes>0):
+        if (self._energy_nbytes > 0):
+            requests[ENERGY_tag] = self._energy_nbytes
+        if (self._mutexes_nbytes > 0):
             requests[MUTEXES_tag] = self._mutexes_nbytes
 
         return requests
 
-    
     @_discretized
     def setup(self, work):
         SETUP_DEBUG = False
         assert not self.ready
-       
-        dim  = self.field.dim
-        op   = self.op
-        tg   = self.transform_group
+
+        dim = self.field.dim
+        op = self.op
+        tg = self.transform_group
         FFTI = tg.FFTI
-        
-        is_forward  = self.is_forward
+
+        is_forward = self.is_forward
         is_backward = self.is_backward
 
-        ntransforms    = self._ntransforms
+        ntransforms = self._ntransforms
         transform_info = self._transform_info
         transpose_info = self._transpose_info
 
         B0_tag, B1_tag = self.B0_tag, self.B1_tag
-        TMP_tag        = self.TMP_tag
-        ENERGY_tag     = self.ENERGY_tag
-        MUTEXES_tag    = self.MUTEXES_tag
+        TMP_tag = self.TMP_tag
+        ENERGY_tag = self.ENERGY_tag
+        MUTEXES_tag = self.MUTEXES_tag
 
         # get temporary buffers
         B0,  = work.get_buffer(op, B0_tag,  handle=True)
@@ -1723,26 +1744,26 @@ class PlannedSpectralTransform(object):
         assert is_byte_aligned(B1)
 
         try:
-            TMP, = work.get_buffer(op, TMP_tag, handle=True) 
+            TMP, = work.get_buffer(op, TMP_tag, handle=True)
         except ValueError:
             TMP = None
-        
-        if (self._energy_nbytes>0):
+
+        if (self._energy_nbytes > 0):
             ENERGY, = work.get_buffer(op, ENERGY_tag, handle=True)
             energy_buffer = ENERGY[:self._energy_nbytes].view(dtype=self.dfield.dtype)
             assert energy_buffer.size == self._max_wavenumber+1
         else:
-            ENERGY  = None
-            energy_buffer  = None
+            ENERGY = None
+            energy_buffer = None
 
-        if (self._mutexes_nbytes>0):
+        if (self._mutexes_nbytes > 0):
             MUTEXES, = work.get_buffer(op, MUTEXES_tag, handle=True)
             mutexes_buffer = MUTEXES[:self._mutexes_nbytes].view(dtype=np.int32)
             assert mutexes_buffer.size == self._max_wavenumber+1
         else:
             MUTEXES = None
             mutexes_buffer = None
-        
+
         # bind field buffer to input or output
         dfield = self.dfield
         if is_forward:
@@ -1751,34 +1772,35 @@ class PlannedSpectralTransform(object):
             self.configure_output_buffer(dfield.sbuffer[dfield.compute_slices])
 
         # bind group buffer to input or output if required.
-        custom_input_buffer  = self._custom_input_buffer
+        custom_input_buffer = self._custom_input_buffer
         custom_output_buffer = self._custom_output_buffer
         if (is_forward and custom_output_buffer):
-            if (custom_output_buffer=='auto'):
+            if (custom_output_buffer == 'auto'):
                 # will be determined and set later
                 pass
-            elif (custom_output_buffer=='B0'):
+            elif (custom_output_buffer == 'B0'):
                 self.configure_output_buffer(B0)
-            elif (custom_output_buffer=='B1'):
+            elif (custom_output_buffer == 'B1'):
                 self.configure_output_buffer(B1)
             else:
-                msg='Unknown custom output buffer {}.'.format(custom_output_buffer)
+                msg = 'Unknown custom output buffer {}.'.format(custom_output_buffer)
                 raise NotImplementedError(msg)
         if (is_backward and custom_input_buffer):
-            if (custom_input_buffer=='auto'):
+            if (custom_input_buffer == 'auto'):
                 assert self._matching_forward_transform.ready
                 custom_input_buffer = self._matching_forward_transform._custom_output_buffer
                 assert custom_input_buffer in ('B0', 'B1')
-            if (custom_input_buffer=='B0'):
+            if (custom_input_buffer == 'B0'):
                 self.configure_input_buffer(B0)
-            elif (custom_input_buffer=='B1'):
+            elif (custom_input_buffer == 'B1'):
                 self.configure_input_buffer(B1)
             else:
-                msg='Unknown custom input buffer {}.'.format(custom_input_buffer)
+                msg = 'Unknown custom input buffer {}.'.format(custom_input_buffer)
                 raise NotImplementedError(msg)
-        
+
         # define input and output buffer, as well as tmp buffers
         src_buffer, dst_buffer = B0, B1
+
         def nameof(buf):
             assert (buf is B0) or (buf is B1)
             if (buf is B0):
@@ -1788,26 +1810,27 @@ class PlannedSpectralTransform(object):
 
         def check_size(buf, nbytes, name):
             if (buf.nbytes < nbytes):
-                msg='Insufficient buffer size for buffer {} (shape={}, dtype={}).'.format(name, buf.shape, buf.dtype)
-                msg+='\nExpected at least {} bytes but got {}.'.format(nbytes, buf.nbytes)
+                msg = 'Insufficient buffer size for buffer {} (shape={}, dtype={}).'.format(
+                    name, buf.shape, buf.dtype)
+                msg += '\nExpected at least {} bytes but got {}.'.format(nbytes, buf.nbytes)
                 try:
                     bname = nameof(buf)
-                    msg+='\nThis buffer has been identified as {}.'.format(bname)
+                    msg += '\nThis buffer has been identified as {}.'.format(bname)
                 except:
                     pass
                 raise RuntimeError(msg)
-        
+
         # build spectral transform execution queue
-        qname = 'fft_planner_{}_{}'.format(self.field.name, 
-                'forward' if is_forward else 'backward')
+        qname = 'fft_planner_{}_{}'.format(self.field.name,
+                                           'forward' if is_forward else 'backward')
         queue = FFTI.new_queue(tg=self, name=qname)
 
         if SETUP_DEBUG:
             def print_op(description, category):
                 prefix = '     |> '
                 print '{}{: <40}[{}]'.format(prefix, description, category)
-        
-            msg='''
+
+            msg = '''
 SPECTRAL TRANSFORM SETUP
   op:          {}
   dim:         {}
@@ -1815,9 +1838,9 @@ SPECTRAL TRANSFORM SETUP
   group_tag:   {}
   is_forward:  {}
   is_backward: {}'''.format(
-           op.pretty_tag,
-           dim, ntransforms, self.tag, 
-           is_forward, is_backward)
+                op.pretty_tag,
+                dim, ntransforms, self.tag,
+                is_forward, is_backward)
             print msg
 
         fft_plans = ()
@@ -1825,31 +1848,31 @@ SPECTRAL TRANSFORM SETUP
             transpose = transpose_info[i]
             transform = transform_info[i]
             (permutation, _, _, input_shape, output_shape) = transpose
-            (_, tr, (src_shape, src_dtype, src_view), 
-                    (dst_shape, dst_dtype, dst_view)) = transform
+            (_, tr, (src_shape, src_dtype, src_view),
+             (dst_shape, dst_dtype, dst_view)) = transform
             assert not STU.is_none(tr), 'Got a NONE transform type.'
-            
-            is_first = (i==0)
-            is_last  = (i==ntransforms-1)
 
-            should_forward_permute  = (is_forward  and (permutation is not None))
+            is_first = (i == 0)
+            is_last = (i == ntransforms-1)
+
+            should_forward_permute = (is_forward and (permutation is not None))
             should_backward_permute = (is_backward and (permutation is not None))
-            
+
             if SETUP_DEBUG:
-                msg='  TRANSFORM INDEX {}:'.format(i)
+                msg = '  TRANSFORM INDEX {}:'.format(i)
 
                 if (permutation is not None):
-                    msg+='''
-    Transpose Info: 
+                    msg += '''
+    Transpose Info:
       permutation:  {}
       input_shape:  {}
       output_shape: {}
       forward_permute:  {}
       backward_permute: {}'''.format(
-            permutation, input_shape, output_shape,
-            should_forward_permute, should_backward_permute)
+                        permutation, input_shape, output_shape,
+                        should_forward_permute, should_backward_permute)
 
-                msg+='''
+                msg += '''
     Custom buffers:
       custom_input:  {}
       custom output: {}
@@ -1857,18 +1880,18 @@ SPECTRAL TRANSFORM SETUP
       SRC: shape {} and type {}, view {}
       DST: shape {} and type {}, view {}
     Planned Operations:'''.format(
-            custom_input_buffer, custom_output_buffer,
-            src_shape, src_dtype, src_view,
-            dst_shape, dst_dtype, dst_view)
+                    custom_input_buffer, custom_output_buffer,
+                    src_shape, src_dtype, src_view,
+                    dst_shape, dst_dtype, dst_view)
                 print msg
-            
+
             src_nbytes = compute_nbytes(src_shape, src_dtype)
             dst_nbytes = compute_nbytes(dst_shape, dst_dtype)
 
-            # build forward permutation if required 
+            # build forward permutation if required
             # (forward transforms transpose before actual transforms)
             if should_forward_permute:
-                input_nbytes  = compute_nbytes(input_shape,  src_dtype)
+                input_nbytes = compute_nbytes(input_shape,  src_dtype)
                 output_nbytes = compute_nbytes(output_shape, src_dtype)
                 assert output_shape == src_shape, 'Transpose to Transform shape mismatch.'
                 assert input_nbytes == output_nbytes, 'Transpose input and output size mismatch.'
@@ -1883,8 +1906,8 @@ SPECTRAL TRANSFORM SETUP
                 b1 = dst_buffer[:output_nbytes].view(dtype=src_dtype).reshape(output_shape)
                 queue += FFTI.plan_transpose(tg=tg, src=b0, dst=b1, axes=permutation)
                 if SETUP_DEBUG:
-                    sfrom='input_buffer' if is_first else nameof(src_buffer)
-                    sto=nameof(dst_buffer)
+                    sfrom = 'input_buffer' if is_first else nameof(src_buffer)
+                    sto = nameof(dst_buffer)
                     print_op('PlanTranspose(src={}, dst={})'.format(sfrom, sto, permutation),
                              'forward permute')
                 src_buffer, dst_buffer = dst_buffer, src_buffer
@@ -1892,14 +1915,14 @@ SPECTRAL TRANSFORM SETUP
                 assert (self.input_buffer.shape == src_shape), 'input buffer shape mismatch.'
                 assert (self.input_buffer.dtype == src_dtype), 'input buffer dtype mismatch.'
                 assert src_buffer.nbytes >= src_nbytes, 'Insufficient buffer size for src buf.'
-                if ((custom_input_buffer is not None) and 
+                if ((custom_input_buffer is not None) and
                         (nameof(src_buffer) == custom_input_buffer)):
                     src_buffer, dst_buffer = dst_buffer, src_buffer
                 b0 = src_buffer[:src_nbytes].view(dtype=src_dtype).reshape(src_shape)
                 queue += FFTI.plan_copy(tg=tg, src=self.get_mapped_input_buffer, dst=b0)
                 if SETUP_DEBUG:
-                    sfrom='input_buffer'
-                    sto=nameof(src_buffer)
+                    sfrom = 'input_buffer'
+                    sto = nameof(src_buffer)
                     print_op('PlanCopy(src={}, dst={})'.format(sfrom, sto),
                              'pre-transform copy')
 
@@ -1913,15 +1936,15 @@ SPECTRAL TRANSFORM SETUP
             fft_plans += (fft_plan,)
             queue += fft_plan
             if SETUP_DEBUG:
-                sfrom=nameof(src_buffer)
-                sto=nameof(dst_buffer)
+                sfrom = nameof(src_buffer)
+                sto = nameof(dst_buffer)
                 print_op('PlanTransform(src={}, dst={})'.format(sfrom, sto), tr)
             src_buffer, dst_buffer = dst_buffer, src_buffer
-            
-            # build backward permutation if required 
+
+            # build backward permutation if required
             # (backward transforms transpose after actual transforms)
             if should_backward_permute:
-                input_nbytes  = compute_nbytes(input_shape,  dst_dtype)
+                input_nbytes = compute_nbytes(input_shape,  dst_dtype)
                 output_nbytes = compute_nbytes(output_shape, dst_dtype)
                 assert input_shape == dst_shape, 'Transform to Transpose shape mismatch.'
                 assert input_nbytes == output_nbytes, 'Transpose input and output size mismatch.'
@@ -1930,58 +1953,59 @@ SPECTRAL TRANSFORM SETUP
                 b0 = src_buffer[:input_nbytes].view(dtype=dst_dtype).reshape(input_shape)
                 if is_last and (self._action is SpectralTransformAction.OVERWRITE):
                     assert (self.output_buffer.shape == output_shape), \
-                            'output buffer shape mismatch.'
+                        'output buffer shape mismatch.'
                     assert (self.output_buffer.dtype == dst_dtype), \
-                            'output buffer dtype mismatch.'
+                        'output buffer dtype mismatch.'
                     b1 = self.get_mapped_output_buffer
                 else:
                     b1 = dst_buffer[:output_nbytes].view(dtype=dst_dtype).reshape(output_shape)
                 queue += FFTI.plan_transpose(tg=tg, src=b0, dst=b1, axes=permutation)
                 if SETUP_DEBUG:
-                    sfrom=nameof(src_buffer)
-                    sto='output_buffer' if is_last else nameof(dst_buffer)
+                    sfrom = nameof(src_buffer)
+                    sto = 'output_buffer' if is_last else nameof(dst_buffer)
                     print_op('PlanTranspose(src={}, dst={})'.format(sfrom, sto),
                              'backward permute')
                 src_buffer, dst_buffer = dst_buffer, src_buffer
                 if is_last and (self._action is not SpectralTransformAction.OVERWRITE):
                     if (self._action is SpectralTransformAction.ACCUMULATE):
                         assert (self.output_buffer.shape == output_shape), \
-                                'output buffer shape mismatch.'
+                            'output buffer shape mismatch.'
                         assert (self.output_buffer.dtype == dst_dtype), \
-                                'output buffer dtype mismatch.'
-                        queue += FFTI.plan_accumulate(tg=tg, src=b1, dst=self.get_mapped_output_buffer)
+                            'output buffer dtype mismatch.'
+                        queue += FFTI.plan_accumulate(tg=tg, src=b1,
+                                                      dst=self.get_mapped_output_buffer)
                         if SETUP_DEBUG:
-                            sfrom=nameof(dst_buffer)
-                            sto='output_buffer'
+                            sfrom = nameof(dst_buffer)
+                            sto = 'output_buffer'
                             print_op('PlanAccumulate(src={}, dst={})'.format(sfrom, sto),
                                      'post-transform accumulate')
                     else:
-                        msg='Unsupported action {}.'.format(self._action)
+                        msg = 'Unsupported action {}.'.format(self._action)
                         raise NotImplementedError(msg)
 
             elif is_last:
-                if (custom_output_buffer is not None): 
+                if (custom_output_buffer is not None):
                     if custom_output_buffer not in ('B0', 'B1', 'auto'):
-                        msg='Unknown custom output buffer {}.'.format(custom_output_buffer)
+                        msg = 'Unknown custom output buffer {}.'.format(custom_output_buffer)
                         raise NotImplementedError(msg)
-                    elif (custom_output_buffer=='auto'):
+                    elif (custom_output_buffer == 'auto'):
                         custom_output_buffer = nameof(dst_buffer)
                         self._custom_output_buffer = custom_output_buffer
-                        if (custom_output_buffer=='B0'):
+                        if (custom_output_buffer == 'B0'):
                             self.configure_output_buffer(B0)
-                        elif (custom_output_buffer=='B1'):
+                        elif (custom_output_buffer == 'B1'):
                             self.configure_output_buffer(B1)
                         else:
                             raise RuntimeError
                     elif (nameof(src_buffer) == custom_output_buffer):
-                        # This is a special case where we need to copy back and forth 
+                        # This is a special case where we need to copy back and forth
                         # (because of offsets)
                         b0 = src_buffer[:dst_nbytes].view(dtype=dst_dtype).reshape(dst_shape)
                         b1 = dst_buffer[:dst_nbytes].view(dtype=dst_dtype).reshape(dst_shape)
                         queue += FFTI.plan_copy(tg=tg, src=b0, dst=b1)
                         if SETUP_DEBUG:
-                            sfrom=nameof(src_buffer)
-                            sto=nameof(dst_buffer)
+                            sfrom = nameof(src_buffer)
+                            sto = nameof(dst_buffer)
                             print_op('PlanCopy(src={}, dst={})'.format(sfrom, sto),
                                      'post-transform copy')
                         src_buffer, dst_buffer = dst_buffer, src_buffer
@@ -1990,25 +2014,25 @@ SPECTRAL TRANSFORM SETUP
                 assert src_buffer.nbytes >= dst_nbytes, 'Insufficient buffer size for src buf.'
                 b0 = src_buffer[:dst_nbytes].view(dtype=dst_dtype).reshape(dst_shape)
                 if self._action is SpectralTransformAction.OVERWRITE:
-                    pname='PlanCopy'
-                    pdes='post-transform-copy'
+                    pname = 'PlanCopy'
+                    pdes = 'post-transform-copy'
                     queue += FFTI.plan_copy(tg=tg, src=b0, dst=self.get_mapped_output_buffer)
                 elif self._action is SpectralTransformAction.ACCUMULATE:
-                    pname='PlanAccumulate'
-                    pdes='post-transform-accumulate'
+                    pname = 'PlanAccumulate'
+                    pdes = 'post-transform-accumulate'
                     queue += FFTI.plan_accumulate(tg=tg, src=b0, dst=self.get_mapped_output_buffer)
                 else:
-                    msg='Unsupported action {}.'.format(self._action)
+                    msg = 'Unsupported action {}.'.format(self._action)
                     raise NotImplementedError(msg)
                 if SETUP_DEBUG:
-                    sfrom=nameof(src_buffer)
-                    sto='output_buffer' if (custom_output_buffer is None) \
-                                        else custom_output_buffer
+                    sfrom = nameof(src_buffer)
+                    sto = 'output_buffer' if (custom_output_buffer is None) \
+                        else custom_output_buffer
                     print_op('{}(src={}, dst={})'.format(pname, sfrom, sto),
                              pdes)
-    
+
         if self._zero_fill_output_slices:
-            buf  = self.get_mapped_full_output_buffer
+            buf = self.get_mapped_full_output_buffer
             slcs = self._zero_fill_output_slices
             queue += FFTI.plan_fill_zeros(tg=tg, a=buf, slices=slcs)
             if SETUP_DEBUG:
@@ -2017,23 +2041,24 @@ SPECTRAL TRANSFORM SETUP
 
         # allocate fft plans
         FFTI.allocate_plans(op, fft_plans, tmp_buffer=TMP)
-        
+
         # build kernels to compute energy if required
         if self._do_compute_energy:
-            field_buffer    = self.input_buffer  if self.is_forward else self.output_buffer
+            field_buffer = self.input_buffer if self.is_forward else self.output_buffer
             spectral_buffer = self.output_buffer if self.is_forward else self.input_buffer
-            compute_energy_queue  = FFTI.new_queue(tg=self, name='dump_energy')
+            compute_energy_queue = FFTI.new_queue(tg=self, name='dump_energy')
             compute_energy_queue += FFTI.plan_fill_zeros(tg=tg, a=energy_buffer, slices=(Ellipsis,))
             if (mutexes_buffer is not None):
                 unlock_mutexes = FFTI.plan_fill_zeros(tg=tg, a=mutexes_buffer, slices=(Ellipsis,))
                 compute_energy_queue += unlock_mutexes
                 compute_energy_queue().wait()  # we need this before compute energy to unlock mutexes
             compute_energy_queue += FFTI.plan_compute_energy(tg=tg,
-                                                         fshape=field_buffer.shape,
-                                                         src=spectral_buffer, dst=energy_buffer,
-                                                         transforms=self._permuted_transforms,
-                                                         mutexes=mutexes_buffer)
-            compute_energy_queue += FFTI.plan_copy(tg=tg, src=energy_buffer, dst=self._energy_parameter._value)
+                                                             fshape=field_buffer.shape,
+                                                             src=spectral_buffer, dst=energy_buffer,
+                                                             transforms=self._permuted_transforms,
+                                                             mutexes=mutexes_buffer)
+            compute_energy_queue += FFTI.plan_copy(tg=tg,
+                                                   src=energy_buffer, dst=self._energy_parameter._value)
         else:
             compute_energy_queue = None
 
@@ -2048,7 +2073,7 @@ SPECTRAL TRANSFORM SETUP
         evt = self._queue.execute(wait_for=evt)
         evt = self._post_transform_actions(wait_for=evt, **kwds)
         return evt
-    
+
     def _pre_transform_actions(self, simulation=None, wait_for=None, **kwds):
         evt = wait_for
         if (simulation is False):
@@ -2068,20 +2093,21 @@ SPECTRAL TRANSFORM SETUP
             if self._do_plot_energy:
                 evt = self.plot_energy(simulation=simulation, wait_for=evt)
         return evt
-    
+
     def compute_energy(self, simulation, wait_for):
-        msg='No simulation was passed in {}.__call__().'.format(type(self))
+        msg = 'No simulation was passed in {}.__call__().'.format(type(self))
         assert (simulation is not None), msg
         evt = wait_for
-        should_compute_energy = any(simulation.should_dump(frequency=f, with_last=True) for f in self._compute_energy_frequencies)  
+        should_compute_energy = any(simulation.should_dump(frequency=f, with_last=True)
+                                    for f in self._compute_energy_frequencies)
         if should_compute_energy:
             evt = self._compute_energy_queue(wait_for=evt)
             if self._do_dump_energy:
                 self._energy_dumper.update(simulation=simulation, wait_for=evt)
         return evt
-    
+
     def plot_energy(self, simulation, wait_for):
-        msg='No simulation was passed in {}.__call__().'.format(type(self))
+        msg = 'No simulation was passed in {}.__call__().'.format(type(self))
         assert (simulation is not None), msg
         evt = wait_for
         self._energy_plotter.update(simulation=simulation, wait_for=evt)
diff --git a/hysop/operator/parameter_plotter.py b/hysop/operator/parameter_plotter.py
index 231a2e0daf83d0b1e07585fd9f1c59cfb9bcff19..d41ce08b44e4b447249dec8f8ae4b185e044a18b 100644
--- a/hysop/operator/parameter_plotter.py
+++ b/hysop/operator/parameter_plotter.py
@@ -1,5 +1,4 @@
 from abc import abstractmethod
-from hysop.core.mpi import main_rank
 from hysop.tools.types import to_tuple, check_instance, first_not_None
 from hysop.tools.numpywrappers import npw
 from hysop.tools.io_utils import IO
@@ -7,6 +6,7 @@ from hysop.core.graph.computational_graph import ComputationalGraphOperator
 from hysop.parameters.scalar_parameter import ScalarParameter
 from hysop.parameters.tensor_parameter import TensorParameter
 
+
 class PlottingOperator(ComputationalGraphOperator):
     """
     Base operator for plotting.
@@ -16,15 +16,15 @@ class PlottingOperator(ComputationalGraphOperator):
         return True
 
     def __init__(self, name=None,
-            dump_dir=None,
-            update_frequency=1,
-            save_frequency=100,
-            axes_shape=(1,),
-            figsize=(30,18),
-            visu_rank=0,
-            fig=None,
-            axes=None,
-            **kwds):
+                 dump_dir=None,
+                 update_frequency=1,
+                 save_frequency=100,
+                 axes_shape=(1,),
+                 figsize=(30, 18),
+                 visu_rank=0,
+                 fig=None,
+                 axes=None,
+                 **kwds):
 
         import matplotlib
         import matplotlib.pyplot as plt
@@ -35,9 +35,8 @@ class PlottingOperator(ComputationalGraphOperator):
         check_instance(axes_shape, tuple, minsize=1, allow_none=True)
         super(PlottingOperator, self).__init__(**kwds)
 
-
         if (fig is None) ^ (axes is None):
-            msg='figure and axes should be specified at the same time.'
+            msg = 'figure and axes should be specified at the same time.'
             raise RuntimeError(msg)
 
         dump_dir = first_not_None(dump_dir, IO.default_path())
@@ -50,12 +49,12 @@ class PlottingOperator(ComputationalGraphOperator):
 
         axes = npw.asarray(axes).reshape(axes_shape)
 
-        self.fig  = fig
+        self.fig = fig
         self.axes = axes
         self.update_frequency = update_frequency
         self.save_frequency = save_frequency
         self.imgpath = imgpath
-        self.should_draw = (visu_rank == main_rank)
+        self.should_draw = (visu_rank == self.mpi_params.rank)
         self.running = True
         self.plt = plt
 
@@ -86,10 +85,10 @@ class PlottingOperator(ComputationalGraphOperator):
 
     def save(self, **kwds):
         self.fig.savefig(self.imgpath, dpi=self.fig.dpi,
-                bbox_inches='tight')
+                         bbox_inches='tight')
 
     def on_close(self, event):
-        self.running  = False
+        self.running = False
 
     def on_key_press(self, event):
         key = event.key
@@ -104,13 +103,13 @@ class ParameterPlotter(PlottingOperator):
     """
 
     def __init__(self, name, parameters, alloc_size=128,
-            fig=None, axes=None, shape=None, **kwds):
+                 fig=None, axes=None, shape=None, **kwds):
 
         input_params = {}
         if (fig is not None) and (axes is not None):
             import matplotlib
             custom_axes = True
-            axes_shape=None
+            axes_shape = None
             check_instance(parameters, dict, keys=matplotlib.axes.Axes, values=dict)
             for params in parameters.values():
                 check_instance(params, dict, keys=str, values=ScalarParameter)
@@ -120,36 +119,37 @@ class ParameterPlotter(PlottingOperator):
             _parameters = {}
             if isinstance(parameters, TensorParameter):
                 _parameters[0] = parameters
-            elif isinstance(parameters, (list,tuple)):
-                for (i,p) in enumerate(parameters):
+            elif isinstance(parameters, (list, tuple)):
+                for (i, p) in enumerate(parameters):
                     _parameters[i] = parameters
             elif isinstance(parameters, dict):
                 _parameters = parameters.copy()
             else:
                 raise TypeError(type(parameters))
-            check_instance(_parameters, dict, keys=(int,tuple,list), values=(TensorParameter,list,tuple,dict))
+            check_instance(_parameters, dict, keys=(int, tuple, list),
+                           values=(TensorParameter, list, tuple, dict))
 
             parameters = {}
             axes_shape = (1,)*2
-            for (pos,params) in _parameters.iteritems():
+            for (pos, params) in _parameters.iteritems():
                 pos = to_tuple(pos)
                 pos = (2-len(pos))*(0,) + pos
                 check_instance(pos, tuple, values=int)
-                axes_shape=tuple(max(p0,p1+1) for (p0,p1) in zip(axes_shape, pos))
+                axes_shape = tuple(max(p0, p1+1) for (p0, p1) in zip(axes_shape, pos))
                 if isinstance(params, dict):
-                    input_params.update({p.name:p for p in params.values()})
+                    input_params.update({p.name: p for p in params.values()})
                 elif isinstance(params, TensorParameter):
                     input_params[params.name] = params
                     params = {params.name: params}
-                elif isinstance(params, (list,tuple)):
+                elif isinstance(params, (list, tuple)):
                     for p in params:
                         input_params[p.name] = p
-                    params = {p.name:p for p in params}
+                    params = {p.name: p for p in params}
                 else:
                     raise TypeError(type(params))
                 check_instance(params, dict, keys=str, values=TensorParameter)
                 _params = {}
-                for (pname,p) in params.iteritems():
+                for (pname, p) in params.iteritems():
                     if isinstance(p, ScalarParameter):
                         _params[pname] = p
                     else:
@@ -160,22 +160,22 @@ class ParameterPlotter(PlottingOperator):
                 parameters[pos] = _params
 
         super(ParameterPlotter, self).__init__(name=name, input_params=input_params,
-                axes_shape=axes_shape, axes=axes, fig=fig, **kwds)
+                                               axes_shape=axes_shape, axes=axes, fig=fig, **kwds)
 
         self.custom_axes = custom_axes
 
-        data  = {}
+        data = {}
         lines = {}
         times = npw.empty(shape=(alloc_size,), dtype=npw.float32)
-        for (pos,params) in parameters.iteritems():
-            params_data  = {}
+        for (pos, params) in parameters.iteritems():
+            params_data = {}
             params_lines = {}
-            for (pname,p) in params.iteritems():
+            for (pname, p) in params.iteritems():
                 pdata = npw.empty(shape=(alloc_size,), dtype=p.dtype)
                 pline = self.get_axes(pos).plot([], [], label=pname)[0]
-                params_data[p]  = pdata
+                params_data[p] = pdata
                 params_lines[p] = pline
-            data[pos]  = params_data
+            data[pos] = params_data
             lines[pos] = params_lines
         self.fig.canvas.set_window_title('HySoP Parameter Plotter')
 
@@ -193,7 +193,6 @@ class ParameterPlotter(PlottingOperator):
         else:
             return axes[pos]
 
-
     def __getitem__(self, i):
         if self.custom_axes:
             return self.axes[i]
@@ -202,7 +201,7 @@ class ParameterPlotter(PlottingOperator):
 
     def update(self, simulation, **kwds):
         # expand memory if required
-        if (self.counter+1>self.times.size):
+        if (self.counter+1 > self.times.size):
             times = npw.empty(shape=(2*self.times.size,), dtype=self.times.dtype)
             times[:self.times.size] = self.times
             self.times = times
@@ -214,8 +213,8 @@ class ParameterPlotter(PlottingOperator):
 
         times, data, lines = self.times, self.data, self.lines
         times[self.counter] = simulation.t()
-        for (pos,params) in self.parameters.iteritems():
-            for (pname,p) in params.iteritems():
+        for (pos, params) in self.parameters.iteritems():
+            for (pname, p) in params.iteritems():
                 data[pos][p][self.counter] = p()
                 lines[pos][p].set_xdata(times[:self.counter])
                 lines[pos][p].set_ydata(data[pos][p][:self.counter])
diff --git a/hysop/operators.py b/hysop/operators.py
index f1f732a86576a0072c32985573d185ebc7787c52..4ab660d2dc823676c7b346516617c1dccfe39bae 100644
--- a/hysop/operators.py
+++ b/hysop/operators.py
@@ -49,7 +49,6 @@ try:
     from hysop.backend.device.opencl.operator.external_force import SymbolicExternalForce
 except ImportError:
     SymbolicExternalForce = None
-from hysop.operator.permeability import PermeabilityEstimate
 
 from hysop.numerics.splitting.strang import StrangSplitting
 from hysop.operator.directional.symbolic_dir import DirectionalSymbolic
diff --git a/hysop/parameters/parameter.py b/hysop/parameters/parameter.py
index 93bd52157ba50d1536c9b3b4d0130974872ab825..f09da60d8fd6dba4678cd57cf60cee96ed3925b0 100644
--- a/hysop/parameters/parameter.py
+++ b/hysop/parameters/parameter.py
@@ -8,7 +8,7 @@ from hysop import dprint, vprint, __DEBUG__
 from hysop.tools.types import check_instance, to_tuple, first_not_None
 from hysop.tools.handle import TaggedObject
 from hysop.tools.variable import Variable, VariableTag
-from hysop.core.mpi import main_rank
+
 
 class Parameter(TaggedObject, VariableTag):
     """
@@ -18,10 +18,10 @@ class Parameter(TaggedObject, VariableTag):
     __metaclass__ = ABCMeta
 
     def __new__(cls, name, parameter_types,
-                    initial_value=None, allow_None=False,
-                    quiet=False, const=False, 
-                    pretty_name=None, var_name=None,
-                    is_view=False, **kwds):
+                initial_value=None, allow_None=False,
+                quiet=False, const=False,
+                pretty_name=None, var_name=None,
+                is_view=False, **kwds):
         """
         Create or _get an existing Parameter with a specific name
         and type.
@@ -72,7 +72,7 @@ class Parameter(TaggedObject, VariableTag):
             parameter_types = to_tuple(parameter_types)
 
         if const and (initial_value is None):
-            msg='Constant parameter should be initialized.'
+            msg = 'Constant parameter should be initialized.'
             raise ValueError(msg)
 
         # register own class to authorized parameter types
@@ -84,7 +84,7 @@ class Parameter(TaggedObject, VariableTag):
         parameter_types = tuple(set(parameter_types))
 
         obj = super(Parameter, cls).__new__(cls,
-                tag_prefix='p', variable_kind=Variable.PARAMETER, **kwds)
+                                            tag_prefix='p', variable_kind=Variable.PARAMETER, **kwds)
 
         pretty_name = first_not_None(pretty_name, name)
         if isinstance(pretty_name, unicode):
@@ -101,32 +101,34 @@ class Parameter(TaggedObject, VariableTag):
         obj._const = const
         obj._symbol = None
         obj._quiet = quiet
-        obj._is_view  = is_view
+        obj._is_view = is_view
 
         return obj
 
     def __eq__(self, other):
         return (self is other)
+
     def __ne__(self, other):
         return (self is not other)
+
     def __hash__(self):
         return id(self)
 
     def set_value(self, value):
         """Set the value of this Parameter object."""
         if self._const:
-            msg='Cannot modify the value of constant parameter {}.'.format(self.pretty_name)
+            msg = 'Cannot modify the value of constant parameter {}.'.format(self.pretty_name)
             raise RuntimeError(msg)
         if not isinstance(value, self._parameter_types):
-            msg ='Parameter.set_value() got a value of type {} but '
-            msg+='only the following types are valid for parameter {}:\n  {}'
-            msg=msg.format(type(value), self.pretty_name, self._parameter_types)
+            msg = 'Parameter.set_value() got a value of type {} but '
+            msg += 'only the following types are valid for parameter {}:\n  {}'
+            msg = msg.format(type(value), self.pretty_name, self._parameter_types)
             raise ValueError(msg)
         if isinstance(value, self.__class__):
             value = value._get_value()
         self._set_value_impl(value)
         if not self.quiet or __DEBUG__:
-            msg='>Parameter {} set to {}.'.format(
+            msg = '>Parameter {} set to {}.'.format(
                 self.pretty_name, value)
             vprint(msg)
 
@@ -140,15 +142,19 @@ class Parameter(TaggedObject, VariableTag):
     def _get_name(self):
         """Return parameter name."""
         return self._name
+
     def _get_pretty_name(self):
         """Return parameter pretty name."""
         return self._pretty_name
+
     def _get_var_name(self):
         """Return parameter variable name."""
         return self._pretty_name
+
     def _get_const(self):
         """Return True if this parameter was set to be constant."""
         return self._const
+
     def _get_quiet(self):
         """Return True if this parameter was set to be quiet."""
         return self._quiet
@@ -173,6 +179,7 @@ class Parameter(TaggedObject, VariableTag):
     @abstractmethod
     def _get_value_impl(self):
         pass
+
     @abstractmethod
     def _set_value_impl(self):
         pass
@@ -181,6 +188,7 @@ class Parameter(TaggedObject, VariableTag):
     def short_description(self):
         """Return a short description of this parameter as a string."""
         pass
+
     @abstractmethod
     def long_description(self):
         """Return a long description of this parameter as a string."""
diff --git a/hysop/tools/debug_dumper.py b/hysop/tools/debug_dumper.py
index 3c6acaa2a568a5158cb8a98889f841c8177d8b65..a7b51459a0506e2168a3fadc901ca55a1bcb4c7a 100644
--- a/hysop/tools/debug_dumper.py
+++ b/hysop/tools/debug_dumper.py
@@ -1,24 +1,29 @@
-
-import os, shutil, datetime, inspect, argparse, sys
+import os
+import shutil
+import datetime
+import inspect
+import argparse
+import sys
 import numpy as np
 
-from hysop import main_rank, MPI
+from hysop import MPI
 from hysop.tools.types import check_instance
 from hysop.fields.discrete_field import DiscreteScalarFieldView
 
+
 class DebugDumper(object):
     def __init__(self, name, path='/tmp/hysop/debug', force_overwrite=False,
-            enable_on_op_apply=False, dump_precision=10,
-            comm=MPI.COMM_WORLD, io_leader=0):
+                 enable_on_op_apply=False, dump_precision=10,
+                 comm=MPI.COMM_WORLD, io_leader=0):
         directory = path+'/'+name
         blobs_directory = directory + '/data'
-        
+
         self.name = name
         self.directory = directory
         self.blobs_directory = blobs_directory
         self.dump_id = 0
         self.enable_on_op_apply = enable_on_op_apply
-        self.dump_precision=dump_precision
+        self.dump_precision = dump_precision
 
         self.comm = comm
         self.io_leader = io_leader
@@ -27,13 +32,12 @@ class DebugDumper(object):
         assert 0 <= self.io_leader < self.comm_size
         self.is_io_leader = (self.comm_rank == self.io_leader)
 
-        
         if self.is_io_leader:
             if os.path.exists(directory):
                 if force_overwrite:
                     shutil.rmtree(directory)
                 else:
-                    msg='Directory \'{}\' already exists.'.format(directory)
+                    msg = 'Directory \'{}\' already exists.'.format(directory)
                     raise RuntimeError(msg)
             os.makedirs(blobs_directory)
             runfile = '{}/run.txt'.format(directory)
@@ -47,16 +51,16 @@ class DebugDumper(object):
         if hasattr(self, 'runfile') and (self.runfile is not None):
             self.runfile.close()
             self.runfile = None
-    
+
     @classmethod
     def lformat(cls, id_, iteration, time, tag, min_, max_, mean, variance, dtype, shape, description='', dump_precision=None):
         try:
             return '{:<4}  {:<10}  {:<20.{p}f}  {:<40}  {:<+20.{p}f}  {:<+20.{p}f}  {:<+20.{p}f}  {:<+20.{p}f}  {:<20}  {:<20}  {}'.format(
-                    id_, iteration, time, tag, min_, max_, mean, variance, dtype, shape, description, p=dump_precision)
+                id_, iteration, time, tag, min_, max_, mean, variance, dtype, shape, description, p=dump_precision)
         except:
             return '{:<4}  {:<10}  {:<20}  {:<40}  {:<20}  {:<20}  {:<20}  {:<20}  {:<20}  {:<20}  {}'.format(
-                    id_, iteration, time, tag, min_, max_, mean, variance, dtype, shape, description)
-    
+                id_, iteration, time, tag, min_, max_, mean, variance, dtype, shape, description)
+
     def print_header(self):
         now = datetime.datetime.now()
         self.runfile.write('DEBUG DUMP {} ({})\n'.format(
@@ -74,7 +78,6 @@ class DebugDumper(object):
         else:
             check_instance(arrays, tuple, values=np.ndarray)
             return arrays
-        
 
     def __call__(self, iteration, time, tag, arrays, description=''):
         check_instance(iteration, int)
@@ -83,18 +86,18 @@ class DebugDumper(object):
         comm = self.comm
         comm_size = self.comm_size
         for (i, data) in enumerate(arrays):
-            if (N>1):
+            if (N > 1):
                 tag_ = '{}_{}'.format(tag, i)
             else:
                 tag_ = tag
             if (description != ''):
-                if (N>1):
+                if (N > 1):
                     description_ = '{} (component {})'.format(description, i)
                 else:
                     description_ = description
             else:
-                _file,_line = inspect.stack()[1][1:3]
-                description_='{}:{}'.format(_file, _line)
+                _file, _line = inspect.stack()[1][1:3]
+                description_ = '{}:{}'.format(_file, _line)
             dtype = data.dtype
             if dtype in (np.complex64, np.complex128):
                 data = (data.real, data.imag)
@@ -102,22 +105,23 @@ class DebugDumper(object):
             else:
                 data = (data,)
                 tags = (tag_,)
-            for (d,tag_) in zip(data, tags):
+            for (d, tag_) in zip(data, tags):
                 dtype = d.dtype
                 shape = None
                 id_ = self.dump_id
-                _min      = comm.allreduce(float(np.nanmin(d)),  op=MPI.MIN)
-                _max      = comm.allreduce(float(np.nanmax(d)),  op=MPI.MAX)
-                mean      = comm.allreduce(float(np.nanmean(d))) / comm_size
-                variance  = comm.allreduce(float(np.nansum((d-mean)**2))) / float(comm.allreduce(long(d.size)))
-                entry = '\n'+self.lformat(id_, iteration, time, tag_, _min, _max, mean, variance, dtype, shape, description_, self.dump_precision)
-                
+                _min = comm.allreduce(float(np.nanmin(d)),  op=MPI.MIN)
+                _max = comm.allreduce(float(np.nanmax(d)),  op=MPI.MAX)
+                mean = comm.allreduce(float(np.nanmean(d))) / comm_size
+                variance = comm.allreduce(float(np.nansum((d-mean)**2))) / \
+                    float(comm.allreduce(long(d.size)))
+                entry = '\n'+self.lformat(id_, iteration, time, tag_, _min, _max,
+                                          mean, variance, dtype, shape, description_, self.dump_precision)
+
                 if self.is_io_leader:
                     self.runfile.write(entry)
-                
-                if (comm_size==1):
+
+                if (comm_size == 1):
                     dst = '{}/{}'.format(self.blobs_directory, self.dump_id)
                     np.savez_compressed(dst, data=d)
 
                 self.dump_id += 1
-