diff --git a/examples/particles_above_salt/particles_above_salt_bc.py b/examples/particles_above_salt/particles_above_salt_bc.py
index d662ea341bbd314e8add960952b54dc9555c2b3c..120f7bd84ded9408dd97629eb8b23e9250bdaca1 100644
--- a/examples/particles_above_salt/particles_above_salt_bc.py
+++ b/examples/particles_above_salt/particles_above_salt_bc.py
@@ -183,13 +183,16 @@ def compute(args):
     
     #> Diffusion of vorticity, S and C
     diffuse_S = Diffusion(implementation=impl,
+             enforce_implementation=False,
              name='diffuse_S',
              pretty_name='diffS',
              nu = nu_S,
              Fin = S,
              variables = {S: npts},
-             dt=dt, **extra_op_kwds)
+             dt=dt, 
+             **extra_op_kwds)
     diffuse_C = Diffusion(implementation=impl,
+             enforce_implementation=False,
              name='diffuse_C',
              pretty_name='diffC',
              nu = nu_C,
diff --git a/hysop/backend/device/opencl/opencl_operator.py b/hysop/backend/device/opencl/opencl_operator.py
index 385be9e0dba3ba8de590fd9c5afe594d101a1eb4..124c513e07e22584f1212b0d0aa18c6ca8626a83 100644
--- a/hysop/backend/device/opencl/opencl_operator.py
+++ b/hysop/backend/device/opencl/opencl_operator.py
@@ -108,8 +108,9 @@ class OpenClOperator(ComputationalGraphOperator):
             msg0='MPI Communicators do not match between OpenClEnvironment and MPIParams.'
             msg0+='\n  => {}'.format(msg)
             raise RuntimeError(msg0)
 
-    def supported_backends(self):
+    @classmethod
+    def supported_backends(cls):
         """
         Return the backends that this operator's topologies can support.
         """
diff --git a/hysop/backend/host/host_operator.py b/hysop/backend/host/host_operator.py
index 21d820dd5154451214aaab05245ef492fa6d0e41..38c8748b7d33d1ddfb1e9e7cb04bf300207e70fa 100644
--- a/hysop/backend/host/host_operator.py
+++ b/hysop/backend/host/host_operator.py
@@ -25,9 +25,19 @@ class HostOperator(ComputationalGraphOperator):
         Backend.HOST and share the same HostEnvironment.
         """
         super(HostOperator, self).__init__(**kwds)
 
-    def supported_backends(self):
+    @classmethod
+    def supported_backends(cls):
         """
         Return the backends that this operator's topologies can support.
         """
         return set([Backend.HOST])
+
+
+class OpenClMappable(object):
+    """
+    Mixin for HostOperator subclasses whose OpenCL buffers can be mapped to
+    host memory: extends supported_backends() with Backend.OPENCL.
+    """
+
+    @classmethod
+    def supported_backends(cls):
+        sb = super(OpenClMappable, cls).supported_backends()
+        sb.add(Backend.OPENCL)
+        return sb
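
The OpenClMappable mixin relies on cooperative super() calls: placed before HostOperator in a class's bases, its supported_backends() extends the host-only set with Backend.OPENCL. A minimal sketch of the expected behaviour, checked against PythonDiffusion (which gains this mixin later in this patch); the assertions mirror the ones performed by the spectral frontend:

    from hysop.constants import Backend
    from hysop.backend.host.python.operator.diffusion import PythonDiffusion

    # supported_backends() is a classmethod resolved through the MRO:
    # OpenClMappable adds Backend.OPENCL on top of HostOperator's Backend.HOST.
    backends = PythonDiffusion.supported_backends()
    assert Backend.HOST in backends
    assert Backend.OPENCL in backends
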
diff --git a/hysop/backend/host/python/operator/diffusion.py b/hysop/backend/host/python/operator/diffusion.py
index 780c2c60036d8928d67df83985f3d67f8d7862e5..b84ffcf06137fb4960a50386472274afe8bddb42 100644
--- a/hysop/backend/host/python/operator/diffusion.py
+++ b/hysop/backend/host/python/operator/diffusion.py
@@ -1,12 +1,13 @@
 import functools
 import numba as nb
 
+from hysop.constants import Backend
 from hysop.tools.types import check_instance, first_not_None
 from hysop.tools.decorators import debug
 from hysop.tools.numpywrappers import npw
 from hysop.tools.numerics import is_complex, complex_to_float_dtype
 from hysop.tools.numba_utils import make_numba_signature
-from hysop.backend.host.host_operator import HostOperator
+from hysop.backend.host.host_operator import HostOperator, OpenClMappable
 from hysop.core.graph.graph import op_apply
 from hysop.fields.continuous_field import Field
 from hysop.parameters.parameter import Parameter
@@ -14,11 +15,11 @@ from hysop.topology.cartesian_descriptor import CartesianTopologyDescriptors
 from hysop.operator.base.diffusion import DiffusionOperatorBase
 
 
-class PythonDiffusion(DiffusionOperatorBase, HostOperator):
+class PythonDiffusion(DiffusionOperatorBase, OpenClMappable, HostOperator):
     """
     Solves the implicit diffusion equation using numpy fft.
     """
 
     @classmethod
     def build_diffusion_filter(cls, dim, *args, **kwds):
         target = kwds.get('target', 'cpu')
@@ -94,5 +95,6 @@ class PythonDiffusion(DiffusionOperatorBase, HostOperator):
             Ft()
             filter_diffusion(nu_dt, Ft.output_buffer)
             Bt()
+        for Fo in self.dFout.dfields:
             Fo.exchange_ghosts()
 
diff --git a/hysop/core/graph/computational_operator.py b/hysop/core/graph/computational_operator.py
index e7c591666fe11ffbf47679f422b7549bd4ebd2bd..4b218dbad8338f55a548e8b437997400546bcd4c 100644
--- a/hysop/core/graph/computational_operator.py
+++ b/hysop/core/graph/computational_operator.py
@@ -521,13 +521,13 @@ class ComputationalGraphOperator(ComputationalGraphNode):
                 req_id = 'tmp_{}_{}'.format(dfield.name, dfield.tag)
                 data = work.get_buffer(self, req_id)
                 dfield.dfield.honor_memory_request(data)
 
-    def supported_backends(self):
+    @classmethod
+    def supported_backends(cls):
         """
         Return the backends that this operator's topologies can support as a set.
         By default all operators support only Backend.HOST.
         """
-        # TODO check if this is really required
         return set([Backend.HOST])
 
     @debug
@@ -645,7 +645,7 @@ class ComputationalGraphOperator(ComputationalGraphNode):
                 msg+='\n -> this operator only supports the following backends:'
                 msg+='\n     *'+'\n     *'.join([str(b) for b in supported_backends])
                 msg+='\n -> bad fields were:'
-                msg+='\n     *'+'\n     *'.join([f.field_tag for f in bad_fields])
+                msg+='\n     *'+'\n     *'.join([f.full_tag for f in bad_fields])
                 print '\nFATAL ERROR: Topology backend mismatch.\n'
                 print 'Offending topologies were:'
                 for t in topologies:
diff --git a/hysop/numerics/fft/fft.py b/hysop/numerics/fft/fft.py
index 2bb87ba79fc381e0474b9c695eadd69c068197be..3e7d683341068f47c9f5f4483f386f1ccc29d909 100644
--- a/hysop/numerics/fft/fft.py
+++ b/hysop/numerics/fft/fft.py
@@ -264,23 +264,36 @@ class FFTI(object):
     }
     
     @classmethod
-    def default_interface_from_backend(cls, backend, **kwds):
+    def default_interface_from_backend(cls, backend, 
+            enable_opencl_host_buffer_mapping, **kwds):
         check_instance(backend, ArrayBackend)
         if (backend.kind is Backend.HOST):
             from hysop.numerics.fft.host_fft import HostFFTI
+            assert not enable_opencl_host_buffer_mapping
             return HostFFTI.default_interface(**kwds)
         elif (backend.kind is Backend.OPENCL):
-            from hysop.numerics.fft.opencl_fft import OpenClFFTI
-            return OpenClFFTI.default_interface(cl_env=backend.cl_env, **kwds)
+            if enable_opencl_host_buffer_mapping:
+                from hysop.numerics.fft.host_fft import HostFFTI
+                return HostFFTI.default_interface(backend=backend.host_array_backend, **kwds)
+            else:
+                from hysop.numerics.fft.opencl_fft import OpenClFFTI
+                return OpenClFFTI.default_interface(cl_env=backend.cl_env, **kwds)
         else:
             msg='Unknown backend kind {}.'.format(backend.kind)
+            raise RuntimeError(msg)
 
-    def check_backend(self, backend):
+    def check_backend(self, backend,
+            enable_opencl_host_buffer_mapping):
         check_instance(backend, ArrayBackend)
-        if (self.backend is not backend):
-            msg='Backend mismatch {} vs {}.'
-            msg=msg.format(self.backend, backend)
-            raise RuntimeError(msg)
+        if enable_opencl_host_buffer_mapping:
+            if (self.backend is not backend.host_array_backend):
+                msg='Host array backend mismatch {} vs {}.'
+                msg=msg.format(self.backend, backend.host_array_backend)
+                raise RuntimeError(msg)
+        else:
+            if (self.backend is not backend):
+                msg='Backend mismatch {} vs {}.'
+                msg=msg.format(self.backend, backend)
+                raise RuntimeError(msg)
     
     def get_transform(self, transform):
         check_instance(transform, TransformType)
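
default_interface_from_backend() and check_backend() now take the buffer-mapping flag into account. A hedged usage sketch of the dispatch, assuming `host_backend` and `opencl_cpu_backend` are pre-built ArrayBackend instances (hypothetical names, not defined in this patch):

    from hysop.numerics.fft.fft import FFTI

    # HOST backend: the default (pyFFTW-based) HostFFTI; the mapping flag must stay False.
    ffti = FFTI.default_interface_from_backend(host_backend,
            enable_opencl_host_buffer_mapping=False)
    ffti.check_backend(host_backend, enable_opencl_host_buffer_mapping=False)

    # CPU OpenCL backend with buffer mapping enabled: a HostFFTI working on the
    # mapped host arrays, so check_backend() compares against
    # backend.host_array_backend rather than the OpenCL backend itself.
    ffti = FFTI.default_interface_from_backend(opencl_cpu_backend,
            enable_opencl_host_buffer_mapping=True)
    ffti.check_backend(opencl_cpu_backend, enable_opencl_host_buffer_mapping=True)
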
diff --git a/hysop/numerics/fft/host_fft.py b/hysop/numerics/fft/host_fft.py
index 25a272b9596f77a187fdf062cb1191cfb307ba48..a477679ed0c3fdbf606e643f00fa3812e5593289 100644
--- a/hysop/numerics/fft/host_fft.py
+++ b/hysop/numerics/fft/host_fft.py
@@ -82,24 +82,41 @@ class HostFFTI(FFTI):
         return HostFFTQueue(name=name)
                        
     def plan_copy(self, op, src, dst):
+        src = self.ensure_callable(src)
+        dst = self.ensure_callable(dst)
         def exec_copy(src=src, dst=dst):
-            dst[...] = src
+            dst()[...] = src()
         return exec_copy
     
     def plan_accumulate(self, op, src, dst):
+        src = self.ensure_callable(src)
+        dst = self.ensure_callable(dst)
         def exec_copy(src=src, dst=dst):
-            dst[...] += src
+            dst()[...] += src()
         return exec_copy
 
     def plan_transpose(self, op, src, dst, axes):
+        src = self.ensure_callable(src)
+        dst = self.ensure_callable(dst)
         def exec_transpose(src=src, dst=dst, axes=axes):
-            dst[...] = np.transpose(a=src, axes=axes)
+            dst()[...] = np.transpose(a=src(), axes=axes)
         return exec_transpose
     
     def plan_fill_zeros(self, op, a, slices):
         assert slices
+        a = self.ensure_callable(a)
         def exec_fill_zeros(a=a, slices=slices):
+            buf = a()
             for slc in slices:
-                a[slc] = 0
+                buf[slc] = 0
         return exec_fill_zeros
 
+
+    @classmethod
+    def ensure_callable(cls, get_buffer):
+        if callable(get_buffer):
+            return get_buffer
+        else:
+            def get_buf(buf=get_buffer):
+                return buf
+            return get_buf
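
The host planning helpers now accept either a concrete array or a zero-argument callable returning one, so mapped buffers can be fetched at execution time rather than at planning time. A minimal sketch of ensure_callable and of the lazy plan_copy closure, assuming the default pyFFTW-backed interface keeps these base-class helpers:

    import numpy as np
    from hysop.numerics.fft.host_fft import HostFFTI

    src = np.arange(4, dtype=np.float64)
    dst = np.zeros_like(src)

    # ensure_callable wraps plain arrays and passes callables through untouched.
    assert HostFFTI.ensure_callable(lambda: src)() is src
    assert HostFFTI.ensure_callable(src)() is src

    # plan_copy captures callables, so the actual buffers are only dereferenced
    # when the planned closure is executed (op is unused by the host helper).
    ffti = HostFFTI.default_interface()
    copy = ffti.plan_copy(op=None, src=(lambda: src), dst=(lambda: dst))
    copy()
    assert np.array_equal(dst, src)
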
diff --git a/hysop/numerics/fft/opencl_fft.py b/hysop/numerics/fft/opencl_fft.py
index ea4ee2d9d60801ddaec676c2bf71c8bd86dfa745..774079227844e86aa28a09e44279ebb9c1221ccf 100644
--- a/hysop/numerics/fft/opencl_fft.py
+++ b/hysop/numerics/fft/opencl_fft.py
@@ -95,17 +95,23 @@ class OpenClFFTI(FFTI):
         return OpenClFFTQueue(queue=op.cl_env.default_queue, name=name)
                        
     def plan_copy(self, op, src, dst):
+        src = self.ensure_buffer(src)
+        dst = self.ensure_buffer(dst)
         launcher = OpenClCopyBufferRectLauncher.from_slices('copy',
                             src=src, dst=dst)
         return launcher
     
     def plan_accumulate(self, op, src, dst):
+        src = self.ensure_buffer(src)
+        dst = self.ensure_buffer(dst)
         src, dst = self.kernel_generator.arrays_to_symbols(src, dst)
         expr = Assignment(dst, src+dst)
         launcher, _ = self.kernel_generator.elementwise_kernel('accumulate', expr)
         return launcher
 
     def plan_transpose(self, op, src, dst, axes):
+        src = self.ensure_buffer(src)
+        dst = self.ensure_buffer(dst)
         backend_kwds = {
                 'cl_env':           op.cl_env,
                 'typegen':          op.typegen,
@@ -123,6 +129,7 @@ class OpenClFFTI(FFTI):
     def plan_fill_zeros(self, op, a, slices):
         if not slices:
             return
+        a = self.ensure_buffer(a)
         launcher = OpenClKernelListLauncher(name='fill_zeros')
         for slc in slices:
             lnc = OpenClFillKernelLauncher.from_slices(varname='buffer', 
@@ -131,4 +138,11 @@ class OpenClFFTI(FFTI):
                     fill_value=0)
             launcher += lnc
         return launcher
 
+    @classmethod
+    def ensure_buffer(cls, get_buffer):
+        if callable(get_buffer):
+            buf = get_buffer()
+        else:
+            buf = get_buffer
+        return buf
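
Unlike the host side, the OpenCL helpers resolve callables eagerly: ensure_buffer() evaluates them once at planning time, because the copy/transpose/fill launchers are specialized for concrete buffers. A small sketch of that contract (it only exercises ensure_buffer, assuming pyopencl/gpyfft are importable):

    import numpy as np
    from hysop.numerics.fft.opencl_fft import OpenClFFTI

    buf = np.zeros(4)

    # ensure_buffer returns the buffer itself, calling get_buffer() if needed,
    # so OpenCL kernel launchers are planned against a fixed buffer object.
    assert OpenClFFTI.ensure_buffer(buf) is buf
    assert OpenClFFTI.ensure_buffer(lambda: buf) is buf
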
diff --git a/hysop/operator/base/redistribute_operator.py b/hysop/operator/base/redistribute_operator.py
index 1c1cedc1270766ed96b747244c6cecbd101675cb..c4aded49e3cd7c99ce39753bd51ad036e8c38003 100644
--- a/hysop/operator/base/redistribute_operator.py
+++ b/hysop/operator/base/redistribute_operator.py
@@ -25,7 +25,8 @@ class RedistributeOperatorBase(ComputationalGraphOperator):
         """
         pass
     
-    def supported_backends(self):
+    @classmethod
+    def supported_backends(cls):
         """
         return the backends that this operator's topologies can support.
         """
diff --git a/hysop/operator/base/spectral_operator.py b/hysop/operator/base/spectral_operator.py
index 0bd100e71d94f88e2a8d08742c57f1950d3dfd83..4727d444ce263879752fce5e6c8f19ce96bb4653 100644
--- a/hysop/operator/base/spectral_operator.py
+++ b/hysop/operator/base/spectral_operator.py
@@ -4,7 +4,7 @@ import numpy as np
 
 from hysop.constants         import BoundaryCondition, BoundaryExtension, TransformType, \
                                     MemoryOrdering, TranspositionState, Backend, \
-                                    SpectralTransformAction
+                                    SpectralTransformAction, Implementation
 from hysop.tools.misc        import compute_nbytes
 from hysop.tools.types       import check_instance, to_tuple, first_not_None
 from hysop.tools.decorators  import debug
@@ -18,12 +18,94 @@ from hysop.core.graph.graph import not_initialized as _not_initialized, \
                                    initialized     as _initialized,     \
                                    discretized     as _discretized,     \
                                    ready           as _ready
+from hysop.core.graph.computational_node_frontend import ComputationalGraphNodeFrontend
+from hysop.topology.topology_descriptor import TopologyDescriptor
 from hysop.topology.cartesian_descriptor import CartesianTopologyDescriptors
 from hysop.fields.continuous_field import Field, ScalarField, TensorField
 from hysop.symbolic.array import SymbolicArray
 from hysop.symbolic.spectral import WaveNumber, SpectralTransform, AppliedSpectralTransform
 from hysop.numerics.fft.fft import FFTI, simd_alignment, is_byte_aligned
 
+class SpectralComputationalGraphNodeFrontend(ComputationalGraphNodeFrontend):
+    """
+    Frontend for spectral operators that can swap the requested OPENCL
+    implementation for the PYTHON one on CPU OpenCL devices
+    (see get_actual_implementation).
+    """
+
+    def __init__(self, implementation, **kwds):
+        impl, extra_kwds = self.get_actual_implementation(implementation=implementation, **kwds)
+        for k in extra_kwds.keys():
+            assert k not in kwds
+        kwds.update(extra_kwds)
+        super(SpectralComputationalGraphNodeFrontend, self).__init__(
+                implementation=impl, **kwds)
+
+    
+    @classmethod
+    def get_actual_implementation(cls, implementation, 
+            enforce_implementation=True, cl_env=None,
+            **kwds):
+        """
+        Parameters
+        ----------
+        implementation: Implementation, optional, defaults to None
+            User desired target implementation.
+        enforce_implementation: bool, optional, defaults to True
+            If this is set to True, the input implementation is enforced.
+            Else, this function may select another implementation when some conditions are met:
+                Case 1: Host FFT by mapping CPU OpenCL buffers
+                   Conditions:
+                     a/ input implementation is set to OPENCL
+                     b/ cl_env.device is of type CPU
+                     c/ Implementation.PYTHON is a valid operator implementation
+                     d/ Target python operator supports OPENCL as backend
+                     e/ OpenCL platform has zero copy capabilities (cannot be checked)
+                   => If cl_env is not given, this will yield a RuntimeError.
+                   => In this case the PYTHON implementation is chosen instead.
+                      Buffers are mapped to host memory.
+                      By default this should give multithreaded FFTW + multithreaded numba.
+                For all other cases, this parameter is ignored.
+
+        Notes
+        -----
+        clFFT (gpyFFT) support for OpenCL CPU devices is a bit neglected.
+        This function allows overriding the implementation target from
+        OPENCL to PYTHON when a CPU OpenCL environment is given as input.
+
+        By default, the CPU FFT target is FFTW (pyFFTW), which has much
+        better support (multithreaded FFTW + multithreaded numba).
+
+        OpenCL buffers are mapped to host memory with enqueue_map_buffer
+        (this assumes that all OpenCL buffers have been allocated with
+        zero-copy capability in the target OpenCL platform).
+        """
+        implementation = first_not_None(implementation, cls.default_implementation())
+        assert implementation in cls.implementations()
+        extra_kwds = { 'enable_opencl_host_buffer_mapping': False }
+        if (enforce_implementation):
+            return (implementation, extra_kwds)
+        if (implementation == Implementation.OPENCL):
+            if (cl_env is None):
+                msg='enforce_implementation was set to False, '
+                msg+='implementation is OPENCL, but no cl_env was passed '
+                msg+='to check if the device is of type CPU.'
+                raise RuntimeError(msg)
+            from hysop.backend.device.opencl import cl
+            if (cl_env.device.type == cl.device_type.CPU):
+                if (Implementation.PYTHON in cls.implementations()):
+                    from hysop.backend.host.host_operator import HostOperator, OpenClMappable
+                    op_cls = cls.implementations()[Implementation.PYTHON]
+                    if not issubclass(op_cls, HostOperator):
+                        msg='Operator {} is not a HostOperator.'
+                        msg=msg.format(op_cls)
+                        raise TypeError(msg)
+                    if not issubclass(op_cls, OpenClMappable):
+                        msg='Operator {} does not support host to device opencl buffer mapping.'
+                        msg=msg.format(op_cls)
+                        raise TypeError(msg)
+                    assert Backend.HOST   in op_cls.supported_backends()
+                    assert Backend.OPENCL in op_cls.supported_backends()
+                    extra_kwds['enable_opencl_host_buffer_mapping'] = True
+                    return (Implementation.PYTHON, extra_kwds)
+        return (implementation, extra_kwds)
+
+
 
 class SpectralOperatorBase(object):
     """
@@ -31,9 +113,11 @@ class SpectralOperatorBase(object):
     """
     
     min_fft_alignment = simd_alignment #FFTW SIMD.
 
     @debug
-    def __init__(self, fft_interface=None, fft_interface_kwds=None, **kwds):
+    def __init__(self, fft_interface=None, fft_interface_kwds=None, 
+                        enable_opencl_host_buffer_mapping=False,
+                        **kwds):
         """
         Initialize a spectral operator base.
         kwds: dict
@@ -45,9 +129,23 @@ class SpectralOperatorBase(object):
         check_instance(fft_interface_kwds, dict, allow_none=True)
 
         self.transform_groups = {} # dict[tag] -> SpectralTransformGroup
-                                                                 
+
+        if enable_opencl_host_buffer_mapping:
+            from hysop.backend.host.host_operator import HostOperator
+            from hysop.backend.device.opencl import cl
+            from hysop.backend.device.opencl.opencl_env import OpenClEnvironment
+            assert isinstance(self, HostOperator)
+            msg='cl_env has not been given.'
+            assert ('cl_env' in kwds), msg
+            cl_env = kwds.pop('cl_env')
+            check_instance(cl_env, OpenClEnvironment)
+            msg='enable_opencl_host_buffer_mapping is currently only compatible with CPU devices.'
+            assert (cl_env.device.type == cl.device_type.CPU), msg
+            self.cl_env = cl_env
+
         self.fft_interface = fft_interface
         self.fft_interface_kwds = fft_interface_kwds
+        self.enable_opencl_host_buffer_mapping = enable_opencl_host_buffer_mapping
 
     def new_transform_group(self, tag=None, mem_tag=None):
         """
@@ -69,6 +167,30 @@ class SpectralOperatorBase(object):
         for tg in self.transform_groups.values():
             backend = tg.initialize(**kwds)
     
+    @debug
+    def create_topology_descriptors(self): 
+        if self.enable_opencl_host_buffer_mapping:
+            # enforce opencl topology on host operator
+            for (field, topo_descriptor) in self.input_fields.iteritems():
+                topo_descriptor = TopologyDescriptor.build_descriptor(
+                        backend=Backend.OPENCL,
+                        operator=self,
+                        field=field,
+                        handle=topo_descriptor,
+                        cl_env=self.cl_env)
+                self.input_fields[field] = topo_descriptor
+
+            for (field, topo_descriptor) in self.output_fields.iteritems():
+                topo_descriptor = TopologyDescriptor.build_descriptor(
+                        backend=Backend.OPENCL,
+                        operator=self,
+                        field=field,
+                        handle=topo_descriptor,
+                        cl_env=self.cl_env)
+                self.output_fields[field] = topo_descriptor
+        else:
+            super(SpectralOperatorBase, self).create_topology_descriptors()
+    
     def get_field_requirements(self):
         requirements = super(SpectralOperatorBase, self).get_field_requirements()
         
@@ -97,18 +219,26 @@ class SpectralOperatorBase(object):
             backends.add(tg.backend)
         assert len(backends)==1, 'Fields do not live on the same backend.'
         backend = next(iter(backends))
+        
+        enable_opencl_host_buffer_mapping = self.enable_opencl_host_buffer_mapping
+        if enable_opencl_host_buffer_mapping:
+            msg='Trying to enable opencl device to host buffer mapping on {} target.'
+            assert (backend.kind is Backend.OPENCL), msg.format(backend.kind)
 
         fft_interface      = self.fft_interface
         fft_interface_kwds = self.fft_interface_kwds
 
         if (fft_interface is None):
             fft_interface_kwds = first_not_None(fft_interface_kwds, {})
-            fft_interface = FFTI.default_interface_from_backend(backend, **fft_interface_kwds)
+            fft_interface = FFTI.default_interface_from_backend(backend, 
+                    enable_opencl_host_buffer_mapping=enable_opencl_host_buffer_mapping,
+                    **fft_interface_kwds)
         else:
-            assert not interface_kwds, 'FFT interface has already been built.'
+            assert not fft_interface_kwds, 'FFT interface has already been built.'
 
         check_instance(fft_interface, FFTI)
-        fft_interface.check_backend(backend)
+        fft_interface.check_backend(backend, 
+                enable_opencl_host_buffer_mapping=enable_opencl_host_buffer_mapping)
 
         self.backend      = backend
         self.host_backend = backend.host_array_backend
@@ -1067,6 +1197,15 @@ class PlannedSpectralTransform(object):
             print 'transpose_info:          {}'.format(axis_format(transpose_info))
             print ':ZERO FILL:'
             print 'zero_fill_output_slices: {}'.format(slc_format(self._zero_fill_output_slices))
+    
+    def get_mapped_input_buffer(self):
+        return self.input_buffer
+    def get_mapped_output_buffer(self):
+        return self.output_buffer
+    def get_mapped_full_input_buffer(self):
+        return self.full_input_buffer
+    def get_mapped_full_output_buffer(self):
+        return self._full_output_buffer
         
     def determine_buffer_shape(cls, transform_shape, target_is_buffer, offsets, axes): 
         offsets = tuple(offsets[ai] for ai in axes)
@@ -1390,8 +1529,6 @@ class PlannedSpectralTransform(object):
         
         # define input and output buffer, as well as tmp buffers
         src_buffer, dst_buffer = B0, B1
-        input_buffer  = self.input_buffer
-        # output buffer may not already be determined
         def nameof(buf):
             assert (buf is B0) or (buf is B1)
             if (buf is B0):
@@ -1423,6 +1560,12 @@ SPECTRAL TRANSFORM SETUP
             print msg
 
         fft_plans = ()
+
+        # Input and output buffers are resolved through the get_mapped_*()
+        # callables defined above, so that opencl buffers can be mapped to host
+        # memory at execution time when buffer mapping is enabled.
+
         for i in xrange(ntransforms):
             transpose = transpose_info[i]
             transform = transform_info[i]
@@ -1477,9 +1620,9 @@ SPECTRAL TRANSFORM SETUP
                 assert src_buffer.nbytes >= input_nbytes,  'Insufficient buffer size for src buf.'
                 assert dst_buffer.nbytes >= output_nbytes, 'Insufficient buffer size for dst buf.'
                 if is_first:
-                    assert (input_buffer.shape == input_shape), 'input_buffer shape mismatch.'
-                    assert (input_buffer.dtype == src_dtype), 'input_buffer dtype mismatch.'
-                    b0 = input_buffer
+                    assert (self.input_buffer.shape == input_shape), 'input_buffer shape mismatch.'
+                    assert (self.input_buffer.dtype == src_dtype), 'input_buffer dtype mismatch.'
+                    b0 = self.get_mapped_input_buffer
                 else:
                     b0 = src_buffer[:input_nbytes].view(dtype=src_dtype).reshape(input_shape)
                 b1 = dst_buffer[:output_nbytes].view(dtype=src_dtype).reshape(output_shape)
@@ -1491,14 +1634,14 @@ SPECTRAL TRANSFORM SETUP
                              'forward permute')
                 src_buffer, dst_buffer = dst_buffer, src_buffer
             elif is_first:
-                assert (input_buffer.shape == src_shape), 'input buffer shape mismatch.'
-                assert (input_buffer.dtype == src_dtype), 'input buffer dtype mismatch.'
+                assert (self.input_buffer.shape == src_shape), 'input buffer shape mismatch.'
+                assert (self.input_buffer.dtype == src_dtype), 'input buffer dtype mismatch.'
                 assert src_buffer.nbytes >= src_nbytes, 'Insufficient buffer size for src buf.'
                 if ((custom_input_buffer is not None) and 
                         (nameof(src_buffer) == custom_input_buffer)):
                     src_buffer, dst_buffer = dst_buffer, src_buffer
                 b0 = src_buffer[:src_nbytes].view(dtype=src_dtype).reshape(src_shape)
-                queue += FFTI.plan_copy(op=op, src=input_buffer, dst=b0)
+                queue += FFTI.plan_copy(op=op, src=self.get_mapped_input_buffer, dst=b0)
                 if SETUP_DEBUG:
                     sfrom='input_buffer'
                     sto=nameof(src_buffer)
@@ -1535,7 +1678,7 @@ SPECTRAL TRANSFORM SETUP
                             'output buffer shape mismatch.'
                     assert (self.output_buffer.dtype == dst_dtype), \
                             'output buffer dtype mismatch.'
-                    b1 = self.output_buffer
+                    b1 = self.get_mapped_output_buffer
                 else:
                     b1 = dst_buffer[:output_nbytes].view(dtype=dst_dtype).reshape(output_shape)
                 queue += FFTI.plan_transpose(op=op, src=b0, dst=b1, axes=permutation)
@@ -1551,7 +1694,7 @@ SPECTRAL TRANSFORM SETUP
                                 'output buffer shape mismatch.'
                         assert (self.output_buffer.dtype == dst_dtype), \
                                 'output buffer dtype mismatch.'
-                        queue += FFTI.plan_accumulate(op=op, src=b1, dst=self.output_buffer)
+                        queue += FFTI.plan_accumulate(op=op, src=b1, dst=self.get_mapped_output_buffer)
                         if SETUP_DEBUG:
                             sfrom=nameof(dst_buffer)
                             sto='output_buffer'
@@ -1594,11 +1737,11 @@ SPECTRAL TRANSFORM SETUP
                 if self._action is SpectralTransformAction.OVERWRITE:
                     pname='PlanCopy'
                     pdes='post-transform-copy'
-                    queue += FFTI.plan_copy(op=op, src=b0, dst=self.output_buffer)
+                    queue += FFTI.plan_copy(op=op, src=b0, dst=self.get_mapped_output_buffer)
                 elif self._action is SpectralTransformAction.ACCUMULATE:
                     pname='PlanAccumulate'
                     pdes='post-transform-accumulate'
-                    queue += FFTI.plan_accumulate(op=op, src=b0, dst=self.output_buffer)
+                    queue += FFTI.plan_accumulate(op=op, src=b0, dst=self.get_mapped_output_buffer)
                 else:
                     msg='Unsupported action {}.'.format(self._action)
                     raise NotImplementedError(msg)
@@ -1610,7 +1753,7 @@ SPECTRAL TRANSFORM SETUP
                              pdes)
     
         if self._zero_fill_output_slices:
-            buf  = self._full_output_buffer
+            buf  = self.get_mapped_full_output_buffer
             slcs = self._zero_fill_output_slices
             queue += FFTI.plan_fill_zeros(op=op, a=buf, slices=slcs)
             if SETUP_DEBUG:
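
get_actual_implementation() implements the fallback described in its docstring. A hedged sketch of the two outcomes, using the Diffusion frontend migrated below and assuming `cpu_cl_env` is an OpenClEnvironment created on a CPU device (hypothetical variable):

    from hysop.constants import Implementation
    from hysop.operator.diffusion import Diffusion

    # Enforced (default): the requested implementation is returned unchanged.
    impl, extra = Diffusion.get_actual_implementation(Implementation.OPENCL)
    assert impl is Implementation.OPENCL
    assert extra['enable_opencl_host_buffer_mapping'] is False

    # Not enforced, CPU OpenCL device: fall back to the PYTHON (FFTW/numba)
    # operator and request device-to-host buffer mapping.
    impl, extra = Diffusion.get_actual_implementation(Implementation.OPENCL,
            enforce_implementation=False, cl_env=cpu_cl_env)
    assert impl is Implementation.PYTHON
    assert extra['enable_opencl_host_buffer_mapping'] is True
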
diff --git a/hysop/operator/diffusion.py b/hysop/operator/diffusion.py
index 77893fdf49b12b9f0224b9250ae666279b872d19..5261eaca05d5ef5aeb38bfddf32c885a4944dfa9 100644
--- a/hysop/operator/diffusion.py
+++ b/hysop/operator/diffusion.py
@@ -9,13 +9,13 @@ from hysop.tools.decorators  import debug
 from hysop.fields.continuous_field import Field
 from hysop.topology.cartesian_descriptor import CartesianTopologyDescriptors
 from hysop.parameters.scalar_parameter import ScalarParameter
-from hysop.core.graph.computational_node_frontend import ComputationalGraphNodeFrontend
 
+from hysop.operator.base.spectral_operator import SpectralComputationalGraphNodeFrontend
 from hysop.backend.host.python.operator.diffusion   import PythonDiffusion
 from hysop.backend.device.opencl.operator.diffusion import OpenClDiffusion
 from hysop.backend.host.fortran.operator.diffusion  import DiffusionFFTW
 
-class Diffusion(ComputationalGraphNodeFrontend):
+class Diffusion(SpectralComputationalGraphNodeFrontend):
     """
     Interface the diffusion solver.
     Available implementations are: FORTRAN: FFTW based solver (legacy fortran)
@@ -39,7 +39,8 @@ class Diffusion(ComputationalGraphNodeFrontend):
 
     @debug
     def __init__(self, Fin, variables, nu, dt,
-            Fout=None, implementation=None, base_kwds=None, **kwds):
+            Fout=None, implementation=None, 
+            base_kwds=None, **kwds):
         """
-        Initialize a Poisson operator frontend.
+        Initialize a Diffusion operator frontend.
         Solves dF/dt = nu * Laplacian(F)
@@ -81,8 +82,8 @@ class Diffusion(ComputationalGraphNodeFrontend):
         check_instance(base_kwds, dict, keys=str)
         check_instance(dt, ScalarParameter)
         check_instance(nu, (float,ScalarParameter))
 
         super(Diffusion, self).__init__(Fin=Fin, Fout=Fout,
                                         variables=variables, nu=nu, dt=dt,
-                                        implementation=implementation, base_kwds=base_kwds,
-                                        **kwds)
+                                        implementation=implementation, 
+                                        base_kwds=base_kwds, **kwds)
diff --git a/hysop/operator/misc.py b/hysop/operator/misc.py
index 38a7aedbbf5084c75d33a0b146f5365643e563cc..13254bb1cb1d451f60540f0e98fd9241403510de 100644
--- a/hysop/operator/misc.py
+++ b/hysop/operator/misc.py
@@ -13,7 +13,8 @@ class Noop(ComputationalGraphOperator):
         """This is a noop."""
         pass
     
-    def supported_backends(self):
+    @classmethod
+    def supported_backends(cls):
         return Backend.all
     
     @classmethod