diff --git a/examples/scalar_advection/levelset.py b/examples/scalar_advection/levelset.py
index 1681b1b13e330e3be4ccf7e414438aefd54b189e..5a2d64c4c232cf1b68a10edddec09b394245724e 100644
--- a/examples/scalar_advection/levelset.py
+++ b/examples/scalar_advection/levelset.py
@@ -27,7 +27,7 @@ def compute(args):
         data[rr < 0.15] = 1.
         rr = np.sqrt((x - 0.75) ** 2 + (y - 0.75) ** 2 + (z - 0.75) ** 2)
         data[rr < 0.1] += 1.
-    
+
     # Define domain
     impl  = args.impl
     dim   = args.ndim
@@ -43,13 +43,13 @@ def compute(args):
     # Get default MPI Parameters from domain (even for serial jobs)
     mpi_params = MPIParams(comm=box.task_comm,
                            task_id=box.current_task())
-    
+
     method = {}
     extra_op_kwds = { 'mpi_params': mpi_params }
     if (impl is Implementation.OPENCL):
         from hysop.backend.device.opencl.opencl_tools import get_or_create_opencl_env
         from hysop.methods import OpenClKernelConfig
-        cl_env = get_or_create_opencl_env(mpi_params=mpi_params, platform_id=args.cl_platform_id, 
+        cl_env = get_or_create_opencl_env(mpi_params=mpi_params, platform_id=args.cl_platform_id,
                                                                  device_id=args.cl_device_id)
         extra_op_kwds['cl_env'] = cl_env
         method[OpenClKernelConfig] = args.opencl_kernel_config
@@ -142,21 +142,21 @@ def compute(args):
         advec = StrangSplitting(splitting_dim=dim,
                                   order=args.strang_order)
         advec.push_operators(advec_dir)
-    
-    
+
+
     compute_velocity = AnalyticField(name='compute_velocity', pretty_name='V(t)',
                              field=velo, formula=compute_velocity,
                              variables={velo: npts},
                              extra_input_kwds={'t': simu.t},
                              implementation=Implementation.PYTHON if (impl is Implementation.FORTRAN) else impl,
                              **extra_op_kwds)
-    
+
     compute_norm = AnalyticField(name='compute_norm', pretty_name='||V||',
                              field=velo, formula=compute_norm,
                              variables={velo: npts},
                              implementation=Implementation.PYTHON if (impl is Implementation.FORTRAN) else impl,
                              **extra_op_kwds)
-    
+
     compute_volume = AnalyticField(name='compute_volume', pretty_name='S<0.5',
                              field=mask, formula=compute_volume,
                              variables={mask: snpts, scalar: snpts},
@@ -170,22 +170,21 @@ def compute(args):
                        variables={mask: snpts},
                        parameter=vol,
                        implementation=impl,
-                       allow_ghosts=True,
                        **extra_op_kwds)
-    
+
     # dump scalar and velocity norm
     if (npts==snpts):
         io_params = IOParams(filename='fields', frequency=args.dump_freq)
-        dump_fields = HDF_Writer(name='fields', io_params=io_params, 
+        dump_fields = HDF_Writer(name='fields', io_params=io_params,
                 force_backend=backend, variables={velo[0]: snpts, scalar: snpts}, **extra_op_kwds)
         dumpers = (dump_fields,)
     else:
         io_params = IOParams(filename='scalar', frequency=args.dump_freq)
-        dump_scalar = HDF_Writer(name='scalar', io_params=io_params, 
+        dump_scalar = HDF_Writer(name='scalar', io_params=io_params,
                 force_backend=backend, variables={scalar: snpts}, **extra_op_kwds)
 
         io_params = IOParams(filename='Vnorm', frequency=args.dump_freq)
-        dump_velocity_norm = HDF_Writer(name='Vnorm', io_params=io_params, 
+        dump_velocity_norm = HDF_Writer(name='Vnorm', io_params=io_params,
                 force_backend=backend, variables={velo[0]: npts}, **extra_op_kwds)
         dumpers = (dump_scalar, dump_velocity_norm)
 
@@ -194,7 +193,7 @@ def compute(args):
     problem.insert(compute_norm, *dumpers)
     problem.insert(compute_volume, volume)
     problem.build(args=args)
-    
+
     # If a visu_rank was provided, and show_graph was set,
     # display the graph on the given process rank.
     if args.display_graph:
@@ -217,16 +216,16 @@ if __name__=='__main__':
 
             description=colors.color('HySoP Levelset Example: ', fg='blue', style='bold')
             description+='Advect a scalar by a time dependent analytic velocity. '
-    
+
             super(LevelsetArgParser, self).__init__(
                  prog_name=prog_name,
                  description=description,
                  default_dump_dir=default_dump_dir)
-        
+
     parser = LevelsetArgParser()
 
-    parser.set_defaults(box_origin=(0.0,), box_length=(1.0,), 
-                       ndim=3, tstart=0.0, tend=3.0, 
+    parser.set_defaults(box_origin=(0.0,), box_length=(1.0,),
+                       ndim=3, tstart=0.0, tend=3.0,
                        npts=(128,), snpts=None, grid_ratio=1,
                        dump_period=0.1, dump_freq=0)
 
diff --git a/hysop/backend/device/opencl/operator/integrate.py b/hysop/backend/device/opencl/operator/integrate.py
index 6aa31e79d2307f73fccf915e1d1d7451d01081b0..d3feef93fdc472e6bcea9d6d83bd80ae825329c4 100644
--- a/hysop/backend/device/opencl/operator/integrate.py
+++ b/hysop/backend/device/opencl/operator/integrate.py
@@ -13,8 +13,7 @@ class OpenClIntegrate(IntegrateBase, OpenClOperator):
         # force 0 ghosts for the reduction (pyopencl reduction kernel)
         requirements = super(OpenClIntegrate, self).get_field_requirements()
         topo, req = requirements.get_input_requirement(self.field)
-        if not self.allow_ghosts:
-            req.max_ghosts = (0,)*self.field.dim
+        req.max_ghosts = (0,)*self.field.dim
         return requirements
 
     @debug
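
The allow_ghosts escape hatch is removed here: the pyopencl reduction kernel
consumes the flat device buffer, so the OpenCL backend now always requests zero
ghosts, while the Python backend added below can keep ghost layers and exclude
them from the sum by indexing with compute_slices. A toy numpy sketch of why
that distinction matters (sizes and values are illustrative only):

    import numpy as np

    ghosts = 2
    data = np.full(10 + 2*ghosts, 99.0)   # stale values in the ghost layers
    data[ghosts:-ghosts] = 1.0            # local compute domain

    compute_slices = np.s_[ghosts:-ghosts]
    print(np.sum(data))                   # flat reduction, polluted by ghosts
    print(np.sum(data[compute_slices]))   # correct local integral: 10.0
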
diff --git a/hysop/backend/host/python/operator/integrate.py b/hysop/backend/host/python/operator/integrate.py
new file mode 100644
index 0000000000000000000000000000000000000000..e797a5a761490d249a13792363f348ee216736e4
--- /dev/null
+++ b/hysop/backend/host/python/operator/integrate.py
@@ -0,0 +1,26 @@
+from hysop.tools.decorators import debug
+from hysop.core.graph.graph import op_apply
+from hysop.backend.host.host_operator import HostOperator
+from hysop.operator.base.integrate import IntegrateBase
+import numpy as np
+
+class PythonIntegrate(IntegrateBase, HostOperator):
+
+    @debug
+    def __init__(self, **kwds):
+        super(PythonIntegrate, self).__init__(**kwds)
+
+    @op_apply
+    def apply(self, **kwds):
+        # work on a copy so the parameter is only updated once, after the reduction
+        value = self.parameter._value.copy()
+        for i in xrange(self.dF.nb_components):
+            # sum over the local compute domain only (ghost nodes excluded)
+            Pi = np.sum(self.dF.data[i][self.dF.compute_slices])
+            if (self.scaling_coeff[i] is None):
+                # 'normalize' scaling: the first integral defines the coefficient
+                self.scaling_coeff[i] = 1.0 / Pi
+            value[i] = self.scaling_coeff[i] * Pi
+
+        # reduce the local integrals over all MPI processes
+        self.parameter.value = self._collect(value)
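
A sketch of how the new backend is reached through the Integrate frontend. The
Box/Field construction mirrors the levelset example; exact import paths and
constructor signatures are assumptions, not verified API:

    from hysop import Box, Field
    from hysop.constants import Implementation
    from hysop.operator.integrate import Integrate

    box = Box(origin=(0.0,)*3, length=(1.0,)*3)
    f = Field(domain=box, name='f')

    # resolves to PythonIntegrate under the new default implementation
    volume = Integrate(field=f, variables={f: (64,)*3}, scaling='volumic')

    # the OpenCL backend can still be forced explicitly:
    # volume = Integrate(field=f, variables={f: (64,)*3},
    #                    implementation=Implementation.OPENCL, cl_env=cl_env)
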
diff --git a/hysop/operator/base/integrate.py b/hysop/operator/base/integrate.py
index bcdddc4282643fb866b50f7cbf4e3e84df69612a..33cc2cdc7a1528942737eba0b549927e64b51c43 100644
--- a/hysop/operator/base/integrate.py
+++ b/hysop/operator/base/integrate.py
@@ -17,7 +17,7 @@ class IntegrateBase(object):
     __metaclass__ = ABCMeta
 
     @debug
-    def __init__(self, field, variables, allow_ghosts=False,
+    def __init__(self, field, variables,
                     name=None, pretty_name=None, cst=1,
                     parameter=None, scaling=None, **kwds):
         """
@@ -89,7 +89,6 @@ class IntegrateBase(object):
         self.scaling   = scaling
         self.cst = cst
         self.scaling_coeff = None
-        self.allow_ghosts = allow_ghosts
 
         super(IntegrateBase, self).__init__(name=name, pretty_name=pretty_name,
                 input_fields=input_fields, output_params=output_params, **kwds)
diff --git a/hysop/operator/integrate.py b/hysop/operator/integrate.py
index 34de06ea22446d1a1ae14b5147ff3842a6606a60..271d7a2d4370cf9dbe25d2e2755c951fb1b3acd8 100644
--- a/hysop/operator/integrate.py
+++ b/hysop/operator/integrate.py
@@ -1,5 +1,3 @@
-
-
 """
-@file enstrophy.py
-Enstrophy solver frontend.
+@file integrate.py
+Integrate operator frontend.
@@ -18,25 +16,28 @@ from hysop.parameters.default_parameters import VolumicIntegrationParameter
 class Integrate(ComputationalGraphNodeFrontend):
     """
     Interface for integrating fields on their domain
-    Available implementations are: 
+    Available implementations are:
+        *PYTHON (default, numpy based implementation)
         *OPENCL (gpu based implementation)
     """
-    
+
     @classmethod
     def implementations(cls):
         from hysop.backend.device.opencl.operator.integrate import OpenClIntegrate
+        from hysop.backend.host.python.operator.integrate import PythonIntegrate
         implementations = {
-                Implementation.OPENCL: OpenClIntegrate
+                Implementation.OPENCL: OpenClIntegrate,
+                Implementation.PYTHON: PythonIntegrate
         }
         return implementations
-    
+
     @classmethod
     def default_implementation(cls):
-        return Implementation.OPENCL
-    
+        return Implementation.PYTHON
+
     @debug
     def __init__(self, field, variables,
-                parameter=None, scaling=None, 
+                parameter=None, scaling=None,
                 implementation=None, base_kwds=None, **kwds):
         """
-        Initialize a Integrate operator frontend.
+        Initialize an Integrate operator frontend.
@@ -49,7 +49,7 @@ class Integrate(ComputationalGraphNodeFrontend):
              P = scaling * integral_V(field)
              where V is the field domain volume
              and scaling depends on specified scaling method.
-        
+
-        parameter
-        ----------
+        Parameters
+        ----------
         field: Field
@@ -58,12 +58,12 @@ class Integrate(ComputationalGraphNodeFrontend):
             dictionary of fields as keys and topologies as values.
         parameter: ScalarParameter or TensorParameter
             The output parameter that will contain the integral.
-            Should match field.nb_components. 
+            Should match field.nb_components.
             A default parameter will be created if not specified.
         scaling: None, float, str or array-like of str, optional
             Scaling method used after integration.
             'volumic':   scale by domain size (product of mesh space steps)
-            'normalize': scale by first integration (first value will be 1.0) 
+            'normalize': scale by first integration (first value will be 1.0)
             Defaults to volumic integration.
         implementation: Implementation, optional, defaults to None
             target implementation, should be contained in available_implementations().
@@ -72,22 +72,22 @@ class Integrate(ComputationalGraphNodeFrontend):
             Base class keywords arguments.
             If None, an empty dict will be passed.
         kwds:
-            Extra keywords arguments that will be passed towards implementation 
-            enstrophy operator __init__.
+            Extra keyword arguments that will be passed to the
+            Integrate operator implementation's __init__.
 
         Notes
         -----
-        An Integrate operator implementation should at least support 
+        An Integrate operator implementation should at least support
         the hysop.operator.base.integrate.IntegrateBase interface.
         """
         base_kwds = base_kwds or dict()
-        
+
         check_instance(field, Field)
         check_instance(variables, dict, keys=Field, values=CartesianTopologyDescriptors)
         check_instance(parameter, (ScalarParameter, TensorParameter), allow_none=True)
         check_instance(scaling, str, allow_none=True)
         check_instance(base_kwds, dict, keys=str)
-        
+
         # Pregenerate parameter so that we can directly store it in self.
         if (parameter is None):
             parameter = VolumicIntegrationParameter(field=field)
@@ -95,7 +95,7 @@ class Integrate(ComputationalGraphNodeFrontend):
             msg='Expected a parameter of size {} but got a parameter of size {}.'
             msg=msg.format(field.nb_components, parameter.size)
             raise RuntimeError(msg)
-        
+
         super(Integrate, self).__init__(field=field, variables=variables,
                 parameter=parameter, scaling=scaling,
                 base_kwds=base_kwds, **kwds)
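
Taken together, the frontend changes reduce to the following contract, sketched
here as assertions (assuming both backends' dependencies are importable):

    from hysop.constants import Implementation
    from hysop.operator.integrate import Integrate

    assert Integrate.default_implementation() is Implementation.PYTHON
    assert set(Integrate.implementations()) == {Implementation.PYTHON,
                                                Implementation.OPENCL}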