diff --git a/examples/scalar_advection/scalar_advection.py b/examples/scalar_advection/scalar_advection.py
index 45610c6462490f350e31382f059e9d15c9dbe280..6580c2a12cf9e8ad6558ecbdf12eeff5a6bc17ae 100644
--- a/examples/scalar_advection/scalar_advection.py
+++ b/examples/scalar_advection/scalar_advection.py
@@ -41,7 +41,7 @@ def run(npts=64+1, cfl=0.5):
     if impl is Implementation.OPENCL_CODEGEN:
         autotuner_config = OpenClKernelAutotunerConfig(
            autotuner_flag=AutotunerFlags.ESTIMATE, 
-           prune_threshold=1.2, override_cache=False, verbose=0)
+           prune_threshold=1.2, override_cache=True, verbose=0)
         kernel_config = OpenClKernelConfig(autotuner_config=autotuner_config)
         method = { OpenClKernelConfig : kernel_config }
     else:
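
The hunk above flips override_cache so the autotuner discards any cached bench
results and re-times every kernel candidate on each run, which fits a debugging
session but slows startup. A minimal sketch of the production-style
configuration it replaces (same names and signature as in the hunk):

    autotuner_config = OpenClKernelAutotunerConfig(
        autotuner_flag=AutotunerFlags.ESTIMATE,
        prune_threshold=1.2, override_cache=False, verbose=0)
    kernel_config = OpenClKernelConfig(autotuner_config=autotuner_config)
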
diff --git a/hysop/__init__.py b/hysop/__init__.py
index 0d496006a739c48dd5c623e40d511b8b20d04b1f..279e86abeb02be2bfb61b0e3665f0412e9c2564c 100644
--- a/hysop/__init__.py
+++ b/hysop/__init__.py
@@ -16,11 +16,11 @@ __FFTW_ENABLED__   = "ON"   is "ON"
 __SCALES_ENABLED__ = "ON" is "ON"
 __OPTIMIZE__       = not __debug__
 
-__VERBOSE__        = True
+__VERBOSE__        = False
 __DEBUG__          = False
 __TRACE__          = False
 __TRACE_WARNINGS__ = False
-__KERNEL_DEBUG__   = False
+__KERNEL_DEBUG__   = True
 __PROFILE__        = True
 
 __ENABLE_LONG_TESTS__ = "OFF" is "ON"
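
Unrelated to the flags flipped above, the surrounding context compares build
flags with 'is' ("ON" is "ON"), which tests object identity and only evaluates
as intended because CPython happens to intern short string literals; CPython
3.8+ emits a SyntaxWarning for it. A minimal sketch of the equality form these
lines presumably intend (not part of this patch):

    __FFTW_ENABLED__      = ("ON"  == "ON")
    __SCALES_ENABLED__    = ("ON"  == "ON")
    __ENABLE_LONG_TESTS__ = ("OFF" == "ON")
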
diff --git a/hysop/backend/device/codegen/functions/directional_remesh.py b/hysop/backend/device/codegen/functions/directional_remesh.py
index 3cdb730856289ba43b64c2aa595ca2934351d7d9..78b618a0b5e9b4e27ae641455cd748304af60d86 100644
--- a/hysop/backend/device/codegen/functions/directional_remesh.py
+++ b/hysop/backend/device/codegen/functions/directional_remesh.py
@@ -229,7 +229,7 @@ class DirectionalRemeshFunction(OpenClFunctionCodeGenerator):
         y    = CodegenVectorClBuiltin(name='y',    btype=ftype, dim=nparticles, typegen=tg)
         ind  = CodegenVectorClBuiltin(name='ind',  btype=itype, dim=nparticles, typegen=tg)
         find = CodegenVectorClBuiltin(name='find', btype=ftype, dim=nparticles, typegen=tg)
-        vone = CodegenVectorClBuiltin(name='one', btype=ftype, dim=nparticles, typegen=tg)
+        vone = CodegenVectorClBuiltin(name='one', btype=ftype, dim=nparticles, typegen=tg, value=(1,)*nparticles)
         
         if poly_splitted:
             wl = CodegenVectorClBuiltin(name='Wl', btype=ftype, dim=nparticles, typegen=tg)
@@ -246,7 +246,8 @@ class DirectionalRemeshFunction(OpenClFunctionCodeGenerator):
                 eps.declare(s)
             s.jumpline()
             
-            s.decl_aligned_vars(find, ind, y, vone)
+            s.decl_aligned_vars(find, ind, y)
+            vone.declare(s, const=True)
             s.decl_vars(*weights)
             s.jumpline()
             
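
The ones-vector 'one' now carries an initial value and is declared const on its
own line instead of being grouped with the uninitialized aligned declarations.
A minimal sketch of the intent, with a guess at the generated OpenCL (assuming
nparticles == 4 and ftype == 'float'; the exact output depends on the codegen):

    # value-initialized to (1, ..., 1) and emitted as a 'const' declaration,
    # presumably something like:  const float4 one = (float4)(1,1,1,1);
    vone = CodegenVectorClBuiltin(name='one', btype=ftype, dim=nparticles,
                                  typegen=tg, value=(1,)*nparticles)
    vone.declare(s, const=True)
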
diff --git a/hysop/backend/device/kernel_autotuner.py b/hysop/backend/device/kernel_autotuner.py
index 4ce75bb7d453bb91e3c08463cce1e4741e0eb4ff..5c8b95438875556f7e94f7885c0c75bcf37afba5 100644
--- a/hysop/backend/device/kernel_autotuner.py
+++ b/hysop/backend/device/kernel_autotuner.py
@@ -151,14 +151,16 @@ class KernelAutotuner(object):
 
         if (kernel_name != cached_kernel_name):
             msg='\nCached kernel name did not match the benched one:\n {}\n {}\n'
-            msg+='\nThis might be due to a faulty implementation of {}.hash_extra_kwds().'
+            msg+='\nThis might be due to a change in the generated code or '
+            msg+='a faulty implementation of {}.hash_extra_kwds().'
             msg=msg.format(kernel_name, cached_kernel_name, type(tkernel).__name__)
             warnings.warn(msg, CodeGeneratorWarning)
             return None
 
         if (src_hash != cached_src_hash):
             msg='\nCached kernel source hash did not match the benched one.\n {}\n {}'
-            msg+='\nThis might be due to a faulty implementation of {}.hash_extra_kwds().'
+            msg+='\nThis might be due to a change in the generated code or '
+            msg+='a faulty implementation of {}.hash_extra_kwds().'
             msg=msg.format(src_hash, cached_src_hash, type(tkernel).__name__)
             warnings.warn(msg, CodeGeneratorWarning)
             return None
@@ -237,8 +239,9 @@ class KernelAutotuner(object):
                                 (cache_src_hash, cache_stats) = results[run_key]
                                 if cache_src_hash != src_hash:
                                     msg='\nCached parameters candidate did not match the benched one.\n {}\n {}'
-                                    msg+='\nThis might be due to a faulty implementation of {}.hash_extra_kwds().'
-                                    msg=msg.format(src_hash, cached_src_hash, type(tunable_kernel).__name__)
+                                    msg+='\nThis might be due to a change in the generated code or '
+                                    msg+='a faulty implementation of {}.hash_extra_kwds().'
+                                    msg=msg.format(src_hash, cache_src_hash, type(tkernel).__name__)
                                     warnings.warn(msg, CodeGeneratorWarning)
                                     old_stats = None
                                 else:
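
Besides rewording the warning, the last hunk fixes two names that were not in
scope on the mismatch path: the tuple unpacked above binds cache_src_hash (not
cached_src_hash), and tkernel matches the name used in the first hunk
(tunable_kernel is presumably not bound here), so the old format() call would
have raised a NameError exactly when the warning was needed:

    (cache_src_hash, cache_stats) = results[run_key]   # names actually bound
    # old: msg.format(src_hash, cached_src_hash, type(tunable_kernel).__name__)
    # new: msg.format(src_hash, cache_src_hash,  type(tkernel).__name__)
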
diff --git a/hysop/backend/device/opencl/opencl_env.py b/hysop/backend/device/opencl/opencl_env.py
index c807744af886bd35fa80af2fe030d611c701452f..da81b51c678b5868afc9324a6e372b66f40d8ad8 100644
--- a/hysop/backend/device/opencl/opencl_env.py
+++ b/hysop/backend/device/opencl/opencl_env.py
@@ -599,7 +599,6 @@ Dumped OpenCL Kernel '{}'
             print('Dumping kernel src at \'{}\'.'.format(dump_file))
             with open(dump_file, 'w+') as f:
                 f.write(gpu_src)
-            #build_opts += ' '+' '.join(['-g', '-s "{}"'.format(dump_file)])
 
         # Build OpenCL program
         try:
diff --git a/hysop/backend/device/opencl/operator/directional/advection_dir.py b/hysop/backend/device/opencl/operator/directional/advection_dir.py
index 4b9950563d3a91fcd2f8bb6993d94f7511bc9da8..10d4e573c91579fb01db26301a2c4056a5f5c2b8 100644
--- a/hysop/backend/device/opencl/operator/directional/advection_dir.py
+++ b/hysop/backend/device/opencl/operator/directional/advection_dir.py
@@ -66,6 +66,9 @@ class OpenClDirectionalAdvection(DirectionalAdvectionBase, OpenClDirectionalOper
         self.relax_min_particles = relax_min_particles
         self.remesh_criteria_eps = remesh_criteria_eps
 
+        self._force_autotuner_verbose = True
+        self._force_autotuner_debug   = True
+
     @debug
     def get_work_properties(self):
         requests  = super(OpenClDirectionalAdvection, self).get_work_properties()
@@ -107,7 +110,8 @@ class OpenClDirectionalAdvection(DirectionalAdvectionBase, OpenClDirectionalOper
         kwds['velocity_cfl']    = self.velocity_cfl
         kwds['time_integrator'] = self.time_integrator
 
-        (advec_kernel, args_dict) = kernel.autotune(force_debug=True, **kwds)
+        (advec_kernel, args_dict) = kernel.autotune(force_verbose=self._force_autotuner_verbose, 
+                force_debug=self._force_autotuner_debug, **kwds)
 
         args_dict.pop('dt') 
         advec_launcher = advec_kernel.build_launcher(**args_dict)
@@ -145,7 +149,8 @@ class OpenClDirectionalAdvection(DirectionalAdvectionBase, OpenClDirectionalOper
         kwds['force_atomics']       = self.force_atomics
         kwds['relax_min_particles'] = self.relax_min_particles
         
-        (remesh_kernel, args_dict) = kernel.autotune(**kwds)
+        (remesh_kernel, args_dict) = kernel.autotune(force_verbose=self._force_autotuner_verbose,
+                force_debug=self._force_autotuner_debug, **kwds)
 
         kl = remesh_kernel.build_launcher(**args_dict)
         self.remesh_kernel_launcher = kl
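
Both autotune() call sites now read the operator-level flags introduced in the
first hunk instead of a single hard-coded force_debug=True, so the advection
and remesh kernels are debugged consistently. A hypothetical usage sketch
(construction arguments elided; attribute names as in the hunk above):

    advec = OpenClDirectionalAdvection(...)   # usual construction kwargs
    # silence the forced autotuner output once the kernels are trusted:
    advec._force_autotuner_verbose = False
    advec._force_autotuner_debug   = False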