diff --git a/hysop/__init__.py.in b/hysop/__init__.py.in
index 701856c217556b09b0468e89f46aa8d91b5651e2..9107971480f38e9dd637b24b1bf114a7f6c6c88e 100644
--- a/hysop/__init__.py.in
+++ b/hysop/__init__.py.in
@@ -45,16 +45,16 @@ package_name = "@PACKAGE_NAME@"
 version      = "@HYSOP_VERSION@"
 
 # Compilation flags
-__MPI_ENABLED__    = "@USE_MPI@" is "ON"
-__GPU_ENABLED__    = "@WITH_GPU@" is "ON"
-__FFTW_ENABLED__   = "@WITH_FFTW@" is "ON"
-__SCALES_ENABLED__ = "@WITH_SCALES@" is "ON"
+__MPI_ENABLED__    = "@USE_MPI@" == "ON"
+__GPU_ENABLED__    = "@WITH_GPU@" == "ON"
+__FFTW_ENABLED__   = "@WITH_FFTW@" == "ON"
+__SCALES_ENABLED__ = "@WITH_SCALES@" == "ON"
 __OPTIMIZE__       = not __debug__
-__H5PY_PARALLEL_COMPRESSION_ENABLED__ = ("@H5PY_PARALLEL_COMPRESSION_ENABLED@" is "ON")
+__H5PY_PARALLEL_COMPRESSION_ENABLED__ = ("@H5PY_PARALLEL_COMPRESSION_ENABLED@" == "ON")
 
-__VERBOSE__        = get_env('VERBOSE', ("@VERBOSE@" is "ON"))
-__DEBUG__          = get_env('DEBUG',   ("@DEBUG@" is "ON"))
-__PROFILE__        = get_env('PROFILE', ("@PROFILE@" is "ON"))
+__VERBOSE__        = get_env('VERBOSE', ("@VERBOSE@" == "ON"))
+__DEBUG__          = get_env('DEBUG',   ("@DEBUG@" == "ON"))
+__PROFILE__        = get_env('PROFILE', ("@PROFILE@" == "ON"))
 
 __TRACE_CALLS__     = get_env('TRACE_CALLS',   False)
 __TRACE_WARNINGS__  = get_env('TRACE_WARNINGS', False)
@@ -66,7 +66,7 @@ __KERNEL_DEBUG__    = get_env('KERNEL_DEBUG', False)
 __BACKTRACE_BIG_MEMALLOCS__ = get_env('BACKTRACE_BIG_MEMALLOCS', False)
 
 __TEST_ALL_OPENCL_PLATFORMS__ = get_env('TEST_ALL_OPENCL_PLATFORMS', False)
-__ENABLE_LONG_TESTS__ = get_env('ENABLE_LONG_TESTS', ("@ENABLE_LONG_TESTS@" is "ON"))
+__ENABLE_LONG_TESTS__ = get_env('ENABLE_LONG_TESTS', ("@ENABLE_LONG_TESTS@" == "ON"))
 
 # Threads
 __ENABLE_THREADING__ = get_env('ENABLE_THREADING', True)
diff --git a/hysop/backend/device/autotunable_kernel.py b/hysop/backend/device/autotunable_kernel.py
index 8455e872cd6e836a8446694072191cd3a5ef1962..170f3348ec1006dbc3ccc0f649018a7ce0ae9ba0 100644
--- a/hysop/backend/device/autotunable_kernel.py
+++ b/hysop/backend/device/autotunable_kernel.py
@@ -316,7 +316,7 @@ class AutotunableKernel(object):
 
     def hash_extra_kwds(self, extra_kwds):
         """Hash extra_kwds dictionnary for caching purposes."""
-        for (k,v) in extra_kwds.iteritems():
+        for (k,v) in extra_kwds.items():
             try:
                 h = hash(v)
                 if (h == hash(id(v))):
@@ -330,7 +330,7 @@ class AutotunableKernel(object):
 
     def hash_extra_parameters(self, extra_parameters):
         """Hash extra_parameters dictionnary for caching purposes."""
-        for k,v in extra_parameters.iteritems():
+        for k,v in extra_parameters.items():
             if hash(v)==hash(id(v)):
                 msg='Parameter {} of type {} is not safe to hash.'
                 msg+='\nImplement a {}.__hash__() to that it depends only on its values '
diff --git a/hysop/backend/device/codegen/base/codegen.py b/hysop/backend/device/codegen/base/codegen.py
index 80750853f1a62349ef248b5df1e6032616de652f..2cd55caa148a6831d20b854255bb7216102af0a9 100644
--- a/hysop/backend/device/codegen/base/codegen.py
+++ b/hysop/backend/device/codegen/base/codegen.py
@@ -108,7 +108,7 @@ class CodeGenerator(object):
         return self
 
     def update_requirements(self, reqs):
-        for reqname,req in reqs.iteritems():
+        for reqname,req in reqs.items():
             self.reqs[reqname] = req
         return self
 
@@ -650,7 +650,7 @@ class CodeGenerator(object):
         else:
             blocks[dname] = [poverride if poverride else prio,self.code,self_com]
 
-        for blk_name, blk_val in self.blocks.iteritems():
+        for blk_name, blk_val in self.blocks.items():
             priority,self_code,self_com = blk_val
             poverride = self.block_priorities_override[blk_name] if blk_name in self.block_priorities_override else None
             if blk_name in blocks.keys():
diff --git a/hysop/backend/device/codegen/base/enum_codegen.py b/hysop/backend/device/codegen/base/enum_codegen.py
index 8701ee23396b504004fc36c79509a53b80163de4..cedab79ca66bb8521ed1a9cfeecd4110729fd0fe 100644
--- a/hysop/backend/device/codegen/base/enum_codegen.py
+++ b/hysop/backend/device/codegen/base/enum_codegen.py
@@ -10,7 +10,7 @@ class EnumCodeGenerator(CodeGenerator):
 
     def __init__(self, enum, comments=None, ext='.c',
             initial_indent_level=0,escape_seqs=None,keywords=None):
-        
+
         super(EnumCodeGenerator,self).__init__(name=enum.name,typegen=TypeGen('float'),
                 ext=ext,initial_indent_level=initial_indent_level,
                 escape_seqs=escape_seqs,keywords=keywords)
@@ -19,16 +19,16 @@ class EnumCodeGenerator(CodeGenerator):
             raise ValueError('Input enum should be generated with hysop.tools.enum.EnumFactory.')
 
         self.enum = enum
-        
+
         self.dtype       = self.enum.dtype
         self.fields      = self.enum.fields
         self.rfields     = self.enum.rfields
         self.__getitem__ = self.enum.__getitem__
-        for k,v in self.fields().iteritems():
+        for k,v in self.fields().items():
             setattr(self,k,v)
-        
+
         self.ctype = 'enum ' + self.enum.name
-        
+
         register_ctype_dtype(self.ctype,self.dtype)
         self.gencode(comments)
 
@@ -65,9 +65,9 @@ if __name__ == '__main__':
 
     from hysop.tools.enum import EnumFactory
 
-    days = ['MONDAY','TUESDAY']     
+    days = ['MONDAY','TUESDAY']
     enum = EnumFactory.create('Days',days)
     scg  = EnumCodeGenerator(enum, comments='Days enumeration')
     scg.edit()
-    
+
 
diff --git a/hysop/backend/device/codegen/base/function_codegen.py b/hysop/backend/device/codegen/base/function_codegen.py
index dfb36ff9d836b7576b9745cb787a40c2893fe5b8..9a0e1301c7a082c1cd0fedea8498dd5a1956dca1 100644
--- a/hysop/backend/device/codegen/base/function_codegen.py
+++ b/hysop/backend/device/codegen/base/function_codegen.py
@@ -22,7 +22,7 @@ class FunctionBase(object):
         check_instance(fargs,ArgDict)
 
         fargs.release()
-        for (varname, varval) in known_args.iteritems():
+        for (varname, varval) in known_args.items():
             if varname in fargs.keys():
                 if isinstance(varval, CodegenVariable):
                     if not varval.known():
diff --git a/hysop/backend/device/codegen/base/statistics.py b/hysop/backend/device/codegen/base/statistics.py
index 716015254a7a80ac0ea95c364b2bb0b52df7dc66..ad8d2d29a20a9fa0b8e2cdd62ef9164039f8d14f 100644
--- a/hysop/backend/device/codegen/base/statistics.py
+++ b/hysop/backend/device/codegen/base/statistics.py
@@ -67,7 +67,7 @@ class WorkStatistics(object):
         return float(self.global_mem_byte_writes)/self.global_mem_transactions()
     def global_mem_read_ratio(self):
         return float(self.global_mem_byte_reads)/self.global_mem_transactions()
-    
+
     def local_mem_transactions(self):
         return self.local_mem_byte_writes + self.local_mem_byte_reads
     def local_mem_rw_ratio(self):
@@ -82,7 +82,7 @@ class WorkStatistics(object):
         return (self.local_mem_transactions() > 0)
     def has_global_mem_transactions(self):
         return (self.global_mem_transactions() > 0)
-    
+
     def __add__(self, rhs):
         check_instance(rhs,WorkStatistics)
         stats = copy.deepcopy(self)
@@ -91,7 +91,7 @@ class WorkStatistics(object):
         stats.local_mem_byte_reads   += rhs.local_mem_byte_reads
         stats.local_mem_byte_writes  += rhs.local_mem_byte_writes
 
-        for (k,v) in rhs.ops_per_type.iteritems():
+        for (k,v) in rhs.ops_per_type.items():
             if k not in stats.ops_per_type:
                 stats.ops_per_type[k]  = v
             else:
@@ -114,18 +114,18 @@ class WorkStatistics(object):
         return self.__mul__(lhs)
 
     def __str__(self):
-        op_count = [''] + ['{}: {}'.format(k,v) for (k,v) in self.ops_per_type.iteritems() ]
+        op_count = [''] + ['{}: {}'.format(k,v) for (k,v) in self.ops_per_type.items() ]
         op_count = '\n    '.join(op_count)
 
         ss = ':: Work Statistics ::'
-        
+
         if self.has_global_mem_transactions():
             ss += '\n  Global memory:  load={} store={} total={} rw_ratio={}'.format(
             bytes2str(self.global_mem_byte_reads),
             bytes2str(self.global_mem_byte_writes),
             bytes2str(self.global_mem_transactions()),
             round(self.global_mem_rw_ratio(),2))
-        
+
         if self.has_local_mem_transactions():
             ss += '\n  Local  memory:  load={} store={} total={} rw_ratio={}'.format(
             bytes2str(self.local_mem_byte_reads),
@@ -136,7 +136,7 @@ class WorkStatistics(object):
         ss += '\n  Operations count: {}'.format(op_count)
 
         return ss
-            
+
 class TimedWorkStatistics(WorkStatistics):
     def __init__(self, workstat, duration):
         super(TimedWorkStatistics,self).__init__(workstat)
@@ -161,18 +161,18 @@ class TimedWorkStatistics(WorkStatistics):
             if dtype not in dtype_ops.keys():
                 msg = 'unknown type {}, valed types are:\n\t{}.'.format(dtype, dtype_ops.keys())
                 raise ValueError(msg)
-        
+
         ops_count = {}
-        for (dtype, N) in self.ops_per_type.iteritems():
+        for (dtype, N) in self.ops_per_type.items():
             (multiplier,op_category) = dtype_ops[dtype]
             if op_category not in ops_count:
                 ops_count[op_category] = 0.0
             ops_count[op_category] += multiplier*N
 
         ops_per_second = {}
-        for (op_category, op_count) in ops_count.iteritems():
+        for (op_category, op_count) in ops_count.items():
             ops_per_second[op_category] = op_count/self.duration
-        
+
         self._ops_per_category = ops_count
         self._ops_per_second   = ops_per_second
 
diff --git a/hysop/backend/device/codegen/base/struct_codegen.py b/hysop/backend/device/codegen/base/struct_codegen.py
index f858f28f44ec88832b67c6605688be344852be06..e6b0efee29d4a0bb1aaa0b034dcd995c935a33ee 100644
--- a/hysop/backend/device/codegen/base/struct_codegen.py
+++ b/hysop/backend/device/codegen/base/struct_codegen.py
@@ -15,17 +15,17 @@ class StructCodeGenerator(OpenClCodeGenerator):
             typedef=None,comments=None,
             ctype_overrides=None,
             custom_types={}):
-        
+
         super(StructCodeGenerator,self).__init__(name=name,typegen=typegen)
 
         self.typedef = typedef
         self.dtype = np.dtype(dtype)
         self.ctype = self.typedef if self.typedef else 'struct {}'.format(self.name)
-        
+
         cl.tools.get_or_register_dtype(self.ctype, self.dtype)
         register_ctype_dtype(self.ctype, self.dtype)
 
-        for _ctype,_dtype in custom_types.iteritems():
+        for _ctype,_dtype in custom_types.items():
             cl.tools.get_or_register_dtype(_ctype,dtype=_dtype)
 
         self.gencode(comments, ctype_overrides)
@@ -42,7 +42,7 @@ class StructCodeGenerator(OpenClCodeGenerator):
     def gencode(self, comments, ctype_overrides):
         struct_vars = re.compile('\s+((?:struct\s+)?\w+)\s+((?:\s*\**(?:\w+)(?:\[\d+\])*[,;])+)')
         lines = self.c_decl().split('\n')
-        
+
         with self._struct_(name=self.name,typedef=self.typedef):
             with self._var_block_() as vb:
                 i=0
@@ -75,5 +75,5 @@ if __name__ == '__main__':
 
     scg = StructCodeGenerator('TestStruct',dtype,typedef=None,typegen=tg)
     scg.edit()
-    
+
 
diff --git a/hysop/backend/device/codegen/base/utils.py b/hysop/backend/device/codegen/base/utils.py
index 7ef7fda6e714af8fb8e19689c51974ed735d9648..c8528608fde2c50f022441907f21798a87f9a7fc 100644
--- a/hysop/backend/device/codegen/base/utils.py
+++ b/hysop/backend/device/codegen/base/utils.py
@@ -1,4 +1,4 @@
-import hashlib 
+import hashlib
 
 class WriteOnceDict(dict):
     def __init__(self,**kargs):
@@ -65,7 +65,7 @@ class ArgDict(WriteOnceDict):
         return iter([(argname, self.__getitem__(argname)) for argname in self.arg_order])
 
     def update(self, other):
-        for (key,val) in other.iteritems():
+        for (key,val) in other.items():
             self[key] = val
         return self
 
@@ -88,7 +88,7 @@ class ArgDict(WriteOnceDict):
                 assert var.known()
                 assert var.symbolic_mode == False
                 assert var.is_symbolic() == False
-        
+
         if len(function_impl_args) and len(function_proto_args[-1]):
             if function_proto_args[-1][-1]=='\n':
                 function_proto_args[-1] = function_proto_args[-1][:-1]
@@ -112,7 +112,7 @@ class ArgDict(WriteOnceDict):
             return '_'+self.hash(suffix)
         else:
             return ''
-    
+
     #handle type function overloading
     def codegen_name_suffix(self,return_type, known_args):
         suffix = '({})_'.format(return_type)
@@ -130,7 +130,7 @@ class ArgDict(WriteOnceDict):
             return '_'+self.hash(suffix)
         else:
             return ''
-    
+
     # robust with up to 256 functions with the same basename
     # max_fun = sqrt(16**nb) = 2**(2*nb)
     def hash(self,string):
@@ -138,7 +138,7 @@ class ArgDict(WriteOnceDict):
 
 
 class SortedDict(dict):
-    
+
     @classmethod
     def _key(cls, k):
         if hasattr(k, 'name'):
diff --git a/hysop/backend/device/codegen/base/variables.py b/hysop/backend/device/codegen/base/variables.py
index 0b0b9f960ee98e4076b2f7f6f38a90bcbac21de9..d226baf0a36a136cd836926e1e2c34471aeecdf5 100644
--- a/hysop/backend/device/codegen/base/variables.py
+++ b/hysop/backend/device/codegen/base/variables.py
@@ -929,7 +929,7 @@ class CodegenStruct(CodegenVariable):
         if force is None:
             return
         super(CodegenStruct,self).force_symbolic(force)
-        for k,var in self.vars.iteritems():
+        for k,var in self.vars.items():
             var.force_symbolic(force)
 
     def __getitem__(self,key):
diff --git a/hysop/backend/device/codegen/functions/apply_stencil.py b/hysop/backend/device/codegen/functions/apply_stencil.py
index 2035d8e076ca3f779fb26a2d1c21f6c686dc64e3..d9d83534e59cf5340a3a1888adb6c553e1327ddd 100644
--- a/hysop/backend/device/codegen/functions/apply_stencil.py
+++ b/hysop/backend/device/codegen/functions/apply_stencil.py
@@ -67,7 +67,7 @@ class ApplyStencilFunction(OpenClFunctionCodeGenerator):
             args['offset']  = CodegenVariable('offset', itype, typegen, add_impl_const=True)
             args['stride']  = CodegenVectorClBuiltin('stride', itype, dim, typegen,
                     add_impl_const=True,nl=True)
-        for varname,vartype in multipliers.iteritems():
+        for varname,vartype in multipliers.items():
             if vartype=='ftype':
                 vartype=ftype
             elif vartype=='vtype':
diff --git a/hysop/backend/device/codegen/kernels/custom_symbolic.py b/hysop/backend/device/codegen/kernels/custom_symbolic.py
index 595ae3ce29d9b8d4c28b6f120a947fae727ac99e..066f9382e1b54b44bd229aa16a489092c311a623 100644
--- a/hysop/backend/device/codegen/kernels/custom_symbolic.py
+++ b/hysop/backend/device/codegen/kernels/custom_symbolic.py
@@ -221,7 +221,7 @@ class SymbolicCodegenContext(object):
             args['L'] = local_size
         self.local_size = local_size
 
-        for argname, arg in args.iteritems():
+        for argname, arg in args.items():
             setattr(self, argname, arg)
 
         return args
@@ -348,7 +348,7 @@ class SymbolicCodegenContext(object):
         # READ ONLY PARAMETERS
         # (ndim<=1) and (1<=size<=16) => simple vector constant
         # (ndim>1) or (size>16)       => ptr (const) __constant memory space
-        for (pname, param) in expr_info.input_params.iteritems():
+        for (pname, param) in expr_info.input_params.items():
             assert (pname not in expr_info.output_params)
             shape = param.shape
             ctype = param.ctype
@@ -365,7 +365,7 @@ class SymbolicCodegenContext(object):
 
         # OUTPUT PARAMETERS
         # not supported yet (should be non const __global ptrs).
-        for pname, param in expr_info.output_params.iteritems():
+        for pname, param in expr_info.output_params.items():
             raise NotImplementedError('Output parameters are not supported.')
 
         return args
@@ -375,7 +375,7 @@ class SymbolicCodegenContext(object):
         expr_info = self.expr_info
 
         args = ArgDict()
-        for (sname, scalar) in expr_info.scalars.iteritems():
+        for (sname, scalar) in expr_info.scalars.items():
             ctype = scalar.ctype
             vsize = self.vectorization
             scalar = CodegenVectorClBuiltin(sname, ctype, vsize, typegen=typegen,
@@ -527,7 +527,7 @@ class CustomSymbolicKernelGenerator(KernelCodeGenerator):
         # read-only input fields
         ei = expr_info
         di = expr_info.discretization_info
-        for (obj, counts) in di.read_counter.iteritems():
+        for (obj, counts) in di.read_counter.items():
             assert (counts is not None)
             if npw.array_equal(counts,0):
                 continue
@@ -582,7 +582,7 @@ class CustomSymbolicKernelGenerator(KernelCodeGenerator):
                 raise TypeError(msg)
 
         # output fields
-        for (obj, counts) in di.write_counter.iteritems():
+        for (obj, counts) in di.write_counter.items():
             assert (counts is not None)
             if npw.array_equal(counts,0):
                 continue
@@ -632,7 +632,7 @@ class CustomSymbolicKernelGenerator(KernelCodeGenerator):
                 raise TypeError(msg)
 
         # parameters
-        for argname, arg in csc.param_args.iteritems():
+        for argname, arg in csc.param_args.items():
             param_args[argname] = arg
             kargs[argname] = arg
 
@@ -756,7 +756,7 @@ class CustomSymbolicKernelGenerator(KernelCodeGenerator):
         has_private_loads, has_private_stores = False, False
         has_local_loads,   has_local_stores   = False, False
 
-        for (array, array_data) in array_args.iteritems():
+        for (array, array_data) in array_args.items():
             if isinstance(array, OpenClSymbolicArray):
                 name = array.varname
             elif isinstance(array, DiscreteScalarFieldView):
@@ -861,7 +861,7 @@ class CustomSymbolicKernelGenerator(KernelCodeGenerator):
                 array_ghosts = self.csc.array_contiguous_ghosts[array]
                 local_size_per_index = local_size_per_field.setdefault(array,
                                             npw.int_zeros(shape=(array.nb_components, 3)))
-                for (i, data) in array_data.iteritems():
+                for (i, data) in array_data.items():
                     is_read    = (read_counts is not None)  and (read_counts[i]>0)
                     is_written = (write_counts is not None) and (write_counts[i]>0)
                     is_ro = (is_read and not is_written)
@@ -1068,7 +1068,7 @@ class CustomSymbolicKernelGenerator(KernelCodeGenerator):
             code = '{} = {};'.format(loop_id[kdim:kdim+granularity], gidx[:granularity])
             self.append(code)
             with self._align_() as al:
-                for (array, array_vid) in array_vids.iteritems():
+                for (array, array_vid) in array_vids.items():
                     ghosts = array_ghosts[array]
                     code = '{} $= {} $+ {};'.format(array_vid[kdim:kdim+granularity],
                                                   loop_id[kdim:kdim+granularity],
@@ -1195,10 +1195,10 @@ class CustomSymbolicKernelGenerator(KernelCodeGenerator):
                         if self.array_vids:
                             self.comment('Compute global offsets and line pointers')
                             with self._align_() as al:
-                                for array, vid in array_vids.iteritems():
+                                for array, vid in array_vids.items():
                                     gids    = array_gids[array]
                                     strides = array_strides[array]
-                                    for (key, gid) in gids.iteritems():
+                                    for (key, gid) in gids.items():
                                         stride = strides[key]
                                         idot = ' $+ '.join('{}*{}'.format(vid[i], stride[i])
                                                 for i in range(array_dim-1, -1, -1))
@@ -1229,7 +1229,7 @@ class CustomSymbolicKernelGenerator(KernelCodeGenerator):
         local_size  = s.vars['local_size']
         num_groups  = s.vars['num_groups']
 
-        field_mesh_infos = {k:s.args[v] for (k,v) in self.field_mesh_infos.iteritems()}
+        field_mesh_infos = {k:s.args[v] for (k,v) in self.field_mesh_infos.items()}
         self.field_mesh_infos = field_mesh_infos
 
         (compute_grid_size, loop_id, vectorization_var, max_extra_vwork_var, local_work,
diff --git a/hysop/backend/device/codegen/kernels/directional_stretching.py b/hysop/backend/device/codegen/kernels/directional_stretching.py
index 75a38e6e9f8a11753598c825f4e7557f8b1ac08f..813032f0597a3d0f1ff29f24900dc6b89118605c 100644
--- a/hysop/backend/device/codegen/kernels/directional_stretching.py
+++ b/hysop/backend/device/codegen/kernels/directional_stretching.py
@@ -488,7 +488,7 @@ class DirectionalStretchingKernel(KernelCodeGenerator):
 
             if is_cached:
                 with s._align_() as al:
-                    for varname,var in cached_vars.iteritems():
+                    for varname,var in cached_vars.items():
                         var.declare(al,align=True)
                 s.jumpline()
 
diff --git a/hysop/backend/device/codegen/kernels/stretching.py b/hysop/backend/device/codegen/kernels/stretching.py
index 202d1dc9c2fcaf395c3a6c61199521f748ece42e..901c03d136165f89d3d42bf077cae44979933dd6 100644
--- a/hysop/backend/device/codegen/kernels/stretching.py
+++ b/hysop/backend/device/codegen/kernels/stretching.py
@@ -178,7 +178,7 @@ if __name__ == '__main__':
             contexts[dev] = ctx
 
         tg = _test_typegen('float', 'dec')
-        for dev,ctx in contexts.iteritems():
+        for dev,ctx in contexts.items():
             ek = CachedStretchingKernel(typegen=tg, context=ctx, device=dev,
                     order=16, dim=1 ,ftype=tg.fbtype,
                     known_vars=dict(local_size=(1024,1,1)))
diff --git a/hysop/backend/device/codegen/kernels/tests/test_codegen_directional_remesh.py b/hysop/backend/device/codegen/kernels/tests/test_codegen_directional_remesh.py
index e4b373a0ae15b995a3ca364581dff7408e9363a7..729179140b34ebceb864974c791052a3dea4898e 100644
--- a/hysop/backend/device/codegen/kernels/tests/test_codegen_directional_remesh.py
+++ b/hysop/backend/device/codegen/kernels/tests/test_codegen_directional_remesh.py
@@ -196,7 +196,7 @@ Allocating fields:
                     assert ref_values['dx'][0][0] == dx
 
                 dim_mesh_infos = mesh_infos.setdefault(dim, {})
-                for vname, field in all_fields[k].iteritems():
+                for vname, field in all_fields[k].items():
                     (values, mesh_info) = _test_mesh_info('{}_mesh_info'.format(vname),
                             typegen, dim, field.ghosts[:dim], field.grid_size[:dim])
                     if (dim==1):
@@ -211,11 +211,11 @@ Allocating fields:
                                         hostbuf=data.data[i]) for i in range(data.nb_components))
 
         host_buffers_init = dict(
-                no_ghosts   = {k:v.data for (k,v) in all_fields['no_ghosts'].iteritems()   },
-                with_ghosts = {k:v.data for (k,v) in all_fields['with_ghosts'].iteritems() })
+                no_ghosts   = {k:v.data for (k,v) in all_fields['no_ghosts'].items()   },
+                with_ghosts = {k:v.data for (k,v) in all_fields['with_ghosts'].items() })
         device_buffers = dict(
-                no_ghosts   = {k:clbuf(k,v) for (k,v) in all_fields['no_ghosts'].iteritems()   },
-                with_ghosts = {k:clbuf(k,v) for (k,v) in all_fields['with_ghosts'].iteritems() })
+                no_ghosts   = {k:clbuf(k,v) for (k,v) in all_fields['no_ghosts'].items()   },
+                with_ghosts = {k:clbuf(k,v) for (k,v) in all_fields['with_ghosts'].items() })
         host_buffers_reference = copy.deepcopy(host_buffers_init)
         host_buffers_gpu       = copy.deepcopy(host_buffers_init)
 
@@ -361,7 +361,7 @@ Allocating fields:
         MIN_GHOSTS = self.MAX_ADVEC + self.MAX_REMESH
         self.MIN_GHOSTS = MIN_GHOSTS
 
-        for target, fields in self.all_fields.iteritems():
+        for target, fields in self.all_fields.items():
             print('>Target {}'.format(target))
             host_init_buffers      = self.host_buffers_init[target]
             host_buffers_reference = self.host_buffers_reference[target]
@@ -547,7 +547,7 @@ _do_remesh_on_gpu_and_check()
         if (boundary != BoundaryCondition.NONE):
             raise RuntimeError('Unknown boundaty {}.'.format(boundary))
 
-        for target, fields in self.all_fields.iteritems():
+        for target, fields in self.all_fields.items():
             print('>Target {}'.format(target))
             mesh_infos             = self.all_mesh_infos[target][work_dim]
             device_buffers         = self.device_buffers[target]
@@ -560,7 +560,7 @@ _do_remesh_on_gpu_and_check()
             field_mesh_infos = {}
             field_offsets = {}
             field_strides = {}
-            for fname, field in fields.iteritems():
+            for fname, field in fields.items():
                 grid_size = field.grid_size
                 ghosts = field.ghosts
                 views = field.view(dim=work_dim)
diff --git a/hysop/backend/device/codegen/symbolic/functions/custom_symbolic_function.py b/hysop/backend/device/codegen/symbolic/functions/custom_symbolic_function.py
index f5db424fbce9e08c3f1283e6eb07798e11b5fb7d..20f969e6db0ce9148b26bc4770357711e8934ba5 100644
--- a/hysop/backend/device/codegen/symbolic/functions/custom_symbolic_function.py
+++ b/hysop/backend/device/codegen/symbolic/functions/custom_symbolic_function.py
@@ -42,7 +42,7 @@ class CustomSymbolicFunction(OpenClFunctionCodeGenerator):
     @classmethod
     def array_name(cls, array):
         return cls.varname(array.varname)
-    
+
     @classmethod
     def default_out_of_bounds_value(cls, dtype):
         if is_complex(dtype):
@@ -50,7 +50,7 @@ class CustomSymbolicFunction(OpenClFunctionCodeGenerator):
                 return '(float2)(NAN, NAN)'
             elif (dtype is npw.complex128):
                 return '(double2)(NAN, NAN)'
-            else: 
+            else:
                 msg='Unsupported complex dtype {}.'.format(dtype)
                 raise NotImplementedError(msg)
         elif is_fp(dtype):
@@ -58,7 +58,7 @@ class CustomSymbolicFunction(OpenClFunctionCodeGenerator):
             dval = 'NAN'
         elif is_unsigned(dtype):
             # uint max
-            dval = '0x'+ ''.join(('ff',)*npw.dtype(dtype).itemsize) 
+            dval = '0x'+ ''.join(('ff',)*npw.dtype(dtype).itemsize)
         elif is_signed(dtype):
             # int max
             dval = '0x7f' + ''.join(('ff',)*(npw.dtype(dtype).itemsize-1))
@@ -66,7 +66,7 @@ class CustomSymbolicFunction(OpenClFunctionCodeGenerator):
             msg='Dtype {} is not signed, unsigned or floating point...'.format(dtype)
             raise TypeError(msg)
         return dval
-    
+
     def __init__(self, csc, name, expr, target_ctype=None, inline=True, known_args=None):
         from hysop.backend.device.codegen.kernels.custom_symbolic import SymbolicCodegenContext
         check_instance(csc, SymbolicCodegenContext)
@@ -79,29 +79,29 @@ class CustomSymbolicFunction(OpenClFunctionCodeGenerator):
         self.stencils = {}
         self.__fn_counter = 0
         self.known_args = known_args
-        
+
         pexpr = self.parse_expr(csc, name, expr, args, reqs)
-            
+
         if (target_ctype is not None) and (pexpr.ctype != target_ctype):
             pexpr = OpenClCastUtils.promote_to(pexpr, target_ctype)
 
         self.is_fcall = isinstance(pexpr, FunctionCall)
         self.pexpr = pexpr
 
-        super(CustomSymbolicFunction,self).__init__(basename=name, output=pexpr.ctype, args=args, 
+        super(CustomSymbolicFunction,self).__init__(basename=name, output=pexpr.ctype, args=args,
                 typegen=csc.typegen, inline=inline, known_args=known_args)
-        
+
 
         self.update_requirements(reqs)
-        
+
         self.gencode(csc, pexpr)
         self.ctype = pexpr.ctype
-    
+
     @classmethod
     def fmt_args(cls, args):
         new_args = ArgDict()
         argsmap = {}
-        for i, (argname, arg) in enumerate(args.iteritems()):
+        for i, (argname, arg) in enumerate(args.items()):
             new_argname = 'a{}'.format(i)
             new_arg = arg.newvar(name=new_argname)
             new_args[new_argname] = new_arg
@@ -110,16 +110,16 @@ class CustomSymbolicFunction(OpenClFunctionCodeGenerator):
 
     def fmt_kwds(self, kwds):
         new_kwds = {}
-        for vname, val in kwds.iteritems():
+        for vname, val in kwds.items():
             new_kwds[self.argsmap[vname]] = val
         return new_kwds
-    
+
     def get_stencil_fn(self, csc, expr):
         from hysop.backend.device.codegen.symbolic.functions.apply_stencil import CustomApplyStencilFunction
         stencil = expr.stencil
         dummy_stencil_fn = CustomApplyStencilFunction(csc=csc, name='dummy', expr=expr, known_args=self.known_args)
         _hash = hash(str(dummy_stencil_fn))
-        if _hash in self.stencils: 
+        if _hash in self.stencils:
             stencil_fn = self.stencils[_hash]
         else:
             stencil_name = 'stencil_{}'.format(len(self.stencils))
@@ -134,9 +134,9 @@ class CustomSymbolicFunction(OpenClFunctionCodeGenerator):
         fn_kwds = fn.args.copy()
         top_fn = fn
         if kwds:
-            s_fn_kwds = {k: '({})'.format(v) for (k,v) in fn_kwds.iteritems()}
+            s_fn_kwds = {k: '({})'.format(v) for (k,v) in fn_kwds.items()}
             fn_kwds = {}
-            for (argname, argval) in fcall.fn_kwds.iteritems():
+            for (argname, argval) in fcall.fn_kwds.items():
                 argval = str(argval)
                 for (an,av) in sorted(s_fn_kwds.items(), key=lambda x: len(x[0])):
                     argval.replace(an, av)
@@ -145,9 +145,9 @@ class CustomSymbolicFunction(OpenClFunctionCodeGenerator):
             fcall   = fn.pexpr
             ctype   = fn.ctype
             fn      = fcall.fn
-            s_fn_kwds = {k: '({})'.format(v) for (k,v) in fn_kwds.iteritems()}
+            s_fn_kwds = {k: '({})'.format(v) for (k,v) in fn_kwds.items()}
             fn_kwds = {}
-            for (argname, argval) in fcall.fn_kwds.iteritems():
+            for (argname, argval) in fcall.fn_kwds.items():
                 argval = str(argval)
                 for (an,av) in sorted(s_fn_kwds.items(), key=lambda x: len(x[0])):
                     argval.replace(an, av)
@@ -158,10 +158,10 @@ class CustomSymbolicFunction(OpenClFunctionCodeGenerator):
         else:
             self.check_and_set(reqs, fn.name, fn)
             self.__fn_counter += 1
-        for argname, arg in top_fn.args.iteritems():
+        for argname, arg in top_fn.args.items():
             self.check_and_set(args, argname, arg)
         return fn, fn_kwds
-            
+
     @classmethod
     def check_and_set(cls, dic, key, value):
         if (key in dic):
@@ -198,7 +198,7 @@ class CustomSymbolicFunction(OpenClFunctionCodeGenerator):
             ctype = expr.parameter.ctype
             param = expr.parameter
             index = expr.idx
-            
+
             pname = param.name
             var = csc.param_args[pname]
 
@@ -224,13 +224,13 @@ class CustomSymbolicFunction(OpenClFunctionCodeGenerator):
                 assert csc.local_size_known
                 size   = csc.array_size(field, index)
                 ghosts = csc.array_ghost(field, index)
-                vload = Vload(csc.typegen, var.ctype, csc.vectorization, default_val=dval, 
+                vload = Vload(csc.typegen, var.ctype, csc.vectorization, default_val=dval,
                         itype=csc.itype, restrict=True, storage=var.storage, known_args=dict(size=size))
                 if (vload.name in reqs):
                     vload = reqs[vload.name]
                 else:
                     reqs[vload.name] = vload
-                for argname, arg in vload.args.iteritems():
+                for argname, arg in vload.args.items():
                     if (argname == 'data'):
                         argname=var.name
                         arg=var
@@ -274,7 +274,7 @@ class CustomSymbolicFunction(OpenClFunctionCodeGenerator):
             ctype        = stencil_fn.ctype
             pexpr = FunctionCall(ctype, stencil_fn, stencil_kwds)
             self.check_and_set(reqs, stencil_fn.name, stencil_fn)
-            for argname, argval in stencil_fn.args.iteritems():
+            for argname, argval in stencil_fn.args.items():
                 self.check_and_set(args, argname, argval)
         elif isinstance(expr, WaveNumberIndex):
             expr = expr.real_index
@@ -284,7 +284,7 @@ class CustomSymbolicFunction(OpenClFunctionCodeGenerator):
             if (sname=='dx'):
                 args[csc.dx.name] = csc.dx
                 pexpr = OpenClVariable(csc.dx.ctype, csc.dx)
-            elif (expr in csc.space_symbols): 
+            elif (expr in csc.space_symbols):
                 xi = csc.space_symbols[expr]
                 self.check_and_set(args, xi.name, xi)
                 pexpr = OpenClVariable(xi.ctype, xi)
@@ -307,13 +307,13 @@ class CustomSymbolicFunction(OpenClFunctionCodeGenerator):
             msg='Unknown expression type {}.\n  __mro__ = {}, expr={}\n'
             msg=msg.format(type(expr), type(expr).__mro__, expr)
             raise NotImplementedError(msg)
-        
+
         if not isinstance(pexpr, (TypedI, CustomSymbolicFunction)):
             msg='Failed to parse the following expression:\n{}\n'.format(expr)
             msg+='Expanded expression is:\n{}\n'.format(pexpr)
             raise RuntimeError(msg)
         return pexpr
-    
+
     def gencode(self, csc, pexpr):
         with self._function_():
             printer = OpenClPrinter(csc.typegen, self)
diff --git a/hysop/backend/device/kernel_autotuner.py b/hysop/backend/device/kernel_autotuner.py
index e726d0ee3a0817f3b5e49914ce6cfd5391c42fc1..7bb7792d9d61e1fe51110e9cf8910080b787b9e3 100644
--- a/hysop/backend/device/kernel_autotuner.py
+++ b/hysop/backend/device/kernel_autotuner.py
@@ -572,7 +572,7 @@ class KernelAutotuner(object):
                     in zip(args_mapping.keys(), arg_indices))
             msg+='\nExpected contiguous integer argument positions.'
             raise ValueError(msg)
-        for (arg_name, arg_value) in kernel_args.iteritems():
+        for (arg_name, arg_value) in kernel_args.items():
             if (arg_name not in args_mapping):
                 msg='Unknown argument {}, valid ones are {}.'
                 msg=msg.format(arg_name, ', '.join(args_mapping.keys()))
@@ -802,7 +802,7 @@ class KernelAutotuner(object):
             id1 = self.indent(1)
             print('\n|> BEST OVERALL RESULT for kernel {}:'.format(self.name))
             print(id1+' => Extra params:')
-            for ep,val in best_extra_params.iteritems():
+            for ep,val in best_extra_params.items():
                 print(self.indent(2)+'*{}: {}'.format(ep, val))
             msg=id1+' => WL={} G={} L={}'
             msg=msg.format(best_work_load, best_global_size, best_local_size)
diff --git a/hysop/backend/device/kernel_autotuner_statistics.py b/hysop/backend/device/kernel_autotuner_statistics.py
index 1f086e2b00f60bc0b2c3838c5997c6a572477fc8..63cfb7bcc474046d33832d6cd3ca09105fe3666e 100644
--- a/hysop/backend/device/kernel_autotuner_statistics.py
+++ b/hysop/backend/device/kernel_autotuner_statistics.py
@@ -7,9 +7,9 @@ class AutotunedKernelStatistics(dict):
     class AutotunedParameterStatistics(dict):
         class AutotunedRunStatistics(object):
             def __init__(self,
-                        work_size, work_load, 
-                        local_work_size, global_work_size, 
-                        statistics, pruned, 
+                        work_size, work_load,
+                        local_work_size, global_work_size,
+                        statistics, pruned,
                         local_best, error):
                 self.work_size = work_size
                 self.work_load = work_load
@@ -22,7 +22,7 @@ class AutotunedKernelStatistics(dict):
             def good(self):
                 return (self.error is None)
         def __init__(self, extra_parameters,
-                       max_kernel_work_group_size=None, 
+                       max_kernel_work_group_size=None,
                        preferred_work_group_size_multiple=None):
             self.extra_parameters = extra_parameters
             self.max_kernel_work_group_size = max_kernel_work_group_size
@@ -91,10 +91,10 @@ class AutotunedKernelStatistics(dict):
 
     def collect_exec_times(self):
         run_times = ()
-        for (extra_param_hash, parameter_statistics) in self.iteritems():
+        for (extra_param_hash, parameter_statistics) in self.items():
             if not parameter_statistics.good():
                 continue
-            for (run_key, run_statistics) in parameter_statistics.iteritems():
+            for (run_key, run_statistics) in parameter_statistics.items():
                 if not run_statistics.good():
                     continue
                 run_time = run_statistics.statistics.mean
diff --git a/hysop/backend/device/opencl/autotunable_kernels/custom_symbolic.py b/hysop/backend/device/opencl/autotunable_kernels/custom_symbolic.py
index 240f4ede9867efe7f797c0f77edd1757a3748f9e..82d6e8f77bfd6aee16a9de6eb1fca06c8029eac3 100644
--- a/hysop/backend/device/opencl/autotunable_kernels/custom_symbolic.py
+++ b/hysop/backend/device/opencl/autotunable_kernels/custom_symbolic.py
@@ -89,13 +89,13 @@ class OpenClAutotunableCustomSymbolicKernel(OpenClAutotunableKernel):
 
         name = expr_info.name
         for dfields in (expr_info.input_dfields, expr_info.output_dfields):
-            for (field, dfield) in dfields.iteritems():
+            for (field, dfield) in dfields.items():
                 if (dfield.compute_resolution != cshape).any():
                     msg='Resolution mismatch between discrete fields, '
                     msg+='got {} but cshape={}, cannot generate kernel.'
                     msg=msg.format(dfield.compute_resolution, cshape)
                     raise ValueError(msg)
-        for (field, dfield) in expr_info.input_dfields.iteritems():
+        for (field, dfield) in expr_info.input_dfields.items():
             if (dfield.dfield.ghosts < expr_info.min_ghosts[field]).any():
                 msg='Min ghosts condition not met for discrete field {}:\n'
                 msg+=' expected {} but got only {}.'
@@ -135,7 +135,7 @@ class OpenClAutotunableCustomSymbolicKernel(OpenClAutotunableKernel):
         # read-only input fields (opencl buffers can be marked read-only in discrete field views)
         # read-only input arrays and input buffers
         di = expr_info.discretization_info
-        for (obj, counts) in self.sort_key_by_name(di.read_counter.iteritems()):
+        for (obj, counts) in self.sort_key_by_name(di.read_counter.items()):
             if isinstance(obj, di.IndexedCounterTypes):
                 assert isinstance(obj, DiscreteScalarFieldView)
                 dfield = expr_info.input_dfields[obj._field]
@@ -169,7 +169,7 @@ class OpenClAutotunableCustomSymbolicKernel(OpenClAutotunableKernel):
             has_complex |= is_complex(obj.dtype)
 
         # output fields, output arrays (output buffers are not supported yet)
-        for (obj, counts) in self.sort_key_by_name(di.write_counter.iteritems()):
+        for (obj, counts) in self.sort_key_by_name(di.write_counter.items()):
             if isinstance(obj, di.IndexedCounterTypes):
                 assert isinstance(obj, DiscreteScalarFieldView)
                 dfield = expr_info.output_dfields[obj._field]
@@ -200,7 +200,7 @@ class OpenClAutotunableCustomSymbolicKernel(OpenClAutotunableKernel):
 
 
         # read-only input parameters
-        for (pname, param) in expr_info.input_params.iteritems():
+        for (pname, param) in expr_info.input_params.items():
             if (pname in expr_info.output_params):
                 continue
             has_complex |= is_complex(param.dtype)
@@ -215,7 +215,7 @@ class OpenClAutotunableCustomSymbolicKernel(OpenClAutotunableKernel):
                 assert make_param().dtype == param_dtype
 
         # output parameters
-        for (pname, param) in expr_info.output_params.iteritems():
+        for (pname, param) in expr_info.output_params.items():
             has_complex |= is_complex(param.dtype)
             if param.const:
                 msg='A constant parameter cannot be set as output parameter.'
@@ -263,7 +263,7 @@ class OpenClAutotunableCustomSymbolicKernel(OpenClAutotunableKernel):
 
         # read-only input fields, input arrays, input buffers
         di = expr_info.discretization_info
-        for (obj, counts) in self.sort_key_by_name(di.read_counter.iteritems()):
+        for (obj, counts) in self.sort_key_by_name(di.read_counter.items()):
             if isinstance(obj, di.IndexedCounterTypes):
                 assert isinstance(obj, DiscreteScalarFieldView)
                 dfield = obj
@@ -299,7 +299,7 @@ class OpenClAutotunableCustomSymbolicKernel(OpenClAutotunableKernel):
                 raise TypeError(msg)
 
         # output fields, arrays
-        for (obj, counts) in self.sort_key_by_name(di.write_counter.iteritems()):
+        for (obj, counts) in self.sort_key_by_name(di.write_counter.items()):
             if isinstance(obj, di.IndexedCounterTypes):
                 assert isinstance(obj, DiscreteScalarFieldView)
                 dfield = obj
@@ -332,14 +332,14 @@ class OpenClAutotunableCustomSymbolicKernel(OpenClAutotunableKernel):
                 raise TypeError(msg)
 
         # read-only input parameters
-        for (pname, param) in expr_info.input_params.iteritems():
+        for (pname, param) in expr_info.input_params.items():
             if (pname in expr_info.output_params) or param.const:
                 continue
             args_mapping[pname] = (arg_index, parameter_dtypes[pname])
             arg_index += 1
 
         # output parameters
-        for (pname, param) in expr_info.output_params.iteritems():
+        for (pname, param) in expr_info.output_params.items():
             assert not param.const
             args_mapping[pname] = (arg_index, cl.MemoryObjectHolder)
             arg_index += 1
@@ -497,7 +497,7 @@ class OpenClAutotunableCustomSymbolicKernel(OpenClAutotunableKernel):
 
         # return a method to update non constant input parameters
         def update_input_parameters():
-            return { pname:pmake() for (pname, pmake) in parameter_make.iteritems() }
+            return { pname:pmake() for (pname, pmake) in parameter_make.items() }
 
         return (kernel, args_dict, update_input_parameters)
 
diff --git a/hysop/backend/device/opencl/opencl_array_backend.py b/hysop/backend/device/opencl/opencl_array_backend.py
index 7e5a6b3a765e5d7c3c47976e6cb2170af2cb218a..bd4e9c3bda3905f3b77a73ae3a5752acd43e36d6 100644
--- a/hysop/backend/device/opencl/opencl_array_backend.py
+++ b/hysop/backend/device/opencl/opencl_array_backend.py
@@ -615,7 +615,7 @@ class OpenClArrayBackend(ArrayBackend):
                 affectation = '{}\[i\]\s*=\s*([\s\S]*?)\s*$'.format(argname)
                 affectation = re.compile(affectation)
             is_cast = (argcast[i] is not None)
-            for (k,v) in filter_expr.iteritems():
+            for (k,v) in filter_expr.items():
                 expr = filter_expr[k]
                 if expr is None:
                     continue
@@ -998,7 +998,7 @@ class OpenClArrayBackend(ArrayBackend):
                 return out
             def dic2str(dic):
                 entries=[]
-                for k,v in dic.iteritems():
+                for k,v in dic.items():
                     e = '{}: {}'.format(k, val2str(v))
                     entries.append(e)
                 return '\n\t'+'\n\t'.join(entries)
diff --git a/hysop/backend/device/opencl/opencl_autotunable_kernel.py b/hysop/backend/device/opencl/opencl_autotunable_kernel.py
index 934e576cc4e09e3ad74b402dd68e80d2e1e3a2ac..8c0f8046ba79a230925e186423f0a6d12379bd10 100644
--- a/hysop/backend/device/opencl/opencl_autotunable_kernel.py
+++ b/hysop/backend/device/opencl/opencl_autotunable_kernel.py
@@ -540,7 +540,7 @@ class OpenClAutotunableKernel(AutotunableKernel):
 
     def build_array_args(self, hardcode_arrays=False, **arrays):
         kernel_args = {}
-        for name, data in arrays.iteritems():
+        for name, data in arrays.items():
             check_instance(data, (OpenClArray, clArray.Array))
             base = '{}_base'.format(name)
             kernel_args[base] = data.base_data
diff --git a/hysop/backend/device/opencl/opencl_hardware_backend.py b/hysop/backend/device/opencl/opencl_hardware_backend.py
index 6f4cd5f9ce2cce45d13de9872170b1a3294064d8..60eb844b5b1893885f8bea741ffe7530dec3e09e 100644
--- a/hysop/backend/device/opencl/opencl_hardware_backend.py
+++ b/hysop/backend/device/opencl/opencl_hardware_backend.py
@@ -22,17 +22,17 @@ class OpenClBackend(HardwareBackend):
                                  platform_id=i,
                                  platform_handle=p)
             self._platforms[i] = plat
-    
+
     def to_string(self, indent=0, increment=2):
         new_indent = indent + increment
-        platforms = '\n'.join(x.to_string(indent=new_indent, increment=increment) 
+        platforms = '\n'.join(x.to_string(indent=new_indent, increment=increment)
                                         for x in self.platforms.values())
         ss=\
 '''
 {ind}::OpenClBackend::
 {ind}{inc}Data collected using pyopencl {} compiled against OpenCL headers v{}.
 
-{}'''.format(self._version_text, self._cl_header_version_text, platforms, 
+{}'''.format(self._version_text, self._cl_header_version_text, platforms,
         ind=' '*indent, inc=' '*increment)
         return ss
 
@@ -43,13 +43,13 @@ class OpenClBackend(HardwareBackend):
         return self.to_string()
 
 
-class OpenClBackendStatistics(HardwareStatistics): 
+class OpenClBackendStatistics(HardwareStatistics):
 
     def __init__(self, backend=None):
         self._platform_statistics = {}
         if (backend is not None):
             check_instance(backend, OpenClBackend)
-            for (pid,platform) in backend._platforms.iteritems():
+            for (pid,platform) in backend._platforms.items():
                 self._platform_statistics[platform._name] = platform.stats()
 
     @property
@@ -65,7 +65,7 @@ class OpenClBackendStatistics(HardwareStatistics):
             msg='Unknown type {}, expected OpenClBackend or OpenClBackendStatistics.'
             msg=msg.format(type(other))
             raise TypeError(msg)
-        for (pname, pstats) in other._platform_statistics.iteritems():
+        for (pname, pstats) in other._platform_statistics.items():
             if (pname in self._platform_statistics):
                 self._platform_statistics[pname] += pstats
             else:
@@ -76,7 +76,7 @@ class OpenClBackendStatistics(HardwareStatistics):
         ind = ' '*indent
         inc = ' '*increment
         ss = []
-        for (pname,plat) in self._platform_statistics.iteritems():
+        for (pname,plat) in self._platform_statistics.items():
             ss += ['{{ind}}Platform {}:'.format(pname)]
             ss += [plat.to_string(indent+increment, increment)]
         return '\n'.join(s.format(ind=ind, inc=inc) for s in ss)
diff --git a/hysop/backend/device/opencl/opencl_kernel.py b/hysop/backend/device/opencl/opencl_kernel.py
index aeb4477b3ce26581628173be6c16a0599eae426b..5516aed300b77f2167f73a60ec96652805de87dc 100644
--- a/hysop/backend/device/opencl/opencl_kernel.py
+++ b/hysop/backend/device/opencl/opencl_kernel.py
@@ -102,7 +102,7 @@ class OpenClKernel(object):
                 msg=msg.format(argname,
                         ', '.join('\'{}\''.format(a) for a in args_mapping.keys()))
                 raise ValueError(msg)
-        for argname, (argpos, argtype) in args_mapping.iteritems():
+        for argname, (argpos, argtype) in args_mapping.items():
             assert isinstance(argpos, int)
             if not isinstance(argtype, (type, npw.dtype)):
                 check_instance(argtype, tuple, values=type)
@@ -183,14 +183,14 @@ class OpenClKernel(object):
         args_mapping = self.args_mapping
         nargs        = len(args_mapping)
 
-        arguments = { k:w for (k,w) in default_args.iteritems() }
+        arguments = { k:w for (k,w) in default_args.items() }
         arguments.update(kwds)
 
-        parameters_map = {k:v for (k,v) in args_mapping.iteritems() if (k not in arguments)}
+        parameters_map = {k:v for (k,v) in args_mapping.items() if (k not in arguments)}
         iterated_parameters = {}
 
         args_list = {}
-        for (arg_name, arg_value) in arguments.iteritems():
+        for (arg_name, arg_value) in arguments.items():
             if (arg_name not in args_mapping):
                 msg='Unknown argument {}, valid ones are {}.'
                 msg=msg.format(arg_name, ', '.join(args_mapping.keys()))
diff --git a/hysop/backend/device/opencl/opencl_kernel_launcher.py b/hysop/backend/device/opencl/opencl_kernel_launcher.py
index 0d11ff695ff01018dee36e75df7bed9f97fdec0b..0d203fc6377340fc907a8610d471512945852782 100644
--- a/hysop/backend/device/opencl/opencl_kernel_launcher.py
+++ b/hysop/backend/device/opencl/opencl_kernel_launcher.py
@@ -131,7 +131,7 @@ class OpenClKernelListLauncher(object):
                     msg=msg.format(launcher.name)
                     raise RuntimeError(msg)
                 if isinstance(launcher, OpenClParametrizedKernelLauncher):
-                    parameters = {k: v[1] for (k,v) in launcher.parameters_map.iteritems()}
+                    parameters = {k: v[1] for (k,v) in launcher.parameters_map.items()}
                     self._update_parameters_from_parametrized_kernel(launcher, parameters)
                 elif isinstance(launcher, HostLauncherI):
                     parameters = launcher.parameters()
@@ -195,7 +195,7 @@ class OpenClKernelListLauncher(object):
         check_instance(kernel, (OpenClParametrizedKernelLauncher, HostLauncherI))
         check_instance(parameters, dict, keys=str, values=(type,npw.dtype))
         sparameters = self._parameters
-        for (pname, ptype) in parameters.iteritems():
+        for (pname, ptype) in parameters.items():
             if pname in sparameters:
                 (stype, op_names) = sparameters[pname]
                 if (stype != ptype):
@@ -213,7 +213,7 @@ class OpenClKernelListLauncher(object):
         check_instance(kernel_list_launcher, OpenClKernelListLauncher)
         parameters = kernel_list_launcher._parameters
         sparameters = self._parameters
-        for (pname, (ptype,knames)) in parameters.iteritems():
+        for (pname, (ptype,knames)) in parameters.items():
             if pname in sparameters:
                 (stype, op_names) = sparameters[pname]
                 if (stype != ptype):
@@ -585,7 +585,7 @@ class OpenClParametrizedKernelLauncher(OpenClKernelLauncher):
     def _set_kernel_args(self, **kwds):
         """Set the arguments of this kernel and return the kernel."""
         kernel = super(OpenClParametrizedKernelLauncher, self)._set_kernel_args()
-        for pname, (pindex, ptypes) in self._parameters_map.iteritems():
+        for pname, (pindex, ptypes) in self._parameters_map.items():
             assert pname in kwds, '{} was not given.'.format(pname)
             pval = kwds[pname]
             self.check_kernel_arg(pval, pindex, pname, ptypes)
@@ -667,7 +667,7 @@ class OpenClIterativeKernelLauncher(OpenClParametrizedKernelLauncher):
         iterated_parameter_arg_names = ()
         iterated_parameter_arg_types = ()
         iterated_parameter_generators = ()
-        for pname, pgen in iterated_parameters.iteritems():
+        for pname, pgen in iterated_parameters.items():
             assert pname in parameters_map
             arg_id, arg_type = parameters_map.pop(pname)
             iterated_parameter_arg_ids += (arg_id,)
diff --git a/hysop/backend/device/opencl/opencl_operator.py b/hysop/backend/device/opencl/opencl_operator.py
index f72cd70960ee9a3537837075e4004cf9ee6fccba..b88796c7b012d79a6338f8bc5ae68514950a3ea8 100644
--- a/hysop/backend/device/opencl/opencl_operator.py
+++ b/hysop/backend/device/opencl/opencl_operator.py
@@ -2,7 +2,7 @@
 discrete operators working on the OpenCl backend.
 
 * :class:`~hysop.backend.device.opencl.opencl_operator.OpenClOperator` is an abstract class
-    used to provide a common interface to all discrete operators working on the 
+    used to provide a common interface to all discrete operators working on the
     opencl backend.
 """
 from abc import ABCMeta
@@ -26,15 +26,15 @@ class OpenClOperator(ComputationalGraphOperator):
     Abstract class for discrete operators working on OpenCL backends.
     """
     __metaclass__ = ABCMeta
-    
+
     __default_method = {
             OpenClKernelConfig: OpenClKernelConfig()
         }
-    
+
     __available_methods = {
         OpenClKernelConfig: InstanceOf(OpenClKernelConfig)
     }
-    
+
     @classmethod
     def default_method(cls):
         dm = super(OpenClOperator, cls).default_method()
@@ -48,7 +48,7 @@ class OpenClOperator(ComputationalGraphOperator):
         return am
 
     @debug
-    def __init__(self, cl_env=None, mpi_params=None, 
+    def __init__(self, cl_env=None, mpi_params=None,
             requested_symbolic_kernels=None, **kwds):
         """
         Create the common attributes of all OpenCL operators.
@@ -64,14 +64,14 @@ class OpenClOperator(ComputationalGraphOperator):
         Notes
         -----
         About method keys:
-            OpenClKernelConfig: user build options, defines, precision 
+            OpenClKernelConfig: user build options, defines, precision
                                 and autotuner configuration
         """
         check_instance(cl_env, OpenClEnvironment, allow_none=True)
         check_instance(mpi_params, MPIParams, allow_none=True)
-        
+
         msg='mpi_params was {} and cl_env was {}.'
-            
+
         for topo in kwds.get('input_fields',{}).values()+kwds.get('output_fields',{}).values():
             if isinstance(topo, Topology):
                 if (cl_env is None):
@@ -100,15 +100,15 @@ class OpenClOperator(ComputationalGraphOperator):
             else:
                 cl_env = cl_env
                 msg=msg.format('given', 'given')
-        
+
         super(OpenClOperator, self).__init__(mpi_params=mpi_params, **kwds)
         self.cl_env = cl_env
-        
+
         if (cl_env.mpi_params != self.mpi_params):
             msg0='MPI Communicators do not match between OpenClEnvironment and MPIParams.'
             msg0+='\n  => {}'.format(msg)
             raise RuntimeError(msg0)
-    
+
     @classmethod
     def supported_backends(cls):
         """
@@ -123,12 +123,12 @@ class OpenClOperator(ComputationalGraphOperator):
         """
         from hysop.backend.device.opencl.opencl_tools import convert_precision
         super(OpenClOperator,self).handle_method(method)
-        
+
         assert OpenClKernelConfig in method
 
         kernel_config = method.pop(OpenClKernelConfig)
         autotuner_config = kernel_config.autotuner_config
-        
+
         precision = kernel_config.precision
         float_dump_mode = kernel_config.float_dump_mode
         use_short_circuit_ops = kernel_config.use_short_circuit_ops
@@ -144,32 +144,32 @@ class OpenClOperator(ComputationalGraphOperator):
                 precision = Precision.HALF
             else:
                 precision = Precision.DEFAULT
-        
+
         if precision in [Precision.LONG_DOUBLE, Precision.QUAD]:
             msg='Precision {} is not supported yet for OpenCl environment.'
             msg=msg.format(precision)
             raise NotImplementedError(msg)
-   
-        self.typegen = self.cl_env.build_typegen(precision=precision, 
+
+        self.typegen = self.cl_env.build_typegen(precision=precision,
                 float_dump_mode=float_dump_mode,
-                use_short_circuit_ops=use_short_circuit_ops, 
+                use_short_circuit_ops=use_short_circuit_ops,
                 unroll_loops=unroll_loops)
         self.autotuner_config = autotuner_config
         self.kernel_config = kernel_config
         self.precision = convert_precision(precision)
-        
+
         self._initialize_cl_build_options(kernel_config.user_build_options)
         self._initialize_cl_size_constants(kernel_config.user_size_constants)
         self._initialize_kernel_generator()
-        
+
     def check(self):
         super(OpenClOperator, self).check()
         self._check_cl_env()
-        
+
     @debug
-    def create_topology_descriptors(self): 
-        # by default we create OPENCL (gpu) TopologyDescriptors 
-        for (field, topo_descriptor) in self.input_fields.iteritems():
+    def create_topology_descriptors(self):
+        # by default we create OPENCL (gpu) TopologyDescriptors
+        for (field, topo_descriptor) in self.input_fields.items():
             topo_descriptor = TopologyDescriptor.build_descriptor(
                     backend=Backend.OPENCL,
                     operator=self,
@@ -178,7 +178,7 @@ class OpenClOperator(ComputationalGraphOperator):
                     cl_env=self.cl_env)
             self.input_fields[field] = topo_descriptor
 
-        for (field, topo_descriptor) in self.output_fields.iteritems():
+        for (field, topo_descriptor) in self.output_fields.items():
             topo_descriptor = TopologyDescriptor.build_descriptor(
                     backend=Backend.OPENCL,
                     operator=self,
@@ -194,7 +194,7 @@ class OpenClOperator(ComputationalGraphOperator):
         topology requirements are:
             1) min and max ghosts for each input and output variables
             2) allowed splitting directions for cartesian topologies
-            3) required local and global transposition state, if any. 
+            3) required local and global transposition state, if any.
             and more
         they are stored in self.input_field_requirements and
         self.output_field_requirements.
@@ -214,13 +214,13 @@ class OpenClOperator(ComputationalGraphOperator):
             req.memory_order = MemoryOrdering.C_CONTIGUOUS
 
         return requirements
-    
+
     @debug
     def discretize(self):
         if self.discretized:
             return
         super(OpenClOperator, self).discretize()
-    
+
     @debug
     def setup(self, work):
         super(OpenClOperator, self).setup(work)
@@ -245,12 +245,12 @@ class OpenClOperator(ComputationalGraphOperator):
 
     def build_options(self):
         """
-        Build and return opencl build option string from 
+        Build and return opencl build option string from
         self._cl_build_options and self._cl_defines.
         """
         build_options = self._cl_build_options
         defines = set()
-        for define,value in self._cl_defines.iteritems():
+        for define,value in self._cl_defines.items():
             if (value is not None):
                 define = '{}={}'.format(define.strip(), value.strip())
             else:
@@ -262,14 +262,14 @@ class OpenClOperator(ComputationalGraphOperator):
     @debug
     def _check_cl_env(self):
         """
-        Check if all topologies are on OpenCL backend and check that all opencl environments 
+        Check if all topologies are on OpenCL backend and check that all opencl environments
         match.
         """
         topo = (self.input_fields.values()+self.output_fields.values())[0]
         assert isinstance(topo, TopologyView)
         assert topo.backend.kind == Backend.OPENCL
         ref_env = self.cl_env
-        
+
         for topo in set(self.input_fields.values()+self.output_fields.values()):
             assert isinstance(topo, TopologyView)
             assert topo.backend.kind == Backend.OPENCL
@@ -308,9 +308,9 @@ class OpenClOperator(ComputationalGraphOperator):
         """
         from hysop.backend.device.opencl.opencl_elementwise import OpenClElementwiseKernelGenerator
         self.elementwise_kernel_generator = OpenClElementwiseKernelGenerator(
-                cl_env=self.cl_env, 
+                cl_env=self.cl_env,
                 kernel_config=self.kernel_config)
-   
+
     @classmethod
     def supports_multiple_topologies(cls):
         return True
diff --git a/hysop/backend/device/opencl/opencl_platform.py b/hysop/backend/device/opencl/opencl_platform.py
index 89bcc705b2a4115a7936f7979821bc341f14aecf..78e20228c6b1bdf9ba328813a7f32a15783a2152 100644
--- a/hysop/backend/device/opencl/opencl_platform.py
+++ b/hysop/backend/device/opencl/opencl_platform.py
@@ -24,19 +24,19 @@ class OpenClPlatform(Platform):
         # we do not keep a reference to platform_handle as we
         # need to pickle this object
 
-        super(OpenClPlatform, self).__init__(hardware_topo=hardware_topo, 
+        super(OpenClPlatform, self).__init__(hardware_topo=hardware_topo,
                 platform_id=platform_id, platform_handle=platform_handle, **kwds)
 
     @classmethod
     def handle_cls(cls):
         return cl.Platform
-    
+
     def _discover_devices(self, hardware_topo, platform_handle):
         for i, device_handle in enumerate(platform_handle.get_devices()):
             dev = OpenClDevice(platform=self,
                                platform_handle=platform_handle,
                                device_id=i,
-                               device_handle=device_handle, 
+                               device_handle=device_handle,
                                hardware_topo=hardware_topo)
             self._logical_devices[i] = dev
 
@@ -45,7 +45,7 @@ class OpenClPlatform(Platform):
         ind=' '*indent
         inc=' '*increment
         new_indent = indent + 2*increment
-        devices = '\n'.join(x.to_string(indent=new_indent, increment=increment, short=True) 
+        devices = '\n'.join(x.to_string(indent=new_indent, increment=increment, short=True)
                                         for x in self.logical_devices.values())
         sep = '\n{ind}{inc}{inc}{inc}|'.format(ind=ind, inc=inc)
         extensions = sep + sep.join(e.strip() for e in sorted(self._extensions.split(' ')) if e not in ('', ' ', '\t', '\n'))
@@ -54,7 +54,7 @@ class OpenClPlatform(Platform):
 {ind}{inc}*Profile: {}
 {}
 {ind}{inc}*Extensions: {}
-'''.format(self._platform_id, self._name, self._version, self._profile, devices, extensions, 
+'''.format(self._platform_id, self._name, self._version, self._profile, devices, extensions,
             ind=ind+inc, inc=inc)
         return ss
 
@@ -65,7 +65,7 @@ class OpenClPlatform(Platform):
         return OpenClPlatformStatistics(self)
 
 
-class OpenClPlatformStatistics(HardwareStatistics): 
+class OpenClPlatformStatistics(HardwareStatistics):
     def __init__(self, platform=None):
         self._name = None
         self._counter = 0
@@ -74,7 +74,7 @@ class OpenClPlatformStatistics(HardwareStatistics):
             check_instance(platform, OpenClPlatform)
             self._name = platform._name
             self._counter += 1
-            for (device_id,device) in platform._logical_devices.iteritems():
+            for (device_id,device) in platform._logical_devices.items():
                 self._device_statistics[device._name] = device.stats()
 
     @property
@@ -95,7 +95,7 @@ class OpenClPlatformStatistics(HardwareStatistics):
             self._name = other._name
         assert self._name == other._name
         self._counter += other._counter
-        for (dname, dstats) in other._device_statistics.iteritems():
+        for (dname, dstats) in other._device_statistics.items():
             if (dname in self._device_statistics):
                 self._device_statistics[dname] += dstats
             else:
@@ -107,11 +107,11 @@ class OpenClPlatformStatistics(HardwareStatistics):
         inc = ' '*increment
         if self._device_statistics:
             ss = []
-            for dname in sorted(self._device_statistics, 
+            for dname in sorted(self._device_statistics,
                     key=lambda k: -self._device_statistics[k]._counter):
                 dstats = self._device_statistics[dname]
                 ss += [dstats.to_string(indent+increment, increment)]
         else:
-            ss += ['{ind}{inc}No device found.']
+            ss = ['{ind}{inc}No device found.']
         return '\n'.join(s.format(ind=ind, inc=inc) for s in ss)
-    
+
diff --git a/hysop/backend/device/opencl/opencl_symbolic.py b/hysop/backend/device/opencl/opencl_symbolic.py
index 9c2a93d9001da31d2c4b313f147efb88c2515899..a57ec6a0c54ce7eb2442ebcd146c6ed0f6f0911d 100644
--- a/hysop/backend/device/opencl/opencl_symbolic.py
+++ b/hysop/backend/device/opencl/opencl_symbolic.py
@@ -172,7 +172,7 @@ class OpenClSymbolic(OpenClOperator):
         check_instance(output_tensor_fields, tuple, values=Field)
 
         def _cmp(name, op_vars, expr_vars, exprs):
-            for (expr_var_key, expr_var_value) in expr_vars.iteritems():
+            for (expr_var_key, expr_var_value) in expr_vars.items():
                 vname = expr_var_key if isinstance(expr_var_key, str) else expr_var_key.name
                 if (expr_var_key not in op_vars):
                     msg='{} has not been set as {} in {}.__init__() but is required '
@@ -190,7 +190,7 @@ class OpenClSymbolic(OpenClOperator):
         variables = input_fields.copy()
         variables.update(output_fields)
         direction = None
-        for (name, exprs) in self.expressions.iteritems():
+        for (name, exprs) in self.expressions.items():
             expr_info = SymbolicExpressionParser.parse(name, variables, *exprs)
             if expr_info.has_direction:
                 if (direction is None) :
@@ -279,11 +279,11 @@ class OpenClSymbolic(OpenClOperator):
                         min_ghosts_per_components[field] = min_ghosts
 
             expr_info.min_ghosts = {k:npw.max(v, axis=0) for (k,v) in \
-                                            min_ghosts_per_components.iteritems()}
+                                            min_ghosts_per_components.items()}
             expr_info.min_ghosts_per_components = {field:gpc[:,-1-direction]
-                                for (field,gpc) in min_ghosts_per_components.iteritems()}
+                                for (field,gpc) in min_ghosts_per_components.items()}
 
-            for (array, reqs) in array_reqs.iteritems():
+            for (array, reqs) in array_reqs.items():
                 expr_info.min_ghosts[array] = reqs.min_ghosts.copy()
                 expr_info.min_ghosts_per_components[array] = reqs.min_ghosts[-1-direction]
         return requirements
@@ -320,7 +320,7 @@ class OpenClSymbolic(OpenClOperator):
         kernel_autotuner = OpenClAutotunableCustomSymbolicKernel(cl_env=cl_env, typegen=typegen,
                 build_opts=build_opts, autotuner_config=autotuner_config)
 
-        for (name, expr_info) in self.expr_infos.iteritems():
+        for (name, expr_info) in self.expr_infos.items():
             kernel, args_dict, update_input_parameters = \
                     kernel_autotuner.autotune(expr_info=expr_info, **self.extra_kwds[name])
             kl = kernel.build_launcher(**args_dict)
diff --git a/hysop/backend/device/opencl/operator/derivative.py b/hysop/backend/device/opencl/operator/derivative.py
index 222811aecf57f4de378258b2ee1f8620cc7c3943..7612f7db3bb7b86e69199f020193db0996081be3 100644
--- a/hysop/backend/device/opencl/operator/derivative.py
+++ b/hysop/backend/device/opencl/operator/derivative.py
@@ -20,12 +20,12 @@ from hysop.operator.base.custom_symbolic_operator import SymbolicExpressionParse
 from hysop.symbolic.relational import Assignment
 
 
-class OpenClFiniteDifferencesSpaceDerivative(FiniteDifferencesSpaceDerivativeBase, 
+class OpenClFiniteDifferencesSpaceDerivative(FiniteDifferencesSpaceDerivativeBase,
                                              OpenClSymbolic):
     @debug
     def __init__(self, **kwds):
         super(OpenClFiniteDifferencesSpaceDerivative, self).__init__(require_tmp=False, **kwds)
-        
+
         Fin  = self.Fin.s()
         Fout = self.Fout.s()
         A = self.A
@@ -39,12 +39,12 @@ class OpenClFiniteDifferencesSpaceDerivative(FiniteDifferencesSpaceDerivativeBas
         xd = space_symbols[self.direction]
         expr = Assignment(Fout, A*Fin.diff(xd, self.directional_derivative))
         self.require_symbolic_kernel('compute_derivative', expr)
-    
+
     @debug
     def setup(self, work):
         super(OpenClFiniteDifferencesSpaceDerivative, self).setup(work)
         (self.kernel, self.update_parameters) = self.symbolic_kernels['compute_derivative']
-    
+
     @op_apply
     def apply(self, **kwds):
         queue = self.cl_env.default_queue
@@ -57,13 +57,13 @@ class OpenClSpectralSpaceDerivative(SpectralSpaceDerivativeBase, OpenClSymbolic)
     Compute a derivative of a scalar field in a given direction
     using spectral methods.
     """
-    
+
     @debug
     def __init__(self, **kwds):
         """
         Initialize a SpectralSpaceDerivative operator on the opencl backend.
 
-        See hysop.operator.base.derivative.SpectralSpaceDerivativeBase for 
+        See hysop.operator.base.derivative.SpectralSpaceDerivativeBase for
         more information.
 
         Parameters
@@ -93,10 +93,10 @@ class OpenClSpectralSpaceDerivative(SpectralSpaceDerivativeBase, OpenClSymbolic)
             self._do_scale = True
         else:
             self._do_scale = False
-        
+
         Kr = 1
         Kc = None
-        for (wn, indexed_wn) in self.tg._indexed_wave_numbers.iteritems():
+        for (wn, indexed_wn) in self.tg._indexed_wave_numbers.items():
             if wn.is_real:
                 Kr *= indexed_wn
             else:
@@ -118,7 +118,7 @@ class OpenClSpectralSpaceDerivative(SpectralSpaceDerivativeBase, OpenClSymbolic)
     @debug
     def discretize(self, **kwds):
         super(OpenClSpectralSpaceDerivative, self).discretize(**kwds)
-    
+
     @debug
     def setup(self, work):
         super(OpenClSpectralSpaceDerivative, self).setup(work=work)
@@ -126,10 +126,10 @@ class OpenClSpectralSpaceDerivative(SpectralSpaceDerivativeBase, OpenClSymbolic)
             (self.scale_kernel, self.scale_update_parameters) = self.symbolic_kernels['scale_derivative']
         else:
             self.scale_derivative_kernel = lambda **kwds: None
-            self.scale_update_parameters = lambda: {} 
+            self.scale_update_parameters = lambda: {}
         self.compute_derivative_kernel, _ = self.symbolic_kernels['compute_derivative']
 
-    
+
     @op_apply
     def apply(self, **kwds):
         queue = self.cl_env.default_queue
diff --git a/hysop/backend/device/opencl/operator/external_force.py b/hysop/backend/device/opencl/operator/external_force.py
index 6bbce34dbd2f6df4a2f8bd5beba1b8b1c810efbd..f5e66c6df979c2ffac9b196ae808e837a68321f8 100644
--- a/hysop/backend/device/opencl/operator/external_force.py
+++ b/hysop/backend/device/opencl/operator/external_force.py
@@ -28,7 +28,7 @@ class OpenClSpectralExternalForce(SpectralExternalForceOperatorBase, OpenClSymbo
     """
     Operator to compute the curl of a symbolic expression.
     """
-    
+
     @debug
     def __init__(self, Fext, **kwds):
         check_instance(Fext, SymbolicExternalForce)
@@ -41,7 +41,7 @@ class SymbolicExternalForce(ExternalForce):
         """
         Specify an external force as a tuple of symbolic expressions.
 
-        2D ExternalForce example:  
+        2D ExternalForce example:
             1) Fext = -rho*g*e_y where rho is a field and g a constant
                 Fext = (0, -rho.s()*g)
             2) Fext = (Rs*S+C)*e_y
@@ -61,10 +61,10 @@ class SymbolicExternalForce(ExternalForce):
         Fext = tuple(Fext)
 
         super(SymbolicExternalForce, self).__init__(name=name, dim=dim, Fext=Fext, **kwds)
-        
+
         diffusion = first_not_None(diffusion, {})
-        diffusion = {k:v for (k,v) in diffusion.iteritems() if (v is not None)}
-        for (k,v) in diffusion.iteritems():
+        diffusion = {k:v for (k,v) in diffusion.items() if (v is not None)}
+        for (k,v) in diffusion.items():
             assert k in self.input_fields(), k.short_description()
             assert isinstance(v, ScalarParameter)
         self._diffusion = diffusion
@@ -79,7 +79,7 @@ class SymbolicExternalForce(ExternalForce):
         return p0.union(p1)
     def output_params(self):
         return set()
-    
+
     def initialize(self, op):
         tg = op.new_transform_group()
         fft_fields = tuple(self.input_fields())
@@ -111,12 +111,12 @@ class SymbolicExternalForce(ExternalForce):
-                frame = replace.values()[0].frame
+                frame = next(iter(replace.values())).frame
                 e = e.xreplace(replace)
             fft_expressions += (e,)
-        
+
         if (frame is None):
             msg='Could not extract frame from expressions.'
-            raise RuntimeError(frame)
+            raise RuntimeError(msg)
         fft_expressions = to_tuple(curl(fft_expressions, frame))
-        
+
         self.tg = tg
         self.forward_transforms  = forward_transforms
         self.backward_transforms = backward_transforms
@@ -128,7 +128,7 @@ class SymbolicExternalForce(ExternalForce):
         dts  = op.dt.s
         forces = op.force.s()
         diffusion_kernels = {}
-        for (f, nu) in self.diffusion.iteritems():
+        for (f, nu) in self.diffusion.items():
             nus = nu.s
             kname = 'filter_diffusion_{}d_{}'.format(f.dim, f.name)
             Ft = self.forward_transforms[f]
@@ -141,23 +141,23 @@ class SymbolicExternalForce(ExternalForce):
             expr = Assignment(Fs, Fs / (1 - nus*dts*E))
             op.require_symbolic_kernel(kname, expr)
             diffusion_kernels[f] = kname
-        
+
         force_kernels = ()
         vorticity_kernels = ()
         assert len(op.vorticity.fields)==len(op.force.fields)==len(self.fft_expressions)
         for (Fi,Wi,e) in zip(
                 op.force.fields,
-                op.vorticity.fields, 
+                op.vorticity.fields,
                 self.fft_expressions):
             if (e==0):
                 force_kernels     += (None,)
                 vorticity_kernels += (None,)
                 continue
-            
+
             Fi_hat = self.force_backward_transforms[Fi]
             Fi_buf = Fi_hat.input_symbolic_array('{}_hat'.format(Fi.name))
             Wn     = self.tg.push_expressions(Assignment(Fi_hat, e))
-            
+
             msg='Could not extract transforms.'
             try:
                 transforms = e.atoms(AppliedSpectralTransform)
@@ -165,9 +165,9 @@ class SymbolicExternalForce(ExternalForce):
                 raise RuntimeError(msg)
             assert len(transforms)>=1, msg
 
-            fft_buffers = { Ft.s: Ft.output_symbolic_array('{}_hat'.format(Ft.field.name)) 
+            fft_buffers = { Ft.s: Ft.output_symbolic_array('{}_hat'.format(Ft.field.name))
                                 for Ft in self.forward_transforms.values() }
-            wavenumbers = { Wi: self.tg._indexed_wave_numbers[Wi] 
+            wavenumbers = { Wi: self.tg._indexed_wave_numbers[Wi]
                                 for Wi in Wn }
 
             replace = {}
@@ -175,7 +175,7 @@ class SymbolicExternalForce(ExternalForce):
             replace.update(wavenumbers)
             expr = e.xreplace(replace)
             expr = Assignment(Fi_buf, expr)
-            
+
             kname = 'compute_{}'.format(Fi.var_name)
             op.require_symbolic_kernel(kname, expr)
             force_kernels += (kname,)
@@ -195,10 +195,10 @@ class SymbolicExternalForce(ExternalForce):
 
     def discretize(self, op):
         pass
-    
+
     def get_mem_requests(self, op):
         requests = {}
-        for Fi in self.forward_transforms.keys(): 
+        for Fi in self.forward_transforms.keys():
             Ft = self.forward_transforms[Fi]
             Bt = self.backward_transforms.get(Fi, None)
             if (Bt is not None):
@@ -207,15 +207,15 @@ class SymbolicExternalForce(ExternalForce):
                 assert (Ft.output_shape == Bt.input_shape), (Ft.output_shape, Bt.input_shape)
             shape = Ft.output_shape
             dtype = Ft.output_dtype
-            request = MemoryRequest(backend=self.tg.backend, dtype=dtype, 
+            request = MemoryRequest(backend=self.tg.backend, dtype=dtype,
                                     shape=shape, nb_components=1,
                                     alignment=op.min_fft_alignment)
             name = '{}_hat'.format(Ft.field.name)
             requests[name] = request
         return requests
-    
+
     def pre_setup(self, op, work):
-        for Fi in self.forward_transforms.keys(): 
+        for Fi in self.forward_transforms.keys():
             Ft = self.forward_transforms[Fi]
             Bt = self.backward_transforms.get(Fi, None)
             dtmp, = work.get_buffer(op, '{}_hat'.format(Ft.field.name))
@@ -236,14 +236,14 @@ class SymbolicExternalForce(ExternalForce):
                 kwds = update_params()
                 return knl(queue=queue, **kwds)
             return kernel_launcher
-        
-        for (field, kname) in self.diffusion_kernel_names.iteritems():
+
+        for (field, kname) in self.diffusion_kernel_names.items():
             dfield = op.get_input_discrete_field(field)
             knl, update_params = op.symbolic_kernels[kname]
             diffusion_kernels[field] = build_launcher(knl, update_params)
             ghost_exchangers[field] = functools.partial(dfield.build_ghost_exchanger(),
                                                                             queue=queue)
-        
+
         if (op.Fmin is not None):
             min_values = npw.asarray(op.Fmin()).copy()
         if (op.Fmax is not None):
@@ -258,23 +258,23 @@ class SymbolicExternalForce(ExternalForce):
             Fi  = op.force.fields[i]
             dWi = op.dW.dfields[i]
             dFi = op.dF.dfields[i]
-            
+
             knl, update_params = op.symbolic_kernels[kname0]
             force_kernels[(Fi,Wi)]  = build_launcher(knl, update_params)
-            
+
             knl, update_params = op.symbolic_kernels[kname1]
             vorticity_kernels[(Fi,Wi)] = build_launcher(knl, update_params)
 
             ghost_exchangers[Wi] = functools.partial(dWi.build_ghost_exchanger(), queue=queue)
-            
-            def compute_statistic(op=op, queue=queue, dFi=dFi, 
+
+            def compute_statistic(op=op, queue=queue, dFi=dFi, i=i,
                                 min_values=min_values, max_values=max_values):
                 if (op.Fmin is not None):
                     min_values[i] = dFi.sdata.min(queue=queue).get()
                 if (op.Fmax is not None):
                     max_values[i] = dFi.sdata.max(queue=queue).get()
             compute_statistics[Fi] = compute_statistic
-        
+
         def update_statistics(op=op, min_values=min_values, max_values=max_values):
             if (op.Fmin is not None):
                 op.Fmin.value = min_values
@@ -282,7 +282,7 @@ class SymbolicExternalForce(ExternalForce):
                 op.Fmax.value = max_values
             if (op.Finf is not None):
                 op.Finf.value = npw.maximum(npw.abs(min_values), npw.abs(max_values))
-                
+
         assert len(diffusion_kernels) == len(self.diffusion) == len(self.backward_transforms)
         assert len(vorticity_kernels) == len(force_kernels) == len(self.force_backward_transforms)
         assert len(ghost_exchangers) == len(diffusion_kernels) + len(vorticity_kernels)
@@ -293,16 +293,16 @@ class SymbolicExternalForce(ExternalForce):
         self.ghost_exchangers   = ghost_exchangers
         self.compute_statistics = compute_statistics
         self.update_statistics  = update_statistics
-    
+
     @op_apply
     def apply(self, op, **kwds):
-        for (field, Ft) in self.forward_transforms.iteritems():
+        for (field, Ft) in self.forward_transforms.items():
             evt = Ft()
             if (field in self.backward_transforms):
                 evt = self.diffusion_kernels[field]()
                 evt = self.backward_transforms[field]()
                 evt = self.ghost_exchangers[field]()
-        
+
         for (Fi,Wi) in self.force_kernels.keys():
             evt = self.force_kernels[(Fi,Wi)]()
             evt = self.force_backward_transforms[Fi]()
@@ -323,17 +323,17 @@ class SymbolicExternalForce(ExternalForce):
 
     def short_description(self):
         return 'SymbolicExternalForce[name={}]'.format(self.name)
-    
+
     def long_description(self):
         sep = '\n      *'
         expressions = sep + sep.join('F{} = {}'.format(x,e) for (x,e) in zip('xyz',self.Fext))
-        diffusion = sep + sep.join('{}: {}'.format(f.pretty_name, p.pretty_name) 
-                                        for (f,p) in self.diffusion.iteritems())
+        diffusion = sep + sep.join('{}: {}'.format(f.pretty_name, p.pretty_name)
+                                        for (f,p) in self.diffusion.items())
         input_fields  = ', '.join(f.pretty_name for f in self.input_fields())
         output_fields = ', '.join(f.pretty_name for f in self.output_fields())
         input_params  = ', '.join(p.pretty_name for p in self.input_params())
         output_params = ', '.join(p.pretty_name for p in self.output_params())
-        
+
         ss = \
         '''SymbolicExternalForce:
     name:          {}
@@ -345,8 +345,8 @@ class SymbolicExternalForce(ExternalForce):
     output_fields: {}
     input_params:  {}
     output_params: {}
-        '''.format(self.name, self.pretty_name, 
-                expressions, diffusion, 
+        '''.format(self.name, self.pretty_name,
+                expressions, diffusion,
                 input_fields, output_fields,
                 input_params, output_params)
         return ss
diff --git a/hysop/backend/hardware/hwinfo.py b/hysop/backend/hardware/hwinfo.py
index f36c5e1ec396151466e96e609eec1b2f3ff252d1..227203cac644e4e2c4fdb7b00ddf4f353d499bd6 100644
--- a/hysop/backend/hardware/hwinfo.py
+++ b/hysop/backend/hardware/hwinfo.py
@@ -103,7 +103,7 @@ class TopologyObject(object):
 
     def print_attributes(self):
         print('{} attributes:'.format(self.__class__.__name__))
-        for k,v in self.attributes().iteritems():
+        for k,v in self.attributes().items():
             print(' {} -> {}'.format(k,v))
         print()
 
@@ -139,7 +139,7 @@ class TopologyObject(object):
             msg='Type \'{}\' does not match parsed type \'{}\'.'
             msg=msg.format(_type, self._parsed_type())
             raise ValueError(msg)
-        for k,v in attributes.iteritems():
+        for k,v in attributes.items():
             if (k.find('cpuset')>=0) or (k.find('nodeset')>=0):
                 vv = tuple(int(x,16) if x != '' else 0 for x in v.split(','))
                 v = 0
@@ -263,7 +263,7 @@ class TopologyStatistics(HardwareStatistics):
         self._processing_units  += other._processing_units
         self._has_opencl        += other._has_opencl
         self._has_cuda          += other._has_cuda
-        for (k,v) in other._backend_statistics.iteritems():
+        for (k,v) in other._backend_statistics.items():
             if k in self._backend_statistics:
                 self._backend_statistics[k] += v
             else:
diff --git a/hysop/backend/hardware/pci.py b/hysop/backend/hardware/pci.py
index 8f1ac9afd9c7a345fe8d657576b22786c3400a77..bcc06f07d45528bdc23d1eda1837c6d01c940a58 100644
--- a/hysop/backend/hardware/pci.py
+++ b/hysop/backend/hardware/pci.py
@@ -113,7 +113,7 @@ class OperatingSystemDevice(TopologyObject):
             backend_info = self.backend_info()
             if backend_info:
                 content=''
-                for k,v in backend_info.iteritems():
+                for k,v in backend_info.items():
                     content += '\n*{}: {}'.format(k,v)
                 return header+self.indent(content)
             else:
diff --git a/hysop/backend/host/fortran/operator/fortran_fftw.py b/hysop/backend/host/fortran/operator/fortran_fftw.py
index 87c552aabc51fff77872767178d61e902d5b5e97..643edfdaa22effa3c8a66b3c0adfb3f77afc1d82 100644
--- a/hysop/backend/host/fortran/operator/fortran_fftw.py
+++ b/hysop/backend/host/fortran/operator/fortran_fftw.py
@@ -62,8 +62,8 @@ class FortranFFTWOperator(FortranOperator):
                     msg+='\n  lboundaries: {}'.format(fi.lboundaries)
                     msg+='\n  rboundaries: {}'.format(fi.rboundaries)
                     raise RuntimeError(msg)
-    
-        # Special case: 3D diffusion of a scalar 
+
+        # Special case: 3D diffusion of a scalar
         self._scalar_3d = (nb_components == 1) and all(tf.nb_components==1 for tf in tensor_fields)
 
-        domain = self.input_fields.keys()[0].domain
+        domain = next(iter(self.input_fields)).domain
@@ -93,9 +93,9 @@ class FortranFFTWOperator(FortranOperator):
         super(FortranFFTWOperator,self).handle_topologies(input_topology_states, output_topology_states)
 
-        topology = self.input_fields.values()[0].topology
+        topology = next(iter(self.input_fields.values())).topology
-        for (field,topoview) in self.input_fields.iteritems():
+        for (field,topoview) in self.input_fields.items():
             assert all(topoview.topology.cart_shape == topology.cart_shape), 'topology mismatch'
-        for (field,topoview) in self.output_fields.iteritems():
+        for (field,topoview) in self.output_fields.items():
             assert all(topoview.topology.cart_shape == topology.cart_shape), 'topology mismatch'
         self.topology = topology
 
diff --git a/hysop/backend/host/host_operator.py b/hysop/backend/host/host_operator.py
index 0ec1cb73642a585a8797b2c26d637807ea2590fe..bf818f257ef66bf49f683a6097b2b4bd40cf0045 100644
--- a/hysop/backend/host/host_operator.py
+++ b/hysop/backend/host/host_operator.py
@@ -65,7 +65,7 @@ class OpenClMappable(object):
     def create_topology_descriptors(self):
         if self.enable_opencl_host_buffer_mapping:
             # enforce opencl topology on host operator
-            for (field, topo_descriptor) in self.input_fields.iteritems():
+            for (field, topo_descriptor) in self.input_fields.items():
                 topo_descriptor = TopologyDescriptor.build_descriptor(
                         backend=Backend.OPENCL,
                         operator=self,
@@ -74,7 +74,7 @@ class OpenClMappable(object):
                         cl_env=self.cl_env)
                 self.input_fields[field] = topo_descriptor
 
-            for (field, topo_descriptor) in self.output_fields.iteritems():
+            for (field, topo_descriptor) in self.output_fields.items():
                 topo_descriptor = TopologyDescriptor.build_descriptor(
                         backend=Backend.OPENCL,
                         operator=self,
@@ -180,7 +180,7 @@ class OpenClMappable(object):
         msg='Device memory objects have already been mapped to host.'
         assert not self.__mapped, msg
         evt = None
-        for (obj_key, (dev_buf, flags)) in self.__registered_objects.iteritems():
+        for (obj_key, (dev_buf, flags)) in self.__registered_objects.items():
             if DEBUG:
                 msg='Mapping {}...'.format(obj_key.full_tag)
                 print(msg)
@@ -189,7 +189,7 @@ class OpenClMappable(object):
             else:
                 host_buf, evt = dev_buf.map_to_host(queue=queue, is_blocking=is_blocking, flags=flags)
             self.__mapped_objects[obj_key] = host_buf
-        for (get_key, (obj_key, getter)) in self.__registered_getters.iteritems():
+        for (get_key, (obj_key, getter)) in self.__registered_getters.items():
             if DEBUG:
                 msg='Applying getter {} to mapped buffer {}...'.format(get_key.full_tag, obj_key.full_tag)
                 print(msg)
diff --git a/hysop/backend/host/python/operator/analytic.py b/hysop/backend/host/python/operator/analytic.py
index 7d9dd1a93dbb7473cfdbc16c6ad8ae4047955b7f..b95c358d02ab1af53edff9f6db35b91a489a1369 100644
--- a/hysop/backend/host/python/operator/analytic.py
+++ b/hysop/backend/host/python/operator/analytic.py
@@ -14,12 +14,12 @@ class PythonAnalyticField(HostOperator):
     """
 
     @debug
-    def __init__(self, field, formula, variables, 
-            extra_input_kwds=None, **kwds): 
+    def __init__(self, field, formula, variables,
+            extra_input_kwds=None, **kwds):
         """
         Initialize a Analytic operator on the python backend.
 
-        Apply a user-defined formula onto a field, possibly 
+        Apply a user-defined formula onto a field, possibly
         dependent on space variables and external fields/parameters.
 
         Parameters
@@ -37,7 +37,7 @@ class PythonAnalyticField(HostOperator):
         extra_input_kwds: dict, optional
             Extra inputs that will be forwarded to the formula.
             Fields and Parameters are handled correctly as input requirements.
-            If the output field is modified inplace, it should be added 
+            If the output field is modified inplace, it should be added
             to extra_input_kwds.
         kwds: dict, optional
             Base class arguments.
@@ -55,7 +55,7 @@ class PythonAnalyticField(HostOperator):
 
         extra_kwds = {}
         map_fields = {}
-        for (k,v) in extra_input_kwds.iteritems():
+        for (k,v) in extra_input_kwds.items():
             if isinstance(v, Field):
                 input_fields[v] = self.get_topo_descriptor(variables, v)
                 map_fields[v] = k
@@ -65,7 +65,7 @@ class PythonAnalyticField(HostOperator):
             else:
                 extra_kwds[k] = v
 
-        super(PythonAnalyticField, self).__init__(input_fields=input_fields, 
+        super(PythonAnalyticField, self).__init__(input_fields=input_fields,
                 output_fields=output_fields,
                 input_params=input_params, **kwds)
 
@@ -73,7 +73,7 @@ class PythonAnalyticField(HostOperator):
         self.formula = formula
         self.extra_kwds = extra_kwds
         self.map_fields = map_fields
-    
+
     @debug
     def discretize(self):
         if self.discretized:
@@ -86,7 +86,7 @@ class PythonAnalyticField(HostOperator):
         assert 'coords' not in extra_kwds
         extra_kwds['data']   = dfield.compute_data[0]
         extra_kwds['coords'] = dfield.compute_mesh_coords
-        for (field, dfield) in self.input_discrete_fields.iteritems():
+        for (field, dfield) in self.input_discrete_fields.items():
             assert field.name not in extra_kwds, field.name
             extra_kwds[map_fields[field]] = dfield.compute_data
         self.dfield = dfield
@@ -97,7 +97,7 @@ class PythonAnalyticField(HostOperator):
         super(PythonAnalyticField, self).apply(**kwds)
         self.formula(**self.extra_kwds)
         self.dfield.exchange_ghosts()
-    
+
     @classmethod
     def supports_mpi(cls):
         return True
diff --git a/hysop/backend/host/python/operator/penalization.py b/hysop/backend/host/python/operator/penalization.py
index 238b8dad33a617e40e918d7af607935c20839a09..4fb5ae297cbad85c765108a33e14d50647e5d9e8 100644
--- a/hysop/backend/host/python/operator/penalization.py
+++ b/hysop/backend/host/python/operator/penalization.py
@@ -67,7 +67,7 @@ class PythonPenalizeVorticity(HostOperator):
 
         if isinstance(obstacles, dict):
             obs = {}
-            for c, o in obstacles.iteritems():
+            for c, o in obstacles.items():
                 if isinstance(c, ScalarParameter):
-                    obs[lambda x: c()*x] = o
+                    obs[lambda x, c=c: c()*x] = o
                 elif isinstance(c, type(lambda x: x)):
@@ -148,7 +148,7 @@ class PythonPenalizeVorticity(HostOperator):
         W = tuple(Wi[view] for Wi in dw.buffers)
         self.W, self.V = W, V
         self.dobstacles = {}
-        for c, o in self.obstacles.iteritems():
+        for c, o in self.obstacles.items():
             o_df = self.input_discrete_fields[o]
             self.dobstacles[c] = o_df.data[0][o_df.local_slices(
                 ghosts=(G, )*dim)]
@@ -188,13 +188,13 @@ class PythonPenalizeVorticity(HostOperator):
     def _compute_penalization_implicit(self):
         dt = self.dt()
         self.tmp[...] = 0.
-        for c, o in self.dobstacles.iteritems():
+        for c, o in self.dobstacles.items():
             self.tmp[...] += (-dt) * c(o) / (1.0 + dt * c(o))
 
     def _compute_penalization_exact(self):
         dt = self.dt()
         self.tmp[...] = 1.
-        for c, o in self.dobstacles.iteritems():
+        for c, o in self.dobstacles.items():
             self.tmp[...] *= np.exp(-dt*c(o))
         self.tmp[...] -= 1.
 
diff --git a/hysop/backend/host/python/operator/spatial_filtering.py b/hysop/backend/host/python/operator/spatial_filtering.py
index 811384d313979d622506af035fe4ac1a0e0e19ba..eabfbf5a20991b22a4d2123767b3a296a14ccebc 100644
--- a/hysop/backend/host/python/operator/spatial_filtering.py
+++ b/hysop/backend/host/python/operator/spatial_filtering.py
@@ -11,7 +11,7 @@ from hysop.operator.base.spatial_filtering import (
         PolynomialInterpolationFilterBase,
         RemeshRestrictionFilterBase,
         SpectralRestrictionFilterBase,
-        SubgridRestrictionFilterBase, 
+        SubgridRestrictionFilterBase,
         PolynomialRestrictionFilterBase)
 
 
@@ -31,10 +31,10 @@ class PythonPolynomialInterpolationFilter(PolynomialInterpolationFilterBase, Hos
         periodicity = self.dFin.periodicity
         gr, n  = self.subgrid_interpolator.gr, self.subgrid_interpolator.n
         Wr     = self.Wr
-        
+
         for idx in np.ndindex(*self.iter_shape):
-            oslc = tuple(slice(j*gr[i], (j+1)*gr[i], 1) for i,j in enumerate(idx)) 
-            islc = tuple(slice(periodicity[i]+j, periodicity[i]+j+n[i], 1) 
+            oslc = tuple(slice(j*gr[i], (j+1)*gr[i], 1) for i,j in enumerate(idx))
+            islc = tuple(slice(periodicity[i]+j, periodicity[i]+j+n[i], 1)
                     for i,j in enumerate(idx))
             fout[oslc] = Wr.dot(fin[islc].ravel()).reshape(gr)
         self.dFout.exchange_ghosts()
@@ -58,9 +58,9 @@ class PythonPolynomialRestrictionFilter(PolynomialRestrictionFilterBase, HostOpe
         gr     = self.subgrid_restrictor.gr
         Rr     = self.Rr
         rshape = Rr.shape
-        
+
         for idx in np.ndindex(*self.iter_shape):
-            islc = tuple(slice(j*gr[i], j*gr[i]+rshape[i], 1) for i,j in enumerate(idx)) 
+            islc = tuple(slice(j*gr[i], j*gr[i]+rshape[i], 1) for i,j in enumerate(idx))
             fout[idx] = (Rr*fin[islc]).sum()
         self.dFout.exchange_ghosts()
 
@@ -77,7 +77,7 @@ class PythonRemeshRestrictionFilter(RemeshRestrictionFilterBase, HostOperator):
         oshape = self.fout.shape
 
         dviews = ()
-        for (idx, Wi) in self.nz_weights.iteritems():
+        for (idx, Wi) in self.nz_weights.items():
             slc = tuple(slice(i, i+r*s, r) for (i,s,r) in zip(idx, oshape, iratio))
             dviews += ((Wi, fin[slc]),)
         self.data_views = dviews
@@ -87,7 +87,7 @@ class PythonRemeshRestrictionFilter(RemeshRestrictionFilterBase, HostOperator):
         """Apply analytic formula."""
         super(PythonRemeshRestrictionFilter, self).apply(**kwds)
         fin, fout = self.fin, self.fout
-        
+
         fout[...] = 0
         for (Wi, iview) in self.data_views:
             fout[...] += Wi*iview
@@ -104,11 +104,11 @@ class PythonSpectralRestrictionFilter(SpectralRestrictionFilterBase, HostOperato
     def apply(self, simulation, **kwds):
         """Apply spectral filter (which is just a square window centered on low frequencies)."""
         super(PythonSpectralRestrictionFilter, self).apply(**kwds)
-        self.Ft(simulation=simulation) 
+        self.Ft(simulation=simulation)
         for i, (src_slc, dst_slc) in enumerate(zip(*self.fslices)):
             self.FOUT[dst_slc] = self.FIN[src_slc]
         self.FOUT[...] *= self.scaling
-        self.Bt(simulation=simulation) 
+        self.Bt(simulation=simulation)
         self.dFout.exchange_ghosts()
 
     def _compute_scaling_coefficient(self):
diff --git a/hysop/core/arrays/array_backend.py b/hysop/core/arrays/array_backend.py
index 6097e1c681c400801e2a5b93e292e54ef73a13ce..82b3812be7ccf85c92bc05de1aedd809892ffe7f 100644
--- a/hysop/core/arrays/array_backend.py
+++ b/hysop/core/arrays/array_backend.py
@@ -222,7 +222,7 @@ class ArrayBackend(TaggedObject):
         args = list(args)
         for i,arg in enumerate(args):
             args[i]  = self._arg(arg)
-        for k,arg in kargs.iteritems():
+        for k,arg in kargs.items():
             kargs[k] = self._arg(arg)
         return tuple(args), kargs
 
@@ -322,7 +322,7 @@ Exception was:
         orders = []
         input_dtypes = {}
         output_arg_names = []
-        for argname, arg in kargs.iteritems():
+        for argname, arg in kargs.items():
             if (argname.find('out')>=0):
                 if (arg is None):
                     output_arg_names.append(argname)
@@ -387,7 +387,7 @@ Exception was:
                         except:
                             type_info[typechar] = 'unknown type'
                     ss = '{}->{} ({})'.format(fin,fout, ', '.join('{}={}'.format(k,v)
-                        for (k,v) in type_info.iteritems()))
+                        for (k,v) in type_info.items()))
                     ftypes_str.append(ss)
 
                 print('   *ufunc available signatures:\n     {}'.format('\n     '.join(ftypes_str)))
@@ -2981,7 +2981,7 @@ def __hysop_array_generated_method(self, shape, fill_value, order=default_order,
     hysop_types = ['real', 'complex', 'integer', 'index', 'dim', 'bool']
 
     for ht in hysop_types:
-        for fname, fdefinition in functions.iteritems():
+        for fname, fdefinition in functions.items():
             fname = fname.format(type=ht, TYPE=ht.upper())
             fdef  = fdefinition.format(type=ht, TYPE=ht.upper())
             exec(fdef)
diff --git a/hysop/core/arrays/tests/test_array.py b/hysop/core/arrays/tests/test_array.py
index 8b9a7349704f73550e3a2891ee6c310c89e74409..d4010b4587076c9b37a79fddbe28367368594bd9 100644
--- a/hysop/core/arrays/tests/test_array.py
+++ b/hysop/core/arrays/tests/test_array.py
@@ -431,7 +431,7 @@ class TestArray(object):
 
         def clamp(_amin, _amax):
             def _filter(variables):
-                for k, _vars in variables.iteritems():
+                for k, _vars in variables.items():
                     for i, var in enumerate(_vars):
                         if is_complex(var):
                             if isinstance(var, np.ndarray):
@@ -478,7 +478,7 @@ class TestArray(object):
                 # if there is a specific constraint we copy everything
                 dtypes = {}
                 if opname in input_constraints:
-                    for vname, vargs in variables.iteritems():
+                    for vname, vargs in variables.items():
                         for i, var in enumerate(vargs):
                             variables[vname][i] = variables[vname][i].copy()
                     filters = to_list(input_constraints[opname])
@@ -486,7 +486,7 @@ class TestArray(object):
                         f(variables)
                 self.dtypes = dtypes
 
-                for vname, vargs in variables.iteritems():
+                for vname, vargs in variables.items():
                     dtypes[vname] = variables[vname][0].dtype
                     for i, var in enumerate(vargs):
                         varname = '{}{}'.format(vname, i)
diff --git a/hysop/core/graph/allocator.py b/hysop/core/graph/allocator.py
index 44fbda89271d5eb8039fd85dc9617414359338e3..c8d3f7a24eee0e172c5ff62b4452295913183837 100644
--- a/hysop/core/graph/allocator.py
+++ b/hysop/core/graph/allocator.py
@@ -47,10 +47,10 @@ class StandardArrayAllocator(MemoryAllocator):
         from hysop.core.graph.mem_request import OperatorMemoryRequests
         check_instance(op_requests, dict)
         ptr = array.ctypes.data
-        for (op,requests) in op_requests.iteritems():
+        for (op,requests) in op_requests.items():
             check_instance(requests, dict, values=OperatorMemoryRequests)
             start_idx = 0
-            for (req_id, req) in requests.iteritems():
+            for (req_id, req) in requests.items():
                 align_offset = (-ptr % req.alignment)
                 start_idx += align_offset
                 end_idx    = start_idx + req.data_bytes()
diff --git a/hysop/core/graph/computational_graph.py b/hysop/core/graph/computational_graph.py
index f700ccc4ae85320996a6e33bd21497efc573e25f..7f5226ffbcaa550fca52de6834f015273e8dec05 100644
--- a/hysop/core/graph/computational_graph.py
+++ b/hysop/core/graph/computational_graph.py
@@ -132,13 +132,13 @@ class ComputationalGraph(ComputationalGraphNode):
         def sorted_reqs(reqs):
             return sorted(reqs, key=lambda x:
                           '{}::{}'.format(x.field.name, x.operator.name))
-        for field, mreqs in requirements.input_field_requirements.iteritems():
-            for td, reqs in mreqs.requirements.iteritems():
+        for field, mreqs in requirements.input_field_requirements.items():
+            for td, reqs in mreqs.requirements.items():
                 for req in reqs:
                     inputs.setdefault(td, {}).setdefault(field, []).append(req)
-        for td, td_reqs in inputs.iteritems():
+        for td, td_reqs in inputs.items():
             sin = sinputs.setdefault(td, [])
-            for field, reqs in td_reqs.iteritems():
+            for field, reqs in td_reqs.items():
                 for req in sorted_reqs(reqs):
                     opname = getattr(req.operator, 'pretty_name', 'UnknownOperator').decode('utf-8')
                     fname = getattr(req.field,    'pretty_name', 'UnknownField').decode('utf-8')
@@ -153,13 +153,13 @@ class ComputationalGraph(ComputationalGraphNode):
                     tstates = u'{}'.format(u','.join(str(ts) for ts in req.tstates)) \
                         if req.tstates else 'ANY'
                     sin.append((opname, fname, discr, ghosts, memory_order, can_split, tstates))
-        for field, mreqs in requirements.output_field_requirements.iteritems():
-            for td, reqs in mreqs.requirements.iteritems():
+        for field, mreqs in requirements.output_field_requirements.items():
+            for td, reqs in mreqs.requirements.items():
                 for req in reqs:
                     outputs.setdefault(td, {}).setdefault(field, []).append(req)
-        for td, td_reqs in outputs.iteritems():
+        for td, td_reqs in outputs.items():
             sout = soutputs.setdefault(td, [])
-            for field, reqs in td_reqs.iteritems():
+            for field, reqs in td_reqs.items():
                 for req in sorted_reqs(reqs):
                     opname = getattr(req.operator, 'pretty_name', 'UnknownOperator').decode('utf-8')
                     fname = getattr(req.field,    'pretty_name', 'UnknownField').decode('utf-8')
@@ -191,7 +191,7 @@ class ComputationalGraph(ComputationalGraphNode):
 
         ss = u'>INPUTS:'
         if sinputs:
-            for (td, sreqs) in sinputs.iteritems():
+            for (td, sreqs) in sinputs.items():
                 if isinstance(td, Topology):
                     ss += u'\n {}'.format(td.short_description())
                 else:
@@ -212,7 +212,7 @@ class ComputationalGraph(ComputationalGraphNode):
             ss += u' None'
         ss += u'\n>OUTPUTS:'
         if soutputs:
-            for (td, sreqs) in soutputs.iteritems():
+            for (td, sreqs) in soutputs.items():
                 if isinstance(td, Topology):
                     ss += u'\n {}'.format(td.short_description())
                 else:
@@ -245,7 +245,7 @@ class ComputationalGraph(ComputationalGraphNode):
         newline_prefix = (None, ' ', '',   ' ', None)
         replace = ('',   '', '-',   '',  '')
 
-        for (domain, operators) in domains.iteritems():
+        for (domain, operators) in domains.items():
             if (domain is None):
                 continue
             for op in sorted(operators, key=lambda x: x.pretty_name):
@@ -309,7 +309,7 @@ class ComputationalGraph(ComputationalGraphNode):
         type_size = max(strlen(s[4]) for ss in ops.values() for s in ss)
 
         ss = u''
-        for (domain, dops) in ops.iteritems():
+        for (domain, dops) in ops.items():
             if (domain is None):
                 continue
             ss += u'\n>{}'.format(domain.short_description())
@@ -344,7 +344,7 @@ class ComputationalGraph(ComputationalGraphNode):
 
     def topology_report(self):
         ss = ''
-        for (backend, topologies) in self.get_topologies().iteritems():
+        for (backend, topologies) in self.get_topologies().items():
             ss += u'\n {}:'.format(backend.short_description())
             ss += u'\n  *'+'\n  *'.join(t.short_description()
                                         for t in sorted(topologies, key=lambda x: x.id))
@@ -436,11 +436,11 @@ class ComputationalGraph(ComputationalGraphNode):
                     handled_outputs += f.fields
             finputs += [u'{}.{}'.format(f.pretty_name.decode('utf-8'),
                                         t.pretty_tag.decode('utf-8'))
-                        for (f, t) in node.input_fields.iteritems()
+                        for (f, t) in node.input_fields.items()
                         if f not in handled_inputs]
             foutputs += [u'{}.{}'.format(f.pretty_name.decode('utf-8'),
                                          t.pretty_tag.decode('utf-8'))
-                         for (f, t) in node.output_fields.iteritems()
+                         for (f, t) in node.output_fields.items()
                          if f not in handled_outputs]
             finputs = u','.join(sorted(finputs))
             foutputs = u','.join(sorted(foutputs))
@@ -497,7 +497,7 @@ class ComputationalGraph(ComputationalGraphNode):
     def get_domains(self):
         domains = {}
         for node in self.nodes:
-            for (domain, ops) in node.get_domains().iteritems():
+            for (domain, ops) in node.get_domains().items():
                 domains.setdefault(domain, set()).update(ops)
         return domains
 
@@ -506,7 +506,7 @@ class ComputationalGraph(ComputationalGraphNode):
         topologies = {}
         for node in self.nodes:
             node_topologies = node.get_topologies()
-            for (backend, topos) in node_topologies.iteritems():
+            for (backend, topos) in node_topologies.items():
                 topos = to_set(topos)
                 if backend not in topologies:
                     topologies[backend] = set()
@@ -548,7 +548,7 @@ class ComputationalGraph(ComputationalGraphNode):
                 self.pretty_name.decode('utf-8'))
             raise RuntimeError(msg.encode('utf-8'))
         for node in self.nodes:
-            for (k, v) in node.available_methods().iteritems():
+            for (k, v) in node.available_methods().items():
                 v = to_set(v)
                 if (k in avail_methods):
                     avail_methods[k].update(v)
@@ -781,7 +781,7 @@ class ComputationalGraph(ComputationalGraphNode):
 
         if self.is_root:
             input_discrete_fields = {}
-            for (field, topo) in self.input_fields.iteritems():
+            for (field, topo) in self.input_fields.items():
                 istate = self.initial_input_topology_states[field][1]
                 # problem inputs are writeable for initialization
                 istate = istate.copy(is_read_only=False)
@@ -789,7 +789,7 @@ class ComputationalGraph(ComputationalGraphNode):
                 input_discrete_fields[field] = dfield
 
             output_discrete_fields = {}
-            for field, topo in self.output_fields.iteritems():
+            for field, topo in self.output_fields.items():
                 ostate = self.final_output_topology_states[field][1]
                 dfield = field.discretize(topo, ostate)
                 output_discrete_fields[field] = dfield
diff --git a/hysop/core/graph/computational_node.py b/hysop/core/graph/computational_node.py
index 89f7a1fe07c9206f2c4f5480dcbf77c5bd80c211..160db9b45ac5d327f8b7e71f5ddf446133365357 100644
--- a/hysop/core/graph/computational_node.py
+++ b/hysop/core/graph/computational_node.py
@@ -172,7 +172,7 @@ class ComputationalGraphNode(OperatorBase):
                         raise RuntimeError(msg)
         elif (input_fields is not None):
             input_tensor_fields = tuple(filter(lambda x: x.is_tensor, input_fields.keys()))
-            input_fields = {sfield: topod for (tfield, topod) in input_fields.iteritems()
+            input_fields = {sfield: topod for (tfield, topod) in input_fields.items()
                             for sfield in tfield.fields}
         else:
             input_tensor_fields = ()
@@ -187,7 +187,7 @@ class ComputationalGraphNode(OperatorBase):
                         raise RuntimeError(msg)
         elif (output_fields is not None):
             output_tensor_fields = tuple(filter(lambda x: x.is_tensor, output_fields.keys()))
-            output_fields = {sfield: topod for (tfield, topod) in output_fields.iteritems()
+            output_fields = {sfield: topod for (tfield, topod) in output_fields.items()
                              for sfield in tfield.fields}
         else:
             output_tensor_fields = ()
@@ -381,7 +381,7 @@ class ComputationalGraphNode(OperatorBase):
                     msg += '\n  *field:    {}'.format(topo.mpi_params)
                     msg += '\n'
                     raise RuntimeError(msg)
-        
+
         super(ComputationalGraphNode, self).__init__(name=self.name,
                                                      fields=fields,
                                                      tensor_fields=tfields,
@@ -402,7 +402,7 @@ class ComputationalGraphNode(OperatorBase):
             method.update(user_method)
 
         available_methods = self.available_methods()
-        for (k, v) in method.iteritems():
+        for (k, v) in method.items():
             if k not in available_methods.keys():
                 msg = '{} is not an available method key for computational node {}.'
                 msg = msg.format(k, self.name)
@@ -445,7 +445,7 @@ class ComputationalGraphNode(OperatorBase):
         Called automatically in ComputationalGraphNode.check()
         """
         for variables in [self.input_fields, self.output_fields]:
-            for (k, v) in variables.iteritems():
+            for (k, v) in variables.items():
                 if not isinstance(k, Field):
                     msg = 'Given key is not a continuous Field (got a {}).'
                     raise TypeError(msg.format(k.__class__))
@@ -476,7 +476,7 @@ class ComputationalGraphNode(OperatorBase):
         if topos:
             topo_ref = topos[0].topology
             for variables in [self.input_fields, self.output_fields]:
-                for field, topo in variables.iteritems():
+                for field, topo in variables.items():
                     if (topo.topology != topo_ref):
                         has_multiple_topologies = True
 
@@ -516,10 +516,10 @@ class ComputationalGraphNode(OperatorBase):
             msg = 'Graph operator {} does not support multiple field topologies yet.'
             msg = msg.format(self.node_tag)
             msg += '\n>Input topologies:'
-            for (field, topo) in self.input_fields.iteritems():
+            for (field, topo) in self.input_fields.items():
                 msg += '\n  *{} -> {}'.format(field.short_description(), topo.short_description())
             msg += '\n>Output topologies:'
-            for (field, topo) in self.output_fields.iteritems():
+            for (field, topo) in self.output_fields.items():
                 msg += '\n  *{} -> {}'.format(field.short_description(), topo.short_description())
             raise NotImplementedError(msg)
         if (self._is_distributed) and (not cls.supports_mpi()):
@@ -594,7 +594,7 @@ class ComputationalGraphNode(OperatorBase):
             3) check method against available_methods.
         The result of this process is fed as argument of this function.
         """
-        self.method = {k: v for (k, v) in method.iteritems()}
+        self.method = {k: v for (k, v) in method.items()}
 
     @abstractmethod
     @debug
@@ -941,10 +941,10 @@ class ComputationalGraphNode(OperatorBase):
                                                for field in tfield.fields)
 
         if with_tensors and (not as_scalars):
-            for (tfield, tdfield) in self.input_discrete_tensor_fields.iteritems():
+            for (tfield, tdfield) in self.input_discrete_tensor_fields.items():
                 yield (tfield, tdfield)
 
-        for (field, dfield) in self.input_discrete_fields.iteritems():
+        for (field, dfield) in self.input_discrete_fields.items():
             if field in input_scalar_fields_from_tensors:
                 # field is contained in a tensor field
                 if with_tensors and as_scalars:
@@ -971,10 +971,10 @@ class ComputationalGraphNode(OperatorBase):
                                                 for field in tfield.fields)
 
         if with_tensors and (not as_scalars):
-            for (tfield, tdfield) in self.output_discrete_tensor_fields.iteritems():
+            for (tfield, tdfield) in self.output_discrete_tensor_fields.items():
                 yield (tfield, tdfield)
 
-        for (field, dfield) in self.output_discrete_fields.iteritems():
+        for (field, dfield) in self.output_discrete_fields.items():
             if field in output_scalar_fields_from_tensors:
                 # field is contained in a tensor field
                 if with_tensors and as_scalars:
@@ -1075,7 +1075,7 @@ class ComputationalGraphNode(OperatorBase):
 
             io_params = IOParams(filename=filename, frequency=frequency,
                                  fileformat=fileformat, io_leader=io_leader)
-        
+
         self._input_fields_to_dump.append((fields, io_params, op_kwds))
 
     def dump_outputs(self, fields=None, io_params=None,
diff --git a/hysop/core/graph/computational_node_frontend.py b/hysop/core/graph/computational_node_frontend.py
index 00df673efcd2fdb3d4b5f497e9a49378558318e3..c67bf0626a08eeab146bd7099cc927bb79cc7f3f 100644
--- a/hysop/core/graph/computational_node_frontend.py
+++ b/hysop/core/graph/computational_node_frontend.py
@@ -110,7 +110,7 @@ class ComputationalGraphNodeFrontend(ComputationalGraphNodeGenerator):
             op = self.impl(**self.impl_kwds)
         except:
             sargs = ['*{} = {}'.format(k,v.__class__)
-                        for (k,v) in self.impl_kwds.iteritems()]
+                        for (k,v) in self.impl_kwds.items()]
             msg  = 'FATAL ERROR during {}.generate():\n'
             msg += ' => failed to call {}.__init__()\n    with the following keywords:'
             msg += '\n     '+'\n     '.join(sargs)
diff --git a/hysop/core/graph/computational_operator.py b/hysop/core/graph/computational_operator.py
index 14eee3f74e52be644807d44eaaf17774ab6f84e6..b71d3bc4c53634d244aaf5061ad3998f726b5266 100644
--- a/hysop/core/graph/computational_operator.py
+++ b/hysop/core/graph/computational_operator.py
@@ -180,7 +180,7 @@ class ComputationalGraphOperator(ComputationalGraphNode):
              2) allowed splitting directions for cartesian topologies
         """
         # by default we create HOST (cpu) TopologyDescriptors
-        for (field, topo_descriptor) in self.input_fields.iteritems():
+        for (field, topo_descriptor) in self.input_fields.items():
             topo_descriptor = TopologyDescriptor.build_descriptor(
                     backend=Backend.HOST,
                     operator=self,
@@ -188,7 +188,7 @@ class ComputationalGraphOperator(ComputationalGraphNode):
                     handle=topo_descriptor)
             self.input_fields[field] = topo_descriptor
 
-        for (field, topo_descriptor) in self.output_fields.iteritems():
+        for (field, topo_descriptor) in self.output_fields.items():
             topo_descriptor = TopologyDescriptor.build_descriptor(
                     backend=Backend.HOST,
                     operator=self,
@@ -214,7 +214,7 @@ class ComputationalGraphOperator(ComputationalGraphNode):
          # can_split set to True in all directions, all TranspositionStates
          # and C memory ordering).
          input_field_requirements  = {}
-         for (field, topo_descriptor) in self.input_fields.iteritems():
+         for (field, topo_descriptor) in self.input_fields.items():
              if (topo_descriptor is None):
                  req = None
              else:
@@ -225,7 +225,7 @@ class ComputationalGraphOperator(ComputationalGraphNode):
              input_field_requirements[field] = req
 
          output_field_requirements = {}
-         for (field, topo_descriptor) in self.output_fields.iteritems():
+         for (field, topo_descriptor) in self.output_fields.items():
              if (topo_descriptor is None):
                  req = None
              else:
@@ -424,12 +424,12 @@ class ComputationalGraphOperator(ComputationalGraphNode):
 
         # replace topologies by topology view using topology states
         input_fields  = self.input_fields
-        for field, topology in input_fields.iteritems():
+        for field, topology in input_fields.items():
             topology_state = input_topology_states[field].copy(is_read_only=True)
             input_fields[field] = topology.view(topology_state)
 
         output_fields = self.output_fields
-        for field, topology in output_fields.iteritems():
+        for field, topology in output_fields.items():
             topology_state = output_topology_states[field].copy(is_read_only=False)
             output_fields[field] = topology.view(topology_state)
 
@@ -469,10 +469,10 @@ class ComputationalGraphOperator(ComputationalGraphNode):
         super(ComputationalGraphOperator,self).discretize()
 
         # discretize ScalarFields
-        for (field, topology_view) in self.input_fields.iteritems():
+        for (field, topology_view) in self.input_fields.items():
             topology, topology_state = topology_view.topology, topology_view.topology_state
             self.input_discrete_fields[field] = field.discretize(topology, topology_state)
-        for (field, topology_view) in self.output_fields.iteritems():
+        for (field, topology_view) in self.output_fields.items():
             topology, topology_state = topology_view.topology, topology_view.topology_state
             self.output_discrete_fields[field] = field.discretize(topology, topology_state)
 
@@ -652,7 +652,7 @@ class ComputationalGraphOperator(ComputationalGraphNode):
                     for fi in f.fields:
                         ifields[fi] = input_fields[fi]
             variables = {k:(v if v.backend.kind in HDF_Writer.supported_backends() else None)
-                            for (k,v) in ifields.iteritems() }
+                            for (k,v) in ifields.items() }
             op = HDF_Writer(io_params=io_params, variables=variables, **op_kwds)
             op.initialize(topgraph_method=self.method)
             op.get_and_set_field_requirements()
@@ -670,7 +670,7 @@ class ComputationalGraphOperator(ComputationalGraphNode):
                     for fi in f.fields:
                         ofields[fi] = output_fields[fi]
             variables = {k:(v if v.backend.kind in HDF_Writer.supported_backends() else None)
-                            for (k,v) in ofields.iteritems() }
+                            for (k,v) in ofields.items() }
             op = HDF_Writer(io_params=io_params, variables=variables, **op_kwds)
             op.initialize(topgraph_method=self.method)
             op.get_and_set_field_requirements()
@@ -748,13 +748,13 @@ class ComputationalGraphOperator(ComputationalGraphNode):
         """
         topologies_per_backend = self.get_topologies()
         supported_backends = self.supported_backends()
-        for (backend, topologies) in topologies_per_backend.iteritems():
+        for (backend, topologies) in topologies_per_backend.items():
             if (backend.kind not in supported_backends):
                 bad_fields = set()
-                for (field, topo) in self.input_fields.iteritems():
+                for (field, topo) in self.input_fields.items():
                     if (topo.backend is backend):
                         bad_fields.add(field)
-                for (field, topo) in self.output_fields.iteritems():
+                for (field, topo) in self.output_fields.items():
                     if (topo.backend is backend):
                         bad_fields.add(field)
                 msg='\n\nOperator {} topology backend mismatch:'.format(self.node_tag)
diff --git a/hysop/core/graph/graph.py b/hysop/core/graph/graph.py
index 886cbd11a2efc37eda32ec5f9833568a759fd1af..d2eb4107abc9fb81ed8f198a325b26ae7469b922 100644
--- a/hysop/core/graph/graph.py
+++ b/hysop/core/graph/graph.py
@@ -6,7 +6,7 @@ from hysop.tools.decorators import not_implemented, debug, wraps, profile
 
 def is_directed_acyclic_graph(graph):
     return networkx.algorithms.dag.is_directed_acyclic_graph(graph)
-    
+
 def transitive_reduction(graph):
     reduced_graph = networkx.algorithms.dag.transitive_reduction(graph)
     # copy back edge attributes (node data is automatically transferred
@@ -22,8 +22,8 @@ def all_simple_paths(graph, src, dst):
 
 def lexicographical_topological_sort(graph):
     # Lexicographical sort ensures a unique permutations of nodes
-    # such that they are in the same topological order on each 
-    # MPI process. Else operators will not be executed in the same 
+    # such that they are in the same topological order on each
+    # MPI process. Else operators will not be executed in the same
     # order and everything deadlocks on MPI synchronization.
     topo_sort = tuple(networkx.algorithms.dag.lexicographical_topological_sort(
         graph, key=lambda x: int(x)))
@@ -63,7 +63,7 @@ def generate_vertex_colors():
         colors += c0[i::4] + c1[i::4]
     colors = tuple(map(matplotlib.colors.to_hex, colors))
     return colors
-        
+
 class VertexAttributes(object):
     """Simple class to hold vertex data."""
 
@@ -74,10 +74,10 @@ class VertexAttributes(object):
             graph._hysop_node_counter = 0
         node_id = graph._hysop_node_counter
         graph._hysop_node_counter += 1
-        
+
         self.node_id  = node_id
         self.operator = operator
-        
+
         self.input_states  = None
         self.output_states = None
         self.op_ordering   = None
@@ -87,10 +87,10 @@ class VertexAttributes(object):
         if (other is None):
             return self
         check_instance(other, VertexAttributes)
-        for vname in ('operator', 
-                      'input_states', 'output_states', 
+        for vname in ('operator',
+                      'input_states', 'output_states',
                       'op_ordering', 'command_queue'):
-            setattr(self, vname, first_not_None(getattr(self,  vname), 
+            setattr(self, vname, first_not_None(getattr(self,  vname),
                                                 getattr(other, vname)))
         return self
 
@@ -101,7 +101,7 @@ class VertexAttributes(object):
         self.input_states  = input_states
         self.output_states = output_states
         return self
-    
+
     # hashing for networkx
     def hash(self):
         return self.node_id
@@ -117,11 +117,11 @@ class VertexAttributes(object):
         if (self.op_ordering is not None):
             s = '({})\n{}'.format(self.op_ordering, s)
         return s
-    
+
     @property
     def title(self):
         return self.node_info().replace('\n','<br>')
-    
+
     def shape(self, with_custom_nodes=True):
         from hysop.operator.base.transpose_operator    import TransposeOperatorBase
         from hysop.operator.base.redistribute_operator import RedistributeOperatorBase
@@ -132,11 +132,11 @@ class VertexAttributes(object):
                 MemoryReorderingBase:     'box'
         }
         if with_custom_nodes:
-            for (op_type, shape) in special_shapes.iteritems():
+            for (op_type, shape) in special_shapes.items():
                 if isinstance(self.operator, op_type):
                     return shape
         return 'circle'
-    
+
     @property
     def color(self):
         cq = self.command_queue
@@ -184,7 +184,7 @@ class VertexAttributes(object):
             return param.pretty_name
         def opinfo(param):
             return param.pretty_name
-                
+
         prefix='&nbsp;&nbsp<b>'
         suffix='</b>&nbsp;&nbsp'
         sep = '\n'+'&nbsp'*14
@@ -192,18 +192,18 @@ class VertexAttributes(object):
         ss = '<h2>Operator {}</h2>{}{}{}{}{}\n{}'.format(op.name,
                 '{p}Rank:{s}{}\n\n'.format(self.op_ordering, p=prefix, s=suffix)
                     if self.op_ordering else '',
-                '{p}Pin:{s}{}\n'.format(sep.join(ipinfo(param) 
+                '{p}Pin:{s}{}\n'.format(sep.join(ipinfo(param)
                     for param in iparams.values()), p=prefix, s=suffix+'&nbsp&nbsp')
                     if iparams else '',
-                '{p}Fin:{s}{}\n'.format(sep.join([ifinfo(f,topo) 
-                    for (f,topo) in ifields.iteritems()]), p=prefix, 
+                '{p}Fin:{s}{}\n'.format(sep.join([ifinfo(f,topo)
+                    for (f,topo) in ifields.items()]), p=prefix,
                         s=suffix+'&nbsp&nbsp')
                     if ifields else '',
-                '{p}Pout:{s}{}\n'.format(sep.join([opinfo(param) 
-                    for param in oparams.values()]), p=prefix, s=suffix) 
+                '{p}Pout:{s}{}\n'.format(sep.join([opinfo(param)
+                    for param in oparams.values()]), p=prefix, s=suffix)
                     if oparams else '',
-                '{p}Fout:{s}{}\n'.format(sep.join([ofinfo(f,topo) 
-                    for (f,topo) in ofields.iteritems()]), p=prefix, s=suffix) 
+                '{p}Fout:{s}{}\n'.format(sep.join([ofinfo(f,topo)
+                    for (f,topo) in ofields.items()]), p=prefix, s=suffix)
                     if ofields else '',
                 '{p}Type:{s} {}'.format(
                     sep.join(map(lambda x: x.__name__, type(op).__mro__[:-2])),
@@ -216,7 +216,7 @@ class EdgeAttributes(object):
     def __init__(self, *args, **kwds):
         self.variables = {}
         self.update(*args, **kwds)
-    
+
     def update(self, variable=None, topology=None):
         if (variable is None):
             assert topology is None
@@ -227,13 +227,13 @@ class EdgeAttributes(object):
         prefix='&nbsp;&nbsp<b>'
         suffix='</b>&nbsp;&nbsp'
         ss = '<h2>Variable dependencies</h2>{}'.format('\n'.join(
-                '{p}{}:{s}{}'.format(v.pretty_name, 
-                    ', '.join(v.pretty_name if (t is None) else 
+                '{p}{}:{s}{}'.format(v.pretty_name,
+                    ', '.join(v.pretty_name if (t is None) else
                                 v[t].short_description() for t in self.variables[v]),
                     p=prefix,s=suffix) for v in self.variables))
         return ss.replace('\n','<br>')
 
- 
+
 class ComputationalGraphNodeData(object):
     """
     Simple class to hold some node data.
@@ -377,18 +377,18 @@ def op_apply(f):
             for dfield in sorted(op.input_discrete_fields.values(), key=lambda x: x.name):
                 tag = 'pre_{}_{}'.format(op.name, dfield.name)
                 kwds['debug_dumper'](it, t, tag,
-                    tuple(df.sdata.get().handle[df.compute_slices] 
+                    tuple(df.sdata.get().handle[df.compute_slices]
                         for df in dfield.dfields), description=description)
             ret = f(*args, **kwds)
             for param in sorted(op.output_params.values(), key=lambda x: x.name):
                 tag = 'post_{}_{}'.format(op.name, param.name)
                 kwds['debug_dumper'](it, t, tag,
                                      (param._value,), description=description)
-            for dfield in sorted(op.output_discrete_fields.values(), 
+            for dfield in sorted(op.output_discrete_fields.values(),
                                                     key=lambda x: x.name):
                 tag = 'post_{}_{}'.format(op.name, dfield.name)
                 kwds['debug_dumper'](it, t, tag,
-                            tuple(df.sdata.get().handle[df.compute_slices] 
+                            tuple(df.sdata.get().handle[df.compute_slices]
                                 for df in dfield.dfields), description=description)
             return ret
         elif dbg:
diff --git a/hysop/core/graph/graph_builder.py b/hysop/core/graph/graph_builder.py
index 59f63c8f0084592f22a0d70deb280f46ce66b3c1..48d37f665e6360e8a633197c0cbf87169ede0ad0 100644
--- a/hysop/core/graph/graph_builder.py
+++ b/hysop/core/graph/graph_builder.py
@@ -177,7 +177,7 @@ class GraphBuilder(object):
                 if not isinstance(op, Problem):
                     # try to fill in undertermined topologies (experimental feature)
                     backends = op.supported_backends()
-                    for (ifield, itopo) in sorted(ifields.iteritems(),
+                    for (ifield, itopo) in sorted(ifields.items(),
                                                     key=lambda x: x[0].name):
                         if (itopo is not None):
                             continue
@@ -207,7 +207,7 @@ class GraphBuilder(object):
                                 backend = itopo.backend.any_backend_from_kind(*backends)
                                 itopo   = itopo.topology_like(backend=backend)
                             ifields[ifield] = itopo
-                    for (ofield, otopo) in sorted(ofields.iteritems(),
+                    for (ofield, otopo) in sorted(ofields.items(),
                                                     key=lambda x: x[0].name):
                         if (otopo is not None):
                             continue
@@ -250,7 +250,7 @@ class GraphBuilder(object):
                 input_states = {}
                 if ifields:
                     gprint('   >Input fields')
-                    for (ifield,itopo) in sorted(ifields.iteritems(),
+                    for (ifield,itopo) in sorted(ifields.items(),
                                             key=lambda x: x[0].name, reverse=True):
                         gprint('     *{}{}'.format(ifield.name,
                              ' on an unknown topology (to be determined)'
@@ -288,7 +288,7 @@ class GraphBuilder(object):
                 output_states = {}
                 if ofields:
                     gprint('   >Output fields')
-                    for (ofield,otopo) in sorted(ofields.iteritems(),
+                    for (ofield,otopo) in sorted(ofields.items(),
                                             key=lambda x: x[0].name, reverse=True):
                         assert (otopo is not None)
                         gprint('     *{}.{}'.format(ofield.name, otopo.pretty_tag))
@@ -324,7 +324,7 @@ class GraphBuilder(object):
             input_states  = op_input_topology_states[op]
             output_states = op_output_topology_states[op]
             field_requirements = op.field_requirements
-            for (ifield,itopo) in sorted(ifields.iteritems(), key=lambda x: x[0].name):
+            for (ifield,itopo) in sorted(ifields.items(), key=lambda x: x[0].name):
                 if (itopo is not None):
                     continue
                 msg  = '\nGraphBuilder {} could not automatically determine the '
diff --git a/hysop/core/memory/memory_request.py b/hysop/core/memory/memory_request.py
index b7e025b2f3beff734b95aee18043b50ab26d78cc..5c4671f2731139784d7feeff4940829ccef163b4 100644
--- a/hysop/core/memory/memory_request.py
+++ b/hysop/core/memory/memory_request.py
@@ -274,17 +274,17 @@ class MultipleOperatorMemoryRequests(object):
         for mem_requests in requests:
             if isinstance(mem_requests, MultipleOperatorMemoryRequests):
                 for backend, op_requests in \
-                        mem_requests._all_requests_per_backend.iteritems():
+                        mem_requests._all_requests_per_backend.items():
                     if backend not in self._all_requests_per_backend.keys():
                         self._all_requests_per_backend[backend] = {}
-                    for (op, op_reqs) in op_requests.iteritems():
+                    for (op, op_reqs) in op_requests.items():
                         if op in self._all_requests_per_backend[backend].keys():
                             msg='Operator {} has already requested memory.'.format(op)
                             raise ValueError(msg)
                         self._all_requests_per_backend[backend][op] = op_reqs
             elif isinstance(mem_requests, OperatorMemoryRequests):
                 operator = mem_requests._operator
-                for (backend, requests) in mem_requests._requests_per_backend.iteritems():
+                for (backend, requests) in mem_requests._requests_per_backend.items():
                     if backend not in self._all_requests_per_backend.keys():
                         self._all_requests_per_backend[backend] = {}
                     if operator in self._all_requests_per_backend[backend].keys():
@@ -337,7 +337,7 @@ class MultipleOperatorMemoryRequests(object):
 
         if allow_subbuffers:
             data = backend.empty(shape=(total_bytes,), dtype=np.uint8)
-            for (op,requests) in op_requests.iteritems():
+            for (op,requests) in op_requests.items():
                 check_instance(requests, list, values=MemoryRequest)
                 start_idx, end_idx = 0, 0
                 for req in requests:
@@ -380,7 +380,7 @@ class MultipleOperatorMemoryRequests(object):
         else:
             buffer_sizes = []
             ordered_requests = {}
-            for (op,requests) in op_requests.iteritems():
+            for (op,requests) in op_requests.items():
                 assert op not in ordered_requests
                 check_instance(requests, list, values=MemoryRequest)
                 op_buffer_sizes = ()
@@ -407,7 +407,7 @@ class MultipleOperatorMemoryRequests(object):
             buffers = tuple(backend.empty(shape=(nbytes,), dtype=np.uint8)
                                                     for nbytes in buffer_sizes)
 
-            for (op, requests) in ordered_requests.iteritems():
+            for (op, requests) in ordered_requests.items():
                 assert len(buffers)>= len(requests)
                 views.setdefault(op, {})
                 old_req = None
@@ -461,7 +461,7 @@ class MultipleOperatorMemoryRequests(object):
     def sreport(self):
         all_requests = {}
         totals = {}
-        for (backend, backend_requests) in self._all_requests_per_backend.iteritems():
+        for (backend, backend_requests) in self._all_requests_per_backend.items():
             total=0
             for op in sorted(backend_requests.keys(), key=lambda op: getattr(op, 'name', None)):
                 op_requests = backend_requests[op]
@@ -493,7 +493,7 @@ class MultipleOperatorMemoryRequests(object):
                 template += u'{:'+(u'<' if i==0 else u'^')+u'{'+name+u'}}'
 
             ss=''
-            for (backend, backend_srequests) in all_requests.iteritems():
+            for (backend, backend_srequests) in all_requests.items():
                 total = totals[backend]
                 kind = backend.kind
                 if (kind == Backend.OPENCL):
diff --git a/hysop/core/memory/mempool.py b/hysop/core/memory/mempool.py
index a6217289c9a616b8d3777628950429c88f78c5f4..b3dcd7c9eb0e0abcd1fa0d6b676fef84d2933e7a 100644
--- a/hysop/core/memory/mempool.py
+++ b/hysop/core/memory/mempool.py
@@ -446,7 +446,7 @@ class MemoryPool(object):
         ss += '\n'
         ss += '\n  Detailed pool statistics:'
         has_stats=False
-        for stat_nr, stat in self.alloc_statistics.iteritems():
+        for stat_nr, stat in self.alloc_statistics.items():
             has_stats=True
             ss += '\n  {:>10} <{} x <= {:<10} => {}'.format(
                     bytes2str(2**(stat_nr), decimal=False),
diff --git a/hysop/core/mpi/__init__.py b/hysop/core/mpi/__init__.py
index d268d085bed74b70b4149d155e0f3d53b8fbd537..2591d0abd4d448a5e27a0fd582d67db3cb5e20a5 100644
--- a/hysop/core/mpi/__init__.py
+++ b/hysop/core/mpi/__init__.py
@@ -24,7 +24,7 @@ from mpi4py import MPI as MPI
 
 processor_name = MPI.Get_processor_name()
 """MPI processor name"""
-processor_hash = int(hashlib.sha1(processor_name).hexdigest(), 16) % (1<<31)
+processor_hash = int(hashlib.sha1(processor_name.encode('utf-8')).hexdigest(), 16) % (1<<31)
 """MPI hashed processor name as integer (fits into a 32bit signed integer)"""
 
 main_comm = MPI.COMM_WORLD.Dup()
@@ -48,7 +48,7 @@ if (shm_rank!=0):
     intershm_comm.Free()
     intershm_comm = None
     intershm_rank = None
-    intershm_size = shm_comm.bcast(None, root=0) 
+    intershm_size = shm_comm.bcast(None, root=0)
     is_multishm   = False
 else:
     intershm_rank = intershm_comm.Get_rank()
@@ -72,7 +72,7 @@ if (host_rank!=0):
     interhost_comm.Free()
     interhost_comm = None
     interhost_rank = None
-    interhost_size = main_comm.bcast(None, root=0) 
+    interhost_size = main_comm.bcast(None, root=0)
 else:
     interhost_rank = interhost_comm.Get_rank()
     """Communicator rank between hosts"""
diff --git a/hysop/core/mpi/topo_tools.py b/hysop/core/mpi/topo_tools.py
index ae8243e635d672f762e6278b2d805b59ea5e7697..b5bfe8d3f54e30f3ae724fcfc7f5ce44def0136e 100644
--- a/hysop/core/mpi/topo_tools.py
+++ b/hysop/core/mpi/topo_tools.py
@@ -258,7 +258,7 @@ class TopoTools(object):
 
         if isinstance(sl_dict, dict):
             subtypes = {}
-            for (rk,slc) in sl_dict.iteritems():
+            for (rk,slc) in sl_dict.items():
                 subtypes[rk] = _create_subarray(slc, data_shape)
             return subtypes
         else:
diff --git a/hysop/fields/cartesian_discrete_field.py b/hysop/fields/cartesian_discrete_field.py
index 17bb04774616332fc8d0431deee2baa6a984068c..3f678f565f6744cafb9eac517ccfa9160bc237c9 100644
--- a/hysop/fields/cartesian_discrete_field.py
+++ b/hysop/fields/cartesian_discrete_field.py
@@ -369,7 +369,7 @@ class CartesianDiscreteScalarFieldViewContainerI(object):
             for ndir in self.all_outer_ghost_slices:
                 for directions in self.all_outer_ghost_slices[ndir]:
                     for disp, (slc, _) in \
-                            self.all_outer_ghost_slices[ndir][directions].iteritems():
+                            self.all_outer_ghost_slices[ndir][directions].items():
                         if (sum(d != 0 for d in disp) == ndir) and ndir:
                             if callable(outer_ghosts):
                                 outer_ghosts = np.vectorize(outer_ghosts)
diff --git a/hysop/fields/field_requirements.py b/hysop/fields/field_requirements.py
index 7d619d65a8a7a44bf346982ddf293fcf76b7ef38..b87f520988a3dc8a7edbbd930216fe05065bd7e8 100644
--- a/hysop/fields/field_requirements.py
+++ b/hysop/fields/field_requirements.py
@@ -441,7 +441,7 @@ class MultiFieldRequirements(object):
                     sub_field_requirements.append(new_group)
         assert self.nrequirements() == sum(sf.nrequirements() for sf in sub_field_requirements)
         for multi_reqs in sub_field_requirements:
-            for topology_descriptor, reqs in multi_reqs.requirements.iteritems():
+            for topology_descriptor, reqs in multi_reqs.requirements.items():
                 if isinstance(topology_descriptor, Topology):
                     dim = topology_descriptor.domain_dim
                 else:
@@ -459,7 +459,7 @@ class MultiFieldRequirements(object):
     def _build_compatible_topologies(self):
         assert self.all_compatible()
         all_topologies = set()
-        for topology_descriptor, reqs in self.requirements.iteritems():
+        for topology_descriptor, reqs in self.requirements.items():
             if isinstance(topology_descriptor, Topology):
                 gprint("     -Topology {}".format(topology_descriptor.short_description()))
                 dim = topology_descriptor.domain_dim
@@ -532,7 +532,7 @@ class OperatorFieldRequirements(object):
     def update_inputs(self, input_field_requirements):
         check_instance(input_field_requirements, dict, keys=ScalarField,
                        values=(DiscreteFieldRequirements, MultiFieldRequirements, type(None)))
-        for ifield, ireqs in input_field_requirements.iteritems():
+        for ifield, ireqs in input_field_requirements.items():
             if (ireqs is not None):
                 ireqs = ireqs.copy()
             if not isinstance(ireqs, MultiFieldRequirements):
@@ -547,7 +547,7 @@ class OperatorFieldRequirements(object):
     def update_outputs(self, output_field_requirements):
         check_instance(output_field_requirements, dict, keys=ScalarField,
                        values=(DiscreteFieldRequirements, MultiFieldRequirements))
-        for ofield, oreqs in output_field_requirements.iteritems():
+        for ofield, oreqs in output_field_requirements.items():
             oreqs = oreqs.copy()
             if not isinstance(oreqs, MultiFieldRequirements):
                 _oreqs = oreqs
@@ -563,9 +563,9 @@ class OperatorFieldRequirements(object):
         """
         Iterates over (field, topology_descriptor, field_requirement) for all input requirements.
         """
-        for (field, freqs) in self.input_field_requirements.iteritems():
+        for (field, freqs) in self.input_field_requirements.items():
             freqs = freqs.requirements
-            for (td, reqs) in freqs.iteritems():
+            for (td, reqs) in freqs.items():
                 for req in reqs:
                     yield field, td, req
 
@@ -573,9 +573,9 @@ class OperatorFieldRequirements(object):
         """
         Iterates over (field, topology_descriptor, field_requirement) for all output requirements.
         """
-        for (field, freqs) in self.output_field_requirements.iteritems():
+        for (field, freqs) in self.output_field_requirements.items():
             freqs = freqs.requirements
-            for (td, reqs) in freqs.iteritems():
+            for (td, reqs) in freqs.items():
                 for req in reqs:
                     yield (field, td, req)
 
diff --git a/hysop/numerics/odesolvers/runge_kutta.py b/hysop/numerics/odesolvers/runge_kutta.py
index 05502b73e0ebd3c74528637fed1f303b0877961d..52c3d3fcbb9159a6916fc3b5d43e010143a01177 100644
--- a/hysop/numerics/odesolvers/runge_kutta.py
+++ b/hysop/numerics/odesolvers/runge_kutta.py
@@ -56,12 +56,12 @@ class ExplicitRungeKutta(RungeKutta):
         """Buffers = dict of (nb_stages+1) np.ndarray of size compatible with Xin"""
         check_instance(Xin, dict, keys=str, values=np.ndarray)
         varnames = Xin.keys()
-        views   = first_not_None(views,   { k:Ellipsis for (k,v) in Xin.iteritems() })
+        views   = first_not_None(views,   { k:Ellipsis for (k,v) in Xin.items() })
         Xout    = first_not_None(Xout,    { k:np.empty_like(v[views[k]])
-                                                for (k,v) in Xin.iteritems() })
+                                                for (k,v) in Xin.items() })
         buffers = first_not_None(buffers, { k:tuple(np.empty_like(v)
                                                 for i in range(self.stages+1))
-                                                for (k,v) in Xin.iteritems()})
+                                                for (k,v) in Xin.items()})
         check_instance(views,   dict, keys=str, values=(type(Ellipsis),slice,tuple))
         check_instance(Xout,    dict, keys=str, values=np.ndarray)
         check_instance(buffers, dict, keys=str, values=tuple)
@@ -85,8 +85,8 @@ class ExplicitRungeKutta(RungeKutta):
                     assert is_fp(buf.dtype), buf.dtype
                     assert np.all(buf.shape == ivar.shape)
 
-        Xtmp = {k: v[0] for (k,v) in buffers.iteritems()}
-        K = tuple( {k: v[i] for (k,v) in buffers.iteritems()}
+        Xtmp = {k: v[0] for (k,v) in buffers.items()}
+        K = tuple( {k: v[i] for (k,v) in buffers.items()}
                             for i in range(1, self.stages+1) )
         for i in range(self.stages):
             ai = self.alpha[i]
diff --git a/hysop/numerics/stencil/stencil_generator.py b/hysop/numerics/stencil/stencil_generator.py
index ed931ea8056611d9fcea4ea9f53da3d39bfb5aab..220bda6b1fe4f6d1ec4ee9a2d4252e62bc4a0ff7 100644
--- a/hysop/numerics/stencil/stencil_generator.py
+++ b/hysop/numerics/stencil/stencil_generator.py
@@ -502,7 +502,7 @@ class StencilGenerator(object):
                     order+=1
                 i+=1
             eqs = []
-            for k,eq in factor_split(err,df_vars).iteritems():
+            for k,eq in factor_split(err,df_vars).items():
                 if isinstance(eq,int):
                     continue
                 eq = sm.simplify(eq.xreplace(sol))
diff --git a/hysop/numerics/tests/bench_fft.py b/hysop/numerics/tests/bench_fft.py
index 7fd2d1a815a4bc140ccaca84ed4434d49d3e0665..ce252728f6aabb4baacaee4732d2ad4dadb546c5 100644
--- a/hysop/numerics/tests/bench_fft.py
+++ b/hysop/numerics/tests/bench_fft.py
@@ -86,8 +86,8 @@ class BenchFFT(object):
         results = {}
         for (descr, fn, Bi, Bo, it_kwds) in bench:
             print(descr)
-            for (kind, implementations) in self.implementations.iteritems():
-                for (name, impl) in implementations.iteritems():
+            for (kind, implementations) in self.implementations.items():
+                for (name, impl) in implementations.items():
                     results[name] = ()
                     if dtype not in impl.supported_ftypes:
                         continue
diff --git a/hysop/numerics/tests/test_fft.py b/hysop/numerics/tests/test_fft.py
index 150d31015d24459c64f8ca628986b573e070ee5f..9e50d506c1da6ffff1f7e5a9bc495ab320ec2ae1 100644
--- a/hysop/numerics/tests/test_fft.py
+++ b/hysop/numerics/tests/test_fft.py
@@ -115,8 +115,8 @@ class TestFFT(object):
             print('   FFT shape={:12s} ghosts={:12s} '.format(shape, str(ghosts)+':'),)
             Href = np.random.rand(2*N).astype(dtype).view(dtype=ctype).reshape(shape)
             results = {}
-            for (kind, implementations) in self.implementations.iteritems():
-                for (name, impl) in implementations.iteritems():
+            for (kind, implementations) in self.implementations.items():
+                for (name, impl) in implementations.items():
                     if dtype not in impl.supported_ftypes:
                         continue
                     try:
@@ -149,8 +149,8 @@ class TestFFT(object):
             print('   IFFT shape={:12s} ghosts={:12s} '.format(shape, str(ghosts)+':'),)
             Href = np.random.rand(2*N).astype(dtype).view(dtype=ctype).reshape(shape)
             results = {}
-            for (kind, implementations) in self.implementations.iteritems():
-                for (name, impl) in implementations.iteritems():
+            for (kind, implementations) in self.implementations.items():
+                for (name, impl) in implementations.items():
                     if dtype not in impl.supported_ftypes:
                         continue
                     try:
@@ -183,8 +183,8 @@ class TestFFT(object):
             print('   RFFT shape={:12s} ghosts={:12s} '.format(shape, str(ghosts)+':'), end=' ')
             Href = np.random.rand(*shape).astype(dtype).reshape(shape)
             results = {}
-            for (kind, implementations) in self.implementations.iteritems():
-                for (name, impl) in implementations.iteritems():
+            for (kind, implementations) in self.implementations.items():
+                for (name, impl) in implementations.items():
                     if dtype not in impl.supported_ftypes:
                         continue
                     try:
@@ -217,8 +217,8 @@ class TestFFT(object):
             print('   IRFFT shape={:12s} ghosts={:12s} '.format(shape, str(ghosts)+':'), end=' ')
             Href = np.random.rand(2*Nc).astype(dtype).view(dtype=ctype).reshape(cshape)
             results = {}
-            for (kind, implementations) in self.implementations.iteritems():
-                for (name, impl) in implementations.iteritems():
+            for (kind, implementations) in self.implementations.items():
+                for (name, impl) in implementations.items():
                     if dtype not in impl.supported_ftypes:
                         continue
                     try:
@@ -251,8 +251,8 @@ class TestFFT(object):
             print('   IRFFT shape={:12s} ghosts={:12s} '.format(shape, str(ghosts)+':'), end=' ')
             Href = np.random.rand(2*Nc).astype(dtype).view(dtype=ctype).reshape(cshape)
             results = {}
-            for (kind, implementations) in self.implementations.iteritems():
-                for (name, impl) in implementations.iteritems():
+            for (kind, implementations) in self.implementations.items():
+                for (name, impl) in implementations.items():
                     if dtype not in impl.supported_ftypes:
                         continue
                     try:
@@ -290,8 +290,8 @@ class TestFFT(object):
                     shape = mk_shape(shape, -1, shape[-1] + 1)
                 Href = np.random.rand(*shape).astype(dtype).reshape(shape)
                 results = {}
-                for (kind, implementations) in self.implementations.iteritems():
-                    for (name, impl) in implementations.iteritems():
+                for (kind, implementations) in self.implementations.items():
+                    for (name, impl) in implementations.items():
                         if dtype not in impl.supported_ftypes:
                             continue
                         if itype not in impl.supported_cosine_transforms:
@@ -332,8 +332,8 @@ class TestFFT(object):
                     shape = mk_shape(shape, -1, shape[-1] + 1)
                 Href = np.random.rand(*shape).astype(dtype).reshape(shape)
                 results = {}
-                for (kind, implementations) in self.implementations.iteritems():
-                    for (name, impl) in implementations.iteritems():
+                for (kind, implementations) in self.implementations.items():
+                    for (name, impl) in implementations.items():
                         if dtype not in impl.supported_ftypes:
                             continue
                         if iitype not in impl.supported_cosine_transforms:
@@ -374,8 +374,8 @@ class TestFFT(object):
                     shape = mk_shape(shape, -1, shape[-1] - 1)
                 Href = np.random.rand(*shape).astype(dtype).reshape(shape)
                 results = {}
-                for (kind, implementations) in self.implementations.iteritems():
-                    for (name, impl) in implementations.iteritems():
+                for (kind, implementations) in self.implementations.items():
+                    for (name, impl) in implementations.items():
                         if dtype not in impl.supported_ftypes:
                             continue
                         if itype not in impl.supported_sine_transforms:
@@ -416,8 +416,8 @@ class TestFFT(object):
                     shape = mk_shape(shape, -1, shape[-1] - 1)
                 Href = np.random.rand(*shape).astype(dtype).reshape(shape)
                 results = {}
-                for (kind, implementations) in self.implementations.iteritems():
-                    for (name, impl) in implementations.iteritems():
+                for (kind, implementations) in self.implementations.items():
+                    for (name, impl) in implementations.items():
                         if dtype not in impl.supported_ftypes:
                             continue
                         if iitype not in impl.supported_sine_transforms:
@@ -459,7 +459,7 @@ class TestFFT(object):
         def check_distances(distances):
             failed = False
             ss=()
-            for (name, Einf) in distances.iteritems():
+            for (name, Einf) in distances.items():
                 if isinstance(Einf, HysopFFTDataLayoutError):
                     s='{}=UNSUPPORTED_STRIDES'.format(name)
                 elif np.isfinite(Einf):
@@ -484,8 +484,8 @@ class TestFFT(object):
             print('   X - IFFT(FFT(X)) shape={:12s} ghosts={:12s}'.format(shape, str(ghosts)+':'), end=' ')
             Href = np.random.rand(2*N).astype(dtype).view(dtype=ctype).reshape(shape)
             results = {}
-            for (kind, implementations) in self.implementations.iteritems():
-                for (name, impl) in implementations.iteritems():
+            for (kind, implementations) in self.implementations.items():
+                for (name, impl) in implementations.items():
                     if dtype not in impl.supported_ftypes:
                         continue
                     D0 = mk_buffer(backend=impl.backend, shape=shape, dtype=ctype)
@@ -514,8 +514,8 @@ class TestFFT(object):
             print('   X - IRFFT(RFFT(X)) shape={:12s} ghosts={:12s} '.format(shape, str(ghosts)+':'), end=' ')
             Href = np.random.rand(*shape).astype(dtype).reshape(shape)
             results = {}
-            for (kind, implementations) in self.implementations.iteritems():
-                for (name, impl) in implementations.iteritems():
+            for (kind, implementations) in self.implementations.items():
+                for (name, impl) in implementations.items():
                     if dtype not in impl.supported_ftypes:
                         continue
                     D0 = mk_buffer(backend=impl.backend, shape=shape,  dtype=dtype)
@@ -552,8 +552,8 @@ class TestFFT(object):
                     shape = mk_shape(shape, -1, shape[-1] + 1)
                 Href = np.random.rand(*shape).astype(dtype).reshape(shape)
                 results = {}
-                for (kind, implementations) in self.implementations.iteritems():
-                    for (name, impl) in implementations.iteritems():
+                for (kind, implementations) in self.implementations.items():
+                    for (name, impl) in implementations.items():
                         iitype = [1,3,2,4][itype-1]
                         if dtype not in impl.supported_ftypes:
                             continue
@@ -592,8 +592,8 @@ class TestFFT(object):
                     shape = mk_shape(shape, -1, shape[-1] - 1)
                 Href = np.random.rand(*shape).astype(dtype).reshape(shape)
                 results = {}
-                for (kind, implementations) in self.implementations.iteritems():
-                    for (name, impl) in implementations.iteritems():
+                for (kind, implementations) in self.implementations.items():
+                    for (name, impl) in implementations.items():
                         iitype = [1,3,2,4][itype-1]
                         if dtype not in impl.supported_ftypes:
                             continue
@@ -664,11 +664,11 @@ class TestFFT(object):
         assert self.report_eps <= self.fail_eps
         failed = False
         cnt = 0
-        for (dtype, fails) in failures.iteritems():
+        for (dtype, fails) in failures.items():
             if not fails:
                 continue
             print('::{} precision tests'.format(dtype))
-            for (tag, tfails) in fails.iteritems():
+            for (tag, tfails) in fails.items():
                 print('  |{} transform errors:'.format(tag))
                 for (r0,r1, shape, Einf, Eeps) in sorted(tfails,
                             key=lambda x: -x[-2]):
diff --git a/hysop/operator/base/custom_symbolic_operator.py b/hysop/operator/base/custom_symbolic_operator.py
index 01e5a96692a2277a41370616c2c446d8670e7ee0..408432613dc84f0854d736948b0dd372b430cd02 100644
--- a/hysop/operator/base/custom_symbolic_operator.py
+++ b/hysop/operator/base/custom_symbolic_operator.py
@@ -75,7 +75,7 @@ class ExprDiscretizationInfo(object):
 
     def copy(self):
         edi = ExprDiscretizationInfo()
-        for (obj, counts) in self.read_counter.iteritems():
+        for (obj, counts) in self.read_counter.items():
             if isinstance(obj, self.IndexedCounterTypes):
                 counter = edi.read_counter.setdefault(obj,
                         npw.int_zeros(shape=(obj.nb_components,)))
@@ -85,7 +85,7 @@ class ExprDiscretizationInfo(object):
             else:
                 msg='Unsupported type {}.'.format(type(obj))
                 raise TypeError(msg)
-        for (obj, counts) in self.write_counter.iteritems():
+        for (obj, counts) in self.write_counter.items():
             if isinstance(obj, self.IndexedCounterTypes):
                 counter = edi.write_counter.setdefault(obj,
                         npw.int_zeros(shape=(obj.nb_components,)))
@@ -100,7 +100,7 @@ class ExprDiscretizationInfo(object):
 
     def update(self, other):
         check_instance(other, ExprDiscretizationInfo)
-        for (obj, counts) in other.read_counter.iteritems():
+        for (obj, counts) in other.read_counter.items():
             if isinstance(obj, self.IndexedCounterTypes):
                 counter = self.read_counter.setdefault(obj,
                         npw.int_zeros(shape=(obj.nb_components,)))
@@ -111,7 +111,7 @@ class ExprDiscretizationInfo(object):
             else:
                 msg='Unsupported type {}.'.format(type(obj))
                 raise TypeError(msg)
-        for (obj, counts) in other.write_counter.iteritems():
+        for (obj, counts) in other.write_counter.items():
             if isinstance(obj, self.IndexedCounterTypes):
                 counter = self.write_counter.setdefault(obj,
                         npw.int_zeros(shape=(obj.nb_components,)))
@@ -131,7 +131,7 @@ class ExprDiscretizationInfo(object):
 
     def __iadd__(self, rhs):
         check_instance(rhs, int)
-        for (obj, counts) in self.read_counter.iteritems():
+        for (obj, counts) in self.read_counter.items():
             if isinstance(obj, self.IndexedCounterTypes):
                 counts[counts>0] += rhs
             elif isinstance(obj, self.SimpleCounterTypes):
@@ -139,7 +139,7 @@ class ExprDiscretizationInfo(object):
             else:
                 msg='Unsupported type {}.'.format(type(obj))
                 raise TypeError(msg)
-        for (obj, counts) in self.write_counter.iteritems():
+        for (obj, counts) in self.write_counter.items():
             if isinstance(obj, self.IndexedCounterTypes):
                 counts[counts>0] += rhs
             elif isinstance(obj, self.SimpleCounterTypes):
@@ -151,7 +151,7 @@ class ExprDiscretizationInfo(object):
 
     def __imul__(self, rhs):
         check_instance(rhs, int)
-        for (obj, counts) in self.read_counter.iteritems():
+        for (obj, counts) in self.read_counter.items():
             if isinstance(obj, self.IndexedCounterTypes):
                 counts[...] = rhs*counts
             elif isinstance(obj, self.SimpleCounterTypes):
@@ -159,7 +159,7 @@ class ExprDiscretizationInfo(object):
             else:
                 msg='Unsupported type {}.'.format(type(obj))
                 raise TypeError(msg)
-        for (obj, counts) in self.write_counter.iteritems():
+        for (obj, counts) in self.write_counter.items():
             if isinstance(obj, self.IndexedCounterTypes):
                 counts[...] = rhs*counts
             elif isinstance(obj, self.SimpleCounterTypes):
@@ -258,12 +258,12 @@ class SymbolicExpressionInfo(object):
         return self._dim
     def _get_fields(self):
         """Return input and output fields."""
-        fields = {k: v for (k,v) in self.input_fields.iteritems()}
+        fields = {k: v for (k,v) in self.input_fields.items()}
         fields.update(self.output_fields)
         return fields
     def _get_params(self):
         """Return input and output fields."""
-        fields = {k: v for (k,v) in self.input_params.iteritems()}
+        fields = {k: v for (k,v) in self.input_params.items()}
         fields.update(self.output_params)
         return fields
 
@@ -331,11 +331,11 @@ class SymbolicExpressionInfo(object):
         check_instance(output_dfields, dict, keys=Field, values=DiscreteScalarFieldView)
         assert len(set(self.input_fields.keys())  - set(input_dfields.keys())) == 0
         assert len(set(self.output_fields.keys()) - set(output_dfields.keys())) == 0
-        self.input_dfields  = {k:v for (k,v) in input_dfields.iteritems()
+        self.input_dfields  = {k:v for (k,v) in input_dfields.items()
                                 if (k in self.input_fields)}
-        self.output_dfields = {k:v for (k,v) in output_dfields.iteritems()
+        self.output_dfields = {k:v for (k,v) in output_dfields.items()
                                 if (k in self.output_fields)}
-        self.inout_dfields = {k:v for (k,v) in self.output_dfields.iteritems()
+        self.inout_dfields = {k:v for (k,v) in self.output_dfields.items()
                 if ((k in self.input_dfields) and (self.input_dfields[k].dfield is v.dfield))}
         self.stencils = SortedDict()
 
@@ -463,16 +463,16 @@ class SymbolicExpressionInfo(object):
     ', '.join('{}'.format(p) for p in self.input_params.keys())  if self.input_params  else 'none',
     ', '.join('{}'.format(p) for p in self.output_params.keys()) if self.output_params else 'none',
     '\n'+'\n'.join('    {}: {}'.format(f.name, (d.short_description() if isinstance(d, TopologyView)
-                else d)) for (f,d) in self.fields.iteritems()))
+                else d)) for (f,d) in self.fields.items()))
         if self.min_ghosts:
             msg += '''
   min_ghosts_per_components:{}
   min_ghosts:{}
 '''.format(
     '\n'+'\n'.join('    {}/ [{}]'.format(f.name,', '.join(str(x) for x in gpc))
-                                    for f,gpc in self.min_ghosts_per_components.iteritems()),
+                                    for f,gpc in self.min_ghosts_per_components.items()),
     '\n'+'\n'.join('    {}/ [{}]'.format(f.name,', '.join(str(x) for x in g))
-                                                    for f,g in self.min_ghosts.iteritems()))
+                                                    for f,g in self.min_ghosts.items()))
         if self.is_discretized:
             msg += '''
 
@@ -849,7 +849,7 @@ class SymbolicExpressionParser(object):
                 raise TypeError(msg)
 
             min_ghosts_expr_i = SortedDict()
-            for (obj, reqs) in obj_reqs.iteritems():
+            for (obj, reqs) in obj_reqs.items():
                 if isinstance(obj, tuple) and isinstance(obj[0],Field):
                     if (obj in field_requirements):
                         field_requirements[obj].update_requirements(reqs)
@@ -869,8 +869,8 @@ class SymbolicExpressionParser(object):
                 min_ghosts_expr_i[k] = v
             min_ghosts_per_expr[i] = min_ghosts_expr_i
 
-        lhs_fields  = {v[2]:k for (k,v) in updated_fields.iteritems()}
-        lhs_arrays  = {v[1]:k for (k,v) in updated_arrays.iteritems()}
+        lhs_fields  = {v[2]:k for (k,v) in updated_fields.items()}
+        lhs_arrays  = {v[1]:k for (k,v) in updated_arrays.items()}
         lhs_objects = lhs_fields.copy()
         lhs_objects.update(lhs_arrays)
 
@@ -914,9 +914,9 @@ class SymbolicExpressionParser(object):
         info.nobjects = nobjects
 
         expr_ghost_map = npw.int_zeros(shape=(nlhsobjects, nobjects))
-        for (fi_name, i) in lhs_objects.iteritems():
+        for (fi_name, i) in lhs_objects.items():
             min_ghosts = min_ghosts_per_expr[i]
-            for (fj_name, min_ghost) in min_ghosts.iteritems():
+            for (fj_name, min_ghost) in min_ghosts.items():
                 assert (fj_name in all_objects), fj_name
                 j = all_objects[fj_name]
                 expr_ghost_map[i,j] = min_ghost
@@ -938,9 +938,9 @@ class SymbolicExpressionParser(object):
         else:
             min_ghosts = npw.int_zeros(shape=(nobjects,))
 
-        lhs_objects = { v:k for (k,v) in lhs_objects.iteritems() }
+        lhs_objects = { v:k for (k,v) in lhs_objects.items() }
         lhs_objects = tuple(lhs_objects[i] for i in range(nlhsobjects))
-        all_objects = { v:k for (k,v) in all_objects.iteritems() }
+        all_objects = { v:k for (k,v) in all_objects.items() }
         all_objects = tuple(all_objects[i] for i in range(nobjects))
 
         info.expr_ghost_map = expr_ghost_map
@@ -1016,7 +1016,7 @@ class SymbolicExpressionParser(object):
             min_ghosts = max(stencil.L, stencil.R)
 
             obj_reqs = cls._extract_obj_requirements(info, dexpr)
-            for (obj, req) in obj_reqs.iteritems():
+            for (obj, req) in obj_reqs.items():
                 req.min_ghosts[-1-direction] += min_ghosts
             return obj_reqs
         elif isinstance(expr, Assignment):
@@ -1035,7 +1035,7 @@ class SymbolicExpressionParser(object):
             obj_requirements = SortedDict()
             for e in expr.args:
                 obj_reqs = cls._extract_obj_requirements(info, e)
-                for (obj, reqs) in obj_reqs.iteritems():
+                for (obj, reqs) in obj_reqs.items():
                     if obj in obj_requirements:
                         obj_requirements[obj].update_requirements(reqs)
                     else:
@@ -1358,7 +1358,7 @@ class CustomSymbolicOperatorBase(DirectionalOperatorBase):
         dt_coeff = first_not_None(dt_coeff, 1.0)
 
         # Expand tensor fields to scalar fields
-        scalar_variables = {sfield: topod for (tfield,topod) in variables.iteritems()
+        scalar_variables = {sfield: topod for (tfield,topod) in variables.items()
                                           for  sfield in tfield.fields }
 
         expr_info = SymbolicExpressionParser.parse(name, scalar_variables, *exprs, dt=dt, dt_coeff=dt_coeff,
@@ -1467,9 +1467,9 @@ class CustomSymbolicOperatorBase(DirectionalOperatorBase):
                     min_ghosts_per_components[field] = min_ghosts
 
         expr_info.min_ghosts = {k:npw.max(v, axis=0) for (k,v) in \
-                                        min_ghosts_per_components.iteritems()}
+                                        min_ghosts_per_components.items()}
         expr_info.min_ghosts_per_components = {field:gpc[:,-1-direction]
-                                for (field,gpc) in min_ghosts_per_components.iteritems()}
+                                for (field,gpc) in min_ghosts_per_components.items()}
 
         for (array, reqs) in array_reqs:
             expr_info.min_ghosts[array] = reqs.min_ghosts.copy()
diff --git a/hysop/operator/base/external_force.py b/hysop/operator/base/external_force.py
index ee94138d958d956c469c93053ecbd371069652b0..7cfb017985d830c467651d9b25a5a3bb7213a419 100644
--- a/hysop/operator/base/external_force.py
+++ b/hysop/operator/base/external_force.py
@@ -31,7 +31,7 @@ class ExternalForce(NamedObjectI):
             msg=msg.format(dim, f.dim, f.short_description())
             assert f.dim == dim, msg
 
-    
+
     @property
     def Fext(self):
         return self._Fext
@@ -79,7 +79,7 @@ class SpectralExternalForceOperatorBase(SpectralOperatorBase):
     """
     Compute the curl of a symbolic expression and perfom Euler time integration.
     """
-    
+
     @debug
     def __init__(self, vorticity, Fext, dt, variables,
                     Fmin=None, Fmax=None, Finf=None,
@@ -97,7 +97,7 @@ class SpectralExternalForceOperatorBase(SpectralOperatorBase):
             Fmax = max(force)
             Finf = max(abs(Fmin), abs(Fmax))
             W   += dt*force
-        
+
         where Fext is computed from user given ExternalForce.
 
         Parameters
@@ -131,7 +131,7 @@ class SpectralExternalForceOperatorBase(SpectralOperatorBase):
         check_instance(Fmax, (ScalarParameter,TensorParameter), allow_none=True)
         check_instance(Finf, (ScalarParameter,TensorParameter), allow_none=True)
         check_instance(variables, dict, keys=Field, values=CartesianTopologyDescriptors)
-        
+
         if (Fmin is not None):
             Fmin.value = npw.asarray((1e8,)*vorticity.nb_components, dtype=Fmin.dtype)
         if (Fmax is not None):
@@ -167,7 +167,7 @@ class SpectralExternalForceOperatorBase(SpectralOperatorBase):
         else:
             msg='Unsupported dimension {}.'.format(dim)
             raise RuntimeError(msg)
-            
+
         msg='TensorParameter shape mismatch, expected {} but got {{}} for parameter {{}}.'
         msg=msg.format(pshape)
         if isinstance(Fmin, TensorParameter):
@@ -201,7 +201,7 @@ class SpectralExternalForceOperatorBase(SpectralOperatorBase):
         output_fields = {f: self.get_topo_descriptor(variables, f) for f in output_fields}
         input_params  = {p.name: p for p in input_params}
         output_params = {p.name: p for p in output_params}
-        
+
         # TODO share tmp buffers for the whole tensor
         force = vorticity.tmp_like(name='Fext', ghosts=0, mem_tag='tmp_fext')
         for (Fi, Wi) in zip(force.fields, vorticity.fields):
@@ -209,7 +209,7 @@ class SpectralExternalForceOperatorBase(SpectralOperatorBase):
             output_fields[Fi] = self.get_topo_descriptor(variables, Wi)
 
         super(SpectralExternalForceOperatorBase, self).__init__(
-                input_fields=input_fields, output_fields=output_fields, 
+                input_fields=input_fields, output_fields=output_fields,
                 input_params=input_params, output_params=output_params,
                 **kwds)
 
@@ -221,13 +221,13 @@ class SpectralExternalForceOperatorBase(SpectralOperatorBase):
         self.dim = dim
         self.w_components = w_components
         self.f_components = f_components
-        
+
         self.Fmin = Fmin
         self.Fmax = Fmax
         self.Finf = Finf
         self.compute_statistics  = compute_statistics
-        
-################### 
+
+###################
 # from now on, we delegate everything to the ExternalForce implementation
 ###################
     def initialize(self, **kwds):
@@ -235,7 +235,7 @@ class SpectralExternalForceOperatorBase(SpectralOperatorBase):
             return
         self.Fext.initialize(self)
         return super(SpectralExternalForceOperatorBase, self).initialize(**kwds)
-        
+
     @debug
     def discretize(self):
         if self.discretized:
@@ -247,15 +247,15 @@ class SpectralExternalForceOperatorBase(SpectralOperatorBase):
 
     def get_work_properties(self):
         requests = super(SpectralExternalForceOperatorBase, self).get_work_properties()
-        for (name, request) in self.Fext.get_mem_requests(self).iteritems():
+        for (name, request) in self.Fext.get_mem_requests(self).items():
             requests.push_mem_request(name, request)
         return requests
-    
+
     def setup(self, work):
         self.Fext.pre_setup(self, work)
         super(SpectralExternalForceOperatorBase, self).setup(work)
         self.Fext.post_setup(self, work)
-    
+
     def apply(self, **kwds):
         self.Fext.apply(self, **kwds)
 
diff --git a/hysop/operator/base/min_max.py b/hysop/operator/base/min_max.py
index 2ec39a464b69f1803725ca5a87b23e10c55884f3..9aadc64bca4bc579c49d4381188a8d14b1788341 100644
--- a/hysop/operator/base/min_max.py
+++ b/hysop/operator/base/min_max.py
@@ -66,7 +66,7 @@ class MinMaxFieldStatisticsBase(object):
 
         parameters = {}
         _parameters = dict(Fmin=Fmin, Fmax=Fmax, Finf=Finf)
-        for (k,v) in _parameters.iteritems():
+        for (k,v) in _parameters.items():
             param = _parameters[k]
             if isinstance(param, TensorParameter):
                 pass
@@ -193,7 +193,7 @@ class MinMaxFieldStatisticsBase(object):
                     input_fields=input_fields, output_params=output_params, **kwds)
             self.has_derivative = False
 
-        for (pname, param) in parameters.iteritems():
+        for (pname, param) in parameters.items():
             setattr(self, pname, param)
             coeffs.setdefault(pname, 1)
 
@@ -226,7 +226,7 @@ class MinMaxFieldStatisticsBase(object):
         else:
             comm = self.mpi_params.comm
             Fmin, Fmax = self.Fmin, self.Fmax
-            if (self.Fmax is not None): 
+            if (self.Fmax is not None):
                 sendbuff = npw.zeros_like(Fmax.value)
                 recvbuff = npw.zeros_like(Fmax.value)
                 def _collect_max(val, sendbuff=sendbuff, recvbuff=recvbuff):
@@ -235,7 +235,7 @@ class MinMaxFieldStatisticsBase(object):
                     return recvbuff.copy()
             else:
                 _collect_max = None
-            if (self.Fmin is not None): 
+            if (self.Fmin is not None):
                 sendbuff = npw.zeros_like(Fmin.value)
                 recvbuff = npw.zeros_like(Fmin.value)
                 def _collect_min(val, sendbuff=sendbuff, recvbuff=recvbuff):
diff --git a/hysop/operator/base/poisson_curl.py b/hysop/operator/base/poisson_curl.py
index 0f95a700bb64db93d708bf72be2e7c39d476c9d2..68f0ad50e18f5691d0a5351e8217aa08aa5f927c 100644
--- a/hysop/operator/base/poisson_curl.py
+++ b/hysop/operator/base/poisson_curl.py
@@ -20,18 +20,18 @@ class PoissonCurlOperatorBase(object):
     """
     Solves the poisson-rotational equation using a specific implementation.
     """
-    
+
     @debug
-    def __init__(self, vorticity, velocity, variables, 
-            diffusion=None, dt=None, projection=None, 
-            dump_energy=None, dump_velocity_energy=None, 
+    def __init__(self, vorticity, velocity, variables,
+            diffusion=None, dt=None, projection=None,
+            dump_energy=None, dump_velocity_energy=None,
             dump_input_vorticity_energy=None, dump_output_vorticity_energy=None,
-            plot_energy=None, plot_velocity_energy=None, 
+            plot_energy=None, plot_velocity_energy=None,
             plot_input_vorticity_energy=None, plot_output_vorticity_energy=None, plot_inout_vorticity_energy=None,
-            **kwds): 
+            **kwds):
         """
         PoissonCurl operator to solve incompressible flows using various fft backends.
-        
+
         Parameters
         ----------
         velocity : :class:`~hysop.fields.continuous_field.Field
@@ -48,7 +48,7 @@ class PoissonCurlOperatorBase(object):
             If diffusion is not enabled, this parameter is ignored.
         projection: hysop.constants.FieldProjection or positive integer, optional
             Project vorticity such that resolved velocity is divergence free (for 3D fields).
-            When active, projection is done prior to every solve, unless projection is 
+            When active, projection is done prior to every solve, unless projection is
             an integer in which case it is done every given steps.
             This parameter is ignored for 2D fields and defaults to no projection.
         dump_energy: IOParams, optional, defaults to None
@@ -69,16 +69,16 @@ class PoissonCurlOperatorBase(object):
             Plot output vorticity field energy and save the plot to a custom file. Defaults to no plot.
         plot_inout_vorticity_energy: IOParams, optional, defaults to None
             Plot vorticity field energy before and after diffusion and projection on the same graph.
-        kwds : 
+        kwds :
             Base class parameters.
 
         Notes:
         ------
         All dump energy arguments also enables scalar energy dumping.
         This is not true for energy plotting.
-        Passing an integer instead of a IOParams will disable dump and plot arguments. 
+        Passing an integer instead of a IOParams will disable dump and plot arguments.
         """
-        
+
         check_instance(velocity,  Field)
         check_instance(vorticity, Field)
         check_instance(variables, dict, keys=Field, values=CartesianTopologyDescriptors)
@@ -106,25 +106,25 @@ class PoissonCurlOperatorBase(object):
                 raise RuntimeError(msg)
         else:
             dt = None
-        
+
         # check for projection
         if (projection == FieldProjection.NONE) or (projection is None) \
                 or (projection==0) or (velocity.dim!=3):
             projection = FieldProjection.NONE
         should_project = (projection is not FieldProjection.NONE)
-        
+
         if (projection == FieldProjection.NONE):
             do_project = lambda simu: False
         elif (projection == FieldProjection.EVERY_STEP):
             do_project = lambda simu: True
-        else: # projection is an integer representing frequency 
+        else: # projection is an integer representing frequency
             freq = projection
             if (freq>=1):
                 do_project = lambda simu: ((simu.current_iteration % freq)==0)
             else:
                 msg='Got negative projection frequency {}.'.format(freq)
                 raise ValueError(msg)
-        
+
         # check fields
         dim=velocity.dim
         wcomp = vorticity.nb_components
@@ -138,7 +138,7 @@ class PoissonCurlOperatorBase(object):
             raise RuntimeError('Velocity projection only available in 3D.')
         if (velocity.dtype != vorticity.dtype):
             raise RuntimeError('Datatype mismatch between velocity and vorticity.')
-        
+
         # input and output fields
         vtopology = variables[velocity]
         wtopology = variables[vorticity]
@@ -151,7 +151,7 @@ class PoissonCurlOperatorBase(object):
             input_params[dt.name] = dt
         if (should_diffuse or should_project):
             output_fields[vorticity] = wtopology
-       
+
         dump_Uout_E   = first_not_None(dump_velocity_energy,         dump_energy)
         dump_Win_E    = first_not_None(dump_input_vorticity_energy,  dump_energy)
         dump_Wout_E   = first_not_None(dump_output_vorticity_energy, dump_energy)
@@ -183,14 +183,14 @@ class PoissonCurlOperatorBase(object):
         compute_Uout_E_param = EnergyDumper.build_energy_parameter(do_compute=do_compute_Uout_E, field=velocity,  output_params=output_params, prefix='out')
         compute_Wout_E_param = EnergyDumper.build_energy_parameter(do_compute=do_compute_Wout_E, field=vorticity, output_params=output_params, prefix='out')
 
-        super(PoissonCurlOperatorBase, self).__init__(input_fields=input_fields, 
-                output_fields=output_fields, input_params=input_params, 
+        super(PoissonCurlOperatorBase, self).__init__(input_fields=input_fields,
+                output_fields=output_fields, input_params=input_params,
                 output_params=output_params, **kwds)
 
         self.W = vorticity
         self.U = velocity
         self.dim = dim
-        
+
         self.should_diffuse = should_diffuse
         self.nu = diffusion
         self.dt = dt
@@ -207,7 +207,7 @@ class PoissonCurlOperatorBase(object):
         self.compute_energy_parameters  = {'Uout': compute_Uout_E_param, 'Win': compute_Win_E_param, 'Wout': compute_Wout_E_param }
         self.compute_energy_frequencies = {'Uout': compute_Uout_E_freqs, 'Win': compute_Win_E_freqs, 'Wout': compute_Wout_E_freqs }
 
-    
+
     @debug
     def discretize(self):
         if self.discretized:
@@ -215,12 +215,12 @@ class PoissonCurlOperatorBase(object):
         super(PoissonCurlOperatorBase, self).discretize()
         self.dW = self.get_input_discrete_field(self.W)
         self.dU = self.get_output_discrete_field(self.U)
-        
+
 
 class SpectralPoissonCurlOperatorBase(PoissonCurlOperatorBase, SpectralOperatorBase):
     @debug
-    def __init__(self, vorticity, velocity, variables, 
-            diffusion=None, dt=None, projection=None, **kwds): 
+    def __init__(self, vorticity, velocity, variables,
+            diffusion=None, dt=None, projection=None, **kwds):
 
         super(SpectralPoissonCurlOperatorBase, self).__init__(
                 vorticity=vorticity, velocity=velocity, variables=variables,
@@ -234,8 +234,8 @@ class SpectralPoissonCurlOperatorBase(PoissonCurlOperatorBase, SpectralOperatorB
 
         # build spectral transforms
         tg = self.new_transform_group()
-        W_forward_transforms  = to_tuple(tg.require_forward_transform(vorticity, 
-            compute_energy_frequencies=ce_freqs['Win'], 
+        W_forward_transforms  = to_tuple(tg.require_forward_transform(vorticity,
+            compute_energy_frequencies=ce_freqs['Win'],
             dump_energy=de_iops['Win']))
         U_backward_transforms = to_tuple(tg.require_backward_transform(velocity,
                                                             custom_input_buffer='B0',
@@ -247,7 +247,7 @@ class SpectralPoissonCurlOperatorBase(PoissonCurlOperatorBase, SpectralOperatorB
                 dump_energy=de_iops['Wout']))
         else:
             W_backward_transforms = (None,)*dim
-        
+
         W_Fts = npw.asarray([x.s for x in W_forward_transforms])
         U_Bts = npw.asarray([x.s for x in U_backward_transforms])
         W_Bts = npw.asarray([None if (x is None) else x.s for x in W_backward_transforms])
@@ -266,7 +266,7 @@ class SpectralPoissonCurlOperatorBase(PoissonCurlOperatorBase, SpectralOperatorB
             kd2 = sorted(tg.push_expressions(*to_tuple(expr2)), key=lambda k: k.axis)
             kd2s += (kd2,)
 
-        # curl 
+        # curl
         if (dim==2):
             W2,    = W_forward_transforms
             U0, U1 = U_backward_transforms
@@ -287,14 +287,14 @@ class SpectralPoissonCurlOperatorBase(PoissonCurlOperatorBase, SpectralOperatorB
             Uin  = (W2, W1, W0, W2, W1, W0)
             Uout = (U0, U0, U1, U1, U2, U2)
             Uk   = tuple(tg.push_expressions(e)[0] for e in exprs)
-        else: 
+        else:
             raise NotImplementedError
-        
+
         self.tg = tg
         self.W_forward_transforms  = W_forward_transforms
         self.U_backward_transforms = U_backward_transforms
         self.W_backward_transforms = W_backward_transforms
-        
+
         self.W_Fts = W_Fts
         self.U_Bts = U_Bts
         self.W_Bts = W_Bts
@@ -305,13 +305,13 @@ class SpectralPoissonCurlOperatorBase(PoissonCurlOperatorBase, SpectralOperatorB
         self.Uin  = Uin
         self.Uout = Uout
         self.Uk   = Uk
-    
+
     @debug
     def discretize(self):
         if self.discretized:
             return
         super(SpectralPoissonCurlOperatorBase, self).discretize()
-            
+
         fig_titles = {'Win':    u'Energy of input vorticity {}',
                       'Uout':   u'Energy of output velocity {}',
                       'Wout':   u'Energy of output vorticity {}' }
@@ -321,9 +321,9 @@ class SpectralPoissonCurlOperatorBase(PoissonCurlOperatorBase, SpectralOperatorB
         get_dfield = { 'Win':  self.dW,
                        'Wout': self.dW,
                        'Uout': self.dU }
-        
+
         compute_fns = ()
-        for (k, v) in self.do_compute_energy.iteritems():
+        for (k, v) in self.do_compute_energy.items():
             if not v:
                 assert not self.do_dump_energy[k]
                 assert not self.do_plot_energy[k]
@@ -332,15 +332,15 @@ class SpectralPoissonCurlOperatorBase(PoissonCurlOperatorBase, SpectralOperatorB
             for tr in get_transforms[k]:
                 if tr._energy_parameter is None:
                     msg='Energy parameter of {}.{} has not been build.'
-                    raise RuntimeError(msg.format(tr.field.name, 
+                    raise RuntimeError(msg.format(tr.field.name,
                         'forward' if tr.is_forward else 'backward'))
             E_params  = tuple(tr._energy_parameter for tr in get_transforms[k])
             E_buffers = tuple(p.value for p in E_params)
             E_size    = max(p.size for p in E_params)
-            
+
             Ep = self.compute_energy_parameters[k]
             Ep.reallocate_buffer(shape=(E_size,), dtype=self.dU.dtype)
-            
+
             if self.do_dump_energy[k]:
                 iop = self.dump_energy_ioparams[k]
                 assert (iop is not None)
@@ -353,7 +353,7 @@ class SpectralPoissonCurlOperatorBase(PoissonCurlOperatorBase, SpectralOperatorB
                 assert (iop is not None)
                 pname = u'{}.{}'.format(self.__class__.__name__, dfield.pretty_name.decode('utf-8'))
                 energy_parameters = { pname : Ep }
-                plt = EnergyPlotter(energy_parameters=energy_parameters, 
+                plt = EnergyPlotter(energy_parameters=energy_parameters,
                         io_params=iop, fname=k,
                         fig_title=fig_titles[k].format(dfield.pretty_name.decode('utf-8')))
             else:
@@ -381,9 +381,9 @@ class SpectralPoissonCurlOperatorBase(PoissonCurlOperatorBase, SpectralOperatorB
             assert (iop is not None)
             pname_in  = u'{}.input.{}'.format(self.__class__.__name__, self.dW.pretty_name.decode('utf-8'))
             pname_out = u'{}.output.{}'.format(self.__class__.__name__, self.dW.pretty_name.decode('utf-8'))
-            energy_parameters = { pname_in:  self.compute_energy_parameters['Win'], 
+            energy_parameters = { pname_in:  self.compute_energy_parameters['Win'],
                                   pname_out: self.compute_energy_parameters['Wout'] }
-            plt = EnergyPlotter(energy_parameters=energy_parameters, 
+            plt = EnergyPlotter(energy_parameters=energy_parameters,
                     io_params=iop, fname='Winout',
                     fig_title='Input and output vorticity energy (diffusion={:.2e}, project={})'.format(self.nu(), self.projection))
             def compute_fn(simulation, plt=plt):
@@ -414,16 +414,16 @@ class SpectralPoissonCurlOperatorBase(PoissonCurlOperatorBase, SpectralOperatorB
                 idx, dkd, nd_dkd = self.tg.discrete_wave_numbers[wi]
                 dkd2[idx] = dkd
             dkd2s += (tuple(dkd2),)
-        
+
         dUk = ()
         for ki in self.Uk:
             _, dki, _ = self.tg.discrete_wave_numbers[ki]
             dUk += (dki,)
-        
+
         self.dkd1s = dkd1s
         self.dkd2s = dkd2s
         self.dUk   = dUk
-    
+
     def get_work_properties(self):
         requests = super(SpectralPoissonCurlOperatorBase, self).get_work_properties()
         Ut = self.U_backward_transforms
@@ -440,22 +440,22 @@ class SpectralPoissonCurlOperatorBase(PoissonCurlOperatorBase, SpectralOperatorB
                 assert (Ft.output_shape == Bt.input_shape), (Ft.output_shape, Bt.input_shape)
             shape = Ft.output_shape
             dtype = Ft.output_dtype
-            request = MemoryRequest(backend=self.tg.backend, dtype=dtype, 
+            request = MemoryRequest(backend=self.tg.backend, dtype=dtype,
                                     shape=shape, nb_components=1,
                                     alignment=self.min_fft_alignment)
             requests.push_mem_request('fft_buffer_{}'.format(i), request)
         return requests
 
-    
+
     def setup(self, work):
         dim = self.dim
         Ks, KKs = self.dkd1s, self.dkd2s
 
-        W_forward_transforms  = self.W_forward_transforms 
+        W_forward_transforms  = self.W_forward_transforms
         W_backward_transforms = self.W_backward_transforms
         U_backward_transforms = self.U_backward_transforms
 
-        for (i,(W_Ft,W_Bt)) in enumerate(zip(W_forward_transforms, 
+        for (i,(W_Ft,W_Bt)) in enumerate(zip(W_forward_transforms,
                                              W_backward_transforms)):
             dtmp, = work.get_buffer(self, 'fft_buffer_{}'.format(i))
             W_Ft.configure_output_buffer(dtmp)
@@ -480,7 +480,7 @@ class SpectralPoissonCurlOperatorBase(PoissonCurlOperatorBase, SpectralOperatorB
         self.WIN  = WIN
         self.K  = K
         self.KK = KK
-                
+
         UIN, UOUT, UK = (), (), ()
         assert len(self.Uin) == len(self.Uout) == len(self.dUk)
         for i,(Uin, Uout, Uk) in enumerate(zip(self.Uin, self.Uout, self.dUk)):
@@ -489,7 +489,7 @@ class SpectralPoissonCurlOperatorBase(PoissonCurlOperatorBase, SpectralOperatorB
             UIN  += (Uin,)
             UOUT += (Uout,)
             UK   += (Uk,)
-        
+
         self.UIN  = UIN
         self.UOUT = UOUT
         self.UK   = UK
diff --git a/hysop/operator/base/spatial_filtering.py b/hysop/operator/base/spatial_filtering.py
index ff79da9c0b4e8a31565994d5311c29b29b10d112..077bbe60aa50d0ccb223319e8c51413c8cc6f13d 100644
--- a/hysop/operator/base/spatial_filtering.py
+++ b/hysop/operator/base/spatial_filtering.py
@@ -331,7 +331,7 @@ class RemeshRestrictionFilterBase(RestrictionFilterBase):
                 nz_weights[idx] = W
         Ws = weights.sum()
         weights = weights / Ws
-        nz_weights = {k: v/Ws for (k,v) in nz_weights.iteritems()}
+        nz_weights = {k: v/Ws for (k,v) in nz_weights.items()}
 
         assert abs(weights.sum() - 1.0) < 1e-8, weights.sum()
-        assert abs(npw.sum(nz_weights.values()) - 1.0) < 1e-8, npw.sum(nz_weights.values())
+        assert abs(npw.sum(tuple(nz_weights.values())) - 1.0) < 1e-8, npw.sum(tuple(nz_weights.values()))
diff --git a/hysop/operator/base/spectral_operator.py b/hysop/operator/base/spectral_operator.py
index 6fe3ef1ba1539b28a606bc8efafda67910a8cb18..d099078ab9738e4982586f841d9156aebe08ca27 100644
--- a/hysop/operator/base/spectral_operator.py
+++ b/hysop/operator/base/spectral_operator.py
@@ -215,7 +215,7 @@ class SpectralOperatorBase(object):
     def get_mem_requests(self, **kwds):
         memory_requests = {}
         for tg in self.transform_groups.values():
-            for (k, v) in tg.get_mem_requests(**kwds).iteritems():
+            for (k, v) in tg.get_mem_requests(**kwds).items():
                 check_instance(k, str)  # temporary buffer name
                 check_instance(v, int)  # nbytes
                 K = (k, tg.backend)
@@ -227,7 +227,7 @@ class SpectralOperatorBase(object):
 
     def get_work_properties(self, **kwds):
         requests = super(SpectralOperatorBase, self).get_work_properties(**kwds)
-        for ((k, backend), v) in self.get_mem_requests(**kwds).iteritems():
+        for ((k, backend), v) in self.get_mem_requests(**kwds).items():
             check_instance(k, str)
-            check_instance(v, (int, long))
+            check_instance(v, int)
             if (v > 0):
@@ -550,7 +550,7 @@ class SpectralTransformGroup(object):
         for fwd in self.forward_transforms.values():
             mem_requests = fwd.get_mem_requests(**kwds)
             check_instance(mem_requests, dict, keys=str, values=(int, long))
-            for (k, v) in mem_requests.iteritems():
+            for (k, v) in mem_requests.items():
                 if k in memory_requests:
                     memory_requests[k] = max(memory_requests[k], v)
                 else:
@@ -558,7 +558,7 @@ class SpectralTransformGroup(object):
         for bwd in self.backward_transforms.values():
             mem_requests = bwd.get_mem_requests(**kwds)
             check_instance(mem_requests, dict, keys=str, values=(int, long))
-            for (k, v) in mem_requests.iteritems():
+            for (k, v) in mem_requests.items():
                 if k in memory_requests:
                     memory_requests[k] = max(memory_requests[k], v)
                 else:
diff --git a/hysop/operator/derivative.py b/hysop/operator/derivative.py
index 75355a747e37212fccf1ff8ecf50039c96633ef5..b7c8a91ac7ede6ba52518dda069b8f8dc4d811a8 100644
--- a/hysop/operator/derivative.py
+++ b/hysop/operator/derivative.py
@@ -245,7 +245,7 @@ class MultiSpaceDerivatives(DirectionalOperatorGeneratorI, ComputationalGraphNod
              _variables += (var,)
          params['variables'] = _variables
 
-         params.update({k:fmt(v) for (k,v) in extra_params.iteritems()})
+         params.update({k:fmt(v) for (k,v) in extra_params.items()})
          self._params = params
          self._cls = cls
          self._op_kwds = op_kwds
diff --git a/hysop/operator/directional/directional.py b/hysop/operator/directional/directional.py
index 794e49fdf31bcaae29d8e060f3abeb825f59b8f0..c3b7af6853dfd11fdcb7b3070e8459a15c88395b 100644
--- a/hysop/operator/directional/directional.py
+++ b/hysop/operator/directional/directional.py
@@ -251,7 +251,7 @@ class DirectionalOperatorGenerator(DirectionalOperatorGeneratorI):
                     splitting_direction=i, dt_coeff=dt_coeff, **kargs)
         except:
             sargs = ['*{} = {}'.format(k,v.__class__)
-                        for (k,v) in kargs.iteritems()]
+                        for (k,v) in kargs.items()]
             msg  = 'FATAL ERROR during {}.generate():\n'
             msg += ' => failed to call {}.__init__()\n    with the following keywords:'
             msg += '\n     '+'\n     '.join(sargs)
diff --git a/hysop/operator/directional/symbolic_dir.py b/hysop/operator/directional/symbolic_dir.py
index 1ba95ade149399f7ba85ad30ca4f968af4b4fcc9..b1b5b905479f4336e1d2839dd8c7bf0555d71125 100644
--- a/hysop/operator/directional/symbolic_dir.py
+++ b/hysop/operator/directional/symbolic_dir.py
@@ -17,10 +17,10 @@ from hysop.symbolic.relational import Assignment
 class DirectionalSymbolic(DirectionalOperatorFrontend):
     """
     Custom symbolic expression directional splitting.
-    Available implementations are: 
+    Available implementations are:
         *OpenCL
     """
-    
+
     @classmethod
     def implementations(cls):
         from hysop.backend.device.opencl.operator.custom_symbolic import \
@@ -29,33 +29,33 @@ class DirectionalSymbolic(DirectionalOperatorFrontend):
                 Implementation.OPENCL: OpenClCustomSymbolicOperator
         }
         return __implementations
-    
+
     @classmethod
     def default_implementation(cls):
         return Implementation.OPENCL
-    
-    
+
+
     @debug
-    def __init__(self, name, exprs, variables, 
+    def __init__(self, name, exprs, variables,
                 no_split=False, fixed_residue=0, force_residue=None,
-                implementation=None, base_kwds=None, 
+                implementation=None, base_kwds=None,
                 candidate_input_tensors=None,
                 candidate_output_tensors=None,
                 **kwds):
         """
         Initialize a DirectionalSymbolic operator frontend.
-        
+
         Expressions are first splitted using contiguous memory
         accesses imposed by the discretization of field derivatives
         with finite differences.
-        
+
         If the splitting is not possible (ie. an expression contains
         multiple derivatives in different directions that cannot be
         splitted), a ValueError is raised.
 
         See hysop.operator.base.custom_symbolic.CustomSymbolicOperatorBase
         to see how expressions are parsed.
-        
+
         Parameters
         ----------
         name: str
@@ -81,12 +81,12 @@ class DirectionalSymbolic(DirectionalOperatorFrontend):
             Base class keywords arguments.
             If None, an empty dict will be passed.
         kwds:
-            Keywords arguments that will be passed towards implementation 
+            Keywords arguments that will be passed towards implementation
             operator __init__.
-        
+
         Notes
         -----
-        A valid implementation should at least support the 
+        A valid implementation should at least support the
         hysop.base.custom_symbolic.CustomSymbolicOperatorBase interface.
         """
         base_kwds = first_not_None(base_kwds, {})
@@ -100,7 +100,7 @@ class DirectionalSymbolic(DirectionalOperatorFrontend):
         if len(fixed_residue) == 1:
             fixed_residue *= len(exprs)
         check_instance(fixed_residue, tuple, size=len(exprs))
-        
+
         force_residue = to_tuple(force_residue)
         if len(force_residue) == 1:
             force_residue *= len(exprs)
@@ -108,13 +108,13 @@ class DirectionalSymbolic(DirectionalOperatorFrontend):
 
         candidate_input_tensors  = first_not_None(candidate_input_tensors, variables.keys())
         candidate_output_tensors = first_not_None(candidate_output_tensors, variables.keys())
-        
+
         super(DirectionalSymbolic, self).__init__(name=name, variables=variables,
-                base_kwds=base_kwds, implementation=implementation, 
+                base_kwds=base_kwds, implementation=implementation,
                 candidate_input_tensors=candidate_input_tensors,
                 candidate_output_tensors=candidate_output_tensors,
                 **kwds)
-        
+
         if not issubclass(self._operator, CustomSymbolicOperatorBase):
             msg='Class {} does not inherit from the directional operator interface '
             msg+='({}).'
@@ -131,7 +131,7 @@ class DirectionalSymbolic(DirectionalOperatorFrontend):
             directional_exprs = {}
             for e, fixed_res, forced_res in zip(exprs, fixed_residue, force_residue):
                 se = split_assignement(e, fixed_res, forced_res, None)
-                for (k,v) in se.iteritems():
+                for (k,v) in se.items():
                     if (v==0):
                         continue
                     if (k in directional_exprs):
diff --git a/hysop/operator/gradient.py b/hysop/operator/gradient.py
index fdec7bb93fce00e3c73f179be8459f4a21b3fb06..0aaf9da377950a6dd70b03766007f4fb8af54bac 100644
--- a/hysop/operator/gradient.py
+++ b/hysop/operator/gradient.py
@@ -321,9 +321,9 @@ class MinMaxGradientStatistics(Gradient):
         pbasename  = first_not_None(pbasename, gradF.name)
         ppbasename = first_not_None(ppbasename, gradF.pretty_name)
 
-        names = { k: v.format(pbasename) for (k,v) in _names.iteritems() }
+        names = { k: v.format(pbasename) for (k,v) in _names.items() }
         pretty_names = { k: v.format(ppbasename.decode('utf-8'))
-                            for (k,v) in _pretty_names.iteritems() }
+                            for (k,v) in _pretty_names.items() }
 
         def make_param(k, quiet):
             return TensorParameter(name=names[k], pretty_name=pretty_names[k],
@@ -331,7 +331,7 @@ class MinMaxGradientStatistics(Gradient):
 
         parameters = {}
         _parameters = dict(Fmin=Fmin, Fmax=Fmax, Finf=Finf)
-        for (k,v) in _parameters.iteritems():
+        for (k,v) in _parameters.items():
             param = _parameters[k]
             if isinstance(param, TensorParameter):
                 pass
@@ -363,7 +363,7 @@ class MinMaxGradientStatistics(Gradient):
                          'implementation': implementation }
 
         for (idx, Fi) in gradF.nd_iter():
-            for (statname, stat) in parameters.iteritems():
+            for (statname, stat) in parameters.items():
                 if (stat is None):
                     continue
                 pname  = _names[statname].format(Fi.name)
@@ -389,7 +389,7 @@ class MinMaxGradientStatistics(Gradient):
                 super(MergeTensorViewsOperator, self).apply(**kwds)
                 if not print_tensors:
                     return
-                for (k,v) in _parameters.iteritems():
+                for (k,v) in _parameters.items():
                     if (v is not None) and (v is not False):
                         param = parameters[k]
                         msg='>Parameter {} set to:\n{}'.format(param.pretty_name, param.value)
diff --git a/hysop/operator/hdf_io.py b/hysop/operator/hdf_io.py
index d80ef7cb758aa189127fc4d34a3e582e23219ccc..0334725246d1f8cb1b7d16956bed31b2b6b34639 100755
--- a/hysop/operator/hdf_io.py
+++ b/hysop/operator/hdf_io.py
@@ -148,7 +148,7 @@ class HDF_IO(ComputationalGraphOperator):
         # like a OpenCL mapped memory backend or when we do not want
         # to allocate memory for a topology that is just used for I/O.
         td_kwds = self._td_kwds
-        for (field, topo_descriptor) in self.input_fields.iteritems():
+        for (field, topo_descriptor) in self.input_fields.items():
             topo_descriptor = TopologyDescriptor.build_descriptor(
                 backend=self._force_backend,
                 operator=self,
@@ -156,7 +156,7 @@ class HDF_IO(ComputationalGraphOperator):
                 handle=topo_descriptor, **td_kwds)
             self.input_fields[field] = topo_descriptor
 
-        for (field, topo_descriptor) in self.output_fields.iteritems():
+        for (field, topo_descriptor) in self.output_fields.items():
             topo_descriptor = TopologyDescriptor.build_descriptor(
                 backend=self._force_backend,
                 operator=self,
@@ -209,7 +209,7 @@ class HDF_IO(ComputationalGraphOperator):
 
         local_compute_slices = {}
         global_compute_slices = {}
-        for (field, itopo) in self.input_fields.iteritems():
+        for (field, itopo) in self.input_fields.items():
             mesh = itopo.mesh
             assert (topo.domain._domain is itopo.domain._domain), 'domain mismatch'
             assert npw.array_equal(refmesh.grid_resolution, mesh.grid_resolution), 'global grid resolution mismatch'
@@ -246,7 +246,7 @@ class HDF_IO(ComputationalGraphOperator):
                     name += '_' + DirectionLabels[d] + name_postfix
                     self.dataset[name] = var_d.data[d]
 
-        for (f, name) in self.var_names.iteritems():
+        for (f, name) in self.var_names.items():
             assert f in self._local_compute_slices
             assert f in self._global_compute_slices
             self._local_compute_slices[name] = self._local_compute_slices[f]
@@ -337,7 +337,7 @@ class HDF_Writer(HDF_IO):
         requests = super(HDF_Writer, self).get_work_properties(**kwds)
 
         max_bytes = 0
-        for (name, data) in self.dataset.iteritems():
+        for (name, data) in self.dataset.items():
             if (data.backend.kind == Backend.HOST):
                 continue
             if (data.backend.kind == Backend.OPENCL):
@@ -357,7 +357,7 @@ class HDF_Writer(HDF_IO):
     def setup(self, work, **kwds):
         super(HDF_Writer, self).setup(work=work, **kwds)
         self._setup_grid_template()
-        for (name, data) in self.dataset.iteritems():
+        for (name, data) in self.dataset.items():
             data = data[self._local_compute_slices[name]]
             if (data.backend.kind is Backend.HOST):
                 def get_data(data=data.handle):
@@ -613,7 +613,7 @@ class HDF_Reader(HDF_IO):
         """
 
         vnames = ['{}[{}]'.format(var.name[:3], topo.id)
-                    for var,topo in variables.iteritems()]
+                    for var,topo in variables.items()]
         name = name or 'read_{}'.format(','.join(vnames))
         super(HDF_Reader, self).__init__(input_fields=None, output_fields=variables,
                                             name=name, **kwds)
diff --git a/hysop/operator/mean_field.py b/hysop/operator/mean_field.py
index e48595100424c2e5d92ccf3ca75b282ba8d2e560..94db193bd5817819799a217975f2a8d64003eaab 100644
--- a/hysop/operator/mean_field.py
+++ b/hysop/operator/mean_field.py
@@ -51,7 +51,7 @@ class ComputeMeanField(ComputationalGraphOperator):
         input_fields = { field: variables[field] for field in fields.keys() }
         super(ComputeMeanField, self).__init__(input_fields=input_fields, io_params=io_params, **kwds)
         self.averaged_fields = fields
-    
+
     @debug
     def get_field_requirements(self):
         # set good transposition state and no ghosts (because of OPENCL mean)
@@ -64,11 +64,11 @@ class ComputeMeanField(ComputationalGraphOperator):
             req.min_ghosts=(0,)*field.dim
             req.max_ghosts=(0,)*field.dim
         return requirements
-        
+
     def discretize(self):
         super(ComputeMeanField, self).discretize()
         averaged_dfields = {}
-        for field, (view, axes) in self.averaged_fields.iteritems():
+        for field, (view, axes) in self.averaged_fields.items():
             dfield = self.input_discrete_fields[field]
 
             axes = to_tuple(first_not_None(axes, range(dfield.dim)))
@@ -99,7 +99,7 @@ class ComputeMeanField(ComputationalGraphOperator):
             check_instance(view, tuple, values=(type(Ellipsis), slice), size=dfield.dim-len(axes))
             averaged_dfields[dfield] = (view, axes)
         self.averaged_dfields = averaged_dfields
-    
+
     def setup(self, work=None):
         super(ComputeMeanField, self).setup(work=work)
         path = self.io_params.filename
@@ -107,16 +107,16 @@ class ComputeMeanField(ComputationalGraphOperator):
             os.makedirs(path)
         self.path = path
         self.write_counter = 0
-    
+
     def filename(self, field, i):
         return '{}/{}_{}'.format(self.path, field.name, i)
-    
+
     @op_apply
     def apply(self, simulation, **kwds):
         if (simulation is None):
             raise ValueError("Missing simulation value for monitoring.")
         if self.io_params.should_dump(simulation=simulation):
-            for (dfield, (view, axes)) in self.averaged_dfields.iteritems():
+            for (dfield, (view, axes)) in self.averaged_dfields.items():
                 filename = self.filename(dfield, self.write_counter)
                 arrays = {}
                 for (i,data) in enumerate(dfield.data):
@@ -132,6 +132,6 @@ class ComputeMeanField(ComputationalGraphOperator):
     @classmethod
     def supported_backends(cls):
         return Backend.all
-    
+
 
 
diff --git a/hysop/operator/misc.py b/hysop/operator/misc.py
index 3f715b906a0cf289efa2c5e1c91ef1686cccd54e..a7ccd4c63f25b964c8d9cae69737cdb33e4553ff 100644
--- a/hysop/operator/misc.py
+++ b/hysop/operator/misc.py
@@ -8,15 +8,15 @@ from hysop.fields.continuous_field import Field
 
 class Noop(ComputationalGraphOperator):
     """An operator that does nothing and implements apply as noop."""
-    
+
     def apply(self, **kwds):
         """This is a noop."""
         pass
-    
+
     @classmethod
     def supported_backends(cls):
         return Backend.all
-    
+
     @classmethod
     def supports_multiple_field_topologies(cls):
         return True
@@ -26,7 +26,7 @@ class Noop(ComputationalGraphOperator):
     @classmethod
     def supports_mpi(cls):
         return True
-    
+
     def get_node_requirements(self):
         from hysop.core.graph.node_requirements import OperatorRequirements
         reqs = OperatorRequirements(self,
@@ -42,23 +42,23 @@ class ForceTopologyState(Noop):
     topology and data layout (local transposition state, array backend, memory ordering).
 
     This operator will just impose a given transposition states, a given memory ordering
-    and a given backend, to all of its input fields. This forces the graph generator to 
+    and a given backend, to all of its input fields. This forces the graph generator to
     generate additional operators and topologies to comply with those field requirements.
 
-    If transposition_state is not given, all input fields will be imposed to be in natural 
+    If transposition_state is not given, all input fields will be imposed to be in natural
     transposition order (YX in 2D and ZYX in 3D).
-    
+
     If memory_order is not given, all input fields will be imposed to be C_CONTIGUOUS.
 
     If backend is not given, all input fields will be imposed to live on Backend.HOST.
     """
 
     @debug
-    def __init__(self, fields, variables, 
+    def __init__(self, fields, variables,
             tstate=None, memorder=None,
-            backend=None, extra_kwds=None, 
-            mpi_params=None, cl_env=None, 
-            **kwds): 
+            backend=None, extra_kwds=None,
+            mpi_params=None, cl_env=None,
+            **kwds):
         extra_kwds = first_not_None(extra_kwds, {})
 
         fields = to_tuple(fields)
@@ -93,12 +93,12 @@ class ForceTopologyState(Noop):
         self.memorder = memorder
         self.backend = first_not_None(backend, Backend.HOST)
         self.extra_kwds = first_not_None(extra_kwds, {})
-    
+
     @debug
     def get_field_requirements(self):
         from hysop.topology.topology_descriptor import TopologyDescriptor
         from hysop.fields.field_requirements import DiscreteFieldRequirements
-        for (field, topo_descriptor) in self.input_fields.iteritems():
+        for (field, topo_descriptor) in self.input_fields.items():
             topo_descriptor = TopologyDescriptor.build_descriptor(
                     backend=self.backend,
                     operator=self,
@@ -107,7 +107,7 @@ class ForceTopologyState(Noop):
                     **self.extra_kwds)
             self.input_fields[field] = topo_descriptor
 
-        for (field, topo_descriptor) in self.output_fields.iteritems():
+        for (field, topo_descriptor) in self.output_fields.items():
             topo_descriptor = TopologyDescriptor.build_descriptor(
                     backend=self.backend,
                     operator=self,
@@ -119,7 +119,7 @@ class ForceTopologyState(Noop):
         # and we use default DiscreteFieldRequirements (ie. no min ghosts, no max ghosts,
         # can_split set to True in all directions, all TranspositionStates).
         input_field_requirements  = {}
-        for (field, topo_descriptor) in self.input_fields.iteritems():
+        for (field, topo_descriptor) in self.input_fields.items():
             if (topo_descriptor is None):
                 req = None
             else:
@@ -130,7 +130,7 @@ class ForceTopologyState(Noop):
             input_field_requirements[field] = req
 
         output_field_requirements = {}
-        for (field, topo_descriptor) in self.output_fields.iteritems():
+        for (field, topo_descriptor) in self.output_fields.items():
             if (topo_descriptor is None):
                 req = None
             else:
@@ -144,4 +144,4 @@ class ForceTopologyState(Noop):
         requirements.update_inputs(input_field_requirements)
         requirements.update_outputs(output_field_requirements)
         return requirements
-    
+
diff --git a/hysop/operator/parameter_plotter.py b/hysop/operator/parameter_plotter.py
index d41ce08b44e4b447249dec8f8ae4b185e044a18b..76058605d0796a809e9d1c627c2c71069ed06f34 100644
--- a/hysop/operator/parameter_plotter.py
+++ b/hysop/operator/parameter_plotter.py
@@ -131,7 +131,7 @@ class ParameterPlotter(PlottingOperator):
 
             parameters = {}
             axes_shape = (1,)*2
-            for (pos, params) in _parameters.iteritems():
+            for (pos, params) in _parameters.items():
                 pos = to_tuple(pos)
                 pos = (2-len(pos))*(0,) + pos
                 check_instance(pos, tuple, values=int)
@@ -149,7 +149,7 @@ class ParameterPlotter(PlottingOperator):
                     raise TypeError(type(params))
                 check_instance(params, dict, keys=str, values=TensorParameter)
                 _params = {}
-                for (pname, p) in params.iteritems():
+                for (pname, p) in params.items():
                     if isinstance(p, ScalarParameter):
                         _params[pname] = p
                     else:
@@ -167,10 +167,10 @@ class ParameterPlotter(PlottingOperator):
         data = {}
         lines = {}
         times = npw.empty(shape=(alloc_size,), dtype=npw.float32)
-        for (pos, params) in parameters.iteritems():
+        for (pos, params) in parameters.items():
             params_data = {}
             params_lines = {}
-            for (pname, p) in params.iteritems():
+            for (pname, p) in params.items():
                 pdata = npw.empty(shape=(alloc_size,), dtype=p.dtype)
                 pline = self.get_axes(pos).plot([], [], label=pname)[0]
                 params_data[p] = pdata
@@ -205,16 +205,16 @@ class ParameterPlotter(PlottingOperator):
             times = npw.empty(shape=(2*self.times.size,), dtype=self.times.dtype)
             times[:self.times.size] = self.times
             self.times = times
-            for (pos, params) in self.data.iteritems():
-                for (p, pdata) in params.iteritems():
+            for (pos, params) in self.data.items():
+                for (p, pdata) in params.items():
                     new_pdata = npw.empty(shape=(2*pdata.size,), dtype=pdata.dtype)
                     new_pdata[:pdata.size] = pdata
                     params[p] = new_pdata
 
         times, data, lines = self.times, self.data, self.lines
         times[self.counter] = simulation.t()
-        for (pos, params) in self.parameters.iteritems():
-            for (pname, p) in params.iteritems():
+        for (pos, params) in self.parameters.items():
+            for (pname, p) in params.items():
                 data[pos][p][self.counter] = p()
                 lines[pos][p].set_xdata(times[:self.counter])
                 lines[pos][p].set_ydata(data[pos][p][:self.counter])
diff --git a/hysop/operator/tests/test_custom_symbolic.py b/hysop/operator/tests/test_custom_symbolic.py
index 51ad74c73021ecec803ecabc83360b9923d1f419..2bf80f1aaf33d492d3ded7104d308c9e7dacc80c 100644
--- a/hysop/operator/tests/test_custom_symbolic.py
+++ b/hysop/operator/tests/test_custom_symbolic.py
@@ -67,7 +67,7 @@ class TestCustomSymbolic(object):
     @staticmethod
     def iter_implementations(op_cls, base_kwds):
         for impl in op_cls.implementations():
-            base_kwds = {k:v for (k,v) in base_kwds.iteritems()}
+            base_kwds = {k:v for (k,v) in base_kwds.items()}
             base_kwds['implementation'] = impl
             if impl is Implementation.OPENCL:
                 for cl_env in iter_clenv():
@@ -271,7 +271,7 @@ class TestCustomSymbolic(object):
                 refin['f'][field] = ifield.sdata.get().handle.copy()
                 refin['dfields'][field] = ifield
                 in_names.append(field.name)
-            for pname, param in problem.input_params.iteritems():
+            for pname, param in problem.input_params.items():
                 refin['p'][pname] = param.value
                 in_names.append(pname)
 
@@ -287,7 +287,7 @@ class TestCustomSymbolic(object):
                         res = res[ofield.compute_slices]
                     refout['f'][field] += (res,)
                     out_names.append(field.name+'::{}'.format(i))
-            for pname, param in problem.output_params.iteritems():
+            for pname, param in problem.output_params.items():
                 refout['p'][pname] = compute_outputs(refin['f'], refin['p'], refin['dfields'], param, None)
                 out_names.append(pname)
 
@@ -301,7 +301,7 @@ class TestCustomSymbolic(object):
                     res = ofield.data[i].get()
                     res = res.handle[ofield.compute_slices].copy()
                     out['f'][field] += (res,)
-            for pname, param in problem.output_params.iteritems():
+            for pname, param in problem.output_params.items():
                 out['p'][pname] = param.value
 
             for field in refin['f']:
diff --git a/hysop/operator/transpose.py b/hysop/operator/transpose.py
index 48cb847b88ddcd357f95ba29b5faf9002cf83ec6..961b8f3cbda8a2a8d14d1e96b409e88e88b5a004 100644
--- a/hysop/operator/transpose.py
+++ b/hysop/operator/transpose.py
@@ -22,10 +22,10 @@ class TranspositionNotImplementedError(NotImplementedError):
 
 class Transpose(ComputationalGraphNodeGenerator):
     """
-    Operator generator for inplace and out of place field transposition 
+    Operator generator for inplace and out of place field transposition
     and permutations in general.
-    
-    Available implementations are: 
+
+    Available implementations are:
         *python (numpy based transpose, n-dimensional)
         *opencl (opencl codegen based transpose up to 16D)
 
@@ -33,10 +33,10 @@ class Transpose(ComputationalGraphNodeGenerator):
     graph node generator generates an operator per supplied field,
     possibly on different implementation backends.
 
-    See hysop.operator.base.transpose_operator.TransposeOperatorBase for operator backend 
+    See hysop.operator.base.transpose_operator.TransposeOperatorBase for operator backend
     implementation interface.
     """
-    
+
     @classmethod
     def implementations(cls):
         from hysop.backend.host.python.operator.transpose   import PythonTranspose
@@ -46,23 +46,23 @@ class Transpose(ComputationalGraphNodeGenerator):
                 Implementation.OPENCL: OpenClTranspose
         }
         return _implementations
-    
+
     @classmethod
     def default_implementation(cls):
         msg='Transpose has no default implementation, '
         msg+='implementation should match the discrete field topology backend.'
         raise RuntimeError(msg)
-    
+
     @debug
     def __init__(self, fields, variables, axes,
                 output_fields=None,
-                implementation=None, 
+                implementation=None,
                 name=None,
-                base_kwds=None, 
+                base_kwds=None,
                 **kwds):
         """
         Initialize a Transpose operator generator operating on CartesianTopology topologies.
-        
+
         Parameters
         ----------
         fields: Field, list or tuple of Fields
@@ -71,7 +71,7 @@ class Transpose(ComputationalGraphNodeGenerator):
         output_fields: Field, list or tuple of Fields, optional
             Output continuous fields where the results are stored.
             Transposed shapes should match the input.
-            By default output_fields are the same as input_fields 
+            By default output_fields are the same as input_fields
             resulting in inplace transpositions.
             Input and output are matched by order int list/tuple.
         variables: dict
@@ -79,24 +79,24 @@ class Transpose(ComputationalGraphNodeGenerator):
         axes: tuple of ints, or array like of tuples, or dict of (tuple, TranspositionState).
             Permutation of axes in numpy notations (as a tuple of ints).
             Axe dim-1 is the contiguous axe, axe 0 has the greatest stride in memory.
-            
+
             It can also be an array like of axes and the operator implementation will choose
-            the most suitable axe permutation scheme in this list. 
+            the most suitable axe permutation scheme in this list.
 
             If a dictionnary is provided, this gives the operator implementation to choose
             the most suitable axe permutation scheme with the knowledge of the target
             transposition state as well.
 
             All fields on the same backend will perform the same permutation.
-            Operator chosen permutation can be retrieved by using generated 
+            Operator chosen permutation can be retrieved by using generated
             operator 'axes' attribute.
 
         implementation: Implementation, optional, defaults to None
             target implementation, should be contained in available_implementations().
-            If implementation is set and topology does not match backend, 
+            If implementation is set and topology does not match backend,
                 RuntimeError will be raised on _generate.
             If None, implementation will be set according to topologies backend,
-            different implementations may be choosen for different Fields if 
+            different implementations may be chosen for different Fields if
             defined on different backends.
         name: string
             prefix for generated operator names
@@ -105,9 +105,9 @@ class Transpose(ComputationalGraphNodeGenerator):
             Base class keywords arguments.
             If None, an empty dict will be passed.
         kwds:
-            keywords arguments that will be passed towards implementation 
+            keywords arguments that will be passed towards implementation
             transpose operator __init__.
-        
+
         Notes
         -----
         Out of place transpose will always be faster to process.
@@ -128,10 +128,10 @@ class Transpose(ComputationalGraphNodeGenerator):
         A Transpose operator implementation should support the TransposeOperatorBase
         interface (see hysop.operator.base.transpose_operator.TransposeOperatorBase).
 
-        This ComputationalGraphNodeFrontend will generate a operator for each 
+        This ComputationalGraphNodeFrontend will generate an operator for each
         input and output ScalarField pair.
 
-        All implementations should raise TranspositionNotImplementedError is the user supplied 
+        All implementations should raise TranspositionNotImplementedError if the user supplied
+        parameters lead to unimplemented or unsupported transposition features.
         """
         input_fields = to_tuple(fields)
@@ -142,19 +142,19 @@ class Transpose(ComputationalGraphNodeGenerator):
                                     for (ifield,ofield) in zip(input_fields, output_fields) )
         else:
             output_fields = tuple( ifield for ifield in input_fields )
-        
+
         check_instance(input_fields,  tuple, values=Field)
         check_instance(output_fields, tuple, values=Field, size=len(input_fields))
-        
+
         candidate_input_tensors  = filter(lambda x: x.is_tensor, input_fields)
         candidate_output_tensors = filter(lambda x: x.is_tensor, output_fields)
-        
+
         base_kwds = base_kwds or dict()
-        super(Transpose,self).__init__(name=name, 
+        super(Transpose,self).__init__(name=name,
                 candidate_input_tensors=candidate_input_tensors,
                 candidate_output_tensors=candidate_output_tensors,
                 **base_kwds)
-        
+
         # expand tensors
         ifields, ofields = (), ()
         for (ifield, ofield) in zip(input_fields, output_fields):
@@ -162,7 +162,7 @@ class Transpose(ComputationalGraphNodeGenerator):
             msg+='and field {} of shape {}.'
             if (ifield.is_tensor ^ ofield.is_tensor):
                 if ifield.is_tensor:
-                    msg=msg.format(ifield.short_description(), ifield.shape, 
+                    msg=msg.format(ifield.short_description(), ifield.shape,
                                    ofield.short_description(), '(1,)')
                 else:
                     msg=msg.format(ifield.short_description(), '(1,)',
@@ -174,13 +174,13 @@ class Transpose(ComputationalGraphNodeGenerator):
                 raise RuntimeError(msg)
             ifields += ifield.fields
             ofields += ofield.fields
-        
+
         input_fields  = ifields
         output_fields = ofields
 
         check_instance(input_fields,  tuple, values=ScalarField)
         check_instance(output_fields, tuple, values=ScalarField, size=len(input_fields))
-        
+
         check_instance(variables, dict, keys=Field, values=CartesianTopologyDescriptors)
         check_instance(base_kwds, dict, keys=str)
 
@@ -227,7 +227,7 @@ class Transpose(ComputationalGraphNodeGenerator):
                 msg += 'input_field {} dimension {}.'
                 msg.format(input_field, idim, input_fields[0].name, dim)
                 raise ValueError(msg)
-        
+
         candidate_axes = self._check_axes(axes, dim)
 
         self.input_fields   = input_fields
@@ -236,7 +236,7 @@ class Transpose(ComputationalGraphNodeGenerator):
         self.candidate_axes = candidate_axes
         self.implementation = implementation
         self.kwds           = kwds
-    
+
     @staticmethod
     def _check_axes(axes, dim):
         msg = 'Unkown type for axes parameter.'
@@ -255,9 +255,9 @@ class Transpose(ComputationalGraphNodeGenerator):
         else:
             raise TypeError(msg)
         del axes
-            
+
         check_instance(candidate_axes, dict, keys=tuple)
-        for (axes, target_tstate) in candidate_axes.iteritems():
+        for (axes, target_tstate) in candidate_axes.items():
             check_instance(axes, tuple, values=(int,long))
             check_instance(target_tstate, TranspositionState[dim], allow_none=True)
             if len(axes)!=dim:
@@ -323,7 +323,7 @@ class Transpose(ComputationalGraphNodeGenerator):
             raise TypeError(msg)
 
         return op_cls
-    
+
     @debug
     def _generate(self):
         nodes = []
@@ -331,15 +331,15 @@ class Transpose(ComputationalGraphNodeGenerator):
             src_topo = ComputationalGraphNode.get_topo_descriptor(self.variables, ifield)
             dst_topo = ComputationalGraphNode.get_topo_descriptor(self.variables, ofield)
             kwds     = self.kwds.copy()
-            
+
             TransposeOp = self._get_op_and_check_implementation(src_topo, dst_topo)
             axes = TransposeOp.get_preferred_axes(src_topo, dst_topo, self.candidate_axes)
 
             variables = { ifield: src_topo }
             variables[ofield] = dst_topo
-                
+
             # instantiate operator
-            node = TransposeOp(input_field=ifield, output_field=ofield, 
+            node = TransposeOp(input_field=ifield, output_field=ofield,
                                variables=variables, axes=axes,
                                **kwds)
             nodes.append(node)
diff --git a/hysop/symbolic/base.py b/hysop/symbolic/base.py
index 5b5e2fa4cc1c73bd120967feed4abd55ce845214..232c6c8708671489745570c264759ab2d2c97b23 100644
--- a/hysop/symbolic/base.py
+++ b/hysop/symbolic/base.py
@@ -253,7 +253,7 @@ class TensorBase(npw.ndarray):
     def xreplace(self, replacements):
         """Elementwise sympy.xreplace()."""
         replace = {}
-        for (k,v) in replacements.iteritems():
+        for (k,v) in replacements.items():
             if isinstance(k, npw.ndarray):
                 for idx in npw.ndindex(*k.shape):
                     kk = k[idx]
diff --git a/hysop/symbolic/directional.py b/hysop/symbolic/directional.py
index 995c17f07d50d8430b43c9674184b98c4d338a07..09b8fa4027b7857ffbb9498fc2c76e96a6f3f721 100644
--- a/hysop/symbolic/directional.py
+++ b/hysop/symbolic/directional.py
@@ -58,7 +58,7 @@ def split(F, fixed_residue, force_residue, coords):
         for i in res.keys():
             res[i] += (residue/count)
     if (fixed_residue is not None):
-        for i,r in res.iteritems():
+        for i,r in res.items():
             if (r!=0):
                 res[i] += fixed_residue
     return res
@@ -66,7 +66,7 @@ def split(F, fixed_residue, force_residue, coords):
 def split_assignement(expr, fixed_residue, force_residue, coords):
     lhs, rhs = expr.args
     res = split(rhs, fixed_residue, force_residue, coords=coords)
-    for (i,ei) in res.iteritems():
+    for (i,ei) in res.items():
         if (ei != 0):
             res[i] = expr.func(lhs, ei)
     return res
@@ -93,6 +93,6 @@ if __name__ == '__main__':
     expr = (alpha*grad(u, frame) + (1-alpha)*grad(u, frame).T).dot(w)
     # expr = div(np.outer(u,w), frame)
     print(expr)
-    for i, x in split(expr, frame.coords).iteritems():
+    for i, x in split(expr, frame.coords).items():
         print(i, x.simplify())
 
diff --git a/hysop/tools/callback.py b/hysop/tools/callback.py
index b7bd848a5c783f80533b6f18c59d529eea5ca840..2d51b476bdc16e0babf10ade867ab5b1baeb8a4b 100644
--- a/hysop/tools/callback.py
+++ b/hysop/tools/callback.py
@@ -22,7 +22,7 @@ class TimerInterface(object):
         self.max   = None
         self.nruns = 0
         self.data  = []
-    
+
     def mean(self):
         if self.nruns==0:
             return None
@@ -37,11 +37,11 @@ class TimerInterface(object):
 
     def status(self):
         if self.state is None: #waiting 1st run
-            return 'W'       
+            return 'W'
         elif self.state < 0:   #running
             return 'R'
         else:                  #sleeping
-            return 'S'       
+            return 'S'
 
     def register_timing(self,timing):
         if self.min is None:
@@ -59,10 +59,10 @@ class TimerInterface(object):
 
     def __str__(self):
         return '({}) nruns={:4d}, min={}, max={}, mean={}, total={}' \
-                .format(self.status(), self.nruns, 
-                    time2str(self.min), time2str(self.max), 
+                .format(self.status(), self.nruns,
+                    time2str(self.min), time2str(self.max),
                     time2str(self.mean()), time2str(self.total()))
-    
+
     @staticmethod
     def _as_group(groupname,tasks,tic_callbacks=[],tac_callbacks=[]):
         return TimingGroup(name=groupname,tasks=tasks,
@@ -90,7 +90,7 @@ class MemInterface(TimerInterface):
         elif bdw > self.max_bandwidth:
             self.max_bandwidth = bdw
         self.bandwidth.append(bdw)
-    
+
     def mean_bandwidth(self):
         if self.nruns==0:
             return None
@@ -111,7 +111,7 @@ class MemInterface(TimerInterface):
 class MemcpyInterface(MemInterface):
     def __init__(self,membytes,**kargs):
         super(MemcpyInterface,self).__init__(membytes=membytes,**kargs)
-    
+
     def __str__(self):
         s = '\n{:15s} min_bdw={}, max_bdw={}, mean_bdw={}, total_mem_moved={}'.format(
                 '',
@@ -126,10 +126,10 @@ class ComputeInterface(MemInterface):
             raise ValueError('per_work_statistic is not a WorkStatistics')
         if total_work<1:
             raise ValueError('total_work < 1.')
-        
+
         membytes = total_work*per_work_statistic.global_mem_transactions()
         super(ComputeInterface, self).__init__(membytes=membytes,**kargs)
-        
+
         self.ftype = ftype
         self.total_work           = total_work
         self.per_work_statistic   = per_work_statistic
@@ -138,17 +138,17 @@ class ComputeInterface(MemInterface):
     def register_timing(self,timing):
         super(ComputeInterface,self).register_timing(timing)
         self.total_work_statistic += self.total_work*self.per_work_statistic
-    
+
     def stats_per_second(self):
         if self.nruns==0:
             return None
         else:
             return self.total_work_statistic.compute_timed_statistics(self.total())
-    
+
     def __str__(self):
-        
+
         s=''
-        
+
         timed_stats = self.stats_per_second()
         if (timed_stats is not None):
 
@@ -167,13 +167,13 @@ class ComputeInterface(MemInterface):
             flops *= float_op_factor
 
             opi = flops/timed_stats.global_mem_transactions()
-        
+
             if timed_stats.global_mem_throughput()>0:
                 s += '  throughput={}'.format(bdw2str(timed_stats.global_mem_throughput()))
                 if timed_stats.global_mem_throughput() < timed_stats.total_mem_throughput():
                     s+= ' (tot={})'.format(bdw2str(timed_stats.total_mem_throughput()))
                 s += ' OPI={}'.format(unit2str(opi,'FLOP/B',decimal=True,rounded=2))
-            for (op_category, ops_per_second) in timed_stats.ops_per_second().iteritems():
+            for (op_category, ops_per_second) in timed_stats.ops_per_second().items():
                 if op_category!='FLOPS':
                     s += '  {}'.format(unit2str(ops_per_second,op_category,decimal=True,rounded=2))
                 else:
@@ -189,7 +189,7 @@ class CallbackTask(object):
         self.tic_callbacks = []
         self.tac_callbacks = []
         self.register_callbacks(tic_callbacks,tac_callbacks)
-    def tic(self,**kargs): 
+    def tic(self,**kargs):
         self._on_tic(**kargs)
         for cb in self.tic_callbacks:
             cb(self,**kargs)
@@ -200,13 +200,13 @@ class CallbackTask(object):
     def register_callbacks(self,tic_callbacks=[],tac_callbacks=[]):
         tic_callbacks = _to_list(tic_callbacks)
         tac_callbacks = _to_list(tac_callbacks)
-        for cb in tic_callbacks: 
+        for cb in tic_callbacks:
             if cb not in self.tic_callbacks:
                 self.tic_callbacks.append(cb)
-        for cb in tac_callbacks: 
+        for cb in tac_callbacks:
             if cb not in self.tac_callbacks:
                 self.tac_callbacks.append(cb)
-    
+
     def _on_tic(self,**kargs):
         msg='_on_tic not implemented in class {}.'.format(self.__class__.__name__)
         raise NotImplementedError(msg)
@@ -219,7 +219,7 @@ class CallbackTask(object):
 
     def report(self,offset):
         return self.offset_str(offset) + '{:15s}'.format(self.name)
-    
+
     @staticmethod
     def offset_str(count):
         return '  '*count
@@ -229,11 +229,11 @@ class CallbackTask(object):
 
 class CallbackGroup(CallbackTask):
     def __init__(self,name,tasks,**kargs):
-        super(CallbackGroup,self).__init__(name,**kargs)    
+        super(CallbackGroup,self).__init__(name,**kargs)
         self.tasks  = tasks
         self.ticked = np.zeros(shape=(len(tasks),), dtype=bool)
         self.tacked = self.ticked.copy()
-        
+
         taskid = {}
         for i,task in enumerate(tasks):
             taskid[task.name] = i
@@ -253,10 +253,10 @@ class CallbackGroup(CallbackTask):
                 super(CallbackGroup,self).tac(**args)
                 self.ticked[:] = False
                 self.tacked[:] = False
-               
+
         for task in tasks:
             task.register_callbacks(tic_callbacks=_on_task_tic,tac_callbacks=_on_task_tac)
-    
+
     def _check(self):
         if len(self.tasks)==0:
             raise ValueError('Empty task list!')
@@ -267,7 +267,7 @@ class CallbackGroup(CallbackTask):
         raise RuntimeError('CallbackGroup.tic() should not be called explicitely.')
     def tac(self,**kargs):
         raise RuntimeError('CallbackGroup.tac() should never be called explicitely.')
-    
+
     def report(self,offset=0):
         s = ''
         s += '{}{}'.format(self.offset_str(offset), self.name)
@@ -277,7 +277,7 @@ class CallbackGroup(CallbackTask):
         s += '\n{}{:15s} {}'.format(self.offset_str(offset+1),'total:',
                 self.__str__())
         return s
-    
+
     @classmethod
     def _as_group(cls,groupname,tasks,tic_callbacks=[],tac_callbacks=[],**kargs):
         return self.__class__(name=groupname,tasks=tasks,
@@ -324,8 +324,8 @@ class TimingGroup(CallbackGroup, TimerInterface):
 
     def __str__(self):
         return TimerInterface.__str__(self)
-    
-class TimingTask(CallbackTask,TimerInterface): 
+
+class TimingTask(CallbackTask,TimerInterface):
     def __init__(self,**kargs):
         super(TimingTask,self).__init__(**kargs)
 
@@ -333,7 +333,7 @@ class TimingTask(CallbackTask,TimerInterface):
         return '{} {}'.format(CallbackTask.report(self,offset), TimerInterface.__str__(self))
     def __str__(self):
         return self.report()
-    
+
 class MemcpyTask(CallbackTask,MemcpyInterface):
     def __init__(self,MPI,**kargs):
         super(MemcpyTask,self).__init__(**kargs)
@@ -341,7 +341,7 @@ class MemcpyTask(CallbackTask,MemcpyInterface):
 
     def report(self,offset=0):
         return '{} {}'.format(
-                CallbackTask.report(self,offset), 
+                CallbackTask.report(self,offset),
                 self.format(MemcpyInterface.__str__(self),offset))
     def __str__(self):
         return self.report()
@@ -358,7 +358,7 @@ class ComputeTask(CallbackTask,ComputeInterface):
 
     def report(self,offset=0):
         return '{} {}'.format(
-                CallbackTask.report(self,offset), 
+                CallbackTask.report(self,offset),
                 self.format(ComputeInterface.__str__(self),offset))
     def __str__(self):
         return self.report()
@@ -390,7 +390,7 @@ class CallbackProfiler(object):
         self._check_registered(target)[target].tic(**kargs)
     def tac(self,target,**kargs):
         self._check_registered(target)[target].tac(**kargs)
-    
+
     def register_tasks(self,tasks, tic_callbacks=[], tac_callbacks=[],**kargs):
         tasks  = _to_list(tasks)
         for task in tasks:
@@ -409,8 +409,8 @@ class CallbackProfiler(object):
                     task = MPITimingTask(MPI=self._MPI,name=taskname,**kargs)
             task.register_callbacks(tic_callbacks,tac_callbacks)
         self.tasks[taskname] = task
-    
-    def register_group(self,groupname, tasknames, 
+
+    def register_group(self,groupname, tasknames,
             tic_callbacks=[], tac_callbacks=[]):
         if groupname in self.groups:
             source = set(tasknames)
@@ -452,17 +452,17 @@ class CallbackProfiler(object):
         if target in self.registered_targets():
             msg='Target {} was already registered!'.format(target)
             raise ValueError(msg)
-    
+
     def report(self,mode='recursive'):
         s = '=== Callback Profiler Report ==='
         if mode=='all':
             if self.has_tasks():
                 s += '\n ::Individual tasks::'
-                for taskname,task in self.tasks.iteritems():
+                for taskname,task in self.tasks.items():
                     s += '\n'+task.report(1)
                 if self.has_groups():
                     s += '\n ::Group tasks::'
-                    for taskname,task in self.groups.iteritems():
+                    for taskname,task in self.groups.items():
                         s += '\n'+task.report(1)
         elif mode =='recursive':
             if self.has_groups():
@@ -476,6 +476,6 @@ class CallbackProfiler(object):
                     task = self.tasks[taskname]
                     s += '\n'+task.report(1)
         return s
-    
+
     def __str__(self):
         return self.report()
diff --git a/hysop/tools/debug_utils.py b/hysop/tools/debug_utils.py
index b3748b5530f4ddaf0efc3b13418acdbf3bc30286..c3adf14edb9bdd1ee62e367352d764b26135c873 100644
--- a/hysop/tools/debug_utils.py
+++ b/hysop/tools/debug_utils.py
@@ -27,7 +27,7 @@ class ImshowDebugger(object):
             axes[k] = _axes[i]
 
         imgs = {}
-        for (k,v) in data.iteritems():
+        for (k,v) in data.items():
             v = self.get_data(v)
             for j in range(ntimes):
                 axes[k][j].set_title('{} at t=Tn-{}'.format(k,j))
@@ -83,7 +83,7 @@ class ImshowDebugger(object):
             queue.finish()
         imgs = self.imgs
         ntimes = self.ntimes
-        for (k,data) in self.data.iteritems():
+        for (k,data) in self.data.items():
             data = self.get_data(data)
             for j in range(ntimes-1,0,-1):
                 imgs[k][j].set_data(imgs[k][j-1].get_array())
diff --git a/hysop/tools/enum.py b/hysop/tools/enum.py
index a7c59967d7dbebeb0fab636b48c735d42744fe46..6ca539c068034cf7f4ae55b306b3ee8baef347b6 100644
--- a/hysop/tools/enum.py
+++ b/hysop/tools/enum.py
@@ -104,7 +104,7 @@ class EnumFactory(object):
                 msg+='\n\tregistered enum: {}\n\tnew values: {}'
                 msg=msg.format(name, enum.fields().keys(), fields.keys())
                 raise ValueError(msg)
-            elif any([ fields[k] != v for (k,v) in enum.fields().iteritems()]):
+            elif any([ fields[k] != v for (k,v) in enum.fields().items()]):
                 msg='Enum \'{}\' was already created with different values:'
                 msg+='\n\tregistered enum: {}\n\tnew values: {}'
                 msg=msg.format(name, enum.fields(), fields)
@@ -131,7 +131,7 @@ class EnumFactory(object):
                 msg=msg.format(k)
                 raise ValueError(msg)
 
-        fields  = dict(zip(fields.keys(), np.asarray(fields.values()).astype(dtype)))
+        fields  = dict(zip(fields.keys(), np.asarray(tuple(fields.values())).astype(dtype)))
         rfields = dict(zip(fields.values(), fields.keys()))
 
         def __fields(cls):
@@ -248,7 +248,7 @@ class EnumFactory(object):
 
         generated_enum = type(name+'Enum', (Enum,), {})
         _all = []
-        for k,v in fields.iteritems():
+        for k,v in fields.items():
             instance = generated_enum(field=k)
             setattr(mcls, k, instance)
             _all.append(instance)
diff --git a/hysop/tools/field_utils.py b/hysop/tools/field_utils.py
index 88db1dedc1a1bed4fdf4762c24a9ecb6122bcf50..5dca138946ced291dade9789f6047cb7651de708 100644
--- a/hysop/tools/field_utils.py
+++ b/hysop/tools/field_utils.py
@@ -223,7 +223,7 @@ class DifferentialStringFormatter(object):
             '<LBRACKET>': '{',
             '<RBRACKET>': '}',
         }
-        for (k,v) in special_characters.iteritems():
+        for (k,v) in special_characters.items():
             ss = ss.replace(k,v)
         if isinstance(ss, unicode):
             ss = ss.encode('utf-8')
diff --git a/hysop/tools/handle.py b/hysop/tools/handle.py
index 8c7859e2b09892de3d4a83add2d8783860304116..c51ea8c81d901657e91de5ce17b9237edd906078 100644
--- a/hysop/tools/handle.py
+++ b/hysop/tools/handle.py
@@ -338,7 +338,7 @@ class RegisteredObject(TaggedObject):
 
      def __del__(self):
          key = None
-         for k,v in self.__registered_objects.iteritems():
+         for k,v in self.__registered_objects.items():
              if v is self:
                  key = k
          if (key is not None):
diff --git a/hysop/tools/io_utils.py b/hysop/tools/io_utils.py
index 2095fee8fffdad69358166f870dd02cf8bcb3a9a..f7ae6cb1730437f4489de78a016e5a29f73ccea4 100755
--- a/hysop/tools/io_utils.py
+++ b/hysop/tools/io_utils.py
@@ -434,7 +434,7 @@ class IOParams(namedtuple("IOParams", ['filename', 'filepath',
         all_kwds = {}
         for k in keys:
             if (k == 'kwds'):
-                for (k, v) in kwds.get(k, getattr(self, k)).iteritems():
+                for (k, v) in kwds.get(k, getattr(self, k)).items():
                     all_kwds[k] = v
             else:
                 all_kwds[k] = kwds.get(k, getattr(self, k))
diff --git a/hysop/tools/numpywrappers.py b/hysop/tools/numpywrappers.py
index 29cfc7a5e59306f139723dc96c92c1f65dfbebe5..d50c769144a95dc684cfe3c7bd58269c567774cb 100644
--- a/hysop/tools/numpywrappers.py
+++ b/hysop/tools/numpywrappers.py
@@ -98,7 +98,7 @@ def __hysop_array_generated_method(shape, fill_value, order=HYSOP_ORDER, **kargs
     hysop_types = ['real', 'complex', 'integer', 'index', 'dim', 'bool']
 
     for ht in hysop_types:
-        for _fname, fdefinition in functions.iteritems():
+        for _fname, fdefinition in functions.items():
             fname = _fname.format(type=ht, TYPE=ht.upper())
             fdef  = fdefinition.format(type=ht, TYPE=ht.upper())
             exec(fdef)
@@ -183,12 +183,12 @@ def fancy_print(a, replace_values={}, replace_views={},
     else:
         strarr[...] = inital_val
 
-    for predicate, replace_val in replace_values.iteritems():
+    for predicate, replace_val in replace_values.items():
         assert callable(predicate)
         pred = predicate(a)
         strarr[pred] = replace_val
 
-    for view, replace_val in replace_views.iteritems():
+    for view, replace_val in replace_views.items():
         strarr[view] = replace_val
 
     _formatter = {
diff --git a/hysop/tools/spectral_utils.py b/hysop/tools/spectral_utils.py
index 3ee6ad10c12fbee98b484acade2ad83e9d700320..cb1a2f2886ad4dc98bef0c407adda0a21b3398e5 100644
--- a/hysop/tools/spectral_utils.py
+++ b/hysop/tools/spectral_utils.py
@@ -832,7 +832,7 @@ class EnergyPlotter(object):
 
             xmax = 1
             lines = ()
-            for (label,p) in energy_parameters.iteritems():
+            for (label,p) in energy_parameters.items():
                 assert p.size>1
                 Ix = np.arange(1, p.size)
                 xmax = max(xmax, p.size-1)
diff --git a/hysop_examples/example_utils.py b/hysop_examples/example_utils.py
index 7c7d66dcad6968d0efbfc5ef9274611f4bfdb1c3..a1c340411ed971c1fa959e352556bb9aaaf4d9d1 100644
--- a/hysop_examples/example_utils.py
+++ b/hysop_examples/example_utils.py
@@ -1668,7 +1668,7 @@ class HysopArgParser(argparse.ArgumentParser):
             return values[argvalue]
         msg='Failed to convert argument {}: {}'.format(argname, argvalue)
         msg+='\nPossible values are:\n  *'
-        msg+='\n  *'.join('{}: {}'.format(k, v) for (k,v) in values.iteritems())
+        msg+='\n  *'.join('{}: {}'.format(k, v) for (k,v) in values.items())
         self.error(msg)
 
     def _check_and_set_diffusion_mode(self, argname, args):