diff --git a/hysop/backend/device/opencl/opencl_symbolic.py b/hysop/backend/device/opencl/opencl_symbolic.py
index 6962050fb818a990431ccc4550dd8909c52d0910..3366de13a80dce590d382391d3ec947f46a70475 100644
--- a/hysop/backend/device/opencl/opencl_symbolic.py
+++ b/hysop/backend/device/opencl/opencl_symbolic.py
@@ -54,6 +54,7 @@ from hysop.backend.device.opencl.autotunable_kernels.custom_symbolic import (
     OpenClAutotunableCustomSymbolicKernel,
 )
 from hysop.tools.sympy_utils import subscript, subscripts
+from hysop.tools.parameters import MPIParams
 
 
 class OpenClSymbolic(OpenClOperator):
@@ -217,8 +218,8 @@ class OpenClSymbolic(OpenClOperator):
         check_instance(
             output_fields, dict, keys=ScalarField, values=CartesianTopologyDescriptors
         )
-        check_instance(input_params, dict, keys=str, values=Parameter)
-        check_instance(output_params, dict, keys=str, values=Parameter)
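+        # input_params/output_params are now keyed by Parameter, with the
+        # MPIParams they are defined on as values (was: name -> Parameter).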
+        check_instance(input_params, dict, keys=Parameter, values=MPIParams)
+        check_instance(output_params, dict, keys=Parameter, values=MPIParams)
         check_instance(input_tensor_fields, tuple, values=Field)
         check_instance(output_tensor_fields, tuple, values=Field)
 
@@ -261,8 +262,18 @@ class OpenClSymbolic(OpenClOperator):
                     raise RuntimeError(msg)
             _cmp("input_fields", input_fields, expr_info.input_fields, exprs)
             _cmp("output_fields", output_fields, expr_info.output_fields, exprs)
-            _cmp("input_params", input_params, expr_info.input_params, exprs)
-            _cmp("output_params", output_params, expr_info.output_params, exprs)
+            _cmp(
+                "input_params",
+                {p.name: p for p in input_params},
+                expr_info.input_params,
+                exprs,
+            )
+            _cmp(
+                "output_params",
+                {p.name: p for p in output_params},
+                expr_info.output_params,
+                exprs,
+            )
             assert 0 <= self.cr <= expr_info.max_granularity, self.cr
             expr_info.compute_granularity = self.cr
             expr_info.time_integrator = self.time_integrator
diff --git a/hysop/backend/host/fortran/operator/diffusion.py b/hysop/backend/host/fortran/operator/diffusion.py
index 6ce82e9cc336251bb4f2625ded6dc057d84db10b..34c61208ca04343ea986c1d1d895aee8c9aa027b 100644
--- a/hysop/backend/host/fortran/operator/diffusion.py
+++ b/hysop/backend/host/fortran/operator/diffusion.py
@@ -80,9 +80,9 @@ class DiffusionFFTW(FortranFFTWOperator):
 
         input_fields = {Fin: variables[Fin]}
         output_fields = {Fout: variables[Fout]}
-        input_params = {dt.name: dt}
+        input_params = {dt}
         if isinstance(nu, ScalarParameter):
-            input_params[nu.name] = nu
+            input_params.add(nu)
         else:
             self._real_nu = nu
             nu = lambda: self._real_nu
diff --git a/hysop/backend/host/fortran/operator/scales_advection.py b/hysop/backend/host/fortran/operator/scales_advection.py
index 0eb9d12e4ddd90ab647c4347bd5472c03b6c17ed..ef5cd77bd86ed1f56c158879ccf10b6156037692 100644
--- a/hysop/backend/host/fortran/operator/scales_advection.py
+++ b/hysop/backend/host/fortran/operator/scales_advection.py
@@ -176,8 +176,8 @@ class ScalesAdvection(FortranOperator):
 
         input_fields = {velocity: variables[velocity]}
         output_fields = {}
-        input_params = {dt.name: dt}
-        output_params = {}
+        input_params = {dt}
+        output_params = set()
 
         is_inplace = True
         for ifield, ofield in zip(advected_fields_in, advected_fields_out):
diff --git a/hysop/backend/host/python/operator/analytic.py b/hysop/backend/host/python/operator/analytic.py
index 4851351d28ecf0381e3591284a9e4acc8cafa5d0..93edc950f3767e61c272a582f51ad31ad8fb1417 100644
--- a/hysop/backend/host/python/operator/analytic.py
+++ b/hysop/backend/host/python/operator/analytic.py
@@ -75,7 +75,7 @@ class PythonAnalyticField(HostOperator):
 
         input_fields = {}
         output_fields = {field: self.get_topo_descriptor(variables, field)}
-        input_params = {}
+        input_params = set()
 
         extra_kwds = {}
         map_fields = {}
@@ -84,7 +84,7 @@ class PythonAnalyticField(HostOperator):
                 input_fields[v] = self.get_topo_descriptor(variables, v)
                 map_fields[v] = k
             elif isinstance(v, Parameter):
-                input_params[k] = v
+                input_params.add(v)
                 extra_kwds[k] = v
             else:
                 extra_kwds[k] = v
diff --git a/hysop/backend/host/python/operator/flowrate_correction.py b/hysop/backend/host/python/operator/flowrate_correction.py
index fe3d6ebf4b653d9a1bcd1ac349dab47633cc40e1..beb3d6e13aeddf455b4e19a7b05184526063cf7a 100644
--- a/hysop/backend/host/python/operator/flowrate_correction.py
+++ b/hysop/backend/host/python/operator/flowrate_correction.py
@@ -76,7 +76,7 @@ class PythonFlowRateCorrection(HostOperator):
 
         input_fields = {velocity: variables[velocity], vorticity: variables[vorticity]}
         output_fields = {velocity: variables[velocity]}
-        input_params = {flowrate.name: flowrate}
+        input_params = {flowrate}
 
         self.velocity = velocity
         self.vorticity = vorticity
diff --git a/hysop/backend/host/python/operator/penalization.py b/hysop/backend/host/python/operator/penalization.py
index d74123582376c7cada1e6b902930cb986c0f023b..364e9c3c758c00136a93071462668b94471314cf 100644
--- a/hysop/backend/host/python/operator/penalization.py
+++ b/hysop/backend/host/python/operator/penalization.py
@@ -191,7 +191,7 @@ class PythonPenalizeVorticity(HostOperator, CommonPenalization):
 
         input_fields = {velocity: variables[velocity], vorticity: variables[vorticity]}
         output_fields = {vorticity: variables[vorticity]}
-        input_params = {dt.name: dt}
+        input_params = {dt}
         for o in obstacles.values() if isinstance(obstacles, dict) else obstacles:
             input_fields[o] = variables[o]
         if isinstance(ubar, Field):
@@ -407,7 +407,7 @@ class PythonPenalizeVelocity(HostOperator, CommonPenalization):
             velocity: variables[velocity],
         }
         output_fields = {velocity: variables[velocity]}
-        input_params = {dt.name: dt}
+        input_params = {dt}
         for o in obstacles.values() if isinstance(obstacles, dict) else obstacles:
             input_fields[o] = variables[o]
         if isinstance(ubar, Field):
diff --git a/hysop/backend/host/python/operator/vorticity_absorption.py b/hysop/backend/host/python/operator/vorticity_absorption.py
index 545553f14480957240907f8bd123ea6b5c25208e..cce9ec180eff7792fb0d2034626de51ec0fafbdf 100644
--- a/hysop/backend/host/python/operator/vorticity_absorption.py
+++ b/hysop/backend/host/python/operator/vorticity_absorption.py
@@ -102,7 +102,7 @@ class PythonVorticityAbsorption(HostOperator):
 
         input_fields = {velocity: variables[velocity], vorticity: variables[vorticity]}
         output_fields = {vorticity: variables[vorticity]}
-        input_params = {flowrate.name: flowrate}
+        input_params = {flowrate}
 
         self.velocity = velocity
         self.vorticity = vorticity
diff --git a/hysop/core/graph/computational_graph.py b/hysop/core/graph/computational_graph.py
index 0e4a8eb438839b0cf0bea193b0f510bf34459fcb..0ddc0d880d1cd08ff488d13ea4a9e8e3f7e16a55 100644
--- a/hysop/core/graph/computational_graph.py
+++ b/hysop/core/graph/computational_graph.py
@@ -376,10 +376,10 @@ class ComputationalGraph(ComputationalGraphNode, metaclass=ABCMeta):
                     )
                 )
                 pinputs = ",".join(
-                    sorted(p.pretty_name for p in op.input_params.values())
+                    sorted(p.pretty_name for p in op.input_params.keys())
                 )
                 poutputs = ",".join(
-                    sorted(p.pretty_name for p in op.output_params.values())
+                    sorted(p.pretty_name for p in op.output_params.keys())
                 )
                 infields = f"[{finputs}]" if finputs else ""
                 outfields = f"[{foutputs}]" if foutputs else ""
@@ -411,10 +411,10 @@ class ComputationalGraph(ComputationalGraphNode, metaclass=ABCMeta):
             operators = domains[None]
             for op in sorted(operators, key=lambda x: x.pretty_name):
                 pinputs = ",".join(
-                    sorted(p.pretty_name for p in op.input_params.values())
+                    sorted(p.pretty_name for p in op.input_params.keys())
                 )
                 poutputs = ",".join(
-                    sorted(p.pretty_name for p in op.output_params.values())
+                    sorted(p.pretty_name for p in op.output_params.keys())
                 )
                 inparams = f"[{pinputs}]" if pinputs else ""
                 outparams = f"[{poutputs}]" if poutputs else ""
@@ -617,10 +617,10 @@ class ComputationalGraph(ComputationalGraphNode, metaclass=ABCMeta):
                 foutputs = ",".join(sorted(foutputs))
 
                 pinputs = ",".join(
-                    sorted(p.pretty_name for p in node.input_params.values())
+                    sorted(p.pretty_name for p in node.input_params.keys())
                 )
                 poutputs = ",".join(
-                    sorted(p.pretty_name for p in node.output_params.values())
+                    sorted(p.pretty_name for p in node.output_params.keys())
                 )
 
                 infields = f"[{finputs}]" if finputs else ""
@@ -833,7 +833,6 @@ class ComputationalGraph(ComputationalGraphNode, metaclass=ABCMeta):
     @debug
     @not_initialized
     def push_nodes(self, *args):
-        from hysop.operators import InterTaskParamComm
         from hysop.problem import Problem
 
         nodes = ()
@@ -886,9 +885,7 @@ class ComputationalGraph(ComputationalGraphNode, metaclass=ABCMeta):
                 self._last_pushed_node_mpi_params
                 and self._last_pushed_node_mpi_params.task_id != mpi_params.task_id
             ):
-                if not isinstance(node, InterTaskParamComm) and not (
-                    isinstance(node, Problem) and node.search_intertasks_ops
-                ):
+                if not (isinstance(node, Problem) and node.search_intertasks_ops):
                     self.nodes.append(_hidden_node(node, mpi_params))
             self._last_pushed_node_mpi_params = _get_mpi_params(node)
             if mpi_params and not mpi_params.on_task:
diff --git a/hysop/core/graph/computational_node.py b/hysop/core/graph/computational_node.py
index 80beabf38aa69b332f77aac72255e322df031c40..e142dd0d887ad0f834680279a398c4b92972a0dc 100644
--- a/hysop/core/graph/computational_node.py
+++ b/hysop/core/graph/computational_node.py
@@ -29,6 +29,7 @@ from abc import ABCMeta, abstractmethod
 from hysop import dprint
 from hysop.tools.htypes import InstanceOf, to_set, check_instance, first_not_None
 from hysop.tools.io_utils import IOParams
+from hysop.tools.parameters import MPIParams
 from hysop.parameters.parameter import Parameter
 from hysop.fields.continuous_field import Field, ScalarField, TensorField
 from hysop.core.graph.node_requirements import NodeRequirements
@@ -134,9 +135,9 @@ class ComputationalGraphNode(OperatorBase, metaclass=ABCMeta):
         output_fields: dict, optional
             output fields as a dictionnary (see Notes).
         input_params: array like of hysop.parameters.Parameter or dict, optional (see Notes)
-            input parameters as a list or a dictionnary.
+            input parameters as a set or a dictionary.
         output_params: array like of hysop.parameters.Parameter or dict, optional (see Notes)
-            output parameters as a list or a dictionnary.
+            output parameters as a set or a dictionary.
         input_tensor_fields: tuple, optional
             input tensor fields as a tuple.
             If given, input_fields is assumed to contain only ScalarFields.
@@ -191,8 +192,7 @@ class ComputationalGraphNode(OperatorBase, metaclass=ABCMeta):
 
         VectorFields and TensorFields are expanded to ScalarFields.
 
-        For input and output parameters, the keys of the dicts can be arbitrary names that
-        can be used to retrieve the parameters
+        For input and output parameters given as a dict, the key is the actual
+        Parameter and the value must be an MPIParams instance or None.
 
         Giving the following keywords as inputs (in **kwds) will throw a ValueError:
             input_vars, output_vars, variables, iwork, rwork, work, backend
@@ -295,10 +295,10 @@ class ComputationalGraphNode(OperatorBase, metaclass=ABCMeta):
             raise ValueError(msg.format(output_fields.__class__))
         if not isinstance(input_params, dict):
             input_params = to_set(input_params)
-            input_params = {p.name: p for p in input_params}
+            input_params = {p: None for p in input_params}
         if not isinstance(output_params, dict):
             output_params = to_set(output_params)
-            output_params = {p.name: p for p in output_params}
+            output_params = {p: None for p in output_params}
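+            # None values are placeholders: they are replaced by this node's
+            # mpi_params during _setup_base (see the consolidation step there).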
 
         self.name = name
         self.pretty_name = pretty_name
@@ -436,8 +436,12 @@ class ComputationalGraphNode(OperatorBase, metaclass=ABCMeta):
         assert not self._base_initialized
         check_instance(input_fields, dict, keys=ScalarField)
         check_instance(output_fields, dict, keys=ScalarField)
-        check_instance(input_params, dict, keys=str, values=Parameter)
-        check_instance(output_params, dict, keys=str, values=Parameter)
+        check_instance(
+            input_params, dict, keys=Parameter, values=(MPIParams, type(None))
+        )
+        check_instance(
+            output_params, dict, keys=Parameter, values=(MPIParams, type(None))
+        )
         check_instance(input_tensor_fields, tuple, values=TensorField)
         check_instance(output_tensor_fields, tuple, values=TensorField)
         check_instance(all_input_fields, tuple, values=Field)
@@ -458,8 +462,8 @@ class ComputationalGraphNode(OperatorBase, metaclass=ABCMeta):
         otfields = set(self.output_tensor_fields)
         tfields = tuple(itfields.union(otfields))
 
-        iparams = set(self.input_params.values())
-        oparams = set(self.output_params.values())
+        iparams = set(self.input_params.keys())
+        oparams = set(self.output_params.keys())
         parameters = tuple(iparams.union(oparams))
 
         if ("mpi_params" in self.__kwds) and (
@@ -491,6 +495,16 @@ class ComputationalGraphNode(OperatorBase, metaclass=ABCMeta):
             parameters=parameters,
             **self.__kwds,
         )
+        # Consolidate unknown mpi_params for parameters.
+        for p in iparams:
+            if self.input_params[p] is None:
+                self.input_params[p] = self.mpi_params
+        for p in oparams:
+            if self.output_params[p] is None:
+                self.output_params[p] = self.mpi_params
+        # After consolidation, None values are no longer allowed.
+        check_instance(self.input_params, dict, keys=Parameter, values=MPIParams)
+        check_instance(self.output_params, dict, keys=Parameter, values=MPIParams)
         self._base_initialized = True
         self.all_input_fields = all_input_fields
         self.all_output_fields = all_output_fields
@@ -1313,16 +1327,14 @@ class ComputationalGraphNode(OperatorBase, metaclass=ABCMeta):
         ss += "\n  INPUT PARAMS:{}"
         if self.input_params:
             ss = ss.format(
-                sep
-                + sep.join(f.short_description() for f in self.input_params.values())
+                sep + sep.join(f.short_description() for f in self.input_params.keys())
             )
         else:
             ss = ss.format(" None")
         ss += "\n  OUTPUT PARAMS:{}"
         if self.output_params:
             ss = ss.format(
-                sep
-                + sep.join(f.short_description() for f in self.output_params.values())
+                sep + sep.join(f.short_description() for f in self.output_params.keys())
             )
         else:
             ss = ss.format(" None")
diff --git a/hysop/core/graph/graph.py b/hysop/core/graph/graph.py
index aafaec415d3e1e6a0ca48978d7bf65eca508ca9a..57f5d72c866edd343661bb402de96cd80bec5a36 100644
--- a/hysop/core/graph/graph.py
+++ b/hysop/core/graph/graph.py
@@ -236,7 +236,7 @@ class VertexAttributes:
             f"{prefix}Rank:{suffix}{self.op_ordering}\n\n" if self.op_ordering else "",
             (
                 "{p}Pin:{s}{}\n".format(
-                    sep.join(ipinfo(param) for param in iparams.values()),
+                    sep.join(ipinfo(param) for param in iparams.keys()),
                     p=prefix,
                     s=suffix + "&nbsp&nbsp",
                 )
@@ -254,7 +254,7 @@ class VertexAttributes:
             ),
             (
                 "{p}Pout:{s}{}\n".format(
-                    sep.join([opinfo(param) for param in oparams.values()]),
+                    sep.join([opinfo(param) for param in oparams.keys()]),
                     p=prefix,
                     s=suffix,
                 )
@@ -466,7 +466,7 @@ def op_apply(f):
             _file = inspect.getsourcefile(f)
             _, _line = inspect.getsourcelines(f)
             description = f"{_file}:{_line}"
-            for param in sorted(op.input_params.values(), key=lambda x: x.name):
+            for param in sorted(op.input_params.keys(), key=lambda x: x.name):
                 tag = f"pre_{op.name}_{param.name}"
                 kwds["debug_dumper"](
                     it, t, tag, (param._value,), description=description
@@ -486,7 +486,7 @@ def op_apply(f):
                     description=description,
                 )
             ret = f(*args, **kwds)
-            for param in sorted(op.output_params.values(), key=lambda x: x.name):
+            for param in sorted(op.output_params.keys(), key=lambda x: x.name):
                 tag = f"post_{op.name}_{param.name}"
                 kwds["debug_dumper"](
                     it, t, tag, (param._value,), description=description
diff --git a/hysop/core/graph/graph_builder.py b/hysop/core/graph/graph_builder.py
index accc7b19f4c4d15c39f102600a84b73baa8d3822..f832e928d34377c853826e81f2b07bc92c56e786 100644
--- a/hysop/core/graph/graph_builder.py
+++ b/hysop/core/graph/graph_builder.py
@@ -20,6 +20,7 @@
 import numpy as np
 
 from hysop import vprint, dprint, Problem
+from hysop.fields.continuous_field import ScalarField
 from hysop.tools.htypes import check_instance, first_not_None
 from hysop.tools.io_utils import IOParams
 
@@ -50,9 +51,9 @@ from hysop.fields.field_requirements import (
 from hysop.operator.redistribute import (
     Redistribute,
     RedistributeInter,
+    RedistributeInterParam,
     RedistributeNotImplementedError,
 )
-from hysop.operator.inter_task_param_comm import PythonInterTaskParamComm
 from hysop.operator.transpose import Transpose, TranspositionNotImplementedError
 from hysop.operator.memory_reordering import (
     MemoryReordering,
@@ -226,19 +227,13 @@ class GraphBuilder:
                 field_requirements = op._field_requirements
 
             if isinstance(op, RedistributeInter) or isinstance(
-                op, PythonInterTaskParamComm
+                op, RedistributeInterParam
             ):
                 self._intertasks_exchanged = self._intertasks_exchanged.union(
-                    {_.name for _ in op.output_fields.keys()}
+                    {_.name for _ in list(op.output_fields) + list(op.output_params)}
                 )
                 self._intertasks_exchanged = self._intertasks_exchanged.union(
-                    {_.name for _ in op.input_fields.keys()}
-                )
-                self._intertasks_exchanged = self._intertasks_exchanged.union(
-                    set(op.output_params.keys())
-                )
-                self._intertasks_exchanged = self._intertasks_exchanged.union(
-                    set(op.input_params.keys())
+                    {_.name for _ in list(op.input_fields) + list(op.input_params)}
                 )
 
             if not isinstance(op, Problem) and not isinstance(op, RedistributeInter):
@@ -303,19 +298,19 @@ class GraphBuilder:
             # iterate over subgraph operator input parameters
             if iparams:
                 gprint("   >Input parameters")
-                for iparam in sorted(iparams.values(), key=lambda x: x.name):
+                for iparam in sorted(iparams.keys(), key=lambda x: x.name):
                     gprint(f"     *{iparam.short_description()}")
                     parameter_handler.handle_input_parameter(iparam, opnode)
-                    if iparam.name not in output_params:
+                    if iparam not in output_params:
-                        input_params[iparam.name] = iparam
+                        input_params[iparam] = iparams[iparam]
 
             # iterate over subgraph operator output parameters
             if oparams:
                 gprint("   >Output parameters")
-                for oparam in sorted(oparams.values(), key=lambda x: x.name):
+                for oparam in sorted(oparams.keys(), key=lambda x: x.name):
                     gprint(f"     *{oparam.short_description()}")
                     parameter_handler.handle_output_parameter(oparam, opnode)
-                    output_params[oparam.name] = oparam
+                    output_params[oparam] = oparams[oparam]
 
             # iterate over subgraph operator input fields
             input_states = {}
@@ -377,7 +372,7 @@ class GraphBuilder:
                             (
                                 " on an unknown topology"
                                 if (otopo is None)
-                                else f".{otopo.pretty_tag}  t{otopo.task_id}"
+                                else f".{otopo.pretty_tag}"
                             ),
                         )
                     )
@@ -446,10 +441,10 @@ class GraphBuilder:
 
             # Find redistribute candidates
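+            # Keys are Fields or Parameters (no longer plain strings), so they
+            # all carry a name attribute.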
             available_names = {
-                _ if not hasattr(_, "name") else _.name for _ in available_elems.keys()
+                _.name for _ in available_elems.keys()
             } - self._intertasks_exchanged
             needed_names = {
-                _ if not hasattr(_, "name") else _.name for _ in needed_elems.keys()
+                _.name for _ in needed_elems.keys()
             } - self._intertasks_exchanged
             mgs = "  >[IT] Current task ({}) {} parameters and fields : {}"
             gprint(
@@ -500,12 +495,10 @@ class GraphBuilder:
                             can_provide = [_ for _ in ot_needs if _ in available_names]
                             for prov in can_provide:
                                 available_names[prov] = ot
-                                assert (
-                                    ot
-                                    != available_elems[
-                                        _name_to_key(prov, available_elems)
-                                    ].task_id
-                                )
+                                ae = available_elems[
+                                    _name_to_key(prov, available_elems)
+                                ]
+                                assert ot != ae.task_id
                             comm.isend(
                                 can_provide,
                                 dest=domain.task_root_in_parent(ot),
@@ -516,12 +509,8 @@ class GraphBuilder:
                             )
                             for _op in ot_provide:
                                 needed_names[_op] = ot
-                                assert (
-                                    ot
-                                    != needed_elems[
-                                        _name_to_key(_op, needed_elems)
-                                    ].task_id
-                                )
+                                ne = needed_elems[_name_to_key(_op, needed_elems)]
+                                assert ot != ne.task_id
                         if len(ot_needs) > 0:
                             msg += "\n   *Other task {} needs init for {}, we provide {}".format(
                                 ot,
@@ -555,54 +544,85 @@ class GraphBuilder:
                 f"  >[IT] Inter-tasks will send:to {available_names} and recieve:from {needed_names}"
             )
             # Get back the actual field or parameter
+            names_to_obj = {}
+            for p in available_names.keys():
+                names_to_obj[p] = _name_to_key(p, available_elems)
+            for p in needed_names.keys():
+                names_to_obj[p] = _name_to_key(p, needed_elems)
+            # Group parameters that are exchanged with the same remote task.
+            allp = []
+            tasks_to_name = {}
             for p in sorted(
                 set(available_names.keys()).union(set(needed_names.keys()))
             ):
+                t = available_names[p] if p in available_names else needed_names[p]
+                if isinstance(names_to_obj[p], ScalarField):
+                    allp.append([p])
+                else:
+                    tasks_to_name.setdefault(t, []).append(p)
+            for params in tasks_to_name.values():
+                allp.append(params)
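+            # Each entry of allp is a list of names exchanged with the same
+            # remote task (fields always travel alone).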
+            for p in sorted(allp):
                 kwargs = {}
                 s_topo, r_topo, comm_dir = (None,) * 3
-                ae, ne = tuple(
-                    _name_to_key(p, _) for _ in (available_elems, needed_elems)
-                )
-                if not ae is None and p in available_names:
-                    var = ae
-                    t = available_names[p]
-                    topo = available_elems[ae]
+                var = tuple(names_to_obj[_] for _ in p)
+                if p[0] in available_names:
+                    t = available_names[p[0]]
+                    topo = available_elems[var[0]]
                     comm_dir = "src"
                     s_topo = topo
-                if not ne is None and p in needed_names:
-                    var = ne
-                    t = needed_names[p]
-                    topo = needed_elems[ne]
+                if p[0] in needed_names:
+                    t = needed_names[p[0]]
+                    topo = needed_elems[var[0]]
                     comm_dir = "dest"
                     r_topo = topo
                 if not (s_topo is None or r_topo is None):
                     comm_dir = "src&dest"
                     t = None
                 assert not comm_dir is None
+                opname = "RI{}_{}{}{}_{}".format(
+                    comm_dir,
+                    "" if s_topo is None else s_topo.task_id,
+                    "to" if not s_topo is None and not r_topo is None else "",
+                    "" if r_topo is None else r_topo.task_id,
+                    ",".join(_.name for _ in var),
+                )
                 # Finalize init call
                 kwargs.update(
                     {
-                        "variable": var,
-                        "mpi_params": topo.mpi_params,
-                        "name": "RI{}_{}{}{}_{}".format(
-                            comm_dir,
-                            "" if s_topo is None else s_topo.id,
-                            "to" if not s_topo is None and not r_topo is None else "",
-                            "" if r_topo is None else r_topo.id,
-                            var.name,
-                        ),
-                        "pretty_name": "RI{}_{}{}{}_{}".format(
-                            comm_dir,
-                            "" if s_topo is None else subscript(s_topo.id),
-                            "\u2192",
-                            "" if r_topo is None else subscript(r_topo.id),
-                            var.pretty_name,
-                        ),
+                        "name": opname,
                         "source_topo": s_topo,
                         "target_topo": r_topo,
                         "other_task_id": t,
                     }
                 )
+                if isinstance(var[0], ScalarField):
+                    kwargs.update(
+                        {
+                            "variable": var[0],
+                            "mpi_params": topo.mpi_params,
+                        }
+                    )
+                else:
+                    kwargs.update(
+                        {
+                            "parameter": var,
+                            "domain": domain,
+                        }
+                    )
                 yield kwargs
 
         # iterate over ComputationalNodes
@@ -645,18 +665,26 @@ class GraphBuilder:
                     for it_redistribute_kwargs in __find_elements_to_redistribute(
                         available_elems, needed_elems
                     ):
-                        assert RedistributeInter.can_redistribute(
-                            *tuple(
-                                it_redistribute_kwargs[_]
-                                for _ in ("source_topo", "target_topo", "other_task_id")
-                            )
-                        ), str(it_redistribute_kwargs)
+                        if "variable" in it_redistribute_kwargs.keys():
+                            assert RedistributeInter.can_redistribute(
+                                *tuple(
+                                    it_redistribute_kwargs[_]
+                                    for _ in (
+                                        "source_topo",
+                                        "target_topo",
+                                        "other_task_id",
+                                    )
+                                )
+                            ), str(it_redistribute_kwargs)
                         if op.fake_init:
                             op.__init__(**it_redistribute_kwargs)
                             # Recompute fields requirements since no fields were given in first fake operator creation
                             first_op, first_opnode = op, opnode
                         else:
-                            op = RedistributeInter(**it_redistribute_kwargs)
+                            if "variable" in it_redistribute_kwargs.keys():
+                                op = RedistributeInter(**it_redistribute_kwargs)
+                            else:
+                                op = RedistributeInterParam(**it_redistribute_kwargs)
                             target_node.nodes.insert(
                                 target_node.nodes.index(first_op), op
                             )
@@ -674,12 +702,14 @@ class GraphBuilder:
                             )
                             opvertex = node_vertices[0]
                             opnode = new_vertex(graph, op)
-                            cstate = self.topology_states.setdefault(
-                                op.variable, self.new_topology_state(op.variable)
-                            )
+                            if isinstance(op, RedistributeInter):
+                                cstate = self.topology_states.setdefault(
+                                    op.variable, self.new_topology_state(op.variable)
+                                )
                             node = op
-                        op.initialize(topgraph_method=self.target_node.method)
-                        op.get_and_set_field_requirements()
+                        if isinstance(op, RedistributeInter):
+                            op.initialize(topgraph_method=self.target_node.method)
+                            op.get_and_set_field_requirements()
                         __handle_node(
                             node_id,
                             node,
@@ -692,7 +722,7 @@ class GraphBuilder:
                             opnode,
                         )
                         node_id += 1
-                    if op.fake_init:
+                    if isinstance(op, RedistributeInter) and op.fake_init:
                         # Delete node because nothing has to be exchanged
                         target_node.nodes.remove(op)
                         graph.remove_node(opnode)
@@ -727,7 +757,7 @@ class GraphBuilder:
                             f": {ireqs}" if GRAPH_BUILDER_DEBUG_LEVEL == 2 else "",
                         )
                 if len(self.input_params) > 0:
-                    for iparam in sorted(self.input_params):
+                    for iparam in sorted(ip.name for ip in self.input_params):
                         msg += f"  *Parameter {iparam}\n"
             msg += f"ComputationalGraph {target_node.name} outputs {comment}:\n"
             if not self.output_fields and not self.output_params:
@@ -743,7 +773,7 @@ class GraphBuilder:
                             f": {oreqs}" if GRAPH_BUILDER_DEBUG_LEVEL == 2 else "",
                         )
                 if len(self.output_params) > 0:
-                    for oparam in sorted(self.output_params):
+                    for oparam in sorted(op.name for op in self.output_params):
                         msg += f"  *Parameter {oparam}\n"
 
             msg += "\n"
@@ -1050,6 +1080,15 @@ class GraphBuilder:
             node_operators = node.operators()
             node_ops.extend(node_operators)
             node_vertices += [None] * len(node_operators)
+        elif isinstance(node, RedistributeInterParam):
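+            # Parameter redistribution carries no field topology; treat it as
+            # a single operator node.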
+            node_ops.append(node)
+            node_vertices += [None]
         elif node.mpi_params is None or node.mpi_params.on_task:
             if isinstance(node, Problem):
                 node._build_graph(
diff --git a/hysop/core/mpi/redistribute.py b/hysop/core/mpi/redistribute.py
index 38e15aea314ad9b9afc2b19f1ffbf52d4efe3ea3..a1600861778abbb4b77df8af80fb2cfe7457bdc8 100644
--- a/hysop/core/mpi/redistribute.py
+++ b/hysop/core/mpi/redistribute.py
@@ -35,6 +35,7 @@ redistribute deployment.
 """
 
 from hashlib import sha1
+import numpy as np
 from hysop.constants import Backend, DirectionLabels, MemoryOrdering
 from hysop.tools.htypes import check_instance, to_set, first_not_None
 from hysop.tools.decorators import debug
@@ -45,8 +46,10 @@ from hysop.topology.topology_descriptor import TopologyDescriptor
 from hysop.core.mpi.topo_tools import TopoTools
 from hysop.core.mpi.bridge import Bridge, BridgeOverlap, BridgeInter
 from hysop.operator.base.redistribute_operator import RedistributeOperatorBase
+from hysop.core.graph.computational_operator import ComputationalGraphOperator
 from hysop.core.graph.graph import op_apply
-from hysop import MPI
+from hysop import MPI, MPIParams
+from hysop.parameters.scalar_parameter import ScalarParameter, TensorParameter
 
 DEBUG_REDISTRIBUTE = 0
 
@@ -798,3 +801,129 @@ class RedistributeInter(RedistributeOperatorBase):
                         tkind=Backend.OPENCL,
                     )
             self.dFout.exchange_ghosts()
+
+
+class RedistributeInterParam(ComputationalGraphOperator):
+    """parameter transfer between two operators/topologies.
+    Source and target must:
+     *be MPIParams defined on different communicators
+    """
+
+    @classmethod
+    def supports_mpi(cls):
+        return True
+
+    def __new__(
+        cls, parameter, source_topo, target_topo, other_task_id, domain, **kwds
+    ):
+        return super().__new__(cls, **kwds)
+
+    def __init__(
+        self, parameter, source_topo, target_topo, other_task_id, domain, **kwds
+    ):
+        """
+        Communicate parameter through tasks
+
+        parameter
+        ----------
+        parameter: tuple of ScalarParameter or TensorParameter
+            parameters to communicate
+        source_topo: MPIParam
+        target_topo: MPIParam
+        """
+        check_instance(parameter, tuple, values=(ScalarParameter, TensorParameter))
+        check_instance(source_topo, MPIParams, allow_none=True)
+        check_instance(target_topo, MPIParams, allow_none=True)
+        input_fields, output_fields = {}, {}
+        input_params, output_params = {}, {}
+        assert not (source_topo is None and target_topo is None)
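+        # Parameters are inputs on the source task and outputs on the target
+        # task; on overlapping tasks both dicts may be filled.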
+        if source_topo is not None and source_topo.on_task:
+            input_params = {p: source_topo for p in parameter}
+        if target_topo is not None and target_topo.on_task:
+            output_params = {p: target_topo for p in parameter}
+        super().__init__(
+            mpi_params=first_not_None(source_topo, target_topo),
+            input_params=input_params,
+            output_params=output_params,
+            input_fields=input_fields,
+            output_fields=output_fields,
+            **kwds,
+        )
+        self.initialized = True
+        self.domain = domain
+        self.source_task = other_task_id if source_topo is None else source_topo.task_id
+        self.target_task = other_task_id if target_topo is None else target_topo.task_id
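+        # When a topology is unknown on this process (None), the remote task
+        # id comes from other_task_id.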
+        self.task_is_source = domain.is_on_task(self.source_task)
+        self.task_is_target = domain.is_on_task(self.target_task)
+        if self.task_is_source:
+            assert source_topo.on_task
+        if self.task_is_target:
+            assert target_topo.on_task
+        self.inter_comm = domain.task_intercomm(
+            self.target_task if self.task_is_source else self.source_task
+        )
+        if self.inter_comm.is_inter:
+            # Disjoint tasks with real inter-communicator
+            self._the_apply = self._apply_intercomm
+        elif self.inter_comm.is_intra:
+            # Overlapping tasks: use an intra-communicator built from the
+            # union of the tasks' processes
+            self._the_apply = self._apply_intracomm
+
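+        # Group parameters by dtype so that each dtype is exchanged through a
+        # single contiguous buffer (one broadcast per dtype).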
+        self._all_params_by_type = {}
+        for p in sorted(self.parameters, key=lambda _: _.name):
+            self._all_params_by_type.setdefault(p.dtype, []).append(p)
+        self._send_temp_by_type = {
+            t: np.zeros((len(self._all_params_by_type[t]),), dtype=t)
+            for t in self._all_params_by_type.keys()
+        }
+        self._recv_temp_by_type = {
+            t: np.zeros((len(self._all_params_by_type[t]),), dtype=t)
+            for t in self._all_params_by_type.keys()
+        }
+
+    @op_apply
+    def apply(self, **kwds):
+        self._the_apply(**kwds)
+
+    def _apply_intercomm(self, **kwds):
+        """Disjoint tasks so inter-comm bcast is needed."""
+        for t in self._all_params_by_type.keys():
+            if self.task_is_source:
+                self._send_temp_by_type[t][...] = [
+                    p() for p in self._all_params_by_type[t]
+                ]
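+                # On the sending group of an inter-communicator broadcast,
+                # the root passes MPI.ROOT and all other ranks MPI.PROC_NULL.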
+                self.inter_comm.bcast(
+                    self._send_temp_by_type[t],
+                    root=MPI.ROOT if self.domain.task_rank() == 0 else MPI.PROC_NULL,
+                )
+            if self.task_is_target:
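+                # Receivers pass the root's rank in the remote group (0).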
+                self._recv_temp_by_type[t] = self.inter_comm.bcast(
+                    self._send_temp_by_type[t], root=0
+                )
+                for p, v in zip(
+                    self._all_params_by_type[t], self._recv_temp_by_type[t]
+                ):
+                    p.value = v
+
+    def _apply_intracomm(self, **kwds):
+        """Communicator is an intra-communicator defined as tasks' comm union.
+        Single broadcast is enough.
+        """
+        for t in self._all_params_by_type.keys():
+            if self.task_is_source and self.domain.task_rank() == 0:
+                self._send_temp_by_type[t][...] = [
+                    p() for p in self._all_params_by_type[t]
+                ]
+            self._recv_temp_by_type[t] = self.inter_comm.bcast(
+                self._send_temp_by_type[t],
+                self.domain.task_root_in_parent(self.source_task),
+            )
+            if self.task_is_target:
+                for p, v in zip(
+                    self._all_params_by_type[t], self._recv_temp_by_type[t]
+                ):
+                    p.value = v
diff --git a/hysop/operator/adapt_timestep.py b/hysop/operator/adapt_timestep.py
index 45e07aaddeb59eba96f5a12de5c91cf3d95296b0..b78d24eb5f61881bf02840e1b887392b8bc6e615 100644
--- a/hysop/operator/adapt_timestep.py
+++ b/hysop/operator/adapt_timestep.py
@@ -32,6 +32,7 @@ from hysop.core.graph.computational_operator import ComputationalGraphOperator
 from hysop.core.graph.graph import op_apply
 from hysop.fields.continuous_field import Field
 from hysop.parameters import ScalarParameter, TensorParameter
+from hysop.parameters.parameter import Parameter
 from hysop.core.mpi import MPI
 from hysop.backend.host.host_operator import HostOperatorBase
 
@@ -71,10 +72,10 @@ class TimestepCriteria(HostOperatorBase, metaclass=ABCMeta):
         ----------
         parameter: ScalarParameter
             Timestep parameter that will be updated.
-        input_params: dict
-            Input parameters used to compute criteria.
-        output_params: dict
+        input_params: set
             Input parameters used to compute criteria.
+        output_params: set
+            Output parameters used to compute criteria.
         min_dt : float, optional
             Minimum value allowed for time step, defaults to 0.
         max_dt : float, optional
@@ -87,13 +88,13 @@ class TimestepCriteria(HostOperatorBase, metaclass=ABCMeta):
             Base class arguments.
         """
         check_instance(parameter, ScalarParameter)
-        check_instance(input_params, dict, keys=str)
-        check_instance(output_params, dict, keys=str)
+        check_instance(input_params, set, values=Parameter)
+        check_instance(output_params, set, values=Parameter)
         assert (min_dt is None) or (min_dt > 0.0)
         assert (max_dt is None) or (max_dt > 0.0)
         assert (min_dt is None) or (max_dt is None) or (max_dt >= min_dt)
         assert (dt_coeff is None) or (dt_coeff > 0.0)
-        assert parameter.name in output_params
+        assert parameter in output_params
 
         super().__init__(input_params=input_params, output_params=output_params, **kwds)
 
@@ -306,7 +307,7 @@ class CflTimestepCriteria(TimestepCriteria):
             check_instance(Fmin, TensorParameter)
             check_instance(Fmax, TensorParameter)
             assert Fmin.shape == Fmax.shape
-            input_params = {Fmin.name: Fmin, Fmax.name: Fmax}
+            input_params = {Fmin, Fmax}
             dtype = Fmin.dtype
             shape = Fmin.shape
             size = Fmin.size
@@ -315,7 +316,7 @@ class CflTimestepCriteria(TimestepCriteria):
             msg = "Cannot specify (Fmin,Fmax) and Finf at the same time."
             assert Fmin is None, msg
             assert Fmax is None, msg
-            input_params = {Finf.name: Finf}
+            input_params = {Finf}
             dtype = Finf.dtype
             shape = Finf.shape
             size = Finf.size
@@ -330,7 +331,7 @@ class CflTimestepCriteria(TimestepCriteria):
             name=name,
             pretty_name=pretty_name,
             input_params=input_params,
-            output_params={parameter.name: parameter},
+            output_params={parameter},
             parameter=parameter,
             **kwds,
         )
@@ -476,13 +477,13 @@ class AdvectionTimestepCriteria(TimestepCriteria):
         check_instance(gradFinf, TensorParameter, allow_none=True)
         check_instance(parameter, ScalarParameter)
         check_instance(criteria, AdvectionCriteria)
-        input_params = {}
+        input_params = set()
         if Finf is not None:
             assert Finf().ndim == 1, "Finf should be a 1D tensor parameter."
-            input_params[Finf.name] = Finf
+            input_params.add(Finf)
         if gradFinf is not None:
             assert gradFinf().ndim == 2, "gradFinf should be a 2D tensor parameter."
-            input_params[gradFinf.name] = gradFinf
+            input_params.add(gradFinf)
 
         name = first_not_None(name, "LCFL")
         pretty_name = first_not_None(pretty_name, name)
@@ -490,7 +491,7 @@ class AdvectionTimestepCriteria(TimestepCriteria):
             name=name,
             pretty_name=pretty_name,
             input_params=input_params,
-            output_params={parameter.name: parameter},
+            output_params={parameter},
             parameter=parameter,
             **kwds,
         )
@@ -591,8 +592,8 @@ class StretchingTimestepCriteria(TimestepCriteria):
         super().__init__(
             name=name,
             pretty_name=pretty_name,
-            input_params={gradFinf.name: gradFinf},
-            output_params={parameter.name: parameter},
+            input_params={gradFinf},
+            output_params={parameter},
             parameter=parameter,
             **kwds,
         )
@@ -639,8 +640,8 @@ class MergeTimeStepCriterias(TimestepCriteria):
         check_instance(parameter, ScalarParameter)
         check_instance(criterias, dict, keys=str, values=TimestepCriteria)
         check_instance(equivalent_CFL, ScalarParameter, allow_none=True)
-        output_params = {parameter.name: parameter}
-        input_params = {}
+        output_params = {parameter}
+        input_params = set()
         for criteria in criterias.values():
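+            # criteria.output_params is a dict keyed by Parameter; update()
+            # adds its keys to the input_params set.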
             input_params.update(criteria.output_params)
 
@@ -657,7 +658,7 @@ class MergeTimeStepCriterias(TimestepCriteria):
         )
 
     def compute_criteria(self, **kwds):
-        dt = min(p.value for p in self.input_params.values())
+        dt = min(p.value for p in self.input_params.keys())
         if self.equivalent_CFL is not None:
             cfl = self.cfl_criteria.compute_cfl(dt)
             self.equivalent_CFL.set_value(cfl)
diff --git a/hysop/operator/base/advection_dir.py b/hysop/operator/base/advection_dir.py
index 460dbdb7bf9583ba327b9951c056230d60c19b34..88510413e0175e3edb15a14dac919653d1c05c33 100644
--- a/hysop/operator/base/advection_dir.py
+++ b/hysop/operator/base/advection_dir.py
@@ -218,8 +218,8 @@ class DirectionalAdvectionBase:
 
         input_fields = {Vd: variables[velocity]}
         output_fields = {}
-        input_params = {dt.name: dt}
-        output_params = {}
+        input_params = {dt}
+        output_params = set()
 
         for ifield, ofield in zip(advected_fields_in, advected_fields_out):
             input_fields[ifield] = variables[ifield]
diff --git a/hysop/operator/base/convergence.py b/hysop/operator/base/convergence.py
index fc034f0a958b30bb428d1d03244a6cb7b4a5319d..1ad36456dc9dd78748cd9fad87683b3d99431734 100644
--- a/hysop/operator/base/convergence.py
+++ b/hysop/operator/base/convergence.py
@@ -93,7 +93,7 @@ class ConvergenceBase:
             )
 
         input_fields = {field: variables[field]}
-        output_params = {convergence.name: convergence}
+        output_params = {convergence}
 
         self.field = field
         self.convergence = convergence
diff --git a/hysop/operator/base/custom.py b/hysop/operator/base/custom.py
index 7634add66257d10f9b925b2757ed6c6863d57745..ef3f3073ea2cade8eb1ec9099612d0453e9aa7f0 100644
--- a/hysop/operator/base/custom.py
+++ b/hysop/operator/base/custom.py
@@ -28,21 +28,44 @@ from hysop.core.graph.graph import op_apply
 class CustomOperatorBase:
 
     @debug
-    def __new__(cls, func, invars=None, outvars=None,
-                extra_args=None, variables=None, ghosts=None,
-                do_update_ghosts=True, **kwds):
-        return super().__new__(cls,
-                               input_fields=None, output_fields=None,
-                               input_params=None, output_params=None,
-                               **kwds)
+    def __new__(
+        cls,
+        func,
+        invars=None,
+        outvars=None,
+        extra_args=None,
+        variables=None,
+        ghosts=None,
+        do_update_ghosts=True,
+        **kwds,
+    ):
+        return super().__new__(
+            cls,
+            input_fields=None,
+            output_fields=None,
+            input_params=None,
+            output_params=None,
+            **kwds,
+        )
 
     @debug
-    def __init__(self, func, invars=None, outvars=None,
-                 extra_args=None, variables=None, ghosts=None, do_update_ghosts=True, **kwds):
-        check_instance(invars, (tuple, list), values=(Field, Parameter),
-                       allow_none=True)
-        check_instance(outvars, (tuple, list), values=(Field, Parameter),
-                       allow_none=True)
+    def __init__(
+        self,
+        func,
+        invars=None,
+        outvars=None,
+        extra_args=None,
+        variables=None,
+        ghosts=None,
+        do_update_ghosts=True,
+        **kwds,
+    ):
+        check_instance(
+            invars, (tuple, list), values=(Field, Parameter), allow_none=True
+        )
+        check_instance(
+            outvars, (tuple, list), values=(Field, Parameter), allow_none=True
+        )
         check_instance(extra_args, tuple, allow_none=True)
         check_instance(
             variables,
@@ -52,21 +75,24 @@ class CustomOperatorBase:
             allow_none=True,
         )
         check_instance(ghosts, int, allow_none=True)
-        check_instance(do_update_ghosts, bool,)
+        check_instance(do_update_ghosts, bool)
         input_fields, output_fields = {}, {}
-        input_params, output_params = {}, {}
+        input_params, output_params = set(), set()
         if invars is not None:
             for v in invars:
                 if isinstance(v, Field):
                     input_fields[v] = variables[v]
                 elif isinstance(v, Parameter):
-                    input_params[v.name] = v
+                    input_params.add(v)
         if outvars is not None:
             for v in outvars:
                 if isinstance(v, Field):
                     output_fields[v] = variables[v]
                 elif isinstance(v, Parameter):
-                    output_params[v.name] = v
+                    output_params.add(v)
         self.invars, self.outvars = invars, outvars
         self.func = func
         self.extra_args = tuple()
diff --git a/hysop/operator/base/custom_symbolic_operator.py b/hysop/operator/base/custom_symbolic_operator.py
index 617c8e19ea3da925a30db9e4e4e4e9bef24da538..f0c3bb724ef2e23532b83ba898bf5ca36c4c6621 100644
--- a/hysop/operator/base/custom_symbolic_operator.py
+++ b/hysop/operator/base/custom_symbolic_operator.py
@@ -1630,8 +1630,8 @@ class CustomSymbolicOperatorBase(DirectionalOperatorBase, metaclass=ABCMeta):
 
         input_fields = expr_info.input_fields
         output_fields = expr_info.output_fields
-        input_params = expr_info.input_params
-        output_params = expr_info.output_params
+        input_params = set(expr_info.input_params.values())
+        output_params = set(expr_info.output_params.values())
 
         input_tensor_fields = ()
         output_tensor_fields = ()
diff --git a/hysop/operator/base/derivative.py b/hysop/operator/base/derivative.py
index 432227bb253dc1e23150da93a95be2199ff7ade1..0cba46286b222fbcce0c718666e545d77652cf77 100644
--- a/hysop/operator/base/derivative.py
+++ b/hysop/operator/base/derivative.py
@@ -180,7 +180,7 @@ class SpaceDerivativeBase(metaclass=ABCMeta):
 
         input_fields = {F: variables.get(F, None)}
         output_fields = {dF: variables.get(dF, input_fields[F])}
-        input_params = {}
+        input_params = set()
 
         is_inplace = dF is F
         require_tmp = first_not_None(require_tmp, is_inplace)
@@ -190,7 +190,7 @@ class SpaceDerivativeBase(metaclass=ABCMeta):
             input_fields[A] = variables.get(A, input_fields[F])
             scale_by_field = True
         elif isinstance(A, TensorParameter):
-            input_params[A.name] = A
+            input_params.add(A)
             scale_by_parameter = True
         elif isinstance(A, (float, int, npw.number, sm.Basic)):
             scale_by_value = (A != 1) and (A != 1.0)
diff --git a/hysop/operator/base/diffusion.py b/hysop/operator/base/diffusion.py
index 7a11ba7b06a5b93efb7b7d25767c9fe59ee8d859..e246347e06227bcf604d545a5dcdd7196a269cf9 100644
--- a/hysop/operator/base/diffusion.py
+++ b/hysop/operator/base/diffusion.py
@@ -79,7 +79,7 @@ class DiffusionOperatorBase(PoissonOperatorBase):
         check_instance(nu, ScalarParameter)
         check_instance(dt, ScalarParameter)
 
-        input_params = {dt.name: dt, nu.name: nu}
+        input_params = {dt, nu}
 
         default_name = f"Diffusion_{Fin.name}_{Fout.name}"
         default_pretty_name = f"Diffusion_{Fin.pretty_name}_{Fout.pretty_name}"
diff --git a/hysop/operator/base/enstrophy.py b/hysop/operator/base/enstrophy.py
index 7dbe6ec28e393ea2f255b7f9737e7eb2f71399d2..7611d6d62fcd32d3dd4c8fa8b8284e85cc093161 100644
--- a/hysop/operator/base/enstrophy.py
+++ b/hysop/operator/base/enstrophy.py
@@ -112,7 +112,7 @@ class EnstrophyBase(metaclass=ABCMeta):
 
         input_fields = {vorticity: variables[vorticity]}
         output_fields = {WdotW: variables[WdotW]}
-        output_params = {enstrophy.name: enstrophy}
+        output_params = {enstrophy}
 
         if rho is not None:
             input_fields[rho] = variables[rho]
diff --git a/hysop/operator/base/external_force.py b/hysop/operator/base/external_force.py
index a92f45832bab6bf0838a512834a3794bf2056b2c..100fa540a7ed0e874ce0926d2341524793edf649 100644
--- a/hysop/operator/base/external_force.py
+++ b/hysop/operator/base/external_force.py
@@ -263,8 +263,6 @@ class SpectralExternalForceOperatorBase(SpectralOperatorBase):
         output_fields = {
             f: self.get_topo_descriptor(variables, f) for f in output_fields
         }
-        input_params = {p.name: p for p in input_params}
-        output_params = {p.name: p for p in output_params}
 
         # TODO share tmp buffers for the whole tensor
         force = vorticity.tmp_like(name="Fext", ghosts=0, mem_tag="tmp_fext")
diff --git a/hysop/operator/base/integrate.py b/hysop/operator/base/integrate.py
index 220235fe617b81d4b6502a40fd67c6a631ff284c..3cfb8f298dd121de92488e9884a55c8f8b74b72b 100644
--- a/hysop/operator/base/integrate.py
+++ b/hysop/operator/base/integrate.py
@@ -126,7 +126,7 @@ class IntegrateBase(metaclass=ABCMeta):
             check_instance(scaling, tuple, values=float, size=field.nb_components)
 
         input_fields = {field: variables[field]}
-        output_params = {parameter.name: parameter}
+        output_params = {parameter}
 
         default_name = f"integrate_{field.name}"
         default_pname = f"∫{field.pretty_name}"
diff --git a/hysop/operator/base/min_max.py b/hysop/operator/base/min_max.py
index 00938cb9a6d89f49cdfe37d9b903df1836dcc416..ab5998b54065b6b46834b4700a72da0d97b2ddc9 100644
--- a/hysop/operator/base/min_max.py
+++ b/hysop/operator/base/min_max.py
@@ -280,7 +280,7 @@ class MinMaxFieldStatisticsBase:
             ppbasename=ppbasename,
         )
 
-        output_params = {p.name: p for p in parameters.values() if (p is not None)}
+        output_params = {p for p in parameters.values() if p is not None}
 
         if MinMaxDerivativeStatisticsBase in self.__class__.__mro__:
             super().__init__(
diff --git a/hysop/operator/base/poisson_curl.py b/hysop/operator/base/poisson_curl.py
index d6efbe82be81edab8ebdfede97d5f893ccc17bdd..a112bf670cfaecf6af598e8685c486766b354ee3 100644
--- a/hysop/operator/base/poisson_curl.py
+++ b/hysop/operator/base/poisson_curl.py
@@ -225,13 +225,12 @@ class PoissonCurlOperatorBase:
         # input and output fields
         vtopology = variables[velocity]
         wtopology = variables[vorticity]
-        input_params = {}
+        input_params = set()
         input_fields = {vorticity: wtopology}
         output_fields = {velocity: vtopology}
         if should_diffuse:
             assert dt is not None, "Diffusion timestep has not been given."
-            input_params[diffusion.name] = diffusion
-            input_params[dt.name] = dt
+            input_params.update({diffusion, dt})
         if should_diffuse or should_project:
             output_fields[vorticity] = wtopology
 
@@ -277,7 +276,7 @@ class PoissonCurlOperatorBase:
             msg = "Cannot compute output vorticity energy because there is no output vorticity !"
             assert should_diffuse or should_project, msg
 
-        output_params = {}
+        output_params = set()
         compute_Win_E_param = EnergyDumper.build_energy_parameter(
             do_compute=do_compute_Win_E,
             field=vorticity,
diff --git a/hysop/operator/base/spectral_operator.py b/hysop/operator/base/spectral_operator.py
index 9a2a72097cebdeddb9d8354ff872d636aa234d63..ac0431596c34c7c5229ec77b4e0dda28fcc4effd 100644
--- a/hysop/operator/base/spectral_operator.py
+++ b/hysop/operator/base/spectral_operator.py
@@ -226,7 +226,7 @@ class SpectralOperatorBase:
         for tg in self.transform_groups.values():
             output_parameters.update(tg.output_parameters)
         for p in output_parameters:
-            self.output_params[p.name] = p
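+            # self.output_params is now a dict (Parameter -> MPIParams), so
+            # register the parameter with this operator's mpi_params.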
+            self.output_params[p] = self.mpi_params
 
     def initialize(self, **kwds):
         super().initialize(**kwds)
diff --git a/hysop/operator/base/stretching_dir.py b/hysop/operator/base/stretching_dir.py
index 1d6c518640f49c5cf44a8029efc915f633091991..63011b17313c8511b00d592db24057d19c967a7f 100644
--- a/hysop/operator/base/stretching_dir.py
+++ b/hysop/operator/base/stretching_dir.py
@@ -201,8 +201,8 @@ class DirectionalStretchingBase:
 
         input_fields = {velocity: variables[velocity], vorticity: variables[vorticity]}
         output_fields = {vorticity: variables[vorticity]}
-        input_params = {dt.name: dt}
-        output_params = {}
+        input_params = {dt}
+        output_params = set()
 
         super().__init__(
             input_fields=input_fields,
diff --git a/hysop/operator/gradient.py b/hysop/operator/gradient.py
index 7a004a8807c888a311020578f181c9d151eac99e..b076a8ab763c41b3bbb4cb40ff51ed428598ae3b 100644
--- a/hysop/operator/gradient.py
+++ b/hysop/operator/gradient.py
@@ -533,15 +533,13 @@ class MinMaxGradientStatistics(Gradient):
                         msg = f">Parameter {param.pretty_name} set to:\n{param.value}"
                         vprint(msg)
 
-        _phony_input_params = {}
-        _phony_output_params = {}
+        _phony_input_params = set()
+        _phony_output_params = set()
         for pname in _names.keys():
             if pname in extra_params:
                 param = parameters[pname]
-                _phony_input_params.update(
-                    {p.name: p for p in extra_params[pname].ravel()}
-                )
-                _phony_output_params[param.name] = param
+                _phony_input_params.update(extra_params[pname].ravel())
+                _phony_output_params.add(param)
         op = MergeTensorViewsOperator(
             name=name.format(gradF.name),
             pretty_name=pretty_name.format(gradF.pretty_name),
diff --git a/hysop/operator/parameter_plotter.py b/hysop/operator/parameter_plotter.py
index cd9d821024569f279f39ad435136e4b24140b729..e0bad0403e2622f59a5a98712481c6b0257dfe11 100644
--- a/hysop/operator/parameter_plotter.py
+++ b/hysop/operator/parameter_plotter.py
@@ -153,7 +153,7 @@ class ParameterPlotter(PlottingOperator):
         self, name, parameters, alloc_size=128, fig=None, axes=None, shape=None, **kwds
     ):
 
-        input_params = {}
+        input_params = set()
         if (fig is not None) and (axes is not None):
             import matplotlib
 
@@ -162,7 +162,7 @@ class ParameterPlotter(PlottingOperator):
             check_instance(parameters, dict, keys=matplotlib.axes.Axes, values=dict)
             for params in parameters.values():
                 check_instance(params, dict, keys=str, values=ScalarParameter)
-                input_params.update({p.name: p for p in params.values()})
+                input_params.update(params.values())
         else:
             custom_axes = False
             _parameters = {}
diff --git a/hysop/operator/plotters.py b/hysop/operator/plotters.py
index 5ebab8a43dcaa4ff1608b5e4ad238eb260b9a6a5..d592b4c8da97311630d892f9e2a1ddd3176c988a 100644
--- a/hysop/operator/plotters.py
+++ b/hysop/operator/plotters.py
@@ -379,7 +379,7 @@ class ParameterPlotter(PlottingOperator):
         self, name, parameters, alloc_size=128, fig=None, axes=None, shape=None, **kwds
     ):
 
-        input_params = {}
+        input_params = set()
         if (fig is not None) and (axes is not None):
             import matplotlib
 
@@ -388,7 +388,7 @@ class ParameterPlotter(PlottingOperator):
             check_instance(parameters, dict, keys=matplotlib.axes.Axes, values=dict)
             for params in parameters.values():
                 check_instance(params, dict, keys=str, values=ScalarParameter)
-                input_params.update({p.name: p for p in params.values()})
+                input_params.update(params.values())
         else:
             custom_axes = False
             _parameters = {}
diff --git a/hysop/operator/redistribute.py b/hysop/operator/redistribute.py
index fd980ad245d23108296b3660d5ce761899e08470..389013b46b223b33a5f737467e6cd1fefd2c53dc 100644
--- a/hysop/operator/redistribute.py
+++ b/hysop/operator/redistribute.py
@@ -35,6 +35,7 @@ from hysop.topology.cartesian_topology import CartesianTopology
 from hysop.core.mpi.redistribute import (
     RedistributeIntra,
     RedistributeInter,
+    RedistributeInterParam,
     RedistributeOperatorBase,
 )
 from hysop.core.graph.node_generator import ComputationalGraphNodeGenerator
diff --git a/hysop/operators.py b/hysop/operators.py
index 08604a29b03f15b23221066777e6db4143b2506c..95fab15b4bf51a5ee7d15b0d7a35eeff1b0e74bb 100644
--- a/hysop/operators.py
+++ b/hysop/operators.py
@@ -35,7 +35,6 @@ from hysop.operator.transpose import Transpose
 from hysop.operator.misc import Noop, ForceTopologyState
 
 from hysop.operator.redistribute import Redistribute
-from hysop.operator.inter_task_param_comm import InterTaskParamComm
 
 from hysop.operator.analytic import AnalyticField
 from hysop.operator.mean_field import ComputeMeanField
diff --git a/hysop/tools/spectral_utils.py b/hysop/tools/spectral_utils.py
index 978743bb54735c099de2bf78e95aa4c4bae9a7c0..5ddca43d24f27b02480ab843a630e9100f65c966 100644
--- a/hysop/tools/spectral_utils.py
+++ b/hysop/tools/spectral_utils.py
@@ -901,8 +901,8 @@ class EnergyDumper:
                 dtype=None,
                 initial_value=None,
             )
-            assert param.name not in output_params, param.name
-            output_params[param.name] = param
+            assert param not in output_params, param.name
+            output_params.add(param)
         else:
             param = None
         return param