diff --git a/CMakeLists.txt b/CMakeLists.txt
index 360a0d34799c0bbd34e969e212b9b6dac9021258..2a8c2abe6f7320f947c07921e960a57ccc618186 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -651,10 +651,7 @@ if(VERBOSE_MODE)
   message(STATUS " Project uses Scales : ${WITH_SCALES}")
   message(STATUS " Project uses FFTW : ${WITH_FFTW}")
   message(STATUS " Project uses GPU : ${WITH_GPU}")
-<<<<<<< HEAD
   message(STATUS " Project uses parallel hdf5 interface : ${H5PY_PARALLEL_COMPRESSION_ENABLED}")
-=======
->>>>>>> c9b88f65566d41430ec337958469ac7b37608012
   message(STATUS " ${PROJECT_NAME} profile mode : ${PROFILE}")
   message(STATUS " ${PROJECT_NAME} debug   mode : ${DEBUG}")
   message(STATUS " Enable -OO run? : ${OPTIM}")
diff --git a/hysop/__init__.py.in b/hysop/__init__.py.in
index 26d8c7a9ec81a48aa7573e2d19e4766d865448ee..ef29096df566f849a87b1898392d60ebed0a4346 100644
--- a/hysop/__init__.py.in
+++ b/hysop/__init__.py.in
@@ -33,10 +33,7 @@ __GPU_ENABLED__    = "@WITH_GPU@" is "ON"
 __FFTW_ENABLED__   = "@WITH_FFTW@" is "ON"
 __SCALES_ENABLED__ = "@WITH_SCALES@" is "ON"
 __OPTIMIZE__       = not __debug__
-<<<<<<< HEAD
 __H5PY_PARALLEL_COMPRESSION_ENABLED__ = ("@H5PY_PARALLEL_COMPRESSION_ENABLED@" is "ON")
-=======
->>>>>>> c9b88f65566d41430ec337958469ac7b37608012
 
 __VERBOSE__        = get_env('VERBOSE', ("@VERBOSE@" is "ON"))
 __DEBUG__          = get_env('DEBUG',   ("@DEBUG@" is "ON"))
diff --git a/hysop/operator/hdf_io.py b/hysop/operator/hdf_io.py
index 443d3faf1308899b2a0b862af72385bac49c24fd..65238694793e294988ba6ac5635060948e3b7200 100755
--- a/hysop/operator/hdf_io.py
+++ b/hysop/operator/hdf_io.py
@@ -113,6 +113,8 @@ class HDF_IO(ComputationalGraphOperator):
         self.topology = None
         self._local_compute_slices = None
         self._global_grid_resolution = None
+        self._local_grid_resolution = None
+        self._all_local_grid_resolution = None
         self._global_slices = None
         # Dictionnary of discrete fields. Key = name in hdf file,
         # Value = discrete field
@@ -374,6 +376,7 @@ class HDF_Writer(HDF_IO):
             assert self.use_local_hdf5
             return self.io_params.filename + "_{0:05d}".format(i) + "_rk{rk:03d}.h5"
 
+
     @op_apply
     def apply(self, simulation=None, **kwds):
         if (simulation is None):
@@ -405,14 +408,12 @@ class HDF_Writer(HDF_IO):
         write_step = tuple(step)
 
         ds_names = self.dataset.keys()
-        
         joinrkfiles = None
         if self.use_local_hdf5 and (self.topology.cart_size > 1):
             joinrkfiles = range(self.topology.cart_size)
-
         grid_attributes = XMF.prepare_grid_attributes(
                             ds_names,
-                            resolution, origin, step)
+                            resolution, origin, step, joinrkfiles=joinrkfiles)
         self.grid_attributes_template = grid_attributes
 
 
@@ -444,7 +445,7 @@ class HDF_Writer(HDF_IO):
                     filenames = dict(('filename'+str(r), self._get_filename(i).format(rk=r).split('/')[-1]) for r in range(self.topology.cart_size))
                     filenames.update(('resolution'+str(r), XMF._list_format(self._all_local_grid_resolution[r])) for r in range(self.topology.cart_size))
                 grid_attrs = self.grid_attributes_template.format(
-                                    niteration=i, time=t, filename=filename)
+                                    niteration=i, time=t, **filenames)
                 f.seek(lastp)
                 f.write(grid_attrs)
                 self._last_xmf_pos = f.tell()
@@ -455,7 +456,13 @@ class HDF_Writer(HDF_IO):
             self._xdmf_data_files = []
 
     def _step_HDF5(self, simu):
-        """Write an h5 file with data on each mpi process."""
+        """Write an h5 file with data on each mpi process.
+
+        If the parallel interface of HDF5 is not enabled, each rank
+        writes its own h5 file. All files are concatenated in the xmf
+        part with a 'JOIN' function. If the parallel interface is
+        enabled, only one h5 file is written by all ranks.
+        """
         # Remarks:
         # - force np.float64, ParaView seems unable to read float32
         # - writing compressed hdf5 files (gzip compression seems the best)