Skip to content
Snippets Groups Projects
Commit 1ed2ddb0 authored by Jean-Baptiste Keck's avatar Jean-Baptiste Keck
Browse files

reverted hdfio to commit 1e369fd5

parent 2adde051
No related branches found
No related tags found
1 merge request: !16 MPI operators
......@@ -651,10 +651,7 @@ if(VERBOSE_MODE)
message(STATUS " Project uses Scales : ${WITH_SCALES}")
message(STATUS " Project uses FFTW : ${WITH_FFTW}")
message(STATUS " Project uses GPU : ${WITH_GPU}")
<<<<<<< HEAD
message(STATUS " Project uses parallel hdf5 interface : ${H5PY_PARALLEL_COMPRESSION_ENABLED}")
=======
>>>>>>> c9b88f65566d41430ec337958469ac7b37608012
message(STATUS " ${PROJECT_NAME} profile mode : ${PROFILE}")
message(STATUS " ${PROJECT_NAME} debug mode : ${DEBUG}")
message(STATUS " Enable -OO run? : ${OPTIM}")
......
......@@ -33,10 +33,7 @@ __GPU_ENABLED__ = "@WITH_GPU@" is "ON"
__FFTW_ENABLED__ = "@WITH_FFTW@" is "ON"
__SCALES_ENABLED__ = "@WITH_SCALES@" is "ON"
__OPTIMIZE__ = not __debug__
<<<<<<< HEAD
__H5PY_PARALLEL_COMPRESSION_ENABLED__ = ("@H5PY_PARALLEL_COMPRESSION_ENABLED@" is "ON")
=======
>>>>>>> c9b88f65566d41430ec337958469ac7b37608012
__VERBOSE__ = get_env('VERBOSE', ("@VERBOSE@" is "ON"))
__DEBUG__ = get_env('DEBUG', ("@DEBUG@" is "ON"))
......
......@@ -113,6 +113,8 @@ class HDF_IO(ComputationalGraphOperator):
self.topology = None
self._local_compute_slices = None
self._global_grid_resolution = None
self._local_grid_resolution = None
self._all_local_grid_resolution = None
self._global_slices = None
# Dictionnary of discrete fields. Key = name in hdf file,
# Value = discrete field
......@@ -374,6 +376,7 @@ class HDF_Writer(HDF_IO):
assert self.use_local_hdf5
return self.io_params.filename + "_{0:05d}".format(i) + "_rk{rk:03d}.h5"
@op_apply
def apply(self, simulation=None, **kwds):
if (simulation is None):
......@@ -405,14 +408,12 @@ class HDF_Writer(HDF_IO):
write_step = tuple(step)
ds_names = self.dataset.keys()
joinrkfiles = None
if self.use_local_hdf5 and (self.topology.cart_size > 1):
joinrkfiles = range(self.topology.cart_size)
grid_attributes = XMF.prepare_grid_attributes(
ds_names,
resolution, origin, step)
resolution, origin, step, joinrkfiles=joinrkfiles)
self.grid_attributes_template = grid_attributes
......@@ -444,7 +445,7 @@ class HDF_Writer(HDF_IO):
filenames = dict(('filename'+str(r), self._get_filename(i).format(rk=r).split('/')[-1]) for r in range(self.topology.cart_size))
filenames.update(('resolution'+str(r), XMF._list_format(self._all_local_grid_resolution[r])) for r in range(self.topology.cart_size))
grid_attrs = self.grid_attributes_template.format(
niteration=i, time=t, filename=filename)
niteration=i, time=t, **filenames)
f.seek(lastp)
f.write(grid_attrs)
self._last_xmf_pos = f.tell()
......@@ -455,7 +456,13 @@ class HDF_Writer(HDF_IO):
self._xdmf_data_files = []
def _step_HDF5(self, simu):
"""Write an h5 file with data on each mpi process."""
"""Write an h5 file with data on each mpi process.
If parallel interface of HDF5 is not enabled, each rank is
writing its own h5 file. All files are concatenated in the xmf
part with a 'JOIN' function. If parallel interface enabled,
only one h5 file is written by all ranks.
"""
# Remarks:
# - force np.float64, ParaView seems unable to read float32
# - writing compressed hdf5 files (gzip compression seems the best)
......
0% Loading, or loading failed.
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment