diff --git a/hysop_examples/examples/analytic/analytic.py b/hysop_examples/examples/analytic/analytic.py
index 4d7f1d3f8339d0db58c037cada349ac6dbd4827c..23cf837db4b85ae5207825a62d20d313ff164abb 100755
--- a/hysop_examples/examples/analytic/analytic.py
+++ b/hysop_examples/examples/analytic/analytic.py
@@ -22,7 +22,7 @@ def compute(args):
 
     # We need to first get default MPI parameters (even for non MPI jobs)
     # so we use default domain communicator and task.
-    mpi_params = MPIParams(comm=box.task_comm,
+    mpi_params = MPIParams(comm=box.task_comm(),
                            task_id=box.current_task())
 
     # Setup implementation specific variables
diff --git a/hysop_examples/examples/bubble/periodic_bubble.py b/hysop_examples/examples/bubble/periodic_bubble.py
index 99c7358bac33368d495324fd47addf41ccc115cb..6080bc426522ee74bce5ff574bc7c5d956e5de8b 100644
--- a/hysop_examples/examples/bubble/periodic_bubble.py
+++ b/hysop_examples/examples/bubble/periodic_bubble.py
@@ -81,7 +81,7 @@ def compute(args):
     box = Box(origin=args.box_origin, length=args.box_length, dim=dim)
 
     # Get default MPI Parameters from domain (even for serial jobs)
-    mpi_params = MPIParams(comm=box.task_comm,
+    mpi_params = MPIParams(comm=box.task_comm(),
                            task_id=box.current_task())
 
     # Setup usual implementation specific variables
diff --git a/hysop_examples/examples/bubble/periodic_bubble_levelset.py b/hysop_examples/examples/bubble/periodic_bubble_levelset.py
index 7574769f0db639c0354e22ad31c3a69dadd8816c..4b4ef3eb2576c7425a30465ec9ce762dea90a372 100644
--- a/hysop_examples/examples/bubble/periodic_bubble_levelset.py
+++ b/hysop_examples/examples/bubble/periodic_bubble_levelset.py
@@ -67,7 +67,7 @@ def compute(args):
     box = Box(origin=args.box_origin, length=args.box_length, dim=dim)
 
     # Get default MPI Parameters from domain (even for serial jobs)
-    mpi_params = MPIParams(comm=box.task_comm,
+    mpi_params = MPIParams(comm=box.task_comm(),
                            task_id=box.current_task())
 
     # Setup usual implementation specific variables
diff --git a/hysop_examples/examples/bubble/periodic_bubble_levelset_penalization.py b/hysop_examples/examples/bubble/periodic_bubble_levelset_penalization.py
index 1d6a21b8c63f22fa7a43ddcec7c688066fd8d623..09cb3ccc43b46591c017c98c25869d223337472b 100644
--- a/hysop_examples/examples/bubble/periodic_bubble_levelset_penalization.py
+++ b/hysop_examples/examples/bubble/periodic_bubble_levelset_penalization.py
@@ -76,7 +76,7 @@ def compute(args):
     box = Box(origin=args.box_origin, length=args.box_length, dim=dim)
 
     # Get default MPI Parameters from domain (even for serial jobs)
-    mpi_params = MPIParams(comm=box.task_comm,
+    mpi_params = MPIParams(comm=box.task_comm(),
                            task_id=box.current_task())
 
     # Setup usual implementation specific variables
diff --git a/hysop_examples/examples/bubble/periodic_jet_levelset.py b/hysop_examples/examples/bubble/periodic_jet_levelset.py
index 40d2ecebc5a4d5a71d02f9c4b943edbf49410892..4c4a56236da7eb84b050decb92fbb8d326d267d3 100644
--- a/hysop_examples/examples/bubble/periodic_jet_levelset.py
+++ b/hysop_examples/examples/bubble/periodic_jet_levelset.py
@@ -60,7 +60,7 @@ def compute(args):
     box = Box(origin=args.box_origin, length=args.box_length, dim=dim)
 
     # Get default MPI Parameters from domain (even for serial jobs)
-    mpi_params = MPIParams(comm=box.task_comm,
+    mpi_params = MPIParams(comm=box.task_comm(),
                            task_id=box.current_task())
 
     # Setup usual implementation specific variables
diff --git a/hysop_examples/examples/cylinder/oscillating_cylinder.py b/hysop_examples/examples/cylinder/oscillating_cylinder.py
index 8975a9029dbacb98a7b3e609d34216181b0d3e60..c149e284706bfe712c5669ba59d31e51983b5743 100644
--- a/hysop_examples/examples/cylinder/oscillating_cylinder.py
+++ b/hysop_examples/examples/cylinder/oscillating_cylinder.py
@@ -66,7 +66,7 @@ def compute(args):
     box = Box(origin=(-H/2, -L/2), length=(H,L), dim=dim)
 
     # Get default MPI Parameters from domain (even for serial jobs)
-    mpi_params = MPIParams(comm=box.task_comm,
+    mpi_params = MPIParams(comm=box.task_comm(),
                            task_id=box.current_task())
 
     # Setup usual implementation specific variables
diff --git a/hysop_examples/examples/fixed_point/heat_equation.py b/hysop_examples/examples/fixed_point/heat_equation.py
index 5ed1ad646637adf8a2a5974cf15d53e70cc36f8b..b6be34fad0d08357fefcf81d58357d86328c6747 100644
--- a/hysop_examples/examples/fixed_point/heat_equation.py
+++ b/hysop_examples/examples/fixed_point/heat_equation.py
@@ -43,7 +43,7 @@ def compute(args):
     box = Box(origin=args.box_origin, length=args.box_length, dim=dim)
 
     # Get default MPI Parameters from domain (even for serial jobs)
-    mpi_params = MPIParams(comm=box.task_comm,
+    mpi_params = MPIParams(comm=box.task_comm(),
                            task_id=box.current_task())
 
     # Setup usual implementation specific variables
diff --git a/hysop_examples/examples/flow_around_sphere/flow_around_sphere.py b/hysop_examples/examples/flow_around_sphere/flow_around_sphere.py
index 4732e599d229208c8474d6010b8237f18cc7f8a9..c5175e50a6aac5972ba4b9d5ab36895232811489 100644
--- a/hysop_examples/examples/flow_around_sphere/flow_around_sphere.py
+++ b/hysop_examples/examples/flow_around_sphere/flow_around_sphere.py
@@ -39,7 +39,7 @@ def compute(args):
     dt0 = args.dt
 
     # Get default MPI Parameters from domain (even for serial jobs)
-    mpi_params = MPIParams(comm=box.task_comm,
+    mpi_params = MPIParams(comm=box.task_comm(),
                            task_id=box.current_task())
 
     # Setup usual implementation specific variables
diff --git a/hysop_examples/examples/multiresolution/scalar_advection.py b/hysop_examples/examples/multiresolution/scalar_advection.py
index f7cfbf51dfc60e144fda7fb385051f809316b837..7df8d844015626de741d9ac2adb871a9782c83b5 100644
--- a/hysop_examples/examples/multiresolution/scalar_advection.py
+++ b/hysop_examples/examples/multiresolution/scalar_advection.py
@@ -1,194 +1,193 @@
-
 import numpy as np
+
 
 def compute(args):
     from hysop import Field, Box, Simulation, Problem, \
-                      ScalarParameter, MPIParams, IOParams, IO
+        ScalarParameter, MPIParams, IOParams, IO
     from hysop.constants import Implementation, BoxBoundaryCondition
     from hysop.operators import Advection, DirectionalAdvection, StrangSplitting, \
-                                SpatialFilter, HDF_Writer
+        SpatialFilter, HDF_Writer
     from hysop.methods import Remesh, TimeIntegrator, \
-                              Interpolation, FilteringMethod, \
-                              PolynomialInterpolator
+        Interpolation, FilteringMethod, \
+        PolynomialInterpolator
 
-    ## IO paths
+    # IO paths
     spectral_path = IO.default_path() + '/spectral'
 
-    ## Function to compute initial velocity values
+    # Function to compute initial velocity values
     def init_velocity(data, coords, component):
         data[...] = args.velocity[::-1][component]
 
-    ## Function to compute initial scalar values
+    # Function to compute initial scalar values
     def init_scalar(data, coords, component):
         data[...] = 1.0
         for x in coords:
             data[...] *= np.cos(x + component*(np.pi/2))
 
     # Define domain
-    dim = args.ndim
-    npts = args.npts # coarse resolution
+    dim = args.ndim
+    npts = args.npts  # coarse resolution
     snpts = args.snpts  # fine resolution
     fnpts = tuple(3*_ for _ in snpts)  # finest resolution
-    cnpts = tuple(_//2 for _ in npts) # coarsest resolution
+    cnpts = tuple(_//2 for _ in npts)  # coarsest resolution
     lboundaries = (BoxBoundaryCondition.PERIODIC,)*dim
     rboundaries = (BoxBoundaryCondition.PERIODIC,)*dim
-    box = Box(origin=args.box_origin, length=args.box_length, dim=dim,
-              lboundaries=lboundaries, rboundaries=rboundaries)
-
+    box = Box(origin=args.box_origin, length=args.box_length, dim=dim,
+              lboundaries=lboundaries, rboundaries=rboundaries)
+
     # Get default MPI Parameters from domain (even for serial jobs)
-    mpi_params = MPIParams(comm=box.task_comm,
+    mpi_params = MPIParams(comm=box.task_comm(),
                            task_id=box.current_task())
-
+
     # Define parameters and field (time and analytic field)
-    dt = ScalarParameter('dt', dtype=args.dtype)
-    velo = Field(domain=box, name='V', is_vector=True, dtype=args.dtype)
+    dt = ScalarParameter('dt', dtype=args.dtype)
+    velo = Field(domain=box, name='V', is_vector=True, dtype=args.dtype)
     scalar = Field(domain=box, name='S', nb_components=2, dtype=args.dtype)
-
+
     # Setup operator method dictionnary
     # Advection-Remesh operator discretization parameters
-    method = {
-        TimeIntegrator: args.time_integrator,
-        Remesh: args.remesh_kernel,
-        PolynomialInterpolator: args.polynomial_interpolator,
+    method = {
+        TimeIntegrator: args.time_integrator,
+        Remesh: args.remesh_kernel,
+        PolynomialInterpolator: args.polynomial_interpolator,
     }
-
+
     # Setup implementation specific variables
     impl = args.impl
-    extra_op_kwds = { 'mpi_params': mpi_params }
+    extra_op_kwds = {'mpi_params': mpi_params}
 
     if (impl is Implementation.OPENCL):
         # For the OpenCL implementation we need to setup the compute device
         # and configure how the code is generated and compiled at runtime.
-
+
         # Create an explicit OpenCL context from user parameters
         from hysop.backend.device.opencl.opencl_tools import get_or_create_opencl_env, get_device_number
         cl_env = get_or_create_opencl_env(
             mpi_params=mpi_params,
-            platform_id=args.cl_platform_id,
-            device_id=box.machine_rank%get_device_number() if args.cl_device_id is None else None)
-
+            platform_id=args.cl_platform_id,
+            device_id=box.machine_rank % get_device_number() if args.cl_device_id is None else None)
+
         # Configure OpenCL kernel generation and tuning method
         # (already done by HysopArgParser for simplicity)
         from hysop.methods import OpenClKernelConfig
         method[OpenClKernelConfig] = args.opencl_kernel_config
-
+
         # Setup opencl specific extra operator keyword arguments
         extra_op_kwds['cl_env'] = cl_env
-    elif (impl in (Implementation.PYTHON, Implementation.FORTRAN)):
+    elif (impl in (Implementation.PYTHON, Implementation.FORTRAN)):
         pass
     else:
-        msg='Unknown implementation \'{}\'.'.format(impl)
+        msg = 'Unknown implementation \'{}\'.'.format(impl)
         raise ValueError(msg)
-
+
     # Create the problem we want to solve
     problem = Problem(method=method)
-
-    if (impl is Implementation.FORTRAN) or ((npts!=snpts) and (impl is Implementation.PYTHON)):
+
+    if (impl is Implementation.FORTRAN) or ((npts != snpts) and (impl is Implementation.PYTHON)):
         # The fortran scales implementation is a special case.
         # Here directional advection is a black box.
         advec = Advection(implementation=Implementation.FORTRAN,
-                          name='advec',
-                          velocity = velo,
-                          advected_fields = (scalar,),
-                          variables = {velo: npts, scalar: snpts},
-                          dt = dt, **extra_op_kwds)
-
+                          name='advec',
+                          velocity=velo,
+                          advected_fields=(scalar,),
+                          variables={velo: npts, scalar: snpts},
+                          dt=dt, **extra_op_kwds)
+
         # Finally insert our advection into the problem
         problem.insert(advec)
     else:
         # Build the directional advection operator
         # here the cfl determines the maximum number of ghosts
         advec = DirectionalAdvection(implementation=impl,
-                                     name='advec',
-                                     velocity = velo,
-                                     velocity_cfl = args.cfl,
-                                     advected_fields = (scalar,),
-                                     variables = {velo: npts, scalar: snpts},
-                                     dt=dt, **extra_op_kwds)
+                                     name='advec',
+                                     velocity=velo,
+                                     velocity_cfl=args.cfl,
+                                     advected_fields=(scalar,),
+                                     variables={velo: npts, scalar: snpts},
+                                     dt=dt, **extra_op_kwds)
 
         # Build the directional splitting operator graph
-        splitting = StrangSplitting(splitting_dim=dim,
-                                    order=args.strang_order)
+        splitting = StrangSplitting(splitting_dim=dim,
+                                    order=args.strang_order)
         splitting.push_operators(advec)
 
         # Finally insert our splitted advection into the problem
         problem.insert(splitting)
-
-    #> Interpolation filter
+    # > Interpolation filter
     interpolation_filter = SpatialFilter(input_variables={scalar: snpts},
-                                         output_variables={scalar: fnpts},
-                                         filtering_method=args.interpolation_filter,
-                                         implementation=impl,
-                                         **extra_op_kwds)
-    #> Restriction filter
+                                         output_variables={scalar: fnpts},
+                                         filtering_method=args.interpolation_filter,
+                                         implementation=impl,
+                                         **extra_op_kwds)
+    # > Restriction filter
     restriction_filter = SpatialFilter(input_variables={scalar: npts},
-                                       output_variables={scalar: cnpts},
-                                       filtering_method=args.restriction_filter,
-                                       implementation=impl,
-                                       **extra_op_kwds)
+                                       output_variables={scalar: cnpts},
+                                       filtering_method=args.restriction_filter,
+                                       implementation=impl,
+                                       **extra_op_kwds)
 
-    #> Operators to dump all fields
+    # > Operators to dump all fields
     io_params = IOParams(filename='finest', frequency=args.dump_freq)
     df0 = HDF_Writer(name='S_finest',
-                     io_params=io_params,
-                     variables={scalar: fnpts},
-                     **extra_op_kwds)
+                     io_params=io_params,
+                     variables={scalar: fnpts},
+                     **extra_op_kwds)
     io_params = IOParams(filename='fine', frequency=args.dump_freq)
     df1 = HDF_Writer(name='S_fine',
-                     io_params=io_params,
-                     variables={scalar: snpts},
-                     **extra_op_kwds)
+                     io_params=io_params,
+                     variables={scalar: snpts},
+                     **extra_op_kwds)
     io_params = IOParams(filename='coarse', frequency=args.dump_freq)
     df2 = HDF_Writer(name='S_coarse',
-                     io_params=io_params,
-                     variables={scalar: npts},
-                     **extra_op_kwds)
+                     io_params=io_params,
+                     variables={scalar: npts},
+                     **extra_op_kwds)
     io_params = IOParams(filename='coarsest', frequency=args.dump_freq)
     df3 = HDF_Writer(name='S_coarsest',
-                     io_params=io_params,
-                     variables={scalar: cnpts},
-                     **extra_op_kwds)
-
+                     io_params=io_params,
+                     variables={scalar: cnpts},
+                     **extra_op_kwds)
+
     # Add a writer of input field at given frequency.
-    problem.insert(interpolation_filter, restriction_filter,
-                   df0, df1, df2, df3)
+    problem.insert(interpolation_filter, restriction_filter,
+                   df0, df1, df2, df3)
     problem.build(args)
 
     # If a visu_rank was provided, and show_graph was set,
     # display the graph on the given process rank.
     if args.display_graph:
         problem.display(args.visu_rank)
-
+
     # Initialize discrete velocity and scalar field
     problem.initialize_field(velo, formula=init_velocity)
     problem.initialize_field(scalar, formula=init_scalar)
-
+
     # Determine a timestep using the supplied CFL
     # (velocity is constant for the whole simulation)
-    dx = problem.get_input_discrete_field(scalar).space_step.min()
+    dx = problem.get_input_discrete_field(scalar).space_step.min()
     Vinf = max(abs(vi) for vi in args.velocity)
-    dt0 = (args.cfl*dx)/Vinf
+    dt0 = (args.cfl*dx)/Vinf
     if (args.dt is not None):
         dt0 = min(args.dt, dt0)
-    dt0 = 0.99*dt0
-
-    # Create a simulation and solve the problem
+    dt0 = 0.99*dt0
+
+    # Create a simulation and solve the problem
     # (do not forget to specify the dt parameter here)
-    simu = Simulation(start=args.tstart, end=args.tend,
+    simu = Simulation(start=args.tstart, end=args.tend,
                       nb_iter=args.nb_iter, max_iter=args.max_iter,
                       times_of_interest=args.dump_times,
                       dt=dt, dt0=dt0)
-
-    # Finally solve the problem
+
+    # Finally solve the problem
     problem.solve(simu, dry_run=args.dry_run)
-
+
     # Finalize
     problem.finalize()
 
-if __name__=='__main__':
+if __name__ == '__main__':
     from hysop_examples.example_utils import HysopArgParser, colors
 
     class MultiResolutionScalarAdvectionArgParser(HysopArgParser):
@@ -196,37 +195,37 @@ if __name__=='__main__':
             prog_name = 'multiresolution_scalar_advection'
             default_dump_dir = '{}/hysop_examples/{}'.format(HysopArgParser.tmp_dir(), prog_name)
 
-            description=colors.color('HySoP Scalar Advection Example: ', fg='blue', style='bold')
-            description+='Advect a scalar by a given constant velocity. '
-            description+='\n\nThe advection operator is directionally splitted resulting '
-            description+='in the use of one or more advection-remesh operators per direction.'
-
+            description = colors.color('HySoP Scalar Advection Example: ', fg='blue', style='bold')
+            description += 'Advect a scalar by a given constant velocity. '
+            description += '\n\nThe advection operator is directionally splitted resulting '
+            description += 'in the use of one or more advection-remesh operators per direction.'
+
             super(MultiResolutionScalarAdvectionArgParser, self).__init__(
-                prog_name=prog_name,
-                description=description,
-                default_dump_dir=default_dump_dir)
-
+                prog_name=prog_name,
+                description=description,
+                default_dump_dir=default_dump_dir)
+
         def _add_main_args(self):
             args = super(MultiResolutionScalarAdvectionArgParser, self)._add_main_args()
             args.add_argument('-vel', '--velocity', type=str,
-                              action=self.split, container=tuple, append=False, convert=float,
-                              dest='velocity',
-                              help='Velocity components.')
+                              action=self.split, container=tuple, append=False, convert=float,
+                              dest='velocity',
+                              help='Velocity components.')
             return args
-
+
         def _check_main_args(self, args):
             super(MultiResolutionScalarAdvectionArgParser, self)._check_main_args(args)
             self._check_default(args, 'velocity', tuple, allow_none=False)
-
+
         def _setup_parameters(self, args):
             if len(args.velocity) == 1:
                 args.velocity *= args.ndim
 
     parser = MultiResolutionScalarAdvectionArgParser()
-    parser.set_defaults(box_origin=(0.0,), box_length=(2*np.pi,),
-                        tstart=0.0, tend=2*np.pi, npts=(16,),
-                        dump_freq=10, cfl=0.5, velocity=(1.0,),
-                        ndim=3, compute_precision='fp64')
+    parser.set_defaults(box_origin=(0.0,), box_length=(2*np.pi,),
+                        tstart=0.0, tend=2*np.pi, npts=(16,),
+                        dump_freq=10, cfl=0.5, velocity=(1.0,),
+                        ndim=3, compute_precision='fp64')
 
     parser.run(compute)
diff --git a/hysop_examples/examples/particles_above_salt/particles_above_salt_bc.py b/hysop_examples/examples/particles_above_salt/particles_above_salt_bc.py
index cc3c345fc7d89c42a58e69d61dcf75059c0f50f5..f1d48e8adda216aa6c4e5766d33ec402bd742d79 100644
--- a/hysop_examples/examples/particles_above_salt/particles_above_salt_bc.py
+++ b/hysop_examples/examples/particles_above_salt/particles_above_salt_bc.py
@@ -95,7 +95,7 @@ def compute(args):
               lboundaries=lboundaries, rboundaries=rboundaries)
 
     # Get default MPI Parameters from domain (even for serial jobs)
-    mpi_params = MPIParams(comm=box.task_comm,
+    mpi_params = MPIParams(comm=box.task_comm(),
                            task_id=box.current_task())
 
     # Setup usual implementation specific variables
diff --git a/hysop_examples/examples/particles_above_salt/particles_above_salt_bc_3d.py b/hysop_examples/examples/particles_above_salt/particles_above_salt_bc_3d.py
index 00b82718e6ae2ae7db18dc8ede62697cff4d4685..f13c1acc217728bd8088d168e6b4485e7657af31 100644
--- a/hysop_examples/examples/particles_above_salt/particles_above_salt_bc_3d.py
+++ b/hysop_examples/examples/particles_above_salt/particles_above_salt_bc_3d.py
@@ -109,7 +109,7 @@ def compute(args):
               lboundaries=lboundaries, rboundaries=rboundaries)
 
     # Get default MPI Parameters from domain (even for serial jobs)
-    mpi_params = MPIParams(comm=box.task_comm,
+    mpi_params = MPIParams(comm=box.task_comm(),
                            task_id=box.current_task())
 
     # Setup usual implementation specific variables
diff --git a/hysop_examples/examples/particles_above_salt/particles_above_salt_periodic.py b/hysop_examples/examples/particles_above_salt/particles_above_salt_periodic.py
index 49e315386882e02dffb0109f423d2bf87e220d90..ee34aa52ff2eaf9d1523e7c998e1f95f10191867 100644
--- a/hysop_examples/examples/particles_above_salt/particles_above_salt_periodic.py
+++ b/hysop_examples/examples/particles_above_salt/particles_above_salt_periodic.py
@@ -82,7 +82,7 @@ def compute(args):
     box = Box(origin=Xo, length=np.subtract(Xn,Xo))
 
     # Get default MPI Parameters from domain (even for serial jobs)
-    mpi_params = MPIParams(comm=box.task_comm,
+    mpi_params = MPIParams(comm=box.task_comm(),
                            task_id=box.current_task())
 
     # Setup usual implementation specific variables
diff --git a/hysop_examples/examples/particles_above_salt/particles_above_salt_symmetrized.py b/hysop_examples/examples/particles_above_salt/particles_above_salt_symmetrized.py
index ff6fb85ccf30127ef116aef2d1f5cd562512af52..256c656dc10638e573fb6cffd4a8c29424143a80 100644
--- a/hysop_examples/examples/particles_above_salt/particles_above_salt_symmetrized.py
+++ b/hysop_examples/examples/particles_above_salt/particles_above_salt_symmetrized.py
@@ -81,7 +81,7 @@ def compute(args):
     box = Box(origin=Xo, length=np.subtract(Xn,Xo))
 
     # Get default MPI Parameters from domain (even for serial jobs)
-    mpi_params = MPIParams(comm=box.task_comm,
+    mpi_params = MPIParams(comm=box.task_comm(),
                            task_id=box.current_task())
 
     # Setup usual implementation specific variables
diff --git a/hysop_examples/examples/scalar_advection/levelset.py b/hysop_examples/examples/scalar_advection/levelset.py
index 76a26d1e5482114a0c21808c45a40b67afee82e5..18b9836072383467fc7e073700e6d50ab9b2ebc4 100644
--- a/hysop_examples/examples/scalar_advection/levelset.py
+++ b/hysop_examples/examples/scalar_advection/levelset.py
@@ -42,7 +42,7 @@ def compute(args):
     vprint('\nCFL is {}'.format(cfl))
 
     # Get default MPI Parameters from domain (even for serial jobs)
-    mpi_params = MPIParams(comm=box.task_comm,
+    mpi_params = MPIParams(comm=box.task_comm(),
                            task_id=box.current_task())
 
     vprint('Default I/O configuration:')
diff --git a/hysop_examples/examples/scalar_advection/scalar_advection.py b/hysop_examples/examples/scalar_advection/scalar_advection.py
index a43204e415116b1532977bdc7db0fe2fb8608011..deb144ed4f5589e4f143f15844b13117b3dcf0f9 100644
--- a/hysop_examples/examples/scalar_advection/scalar_advection.py
+++ b/hysop_examples/examples/scalar_advection/scalar_advection.py
@@ -27,7 +27,7 @@ def compute(args):
     box = Box(origin=args.box_origin, length=args.box_length, dim=dim)
 
     # Get default MPI Parameters from domain (even for serial jobs)
-    mpi_params = MPIParams(comm=box.task_comm,
+    mpi_params = MPIParams(comm=box.task_comm(),
                            task_id=box.current_task())
 
     # Define parameters and field (time and analytic field)
diff --git a/hysop_examples/examples/scalar_advection/turbulent_scalar_advection.py b/hysop_examples/examples/scalar_advection/turbulent_scalar_advection.py
index f1f3156881613fa8ab1c195f9b1061267bcc1166..97d18023a19fc3e814fde3e9fd58aa7c8acc0555 100644
--- a/hysop_examples/examples/scalar_advection/turbulent_scalar_advection.py
+++ b/hysop_examples/examples/scalar_advection/turbulent_scalar_advection.py
@@ -87,9 +87,9 @@ def compute(args):
 
     # Get default MPI Parameters from domain (even for serial jobs)
     mpi_params = MPIParams(comm=main_comm)
-    mpi_params_s = MPIParams(comm=box.task_comm, task_id=TASK_SCALAR,
+    mpi_params_s = MPIParams(comm=box.task_comm(), task_id=TASK_SCALAR,
                              on_task=TASK_SCALAR == args.proc_tasks[main_rank])
-    mpi_params_uw = MPIParams(comm=box.task_comm, task_id=TASK_UW,
+    mpi_params_uw = MPIParams(comm=box.task_comm(), task_id=TASK_UW,
                               on_task=TASK_UW == args.proc_tasks[main_rank])
 
     cl_env = None
diff --git a/hysop_examples/examples/scalar_diffusion/scalar_diffusion.py b/hysop_examples/examples/scalar_diffusion/scalar_diffusion.py
index a5dfcf6bbbdbb3313a3865c801b32c2c60255adf..8dc79ae9c02fb46b0333b11c2adbb5fdcb42538f 100755
--- a/hysop_examples/examples/scalar_diffusion/scalar_diffusion.py
+++ b/hysop_examples/examples/scalar_diffusion/scalar_diffusion.py
@@ -26,7 +26,7 @@ def compute(args):
     box = Box(origin=args.box_origin, length=args.box_length, dim=dim)
 
     # Get default MPI Parameters from domain (even for serial jobs)
-    mpi_params = MPIParams(comm=box.task_comm,
+    mpi_params = MPIParams(comm=box.task_comm(),
                            task_id=box.current_task())
 
     # Define parameters and field (time and analytic field)
diff --git a/hysop_examples/examples/sediment_deposit/sediment_deposit.py b/hysop_examples/examples/sediment_deposit/sediment_deposit.py
index 8bae7c71c8b0bc59737349199c4068f57c91ba10..966d0c6e21ab0378108b91db169433450da06002 100644
--- a/hysop_examples/examples/sediment_deposit/sediment_deposit.py
+++ b/hysop_examples/examples/sediment_deposit/sediment_deposit.py
@@ -142,7 +142,7 @@ def compute(args):
               lboundaries=lboundaries, rboundaries=rboundaries)
 
     # Get default MPI Parameters from domain (even for serial jobs)
-    mpi_params = MPIParams(comm=box.task_comm,
+    mpi_params = MPIParams(comm=box.task_comm(),
                            task_id=box.current_task())
 
     # Setup usual implementation specific variables
diff --git a/hysop_examples/examples/sediment_deposit/sediment_deposit_levelset.py b/hysop_examples/examples/sediment_deposit/sediment_deposit_levelset.py
index 9adbca147b9e70f16e08ec2ae36d4194eb43ffcf..58241220bcc2a5ae681d3c57b9bd87dff7b56969 100644
--- a/hysop_examples/examples/sediment_deposit/sediment_deposit_levelset.py
+++ b/hysop_examples/examples/sediment_deposit/sediment_deposit_levelset.py
@@ -159,7 +159,7 @@ def compute(args):
               lboundaries=lboundaries, rboundaries=rboundaries)
 
     # Get default MPI Parameters from domain (even for serial jobs)
-    mpi_params = MPIParams(comm=box.task_comm,
+    mpi_params = MPIParams(comm=box.task_comm(),
                            task_id=box.current_task())
 
     # Setup usual implementation specific variables
diff --git a/hysop_examples/examples/shear_layer/shear_layer.py b/hysop_examples/examples/shear_layer/shear_layer.py
index c746c2f6e566621ead79b6330fdcc39e3d58495c..9e4a916c844b295e8233f806d6253966b8feaf8e 100644
--- a/hysop_examples/examples/shear_layer/shear_layer.py
+++ b/hysop_examples/examples/shear_layer/shear_layer.py
@@ -28,7 +28,7 @@ def compute(args):
     box = Box(origin=args.box_origin, length=args.box_length, dim=dim)
 
     # Get default MPI Parameters from domain (even for serial jobs)
-    mpi_params = MPIParams(comm=box.task_comm,
+    mpi_params = MPIParams(comm=box.task_comm(),
                            task_id=box.current_task())
 
     # Setup usual implementation specific variables
diff --git a/hysop_examples/examples/taylor_green/taylor_green.py b/hysop_examples/examples/taylor_green/taylor_green.py
index 603a28ba2d946270e4a0dc9a83af2048d98dc368..7d480137169b614e0594cc00da2da265b5e2cfbe 100644
--- a/hysop_examples/examples/taylor_green/taylor_green.py
+++ b/hysop_examples/examples/taylor_green/taylor_green.py
@@ -51,7 +51,7 @@ def compute(args):
     box = Box(origin=args.box_origin, length=args.box_length, dim=dim)
 
    # Get default MPI Parameters from domain (even for serial jobs)
-    mpi_params = MPIParams(comm=box.task_comm,
+    mpi_params = MPIParams(comm=box.task_comm(),
                            task_id=box.current_task())
 
     # Setup usual implementation specific variables
diff --git a/hysop_examples/examples/taylor_green/taylor_green_cpuFortran.py b/hysop_examples/examples/taylor_green/taylor_green_cpuFortran.py
index 22e3f8a05265bc25895de8a6a36db31f7ad4f006..5ddc8d880ccd94d4ada0727b3a540234137728c8 100644
--- a/hysop_examples/examples/taylor_green/taylor_green_cpuFortran.py
+++ b/hysop_examples/examples/taylor_green/taylor_green_cpuFortran.py
@@ -44,7 +44,7 @@ def compute(args):
     box = Box(origin=args.box_origin, length=args.box_length, dim=dim)
 
     # Get default MPI Parameters from domain (even for serial jobs)
-    mpi_params = MPIParams(comm=box.task_comm,
+    mpi_params = MPIParams(comm=box.task_comm(),
                            task_id=box.current_task())
 
     # Setup usual implementation specific variables
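
Note: the change is identical in every file touched by this patch: the domain's task communicator is now obtained by calling box.task_comm() instead of reading box.task_comm as an attribute, which suggests task_comm is now a callable accessor like current_task(). A minimal sketch of the updated construction pattern, assuming the hysop package is importable and using an arbitrary unit box purely for illustration:

    from hysop import Box, MPIParams

    # Hypothetical 3D unit box, standing in for the domains built in the examples above.
    box = Box(origin=(0.0, 0.0, 0.0), length=(1.0, 1.0, 1.0), dim=3)

    # task_comm() is now an accessor call; current_task() already was one.
    mpi_params = MPIParams(comm=box.task_comm(),
                           task_id=box.current_task())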