diff --git a/examples/sediment_deposit/sediment_deposit.py b/examples/sediment_deposit/sediment_deposit.py
index 6284d4ba36b855bd50fc73faf26e9d4a1ef0386f..eb98abe5b6571477edc43e27fa8a68a42bf9e496 100644
--- a/examples/sediment_deposit/sediment_deposit.py
+++ b/examples/sediment_deposit/sediment_deposit.py
@@ -130,7 +130,7 @@ def compute(args):
     from hysop.symbolic import sm, space_symbols, local_indices_symbols
     from hysop.symbolic.base import SymbolicTensor
     from hysop.symbolic.field import curl
-    from hysop.symbolic.relational import Assignment, LogicalLE, LogicalGE
+    from hysop.symbolic.relational import Assignment, LogicalLE, LogicalGE, LogicalLT, LogicalGT
     from hysop.symbolic.misc import Select
     from hysop.symbolic.tmp import TmpScalar
     from hysop.tools.string_utils import framed_str
@@ -275,8 +275,9 @@ def compute(args):
                                        **extra_op_kwds)
 
     #> Operator to compute and save mean fields
-    axes = list(range(1, dim))
-    view = tuple([slice(None,None,None),]*dim)
+    axes = list(range(0, dim-1))
+    view = [slice(None,None,None),]*dim
+    view = tuple(view)
     io_params = IOParams(filename='horizontally_averaged_profiles', frequency=0)
     compute_mean_fields = ComputeMeanField(name='mean',
                                            fields={S: (view, axes)},
                                            io_params=io_params)
@@ -379,7 +380,7 @@ if __name__=='__main__':
 
     parser = ParticleAboveSaltArgParser()
 
-    parser.set_defaults(impl='cl', ndim=2, 
+    parser.set_defaults(impl='cl', ndim=2,
                         npts=(TANK_RATIO*DISCRETIZATION+1,DISCRETIZATION+1),
                         box_origin=(0.0,), box_length=(1.0,),
                         tstart=0.0, tend=20.0,
diff --git a/examples/sediment_deposit/sediment_deposit_levelset.py b/examples/sediment_deposit/sediment_deposit_levelset.py
index baa3e3fabd9900eb72ed2da50ec4eb142145b574..fe3c254eba5a31339f7c2902ed9e1bbd949dd3a4 100644
--- a/examples/sediment_deposit/sediment_deposit_levelset.py
+++ b/examples/sediment_deposit/sediment_deposit_levelset.py
@@ -1,9 +1,13 @@
 import numpy as np
 import scipy as sp
 import sympy as sm
+import numba as nb
 
-TANK_RATIO = 1
-SEDIMENT_RADIUS = 1e-2
+TANK_RATIO      = 1
+FILL_PCT        = 0.25
+SEDIMENT_RADIUS = 1e-3
+SEDIMENT_COUNT  = int(1.15*(FILL_PCT*TANK_RATIO / (np.pi*SEDIMENT_RADIUS**2)))
+DISCRETIZATION  = 4096
 
 # initialize vorticity
 def init_vorticity(data, coords, component=None):
@@ -18,29 +22,69 @@ def init_velocity(data, coords, component=None):
         d[...] = 0.0
 
 def init_phi(data, coords, nblobs, rblob):
+    from hysop import vprint
     data   = data[0]
     coords = coords[0]
     X, Y = coords
 
-    Bx = TANK_RATIO*np.random.rand(nblobs)
-    By = 1*np.random.rand(nblobs)
+    Bx = np.random.rand(nblobs)
+    By = TANK_RATIO*np.random.rand(nblobs)
     R2 = rblob * rblob
 
     cache_file='/tmp/C_init_ls_{}_{}'.format('_'.join(str(x) for x in data.shape), str(abs(hash((TANK_RATIO,nblobs,rblob)))))
     try:
         D = np.load(file=cache_file+'.npz')
-        print 'Initializing sediments from cache: "{}.npz".'.format(cache_file)
+        vprint(' *Initializing sediments from cache: "{}.npz".'.format(cache_file))
         data[...] = D['data']
     except:
         # this just explodes the RAM so we iterate per line
-        #data[...] = 0.5 * ((X[:,:,None] - Bx[None,None,:])**2 + (Y[:,:,None] - By[None,None,:])**2 < R2).any(axis=-1)
-        print 'Initializing sediments of radius {} with {} random blobs.'.format(rblob, nblobs)
-        for i in xrange(Y.size):
-            d = (((X[:,:,None] - Bx[None,None,:])/SEDIMENT_RADIUS)**2 + ((Y[i,:,None] - By[None,None,:])/SEDIMENT_RADIUS)**2 - 1.0).min(axis=-1)
-            data[i] = np.minimum(d, 10.0)
+        #data[...] = ((X[:,:,None] - Bx[None,None,:])**2 + (Y[:,:,None] - By[None,None,:])**2 < R2).any(axis=-1)
+        # this is too long
+        #def iter_lines(X, Y, Bx, By, data):
+            #for i in xrange(Y.size):
+                #d = (((X[:,:,None] - Bx[None,None,:])/SEDIMENT_RADIUS)**2 + ((Y[i,:,None] - By[None,None,:])/SEDIMENT_RADIUS)**2 - 1.0).min(axis=-1)
+                #data[i] = np.sign(d)*np.sqrt(np.abs(d))
+        vprint(' *Initializing sediments of radius {} with {} random blobs.'.format(rblob, nblobs))
+            #vprint(' {}/{}'.format(i, Y.size))
+
+        #X = X.reshape(X.size)
+        #Y = Y.reshape(Y.size)
+
+        #from hysop.tools.numba_utils import make_numba_signature
+        #args = (X, Y[0], Bx, By, data[0])
+        #signature, _ = make_numba_signature(*args)
+
+        #@nb.guvectorize([signature],
+            #'(nx),(),(nb),(nb),(nx)',
+            #target='parallel',
+            #nopython=True, cache=True)
+        #def iter_blobs(X, Yi, Bx, By, data):
+            #for k in xrange(nblobs):
+                #Xb = Bx[k]
+                #Yb = By[k]
+                #dy = ((Yi-Yb)/SEDIMENT_RADIUS)**2
+                #d0 = dy-1.0
+                #d0 = (d0/abs(d0)) * abs(d0)#**0.1
+                #for j in xrange(data.size):
+                    #dj = data[j]
+                    #if (d0 >= dj):
+                        #continue
+                    #Xj = X[j]
+                    #dx = ((Xj-Xb)/SEDIMENT_RADIUS)**2
+                    #d = dx+dy-1.0
+                    #d = (d/abs(d)) * abs(d)#**0.1
+                    #if (d >= dj):
+                        #continue
+                    #data[j] = d
+
+        #data[...] = np.inf
+        #for i in xrange(Y.size):
+            #vprint(' {}/{}'.format(i, Y.size))
+            #iter_blobs(X, Y[i], Bx, By, data[i])
+
         # we cache initialization
         np.savez_compressed(file=cache_file, data=data)
-        print 'Caching data to "{}.npz".'.format(cache_file)
+        vprint(' *Caching data to "{}.npz".'.format(cache_file))
 
 
 
@@ -58,14 +102,16 @@ def compute(args):
 
     from hysop.operators import DirectionalAdvection, DirectionalStretching,       \
                                 Diffusion, ComputeMeanField,                       \
-                                PoissonCurl, AdaptiveTimeStep,                     \
+                                PoissonCurl, AdaptiveTimeStep,                      \
                                 Enstrophy, MinMaxFieldStatistics, StrangSplitting, \
                                 ParameterPlotter, Integrate, HDF_Writer,           \
-                                CustomSymbolicOperator, DirectionalSymbolic
+                                CustomSymbolicOperator, DirectionalSymbolic,        \
+                                SpectralExternalForce, SymbolicExternalForce
 
     from hysop.methods import SpaceDiscretization, Remesh, TimeIntegrator, \
                               ComputeGranularity, Interpolation
+    from hysop.numerics.odesolvers.runge_kutta import Euler, RK2, RK3, RK4
 
     from hysop.symbolic import sm, space_symbols, local_indices_symbols
     from hysop.symbolic.base import SymbolicTensor
     from hysop.symbolic.field import curl
@@ -78,22 +124,23 @@ def compute(args):
     dim = args.ndim
     if (dim==2):
         Xo = (0.0,)*dim
-        Xn = (1.0,float(TANK_RATIO))
-        nblobs = 250*TANK_RATIO
+        Xn = (float(TANK_RATIO), 1.0)
+        nblobs = SEDIMENT_COUNT
         rblob = SEDIMENT_RADIUS
        npts = args.npts
     else:
         msg='The {}D has not been implemented yet.'.format(dim)
         raise NotImplementedError(msg)
 
-    nu_W = ScalarParameter(name='nu_W', dtype=args.dtype, const=True, initial_value=1.00)
+    nu_W = ScalarParameter(name='nu_W', dtype=args.dtype, const=True, initial_value=1e-2)
 
-    lboundaries = (BoxBoundaryCondition.SYMMETRIC,)*dim
-    rboundaries = (BoxBoundaryCondition.SYMMETRIC,)*dim
+    lboundaries = (BoxBoundaryCondition.SYMMETRIC, BoxBoundaryCondition.SYMMETRIC)
+    rboundaries = (BoxBoundaryCondition.SYMMETRIC, BoxBoundaryCondition.SYMMETRIC)
 
-    S_lboundaries = (BoundaryCondition.HOMOGENEOUS_NEUMANN,)*dim
-    S_rboundaries = (BoundaryCondition.HOMOGENEOUS_NEUMANN,)*dim
-    S_boundaries = {'lboundaries': S_lboundaries, 'rboundaries': S_rboundaries}
+    S_boundaries = {
+        'lboundaries': (BoundaryCondition.HOMOGENEOUS_NEUMANN, BoundaryCondition.HOMOGENEOUS_NEUMANN),
+        'rboundaries': (BoundaryCondition.HOMOGENEOUS_NEUMANN, BoundaryCondition.HOMOGENEOUS_NEUMANN)
+    }
 
     box = Box(origin=Xo, length=np.subtract(Xn,Xo),
               lboundaries=lboundaries, rboundaries=rboundaries)
@@ -133,14 +180,15 @@ def compute(args):
     velo = VelocityField(domain=box, dtype=args.dtype)
     vorti = VorticityField(velocity=velo)
     phi = LevelSetField(domain=box, dtype=args.dtype, **S_boundaries)
-    rho = DensityField(domain=box, dtype=args.dtype, **S_boundaries)
+    S = DensityField(name='S', domain=box, dtype=args.dtype, **S_boundaries)
+    Sv = VolumicIntegrationParameter(field=S)
 
     # Symbolic fields
     frame = velo.domain.frame
     Us = velo.s(*frame.vars)
     Ws = vorti.s(*frame.vars)
     phis = phi.s(*frame.vars)
-    rhos = rho.s(*frame.vars)
+    Ss = S.s(*frame.vars)
     dts = dt.s
 
     ### Build the directional operators
@@ -150,14 +198,14 @@
                                     velocity = velo,
                                     advected_fields = (vorti, phi),
                                     velocity_cfl = args.cfl,
-                                    variables = {velo: npts, 
+                                    variables = {velo: npts,
                                                  vorti: npts,
-                                                 phi: npts}, 
+                                                 phi: npts},
                                     dt=dt, **extra_op_kwds)
 
     #> Recompute density from levelset
     dx = np.max(np.divide(box.length, np.asarray(args.npts)-1))
-    rho1, rho2 = 1.5, 1.0
+    S1, S2 = 0.5, 0.0
     pi  = TmpScalar(name='pi', dtype=args.dtype)
     eps = TmpScalar(name='eps', dtype=args.dtype)
     x   = TmpScalar(name='x', dtype=args.dtype)
@@ -167,21 +215,23 @@
     clamp = Select(0.0, 1.0, pos_cond)
     smooth = (x+eps)/(2*eps) + sm.sin(pi*x/eps)/(2*pi)
     H_eps = Select(clamp, smooth, smooth_cond)
-    e0 = Assignment(pi, np.pi)
-    e1 = Assignment(eps, 0.05*dx)
-    e2 = Assignment(x, phis*SEDIMENT_RADIUS*SEDIMENT_RADIUS)
-    e3 = Assignment(H, H_eps)
-    e4 = Assignment(rhos, rho1 + (rho2-rho1)*H)
-    exprs = (e0,e1,e2,e3,e4)
+    #e0 = Assignment(pi, np.pi)
+    #e1 = Assignment(eps, 5*dx)
+    #e2 = Assignment(x, phis*SEDIMENT_RADIUS)
+    #e3 = Assignment(H, H_eps)
+    #e4 = Assignment(Ss, S1 + (S2-S1)*H)
+    #exprs = (e0,e1,e2,e3,e4)
+    e = Assignment(Ss, 0.5*LogicalLE(phis, 0))
+    exprs = (e,)
     eval_fields = DirectionalSymbolic(name='eval_fields',
                                       pretty_name=u'{}({})'.format(
                                           phi.pretty_name.decode('utf-8'),
-                                          rho.pretty_name.decode('utf-8')),
+                                          S.pretty_name.decode('utf-8')),
                                       no_split=True,
                                       implementation=impl,
                                       exprs=exprs, dt=dt,
                                       variables={phi: npts,
-                                                 rho: npts},
+                                                 S: npts},
                                       **extra_op_kwds)
 
     #> Stretch vorticity
@@ -200,10 +250,10 @@
         msg='Unsupported dimension {}.'.format(dim)
         raise RuntimeError(msg)
 
-    #> External force rot(-rho*g)
+    #> External force rot(-S*g)
     Fext = np.zeros(shape=(dim,), dtype=object).view(SymbolicTensor)
-    fext = -9.81*rhos
-    Fext[0] = fext
+    fext = -Ss
+    Fext[1] = fext
     lhs = Ws.diff(frame.time)
     rhs = curl(Fext, frame)
     exprs = Assignment.assign(lhs, rhs)
@@ -211,12 +261,12 @@
                                           implementation=impl,
                                           exprs=exprs, dt=dt,
                                           variables={vorti: npts,
-                                                     rho: npts},
+                                                     S: npts},
                                           **extra_op_kwds)
 
     splitting = StrangSplitting(splitting_dim=dim, order=args.strang_order)
-    splitting.push_operators(advec, stretch, eval_fields, external_force)
+    splitting.push_operators(advec, eval_fields, stretch, external_force)
 
     ### Build standard operators
     #> Poisson operator to recover the velocity from the vorticity
@@ -236,6 +286,11 @@
                                         Finf=True, implementation=impl,
                                         variables={vorti:npts}, **extra_op_kwds)
 
+    #> Operators to compute the integrated density
+    integrate_S = Integrate(field=S, variables={S: npts},
+                            parameter=Sv, scaling='volumic', cst=2,
+                            implementation=impl, **extra_op_kwds)
+
     #> Operators to dump all fields
     io_params = IOParams(filename='fields', frequency=args.dump_freq)
     dump_fields = HDF_Writer(name='dump',
@@ -244,7 +299,7 @@
                              variables={velo: npts,
                                         vorti: npts,
                                         phi: npts,
-                                        rho: npts},
+                                        S: npts},
                              **extra_op_kwds)
 
     #> Operator to compute and save mean fields
@@ -253,17 +308,16 @@
     view = tuple(view)
     io_params = IOParams(filename='horizontally_averaged_profiles', frequency=0)
     compute_mean_fields = ComputeMeanField(name='mean',
-                                           fields={rho: (view, axes)},
-                                           variables={rho: npts},
+                                           fields={S: (view, axes)},
+                                           variables={S: npts},
                                            io_params=io_params)
 
     ### Adaptive timestep operator
     adapt_dt = AdaptiveTimeStep(dt, equivalent_CFL=True,
                                 name='merge_dt', pretty_name='dt',
-                                max_dt=1e-1)
+                                max_dt=1e-3)
     dt_cfl = adapt_dt.push_cfl_criteria(cfl=args.cfl,
-                                        Fmin=min_max_U.Fmin,
-                                        Fmax=min_max_U.Fmax,
+                                        Finf=min_max_U.Finf,
                                         equivalent_CFL=True, name='dt_cfl', pretty_name='CFL')
     dt_advec = adapt_dt.push_advection_criteria(lcfl=args.lcfl,
                                                 Finf=min_max_W.Finf,
@@ -287,11 +341,11 @@
 
     problem = Problem(method=method)
     problem.insert(poisson,
+                   min_max_U, min_max_W, adapt_dt,
                    splitting,
+                   integrate_S,
                    dump_fields,
-                   compute_mean_fields,
-                   min_max_U, min_max_W,
-                   adapt_dt)
+                   compute_mean_fields)
     problem.build(args)
 
     # If a visu_rank was provided, and show_graph was set,
@@ -310,7 +364,7 @@
                               min_max_U.Finf, min_max_W.Finf, adapt_dt.equivalent_CFL,
                               filename='parameters.txt', precision=8)
 
-    # Initialize vorticity, velocity, rho on all topologies
+    # Initialize vorticity, velocity, S on all topologies
     problem.initialize_field(field=velo,  formula=init_velocity)
     problem.initialize_field(field=vorti, formula=init_vorticity)
     problem.initialize_field(field=phi,   formula=init_phi, nblobs=nblobs, rblob=rblob)
@@ -353,12 +407,12 @@
 
     parser = ParticleAboveSaltArgParser()
 
     parser.set_defaults(impl='cl', ndim=2,
-                        npts=(256+1,TANK_RATIO*256+1),
+                        npts=(TANK_RATIO*DISCRETIZATION+1,DISCRETIZATION+1),
                         box_origin=(0.0,), box_length=(1.0,),
-                        tstart=0.0, tend=1000.0,
-                        dt=1e-6, cfl=1.00, lcfl=0.90,
-                        #dump_times=tuple(float(x) for x in range(0,100,10)),
-                        dump_freq=250)
+                        tstart=0.0, tend=100.1,
+                        dt=1e-6, cfl=0.5, lcfl=0.5,
+                        dump_times=tuple(float(x) for x in range(100)),
+                        dump_freq=0)
 
     parser.run(compute)
diff --git a/hysop/backend/host/host_array_backend.py b/hysop/backend/host/host_array_backend.py
index 50ac7132def46c81ed652ae60e1b079aca429182..734140c5306a4824a57caeedc42a662038e01180 100644
--- a/hysop/backend/host/host_array_backend.py
+++ b/hysop/backend/host/host_array_backend.py
@@ -1,4 +1,6 @@
+
+import warnings
 from hysop.deps import np
 from hysop.constants import Backend
 from hysop.constants import HYSOP_REAL, HYSOP_INTEGER, HYSOP_BOOL
 
@@ -194,7 +196,7 @@ class HostArrayBackend(ArrayBackend):
             dtype=HYSOP_BOOL
             import warning
             msg='HostArrayBackend: numpy bool array converted to hysop_bool={}.'.format(dtype)
-            warning.warn(msg, HysopWarning)
+            warnings.warn(msg, HysopWarning)
 
         if (buf is None):
             assert offset==0
diff --git a/hysop/operator/base/integrate.py b/hysop/operator/base/integrate.py
index 983e46f31ce17a389b923f553a72602d1314379e..710ee46fe76c9320e56849f7542330235ed5e218 100644
--- a/hysop/operator/base/integrate.py
+++ b/hysop/operator/base/integrate.py
@@ -20,7 +20,7 @@ class IntegrateBase(object):
 
     @debug
     def __init__(self, field, variables,
-                 name=None, pretty_name=None,
+                 name=None, pretty_name=None, cst=1,
                  parameter=None, scaling=None, **kwds):
         """
         Initialize a Integrate operator base.
@@ -50,6 +50,8 @@
               'normalize': scale by first integration (first value will be 1.0)
             Can also be a custom float value of tuple of float values.
             Defaults to volumic integration.
+        cst: float, optional
+            Extra scaling constant for volumic mode.
         kwds:
             Extra keywords arguments that will be passed towards implementation
             enstrophy operator __init__.
@@ -87,6 +89,7 @@
         self.field = field
         self.parameter = parameter
         self.scaling = scaling
+        self.cst = cst
         self.scaling_coeff = None
 
         super(IntegrateBase, self).__init__(name=name, pretty_name=pretty_name,
@@ -101,7 +104,7 @@
         scaling = self.scaling
 
         if (scaling == 'volumic'):
-            scaling_coeff = npw.prod(dF.space_step) / npw.prod(dF.domain.length)
+            scaling_coeff = self.cst*npw.prod(dF.space_step) / npw.prod(dF.domain.length)
             scaling_coeff = (scaling_coeff,)*dF.nb_components
         elif (scaling == 'normalize'):
             scaling_coeff = [None,]*dF.nb_components
diff --git a/hysop/operator/base/spectral_operator.py b/hysop/operator/base/spectral_operator.py
index ba5a2782877075878ea2d0c5e38adfdebf3c7a5b..70038c312825e73c805ff91ae2a3f76ef11a838d 100644
--- a/hysop/operator/base/spectral_operator.py
+++ b/hysop/operator/base/spectral_operator.py
@@ -1,4 +1,5 @@
+import warnings
 import sympy as sm
 import numpy as np
 
 
@@ -24,7 +25,7 @@
 from hysop.topology.cartesian_descriptor import CartesianTopologyDescriptors
 from hysop.fields.continuous_field import Field, ScalarField, TensorField
 from hysop.symbolic.array import SymbolicArray
 from hysop.symbolic.spectral import WaveNumber, SpectralTransform, AppliedSpectralTransform
-from hysop.numerics.fft.fft import FFTI, simd_alignment, is_byte_aligned
+from hysop.numerics.fft.fft import FFTI, simd_alignment, is_byte_aligned, HysopFFTWarning
 
 class SpectralComputationalGraphNodeFrontend(ComputationalGraphNodeFrontend):
@@ -1473,11 +1474,12 @@
                                      verbose=False)
                 fft_plan.setup(queue=queue)
                 tmp_nbytes = max(tmp_nbytes, fft_plan.required_buffer_size)
+
             if (tmp_nbytes > nbytes):
                 msg='Planner claims to need more than buffer bytes as temporary buffer:'
                 msg+='\n *Buffer bytes: {}'.format(bytes2str(nbytes))
                 msg+='\n *Tmp bytes: {}'.format(bytes2str(tmp_nbytes))
-                raise RuntimeError(msg)
+                warnings.warn(msg, HysopFFTWarning)
 
             backend = self.transform_group.backend
             mem_tag = self.transform_group.mem_tag
diff --git a/hysop/operator/mean_field.py b/hysop/operator/mean_field.py
index 7c7756cf12cdeb8ff12d3a22cc4c994e48991435..ad203012eea748e50995b8d0ae7dc2423489219b 100644
--- a/hysop/operator/mean_field.py
+++ b/hysop/operator/mean_field.py
@@ -115,6 +115,7 @@ class ComputeMeanField(ComputationalGraphOperator):
     def apply(self, simulation, **kwds):
         if (simulation is None):
             raise ValueError("Missing simulation value for monitoring.")
+        ite = simulation.current_iteration
         should_dump = (self.io_params.frequency>0) and (ite % self.io_params.frequency == 0)
         should_dump |= simulation.is_time_of_interest
         if should_dump:
diff --git a/hysop/tools/numba_utils.py b/hysop/tools/numba_utils.py
index 527f7270deeb551a3a1b91c2f32ce744a0d6ad91..e8abd9cd42348f6b4f83986057856b1b77a4f886 100644
--- a/hysop/tools/numba_utils.py
+++ b/hysop/tools/numba_utils.py
@@ -83,8 +83,11 @@ def make_numba_signature(*args, **kwds):
         elif isinstance(a, type):
             na = dtype_to_ntype[a]
             numba_layout += ('()',)
+        elif type(a) in dtype_to_ntype:
+            na = dtype_to_ntype[type(a)]
+            numba_layout += ('()',)
         else:
-            msg='Uknown argument type {}.'.format(type(a))
+            msg='Unknown argument type {}.'.format(type(a).__mro__)
             raise NotImplementedError(msg)
 
         numba_args += (na,)
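
For reference, a minimal sketch of the scalar-argument dispatch that the numba_utils hunk above introduces: numpy scalar instances (e.g. np.float64(0.0)) now resolve through `type(a) in dtype_to_ntype`, where previously only bare types were handled. The mapping and the helper name below are illustrative stand-ins, not hysop's actual code:

import numpy as np
import numba as nb

# Reduced stand-in for dtype_to_ntype in hysop/tools/numba_utils.py (assumption).
dtype_to_ntype = {np.float32: nb.float32, np.float64: nb.float64,
                  np.int32:   nb.int32,   np.int64:   nb.int64}

def scalar_numba_type(a):
    # Hypothetical helper mirroring the patched branch order.
    if isinstance(a, type):
        # a bare numpy type, e.g. np.float64 itself
        return dtype_to_ntype[a]
    elif type(a) in dtype_to_ntype:
        # new case: a numpy scalar instance, e.g. np.float64(0.0)
        return dtype_to_ntype[type(a)]
    else:
        msg = 'Unknown argument type {}.'.format(type(a).__mro__)
        raise NotImplementedError(msg)

assert scalar_numba_type(np.float64) is nb.float64       # already supported
assert scalar_numba_type(np.float64(0.0)) is nb.float64  # enabled by this patch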