Jean-Matthieu Etancelin authored

Code improvements:
- Standardize MPI communicator use (not always COMM_WORLD);
- Move operator requirements to the continuous level;
- Add a task parallelism paradigm;
- GPU: reorganise multi-scale advection on GPU (reuse of the remeshing formula);
- GPU: reuse buffers across different discretizations of the same field.
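Two of these items (communicator standardization and task parallelism) boil down to threading an MPI communicator through the operators instead of hardcoding COMM_WORLD, so that distinct tasks can run their collectives on disjoint sub-communicators. A minimal mpi4py sketch of the idea; the splitting rule and names below are illustrative, not parmepy API:

from mpi4py import MPI

# Split COMM_WORLD into two task groups, e.g. GPU advection on the
# first half of the ranks and CPU operators on the second half.
world = MPI.COMM_WORLD
color = 0 if world.Get_rank() < world.Get_size() // 2 else 1
task_comm = world.Split(color=color, key=world.Get_rank())

# Operators then take task_comm as a parameter instead of reaching for
# MPI.COMM_WORLD, so their collectives stay inside the task group.
print("world rank %d -> task %d, rank %d of %d"
      % (world.Get_rank(), color, task_comm.Get_rank(), task_comm.Get_size()))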
levelSet3D.py 3.37 KiB
#!/usr/bin/env python
import parmepy
from parmepy.domain.box import Box
# import parmepy.gpu
# parmepy.gpu.CL_PROFILE = True
from parmepy.fields.continuous import Field
from parmepy.operator.advection import Advection
from parmepy.problem.transport import TransportProblem
from parmepy.operator.monitors.printer import Printer
from parmepy.operator.analytic import Analytic
from parmepy.problem.simulation import Simulation
from parmepy.operator.redistribute import Redistribute
from parmepy.methods_keys import TimeIntegrator, Interpolation, Remesh,\
    Support, Splitting, MultiScale
from parmepy.numerics.integrators.runge_kutta2 import RK2 as RK
#from parmepy.numerics.integrators.runge_kutta4 import RK4 as RK
from parmepy.numerics.interpolation import Linear
from parmepy.numerics.remeshing import L2_1, L6_4
from parmepy.constants import np, HDF5
from parmepy.mpi.main_var import main_size
sin, cos, pi, sqrt = np.sin, np.cos, np.pi, np.sqrt
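# Initial scalar: indicator function of a sphere of radius 0.15
# centered at (0.35, 0.35, 0.35).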
def scalaire(res, x, y, z, t=0.):
r = sqrt((x - 0.35) ** 2 + (y - 0.35) ** 2 + (z - 0.35) ** 2)
res[0][...] = 0.
res[0][r < 0.15] = 1.
return res
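# Deformation velocity field of the classical 3D level-set test case:
# the cos(pi*t/3) factor reverses the flow at t = 1.5, so the scalar
# should recover its initial shape at t = 3.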
def vitesse(res, x, y, z, t=0.):
res[0][...] = 2. * sin(pi*x)**2*sin(2.*pi*y)*sin(2.*pi*z)*cos(t*pi/3.)
res[1][...] = -sin(2.*pi*x)*sin(pi*y)**2*sin(2.*pi*z)*cos(t*pi/3.)
res[2][...] = -sin(2.*pi*x)*sin(2.*pi*y)*sin(pi*z)**2*cos(t*pi/3.)
return res
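## Discretization and simulation parameters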
dim = 3
boxLength = [1., 1., 1.]
boxMin = [0., 0., 0.]
nbElem_v = [129] * 3
nbElem_s = [129] * 3
timeStep = 0.025
finalTime = 3.
outputFilePrefix = './res3D/levelSet_3D'
outputModulo = 10
simu = Simulation(tinit=0.0, tend=finalTime, timeStep=timeStep, iterMax=2000)
## Domain
box = Box(dim, length=boxLength, origin=boxMin)
## Fields
scal = Field(domain=box, name='Scalar', formula=scalaire)
velo = Field(domain=box, name='Velocity', isVector=True, formula=vitesse)
## Operators
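# Advection of scal by velo on GPU: RK2 integrator, second-order (Strang)
# directional splitting, L6_4 remeshing kernel ('gpu_1k': single-kernel
# OpenCL advection + remeshing); the MultiScale key selects the formula
# used to interpolate the velocity when the two resolutions differ.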
advec = Advection(velo, scal,
resolutions={velo: nbElem_v,
scal: nbElem_s},
method={TimeIntegrator: RK,
Interpolation: Linear,
Remesh: L6_4,
Support: 'gpu_1k',
Splitting: 'o2',
MultiScale: L2_1},
#src=['./levelSet3D.cl'],
#device_type='cpu',platform_id=1,
#batch_nb=2
)
advec.discretize()
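# Analytic evaluation of the velocity formula, on the topology of the
# x-direction advection operator.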
velocity = Analytic(velo,
resolutions={velo: nbElem_v},
#method={Support: 'gpu'},
topo=advec.advecDir[0].discreteFields[velo].topology
#topo=advec.discreteFields[velo].topology
)
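# In parallel, the velocity computed on the x-direction topology must be
# redistributed to the topology of the z-direction advection.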
if main_size > 1:
distrForAdvecZ = Redistribute([velo], velocity, advec.advecDir[2],
component=2, name_suffix='Zin')
## Problem
# Sequential run: no redistribution needed
if main_size == 1:
pb = TransportProblem([velocity, advec], simu, dumpFreq=10000)
else:
pb = TransportProblem([velocity, distrForAdvecZ, advec],
simu, dumpFreq=10000)
pb.setUp()
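# HDF5 output of the scalar every outputModulo iterations.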
p = Printer(variables=[scal],
topo=scal.topoInit,
frequency=outputModulo,
prefix=outputFilePrefix,
formattype=HDF5)
pb.addMonitors([p])
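# Force an output of the initial condition before time stepping.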
p._printStep()
## Solve problem
pb.solve()
pb.finalize()
print(pb.timer)