Compare revisions

Changes are shown as if the source revision was being merged into the target revision.

Target project: particle_methods/hysop

Commits on Source (23)
Showing with 1133 additions and 981 deletions
......@@ -9,12 +9,12 @@ fi
if [ -d "$1" ]; then
echo "Folder $1 already exists."
exit 1
fi
if [ -d "$2" ]; then
echo "Folder $2 already exists."
exit 1
fi
ROOT_DIR="$(pwd)"
BUILD_DIR="$1"
......@@ -22,7 +22,7 @@ INSTALL_DIR="$2"
mkdir -p $BUILD_DIR
cd $BUILD_DIR
CC="$3" CXX="$4" FC="$5" cmake -DCMAKE_BUILD_TYPE=Release -DVERBOSE=OFF -DHYSOP_INSTALL=$INSTALL_DIR $ROOT_DIR
CC="$3" CXX="$4" FC="$5" cmake -DCMAKE_BUILD_TYPE=Release -DVERBOSE=OFF -DWITH_SCALES=ON -DHYSOP_INSTALL=$INSTALL_DIR $ROOT_DIR
if [ ! -f Makefile ]; then
echo "The makefile has not been generated."
......
......@@ -64,6 +64,10 @@ python "$HYSOP_DIR/operator/tests/test_directional_diffusion.py"
python "$HYSOP_DIR/operator/tests/test_diffusion.py"
python "$HYSOP_DIR/operator/tests/test_directional_stretching.py"
# If scales (fortran advection library) is installed
python -c "from hysop.f2hysop import scales2py as scales" && python "$HYSOP_DIR/operator/tests/test_bilevel_advection.py"
python -c "from hysop.f2hysop import scales2py as scales" && python "$HYSOP_DIR/operator/tests/test_scales_advection.py"
if [ "$HAS_CACHE_DIR" = true ]; then
cp -r /root/.cache/* $CACHE_DIR/
find $CACHE_DIR -name '*.lock' -delete
......
......@@ -12,30 +12,42 @@ Advection of one or more fields, i.e. find field X which solves
for a given velocity field and assuming incompressible flow.
All solvers (except pure python) are able to solve advection in a
bi-level configuration : velocity is known on a coarser grid than the
advected field. In that case, the velocity is linearly interpolated
before moving the particles. Linear interpolation is the default method and
the only one implemented at the moment.
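As a rough illustration, the bilevel interpolation step reduces to the following 1D sketch (a hedged example assuming a periodic coarse grid; not the HySoP implementation itself):

import numpy as np

def interp_velocity_1d(v_coarse, x, dx_coarse):
    # Linearly interpolate a periodic coarse velocity field at particle
    # positions x before moving the particles (illustrative sketch only).
    n = v_coarse.size
    q = x / dx_coarse                      # positions in coarse-grid units
    i = np.floor(q).astype(np.int64)       # left grid point of each particle
    alpha = q - i                          # linear weight in [0, 1)
    return (1.0 - alpha)*v_coarse[i % n] + alpha*v_coarse[(i + 1) % n]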
Scales (Multi-CPU) :
--------------------
The Scales library is interfaced with HySoP (through the :class:`~hysop.operator.advection.Advection` operator).
* Fortran routines
* 3D only
Available solvers:
^^^^^^^^^^^^^^^^^^
* 'p_O2' : order 2 method, corrected to allow large CFL number,
untagged particles
* 'p_O4' : order 4 method, corrected to allow large CFL number,
untagged particles
* 'p_L2' : limited and corrected :math:`\Lambda_2`
* 'p_M4' : :math:`\Lambda_{2,1} (=M'_4)` 4 point formula
* 'p_M6' (default) : :math:`\Lambda_{4,2} (=M'_6)` 6 point formula
* 'p_M8' : :math:`M8prime` formula
* 'p_44' : :math:`\Lambda_{4,4}` formula
* 'p_64' : :math:`\Lambda_{6,4}` formula
* 'p_66' : :math:`\Lambda_{6,6}` formula
* 'p_84' : :math:`\Lambda_{8,4}` formula
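As a concrete example of the formulas above, a standalone evaluation of the :math:`\Lambda_{2,1} (=M'_4)` weights (textbook formula, independent of the Scales routines):

import numpy as np

def m4prime(q):
    # M'4 (Lambda_{2,1}) remeshing weight as a function of the distance
    # q to a grid point, in grid units (zero outside |q| <= 2).
    q = np.abs(np.asarray(q, dtype=np.float64))
    return np.where(q <= 1.0, 1.0 - 2.5*q**2 + 1.5*q**3,
           np.where(q <= 2.0, 0.5*((2.0 - q)**2)*(1.0 - q), 0.0))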
Time integration:
^^^^^^^^^^^^^^^^^
* Runge-Kutta 2nd order
Splitting
......@@ -44,7 +56,7 @@ Splitting
Computations are performed with a dimensional splitting as follows:
* 'strang' (2nd order):
* X-dir, half time step
* Y-dir, half time step
* Z-dir, full time step
......@@ -52,6 +64,36 @@ Computations are performed with a dimensional splitting as follows:
* X-dir, half time step
* 'classic' (1st order):
* X-dir, full time step
* Y-dir, full time step
* Z-dir, full time step
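In pseudo-operator form, one 'strang' step chains the one-dimensional solves as sketched below (`advect_dir` stands for any of the directional solvers; illustrative only):

def strang_step(advect_dir, dt):
    # Second-order Strang splitting: X-Y-Z-Y-X with half/full substeps.
    advect_dir('X', 0.5*dt)
    advect_dir('Y', 0.5*dt)
    advect_dir('Z', 1.0*dt)
    advect_dir('Y', 0.5*dt)
    advect_dir('X', 0.5*dt)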
HySoP (Multi-CPU and Multi-GPU) :
--------------------------------
* 1D, 2D or 3D advection
* Pure python code
* Generated OpenCL code
* Ghosts layers are computed from a maximum CFL number
Available solvers:
^^^^^^^^^^^^^^^^^^
* All :math:`\Lambda_{p,q}` kernels are computed
Time integration:
^^^^^^^^^^^^^^^^^
Explicit time integration solvers:
* Euler
* Runge-Kutta (orders 1, 2, 3, 4, 4_38)
Splitting
^^^^^^^^^
Splitting is handled at a higher level by HySoP :class:`~hysop.numerics.splitting.directional_splitting.DirectionalSplitting` operators.
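The CFL-based ghost sizing mentioned above follows from the fact that a particle can cross at most `CFL` grid cells per substep; a minimal sketch (hypothetical helper, the actual computation lives in the kernel generator):

import math

def advection_ghosts(velocity_cfl):
    # A particle travels at most velocity_cfl grid cells per substep, so
    # the advection stencil may read that many cells past the subdomain.
    return int(math.ceil(velocity_cfl))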
from hysop.tools.numpywrappers import npw
from hysop.tools.types import check_instance
from hysop.constants import AutotunerFlags, DirectionLabels, BoundaryCondition
......@@ -12,13 +11,13 @@ from hysop.backend.device.opencl.opencl_autotunable_kernel import OpenClAutotuna
class OpenClAutotunableDirectionalAdvectionKernel(OpenClAutotunableKernel):
"""Autotunable interface for directional advection kernel code generators."""
def autotune(self, direction, time_integrator, velocity_cfl,
velocity, position,
hardcode_arrays, **kwds):
"""Autotune this kernel with specified configuration."""
precision = self.typegen.dtype
dim = velocity.dim
if not (1 <= dim <= 3):
raise ValueError('Dimension mismatch.')
......@@ -32,12 +31,17 @@ class OpenClAutotunableDirectionalAdvectionKernel(OpenClAutotunableKernel):
raise ValueError(msg)
if not isinstance(velocity_cfl, float) or velocity_cfl<=0.0:
raise ValueError('Invalid velocity_cfl value.')
min_ghosts = DirectionalAdvectionKernelGenerator.min_ghosts(dim, velocity_cfl)
cache_ghosts = min_ghosts[-1]
self.check_cartesian_field(velocity, min_ghosts=min_ghosts, nb_components=dim)
self.is_bilevel = kwds['is_bilevel']
if self.is_bilevel is not None:
self.check_cartesian_field(position, nb_components=1,
compute_resolution=position.compute_resolution)
else:
self.check_cartesian_field(position, nb_components=1,
compute_resolution=velocity.compute_resolution)
if (velocity.dtype != precision) or (position.dtype != precision):
#TODO implement mixed precision kernels
......@@ -45,52 +49,54 @@ class OpenClAutotunableDirectionalAdvectionKernel(OpenClAutotunableKernel):
msg=msg.format(velocity.dtype, position.dtype,
precision.__name__)
raise NotImplementedError(msg)
ftype = clTools.dtype_to_ctype(precision)
name = 'directional_advection{}_{}__{}_{}_{}g'.format(
"" if self.is_bilevel is None else "_bilevelLinear",
DirectionLabels[direction],
time_integrator.name(), ftype, cache_ghosts)
vboundaries = (velocity.boundaries[0][-1], velocity.boundaries[1][-1])
eps = npw.finfo(precision).eps
dt = 10*eps
assert (dt > +0.0), 'Precision {} cannot represent 10*eps.'.format(precision)
make_offset, offset_dtype = self.make_array_offset()
make_strides, strides_dtype = self.make_array_strides(position.dim,
hardcode_arrays=hardcode_arrays)
kernel_args = {}
known_args = {}
isolation_params = {}
mesh_info_vars = {}
target_args = known_args if hardcode_arrays else kernel_args
kernel_args['dt'] = precision(dt)
kernel_args['V_base'] = velocity[direction].base_data
target_args['V_strides'] = make_strides(velocity[direction].strides,velocity.dtype)
target_args['V_offset'] = make_offset(velocity[direction].offset, velocity.dtype)
kernel_args['P_base'] = position[0].base_data
target_args['P_strides'] = make_strides(position[0].strides, position.dtype)
target_args['P_offset'] = make_offset(position[0].offset, position.dtype)
isolation_params['V_base'] = dict(count=velocity.size,
dtype=velocity.dtype, fill=0.05)
isolation_params['P_base'] = dict(count=position.size,
dtype=position.dtype, fill=0.00)
mesh_info_vars['V_mesh_info'] = self.mesh_info('V_mesh_info',
velocity.mesh)
mesh_info_vars['P_mesh_info'] = self.mesh_info('P_mesh_info',
position.mesh)
return super(OpenClAutotunableDirectionalAdvectionKernel, self).autotune(name=name,
rk_scheme=time_integrator, kernel_args=kernel_args, cache_ghosts=cache_ghosts,
vboundaries=vboundaries, precision=precision, ftype=ftype,
mesh_info_vars=mesh_info_vars, work_dim=dim,
work_size=position.compute_resolution,
hardcode_arrays=hardcode_arrays, known_args=known_args,
offset_dtype=offset_dtype, strides_dtype=strides_dtype,
isolation_params=isolation_params, **kwds)
......@@ -103,15 +109,15 @@ class OpenClAutotunableDirectionalAdvectionKernel(OpenClAutotunableKernel):
hardcode_arrays = extra_kwds['hardcode_arrays']
args_mapping = {}
args_mapping['dt'] = (0, precision)
args_mapping['V_base'] = (1, cl.MemoryObjectHolder)
if not hardcode_arrays:
args_mapping['V_strides'] = (2, strides_dtype)
args_mapping['V_offset'] = (3, offset_dtype)
i = 2+2*(1-hardcode_arrays)
args_mapping['P_base'] = (i, cl.MemoryObjectHolder)
if not hardcode_arrays:
args_mapping['P_strides'] = (i+1, strides_dtype)
......@@ -119,12 +125,12 @@ class OpenClAutotunableDirectionalAdvectionKernel(OpenClAutotunableKernel):
return args_mapping
def compute_parameters(self, extra_kwds):
"""Register extra parameters to optimize."""
check_instance(extra_kwds, dict, keys=str)
work_dim = extra_kwds['work_dim']
## Register extra parameters
autotuner_flag = self.autotuner_config.autotuner_flag
caching_options = [True, False]
if (autotuner_flag == AutotunerFlags.ESTIMATE):
......@@ -139,7 +145,7 @@ class OpenClAutotunableDirectionalAdvectionKernel(OpenClAutotunableKernel):
elif (autotuner_flag == AutotunerFlags.EXHAUSTIVE):
max_workitem_workload = [1,16,16]
nparticles_options = [1,2,4,8,16]
extra_kwds['max_work_load'] = max_workitem_workload[:work_dim]
params = super(OpenClAutotunableDirectionalAdvectionKernel, self).compute_parameters(
......@@ -147,24 +153,24 @@ class OpenClAutotunableDirectionalAdvectionKernel(OpenClAutotunableKernel):
params.register_extra_parameter('is_cached', caching_options)
params.register_extra_parameter('nparticles', nparticles_options)
return params
def compute_min_max_wg_size(self, work_bounds, work_load, global_work_size,
extra_parameters, extra_kwds):
"""Default min and max workgroup size."""
cache_ghosts = extra_kwds['cache_ghosts']
min_wg_size = npw.ones(shape=work_bounds.work_dim, dtype=npw.int32)
max_wg_size = npw.ones(shape=work_bounds.work_dim, dtype=npw.int32)
min_wg_size[0] = 2*cache_ghosts+1
max_wg_size[0] = max(global_work_size[0], min_wg_size[0])
return (min_wg_size, max_wg_size)
def compute_global_work_size(self, local_work_size, work, extra_parameters, extra_kwds):
gs = super(OpenClAutotunableDirectionalAdvectionKernel, self).compute_global_work_size(
local_work_size=local_work_size, work=work,
extra_parameters=extra_parameters, extra_kwds=extra_kwds)
gs[0] = local_work_size[0]
return gs
def generate_kernel_src(self, global_work_size, local_work_size,
extra_parameters, extra_kwds, tuning_mode, dry_run):
"""Generate kernel name and source code"""
......@@ -178,9 +184,9 @@ class OpenClAutotunableDirectionalAdvectionKernel(OpenClAutotunableKernel):
## Get compile time OpenCL known variables
known_vars = super(OpenClAutotunableDirectionalAdvectionKernel, self).generate_kernel_src(
global_work_size=global_work_size,
local_work_size=local_work_size,
extra_parameters=extra_parameters,
extra_kwds=extra_kwds, tuning_mode=tuning_mode, dry_run=dry_run)
known_vars.update(extra_kwds['mesh_info_vars'])
known_vars.update(known_args)
......@@ -188,16 +194,17 @@ class OpenClAutotunableDirectionalAdvectionKernel(OpenClAutotunableKernel):
# disable periodic-periodic because we exchange ghosts anyways
if vboundaries == (BoundaryCondition.PERIODIC, BoundaryCondition.PERIODIC):
vboundaries = (BoundaryCondition.NONE, BoundaryCondition.NONE)
## Generate OpenCL source code
codegen = DirectionalAdvectionKernelGenerator(typegen=self.typegen,
work_dim=work_dim,
vboundary=vboundaries,
rk_scheme=rk_scheme,
symbolic_mode=self.symbolic_mode,
ftype=ftype,
min_ghosts=cache_ghosts,
tuning_mode=tuning_mode,
is_bilevel=extra_kwds['is_bilevel'],
known_vars=known_vars, **extra_parameters)
kernel_name = codegen.name
......@@ -206,14 +213,15 @@ class OpenClAutotunableDirectionalAdvectionKernel(OpenClAutotunableKernel):
## Check if cache would fit
if (local_work_size is not None):
self.check_cache(codegen.required_workgroup_cache_size(local_work_size)[2])
if (self.is_bilevel is not None):
self.check_cache(codegen.required_workgroup_velocity_cache_size())
return (kernel_name, kernel_src)
def hash_extra_kwds(self, extra_kwds):
"""Hash extra_kwds dictionnary for caching purposes."""
kwds = ('rk_scheme', 'ftype', 'work_dim',
'vboundaries', 'cache_ghosts', 'work_size',
'known_args')
return self.custom_hash(*tuple(extra_kwds[kwd] for kwd in kwds),
mesh_info_vars=extra_kwds['mesh_info_vars'])
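`hash_extra_kwds` only needs to map equal kernel configurations to equal autotuner cache keys; a simplified stand-in for `custom_hash` (assumption: all values have stable string representations):

def simple_kernel_cache_key(**kwds):
    # Deterministic key: sort the items so insertion order cannot change
    # the hash, and stringify values so unhashable ones are accepted.
    return hash(tuple(sorted((k, str(v)) for (k, v) in kwds.items())))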
from hysop.tools.decorators import debug
from hysop.constants import DirectionLabels
from hysop.backend.device.opencl.operator.directional.opencl_directional_operator import \
......@@ -11,9 +10,9 @@ from hysop.backend.device.opencl.opencl_copy_kernel_launchers import OpenClCopyB
from hysop.backend.device.opencl.opencl_kernel_launcher import OpenClKernelListLauncher
class OpenClDirectionalAdvection(DirectionalAdvectionBase, OpenClDirectionalOperator):
DEBUG=False
@debug
def __init__(self, force_atomics=False, relax_min_particles=False, remesh_criteria_eps=None,
**kwds):
......@@ -39,7 +38,7 @@ class OpenClDirectionalAdvection(DirectionalAdvectionBase, OpenClDirectionalOper
force_atomics: bool
If set, only use atomic accumulators for remesh.
Default: autotuner will automatically figure out the best strategy
between using atomic operations and remeshing multiple particles at
a time.
relax_min_particles: bool
Relax the minimum particles required for correct accumulation of remeshed
......@@ -53,7 +52,7 @@ class OpenClDirectionalAdvection(DirectionalAdvectionBase, OpenClDirectionalOper
remeshing. By default every non zero value is remeshed.
kwds:
Extra parameters passed to generated directional operators.
Attributes
----------
velocity: Field
......@@ -63,7 +62,7 @@ class OpenClDirectionalAdvection(DirectionalAdvectionBase, OpenClDirectionalOper
advected_fields_out: list
List of output continuous fields.
"""
super(OpenClDirectionalAdvection,self).__init__(**kwds)
self.force_atomics = force_atomics
self.relax_min_particles = relax_min_particles
......@@ -76,7 +75,7 @@ class OpenClDirectionalAdvection(DirectionalAdvectionBase, OpenClDirectionalOper
def setup(self, work):
super(OpenClDirectionalAdvection, self).setup(work)
self._collect_kernels()
def _collect_kernels(self):
kl = OpenClKernelListLauncher(name='advec_remesh')
kl += self._collect_advection_kernel()
......@@ -89,24 +88,25 @@ class OpenClDirectionalAdvection(DirectionalAdvectionBase, OpenClDirectionalOper
typegen = self.typegen
build_options = self.build_options()
autotuner_config = self.autotuner_config
kernel = OpenClAutotunableDirectionalAdvectionKernel(cl_env=cl_env, typegen=typegen,
build_opts=build_options, autotuner_config=autotuner_config)
kwds = {}
kwds['velocity'] = self.dvelocity
kwds['position'] = self.dposition
kwds['is_bilevel'] = self.is_bilevel
kwds['direction'] = self.splitting_direction
kwds['velocity_cfl'] = self.velocity_cfl
kwds['time_integrator'] = self.time_integrator
(advec_kernel, args_dict) = kernel.autotune(force_verbose=self._force_autotuner_verbose,
force_debug=self._force_autotuner_debug, hardcode_arrays=True, **kwds)
args_dict.pop('dt')
advec_launcher = advec_kernel.build_launcher(**args_dict)
assert ('dt' in advec_launcher.parameters_map.keys())
self.advection_kernel_launcher = advec_launcher
return advec_launcher
......@@ -116,30 +116,30 @@ class OpenClDirectionalAdvection(DirectionalAdvectionBase, OpenClDirectionalOper
typegen = self.typegen
build_options = self.build_options()
autotuner_config = self.autotuner_config
kernel = OpenClAutotunableDirectionalRemeshKernel(cl_env=cl_env, typegen=typegen,
build_opts=build_options, autotuner_config=autotuner_config)
scalars_in = tuple(self.dadvected_fields_in[ifield] \
for ifield in self.advected_fields_in)
scalars_out = tuple(self.dadvected_fields_out[ofield] \
for ofield in self.advected_fields_out)
kwds = {}
kwds['direction'] = self.splitting_direction
kwds['scalar_cfl'] = self.scalar_cfl
kwds['position'] = self.dposition
kwds['scalars_in'] = scalars_in
kwds['scalars_out'] = scalars_out
kwds['is_inplace'] = self.is_inplace
kwds['remesh_kernel'] = self.remesh_kernel
kwds['remesh_criteria_eps'] = self.remesh_criteria_eps
kwds['force_atomics'] = self.force_atomics
kwds['relax_min_particles'] = self.relax_min_particles
(remesh_kernel, args_dict) = kernel.autotune(force_verbose=self._force_autotuner_verbose,
force_debug=self._force_autotuner_debug, hardcode_arrays=True, **kwds)
......@@ -150,19 +150,19 @@ class OpenClDirectionalAdvection(DirectionalAdvectionBase, OpenClDirectionalOper
def _collect_redistribute_kernels(self):
remesh_ghosts = self.remesh_ghosts
dsoutputs = self.dadvected_fields_out
kl = OpenClKernelListLauncher(name='accumulate_and_exchange_ghosts')
for sout in dsoutputs.values():
kl += sout.accumulate_ghosts(directions=sout.dim-1, ghosts=remesh_ghosts,
build_launcher=True)
kl += sout.exchange_ghosts(build_launcher=True)
self.accumulate_and_exchange = kl
return kl
@op_apply
def apply(self, dbg=None, **kargs):
queue = self.cl_env.default_queue
dt = self.precision(self.dt() * self.dt_coeff)
if self.DEBUG:
queue.flush()
queue.finish()
......@@ -192,5 +192,3 @@ class OpenClDirectionalAdvection(DirectionalAdvectionBase, OpenClDirectionalOper
@classmethod
def supports_mpi(cls):
return True
from hysop.deps import sm
from hysop.constants import DirectionLabels
from hysop.backend.device.opencl.opencl_array_backend import OpenClArrayBackend
......@@ -23,13 +22,13 @@ class OpenClEnstrophy(EnstrophyBase, OpenClSymbolic):
rho = self.rho
rhos = (rho.s if (rho is not None) else sm.Integer(1))
Ws = self.vorticity.s()
WdotW = self.WdotW.s()
expr = Assignment(WdotW, rhos*npw.dot(Ws, Ws))
self.require_symbolic_kernel('WdotW', expr)
@debug
def get_field_requirements(self):
# force 0 ghosts for the reduction (pyopencl reduction kernel)
......@@ -38,21 +37,19 @@ class OpenClEnstrophy(EnstrophyBase, OpenClSymbolic):
if field is self.WdotW:
req.max_ghosts = (0,)*self.WdotW.dim
return requirements
@debug
def discretize(self):
super(OpenClEnstrophy, self).discretize()
assert (self.dWdotW.ghosts == 0).all()
self.coeff = npw.prod(self.dWdotW.space_step)
self.coeff /= (self.rho_0 * npw.prod(self.dWdotW.domain.length))
@debug
def setup(self, work):
super(OpenClEnstrophy, self).setup(work)
(self.WdotW_kernel, self.WdotW_update_parameters) = self.symbolic_kernels['WdotW']
self.sum_kernel = self.dWdotW.backend.nansum(a=self.dWdotW[0],
build_kernel_launcher=True, async=True)
@op_apply
def apply(self, **kwds):
queue = self.cl_env.default_queue
......
......@@ -89,7 +89,6 @@ class PoissonFFTW(FortranFFTWOperator):
if all([__.flags['C_CONTIGUOUS']
for _ in (fi, fo) for __ in _.data ]):
changeLayout = True
print "CHange layout"
new_i = tuple(np.asfortranarray(_) for _ in fi.buffers)
new_o = tuple(np.asfortranarray(_) for _ in fo.buffers)
elif all([__.flags['F_CONTIGUOUS']
......
......@@ -101,7 +101,6 @@ class PoissonRotationalFFTW(FortranFFTWOperator):
if all([__.flags['C_CONTIGUOUS']
for _ in (self.dvorticity,self.dvelocity) for __ in _.data ]):
changeLayout = True
print "CHange layout"
dw = tuple(np.asfortranarray(_) for _ in self.dvorticity.buffers)
dv = tuple(np.asfortranarray(_) for _ in self.dvelocity.buffers)
elif all([__.flags['F_CONTIGUOUS']
......
# coding: utf-8
import hysop
import numpy as np
try:
from hysop.f2hysop import scales2py as scales
except ImportError:
msgE = 'scales package not available for your hysop install. '
msgE += 'Try to recompile with WITH_SCALES=ON.'
raise ImportError(msgE)
from hysop.tools.decorators import debug
from hysop.core.graph.computational_operator import ComputationalGraphOperator
from hysop.tools.types import check_instance, InstanceOf
from hysop.fields.continuous_field import Field
from hysop.topology.cartesian_descriptor import CartesianTopologyDescriptors
from hysop.parameters.scalar_parameter import ScalarParameter
from hysop.methods import Interpolation, TimeIntegrator, Remesh
from hysop.numerics.remesh.remesh import RemeshKernel
from hysop.tools.numpywrappers import npw
from hysop.core.graph.graph import op_apply
from hysop.numerics.odesolvers.runge_kutta import ExplicitRungeKutta, RK2
class ScalesAdvection(ComputationalGraphOperator):
"""Particle advection using Fortran Scales library."""
__rmsh_to_scales__ = {Remesh.L2_1: 'p_M4',
Remesh.L4_2: 'p_M6',
Remesh.L4_4: 'p_44',
Remesh.L6_4: 'p_64',
Remesh.L6_6: 'p_66',
Remesh.L8_4: 'p_84',
'p_O2': 'p_O2',
'p_O4': 'p_O4',
'p_L2': 'p_L2',
'p_M8': 'p_M8'}
__default_method = {
TimeIntegrator: RK2,
Interpolation: Interpolation.LINEAR,
Remesh: Remesh.L2_1,
}
__available_methods = {
TimeIntegrator: InstanceOf(ExplicitRungeKutta),
Interpolation: Interpolation.LINEAR,
Remesh: (InstanceOf(Remesh), InstanceOf(str)),
}
@classmethod
def default_method(cls):
dm = super(ScalesAdvection, cls).default_method()
dm.update(cls.__default_method)
return dm
@classmethod
def available_methods(cls):
am = super(ScalesAdvection, cls).available_methods()
am.update(cls.__available_methods)
return am
@debug
def __init__(self, velocity,
advected_fields_in, advected_fields_out,
variables, dt,
**kwds):
"""Particle advection of field(s),
on any backend, with cartesian remeshing.
Parameters
----------
velocity: Field
Continuous velocity field (all components)
advected_fields_in: Field or array like of Fields
Instance or list of continuous fields to be advected.
advected_fields_out: Field or array like of Field
Where input fields should be remeshed.
variables: dict
Dictionary of continuous fields as keys and topologies as values.
dt: ScalarParameter
Timestep parameter that will be used for time integration.
kwds:
Extra parameters passed to generated directional operators.
Attributes
----------
velocity: Field
Continuous velocity field (all components)
advected_fields_in: list
Tuple of continuous fields to be advected.
advected_fields_out: list
Tuple of output continuous fields.
As HySoP stores data in C order, the Scales library is interfaced
using the Z-Y-X data layout.
"""
check_instance(velocity, Field)
check_instance(advected_fields_in, tuple, values=Field)
check_instance(advected_fields_out, tuple, values=Field)
check_instance(variables, dict, keys=Field,
values=CartesianTopologyDescriptors)
check_instance(dt, ScalarParameter)
assert (len(advected_fields_in) == len(advected_fields_out)), \
'|inputs| != |outputs|'
input_fields = {velocity: variables[velocity]}
output_fields = {}
input_params = {dt.name: dt}
output_params = {}
is_inplace = True
for (ifield, ofield) in zip(advected_fields_in, advected_fields_out):
input_fields[ifield] = variables[ifield]
output_fields[ofield] = variables[ofield]
if ifield is ofield:
assert is_inplace, \
'Must be inplace.'
super(ScalesAdvection, self).__init__(
input_fields=input_fields,
output_fields=output_fields,
input_params=input_params,
output_params=output_params,
**kwds)
self.velocity = velocity
self.first_scalar = advected_fields_in[0]
self.advected_fields_in = advected_fields_in
self.advected_fields_out = advected_fields_out
self.dt = dt
self.is_inplace = is_inplace
@debug
def handle_method(self, method):
super(ScalesAdvection, self).handle_method(method)
remesh_kernel = method.pop(Remesh)
# Translate hysop remesh kernels into Scales configuration
try:
self._scales_kernel = ScalesAdvection.__rmsh_to_scales__[remesh_kernel]
except KeyError as e:
print "Unknown remesh method for Scales ({} given)".format(
remesh_kernel)
raise e
self.remesh_kernel = remesh_kernel
self.interp = method.pop(Interpolation)
assert self.interp is Interpolation.LINEAR, \
"Scales uses Linear interpolation for velocity"
self.time_integrator = method.pop(TimeIntegrator)
assert self.time_integrator is RK2, \
"Scales uses RK2 time integration only"
@debug
def get_field_requirements(self):
requirements = super(ScalesAdvection, self).get_field_requirements()
dim = self.first_scalar.domain.dim
assert dim == 3, "Scales only supports 3D advection"
is_inplace = self.is_inplace
velocity = self.velocity
v_topo, v_requirements = requirements.get_input_requirement(velocity)
v_requirements.min_ghosts = (0, ) * dim
v_requirements.max_ghosts = (0, ) * dim
scalar = self.first_scalar
s_topo, _ = requirements.get_input_requirement(scalar)
s_dx = s_topo.space_step
for sfield in self.advected_fields_out:
_s_topo, _s_requirements = requirements.get_output_requirement(sfield)
_s_dx = _s_topo.space_step
assert (_s_dx == s_dx).all()
_s_requirements.min_ghosts = (0, ) * dim
_s_requirements.max_ghosts = (0, ) * dim
if is_inplace:
for sfield in self.advected_fields_in:
_s_topo, _s_requirements = requirements.get_input_requirement(sfield)
_s_dx = _s_topo.space_step
assert (_s_dx == s_dx).all()
_s_requirements.min_ghosts = (0, ) * dim
_s_requirements.max_ghosts = (0, ) * dim
for field,td,req in requirements.iter_input_requirements():
req.axes = ((0,1,2), )
req.can_split = [False, True, True]
for field,td,req in requirements.iter_output_requirements():
req.axes = ((0,1,2), )
req.can_split = [False, True, True]
return requirements
@debug
def discretize(self):
super(ScalesAdvection, self).discretize()
dvelocity = self.input_discrete_fields[self.velocity]
for fi, fo in zip(self.advected_fields_in,self.advected_fields_out):
assert fi is fo
# field -> discrete_field
dadvected_fields_in = {ifield: self.input_discrete_fields[ifield]
for ifield in self.advected_fields_in}
dadvected_fields_out = {ofield: self.output_discrete_fields[ofield]
for ofield in self.advected_fields_out}
self.dvelocity = dvelocity
self.dadvected_fields_in = dadvected_fields_in
self.dadvected_fields_out = dadvected_fields_out
self.is_bilevel = None
if any(self.dvelocity.compute_resolution != \
self.dadvected_fields_out.values()[0].compute_resolution):
self.is_bilevel = self.dvelocity.compute_resolution
@debug
def setup(self, work):
super(ScalesAdvection, self).setup(work)
v_topo = self.dvelocity.topology
s_topo = self.dadvected_fields_in.values()[0].topology
assert (v_topo.ghosts == 0).all()
for dfi, dfo in zip(self.dadvected_fields_in.values(),
self.dadvected_fields_out.values()):
assert (dfi.topology.ghosts == 0).all(), \
"No ghosts in Scales advection"
assert v_topo.domain.periodicity.all(), \
"Scales is only for periodic domains"
sresol = s_topo.mesh.grid_resolution
assert (sresol%s_topo.proc_shape == 0).all(),\
"Scales support only equally sized local resolutions"
scalesres, global_start = scales.init_advection_solver(
sresol,
s_topo.domain.length,
s_topo.proc_shape,
self.mpi_params.comm.py2f(),
order=self._scales_kernel,
dim_split='strang')
assert (scalesres == s_topo.mesh.local_resolution).all(), \
"Scales resolution mismatch" + str(scalesres) + \
" != " + str(s_topo.mesh.local_resolution)
assert (global_start == s_topo.mesh.global_start).all(), \
"Scales mesh start mismatch" + str(global_start) \
+ " != " + str(s_topo.mesh.global_start)
if self.is_bilevel is not None:
scales.init_multiscale(v_topo.mesh.grid_resolution[0],
v_topo.mesh.grid_resolution[1],
v_topo.mesh.grid_resolution[2],
'lin')
self._scales_func = []
for df in self.dadvected_fields_in.values():
if df.nb_components == 3:
if self.is_bilevel is not None:
# 3D interpolation of the velocity before advection
self._scales_func.append(
scales.solve_advection_inter_basic_vect)
# The other interpolation variant performs a 2D interpolation
# first, then 1D interpolations before the advection in each
# direction (slower than basic): solve_advection_inter
else:
self._scales_func.append(scales.solve_advection_vect)
elif df.nb_components == 1:
if self.is_bilevel is not None:
self._scales_func.append(
scales.solve_advection_inter_basic)
else:
self._scales_func.append(scales.solve_advection)
else:
msg = "SCALES library for advection works only with "
msg += "3D scalar fields or 3D 3-components vectors."
raise RuntimeError(msg)
## Backend methods
# DirectionalOperatorBase
@classmethod
def supported_dimensions(cls):
return (2,3)
def _initialize_mem_layout(self):
"""Convert C-contiguous numpy arrays to Fortran-ordered data if needed."""
changeLayout = False
if all([__.flags['C_CONTIGUOUS']
for _ in [self.dvelocity, ] + self.dadvected_fields_out.values()
for __ in _.data]):
changeLayout = True
dv = tuple(np.asfortranarray(_) for _ in self.dvelocity.buffers)
dadf = tuple(tuple(np.asfortranarray(_) for _ in f.buffers)
for f in self.dadvected_fields_out.values())
elif all([__.flags['F_CONTIGUOUS']
for _ in [self.dvelocity, ] + self.dadvected_fields_out.values()
for __ in _.data ]):
dv = self.dvelocity
dadf = self.dadvected_fields_out.values()
else:
raise RuntimeError("Memory layout comatibility issue")
return changeLayout, dv, dadf
def _finalize_mem_layout(self, adf):
"""Get back with C-contiguous data.
We enforce a sign change for data because of the change
of direction ordering ZYX."""
for df, df_f in zip(self.dadvected_fields_out.values(), adf):
for df_b, df_f_b in zip(df, df_f):
df_b[...] = np.ascontiguousarray(df_f_b)
@op_apply
def apply(self, **kwds):
super(ScalesAdvection, self).apply(**kwds)
dt = self.dt()
# Call scales advection
for (adFi, adFo) in zip(self.dadvected_fields_in.values(),
self.dadvected_fields_out.values()):
for (adFi_c, adFo_c) in zip(adFi, adFo):
adFo_c[...] = adFi_c[...]
changeLayout, v, adf = self._initialize_mem_layout()
# Data are given to Scales in a ZYX order, so we adjust velocity components
for (adFo, fun) in zip(adf, self._scales_func):
adFo = fun(dt, v[2], v[1], v[0], *adFo)
if changeLayout:
self._finalize_mem_layout(adf)
@classmethod
def supports_mpi(cls):
return True
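The layout handling in `_initialize_mem_layout`/`_finalize_mem_layout` is a plain C-order/Fortran-order round trip; a minimal numpy illustration for a single buffer:

import numpy as np

a = np.arange(24, dtype=np.float64).reshape(2, 3, 4)  # C-contiguous HySoP buffer
af = np.asfortranarray(a)           # Fortran-ordered copy handed to Scales
# ... the Fortran routines update af in-place, in Z-Y-X ordering ...
a[...] = np.ascontiguousarray(af)   # copy the result back into the C buffer
assert a.flags['C_CONTIGUOUS']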
from hysop.tools.numpywrappers import npw
from hysop.tools.decorators import debug
from hysop.tools.types import check_instance, to_tuple
......@@ -24,33 +23,33 @@ class PythonDirectionalAdvection(DirectionalAdvectionBase, HostDirectionalOperat
def get_work_properties(self):
requests = super(PythonDirectionalAdvection, self).get_work_properties()
V = self.dvelocity[self.splitting_direction]
cr = self.compute_granularity
shape = tuple(self.dvelocity.compute_resolution[cr:])
nbuffers = { Euler:0, RK2:2, RK3:3, RK4:4 }
time_integrator = self.time_integrator
nb_rcomponents = max(nbuffers[time_integrator], 2)
nb_icomponents = 1 if (time_integrator is Euler) else 2
request = MemoryRequest.empty_like(a=V, shape=shape,
nb_components=nb_rcomponents)
requests.push_mem_request('rtmp', request)
request = MemoryRequest.empty_like(a=V, shape=shape,
nb_components=nb_icomponents, dtype=npw.int32)
requests.push_mem_request('itmp', request)
request = MemoryRequest.empty_like(a=V, shape=shape[:-1]+(1,),
nb_components=1, dtype=npw.int32)
requests.push_mem_request('ixtmp', request)
nscalars = sum(field.nb_components for field in self.advected_fields_in)
if self.is_inplace:
rg = self.remesh_ghosts
sshape = shape[:-1] + (shape[-1]+2*rg,)
request = MemoryRequest.empty_like(a=V, shape=sshape,
nb_components=nscalars)
requests.push_mem_request('stmp', request)
self.nscalars = nscalars
......@@ -65,8 +64,9 @@ class PythonDirectionalAdvection(DirectionalAdvectionBase, HostDirectionalOperat
self.dixtmp = work.get_buffer(self, 'ixtmp', handle=True)
if self.is_inplace:
self.dstmp = work.get_buffer(self, 'stmp', handle=True)
assert self.is_bilevel is None, "Python bilevel advection is not implemented yet."
self._prepare_apply()
def _prepare_apply(self):
cr = self.compute_granularity
......@@ -74,7 +74,7 @@ class PythonDirectionalAdvection(DirectionalAdvectionBase, HostDirectionalOperat
soutputs = self.advected_fields_out
dsinputs = self.dadvected_fields_in
dsoutputs = self.dadvected_fields_out
velo_mesh = self.dvelocity.mesh
velo_mesh_iterator = velo_mesh.build_compute_mesh_iterator(cr)
X0 = velo_mesh.local_compute_coords[-1]
......@@ -82,7 +82,7 @@ class PythonDirectionalAdvection(DirectionalAdvectionBase, HostDirectionalOperat
inv_dx = (1.0/dx)
velo_compute_view = velo_mesh.local_compute_slices
self._velocity_mesh_attributes = (velo_mesh_iterator, dx, inv_dx, velo_compute_view, X0)
dsinputs0 = dsinputs.values()[0]
scalar_mesh = dsinputs0.mesh
scalar_mesh_iterator = scalar_mesh.build_compute_mesh_iterator(cr)
......@@ -94,12 +94,12 @@ class PythonDirectionalAdvection(DirectionalAdvectionBase, HostDirectionalOperat
self._last_scalar_axe_mesh_indices = tuple(npw.ndindex(
(dsinputs0.compute_resolution[-1],)))
in_compute_slices, out_compute_slices = {}, {}
in_ghosts, out_ghosts = {}, {}
in_shapes, out_shapes = {}, {}
for (ifield,ofield) in zip(sinputs, soutputs):
Sin = dsinputs[ifield]
Sout = dsoutputs[ofield]
in_compute_slices[Sin] = Sin.compute_slices
......@@ -111,11 +111,11 @@ class PythonDirectionalAdvection(DirectionalAdvectionBase, HostDirectionalOperat
self._inout_compute_slices = (in_compute_slices, out_compute_slices)
self._inout_ghosts = (in_ghosts, out_ghosts)
self._inout_shapes = (in_shapes, out_shapes)
@op_apply
def apply(self, **kwds):
super(PythonDirectionalAdvection, self).apply(**kwds)
dt = self.dt() * self.dt_coeff
self.counter += 1
......@@ -145,13 +145,13 @@ class PythonDirectionalAdvection(DirectionalAdvectionBase, HostDirectionalOperat
ridx: int32 empty input buffer, destroyed
"""
N = dX.shape[-1]
Iy = I[:-1]
Ix = Ig[-1]
dX[...] *= inv_dx
lidx[...] = npw.floor(dX).astype(npw.int32)
dX[...] -= lidx
alpha = dX # dX now contains alpha
lidx[...] += Ix
......@@ -176,7 +176,7 @@ class PythonDirectionalAdvection(DirectionalAdvectionBase, HostDirectionalOperat
Vout[...] += Vin[Iy+(lidx,)]
def _compute_advection(self, dt):
P = self.dpos
Vd = self.dvelocity(self.splitting_direction)
rtmp = self.drtmp
itmp = self.ditmp
......@@ -186,7 +186,7 @@ class PythonDirectionalAdvection(DirectionalAdvectionBase, HostDirectionalOperat
rk_scheme = self.time_integrator
(mesh_it,dx,inv_dx,view,X0) = self._velocity_mesh_attributes
_Vd = Vd[view]
Vd = Vd[view[:-1]+(slice(None),)]
......@@ -201,7 +201,7 @@ class PythonDirectionalAdvection(DirectionalAdvectionBase, HostDirectionalOperat
# fill position with first direction coordinates on the whole compute domain
for (idx,_,I,Ig) in mesh_it.iter_compute_mesh():
Pi = P[idx]
Vi = Vd[idx]
_Vi = _Vd[idx] # Vi without ghosts
if rk_scheme.name() == 'Euler':
......@@ -212,7 +212,7 @@ class PythonDirectionalAdvection(DirectionalAdvectionBase, HostDirectionalOperat
(lidx,ridx) = itmp
(dX0,V1) = rtmp
dX0[...] = _Vi
dX0[...] *= (0.5*dt)
self._interp_velocity(Vi,V1,dX0,I,Ig,lidx,ridx,inv_dx,is_periodic)
Pi[...] = V1
Pi[...] *= dt
......@@ -223,15 +223,15 @@ class PythonDirectionalAdvection(DirectionalAdvectionBase, HostDirectionalOperat
dXk[...] = _Vi
dXk[...] *= (0.5*dt)
self._interp_velocity(Vi,V1,dXk,I,Ig,lidx,ridx,inv_dx,is_periodic)
dXk[...] = V1
dXk[...] *= (0.5*dt)
self._interp_velocity(Vi,V2,dXk,I,Ig,lidx,ridx,inv_dx,is_periodic)
dXk[...] = V2
dXk[...] *= (1.0*dt)
self._interp_velocity(Vi,V3,dXk,I,Ig,lidx,ridx,inv_dx,is_periodic)
V0 = dXk
V0[...] = _Vi
V0[...] *= (1.0/6.0)
V1[...] *= (2.0/6.0)
......@@ -280,38 +280,38 @@ class PythonDirectionalAdvection(DirectionalAdvectionBase, HostDirectionalOperat
(mesh_it, dx, inv_dx, compute_view, N0) = self._scalar_mesh_attributes
if self.DEBUG:
print 'GLOBAL START'
print 'X0: {}'.format(X0[0])
print 'N0: {}'.format(N0)
print '***********'
scalar_advection_ghosts = self.scalar_advection_ghosts
remesh_ghosts = self.remesh_ghosts
remesh_kernel = self.remesh_kernel
P = 1 + remesh_kernel.n/2
is_periodic = self.is_periodic
R0,R1 = rtmp[:2]
I0 = itmp[0]
Ix = ixtmp[0]
if is_inplace:
S = stmp
is_periodic = False
N = R0.shape[-1]
(in_compute_slices, out_compute_slices) = self._inout_compute_slices
(in_ghosts, out_ghosts) = self._inout_ghosts
(in_shapes, out_shapes) = self._inout_shapes
input_buffer_views = {}
output_buffer_views = {}
last_scalar_axe_mesh_indices = self._last_scalar_axe_mesh_indices
pos[...] -= X0[0]
for (idx,_,I,_) in mesh_it.iter_compute_mesh():
Pi = pos[idx]
R0[...] = Pi
......@@ -319,7 +319,7 @@ class PythonDirectionalAdvection(DirectionalAdvectionBase, HostDirectionalOperat
I0[...] = npw.floor(R0).astype(I0.dtype)
R0[...] -= I0
R0[...] *= -1 # we need -alpha for the left point
if __debug__:
Imin, Imax = I0.min(), I0.max()
amin, amax = R0.min(), R0.max()
......@@ -329,35 +329,35 @@ class PythonDirectionalAdvection(DirectionalAdvectionBase, HostDirectionalOperat
scalar_advection_ghosts)
assert (amin > -1.0)
assert (amax <= 0.0)
R0[...] -= P
I0[...] -= P
# prebuild data views
for ifield, ofield in zip(sinputs, soutputs):
Sin = dsinputs[ifield]
Sout = dsoutputs[ofield]
sin_view = tuple(idx[i] + in_ghosts[Sin][i] for i in xrange(cr))
sin_view += in_compute_slices[Sin][cr:]
dG = out_ghosts[Sout][-1] - remesh_ghosts
sout_view = tuple(idx[i] + out_ghosts[Sout][i] for i in xrange(cr))
sout_view += out_compute_slices[Sout][cr:-1]
sout_view += (slice(dG, out_shapes[Sout][-1]-dG),)
in_views = tuple(buf[sin_view] for buf in Sin.buffers)
out_views = tuple(buf[sout_view] for buf in Sout.buffers)
input_buffer_views[ifield] = in_views
output_buffer_views[ofield] = out_views
for q in xrange(-P+1, P+1):
I0[...] += 1
R0[...] += 1
R1[...] = remesh_kernel.gamma(R0)
sid=0
for ifield, ofield in zip(sinputs, soutputs):
for k in xrange(ifield.nb_components):
sin = input_buffer_views[ifield][k]
sout = output_buffer_views[ofield][k]
......@@ -377,7 +377,7 @@ class PythonDirectionalAdvection(DirectionalAdvectionBase, HostDirectionalOperat
if (q==P) and is_inplace:
sout[...] = Si
sid+=1
if self.DEBUG:
print 'S (before accumulation)'
......@@ -387,8 +387,8 @@ class PythonDirectionalAdvection(DirectionalAdvectionBase, HostDirectionalOperat
# sum remeshed ghosts
for sout in dsoutputs.values():
sout.accumulate_ghosts(directions=sout.dim-1, ghosts=self.remesh_ghosts)
if self.DEBUG:
print 'S (after accumulation, before ghost exchange)'
self.dadvected_fields_out.values()[0].print_with_ghosts(outer_ghosts=None)
......
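Stripped of ghost handling and the in-place bookkeeping, the remeshing loop above is a weighted scatter-add; a hedged 1D sketch (periodic grid, `gamma` standing in for `RemeshKernel.gamma`):

import numpy as np

def remesh_1d(values, pos, gamma, P, dx, n):
    # Remesh particle quantities 'values' located at 'pos' onto a periodic
    # grid of n points using a kernel with 2*P stencil points.
    out = np.zeros(n)
    q = pos / dx
    left = np.floor(q).astype(np.int64) - (P - 1)    # leftmost stencil index
    for k in range(2*P):
        idx = (left + k) % n
        w = gamma(q - (left + k))      # kernel weight for this grid point
        np.add.at(out, idx, w*values)  # scatter-add, mimics ghost accumulation
    return out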
import numpy as np
import sympy as sm
from hysop.tools.decorators import debug
from hysop.backend.host.host_directional_operator import HostDirectionalOperator
from hysop.constants import DirectionLabels, Implementation, StretchingFormulation, HYSOP_REAL
from hysop.tools.types import check_instance, to_tuple, to_list, first_not_None
from hysop.fields.continuous_field import Field
from hysop.parameters.scalar_parameter import ScalarParameter
......@@ -20,17 +21,17 @@ class PythonDirectionalStretching(DirectionalStretchingBase, HostDirectionalOper
msg='Formulation {} has not been implemented yet.'
msg=msg.format(formulation)
raise NotImplementedError(msg)
def discretize(self, **kwds):
super(PythonDirectionalStretching, self).discretize(**kwds)
dv, dw = self.dvelocity, self.dvorticity
stencil = self.stencil
stencil.replace_symbols({self.stencil.dx: dw.space_step[-1]})
G = max(stencil.L, stencil.R)[0]
S = self.time_integrator.stages
Wnames = ('W0', 'W1', 'W2')
assert dv.ghosts[-1] >= S*G
......@@ -48,7 +49,7 @@ class PythonDirectionalStretching(DirectionalStretchingBase, HostDirectionalOper
def get_work_properties(self):
requests = super(PythonDirectionalStretching, self).get_work_properties()
nb_components = 2 + self.time_integrator.stages
nb_components *= 3
buffers = MemoryRequest.empty_like(a=self.W['W0'], nb_components=nb_components,
......@@ -59,7 +60,7 @@ class PythonDirectionalStretching(DirectionalStretchingBase, HostDirectionalOper
def setup(self, work):
super(PythonDirectionalStretching, self).setup(work=work)
Wtmp = work.get_buffer(self, 'Wtmp', handle=True)
tmp = {}
buffers = {}
N = 2 + self.time_integrator.stages
......@@ -70,12 +71,19 @@ class PythonDirectionalStretching(DirectionalStretchingBase, HostDirectionalOper
self.buffers = buffers
self.tmp = tmp
# Configure C coeff access (vector or scalar)
try:
self._C = np.asarray([_ for _ in self.C], dtype=HYSOP_REAL)
except TypeError:
self._C = (HYSOP_REAL(self.C), ) * 3
@op_apply
def apply(self, **kwds):
super(PythonDirectionalStretching, self).apply(**kwds)
dt = self.dt() * self.dt_coeff
rk_scheme = self.time_integrator
Wnames, V, W, buffers, tmp = self.Wnames, self.V, self.W, self.buffers, self.tmp
wdir = Wnames[self.splitting_direction]
......@@ -85,11 +93,10 @@ class PythonDirectionalStretching(DirectionalStretchingBase, HostDirectionalOper
Vi = V[wn]
Wd = X[wdir]
self.stencil(a=Vi*Wd, out=out[wn], axis=-1)
out[wn][...] *= self._C[i]
return out
Wout = rk_scheme(Xin=W, RHS=rhs, Xout=tmp, buffers=buffers, dt=dt)
for wn in Wnames:
W[wn][...] = Wout[wn]
self.ghost_exchanger.exchange_ghosts()
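The `rk_scheme(Xin=W, RHS=rhs, ...)` call above follows the generic explicit Runge-Kutta pattern; for reference, a minimal RK2 (midpoint) step for a single numpy state (illustrative, not the hysop integrator API):

def rk2_step(w, rhs, dt):
    # Explicit midpoint rule: one RHS evaluation at t, one at t + dt/2.
    k1 = rhs(w)
    k2 = rhs(w + 0.5*dt*k1)
    return w + dt*k2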
"""
@file enstrophy.py
Enstrophy solver python backend.
"""
from hysop.tools.decorators import debug
from hysop.core.graph.graph import op_apply
from hysop.operator.base.enstrophy import EnstrophyBase
from hysop.backend.host.host_operator import HostOperator
from hysop.tools.numpywrappers import npw
class PythonEnstrophy(EnstrophyBase, HostOperator):
"""Compute enstrophy of the given vorticity field."""
@debug
def __init__(self, **kwds):
super(PythonEnstrophy, self).__init__(**kwds)
assert self.mpi_params.size == 1
@op_apply
def apply(self, **kwds):
# Compute W dot W
wd = self.dWin
wdotw = self.dWdotW[0]
nbc = wd.nb_components
ic = wd.compute_slices
wdotw[...] = 0.
for i in xrange(nbc):
wdotw[...] += (wd[i][ic] ** 2)
# Compute enstrophy
local_enstrophy = self.coeff * npw.real_sum(wdotw)
# TODO reduce over all mpi process
self.enstrophy.value = local_enstrophy
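With `coeff = prod(space_step) / (rho_0 * prod(domain.length))` as set in `discretize`, the quantity computed above is the volume-averaged discrete enstrophy (modulo the pending MPI reduction noted in the TODO):

\mathcal{E} \;=\; \frac{1}{\rho_0\,|\Omega|} \int_\Omega \omega\cdot\omega \; dV
\;\approx\; \frac{\Delta x\,\Delta y\,\Delta z}{\rho_0\,|\Omega|} \sum_{i\,\in\,\text{grid}} \omega_i\cdot\omega_i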
from hysop import __DEBUG__
from hysop.deps import np, it
from hysop.constants import Basis
......@@ -11,7 +10,7 @@ from hysop.topology.topology_descriptor import TopologyDescriptor
from hysop.core.graph.computational_node import ComputationalGraphNode
from hysop.fields.continuous_field import Field
from hysop.fields.discrete_field import DiscreteField
def can_convert_basis_inplace(lbasis, rbasis):
return True
......@@ -20,18 +19,18 @@ def can_transpose_inplace(ltranspose, rtranspose):
class DiscreteFieldRequirements(object):
__slots__ = ('_operator', '_field', '_variables', '_topology', '_dim',
'_min_ghosts', '_max_ghosts', '_can_split',
'_axes', '_basis', '_registered', '_work_dim',
'_topology_descriptor', '_header')
_registered_requirements = set()
def __init__(self, operator, variables, field,
min_ghosts=None,
max_ghosts=None,
can_split=None,
basis=None,
axes=None,
_register=True,
......@@ -52,7 +51,7 @@ class DiscreteFieldRequirements(object):
self._registered_requirements.update(key)
super(DiscreteFieldRequirements, self).__init__(**kwds)
check_instance(field, Field)
check_instance(operator, ComputationalGraphNode, allow_none=(not _register))
check_instance(variables, dict, keys=Field, values=(Topology,TopologyDescriptor), allow_none=(not _register))
......@@ -63,10 +62,10 @@ class DiscreteFieldRequirements(object):
self._variables = variables
self._topology_descriptor = variables[field] if variables else None
self._dim = field.dim
self._header = '::{}[{}] requirements::\n'.format(getattr(operator, 'name', 'UnknownOperator'),
getattr(field, 'name', 'UnknownField'))
self._registered = _register
self.min_ghosts = min_ghosts
self.max_ghosts = max_ghosts
self.can_split = can_split
......@@ -84,12 +83,12 @@ class DiscreteFieldRequirements(object):
can_split = self._can_split,
basis = self._basis,
axes = self._axes)
def is_default(self):
return (self == self._default())
def _default(self):
return DiscreteFieldRequirements(self._operator, self._variables, self._field,
_register=False)
def __eq__(self, other):
......@@ -105,26 +104,26 @@ class DiscreteFieldRequirements(object):
def __hash__(self):
return id(self.operator) ^ id(self.variables) ^ id(self.field) ^ \
hash((to_tuple(self.min_ghosts), to_tuple(self.max_ghosts),
self.basis, self.tstates))
def ghost_str(self, array):
inf = u'+\u221e'
vals = [u''+str(x) if np.isfinite(x) else inf for x in array]
return u'[{}]'.format(u','.join(vals)).strip()
def __str__(self):
return u'{:15s} {:>10s}<=ghosts<{:<10s} can_split={} basis={} tstates={}'.format(
'{}::{}'.format(getattr(self.operator, 'name', 'UnknownOperator'),
getattr(self.field, 'name', 'UnknownField')),
self.ghost_str(self.min_ghosts), self.ghost_str(self.max_ghosts+1),
self.can_split.view(np.int8),
'[{}]'.format(','.join(str(basis)[:3] for basis in self.basis)) \
if self.basis else 'ANY',
'[{}]'.format(','.join(str(ts) for ts in self.tstates)) \
if self.tstates else 'ANY')
def get_axes(self):
return self._axes
def set_axes(self, axes):
......@@ -137,7 +136,7 @@ class DiscreteFieldRequirements(object):
return None
else:
return tuple(TranspositionState[self._dim](axes) for axes in all_axes)
def get_basis(self):
return self._basis
def set_basis(self, basis):
......@@ -150,14 +149,14 @@ class DiscreteFieldRequirements(object):
def get_min_ghosts(self):
return self._min_ghosts
def set_min_ghosts(self, min_ghosts):
self._min_ghosts = np.asarray(to_list(min_ghosts)
if (min_ghosts is not None) else [0]*self.workdim)
assert self.min_ghosts.size == self.workdim
def get_max_ghosts(self):
return self._max_ghosts
def set_max_ghosts(self, max_ghosts):
self._max_ghosts = np.asarray(to_list(max_ghosts)
if (max_ghosts is not None) else [np.inf]*self.workdim)
assert self.max_ghosts.size == self.workdim
......@@ -183,7 +182,7 @@ class DiscreteFieldRequirements(object):
min_ghosts = property(get_min_ghosts, set_min_ghosts)
max_ghosts = property(get_max_ghosts, set_max_ghosts)
basis = property(get_basis, set_basis)
axes = property(get_axes, set_axes)
tstates = property(get_tstates)
workdim = property(get_work_dim)
......@@ -191,7 +190,7 @@ class DiscreteFieldRequirements(object):
field = property(get_field)
variables = property(get_variables)
topology_descriptor = property(get_topology_descriptor)
def is_compatible_with(self, other):
assert self.field == other.field, 'field mismatch.'
if isinstance(other, DiscreteFieldRequirements):
......@@ -221,11 +220,11 @@ class DiscreteFieldRequirements(object):
# return False
# if (other.tstates != self.tstates) and \
# not can_transpose_inplace(
# other.tstates,
# self.tstates):
# return False
return True
def update_requirements(self, other):
assert self.is_compatible_with(other)
assert (self.basis == other.basis)
......@@ -237,18 +236,18 @@ class DiscreteFieldRequirements(object):
self.axes = self.axes.intersection(other.axes) if other.axes else self.axes
else:
self.axes = other.axes
def check_topology(self, topology=None):
topology = topology or self.variables[self.field]
check_instance(topology, Topology)
if topology.domain.dim != self.field.dim:
msg='{} Dimension mismatch between field and topology.\n field={}d, topology={}d.'
msg=msg.format(self._header, self.field.dim, topology.domain.dim)
raise RuntimeError(msg)
if (topology.global_resolution != self.topology_descriptor.global_resolution).any():
msg='{} Discretisation mismatch between requirement and topology.\n '
msg+=' requirement={}\n topology={}'
msg=msg.format(self._header,
self.topology_descriptor.global_resolution,
topology.global_resolution)
raise RuntimeError(msg)
......@@ -260,7 +259,7 @@ class DiscreteFieldRequirements(object):
msg='{} max ghosts constraint was not met.\n max={}, actual={}.'
msg=msg.format(self._header, self.max_ghosts, topology.ghosts)
raise RuntimeError(msg)
def check_discrete_topology_state(self, state):
from hysop.topology.cartesian_topology import CartesianTopologyState
check_instance(state, CartesianTopologyState)
......@@ -273,7 +272,7 @@ class DiscreteFieldRequirements(object):
msg='{} Transposition state mismatch between requirement and topology state.\n'
msg+=' reqs=[{}], state={}.'
msg=msg.format(self._header,
','.join([str(x) for x in self.tstates]),
state.tstate)
raise RuntimeError(msg)
......@@ -295,14 +294,14 @@ class DiscreteFieldRequirements(object):
class MultiFieldRequirements(object):
__slots__ = ('field', 'requirements', 'built')
def __init__(self, field):
self.field = field
self.requirements = {}
self.built = False
def copy(self):
requirements = { k:v.copy() for (k,v) in self.requirements.items() }
......@@ -310,7 +309,7 @@ class MultiFieldRequirements(object):
obj.built = self.built
obj.requirements = requirements
return obj
def as_dfr(self):
# return a DiscreteFieldRequirements if there is only one requirement
if self.nrequirements() == 0:
......@@ -335,14 +334,14 @@ class MultiFieldRequirements(object):
for td, req in zip(tds, reqs):
self.requirements.setdefault(td, set()).update(req)
def build_topologies(self):
if self.built:
return
for compatible_reqs in self._split():
compatible_reqs._build_compatible_topologies()
self.built = True
def all_compatible(self):
for topology_descriptor in self.requirements:
requirements = self.requirements[topology_descriptor]
......@@ -395,7 +394,7 @@ class MultiFieldRequirements(object):
assert can_split.any()
for req in unknown_topologies:
topo = req.topology_descriptor.choose_or_create_topology(known_topologies,
ghosts=ghosts, cutdirs=can_split)
known_topologies.append(topo)
req.set_and_check_topology(topo)
......@@ -405,19 +404,19 @@ class OperatorFieldRequirements(object):
__slots__ = ('_input_field_requirements', '_output_field_requirements')
def __init__(self, input_field_requirements=None,
output_field_requirements=None,
**kwds):
super(OperatorFieldRequirements, self).__init__(**kwds)
check_instance(input_field_requirements, dict, keys=Field,
values=MultiFieldRequirements, allow_none=True)
self._input_field_requirements = input_field_requirements or dict()
check_instance(output_field_requirements, dict, keys=Field,
values=MultiFieldRequirements, allow_none=True)
self._output_field_requirements = output_field_requirements or dict()
def get_input_field_requirements(self):
return self._input_field_requirements
def get_output_field_requirements(self):
......@@ -425,14 +424,14 @@ class OperatorFieldRequirements(object):
input_field_requirements = property(get_input_field_requirements)
output_field_requirements = property(get_output_field_requirements)
def update(self, requirements):
check_instance(requirements, OperatorFieldRequirements)
self.update_inputs(requirements._input_field_requirements)
self.update_outputs(requirements._output_field_requirements)
def update_inputs(self, input_field_requirements):
check_instance(input_field_requirements, dict, keys=Field,
values=(DiscreteFieldRequirements,MultiFieldRequirements,type(None)))
for ifield,ireqs in input_field_requirements.iteritems():
if (ireqs is not None):
......@@ -447,7 +446,7 @@ class OperatorFieldRequirements(object):
self._input_field_requirements[ifield] = ireqs
def update_outputs(self, output_field_requirements):
check_instance(output_field_requirements, dict, keys=Field,
values=(DiscreteFieldRequirements,MultiFieldRequirements))
for ofield,oreqs in output_field_requirements.iteritems():
oreqs = oreqs.copy()
......@@ -483,7 +482,7 @@ class OperatorFieldRequirements(object):
def iter_requirements(self):
"""
Iterates over (is_input, field, topology_descriptor, field_requirement) for
all inputs and outputs.
"""
it0 = it.izip_longest((True,), self.iter_input_requirements())
......@@ -495,7 +494,7 @@ class OperatorFieldRequirements(object):
Get unique requirement and topology descriptor for given field, if it exists.
This is a facility for ComputationalGraphOperators to retrieve their unique
per-field requirements.
If field is not an hysop.fields.continuous_field.Field, a TypeError is raised.
If field is not known, an AttributeError is raised.
If multiple topology_descriptors or requirements are present (i.e. there is no unicity),
......@@ -529,4 +528,3 @@ class OperatorFieldRequirements(object):
reqs.update(self._input_field_requirements.get(field, None),
self._output_field_requirements.get(field, None))
reqs.build_topologies()
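When several operators place requirements on the same field, reconciling them amounts to intersecting the admissible ghost intervals per axis; a hedged sketch of that logic (assumed semantics, simplified from the update machinery above):

import numpy as np

def merge_ghost_intervals(min_a, max_a, min_b, max_b):
    # Keep the tightest interval satisfying both requirements; an empty
    # intersection means no common topology can be built.
    gmin = np.maximum(min_a, min_b)
    gmax = np.minimum(max_a, max_b)
    if (gmin > gmax).any():
        raise ValueError('Incompatible ghost requirements.')
    return (gmin, gmax)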
"""
@file advection.py
Advection operator generator.
"""
from hysop.constants import Implementation
from hysop.tools.types import check_instance, to_list
from hysop.tools.decorators import debug
from hysop.fields.continuous_field import Field
from hysop.topology.cartesian_descriptor import CartesianTopologyDescriptors
from hysop.parameters.scalar_parameter import ScalarParameter
from hysop.core.graph.computational_node_frontend import \
ComputationalGraphNodeFrontend
class Advection(ComputationalGraphNodeFrontend):
"""
Interface the Scales fortran advection solver.
Available implementations are: Fortran
Available remeshing formulas:
* 'p_O2' : order 2 method, corrected to allow large CFL number,
untagged particles
* 'p_O4' : order 4 method, corrected to allow large CFL number,
untagged particles
* 'p_L2' : limited and corrected lambda 2
* 'p_M4' : Lambda_2,1 (=M'4) 4 point formula
* 'p_M6' (default) : Lambda_4,2 (=M'6) 6 point formula
* 'p_M8' : M8prime formula
* 'p_44' : Lambda_4,4 formula
* 'p_64' : Lambda_6,4 formula
* 'p_66' : Lambda_6,6 formula
* 'p_84' : Lambda_8,4 formula
Time integration:
* Runge-Kutta 2nd order
Splitting:
Computations are performed with a dimensional splitting as follows:
* 'strang' (2nd order):
* X-dir, half time step
* Y-dir, half time step
* Z-dir, full time step
* Y-dir, half time step
* X-dir, half time step
* 'classic' (1st order):
* X-dir, full time step
* Y-dir, full time step
* Z-dir, full time step
"""
@classmethod
def implementations(cls):
from hysop.backend.host.fortran.operator.scales_advection import \
ScalesAdvection
_implementations = {
Implementation.FORTRAN: ScalesAdvection
}
return _implementations
@classmethod
def default_implementation(cls):
return Implementation.FORTRAN
@debug
def __init__(self, velocity,
advected_fields,
variables,
dt,
advected_fields_out=None,
implementation=None,
base_kwds=None,
**kwds):
"""
Initialize a DirectionalAdvectionFrontend.
Parameters
----------
velocity: Field
continuous velocity field (all components)
advected_fields: Field or array like of Fields
instance or list of continuous fields to be advected.
advected_fields_out: Field or array like of Field, optional, defaults to None
advection output, if set to None, advection is done inplace
on a per variable basis.
variables: dict
Dictionary of continuous fields as keys and topologies as values.
dt: ScalarParameter
Timestep parameter that will be used for time integration.
implementation: Implementation, optional, defaults to None
target implementation, should be contained in available_implementations().
If None, implementation will be set to default_implementation().
base_kwds: dict, optional, defaults to None
Base class keywords arguments.
If None, an empty dict will be passed.
kwds:
Extra parameters passed towards the operator implementation.
Notes
-----
An implementation should at least support the following __init__ parameters:
velocity, advected_fields, advected_fields_out, variables,
direction, splitting_dim.
Extra keywords parameters are passed through kwds.
"""
advected_fields = to_list(advected_fields)
assert len(set(advected_fields)) == len(advected_fields)
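# Advection defaults to being done inplace: a missing output field
# (a None entry, or no output list at all) falls back to its matching
# input field.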
if (advected_fields_out is None):
advected_fields_out = advected_fields
else:
advected_fields_out = to_list(advected_fields_out)
assert len(advected_fields) == len(advected_fields_out)
assert len(set(advected_fields_out)) == len(advected_fields_out)
for i, field in enumerate(advected_fields_out):
if (field is None):
advected_fields_out[i] = advected_fields[i]
advected_fields = tuple(advected_fields)
advected_fields_out = tuple(advected_fields_out)
base_kwds = base_kwds or dict()
check_instance(velocity, Field)
check_instance(advected_fields, tuple, values=Field)
check_instance(advected_fields_out, tuple, values=Field)
check_instance(variables, dict, keys=Field,
values=CartesianTopologyDescriptors)
check_instance(base_kwds, dict, keys=str)
check_instance(dt, ScalarParameter)
super(Advection, self).__init__(
velocity=velocity, dt=dt,
advected_fields_in=advected_fields,
advected_fields_out=advected_fields_out,
variables=variables,
implementation=implementation,
base_kwds=base_kwds, **kwds)
from hysop.tools.numpywrappers import npw
from hysop.tools.decorators import debug
from hysop.tools.types import check_instance, InstanceOf
@@ -16,17 +14,17 @@ from hysop.parameters.scalar_parameter import ScalarParameter
class DirectionalAdvectionBase(object):
__default_method = {
TimeIntegrator: Euler,
Interpolation: Interpolation.LINEAR,
Remesh: Remesh.L2_1,
}
__available_methods = {
TimeIntegrator: InstanceOf(ExplicitRungeKutta),
Interpolation: Interpolation.LINEAR,
Remesh: (InstanceOf(Remesh), InstanceOf(RemeshKernel)),
}
@classmethod
def default_method(cls):
dm = super(DirectionalAdvectionBase, cls).default_method()
@@ -37,15 +35,15 @@ class DirectionalAdvectionBase(object):
am = super(DirectionalAdvectionBase, cls).available_methods()
am.update(cls.__available_methods)
return am
@debug
def __init__(self, velocity,
advected_fields_in, advected_fields_out,
variables, dt, velocity_cfl,
remesh_criteria_eps=None,
**kwds):
"""
Particle advection of field(s) in a given direction,
on any backend, with cartesian remeshing.
Parameters
@@ -67,7 +65,7 @@ class DirectionalAdvectionBase(object):
remeshing. By default every non-zero value is remeshed.
kwds:
Extra parameters passed to generated directional operators.
Attributes
----------
velocity: Field
@@ -77,7 +75,7 @@ class DirectionalAdvectionBase(object):
advected_fields_out: list
Tuple of output continuous fields.
"""
check_instance(velocity, Field)
check_instance(advected_fields_in, tuple, values=Field)
check_instance(advected_fields_out, tuple, values=Field)
@@ -104,7 +102,7 @@ class DirectionalAdvectionBase(object):
super(DirectionalAdvectionBase, self).__init__(
input_fields=input_fields,
output_fields=output_fields,
input_params=input_params,
output_params=output_params,
**kwds)
@@ -117,11 +115,11 @@ class DirectionalAdvectionBase(object):
self.velocity_cfl = velocity_cfl
self.is_inplace = is_inplace
@debug
def handle_method(self, method):
super(DirectionalAdvectionBase, self).handle_method(method)
remesh_kernel = method.pop(Remesh)
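# Promote a plain Remesh enum value to its RemeshKernel instance, so
# that downstream code always manipulates an actual kernel object.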
if isinstance(remesh_kernel, Remesh):
remesh_kernel = RemeshKernel.from_enum(remesh_kernel)
@@ -133,7 +131,7 @@ class DirectionalAdvectionBase(object):
@debug
def get_field_requirements(self):
requirements = super(DirectionalAdvectionBase, self).get_field_requirements()
direction = self.splitting_direction
is_inplace = self.is_inplace
@@ -141,29 +139,29 @@ class DirectionalAdvectionBase(object):
velocity_cfl = self.velocity_cfl
v_topo, v_requirements = requirements.get_input_requirement(velocity)
v_dx = v_topo.space_step
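# Velocity ghosts are only needed along the current splitting
# direction, where particles can leave the local subdomain by up to
# velocity_cfl cells.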
advection_ghosts = self.velocity_advection_ghosts(velocity_cfl)
min_velocity_ghosts = npw.integer_zeros(v_dx.shape)
min_velocity_ghosts[direction] = advection_ghosts
v_requirements.min_ghosts = min_velocity_ghosts
scalar = self.first_scalar
s_topo, _ = requirements.get_input_requirement(scalar)
s_dx = s_topo.space_step
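# Rescale the velocity-grid CFL to the scalar grid: with a velocity
# grid twice as coarse (v_dx = 2*s_dx), a particle crossing one
# velocity cell crosses two scalar cells.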
scalar_cfl = velocity_cfl * (v_dx[direction] / s_dx[direction])
scalar_advection_ghosts = self.velocity_advection_ghosts(scalar_cfl)
remesh_ghosts = self.scalars_out_cache_ghosts(scalar_cfl, self.remesh_kernel)
min_scalar_ghosts = npw.integer_zeros(s_dx.shape)
min_scalar_ghosts[direction] = remesh_ghosts
for sfield in self.advected_fields_out:
_s_topo, _s_requirements = requirements.get_output_requirement(sfield)
_s_dx = _s_topo.space_step
assert (_s_dx == s_dx).all()
_s_requirements.min_ghosts = min_scalar_ghosts
if is_inplace:
for sfield in self.advected_fields_in:
_s_topo, _s_requirements = requirements.get_input_requirement(sfield)
@@ -184,9 +182,9 @@ class DirectionalAdvectionBase(object):
def velocity_advection_ghosts(cls, velocity_cfl):
assert velocity_cfl > 0.0, 'cfl <= 0.0'
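# A particle travels at most velocity_cfl grid steps per substep, so
# floor(cfl)+1 ghost layers keep its interpolation stencil inside
# locally known data.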
return int(1+npw.floor(velocity_cfl))
@classmethod
def scalars_out_cache_ghosts(cls, scalar_cfl, remesh_kernel):
"""Return the minimum number of ghosts for remeshed scalars."""
assert scalar_cfl > 0.0, 'cfl <= 0.0'
assert remesh_kernel.n >= 2, 'Bad remeshing kernel.'
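# The remeshed-scalar ghost layer has to cover both the maximal
# particle displacement (floor(scalar_cfl) cells) and the remeshing
# kernel half-support, so it grows with the CFL and the kernel order.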
@@ -198,22 +196,27 @@ class DirectionalAdvectionBase(object):
def discretize(self):
super(DirectionalAdvectionBase, self).discretize()
dvelocity = self.input_discrete_fields[self.velocity]
# field -> discrete_field
dadvected_fields_in = {ifield: self.input_discrete_fields[ifield]
for ifield in self.advected_fields_in}
dadvected_fields_out = {ofield: self.output_discrete_fields[ofield]
for ofield in self.advected_fields_out}
self.dvelocity = dvelocity
self.dadvected_fields_in = dadvected_fields_in
self.dadvected_fields_out = dadvected_fields_out
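# Bilevel advection: when the velocity resolution differs from the
# advected fields' (typically a coarser velocity grid), is_bilevel is
# set to the velocity compute resolution, otherwise it stays None.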
self.is_bilevel = None
if any(self.dvelocity.compute_resolution != \
self.dadvected_fields_out.values()[0].compute_resolution):
self.is_bilevel = self.dvelocity.compute_resolution
@debug
def get_work_properties(self):
requests = super(DirectionalAdvectionBase, self).get_work_properties()
f = self.dadvected_fields_in.values()[0]
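# Request a ghost-free scratch buffer, shaped like an advected field,
# to hold particle positions during the directional sweep.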
pos, request = MemoryRequest.cartesian_dfield_like(name='position', dfield=f,
ghosts=0, nb_components=1, is_read_only=False)
requests.push_mem_request('position', request)
assert all(self.dadvected_fields_in.values()[0].compute_resolution == pos.resolution)
@@ -224,17 +227,17 @@ class DirectionalAdvectionBase(object):
def setup(self, work):
super(DirectionalAdvectionBase, self).setup(work)
self._buffer_allocations(work)
def _buffer_allocations(self, work):
"""
Allocate Host buffers for advected particle positions and
advected fields on particles.
"""
if (work is None):
raise ValueError('work is None.')
position_buffers = work.get_buffer(self, 'position')
self.dposition.honor_memory_request(position_buffers)
## Backend methods
# DirectionalOperatorBase
@classmethod
from abc import ABCMeta
from hysop.tools.types import check_instance, to_tuple, first_not_None
from hysop.tools.decorators import debug
from hysop.fields.continuous_field import Field
from hysop.tools.numpywrappers import npw
from hysop.core.memory.memory_request import MemoryRequest
from hysop.topology.cartesian_descriptor import CartesianTopologyDescriptors
@@ -22,13 +21,13 @@ class EnstrophyBase(object):
variables, name=None, pretty_name=None, **kwds):
"""
Initialize an enstrophy operator operating on CartesianTopologyDescriptors.
vorticity: Field
Input continuous vorticity field.
enstrophy: ScalarParameter
Enstrophy scalar output parameter.
rho: Field, optional
Input continuous density field, if not given,
defaults to 1.0 on the whole domain.
rho_0: float, optional
Reference density, defaults to 1.0.
@@ -36,40 +35,40 @@ class EnstrophyBase(object):
Output continuous field, will contain W.W (term before integration).
If WdotW is given, WdotW will contain W.W else this will be
a temporary field only usable during this operator's apply method.
Should have nb_components=1 and the same domain and discretization
as the vorticity.
variables: dict
dictionary of fields as keys and topologies as values.
kwds:
Keyword arguments that will be passed towards the implementation
operator __init__.
"""
check_instance(vorticity, Field)
check_instance(enstrophy, ScalarParameter)
check_instance(variables, dict, keys=Field, values=CartesianTopologyDescriptors)
check_instance(WdotW, Field, allow_none=True)
check_instance(rho, Field, allow_none=True)
check_instance(rho_0, float)
W = vorticity.pretty_name.decode('utf-8')
wdotw = u'{}\u22c5{}'.format(W,W)
if (WdotW is None):
assert (vorticity in variables), variables
WdotW = vorticity.tmp_like(name='WdotW', pretty_name=wdotw, nb_components=1)
variables[WdotW] = variables[vorticity]
input_fields = { vorticity: variables[vorticity] }
output_fields = { WdotW: variables[WdotW] }
output_params = { enstrophy.name: enstrophy }
if (rho is not None):
input_fields[rho] = variables[rho]
default_name = 'enstrophy'
default_pname = u'\u222b{}'.format(wdotw).encode('utf-8')
pretty_name = first_not_None(pretty_name, name, default_pname)
name = first_not_None(name, default_name)
@@ -78,11 +77,11 @@ class EnstrophyBase(object):
self.rho_0 = rho_0
self.WdotW = WdotW
self.enstrophy = enstrophy
super(EnstrophyBase, self).__init__(input_fields=input_fields,
output_fields=output_fields, output_params=output_params,
name=name, pretty_name=pretty_name, **kwds)
@debug
def discretize(self):
if self.discretized:
@@ -91,3 +90,5 @@ class EnstrophyBase(object):
self.dWin = self.input_discrete_fields[self.vorticity]
self.dWdotW = self.output_discrete_fields[self.WdotW]
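# Quadrature weight for the discrete integral: cell volume divided by
# (rho_0 * domain volume), so that enstrophy ~ coeff * sum(W.W) over
# all compute points.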
self.coeff = npw.prod(self.dWdotW.space_step)
self.coeff /= (self.rho_0 * npw.prod(self.dWdotW.domain.length))
"""
@file advection_dir.py
Directional advection operator generator.
@file diffusion.py
Diffusion operator generator.
"""
from hysop.constants import Implementation
from hysop.tools.types import check_instance