diff --git a/hysop/numerics/update_ghosts.py b/hysop/numerics/update_ghosts.py
index 07bbc5e34e2f37b55e26a83d39d0dbf54efd05fa..7e6df40c2baca2ad51dc5c7da0d24a994ce42148 100644
--- a/hysop/numerics/update_ghosts.py
+++ b/hysop/numerics/update_ghosts.py
@@ -1,8 +1,25 @@
-"""
-@file numerics/update_ghosts.py
-
-Update ghost points for a list of numpy arrays
+"""Update ghost points of a list of numpy arrays for a given topology.
+
+Setup for send/recv process of ghosts points for a list
+of numpy arrays, for a given topology.
+
+.. currentmodule:: hysop.numerics.update_ghosts
+
+* :class:`~UpdateGhosts` : update ghosts layers in a 'classical' way
+* :class:`~UpdateGhostsFull` : update also points in the corners of the grid,
+i.e. at the intersection of ghosts points lines.
+
+Usage :
+
+.. code::
+
+    # init
+    up = UpdateGhosts(topo, 2)
+    ...
+    # update
+    up([field1, field2])
+
 """
 from hysop.constants import debug, PERIODIC, HYSOP_MPI_REAL
 import hysop.tools.numpywrappers as npw
 
@@ -10,25 +27,28 @@ import numpy as np
 
 
 class UpdateGhosts(object):
-    """
-    Ghost points synchronization for a list of numpy arrays
+    """Ghost points synchronization for a list of numpy arrays
     """
 
     @debug
-    def __init__(self, topo, nbElements):
+    def __init__(self, topo, nb_elements):
         """
-        Setup for send/recv process of ghosts points for a list
-        of numpy arrays, for a given topology.
-
-        @param topology : the topology common to all fields.
-        @param nbElements : max number of arrays that will be update
-        at each call.
-        nbElements and memshape will be used to allocate memory for local
-        buffers used for send-recv process.
+        Parameters
+        ----------
+        topo : :class:`hysop.mpi.topology.Cartesian`
+            data and mpi process distribution
+        nb_elements : int
+            number of arrays that will be updated
+            at each call.
+
+        Notes
+        ------
+        * nb_elements and topo.mesh.resolution will be used to
+          allocate memory for local buffers used for send-recv process.
         """
-        ## The mpi topology and mesh distribution
+        # The mpi topology and mesh distribution
         self.topology = topo
-        ## Ghost layer
+        # Ghost layer
         self.ghosts = self.topology.mesh.discretization.ghosts
         # Indices of points to be filled from previous neighbour
         # Each component is a slice and corresponds to a direction in
@@ -49,12 +69,12 @@
 
         self._setup_slices()
 
-        ## shape of numpy arrays to be updated.
+        # shape of numpy arrays to be updated.
         self.memshape = tuple(self.topology.mesh.resolution)
         # length of memory required to save one numpy array
        self._memoryblocksize = np.prod(self.memshape)
-        ## Number of numpy arrays that will be updated
-        self.nbElements = nbElements
+        # Number of numpy arrays that will be updated
+        self.nb_elements = nb_elements
         if self.topology.size > 1:  # else no need to set buffers ...
             # Size computation below assumes that what we send in one
             # dir has the same size as what we receive from process in the
@@ -66,13 +86,14 @@
 
             for d in exchange_dir:
                 buffsize = 0
-                buffsize += temp[self._g_tonext[d]].size * self.nbElements
+                buffsize += temp[self._g_tonext[d]].size * self.nb_elements
                 self._sendbuffer.append(npw.zeros((buffsize)))
                 self._recvbuffer.append(npw.zeros((buffsize)))
 
     def _setup_slices(self):
-        """
-        Compute slices to send and recieve ghosts values.
+        """Compute slices used to describe what to send
+        and receive in ghosts points.
+
         """
         defslice = [slice(None, None, None)] * self._dim
         nogh_slice = [slice(0)] * self._dim
@@ -89,8 +110,8 @@
                 self._g_tonext[d][d] = slice(-2 * self.ghosts[d],
                                              -self.ghosts[d])
 
-                otherDim = [x for x in xrange(self._dim) if x != d]
-                for d2 in otherDim:
+                other_dim = self._other_dim(d)
+                for d2 in other_dim:
                     self._g_fromprevious[d][d2] = slice(self.ghosts[d2],
                                                         -self.ghosts[d2])
                     self._g_fromnext[d][d2] = slice(self.ghosts[d2],
@@ -105,26 +126,31 @@
                 self._g_toprevious.append(list(nogh_slice))
                 self._g_tonext.append(list(nogh_slice))
 
+    def _other_dim(self, d):
+        return [x for x in xrange(self._dim) if x != d]
+
     def __call__(self, variables):
         return self.apply(variables)
 
-    def applyBC(self, variables):
-        """
-        Apply boundary conditions for non-distributed directions (only).
-        @param variables : a list of ndarrays
+    def apply_bc(self, variables):
+        """Apply boundary conditions for non-distributed directions (only).
+
+        Parameters
+        ----------
+        variables : a list of ndarrays
+
         Note that in distributed directions, BC are automatically set
         during ghosts update (apply).
-        """
+        """
         assert (self.topology.domain.boundaries == PERIODIC).all(),\
             'Only implemented for periodic boundary conditions.'
         assert isinstance(variables, list)
         dirs = [d for d in xrange(self._dim)
                 if self.topology.shape[d] == 1]
         for d in dirs:
-            self._applyBC_in_dir(variables, d)
+            self._apply_bc_in_dir(variables, d)
 
-    def _applyBC_in_dir(self, variables, d):
+    def _apply_bc_in_dir(self, variables, d):
         """Apply periodic boundary condition in direction d."""
         for v in variables:
             assert v.shape == self.memshape
@@ -148,7 +174,7 @@
             i += 1
         # End of loop through send/recv directions.
         # Apply boundary conditions for non-distributed directions
-        self.applyBC(variables)
+        self.apply_bc(variables)
 
     def _apply_in_dir(self, variables, d, i):
         """Communicate ghosts values in direction d for neighbour
@@ -174,7 +200,7 @@
                            recvbuf=self._recvbuffer[i],
                            source=from_rk, recvtag=from_rk)
 
-        # 3 - Print recvbuffer back to variables and update sendbuffer
+        # 3 - fill variables with recvbuffer and update sendbuffer
         # for next send
         pos = 0
         nextpos = 0
@@ -193,7 +219,7 @@
                            dest=dest_rk, sendtag=rank,
                            recvbuf=self._recvbuffer[i],
                           source=from_rk, recvtag=from_rk)
-        # 5 - Print recvbuffer back to variables.
+        # 5 - fill variables with recvbuffer
         pos = 0
         nextpos = 0
         for v in variables:
@@ -204,68 +230,25 @@
 
 
 class UpdateGhostsFull(UpdateGhosts):
-    """
-    Ghost points synchronization for a list of numpy arrays
-    """
+    """Ghost points synchronization for a list of numpy arrays
 
-    @debug
-    def __init__(self, topo, nbElements):
-        """
-        Setup for send/recv process of ghosts points for a list
-        of numpy arrays, for a given topology.
-
-        @param topology : the topology common to all fields.
-        @param nbElements : max number of arrays that will be update
-        at each call.
-        nbElements and memshape will be used to allocate memory for local
-        buffers used for send-recv process.
-        This version differs from UpdateGhosts by computing also ghosts values
-        in edges and corners of the domain. The directions are computed in reversed order.
-        """
-        super(UpdateGhostsFull, self).__init__(topo, nbElements)
+    Notes
+    -----
+    * This version differs from UpdateGhosts
+      by computing also ghosts values
+      in edges and corners of the domain.
+      The directions are computed in reversed order.
 
-    def _setup_slices(self):
-        """
-        Computes slices to send and recieve ghosts values.
-        It assumes that directions are computed from an xrange() loop so that
-        ghosts in previous directions are completed.
-        """
-        defslice = [slice(None, None, None)] * self._dim
-        nogh_slice = [slice(0)] * self._dim
-        for d in xrange(self._dim):
-            if self.ghosts[d] > 0:
-                self._g_fromprevious.append(list(defslice))
-                self._g_fromprevious[d][d] = slice(self.ghosts[d])
-                self._g_fromnext.append(list(defslice))
-                self._g_fromnext[d][d] = slice(-self.ghosts[d], None, None)
-                self._g_toprevious.append(list(defslice))
-                self._g_toprevious[d][d] = slice(self.ghosts[d],
-                                                 2 * self.ghosts[d], None)
-                self._g_tonext.append(list(defslice))
-                self._g_tonext[d][d] = slice(-2 * self.ghosts[d],
-                                             -self.ghosts[d])
-
+    """
-                ## Slices for other directions corresponding to directions not
-                ## yet exchanged : x > d. For directions x < d, the slices is a
-                ## full slice that includes ghosts. This assumes that directions
-                ## x < d have already been computed (by communications or local
-                ## exchanges)
-                # if d == 0:
-                otherDim = [x for x in xrange(self._dim) if x > d]
-                for d2 in otherDim:
-                    self._g_fromprevious[d][d2] = slice(self.ghosts[d2],
-                                                        -self.ghosts[d2])
-                    self._g_fromnext[d][d2] = slice(self.ghosts[d2],
-                                                    -self.ghosts[d2])
-                    self._g_toprevious[d][d2] = slice(self.ghosts[d2],
-                                                      -self.ghosts[d2])
-                    self._g_tonext[d][d2] = slice(self.ghosts[d2],
-                                                  -self.ghosts[d2])
-            else:
-                self._g_fromprevious.append(list(nogh_slice))
-                self._g_fromnext.append(list(nogh_slice))
-                self._g_toprevious.append(list(nogh_slice))
-                self._g_tonext.append(list(nogh_slice))
+
+    def _other_dim(self, d):
+        # Slices for other directions corresponding to directions not
+        # yet exchanged : x > d. For directions x < d, the slice is a
+        # full slice that includes ghosts. This assumes that directions
+        # x < d have already been computed (by communications or local
+        # exchanges)
+        # if d == 0:
+        return [x for x in xrange(self._dim) if x > d]
 
     @debug
     def apply(self, variables):
@@ -285,7 +268,7 @@
         i = 0
         for d in xrange(self._dim):
             if d in local_bc_dir:
-                self._applyBC_in_dir(variables, d)
+                self._apply_bc_in_dir(variables, d)
             elif d in exchange_dir:
                 self._apply_in_dir(variables, d, i)
                 # update index in neighbours list
diff --git a/hysop/operator/differential.py b/hysop/operator/differential.py
index 51adf541d4e4e2a86704ef4c6df0433371d83b11..676d635b04ce0e94f2fa78979e498069648790a9 100644
--- a/hysop/operator/differential.py
+++ b/hysop/operator/differential.py
@@ -19,6 +19,8 @@ from hysop.numerics.finite_differences import FiniteDifference
 import hysop.default_methods as default
 from abc import ABCMeta, abstractmethod
 from hysop.numerics.differential_operations import Curl as NumCurl
+from hysop.numerics.differential_operations import DivAdvection as NumDA
+from hysop import __FFTW_ENABLED__
 
 
 class Differential(Computational):
@@ -105,38 +107,23 @@
     """
 
     def _init_space_discr_method(self):
-        if self.method[SpaceDiscretisation] is 'fftw':
+        if self.method[SpaceDiscretisation] is 'fftw' and __FFTW_ENABLED__:
             op_class = CurlFFT
-        elif self.method[SpaceDiscretisation].mro()[1] is FiniteDifference:
-            op_class = CurlFD
+        elif not isinstance(self.method[SpaceDiscretisation], str):
+            if self.method[SpaceDiscretisation].mro()[1] is FiniteDifference:
+                op_class = CurlFD
         else:
             raise ValueError("The required Space Discretisation is\
                 not available for Curl.")
         return op_class
 
     def get_work_properties(self):
-        """Get properties of internal work arrays. Must be call after discretize
-        but before setup.
-
-        Returns
-        -------
-        dictionnary
-        keys = 'rwork' and 'iwork', values = list of shapes of internal
-        arrays required by this operator (real arrays for rwork, integer
-        arrays for iwork).
-        """
-        if not self._is_discretized:
-            msg = 'The operator must be discretized '
-            msg += 'before any call to this function.'
-            raise RuntimeError(msg)
+        super(Curl, self).get_work_properties()
         res = {'rwork': None, 'iwork': None}
         # Only FD methods need internal work space
         if self.method[SpaceDiscretisation].mro()[1] is FiniteDifference:
-            work_length = NumCurl.get_work_length()
-            shape = self.discreteFields[self.invar].data[0].shape
-            res['rwork'] = []
-            for _ in xrange(work_length):
-                res['rwork'].append(shape)
+            toporef = self.variables[self.invar]
+            res = NumCurl.get_work_properties(toporef)
         return res
 
 
@@ -152,6 +139,9 @@
             not available for Grad.")
         return op_class
 
+    def get_work_properties(self):
+        return {'rwork': None, 'iwork': None}
+
 
 class DivAdvection(Differential):
     """Computes outVar = -nabla .(invar . nabla(nvar))
@@ -163,3 +153,12 @@
             raise ValueError("The required Space Discretisation is\
                 not available for DivAdvection.")
         return op_class
+
+    def get_work_properties(self):
+        super(DivAdvection, self).get_work_properties()
+        res = {'rwork': None, 'iwork': None}
+        # Only FD methods need internal work space
+        if self.method[SpaceDiscretisation].mro()[1] is FiniteDifference:
+            toporef = self.variables[self.invar]
+            res = NumDA.get_work_properties(toporef)
+        return res
diff --git a/hysop/operator/discrete/differential.py b/hysop/operator/discrete/differential.py
index 6c9360a518c7a5b9ec798d7bc782259d535a6baa..424a8491d6cf8d1d3fd2ff2962c9136c3c89f230 100644
--- a/hysop/operator/discrete/differential.py
+++ b/hysop/operator/discrete/differential.py
@@ -13,7 +13,6 @@ from hysop.constants import debug
 from hysop.operator.discrete.discrete import DiscreteOperator
 from hysop.numerics.differential_operations import Curl, GradV,\
     DivAdvection
-import hysop.tools.numpywrappers as npw
 from abc import ABCMeta, abstractmethod
 from hysop.numerics.update_ghosts import UpdateGhosts
 from hysop.methods_keys import SpaceDiscretisation
@@ -23,13 +22,14 @@
 except ImportError:
     from hysop.fakef2py import fftw2py
 import hysop.default_methods as default
 from hysop.tools.profiler import profile
+from hysop.tools.misc import WorkSpaceTools
 
 
 class Differential(DiscreteOperator):
     """Abstract base class for discrete differential operators
     """
     __metaclass__ = ABCMeta
-
+
     # @debug
     # def __new__(cls, *args, **kw):
     #     return object.__new__(cls, *args, **kw)
@@ -118,18 +118,12 @@
         self._function = Curl(topo=self.invar.topology, work=self._rwork,
                               method=self.method[SpaceDiscretisation])
 
-    def _set_work_arrays(self, rwork, iwork):
-        worklength = Curl.get_work_length()
-        memshape = self.invar.data[0].shape
-        if rwork is None:
-            self._rwork = [npw.zeros(memshape) for _ in xrange(worklength)]
-        else:
-            assert isinstance(rwork, list), 'rwork must be a list.'
-            # --- External rwork ---
-            self._rwork = rwork
-            assert len(self._rwork) == worklength
-            for wk in self._rwork:
-                assert wk.shape == memshape
+    def _set_work_arrays(self, rwork=None, iwork=None):
+        work_prop = Curl.get_work_properties(self.invar.topology)
+        lwork = len(work_prop['rwork'])
+        memshape = work_prop['rwork'][0]
+        self._rwork = WorkSpaceTools.check_work_array(lwork, memshape,
+                                                      rwork)
 
     @debug
     @profile
@@ -175,18 +169,12 @@
             method=self.method[SpaceDiscretisation],
             work=self._rwork)
 
-    def _set_work_arrays(self, rwork, iwork):
-        worklength = DivAdvection.get_work_length()
-        memshape = self.invar.data[0].shape
-        if rwork is None:
-            self._rwork = [npw.zeros(memshape) for _ in xrange(worklength)]
-        else:
-            assert isinstance(rwork, list), 'rwork must be a list.'
-            # --- External rwork ---
-            self._rwork = rwork
-            assert len(self._rwork) == worklength
-            for wk in self._rwork:
-                assert wk.shape == memshape
+    def _set_work_arrays(self, rwork=None, iwork=None):
+        work_prop = DivAdvection.get_work_properties(self.invar.topology)
+        lwork = len(work_prop['rwork'])
+        memshape = work_prop['rwork'][0]
+        self._rwork = WorkSpaceTools.check_work_array(lwork, memshape,
+                                                      rwork)
 
     @debug
     @profile
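
Note (not part of the patch): a minimal usage sketch of the renamed ghost-update API and of the work-array pattern introduced above. It assumes `topo` is an already-built hysop.mpi.topology.Cartesian and that `field1`, `field2` are numpy arrays whose shape matches topo.mesh.resolution; every other name comes from the patched code.

.. code::

    # Hypothetical setup: 'topo', 'field1' and 'field2' are assumed to exist.
    from hysop.numerics.update_ghosts import UpdateGhosts, UpdateGhostsFull

    up = UpdateGhosts(topo, 2)           # buffers sized for 2 arrays per call
    up([field1, field2])                 # exchange face ghost layers

    up_full = UpdateGhostsFull(topo, 2)
    up_full([field1, field2])            # also fills edge/corner ghost points

    # Work-array pattern used by the patched discrete operators: ask the
    # numerical operation for the shapes it needs, then allocate (or check
    # user-provided) buffers through WorkSpaceTools.
    from hysop.numerics.differential_operations import Curl
    from hysop.tools.misc import WorkSpaceTools

    work_prop = Curl.get_work_properties(topo)
    rwork = WorkSpaceTools.check_work_array(len(work_prop['rwork']),
                                            work_prop['rwork'][0], None)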