Commit c9568c7f authored by Franck Pérignon

Redistribute op, not finished

parent a09837d3
@@ -31,7 +31,8 @@ class SubMesh(object):
         ## (in each dir) in the global mesh
         self.global_start = g_start
         ## index of the upper point (in each dir), global mesh
-        self.global_end = self.global_start + self.resolution - 1
+        self.global_end = self.global_start + self.resolution - 1\
+            - 2 * topo.ghosts
         ## Mesh step size
         self.space_step = topo.domain.length / \
             (topo.globalMeshResolution - 1)
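Note on the hunk above: global_end now excludes the ghost layers from the global extent. A minimal sketch of the new formula, assuming resolution counts ghost points on both ends of each direction (all values below are illustrative only):

# Sketch of the new global_end computation (assumed meaning of the fields).
import numpy as np

ghosts = np.array([1, 1])         # stands for topo.ghosts (one layer per side)
resolution = np.array([11, 11])   # local resolution, ghost points included
global_start = np.array([0, 0])   # stands for g_start
global_end = global_start + resolution - 1 - 2 * ghosts
print(global_end)                 # -> [8 8]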
@@ -398,17 +398,7 @@ class Bridge(object):
         main_comm.Allgather([iglob2[main_rank, :], MPI.INT], [iglob2, MPI.INT])

         # Connectivity :
-        ### sendTo[:,0] rank of the targeted process in main_comm
-        ## sendTo[:,1:] local indices (start,end, ...)
-        ## of the points to be sent for each direction.
-        ## Example (in 2D)
-        ## sendTo = [[ 1, 2, 5, 1, 3]
-        ##           [ 2, 0, 3, 1, 2]]
-        ## means that the current process must send to process 1
-        ## the points of indices 2:5 in first dir and 1:3 in second dir
-        ## and to process 2 those of indices 0:3 and 1:2
-        self.sendTo = np.zeros((0, 1 + dom.dimension * 2), dtype=np.int)
+        sTo = np.zeros((0, 1 + dom.dimension * 2), dtype=np.int)
         indexFrom = []
         for dim in range(dom.dimension):
             indexFrom.append(range(iglob1[main_rank, dim * 2],
@@ -432,12 +422,12 @@ class Bridge(object):
                     break
             if hasInter:
-                self.sendTo = np.vstack((self.sendTo, line))
+                sTo = np.vstack((sTo, line))

         ## List of indices that should be copied from data of
         ## topoTo to data of topoFrom for the current process.
         ## --> no mpi messages
-        self.localFrom = np.zeros((2 * dom.dimension), dtype=np.int32)
+        localFrom = np.zeros((2 * dom.dimension), dtype=np.int32)
         hasInter = True
         for dim in range(dom.dimension):
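For reference, the comment block removed in the first Bridge hunk documented the row layout that the local array sTo keeps: column 0 is the rank of the target process in main_comm, followed by one (start, end) pair of local indices per direction. A purely illustrative 2D sketch:

# Hypothetical 2D connectivity table: one row per message to send.
import numpy as np

sTo = np.zeros((0, 1 + 2 * 2), dtype=int)   # 1 + dimension * 2 columns
sTo = np.vstack((sTo, [1, 2, 5, 1, 3]))     # to rank 1: indices 2:5 (x), 1:3 (y)
sTo = np.vstack((sTo, [2, 0, 3, 1, 2]))     # to rank 2: indices 0:3 (x), 1:2 (y)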
@@ -446,8 +436,8 @@ class Bridge(object):
             interRow = [k for k in indexFrom[dim] if k in indexTo]
             interRow.sort()
             if interRow.__len__():
-                self.localFrom[dim * 2] = interRow[0]
-                self.localFrom[dim * 2 + 1] = interRow[-1]
+                localFrom[dim * 2] = interRow[0]
+                localFrom[dim * 2 + 1] = interRow[-1]
             else:
                 hasInter = False
                 break
@@ -456,8 +446,15 @@ class Bridge(object):
         ## current process for topoTo from values on the
         ## same process for topoFrom.
         ## --> no mpi messages
-        self.localTo = topoTo.toIndexLocal(self.localFrom)
-        self.localFrom = topoFrom.toIndexLocal(self.localFrom)
+        localTo = topoTo.toIndexLocal(localFrom)
+        localFrom = topoFrom.toIndexLocal(localFrom)
+        self.ifrom = []
+        self.ito = []
+        for d in range(dom.dimension):
+            self.ifrom.append(slice(localFrom[2 * d],
+                                    localFrom[2 * d + 1] + 1))
+            self.ito.append(slice(localTo[2 * d],
+                                  localTo[2 * d + 1] + 1))

         # --- Compute globalConnectivity on process 0 and distribute it ---
         # Global Connectivity : each line corresponds to a message :
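The new ifrom/ito slice lists cover the part of the redistribution that stays on the current process (no MPI messages): values held on topoFrom are copied straight into the array attached to topoTo. A minimal usage sketch, where bridge, fieldFrom and fieldTo are hypothetical names for a Bridge instance and the local numpy arrays of the same variable on topoFrom and topoTo:

# Local (intra-process) part of the redistribution: plain numpy slicing.
fieldTo[tuple(bridge.ito)] = fieldFrom[tuple(bridge.ifrom)]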
@@ -467,10 +464,10 @@ class Bridge(object):
         # 6 in the second dir AND that process j must receive the same mesh
         # from process i. Nodes number given in the global index set.
         # Warning : process ranks are given in mpi_main.
-        globalConnectivity = np.zeros((self.sendTo.shape[0], 2 +
+        globalConnectivity = np.zeros((sTo.shape[0], 2 +
                                        dom.dimension * 2), dtype=np.int)
-        globalConnectivity[:, 1:] = self.sendTo.copy()
+        globalConnectivity[:, 1:] = sTo.copy()
         globalConnectivity[:, 0] = main_rank
         if main_rank == 0:
@@ -489,7 +486,7 @@ class Bridge(object):
         ## means that the current process must receive from process 1
         ## an array that will be saved at positions of indices 2:5
         ## in the first dir and 1:3 in second dir
-        self.recvFrom = np.empty((), dtype=np.int32)
+        rFrom = np.empty((), dtype=np.int32)
         if main_rank == 0:
             for fromRank in range(main_size):
                 cond1 = globalConnectivity[:, 1] == fromRank
@@ -502,18 +499,45 @@ class Bridge(object):
                 if(fromRank != main_rank):
                     main_comm.ssend(sol, dest=fromRank)
                 else:
-                    rFrom = sol.copy()
+                    rFrom = sol.copy()
         else:
-            self.recvFrom = main_comm.recv(source=0)
+            rFrom = main_comm.recv(source=0)

         # Final setup for recvFrom and sendTo : shift from
         # global indices to local ones.
-        self.recvFrom[:, 1::2] = self.recvFrom[:, 1::2] - start2[:]
-        self.recvFrom[:, 2::2] = self.recvFrom[:, 2::2] - start2[:]
-        self.sendTo[:, 1::2] = self.sendTo[:, 1::2] - start1[:]
-        self.sendTo[:, 2::2] = self.sendTo[:, 2::2] - start1[:]
+        localTo = topoTo.toIndexLocal(localFrom)
+        localFrom = topoFrom.toIndexLocal(localFrom)
+        for row in range(rFrom.shape[0]):
+            rFrom[row, 1:] = topoFrom.toIndexLocal(rFrom[row, 1:])
+        for row in range(sTo.shape[0]):
+            sTo[row, 1:] = topoTo.toIndexLocal(sTo[row, 1:])
+        ## recvFrom[rk] returns a list of indices that should be
+        ## received from process rk and copied into local field.
+        ## Usage : mpi.recv(rk, ..., yourField[recvFrom[rk]],...)
+        self.recvFrom = {}
+        ## sendTo[rk] returns a list of indices that should be
+        ## sent from current process to process rk.
+        ## Usage : mpi.send(rk, ..., yourField[sendTo[rk]],...)
+        self.sendTo = {}
+        row = 0
+        for rk in rFrom[:, 0]:
+            self.recvFrom[rk] = []
+            for d in range(dom.dimension):
+                self.recvFrom[rk].append(slice(rFrom[row, 2 * d + 1],
+                                               rFrom[row, 2 * d + 2] + 1))
+            row += 1
+        row = 0
+        for rk in sTo[:, 0]:
+            self.sendTo[rk] = []
+            for d in range(dom.dimension):
+                self.sendTo[rk].append(slice(sTo[row, 2 * d + 1],
+                                             sTo[row, 2 * d + 2] + 1))
+            row += 1

     @debug
     def setUp(self):
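With this commit, recvFrom and sendTo become dictionaries mapping a rank of main_comm to the list of local slices to receive into or send from, as the usage comments in the last hunk suggest. A minimal exchange sketch, assuming bridge is a Bridge instance and field is the local numpy array being redistributed (both names are illustrative; mpi4py's pickle-based send/recv is used for brevity):

# Hypothetical exchange loop built on the new slice dictionaries.
from mpi4py import MPI

comm = MPI.COMM_WORLD   # stands for main_comm in the class above
requests = [comm.isend(field[tuple(sl)].copy(), dest=rk)
            for rk, sl in bridge.sendTo.items()]
for rk, sl in bridge.recvFrom.items():
    field[tuple(sl)] = comm.recv(source=rk)
for req in requests:
    req.wait()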