From c9568c7f0daaf41fa6f2810a85f24507bd03b1b1 Mon Sep 17 00:00:00 2001
From: Franck Pérignon <franck.perignon@imag.fr>
Date: Tue, 16 Apr 2013 08:22:23 +0000
Subject: [PATCH] Redistribute op, not finished

---
 HySoP/hysop/mpi/mesh.py     |  3 +-
 HySoP/hysop/mpi/topology.py | 78 ++++++++++++++++++++++++-------------
 2 files changed, 53 insertions(+), 28 deletions(-)

diff --git a/HySoP/hysop/mpi/mesh.py b/HySoP/hysop/mpi/mesh.py
index 67ab5526e..82fa18180 100644
--- a/HySoP/hysop/mpi/mesh.py
+++ b/HySoP/hysop/mpi/mesh.py
@@ -31,7 +31,8 @@ class SubMesh(object):
         ## (in each dir) in the global mesh
         self.global_start = g_start
         ## index of the upper point (in each dir), global mesh
-        self.global_end = self.global_start + self.resolution - 1
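+        ## (shifted by 2 * topo.ghosts so that ghost points do not
+        ##  enter the global numbering)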
+        self.global_end = self.global_start + self.resolution - 1 \
+            - 2 * topo.ghosts
         ## Mesh step size
         self.space_step = topo.domain.length / \
             (topo.globalMeshResolution - 1)
diff --git a/HySoP/hysop/mpi/topology.py b/HySoP/hysop/mpi/topology.py
index 96cf4b26c..6c4747dd9 100644
--- a/HySoP/hysop/mpi/topology.py
+++ b/HySoP/hysop/mpi/topology.py
@@ -398,17 +398,7 @@ class Bridge(object):
         main_comm.Allgather([iglob2[main_rank, :], MPI.INT], [iglob2, MPI.INT])
 
         # Connectivity :
-
-        ### sendTo[:,0] rank of the targeted process in main_comm
-        ##  sendTo[:,1:] local indices (start,end, ...)
-        ##  of the points to be sent for each direction.
-        ## Example (in 2D)
-        ## sendTo = [[ 1, 2, 5, 1, 3]
-        ##           [ 2, 0, 3, 1, 2]]
-        ## means that the current process must send to process 1
-        ## the points of indices 2:5 in first dir and 1:3 in second dir
-        ## and to process 2 those of indices 0:3 and 1:2
-        self.sendTo = np.zeros((0, 1 + dom.dimension * 2), dtype=np.int)
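+        ## sTo[:, 0] : rank of the targeted process in main_comm,
+        ## sTo[:, 1:] : (start, end) indices of the points to be sent,
+        ## for each direction (global indices here, converted to local
+        ## ones at the end of this setup).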
+        sTo = np.zeros((0, 1 + dom.dimension * 2), dtype=np.int)
         indexFrom = []
         for dim in range(dom.dimension):
             indexFrom.append(range(iglob1[main_rank, dim * 2],
@@ -432,12 +422,12 @@ class Bridge(object):
                     break
 
             if hasInter:
-                self.sendTo = np.vstack((self.sendTo, line))
+                sTo = np.vstack((sTo, line))
 
         ## List of indices that should be copied from data of
         ## topoTo to data of topoFrom for the current process.
         ## --> no mpi messages
-        self.localFrom = np.zeros((2 * dom.dimension), dtype=np.int32)
+        localFrom = np.zeros((2 * dom.dimension), dtype=np.int32)
 
         hasInter = True
         for dim in range(dom.dimension):
@@ -446,8 +436,8 @@ class Bridge(object):
             interRow = [k for k in indexFrom[dim] if k in indexTo]
             interRow.sort()
             if interRow.__len__():
-                self.localFrom[dim * 2] = interRow[0]
-                self.localFrom[dim * 2 + 1] = interRow[-1]
+                localFrom[dim * 2] = interRow[0]
+                localFrom[dim * 2 + 1] = interRow[-1]
             else:
                 hasInter = False
                 break
@@ -456,8 +446,15 @@ class Bridge(object):
         ## current process for topoTo from values on the
         ## same process for topoFrom.
         ## --> no mpi messages
-        self.localTo = topoTo.toIndexLocal(self.localFrom)
-        self.localFrom = topoFrom.toIndexLocal(self.localFrom)
+        localTo = topoTo.toIndexLocal(localFrom)
+        localFrom = topoFrom.toIndexLocal(localFrom)
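+        ## Slices (one per direction) over the points handled without
+        ## any mpi message : self.ifrom indexes the local data of
+        ## topoFrom, self.ito the local data of topoTo.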
+        self.ifrom = []
+        self.ito = []
+        for d in range(dom.dimension):
+            self.ifrom.append(slice(localFrom[2 * d],
+                                    localFrom[2 * d + 1] + 1))
+            self.ito.append(slice(localTo[2 * d],
+                                  localTo[2 * d + 1] + 1))
 
         # --- Compute globalConnectivity on process 0 and distribute it ---
         # Global Connectivity : each line corresponds to a message :
@@ -467,10 +464,10 @@ class Bridge(object):
         # 6 in the second dir AND that process j must receive the same mesh
         # from process i. Nodes number given in the global index set.
         # Warning : process ranks are given in mpi_main.
-        globalConnectivity = np.zeros((self.sendTo.shape[0], 2 +
+        globalConnectivity = np.zeros((sTo.shape[0], 2 +
                                        dom.dimension * 2), dtype=np.int)
 
-        globalConnectivity[:, 1:] = self.sendTo.copy()
+        globalConnectivity[:, 1:] = sTo.copy()
         globalConnectivity[:, 0] = main_rank
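+        # Each row of globalConnectivity now reads :
+        # [rank of the sending process, rank of the receiving process,
+        #  (start, end) global indices of the exchanged points,
+        #  for each direction]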
 
         if main_rank == 0:
@@ -489,7 +486,7 @@ class Bridge(object):
         ## means that the current process must receive from process 1
         ## an array that will be saved at positions of indices 2:5
         ## in the first dir and 1:3 in second dir
-        self.recvFrom = np.empty((), dtype=np.int32)
+        rFrom = np.empty((), dtype=np.int32)
         if main_rank == 0:
             for fromRank in range(main_size):
                 cond1 = globalConnectivity[:, 1] == fromRank
@@ -502,18 +499,45 @@ class Bridge(object):
                 if(fromRank != main_rank):
                     main_comm.ssend(sol, dest=fromRank)
                 else:
-                    self.recvFrom = sol.copy()
+                    rFrom = sol.copy()
 
         else:
-            self.recvFrom = main_comm.recv(source=0)
+            rFrom = main_comm.recv(source=0)
 
         # Final setup for recvFrom and sendTo : shift from
         # global indices to local ones.
-
-        self.recvFrom[:, 1::2] = self.recvFrom[:, 1::2] - start2[:]
-        self.recvFrom[:, 2::2] = self.recvFrom[:, 2::2] - start2[:]
-        self.sendTo[:, 1::2] = self.sendTo[:, 1::2] - start1[:]
-        self.sendTo[:, 2::2] = self.sendTo[:, 2::2] - start1[:]
+        localTo = topoTo.toIndexLocal(localFrom)
+        localFrom = topoFrom.toIndexLocal(localFrom)
+        for row in range(rFrom.shape[0]):
+            rFrom[row, 1:] = topoFrom.toIndexLocal(rFrom[row, 1:])
+        for row in range(sTo.shape[0]):
+            sTo[row, 1:] = topoTo.toIndexLocal(sTo[row, 1:])
+
+        ## recvFrom[rk] returns the list of slices (one per direction)
+        ## that should be received from process rk and copied into the
+        ## local field.
+        ## Usage : mpi.recv(rk, ..., yourField[recvFrom[rk]], ...)
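+        ## Example (in 2D) : recvFrom = {1: [slice(2, 6), slice(1, 4)]}
+        ## means that the current process must receive from process 1
+        ## the points of local indices 2:5 in the first direction and
+        ## 1:3 in the second one.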
+        self.recvFrom = {}
+
+        ## sendTo[rk] returns the list of slices (one per direction)
+        ## that should be sent from the current process to process rk.
+        ## Usage : mpi.send(rk, ..., yourField[sendTo[rk]], ...)
+        self.sendTo = {}
+
+        row = 0
+        for rk in rFrom[:, 0]:
+            self.recvFrom[rk] = []
+            for d in range(dom.dimension):
+                self.recvFrom[rk].append(slice(rFrom[row, 2 * d + 1],
+                                               rFrom[row, 2 * d + 2] + 1))
+            row += 1
+
+        row = 0
+        for rk in sTo[:, 0]:
+            self.sendTo[rk] = []
+            for d in range(dom.dimension):
+                self.sendTo[rk].append(slice(sTo[row, 2 * d + 1],
+                                             sTo[row, 2 * d + 2] + 1))
+            row += 1
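+        # Intended use of these bridges, as an illustrative sketch only
+        # (not executed by this commit) : 'fieldFrom' and 'fieldTo' are
+        # placeholders for the local arrays attached to topoFrom and
+        # topoTo, ranks refer to main_comm, blocking calls are shown
+        # for brevity.
+        #   fieldTo[tuple(self.ito)] = fieldFrom[tuple(self.ifrom)]
+        #   for rk in self.sendTo:
+        #       main_comm.Ssend(fieldFrom[tuple(self.sendTo[rk])].copy(),
+        #                       dest=rk)
+        #   for rk in self.recvFrom:
+        #       buff = np.empty([sl.stop - sl.start
+        #                        for sl in self.recvFrom[rk]],
+        #                       dtype=fieldTo.dtype)
+        #       main_comm.Recv(buff, source=rk)
+        #       fieldTo[tuple(self.recvFrom[rk])] = buff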
 
     @debug
     def setUp(self):
-- 
GitLab