From 4bb35e1ec00c8536b75182ec06d096a4048265d9 Mon Sep 17 00:00:00 2001
From: Thomas Witkowski <thomas.witkowski@gmx.de>
Date: Wed, 2 May 2012 16:18:13 +0000
Subject: [PATCH] I am so stuuupid, eh...

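Make the creation of interior boundaries mesh-level aware:

* ElementObjectDatabase::getIterateOwner() now takes the mesh level
  and considers only owner ranks that are part of this level's MPI
  group.
* InteriorBoundary::create() takes the MeshLevelData and a level
  number instead of a plain MPI communicator.
* Fix the iterator loops in InteriorBoundary: test the vector
  iterator against end() before dereferencing it.
* ParallelDofMapping: initialize the per-level DOF counters via
  resize() instead of explicit loops and drop the now redundant
  member initializers.
* MeshDistributor: add an intBoundarySd member for a later
  subdomain-level interior boundary; its creation and the
  corresponding DOF communicator remain disabled for now.

The owner of a boundary object is now the highest rank that shares
the object and belongs to the rank set of the given mesh level,
where the sentinel set {-1} stands for "all ranks". A minimal,
self-contained sketch of this rule (pickOwner and elementRanks are
illustrative names, not part of AMDiS):

    #include <algorithm>
    #include <set>
    #include <vector>

    // Highest rank within the level's MPI group that touches the
    // object; the sentinel set {-1} means all ranks participate.
    int pickOwner(const std::vector<int> &elementRanks,
                  const std::set<int> &levelRanks)
    {
      bool allRanks =
        (levelRanks.size() == 1 && *(levelRanks.begin()) == -1);

      int owner = -1;
      for (size_t i = 0; i < elementRanks.size(); i++)
        if (allRanks || levelRanks.count(elementRanks[i]))
          owner = std::max(owner, elementRanks[i]);

      return owner;
    }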
---
 AMDiS/src/parallel/ElementObjectDatabase.cc | 14 +++++++---
 AMDiS/src/parallel/ElementObjectDatabase.h  |  2 +-
 AMDiS/src/parallel/InteriorBoundary.cc      | 30 ++++++++++++++++++---
 AMDiS/src/parallel/InteriorBoundary.h       |  9 ++++---
 AMDiS/src/parallel/MeshDistributor.cc       | 14 ++++++++--
 AMDiS/src/parallel/MeshDistributor.h        |  2 ++
 AMDiS/src/parallel/ParallelDebug.cc         |  4 +--
 AMDiS/src/parallel/ParallelDofMapping.cc    | 10 +++----
 AMDiS/src/parallel/ParallelDofMapping.h     |  6 +----
 test/mpi/src/test0002.cc                    |  3 +--
 10 files changed, 66 insertions(+), 28 deletions(-)

diff --git a/AMDiS/src/parallel/ElementObjectDatabase.cc b/AMDiS/src/parallel/ElementObjectDatabase.cc
index 930b5fef..a2c91235 100644
--- a/AMDiS/src/parallel/ElementObjectDatabase.cc
+++ b/AMDiS/src/parallel/ElementObjectDatabase.cc
@@ -572,7 +572,7 @@ namespace AMDiS {
   }
 
 
-  int ElementObjectDatabase::getIterateOwner()
+  int ElementObjectDatabase::getIterateOwner(int level)
   {
     FUNCNAME("ElementObjectDatabase::getIterateOwner()");
 
@@ -593,9 +593,17 @@ namespace AMDiS {
      break;
    }
 
+   std::set<int> &levelRanks = levelData->getLevelRanks(level);
+   bool allRanks = (levelRanks.size() == 1 && *(levelRanks.begin()) == -1);
+   MSG("ALLRANKS [LEVEL%d]: %d (%d %d)\n", level, allRanks,
+       (int) levelRanks.size(), *(levelRanks.begin()));
+
    for (vector<ElementObjectData>::iterator it = objData->begin();
-	it != objData->end(); ++it)
-     owner = std::max(owner, (*macroElementRankMap)[it->elIndex]);
+	it != objData->end(); ++it) {
+     int elRank = (*macroElementRankMap)[it->elIndex];
+     if (allRanks || levelRanks.count(elRank))
+       owner = std::max(owner, elRank);
+   }
 
     return owner;
   }
diff --git a/AMDiS/src/parallel/ElementObjectDatabase.h b/AMDiS/src/parallel/ElementObjectDatabase.h
index 262971cf..1e0ba1a0 100644
--- a/AMDiS/src/parallel/ElementObjectDatabase.h
+++ b/AMDiS/src/parallel/ElementObjectDatabase.h
@@ -230,7 +230,7 @@ namespace AMDiS {
     }
 
     /// Returns the rank owner of the current iterator position.
-    int getIterateOwner();
+    int getIterateOwner(int level);
 
     /// Returns the rank owner of the current iterator position.
     int getIterateMaxLevel();
diff --git a/AMDiS/src/parallel/InteriorBoundary.cc b/AMDiS/src/parallel/InteriorBoundary.cc
index 863a5f79..2e3125a2 100644
--- a/AMDiS/src/parallel/InteriorBoundary.cc
+++ b/AMDiS/src/parallel/InteriorBoundary.cc
@@ -22,7 +22,8 @@ namespace AMDiS {
   using namespace std;
 
 
-  void InteriorBoundary::create(MPI::Intracomm &mpiComm,
+  void InteriorBoundary::create(MeshLevelData &levelData,
+				int level,
 				ElementObjectDatabase &elObjDb)
   { 
     FUNCNAME("InteriorBoundary::clear()");
@@ -34,7 +35,9 @@ namespace AMDiS {
     Mesh *mesh = elObjDb.getMesh();
     TEST_EXIT_DBG(mesh)("Should not happen!\n");
 
-    int mpiRank = mpiComm.Get_rank();
+    int mpiRank = levelData.getMpiComm(0).Get_rank();
+    MPI::Intracomm mpiComm = levelData.getMpiComm(level);
+    std::set<int> levelRanks = levelData.getLevelRanks(level);
 
     // === Create interior boundary data structure. ===
     
@@ -43,10 +46,31 @@ namespace AMDiS {
 
       while (elObjDb.iterate(geoIndex)) {
 	map<int, ElementObjectData>& objData = elObjDb.getIterateData();
+
+	// Test whether this is a boundary object of this rank.
 	if (!(objData.count(mpiRank) && objData.size() > 1))
 	  continue;
 
-	int owner = elObjDb.getIterateOwner();
+#if 0
+	// Test whether the boundary object defines an interior boundary within
+	// the ranks of the MPI group. If not, continue with the next element.
+	bool boundaryWithinMpiGroup = false;
+	if (levelRanks.size() == 1 && *(levelRanks.begin()) == -1) {
+	  boundaryWithinMpiGroup = true;
+	} else {
+	  for (map<int, ElementObjectData>::iterator it = objData.begin();
+	       it != objData.end(); ++it) {
+	    if (it->first != mpiRank && levelRanks.count(it->first)) {
+	      boundaryWithinMpiGroup = true;
+	      break;
+	    }
+	  }
+	}
+	if (!boundaryWithinMpiGroup)
+	  continue;
+#endif
+
+	int owner = elObjDb.getIterateOwner(level);
 	ElementObjectData& rankBoundEl = objData[mpiRank];
 	
 	AtomicBoundary bound;
diff --git a/AMDiS/src/parallel/InteriorBoundary.h b/AMDiS/src/parallel/InteriorBoundary.h
index 293b854b..52c5e3cb 100644
--- a/AMDiS/src/parallel/InteriorBoundary.h
+++ b/AMDiS/src/parallel/InteriorBoundary.h
@@ -41,7 +41,8 @@ namespace AMDiS {
    */
   class InteriorBoundary {
   public:
-    void create(MPI::Intracomm &mpiComm,
+    void create(MeshLevelData &levelData,
+		int level,
 		ElementObjectDatabase &elObjDb);
 
     RankToBoundMap& getOwn()
@@ -121,7 +122,7 @@ namespace AMDiS {
       {
 	do {
 	  ++vecIt;
-	} while (vecIt->maxLevel < level && vecIt != mapIt->second.end());
+	} while (vecIt != mapIt->second.end() && vecIt->maxLevel < level);
 
 	if (vecIt == mapIt->second.end()) {
 	  ++mapIt;
@@ -169,7 +170,7 @@ namespace AMDiS {
 	  vecIt = mapIt->second.begin();
 	  
 	  // Search for the next atomic boundary on the mesh level
-	  while (vecIt->maxLevel < level && vecIt != mapIt->second.end())
+	  while (vecIt != mapIt->second.end() && vecIt->maxLevel < level)
 	    ++vecIt;
 	  
 	  // If vector iterator is not at the end, we have found one and
@@ -190,7 +191,7 @@ namespace AMDiS {
 
       RankToBoundMap &bound;
 
-      int level;
+      int level;     
     };
   };
 }
diff --git a/AMDiS/src/parallel/MeshDistributor.cc b/AMDiS/src/parallel/MeshDistributor.cc
index fa91fcb0..fff980a5 100644
--- a/AMDiS/src/parallel/MeshDistributor.cc
+++ b/AMDiS/src/parallel/MeshDistributor.cc
@@ -1511,9 +1511,12 @@ namespace AMDiS {
 
     if (firstCall)
       elObjDb.create(partitionMap, levelData);
-
     elObjDb.updateRankData();
-    intBoundary.create(mpiComm, elObjDb);
+
+    intBoundary.create(levelData, 0, elObjDb);
+
+//     if (levelData.getLevelNumber() > 1)
+//       intBoundarySd.create(levelData, 1, elObjDb);
 
 #if (DEBUG != 0)
     ParallelDebug::printBoundaryInfo(*this);
@@ -1530,10 +1533,12 @@ namespace AMDiS {
     dofComm.init(0, levelData, feSpaces);
     dofComm.create(intBoundary);
 
+#if 0
     if (levelData.getLevelNumber() > 1) {
       dofCommSd.init(1, levelData, feSpaces);
       dofCommSd.create(intBoundary);
     }
+#endif
 
     // === If requested, create more information on communication DOFs. ===
     
@@ -1690,6 +1695,11 @@ namespace AMDiS {
   
     lastMeshChangeIndex = mesh->getChangeIndex();
 
+    MSG("TEST: %d %d\n", 
+	dofComm.getNumberDofs(dofComm.getSendDofs(), 0, feSpaces[0]),
+	dofComm.getNumberDofs(dofComm.getRecvDofs(), 0, feSpaces[0]));
+
+
 #if (DEBUG != 0)
     ParallelDebug::testDofContainerCommunication(*this);
 #endif
diff --git a/AMDiS/src/parallel/MeshDistributor.h b/AMDiS/src/parallel/MeshDistributor.h
index 24dfbd4f..0686801a 100644
--- a/AMDiS/src/parallel/MeshDistributor.h
+++ b/AMDiS/src/parallel/MeshDistributor.h
@@ -471,6 +471,8 @@ namespace AMDiS {
     /// partitioning the whole mesh. 
     InteriorBoundary intBoundary;
 
+    InteriorBoundary intBoundarySd;
+
     DofComm dofComm;
 
     DofComm dofCommSd;
diff --git a/AMDiS/src/parallel/ParallelDebug.cc b/AMDiS/src/parallel/ParallelDebug.cc
index b1ff9947..7d01d74d 100644
--- a/AMDiS/src/parallel/ParallelDebug.cc
+++ b/AMDiS/src/parallel/ParallelDebug.cc
@@ -578,7 +578,7 @@ namespace AMDiS {
       for (map<const FiniteElemSpace*, DofContainer>::iterator dcIt = it->second.begin(); 
 	   dcIt != it->second.end(); ++dcIt)
 	sendNumber[it->first] += dcIt->second.size();
-    
+
     map<int, int> recvNumber;
     for (it_type it = pdb.dofComm.getRecvDofs()[0].begin(); 
 	 it != pdb.dofComm.getRecvDofs()[0].end(); ++it)
@@ -592,7 +592,7 @@ namespace AMDiS {
 	 it != pdb.dofComm.getRecvDofs()[0].end(); ++it)
       stdMpi.recv(it->first);
     stdMpi.startCommunication();
-     
+
     int foundError = 0;
     for (map<int, int>::iterator it = stdMpi.getRecvData().begin();
 	 it != stdMpi.getRecvData().end(); ++it) {
diff --git a/AMDiS/src/parallel/ParallelDofMapping.cc b/AMDiS/src/parallel/ParallelDofMapping.cc
index 0566169c..4212489d 100644
--- a/AMDiS/src/parallel/ParallelDofMapping.cc
+++ b/AMDiS/src/parallel/ParallelDofMapping.cc
@@ -26,12 +26,10 @@ namespace AMDiS {
     nonRankDofs.clear();
     nonRankDofs.resize(nLevel);
 
-    for (int i = 0; i < nLevel; i++) {
-      nRankDofs[i] = 0;
-      nLocalDofs[i] = 0;
-      nOverallDofs[i] = 0;
-      rStartDofs[i] = 0;
-    }
+    nRankDofs.resize(nLevel, 0);
+    nLocalDofs.resize(nLevel, 0);
+    nOverallDofs.resize(nLevel, 0);
+    rStartDofs.resize(nLevel, 0);
   }
 
 
diff --git a/AMDiS/src/parallel/ParallelDofMapping.h b/AMDiS/src/parallel/ParallelDofMapping.h
index 00d2c9e7..1c2a5819 100644
--- a/AMDiS/src/parallel/ParallelDofMapping.h
+++ b/AMDiS/src/parallel/ParallelDofMapping.h
@@ -109,11 +109,7 @@ namespace AMDiS {
 	feSpace(NULL),
 	dofMap(1),
 	needGlobalMapping(false),
-	hasNonLocalDofs(false),
-	nRankDofs(1),
-	nLocalDofs(1),
-	nOverallDofs(1),
-	rStartDofs(1)
+	hasNonLocalDofs(false)
     {
       clear();
     }
diff --git a/test/mpi/src/test0002.cc b/test/mpi/src/test0002.cc
index 2f3c508a..e373ace6 100644
--- a/test/mpi/src/test0002.cc
+++ b/test/mpi/src/test0002.cc
@@ -39,11 +39,10 @@ BOOST_AUTO_TEST_CASE(amdis_mpi_feti)
   BOOST_REQUIRE(feti.getNumberOfDuals() == 48);
 
 
-
   Spreadsheet sheet;
   sheet.read("data/data0002a");
   vector<double> data = sheet.getData()[MPI::COMM_WORLD.Get_rank()];
-  
+
   const FiniteElemSpace *feSpace = ellipt.getFeSpace(0);
   vector<double> testData;
   testData.push_back(feti.getNumberOfRankPrimals());
-- 
GitLab