From 11b3b152834e28aa186ef6915697fda7e3326b66 Mon Sep 17 00:00:00 2001
From: Thomas Witkowski <thomas.witkowski@gmx.de>
Date: Tue, 17 Apr 2012 06:01:10 +0000
Subject: [PATCH] FETI-DP now supports subdomains consisting of multiple
 ranks. Solving on these systems does not work yet.

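DofComm::Iterator can now traverse the communication data of an
arbitrary mesh level: a new constructor takes the level explicitly,
while the old constructor keeps level 0 as its default. A minimal
sketch of the intended usage; 'dofComm', 'feSpace' and
'doSomethingWith' are placeholders for objects available at the call
site:

    // Visit all DOFs this rank communicates on mesh level 1.
    for (DofComm::Iterator it(dofComm, 1, feSpace);
         !it.end(); it.nextRank())
      for (; !it.endDofIter(); it.nextDof())
        doSomethingWith(it.getDofIndex());

MeshDistributor::getAllBoundaryDofs() takes the level as well, and the
new MeshLevelData::rankInSubdomain() reports whether a rank belongs to
this rank's subdomain on a given level, which InteriorBoundary uses to
skip boundaries that are interior to a multi-rank subdomain.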
---
 AMDiS/src/parallel/DofComm.cc         |  2 +-
 AMDiS/src/parallel/DofComm.h          | 31 +++++++++++++++++++++------
 AMDiS/src/parallel/InteriorBoundary.h | 20 ++++++++++-------
 AMDiS/src/parallel/MeshDistributor.cc | 22 +++----------------
 AMDiS/src/parallel/MeshDistributor.h  | 11 ++++------
 AMDiS/src/parallel/MeshLevelData.h    |  7 ++++++
 AMDiS/src/parallel/ParallelDebug.h    |  4 ++--
 AMDiS/src/parallel/PetscSolverFeti.cc | 18 +++++++++-------
 AMDiS/src/parallel/PetscSolverFeti.h  |  2 +-
 9 files changed, 64 insertions(+), 53 deletions(-)
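
Note: MeshDistributor::createBoundaryDofs() is removed below; it had
no level argument. Code that still needs the union of send and receive
DOFs as a set can rebuild it with the level-aware iterator. A minimal
sketch, assuming a MeshDistributor* meshDistributor and a FE space
pointer feSpace are in scope; level 0 reproduces the old behaviour:

    std::set<DegreeOfFreedom> boundaryDofs;
    // Union of all DOFs this rank sends to or receives from others.
    for (DofComm::Iterator it(meshDistributor->getSendDofs(), 0, feSpace);
         !it.end(); it.nextRank())
      for (; !it.endDofIter(); it.nextDof())
        boundaryDofs.insert(it.getDofIndex());
    for (DofComm::Iterator it(meshDistributor->getRecvDofs(), 0, feSpace);
         !it.end(); it.nextRank())
      for (; !it.endDofIter(); it.nextDof())
        boundaryDofs.insert(it.getDofIndex());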

diff --git a/AMDiS/src/parallel/DofComm.cc b/AMDiS/src/parallel/DofComm.cc
index 7fa60de7..aa973a91 100644
--- a/AMDiS/src/parallel/DofComm.cc
+++ b/AMDiS/src/parallel/DofComm.cc
@@ -46,7 +46,7 @@ namespace AMDiS {
   {
     FUNCNAME("DofComm::Iterator::setNextFeMap()");
 
-    if (dataIter != dofComm.data[0].end()) {
+    if (dataIter != dofComm.data[traverseLevel].end()) {
       TEST_EXIT_DBG(dataIter->second.size())("Should not happen!\n");
 
       feMapIter = dataIter->second.begin();
diff --git a/AMDiS/src/parallel/DofComm.h b/AMDiS/src/parallel/DofComm.h
index d428ccad..dd332001 100644
--- a/AMDiS/src/parallel/DofComm.h
+++ b/AMDiS/src/parallel/DofComm.h
@@ -78,19 +78,26 @@ namespace AMDiS {
 	       const FiniteElemSpace *fe = NULL)
 	: dofComm(dc),
 	  dofCounter(-1),
-	  traverseFeSpace(fe)
+	  traverseFeSpace(fe),
+	  traverseLevel(0)
       {
-	FUNCNAME("DofComm::Iterator::Iterator()");
-
-	dataIter = dofComm.data[0].begin();
+	goFirst();
+      }
 
-	while (setNextFeMap() == false)
-	  ++dataIter;
+      Iterator(DofComm &dc,
+	       int level,
+	       const FiniteElemSpace *fe = NULL)
+	: dofComm(dc),
+	  dofCounter(-1),
+	  traverseFeSpace(fe),
+	  traverseLevel(level)
+      {
+	goFirst();
       }
 
       inline bool end()
       {
-	return (dataIter == dofComm.data[0].end());
+	return (dataIter == dofComm.data[traverseLevel].end());
       }
       
       inline void nextRank()
@@ -181,6 +188,14 @@ namespace AMDiS {
       }
 
     protected:
+      void goFirst()
+      {
+	dataIter = dofComm.data[traverseLevel].begin();
+
+	while (setNextFeMap() == false)
+	  ++dataIter;
+      }
+
       bool setNextFeMap();
 
     protected:
@@ -195,6 +210,8 @@ namespace AMDiS {
       int dofCounter;
 
       const FiniteElemSpace *traverseFeSpace;
+
+      int traverseLevel;
     };
 
 
diff --git a/AMDiS/src/parallel/InteriorBoundary.h b/AMDiS/src/parallel/InteriorBoundary.h
index ff081f56..dc04842f 100644
--- a/AMDiS/src/parallel/InteriorBoundary.h
+++ b/AMDiS/src/parallel/InteriorBoundary.h
@@ -216,14 +216,18 @@ namespace AMDiS {
 	  TEST_EXIT_DBG(levelData)("No mesh level data object defined!\n");
 	  TEST_EXIT_DBG(level == 1)("Only 2-level method supported!\n");
 
-	  int rankInLevel = levelData->mapRank(mapIt->first, level - 1, level);
-	  MSG("rankInLevel %d\n", rankInLevel);
-	}
-
- 	while (mapIt->second.size() == 0) {
- 	  ++mapIt;
-	  if (mapIt == bound.boundary.end())
-	    return;
+	  while (levelData->rankInSubdomain(mapIt->first, level) ||
+		 mapIt->second.size() == 0) {
+	    ++mapIt;
+	    if (mapIt == bound.boundary.end())
+	      return;
+	  }
+	} else {
+	  while (mapIt->second.size() == 0) {
+	    ++mapIt;
+	    if (mapIt == bound.boundary.end())
+	      return;
+	  }
 	}
       }
 
diff --git a/AMDiS/src/parallel/MeshDistributor.cc b/AMDiS/src/parallel/MeshDistributor.cc
index f90dcfb7..f88f3860 100644
--- a/AMDiS/src/parallel/MeshDistributor.cc
+++ b/AMDiS/src/parallel/MeshDistributor.cc
@@ -665,14 +665,15 @@ namespace AMDiS {
 
 
   void MeshDistributor::getAllBoundaryDofs(const FiniteElemSpace *feSpace,
+					   int level,
 					   DofContainer& dofs)
   {
     FUNCNAME("MeshDistributor::getAllBoundaryDofs()");
 
     DofContainerSet dofSet;
-    for (DofComm::Iterator it(sendDofs, feSpace); !it.end(); it.nextRank())
+    for (DofComm::Iterator it(sendDofs, level, feSpace); !it.end(); it.nextRank())
       dofSet.insert(it.getDofs().begin(), it.getDofs().end());
-    for (DofComm::Iterator it(recvDofs, feSpace); !it.end(); it.nextRank())
+    for (DofComm::Iterator it(recvDofs, level, feSpace); !it.end(); it.nextRank())
       dofSet.insert(it.getDofs().begin(), it.getDofs().end());
 
     dofs.clear();
@@ -1036,23 +1037,6 @@ namespace AMDiS {
   }
 
 
-  void MeshDistributor::createBoundaryDofs(const FiniteElemSpace *feSpace,
-					   std::set<DegreeOfFreedom> &boundaryDofs)
-  {
-    FUNCNAME("MeshDistributor::createBoundaryDofs()");
-
-    boundaryDofs.clear();
-
-    for (DofComm::Iterator it(sendDofs, feSpace); !it.end(); it.nextRank())
-      for (; !it.endDofIter(); it.nextDof())
-	boundaryDofs.insert(it.getDofIndex());
-
-    for (DofComm::Iterator it(recvDofs, feSpace); !it.end(); it.nextRank())
-      for (; !it.endDofIter(); it.nextDof())
-	boundaryDofs.insert(it.getDofIndex());
-  }
-
-
   void MeshDistributor::serialize(ostream &out, DofContainer &data)
   {    
     int vecSize = data.size();
diff --git a/AMDiS/src/parallel/MeshDistributor.h b/AMDiS/src/parallel/MeshDistributor.h
index 721915a4..29be59ba 100644
--- a/AMDiS/src/parallel/MeshDistributor.h
+++ b/AMDiS/src/parallel/MeshDistributor.h
@@ -373,11 +373,6 @@ namespace AMDiS {
       return initialized;
     }
 
-    /// Creates a set of all DOFs that are on interior boundaries of rank's
-    /// domain. Thus, it creates the union of \ref sendDofs and \ref recvDofs.
-    void createBoundaryDofs(const FiniteElemSpace *feSpace,
-			    std::set<DegreeOfFreedom> &boundaryDofs);
-
     // Writes all data of this object to an output stream.
     void serialize(ostream &out);
 
@@ -436,7 +431,8 @@ namespace AMDiS {
       createBoundaryDofFlag = flag;
     }
 
-    BoundaryDofInfo& getBoundaryDofInfo(const FiniteElemSpace *feSpace, int level = 0)
+    BoundaryDofInfo& getBoundaryDofInfo(const FiniteElemSpace *feSpace, 
+					int level = 0)
     {
       FUNCNAME("MeshDistributor::getBoundaryDofInfo()");
 
@@ -447,7 +443,8 @@ namespace AMDiS {
       return boundaryDofInfo[level][feSpace];
     }
 
-    void getAllBoundaryDofs(const FiniteElemSpace *feSpace,
+    void getAllBoundaryDofs(const FiniteElemSpace *feSpace, 
+			    int level,
 			    DofContainer& dofs);
 
     const ElementObjectDatabase& getElementObjectDb() 
diff --git a/AMDiS/src/parallel/MeshLevelData.h b/AMDiS/src/parallel/MeshLevelData.h
index d697e5b5..9151705d 100644
--- a/AMDiS/src/parallel/MeshLevelData.h
+++ b/AMDiS/src/parallel/MeshLevelData.h
@@ -98,6 +98,13 @@ namespace AMDiS {
       return toRank;			
     }
 
+    bool rankInSubdomain(int rank, int level)
+    {
+      TEST_EXIT_DBG(level < nLevel)("Should not happen!\n");
+
+      return static_cast<bool>(levelRanks[level].count(rank));
+    }
+
   protected:
     int nLevel;
 
diff --git a/AMDiS/src/parallel/ParallelDebug.h b/AMDiS/src/parallel/ParallelDebug.h
index 46727787..28d10f48 100644
--- a/AMDiS/src/parallel/ParallelDebug.h
+++ b/AMDiS/src/parallel/ParallelDebug.h
@@ -63,8 +63,8 @@ namespace AMDiS {
 
     /** \brief
      * This function is used for debugging only. It traverses all interior boundaries
-     * and compares the DOF indices on them with the dof indices of the boundarys
-     * neighbours. The function fails, when dof indices on an interior boundary do
+     * and compares the DOF indices on them with the DOF indices of the boundary's
+     * neighbours. The function fails when DOF indices on an interior boundary do
      * not fit together.
      *
      * \param[in]  pdb           Parallel problem definition used for debugging.
diff --git a/AMDiS/src/parallel/PetscSolverFeti.cc b/AMDiS/src/parallel/PetscSolverFeti.cc
index 4d2058fb..6918f7c5 100644
--- a/AMDiS/src/parallel/PetscSolverFeti.cc
+++ b/AMDiS/src/parallel/PetscSolverFeti.cc
@@ -10,6 +10,7 @@
 // See also license.opensource.txt in the distribution.
 
 
+#include "AMDiS.h"
 #include "parallel/PetscSolverFeti.h"
 #include "parallel/PetscSolverFetiStructs.h"
 #include "parallel/StdMpi.h"
@@ -315,7 +316,6 @@ namespace AMDiS {
     DofIndexSet primals;
     DofContainerSet& vertices = 
       meshDistributor->getBoundaryDofInfo(feSpace, meshLevel).geoDofs[VERTEX];
-    TEST_EXIT_DBG(vertices.size())("No primal vertices on this rank!\n");
     for (DofContainerSet::iterator it = vertices.begin(); 
 	 it != vertices.end(); ++it)
       primals.insert(**it);
@@ -339,7 +339,7 @@ namespace AMDiS {
     // === Create global index of the dual nodes on each rank. ===
 
     DofContainer allBoundaryDofs;
-    meshDistributor->getAllBoundaryDofs(feSpace, allBoundaryDofs);
+    meshDistributor->getAllBoundaryDofs(feSpace, meshLevel, allBoundaryDofs);
 
     for (DofContainer::iterator it = allBoundaryDofs.begin();
 	 it != allBoundaryDofs.end(); ++it)
@@ -357,7 +357,7 @@ namespace AMDiS {
 
     boundaryDofRanks[feSpace].clear();
 
-    for (DofComm::Iterator it(meshDistributor->getSendDofs(), feSpace); 
+    for (DofComm::Iterator it(meshDistributor->getSendDofs(), meshLevel, feSpace); 
 	 !it.end(); it.nextRank())
       for (; !it.endDofIter(); it.nextDof()) {
 	if (!isPrimal(feSpace, it.getDofIndex())) {
@@ -372,20 +372,23 @@ namespace AMDiS {
 
     StdMpi<vector<std::set<int> > > stdMpi(meshDistributor->getMpiComm());
 
-    for (DofComm::Iterator it(meshDistributor->getSendDofs(), feSpace);
+    for (DofComm::Iterator it(meshDistributor->getSendDofs(), meshLevel, feSpace);
 	 !it.end(); it.nextRank())
       for (; !it.endDofIter(); it.nextDof())
-	if (!isPrimal(feSpace, it.getDofIndex()))
+	if (!isPrimal(feSpace, it.getDofIndex())) {
+	  MSG("SEND TO RANK %d\n", it.getRank());
 	  stdMpi.getSendData(it.getRank()).push_back(boundaryDofRanks[feSpace][it.getDofIndex()]);
+	}
 
     stdMpi.updateSendDataSize();
 
-    for (DofComm::Iterator it(meshDistributor->getRecvDofs(), feSpace); 
+    for (DofComm::Iterator it(meshDistributor->getRecvDofs(), meshLevel, feSpace); 
 	 !it.end(); it.nextRank()) {
       bool recvFromRank = false;
       for (; !it.endDofIter(); it.nextDof()) {
 	if (!isPrimal(feSpace, it.getDofIndex())) {
 	  recvFromRank = true;
+	  MSG("RECV FROM RANK %d\n", it.getRank());
 	  break;
 	}
       }
@@ -396,7 +399,7 @@ namespace AMDiS {
 
     stdMpi.startCommunication();
 
-    for (DofComm::Iterator it(meshDistributor->getRecvDofs(), feSpace); 
+    for (DofComm::Iterator it(meshDistributor->getRecvDofs(), meshLevel, feSpace); 
 	 !it.end(); it.nextRank()) {
       int i = 0;
       for (; !it.endDofIter(); it.nextDof())
@@ -405,7 +408,6 @@ namespace AMDiS {
 	    stdMpi.getRecvData(it.getRank())[i++];
     }
 
-
     // === Reserve for each dual node, on the rank that owns this node, the ===
     // === appropriate number of Lagrange constraints.                      ===
 
diff --git a/AMDiS/src/parallel/PetscSolverFeti.h b/AMDiS/src/parallel/PetscSolverFeti.h
index 912e94ef..38de4e8e 100644
--- a/AMDiS/src/parallel/PetscSolverFeti.h
+++ b/AMDiS/src/parallel/PetscSolverFeti.h
@@ -190,7 +190,7 @@ namespace AMDiS {
     /// in this map. Is used for the Dirichlet preconditioner only.
     ParallelDofMapping interiorDofMap;
 
-    /// Stores to each dual boundary DOF in each finite elment space the set of
+    /// Stores, for each dual boundary DOF in each FE space, the set of
     /// ranks in which the DOF is contained in.
     map<const FiniteElemSpace*, DofIndexToPartitions> boundaryDofRanks;
 
-- 
GitLab