From 41774d4946ce4d56928484e059c295aefdead91b Mon Sep 17 00:00:00 2001
From: Thomas Witkowski <thomas.witkowski@gmx.de>
Date: Mon, 23 Mar 2009 15:02:21 +0000
Subject: [PATCH] * Fix a small bug in ResidualEstimator (NULL estimator factors) and clean up the parallel code

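The estimator factor of an operator defaults to NULL (see the
addMatrixOperator()/addVectorOperator() declarations in ProblemScal.h),
but the old check in ResidualEstimator.cc dereferenced the factor
pointer without testing it for NULL. A NULL factor pointer is now
treated as "use this operator":

    // before
    if (**itfac != 0.0) { ... }
    // after
    if (*itfac == NULL || **itfac != 0.0) { ... }

The rest of the patch is cleanup in the parallel code: rename
numElements_ -> nElements, elmdist_ -> elmdist and partitioner_ ->
partitioner, document the elmdist array, drop stray semicolons after
inline method bodies, and shorten empty \brief blocks to /// comments.
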
---
 AMDiS/src/ParMetisPartitioner.cc |  35 +++++-----
 AMDiS/src/ParMetisPartitioner.h  |  26 ++++++--
 AMDiS/src/ParallelProblem.cc     |  30 ++++-----
 AMDiS/src/ParallelProblem.h      | 107 ++++++++++++-------------------
 AMDiS/src/ProblemScal.h          |   8 +--
 AMDiS/src/ResidualEstimator.cc   |   6 +-
 6 files changed, 96 insertions(+), 116 deletions(-)

diff --git a/AMDiS/src/ParMetisPartitioner.cc b/AMDiS/src/ParMetisPartitioner.cc
index 327238dc..415e6ac6 100644
--- a/AMDiS/src/ParMetisPartitioner.cc
+++ b/AMDiS/src/ParMetisPartitioner.cc
@@ -13,7 +13,7 @@
 namespace AMDiS {
 
   ParMetisMesh::ParMetisMesh(Mesh *mesh, MPI::Comm *comm)
-    : numElements_(0),
+    : nElements(0),
       mpiComm(comm)
   {
     FUNCNAME("ParMetisMesh::ParMetisMesh()");
@@ -45,19 +45,19 @@ namespace AMDiS {
       elInfo = stack.traverseNext(elInfo);
     }
 
-    numElements_ = elementCounter;
+    nElements = elementCounter;
 
-    TEST_EXIT(numElements_ > 0)("no elements in ParMETIS mesh\n");
+    TEST_EXIT(nElements > 0)("no elements in ParMETIS mesh\n");
 
     // allocate memory
-    eptr_ = GET_MEMORY(int, numElements_ + 1);
-    eind_ = GET_MEMORY(int, numElements_ * (dim_ + 1));
-    elmdist_ = GET_MEMORY(int, mpiSize + 1);
+    eptr_ = GET_MEMORY(int, nElements + 1);
+    eind_ = GET_MEMORY(int, nElements * (dim_ + 1));
+    elmdist = GET_MEMORY(int, mpiSize + 1);
 
-    elem_p2a_ = GET_MEMORY(int, numElements_);
+    elem_p2a_ = GET_MEMORY(int, nElements);
 
     if (dim_ == dow) {
-      xyz_ = GET_MEMORY(float, numElements_ * dim_);
+      xyz_ = GET_MEMORY(float, nElements * dim_);
     } else {
       xyz_ = NULL;
     }
@@ -69,11 +69,11 @@ namespace AMDiS {
     float *ptr_xyz = xyz_;
     
     // gather element numbers and create elmdist
-    mpiComm->Allgather(&numElements_, 1, MPI_INT, elmdist_ + 1, 1, MPI_INT);
+    mpiComm->Allgather(&nElements, 1, MPI_INT, elmdist + 1, 1, MPI_INT);
 
-    elmdist_[0] = 0;
+    elmdist[0] = 0;
     for (int i = 2; i < mpiSize + 1; i++) {
-      elmdist_[i] += elmdist_[i - 1];
+      elmdist[i] += elmdist[i - 1];
     }
 
     // traverse mesh and fill distributed ParMETIS data
@@ -130,19 +130,19 @@ namespace AMDiS {
   ParMetisMesh::~ParMetisMesh()
   {
     if (eptr_)
-      FREE_MEMORY(eptr_, int, numElements_ + 1);
+      FREE_MEMORY(eptr_, int, nElements + 1);
     
     if (eind_)     
-      FREE_MEMORY(eind_, int, numElements_ * (dim_ + 1));
+      FREE_MEMORY(eind_, int, nElements * (dim_ + 1));
     
-    if (elmdist_)
-      FREE_MEMORY(elmdist_, int, mpiComm->Get_size() + 1);
+    if (elmdist)
+      FREE_MEMORY(elmdist, int, mpiComm->Get_size() + 1);
     
     if (xyz_)
-      FREE_MEMORY(xyz_, float, numElements_ * dim_);
+      FREE_MEMORY(xyz_, float, nElements * dim_);
     
     if (elem_p2a_) 
-      FREE_MEMORY(elem_p2a_, int, numElements_);
+      FREE_MEMORY(elem_p2a_, int, nElements);
   }
 
   ParMetisGraph::ParMetisGraph(ParMetisMesh *parMetisMesh,
@@ -198,7 +198,6 @@ namespace AMDiS {
     elInfo = stack.traverseFirst(mesh_, -1, Mesh::CALL_LEAF_EL);
     while (elInfo) {
       Element *element = elInfo->getElement();
-      //int index = element->getIndex();
 
       TEST_EXIT(element->getElementData(PARTITION_ED) == NULL)
 	("mesh already partitioned\n");
diff --git a/AMDiS/src/ParMetisPartitioner.h b/AMDiS/src/ParMetisPartitioner.h
index 62cc9067..ea03fdc2 100644
--- a/AMDiS/src/ParMetisPartitioner.h
+++ b/AMDiS/src/ParMetisPartitioner.h
@@ -81,7 +81,7 @@ namespace AMDiS {
     }
 
     inline int *getElementDist() { 
-      return elmdist_; 
+      return elmdist; 
     }
 
     inline int getDim() { 
@@ -93,7 +93,7 @@ namespace AMDiS {
     }
 
     inline int getNumElements() { 
-      return numElements_; 
+      return nElements; 
     }
 
   protected:
@@ -101,13 +101,21 @@ namespace AMDiS {
 
     int *eind_;
 
-    int *elmdist_;
+    /** \brief
+     * Array that specifies the distribution of the mesh elements.
+     *
+     * elmdist[0] = 0;
+     * elmdist[1] = number of elements of proc 0;
+     * elmdist[2] = elmdist[1] + number of elements of proc 1;
+     *    ...
+     */
+    int *elmdist;
 
     int dim_;
 
     float *xyz_;
 
-    int numElements_;
+    int nElements;
 
     std::map<int, int> elem_a2p_;
 
@@ -131,11 +139,11 @@ namespace AMDiS {
 
     inline int *getXAdj() { 
       return xadj_; 
-    };
+    }
 
     inline int *getAdjncy() { 
       return adjncy_; 
-    };
+    }
 
   protected:
     ParMetisMesh *parMetisMesh_;
@@ -152,7 +160,7 @@ namespace AMDiS {
       : mesh_(mesh),
         mpiComm(comm),
 	parMetisMesh_(NULL)
-    {};
+    {}
 
     void partition(std::map<int, double> *elemWeights,
 		   PartitionMode mode = INITIAL,
@@ -163,6 +171,10 @@ namespace AMDiS {
     void fillLeafPartitionVec(std::map<int, int> *coarseVec,
 			      std::map<int, int> *fineVec);
 
+    /** \brief
+     * Creates an initial partitioning of the AMDiS mesh by setting the
+     * partition status of all elements to either IN or UNDEFINED.
+     */
     void createPartitionData();
 
     void deletePartitionData();
diff --git a/AMDiS/src/ParallelProblem.cc b/AMDiS/src/ParallelProblem.cc
index fe3075bd..b193297c 100644
--- a/AMDiS/src/ParallelProblem.cc
+++ b/AMDiS/src/ParallelProblem.cc
@@ -226,7 +226,7 @@ namespace AMDiS {
     TEST_EXIT(localCoarseGridLevel_ >= globalCoarseGridLevel_)
       ("local coarse grid level < global coarse grid level\n");
 
-    partitioner_ = NEW ParMetisPartitioner(mesh, &mpiComm);
+    partitioner = NEW ParMetisPartitioner(mesh, &mpiComm);
 
     GET_PARAMETER(0, name_ + "->adaptive thresholds", "%d", 
 		  &adaptiveThresholds_);
@@ -251,7 +251,7 @@ namespace AMDiS {
 
   ParallelProblem::~ParallelProblem() 
   {
-    DELETE partitioner_;
+    DELETE partitioner;
   }
 
   bool ParallelProblem::doPartitioning(AdaptInfo *adaptInfo, double localWeightSum) 
@@ -262,8 +262,7 @@ namespace AMDiS {
     int *partArray = GET_MEMORY(int, mpiSize);
     int part = 0;
 
-    mpiComm.Gather(&localWeightSum, 1, MPI_DOUBLE,
-		     weightSum, 1, MPI_DOUBLE, 0);
+    mpiComm.Gather(&localWeightSum, 1, MPI_DOUBLE, weightSum, 1, MPI_DOUBLE, 0);
 
     if (mpiRank == 0) {
 
@@ -332,14 +331,14 @@ namespace AMDiS {
     static bool initial = true;
     if (initial) {
       initial = false;
-      partitioner_->fillCoarsePartitionVec(&oldPartitionVec);
-      partitioner_->partition(&elemWeights_, INITIAL);
+      partitioner->fillCoarsePartitionVec(&oldPartitionVec);
+      partitioner->partition(&elemWeights_, INITIAL);
     } else {
       oldPartitionVec = partitionVec;
-      partitioner_->partition(&elemWeights_, ADAPTIVE_REPART, 100.0 /*0.000001*/);
+      partitioner->partition(&elemWeights_, ADAPTIVE_REPART, 100.0 /*0.000001*/);
     }    
 
-    partitioner_->fillCoarsePartitionVec(&partitionVec);
+    partitioner->fillCoarsePartitionVec(&partitionVec);
   }
 
   void ParallelProblem::refineOverlap(AdaptInfo *adaptInfo)
@@ -749,8 +748,8 @@ namespace AMDiS {
   void ParallelProblem::exchangeDOFVector(AdaptInfo *adaptInfo,
 					  DOFVector<double> *values)
   {
-    partitioner_->fillLeafPartitionVec(&oldPartitionVec, &oldPartitionVec);
-    partitioner_->fillLeafPartitionVec(&partitionVec, &partitionVec);
+    partitioner->fillLeafPartitionVec(&oldPartitionVec, &oldPartitionVec);
+    partitioner->fillLeafPartitionVec(&partitionVec, &partitionVec);
 
     // === get send and recieve orders ===
     std::vector<std::vector<DegreeOfFreedom> > sendOrder;
@@ -1353,7 +1352,7 @@ namespace AMDiS {
     if (mpiSize > 1) {
       clock_t partitioningStart = clock();
 
-      partitioner_->createPartitionData();
+      partitioner->createPartitionData();
       setElemWeights(adaptInfo);
       partitionMesh(adaptInfo);
 
@@ -1388,8 +1387,7 @@ namespace AMDiS {
 
       if (mpiRank == 0) {
 	clock_t partitioningEnd = clock();
-	partitioningTime = TIME_USED(partitioningStart, 
-				     partitioningEnd);
+	partitioningTime = TIME_USED(partitioningStart, partitioningEnd);
 	computationStart = partitioningEnd;
       }
 
@@ -1416,7 +1414,7 @@ namespace AMDiS {
       if (!timeIF_) 
 	problem->writeFiles(adaptInfo, true);
 
-      partitioner_->deletePartitionData();
+      partitioner->deletePartitionData();
 
       if (!usersEstimator) 
 	DELETE problem->getEstimator();
@@ -1531,7 +1529,7 @@ namespace AMDiS {
     FUNCNAME("ParallelProblem::initParallelization()");
 
     if (mpiSize > 1) {
-      partitioner_->createPartitionData();
+      partitioner->createPartitionData();
       setElemWeights(adaptInfo);
       partitionMesh(adaptInfo);
 
@@ -1594,7 +1592,7 @@ namespace AMDiS {
       if (!timeIF_) 
 	problem->writeFiles(adaptInfo, true);
 
-      partitioner_->deletePartitionData();
+      partitioner->deletePartitionData();
 
       for (int i = 0; i < nComponents; i++) {
 	if (static_cast<int>(usersEstimator.size()) == nComponents) 
diff --git a/AMDiS/src/ParallelProblem.h b/AMDiS/src/ParallelProblem.h
index aaaa065c..0bde33ef 100644
--- a/AMDiS/src/ParallelProblem.h
+++ b/AMDiS/src/ParallelProblem.h
@@ -63,7 +63,7 @@ namespace AMDiS {
 			ProblemIterationInterface *iterationIF,
 			ProblemTimeInterface *timeIF);
 
-    virtual ~ParallelProblemBase() {};
+    virtual ~ParallelProblemBase() {}
 
     /** \brief
      * Must return true, if a new partitioning of the domain (due to unbalanced
@@ -104,53 +104,47 @@ namespace AMDiS {
 
     virtual void exitParallelization(AdaptInfo *adaptInfo);
 
-    virtual void setTime(AdaptInfo *adaptInfo) 
-    {
+    virtual void setTime(AdaptInfo *adaptInfo) {
       if (timeIF_) 
 	timeIF_->setTime(adaptInfo);
-    };
+    }
 
-    virtual void initTimestep(AdaptInfo *adaptInfo)
-    {
+    virtual void initTimestep(AdaptInfo *adaptInfo) {
       if (timeIF_) 
 	timeIF_->initTimestep(adaptInfo);
-    };
+    }
 
     virtual void closeTimestep(AdaptInfo *adaptInfo);
 
-    virtual void solveInitialProblem(AdaptInfo *adaptInfo)
-    {
+    virtual void solveInitialProblem(AdaptInfo *adaptInfo) {
       if (timeIF_)
 	timeIF_->solveInitialProblem(adaptInfo);
-    };
+    }
   
-    virtual void transferInitialSolution(AdaptInfo *adaptInfo)
-    {
+    virtual void transferInitialSolution(AdaptInfo *adaptInfo) {
       if (timeIF_) 
 	timeIF_->transferInitialSolution(adaptInfo);
-    };  
-
+    }
 
-    virtual void beginIteration(AdaptInfo *adaptInfo)
-    {
+    virtual void beginIteration(AdaptInfo *adaptInfo) {
       iterationIF_->beginIteration(adaptInfo);
-    };
+    }
 
     virtual Flag oneIteration(AdaptInfo *adaptInfo, Flag toDo = FULL_ITERATION);
 
     virtual void endIteration(AdaptInfo *adaptInfo) {
       iterationIF_->endIteration(adaptInfo);
-    };
+    }
 
-    virtual void startDelayedTimestepCalculation() {};
+    virtual void startDelayedTimestepCalculation() {}
 
     virtual bool existsDelayedCalculation() {
       return false;
-    };
+    }
 
     MPI::Intracomm* getMpiComm() {
       return &mpiComm;
-    };
+    }
 
     bool getDebugServerProcess() {
       return debugServerProcess;
@@ -260,15 +254,15 @@ namespace AMDiS {
 
     void setRepartitionSteps(int steps) { 
       repartitionSteps_ = steps; 
-    };
+    }
 
     void puEveryTimestep(bool pu) { 
       puEveryTimestep_ = pu; 
-    };
+    }
 
     void addDOFVector(DOFVector<double> *vec) {
       dofVectors_.push_back(vec);
-    };
+    }
 
     /** \brief
      * Every process creates the mesh structure code of its mesh, and all
@@ -278,44 +272,33 @@ namespace AMDiS {
 
     static bool writeElement(ElInfo *elInfo);
 
-    virtual void startDebugServer() {};
+    virtual void startDebugServer() {}
 
-    virtual void serialize(std::ostream&) {};
+    virtual void serialize(std::ostream&) {}
 
-    virtual void deserialize(std::istream&) {};
+    virtual void deserialize(std::istream&) {}
 
   protected:
-    /** \brief
-     *
-     */
+    ///
     double errors2map(std::map<int, double> &errMap, int comp, bool add);
 
+    ///
     std::vector<int> iList;
 
-    /** \brief
-     *
-     */
+    ///
     std::string name_;
 
-    /** \brief
-     * Mesh of the problem.
-     */
+    /// Mesh of the problem.
     Mesh *mesh;
 
-    /** \brief
-     *
-     */
+    ///
     RefinementManager *refinementManager;
 
-    /** \brief
-     *
-     */
+    ///
     CoarseningManager *coarseningManager;
 
-    /** \brief
-     * Pointer to the paritioner which is used to devide a mesh into partitions.
-     */
-    ParMetisPartitioner *partitioner_;
+    /// Pointer to the partitioner that is used to divide the mesh into partitions.
+    ParMetisPartitioner *partitioner;
 
     /** \brief
      * Stores to every coarse element index the number of the partition it 
@@ -329,24 +312,16 @@ namespace AMDiS {
      */
     std::map<int, int> oldPartitionVec;    
 
-    /** \brief
-     *
-     */
+    ///
     std::map<int, double> elemWeights_;
 
-    /** \brief
-     * Stores to every element the set of partitions it corresponds to.
-     */
+    /// Stores to every element the set of partitions it corresponds to.
     std::map<Element*, std::set<int> > elementPartitions_;
 
-    /** \brief
-     * Stores to every DOF the set of partitions it corresponds to.
-     */
+    /// Stores to every DOF the set of partitions it corresponds to.
     std::map<DegreeOfFreedom, std::set<int> > vertexPartitions;
 
-    /** \brief
-     *
-     */
+    ///
     int repartitionSteps_;
 
     /** \brief
@@ -418,7 +393,7 @@ namespace AMDiS {
      *
      */
     double maxLowerTH_;
-  };
+  };
 
   // =========================================================================
   // ===== class ParallelProblemScal =========================================
@@ -460,15 +435,15 @@ namespace AMDiS {
 
     void setEstimator(Estimator *est) { 
       usersEstimator = est; 
-    };
+    }
 
     void setMarker(Marker *marker) { 
       usersMarker = marker; 
-    };
+    }
 
     inline virtual const std::string& getName() { 
       return name_; 
-    };
+    }
 
   protected:
     ProblemScal *problem;
@@ -533,19 +508,19 @@ namespace AMDiS {
 
     void setEstimator(std::vector<Estimator*> est) { 
       usersEstimator = est; 
-    };
+    }
 
     void setMarker(std::vector<Marker*> marker) {
       usersMarker = marker; 
-    };
+    }
 
     inline virtual const std::string& getName() { 
       return name_; 
-    };
+    }
 
     virtual void startDebugServer(AdaptInfo *adaptInfo);
 
-    virtual void debugFunction(AdaptInfo *adaptInfo) {};
+    virtual void debugFunction(AdaptInfo *adaptInfo) {}
 
   protected:
     ProblemVec *problem;
diff --git a/AMDiS/src/ProblemScal.h b/AMDiS/src/ProblemScal.h
index 3571119f..6cecdf75 100644
--- a/AMDiS/src/ProblemScal.h
+++ b/AMDiS/src/ProblemScal.h
@@ -206,16 +206,12 @@ namespace AMDiS {
     void 
     interpolInitialSolution(AbstractFunction<double, WorldVector<double> > *fct);
 
-    /** \brief
-     * Adds an Operator to \ref systemMatrix.
-     */
+    /// Adds an Operator to \ref systemMatrix.
     void addMatrixOperator(Operator *op, 
 			   double *factor = NULL,
 			   double *estFactor = NULL);
 
-    /** \brief
-     * Adds an Operator to \ref rhs.
-     */
+    /// Adds an Operator to \ref rhs.
     void addVectorOperator(Operator *op, 
 			   double *factor = NULL,
 			   double *estFactor = NULL);
diff --git a/AMDiS/src/ResidualEstimator.cc b/AMDiS/src/ResidualEstimator.cc
index f820c4a9..b65fd057 100644
--- a/AMDiS/src/ResidualEstimator.cc
+++ b/AMDiS/src/ResidualEstimator.cc
@@ -206,7 +206,7 @@ namespace AMDiS {
 	   itfac = const_cast<DOFMatrix*>(matrix[system])->getOperatorEstFactorBegin();
 	   it != const_cast<DOFMatrix*>(matrix[system])->getOperatorsEnd(); 
 	   ++it, ++itfac) {
-	if (**itfac != 0.0) {
+	if (*itfac == NULL || **itfac != 0.0) {
 	  (*it)->getAssembler(omp_get_thread_num())->initElement(elInfo, NULL, quad);
 	}
       }
@@ -246,7 +246,7 @@ namespace AMDiS {
 	     itfac = const_cast<DOFMatrix*>(matrix[system])->getOperatorEstFactorBegin(); 
 	     it != const_cast<DOFMatrix*>(matrix[system])->getOperatorsEnd(); 
 	     ++it, ++itfac) {
-	  if (**itfac != 0.0) {
+	  if (*itfac == NULL || **itfac != 0.0) {
 	    if ((uhQP == NULL) && (*it)->zeroOrderTerms()) {
 	      uhQP = GET_MEMORY(double, nPoints);
 	      uh[system]->getVecAtQPs(elInfo, NULL, quadFast[system], uhQP);
@@ -404,7 +404,7 @@ namespace AMDiS {
 		 it != const_cast<DOFMatrix*>(matrix[system])->getOperatorsEnd(); 
 		 ++it, ++fac) {
 
-	      if (**fac != 0.0) {
+	      if (*fac == NULL || **fac != 0.0) {
 		for (int iq = 0; iq < nPointsSurface_; iq++) {
 		  localJump_[iq].set(0.0);
 		}
-- 
GitLab