#include <algorithm>

#include "ParallelDomainBase.h"
#include "ParMetisPartitioner.h"
#include "Mesh.h"
#include "Traverse.h"
#include "ElInfo.h"
#include "Element.h"
#include "MacroElement.h"
#include "PartitionElementData.h"
#include "DOFMatrix.h"
#include "DOFVector.h"
#include "SystemVector.h"
#include "VtkWriter.h"
#include "ElementDofIterator.h"
#include "ProblemStatBase.h"
#include "StandardProblemIteration.h"
#include "ElementFileWriter.h"

#include "petscksp.h"

namespace AMDiS {

  PetscErrorCode myKSPMonitor(KSP ksp, PetscInt iter, PetscReal rnorm, void *)
  {
    if (iter % 100 == 0 && MPI::COMM_WORLD.Get_rank() == 0)
      std::cout << "  Petsc-Iteration " << iter << ": " << rnorm << std::endl;

    return 0;
  }

  inline bool cmpDofsByValue(const DegreeOfFreedom* dof1, const DegreeOfFreedom* dof2)
  {
    return (*dof1 < *dof2);
  }

  ParallelDomainBase::ParallelDomainBase(std::string name,
					 ProblemIterationInterface *iIF,
					 ProblemTimeInterface *tIF,
					 FiniteElemSpace *fe,
					 RefinementManager *refineManager)
    : iterationIF(iIF),
      timeIF(tIF),
      feSpace(fe),
      mesh(fe->getMesh()),
      refinementManager(refineManager),
      initialPartitionMesh(true),
      nRankDOFs(0),
      rstart(0),
      nComponents(1)
  {
    FUNCNAME("ParallelDomainBase::ParallelDomainBase()");

    TEST_EXIT(mesh->getNumberOfDOFAdmin() == 1)
      ("Only meshes with one DOFAdmin are supported!\n");
    TEST_EXIT(mesh->getDOFAdmin(0).getNumberOfPreDOFs(0) == 0)
      ("Wrong pre dof number for DOFAdmin!\n");

    mpiRank = MPI::COMM_WORLD.Get_rank();
    mpiSize = MPI::COMM_WORLD.Get_size();
    mpiComm = MPI::COMM_WORLD;
    partitioner = new ParMetisPartitioner(mesh, &mpiComm);
  }

  void ParallelDomainBase::initParallelization(AdaptInfo *adaptInfo)
  {
    if (mpiSize <= 1)
      return;

    // Test whether the mesh is the macro mesh only. Partitioning of the mesh is
    // supported only for macro meshes, so it will not work yet if the mesh is
    // already refined in some way.
    testForMacroMesh();

    // create an initial partitioning of the mesh
    partitioner->createPartitionData();
    // set the element weights, which are all 1 at the very beginning
    setElemWeights(adaptInfo);
    // and now partition the mesh
    partitionMesh(adaptInfo);

#if (DEBUG != 0)
    ElementIdxToDofs elMap;
    DbgCreateElementMap(elMap);

    if (mpiRank == 0)
      writePartitioningMesh("part.vtu");
#endif

    // === Create new global and local DOF numbering. ===

    // Set of all DOFs of the rank.
    std::vector<const DegreeOfFreedom*> rankDOFs;
    // Number of DOFs in the rank's partition that are owned by the rank.
    nRankDOFs = 0;
    // Number of all DOFs in the macro mesh.
    int nOverallDOFs = 0;

    createLocalGlobalNumbering(rankDOFs, nRankDOFs, nOverallDOFs);
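    // Note the distinction used throughout this file: a DOF can be *in* the rank's
    // partition without being *owned* by the rank. DOFs on interior boundaries are
    // present in several partitions, but exactly one of those ranks owns them, and
    // only owned DOFs are counted in nRankDOFs.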

    // === Create interior boundary information ===

    createInteriorBoundaryInfo(rankDOFs);

    // === Remove all macro elements that are not part of the rank partition. ===

    removeMacroElements();

#if (DEBUG != 0)
    DbgTestElementMap(elMap);
    DbgTestInteriorBoundary();
#endif

    // === Reset all DOFAdmins of the mesh. ===

    updateDofAdmins();

    // === Global refinements. ===

    int globalRefinement = 0;
    GET_PARAMETER(0, "testgr", "%d", &globalRefinement);

    if (globalRefinement > 0) {
      refinementManager->globalRefine(mesh, globalRefinement);

#if (DEBUG != 0)
      elMap.clear();
      DbgCreateElementMap(elMap);
#endif

      updateLocalGlobalNumbering(nRankDOFs, nOverallDOFs);

      updateDofAdmins();

#if (DEBUG != 0)
      DbgTestElementMap(elMap);
#endif
    }


#if (DEBUG != 0)
    DbgTestCommonDofs(true);
#endif

    // === Create PETSc vectors. ===

    nRankRows = nRankDOFs * nComponents;
    nOverallRows = nOverallDOFs * nComponents;
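    // Illustration of the row layout assumed here and during assembly: every global
    // DOF contributes nComponents consecutive rows, i.e. the row of DOF g and
    // component c is g * nComponents + c. With nComponents = 2, for example, the DOF
    // with global index 7 owns rows 14 and 15.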
    

    VecCreate(PETSC_COMM_WORLD, &petscRhsVec);
    VecSetSizes(petscRhsVec, nRankRows, nOverallRows);
    VecSetType(petscRhsVec, VECMPI);

    VecCreate(PETSC_COMM_WORLD, &petscSolVec);
    VecSetSizes(petscSolVec, nRankRows, nOverallRows);
    VecSetType(petscSolVec, VECMPI);

    VecCreate(PETSC_COMM_WORLD, &petscTmpVec);
    VecSetSizes(petscTmpVec, nRankRows, nOverallRows);
    VecSetType(petscTmpVec, VECMPI);
  }


  void ParallelDomainBase::exitParallelization(AdaptInfo *adaptInfo)
  {
    VecDestroy(petscRhsVec);
    VecDestroy(petscSolVec);
    VecDestroy(petscTmpVec);
  }


  void ParallelDomainBase::updateDofAdmins()
  {
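    // Adjust the DOFAdmins to the new local DOF numbering: mark the first
    // mapLocalGlobalDOFs.size() slots as used, free all remaining slots, and reset
    // the admin's size and usage counters accordingly.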
    int nAdmins = mesh->getNumberOfDOFAdmin();
    for (int i = 0; i < nAdmins; i++) {
      DOFAdmin& admin = const_cast<DOFAdmin&>(mesh->getDOFAdmin(i));
      
      for (int j = 0; j < admin.getSize(); j++)
	admin.setDOFFree(j, true);
      for (int j = 0; j < static_cast<int>(mapLocalGlobalDOFs.size()); j++)
 	admin.setDOFFree(j, false);

      admin.setUsedSize(mapLocalGlobalDOFs.size());
      admin.setUsedCount(mapLocalGlobalDOFs.size());
      admin.setFirstHole(mapLocalGlobalDOFs.size());
    }
  }


  void ParallelDomainBase::testForMacroMesh()
  {
    FUNCNAME("ParallelDomainBase::testForMacroMesh()");

    int nMacroElements = 0;

    TraverseStack stack;
    ElInfo *elInfo = stack.traverseFirst(mesh, -1, Mesh::CALL_LEAF_EL);
    while (elInfo) {
      TEST_EXIT(elInfo->getLevel() == 0)
	("Mesh is already refined! This does not work with parallelization!\n");
      
      nMacroElements++;

      elInfo = stack.traverseNext(elInfo);
    }

    TEST_EXIT(nMacroElements >= mpiSize)
      ("The mesh has fewer macro elements than the number of MPI processes!\n");
  }

  void ParallelDomainBase::setDofMatrix(DOFMatrix* mat, int dispMult, 
					int dispAddRow, int dispAddCol)
  {
    FUNCNAME("ParallelDomainBase::setDofMatrix()");

    TEST_EXIT(mat)("No DOFMatrix!\n");

    using mtl::tag::row; using mtl::tag::nz; using mtl::begin; using mtl::end;
    namespace traits= mtl::traits;
    typedef DOFMatrix::base_matrix_type Matrix;

    traits::col<Matrix>::type col(mat->getBaseMatrix());
    traits::const_value<Matrix>::type value(mat->getBaseMatrix());

    typedef traits::range_generator<row, Matrix>::type cursor_type;
    typedef traits::range_generator<nz, cursor_type>::type icursor_type;

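    // The base matrix is traversed row by row (cursor) and, within each row, over its
    // nonzero entries (icursor). Local indices are translated to global PETSc indices
    // via mapLocalGlobalDOFs and the displacement parameters, and each row is written
    // with a single MatSetValues call.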
    std::vector<int> cols;
    std::vector<double> values;
    cols.reserve(300);
    values.reserve(300);

    for (cursor_type cursor = begin<row>(mat->getBaseMatrix()), 
	   cend = end<row>(mat->getBaseMatrix()); cursor != cend; ++cursor) {

      cols.clear();
      values.clear();

      int r = mapLocalGlobalDOFs[*cursor] * dispMult + dispAddRow;

      for (icursor_type icursor = begin<nz>(cursor), 
	     icend = end<nz>(cursor); icursor != icend; ++icursor)
	if (value(*icursor) != 0.0) {
	  cols.push_back(mapLocalGlobalDOFs[col(*icursor)] * dispMult + dispAddCol);
	  values.push_back(value(*icursor));
	}

      MatSetValues(petscMatrix, 1, &r, cols.size(), &(cols[0]), &(values[0]), ADD_VALUES);
    }

  }

  void ParallelDomainBase::setDofVector(DOFVector<double>* vec, 
					int dispMult, int dispAdd)
  {
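    // Sketch of the index mapping: for a system with nComponents unknowns this
    // function is called with dispMult = nComponents and dispAdd = the component
    // index, so the value of global DOF g of component c goes to PETSc row
    // g * dispMult + dispAdd (see the calls in fillPetscMatrix).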
    DOFVector<double>::Iterator dofIt(vec, USED_DOFS);
    for (dofIt.reset(); !dofIt.end(); ++dofIt) {
      int index = mapLocalGlobalDOFs[dofIt.getDOFIndex()] * dispMult + dispAdd;
      double value = *dofIt;

      VecSetValues(petscRhsVec, 1, &index, &value, ADD_VALUES);
    }
  }


  void ParallelDomainBase::fillPetscMatrix(DOFMatrix *mat, DOFVector<double> *vec)
  {
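    // Note: this single-matrix overload creates the PETSc matrix without
    // preallocation and fills it from one DOFMatrix; the Matrix<DOFMatrix*> overload
    // below computes d_nnz / o_nnz first and uses MatCreateMPIAIJ instead.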
    MatCreate(PETSC_COMM_WORLD, &petscMatrix);
    MatSetSizes(petscMatrix, nRankRows, nRankRows, nOverallRows, nOverallRows);
    MatSetType(petscMatrix, MATAIJ);

    setDofMatrix(mat);

    MatAssemblyBegin(petscMatrix, MAT_FINAL_ASSEMBLY);
    MatAssemblyEnd(petscMatrix, MAT_FINAL_ASSEMBLY);

    setDofVector(vec);

    VecAssemblyBegin(petscRhsVec);
    VecAssemblyEnd(petscRhsVec);
  }

  
  void ParallelDomainBase::fillPetscMatrix(Matrix<DOFMatrix*> *mat, SystemVector *vec)
  {
    FUNCNAME("ParallelDomainBase::fillPetscMatrix()");

    clock_t first = clock();

    using mtl::tag::row; using mtl::tag::nz; using mtl::begin; using mtl::end;
    namespace traits= mtl::traits;
    typedef DOFMatrix::base_matrix_type Matrix;

    int d_nnz[nRankRows];
    int o_nnz[nRankRows];

    std::map<int, std::vector< std::pair<int, int> > > sendMatrixEntry;
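    // d_nnz[r] / o_nnz[r] count for local row r the nonzeros whose columns lie in the
    // diagonal block (columns owned by this rank) resp. in the off-diagonal block
    // (columns owned by other ranks); PETSc uses both arrays to preallocate the
    // MPIAIJ matrix. sendMatrixEntry collects (row, column) pairs of entries that
    // belong to rows owned by another rank, so that the owner can account for them
    // in its own preallocation.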

    for (int i = 0; i < nRankRows; i++) {
      d_nnz[i] = 0;
      o_nnz[i] = 0;
    }

    for (int i = 0; i < nComponents; i++) {
      for (int j = 0; j < nComponents; j++) {
 	if ((*mat)[i][j]) {
	  Matrix bmat = (*mat)[i][j]->getBaseMatrix();

	  traits::col<Matrix>::type col(bmat);
	  traits::const_value<Matrix>::type value(bmat);
	  
	  typedef traits::range_generator<row, Matrix>::type cursor_type;
	  typedef traits::range_generator<nz, cursor_type>::type icursor_type;

	  for (cursor_type cursor = begin<row>(bmat), 
		 cend = end<row>(bmat); cursor != cend; ++cursor) {

	    int r = mapLocalGlobalDOFs[*cursor] * nComponents + i;

	    if (isRankDof[*cursor]) {
	      r -= rstart * nComponents;
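	      // r is now a local row index (0 <= r < nRankRows), which is how the
	      // d_nnz and o_nnz arrays are indexed.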
#if (DEBUG != 0)    
	      if (r < 0 || r >= nRankRows) {
		std::cout << "ERROR in rank: " << mpiRank << std::endl;
		std::cout << "  Wrong r = " << r << " " << *cursor << " " 
			  << mapLocalGlobalDOFs[*cursor] << " " 
			  << nRankRows << std::endl;
		ERROR_EXIT("Should not happen!\n");
	      }
#endif
	      for (icursor_type icursor = begin<nz>(cursor), 
		     icend = end<nz>(cursor); icursor != icend; ++icursor) {
		if (value(*icursor) != 0.0) {
		  int c = mapLocalGlobalDOFs[col(*icursor)] * nComponents + j;
		  if (c >= rstart * nComponents && 
		      c < rstart * nComponents + nRankRows)
		    d_nnz[r]++;
		  else
		    o_nnz[r]++;		  
		}    
	      }
	    } else {
	      int sendToRank = -1;

	      for (RankToDofContainer::iterator it = recvDofs.begin();
		   it != recvDofs.end(); ++it) {
		for (DofContainer::iterator dofIt = it->second.begin();
		     dofIt != it->second.end(); ++dofIt) {
		  if (**dofIt == *cursor) {
		    sendToRank = it->first;
		    break;
		  }
		}

		if (sendToRank != -1)
		  break;
	      }

	      TEST_EXIT_DBG(sendToRank != -1)("Should not happen!\n");

	      for (icursor_type icursor = begin<nz>(cursor), 
		     icend = end<nz>(cursor); icursor != icend; ++icursor) {
		if (value(*icursor) != 0.0) {
		  int c = mapLocalGlobalDOFs[col(*icursor)] * nComponents + j;
		  
		  sendMatrixEntry[sendToRank].push_back(std::make_pair(r, c));
		}
	      }
	    }
	  }
	}
      }
    }
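    // Sketch of the exchange that follows: for every neighbour rank we first
    // send/receive the number of remote (row, column) pairs and then the pairs
    // themselves; the receiving rank, which owns those rows, uses the column indices
    // to increment its o_nnz counts.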

    MPI::Request request[sendDofs.size() + recvDofs.size()];
    int requestCounter = 0;

    std::vector<int*> sendBuffers;
    sendBuffers.reserve(recvDofs.size());

    for (RankToDofContainer::iterator it = recvDofs.begin(); 
	 it != recvDofs.end(); ++it) {
      int nSend = sendMatrixEntry[it->first].size();
      request[requestCounter++] = mpiComm.Isend(&nSend, 1, MPI_INT, it->first, 0);
      
      if (nSend > 0) {
	int *sendbuf = new int[nSend * 2];
	for (int i = 0; i < nSend; i++) {
	  sendbuf[i * 2] = sendMatrixEntry[it->first][i].first;
	  sendbuf[i * 2 + 1] = sendMatrixEntry[it->first][i].second;
	}
	sendBuffers.push_back(sendbuf);
      } else {
	sendBuffers.push_back(NULL);
      }
    }

    std::vector<int> recvSize;
    recvSize.reserve(sendDofs.size());
    
    int i = 0;
    for (RankToDofContainer::iterator it = sendDofs.begin();
	 it != sendDofs.end(); ++it)
      request[requestCounter++] = 
	mpiComm.Irecv(&(recvSize[i++]), 1, MPI_INT, it->first, 0);

    MPI::Request::Waitall(requestCounter, request);

    requestCounter = 0;

    i = 0;
    for (RankToDofContainer::iterator it = recvDofs.begin(); 
	 it != recvDofs.end(); ++it) {
      int nSend = sendMatrixEntry[it->first].size();

      if (nSend > 0)
	request[requestCounter++] = 
	  mpiComm.Isend(sendBuffers[i], nSend * 2, MPI_INT, it->first, 0);

      i++;
    }

    std::vector<int*> recvBuffers;
    recvBuffers.reserve(sendDofs.size());
    
    i = 0;
    for (RankToDofContainer::iterator it = sendDofs.begin();
	 it != sendDofs.end(); ++it) {
      if (recvSize[i] > 0) {
	int *recvbuf = new int[recvSize[i] * 2];
	recvBuffers.push_back(recvbuf);

	request[requestCounter++] =
	  mpiComm.Irecv(recvbuf, recvSize[i] * 2, MPI_INT, it->first, 0);
      } else {
	recvBuffers.push_back(NULL);
      }

      i++;
    }

    MPI::Request::Waitall(requestCounter, request);
    for (int j = 0; j < static_cast<int>(sendBuffers.size()); j++)
      if (sendBuffers[j])
	delete [] sendBuffers[j];

    i = 0;
    for (RankToDofContainer::iterator it = sendDofs.begin();
	 it != sendDofs.end(); ++it) {
      if (recvSize[i] > 0) {
	for (int j = 0; j < recvSize[i]; j++) {
	  int r = recvBuffers[i][j * 2];
	  int c = recvBuffers[i][j * 2 + 1];
	  
	  r -= rstart * nComponents;
	  TEST_EXIT_DBG(r >= 0 && r < nRankRows)("Should not happen!\n");
	  
	  if (c < rstart * nComponents ||
	      c >= rstart * nComponents + nRankRows)
	    o_nnz[r]++;
	}

	delete [] recvBuffers[i];
      }

      i++;
    }

    INFO(info, 8)("Fill petsc matrix 1 needed %.5f seconds\n", TIME_USED(first, clock()));

    MatCreateMPIAIJ(PETSC_COMM_WORLD, nRankRows, nRankRows, nOverallRows, nOverallRows,
		    0, d_nnz, 0, o_nnz, &petscMatrix);

    INFO(info, 8)("Fill petsc matrix 2 needed %.5f seconds\n", TIME_USED(first, clock()));

#if (DEBUG != 0)
    int a, b;
    MatGetOwnershipRange(petscMatrix, &a, &b);
    TEST_EXIT(a == rstart * nComponents)("Wrong matrix ownership range!\n");
    TEST_EXIT(b == rstart * nComponents + nRankRows)("Wrong matrix ownership range!\n");
#endif
    using mtl::tag::major; using mtl::tag::nz; using mtl::begin; using mtl::end;
    namespace traits= mtl::traits;
    typedef DOFMatrix::base_matrix_type Matrix;

    for (int i = 0; i < nComponents; i++)
      for (int j = 0; j < nComponents; j++)
	if ((*mat)[i][j]) 
	  setDofMatrix((*mat)[i][j], nComponents, i, j);

    INFO(info, 8)("Fill petsc matrix 3 needed %.5f seconds\n", TIME_USED(first, clock()));

    MatAssemblyBegin(petscMatrix, MAT_FINAL_ASSEMBLY);
    MatAssemblyEnd(petscMatrix, MAT_FINAL_ASSEMBLY);

    INFO(info, 8)("Fill petsc matrix 4 needed %.5f seconds\n", TIME_USED(first, clock()));

    for (int i = 0; i < nComponents; i++)
      setDofVector(vec->getDOFVector(i), nComponents, i);
    VecAssemblyBegin(petscRhsVec);
    VecAssemblyEnd(petscRhsVec);

    INFO(info, 8)("Fill petsc matrix needed %.5f seconds\n", TIME_USED(first, clock()));
  }


  void ParallelDomainBase::solvePetscMatrix(DOFVector<double> *vec)
  {
    FUNCNAME("ParallelDomainBase::solvePetscMatrix()");

    KSP ksp;
    PC pc;

    KSPCreate(PETSC_COMM_WORLD, &ksp);
    KSPSetOperators(ksp, petscMatrix, petscMatrix, SAME_NONZERO_PATTERN);
    KSPGetPC(ksp, &pc);
    //PCSetType(pc, PCJACOBI);
    PCSetType(pc, PCILU);
    KSPSetTolerances(ksp, 1.e-7, PETSC_DEFAULT, PETSC_DEFAULT, PETSC_DEFAULT);
    KSPSetType(ksp, KSPBCGS);
    //KSPSetType(ksp, KSPCG);
    KSPMonitorSet(ksp, myKSPMonitor, PETSC_NULL, 0);
    KSPSolve(ksp, petscRhsVec, petscSolVec);

    PetscScalar *vecPointer;
    VecGetArray(petscSolVec, &vecPointer);

    for (int i = 0; i < nRankDOFs; i++)
      (*vec)[mapLocalToDofIndex[i]] = vecPointer[i];

    VecRestoreArray(petscSolVec, &vecPointer);
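    // The PETSc solution contains values only for DOFs owned by this rank. The values
    // of boundary DOFs owned by other ranks are obtained by the point-to-point
    // exchange below: owning ranks send, the others receive and copy into vec.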

    std::vector<double*> sendBuffers(sendDofs.size());
    std::vector<double*> recvBuffers(recvDofs.size());

    MPI::Request request[sendDofs.size() + recvDofs.size()];
    int requestCounter = 0;
    int i = 0;
    for (RankToDofContainer::iterator sendIt = sendDofs.begin();
	 sendIt != sendDofs.end(); ++sendIt, i++) {
      int nSendDOFs = sendIt->second.size();
      sendBuffers[i] = new double[nSendDOFs];

      for (int j = 0; j < nSendDOFs; j++)
	sendBuffers[i][j] = (*vec)[*((sendIt->second)[j])];

      request[requestCounter++] =
	mpiComm.Isend(sendBuffers[i], nSendDOFs, MPI_DOUBLE, sendIt->first, 0);
    }

    i = 0;
    for (RankToDofContainer::iterator recvIt = recvDofs.begin();
	 recvIt != recvDofs.end(); ++recvIt, i++) {
      int nRecvDOFs = recvIt->second.size();
      recvBuffers[i] = new double[nRecvDOFs];

      request[requestCounter++] =
	mpiComm.Irecv(recvBuffers[i], nRecvDOFs, MPI_DOUBLE, recvIt->first, 0);
    }


    MPI::Request::Waitall(requestCounter, request);

    i = 0;
    for (RankToDofContainer::iterator recvIt = recvDofs.begin();
	 recvIt != recvDofs.end(); ++recvIt, i++) {
      for (int j = 0; j < static_cast<int>(recvIt->second.size()); j++)
	(*vec)[*(recvIt->second)[j]] = recvBuffers[i][j];

      delete [] recvBuffers[i];
    }
    
    for (int i = 0; i < static_cast<int>(sendBuffers.size()); i++)
      delete [] sendBuffers[i];

    MatDestroy(petscMatrix);
  }

  void ParallelDomainBase::solvePetscMatrix(SystemVector *vec)
  {
    FUNCNAME("ParallelDomainBase::solvePetscMatrix()");

    KSP solver;

    KSPCreate(PETSC_COMM_WORLD, &solver);
    KSPSetOperators(solver, petscMatrix, petscMatrix, SAME_NONZERO_PATTERN); 

    KSPSetTolerances(solver, 0.0, 1e-8, PETSC_DEFAULT, PETSC_DEFAULT);

    KSPSetType(solver, KSPBCGS);
    KSPMonitorSet(solver, myKSPMonitor, PETSC_NULL, 0);
    KSPSetFromOptions(solver);

    KSPSolve(solver, petscRhsVec, petscSolVec);
    PetscScalar *vecPointer;
    VecGetArray(petscSolVec, &vecPointer);
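    // The local solution vector stores the components interleaved per DOF, matching
    // the layout used during assembly; hence the index j * nComponents + i below.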

    for (int i = 0; i < nComponents; i++) {
      DOFVector<double> *dofvec = vec->getDOFVector(i);
      for (int j = 0; j < nRankDOFs; j++)
	(*dofvec)[mapLocalToDofIndex[j]] = vecPointer[j * nComponents + i];
    }

    VecRestoreArray(petscSolVec, &vecPointer);

    std::vector<double*> sendBuffers(sendDofs.size());
    std::vector<double*> recvBuffers(recvDofs.size());

    MPI::Request request[sendDofs.size() + recvDofs.size()];
    int requestCounter = 0;
    
    int i = 0;
    for (RankToDofContainer::iterator sendIt = sendDofs.begin();
	 sendIt != sendDofs.end(); ++sendIt, i++) {
      int nSendDOFs = sendIt->second.size();
      sendBuffers[i] = new double[nSendDOFs * nComponents];
      int counter = 0;
      for (int j = 0; j < nComponents; j++) {
	DOFVector<double> *dofvec = vec->getDOFVector(j);
	for (int k = 0; k < nSendDOFs; k++)
	  sendBuffers[i][counter++] = (*dofvec)[*((sendIt->second)[k])];
      }

      request[requestCounter++] = mpiComm.Isend(sendBuffers[i], nSendDOFs * nComponents,
						MPI_DOUBLE, sendIt->first, 0);
    }

    i = 0;
    for (RankToDofContainer::iterator recvIt = recvDofs.begin();
	 recvIt != recvDofs.end(); ++recvIt, i++) {
      int nRecvDOFs = recvIt->second.size() * nComponents;
      recvBuffers[i] = new double[nRecvDOFs];

      request[requestCounter++] =
	mpiComm.Irecv(recvBuffers[i], nRecvDOFs, MPI_DOUBLE, recvIt->first, 0);
    }


    MPI::Request::Waitall(requestCounter, request);

    i = 0;
    for (RankToDofContainer::iterator recvIt = recvDofs.begin();
	 recvIt != recvDofs.end(); ++recvIt, i++) {
      int nRecvDOFs = recvIt->second.size();

      int counter = 0;
      for (int j = 0; j < nComponents; j++) {
	DOFVector<double> *dofvec = vec->getDOFVector(j);
 	for (int k = 0; k < nRecvDOFs; k++)
 	  (*dofvec)[*(recvIt->second)[k]] = recvBuffers[i][counter++];
      }

      delete [] recvBuffers[i];
    }
    
    for (int i = 0; i < static_cast<int>(sendBuffers.size()); i++)
      delete [] sendBuffers[i];
    int iterations = 0;
    KSPGetIterationNumber(solver, &iterations);
    MSG("  Number of iterations: %d\n", iterations);
    
    double norm = 0.0;
    MatMult(petscMatrix, petscSolVec, petscTmpVec);
    VecAXPY(petscTmpVec, -1.0, petscRhsVec);
    VecNorm(petscTmpVec, NORM_2, &norm);
    MSG("  Residual norm: %e\n", norm);

    MatDestroy(petscMatrix);
    KSPDestroy(solver);
  }


  double ParallelDomainBase::setElemWeights(AdaptInfo *adaptInfo) 
  {
    double localWeightSum = 0.0;
    int elNum = -1;

    elemWeights.clear();

    TraverseStack stack;
    ElInfo *elInfo = stack.traverseFirst(mesh, -1,
					 Mesh::CALL_EVERY_EL_PREORDER);
    while (elInfo) {
      Element *element = elInfo->getElement();

      // get partition data
      PartitionElementData *partitionData = dynamic_cast<PartitionElementData*>
	(element->getElementData(PARTITION_ED));

      if (partitionData && partitionData->getPartitionStatus() == IN) {
	if (partitionData->getLevel() == 0) {
	  elNum = element->getIndex();
	}
	TEST_EXIT_DBG(elNum != -1)("invalid element number\n");

	if (element->isLeaf()) {
	  elemWeights[elNum] += 1.0;
	  localWeightSum += 1.0;
	}
      }

      elInfo = stack.traverseNext(elInfo);
    }

    return localWeightSum;
  }


  void ParallelDomainBase::partitionMesh(AdaptInfo *adaptInfo)
  {
    if (initialPartitionMesh) {
      initialPartitionMesh = false;
      partitioner->fillCoarsePartitionVec(&oldPartitionVec);
      partitioner->partition(&elemWeights, INITIAL);
    } else {
      oldPartitionVec = partitionVec;
      partitioner->partition(&elemWeights, ADAPTIVE_REPART, 100.0 /*0.000001*/);
    }    

    partitioner->fillCoarsePartitionVec(&partitionVec);
  }


  void ParallelDomainBase::createInteriorBoundaryInfo(DofContainer& rankDOFs)
  {
    FUNCNAME("ParallelDomainBase::createInteriorBoundaryInfo()");

    // === First, create all the information about the interior boundaries. ===

    TraverseStack stack;
    ElInfo *elInfo = stack.traverseFirst(mesh, -1, 
					 Mesh::CALL_LEAF_EL | Mesh::FILL_NEIGH);
    while (elInfo) {
      Element *element = elInfo->getElement();

      PartitionElementData *partitionData = 
	dynamic_cast<PartitionElementData*>(element->getElementData(PARTITION_ED));   

      if (partitionData->getPartitionStatus() == IN) {
	for (int i = 0; i < 3; i++) {
	  if (!elInfo->getNeighbour(i))
	    continue;

	  PartitionElementData *neighbourPartitionData =
	    dynamic_cast<PartitionElementData*>(elInfo->getNeighbour(i)->getElementData(PARTITION_ED));

	  if (neighbourPartitionData->getPartitionStatus() == OUT) {
	    // We have found an element that is at an interior boundary.

	    int otherElementRank = partitionVec[elInfo->getNeighbour(i)->getIndex()];
	    bool ranksBoundary = (mpiRank > otherElementRank);

	    AtomicBoundary& bound = 
	      (ranksBoundary ?
	       myIntBoundary.getNewAtomicBoundary(otherElementRank) :
	       otherIntBoundary.getNewAtomicBoundary(otherElementRank));

	    bound.rankObject.el = element;
	    bound.rankObject.elIndex = element->getIndex();
	    bound.rankObject.subObjAtBoundary = EDGE;
	    bound.rankObject.ithObjAtBoundary = i;

	    // Do not set a pointer to the element, because it will be deleted from
	    // the mesh after partitioning and the pointer would become invalid.
	    bound.neighbourObject.el = NULL;
	    bound.neighbourObject.elIndex = elInfo->getNeighbour(i)->getIndex();
	    bound.neighbourObject.subObjAtBoundary = EDGE;
	    bound.neighbourObject.ithObjAtBoundary = elInfo->getSideOfNeighbour(i);

	    // i == 2  =>  getSideOfNeighbour(i) == 2
	    TEST_EXIT_DBG(i != 2 || elInfo->getSideOfNeighbour(i) == 2)
	      ("Should not happen!\n");
 	  }
	}
      }

      elInfo = stack.traverseNext(elInfo);
    }
    // === Once we have this information, we have to take care of the order of the  ===
    // === boundaries: in the end, all boundaries must be in the same order on both ===
    // === ranks that share the boundary.                                           ===
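    // Sketch of the ordering protocol used below: for every shared boundary the rank
    // that owns it (myIntBoundary) sends its list as (element index, side) pairs; the
    // other rank receives this list into recvBuffers and permutes its otherIntBoundary
    // entries until they match the received order exactly.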

    std::vector<int*> sendBuffers, recvBuffers;

    MPI::Request request[myIntBoundary.boundary.size() + 
			 otherIntBoundary.boundary.size()];
    int requestCounter = 0;

    for (RankToBoundMap::iterator rankIt = myIntBoundary.boundary.begin();
	 rankIt != myIntBoundary.boundary.end(); ++rankIt) {
      int nSendInt = rankIt->second.size();
      int* buffer = new int[nSendInt * 2];
      for (int i = 0; i < nSendInt; i++) {
	buffer[i * 2] = (rankIt->second)[i].rankObject.elIndex;
	buffer[i * 2 + 1] = (rankIt->second)[i].rankObject.ithObjAtBoundary;
      }
      sendBuffers.push_back(buffer);

      request[requestCounter++] =
	mpiComm.Isend(buffer, nSendInt * 2, MPI_INT, rankIt->first, 0);
    }

    for (RankToBoundMap::iterator rankIt = otherIntBoundary.boundary.begin();
	 rankIt != otherIntBoundary.boundary.end(); ++rankIt) {
      int nRecvInt = rankIt->second.size();
      int *buffer = new int[nRecvInt * 2];
      recvBuffers.push_back(buffer);

      request[requestCounter++] = 
	mpiComm.Irecv(buffer, nRecvInt * 2, MPI_INT, rankIt->first, 0);
    }

    MPI::Request::Waitall(requestCounter, request);

    int i = 0;
    for (RankToBoundMap::iterator rankIt = otherIntBoundary.boundary.begin();
	 rankIt != otherIntBoundary.boundary.end(); ++rankIt) {
      // === We have received from rank "rankIt->first" the ordered list of element ===
      // === indices. We now have to sort the corresponding list in this rank to    ===
      // === get the same order.                                                    ===
      
      for (int j = 0; j < static_cast<int>(rankIt->second.size()); j++) {
	// If the expected object is not in place, search for it.
	if ((rankIt->second)[j].neighbourObject.elIndex != recvBuffers[i][j * 2] ||
	    (rankIt->second)[j].neighbourObject.ithObjAtBoundary != recvBuffers[i][j * 2 + 1]) {
	  int k = j + 1;
	  for (; k < static_cast<int>(rankIt->second.size()); k++)
	    if ((rankIt->second)[k].neighbourObject.elIndex == recvBuffers[i][j * 2] && 
		(rankIt->second)[k].neighbourObject.ithObjAtBoundary == recvBuffers[i][j * 2 + 1])
	      break;

	  // The element must always be found, because the list is just in another order.
	  TEST_EXIT_DBG(k < static_cast<int>(rankIt->second.size()))
	    ("Should never happen!\n");
	  // Swap the current with the found element.
	  AtomicBoundary tmpBound = (rankIt->second)[k];
	  (rankIt->second)[k] = (rankIt->second)[j];
	  (rankIt->second)[j] = tmpBound;	
	}
      }

      delete [] recvBuffers[i++];
    }

    for (int i = 0; i < static_cast<int>(sendBuffers.size()); i++)
      delete [] sendBuffers[i];
  }


  void ParallelDomainBase::removeMacroElements()
  {
    std::vector<MacroElement*> macrosToRemove;
    for (std::deque<MacroElement*>::iterator it = mesh->firstMacroElement();
	 it != mesh->endOfMacroElements();
	 ++it) {
      PartitionElementData *partitionData = 
	dynamic_cast<PartitionElementData*>
	((*it)->getElement()->getElementData(PARTITION_ED));
      if (partitionData->getPartitionStatus() != IN)
	macrosToRemove.push_back(*it);
    }

    mesh->removeMacroElements(macrosToRemove, feSpace);
  }


  void ParallelDomainBase::createLocalGlobalNumbering(DofContainer& rankDOFs,
						      int& nRankDOFs, 
						      int& nOverallDOFs)
  {
    FUNCNAME("ParallelDomainBase::createLocalGlobalNumbering()");

    // === Get rank information about DOFs. ===

    // Stores to each DOF pointer the set of ranks the DOF is part of.
    std::map<const DegreeOfFreedom*, std::set<int> > partitionDOFs;
    DofContainer rankAllDofs;
    DofToRank boundaryDofs;

    createDOFMemberInfo(partitionDOFs, rankDOFs, rankAllDofs, boundaryDofs, vertexDof);

    nRankDOFs = rankDOFs.size();
    nOverallDOFs = partitionDOFs.size();

    // === Get starting position for global rank dof ordering. ===

    rstart = 0;
    mpiComm.Scan(&nRankDOFs, &rstart, 1, MPI_INT, MPI_SUM);
    rstart -= nRankDOFs;
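    // MPI_Scan computes the inclusive prefix sum, so after subtracting nRankDOFs,
    // rstart equals the number of DOFs owned by all lower ranks, i.e. the first
    // global index owned by this rank. Example (illustrative): with nRankDOFs = 4, 3,
    // 5 on ranks 0, 1, 2, rstart becomes 0, 4 and 7.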


    // === Create new indices for all dofs in the rank's partition. The new     ===
    // === indices must start at 0, must be continuous, and must keep the same  ===
    // === order the indices had before.                                        ===
    // Do not change the indices now, but create a new indexing and store it here.
    DofIndexMap rankDofsNewLocalIndex;
    isRankDof.clear();
    int i = 0;
    for (DofContainer::iterator dofIt = rankAllDofs.begin();
	 dofIt != rankAllDofs.end(); ++dofIt) {
      rankDofsNewLocalIndex[*dofIt] = i;
      // First, we set all dofs in the rank's partition to be owned by the rank.
      // Later, the dofs in the rank's partition that are owned by other ranks
      // are set to false.
      isRankDof[i] = true;
      i++;
    }

    // === Create for all rank owned dofs a new global indexing.                ===

    // Stores for the dofs in the rank's partition their new global index.
    DofIndexMap rankDofsNewGlobalIndex;
    // Stores for all rank owned dofs a continuous local index.
    DofIndexMap rankOwnedDofsNewLocalIndex;

    i = 0;
    for (DofContainer::iterator dofIt = rankDOFs.begin();
	 dofIt != rankDOFs.end(); ++dofIt) {
      rankDofsNewGlobalIndex[*dofIt] = i + rstart;
      rankOwnedDofsNewLocalIndex[*dofIt] = i;
      i++;
    }

    // === Create information which dof indices must be sent and which must be ===
    // === received.                                                           ===

    // Stores to each rank a map from DOF pointers (which are all owned by the rank
    // and lie on an interior boundary) to new global DOF indices.
    std::map<int, DofIndexMap > sendNewDofs;

    // Maps to each rank the number of new DOF indices that will be received from
    // it. All these DOFs are at an interior boundary on this rank, but are owned
    // by another rank.
    std::map<int, int> recvNewDofs;

    for (DofToRank::iterator it = boundaryDofs.begin(); it != boundaryDofs.end(); ++it) {

      if (it->second == mpiRank) {
	// If the boundary dof is a rank dof, it must be sent to other ranks.

	// Search for all ranks that have this dof too.
	for (std::set<int>::iterator itRanks = partitionDOFs[it->first].begin();
	     itRanks != partitionDOFs[it->first].end();
	     ++itRanks) {
	  if (*itRanks != mpiRank) {
	    TEST_EXIT_DBG(rankDofsNewGlobalIndex.count(it->first) == 1)
	      ("DOF Key not found!\n");

	    sendNewDofs[*itRanks][it->first] = rankDofsNewGlobalIndex[it->first];
	  }
	}
      } else {
	// If the boundary dof is not a rank dof, its new dof index (and later
	// also the dof values) must be received from another rank.
	if (recvNewDofs.find(it->second) == recvNewDofs.end()) 
	  recvNewDofs[it->second] = 1;
	else
	  recvNewDofs[it->second]++;
      }
    }


    // === Send and receive the dof indices at boundary. ===

    std::vector<int*> sendBuffers(sendNewDofs.size()), recvBuffers(recvNewDofs.size());

    sendDofs.clear();
    recvDofs.clear();

    MPI::Request request[sendNewDofs.size() + recvNewDofs.size()];
    int requestCounter = 0;

    i = 0;
    for (std::map<int, DofIndexMap >::iterator sendIt = sendNewDofs.begin();
	 sendIt != sendNewDofs.end(); ++sendIt, i++) {
      int nSendDofs = sendIt->second.size() * 2;
      sendBuffers[i] = new int[nSendDofs];

      int c = 0;
      for (DofIndexMap::iterator dofIt = sendIt->second.begin();
	   dofIt != sendIt->second.end(); ++dofIt) {
	sendBuffers[i][c++] = *(dofIt->first);
	sendBuffers[i][c++] = dofIt->second;

	sendDofs[sendIt->first].push_back(dofIt->first);
      }

      request[requestCounter++] =
	mpiComm.Isend(sendBuffers[i], nSendDofs, MPI_INT, sendIt->first, 0);
    }

    i = 0;
    for (std::map<int, int>::iterator recvIt = recvNewDofs.begin();
	 recvIt != recvNewDofs.end(); ++recvIt, i++) {
      int nRecvDOFs = recvIt->second * 2;
      recvBuffers[i] = new int[nRecvDOFs];

      request[requestCounter++] =
	mpiComm.Irecv(recvBuffers[i], nRecvDOFs, MPI_INT, recvIt->first, 0);
    }

    MPI::Request::Waitall(requestCounter, request);

    // === Delete send buffers. ===

    for (int j = 0; j < static_cast<int>(sendBuffers.size()); j++)
      delete [] sendBuffers[j];


    // === Change dof indices at boundary from other ranks. ===

    // Within this small data structure we track which dof pointers have already
    // been changed. This is used to avoid the following situation: Assume there
    // are two dof indices a and b in boundaryDofs, and we have to change index a
    // to b and index b to c. When the second rule is applied, we must not change
    // the b that resulted from changing a to b, but the original b. Therefore,
    // once a dof pointer has been changed, it is marked in this data structure
    // and is not allowed to be changed again.
    std::map<const DegreeOfFreedom*, bool> dofChanged;
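    // Illustrative example: if boundaryDofs contains dofs with old indices a and b,
    // and the received rules are a -> b and b -> c, then after applying a -> b that
    // dof pointer is marked in dofChanged, so the rule b -> c is applied to the dof
    // that originally had index b and not to the one that was just renamed.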
    for (DofToRank::iterator dofIt = boundaryDofs.begin(); dofIt != boundaryDofs.end();
	 ++dofIt)
      dofChanged[dofIt->first] = false;

    i = 0;
    for (std::map<int, int>::iterator recvIt = recvNewDofs.begin();
	 recvIt != recvNewDofs.end();
	 ++recvIt, i++) {

      for (int j = 0; j < recvIt->second; j++) {

	DegreeOfFreedom oldDof = recvBuffers[i][j * 2];
	DegreeOfFreedom newGlobalDof = recvBuffers[i][j * 2 + 1];

	bool found = false;

	// Iterate over all boundary dofs to find the dof whose index we have to change.

	for (DofToRank::iterator dofIt = boundaryDofs.begin(); 
	     dofIt != boundaryDofs.end(); ++dofIt) {

	  if (*(dofIt->first) == oldDof && !dofChanged[dofIt->first]) {
	    dofChanged[dofIt->first] = true;

	    recvDofs[recvIt->first].push_back(dofIt->first);
	    rankDofsNewGlobalIndex[dofIt->first] = newGlobalDof;
	    isRankDof[rankDofsNewLocalIndex[dofIt->first]] = false;

	    found = true;
	    break;
	  }
	}