// ParallelDomainBase.cc
#include <algorithm>

#include "ParallelDomainBase.h"
#include "ParMetisPartitioner.h"
#include "Mesh.h"
#include "Traverse.h"
#include "ElInfo.h"
#include "Element.h"
#include "MacroElement.h"
#include "PartitionElementData.h"
#include "DOFMatrix.h"
#include "DOFVector.h"
#include "SystemVector.h"
#include "VtkWriter.h"
#include "ElementDofIterator.h"
#include "ProblemStatBase.h"
#include "StandardProblemIteration.h"
#include "ElementFileWriter.h"

#include "petscksp.h"

namespace AMDiS {

  PetscErrorCode myKSPMonitor(KSP ksp, PetscInt iter, PetscReal rnorm, void *)
  {
    if (iter % 100 == 0 && MPI::COMM_WORLD.Get_rank() == 0)
      std::cout << "  Petsc-Iteration " << iter << ": " << rnorm << std::endl;

    return 0;
  }

  inline bool cmpDofsByValue(const DegreeOfFreedom* dof1, const DegreeOfFreedom* dof2)
  {
    return (*dof1 < *dof2);
  }

  ParallelDomainBase::ParallelDomainBase(std::string name,
					 ProblemIterationInterface *iIF,
					 ProblemTimeInterface *tIF,
					 FiniteElemSpace *fe,
					 RefinementManager *refineManager)
    : iterationIF(iIF),
      timeIF(tIF),
      feSpace(fe),
      mesh(fe->getMesh()),
      refinementManager(refineManager),
      initialPartitionMesh(true),
      nRankDOFs(0),
      rstart(0),
      nComponents(1)
  {
    FUNCNAME("ParallelDomainBase::ParallelDomainBase()");

    TEST_EXIT(mesh->getNumberOfDOFAdmin() == 1)
      ("Only meshes with one DOFAdmin are supported!\n");
    TEST_EXIT(mesh->getDOFAdmin(0).getNumberOfPreDOFs(0) == 0)
      ("Wrong pre dof number for DOFAdmin!\n");

    mpiRank = MPI::COMM_WORLD.Get_rank();
    mpiSize = MPI::COMM_WORLD.Get_size();
    mpiComm = MPI::COMM_WORLD;
    partitioner = new ParMetisPartitioner(mesh, &mpiComm);
  }

  void ParallelDomainBase::initParallelization(AdaptInfo *adaptInfo)
  {
    if (mpiSize <= 1)
      return;

    // Test if the mesh is the macro mesh only. Partitioning of the mesh is supported
    // only for macro meshes, so it will not work yet if the mesh has already been
    // refined in some way.
    testForMacroMesh();

    // create an initial partitioning of the mesh
    partitioner->createPartitionData();
    // set the element weights, which are all 1 at the very beginning
    setElemWeights(adaptInfo);
    // and now partition the mesh
    partitionMesh(adaptInfo);

#if (DEBUG != 0)
    ElementIdxToDofs elMap;
    DbgCreateElementMap(elMap);
#endif

    // === Create new global and local DOF numbering. ===

    // Set of all DOFs of the rank.
    std::vector<const DegreeOfFreedom*> rankDOFs;
    // Number of DOFs in the rank's partition that are owned by the rank.
    nRankDOFs = 0;
    // Number of all DOFs in the macro mesh.
    int nOverallDOFs = 0;

    createLocalGlobalNumbering(rankDOFs, nRankDOFs, nOverallDOFs);

    // === Create interior boundary information ===

    createInteriorBoundaryInfo(rankDOFs);

    // === Remove all macro elements that are not part of the rank partition. ===

    removeMacroElements();

#if (DEBUG != 0)
    DbgTestElementMap(elMap);
    DbgTestInteriorBoundary();
#endif

    // === Reset all DOFAdmins of the mesh. ===

    updateDofAdmins();

    // === Global refinements. ===

    int globalRefinement = 0;
    GET_PARAMETER(0, "testgr", "%d", &globalRefinement);

    if (globalRefinement > 0) {
      refinementManager->globalRefine(mesh, globalRefinement);

#if (DEBUG != 0)
      elMap.clear();
      DbgCreateElementMap(elMap);
#endif

      updateLocalGlobalNumbering(nRankDOFs, nOverallDOFs);

      updateDofAdmins();

#if (DEBUG != 0)
      DbgTestElementMap(elMap);
#endif
    }


#if (DEBUG != 0)
    DbgTestCommonDofs(true);
#endif


    // === Create PETSc vectors. ===

    nRankRows = nRankDOFs * nComponents;
    nOverallRows = nOverallDOFs * nComponents;

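    // Create the distributed PETSc vectors for the rhs, the solution, and a temporary
    // vector; each rank holds nRankRows of the nOverallRows global rows.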
    VecCreate(PETSC_COMM_WORLD, &petscRhsVec);
    VecSetSizes(petscRhsVec, nRankRows, nOverallRows);
    VecSetType(petscRhsVec, VECMPI);

    VecCreate(PETSC_COMM_WORLD, &petscSolVec);
    VecSetSizes(petscSolVec, nRankRows, nOverallRows);
    VecSetType(petscSolVec, VECMPI);

    VecCreate(PETSC_COMM_WORLD, &petscTmpVec);
    VecSetSizes(petscTmpVec, nRankRows, nOverallRows);
    VecSetType(petscTmpVec, VECMPI);
  }


  void ParallelDomainBase::exitParallelization(AdaptInfo *adaptInfo)
  {
    VecDestroy(petscRhsVec);
    VecDestroy(petscSolVec);
    VecDestroy(petscTmpVec);
  }

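  // Resets all DOFAdmins of the mesh: after the (re)numbering, exactly the DOFs that
  // have a local index in mapLocalGlobalDOFs are marked as used, all others as free.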
  
  void ParallelDomainBase::updateDofAdmins()
  {
    int nAdmins = mesh->getNumberOfDOFAdmin();
    for (int i = 0; i < nAdmins; i++) {
      DOFAdmin& admin = const_cast<DOFAdmin&>(mesh->getDOFAdmin(i));
      
      for (int j = 0; j < admin.getSize(); j++)
	admin.setDOFFree(j, true);
      for (int j = 0; j < static_cast<int>(mapLocalGlobalDOFs.size()); j++)
 	admin.setDOFFree(j, false);

      admin.setUsedSize(mapLocalGlobalDOFs.size());
      admin.setUsedCount(mapLocalGlobalDOFs.size());
      admin.setFirstHole(mapLocalGlobalDOFs.size());
    }
  }


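  // Checks that the mesh consists of macro elements only (i.e., is not yet refined)
  // and that there are at least as many macro elements as MPI processes.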
  void ParallelDomainBase::testForMacroMesh()
  {
    FUNCNAME("ParallelDomainBase::testForMacroMesh()");

    int nMacroElements = 0;

    TraverseStack stack;
    ElInfo *elInfo = stack.traverseFirst(mesh, -1, Mesh::CALL_LEAF_EL);
    while (elInfo) {
      TEST_EXIT(elInfo->getLevel() == 0)
	("Mesh is already refined! This does nor work with parallelization!\n");
      
      nMacroElements++;

      elInfo = stack.traverseNext(elInfo);
    }

    TEST_EXIT(nMacroElements >= mpiSize)
      ("The mesh has less macro elements than number of mpi processes!\n");
  }

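  // Adds the entries of one DOFMatrix to the global PETSc matrix. For multi-component
  // systems, dispMult is the number of components and dispAddRow/dispAddCol select
  // the row/column component block.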
  void ParallelDomainBase::setDofMatrix(DOFMatrix* mat, int dispMult, 
					int dispAddRow, int dispAddCol)
  {
    FUNCNAME("ParallelDomainBase::setDofMatrix()");

    TEST_EXIT(mat)("No DOFMatrix!\n");

    using mtl::tag::row; using mtl::tag::nz; using mtl::begin; using mtl::end;
    namespace traits= mtl::traits;
    typedef DOFMatrix::base_matrix_type Matrix;

    traits::col<Matrix>::type col(mat->getBaseMatrix());
    traits::const_value<Matrix>::type value(mat->getBaseMatrix());

    typedef traits::range_generator<row, Matrix>::type cursor_type;
    typedef traits::range_generator<nz, cursor_type>::type icursor_type;

    std::vector<int> cols;
    std::vector<double> values;
    cols.reserve(300);
    values.reserve(300);

    for (cursor_type cursor = begin<row>(mat->getBaseMatrix()), 
	   cend = end<row>(mat->getBaseMatrix()); cursor != cend; ++cursor) {

      cols.clear();
      values.clear();

      int r = mapLocalGlobalDOFs[*cursor] * dispMult + dispAddRow;

      for (icursor_type icursor = begin<nz>(cursor), 
	     icend = end<nz>(cursor); icursor != icend; ++icursor)
	if (value(*icursor) != 0.0) {
	  cols.push_back(mapLocalGlobalDOFs[col(*icursor)] * dispMult + dispAddCol);
	  values.push_back(value(*icursor));
	}

      MatSetValues(petscMatrix, 1, &r, cols.size(), &(cols[0]), &(values[0]), ADD_VALUES);
    }

  }

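  // Adds the entries of one DOFVector to the PETSc rhs vector, using the same
  // local-to-global index mapping (and component displacement) as setDofMatrix.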
  void ParallelDomainBase::setDofVector(DOFVector<double>* vec, 
					int dispMult, int dispAdd)
  {
    DOFVector<double>::Iterator dofIt(vec, USED_DOFS);
    for (dofIt.reset(); !dofIt.end(); ++dofIt) {
      int index = mapLocalGlobalDOFs[dofIt.getDOFIndex()] * dispMult + dispAdd;
      double value = *dofIt;

      VecSetValues(petscRhsVec, 1, &index, &value, ADD_VALUES);
    }
  }

  void ParallelDomainBase::fillPetscMatrix(DOFMatrix *mat, DOFVector<double> *vec)
  {
    MatCreate(PETSC_COMM_WORLD, &petscMatrix);
    MatSetSizes(petscMatrix, nRankRows, nRankRows, nOverallRows, nOverallRows);
    MatSetType(petscMatrix, MATAIJ);

    setDofMatrix(mat);

    MatAssemblyBegin(petscMatrix, MAT_FINAL_ASSEMBLY);
    MatAssemblyEnd(petscMatrix, MAT_FINAL_ASSEMBLY);

    setDofVector(vec);

    VecAssemblyBegin(petscRhsVec);
    VecAssemblyEnd(petscRhsVec);
  }

  
  void ParallelDomainBase::fillPetscMatrix(Matrix<DOFMatrix*> *mat, SystemVector *vec)
  {
    FUNCNAME("ParallelDomainBase::fillPetscMatrix()");

    clock_t first = clock();

    using mtl::tag::row; using mtl::tag::nz; using mtl::begin; using mtl::end;
    namespace traits= mtl::traits;
    typedef DOFMatrix::base_matrix_type Matrix;
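    // d_nnz/o_nnz: number of nonzeros per local row used to preallocate the MPIAIJ
    // matrix; d_nnz counts entries in columns owned by this rank (the "diagonal"
    // block), o_nnz counts entries in columns owned by other ranks.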

    int d_nnz[nRankRows];
    int o_nnz[nRankRows];

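    // Matrix entries in rows of boundary DOFs that are owned by another rank are
    // collected here and sent to the owning rank, so that it can include them in
    // its nonzero counts.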
    std::map<int, std::vector< std::pair<int, int> > > sendMatrixEntry;

    for (int i = 0; i < nRankRows; i++) {
      d_nnz[i] = 0;
      o_nnz[i] = 0;
    }

    for (int i = 0; i < nComponents; i++) {
      for (int j = 0; j < nComponents; j++) {
 	if ((*mat)[i][j]) {
	  Matrix bmat = (*mat)[i][j]->getBaseMatrix();

	  traits::col<Matrix>::type col(bmat);
	  traits::const_value<Matrix>::type value(bmat);
	  
	  typedef traits::range_generator<row, Matrix>::type cursor_type;
	  typedef traits::range_generator<nz, cursor_type>::type icursor_type;
	  
	  for (cursor_type cursor = begin<row>(bmat), 
		 cend = end<row>(bmat); cursor != cend; ++cursor) {

	    int r = mapLocalGlobalDOFs[*cursor] * nComponents + i;

	    if (isRankDof[*cursor]) {
	      r -= rstart * nComponents;

#if (DEBUG != 0)    
	      if (r < 0 || r >= nRankRows) {
		std::cout << "ERROR in rank: " << mpiRank << std::endl;
		std::cout << "  Wrong r = " << r << " " << *cursor << " " 
			  << mapLocalGlobalDOFs[*cursor] << " " 
			  << nRankRows << std::endl;
		ERROR_EXIT("Should not happen!\n");
	      }
#endif
	      
	      for (icursor_type icursor = begin<nz>(cursor), 
		     icend = end<nz>(cursor); icursor != icend; ++icursor) {
		if (value(*icursor) != 0.0) {
		  int c = mapLocalGlobalDOFs[col(*icursor)] * nComponents + j;
	  
		  if (c >= rstart * nComponents && 
		      c < rstart * nComponents + nRankRows)
		    d_nnz[r]++;
		  else
		    o_nnz[r]++;		  
		}    
	      }
	    } else {
	      int sendToRank = -1;

	      for (RankToDofContainer::iterator it = recvDofs.begin();
		   it != recvDofs.end(); ++it) {
		for (DofContainer::iterator dofIt = it->second.begin();
		     dofIt != it->second.end(); ++dofIt) {
		  if (**dofIt == *cursor) {
		    sendToRank = it->first;
		    break;
		  }
		}

		if (sendToRank != -1)
		  break;
	      }

	      TEST_EXIT_DBG(sendToRank != -1)("Should not happen!\n");

	      for (icursor_type icursor = begin<nz>(cursor), 
		     icend = end<nz>(cursor); icursor != icend; ++icursor) {
		if (value(*icursor) != 0.0) {
		  int c = mapLocalGlobalDOFs[col(*icursor)] * nComponents + j;
		  
		  sendMatrixEntry[sendToRank].push_back(std::make_pair(r, c));
		}
	      }
	    }
	  }
	}
      }
    }

    MPI::Request request[sendDofs.size() + recvDofs.size()];
    int requestCounter = 0;

    std::vector<int*> sendBuffers;
    sendBuffers.reserve(recvDofs.size());

    for (RankToDofContainer::iterator it = recvDofs.begin(); 
	 it != recvDofs.end(); ++it) {
      int nSend = sendMatrixEntry[it->first].size();
      request[requestCounter++] = mpiComm.Isend(&nSend, 1, MPI_INT, it->first, 0);
      
      if (nSend > 0) {
	int *sendbuf = new int[nSend * 2];
	for (int i = 0; i < nSend; i++) {
	  sendbuf[i * 2] = sendMatrixEntry[it->first][i].first;
	  sendbuf[i * 2 + 1] = sendMatrixEntry[it->first][i].second;
	}
	sendBuffers.push_back(sendbuf);
      } else {
	sendBuffers.push_back(NULL);
      }
    }

    std::vector<int> recvSize;
    // resize (not reserve), since the elements are written directly via Irecv below
    recvSize.resize(sendDofs.size());
    
    int i = 0;
    for (RankToDofContainer::iterator it = sendDofs.begin();
	 it != sendDofs.end(); ++it)
      request[requestCounter++] = 
	mpiComm.Irecv(&(recvSize[i++]), 1, MPI_INT, it->first, 0);

    MPI::Request::Waitall(requestCounter, request);

    requestCounter = 0;

    i = 0;
    for (RankToDofContainer::iterator it = recvDofs.begin(); 
	 it != recvDofs.end(); ++it) {
      int nSend = sendMatrixEntry[it->first].size();

      if (nSend > 0)
	request[requestCounter++] = 
	  mpiComm.Isend(sendBuffers[i], nSend * 2, MPI_INT, it->first, 0);

      i++;
    }

    std::vector<int*> recvBuffers;
    recvBuffers.reserve(sendDofs.size());
    
    i = 0;
    for (RankToDofContainer::iterator it = sendDofs.begin();
	 it != sendDofs.end(); ++it) {
      if (recvSize[i] > 0) {
	int *recvbuf = new int[recvSize[i] * 2];
	recvBuffers.push_back(recvbuf);

	request[requestCounter++] =
	  mpiComm.Irecv(recvbuf, recvSize[i] * 2, MPI_INT, it->first, 0);
      } else {
	recvBuffers.push_back(NULL);
      }

      i++;
    }

    MPI::Request::Waitall(requestCounter, request);

    for (int j = 0; j < static_cast<int>(sendBuffers.size()); j++)
      if (sendBuffers[j])
	delete [] sendBuffers[j];

    i = 0;
    for (RankToDofContainer::iterator it = sendDofs.begin();
	 it != sendDofs.end(); ++it) {
      if (recvSize[i] > 0) {
	for (int j = 0; j < recvSize[i]; j++) {
	  int r = recvBuffers[i][j * 2];
	  int c = recvBuffers[i][j * 2 + 1];
	  
	  r -= rstart * nComponents;
	  TEST_EXIT_DBG(r >= 0 && r < nRankRows)("Should not happen!\n");
	  
	  if (c < rstart * nComponents ||
	      c >= rstart * nComponents + nRankRows)
	    o_nnz[r]++;
	}

	delete [] recvBuffers[i];
      }

      i++;
    }

    INFO(info, 8)("Fill petsc matrix 1 needed %.5f seconds\n", TIME_USED(first, clock()));

    MatCreateMPIAIJ(PETSC_COMM_WORLD, nRankRows, nRankRows, nOverallRows, nOverallRows,
		    0, d_nnz, 0, o_nnz, &petscMatrix);

    INFO(info, 8)("Fill petsc matrix 2 needed %.5f seconds\n", TIME_USED(first, clock()));

#if (DEBUG != 0)
    int a, b;
    MatGetOwnershipRange(petscMatrix, &a, &b);
    TEST_EXIT(a == rstart * nComponents)("Wrong matrix ownership range!\n");
    TEST_EXIT(b == rstart * nComponents + nRankRows)("Wrong matrix ownership range!\n");
#endif

    using mtl::tag::major; using mtl::tag::nz; using mtl::begin; using mtl::end;
    namespace traits= mtl::traits;
    typedef DOFMatrix::base_matrix_type Matrix;

    for (int i = 0; i < nComponents; i++)
      for (int j = 0; j < nComponents; j++)
	if ((*mat)[i][j]) 
	  setDofMatrix((*mat)[i][j], nComponents, i, j);

    INFO(info, 8)("Fill petsc matrix 3 needed %.5f seconds\n", TIME_USED(first, clock()));

    MatAssemblyBegin(petscMatrix, MAT_FINAL_ASSEMBLY);
    MatAssemblyEnd(petscMatrix, MAT_FINAL_ASSEMBLY);

    INFO(info, 8)("Fill petsc matrix 4 needed %.5f seconds\n", TIME_USED(first, clock()));

    for (int i = 0; i < nComponents; i++)
      setDofVector(vec->getDOFVector(i), nComponents, i);

    VecAssemblyBegin(petscRhsVec);
    VecAssemblyEnd(petscRhsVec);

    INFO(info, 8)("Fill petsc matrix needed %.5f seconds\n", TIME_USED(first, clock()));
  }


  void ParallelDomainBase::solvePetscMatrix(DOFVector<double> *vec)
  {
    FUNCNAME("ParallelDomainBase::solvePetscMatrix()");

    KSP ksp;
    PC pc;

    KSPCreate(PETSC_COMM_WORLD, &ksp);
    KSPSetOperators(ksp, petscMatrix, petscMatrix, SAME_NONZERO_PATTERN);
    KSPGetPC(ksp, &pc);
    //PCSetType(pc, PCJACOBI);
    PCSetType(pc, PCILU);
    KSPSetTolerances(ksp, 1.e-7, PETSC_DEFAULT, PETSC_DEFAULT, PETSC_DEFAULT);
    KSPSetType(ksp, KSPBCGS);
    //KSPSetType(ksp, KSPCG);
    KSPMonitorSet(ksp, myKSPMonitor, PETSC_NULL, 0);
    KSPSolve(ksp, petscRhsVec, petscSolVec);

    PetscScalar *vecPointer;
    VecGetArray(petscSolVec, &vecPointer);

    for (int i = 0; i < nRankDOFs; i++)
      (*vec)[mapLocalToDofIndex[i]] = vecPointer[i];

    VecRestoreArray(petscSolVec, &vecPointer);

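    // Exchange the solution values of the boundary DOFs: each rank sends the values
    // of the boundary DOFs it owns and receives the values of those owned by
    // neighbouring ranks.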
    std::vector<double*> sendBuffers(sendDofs.size());
    std::vector<double*> recvBuffers(recvDofs.size());

    MPI::Request request[sendDofs.size() + recvDofs.size()];
    int requestCounter = 0;

    int i = 0;
    for (RankToDofContainer::iterator sendIt = sendDofs.begin();
	 sendIt != sendDofs.end(); ++sendIt, i++) {
      int nSendDOFs = sendIt->second.size();
      sendBuffers[i] = new double[nSendDOFs];

      for (int j = 0; j < nSendDOFs; j++)
	sendBuffers[i][j] = (*vec)[*((sendIt->second)[j])];

      request[requestCounter++] =
	mpiComm.Isend(sendBuffers[i], nSendDOFs, MPI_DOUBLE, sendIt->first, 0);
    }

    i = 0;
    for (RankToDofContainer::iterator recvIt = recvDofs.begin();
	 recvIt != recvDofs.end(); ++recvIt, i++) {
      int nRecvDOFs = recvIt->second.size();
      recvBuffers[i] = new double[nRecvDOFs];

      request[requestCounter++] =
	mpiComm.Irecv(recvBuffers[i], nRecvDOFs, MPI_DOUBLE, recvIt->first, 0);
    }


    MPI::Request::Waitall(requestCounter, request);

    i = 0;
    for (RankToDofContainer::iterator recvIt = recvDofs.begin();
	 recvIt != recvDofs.end(); ++recvIt, i++) {
      for (int j = 0; j < static_cast<int>(recvIt->second.size()); j++)
	(*vec)[*(recvIt->second)[j]] = recvBuffers[i][j];

      delete [] recvBuffers[i];
    }
    
    for (int i = 0; i < static_cast<int>(sendBuffers.size()); i++)
      delete [] sendBuffers[i];

    MatDestroy(petscMatrix);
  }

  void ParallelDomainBase::solvePetscMatrix(SystemVector *vec)
  {
    FUNCNAME("ParallelDomainBase::solvePetscMatrix()");

    KSP solver;

    KSPCreate(PETSC_COMM_WORLD, &solver);
    KSPSetOperators(solver, petscMatrix, petscMatrix, SAME_NONZERO_PATTERN); 

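    // rtol = 0 disables the relative criterion; iterate until the absolute residual
    // norm is below 1e-8.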
    KSPSetTolerances(solver, 0.0, 1e-8, PETSC_DEFAULT, PETSC_DEFAULT);

    KSPSetType(solver, KSPBCGS);
    KSPMonitorSet(solver, myKSPMonitor, PETSC_NULL, 0);
    KSPSetFromOptions(solver);

    KSPSolve(solver, petscRhsVec, petscSolVec);

    PetscScalar *vecPointer;
    VecGetArray(petscSolVec, &vecPointer);
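    // The global PETSc ordering interleaves the components: the nComponents values
    // of one DOF are stored consecutively, hence the index j * nComponents + i below.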

    for (int i = 0; i < nComponents; i++) {
      DOFVector<double> *dofvec = vec->getDOFVector(i);
      for (int j = 0; j < nRankDOFs; j++)
	(*dofvec)[mapLocalToDofIndex[j]] = vecPointer[j * nComponents + i];
    }

    VecRestoreArray(petscSolVec, &vecPointer);

    std::vector<double*> sendBuffers(sendDofs.size());
    std::vector<double*> recvBuffers(recvDofs.size());

    MPI::Request request[sendDofs.size() + recvDofs.size()];
    int requestCounter = 0;
    
    int i = 0;
    for (RankToDofContainer::iterator sendIt = sendDofs.begin();
	 sendIt != sendDofs.end(); ++sendIt, i++) {
      int nSendDOFs = sendIt->second.size();
      sendBuffers[i] = new double[nSendDOFs * nComponents];

      int counter = 0;
      for (int j = 0; j < nComponents; j++) {
	DOFVector<double> *dofvec = vec->getDOFVector(j);
	for (int k = 0; k < nSendDOFs; k++)
	  sendBuffers[i][counter++] = (*dofvec)[*((sendIt->second)[k])];
      }

      request[requestCounter++] = mpiComm.Isend(sendBuffers[i], nSendDOFs * nComponents,
						MPI_DOUBLE, sendIt->first, 0);
    }

    i = 0;
    for (RankToDofContainer::iterator recvIt = recvDofs.begin();
	 recvIt != recvDofs.end(); ++recvIt, i++) {
      int nRecvDOFs = recvIt->second.size() * nComponents;
      recvBuffers[i] = new double[nRecvDOFs];

      request[requestCounter++] =
	mpiComm.Irecv(recvBuffers[i], nRecvDOFs, MPI_DOUBLE, recvIt->first, 0);
    }


    MPI::Request::Waitall(requestCounter, request);

    i = 0;
    for (RankToDofContainer::iterator recvIt = recvDofs.begin();
	 recvIt != recvDofs.end(); ++recvIt, i++) {
      int nRecvDOFs = recvIt->second.size();

      int counter = 0;
      for (int j = 0; j < nComponents; j++) {
	DOFVector<double> *dofvec = vec->getDOFVector(j);
	for (int k = 0; k < nRecvDOFs; k++)
	  (*dofvec)[*(recvIt->second)[k]] = recvBuffers[i][counter++];
      }

      delete [] recvBuffers[i];
    }
    
    for (int i = 0; i < static_cast<int>(sendBuffers.size()); i++)
      delete [] sendBuffers[i];
    int iterations = 0;
    KSPGetIterationNumber(solver, &iterations);
    MSG("  Number of iterations: %d\n", iterations);
    
    double norm = 0.0;
    MatMult(petscMatrix, petscSolVec, petscTmpVec);
    VecAXPY(petscTmpVec, -1.0, petscRhsVec);
    VecNorm(petscTmpVec, NORM_2, &norm);
    MSG("  Residual norm: %e\n", norm);

    MatDestroy(petscMatrix);
    KSPDestroy(solver);
  }


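  // Assigns to each macro element of the rank's partition a weight equal to its
  // number of leaf elements and returns the sum of these weights.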
  double ParallelDomainBase::setElemWeights(AdaptInfo *adaptInfo) 
  {
    double localWeightSum = 0.0;
    int elNum = -1;

    elemWeights.clear();

    TraverseStack stack;
    ElInfo *elInfo = stack.traverseFirst(mesh, -1,
					 Mesh::CALL_EVERY_EL_PREORDER);
    while (elInfo) {
      Element *element = elInfo->getElement();

      // get partition data
      PartitionElementData *partitionData = dynamic_cast<PartitionElementData*>
	(element->getElementData(PARTITION_ED));

      if (partitionData && partitionData->getPartitionStatus() == IN) {
	if (partitionData->getLevel() == 0) {
	  elNum = element->getIndex();
	}
	TEST_EXIT_DBG(elNum != -1)("invalid element number\n");
	if (element->isLeaf()) {
	  elemWeights[elNum] += 1.0;
	  localWeightSum += 1.0;
	}
      }

      elInfo = stack.traverseNext(elInfo);
    }

    return localWeightSum;
  }


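  // Computes the mesh partitioning with ParMETIS: an initial partitioning on the
  // first call, an adaptive repartitioning based on the current element weights
  // afterwards.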
  void ParallelDomainBase::partitionMesh(AdaptInfo *adaptInfo)
  {
    if (initialPartitionMesh) {
      initialPartitionMesh = false;
      partitioner->fillCoarsePartitionVec(&oldPartitionVec);
      partitioner->partition(&elemWeights, INITIAL);
    } else {
      oldPartitionVec = partitionVec;
      partitioner->partition(&elemWeights, ADAPTIVE_REPART, 100.0 /*0.000001*/);
    }    

    partitioner->fillCoarsePartitionVec(&partitionVec);
  }


  void ParallelDomainBase::createInteriorBoundaryInfo(DofContainer& rankDOFs)
  {
    FUNCNAME("ParallelDomainBase::createInteriorBoundaryInfo()");

    // === First, create all the information about the interior boundaries. ===

    TraverseStack stack;
    ElInfo *elInfo = stack.traverseFirst(mesh, -1, 
					 Mesh::CALL_LEAF_EL | Mesh::FILL_NEIGH);
    while (elInfo) {
      Element *element = elInfo->getElement();

      PartitionElementData *partitionData = 
	dynamic_cast<PartitionElementData*>(element->getElementData(PARTITION_ED));   
      if (partitionData->getPartitionStatus() == IN) {
	for (int i = 0; i < 3; i++) {
	  if (!elInfo->getNeighbour(i))
	    continue;

	  PartitionElementData *neighbourPartitionData =
	    dynamic_cast<PartitionElementData*>(elInfo->getNeighbour(i)->getElementData(PARTITION_ED));

	  if (neighbourPartitionData->getPartitionStatus() == OUT) {
	    // We have found an element that is at an interior boundary. 

	    // === Find out if the boundary part of the element corresponds to the   ===
	    // === rank or to the rank "on the other side" of the interior boundary. ===
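	    // Edge i of the element is the edge opposite to vertex i, so its two
	    // vertex DOFs are the DOFs of the remaining two vertices.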

	    const DegreeOfFreedom* boundDOF1 = NULL;
	    const DegreeOfFreedom* boundDOF2 = NULL;
	    
	    switch (i) {
	    case 0:
	      boundDOF1 = element->getDOF(1);
	      boundDOF2 = element->getDOF(2);
	      break;
	    case 1:
	      boundDOF1 = element->getDOF(0);
	      boundDOF2 = element->getDOF(2);
	      break;
	    case 2:
	      boundDOF1 = element->getDOF(0);
	      boundDOF2 = element->getDOF(1);
	      break;
	    default:
	      ERROR_EXIT("Should never happen!\n");
	    }

	    bool isRankDOF1 = (find(rankDOFs.begin(), rankDOFs.end(), boundDOF1) != rankDOFs.end());
	    bool isRankDOF2 = (find(rankDOFs.begin(), rankDOFs.end(), boundDOF2) != rankDOFs.end());
	    bool ranksBoundary = isRankDOF1 || isRankDOF2;

	    // === And add the part of the interior boundary. ===

	    AtomicBoundary& bound = 
	      (ranksBoundary ?
	       myIntBoundary.getNewAtomicBoundary(partitionVec[elInfo->getNeighbour(i)->getIndex()]) :
	       otherIntBoundary.getNewAtomicBoundary(partitionVec[elInfo->getNeighbour(i)->getIndex()]));

	    bound.rankObject.el = element;
	    bound.rankObject.elIndex = element->getIndex();
	    bound.rankObject.subObjAtBoundary = EDGE;
	    bound.rankObject.ithObjAtBoundary = i;
	    // Do not set a pointer to the element, because it will be deleted from the
	    // mesh after partitioning and the pointer would become invalid.
	    bound.neighbourObject.el = NULL;
	    bound.neighbourObject.elIndex = elInfo->getNeighbour(i)->getIndex();
	    bound.neighbourObject.subObjAtBoundary = EDGE;
	    bound.neighbourObject.ithObjAtBoundary = elInfo->getSideOfNeighbour(i);

	    // i == 2  =>  getSideOfNeighbour(i) == 2
	    TEST_EXIT_DBG(i != 2 || elInfo->getSideOfNeighbour(i) == 2)
	      ("Should not happen!\n");
 	  }
	}
      }

      elInfo = stack.traverseNext(elInfo);
    }


    // === Once we have this information, we have to take care of the order of the  ===
    // === boundaries: in the end, all boundaries must be in the same order on both ===
    // === ranks that share the boundary.                                           ===
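    // Each rank owning a boundary sends, for every atomic boundary, the pair
    // (element index, ithObjAtBoundary); the neighbouring rank then reorders its
    // own list so that it matches the owner's order.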

    std::vector<int*> sendBuffers, recvBuffers;

    MPI::Request request[myIntBoundary.boundary.size() + 
			 otherIntBoundary.boundary.size()];
    int requestCounter = 0;

    for (RankToBoundMap::iterator rankIt = myIntBoundary.boundary.begin();
	 rankIt != myIntBoundary.boundary.end(); ++rankIt) {
      int nSendInt = rankIt->second.size();
      int* buffer = new int[nSendInt * 2];
      for (int i = 0; i < nSendInt; i++) {
	buffer[i * 2] = (rankIt->second)[i].rankObject.elIndex;
	buffer[i * 2 + 1] = (rankIt->second)[i].rankObject.ithObjAtBoundary;
      }
      sendBuffers.push_back(buffer);

      request[requestCounter++] =
	mpiComm.Isend(buffer, nSendInt * 2, MPI_INT, rankIt->first, 0);
    }

    for (RankToBoundMap::iterator rankIt = otherIntBoundary.boundary.begin();
	 rankIt != otherIntBoundary.boundary.end(); ++rankIt) {
      int nRecvInt = rankIt->second.size();
      int *buffer = new int[nRecvInt * 2];
      recvBuffers.push_back(buffer);

      request[requestCounter++] =
	mpiComm.Irecv(buffer, nRecvInt * 2, MPI_INT, rankIt->first, 0);
    }


    MPI::Request::Waitall(requestCounter, request);

    int i = 0;
    for (RankToBoundMap::iterator rankIt = otherIntBoundary.boundary.begin();
	 rankIt != otherIntBoundary.boundary.end(); ++rankIt) {

      // === We have received from rank "rankIt->first" the ordered list of element ===
      // === indices. We now have to sort the corresponding list in this rank to    ===
      // === get the same order.                                                    ===
      
      for (int j = 0; j < static_cast<int>(rankIt->second.size()); j++) {
	// If the expected object is not at place, search for it.
	if ((rankIt->second)[j].neighbourObject.elIndex != recvBuffers[i][j * 2] ||
	    (rankIt->second)[j].neighbourObject.ithObjAtBoundary != recvBuffers[i][j * 2 + 1]) {
	  int k = j + 1;
	  for (; k < static_cast<int>(rankIt->second.size()); k++)
	    if ((rankIt->second)[k].neighbourObject.elIndex == recvBuffers[i][j * 2] && 
		(rankIt->second)[k].neighbourObject.ithObjAtBoundary == recvBuffers[i][j * 2 + 1])
	      break;

	  // The element must always be found, because the list is just in another order.
	  TEST_EXIT_DBG(k < static_cast<int>(rankIt->second.size()))
	    ("Should never happen!\n");

	  // Swap the current with the found element.
	  AtomicBoundary tmpBound = (rankIt->second)[k];
	  (rankIt->second)[k] = (rankIt->second)[j];
	  (rankIt->second)[j] = tmpBound;	
	}
      }

      delete [] recvBuffers[i++];
    }

    for (int i = 0; i < static_cast<int>(sendBuffers.size()); i++)
      delete [] sendBuffers[i];
  }


  void ParallelDomainBase::removeMacroElements()
  {
    std::vector<MacroElement*> macrosToRemove;
    for (std::deque<MacroElement*>::iterator it = mesh->firstMacroElement();
	 it != mesh->endOfMacroElements();
	 ++it) {
      PartitionElementData *partitionData = 
	dynamic_cast<PartitionElementData*>
	((*it)->getElement()->getElementData(PARTITION_ED));
      if (partitionData->getPartitionStatus() != IN)
	macrosToRemove.push_back(*it);
    }

    mesh->removeMacroElements(macrosToRemove, feSpace);
  }


  void ParallelDomainBase::createLocalGlobalNumbering(DofContainer& rankDOFs,
						      int& nRankDOFs, 
						      int& nOverallDOFs)
  {
    FUNCNAME("ParallelDomainBase::createLocalGlobalNumbering()");

    // === Get rank information about DOFs. ===

    // Stores to each DOF pointer the set of ranks the DOF is part of.
    std::map<const DegreeOfFreedom*, std::set<int> > partitionDOFs;
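    // All DOFs lying in the rank's partition (including those owned by other ranks).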
    DofContainer rankAllDofs;
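    // Maps each DOF on an interior boundary of the rank's partition to the rank that owns it.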
    DofToRank boundaryDofs;

    createDOFMemberInfo(partitionDOFs, rankDOFs, rankAllDofs, boundaryDofs, vertexDof);

    nRankDOFs = rankDOFs.size();
    nOverallDOFs = partitionDOFs.size();


    // === Get starting position for global rank dof ordering. ===

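    // Exclusive prefix sum over the number of rank-owned DOFs: after the scan (and
    // subtracting the own contribution), rstart is the first global index of this
    // rank's DOFs.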
    rstart = 0;
    mpiComm.Scan(&nRankDOFs, &rstart, 1, MPI_INT, MPI_SUM);
    rstart -= nRankDOFs;

    
    // === Create new indices for all DOFs in the rank's partition. The new     ===
    // === indices must start at 0, be contiguous, and keep the same order the  ===
    // === indices had before.                                                  ===

    // Do not change the indices now, but create a new indexing and store it here.
    DofIndexMap rankDofsNewLocalIndex;
    isRankDof.clear();
    int i = 0;
    for (DofContainer::iterator dofIt = rankAllDofs.begin();
	 dofIt != rankAllDofs.end(); ++dofIt) {

      rankDofsNewLocalIndex[*dofIt] = i;
      // First, we set all dofs in the rank's partition to be owned by the rank. Later,
      // the dofs in the rank's partition that are owned by other ranks are set to false.
      isRankDof[i] = true;
      i++;
    }


    // === Create for all rank-owned dofs a new global indexing. ===

    // Stores for dofs in the rank a new global index.
    DofIndexMap rankDofsNewGlobalIndex;
    // Stores for all rank-owned dofs a contiguous local index.
    DofIndexMap rankOwnedDofsNewLocalIndex;

    i = 0;
    for (DofContainer::iterator dofIt = rankDOFs.begin();
	 dofIt != rankDOFs.end(); ++dofIt) {
      rankDofsNewGlobalIndex[*dofIt] = i + rstart;
      rankOwnedDofsNewLocalIndex[*dofIt] = i;
      i++;
    }


    // === Create information about which dof indices must be sent and which ===
    // === must be received.                                                  ===

    // Stores to each rank a map from DOF pointers (which are all owned by the rank
    // and lie on an interior boundary) to new global DOF indices.
    std::map<int, DofIndexMap > sendNewDofs;

    // Maps to each rank the number of new DOF indices this rank will receive from it.
    // All these DOFs are at an interior boundary on this rank, but are owned by
    // another rank.
    std::map<int, int> recvNewDofs;

    for (DofToRank::iterator it = boundaryDofs.begin(); it != boundaryDofs.end(); ++it) {

      if (it->second == mpiRank) {
	// If the boundary dof is a rank dof, it must be sent to other ranks.

	// Search for all ranks that have this dof too.
	for (std::set<int>::iterator itRanks = partitionDOFs[it->first].begin();
	     itRanks != partitionDOFs[it->first].end();
	     ++itRanks) {
	  if (*itRanks != mpiRank) {
	    TEST_EXIT_DBG(rankDofsNewGlobalIndex.count(it->first) == 1)
	      ("DOF Key not found!\n");

	    sendNewDofs[*itRanks][it->first] = rankDofsNewGlobalIndex[it->first];
	  }
	}
      } else {
	// If the boundary dof is not a rank dof, its new dof index (and later
	// also the dof values) must be received from another rank.
	if (recvNewDofs.find(it->second) == recvNewDofs.end()) 
	  recvNewDofs[it->second] = 1;
	else
	  recvNewDofs[it->second]++;
      }
    }


    // === Send and receive the dof indices at boundary. ===

    std::vector<int*> sendBuffers(sendNewDofs.size()), recvBuffers(recvNewDofs.size());

    sendDofs.clear();
    recvDofs.clear();

    MPI::Request request[sendNewDofs.size() + recvNewDofs.size()];
    int requestCounter = 0;

    i = 0;
    for (std::map<int, DofIndexMap >::iterator sendIt = sendNewDofs.begin();
	 sendIt != sendNewDofs.end(); ++sendIt, i++) {
      int nSendDofs = sendIt->second.size() * 2;
      sendBuffers[i] = new int[nSendDofs];

      int c = 0;
      for (DofIndexMap::iterator dofIt = sendIt->second.begin();
	   dofIt != sendIt->second.end(); ++dofIt) {
	sendBuffers[i][c++] = *(dofIt->first);
	sendBuffers[i][c++] = dofIt->second;

	sendDofs[sendIt->first].push_back(dofIt->first);
      }

      request[requestCounter++] =
	mpiComm.Isend(sendBuffers[i], nSendDofs, MPI_INT, sendIt->first, 0);
    }

    i = 0;
    for (std::map<int, int>::iterator recvIt = recvNewDofs.begin();
	 recvIt != recvNewDofs.end(); ++recvIt, i++) {
      int nRecvDOFs = recvIt->second * 2;
      recvBuffers[i] = new int[nRecvDOFs];

      request[requestCounter++] =
	mpiComm.Irecv(recvBuffers[i], nRecvDOFs, MPI_INT, recvIt->first, 0);
    }


    MPI::Request::Waitall(requestCounter, request);

    
    // === Delete send buffers. ===

    for (int j = 0; j < static_cast<int>(sendBuffers.size()); j++)
      delete [] sendBuffers[j];


    // === Change dof indices at boundary from other ranks. ===

    // Within this small data structure we track which dof index was already changed.
    // This is used to avoid the following situation: Assume there are two dof indices
    // a and b in boundaryDofs, and we have to change index a to b and index b to c.
    // When the second rule is applied, it must not hit the b that resulted from
    // changing a to b, but only the original b. Therefore, after the first rule has
    // been applied, the dof pointer is marked in this data structure and is not
    // allowed to be changed anymore.
    std::map<const DegreeOfFreedom*, bool> dofChanged;
    for (DofToRank::iterator dofIt = boundaryDofs.begin(); dofIt != boundaryDofs.end();
	 ++dofIt)
      dofChanged[dofIt->first] = false;

    i = 0;
    for (std::map<int, int>::iterator recvIt = recvNewDofs.begin();
	 recvIt != recvNewDofs.end();
	 ++recvIt, i++) {

      for (int j = 0; j < recvIt->second; j++) {

	DegreeOfFreedom oldDof = recvBuffers[i][j * 2];
	DegreeOfFreedom newGlobalDof = recvBuffers[i][j * 2 + 1];

	bool found = false;

	// Iterate over all boundary dofs to find the dof whose index we have to change.

	for (DofToRank::iterator dofIt = boundaryDofs.begin(); 
	     dofIt != boundaryDofs.end(); ++dofIt) {

	  if (*(dofIt->first) == oldDof && !dofChanged[dofIt->first]) {
	    dofChanged[dofIt->first] = true;

	    recvDofs[recvIt->first].push_back(dofIt->first);
	    rankDofsNewGlobalIndex[dofIt->first] = newGlobalDof;
	    isRankDof[rankDofsNewLocalIndex[dofIt->first]] = false;

	    found = true;
	    break;
	  }
	}