ParallelDomainBase.cc 55.3 KB
Newer Older
Thomas Witkowski's avatar
Thomas Witkowski committed
1
2
#include <algorithm>

3
#include "ParallelDomainBase.h"
4
5
6
7
8
9
10
#include "ParMetisPartitioner.h"
#include "Mesh.h"
#include "Traverse.h"
#include "ElInfo.h"
#include "Element.h"
#include "MacroElement.h"
#include "PartitionElementData.h"
11
12
#include "DOFMatrix.h"
#include "DOFVector.h"
13
#include "SystemVector.h"
14
#include "VtkWriter.h"
15
#include "ElementDofIterator.h"
16
17
#include "ProblemStatBase.h"
#include "StandardProblemIteration.h"
18
#include "ElementFileWriter.h"
19
#include "Serializer.h"
20
21

#include "petscksp.h"
22
23
24

namespace AMDiS {

25
  /// PETSc KSP monitor callback. Prints the preconditioned residual norm of
  /// every 100th iteration, restricted to MPI rank 0 so the output is not
  /// duplicated once per process. Always returns 0 (success) as PETSc expects.
  PetscErrorCode myKSPMonitor(KSP ksp, PetscInt iter, PetscReal rnorm, void *)
  {
    const bool isMasterRank = (MPI::COMM_WORLD.Get_rank() == 0);

    if (isMasterRank && iter % 100 == 0)
      std::cout << "  Petsc-Iteration " << iter << ": " << rnorm << std::endl;

    return 0;
  }

33
34
35
36
37
  /// Strict weak ordering for sorting containers of DOF pointers by the DOF
  /// index they point to (not by pointer address).
  inline bool cmpDofsByValue(const DegreeOfFreedom* dof1, const DegreeOfFreedom* dof2)
  {
    return (*dof1 < *dof2);
  }

Thomas Witkowski's avatar
Thomas Witkowski committed
38
  /// Constructor: stores the problem interfaces and FE space, checks the
  /// mesh preconditions for parallelization, and creates the ParMETIS
  /// partitioner on MPI_COMM_WORLD.
  ParallelDomainBase::ParallelDomainBase(std::string name,
					 ProblemIterationInterface *iIF,
					 ProblemTimeInterface *tIF,
					 FiniteElemSpace *fe,
					 RefinementManager *refineManager)
    : iterationIF(iIF),
      timeIF(tIF),
      feSpace(fe),
      mesh(fe->getMesh()),
      refinementManager(refineManager),
      initialPartitionMesh(true),
      nRankDofs(0),
      rstart(0),
      nComponents(1)
  {
    // FIX: corrected typo "ParalleDomainBase" in the debug function name.
    FUNCNAME("ParallelDomainBase::ParallelDomainBase()");

    // The parallelization currently supports only a single DOFAdmin without
    // pre-DOFs; fail early otherwise.
    TEST_EXIT(mesh->getNumberOfDOFAdmin() == 1)
      ("Only meshes with one DOFAdmin are supported!\n");
    TEST_EXIT(mesh->getDOFAdmin(0).getNumberOfPreDOFs(0) == 0)
      ("Wrong pre dof number for DOFAdmin!\n");

    mpiRank = MPI::COMM_WORLD.Get_rank();
    mpiSize = MPI::COMM_WORLD.Get_size();
    mpiComm = MPI::COMM_WORLD;
    partitioner = new ParMetisPartitioner(mesh, &mpiComm);
  }

66
  /// Sets up the parallel domain decomposition: partitions the macro mesh,
  /// builds local/global DOF numberings and interior boundary information,
  /// strips foreign macro elements from this rank's mesh, and finally applies
  /// optional global refinements. No-op when running on a single process.
  /// NOTE(review): the call order below is essential — boundary info must be
  /// built before foreign macro elements are removed.
  void ParallelDomainBase::initParallelization(AdaptInfo *adaptInfo)
  {
    if (mpiSize <= 1)
      return;

    // Test, if the mesh is the macro mesh only! Paritioning of the mesh is supported
    // only for macro meshes, so it will not work yet if the mesh is already refined
    // in some way.
    testForMacroMesh();

    // create an initial partitioning of the mesh
    partitioner->createPartitionData();
    // set the element weights, which are 1 at the very first begin
    setElemWeights(adaptInfo);
    // and now partition the mesh
    partitionMesh(adaptInfo);   

#if (DEBUG != 0)
    // Snapshot element->DOF relations so the numbering can be validated later.
    ElementIdxToDofs elMap;
    DbgCreateElementMap(elMap);

    if (mpiRank == 0)
      writePartitioningMesh("part.vtu");
#endif

    // === Create new global and local DOF numbering. ===

    // Set of all DOFs of the rank.
    std::vector<const DegreeOfFreedom*> rankDOFs;
    // Number of DOFs in ranks partition that are owned by the rank.
    nRankDofs = 0;
    // Number of all DOFs in the macro mesh.
    int nOverallDOFs = 0;

    createLocalGlobalNumbering(rankDOFs, nRankDofs, nOverallDOFs);

    // === Create interior boundary information ===

    createInteriorBoundaryInfo(rankDOFs);

    // === Remove all macro elements that are not part of the rank partition. ===

    removeMacroElements();

#if (DEBUG != 0)
    // Verify that the numbering and boundary data survived element removal.
    DbgTestElementMap(elMap);
    DbgTestInteriorBoundary();
#endif

    // === Reset all DOFAdmins of the mesh. ===

    updateDofAdmins();

    // === Global refinements. ===

    // Number of uniform refinement steps, read from parameter "testgr".
    int globalRefinement = 0;
    GET_PARAMETER(0, "testgr", "%d", &globalRefinement);

    if (globalRefinement > 0) {
      refinementManager->globalRefine(mesh, globalRefinement);

#if (DEBUG != 0)
      elMap.clear();
      DbgCreateElementMap(elMap);
#endif

      // Refinement created new DOFs, so the local/global numbering and the
      // DOFAdmins must be rebuilt.
      updateLocalGlobalNumbering(nRankDofs, nOverallDOFs);

      updateDofAdmins();

#if (DEBUG != 0)
      DbgTestElementMap(elMap);
#endif
    }

#if (DEBUG != 0)
    DbgTestCommonDofs(true);
#endif

    // Each DOF contributes nComponents rows to the parallel system.
    nRankRows = nRankDofs * nComponents;
    nOverallRows = nOverallDOFs * nComponents;
  }

152
153

  /// Counterpart of initParallelization(); intentionally empty — no cleanup
  /// is required here at the moment.
  void ParallelDomainBase::exitParallelization(AdaptInfo *adaptInfo)
  {}
155

156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
  
  void ParallelDomainBase::updateDofAdmins()
  {
    int nAdmins = mesh->getNumberOfDOFAdmin();
    for (int i = 0; i < nAdmins; i++) {
      DOFAdmin& admin = const_cast<DOFAdmin&>(mesh->getDOFAdmin(i));
      
      for (int j = 0; j < admin.getSize(); j++)
	admin.setDOFFree(j, true);
      for (int j = 0; j < static_cast<int>(mapLocalGlobalDOFs.size()); j++)
 	admin.setDOFFree(j, false);

      admin.setUsedSize(mapLocalGlobalDOFs.size());
      admin.setUsedCount(mapLocalGlobalDOFs.size());
      admin.setFirstHole(mapLocalGlobalDOFs.size());
    }
  }

174

175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
  /// Verifies that the mesh is still a pure macro mesh (no element refined)
  /// and that there are at least as many macro elements as MPI processes,
  /// since each rank must own at least one macro element. Aborts otherwise.
  void ParallelDomainBase::testForMacroMesh()
  {
    FUNCNAME("ParallelDomainBase::testForMacroMesh()");

    int nMacroElements = 0;

    TraverseStack stack;
    ElInfo *elInfo = stack.traverseFirst(mesh, -1, Mesh::CALL_LEAF_EL);
    while (elInfo) {
      // A leaf element at level > 0 means the mesh was already refined.
      // FIX: corrected typo "does nor work" in the error message.
      TEST_EXIT(elInfo->getLevel() == 0)
	("Mesh is already refined! This does not work with parallelization!\n");
      
      nMacroElements++;

      elInfo = stack.traverseNext(elInfo);
    }

    TEST_EXIT(nMacroElements >= mpiSize)
      ("The mesh has less macro elements than number of mpi processes!\n");
  }

196
197
  /// Copies the nonzero entries of an MTL-based DOFMatrix into the global
  /// PETSc matrix (ADD_VALUES). Local row/column indices are translated to
  /// global ones via mapLocalGlobalDOFs; dispMult/dispAddRow/dispAddCol
  /// interleave the component blocks of a vector-valued system.
  void ParallelDomainBase::setDofMatrix(DOFMatrix* mat, int dispMult, 
					int dispAddRow, int dispAddCol)
  {
    FUNCNAME("ParallelDomainBase::setDofMatrix()");

    TEST_EXIT(mat)("No DOFMatrix!\n");

    using mtl::tag::row; using mtl::tag::nz; using mtl::begin; using mtl::end;
    namespace traits= mtl::traits;
    typedef DOFMatrix::base_matrix_type Matrix;

    traits::col<Matrix>::type col(mat->getBaseMatrix());
    traits::const_value<Matrix>::type value(mat->getBaseMatrix());

    typedef traits::range_generator<row, Matrix>::type cursor_type;
    typedef traits::range_generator<nz, cursor_type>::type icursor_type;

    // Per-row scratch buffers, reused across rows to avoid reallocation.
    std::vector<int> cols;
    std::vector<double> values;
    cols.reserve(300);
    values.reserve(300);

    for (cursor_type cursor = begin<row>(mat->getBaseMatrix()), 
	   cend = end<row>(mat->getBaseMatrix()); cursor != cend; ++cursor) {

      cols.clear();
      values.clear();

      // Global PETSc row index of this local row.
      int r = mapLocalGlobalDOFs[*cursor] * dispMult + dispAddRow;

      // Collect the structural nonzeros of the row; explicit zeros are skipped.
      for (icursor_type icursor = begin<nz>(cursor), 
	     icend = end<nz>(cursor); icursor != icend; ++icursor)
	if (value(*icursor) != 0.0) {
 	  cols.push_back(mapLocalGlobalDOFs[col(*icursor)] * dispMult + dispAddCol);
 	  values.push_back(value(*icursor));
	}

      // FIX: skip empty rows — taking &(cols[0]) of an empty vector is
      // undefined behaviour.
      if (!cols.empty())
	MatSetValues(petscMatrix, 1, &r, cols.size(), &(cols[0]), &(values[0]), 
		     ADD_VALUES);
    }
  }
236

237

238
239
240
  /// Accumulates the entries of a DOFVector into the PETSc right-hand-side
  /// vector (ADD_VALUES). dispMult/dispAdd select the component slot of a
  /// vector-valued system within the interleaved global numbering.
  void ParallelDomainBase::setDofVector(DOFVector<double>* vec, 
					int dispMult, int dispAdd)
  {
    DOFVector<double>::Iterator dofIt(vec, USED_DOFS);
    for (dofIt.reset(); !dofIt.end(); ++dofIt) {
      // Translate the local DOF index to the global PETSc row.
      int globalRow = mapLocalGlobalDOFs[dofIt.getDOFIndex()] * dispMult + dispAdd;
      double entry = *dofIt;

      VecSetValues(petscRhsVec, 1, &globalRow, &entry, ADD_VALUES);
    }
  }

250

251
252
  /// Scalar-problem variant: creates the (unpreallocated) parallel PETSc
  /// matrix and the rhs/solution/temporary vectors, then transfers the
  /// assembled DOFMatrix and DOFVector into them.
  void ParallelDomainBase::fillPetscMatrix(DOFMatrix *mat, DOFVector<double> *vec)
  {
    MatCreate(PETSC_COMM_WORLD, &petscMatrix);
    MatSetSizes(petscMatrix, nRankRows, nRankRows, nOverallRows, nOverallRows);
    MatSetType(petscMatrix, MATAIJ);

    // All three distributed vectors share the same layout, so create them
    // uniformly in one loop.
    Vec *vectors[3] = { &petscRhsVec, &petscSolVec, &petscTmpVec };
    for (int v = 0; v < 3; v++) {
      VecCreate(PETSC_COMM_WORLD, vectors[v]);
      VecSetSizes(*(vectors[v]), nRankRows, nOverallRows);
      VecSetType(*(vectors[v]), VECMPI);
    }

    // Transfer the matrix entries and finalize the parallel assembly.
    setDofMatrix(mat);

    MatAssemblyBegin(petscMatrix, MAT_FINAL_ASSEMBLY);
    MatAssemblyEnd(petscMatrix, MAT_FINAL_ASSEMBLY);

    // Same for the right-hand side.
    setDofVector(vec);

    VecAssemblyBegin(petscRhsVec);
    VecAssemblyEnd(petscRhsVec);
  }

  
  /// System variant: counts the diagonal-/off-diagonal-block nonzeros of every
  /// rank-owned row (exchanging counts of non-owned rows with the owning
  /// ranks), creates a properly preallocated parallel AIJ matrix, and then
  /// transfers all component matrices and rhs vectors into PETSc.
  void ParallelDomainBase::fillPetscMatrix(Matrix<DOFMatrix*> *mat, SystemVector *vec)
  {
    FUNCNAME("ParallelDomainBase::fillPetscMatrix()");

    clock_t first = clock();

    VecCreate(PETSC_COMM_WORLD, &petscRhsVec);
    VecSetSizes(petscRhsVec, nRankRows, nOverallRows);
    VecSetType(petscRhsVec, VECMPI);

    VecCreate(PETSC_COMM_WORLD, &petscSolVec);
    VecSetSizes(petscSolVec, nRankRows, nOverallRows);
    VecSetType(petscSolVec, VECMPI);

    VecCreate(PETSC_COMM_WORLD, &petscTmpVec);
    VecSetSizes(petscTmpVec, nRankRows, nOverallRows);
    VecSetType(petscTmpVec, VECMPI);

    using mtl::tag::row; using mtl::tag::nz; using mtl::begin; using mtl::end;
    namespace traits= mtl::traits;
    typedef DOFMatrix::base_matrix_type Matrix;

    // Number of nonzeros per local row in the diagonal block (d_nnz) and the
    // off-diagonal block (o_nnz) of the parallel matrix.
    // FIX: std::vector instead of variable-length arrays, which are not
    // standard C++; the constructor also zero-initializes the counters.
    std::vector<int> d_nnz(nRankRows, 0);
    std::vector<int> o_nnz(nRankRows, 0);

    // Entries of rows this rank does not own, keyed by the owning rank; they
    // are sent there so the owner can include them in its preallocation.
    std::map<int, std::vector< std::pair<int, int> > > sendMatrixEntry;

    for (int i = 0; i < nComponents; i++) {
      for (int j = 0; j < nComponents; j++) {
 	if ((*mat)[i][j]) {
	  Matrix bmat = (*mat)[i][j]->getBaseMatrix();

	  traits::col<Matrix>::type col(bmat);
	  traits::const_value<Matrix>::type value(bmat);
	  
	  typedef traits::range_generator<row, Matrix>::type cursor_type;
	  typedef traits::range_generator<nz, cursor_type>::type icursor_type;
	  
	  for (cursor_type cursor = begin<row>(bmat), 
		 cend = end<row>(bmat); cursor != cend; ++cursor) {

	    // Global (component-interleaved) row index.
	    int r = mapLocalGlobalDOFs[*cursor] * nComponents + i;

	    if (isRankDof[*cursor]) {
	      // Row is owned by this rank: shift to the local row range.
	      r -= rstart * nComponents;

#if (DEBUG != 0)    
	      if (r < 0 || r >= nRankRows) {
		std::cout << "ERROR in rank: " << mpiRank << std::endl;
		std::cout << "  Wrong r = " << r << " " << *cursor << " " 
			  << mapLocalGlobalDOFs[*cursor] << " " 
			  << nRankRows << std::endl;
		ERROR_EXIT("Should not happen!\n");
	      }
#endif
	      
	      for (icursor_type icursor = begin<nz>(cursor), 
		     icend = end<nz>(cursor); icursor != icend; ++icursor) {
		if (value(*icursor) != 0.0) {
		  int c = mapLocalGlobalDOFs[col(*icursor)] * nComponents + j;

		  // Columns inside the rank's own row range belong to the
		  // diagonal block, all others to the off-diagonal block.
		  if (c >= rstart * nComponents && 
		      c < rstart * nComponents + nRankRows)
		    d_nnz[r]++;
		  else
		    o_nnz[r]++;		  
		}    
	      }
	    } else {
	      // Row is owned by another rank: find which rank we receive this
	      // DOF from — that rank is the owner the entry must be sent to.
	      int sendToRank = -1;

	      for (RankToDofContainer::iterator it = recvDofs.begin();
		   it != recvDofs.end(); ++it) {
		for (DofContainer::iterator dofIt = it->second.begin();
		     dofIt != it->second.end(); ++dofIt) {
		  if (**dofIt == *cursor) {
		    sendToRank = it->first;
		    break;
		  }
		}

		if (sendToRank != -1)
		  break;
	      }

	      TEST_EXIT_DBG(sendToRank != -1)("Should not happen!\n");

	      for (icursor_type icursor = begin<nz>(cursor), 
		     icend = end<nz>(cursor); icursor != icend; ++icursor) {
		if (value(*icursor) != 0.0) {
		  int c = mapLocalGlobalDOFs[col(*icursor)] * nComponents + j;
		  
		  sendMatrixEntry[sendToRank].push_back(std::make_pair(r, c));
		}
	      }
	    }
	  }
	}
      }
    }

    // === Exchange the entry counts (and then the entries themselves) of ===
    // === non-owned rows with the owning ranks.                          ===

    std::vector<MPI::Request> request(sendDofs.size() + recvDofs.size());
    int requestCounter = 0;

    std::vector<int*> sendBuffers;
    sendBuffers.reserve(recvDofs.size());

    // FIX: the size values handed to Isend() must stay alive until Waitall()
    // completes; a loop-local variable was reused/destroyed before that.
    std::vector<int> sendSize(recvDofs.size());

    int i = 0;
    for (RankToDofContainer::iterator it = recvDofs.begin(); 
	 it != recvDofs.end(); ++it, i++) {
      sendSize[i] = sendMatrixEntry[it->first].size();

      request[requestCounter++] = 
	mpiComm.Isend(&(sendSize[i]), 1, MPI_INT, it->first, 0);
      
      if (sendSize[i] > 0) {
	int *sendbuf = new int[sendSize[i] * 2];
	for (int k = 0; k < sendSize[i]; k++) {
	  sendbuf[k * 2] = sendMatrixEntry[it->first][k].first;
	  sendbuf[k * 2 + 1] = sendMatrixEntry[it->first][k].second;
	}
	sendBuffers.push_back(sendbuf);
      } else {
	sendBuffers.push_back(NULL);
      }
    }

    // FIX: resize() instead of reserve() — Irecv() writes into recvSize[i],
    // which is undefined behaviour for elements beyond the vector's size.
    std::vector<int> recvSize(sendDofs.size());
    
    i = 0;
    for (RankToDofContainer::iterator it = sendDofs.begin();
	 it != sendDofs.end(); ++it)
      request[requestCounter++] = 
	mpiComm.Irecv(&(recvSize[i++]), 1, MPI_INT, it->first, 0);

    if (requestCounter > 0)
      MPI::Request::Waitall(requestCounter, &(request[0]));

    requestCounter = 0;

    i = 0;
    for (RankToDofContainer::iterator it = recvDofs.begin(); 
	 it != recvDofs.end(); ++it) {
      if (sendSize[i] > 0)
	request[requestCounter++] = 
	  mpiComm.Isend(sendBuffers[i], sendSize[i] * 2, MPI_INT, it->first, 0);

      i++;
    }

    std::vector<int*> recvBuffers;
    recvBuffers.reserve(sendDofs.size());
    
    i = 0;
    for (RankToDofContainer::iterator it = sendDofs.begin();
	 it != sendDofs.end(); ++it) {
      if (recvSize[i] > 0) {
	int *recvbuf = new int[recvSize[i] * 2];
	recvBuffers.push_back(recvbuf);

	request[requestCounter++] =
	  mpiComm.Irecv(recvbuf, recvSize[i] * 2, MPI_INT, it->first, 0);
      } else {
	recvBuffers.push_back(NULL);
      }

      i++;
    }

    if (requestCounter > 0)
      MPI::Request::Waitall(requestCounter, &(request[0]));

    for (int j = 0; j < static_cast<int>(sendBuffers.size()); j++)
      if (sendBuffers[j])
 	delete [] sendBuffers[j];

    // Add the received remote contributions to the local nnz counters.
    i = 0;
    for (RankToDofContainer::iterator it = sendDofs.begin();
	 it != sendDofs.end(); ++it) {
      if (recvSize[i] > 0) {
	for (int j = 0; j < recvSize[i]; j++) {
	  int r = recvBuffers[i][j * 2];
	  int c = recvBuffers[i][j * 2 + 1];

	  // The sender transmitted the global row; shift to local.
	  r -= rstart * nComponents;

	  TEST_EXIT_DBG(r >= 0 && r < nRankRows)("Should not happen!\n");
	  
	  if (c < rstart * nComponents ||
	      c >= rstart * nComponents + nRankRows)
	    o_nnz[r]++;
	  else
	    d_nnz[r]++;
	}

	delete [] recvBuffers[i];
      }

      i++;
    }

    // Create the parallel matrix with exact per-row preallocation.
    MatCreateMPIAIJ(PETSC_COMM_WORLD, nRankRows, nRankRows, nOverallRows, nOverallRows,
		    0, &(d_nnz[0]), 0, &(o_nnz[0]), &petscMatrix);

#if (DEBUG != 0)
    int a, b;
    MatGetOwnershipRange(petscMatrix, &a, &b);
    TEST_EXIT(a == rstart * nComponents)("Wrong matrix ownership range!\n");
    TEST_EXIT(b == rstart * nComponents + nRankRows)("Wrong matrix ownership range!\n");
#endif

    // === Transfer the component matrices and rhs vectors to PETSc. ===

    for (int i = 0; i < nComponents; i++)
      for (int j = 0; j < nComponents; j++)
	if ((*mat)[i][j])
	  setDofMatrix((*mat)[i][j], nComponents, i, j);

    MatAssemblyBegin(petscMatrix, MAT_FINAL_ASSEMBLY);
    MatAssemblyEnd(petscMatrix, MAT_FINAL_ASSEMBLY);

    for (int i = 0; i < nComponents; i++)
      setDofVector(vec->getDOFVector(i), nComponents, i);

    VecAssemblyBegin(petscRhsVec);
    VecAssemblyEnd(petscRhsVec);

    INFO(info, 8)("Fill petsc matrix needed %.5f seconds\n", TIME_USED(first, clock()));
  }


518
  /// Solves the assembled scalar system with BiCGStab/ILU, writes the local
  /// part of the solution back into the DOFVector, exchanges boundary DOF
  /// values with the neighbouring ranks, and destroys the PETSc objects.
  void ParallelDomainBase::solvePetscMatrix(DOFVector<double> *vec)
  {
    FUNCNAME("ParallelDomainBase::solvePetscMatrix()");

    KSP ksp;
    PC pc;

    KSPCreate(PETSC_COMM_WORLD, &ksp);
    KSPSetOperators(ksp, petscMatrix, petscMatrix, SAME_NONZERO_PATTERN);
    KSPGetPC(ksp, &pc);
    //PCSetType(pc, PCJACOBI);
    PCSetType(pc, PCILU);
    KSPSetTolerances(ksp, 1.e-7, PETSC_DEFAULT, PETSC_DEFAULT, PETSC_DEFAULT);
    KSPSetType(ksp, KSPBCGS);
    //KSPSetType(ksp, KSPCG);
    KSPMonitorSet(ksp, myKSPMonitor, PETSC_NULL, 0);
    KSPSolve(ksp, petscRhsVec, petscSolVec);

    // Copy the rank-owned part of the solution into the DOFVector.
    PetscScalar *vecPointer;
    VecGetArray(petscSolVec, &vecPointer);

    for (int i = 0; i < nRankDofs; i++)
      (*vec)[mapLocalToDofIndex[i]] = vecPointer[i];

    VecRestoreArray(petscSolVec, &vecPointer);

    // === Exchange the values of DOFs on interior boundaries: owners send, ===
    // === non-owners receive.                                              ===

    std::vector<double*> sendBuffers(sendDofs.size());
    std::vector<double*> recvBuffers(recvDofs.size());

    MPI::Request request[sendDofs.size() + recvDofs.size()];
    int requestCounter = 0;
    
    int i = 0;
    for (RankToDofContainer::iterator sendIt = sendDofs.begin();
	 sendIt != sendDofs.end(); ++sendIt, i++) {
      int nSendDOFs = sendIt->second.size();
      sendBuffers[i] = new double[nSendDOFs];

      for (int j = 0; j < nSendDOFs; j++)
	sendBuffers[i][j] = (*vec)[*((sendIt->second)[j])];

      request[requestCounter++] =
	mpiComm.Isend(sendBuffers[i], nSendDOFs, MPI_DOUBLE, sendIt->first, 0);
    }

    i = 0;
    for (RankToDofContainer::iterator recvIt = recvDofs.begin();
	 recvIt != recvDofs.end(); ++recvIt, i++) {
      int nRecvDOFs = recvIt->second.size();
      recvBuffers[i] = new double[nRecvDOFs];

      request[requestCounter++] =
	mpiComm.Irecv(recvBuffers[i], nRecvDOFs, MPI_DOUBLE, recvIt->first, 0);
    }

    MPI::Request::Waitall(requestCounter, request);

    // Unpack the received values; the DOF order matches the sender's.
    i = 0;
    for (RankToDofContainer::iterator recvIt = recvDofs.begin();
	 recvIt != recvDofs.end(); ++recvIt, i++) {
      for (int j = 0; j < static_cast<int>(recvIt->second.size()); j++)
	(*vec)[*(recvIt->second)[j]] = recvBuffers[i][j];

      delete [] recvBuffers[i];
    }
    
    for (int i = 0; i < static_cast<int>(sendBuffers.size()); i++)
      delete [] sendBuffers[i];

    MatDestroy(petscMatrix);
    VecDestroy(petscRhsVec);
    VecDestroy(petscSolVec);
    VecDestroy(petscTmpVec);
    // FIX: the KSP context was never destroyed (resource leak); the
    // SystemVector overload already calls KSPDestroy().
    KSPDestroy(ksp);
  }

594

595
  /// Solves the assembled multi-component system with BiCGStab, scatters the
  /// component-interleaved solution back into the SystemVector, synchronizes
  /// the interior-boundary DOFs, reports iteration count and residual norm,
  /// and destroys all PETSc objects.
  void ParallelDomainBase::solvePetscMatrix(SystemVector &vec)
  {
    FUNCNAME("ParallelDomainBase::solvePetscMatrix()");

    KSP solver;

    KSPCreate(PETSC_COMM_WORLD, &solver);
    KSPSetOperators(solver, petscMatrix, petscMatrix, SAME_NONZERO_PATTERN); 
    // rtol = 0.0, abstol = 1e-8: iterate until the absolute tolerance.
    KSPSetTolerances(solver, 0.0, 1e-8, PETSC_DEFAULT, PETSC_DEFAULT);
    KSPSetType(solver, KSPBCGS);
    KSPMonitorSet(solver, myKSPMonitor, PETSC_NULL, 0);
    // Command-line options may still override the settings above.
    KSPSetFromOptions(solver);

    KSPSolve(solver, petscRhsVec, petscSolVec);

    // The global vector interleaves the components: entry j*nComponents+i
    // is component i of local DOF j.
    PetscScalar *vecPointer;
    VecGetArray(petscSolVec, &vecPointer);

    for (int i = 0; i < nComponents; i++) {
      DOFVector<double> *dofvec = vec.getDOFVector(i);
      for (int j = 0; j < nRankDofs; j++)
	(*dofvec)[mapLocalToDofIndex[j]] = vecPointer[j * nComponents + i];      
    }

    VecRestoreArray(petscSolVec, &vecPointer);

    // Propagate the owner values of interior-boundary DOFs to all ranks.
    synchVectors(vec);

    int iterations = 0;
    KSPGetIterationNumber(solver, &iterations);
    MSG("  Number of iterations: %d\n", iterations);
    
    // Compute the true residual norm ||A x - b|| as a sanity check.
    double norm = 0.0;
    MatMult(petscMatrix, petscSolVec, petscTmpVec);
    VecAXPY(petscTmpVec, -1.0, petscRhsVec);
    VecNorm(petscTmpVec, NORM_2, &norm);
    MSG("  Residual norm: %e\n", norm);

    MatDestroy(petscMatrix);
    VecDestroy(petscRhsVec);
    VecDestroy(petscSolVec);
    VecDestroy(petscTmpVec);
    KSPDestroy(solver);
  }
  
  /// Synchronizes all components of a SystemVector on interior-boundary DOFs:
  /// the owning rank sends its values, the non-owning ranks overwrite theirs.
  /// Buffers are packed component-major (all DOFs of component 0, then
  /// component 1, ...), and unpacked in exactly the same order.
  void ParallelDomainBase::synchVectors(SystemVector &vec)
  {
    std::vector<double*> sendBuffers(sendDofs.size());
    std::vector<double*> recvBuffers(recvDofs.size());

    MPI::Request request[sendDofs.size() + recvDofs.size()];
    int requestCounter = 0;
    
    // Pack and post one send per neighbouring rank that shares DOFs we own.
    int i = 0;
    for (RankToDofContainer::iterator sendIt = sendDofs.begin();
	 sendIt != sendDofs.end(); ++sendIt, i++) {
      int nSendDOFs = sendIt->second.size();
      sendBuffers[i] = new double[nSendDOFs * nComponents];

      int counter = 0;
      for (int j = 0; j < nComponents; j++) {
	DOFVector<double> *dofvec = vec.getDOFVector(j);
	for (int k = 0; k < nSendDOFs; k++)
	  sendBuffers[i][counter++] = (*dofvec)[*((sendIt->second)[k])];
      }

      request[requestCounter++] = mpiComm.Isend(sendBuffers[i], nSendDOFs * nComponents,
						MPI_DOUBLE, sendIt->first, 0);
    }

    // Post the matching receives for DOFs owned by other ranks.
    i = 0;
    for (RankToDofContainer::iterator recvIt = recvDofs.begin();
	 recvIt != recvDofs.end(); ++recvIt, i++) {
      int nRecvDOFs = recvIt->second.size() * nComponents;
      recvBuffers[i] = new double[nRecvDOFs];

      request[requestCounter++] =
	mpiComm.Irecv(recvBuffers[i], nRecvDOFs, MPI_DOUBLE, recvIt->first, 0);
    }


    MPI::Request::Waitall(requestCounter, request);

    // Unpack in the same component-major order the sender used.
    i = 0;
    for (RankToDofContainer::iterator recvIt = recvDofs.begin();
	 recvIt != recvDofs.end(); ++recvIt, i++) {
      int nRecvDOFs = recvIt->second.size();

      int counter = 0;
      for (int j = 0; j < nComponents; j++) {
	DOFVector<double> *dofvec = vec.getDOFVector(j);
 	for (int k = 0; k < nRecvDOFs; k++)
 	  (*dofvec)[*(recvIt->second)[k]] = recvBuffers[i][counter++];
      }

      delete [] recvBuffers[i];
    }
    
    for (int i = 0; i < static_cast<int>(sendBuffers.size()); i++)
      delete [] sendBuffers[i];
  }


700
  /// Recomputes elemWeights: each macro element (identified by the level-0
  /// element of its refinement subtree) is weighted by the number of leaf
  /// elements inside the rank's partition. Returns the rank-local weight sum.
  double ParallelDomainBase::setElemWeights(AdaptInfo *adaptInfo) 
  {
    double weightSum = 0.0;
    int macroElIndex = -1;

    elemWeights.clear();

    TraverseStack stack;
    ElInfo *elInfo = stack.traverseFirst(mesh, -1,
					 Mesh::CALL_EVERY_EL_PREORDER);
    for (; elInfo; elInfo = stack.traverseNext(elInfo)) {
      Element *el = elInfo->getElement();

      // Partition membership of the current element.
      PartitionElementData *partitionData = dynamic_cast<PartitionElementData*>
	(el->getElementData(PARTITION_ED));

      if (partitionData == NULL || partitionData->getPartitionStatus() != IN)
	continue;

      // Preorder traversal: a level-0 element starts a new macro subtree,
      // and all following descendants are accounted to its index.
      if (partitionData->getLevel() == 0)
	macroElIndex = el->getIndex();

      TEST_EXIT_DBG(macroElIndex != -1)("invalid element number\n");

      if (el->isLeaf()) {
	elemWeights[macroElIndex] += 1.0;
	weightSum += 1.0;
      }
    }

    return weightSum;
  }

734

735
  /// Runs the ParMETIS partitioner: an INITIAL partitioning on the first call,
  /// an ADAPTIVE_REPART afterwards. Keeps oldPartitionVec as the element->rank
  /// map from before the call and refreshes partitionVec afterwards.
  void ParallelDomainBase::partitionMesh(AdaptInfo *adaptInfo)
  {
    if (!initialPartitionMesh) {
      // Repartitioning: remember the previous distribution first.
      oldPartitionVec = partitionVec;
      // 100.0 is the itr balance parameter handed to ParMETIS.
      partitioner->partition(&elemWeights, ADAPTIVE_REPART, 100.0);
    } else {
      // Very first call: there is no previous partitioning yet.
      initialPartitionMesh = false;
      partitioner->fillCoarsePartitionVec(&oldPartitionVec);
      partitioner->partition(&elemWeights, INITIAL);
    }

    partitioner->fillCoarsePartitionVec(&partitionVec);
  }

749

750
  /// Builds the interior (rank-to-rank) boundary description: traverses all
  /// leaf elements of the rank's partition, records every edge shared with an
  /// element of another rank, and then reorders the boundary lists so that
  /// both ranks sharing a boundary store its atomic pieces in the same order.
  void ParallelDomainBase::createInteriorBoundaryInfo(DofContainer& rankDOFs)
  {
    FUNCNAME("ParallelDomainBase::createInteriorBoundaryInfo()");

    // === First, create all the information about the interior boundaries. ===

    TraverseStack stack;
    ElInfo *elInfo = stack.traverseFirst(mesh, -1, 
					 Mesh::CALL_LEAF_EL | Mesh::FILL_NEIGH);
    while (elInfo) {
      Element *element = elInfo->getElement();

      PartitionElementData *partitionData = 
	dynamic_cast<PartitionElementData*>(element->getElementData(PARTITION_ED));   

      if (partitionData->getPartitionStatus() == IN) {
	// Check all three edges of the (2D triangle) element for a
	// neighbour outside this rank's partition.
	for (int i = 0; i < 3; i++) {
	  if (!elInfo->getNeighbour(i))
	    continue;

	  PartitionElementData *neighbourPartitionData =
	    dynamic_cast<PartitionElementData*>(elInfo->getNeighbour(i)->getElementData(PARTITION_ED));

 	  if (neighbourPartitionData->getPartitionStatus() == OUT) {
	    // We have found an element that is at an interior boundary. 

	    int otherElementRank = partitionVec[elInfo->getNeighbour(i)->getIndex()];
	    // The rank with the higher number "owns" the boundary; its
	    // pieces go into myIntBoundary, the others into otherIntBoundary.
	    bool ranksBoundary = (mpiRank > otherElementRank);

	    AtomicBoundary& bound = 
	      (ranksBoundary ?
	       myIntBoundary.getNewAtomicBoundary(otherElementRank) :
	       otherIntBoundary.getNewAtomicBoundary(otherElementRank));

	    bound.rankObject.el = element;
	    bound.rankObject.elIndex = element->getIndex();
	    bound.rankObject.subObjAtBoundary = EDGE;
	    bound.rankObject.ithObjAtBoundary = i;
	    // Do not set a pointer to the element, because it will be deleted from
	    // mesh after partitioning and the pointer would become invalid.
	    bound.neighbourObject.el = NULL;
	    bound.neighbourObject.elIndex = elInfo->getNeighbour(i)->getIndex();
	    bound.neighbourObject.subObjAtBoundary = EDGE;
	    bound.neighbourObject.ithObjAtBoundary = elInfo->getSideOfNeighbour(i);

	    // i == 2  =>  getSideOfNeighbour(i) == 2
	    TEST_EXIT_DBG(i != 2 || elInfo->getSideOfNeighbour(i) == 2)
	      ("Should not happen!\n");
 	  }
	}
      }

      elInfo = stack.traverseNext(elInfo);
    }

    // === Once we have this information, we must care about their order. Eventually ===
    // === all the boundaries have to be in the same order on both ranks that share  ===
    // === the boundary.                                                             ===

    std::vector<int*> sendBuffers, recvBuffers;

    MPI::Request request[myIntBoundary.boundary.size() + 
			 otherIntBoundary.boundary.size()];
    int requestCounter = 0;

    // Owning ranks send their (element index, edge index) order ...
    for (RankToBoundMap::iterator rankIt = myIntBoundary.boundary.begin();
	 rankIt != myIntBoundary.boundary.end(); ++rankIt) {    
      int nSendInt = rankIt->second.size();
      int* buffer = new int[nSendInt * 2];
      for (int i = 0; i < nSendInt; i++) {
	buffer[i * 2] = (rankIt->second)[i].rankObject.elIndex;
	buffer[i * 2 + 1] = (rankIt->second)[i].rankObject.ithObjAtBoundary;
      }
      sendBuffers.push_back(buffer);
      
      request[requestCounter++] =
	mpiComm.Isend(buffer, nSendInt * 2, MPI_INT, rankIt->first, 0);
    }

    // ... and the non-owning side receives it.
    for (RankToBoundMap::iterator rankIt = otherIntBoundary.boundary.begin();
	 rankIt != otherIntBoundary.boundary.end(); ++rankIt) {
      int nRecvInt = rankIt->second.size();
      int *buffer = new int[nRecvInt * 2];
      recvBuffers.push_back(buffer);

      request[requestCounter++] = 
	mpiComm.Irecv(buffer, nRecvInt * 2, MPI_INT, rankIt->first, 0);
    }

    MPI::Request::Waitall(requestCounter, request);

    int i = 0;
    for (RankToBoundMap::iterator rankIt = otherIntBoundary.boundary.begin();
	 rankIt != otherIntBoundary.boundary.end(); ++rankIt) {

      // === We have received from rank "rankIt->first" the ordered list of element ===
      // === indices. We now have to sort the corresponding list in this rank to    ===
      // === get the same order.                                                    ===
      
      for (int j = 0; j < static_cast<int>(rankIt->second.size()); j++) {
	// If the expected object is not at place, search for it.
	if ((rankIt->second)[j].neighbourObject.elIndex != recvBuffers[i][j * 2] ||
	    (rankIt->second)[j].neighbourObject.ithObjAtBoundary != recvBuffers[i][j * 2 + 1]) {
	  int k = j + 1;
	  for (; k < static_cast<int>(rankIt->second.size()); k++)
	    if ((rankIt->second)[k].neighbourObject.elIndex == recvBuffers[i][j * 2] && 
		(rankIt->second)[k].neighbourObject.ithObjAtBoundary == recvBuffers[i][j * 2 + 1])
	      break;

	  // The element must always be found, because the list is just in another order.
	  TEST_EXIT_DBG(k < static_cast<int>(rankIt->second.size()))
	    ("Should never happen!\n");

	  // Swap the current with the found element.
	  AtomicBoundary tmpBound = (rankIt->second)[k];
	  (rankIt->second)[k] = (rankIt->second)[j];
	  (rankIt->second)[j] = tmpBound;	
	}
      }

      delete [] recvBuffers[i++];
    }

    for (int i = 0; i < static_cast<int>(sendBuffers.size()); i++)
      delete [] sendBuffers[i];
  }


881
  void ParallelDomainBase::removeMacroElements()
882
883
884
885
886
887
888
889
890
891
892
893
  {
    std::vector<MacroElement*> macrosToRemove;
    for (std::deque<MacroElement*>::iterator it = mesh->firstMacroElement();
	 it != mesh->endOfMacroElements();
	 ++it) {
      PartitionElementData *partitionData = 
	dynamic_cast<PartitionElementData*>
	((*it)->getElement()->getElementData(PARTITION_ED));
      if (partitionData->getPartitionStatus() != IN)
	macrosToRemove.push_back(*it);
    }

Thomas Witkowski's avatar
Thomas Witkowski committed
894
    mesh->removeMacroElements(macrosToRemove, feSpace);
895
896
897
  }


Thomas Witkowski's avatar
Thomas Witkowski committed
898
  void ParallelDomainBase::createLocalGlobalNumbering(DofContainer& rankDOFs,
899
						      int& nRankDofs, 
Thomas Witkowski's avatar
Thomas Witkowski committed
900
						      int& nOverallDOFs)
901
  {
902
    FUNCNAME("ParallelDomainBase::createLocalGlobalNumbering()");
903
904

    // === Get rank information about DOFs. ===
905
906
907

    // Stores to each DOF pointer the set of ranks the DOF is part of.
    std::map<const DegreeOfFreedom*, std::set<int> > partitionDOFs;
Thomas Witkowski's avatar
n    
Thomas Witkowski committed
908
    DofContainer rankAllDofs;
909
    DofToRank boundaryDofs;
910

911
    createDOFMemberInfo(partitionDOFs, rankDOFs, rankAllDofs, boundaryDofs, vertexDof);
912

913
    nRankDofs = rankDOFs.size();
914
915
    nOverallDOFs = partitionDOFs.size();

916

917
    // === Get starting position for global rank dof ordering. ====
918

919
    rstart = 0;
920
921
    mpiComm.Scan(&nRankDofs, &rstart, 1, MPI_INT, MPI_SUM);
    rstart -= nRankDofs;
922

923
924
925
926
    
    // === Create for all dofs in rank new indices. The new index must start at ===
    // === index 0, must be continues and have the same order as the indices    ===
    // === had before.                                                          ===
Thomas Witkowski's avatar
n    
Thomas Witkowski committed
927

Thomas Witkowski's avatar
Thomas Witkowski committed
928

929
930
931
    // Do not change the indices now, but create a new indexing a store it here.
    DofIndexMap rankDofsNewLocalIndex;
    isRankDof.clear();
Thomas Witkowski's avatar
n    
Thomas Witkowski committed
932
933
934
    int i = 0;
    for (DofContainer::iterator dofIt = rankAllDofs.begin();
	 dofIt != rankAllDofs.end(); ++dofIt) {
935
936
937
      rankDofsNewLocalIndex[*dofIt] = i;
      // First, we set all dofs in ranks partition to be owend by the rank. Later,
      // the dofs in ranks partition that are owned by other rank are set to false.
Thomas Witkowski's avatar
n    
Thomas Witkowski committed
938
      isRankDof[i] = true;
939
      i++;
Thomas Witkowski's avatar
n    
Thomas Witkowski committed
940
941
    }

942
943
944

    // === Create for all rank owned dofs a new global indexing.                ===

945
946
    // Stores for dofs in rank a new global index.
    DofIndexMap rankDofsNewGlobalIndex;
947
948
949
    // Stores for all rank owned dofs a continues local index.
    DofIndexMap rankOwnedDofsNewLocalIndex;

Thomas Witkowski's avatar
n    
Thomas Witkowski committed
950
951
952
    i = 0;
    for (DofContainer::iterator dofIt = rankDOFs.begin();
	 dofIt != rankDOFs.end(); ++dofIt) {
953
      rankDofsNewGlobalIndex[*dofIt] = i + rstart;
954
      rankOwnedDofsNewLocalIndex[*dofIt] = i;
Thomas Witkowski's avatar
n    
Thomas Witkowski committed
955
956
957
      i++;
    }

Thomas Witkowski's avatar
Thomas Witkowski committed
958
 
959
960
961
962
963
    // === Create information which dof indices must be send and which must  ===
    // === be received.                                                      ===

    // Stores to each rank a map from DOF pointers (which are all owned by the rank
    // and lie on an interior boundary) to new global DOF indices.
964
    std::map<int, DofIndexMap > sendNewDofs;
965

966
967
968
969
    // Maps to each rank the number of new DOF indices this rank will receive from.
    // All these DOFs are at an interior boundary on this rank, but are owned by
    // another rank.
    std::map<int, int> recvNewDofs;
970

971
    for (DofToRank::iterator it = boundaryDofs.begin(); it != boundaryDofs.end(); ++it) {
972
973
974
975
976
977
978
979

      if (it->second == mpiRank) {
	// If the boundary dof is a rank dof, it must be send to other ranks.

	// Search for all ranks that have this dof too.
	for (std::set<int>::iterator itRanks = partitionDOFs[it->first].begin();
	     itRanks != partitionDOFs[it->first].end();
	     ++itRanks) {
Thomas Witkowski's avatar
n    
Thomas Witkowski committed
980
	  if (*itRanks != mpiRank) {
981
	    TEST_EXIT_DBG(rankDofsNewGlobalIndex.count(it->first) == 1)
Thomas Witkowski's avatar
n    
Thomas Witkowski committed
982
983
	      ("DOF Key not found!\n");

984
	    sendNewDofs[*itRanks][it->first] = rankDofsNewGlobalIndex[it->first];
Thomas Witkowski's avatar
n    
Thomas Witkowski committed
985
	  }
986
987
	}
      } else {
Thomas Witkowski's avatar
Thomas Witkowski committed
988
989
	// If the boundary dof is not a rank dof, its new dof index (and later
	// also the dof values) must be received from another rank.
990
991
992
993
	if (recvNewDofs.find(it->second) == recvNewDofs.end()) 
	  recvNewDofs[it->second] = 1;
	else
	  recvNewDofs[it->second]++;
994
995
996
      }
    }

Thomas Witkowski's avatar
n    
Thomas Witkowski committed
997

998
    // === Send and receive the dof indices at boundary. ===
999

Thomas Witkowski's avatar
Thomas Witkowski committed
1000
    std::vector<int*> sendBuffers(sendNewDofs.size()), recvBuffers(recvNewDofs.size());
1001
1002
1003

    sendDofs.clear();
    recvDofs.clear();
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019

#if 0
    StlMpi stlMpi(mpiComm);
    stlMpi.prepareCommunication(false);
    std::map<

    for (std::map<int, DofIndexMap >::iterator sendIt = sendNewDofs.begin();
	 sendIt != sendNewDofs.end(); ++sendIt, i++)
      stlMpi.send(it->second, it->first);

    for (std::map<int, int>::iterator recvIt = recvNewDofs.begin();
	 recvIt != recvNewDofs.end(); ++recvIt, i++)
      stlMpi.recv(it->first, it->second * 2); 

    stlMpi.startCommunication();
#endif
1020
    
Thomas Witkowski's avatar
Thomas Witkowski committed
1021
1022
1023
    MPI::Request request[sendNewDofs.size() + recvNewDofs.size()];
    int requestCounter = 0;

Thomas Witkowski's avatar
n    
Thomas Witkowski committed
1024
    i = 0;
1025
1026
    for (std::map<int, DofIndexMap >::iterator sendIt = sendNewDofs.begin();
	 sendIt != sendNewDofs.end(); ++sendIt, i++) {
1027
1028
      int nSendDofs = sendIt->second.size() * 2;
      sendBuffers[i] = new int[nSendDofs];
1029

1030
      int c = 0;
1031
1032
      for (DofIndexMap::iterator dofIt = sendIt->second.begin();
	   dofIt != sendIt->second.end(); ++dofIt) {
Thomas Witkowski's avatar
Thomas Witkowski committed
1033
	sendBuffers[i][c++] = *(dofIt->first);
1034
	sendBuffers[i][c++] = dofIt->second;
1035

1036
	sendDofs[sendIt->first].push_back(dofIt->first);
1037
1038
      }

Thomas Witkowski's avatar
Thomas Witkowski committed
1039
1040
      request[requestCounter++] =
	mpiComm.Isend(sendBuffers[i], nSendDofs, MPI_INT, sendIt->first, 0);
1041
1042
1043
    }

    i = 0;
1044
    for (std::map<int, int>::iterator recvIt = recvNewDofs.begin();
1045
	 recvIt != recvNewDofs.end(); ++recvIt, i++) {
1046
1047
      int nRecvDOFs = recvIt->second * 2;
      recvBuffers[i] = new int[nRecvDOFs];
1048

Thomas Witkowski's avatar
Thomas Witkowski committed
1049
1050
      request[requestCounter++] =
	mpiComm.Irecv(recvBuffers[i], nRecvDOFs, MPI_INT, recvIt->first, 0);
1051
1052
1053
    }


Thomas Witkowski's avatar
Thomas Witkowski committed
1054
    MPI::Request::Waitall(requestCounter, request);
1055
1056

    
1057
    // === Delete send buffers. ===
1058

Thomas Witkowski's avatar
Thomas Witkowski committed
1059
1060
    for (int j = 0; j < static_cast<int>(sendBuffers.size()); j++)
      delete [] sendBuffers[j];
1061
1062


1063
    // === Change dof indices at boundary from other ranks. ===
1064

Thomas Witkowski's avatar
Thomas Witkowski committed
1065
1066
    // Within this small data structure we track which dof index was already changed.
    // This is used to avoid the following situation: Assume, there are two dof indices
1067
    // a and b in boundaryDofs. Then we have to change index a to b and b to c. When
Thomas Witkowski's avatar
Thomas Witkowski committed
1068
1069
1070
1071
1072
    // the second rule applies, we have to avoid that not the first b, resulted from
    // changing a to b, is set to c, but the second one. Therefore, after the first
    // rule was applied, the dof pointer is set to false in this data structure and 
    // is not allowed to be changed anymore.
    std::map<const DegreeOfFreedom*, bool> dofChanged;
1073
    for (DofToRank::iterator dofIt = boundaryDofs.begin(); dofIt != boundaryDofs.end();
Thomas Witkowski's avatar
Thomas Witkowski committed
1074
	 ++dofIt)
Thomas Witkowski's avatar
Thomas Witkowski committed
1075
1076
      dofChanged[dofIt->first] = false;

1077
    i = 0;
1078
    for (std::map<int, int>::iterator recvIt = recvNewDofs.begin();
1079
1080
1081
	 recvIt != recvNewDofs.end();
	 ++recvIt, i++) {

1082
      for (int j = 0; j < recvIt->second; j++) {
1083

1084
1085
	DegreeOfFreedom oldDof = recvBuffers[i][j * 2];
	DegreeOfFreedom newGlobalDof = recvBuffers[i][j * 2 + 1];
1086

1087
	bool found = false;
1088

Thomas Witkowski's avatar
Thomas Witkowski committed
1089
1090
	// Iterate over all boundary dofs to find the dof, which index we have to change.