#include <algorithm>

#include "ParallelDomainBase.h"
#include "ParMetisPartitioner.h"
#include "Mesh.h"
#include "Traverse.h"
#include "ElInfo.h"
#include "Element.h"
#include "MacroElement.h"
#include "PartitionElementData.h"
#include "DOFMatrix.h"
#include "DOFVector.h"
#include "SystemVector.h"
#include "VtkWriter.h"
#include "ElementDofIterator.h"
#include "ProblemStatBase.h"
#include "StandardProblemIteration.h"
#include "ElementFileWriter.h"

#include "petscksp.h"

namespace AMDiS {

  PetscErrorCode myKSPMonitor(KSP ksp, PetscInt iter, PetscReal rnorm, void *)
  {
    if (iter % 100 == 0 && MPI::COMM_WORLD.Get_rank() == 0)
      std::cout << "  Petsc-Iteration " << iter << ": " << rnorm << std::endl;

    return 0;
  }

  inline bool cmpDofsByValue(const DegreeOfFreedom* dof1, const DegreeOfFreedom* dof2)
  {
    return (*dof1 < *dof2);
  }

  ParallelDomainBase::ParallelDomainBase(std::string name,
					 ProblemIterationInterface *iIF,
					 ProblemTimeInterface *tIF,
					 FiniteElemSpace *fe,
					 RefinementManager *refineManager)
    : iterationIF(iIF),
      timeIF(tIF),
      feSpace(fe),
      mesh(fe->getMesh()),
      refinementManager(refineManager),
      initialPartitionMesh(true),
      nRankDOFs(0),
      rstart(0),
      nComponents(1)
  {
    FUNCNAME("ParallelDomainBase::ParallelDomainBase()");

    TEST_EXIT(mesh->getNumberOfDOFAdmin() == 1)
      ("Only meshes with one DOFAdmin are supported!\n");
    TEST_EXIT(mesh->getDOFAdmin(0).getNumberOfPreDOFs(0) == 0)
      ("Wrong pre dof number for DOFAdmin!\n");

    mpiRank = MPI::COMM_WORLD.Get_rank();
    mpiSize = MPI::COMM_WORLD.Get_size();
    mpiComm = MPI::COMM_WORLD;
    partitioner = new ParMetisPartitioner(mesh, &mpiComm);
  }

  void ParallelDomainBase::initParallelization(AdaptInfo *adaptInfo)
  {
    if (mpiSize <= 1)
      return;

    // Test whether the mesh consists of macro elements only. Partitioning of the
    // mesh is supported only for macro meshes, so it does not yet work if the
    // mesh has already been refined in some way.
    testForMacroMesh();

    // create an initial partitioning of the mesh
    partitioner->createPartitionData();
    // set the element weights, which are all 1 at the very beginning
    setElemWeights(adaptInfo);
    // and now partition the mesh
    partitionMesh(adaptInfo);

#if (DEBUG != 0)
    ElementIdxToDofs elMap;
    DbgCreateElementMap(elMap);
#endif

    // === Create new global and local DOF numbering. ===

    // Set of all DOFs of the rank.
    std::vector<const DegreeOfFreedom*> rankDOFs;
    // Number of DOFs in the rank's partition that are owned by the rank.
    nRankDOFs = 0;
    // Number of all DOFs in the macro mesh.
    int nOverallDOFs = 0;

    createLocalGlobalNumbering(rankDOFs, nRankDOFs, nOverallDOFs);

    // === Create interior boundary information. ===

    createInteriorBoundaryInfo(rankDOFs);

    // === Remove all macro elements that are not part of the rank partition. ===

    removeMacroElements();

#if (DEBUG != 0)
    DbgTestElementMap(elMap);
    DbgTestInteriorBoundary();
#endif

    // === Reset all DOFAdmins of the mesh. ===

    updateDofAdmins();

    // === Global refinements. ===

    int globalRefinement = 0;
    GET_PARAMETER(0, "testgr", "%d", &globalRefinement);

    if (globalRefinement > 0) {
      refinementManager->globalRefine(mesh, globalRefinement);

#if (DEBUG != 0)
      elMap.clear();
      DbgCreateElementMap(elMap);
#endif

      updateLocalGlobalNumbering(nRankDOFs, nOverallDOFs);

      updateDofAdmins();

#if (DEBUG != 0)
      DbgTestElementMap(elMap);
#endif
    }

#if (DEBUG != 0)
    DbgTestCommonDofs(true);
#endif

    // === Create PETSc vectors. ===

    nRankRows = nRankDOFs * nComponents;
    nOverallRows = nOverallDOFs * nComponents;

    VecCreate(PETSC_COMM_WORLD, &petscRhsVec);
    VecSetSizes(petscRhsVec, nRankRows, nOverallRows);
    VecSetType(petscRhsVec, VECMPI);

    VecCreate(PETSC_COMM_WORLD, &petscSolVec);
    VecSetSizes(petscSolVec, nRankRows, nOverallRows);
    VecSetType(petscSolVec, VECMPI);

    VecCreate(PETSC_COMM_WORLD, &petscTmpVec);
    VecSetSizes(petscTmpVec, nRankRows, nOverallRows);
    VecSetType(petscTmpVec, VECMPI);
  }

  void ParallelDomainBase::exitParallelization(AdaptInfo *adaptInfo)
  {
    VecDestroy(petscRhsVec);
    VecDestroy(petscSolVec);
    VecDestroy(petscTmpVec);
  }

  
  void ParallelDomainBase::updateDofAdmins()
  {
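    // Resets every DOFAdmin of the mesh after the DOFs of the rank's partition
    // have been renumbered: exactly the local DOF indices
    // 0 .. mapLocalGlobalDOFs.size() - 1 are marked as used, all others as free.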
    int nAdmins = mesh->getNumberOfDOFAdmin();
    for (int i = 0; i < nAdmins; i++) {
      DOFAdmin& admin = const_cast<DOFAdmin&>(mesh->getDOFAdmin(i));
      
      for (int j = 0; j < admin.getSize(); j++)
	admin.setDOFFree(j, true);
      for (int j = 0; j < static_cast<int>(mapLocalGlobalDOFs.size()); j++)
 	admin.setDOFFree(j, false);

      admin.setUsedSize(mapLocalGlobalDOFs.size());
      admin.setUsedCount(mapLocalGlobalDOFs.size());
      admin.setFirstHole(mapLocalGlobalDOFs.size());
    }
  }

  void ParallelDomainBase::testForMacroMesh()
  {
    FUNCNAME("ParallelDomainBase::testForMacroMesh()");

    int nMacroElements = 0;

    TraverseStack stack;
    ElInfo *elInfo = stack.traverseFirst(mesh, -1, Mesh::CALL_LEAF_EL);
    while (elInfo) {
      TEST_EXIT(elInfo->getLevel() == 0)
	("Mesh is already refined! This does not work with parallelization!\n");
      
      nMacroElements++;

      elInfo = stack.traverseNext(elInfo);
    }

    TEST_EXIT(nMacroElements >= mpiSize)
      ("The mesh has fewer macro elements than the number of MPI processes!\n");
  }

  void ParallelDomainBase::setDofMatrix(DOFMatrix* mat, int dispMult,
					int dispAddRow, int dispAddCol)
  {
    FUNCNAME("ParallelDomainBase::setDofMatrix()");

    TEST_EXIT(mat)("No DOFMatrix!\n");
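
    // A local row index *cursor is translated into a global PETSc row via
    // mapLocalGlobalDOFs[*cursor] * dispMult + dispAddRow (and analogously for
    // the columns). For the scalar call setDofMatrix(mat) the defaults are
    // presumably dispMult = 1 and dispAddRow = dispAddCol = 0 (declared in the
    // header, not shown here), which leaves the global DOF index unchanged. For
    // a system, fillPetscMatrix calls setDofMatrix((*mat)[i][j], nComponents, i, j),
    // which interleaves the component blocks: e.g. global DOF 7, component 1 of a
    // two-component system ends up in row 7 * 2 + 1 = 15.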

    using mtl::tag::row; using mtl::tag::nz; using mtl::begin; using mtl::end;
    namespace traits = mtl::traits;
    typedef DOFMatrix::base_matrix_type Matrix;

    traits::col<Matrix>::type col(mat->getBaseMatrix());
    traits::const_value<Matrix>::type value(mat->getBaseMatrix());

    typedef traits::range_generator<row, Matrix>::type cursor_type;
    typedef traits::range_generator<nz, cursor_type>::type icursor_type;

    std::vector<int> cols;
    std::vector<double> values;
    cols.reserve(300);
    values.reserve(300);

    for (cursor_type cursor = begin<row>(mat->getBaseMatrix()),
	   cend = end<row>(mat->getBaseMatrix()); cursor != cend; ++cursor) {

      cols.clear();
      values.clear();

      int r = mapLocalGlobalDOFs[*cursor] * dispMult + dispAddRow;

      for (icursor_type icursor = begin<nz>(cursor),
	     icend = end<nz>(cursor); icursor != icend; ++icursor)
	if (value(*icursor) != 0.0) {
	  cols.push_back(mapLocalGlobalDOFs[col(*icursor)] * dispMult + dispAddCol);
	  values.push_back(value(*icursor));
	}

      MatSetValues(petscMatrix, 1, &r, cols.size(), &(cols[0]), &(values[0]), ADD_VALUES);
    }
  }

  void ParallelDomainBase::setDofVector(DOFVector<double>* vec,
					int dispMult, int dispAdd)
  {
    DOFVector<double>::Iterator dofIt(vec, USED_DOFS);
    for (dofIt.reset(); !dofIt.end(); ++dofIt) {
      int index = mapLocalGlobalDOFs[dofIt.getDOFIndex()] * dispMult + dispAdd;
      double value = *dofIt;

      VecSetValues(petscRhsVec, 1, &index, &value, ADD_VALUES);
    }
  }

  void ParallelDomainBase::fillPetscMatrix(DOFMatrix *mat, DOFVector<double> *vec)
  {
    MatCreate(PETSC_COMM_WORLD, &petscMatrix);
    MatSetSizes(petscMatrix, nRankRows, nRankRows, nOverallRows, nOverallRows);
    MatSetType(petscMatrix, MATAIJ);

    setDofMatrix(mat);

    MatAssemblyBegin(petscMatrix, MAT_FINAL_ASSEMBLY);
    MatAssemblyEnd(petscMatrix, MAT_FINAL_ASSEMBLY);

    setDofVector(vec);

    VecAssemblyBegin(petscRhsVec);
    VecAssemblyEnd(petscRhsVec);
  }

  
  void ParallelDomainBase::fillPetscMatrix(Matrix<DOFMatrix*> *mat, SystemVector *vec)
  {
    FUNCNAME("ParallelDomainBase::fillPetscMatrix()");

    clock_t first = clock();

    using mtl::tag::row; using mtl::tag::nz; using mtl::begin; using mtl::end;
    namespace traits = mtl::traits;
    typedef DOFMatrix::base_matrix_type Matrix;

    int d_nnz[nRankRows];
    int o_nnz[nRankRows];

    std::map<int, std::vector< std::pair<int, int> > > sendMatrixEntry;

    for (int i = 0; i < nRankRows; i++) {
      d_nnz[i] = 0;
      o_nnz[i] = 0;
    }
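
    // d_nnz[r] and o_nnz[r] count, for local row r, the non-zero entries in the
    // "diagonal" block (columns owned by this rank, i.e. in the range
    // [rstart * nComponents, rstart * nComponents + nRankRows)) and in the
    // "off-diagonal" block (all remaining columns). Both arrays are passed to
    // MatCreateMPIAIJ below to preallocate the distributed matrix. Rows of the
    // rank's partition that are owned by another rank cannot be counted here;
    // their (row, column) pairs are collected in sendMatrixEntry, keyed by the
    // owning rank, and sent to that rank further down.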

    for (int i = 0; i < nComponents; i++) {
      for (int j = 0; j < nComponents; j++) {
	if ((*mat)[i][j]) {
	  Matrix bmat = (*mat)[i][j]->getBaseMatrix();

	  traits::col<Matrix>::type col(bmat);
	  traits::const_value<Matrix>::type value(bmat);

	  typedef traits::range_generator<row, Matrix>::type cursor_type;
	  typedef traits::range_generator<nz, cursor_type>::type icursor_type;

	  for (cursor_type cursor = begin<row>(bmat),
		 cend = end<row>(bmat); cursor != cend; ++cursor) {

	    int r = mapLocalGlobalDOFs[*cursor] * nComponents + i;

	    if (isRankDof[*cursor]) {
	      r -= rstart * nComponents;

	      TEST_EXIT_DBG(r >= 0 && r < nRankRows)("Should not happen!\n");

	      for (icursor_type icursor = begin<nz>(cursor), 
		     icend = end<nz>(cursor); icursor != icend; ++icursor) {
		if (value(*icursor) != 0.0) {
		  int c = mapLocalGlobalDOFs[col(*icursor)] * nComponents + j;
		  
		  if (c >= rstart * nComponents && 
		      c < rstart * nComponents + nRankRows)
		    d_nnz[r]++;
		  else
		    o_nnz[r]++;		  
		}    
	      }
	    } else {
	      int sendToRank = -1;

	      for (RankToDofContainer::iterator it = recvDofs.begin();
		   it != recvDofs.end(); ++it) {
		for (DofContainer::iterator dofIt = it->second.begin();
		     dofIt != it->second.end(); ++dofIt) {
		  if (**dofIt == *cursor) {
		    sendToRank = it->first;
		    break;
		  }
		}

		if (sendToRank != -1)
		  break;
	      }

	      TEST_EXIT_DBG(sendToRank != -1)("Should not happen!\n");

	      for (icursor_type icursor = begin<nz>(cursor), 
		     icend = end<nz>(cursor); icursor != icend; ++icursor) {
		if (value(*icursor) != 0.0) {
		  int c = mapLocalGlobalDOFs[col(*icursor)] * nComponents + j;
		  
		  sendMatrixEntry[sendToRank].push_back(std::make_pair(r, c));
		}
	      }
	    }
	  }
	}
      }
    }

    MPI::Request request[sendDofs.size() + recvDofs.size()];
    int requestCounter = 0;

    std::vector<int*> sendBuffers;
    sendBuffers.reserve(recvDofs.size());

    for (RankToDofContainer::iterator it = recvDofs.begin(); 
	 it != recvDofs.end(); ++it) {
      int nSend = sendMatrixEntry[it->first].size();
      request[requestCounter++] = mpiComm.Isend(&nSend, 1, MPI_INT, it->first, 0);
      
      if (nSend > 0) {
	int *sendbuf = new int[nSend * 2];
	for (int i = 0; i < nSend; i++) {
	  sendbuf[i * 2] = sendMatrixEntry[it->first][i].first;
	  sendbuf[i * 2 + 1] = sendMatrixEntry[it->first][i].second;
	}
	sendBuffers.push_back(sendbuf);
      } else {
	sendBuffers.push_back(NULL);
      }
    }

    std::vector<int> recvSize;
    recvSize.resize(sendDofs.size());
    
    int i = 0;
    for (RankToDofContainer::iterator it = sendDofs.begin();
	 it != sendDofs.end(); ++it)
      request[requestCounter++] = 
	mpiComm.Irecv(&(recvSize[i++]), 1, MPI_INT, it->first, 0);

    MPI::Request::Waitall(requestCounter, request);

    requestCounter = 0;

    i = 0;
    for (RankToDofContainer::iterator it = recvDofs.begin(); 
	 it != recvDofs.end(); ++it) {
      int nSend = sendMatrixEntry[it->first].size();

      if (nSend > 0)
	request[requestCounter++] = 
	  mpiComm.Isend(sendBuffers[i], nSend * 2, MPI_INT, it->first, 0);

      i++;
    }

    std::vector<int*> recvBuffers;
    recvBuffers.reserve(sendDofs.size());
    
    i = 0;
    for (RankToDofContainer::iterator it = sendDofs.begin();
	 it != sendDofs.end(); ++it) {
      if (recvSize[i] > 0) {
	int *recvbuf = new int[recvSize[i] * 2];
	recvBuffers.push_back(recvbuf);

	request[requestCounter++] =
	  mpiComm.Irecv(recvbuf, recvSize[i] * 2, MPI_INT, it->first, 0);
      } else {
	recvBuffers.push_back(NULL);
      }

      i++;
    }

    MPI::Request::Waitall(requestCounter, request);
			
    for (int j = 0; j < static_cast<int>(sendBuffers.size()); j++)
      if (sendBuffers[j])
	delete [] sendBuffers[j];

    i = 0;
    for (RankToDofContainer::iterator it = sendDofs.begin();
	 it != sendDofs.end(); ++it) {
      if (recvSize[i] > 0) {
	for (int j = 0; j < recvSize[i]; j++) {
	  int r = recvBuffers[i][j * 2];
	  int c = recvBuffers[i][j * 2 + 1];
	  
	  r -= rstart * nComponents;
	  
	  TEST_EXIT_DBG(r >= 0 && r < nRankRows)("Should not happen!\n");
	  
	  if (c < rstart * nComponents ||
	      c >= rstart * nComponents + nRankRows)
	    o_nnz[r]++;
	}

	delete [] recvBuffers[i];
      }
    }       	

    INFO(info, 8)("Fill petsc matrix 1 needed %.5f seconds\n", TIME_USED(first, clock()));

    MatCreateMPIAIJ(PETSC_COMM_WORLD, nRankRows, nRankRows, nOverallRows, nOverallRows,
		    0, d_nnz, 0, o_nnz, &petscMatrix);

    INFO(info, 8)("Fill petsc matrix 2 needed %.5f seconds\n", TIME_USED(first, clock()));

#if (DEBUG != 0)
    int a, b;
    MatGetOwnershipRange(petscMatrix, &a, &b);
    TEST_EXIT(a == rstart * nComponents)("Wrong matrix ownership range!\n");
    TEST_EXIT(b == rstart * nComponents + nRankRows)("Wrong matrix ownership range!\n");
#endif

    using mtl::tag::major; using mtl::tag::nz; using mtl::begin; using mtl::end;
    namespace traits = mtl::traits;
    typedef DOFMatrix::base_matrix_type Matrix;

    for (int i = 0; i < nComponents; i++)
      for (int j = 0; j < nComponents; j++)
	if ((*mat)[i][j])
	  setDofMatrix((*mat)[i][j], nComponents, i, j);

    INFO(info, 8)("Fill petsc matrix 3 needed %.5f seconds\n", TIME_USED(first, clock()));

    MatAssemblyBegin(petscMatrix, MAT_FINAL_ASSEMBLY);
    MatAssemblyEnd(petscMatrix, MAT_FINAL_ASSEMBLY);

    INFO(info, 8)("Fill petsc matrix 4 needed %.5f seconds\n", TIME_USED(first, clock()));

    for (int i = 0; i < nComponents; i++)
      setDofVector(vec->getDOFVector(i), nComponents, i);

    VecAssemblyBegin(petscRhsVec);
    VecAssemblyEnd(petscRhsVec);

    INFO(info, 8)("Fill petsc matrix needed %.5f seconds\n", TIME_USED(first, clock()));
  }


  void ParallelDomainBase::solvePetscMatrix(DOFVector<double> *vec)
  {
    FUNCNAME("ParallelDomainBase::solvePetscMatrix()");

    KSP ksp;
    PC pc;

    KSPCreate(PETSC_COMM_WORLD, &ksp);
    KSPSetOperators(ksp, petscMatrix, petscMatrix, SAME_NONZERO_PATTERN);
    KSPGetPC(ksp, &pc);
    //PCSetType(pc, PCJACOBI);
    PCSetType(pc, PCILU);
    KSPSetTolerances(ksp, 1.e-7, PETSC_DEFAULT, PETSC_DEFAULT, PETSC_DEFAULT);
    KSPSetType(ksp, KSPBCGS);
    //KSPSetType(ksp, KSPCG);
    KSPMonitorSet(ksp, myKSPMonitor, PETSC_NULL, 0);

    KSPSolve(ksp, petscRhsVec, petscSolVec);

    PetscScalar *vecPointer;
    VecGetArray(petscSolVec, &vecPointer);

    for (int i = 0; i < nRankDOFs; i++)
      (*vec)[mapLocalToDofIndex[i]] = vecPointer[i];

    VecRestoreArray(petscSolVec, &vecPointer);

    std::vector<double*> sendBuffers(sendDofs.size());
    std::vector<double*> recvBuffers(recvDofs.size());

    MPI::Request request[sendDofs.size() + recvDofs.size()];
    int requestCounter = 0;

    int i = 0;
    for (RankToDofContainer::iterator sendIt = sendDofs.begin();
	 sendIt != sendDofs.end(); ++sendIt, i++) {
      int nSendDOFs = sendIt->second.size();
      sendBuffers[i] = new double[nSendDOFs];

      for (int j = 0; j < nSendDOFs; j++)
	sendBuffers[i][j] = (*vec)[*((sendIt->second)[j])];

      request[requestCounter++] =
	mpiComm.Isend(sendBuffers[i], nSendDOFs, MPI_DOUBLE, sendIt->first, 0);
    }

    i = 0;
    for (RankToDofContainer::iterator recvIt = recvDofs.begin();
	 recvIt != recvDofs.end(); ++recvIt, i++) {
      int nRecvDOFs = recvIt->second.size();
      recvBuffers[i] = new double[nRecvDOFs];

      request[requestCounter++] =
	mpiComm.Irecv(recvBuffers[i], nRecvDOFs, MPI_DOUBLE, recvIt->first, 0);
    }

    MPI::Request::Waitall(requestCounter, request);

    i = 0;
    for (RankToDofContainer::iterator recvIt = recvDofs.begin();
	 recvIt != recvDofs.end(); ++recvIt, i++) {
      for (int j = 0; j < static_cast<int>(recvIt->second.size()); j++)
	(*vec)[*(recvIt->second)[j]] = recvBuffers[i][j];

      delete [] recvBuffers[i];
    }

    for (int i = 0; i < static_cast<int>(sendBuffers.size()); i++)
      delete [] sendBuffers[i];

    MatDestroy(petscMatrix);
  }

  void ParallelDomainBase::solvePetscMatrix(SystemVector *vec)
  {
    FUNCNAME("ParallelDomainBase::solvePetscMatrix()");

    KSP solver;

    KSPCreate(PETSC_COMM_WORLD, &solver);
    KSPSetOperators(solver, petscMatrix, petscMatrix, SAME_NONZERO_PATTERN);

    KSPSetTolerances(solver, 1.e-8, PETSC_DEFAULT, PETSC_DEFAULT, PETSC_DEFAULT);
    KSPSetType(solver, KSPBCGS);
    KSPMonitorSet(solver, myKSPMonitor, PETSC_NULL, 0);
    KSPSetFromOptions(solver);

    KSPSolve(solver, petscRhsVec, petscSolVec);

    PetscScalar *vecPointer;
    VecGetArray(petscSolVec, &vecPointer);
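
    // The PETSc solution vector interleaves the components: the nComponents
    // values belonging to local DOF j are stored contiguously, so component i of
    // DOF j is read from vecPointer[j * nComponents + i] below.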

    for (int i = 0; i < nComponents; i++) {
      DOFVector<double> *dofvec = vec->getDOFVector(i);
      for (int j = 0; j < nRankDOFs; j++)
	(*dofvec)[mapLocalToDofIndex[j]] = vecPointer[j * nComponents + i];
    }

    VecRestoreArray(petscSolVec, &vecPointer);

    std::vector<double*> sendBuffers(sendDofs.size());
    std::vector<double*> recvBuffers(recvDofs.size());

    MPI::Request request[sendDofs.size() + recvDofs.size()];
    int requestCounter = 0;
    
    int i = 0;
    for (RankToDofContainer::iterator sendIt = sendDofs.begin();
	 sendIt != sendDofs.end(); ++sendIt, i++) {
      int nSendDOFs = sendIt->second.size();
      sendBuffers[i] = new double[nSendDOFs * nComponents];

      int counter = 0;
      for (int j = 0; j < nComponents; j++) {
	DOFVector<double> *dofvec = vec->getDOFVector(j);
	for (int k = 0; k < nSendDOFs; k++)
	  sendBuffers[i][counter++] = (*dofvec)[*((sendIt->second)[k])];
      }

      request[requestCounter++] = mpiComm.Isend(sendBuffers[i], nSendDOFs * nComponents,
						MPI_DOUBLE, sendIt->first, 0);
    }

    i = 0;
    for (RankToDofContainer::iterator recvIt = recvDofs.begin();
	 recvIt != recvDofs.end(); ++recvIt, i++) {
      int nRecvDOFs = recvIt->second.size() * nComponents;
      recvBuffers[i] = new double[nRecvDOFs];

      request[requestCounter++] =
	mpiComm.Irecv(recvBuffers[i], nRecvDOFs, MPI_DOUBLE, recvIt->first, 0);
    }


    MPI::Request::Waitall(requestCounter, request);

    i = 0;
    for (RankToDofContainer::iterator recvIt = recvDofs.begin();
	 recvIt != recvDofs.end(); ++recvIt, i++) {
      int nRecvDOFs = recvIt->second.size();

      int counter = 0;
      for (int j = 0; j < nComponents; j++) {
	DOFVector<double> *dofvec = vec->getDOFVector(j);
 	for (int k = 0; k < nRecvDOFs; k++)
 	  (*dofvec)[*(recvIt->second)[k]] = recvBuffers[i][counter++];
      }

      delete [] recvBuffers[i];
    }
    
    for (int i = 0; i < static_cast<int>(sendBuffers.size()); i++)
      delete [] sendBuffers[i];

    int iterations = 0;
    KSPGetIterationNumber(solver, &iterations);
    MSG("  Number of iterations: %d\n", iterations);
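
    // Check the solution: petscTmpVec = A * x - b, so the 2-norm of petscTmpVec
    // below is the residual norm of the solved system.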
    
    double norm = 0.0;
    MatMult(petscMatrix, petscSolVec, petscTmpVec);
    VecAXPY(petscTmpVec, -1.0, petscRhsVec);
    VecNorm(petscTmpVec, NORM_2, &norm);
    MSG("  Residual norm: %e\n", norm);

    MatDestroy(petscMatrix);
    KSPDestroy(solver);
  }


  double ParallelDomainBase::setElemWeights(AdaptInfo *adaptInfo) 
  {
    double localWeightSum = 0.0;
    int elNum = -1;

    elemWeights.clear();

    TraverseStack stack;
    ElInfo *elInfo = stack.traverseFirst(mesh, -1,
					 Mesh::CALL_EVERY_EL_PREORDER);
    while (elInfo) {
      Element *element = elInfo->getElement();

      // get partition data
      PartitionElementData *partitionData = dynamic_cast<PartitionElementData*>
	(element->getElementData(PARTITION_ED));

      if (partitionData && partitionData->getPartitionStatus() == IN) {
	if (partitionData->getLevel() == 0) {
	  elNum = element->getIndex();
	}
	TEST_EXIT_DBG(elNum != -1)("invalid element number\n");
	if (element->isLeaf()) {
	  elemWeights[elNum] += 1.0;
	  localWeightSum += 1.0;
	}
      }

      elInfo = stack.traverseNext(elInfo);
    }

    return localWeightSum;
  }

  void ParallelDomainBase::partitionMesh(AdaptInfo *adaptInfo)
  {
    if (initialPartitionMesh) {
      initialPartitionMesh = false;
      partitioner->fillCoarsePartitionVec(&oldPartitionVec);
      partitioner->partition(&elemWeights, INITIAL);
    } else {
      oldPartitionVec = partitionVec;
      partitioner->partition(&elemWeights, ADAPTIVE_REPART, 100.0 /*0.000001*/);
    }    

    partitioner->fillCoarsePartitionVec(&partitionVec);
  }

  void ParallelDomainBase::createInteriorBoundaryInfo(DofContainer& rankDOFs)
  {
    FUNCNAME("ParallelDomainBase::createInteriorBoundaryInfo()");

    // === First, create all the information about the interior boundaries. ===

    TraverseStack stack;
    ElInfo *elInfo = stack.traverseFirst(mesh, -1,
					 Mesh::CALL_LEAF_EL | Mesh::FILL_NEIGH);
    while (elInfo) {
      Element *element = elInfo->getElement();

      PartitionElementData *partitionData = 
	dynamic_cast<PartitionElementData*>(element->getElementData(PARTITION_ED));   
      if (partitionData->getPartitionStatus() == IN) {
	for (int i = 0; i < 3; i++) {
	  if (!elInfo->getNeighbour(i))
	    continue;

	  PartitionElementData *neighbourPartitionData =
	    dynamic_cast<PartitionElementData*>(elInfo->getNeighbour(i)->getElementData(PARTITION_ED));

	  if (neighbourPartitionData->getPartitionStatus() == OUT) {
	    // We have found an element that is at an interior boundary.

	    // === Find out if the boundary part of the element corresponds to the ===
	    // === rank or to the rank "on the other side" of the interior boundary. ===

	    const DegreeOfFreedom* boundDOF1 = NULL;
	    const DegreeOfFreedom* boundDOF2 = NULL;
	    
	    switch (i) {
	    case 0:
	      boundDOF1 = element->getDOF(1);
	      boundDOF2 = element->getDOF(2);
	      break;
	    case 1:
	      boundDOF1 = element->getDOF(0);
	      boundDOF2 = element->getDOF(2);
	      break;
	    case 2:
	      boundDOF1 = element->getDOF(0);
	      boundDOF2 = element->getDOF(1);
	      break;
	    default:
	      ERROR_EXIT("Should never happen!\n");
	    }

	    bool isRankDOF1 = (find(rankDOFs.begin(), rankDOFs.end(), boundDOF1) != rankDOFs.end());
	    bool isRankDOF2 = (find(rankDOFs.begin(), rankDOFs.end(), boundDOF2) != rankDOFs.end());
	    bool ranksBoundary = isRankDOF1 || isRankDOF2;

	    // === And add the part of the interior boundary. ===

	    AtomicBoundary& bound =
	      (ranksBoundary ?
	       myIntBoundary.getNewAtomicBoundary(partitionVec[elInfo->getNeighbour(i)->getIndex()]) :
	       otherIntBoundary.getNewAtomicBoundary(partitionVec[elInfo->getNeighbour(i)->getIndex()]));

	    bound.rankObject.el = element;
	    bound.rankObject.elIndex = element->getIndex();
	    bound.rankObject.subObjAtBoundary = EDGE;
	    bound.rankObject.ithObjAtBoundary = i;
	    // Do not set a pointer to the neighbour element, because it will be
	    // deleted from the mesh after partitioning and the pointer would
	    // become invalid.
	    bound.neighbourObject.el = NULL;
	    bound.neighbourObject.elIndex = elInfo->getNeighbour(i)->getIndex();
	    bound.neighbourObject.subObjAtBoundary = EDGE;
	    bound.neighbourObject.ithObjAtBoundary = elInfo->getSideOfNeighbour(i);

	    // i == 2  =>  getSideOfNeighbour(i) == 2
	    TEST_EXIT_DBG(i != 2 || elInfo->getSideOfNeighbour(i) == 2)
	      ("Should not happen!\n");
 	  }
	}
      }

      elInfo = stack.traverseNext(elInfo);
    }


    // === Once we have this information, we have to care about the order of the  ===
    // === boundaries. In the end, all boundaries have to be in the same order on ===
    // === both ranks that share a boundary.                                      ===
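
    // The atomic boundaries were collected during a mesh traversal, so the two
    // ranks sharing a boundary may have stored them in different orders. The rank
    // owning a boundary (myIntBoundary) sends, for each of its atomic boundaries,
    // the element index and the local edge number; the other rank
    // (otherIntBoundary) then reorders its own list to match, so that later
    // boundary-wise communication can rely on an identical ordering on both sides.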

    std::vector<int*> sendBuffers, recvBuffers;

    MPI::Request request[myIntBoundary.boundary.size() +
			 otherIntBoundary.boundary.size()];
    int requestCounter = 0;

    for (RankToBoundMap::iterator rankIt = myIntBoundary.boundary.begin();
	 rankIt != myIntBoundary.boundary.end(); ++rankIt) {
      int nSendInt = rankIt->second.size();
      int* buffer = new int[nSendInt * 2];
      for (int i = 0; i < nSendInt; i++) {
	buffer[i * 2] = (rankIt->second)[i].rankObject.elIndex;
	buffer[i * 2 + 1] = (rankIt->second)[i].rankObject.ithObjAtBoundary;
      }
      sendBuffers.push_back(buffer);

      request[requestCounter++] =
	mpiComm.Isend(buffer, nSendInt * 2, MPI_INT, rankIt->first, 0);
    }

    for (RankToBoundMap::iterator rankIt = otherIntBoundary.boundary.begin();
	 rankIt != otherIntBoundary.boundary.end(); ++rankIt) {
      int nRecvInt = rankIt->second.size();
      int *buffer = new int[nRecvInt * 2];
      recvBuffers.push_back(buffer);

      request[requestCounter++] =
	mpiComm.Irecv(buffer, nRecvInt * 2, MPI_INT, rankIt->first, 0);
    }


    MPI::Request::Waitall(requestCounter, request);


    int i = 0;
    for (RankToBoundMap::iterator rankIt = otherIntBoundary.boundary.begin();
	 rankIt != otherIntBoundary.boundary.end(); ++rankIt) {

      // === We have received from rank "rankIt->first" the ordered list of element ===
      // === indices. We now have to sort the corresponding list in this rank to    ===
      // === get the same order.                                                    ===
      
      for (int j = 0; j < static_cast<int>(rankIt->second.size()); j++) {
	// If the expected object is not at this position, search for it.
	if ((rankIt->second)[j].neighbourObject.elIndex != recvBuffers[i][j * 2] ||
	    (rankIt->second)[j].neighbourObject.ithObjAtBoundary != recvBuffers[i][j * 2 + 1]) {
	  int k = j + 1;
	  for (; k < static_cast<int>(rankIt->second.size()); k++)
	    if ((rankIt->second)[k].neighbourObject.elIndex == recvBuffers[i][j * 2] &&
		(rankIt->second)[k].neighbourObject.ithObjAtBoundary == recvBuffers[i][j * 2 + 1])
	      break;

	  // The element must always be found, because the list is just in another order.
	  TEST_EXIT_DBG(k < static_cast<int>(rankIt->second.size()))
	    ("Should never happen!\n");

	  // Swap the current with the found element.
	  AtomicBoundary tmpBound = (rankIt->second)[k];
	  (rankIt->second)[k] = (rankIt->second)[j];
	  (rankIt->second)[j] = tmpBound;	
	}
      }

      delete [] recvBuffers[i++];
    }

    for (int i = 0; i < static_cast<int>(sendBuffers.size()); i++)
      delete [] sendBuffers[i];
  }


  void ParallelDomainBase::removeMacroElements()
  {
    std::vector<MacroElement*> macrosToRemove;
    for (std::deque<MacroElement*>::iterator it = mesh->firstMacroElement();
	 it != mesh->endOfMacroElements();
	 ++it) {
      PartitionElementData *partitionData = 
	dynamic_cast<PartitionElementData*>
	((*it)->getElement()->getElementData(PARTITION_ED));
      if (partitionData->getPartitionStatus() != IN)
	macrosToRemove.push_back(*it);
    }

    mesh->removeMacroElements(macrosToRemove, feSpace);
  }


  void ParallelDomainBase::createLocalGlobalNumbering(DofContainer& rankDOFs,
						      int& nRankDOFs,
						      int& nOverallDOFs)
  {
    FUNCNAME("ParallelDomainBase::createLocalGlobalNumbering()");

    // === Get rank information about DOFs. ===

    // Stores to each DOF pointer the set of ranks the DOF is part of.
    std::map<const DegreeOfFreedom*, std::set<int> > partitionDOFs;
    DofContainer rankAllDofs;
    DofToRank boundaryDofs;

    createDOFMemberInfo(partitionDOFs, rankDOFs, rankAllDofs, boundaryDofs);

    nRankDOFs = rankDOFs.size();
    nOverallDOFs = partitionDOFs.size();

    // === Get starting position for global rank dof ordering. ===

    rstart = 0;
    mpiComm.Scan(&nRankDOFs, &rstart, 1, MPI_INT, MPI_SUM);
    rstart -= nRankDOFs;
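
    // At this point rstart holds the exclusive prefix sum of nRankDOFs over all
    // ranks (the inclusive MPI scan minus the rank's own contribution), i.e. the
    // first global DOF index owned by this rank. For example, if ranks 0, 1 and 2
    // own 5, 3 and 4 DOFs, rstart becomes 0, 5 and 8, respectively.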

    // === Create new indices for all dofs in the rank. The new indices must start ===
    // === at index 0, must be contiguous, and must keep the same order as the     ===
    // === indices had before.                                                     ===

    // Do not change the indices now, but create a new indexing and store it here.
    DofIndexMap rankDofsNewLocalIndex;
    isRankDof.clear();
    int i = 0;
    for (DofContainer::iterator dofIt = rankAllDofs.begin();
	 dofIt != rankAllDofs.end(); ++dofIt) {
      rankDofsNewLocalIndex[*dofIt] = i;
      // First, we set all dofs in the rank's partition to be owned by the rank.
      // Later, the dofs in the rank's partition that are owned by other ranks are
      // set to false.
      isRankDof[i] = true;
      i++;
    }

    // === Create for all rank owned dofs a new global indexing. ===

    // Stores for dofs in rank a new global index.
    DofIndexMap rankDofsNewGlobalIndex;
    // Stores for all rank owned dofs a contiguous local index.
    DofIndexMap rankOwnedDofsNewLocalIndex;

    i = 0;
    for (DofContainer::iterator dofIt = rankDOFs.begin();
	 dofIt != rankDOFs.end(); ++dofIt) {
      rankDofsNewGlobalIndex[*dofIt] = i + rstart;
      rankOwnedDofsNewLocalIndex[*dofIt] = i;
      i++;
    }

    // === Create information about which dof indices must be sent and which ===
    // === must be received.                                                  ===

    // Stores to each rank a map from DOF pointers (which are all owned by the rank
    // and lie on an interior boundary) to new global DOF indices.
    std::map<int, std::map<const DegreeOfFreedom*, DegreeOfFreedom> > sendNewDofs;

    // Maps to each rank the number of new DOF indices this rank will receive from
    // it. All these DOFs are at an interior boundary of this rank, but are owned
    // by another rank.
    std::map<int, int> recvNewDofs;

    for (DofToRank::iterator it = boundaryDofs.begin(); it != boundaryDofs.end(); ++it) {

      if (it->second == mpiRank) {
	// If the boundary dof is a rank dof, it must be sent to other ranks.

	// Search for all ranks that have this dof too.
	for (std::set<int>::iterator itRanks = partitionDOFs[it->first].begin();
	     itRanks != partitionDOFs[it->first].end();
	     ++itRanks) {
	  if (*itRanks != mpiRank) {
	    TEST_EXIT_DBG(rankDofsNewGlobalIndex.count(it->first) == 1)
	      ("DOF Key not found!\n");

	    sendNewDofs[*itRanks][it->first] = rankDofsNewGlobalIndex[it->first];
	  }
	}
      } else {
	// If the boundary dof is not a rank dof, its new dof index (and later
	// also the dof values) must be received from another rank.
	if (recvNewDofs.find(it->second) == recvNewDofs.end())
	  recvNewDofs[it->second] = 1;
	else
	  recvNewDofs[it->second]++;
      }
    }

    // === Send and receive the dof indices at boundary. ===

    std::vector<int*> sendBuffers(sendNewDofs.size()), recvBuffers(recvNewDofs.size());

    sendDofs.clear();
    recvDofs.clear();

    MPI::Request request[sendNewDofs.size() + recvNewDofs.size()];
    int requestCounter = 0;

    i = 0;
    for (std::map<int, std::map<const DegreeOfFreedom*, DegreeOfFreedom> >::iterator
	   sendIt = sendNewDofs.begin();
	 sendIt != sendNewDofs.end();
	 ++sendIt, i++) {
      int nSendDofs = sendIt->second.size() * 2;
      sendBuffers[i] = new int[nSendDofs];

      int c = 0;
      for (std::map<const DegreeOfFreedom*, DegreeOfFreedom>::iterator
	     dofIt = sendIt->second.begin();
	   dofIt != sendIt->second.end();
	   ++dofIt) {
	sendBuffers[i][c++] = *(dofIt->first);
	sendBuffers[i][c++] = dofIt->second;

	sendDofs[sendIt->first].push_back(dofIt->first);
      }

      request[requestCounter++] =
	mpiComm.Isend(sendBuffers[i], nSendDofs, MPI_INT, sendIt->first, 0);
    }

    i = 0;
    for (std::map<int, int>::iterator recvIt = recvNewDofs.begin();
	 recvIt != recvNewDofs.end();
	 ++recvIt, i++) {
      int nRecvDOFs = recvIt->second * 2;
      recvBuffers[i] = new int[nRecvDOFs];

      request[requestCounter++] =
	mpiComm.Irecv(recvBuffers[i], nRecvDOFs, MPI_INT, recvIt->first, 0);
    }


    MPI::Request::Waitall(requestCounter, request);

    // === Delete send buffers. ===

    for (int j = 0; j < static_cast<int>(sendBuffers.size()); j++)
      delete [] sendBuffers[j];

    // === Change dof indices at boundary from other ranks. ===

    // Within this small data structure we track which dof indices have already
    // been changed. This is used to avoid the following situation: Assume there
    // are two dof indices a and b in boundaryDofs, and we have to change index a
    // to b and index b to c. When the second rule is applied, it must not be
    // applied to the dof whose index just became b (as a result of changing a to
    // b), but to the dof that originally had index b. Therefore, once a dof index
    // has been changed, the corresponding dof pointer is marked in this data
    // structure and is not allowed to be changed again.
    std::map<const DegreeOfFreedom*, bool> dofChanged;
    for (DofToRank::iterator dofIt = boundaryDofs.begin(); dofIt != boundaryDofs.end();
	 ++dofIt)
      dofChanged[dofIt->first] = false;

    i = 0;
    for (std::map<int, int>::iterator recvIt = recvNewDofs.begin();
	 recvIt != recvNewDofs.end();
	 ++recvIt, i++) {

      for (int j = 0; j < recvIt->second; j++) {

	DegreeOfFreedom oldDof = recvBuffers[i][j * 2];
	DegreeOfFreedom newGlobalDof = recvBuffers[i][j * 2 + 1];

	bool found = false;

	// Iterate over all boundary dofs to find the dof whose index we have to change.
	for (DofToRank::iterator dofIt = boundaryDofs.begin();
	     dofIt != boundaryDofs.end(); ++dofIt) {

	  if (*(dofIt->first) == oldDof && !dofChanged[dofIt->first]) {
	    dofChanged[dofIt->first] = true;

	    recvDofs[recvIt->first].push_back(dofIt->first);
	    rankDofsNewGlobalIndex[dofIt->first] = newGlobalDof;
	    isRankDof[rankDofsNewLocalIndex[dofIt->first]] = false;

	    found = true;
	    break;
	  }
	}

	TEST_EXIT_DBG(found)("Should not happen!\n");
      }

      delete [] recvBuffers[i];
    }

    // === Now create the local-to-global index and the local-to-dof-index mappings. ===

    createLocalMappings(rankDofsNewLocalIndex, rankOwnedDofsNewLocalIndex,
			rankDofsNewGlobalIndex);
  }


  void ParallelDomainBase::updateLocalGlobalNumbering(int& nRankDOFs, int& nOverallDOFs)
  {