ParallelDomainBase.cc 59.2 KB
Newer Older
Thomas Witkowski's avatar
Thomas Witkowski committed
1
2
#include <algorithm>

3
#include "ParallelDomainBase.h"
4
5
6
7
8
9
10
#include "ParMetisPartitioner.h"
#include "Mesh.h"
#include "Traverse.h"
#include "ElInfo.h"
#include "Element.h"
#include "MacroElement.h"
#include "PartitionElementData.h"
11
12
#include "DOFMatrix.h"
#include "DOFVector.h"
13
#include "SystemVector.h"
14
#include "VtkWriter.h"
15
#include "ElementDofIterator.h"
16
17
#include "ProblemStatBase.h"
#include "StandardProblemIteration.h"
18
#include "ElementFileWriter.h"
19
20

#include "petscksp.h"
21
22
23

namespace AMDiS {

24
  /// PETSc KSP monitor callback: rank 0 prints the residual norm of every
  /// 100th iteration to stdout. Always returns 0 (success) as PETSc expects.
  PetscErrorCode myKSPMonitor(KSP ksp, PetscInt iter, PetscReal rnorm, void *)
  {
    bool printThisIter = (iter % 100 == 0);
    bool isMainRank = (MPI::COMM_WORLD.Get_rank() == 0);

    if (printThisIter && isMainRank)
      std::cout << "  Petsc-Iteration " << iter << ": " << rnorm << std::endl;

    return 0;
  }

32
33
34
35
36
  /// Comparison predicate for sorting: orders two DOF pointers by the DOF
  /// values they point to (not by pointer address).
  inline bool cmpDofsByValue(const DegreeOfFreedom* dof1, const DegreeOfFreedom* dof2)
  {
    return *dof1 < *dof2;
  }

37
  /// Constructor: stores the iteration/time interfaces and the FE space,
  /// verifies the DOFAdmin preconditions required by the parallel DOF
  /// numbering, caches MPI rank/size/communicator and creates the ParMETIS
  /// partitioner for the mesh.
  ParallelDomainBase::ParallelDomainBase(ProblemIterationInterface *iIF,
					 ProblemTimeInterface *tIF,
					 FiniteElemSpace *fe,
					 RefinementManager *refineManager)
    : iterationIF(iIF),
      timeIF(tIF),
      name(iIF->getName()),
      feSpace(fe),
      mesh(fe->getMesh()),
      refinementManager(refineManager),
      initialPartitionMesh(true),
      nRankDofs(0),
      rstart(0),
      nComponents(1),
      deserialized(false)
  {
    // Fixed typo in the debug function name (was "ParalleDomainBase").
    FUNCNAME("ParallelDomainBase::ParallelDomainBase()");

    // The parallel DOF mapping supports exactly one DOFAdmin without pre-DOFs.
    TEST_EXIT(mesh->getNumberOfDOFAdmin() == 1)
      ("Only meshes with one DOFAdmin are supported!\n");
    TEST_EXIT(mesh->getDOFAdmin(0).getNumberOfPreDOFs(0) == 0)
      ("Wrong pre dof number for DOFAdmin!\n");

    mpiRank = MPI::COMM_WORLD.Get_rank();
    mpiSize = MPI::COMM_WORLD.Get_size();
    mpiComm = MPI::COMM_WORLD;
    partitioner = new ParMetisPartitioner(mesh, &mpiComm);
  }

66
  // Sets up the parallel problem: partitions the macro mesh over all ranks,
  // builds the local/global DOF numbering and the interior boundary
  // information, strips foreign macro elements and applies optional global
  // refinements. Must run on the unrefined macro mesh with more than one rank.
  void ParallelDomainBase::initParallelization(AdaptInfo *adaptInfo)
  {
    FUNCNAME("ParallelDomainBase::initParallelization()");

    TEST_EXIT(mpiSize > 1)
      ("Parallelization does not work with only one process!\n");

    // If the problem has been already read from a file, we do not need to do anything.
    if (deserialized)
      return;

    // Test, if the mesh is the macro mesh only! Paritioning of the mesh is supported
    // only for macro meshes, so it will not work yet if the mesh is already refined
    // in some way.
    testForMacroMesh();

    // create an initial partitioning of the mesh
    partitioner->createPartitionData();
    // set the element weights, which are 1 at the very first begin
    setElemWeights(adaptInfo);
    // and now partition the mesh
    partitionMesh(adaptInfo);   

#if (DEBUG != 0)
    // Snapshot element->DOF relations to verify consistency after the
    // partition-dependent steps below.
    ElementIdxToDofs elMap;
    DbgCreateElementMap(elMap);

    if (mpiRank == 0)
      writePartitioningMesh("part.vtu");
#endif

    // === Create new global and local DOF numbering. ===

    // Set of all DOFs of the rank.
    std::vector<const DegreeOfFreedom*> rankDofs;
    // Number of DOFs in ranks partition that are owned by the rank.
    nRankDofs = 0;
    // Number of all DOFs in the macro mesh.
    int nOverallDOFs = 0;

    createLocalGlobalNumbering(rankDofs, nRankDofs, nOverallDOFs);

    // === Create interior boundary information ===

    createInteriorBoundaryInfo(rankDofs);

    // === Remove all macro elements that are not part of the rank partition. ===

    removeMacroElements();

#if (DEBUG != 0)
    DbgTestElementMap(elMap);
    DbgTestInteriorBoundary();
#endif

    // === Reset all DOFAdmins of the mesh. ===

    updateDofAdmins();

    // === Global refinements. ===

    int globalRefinement = 0;
    GET_PARAMETER(0, "testgr", "%d", &globalRefinement);

    if (globalRefinement > 0) {
      refinementManager->globalRefine(mesh, globalRefinement);

#if (DEBUG != 0)
      elMap.clear();
      DbgCreateElementMap(elMap);
#endif

      // Refinement created new DOFs, so the numbering and the admins must be
      // rebuilt.
      updateLocalGlobalNumbering(nRankDofs, nOverallDOFs);

      updateDofAdmins();

#if (DEBUG != 0)
      DbgTestElementMap(elMap);
#endif
    }

#if (DEBUG != 0)
    DbgTestCommonDofs(true);
#endif

    // Each DOF contributes nComponents rows to the (interleaved) system.
    nRankRows = nRankDofs * nComponents;
    nOverallRows = nOverallDOFs * nComponents;
  }

158
159

  // Counterpart of initParallelization(); currently nothing has to be
  // cleaned up.
  void ParallelDomainBase::exitParallelization(AdaptInfo *adaptInfo)
  {}
161

162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
  
  // Resets every DOFAdmin of the mesh to match the parallel DOF numbering:
  // the first mapLocalGlobalDOFs.size() DOF slots are marked used, all others
  // free, and the admin's bookkeeping counters are updated accordingly.
  void ParallelDomainBase::updateDofAdmins()
  {
    int nAdmins = mesh->getNumberOfDOFAdmin();
    for (int i = 0; i < nAdmins; i++) {
      DOFAdmin& admin = const_cast<DOFAdmin&>(mesh->getDOFAdmin(i));
      
      // First mark every slot free ...
      for (int j = 0; j < admin.getSize(); j++)
	admin.setDOFFree(j, true);
      // ... then re-occupy the slots of all locally known DOFs.
      for (int j = 0; j < static_cast<int>(mapLocalGlobalDOFs.size()); j++)
 	admin.setDOFFree(j, false);

      admin.setUsedSize(mapLocalGlobalDOFs.size());
      admin.setUsedCount(mapLocalGlobalDOFs.size());
      admin.setFirstHole(mapLocalGlobalDOFs.size());
    }
  }

180

181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
  /// Verifies that the mesh is a pure macro mesh: every leaf element must be
  /// at refinement level 0, and there must be at least as many macro elements
  /// as MPI processes so each rank can get a non-empty partition.
  void ParallelDomainBase::testForMacroMesh()
  {
    FUNCNAME("ParallelDomainBase::testForMacroMesh()");

    int nMacroElements = 0;

    TraverseStack stack;
    ElInfo *elInfo = stack.traverseFirst(mesh, -1, Mesh::CALL_LEAF_EL);
    while (elInfo) {
      // A leaf element at level > 0 means the mesh was already refined.
      // (Fixed typo in the error message: "does nor work" -> "does not work".)
      TEST_EXIT(elInfo->getLevel() == 0)
	("Mesh is already refined! This does not work with parallelization!\n");
      
      nMacroElements++;

      elInfo = stack.traverseNext(elInfo);
    }

    TEST_EXIT(nMacroElements >= mpiSize)
      ("The mesh has less macro elements than number of mpi processes!\n");
  }

202
203
  /// Copies the non-zero entries of one DOFMatrix into the global PETSc
  /// matrix. Local row/column indices are mapped to global indices via
  /// mapLocalGlobalDOFs and then scaled/shifted (dispMult, dispAddRow/Col) to
  /// address the interleaved multi-component layout.
  void ParallelDomainBase::setDofMatrix(DOFMatrix* mat, int dispMult, 
					int dispAddRow, int dispAddCol)
  {
    FUNCNAME("ParallelDomainBase::setDofMatrix()");

    TEST_EXIT(mat)("No DOFMatrix!\n");

    using mtl::tag::row; using mtl::tag::nz; using mtl::begin; using mtl::end;
    namespace traits= mtl::traits;
    typedef DOFMatrix::base_matrix_type Matrix;

    traits::col<Matrix>::type col(mat->getBaseMatrix());
    traits::const_value<Matrix>::type value(mat->getBaseMatrix());

    typedef traits::range_generator<row, Matrix>::type cursor_type;
    typedef traits::range_generator<nz, cursor_type>::type icursor_type;

    // Row buffers, reused across all rows to avoid reallocation.
    std::vector<int> cols;
    std::vector<double> values;
    cols.reserve(300);
    values.reserve(300);

    for (cursor_type cursor = begin<row>(mat->getBaseMatrix()), 
	   cend = end<row>(mat->getBaseMatrix()); cursor != cend; ++cursor) {

      cols.clear();
      values.clear();

      // Global PETSc row index of this matrix row.
      int r = mapLocalGlobalDOFs[*cursor] * dispMult + dispAddRow;

      for (icursor_type icursor = begin<nz>(cursor), 
	     icend = end<nz>(cursor); icursor != icend; ++icursor)
	if (value(*icursor) != 0.0) {
	  cols.push_back(mapLocalGlobalDOFs[col(*icursor)] * dispMult + dispAddCol);
	  values.push_back(value(*icursor));
	}

      // BUGFIX: skip rows without any non-zero entry. Evaluating &(cols[0])
      // on an empty vector is undefined behaviour.
      if (!cols.empty())
	MatSetValues(petscMatrix, 1, &r, static_cast<int>(cols.size()),
		     &(cols[0]), &(values[0]), ADD_VALUES);
    }
  }
242

243

244
  /// Adds all entries of a DOFVector into a PETSc vector. Each local DOF index
  /// is mapped to its global index via mapLocalGlobalDOFs and then
  /// scaled/shifted (dispMult, dispAdd) for the interleaved component layout.
  void ParallelDomainBase::setDofVector(Vec& petscVec, DOFVector<double>* vec, 
					int dispMult, int dispAdd)
  {
    DOFVector<double>::Iterator it(vec, USED_DOFS);
    for (it.reset(); !it.end(); ++it) {
      // Global PETSc index of this entry.
      int row = mapLocalGlobalDOFs[it.getDOFIndex()] * dispMult + dispAdd;
      double entry = *it;

      VecSetValues(petscVec, 1, &row, &entry, ADD_VALUES);
    }    
  }

256

257
258
  // Assembles the PETSc system for the single-component case: creates the
  // distributed matrix and the rhs/solution/temporary vectors, fills the
  // matrix and the right-hand side from the given DOFMatrix/DOFVector and
  // finalizes the PETSc assembly.
  void ParallelDomainBase::fillPetscMatrix(DOFMatrix *mat, DOFVector<double> *vec)
  {
    // Distributed matrix with nRankRows local and nOverallRows global rows.
    MatCreate(PETSC_COMM_WORLD, &petscMatrix);
    MatSetSizes(petscMatrix, nRankRows, nRankRows, nOverallRows, nOverallRows);
    MatSetType(petscMatrix, MATAIJ);

    VecCreate(PETSC_COMM_WORLD, &petscRhsVec);
    VecSetSizes(petscRhsVec, nRankRows, nOverallRows);
    VecSetType(petscRhsVec, VECMPI);

    VecCreate(PETSC_COMM_WORLD, &petscSolVec);
    VecSetSizes(petscSolVec, nRankRows, nOverallRows);
    VecSetType(petscSolVec, VECMPI);

    VecCreate(PETSC_COMM_WORLD, &petscTmpVec);
    VecSetSizes(petscTmpVec, nRankRows, nOverallRows);
    VecSetType(petscTmpVec, VECMPI);

    // Copy the local matrix entries into the PETSc matrix and finalize.
    setDofMatrix(mat);

    MatAssemblyBegin(petscMatrix, MAT_FINAL_ASSEMBLY);
    MatAssemblyEnd(petscMatrix, MAT_FINAL_ASSEMBLY);

    // Copy the local rhs entries and finalize the vector assembly.
    setDofVector(petscRhsVec, vec);

    VecAssemblyBegin(petscRhsVec);
    VecAssemblyEnd(petscRhsVec);
  }

  
  /// Assembles the PETSc system for the multi-component case. First counts,
  /// per row, the number of non-zeros in the diagonal and off-diagonal blocks
  /// (exchanging with neighbour ranks the entries of rows they own), then
  /// creates the MPIAIJ matrix with exact preallocation and fills matrix and
  /// right-hand side.
  ///
  /// Two fixes against the previous version:
  ///  1. recvSize was only reserve()d, so indexing recvSize[i] was undefined
  ///     behaviour; it is now properly sized.
  ///  2. the per-rank send size was a loop-local int handed to a nonblocking
  ///     Isend; MPI requires the send buffer to stay valid until Waitall, so
  ///     the sizes are now kept in a persistent vector.
  void ParallelDomainBase::fillPetscMatrix(Matrix<DOFMatrix*> *mat, SystemVector *vec)
  {
    FUNCNAME("ParallelDomainBase::fillPetscMatrix()");

    clock_t first = clock();

    VecCreate(PETSC_COMM_WORLD, &petscRhsVec);
    VecSetSizes(petscRhsVec, nRankRows, nOverallRows);
    VecSetType(petscRhsVec, VECMPI);

    VecCreate(PETSC_COMM_WORLD, &petscSolVec);
    VecSetSizes(petscSolVec, nRankRows, nOverallRows);
    VecSetType(petscSolVec, VECMPI);

    VecCreate(PETSC_COMM_WORLD, &petscTmpVec);
    VecSetSizes(petscTmpVec, nRankRows, nOverallRows);
    VecSetType(petscTmpVec, VECMPI);

    using mtl::tag::row; using mtl::tag::nz; using mtl::begin; using mtl::end;
    namespace traits= mtl::traits;
    typedef DOFMatrix::base_matrix_type Matrix;

    // Per-row non-zero counters for the diagonal (d_nnz) and off-diagonal
    // (o_nnz) blocks of the MPIAIJ matrix.
    int d_nnz[nRankRows];
    int o_nnz[nRankRows];

    // Matrix entries of rows owned by other ranks, collected per target rank.
    std::map<int, std::vector< std::pair<int, int> > > sendMatrixEntry;

    for (int i = 0; i < nRankRows; i++) {
      d_nnz[i] = 0;
      o_nnz[i] = 0;
    }

    // === Count non-zeros of all component blocks. ===

    for (int i = 0; i < nComponents; i++) {
      for (int j = 0; j < nComponents; j++) {
 	if ((*mat)[i][j]) {
	  Matrix bmat = (*mat)[i][j]->getBaseMatrix();

	  traits::col<Matrix>::type col(bmat);
	  traits::const_value<Matrix>::type value(bmat);
	  
	  typedef traits::range_generator<row, Matrix>::type cursor_type;
	  typedef traits::range_generator<nz, cursor_type>::type icursor_type;
	  
	  for (cursor_type cursor = begin<row>(bmat), 
		 cend = end<row>(bmat); cursor != cend; ++cursor) {

	    int r = mapLocalGlobalDOFs[*cursor] * nComponents + i;

	    if (isRankDof[*cursor]) {
	      // Row is owned by this rank: count locally.
	      r -= rstart * nComponents;

#if (DEBUG != 0)    
	      if (r < 0 || r >= nRankRows) {
		std::cout << "ERROR in rank: " << mpiRank << std::endl;
		std::cout << "  Wrong r = " << r << " " << *cursor << " " 
			  << mapLocalGlobalDOFs[*cursor] << " " 
			  << nRankRows << std::endl;
		ERROR_EXIT("Should not happen!\n");
	      }
#endif
	      
	      for (icursor_type icursor = begin<nz>(cursor), 
		     icend = end<nz>(cursor); icursor != icend; ++icursor) {
		if (value(*icursor) != 0.0) {
		  int c = mapLocalGlobalDOFs[col(*icursor)] * nComponents + j;

		  if (c >= rstart * nComponents && 
		      c < rstart * nComponents + nRankRows)
		    d_nnz[r]++;
		  else
		    o_nnz[r]++;		  
		}    
	      }
	    } else {
	      // Row is owned by another rank: find that rank via recvDofs and
	      // queue the (row, col) pairs so the owner can count them.
	      int sendToRank = -1;

	      for (RankToDofContainer::iterator it = recvDofs.begin();
		   it != recvDofs.end(); ++it) {
		for (DofContainer::iterator dofIt = it->second.begin();
		     dofIt != it->second.end(); ++dofIt) {
		  if (**dofIt == *cursor) {
		    sendToRank = it->first;
		    break;
		  }
		}

		if (sendToRank != -1)
		  break;
	      }

	      TEST_EXIT_DBG(sendToRank != -1)("Should not happen!\n");

	      for (icursor_type icursor = begin<nz>(cursor), 
		     icend = end<nz>(cursor); icursor != icend; ++icursor) {
		if (value(*icursor) != 0.0) {
		  int c = mapLocalGlobalDOFs[col(*icursor)] * nComponents + j;
		  
		  sendMatrixEntry[sendToRank].push_back(std::make_pair(r, c));
		}
	      }
	    }
	  }
	}
      }
    }

    // === Exchange the number of entries each rank will send. ===

    MPI::Request request[sendDofs.size() + recvDofs.size()];
    int requestCounter = 0;

    std::vector<int*> sendBuffers;
    sendBuffers.reserve(recvDofs.size());

    // BUGFIX: the sizes handed to the nonblocking sends must stay alive until
    // Waitall; keep them in a vector sized upfront (no reallocation).
    std::vector<int> sendSize(recvDofs.size());

    int k = 0;
    for (RankToDofContainer::iterator it = recvDofs.begin(); 
	 it != recvDofs.end(); ++it, k++) {
      sendSize[k] = sendMatrixEntry[it->first].size();

      request[requestCounter++] =
	mpiComm.Isend(&(sendSize[k]), 1, MPI_INT, it->first, 0);
      
      if (sendSize[k] > 0) {
	int *sendbuf = new int[sendSize[k] * 2];
	for (int i = 0; i < sendSize[k]; i++) {
	  sendbuf[i * 2] = sendMatrixEntry[it->first][i].first;
	  sendbuf[i * 2 + 1] = sendMatrixEntry[it->first][i].second;
	}
	sendBuffers.push_back(sendbuf);
      } else {
	sendBuffers.push_back(NULL);
      }
    }

    // BUGFIX: recvSize was only reserve()d before; indexing into it was
    // undefined behaviour. Size it properly.
    std::vector<int> recvSize(sendDofs.size());
    
    int i = 0;
    for (RankToDofContainer::iterator it = sendDofs.begin();
	 it != sendDofs.end(); ++it)
      request[requestCounter++] = 
	mpiComm.Irecv(&(recvSize[i++]), 1, MPI_INT, it->first, 0);

    MPI::Request::Waitall(requestCounter, request);

    // === Exchange the (row, col) pairs themselves. ===

    requestCounter = 0;

    i = 0;
    for (RankToDofContainer::iterator it = recvDofs.begin(); 
	 it != recvDofs.end(); ++it) {
      int nSend = sendMatrixEntry[it->first].size();

      if (nSend > 0)
	request[requestCounter++] = 
	  mpiComm.Isend(sendBuffers[i], nSend * 2, MPI_INT, it->first, 0);

      i++;
    }

    std::vector<int*> recvBuffers;
    recvBuffers.reserve(sendDofs.size());
    
    i = 0;
    for (RankToDofContainer::iterator it = sendDofs.begin();
	 it != sendDofs.end(); ++it) {
      if (recvSize[i] > 0) {
	int *recvbuf = new int[recvSize[i] * 2];
	recvBuffers.push_back(recvbuf);

	request[requestCounter++] =
	  mpiComm.Irecv(recvbuf, recvSize[i] * 2, MPI_INT, it->first, 0);
      } else {
	recvBuffers.push_back(NULL);
      }

      i++;
    }

    MPI::Request::Waitall(requestCounter, request);

    for (int j = 0; j < static_cast<int>(sendBuffers.size()); j++)
      if (sendBuffers[j])
 	delete [] sendBuffers[j];

    // === Count the received remote contributions into d_nnz/o_nnz. ===

    i = 0;
    for (RankToDofContainer::iterator it = sendDofs.begin();
	 it != sendDofs.end(); ++it) {
      if (recvSize[i] > 0) {
	for (int j = 0; j < recvSize[i]; j++) {
	  int r = recvBuffers[i][j * 2];
	  int c = recvBuffers[i][j * 2 + 1];

	  r -= rstart * nComponents;

	  TEST_EXIT_DBG(r >= 0 && r < nRankRows)("Should not happen!\n");
	  
	  if (c < rstart * nComponents ||
	      c >= rstart * nComponents + nRankRows)
	    o_nnz[r]++;
	  else
	    d_nnz[r]++;
	}

	delete [] recvBuffers[i];
      }

      i++;
    }

    // === Create the matrix with exact per-row preallocation. ===

    MatCreateMPIAIJ(PETSC_COMM_WORLD, nRankRows, nRankRows, nOverallRows, nOverallRows,
		    0, d_nnz, 0, o_nnz, &petscMatrix);

#if (DEBUG != 0)
    int a, b;
    MatGetOwnershipRange(petscMatrix, &a, &b);
    TEST_EXIT(a == rstart * nComponents)("Wrong matrix ownership range!\n");
    TEST_EXIT(b == rstart * nComponents + nRankRows)("Wrong matrix ownership range!\n");
#endif

    // === Fill matrix and right-hand side and finalize the assembly. ===

    using mtl::tag::major; using mtl::tag::nz; using mtl::begin; using mtl::end;
    namespace traits= mtl::traits;
    typedef DOFMatrix::base_matrix_type Matrix;

    for (int i = 0; i < nComponents; i++)
      for (int j = 0; j < nComponents; j++)
	if ((*mat)[i][j])
	  setDofMatrix((*mat)[i][j], nComponents, i, j);

    MatAssemblyBegin(petscMatrix, MAT_FINAL_ASSEMBLY);
    MatAssemblyEnd(petscMatrix, MAT_FINAL_ASSEMBLY);

    for (int i = 0; i < nComponents; i++)
      setDofVector(petscRhsVec, vec->getDOFVector(i), nComponents, i);

    VecAssemblyBegin(petscRhsVec);
    VecAssemblyEnd(petscRhsVec);

    INFO(info, 8)("Fill petsc matrix needed %.5f seconds\n", TIME_USED(first, clock()));
  }


524
  /// Solves the assembled single-component PETSc system with BiCGStab/ILU,
  /// writes the local part of the solution back into the DOFVector, exchanges
  /// the boundary DOF values with neighbouring ranks and destroys the PETSc
  /// objects.
  ///
  /// Fix: the KSP object was created but never destroyed, leaking a PETSc
  /// solver object on every call; KSPDestroy() is now called at the end.
  void ParallelDomainBase::solvePetscMatrix(DOFVector<double> *vec)
  {
    FUNCNAME("ParallelDomainBase::solvePetscMatrix()");

    KSP ksp;
    PC pc;

    KSPCreate(PETSC_COMM_WORLD, &ksp);
    KSPSetOperators(ksp, petscMatrix, petscMatrix, SAME_NONZERO_PATTERN);
    KSPGetPC(ksp, &pc);
    //PCSetType(pc, PCJACOBI);
    PCSetType(pc, PCILU);
    KSPSetTolerances(ksp, 1.e-7, PETSC_DEFAULT, PETSC_DEFAULT, PETSC_DEFAULT);
    KSPSetType(ksp, KSPBCGS);
    //KSPSetType(ksp, KSPCG);
    KSPMonitorSet(ksp, myKSPMonitor, PETSC_NULL, 0);
    KSPSolve(ksp, petscRhsVec, petscSolVec);

    // Copy the rank-owned part of the solution back into the DOFVector.
    PetscScalar *vecPointer;
    VecGetArray(petscSolVec, &vecPointer);

    for (int i = 0; i < nRankDofs; i++)
      (*vec)[mapLocalToDofIndex[i]] = vecPointer[i];

    VecRestoreArray(petscSolVec, &vecPointer);

    // === Exchange boundary DOF values with the neighbouring ranks. ===

    std::vector<double*> sendBuffers(sendDofs.size());
    std::vector<double*> recvBuffers(recvDofs.size());

    MPI::Request request[sendDofs.size() + recvDofs.size()];
    int requestCounter = 0;
    
    int i = 0;
    for (RankToDofContainer::iterator sendIt = sendDofs.begin();
	 sendIt != sendDofs.end(); ++sendIt, i++) {
      int nSendDOFs = sendIt->second.size();
      sendBuffers[i] = new double[nSendDOFs];

      for (int j = 0; j < nSendDOFs; j++)
	sendBuffers[i][j] = (*vec)[*((sendIt->second)[j])];

      request[requestCounter++] =
	mpiComm.Isend(sendBuffers[i], nSendDOFs, MPI_DOUBLE, sendIt->first, 0);
    }

    i = 0;
    for (RankToDofContainer::iterator recvIt = recvDofs.begin();
	 recvIt != recvDofs.end(); ++recvIt, i++) {
      int nRecvDOFs = recvIt->second.size();
      recvBuffers[i] = new double[nRecvDOFs];

      request[requestCounter++] =
	mpiComm.Irecv(recvBuffers[i], nRecvDOFs, MPI_DOUBLE, recvIt->first, 0);
    }

    MPI::Request::Waitall(requestCounter, request);

    // Write the received boundary values into the DOFVector.
    i = 0;
    for (RankToDofContainer::iterator recvIt = recvDofs.begin();
	 recvIt != recvDofs.end(); ++recvIt, i++) {
      for (int j = 0; j < static_cast<int>(recvIt->second.size()); j++)
	(*vec)[*(recvIt->second)[j]] = recvBuffers[i][j];

      delete [] recvBuffers[i];
    }
    
    for (int i = 0; i < static_cast<int>(sendBuffers.size()); i++)
      delete [] sendBuffers[i];

    // Release all PETSc objects of this solve.
    MatDestroy(petscMatrix);
    VecDestroy(petscRhsVec);
    VecDestroy(petscSolVec);
    VecDestroy(petscTmpVec);
    // BUGFIX: the solver object was leaked before.
    KSPDestroy(ksp);
  }

600

601
  // Solves the assembled multi-component PETSc system with BiCGStab, copies
  // the interleaved solution back into the SystemVector, synchronizes the
  // boundary DOFs across ranks, reports iteration count and residual norm,
  // and destroys all PETSc objects.
  void ParallelDomainBase::solvePetscMatrix(SystemVector &vec)
  {
    FUNCNAME("ParallelDomainBase::solvePetscMatrix()");

#if 0
    // Set old solution to be initiual guess for petsc solver.
    // NOTE(review): this disabled code uses "vec->" although vec is a
    // reference; it would not compile if re-enabled — fix to "vec." first.
    for (int i = 0; i < nComponents; i++)
      setDofVector(petscSolVec, vec->getDOFVector(i), nComponents, i);

    VecAssemblyBegin(petscSolVec);
    VecAssemblyEnd(petscSolVec);
#endif

    KSP solver;

    KSPCreate(PETSC_COMM_WORLD, &solver);
    KSPSetOperators(solver, petscMatrix, petscMatrix, SAME_NONZERO_PATTERN); 

    // Relative tolerance disabled (0.0); converge on absolute tolerance 1e-8.
    KSPSetTolerances(solver, 0.0, 1e-8, PETSC_DEFAULT, PETSC_DEFAULT);

    KSPSetType(solver, KSPBCGS);
    KSPMonitorSet(solver, myKSPMonitor, PETSC_NULL, 0);
    KSPSetFromOptions(solver);
    // Do not delete the solution vector, use it for the initial guess.
    //    KSPSetInitialGuessNonzero(solver, PETSC_TRUE);

    KSPSolve(solver, petscRhsVec, petscSolVec);

    // Copy the rank-owned part of the interleaved solution into the
    // individual component DOFVectors.
    PetscScalar *vecPointer;
    VecGetArray(petscSolVec, &vecPointer);

    for (int i = 0; i < nComponents; i++) {
      DOFVector<double> *dofvec = vec.getDOFVector(i);
      for (int j = 0; j < nRankDofs; j++)
	(*dofvec)[mapLocalToDofIndex[j]] = vecPointer[j * nComponents + i];      
    }

    VecRestoreArray(petscSolVec, &vecPointer);

    // Exchange boundary DOF values with the neighbouring ranks.
    synchVectors(vec);

    int iterations = 0;
    KSPGetIterationNumber(solver, &iterations);
    MSG("  Number of iterations: %d\n", iterations);
    
    // Compute and report the true residual norm ||Ax - b||_2.
    double norm = 0.0;
    MatMult(petscMatrix, petscSolVec, petscTmpVec);
    VecAXPY(petscTmpVec, -1.0, petscRhsVec);
    VecNorm(petscTmpVec, NORM_2, &norm);
    MSG("  Residual norm: %e\n", norm);

    MatDestroy(petscMatrix);
    VecDestroy(petscRhsVec);
    VecDestroy(petscSolVec);
    VecDestroy(petscTmpVec);
    KSPDestroy(solver);
  }
  
  // Synchronizes the boundary DOF values of all components of a SystemVector:
  // each rank sends the values of the DOFs it owns (sendDofs) to the
  // neighbouring ranks and receives the values of its non-owned boundary DOFs
  // (recvDofs). Buffers pack all components of one rank contiguously,
  // component by component.
  void ParallelDomainBase::synchVectors(SystemVector &vec)
  {
    std::vector<double*> sendBuffers(sendDofs.size());
    std::vector<double*> recvBuffers(recvDofs.size());

    MPI::Request request[sendDofs.size() + recvDofs.size()];
    int requestCounter = 0;
    
    int i = 0;
    for (RankToDofContainer::iterator sendIt = sendDofs.begin();
	 sendIt != sendDofs.end(); ++sendIt, i++) {
      int nSendDOFs = sendIt->second.size();
      sendBuffers[i] = new double[nSendDOFs * nComponents];

      // Pack: for each component all DOF values of this rank in order.
      int counter = 0;
      for (int j = 0; j < nComponents; j++) {
	DOFVector<double> *dofvec = vec.getDOFVector(j);
	for (int k = 0; k < nSendDOFs; k++)
	  sendBuffers[i][counter++] = (*dofvec)[*((sendIt->second)[k])];
      }

      request[requestCounter++] = mpiComm.Isend(sendBuffers[i], nSendDOFs * nComponents,
						MPI_DOUBLE, sendIt->first, 0);
    }

    i = 0;
    for (RankToDofContainer::iterator recvIt = recvDofs.begin();
	 recvIt != recvDofs.end(); ++recvIt, i++) {
      int nRecvDOFs = recvIt->second.size() * nComponents;
      recvBuffers[i] = new double[nRecvDOFs];

      request[requestCounter++] =
	mpiComm.Irecv(recvBuffers[i], nRecvDOFs, MPI_DOUBLE, recvIt->first, 0);
    }


    MPI::Request::Waitall(requestCounter, request);

    // Unpack in the same (component-major) order used for packing.
    i = 0;
    for (RankToDofContainer::iterator recvIt = recvDofs.begin();
	 recvIt != recvDofs.end(); ++recvIt, i++) {
      int nRecvDOFs = recvIt->second.size();

      int counter = 0;
      for (int j = 0; j < nComponents; j++) {
	DOFVector<double> *dofvec = vec.getDOFVector(j);
 	for (int k = 0; k < nRecvDOFs; k++)
 	  (*dofvec)[*(recvIt->second)[k]] = recvBuffers[i][counter++];
      }

      delete [] recvBuffers[i];
    }
    
    for (int i = 0; i < static_cast<int>(sendBuffers.size()); i++)
      delete [] sendBuffers[i];
  }

716
717
718
719
  
  void ParallelDomainBase::serialize(std::ostream &out, DofContainer &data)
  {
    int vecSize = data.size();
720
    SerUtil::serialize(out, vecSize);
721
722
    for (int i = 0; i < vecSize; i++) {
      int dofIndex = (*data[i]);
723
      SerUtil::serialize(out, dofIndex);
724
725
726
727
728
729
730
731
732
733
    }
  }


  void ParallelDomainBase::deserialize(std::istream &in, DofContainer &data,
				       std::map<int, const DegreeOfFreedom*> &dofMap)
  {
    FUNCNAME("ParallelDomainBase::deserialize()");

    int vecSize = 0;
734
    SerUtil::deserialize(in, vecSize);
735
736
737
    data.resize(vecSize);
    for (int i = 0; i < vecSize; i++) {
      int dofIndex = 0;
738
      SerUtil::deserialize(in, dofIndex);
739
740
741
742
743
744
745
746
747
748
749
750

      TEST_EXIT_DBG(dofMap.count(dofIndex) != 0)
	("Dof index could not be deserialized correctly!\n");

      data[i] = dofMap[dofIndex];
    }
  }


  void ParallelDomainBase::serialize(std::ostream &out, RankToDofContainer &data)
  {
    int mapSize = data.size();
751
    SerUtil::serialize(out, mapSize);
752
753
    for (RankToDofContainer::iterator it = data.begin(); it != data.end(); ++it) {
      int rank = it->first;
754
      SerUtil::serialize(out, rank);
755
756
757
758
759
760
761
762
763
      serialize(out, it->second);
    }
  }

  
  void ParallelDomainBase::deserialize(std::istream &in, RankToDofContainer &data,
				       std::map<int, const DegreeOfFreedom*> &dofMap)
  {
    int mapSize = 0;
764
    SerUtil::deserialize(in, mapSize);
765
766
    for (int i = 0; i < mapSize; i++) {
      int rank = 0;
767
      SerUtil::deserialize(in, rank);
768
769
770
771
      deserialize(in, data[rank], dofMap);      
    }
  }

772

773
  // Computes the weight of every level-0 element of the rank's partition as
  // the number of leaf elements below it, and returns the sum of all local
  // weights. Relies on the preorder traversal: a level-0 element is visited
  // before its descendants, so elNum holds the enclosing coarse element.
  double ParallelDomainBase::setElemWeights(AdaptInfo *adaptInfo) 
  {
    double localWeightSum = 0.0;
    int elNum = -1;

    elemWeights.clear();

    TraverseStack stack;
    ElInfo *elInfo = stack.traverseFirst(mesh, -1,
					 Mesh::CALL_EVERY_EL_PREORDER);
    while (elInfo) {
      Element *element = elInfo->getElement();

      // get partition data
      PartitionElementData *partitionData = dynamic_cast<PartitionElementData*>
	(element->getElementData(PARTITION_ED));

      if (partitionData && partitionData->getPartitionStatus() == IN) {
	// Remember the index of the enclosing coarse element; leaf weights
	// below it are accumulated on this index.
	if (partitionData->getLevel() == 0) {
	  elNum = element->getIndex();
	}
	TEST_EXIT_DBG(elNum != -1)("invalid element number\n");
	if (element->isLeaf()) {
	  elemWeights[elNum] += 1.0;
	  localWeightSum += 1.0;
	}
      }

      elInfo = stack.traverseNext(elInfo);
    }

    return localWeightSum;
  }

807

808
  /// Partitions (or repartitions) the mesh with ParMETIS. On the first call an
  /// initial partitioning is computed; later calls perform an adaptive
  /// repartitioning based on the current element weights. Afterwards the new
  /// coarse partition vector is stored in partitionVec.
  void ParallelDomainBase::partitionMesh(AdaptInfo *adaptInfo)
  {
    if (!initialPartitionMesh) {
      // Repartitioning: keep the previous distribution for comparison.
      oldPartitionVec = partitionVec;
      partitioner->partition(&elemWeights, ADAPTIVE_REPART, 100.0 /*0.000001*/);
    } else {
      // Very first call: compute the initial partitioning.
      initialPartitionMesh = false;
      partitioner->fillCoarsePartitionVec(&oldPartitionVec);
      partitioner->partition(&elemWeights, INITIAL);
    }

    partitioner->fillCoarsePartitionVec(&partitionVec);
  }

822

823
  void ParallelDomainBase::createInteriorBoundaryInfo(DofContainer& rankDofs)
824
  {
825
    FUNCNAME("ParallelDomainBase::createInteriorBoundaryInfo()");
Thomas Witkowski's avatar
Thomas Witkowski committed
826
827
828

    // === First, create all the information about the interior boundaries. ===

829
    TraverseStack stack;
830
831
    ElInfo *elInfo = stack.traverseFirst(mesh, -1, 
					 Mesh::CALL_LEAF_EL | Mesh::FILL_NEIGH);
832
833
834
835
836
    while (elInfo) {
      Element *element = elInfo->getElement();

      PartitionElementData *partitionData = 
	dynamic_cast<PartitionElementData*>(element->getElementData(PARTITION_ED));   
837

838
839
840
841
842
843
844
      if (partitionData->getPartitionStatus() == IN) {
	for (int i = 0; i < 3; i++) {
	  if (!elInfo->getNeighbour(i))
	    continue;

	  PartitionElementData *neighbourPartitionData =
	    dynamic_cast<PartitionElementData*>(elInfo->getNeighbour(i)->getElementData(PARTITION_ED));
845

846
 	  if (neighbourPartitionData->getPartitionStatus() == OUT) {
847
848
	    // We have found an element that is at an interior boundary. 

849
850
	    int otherElementRank = partitionVec[elInfo->getNeighbour(i)->getIndex()];
	    bool ranksBoundary = (mpiRank > otherElementRank);
851
852

	    AtomicBoundary& bound = 
Thomas Witkowski's avatar
Thomas Witkowski committed
853
	      (ranksBoundary ?
854
855
	       myIntBoundary.getNewAtomicBoundary(otherElementRank) :
	       otherIntBoundary.getNewAtomicBoundary(otherElementRank));
Thomas Witkowski's avatar
Thomas Witkowski committed
856

857
	    bound.rankObject.el = element;
858
	    bound.rankObject.elIndex = element->getIndex();
859
860
	    bound.rankObject.subObjAtBoundary = EDGE;
	    bound.rankObject.ithObjAtBoundary = i;
861
862
863
864
	    // Do not set a pointer to the element, because if will be deleted from
	    // mesh after partitioning and the pointer would become invalid.
	    bound.neighbourObject.el = NULL;
	    bound.neighbourObject.elIndex = elInfo->getNeighbour(i)->getIndex();
865
	    bound.neighbourObject.subObjAtBoundary = EDGE;
866
867
868
869
870
	    bound.neighbourObject.ithObjAtBoundary = elInfo->getSideOfNeighbour(i);

	    // i == 2  =>  getSideOfNeighbour(i) == 2
	    TEST_EXIT_DBG(i != 2 || elInfo->getSideOfNeighbour(i) == 2)
	      ("Should not happen!\n");
871
872
873
874
875
876
 	  }
	}
      }

      elInfo = stack.traverseNext(elInfo);
    }
Thomas Witkowski's avatar
Thomas Witkowski committed
877
878
879


    // === Once we have this information, we must care about their order. Eventually ===
Thomas Witkowski's avatar
Thomas Witkowski committed
880
881
    // === all the boundaries have to be in the same order on both ranks that share  ===
    // === the bounday.                                                              ===
Thomas Witkowski's avatar
Thomas Witkowski committed
882

Thomas Witkowski's avatar
Thomas Witkowski committed
883
    std::vector<int*> sendBuffers, recvBuffers;
Thomas Witkowski's avatar
Thomas Witkowski committed
884

Thomas Witkowski's avatar
Thomas Witkowski committed
885
886
887
888
    MPI::Request request[myIntBoundary.boundary.size() + 
			 otherIntBoundary.boundary.size()];
    int requestCounter = 0;

Thomas Witkowski's avatar
Thomas Witkowski committed
889
890
    for (RankToBoundMap::iterator rankIt = myIntBoundary.boundary.begin();
	 rankIt != myIntBoundary.boundary.end(); ++rankIt) {    
Thomas Witkowski's avatar
Thomas Witkowski committed
891
      int nSendInt = rankIt->second.size();
892
893
894
895
896
      int* buffer = new int[nSendInt * 2];
      for (int i = 0; i < nSendInt; i++) {
	buffer[i * 2] = (rankIt->second)[i].rankObject.elIndex;
	buffer[i * 2 + 1] = (rankIt->second)[i].rankObject.ithObjAtBoundary;
      }
Thomas Witkowski's avatar
Thomas Witkowski committed
897
898
      sendBuffers.push_back(buffer);
      
Thomas Witkowski's avatar
Thomas Witkowski committed
899
      request[requestCounter++] =
900
	mpiComm.Isend(buffer, nSendInt * 2, MPI_INT, rankIt->first, 0);
Thomas Witkowski's avatar
Thomas Witkowski committed
901
902
    }

Thomas Witkowski's avatar
Thomas Witkowski committed
903
904
    for (RankToBoundMap::iterator rankIt = otherIntBoundary.boundary.begin();
	 rankIt != otherIntBoundary.boundary.end(); ++rankIt) {
Thomas Witkowski's avatar
Thomas Witkowski committed
905
      int nRecvInt = rankIt->second.size();
906
      int *buffer = new int[nRecvInt * 2];
Thomas Witkowski's avatar
Thomas Witkowski committed
907
      recvBuffers.push_back(buffer);
Thomas Witkowski's avatar
Thomas Witkowski committed
908

Thomas Witkowski's avatar
Thomas Witkowski committed
909
      request[requestCounter++] = 
910
	mpiComm.Irecv(buffer, nRecvInt * 2, MPI_INT, rankIt->first, 0);
Thomas Witkowski's avatar
Thomas Witkowski committed
911
912
    }

Thomas Witkowski's avatar
Thomas Witkowski committed
913
914
915

    MPI::Request::Waitall(requestCounter, request);

Thomas Witkowski's avatar
Thomas Witkowski committed
916
917

    int i = 0;
Thomas Witkowski's avatar
Thomas Witkowski committed
918
919
    for (RankToBoundMap::iterator rankIt = otherIntBoundary.boundary.begin();
	 rankIt != otherIntBoundary.boundary.end(); ++rankIt) {
Thomas Witkowski's avatar
Thomas Witkowski committed
920
921
922
923
924
925
926

      // === We have received from rank "rankIt->first" the ordered list of element ===
      // === indices. We now have to sort the corresponding list in this rank to    ===
      // === get the same order.                                                    ===
      
      for (int j = 0; j < static_cast<int>(rankIt->second.size()); j++) {
	// If the expected object is not at place, search for it.
927
928
	if ((rankIt->second)[j].neighbourObject.elIndex != recvBuffers[i][j * 2] ||
	    (rankIt->second)[j].neighbourObject.ithObjAtBoundary != recvBuffers[i][j * 2 + 1]) {
Thomas Witkowski's avatar
Thomas Witkowski committed
929
930
	  int k = j + 1;
	  for (; k < static_cast<int>(rankIt->second.size()); k++)
931
932
	    if ((rankIt->second)[k].neighbourObject.elIndex == recvBuffers[i][j * 2] && 
		(rankIt->second)[k].neighbourObject.ithObjAtBoundary == recvBuffers[i][j * 2 + 1])
Thomas Witkowski's avatar
Thomas Witkowski committed
933
934
935
	      break;

	  // The element must always be found, because the list is just in another order.
Thomas Witkowski's avatar
Thomas Witkowski committed
936
	  TEST_EXIT_DBG(k < static_cast<int>(rankIt->second.size()))
937
	    ("Should never happen!\n");
Thomas Witkowski's avatar
Thomas Witkowski committed
938
939
940
941
942
943
944
945
946
947
948
949
950

	  // Swap the current with the found element.
	  AtomicBoundary tmpBound = (rankIt->second)[k];
	  (rankIt->second)[k] = (rankIt->second)[j];
	  (rankIt->second)[j] = tmpBound;	
	}
      }

      delete [] recvBuffers[i++];
    }

    for (int i = 0; i < static_cast<int>(sendBuffers.size()); i++)
      delete [] sendBuffers[i];
951
952
953
  }


954
  void ParallelDomainBase::removeMacroElements()
955
956
957
958
959
960
961
962
963
964
965
966
  {
    std::vector<MacroElement*> macrosToRemove;
    for (std::deque<MacroElement*>::iterator it = mesh->firstMacroElement();
	 it != mesh->endOfMacroElements();
	 ++it) {
      PartitionElementData *partitionData = 
	dynamic_cast<PartitionElementData*>
	((*it)->getElement()->getElementData(PARTITION_ED));
      if (partitionData->getPartitionStatus() != IN)
	macrosToRemove.push_back(*it);
    }

Thomas Witkowski's avatar
Thomas Witkowski committed
967
    mesh->removeMacroElements(macrosToRemove, feSpace);
968
969
970
  }


971
  void ParallelDomainBase::createLocalGlobalNumbering(DofContainer& rankDofs,
972
						      int& nRankDofs, 
Thomas Witkowski's avatar
Thomas Witkowski committed
973
						      int& nOverallDOFs)
974
  {
975
    FUNCNAME("ParallelDomainBase::createLocalGlobalNumbering()");
976
977

    // === Get rank information about DOFs. ===
978
979
980

    // Stores to each DOF pointer the set of ranks the DOF is part of.
    std::map<const DegreeOfFreedom*, std::set<int> > partitionDOFs;
Thomas Witkowski's avatar
n    
Thomas Witkowski committed
981
    DofContainer rankAllDofs;
982
    DofToRank boundaryDofs;
983

984
    createDofMemberInfo(partitionDOFs, rankDofs, rankAllDofs, boundaryDofs, vertexDof);
985

986
    nRankDofs = rankDofs.size();
987
988
    nOverallDOFs = partitionDOFs.size();

989

990
    // === Get starting position for global rank dof ordering. ====
991

992
    rstart = 0;
993
994
    mpiComm.Scan(&nRankDofs, &rstart, 1, MPI_INT, MPI_SUM);
    rstart -= nRankDofs;
995

996
997
998
999
    
    // === Create for all dofs in rank new indices. The new index must start at ===
    // === index 0, must be continues and have the same order as the indices    ===
    // === had before.                                                          ===
Thomas Witkowski's avatar
n    
Thomas Witkowski committed
1000

Thomas Witkowski's avatar
Thomas Witkowski committed
1001

1002
    // Do not change the indices now, but create a new indexing and store it here.
1003
1004
    DofIndexMap rankDofsNewLocalIndex;
    isRankDof.clear();
Thomas Witkowski's avatar
n    
Thomas Witkowski committed
1005
1006
1007
    int i = 0;
    for (DofContainer::iterator dofIt = rankAllDofs.begin();
	 dofIt != rankAllDofs.end(); ++dofIt) {
1008
1009
1010
      rankDofsNewLocalIndex[*dofIt] = i;
      // First, we set all dofs in ranks partition to be owend by the rank. Later,
      // the dofs in ranks partition that are owned by other rank are set to false.
Thomas Witkowski's avatar
n    
Thomas Witkowski committed
1011
      isRankDof[i] = true;
1012
      i++;
Thomas Witkowski's avatar
n    
Thomas Witkowski committed
1013
1014
    }

1015

1016
    // === Create for all rank owned dofs a new global indexing. ===
1017

1018
1019
    // Stores for dofs in rank a new global index.
    DofIndexMap rankDofsNewGlobalIndex;
1020
1021
1022
    // Stores for all rank owned dofs a continues local index.
    DofIndexMap rankOwnedDofsNewLocalIndex;

Thomas Witkowski's avatar
n    
Thomas Witkowski committed
1023
    i = 0;
1024
1025
    for (DofContainer::iterator dofIt = rankDofs.begin();
	 dofIt != rankDofs.end(); ++dofIt) {
1026
      rankDofsNewGlobalIndex[*dofIt] = i + rstart;
1027
      rankOwnedDofsNewLocalIndex[*dofIt] = i;
Thomas Witkowski's avatar
n    
Thomas Witkowski committed
1028
1029
1030
      i++;
    }

Thomas Witkowski's avatar
Thomas Witkowski committed
1031
 
1032
1033
1034
1035
1036
    // === Create information which dof indices must be send and which must  ===
    // === be received.                                                      ===

    // Stores to each rank a map from DOF pointers (which are all owned by the rank
    // and lie on an interior boundary) to new global DOF indices.
1037
    std::map<int, DofIndexMap > sendNewDofs;
1038

1039
1040
1041
1042
    // Maps to each rank the number of new DOF indices this rank will receive from.
    // All these DOFs are at an interior boundary on this rank, but are owned by
    // another rank.
    std::map<int, int> recvNewDofs;
1043

1044
    for (DofToRank::iterator it = boundaryDofs.begin(); it != boundaryDofs.end(); ++it) {
1045
1046
1047
1048
1049
1050
1051
1052

      if (it->second == mpiRank) {
	// If the boundary dof is a rank dof, it must be send to other ranks.

	// Search for all ranks that have this dof too.
	for (std::set<int>::iterator itRanks = partitionDOFs[it->first].begin();
	     itRanks != partitionDOFs[it->first].end();
	     ++itRanks) {
Thomas Witkowski's avatar
n    
Thomas Witkowski committed
1053
	  if (*itRanks != mpiRank) {
1054
	    TEST_EXIT_DBG(rankDofsNewGlobalIndex.count(it->first) == 1)
Thomas Witkowski's avatar
n    
Thomas Witkowski committed
1055
1056
	      ("DOF Key not found!\n");

1057
	    sendNewDofs[*itRanks][it->first] = rankDofsNewGlobalIndex[it->first];
Thomas Witkowski's avatar
n    
Thomas Witkowski committed
1058
	  }
1059
1060
	}
      } else {
Thomas Witkowski's avatar
Thomas Witkowski committed
1061
1062
	// If the boundary dof is not a rank dof, its new dof index (and later
	// also the dof values) must be received from another rank.
1063
1064
1065
1066
	if (recvNewDofs.find(it->second) == recvNewDofs.end()) 
	  recvNewDofs[it->second] = 1;
	else
	  recvNewDofs[it->second]++;
1067
1068
1069
      }
    }

Thomas Witkowski's avatar
n    
Thomas Witkowski committed
1070

1071
    // === Send and receive the dof indices at boundary. ===
1072

Thomas Witkowski's avatar
Thomas Witkowski committed
1073
    std::vector<int*> sendBuffers(sendNewDofs.size()), recvBuffers(recvNewDofs.size());
1074
1075
1076

    sendDofs.clear();
    recvDofs.clear();
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092

#if 0
    StlMpi stlMpi(mpiComm);
    stlMpi.prepareCommunication(false);
    std::map<

    for (std::map<int, DofIndexMap >::iterator sendIt = sendNewDofs.begin();
	 sendIt != sendNewDofs.end(); ++sendIt, i++)
      stlMpi.send(it->second, it->first);

    for (std::map<int, int>::iterator recvIt = recvNewDofs.begin();
	 recvIt != recvNewDofs.end(); ++recvIt, i++)
      stlMpi.recv(it->first, it->second * 2); 

    stlMpi.startCommunication();
#endif
1093
    
Thomas Witkowski's avatar
Thomas Witkowski committed
1094
1095
1096
    MPI::Request request[sendNewDofs.size() + recvNewDofs.size()];
    int requestCounter = 0;

Thomas Witkowski's avatar
n    
Thomas Witkowski committed
1097
    i = 0;
1098
1099
    for (std::map<int, DofIndexMap >::iterator sendIt = sendNewDofs.begin();
	 sendIt != sendNewDofs.end(); ++sendIt, i++) {
1100
1101
      int nSendDofs = sendIt->second.size() * 2;
      sendBuffers[i] = new int[nSendDofs];
1102

1103
      int c = 0;