ParallelDomainBase.cc 58.8 KB
Newer Older
Thomas Witkowski's avatar
Thomas Witkowski committed
1
2
#include <algorithm>

3
#include "ParallelDomainBase.h"
4
5
6
7
8
9
10
#include "ParMetisPartitioner.h"
#include "Mesh.h"
#include "Traverse.h"
#include "ElInfo.h"
#include "Element.h"
#include "MacroElement.h"
#include "PartitionElementData.h"
11
12
#include "DOFMatrix.h"
#include "DOFVector.h"
13
#include "SystemVector.h"
14
#include "VtkWriter.h"
15
#include "ElementDofIterator.h"
16
17
#include "ProblemStatBase.h"
#include "StandardProblemIteration.h"
18
#include "ElementFileWriter.h"
19
20

#include "petscksp.h"
21
22
23

namespace AMDiS {

24
  /// PETSc KSP monitor callback: reports the residual norm of every 100th
  /// solver iteration on the master rank only, so the output stays readable
  /// in parallel runs.
  PetscErrorCode myKSPMonitor(KSP ksp, PetscInt iter, PetscReal rnorm, void *)
  {
    bool isReportIteration = (iter % 100 == 0);
    bool isMasterRank = (MPI::COMM_WORLD.Get_rank() == 0);

    if (isReportIteration && isMasterRank)
      std::cout << "  Petsc-Iteration " << iter << ": " << rnorm << std::endl;

    return 0;
  }

32
33
34
35
36
  /// Strict-weak-ordering predicate for sorting DOF pointers by the DOF
  /// value they point to (not by pointer address).
  inline bool cmpDofsByValue(const DegreeOfFreedom* dof1, const DegreeOfFreedom* dof2)
  {
    return *dof1 < *dof2;
  }

37
  /// Constructor: stores the problem interfaces and FE space, checks that the
  /// mesh fulfills the restrictions of the parallelization (exactly one
  /// DOFAdmin, no pre-DOFs), and creates the ParMETIS partitioner.
  ParallelDomainBase::ParallelDomainBase(ProblemIterationInterface *iIF,
                                         ProblemTimeInterface *tIF,
                                         FiniteElemSpace *fe,
                                         RefinementManager *refineManager)
    : iterationIF(iIF),
      timeIF(tIF),
      name(iIF->getName()),
      feSpace(fe),
      mesh(fe->getMesh()),
      refinementManager(refineManager),
      initialPartitionMesh(true),
      nRankDofs(0),
      rstart(0),
      nComponents(1),
      deserialized(false)
  {
    // BUGFIX: the function name string was misspelled ("ParalleDomainBase").
    FUNCNAME("ParallelDomainBase::ParallelDomainBase()");

    // The parallel DOF numbering assumes a single DOFAdmin without pre-DOFs.
    TEST_EXIT(mesh->getNumberOfDOFAdmin() == 1)
      ("Only meshes with one DOFAdmin are supported!\n");
    TEST_EXIT(mesh->getDOFAdmin(0).getNumberOfPreDOFs(0) == 0)
      ("Wrong pre dof number for DOFAdmin!\n");

    mpiRank = MPI::COMM_WORLD.Get_rank();
    mpiSize = MPI::COMM_WORLD.Get_size();
    mpiComm = MPI::COMM_WORLD;
    partitioner = new ParMetisPartitioner(mesh, &mpiComm);
  }

66
  /// Sets up the whole parallel infrastructure: partitions the macro mesh,
  /// builds the local/global DOF numbering, records the interior boundaries
  /// between rank partitions, strips foreign macro elements, and finally
  /// performs the requested number of global refinements. Must run before
  /// any assembly; does nothing if the state was restored from a file.
  void ParallelDomainBase::initParallelization(AdaptInfo *adaptInfo)
  {
    FUNCNAME("ParallelDomainBase::initParallelization()");

    TEST_EXIT(mpiSize > 1)
      ("Parallelization does not work with only one process!\n");

    // If the problem has been already read from a file, we do not need to do anything.
    if (deserialized)
      return;

    // Test, if the mesh is the macro mesh only! Partitioning of the mesh is supported
    // only for macro meshes, so it will not work yet if the mesh is already refined
    // in some way.
    testForMacroMesh();

    // create an initial partitioning of the mesh
    partitioner->createPartitionData();
    // set the element weights, which are 1 at the very first begin
    setElemWeights(adaptInfo);
    // and now partition the mesh
    partitionMesh(adaptInfo);   

#if (DEBUG != 0)
    // Debug only: remember element -> DOF relations so the numbering can be
    // validated after the mesh has been modified.
    ElementIdxToDofs elMap;
    DbgCreateElementMap(elMap);

    if (mpiRank == 0)
      writePartitioningMesh("part.vtu");
#endif

    // === Create new global and local DOF numbering. ===

    // Set of all DOFs of the rank.
    std::vector<const DegreeOfFreedom*> rankDofs;
    // Number of DOFs in ranks partition that are owned by the rank.
    nRankDofs = 0;
    // Number of all DOFs in the macro mesh.
    int nOverallDOFs = 0;

    createLocalGlobalNumbering(rankDofs, nRankDofs, nOverallDOFs);

    // === Create interior boundary information ===

    createInteriorBoundaryInfo(rankDofs);

    // === Remove all macro elements that are not part of the rank partition. ===
    // Note: must happen after the boundary info is created, which still needs
    // access to the neighbouring (foreign) elements.

    removeMacroElements();

#if (DEBUG != 0)
    DbgTestElementMap(elMap);
    DbgTestInteriorBoundary();
#endif

    // === Reset all DOFAdmins of the mesh. ===

    updateDofAdmins();

    // === Global refinements. ===

    int globalRefinement = 0;
    GET_PARAMETER(0, "testgr", "%d", &globalRefinement);

    if (globalRefinement > 0) {
      refinementManager->globalRefine(mesh, globalRefinement);

#if (DEBUG != 0)
      elMap.clear();
      DbgCreateElementMap(elMap);
#endif

      // Refinement created new DOFs, so the local/global numbering and the
      // DOFAdmins must be rebuilt.
      updateLocalGlobalNumbering(nRankDofs, nOverallDOFs);

      updateDofAdmins();

#if (DEBUG != 0)
      DbgTestElementMap(elMap);
#endif
    }

#if (DEBUG != 0)
    DbgTestCommonDofs(true);
#endif

    // Row counts for the PETSc system: one row per DOF and solution component.
    nRankRows = nRankDofs * nComponents;
    nOverallRows = nOverallDOFs * nComponents;
  }

158
159

  /// Counterpart to initParallelization(); currently there is nothing to
  /// clean up when the parallelization is shut down.
  void ParallelDomainBase::exitParallelization(AdaptInfo *adaptInfo)
  {}
161

162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
  
  void ParallelDomainBase::updateDofAdmins()
  {
    int nAdmins = mesh->getNumberOfDOFAdmin();
    for (int i = 0; i < nAdmins; i++) {
      DOFAdmin& admin = const_cast<DOFAdmin&>(mesh->getDOFAdmin(i));
      
      for (int j = 0; j < admin.getSize(); j++)
	admin.setDOFFree(j, true);
      for (int j = 0; j < static_cast<int>(mapLocalGlobalDOFs.size()); j++)
 	admin.setDOFFree(j, false);

      admin.setUsedSize(mapLocalGlobalDOFs.size());
      admin.setUsedCount(mapLocalGlobalDOFs.size());
      admin.setFirstHole(mapLocalGlobalDOFs.size());
    }
  }

180

181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
  /// Verifies that the mesh is a pure macro mesh (no element refined) and
  /// that it has at least as many macro elements as there are MPI processes.
  /// Aborts with an error message otherwise.
  void ParallelDomainBase::testForMacroMesh()
  {
    FUNCNAME("ParallelDomainBase::testForMacroMesh()");

    int nMacroElements = 0;

    TraverseStack stack;
    ElInfo *elInfo = stack.traverseFirst(mesh, -1, Mesh::CALL_LEAF_EL);
    while (elInfo) {
      // BUGFIX: corrected typo in the error message ("does nor work").
      TEST_EXIT(elInfo->getLevel() == 0)
        ("Mesh is already refined! This does not work with parallelization!\n");

      nMacroElements++;

      elInfo = stack.traverseNext(elInfo);
    }

    // Every rank needs at least one macro element to own.
    TEST_EXIT(nMacroElements >= mpiSize)
      ("The mesh has less macro elements than number of mpi processes!\n");
  }

202
203
  /// Copies the entries of one DOFMatrix into the global PETSc matrix.
  /// The global row/column of a local DOF d is
  ///   mapLocalGlobalDOFs[d] * dispMult + dispAddRow (resp. dispAddCol),
  /// which interleaves the components of a multi-component system.
  void ParallelDomainBase::setDofMatrix(DOFMatrix* mat, int dispMult, 
                                        int dispAddRow, int dispAddCol)
  {
    FUNCNAME("ParallelDomainBase::setDofMatrix()");

    TEST_EXIT(mat)("No DOFMatrix!\n");

    using mtl::tag::row; using mtl::tag::nz; using mtl::begin; using mtl::end;
    namespace traits= mtl::traits;
    typedef DOFMatrix::base_matrix_type Matrix;

    traits::col<Matrix>::type col(mat->getBaseMatrix());
    traits::const_value<Matrix>::type value(mat->getBaseMatrix());

    typedef traits::range_generator<row, Matrix>::type cursor_type;
    typedef traits::range_generator<nz, cursor_type>::type icursor_type;

    // Collect each row's nonzeros first, then insert them with a single
    // MatSetValues call per row.
    std::vector<int> cols;
    std::vector<double> values;
    cols.reserve(300);
    values.reserve(300);

    for (cursor_type cursor = begin<row>(mat->getBaseMatrix()), 
           cend = end<row>(mat->getBaseMatrix()); cursor != cend; ++cursor) {

      cols.clear();
      values.clear();

      int r = mapLocalGlobalDOFs[*cursor] * dispMult + dispAddRow;

      // Skip stored zeros; PETSc preallocation counts only true nonzeros.
      for (icursor_type icursor = begin<nz>(cursor), 
             icend = end<nz>(cursor); icursor != icend; ++icursor)
        if (value(*icursor) != 0.0) {
          cols.push_back(mapLocalGlobalDOFs[col(*icursor)] * dispMult + dispAddCol);
          values.push_back(value(*icursor));
        }

      // BUGFIX: guard against empty rows — &(cols[0]) on an empty
      // std::vector is undefined behavior.
      if (!cols.empty())
        MatSetValues(petscMatrix, 1, &r, cols.size(), &(cols[0]), &(values[0]), ADD_VALUES);
    }
  }
242

243

244
245
246
  /// Adds the entries of a DOFVector into the PETSc right-hand-side vector.
  /// The global row of a local DOF d is
  ///   mapLocalGlobalDOFs[d] * dispMult + dispAdd (interleaved components).
  void ParallelDomainBase::setDofVector(DOFVector<double>* vec, 
                                        int dispMult, int dispAdd)
  {
    DOFVector<double>::Iterator dofIt(vec, USED_DOFS);
    for (dofIt.reset(); !dofIt.end(); ++dofIt) {
      int globalRow = mapLocalGlobalDOFs[dofIt.getDOFIndex()] * dispMult + dispAdd;
      double entry = *dofIt;

      VecSetValues(petscRhsVec, 1, &globalRow, &entry, ADD_VALUES);
    }
  }

256

257
258
  /// Single-component variant: creates the PETSc matrix and the three work
  /// vectors (rhs, solution, temporary), then fills and assembles matrix and
  /// rhs from the given DOFMatrix/DOFVector.
  void ParallelDomainBase::fillPetscMatrix(DOFMatrix *mat, DOFVector<double> *vec)
  {
    // Distributed sparse matrix: nRankRows local rows/cols of nOverallRows.
    MatCreate(PETSC_COMM_WORLD, &petscMatrix);
    MatSetSizes(petscMatrix, nRankRows, nRankRows, nOverallRows, nOverallRows);
    MatSetType(petscMatrix, MATAIJ);

    VecCreate(PETSC_COMM_WORLD, &petscRhsVec);
    VecSetSizes(petscRhsVec, nRankRows, nOverallRows);
    VecSetType(petscRhsVec, VECMPI);

    VecCreate(PETSC_COMM_WORLD, &petscSolVec);
    VecSetSizes(petscSolVec, nRankRows, nOverallRows);
    VecSetType(petscSolVec, VECMPI);

    VecCreate(PETSC_COMM_WORLD, &petscTmpVec);
    VecSetSizes(petscTmpVec, nRankRows, nOverallRows);
    VecSetType(petscTmpVec, VECMPI);

    // Insert the local matrix entries, then finish the parallel assembly.
    setDofMatrix(mat);

    MatAssemblyBegin(petscMatrix, MAT_FINAL_ASSEMBLY);
    MatAssemblyEnd(petscMatrix, MAT_FINAL_ASSEMBLY);

    // Same for the right-hand side vector.
    setDofVector(vec);

    VecAssemblyBegin(petscRhsVec);
    VecAssemblyEnd(petscRhsVec);
  }

  
  /// Multi-component variant: computes an exact nonzero preallocation for the
  /// distributed PETSc matrix (including entries this rank assembles for rows
  /// owned by other ranks, which are exchanged via MPI), creates the matrix
  /// with MatCreateMPIAIJ, and fills matrix and rhs from the block system.
  void ParallelDomainBase::fillPetscMatrix(Matrix<DOFMatrix*> *mat, SystemVector *vec)
  {
    FUNCNAME("ParallelDomainBase::fillPetscMatrix()");

    clock_t first = clock();

    VecCreate(PETSC_COMM_WORLD, &petscRhsVec);
    VecSetSizes(petscRhsVec, nRankRows, nOverallRows);
    VecSetType(petscRhsVec, VECMPI);

    VecCreate(PETSC_COMM_WORLD, &petscSolVec);
    VecSetSizes(petscSolVec, nRankRows, nOverallRows);
    VecSetType(petscSolVec, VECMPI);

    VecCreate(PETSC_COMM_WORLD, &petscTmpVec);
    VecSetSizes(petscTmpVec, nRankRows, nOverallRows);
    VecSetType(petscTmpVec, VECMPI);

    using mtl::tag::row; using mtl::tag::nz; using mtl::begin; using mtl::end;
    namespace traits= mtl::traits;
    typedef DOFMatrix::base_matrix_type Matrix;

    // Per-local-row nonzero counts for the diagonal block (columns owned by
    // this rank) and the off-diagonal block (columns of other ranks).
    // std::vector instead of the former variable length arrays (non-standard C++).
    std::vector<int> d_nnz(nRankRows, 0);
    std::vector<int> o_nnz(nRankRows, 0);

    // Entries of rows owned by other ranks, keyed by the owning rank; they
    // must be sent there so the owner can count them for preallocation.
    std::map<int, std::vector< std::pair<int, int> > > sendMatrixEntry;

    for (int i = 0; i < nComponents; i++) {
      for (int j = 0; j < nComponents; j++) {
        if ((*mat)[i][j]) {
          Matrix bmat = (*mat)[i][j]->getBaseMatrix();

          traits::col<Matrix>::type col(bmat);
          traits::const_value<Matrix>::type value(bmat);

          typedef traits::range_generator<row, Matrix>::type cursor_type;
          typedef traits::range_generator<nz, cursor_type>::type icursor_type;

          for (cursor_type cursor = begin<row>(bmat), 
                 cend = end<row>(bmat); cursor != cend; ++cursor) {

            int r = mapLocalGlobalDOFs[*cursor] * nComponents + i;

            if (isRankDof[*cursor]) {
              // Row owned by this rank: count into d_nnz/o_nnz directly.
              r -= rstart * nComponents;

#if (DEBUG != 0)    
              if (r < 0 || r >= nRankRows) {
                std::cout << "ERROR in rank: " << mpiRank << std::endl;
                std::cout << "  Wrong r = " << r << " " << *cursor << " " 
                          << mapLocalGlobalDOFs[*cursor] << " " 
                          << nRankRows << std::endl;
                ERROR_EXIT("Should not happen!\n");
              }
#endif

              for (icursor_type icursor = begin<nz>(cursor), 
                     icend = end<nz>(cursor); icursor != icend; ++icursor) {
                if (value(*icursor) != 0.0) {
                  int c = mapLocalGlobalDOFs[col(*icursor)] * nComponents + j;

                  if (c >= rstart * nComponents && 
                      c < rstart * nComponents + nRankRows)
                    d_nnz[r]++;
                  else
                    o_nnz[r]++;
                }    
              }
            } else {
              // Row owned by another rank: find which rank we receive this
              // DOF from — that rank owns the row and must count the entries.
              int sendToRank = -1;

              for (RankToDofContainer::iterator it = recvDofs.begin();
                   it != recvDofs.end(); ++it) {
                for (DofContainer::iterator dofIt = it->second.begin();
                     dofIt != it->second.end(); ++dofIt) {
                  if (**dofIt == *cursor) {
                    sendToRank = it->first;
                    break;
                  }
                }

                if (sendToRank != -1)
                  break;
              }

              TEST_EXIT_DBG(sendToRank != -1)("Should not happen!\n");

              for (icursor_type icursor = begin<nz>(cursor), 
                     icend = end<nz>(cursor); icursor != icend; ++icursor) {
                if (value(*icursor) != 0.0) {
                  int c = mapLocalGlobalDOFs[col(*icursor)] * nComponents + j;

                  sendMatrixEntry[sendToRank].push_back(std::make_pair(r, c));
                }
              }
            }
          }
        }
      }
    }

    // === Exchange the number of remote entries, then the entries themselves. ===

    std::vector<MPI::Request> request(sendDofs.size() + recvDofs.size());
    int requestCounter = 0;

    std::vector<int*> sendBuffers;
    sendBuffers.reserve(recvDofs.size());

    // BUGFIX: the size values handed to Isend must stay alive until Waitall.
    // The former loop-local variable was overwritten on every iteration while
    // the nonblocking sends were still pending.
    std::vector<int> sendSizes(recvDofs.size(), 0);

    int i = 0;
    for (RankToDofContainer::iterator it = recvDofs.begin(); 
         it != recvDofs.end(); ++it, i++) {
      sendSizes[i] = sendMatrixEntry[it->first].size();
      int nSend = sendSizes[i];

      request[requestCounter++] = 
        mpiComm.Isend(&(sendSizes[i]), 1, MPI_INT, it->first, 0);

      if (nSend > 0) {
        int *sendbuf = new int[nSend * 2];
        for (int k = 0; k < nSend; k++) {
          sendbuf[k * 2] = sendMatrixEntry[it->first][k].first;
          sendbuf[k * 2 + 1] = sendMatrixEntry[it->first][k].second;
        }
        sendBuffers.push_back(sendbuf);
      } else {
        sendBuffers.push_back(NULL);
      }
    }

    // BUGFIX: recvSize was only reserve()d but written through operator[] by
    // the Irecv calls below — writing into unconstructed vector elements is
    // undefined behavior. The vector must actually be sized.
    std::vector<int> recvSize(sendDofs.size(), 0);

    i = 0;
    for (RankToDofContainer::iterator it = sendDofs.begin();
         it != sendDofs.end(); ++it)
      request[requestCounter++] = 
        mpiComm.Irecv(&(recvSize[i++]), 1, MPI_INT, it->first, 0);

    if (requestCounter > 0)
      MPI::Request::Waitall(requestCounter, &(request[0]));

    requestCounter = 0;

    i = 0;
    for (RankToDofContainer::iterator it = recvDofs.begin(); 
         it != recvDofs.end(); ++it) {
      int nSend = sendSizes[i];

      if (nSend > 0)
        request[requestCounter++] = 
          mpiComm.Isend(sendBuffers[i], nSend * 2, MPI_INT, it->first, 0);

      i++;
    }

    std::vector<int*> recvBuffers;
    recvBuffers.reserve(sendDofs.size());

    i = 0;
    for (RankToDofContainer::iterator it = sendDofs.begin();
         it != sendDofs.end(); ++it) {
      if (recvSize[i] > 0) {
        int *recvbuf = new int[recvSize[i] * 2];
        recvBuffers.push_back(recvbuf);

        request[requestCounter++] =
          mpiComm.Irecv(recvbuf, recvSize[i] * 2, MPI_INT, it->first, 0);
      } else {
        recvBuffers.push_back(NULL);
      }

      i++;
    }

    if (requestCounter > 0)
      MPI::Request::Waitall(requestCounter, &(request[0]));

    for (int j = 0; j < static_cast<int>(sendBuffers.size()); j++)
      if (sendBuffers[j])
        delete [] sendBuffers[j];

    // Count the received remote entries into the local preallocation arrays.
    i = 0;
    for (RankToDofContainer::iterator it = sendDofs.begin();
         it != sendDofs.end(); ++it) {
      if (recvSize[i] > 0) {
        for (int j = 0; j < recvSize[i]; j++) {
          int r = recvBuffers[i][j * 2];
          int c = recvBuffers[i][j * 2 + 1];

          r -= rstart * nComponents;

          TEST_EXIT_DBG(r >= 0 && r < nRankRows)("Should not happen!\n");

          if (c < rstart * nComponents ||
              c >= rstart * nComponents + nRankRows)
            o_nnz[r]++;
          else
            d_nnz[r]++;
        }

        delete [] recvBuffers[i];
      }

      i++;
    }

    // Create the matrix with the exact per-row preallocation computed above.
    MatCreateMPIAIJ(PETSC_COMM_WORLD, nRankRows, nRankRows, nOverallRows, nOverallRows,
                    0, &(d_nnz[0]), 0, &(o_nnz[0]), &petscMatrix);

#if (DEBUG != 0)
    int a, b;
    MatGetOwnershipRange(petscMatrix, &a, &b);
    TEST_EXIT(a == rstart * nComponents)("Wrong matrix ownership range!\n");
    TEST_EXIT(b == rstart * nComponents + nRankRows)("Wrong matrix ownership range!\n");
#endif

    // Fill and assemble the matrix blocks and the right-hand side.
    for (int i = 0; i < nComponents; i++)
      for (int j = 0; j < nComponents; j++)
        if ((*mat)[i][j])
          setDofMatrix((*mat)[i][j], nComponents, i, j);

    MatAssemblyBegin(petscMatrix, MAT_FINAL_ASSEMBLY);
    MatAssemblyEnd(petscMatrix, MAT_FINAL_ASSEMBLY);

    for (int i = 0; i < nComponents; i++)
      setDofVector(vec->getDOFVector(i), nComponents, i);

    VecAssemblyBegin(petscRhsVec);
    VecAssemblyEnd(petscRhsVec);

    INFO(info, 8)("Fill petsc matrix needed %.5f seconds\n", TIME_USED(first, clock()));
  }


524
  /// Solves the assembled single-component system with BiCGStab/ILU, copies
  /// the local part of the solution into vec, exchanges the boundary DOF
  /// values with the neighbouring ranks, and destroys all PETSc objects.
  void ParallelDomainBase::solvePetscMatrix(DOFVector<double> *vec)
  {
    FUNCNAME("ParallelDomainBase::solvePetscMatrix()");

    KSP ksp;
    PC pc;

    KSPCreate(PETSC_COMM_WORLD, &ksp);
    KSPSetOperators(ksp, petscMatrix, petscMatrix, SAME_NONZERO_PATTERN);
    KSPGetPC(ksp, &pc);
    //PCSetType(pc, PCJACOBI);
    PCSetType(pc, PCILU);
    KSPSetTolerances(ksp, 1.e-7, PETSC_DEFAULT, PETSC_DEFAULT, PETSC_DEFAULT);
    KSPSetType(ksp, KSPBCGS);
    //KSPSetType(ksp, KSPCG);
    KSPMonitorSet(ksp, myKSPMonitor, PETSC_NULL, 0);
    KSPSolve(ksp, petscRhsVec, petscSolVec);

    // Copy the rank-owned part of the PETSc solution into the DOFVector.
    PetscScalar *vecPointer;
    VecGetArray(petscSolVec, &vecPointer);

    for (int i = 0; i < nRankDofs; i++)
      (*vec)[mapLocalToDofIndex[i]] = vecPointer[i];

    VecRestoreArray(petscSolVec, &vecPointer);

    // === Exchange the values of DOFs on interior boundaries: each owner ===
    // === sends, each non-owner receives.                                ===

    std::vector<double*> sendBuffers(sendDofs.size());
    std::vector<double*> recvBuffers(recvDofs.size());

    MPI::Request request[sendDofs.size() + recvDofs.size()];
    int requestCounter = 0;

    int i = 0;
    for (RankToDofContainer::iterator sendIt = sendDofs.begin();
         sendIt != sendDofs.end(); ++sendIt, i++) {
      int nSendDOFs = sendIt->second.size();
      sendBuffers[i] = new double[nSendDOFs];

      for (int j = 0; j < nSendDOFs; j++)
        sendBuffers[i][j] = (*vec)[*((sendIt->second)[j])];

      request[requestCounter++] =
        mpiComm.Isend(sendBuffers[i], nSendDOFs, MPI_DOUBLE, sendIt->first, 0);
    }

    i = 0;
    for (RankToDofContainer::iterator recvIt = recvDofs.begin();
         recvIt != recvDofs.end(); ++recvIt, i++) {
      int nRecvDOFs = recvIt->second.size();
      recvBuffers[i] = new double[nRecvDOFs];

      request[requestCounter++] =
        mpiComm.Irecv(recvBuffers[i], nRecvDOFs, MPI_DOUBLE, recvIt->first, 0);
    }

    MPI::Request::Waitall(requestCounter, request);

    // Write the received boundary values into the DOFVector.
    i = 0;
    for (RankToDofContainer::iterator recvIt = recvDofs.begin();
         recvIt != recvDofs.end(); ++recvIt, i++) {
      for (int j = 0; j < static_cast<int>(recvIt->second.size()); j++)
        (*vec)[*(recvIt->second)[j]] = recvBuffers[i][j];

      delete [] recvBuffers[i];
    }

    for (int i = 0; i < static_cast<int>(sendBuffers.size()); i++)
      delete [] sendBuffers[i];

    MatDestroy(petscMatrix);
    VecDestroy(petscRhsVec);
    VecDestroy(petscSolVec);
    VecDestroy(petscTmpVec);
    // BUGFIX: the KSP solver object was never destroyed (resource leak);
    // the SystemVector overload already calls KSPDestroy.
    KSPDestroy(ksp);
  }

600

601
  /// Solves the assembled multi-component system with BiCGStab, distributes
  /// the interleaved solution into the component DOFVectors, synchronizes the
  /// boundary DOFs, reports iteration count and residual norm, and destroys
  /// all PETSc objects.
  void ParallelDomainBase::solvePetscMatrix(SystemVector &vec)
  {
    FUNCNAME("ParallelDomainBase::solvePetscMatrix()");

    KSP solver;

    KSPCreate(PETSC_COMM_WORLD, &solver);
    KSPSetOperators(solver, petscMatrix, petscMatrix, SAME_NONZERO_PATTERN); 

    KSPSetTolerances(solver, 0.0, 1e-8, PETSC_DEFAULT, PETSC_DEFAULT);

    KSPSetType(solver, KSPBCGS);
    KSPMonitorSet(solver, myKSPMonitor, PETSC_NULL, 0);
    // Command line options may still override the settings above.
    KSPSetFromOptions(solver);

    KSPSolve(solver, petscRhsVec, petscSolVec);

    // The PETSc solution stores the components interleaved: entry
    // j * nComponents + i belongs to component i of local DOF j.
    PetscScalar *vecPointer;
    VecGetArray(petscSolVec, &vecPointer);

    for (int i = 0; i < nComponents; i++) {
      DOFVector<double> *dofvec = vec.getDOFVector(i);
      for (int j = 0; j < nRankDofs; j++)
        (*dofvec)[mapLocalToDofIndex[j]] = vecPointer[j * nComponents + i];      
    }

    VecRestoreArray(petscSolVec, &vecPointer);

    // Exchange the boundary DOF values with the neighbouring ranks.
    synchVectors(vec);

    int iterations = 0;
    KSPGetIterationNumber(solver, &iterations);
    MSG("  Number of iterations: %d\n", iterations);

    // Compute the true residual norm ||A x - b|| as a sanity check.
    double norm = 0.0;
    MatMult(petscMatrix, petscSolVec, petscTmpVec);
    VecAXPY(petscTmpVec, -1.0, petscRhsVec);
    VecNorm(petscTmpVec, NORM_2, &norm);
    MSG("  Residual norm: %e\n", norm);

    MatDestroy(petscMatrix);
    VecDestroy(petscRhsVec);
    VecDestroy(petscSolVec);
    VecDestroy(petscTmpVec);
    KSPDestroy(solver);
  }
  
  /// Synchronizes the DOFs on interior boundaries between the ranks: for all
  /// components at once, every rank sends the values of the boundary DOFs it
  /// owns (sendDofs) and receives the values of the boundary DOFs owned by
  /// its neighbours (recvDofs).
  void ParallelDomainBase::synchVectors(SystemVector &vec)
  {
    std::vector<double*> sendBuffers(sendDofs.size());
    std::vector<double*> recvBuffers(recvDofs.size());

    MPI::Request request[sendDofs.size() + recvDofs.size()];
    int requestCounter = 0;

    // Pack, per neighbour rank, the values of all components of all DOFs
    // this rank owns on the common boundary, and post the sends.
    int i = 0;
    for (RankToDofContainer::iterator sendIt = sendDofs.begin();
         sendIt != sendDofs.end(); ++sendIt, i++) {
      int nSendDOFs = sendIt->second.size();
      sendBuffers[i] = new double[nSendDOFs * nComponents];

      // Buffer layout: component-major — all DOFs of component 0, then 1, ...
      int counter = 0;
      for (int j = 0; j < nComponents; j++) {
        DOFVector<double> *dofvec = vec.getDOFVector(j);
        for (int k = 0; k < nSendDOFs; k++)
          sendBuffers[i][counter++] = (*dofvec)[*((sendIt->second)[k])];
      }

      request[requestCounter++] = mpiComm.Isend(sendBuffers[i], nSendDOFs * nComponents,
                                                MPI_DOUBLE, sendIt->first, 0);
    }

    // Post the matching receives for DOFs owned by neighbouring ranks.
    i = 0;
    for (RankToDofContainer::iterator recvIt = recvDofs.begin();
         recvIt != recvDofs.end(); ++recvIt, i++) {
      int nRecvDOFs = recvIt->second.size() * nComponents;
      recvBuffers[i] = new double[nRecvDOFs];

      request[requestCounter++] =
        mpiComm.Irecv(recvBuffers[i], nRecvDOFs, MPI_DOUBLE, recvIt->first, 0);
    }


    MPI::Request::Waitall(requestCounter, request);

    // Unpack the received values (same component-major layout as above).
    i = 0;
    for (RankToDofContainer::iterator recvIt = recvDofs.begin();
         recvIt != recvDofs.end(); ++recvIt, i++) {
      int nRecvDOFs = recvIt->second.size();

      int counter = 0;
      for (int j = 0; j < nComponents; j++) {
        DOFVector<double> *dofvec = vec.getDOFVector(j);
        for (int k = 0; k < nRecvDOFs; k++)
          (*dofvec)[*(recvIt->second)[k]] = recvBuffers[i][counter++];
      }

      delete [] recvBuffers[i];
    }

    for (int i = 0; i < static_cast<int>(sendBuffers.size()); i++)
      delete [] sendBuffers[i];
  }

705
706
707
708
  
  void ParallelDomainBase::serialize(std::ostream &out, DofContainer &data)
  {
    int vecSize = data.size();
709
    SerUtil::serialize(out, vecSize);
710
711
    for (int i = 0; i < vecSize; i++) {
      int dofIndex = (*data[i]);
712
      SerUtil::serialize(out, dofIndex);
713
714
715
716
717
718
719
720
721
722
    }
  }


  void ParallelDomainBase::deserialize(std::istream &in, DofContainer &data,
				       std::map<int, const DegreeOfFreedom*> &dofMap)
  {
    FUNCNAME("ParallelDomainBase::deserialize()");

    int vecSize = 0;
723
    SerUtil::deserialize(in, vecSize);
724
725
726
    data.resize(vecSize);
    for (int i = 0; i < vecSize; i++) {
      int dofIndex = 0;
727
      SerUtil::deserialize(in, dofIndex);
728
729
730
731
732
733
734
735
736
737
738
739

      TEST_EXIT_DBG(dofMap.count(dofIndex) != 0)
	("Dof index could not be deserialized correctly!\n");

      data[i] = dofMap[dofIndex];
    }
  }


  void ParallelDomainBase::serialize(std::ostream &out, RankToDofContainer &data)
  {
    int mapSize = data.size();
740
    SerUtil::serialize(out, mapSize);
741
742
    for (RankToDofContainer::iterator it = data.begin(); it != data.end(); ++it) {
      int rank = it->first;
743
      SerUtil::serialize(out, rank);
744
745
746
747
748
749
750
751
752
      serialize(out, it->second);
    }
  }

  
  void ParallelDomainBase::deserialize(std::istream &in, RankToDofContainer &data,
				       std::map<int, const DegreeOfFreedom*> &dofMap)
  {
    int mapSize = 0;
753
    SerUtil::deserialize(in, mapSize);
754
755
    for (int i = 0; i < mapSize; i++) {
      int rank = 0;
756
      SerUtil::deserialize(in, rank);
757
758
759
760
      deserialize(in, data[rank], dofMap);      
    }
  }

761

762
  /// Recomputes the partitioning weight of every macro element of this rank's
  /// partition: each leaf element contributes 1.0 to the weight of the macro
  /// (level 0) element it descends from. Returns the sum of all local weights.
  double ParallelDomainBase::setElemWeights(AdaptInfo *adaptInfo) 
  {
    double localWeightSum = 0.0;
    // Index of the macro element the traversal is currently inside of
    // (the preorder traversal visits the level-0 element first).
    int elNum = -1;

    elemWeights.clear();

    TraverseStack stack;
    ElInfo *elInfo = stack.traverseFirst(mesh, -1,
                                         Mesh::CALL_EVERY_EL_PREORDER);
    while (elInfo) {
      Element *element = elInfo->getElement();

      // get partition data
      PartitionElementData *partitionData = dynamic_cast<PartitionElementData*>
        (element->getElementData(PARTITION_ED));

      if (partitionData && partitionData->getPartitionStatus() == IN) {
        if (partitionData->getLevel() == 0) {
          elNum = element->getIndex();
        }
        TEST_EXIT_DBG(elNum != -1)("invalid element number\n");
        // Only leaves add weight; it is accumulated on the macro ancestor.
        if (element->isLeaf()) {
          elemWeights[elNum] += 1.0;
          localWeightSum += 1.0;
        }
      }

      elInfo = stack.traverseNext(elInfo);
    }

    return localWeightSum;
  }

796

797
  /// Computes (first call) or adapts (subsequent calls) the mesh partitioning
  /// via ParMETIS, keeping the previous element -> rank mapping in
  /// oldPartitionVec and the new one in partitionVec.
  void ParallelDomainBase::partitionMesh(AdaptInfo *adaptInfo)
  {
    if (!initialPartitionMesh) {
      // Repartitioning: remember the old mapping, then adapt it to the
      // current element weights.
      oldPartitionVec = partitionVec;
      partitioner->partition(&elemWeights, ADAPTIVE_REPART, 100.0 /*0.000001*/);
    } else {
      // Very first call: compute an initial partitioning from scratch.
      initialPartitionMesh = false;
      partitioner->fillCoarsePartitionVec(&oldPartitionVec);
      partitioner->partition(&elemWeights, INITIAL);
    }

    // Store the resulting element -> rank mapping.
    partitioner->fillCoarsePartitionVec(&partitionVec);
  }

811

812
  void ParallelDomainBase::createInteriorBoundaryInfo(DofContainer& rankDofs)
813
  {
814
    FUNCNAME("ParallelDomainBase::createInteriorBoundaryInfo()");
Thomas Witkowski's avatar
Thomas Witkowski committed
815
816
817

    // === First, create all the information about the interior boundaries. ===

818
    TraverseStack stack;
819
820
    ElInfo *elInfo = stack.traverseFirst(mesh, -1, 
					 Mesh::CALL_LEAF_EL | Mesh::FILL_NEIGH);
821
822
823
824
825
    while (elInfo) {
      Element *element = elInfo->getElement();

      PartitionElementData *partitionData = 
	dynamic_cast<PartitionElementData*>(element->getElementData(PARTITION_ED));   
826

827
828
829
830
831
832
833
      if (partitionData->getPartitionStatus() == IN) {
	for (int i = 0; i < 3; i++) {
	  if (!elInfo->getNeighbour(i))
	    continue;

	  PartitionElementData *neighbourPartitionData =
	    dynamic_cast<PartitionElementData*>(elInfo->getNeighbour(i)->getElementData(PARTITION_ED));
834

835
 	  if (neighbourPartitionData->getPartitionStatus() == OUT) {
836
837
	    // We have found an element that is at an interior boundary. 

838
839
	    int otherElementRank = partitionVec[elInfo->getNeighbour(i)->getIndex()];
	    bool ranksBoundary = (mpiRank > otherElementRank);
840
841

	    AtomicBoundary& bound = 
Thomas Witkowski's avatar
Thomas Witkowski committed
842
	      (ranksBoundary ?
843
844
	       myIntBoundary.getNewAtomicBoundary(otherElementRank) :
	       otherIntBoundary.getNewAtomicBoundary(otherElementRank));
Thomas Witkowski's avatar
Thomas Witkowski committed
845

846
	    bound.rankObject.el = element;
847
	    bound.rankObject.elIndex = element->getIndex();
848
849
	    bound.rankObject.subObjAtBoundary = EDGE;
	    bound.rankObject.ithObjAtBoundary = i;
850
851
852
853
	    // Do not set a pointer to the element, because if will be deleted from
	    // mesh after partitioning and the pointer would become invalid.
	    bound.neighbourObject.el = NULL;
	    bound.neighbourObject.elIndex = elInfo->getNeighbour(i)->getIndex();
854
	    bound.neighbourObject.subObjAtBoundary = EDGE;
855
856
857
858
859
	    bound.neighbourObject.ithObjAtBoundary = elInfo->getSideOfNeighbour(i);

	    // i == 2  =>  getSideOfNeighbour(i) == 2
	    TEST_EXIT_DBG(i != 2 || elInfo->getSideOfNeighbour(i) == 2)
	      ("Should not happen!\n");
860
861
862
863
864
865
 	  }
	}
      }

      elInfo = stack.traverseNext(elInfo);
    }
Thomas Witkowski's avatar
Thomas Witkowski committed
866
867
868


    // === Once we have this information, we must care about their order. Eventually ===
Thomas Witkowski's avatar
Thomas Witkowski committed
869
870
    // === all the boundaries have to be in the same order on both ranks that share  ===
    // === the bounday.                                                              ===
Thomas Witkowski's avatar
Thomas Witkowski committed
871

Thomas Witkowski's avatar
Thomas Witkowski committed
872
    std::vector<int*> sendBuffers, recvBuffers;
Thomas Witkowski's avatar
Thomas Witkowski committed
873

Thomas Witkowski's avatar
Thomas Witkowski committed
874
875
876
877
    MPI::Request request[myIntBoundary.boundary.size() + 
			 otherIntBoundary.boundary.size()];
    int requestCounter = 0;

Thomas Witkowski's avatar
Thomas Witkowski committed
878
879
    for (RankToBoundMap::iterator rankIt = myIntBoundary.boundary.begin();
	 rankIt != myIntBoundary.boundary.end(); ++rankIt) {    
Thomas Witkowski's avatar
Thomas Witkowski committed
880
      int nSendInt = rankIt->second.size();
881
882
883
884
885
      int* buffer = new int[nSendInt * 2];
      for (int i = 0; i < nSendInt; i++) {
	buffer[i * 2] = (rankIt->second)[i].rankObject.elIndex;
	buffer[i * 2 + 1] = (rankIt->second)[i].rankObject.ithObjAtBoundary;
      }
Thomas Witkowski's avatar
Thomas Witkowski committed
886
887
      sendBuffers.push_back(buffer);
      
Thomas Witkowski's avatar
Thomas Witkowski committed
888
      request[requestCounter++] =
889
	mpiComm.Isend(buffer, nSendInt * 2, MPI_INT, rankIt->first, 0);
Thomas Witkowski's avatar
Thomas Witkowski committed
890
891
    }

Thomas Witkowski's avatar
Thomas Witkowski committed
892
893
    for (RankToBoundMap::iterator rankIt = otherIntBoundary.boundary.begin();
	 rankIt != otherIntBoundary.boundary.end(); ++rankIt) {
Thomas Witkowski's avatar
Thomas Witkowski committed
894
      int nRecvInt = rankIt->second.size();
895
      int *buffer = new int[nRecvInt * 2];
Thomas Witkowski's avatar
Thomas Witkowski committed
896
      recvBuffers.push_back(buffer);
Thomas Witkowski's avatar
Thomas Witkowski committed
897

Thomas Witkowski's avatar
Thomas Witkowski committed
898
      request[requestCounter++] = 
899
	mpiComm.Irecv(buffer, nRecvInt * 2, MPI_INT, rankIt->first, 0);
Thomas Witkowski's avatar
Thomas Witkowski committed
900
901
    }

Thomas Witkowski's avatar
Thomas Witkowski committed
902
903
904

    MPI::Request::Waitall(requestCounter, request);

Thomas Witkowski's avatar
Thomas Witkowski committed
905
906

    int i = 0;
Thomas Witkowski's avatar
Thomas Witkowski committed
907
908
    for (RankToBoundMap::iterator rankIt = otherIntBoundary.boundary.begin();
	 rankIt != otherIntBoundary.boundary.end(); ++rankIt) {
Thomas Witkowski's avatar
Thomas Witkowski committed
909
910
911
912
913
914
915

      // === We have received from rank "rankIt->first" the ordered list of element ===
      // === indices. We now have to sort the corresponding list in this rank to    ===
      // === get the same order.                                                    ===
      
      for (int j = 0; j < static_cast<int>(rankIt->second.size()); j++) {
	// If the expected object is not at place, search for it.
916
917
	if ((rankIt->second)[j].neighbourObject.elIndex != recvBuffers[i][j * 2] ||
	    (rankIt->second)[j].neighbourObject.ithObjAtBoundary != recvBuffers[i][j * 2 + 1]) {
Thomas Witkowski's avatar
Thomas Witkowski committed
918
919
	  int k = j + 1;
	  for (; k < static_cast<int>(rankIt->second.size()); k++)
920
921
	    if ((rankIt->second)[k].neighbourObject.elIndex == recvBuffers[i][j * 2] && 
		(rankIt->second)[k].neighbourObject.ithObjAtBoundary == recvBuffers[i][j * 2 + 1])
Thomas Witkowski's avatar
Thomas Witkowski committed
922
923
924
	      break;

	  // The element must always be found, because the list is just in another order.
Thomas Witkowski's avatar
Thomas Witkowski committed
925
	  TEST_EXIT_DBG(k < static_cast<int>(rankIt->second.size()))
926
	    ("Should never happen!\n");
Thomas Witkowski's avatar
Thomas Witkowski committed
927
928
929
930
931
932
933
934
935
936
937
938
939

	  // Swap the current with the found element.
	  AtomicBoundary tmpBound = (rankIt->second)[k];
	  (rankIt->second)[k] = (rankIt->second)[j];
	  (rankIt->second)[j] = tmpBound;	
	}
      }

      delete [] recvBuffers[i++];
    }

    for (int i = 0; i < static_cast<int>(sendBuffers.size()); i++)
      delete [] sendBuffers[i];
940
941
942
  }


943
  void ParallelDomainBase::removeMacroElements()
944
945
946
947
948
949
950
951
952
953
954
955
  {
    std::vector<MacroElement*> macrosToRemove;
    for (std::deque<MacroElement*>::iterator it = mesh->firstMacroElement();
	 it != mesh->endOfMacroElements();
	 ++it) {
      PartitionElementData *partitionData = 
	dynamic_cast<PartitionElementData*>
	((*it)->getElement()->getElementData(PARTITION_ED));
      if (partitionData->getPartitionStatus() != IN)
	macrosToRemove.push_back(*it);
    }

Thomas Witkowski's avatar
Thomas Witkowski committed
956
    mesh->removeMacroElements(macrosToRemove, feSpace);
957
958
959
  }


960
  void ParallelDomainBase::createLocalGlobalNumbering(DofContainer& rankDofs,
961
						      int& nRankDofs, 
Thomas Witkowski's avatar
Thomas Witkowski committed
962
						      int& nOverallDOFs)
963
  {
964
    FUNCNAME("ParallelDomainBase::createLocalGlobalNumbering()");
965
966

    // === Get rank information about DOFs. ===
967
968
969

    // Stores to each DOF pointer the set of ranks the DOF is part of.
    std::map<const DegreeOfFreedom*, std::set<int> > partitionDOFs;
Thomas Witkowski's avatar
n    
Thomas Witkowski committed
970
    DofContainer rankAllDofs;
971
    DofToRank boundaryDofs;
972

973
    createDofMemberInfo(partitionDOFs, rankDofs, rankAllDofs, boundaryDofs, vertexDof);
974

975
    nRankDofs = rankDofs.size();
976
977
    nOverallDOFs = partitionDOFs.size();

978

979
    // === Get starting position for global rank dof ordering. ====
980

981
    rstart = 0;
982
983
    mpiComm.Scan(&nRankDofs, &rstart, 1, MPI_INT, MPI_SUM);
    rstart -= nRankDofs;
984

985
986
987
988
    
    // === Create for all dofs in rank new indices. The new index must start at ===
    // === index 0, must be continuous and have the same order as the indices   ===
    // === had before.                                                          ===
Thomas Witkowski's avatar
n    
Thomas Witkowski committed
989

Thomas Witkowski's avatar
Thomas Witkowski committed
990

991
    // Do not change the indices now, but create a new indexing and store it here.
992
993
    DofIndexMap rankDofsNewLocalIndex;
    isRankDof.clear();
Thomas Witkowski's avatar
n    
Thomas Witkowski committed
994
995
996
    int i = 0;
    for (DofContainer::iterator dofIt = rankAllDofs.begin();
	 dofIt != rankAllDofs.end(); ++dofIt) {
997
998
999
      rankDofsNewLocalIndex[*dofIt] = i;
      // First, we set all dofs in the rank's partition to be owned by the rank.
      // Later, the dofs in the rank's partition that are owned by other ranks
      // are set to false.
Thomas Witkowski's avatar
n    
Thomas Witkowski committed
1000
      isRankDof[i] = true;
1001
      i++;
Thomas Witkowski's avatar
n    
Thomas Witkowski committed
1002
1003
    }

1004

1005
    // === Create for all rank owned dofs a new global indexing. ===
1006

1007
1008
    // Stores for dofs in rank a new global index.
    DofIndexMap rankDofsNewGlobalIndex;
1009
1010
1011
    // Stores for all rank owned dofs a continuous local index.
    DofIndexMap rankOwnedDofsNewLocalIndex;

Thomas Witkowski's avatar
n    
Thomas Witkowski committed
1012
    i = 0;
1013
1014
    for (DofContainer::iterator dofIt = rankDofs.begin();
	 dofIt != rankDofs.end(); ++dofIt) {
1015
      rankDofsNewGlobalIndex[*dofIt] = i + rstart;
1016
      rankOwnedDofsNewLocalIndex[*dofIt] = i;
Thomas Witkowski's avatar
n    
Thomas Witkowski committed
1017
1018
1019
      i++;
    }

Thomas Witkowski's avatar
Thomas Witkowski committed
1020
 
1021
1022
1023
1024
1025
    // === Create information which dof indices must be send and which must  ===
    // === be received.                                                      ===

    // Stores to each rank a map from DOF pointers (which are all owned by the rank
    // and lie on an interior boundary) to new global DOF indices.
1026
    std::map<int, DofIndexMap > sendNewDofs;
1027

1028
1029
1030
1031
    // Maps to each rank the number of new DOF indices this rank will receive from.
    // All these DOFs are at an interior boundary on this rank, but are owned by
    // another rank.
    std::map<int, int> recvNewDofs;
1032

1033
    for (DofToRank::iterator it = boundaryDofs.begin(); it != boundaryDofs.end(); ++it) {
1034
1035
1036
1037
1038
1039
1040
1041

      if (it->second == mpiRank) {
	// If the boundary dof is a rank dof, it must be send to other ranks.

	// Search for all ranks that have this dof too.
	for (std::set<int>::iterator itRanks = partitionDOFs[it->first].begin();
	     itRanks != partitionDOFs[it->first].end();
	     ++itRanks) {
Thomas Witkowski's avatar
n    
Thomas Witkowski committed
1042
	  if (*itRanks != mpiRank) {
1043
	    TEST_EXIT_DBG(rankDofsNewGlobalIndex.count(it->first) == 1)
Thomas Witkowski's avatar
n    
Thomas Witkowski committed
1044
1045
	      ("DOF Key not found!\n");

1046
	    sendNewDofs[*itRanks][it->first] = rankDofsNewGlobalIndex[it->first];
Thomas Witkowski's avatar
n    
Thomas Witkowski committed
1047
	  }
1048
1049
	}
      } else {
Thomas Witkowski's avatar
Thomas Witkowski committed
1050
1051
	// If the boundary dof is not a rank dof, its new dof index (and later
	// also the dof values) must be received from another rank.
1052
1053
1054
1055
	if (recvNewDofs.find(it->second) == recvNewDofs.end()) 
	  recvNewDofs[it->second] = 1;
	else
	  recvNewDofs[it->second]++;
1056
1057
1058
      }
    }

Thomas Witkowski's avatar
n    
Thomas Witkowski committed
1059

1060
    // === Send and receive the dof indices at boundary. ===
1061

Thomas Witkowski's avatar
Thomas Witkowski committed
1062
    std::vector<int*> sendBuffers(sendNewDofs.size()), recvBuffers(recvNewDofs.size());
1063
1064
1065

    sendDofs.clear();
    recvDofs.clear();
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081

#if 0
    StlMpi stlMpi(mpiComm);
    stlMpi.prepareCommunication(false);
    std::map<

    for (std::map<int, DofIndexMap >::iterator sendIt = sendNewDofs.begin();
	 sendIt != sendNewDofs.end(); ++sendIt, i++)
      stlMpi.send(it->second, it->first);

    for (std::map<int, int>::iterator recvIt = recvNewDofs.begin();
	 recvIt != recvNewDofs.end(); ++recvIt, i++)
      stlMpi.recv(it->first, it->second * 2); 

    stlMpi.startCommunication();
#endif
1082
    
Thomas Witkowski's avatar
Thomas Witkowski committed
1083
1084
1085
    MPI::Request request[sendNewDofs.size() + recvNewDofs.size()];
    int requestCounter = 0;

Thomas Witkowski's avatar
n    
Thomas Witkowski committed
1086
    i = 0;
1087
1088
    for (std::map<int, DofIndexMap >::iterator sendIt = sendNewDofs.begin();
	 sendIt != sendNewDofs.end(); ++sendIt, i++) {
1089
1090
      int nSendDofs = sendIt->second.size() * 2;
      sendBuffers[i] = new int[nSendDofs];
1091

1092
      int c = 0;
1093
1094
      for (DofIndexMap::iterator dofIt = sendIt->second.begin();
	   dofIt != sendIt->second.end(); ++dofIt) {
Thomas Witkowski's avatar
Thomas Witkowski committed
1095
	sendBuffers[i][c++] = *(dofIt->first);
1096
	sendBuffers[i][c++] = dofIt->second;
1097

1098
	sendDofs[sendIt->first].push_back(dofIt->first);
1099
1100
      }