//
// Software License for AMDiS
//
// Copyright (c) 2010 Dresden University of Technology 
// All rights reserved.
// Authors: Simon Vey, Thomas Witkowski et al.
//
// This file is part of AMDiS
//
// See also license.opensource.txt in the distribution.


#include "AMDiS.h"
#include "parallel/ParallelCoarseSpaceMatVec.h"
#include "parallel/MatrixNnzStructure.h"

namespace AMDiS {

  using namespace std;

  ParallelCoarseSpaceMatVec::ParallelCoarseSpaceMatVec()
    : lastMeshNnz(0),
      alwaysCreateNnzStructure(false)
  {
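    // If this parameter is set, the nnz structure is recomputed on every
    // matrix creation, even if the mesh has not changed in the meantime
    // (see checkMeshChange()).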
    Parameters::get("parallel->always create nnz structure", 
		    alwaysCreateNnzStructure);
  }


  void ParallelCoarseSpaceMatVec::init(ParallelDofMapping *iMap,
				       map<int, ParallelDofMapping*> cMap,
				       int sdLevel,
				       MPI::Intracomm mcLocal,
				       MPI::Intracomm mcGlobal,
				       MeshDistributor *meshDist)
  {
    FUNCNAME("ParallelCoarseSpaceMatVec:init()");

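    // Keep references to the interior DOF mapping, the per-component coarse
    // space mappings, and the MPI communicators they are defined on.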
    interiorMap = iMap;
    coarseSpaceMap = cMap;
    subdomainLevel = sdLevel;
    mpiCommLocal = mcLocal;
    mpiCommGlobal = mcGlobal;
    meshDistributor = meshDist;

    uniqueCoarseMap.clear();
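
    // Several components may share the same coarse space, so collect the
    // set of distinct coarse space mappings in uniqueCoarseMap.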
    if (coarseSpaceMap.size()) {
      std::set<ParallelDofMapping*> tmp;
      for (map<int, ParallelDofMapping*>::iterator it = coarseSpaceMap.begin();
	   it != coarseSpaceMap.end(); ++it) {
	if (tmp.count(it->second) == 0) {	  
	  tmp.insert(it->second);
	  uniqueCoarseMap.push_back(it->second);
	}
      }
    }

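    // The PETSc matrices are organized in an (nCoarseMap + 1) x
    // (nCoarseMap + 1) block structure: block row/column 0 corresponds to
    // the interior space, block row/column i + 1 to the i-th unique coarse
    // space.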
    int nCoarseMap = uniqueCoarseMap.size();
    mat.resize(nCoarseMap + 1);
    for (int i = 0; i < nCoarseMap + 1; i++)
      mat[i].resize(nCoarseMap + 1);

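    // For each component, store the index of its coarse space within
    // uniqueCoarseMap.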
    componentIthCoarseMap.resize(coarseSpaceMap.size());
    for (unsigned int i = 0; i < componentIthCoarseMap.size(); i++) {
      bool found = false;
      for (int j = 0; j < nCoarseMap; j++) {
	if (coarseSpaceMap[i] == uniqueCoarseMap[j]) {
	  componentIthCoarseMap[i] = j;
	  found = true;
	  break;
	}
      }
      
      TEST_EXIT_DBG(found)("No coarse space mapping found for this component!\n");
    }
  }


  void ParallelCoarseSpaceMatVec::create(Matrix<DOFMatrix*>& seqMat)
  {
    FUNCNAME("ParallelCoarseSpaceMatVec::create()");


    // === If required, recompute non zero structure of the matrix. ===

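    // If a coarse space is used on the finest subdomain level, the interior
    // block is stored as a purely local (sequential) matrix on each rank.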
    bool localMatrix = (coarseSpaceMap.size() && subdomainLevel == 0);
    if (checkMeshChange(seqMat, localMatrix)) {
      int nMat = uniqueCoarseMap.size() + 1;
      nnz.resize(nMat);
      for (int i = 0; i < nMat; i++) {
	nnz[i].resize(nMat);
	for (int j = 0; j < nMat; j++)
	  nnz[i][j].clear();
      }

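      // Block (0,0) couples interior DOFs with interior DOFs. The periodic
      // map is taken into account only if no coarse space is used.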
      nnz[0][0].create(seqMat, mpiCommGlobal, *interiorMap,
		       (coarseSpaceMap.size() == 0 ? &(meshDistributor->getPeriodicMap()) : NULL),
		       meshDistributor->getElementObjectDb(),
		       localMatrix);

      for (int i = 0; i < nMat; i++) {
	for (int j = 0; j < nMat; j++) {
	  if (i == 0 && j == 0)
	    continue;

	  if (i == j) {
	    nnz[i][j].create(seqMat, mpiCommGlobal, *(uniqueCoarseMap[i - 1]),
			     NULL, meshDistributor->getElementObjectDb());
	  } else {
	    // Block row/column 0 refers to the interior space, block
	    // row/column k + 1 to the k-th unique coarse space.
	    ParallelDofMapping *rowMap = 
	      (i == 0 ? interiorMap : uniqueCoarseMap[i - 1]);
	    ParallelDofMapping *colMap = 
	      (j == 0 ? interiorMap : uniqueCoarseMap[j - 1]);
	    
	    nnz[i][j].create(seqMat, mpiCommGlobal, *rowMap, *colMap, NULL,
			     meshDistributor->getElementObjectDb());

	    /*
	    nnzCoarse.create(mat, mpiCommGlobal, *coarseSpaceMap, NULL, meshDistributor->getElementObjectDb());
	    nnzCoarseInt.create(mat, mpiCommGlobal, *coarseSpaceMap, *interiorMap, NULL, meshDistributor->getElementObjectDb());
	    nnzIntCoarse.create(mat, mpiCommGlobal, *interiorMap, *coarseSpaceMap, NULL, meshDistributor->getElementObjectDb());
	    */
	  }
	}
      }
    }


    // === Create PETSc matrix with the computed nnz data structure. ===
    
    int nRankRows = interiorMap->getRankDofs();
    int nOverallRows = interiorMap->getOverallDofs();

    if (localMatrix) {
      // The nnz structure of the interior block is known exactly, so it is
      // used for preallocation here.
      MatCreateSeqAIJ(mpiCommLocal, nRankRows, nRankRows,
		      0, nnz[0][0].dnnz, 
		      &mat[0][0]);
      MatSetOption(mat[0][0], MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);
    } else {
      MatCreateAIJ(mpiCommGlobal, nRankRows, nRankRows, 
		   nOverallRows, nOverallRows,
		   0, nnz[0][0].dnnz,
		   0, nnz[0][0].onnz,
		   &mat[0][0]);
      MatSetOption(mat[0][0], MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);
    }


    if (coarseSpaceMap.size()) {
      int nCoarseMap = uniqueCoarseMap.size();

      // Create every coupling block exactly once, mirroring the loop that
      // computed the nnz structure: block row/column 0 is the interior
      // space, block row/column i + 1 the i-th unique coarse space.
      for (int i = 0; i < nCoarseMap + 1; i++) {
	for (int j = 0; j < nCoarseMap + 1; j++) {
	  if (i == 0 && j == 0)
	    continue;

	  int nRowsRankRow = 
	    (i == 0 ? nRankRows : uniqueCoarseMap[i - 1]->getRankDofs());
	  int nRowsOverallRow = 
	    (i == 0 ? nOverallRows : uniqueCoarseMap[i - 1]->getOverallDofs());
	  int nRowsRankCol = 
	    (j == 0 ? nRankRows : uniqueCoarseMap[j - 1]->getRankDofs());
	  int nRowsOverallCol = 
	    (j == 0 ? nOverallRows : uniqueCoarseMap[j - 1]->getOverallDofs());

	  MatCreateAIJ(mpiCommGlobal,
		       nRowsRankRow, nRowsRankCol,
		       nRowsOverallRow, nRowsOverallCol,
		       0, nnz[i][j].dnnz, 0, nnz[i][j].onnz,
		       &mat[i][j]);
	  MatSetOption(mat[i][j], MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);
	}
      }
    }
  }
  }


  void ParallelCoarseSpaceMatVec::destroy()
  {
    FUNCNAME("ParallelCoarseSpaceMatVec::destroy()");

    int nMatrix = mat.size();
    for (int i = 0; i < nMatrix; i++)
      for (int j = 0; j < nMatrix; j++)
	MatDestroy(&mat[i][j]);
  }


  void ParallelCoarseSpaceMatVec::assembly()
  {
    FUNCNAME("ParallelCoarseSpaceMatVec::assembly()");

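    // MatAssemblyBegin/MatAssemblyEnd are collective operations, so they
    // must be called on every block on all participating ranks.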
    int nMatrix = mat.size();
    for (int i = 0; i < nMatrix; i++) {
      for (int j = 0; j < nMatrix; j++) {
	MatAssemblyBegin(mat[i][j], MAT_FINAL_ASSEMBLY);
	MatAssemblyEnd(mat[i][j], MAT_FINAL_ASSEMBLY);  
      }
    }
  }


  bool ParallelCoarseSpaceMatVec::checkMeshChange(Matrix<DOFMatrix*> &seqMat,
						  bool localMatrix)
  {
    FUNCNAME("ParallelCoarseSpaceMatVec::checkMeshChange()");

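    // The decision to rebuild the nnz structure must be made collectively:
    // if the mesh has changed on any rank, all ranks recompute it, because
    // the subsequent matrix creation is a collective operation.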
    int recvAllValues = 0;
    int sendValue = 
      static_cast<int>(meshDistributor->getLastMeshChangeIndex() != lastMeshNnz);
    mpiCommGlobal.Allreduce(&sendValue, &recvAllValues, 1, MPI_INT, MPI_SUM);

    if (recvAllValues != 0 || alwaysCreateNnzStructure) {
      vector<const FiniteElemSpace*> feSpaces = getFeSpaces(seqMat);
      
      interiorMap->setComputeMatIndex(!localMatrix);
      interiorMap->update(feSpaces);

      //      updateSubdomainData();
      lastMeshNnz = meshDistributor->getLastMeshChangeIndex();

      return true;
    }

    return false;
  }
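

  /* A minimal usage sketch (assuming a FETI-DP-like solver driver; the
     surrounding objects -- interiorMap, coarseMaps, meshDistributor and
     the communicators -- are placeholders from that hypothetical context):

       ParallelCoarseSpaceMatVec pcs;
       pcs.init(&interiorMap, coarseMaps, 0, mpiCommLocal, mpiCommGlobal,
	        &meshDistributor);
       pcs.create(seqMat);   // (re)creates all PETSc block matrices
       // ... fill the blocks via MatSetValues ...
       pcs.assembly();       // collective assembly of all blocks
       // ... solve ...
       pcs.destroy();        // frees all PETSc matrices
  */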

}