Commit a2911e8d authored by Thomas Witkowski's avatar Thomas Witkowski

Several small changes to simplify parallel code.

parent 29fd364c
......@@ -372,9 +372,6 @@ namespace AMDiS {
while (elInfo) {
// Get element value.
double val = vec[elInfo->getElement()->getIndex()];
if (vc == 1101) {
MSG("FOUND EL %d\n", elInfo->getElement()->getIndex());
}
// Write value for each vertex of each element.
for (int i = 0; i <= dim; i++)
......
......@@ -197,7 +197,8 @@ namespace AMDiS {
DOFVector<double>::Iterator dofIt(vec, USED_DOFS);
for (dofIt.reset(); !dofIt.end(); ++dofIt) {
// Calculate global row index of the dof.
DegreeOfFreedom globalRow = meshDistributor->mapLocalToGlobal(dofIt.getDOFIndex());
DegreeOfFreedom globalRow =
meshDistributor->mapLocalToGlobal(dofIt.getDOFIndex());
// Calculate petsc index of the row dof.
int index = globalRow * dispMult + dispAdd;
......@@ -384,6 +385,8 @@ namespace AMDiS {
{
FUNCNAME("GlobalMatrixSolver::fillPetscMatrix()");
MSG("START FILL PETSC MATRIX!\n");
clock_t first = clock();
int nRankRows = meshDistributor->getNumberRankDofs() * nComponents;
int nOverallRows = meshDistributor->getNumberOverallDofs() * nComponents;
......@@ -402,7 +405,11 @@ namespace AMDiS {
VecSetSizes(petscTmpVec, nRankRows, nOverallRows);
VecSetType(petscTmpVec, VECMPI);
if (!d_nnz || meshDistributor->getLastMeshChangeIndex() != lastMeshNnz) {
int recvAllValues = 0;
int sendValue = static_cast<int>(meshDistributor->getLastMeshChangeIndex() != lastMeshNnz);
meshDistributor->getMpiComm().Allreduce(&sendValue, &recvAllValues, 1, MPI_INT, MPI_SUM);
if (!d_nnz || recvAllValues != 0) {
if (d_nnz) {
delete [] d_nnz;
d_nnz = NULL;
......@@ -430,6 +437,7 @@ namespace AMDiS {
("Wrong matrix ownership range!\n");
#endif
// === Transfer values from DOF matrices to the PETSc matrix. ===
for (int i = 0; i < nComponents; i++)
......@@ -442,6 +450,7 @@ namespace AMDiS {
MatAssemblyBegin(petscMatrix, MAT_FINAL_ASSEMBLY);
MatAssemblyEnd(petscMatrix, MAT_FINAL_ASSEMBLY);
// === Transfer values from DOF vector to the PETSc vector. ===
for (int i = 0; i < nComponents; i++)
......@@ -496,9 +505,9 @@ namespace AMDiS {
int nRankDofs = meshDistributor->getNumberRankDofs();
for (int i = 0; i < nComponents; i++) {
DOFVector<double> *dofvec = vec.getDOFVector(i);
DOFVector<double> &dofvec = *(vec.getDOFVector(i));
for (int j = 0; j < nRankDofs; j++)
(*dofvec)[meshDistributor->mapLocalToDofIndex(j)] =
dofvec[meshDistributor->mapLocalToDofIndex(j)] =
vecPointer[j * nComponents + i];
}
......
......@@ -1582,11 +1582,11 @@ namespace AMDiS {
elInfo = stack.traverseNext(elInfo);
}
DofContainer rankAllDofs;
for (DofSet::iterator dofIt = rankDofSet.begin(); dofIt != rankDofSet.end(); ++dofIt)
rankAllDofs.push_back(*dofIt);
sort(rankAllDofs.begin(), rankAllDofs.end(), cmpDofsByValue);
DofContainer rankDofs = rankAllDofs;
DofContainer rankDofs;
for (DofSet::iterator it = rankDofSet.begin(); it != rankDofSet.end(); ++it)
rankDofs.push_back(*it);
sort(rankDofs.begin(), rankDofs.end(), cmpDofsByValue);
int nRankAllDofs = rankDofs.size();
// === Traverse on interior boundaries and move all not ranked owned DOFs from ===
......@@ -1627,20 +1627,8 @@ namespace AMDiS {
it->rankObj.el->getVertexDofs(feSpace, it->rankObj, dofs);
it->rankObj.el->getNonVertexDofs(feSpace, it->rankObj, dofs);
for (int i = 0; i < static_cast<int>(dofs.size()); i++) {
// FOR DEBUGGING
/*
WorldVector<double> cs;
mesh->getDofIndexCoords(dofs[i], feSpace, cs);
MSG("SEND EL %d DOF %d TO %d\n", it->rankObj.elIndex, *(dofs[i]), it.getRank());
if (cs.getSize() == 2)
MSG("COORDS-s2: %f %f\n", cs[0], cs[1]);
else
MSG("COORDS-s2: %f %f %f\n", cs[0], cs[1], cs[2]);
*/
sendDofs[it.getRank()].push_back(dofs[i]);
}
for (int i = 0; i < static_cast<int>(dofs.size()); i++)
sendDofs[it.getRank()].push_back(dofs[i]);
}
......@@ -1655,17 +1643,6 @@ namespace AMDiS {
if (eraseIt != rankDofs.end())
rankDofs.erase(eraseIt);
// FOR DEBUGGING
/*
WorldVector<double> cs;
mesh->getDofIndexCoords(dofs[i], feSpace, cs);
MSG("RECV EL %d DOF %d FROM %d\n", it->rankObj.elIndex, *(dofs[i]), it.getRank());
if (cs.getSize() == 2)
MSG("COORDS-r2: %f %f\n", cs[0], cs[1]);
else
MSG("COORDS-r2: %f %f %f\n", cs[0], cs[1], cs[2]);
*/
recvDofs[it.getRank()].push_back(dofs[i]);
}
}
......@@ -1684,40 +1661,24 @@ namespace AMDiS {
nOverallDofs = 0;
mpiComm.Allreduce(&nRankDofs, &nOverallDofs, 1, MPI_INT, MPI_SUM);
// Do not change the indices now, but create a new indexing and store it here.
DofIndexMap rankDofsNewLocalIndex;
// First, we set all dofs in ranks partition to be owned by the rank. Later,
// the dofs in ranks partition that are owned by another rank are set to false.
isRankDof.clear();
int i = 0;
for (DofContainer::iterator dofIt = rankAllDofs.begin();
dofIt != rankAllDofs.end(); ++dofIt) {
rankDofsNewLocalIndex[*dofIt] = i;
// First, we set all dofs in ranks partition to be owned by the rank. Later,
// the dofs in ranks partition that are owned by another rank are set to false.
for (int i = 0; i < nRankAllDofs; i++)
isRankDof[i] = true;
i++;
}
// Stores for all rank owned dofs a new global index.
DofIndexMap rankDofsNewGlobalIndex;
// Stores for all rank owned dofs a continuous local index.
DofIndexMap rankOwnedDofsNewLocalIndex;
i = 0;
for (DofContainer::iterator dofIt = rankDofs.begin();
dofIt != rankDofs.end(); ++dofIt) {
rankDofsNewGlobalIndex[*dofIt] = i + rstart;
rankOwnedDofsNewLocalIndex[*dofIt] = i;
i++;
}
for (int i = 0; i < nRankDofs; i++)
rankDofsNewGlobalIndex[rankDofs[i]] = i + rstart;
// === Send new DOF indices. ===
#if (DEBUG != 0)
ParallelDomainDbg::testDofContainerCommunication(*this, sendDofs, recvDofs);
#endif
int i = 0;
StdMpi<std::vector<DegreeOfFreedom> > stdMpi(mpiComm, false);
for (RankToDofContainer::iterator sendIt = sendDofs.begin();
sendIt != sendDofs.end(); ++sendIt, i++) {
......@@ -1738,15 +1699,23 @@ namespace AMDiS {
for (DofContainer::iterator dofIt = recvIt->second.begin();
dofIt != recvIt->second.end(); ++dofIt) {
rankDofsNewGlobalIndex[*dofIt] = stdMpi.getRecvData(recvIt->first)[j++];
isRankDof[rankDofsNewLocalIndex[*dofIt]] = false;
isRankDof[**dofIt] = false;
}
}
// === Create now the local to global index and local to dof index mappings. ===
createLocalMappings(rankDofsNewLocalIndex, rankOwnedDofsNewLocalIndex,
rankDofsNewGlobalIndex);
mapLocalGlobalDofs.clear();
mapLocalDofIndex.clear();
for (DofIndexMap::iterator dofIt = rankDofsNewGlobalIndex.begin();
dofIt != rankDofsNewGlobalIndex.end(); ++dofIt)
mapLocalGlobalDofs[*(dofIt->first)] = dofIt->second;
for (int i = 0; i < nRankDofs; i++)
mapLocalDofIndex[i] = *(rankDofs[i]);
// === Update dof admins due to new number of dofs. ===
......@@ -1787,11 +1756,6 @@ namespace AMDiS {
vertexDof[*it],
isRankDof[**it]);
}
MSG("\n");
for (DofMapping::iterator it = mapLocalDofIndex.begin();
it != mapLocalDofIndex.end(); ++it) {
MSG("mapLocalDofIndex[%d] = %d\n", it->first, it->second);
}
#endif
#endif
}
......@@ -1827,6 +1791,7 @@ namespace AMDiS {
mapLocalGlobalDofs.clear();
mapLocalDofIndex.clear();
// Iterate over all DOFs in ranks partition.
for (DofIndexMap::iterator dofIt = rankDofsNewLocalIndex.begin();
dofIt != rankDofsNewLocalIndex.end(); ++dofIt) {
......@@ -1837,12 +1802,13 @@ namespace AMDiS {
mapLocalGlobalDofs[newLocalIndex] = newGlobalIndex;
}
for (DofIndexMap::iterator dofIt = rankOwnedDofsNewLocalIndex.begin();
dofIt != rankOwnedDofsNewLocalIndex.end(); ++dofIt)
mapLocalDofIndex[dofIt->second] = *(dofIt->first);
}
void MeshDistributor::createDofMemberInfo(DofToPartitions& partitionDofs,
DofContainer& rankOwnedDofs,
DofContainer& rankAllDofs,
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment