Aland, Sebastian / amdis · Commits · 41afebb9

Commit 41afebb9, authored Jun 11, 2009 by Thomas Witkowski
Work on pdd, works for the first time.....
parent cfbd82f1
2 changed files
AMDiS/src/ParallelDomainProblem.cc
...
...
@@ -46,17 +46,11 @@ namespace AMDiS {
  partitionMesh(adaptInfo);

  /// === Get rank information about DOFs. ===

  // Stores to each DOF pointer the set of ranks the DOF is part of.
  std::map<const DegreeOfFreedom*, std::set<int> > partitionDOFs;
  // Set of all DOFs of the rank.
  std::vector<const DegreeOfFreedom*> rankDOFs;
  // Set of all interior boundary DOFs in ranks partition which are owned by
  // another rank.
  std::map<const DegreeOfFreedom*, int> boundaryDOFs;

  // === Create new global and local DOF numbering. ===
  createDOFMemberInformation(partionDOFs, rankDOFs, boundaryDOFs);

  int nRankDOFs = 0;
  int nOverallDOFs = 0;
  createLocalGlobalNumbering(nRankDOFs, nOverallDOFs);

  // === Create interior boundary information ===
...
...
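The hunk above replaces the inline DOF bookkeeping with a call to the new createLocalGlobalNumbering(nRankDOFs, nOverallDOFs). The three containers it uses (partitionDOFs, rankDOFs, boundaryDOFs) reappear below inside that function; as a reading aid, here is a small standalone sketch, not part of this commit, of how such containers relate, under the assumed convention that the lowest rank sharing a DOF owns it (the real rule lives in the elided createDOFMemberInfo):

// --- illustrative sketch only, not part of this commit ---
#include <map>
#include <set>
#include <vector>

typedef int DegreeOfFreedom;   // stand-in for the AMDiS typedef

// partitionDOFs: for each DOF, the set of ranks whose partition contains it.
// rankDOFs:      DOFs owned by this rank.
// boundaryDOFs:  shared DOFs, mapped to the rank assumed to own them.
void classifyDofs(int myRank,
                  const std::map<const DegreeOfFreedom*, std::set<int> >& partitionDOFs,
                  std::vector<const DegreeOfFreedom*>& rankDOFs,
                  std::map<const DegreeOfFreedom*, int>& boundaryDOFs)
{
  std::map<const DegreeOfFreedom*, std::set<int> >::const_iterator it;
  for (it = partitionDOFs.begin(); it != partitionDOFs.end(); ++it) {
    int owner = *(it->second.begin());          // assumption: lowest rank owns the DOF
    if (owner == myRank)
      rankDOFs.push_back(it->first);
    if (it->second.size() > 1)
      boundaryDOFs[it->first] = owner;
  }
}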
@@ -67,191 +61,39 @@ namespace AMDiS {
  // === Remove all macro elements that are not part of the rank partition. ===
  removeMacroElements();

  // === Get starting position for global rank dof ordering ====
  int rstart = 0;
  int nRankDOFs = rankDofs.size();
  MPI_Scan(&nRankDOFs, &rstart, 1, MPI_INT, MPI_SUM, PETSC_COMM_WORLD);
  rstart -= nRankDOFs;
  /// === Create information which dof indices must be send and which must be received. ===
  std::map<int, std::map<DegreeOfFreedom, DegreeOfFreedom> > sendNewDofs;
  std::map<int, std::vector<DegreeOfFreedom> > recvNewDofs;

  for (std::map<const DegreeOfFreedom*, int>::iterator it = boundaryDofs.begin();
       it != boundaryDofs.end();
       ++it) {
    if (it->second == mpiRank) {
      // If the boundary dof is a rank dof, it must be send to other ranks.

      // old global index
      int oldDofIndex = (it->first)[0];

      // search for new dof index in ranks partition for this boundary dof
      int newDofIndex = 0;
      for (int i = 0; i < static_cast<int>(rankDofs.size()); i++) {
        if (rankDofs[i] == it->first) {
          newDofIndex = rstart + i;
          break;
        }
      }

      // Search for all ranks that have this dof too.
      for (std::set<int>::iterator itRanks = partitionDofs[it->first].begin();
           itRanks != partitionDofs[it->first].end();
           ++itRanks) {
        if (*itRanks != mpiRank)
          sendNewDofs[*itRanks][oldDofIndex] = newDofIndex;
      }
    } else {
      // If the boundary dof is not a rank dof, its new dof index, and later
      // also the dof values, must be received from another rank.
      recvNewDofs[it->second].push_back((it->first)[0]);
    }
  }
  /// === Send and receive the dof indices at boundary. ===
  std::vector<int*> sendBuffers(sendNewDofs.size());
  std::vector<int*> recvBuffers(recvNewDofs.size());

  int i = 0;
  for (std::map<int, std::map<DegreeOfFreedom, DegreeOfFreedom> >::iterator sendIt = sendNewDofs.begin();
       sendIt != sendNewDofs.end();
       ++sendIt, i++) {
    sendBuffers[i] = new int[sendIt->second.size() * 2];

    int c = 0;
    for (std::map<DegreeOfFreedom, DegreeOfFreedom>::iterator dofIt = sendIt->second.begin();
         dofIt != sendIt->second.end();
         ++dofIt, c += 2) {
      sendBuffers[i][c] = dofIt->first;
      sendBuffers[i][c + 1] = dofIt->second;

      sendDofs[sendIt->first].push_back(dofIt->second);
    }

    mpiComm.Isend(sendBuffers[i], sendIt->second.size() * 2, MPI_INT, sendIt->first, 0);
  }

  i = 0;
  for (std::map<int, std::vector<DegreeOfFreedom> >::iterator recvIt = recvNewDofs.begin();
       recvIt != recvNewDofs.end();
       ++recvIt, i++) {
    recvBuffers[i] = new int[recvIt->second.size() * 2];

    mpiComm.Irecv(recvBuffers[i], recvIt->second.size() * 2, MPI_INT, recvIt->first, 0);
  }

  mpiComm.Barrier();
  /// === Delete send buffers. ===
  i = 0;
  for (std::map<int, std::map<DegreeOfFreedom, DegreeOfFreedom> >::iterator sendIt = sendNewDofs.begin();
       sendIt != sendNewDofs.end();
       ++sendIt, i++)
    delete [] sendBuffers[i];

  /// === Change dof indices for rank partition. ===
  for (int i = 0; i < static_cast<int>(rankDofs.size()); i++) {
    const_cast<DegreeOfFreedom*>(rankDofs[i])[0] = i;
    mapLocalGlobalDOFs[i] = rstart + i;
    mapGlobalLocalDOFs[rstart + i] = i;
    isRankDOF[i] = true;
  }
  /// === Change dof indices at boundary from other ranks. ===
  i = 0;
  for (std::map<int, std::vector<DegreeOfFreedom> >::iterator recvIt = recvNewDofs.begin();
       recvIt != recvNewDofs.end();
       ++recvIt, i++) {
    for (int j = 0; j < static_cast<int>(recvIt->second.size()); j++) {
      int oldDof = recvBuffers[i][j * 2];
      int newDof = recvBuffers[i][j * 2 + 1];
      int newLocalDof = mapLocalGlobalDOFs.size();

      recvDofs[recvIt->first].push_back(newDof);

      for (std::map<const DegreeOfFreedom*, int>::iterator dofIt = boundaryDofs.begin();
           dofIt != boundaryDofs.end();
           ++dofIt) {
        if ((dofIt->first)[0] == oldDof) {
          const_cast<DegreeOfFreedom*>(dofIt->first)[0] = newLocalDof;
          mapLocalGlobalDOFs[newLocalDof] = newDof;
          mapGlobalLocalDOFs[newDof] = newLocalDof;
          isRankDOF[newLocalDof] = false;
          break;
        }
      }
    }

    delete [] recvBuffers[i];
  }
  /// === Reset all DOFAdmins of the mesh. ===
  int nAdmins = mesh->getNumberOfDOFAdmin();
  for (int i = 0; i < nAdmins; i++) {
    for (int j = 0; j < mesh->getDOFAdmin(i).getSize(); j++)
      const_cast<DOFAdmin&>(mesh->getDOFAdmin(i)).setDOFFree(j, true);
    for (int j = 0; j < mapLocalGlobalDOFs.size(); j++)
      const_cast<DOFAdmin&>(mesh->getDOFAdmin(i)).setDOFFree(j, false);
      const_cast<DOFAdmin&>(mesh->getDOFAdmin(i)).setDOFFree(j, true);
    for (int j = 0; j < static_cast<int>(mapLocalGlobalDOFs.size()); j++)
      const_cast<DOFAdmin&>(mesh->getDOFAdmin(i)).setDOFFree(j, false);
  }
  /// === Create local information from sendDofs and recvDofs
  for (std::map<int, std::vector<DegreeOfFreedom> >::iterator it = sendDofs.begin();
       it != sendDofs.end();
       ++it)
    for (std::vector<DegreeOfFreedom>::iterator dofIt = it->second.begin();
         dofIt != it->second.end();
         dofIt++)
      *dofIt = mapGlobalLocalDOFs[*dofIt];

  for (std::map<int, std::vector<DegreeOfFreedom> >::iterator it = recvDofs.begin();
       it != recvDofs.end();
       ++it)
    for (std::vector<DegreeOfFreedom>::iterator dofIt = it->second.begin();
         dofIt != it->second.end();
         dofIt++)
      *dofIt = mapGlobalLocalDOFs[*dofIt];
  /// === Create petsc matrix. ===
  int ierr;
  ierr = MatCreate(PETSC_COMM_WORLD, &petscMatrix);
  ierr = MatSetSizes(petscMatrix, rankDofs.size(), rankDofs.size(),
                     partitionDofs.size(), partitionDofs.size());
  ierr = MatSetSizes(petscMatrix, nRankDOFs, nRankDOFs, nOverallDOFs, nOverallDOFs);
  ierr = MatSetType(petscMatrix, MATAIJ);

  ierr = VecCreate(PETSC_COMM_WORLD, &petscRhsVec);
  ierr = VecSetSizes(petscRhsVec, rankDofs.size(), partitionDofs.size());
  ierr = VecSetSizes(petscRhsVec, nRankDOFs, nOverallDOFs);
  ierr = VecSetType(petscRhsVec, VECMPI);

  ierr = VecCreate(PETSC_COMM_WORLD, &petscSolVec);
  ierr = VecSetSizes(petscSolVec, rankDofs.size(), partitionDofs.size());
  ierr = VecSetSizes(petscSolVec, nRankDOFs, nOverallDOFs);
  ierr = VecSetType(petscSolVec, VECMPI);
}

void ParallelDomainProblemBase::exitParallelization(AdaptInfo *adaptInfo)
{}

void ParallelDomainProblemBase::fillPetscMatrix(DOFMatrix *mat, DOFVector<double> *vec)
{
...
...
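Two things the lines above rely on: MPI_Scan with MPI_SUM gives each rank the inclusive prefix sum of its DOF count, so subtracting the own count yields the first global index of the rank's block (rstart); and MatSetSizes/VecSetSizes with (local, global) sizes make PETSc assign exactly such contiguous blocks. A minimal sketch that puts the two side by side, written against a current PETSc whose call signatures differ slightly from the 2009 API used in this commit:

// --- illustrative sketch only, not part of this commit ---
#include <petscmat.h>

void sketchBlockNumbering(int nRankDOFs, int nOverallDOFs)
{
  // Exclusive prefix sum: rank 0 starts at 0, rank 1 at nRankDOFs(rank 0), ...
  int rstart = 0;
  MPI_Scan(&nRankDOFs, &rstart, 1, MPI_INT, MPI_SUM, PETSC_COMM_WORLD);
  rstart -= nRankDOFs;

  // A distributed AIJ matrix with the same local/global row split.
  Mat A;
  MatCreate(PETSC_COMM_WORLD, &A);
  MatSetSizes(A, nRankDOFs, nRankDOFs, nOverallDOFs, nOverallDOFs);
  MatSetType(A, MATAIJ);
  MatSetUp(A);

  // PETSc's ownership range for this rank should start exactly at rstart.
  PetscInt firstRow, lastRowPlusOne;
  MatGetOwnershipRange(A, &firstRow, &lastRowPlusOne);

  MatDestroy(&A);
}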
@@ -289,6 +131,7 @@ namespace AMDiS {
  }
}

void ParallelDomainProblemBase::solvePetscMatrix(DOFVector<double> *vec)
{
  KSP ksp;
...
...
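solvePetscMatrix declares a KSP whose setup and solve sit in the elided lines; for orientation, the generic PETSc Krylov-solver sequence looks roughly like the following (signatures as in a current PETSc release, with A, b, x standing for an assembled system):

// --- illustrative sketch only, not part of this commit ---
#include <petscksp.h>

void solveSketch(Mat A, Vec b, Vec x)
{
  KSP ksp;
  KSPCreate(PETSC_COMM_WORLD, &ksp);
  KSPSetOperators(ksp, A, A);   // matrix also used to build the preconditioner
  KSPSetFromOptions(ksp);       // honour -ksp_type / -pc_type command-line options
  KSPSolve(ksp, b, x);
  KSPDestroy(&ksp);
}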
@@ -322,7 +165,7 @@ namespace AMDiS {
       ++sendIt, i++) {
    sendBuffers[i] = new double[sendIt->second.size()];

    for (int j = 0; j < sendIt->second.size(); j++)
    for (int j = 0; j < static_cast<int>(sendIt->second.size()); j++)
      sendBuffers[i][j] = (*vec)[(sendIt->second)[j]];

    mpiComm.Isend(sendBuffers[i], sendIt->second.size(), MPI_DOUBLE, sendIt->first, 0);
...
...
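Here, and in the index exchange shown earlier, data is sent with nonblocking Isend calls via the MPI C++ bindings followed by a barrier. A barrier does not by itself complete nonblocking requests; the textbook pattern keeps the request handles and waits on them explicitly. A sketch with the plain C API, where neighbourRank and the buffer layout are invented for the example:

// --- illustrative sketch only, not part of this commit ---
#include <mpi.h>
#include <vector>

void exchangeDoubles(MPI_Comm comm,
                     const std::vector<int>& neighbourRank,
                     std::vector<std::vector<double> >& sendBuf,
                     std::vector<std::vector<double> >& recvBuf)
{
  int n = static_cast<int>(neighbourRank.size());
  std::vector<MPI_Request> requests(2 * n);

  for (int k = 0; k < n; k++) {
    MPI_Isend(&sendBuf[k][0], static_cast<int>(sendBuf[k].size()), MPI_DOUBLE,
              neighbourRank[k], 0, comm, &requests[k]);
    MPI_Irecv(&recvBuf[k][0], static_cast<int>(recvBuf[k].size()), MPI_DOUBLE,
              neighbourRank[k], 0, comm, &requests[n + k]);
  }

  // Completes all sends and receives; only afterwards may the buffers be reused.
  MPI_Waitall(2 * n, &requests[0], MPI_STATUSES_IGNORE);
}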
@@ -344,16 +187,17 @@ namespace AMDiS {
  for (std::map<int, std::vector<DegreeOfFreedom> >::iterator recvIt = recvDofs.begin();
       recvIt != recvDofs.end();
       ++recvIt, i++) {
    for (int j = 0; j < recvIt->second.size(); j++)
    for (int j = 0; j < static_cast<int>(recvIt->second.size()); j++)
      (*vec)[(recvIt->second)[j]] = recvBuffers[i][j];

    delete [] recvBuffers[i];
  }

  for (int i = 0; i < sendBuffers.size(); i++)
  for (int i = 0; i < static_cast<int>(sendBuffers.size()); i++)
    delete [] sendBuffers[i];
}

double ParallelDomainProblemBase::setElemWeights(AdaptInfo *adaptInfo)
{
  double localWeightSum = 0.0;
...
...
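setElemWeights accumulates localWeightSum over the rank's elements in the elided loop and returns it; if a global total were needed, the standard MPI way to form it would be an allreduce, shown here purely as an illustration:

// --- illustrative sketch only, not part of this commit ---
#include <mpi.h>

// Sums the per-rank weight over all ranks; every rank gets the result.
double globalWeightSum(double localWeightSum, MPI_Comm comm)
{
  double globalSum = 0.0;
  MPI_Allreduce(&localWeightSum, &globalSum, 1, MPI_DOUBLE, MPI_SUM, comm);
  return globalSum;
}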
@@ -388,6 +232,7 @@ namespace AMDiS {
  return localWeightSum;
}

void ParallelDomainProblemBase::partitionMesh(AdaptInfo *adaptInfo)
{
  if (initialPartitionMesh) {
...
...
@@ -402,8 +247,232 @@ namespace AMDiS {
  partitioner->fillCoarsePartitionVec(&partitionVec);
}

void ParallelDomainProblemBase::createInteriorBoundaryInfo()
{
  TraverseStack stack;
  ElInfo *elInfo = stack.traverseFirst(mesh, -1, Mesh::CALL_LEAF_EL | Mesh::FILL_NEIGH);
  while (elInfo) {
    Element *element = elInfo->getElement();

    // Hidde elements which are not part of ranks partition.
    PartitionElementData *partitionData =
      dynamic_cast<PartitionElementData*>(element->getElementData(PARTITION_ED));
    if (partitionData->getPartitionStatus() == IN) {
      for (int i = 0; i < 3; i++) {
        if (!elInfo->getNeighbour(i))
          continue;

        PartitionElementData *neighbourPartitionData =
          dynamic_cast<PartitionElementData*>(elInfo->getNeighbour(i)->getElementData(PARTITION_ED));
        if (neighbourPartitionData->getPartitionStatus() == OUT) {
          AtomicBoundary& bound =
            interiorBoundary.getNewAtomicBoundary(partitionVec[elInfo->getNeighbour(i)->getIndex()]);
          bound.rankObject.el = element;
          bound.rankObject.subObjAtBoundary = EDGE;
          bound.rankObject.ithObjAtBoundary = i;
          bound.neighbourObject.el = elInfo->getNeighbour(i);
          bound.neighbourObject.subObjAtBoundary = EDGE;
          bound.neighbourObject.ithObjAtBoundary = -1;
        }
      }
    }

    elInfo = stack.traverseNext(elInfo);
  }
}
void ParallelDomainProblemBase::removeMacroElements()
{
  std::vector<MacroElement*> macrosToRemove;
  for (std::deque<MacroElement*>::iterator it = mesh->firstMacroElement();
       it != mesh->endOfMacroElements();
       ++it) {
    PartitionElementData *partitionData =
      dynamic_cast<PartitionElementData*>((*it)->getElement()->getElementData(PARTITION_ED));
    if (partitionData->getPartitionStatus() != IN)
      macrosToRemove.push_back(*it);
  }

  mesh->removeMacroElements(macrosToRemove);
}
void ParallelDomainProblemBase::createLocalGlobalNumbering(int& nRankDOFs, int& nOverallDOFs)
{
  /// === Get rank information about DOFs. ===

  // Stores to each DOF pointer the set of ranks the DOF is part of.
  std::map<const DegreeOfFreedom*, std::set<int> > partitionDOFs;
  // Set of all DOFs of the rank.
  std::vector<const DegreeOfFreedom*> rankDOFs;
  // Set of all interior boundary DOFs in ranks partition which are owned by
  // another rank.
  std::map<const DegreeOfFreedom*, int> boundaryDOFs;

  createDOFMemberInfo(partitionDOFs, rankDOFs, boundaryDOFs);

  nRankDOFs = rankDOFs.size();
  nOverallDOFs = partitionDOFs.size();
  // === Get starting position for global rank dof ordering ====
  int rstart = 0;
  MPI_Scan(&nRankDOFs, &rstart, 1, MPI_INT, MPI_SUM, PETSC_COMM_WORLD);
  rstart -= nRankDOFs;
  /// === Create information which dof indices must be send and which must be received. ===
  std::map<int, std::map<DegreeOfFreedom, DegreeOfFreedom> > sendNewDofs;
  std::map<int, std::vector<DegreeOfFreedom> > recvNewDofs;

  for (std::map<const DegreeOfFreedom*, int>::iterator it = boundaryDOFs.begin();
       it != boundaryDOFs.end();
       ++it) {
    if (it->second == mpiRank) {
      // If the boundary dof is a rank dof, it must be send to other ranks.

      // old global index
      int oldDofIndex = (it->first)[0];

      // search for new dof index in ranks partition for this boundary dof
      int newDofIndex = 0;
      for (int i = 0; i < static_cast<int>(rankDOFs.size()); i++) {
        if (rankDOFs[i] == it->first) {
          newDofIndex = rstart + i;
          break;
        }
      }

      // Search for all ranks that have this dof too.
      for (std::set<int>::iterator itRanks = partitionDOFs[it->first].begin();
           itRanks != partitionDOFs[it->first].end();
           ++itRanks) {
        if (*itRanks != mpiRank)
          sendNewDofs[*itRanks][oldDofIndex] = newDofIndex;
      }
    } else {
      // If the boundary dof is not a rank dof, its new dof index, and later
      // also the dof values, must be received from another rank.
      recvNewDofs[it->second].push_back((it->first)[0]);
    }
  }
  /// === Send and receive the dof indices at boundary. ===
  std::vector<int*> sendBuffers(sendNewDofs.size());
  std::vector<int*> recvBuffers(recvNewDofs.size());

  int i = 0;
  for (std::map<int, std::map<DegreeOfFreedom, DegreeOfFreedom> >::iterator sendIt = sendNewDofs.begin();
       sendIt != sendNewDofs.end();
       ++sendIt, i++) {
    sendBuffers[i] = new int[sendIt->second.size() * 2];

    int c = 0;
    for (std::map<DegreeOfFreedom, DegreeOfFreedom>::iterator dofIt = sendIt->second.begin();
         dofIt != sendIt->second.end();
         ++dofIt, c += 2) {
      sendBuffers[i][c] = dofIt->first;
      sendBuffers[i][c + 1] = dofIt->second;

      sendDofs[sendIt->first].push_back(dofIt->second);
    }

    mpiComm.Isend(sendBuffers[i], sendIt->second.size() * 2, MPI_INT, sendIt->first, 0);
  }

  i = 0;
  for (std::map<int, std::vector<DegreeOfFreedom> >::iterator recvIt = recvNewDofs.begin();
       recvIt != recvNewDofs.end();
       ++recvIt, i++) {
    recvBuffers[i] = new int[recvIt->second.size() * 2];

    mpiComm.Irecv(recvBuffers[i], recvIt->second.size() * 2, MPI_INT, recvIt->first, 0);
  }

  mpiComm.Barrier();
  /// === Delete send buffers. ===
  i = 0;
  for (std::map<int, std::map<DegreeOfFreedom, DegreeOfFreedom> >::iterator sendIt = sendNewDofs.begin();
       sendIt != sendNewDofs.end();
       ++sendIt, i++)
    delete [] sendBuffers[i];

  /// === Change dof indices for rank partition. ===
  for (int i = 0; i < static_cast<int>(rankDOFs.size()); i++) {
    const_cast<DegreeOfFreedom*>(rankDOFs[i])[0] = i;
    mapLocalGlobalDOFs[i] = rstart + i;
    mapGlobalLocalDOFs[rstart + i] = i;
    isRankDOF[i] = true;
  }
  /// === Change dof indices at boundary from other ranks. ===
  i = 0;
  for (std::map<int, std::vector<DegreeOfFreedom> >::iterator recvIt = recvNewDofs.begin();
       recvIt != recvNewDofs.end();
       ++recvIt, i++) {
    for (int j = 0; j < static_cast<int>(recvIt->second.size()); j++) {
      int oldDof = recvBuffers[i][j * 2];
      int newDof = recvBuffers[i][j * 2 + 1];
      int newLocalDof = mapLocalGlobalDOFs.size();

      recvDofs[recvIt->first].push_back(newDof);

      for (std::map<const DegreeOfFreedom*, int>::iterator dofIt = boundaryDOFs.begin();
           dofIt != boundaryDOFs.end();
           ++dofIt) {
        if ((dofIt->first)[0] == oldDof) {
          const_cast<DegreeOfFreedom*>(dofIt->first)[0] = newLocalDof;
          mapLocalGlobalDOFs[newLocalDof] = newDof;
          mapGlobalLocalDOFs[newDof] = newLocalDof;
          isRankDOF[newLocalDof] = false;
          break;
        }
      }
    }

    delete [] recvBuffers[i];
  }
  /// === Create local information from sendDofs and recvDofs
  for (std::map<int, std::vector<DegreeOfFreedom> >::iterator it = sendDofs.begin();
       it != sendDofs.end();
       ++it)
    for (std::vector<DegreeOfFreedom>::iterator dofIt = it->second.begin();
         dofIt != it->second.end();
         dofIt++)
      *dofIt = mapGlobalLocalDOFs[*dofIt];

  for (std::map<int, std::vector<DegreeOfFreedom> >::iterator it = recvDofs.begin();
       it != recvDofs.end();
       ++it)
    for (std::vector<DegreeOfFreedom>::iterator dofIt = it->second.begin();
         dofIt != it->second.end();
         dofIt++)
      *dofIt = mapGlobalLocalDOFs[*dofIt];
}
void ParallelDomainProblemBase::createDOFMemberInfo(
       std::map<const DegreeOfFreedom*, std::set<int> >& partitionDOFs,
       std::map<const DegreeOfFreedom*, std::set<int> > &partitionDOFs,
       std::vector<const DegreeOfFreedom*>& rankDOFs,
       std::map<const DegreeOfFreedom*, int>& boundaryDOFs)
{
...
...
@@ -460,60 +529,6 @@ namespace AMDiS {
  }
}

void ParallelDomainProblemBase::createInteriorBoundaryInfo()
{
  TraverseStack stack;
  ElInfo *elInfo = stack.traverseFirst(mesh, -1, Mesh::CALL_LEAF_EL | Mesh::FILL_NEIGH);
  while (elInfo) {
    Element *element = elInfo->getElement();

    // Hidde elements which are not part of ranks partition.
    PartitionElementData *partitionData =
      dynamic_cast<PartitionElementData*>(element->getElementData(PARTITION_ED));
    if (partitionData->getPartitionStatus() == IN) {
      for (int i = 0; i < 3; i++) {
        if (!elInfo->getNeighbour(i))
          continue;

        PartitionElementData *neighbourPartitionData =
          dynamic_cast<PartitionElementData*>(elInfo->getNeighbour(i)->getElementData(PARTITION_ED));
        if (neighbourPartitionData->getPartitionStatus() == OUT) {
          AtomicBoundary& bound =
            interiorBoundary.getNewAtomicBoundary(partitionVec[elInfo->getNeighbour(i)->getIndex()]);
          bound.rankObject.el = element;
          bound.rankObject.subObjAtBoundary = EDGE;
          bound.rankObject.ithObjAtBoundary = i;
          bound.neighbourObject.el = elInfo->getNeighbour(i);
          bound.neighbourObject.subObjAtBoundary = EDGE;
          bound.neighbourObject.ithObjAtBoundary = -1;
        }
      }
    }

    elInfo = stack.traverseNext(elInfo);
  }
}

void ParallelDomainProblemBase::removeMacroElements()
{
  std::vector<MacroElement*> macrosToRemove;
  for (std::deque<MacroElement*>::iterator it = mesh->firstMacroElement();
       it != mesh->endOfMacroElements();
       ++it) {
    PartitionElementData *partitionData =
      dynamic_cast<PartitionElementData*>((*it)->getElement()->getElementData(PARTITION_ED));
    if (partitionData->getPartitionStatus() != IN)
      macrosToRemove.push_back(*it);
  }

  mesh->removeMacroElements(macrosToRemove);
}

void ParallelDomainProblemBase::createLocalGlobalOrder()
{
}

ParallelDomainProblemScal::ParallelDomainProblemScal(const std::string& name,
                                                     ProblemScal *problem,
...
...
AMDiS/src/ParallelDomainProblem.h
...
...
@@ -23,6 +23,7 @@
#define AMDIS_PARALLELDOMAINPROBLEM_H

#include <map>
#include <set>
#include <vector>

#include "ProblemTimeInterface.h"
...
...
@@ -90,19 +91,16 @@ namespace AMDiS {
      iterationIF->endIteration(adaptInfo);
    }

    virtual int getNumProblems() {}
    virtual int getNumProblems()
    {
      return 0;
    }

    inline virtual const std::string& getName()
    {
      return name;
    }

    /// Returns pointer to \ref applicationOrdering.
    AO* getApplicationOrdering()
    {
      return &applicationOrdering;
    }

    /// Returns \ref nRankDOFs, the number of DOFs in the rank mesh.
    int getNumberRankDOFs()
    {
...
...
@@ -113,7 +111,10 @@ namespace AMDiS {
    void solvePetscMatrix(DOFVector<double> *vec);

    virtual ProblemStatBase* getProblem(int number = 0) {}
    virtual ProblemStatBase* getProblem(int number = 0)
    {