Project: Aland, Sebastian / amdis

Commit aff33faa, authored Jan 13, 2012 by Thomas Witkowski
Parent: 808eac8e

    Work on parallelization.

Showing 21 changed files with 423 additions and 141 deletions (+423 -141)
Changed files:

  AMDiS/CMakeLists.txt                                  +2    -1
  AMDiS/src/AdaptStationary.cc                          +10   -2
  AMDiS/src/Flag.h                                      +1    -1
  AMDiS/src/MeshStructure.cc                            +15   -7
  AMDiS/src/MeshStructure.h                             +3    -0
  AMDiS/src/Serializer.h                                +4    -4
  AMDiS/src/parallel/CheckerPartitioner.cc              +85   -7
  AMDiS/src/parallel/CheckerPartitioner.h               +29   -2
  AMDiS/src/parallel/MeshDistributor.cc                 +72   -64
  AMDiS/src/parallel/ParallelDebug.cc                   +54   -0
  AMDiS/src/parallel/ParallelDebug.h                    +9    -0
  AMDiS/src/parallel/PetscMultigridPrecon.cc            +65   -0
  AMDiS/src/parallel/PetscMultigridPrecon.h             +51   -0
  AMDiS/src/parallel/PetscProblemStat.cc                +0    -8
  AMDiS/src/parallel/PetscSolverFeti.cc                 +0    -4
  AMDiS/src/parallel/PetscSolverFeti.h                  +0    -4
  AMDiS/src/parallel/PetscSolverGlobalBlockMatrix.cc    +6    -2
  AMDiS/src/parallel/PetscSolverGlobalMatrix.cc         +0    -11
  AMDiS/src/parallel/PetscSolverSchur.cc                +0    -3
  AMDiS/src/parallel/PetscSolverSchur.h                 +0    -2
  AMDiS/src/parallel/StdMpi.h                           +17   -19
AMDiS/CMakeLists.txt

@@ -239,7 +239,8 @@ if(ENABLE_PARALLEL_DOMAIN)
     find_package(PETSc REQUIRED)
     include_directories(${PETSC_DIR}/include ${PETSC_DIR}/${PETSC_ARCH}/include)
     list(APPEND AMDIS_INCLUDE_DIRS ${PETSC_DIR}/include ${PETSC_DIR}/${PETSC_ARCH}/include)
-    list(APPEND PARALLEL_DOMAIN_AMDIS_SRC ${SOURCE_DIR}/parallel/PetscSolver.cc
+    list(APPEND PARALLEL_DOMAIN_AMDIS_SRC ${SOURCE_DIR}/parallel/PetscMultigridPrecon.cc
+        ${SOURCE_DIR}/parallel/PetscSolver.cc
         ${SOURCE_DIR}/parallel/PetscProblemStat.cc
         ${SOURCE_DIR}/parallel/PetscSolverFeti.cc
         ${SOURCE_DIR}/parallel/PetscSolverGlobalMatrix.cc
AMDiS/src/AdaptStationary.cc

@@ -63,7 +63,7 @@ namespace AMDiS {
       problemIteration->endIteration(adaptInfo);
       adaptInfo->incSpaceIteration();
     }

     // adaption loop
     while (!adaptInfo->spaceToleranceReached() &&
            (adaptInfo->getSpaceIteration() < adaptInfo->getMaxSpaceIteration() ||
@@ -72,9 +72,17 @@ namespace AMDiS {
       problemIteration->beginIteration(adaptInfo);
       Flag adapted = problemIteration->oneIteration(adaptInfo, FULL_ITERATION);
       problemIteration->endIteration(adaptInfo);

+#if HAVE_PARALLEL_DOMAIN_AMDIS
+      int recvAllValues = 0;
+      int isAdapted = static_cast<bool>(adapted);
+      MPI::COMM_WORLD.Allreduce(&isAdapted, &recvAllValues, 1, MPI_INT, MPI_SUM);
+      if (recvAllValues == 0)
+        break;
+#else
       if (!adapted)
         break;
+#endif

       adaptInfo->incSpaceIteration();
     }
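For reference, a minimal standalone sketch of the global stop test added above: every rank turns its local "did I adapt?" result into a 0/1 flag, the flags are summed with an allreduce, and all ranks leave the adaption loop together once the sum is zero. The sketch uses the plain C MPI API instead of the AMDiS types, and locallyAdapted is only a stand-in for the oneIteration() result.

  #include <mpi.h>
  #include <cstdio>

  int main(int argc, char** argv)
  {
    MPI_Init(&argc, &argv);
    int rank = 0;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    // Pretend only rank 0 refined its mesh in this iteration.
    int locallyAdapted = (rank == 0) ? 1 : 0;

    // Sum the per-rank flags; if the sum is 0, no rank has adapted and the
    // adaption loop can terminate on all ranks at the same time.
    int globallyAdapted = 0;
    MPI_Allreduce(&locallyAdapted, &globallyAdapted, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);

    if (globallyAdapted == 0)
      std::printf("rank %d: no rank adapted, leaving the loop\n", rank);
    else
      std::printf("rank %d: %d rank(s) adapted, iterating again\n", rank, globallyAdapted);

    MPI_Finalize();
    return 0;
  }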
AMDiS/src/Flag.h

@@ -158,7 +158,7 @@ namespace AMDiS {
     inline Flag operator^(const Flag& f) const
     {
       Flag r(flags);
       r.flags ^= f.flags;
       return r;
     }
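The changed operator builds the result from a bitwise XOR of the underlying masks; a two-line illustration on plain unsigned values (Flag is essentially a wrapper around such a mask):

  #include <cstdio>

  int main()
  {
    unsigned a = 0b1100;
    unsigned b = 0b1010;
    unsigned r = a ^ b;        // bits set in exactly one of the two masks
    std::printf("%u\n", r);    // prints 6, i.e. 0b0110
  }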
AMDiS/src/MeshStructure.cc

@@ -363,10 +363,8 @@ namespace AMDiS {
   }

-  void MeshStructure::print(bool resetCode)
+  string MeshStructure::toStr(bool resetCode)
   {
     FUNCNAME("MeshStructure::print()");

     std::stringstream oss;

     if (empty()) {
@@ -385,14 +383,24 @@ namespace AMDiS {
         cont = nextElement();
       }
     }
+
+    return oss.str();
+  }
+
+
+  void MeshStructure::print(bool resetCode)
+  {
+    FUNCNAME("MeshStructure::print()");
+
+    string str = toStr(resetCode);

-    if (oss.str().length() < 255) {
-      MSG("Mesh structure code: %s\n", oss.str().c_str());
+    if (str.length() < 255) {
+      MSG("Mesh structure code: %s\n", str.c_str());
     } else {
 #ifdef HAVE_PARALLEL_DOMAIN_AMDIS
-      std::cout << "[" << MPI::COMM_WORLD.Get_rank() << "] Mesh structure code: " << oss.str() << "\n";
+      std::cout << "[" << MPI::COMM_WORLD.Get_rank() << "] Mesh structure code: " << str << "\n";
 #else
-      std::cout << " Mesh structure code: " << oss.str() << "\n";
+      std::cout << " Mesh structure code: " << str << "\n";
 #endif
     }
   }
AMDiS/src/MeshStructure.h

@@ -126,6 +126,9 @@ namespace AMDiS {
               int macroElIndex = -1,
               bool ignoreFinerMesh = false);

+    /// Converts the mesh structure code to a string (for debugging).
+    string toStr(bool resetCode = true);
+
     /// Prints the mesh structure code.
     void print(bool resetCode = true);
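Taken together, the MeshStructure changes are a split of formatting and output: the structure code is rendered into a string once (toStr) and print() only decides where the string goes. A small self-contained sketch of the same pattern; the class below is purely illustrative and not part of AMDiS:

  #include <iostream>
  #include <sstream>
  #include <string>
  #include <vector>

  class StructureCode
  {
  public:
    explicit StructureCode(std::vector<int> c) : code(c) {}

    // Formatting lives in one place and can also be reused by other callers
    // (e.g. the debug output in ParallelDebug::followBoundary below).
    std::string toStr() const
    {
      std::stringstream oss;
      for (std::size_t i = 0; i < code.size(); i++)
        oss << code[i];
      return oss.str();
    }

    // Printing becomes a thin wrapper around toStr().
    void print() const
    {
      std::string str = toStr();
      if (str.length() < 255)
        std::cout << "structure code: " << str << "\n";
      else
        std::cout << "structure code (long): " << str << "\n";
    }

  private:
    std::vector<int> code;
  };

  int main()
  {
    StructureCode c({1, 0, 1, 1, 0});
    std::cout << c.toStr() << "\n";
    c.print();
  }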
AMDiS/src/Serializer.h

@@ -45,10 +45,10 @@ namespace AMDiS {
       : name(""),
         problem(prob),
         tsModulo(1),
-        timestepNumber(-1),
         appendIndex(0),
         indexLength(5),
-        indexDecimals(3)
+        indexDecimals(3),
+        timestepNumber(-1)
     {
       FUNCNAME("Serializer::Serializer()");
@@ -68,10 +68,10 @@ namespace AMDiS {
       : name(filename),
         problem(prob),
         tsModulo(writeEveryIth),
-        timestepNumber(-1),
         appendIndex(0),
         indexLength(5),
-        indexDecimals(3)
+        indexDecimals(3),
+        timestepNumber(-1)
     {
       FUNCNAME("Serializer::Serializer()");
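Both constructors now list timestepNumber last. One plausible reason for reordering an initializer list like this is to keep it in the same order as the member declarations: members are always initialised in declaration order regardless of how the list is written, and compilers flag mismatches with -Wreorder. A small illustrative example, unrelated to the Serializer class itself:

  #include <iostream>

  struct Example
  {
    int a;
    int b;

    // Written b-first, but a is still initialised first because members are
    // initialised in declaration order; GCC/Clang warn here with -Wreorder.
    Example() : b(2), a(1) {}
  };

  int main()
  {
    Example e;
    std::cout << e.a << " " << e.b << "\n";   // prints "1 2"
  }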
AMDiS/src/parallel/CheckerPartitioner.cc

@@ -15,27 +15,105 @@
 namespace AMDiS {

   void CheckerPartitioner::createInitialPartitioning()
   {
     FUNCNAME("CheckerPartitioner::createInitialPartitioning()");

     int mpiRank = mpiComm->Get_rank();
     int mpiSize = mpiComm->Get_size();
-    int nElementsPerBlock = (mesh->getDim() == 2 ? 2 : 6);
+
+    // In one of the stripes modes, we have to check whether the number of
+    // macro elements fits together with the number of nodes.
+    if (mode == 1 || mode == 2 || mode == 3) {
+      int elCounter = 0;
+      TraverseStack stack;
+      ElInfo *elInfo = stack.traverseFirst(mesh, 0, Mesh::CALL_EL_LEVEL);
+      while (elInfo) {
+        elCounter++;
+        elInfo = stack.traverseNext(elInfo);
+      }
+
+      if (mesh->getDim() == 2)
+        TEST_EXIT(elCounter == 2 * mpiSize * mpiSize)
+          ("The number of macro elements is %d, but must be %d for %d number of nodes!",
+           elCounter, 2 * mpiSize * mpiSize, mpiSize);
+
+      if (mesh->getDim() == 3)
+        TEST_EXIT(elCounter == 6 * static_cast<int>(pow(mpiSize, 1.5)))
+          ("The number of macro elements is %d, but must be %d for %d number of nodes!",
+           elCounter, 6 * static_cast<int>(pow(mpiSize, 1.5)), mpiSize);
+    }
+
+    int dim = mesh->getDim();

     TraverseStack stack;
     ElInfo *elInfo = stack.traverseFirst(mesh, 0, Mesh::CALL_EL_LEVEL);
     while (elInfo) {
       Element *el = elInfo->getElement();
       int elIndex = el->getIndex();
-      int elInRank = elIndex / nElementsPerBlock;
-      TEST_EXIT_DBG(elInRank < mpiSize)("Should not happen!\n");
+
+      int boxIndex = elIndex / (dim == 2 ? 2 : 6);
+      int elInRank = -1;
+
+      switch (mode) {
+      case 0:
+        elInRank = boxIndex;
+        break;
+
+      case 1:
+        // x-slices
+        {
+          if (dim == 2)
+            elInRank = elIndex / (2 * mpiSize);
+          if (dim == 3) {
+            int boxSliceY = (boxIndex % mpiSize) / static_cast<int>(sqrt(mpiSize));
+            int boxSliceZ = boxIndex / mpiSize;
+            elInRank = boxSliceY * static_cast<int>(sqrt(mpiSize)) + boxSliceZ;
+          }
+        }
+        break;
+
+      case 2:
+        // y-slices
+        {
+          if (dim == 2)
+            elInRank = (elIndex % (2 * mpiSize)) / 2;
+          if (dim == 3) {
+            int boxSliceX = (boxIndex % mpiSize) % static_cast<int>(sqrt(mpiSize));
+            int boxSliceZ = boxIndex / mpiSize;
+            elInRank = boxSliceX * static_cast<int>(sqrt(mpiSize)) + boxSliceZ;
+          }
+        }
+        break;
+
+      case 3:
+        // z-slices
+        {
+          int boxSliceX = (boxIndex % mpiSize) % static_cast<int>(sqrt(mpiSize));
+          int boxSliceY = (boxIndex % mpiSize) / static_cast<int>(sqrt(mpiSize));
+          elInRank = boxSliceX * static_cast<int>(sqrt(mpiSize)) + boxSliceY;
+        }
+        break;
+
+      default:
+        ERROR_EXIT("Mode %d does not exists for checker based mesh partitioning!\n", mode);
+      }
+
+      TEST_EXIT_DBG(elInRank >= 0)("Should not happen!\n");
+      TEST_EXIT_DBG(elInRank < mpiSize)("Should not happen!\n");

       elementInRank[elIndex] = (elInRank == mpiRank);
       partitionMap[elIndex] = elInRank;

       elInfo = stack.traverseNext(elInfo);
     }
   }
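To make the slice arithmetic concrete, here is a standalone sketch of the 3D stripe assignment (6 tetrahedra per box and a square number of ranks, as the checks above require). The function name and the hard-coded sizes are illustrative only:

  #include <cmath>
  #include <cstdio>

  // Mirrors the switch above for dim == 3: boxes are blocks of 6 macro
  // elements, and a stripe of boxes is mapped to one rank.
  int stripeRank3d(int elIndex, int mpiSize, int mode)
  {
    int boxIndex = elIndex / 6;
    int sqrtSize = static_cast<int>(std::sqrt(mpiSize));

    int boxSliceX = (boxIndex % mpiSize) % sqrtSize;
    int boxSliceY = (boxIndex % mpiSize) / sqrtSize;
    int boxSliceZ = boxIndex / mpiSize;

    switch (mode) {
    case 1: return boxSliceY * sqrtSize + boxSliceZ;   // x-stripes
    case 2: return boxSliceX * sqrtSize + boxSliceZ;   // y-stripes
    case 3: return boxSliceX * sqrtSize + boxSliceY;   // z-stripes
    default: return boxIndex;                          // mode 0: one box per rank
    }
  }

  int main()
  {
    // 4 ranks need 6 * 4^1.5 = 48 macro elements, i.e. 8 boxes.
    for (int box = 0; box < 8; box++)
      std::printf("box %d -> rank %d (z-stripes)\n", box, stripeRank3d(6 * box, 4, 3));
  }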
AMDiS/src/parallel/CheckerPartitioner.h

@@ -25,16 +25,35 @@
 #include "AMDiS_fwd.h"
 #include "Global.h"
 #include "Initfile.h"
 #include "parallel/MeshPartitioner.h"

 namespace AMDiS {

   using namespace std;

   class CheckerPartitioner : public MeshPartitioner
   {
   public:
     CheckerPartitioner(MPI::Intracomm *comm)
-      : MeshPartitioner(comm)
-    {}
+      : MeshPartitioner(comm),
+        mpiRank(mpiComm->Get_rank()),
+        mpiSize(mpiComm->Get_size())
+    {
+      string modestr = "";
+      Parameters::get("parallel->partitioner->mode", modestr);
+
+      if (modestr == "")
+        mode = 0;
+      else if (modestr == "x-stripes")
+        mode = 1;
+      else if (modestr == "y-stripes")
+        mode = 2;
+      else if (modestr == "z-stripes")
+        mode = 3;
+      else
+        ERROR_EXIT("No partitioner mode \"%s\"!\n", modestr.c_str());
+    }

     ~CheckerPartitioner()
     {}
@@ -50,6 +69,14 @@ namespace AMDiS {
     {
       pMap = partitionMap;
     }

   protected:
+    int mpiRank, mpiSize;
+
+    /// 0: standard mode: each node gets one box
+    /// 1: x-stripes: each node gets one x-stripe of boxes
+    /// 2: y-stripes: each node gets one y-stripe of boxes
+    int mode;
   };
 }
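For the 2D case documented in the mode comment above, a box consists of two triangles and the stripe formulas reduce to simple integer divisions. A short illustrative sketch (the sizes are made up and the function is not part of AMDiS):

  #include <cstdio>

  // 2D stripe assignment: mpiSize x mpiSize boxes, two triangles per box.
  int stripeRank2d(int elIndex, int mpiSize, int mode)
  {
    if (mode == 1)                        // x-stripes: one row of boxes per rank
      return elIndex / (2 * mpiSize);
    if (mode == 2)                        // y-stripes: one column of boxes per rank
      return (elIndex % (2 * mpiSize)) / 2;
    return elIndex / 2;                   // mode 0: one box per rank
  }

  int main()
  {
    const int mpiSize = 3;                // requires 2 * 3 * 3 = 18 macro elements
    for (int elIndex = 0; elIndex < 18; elIndex += 2)
      std::printf("box %d -> x-stripe rank %d, y-stripe rank %d\n",
                  elIndex / 2,
                  stripeRank2d(elIndex, mpiSize, 1),
                  stripeRank2d(elIndex, mpiSize, 2));
  }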
AMDiS/src/parallel/MeshDistributor.cc

@@ -77,8 +77,8 @@ namespace AMDiS {
       info(10),
       partitioner(NULL),
-      nRankDofs(0),
-      nOverallDofs(0),
+      rStartDofs(0),
+      nOverallDofs(0),
       deserialized(false),
       writeSerializationFile(false),
       repartitioningAllowed(false),
@@ -297,6 +297,7 @@ namespace AMDiS {
     ParallelDebug::testAllElements(*this);
     debug::testSortedDofs(mesh, elMap);
     ParallelDebug::testInteriorBoundary(*this);
+    ParallelDebug::followBoundary(*this);

     debug::writeMesh(feSpace, -1, debugOutputDir + "macro_mesh");
@@ -892,8 +893,8 @@ namespace AMDiS {
     map<int, MeshCodeVec> sendCodes;

     for (RankToBoundMap::iterator it = allBound.begin(); it != allBound.end(); ++it) {
       for (vector<AtomicBoundary>::iterator boundIt = it->second.begin();
            boundIt != it->second.end(); ++boundIt) {
         MeshStructure elCode;
@@ -904,7 +905,8 @@ namespace AMDiS {
     StdMpi<MeshCodeVec> stdMpi(mpiComm, true);
     stdMpi.send(sendCodes);

     for (RankToBoundMap::iterator it = allBound.begin(); it != allBound.end(); ++it)
       stdMpi.recv(it->first);

+    MSG("DA 1\n");
@@ -917,7 +919,8 @@ namespace AMDiS {
     bool meshChanged = false;

     for (RankToBoundMap::iterator it = allBound.begin(); it != allBound.end(); ++it) {
       MeshCodeVec& recvCodes = stdMpi.getRecvData()[it->first];
       int i = 0;
@@ -928,6 +931,10 @@ namespace AMDiS {
         MeshStructure elCode;
         elCode.init(boundIt->rankObj);

+#if (DEBUG != 0)
+        ParallelDebug::followBoundary(mesh, *boundIt, elCode);
+#endif
+
         if (elCode.getCode() != recvCodes[i].getCode()) {
           // MSG("MACRO EL %d %d %d DOES NOT FIT WITH MACRO EL %d %d %d ON RANK %d\n",
           //     boundIt->rankObj.elIndex, boundIt->rankObj.subObj, boundIt->rankObj.ithObj,
@@ -1440,77 +1447,78 @@ namespace AMDiS {
     while (elObjects.iterate(geoIndex)) {
       map<int, ElementObjectData>& objData = elObjects.getIterateData();

-      if (objData.count(mpiRank) && objData.size() > 1) {
+      if (!(objData.count(mpiRank) && objData.size() > 1))
+        continue;
+
       int owner = elObjects.getIterateOwner();
       ElementObjectData& rankBoundEl = objData[mpiRank];

       TEST_EXIT_DBG(macroElIndexMap[rankBoundEl.elIndex])("Should not happen!\n");

       AtomicBoundary bound;
       bound.rankObj.el = macroElIndexMap[rankBoundEl.elIndex];
       bound.rankObj.elIndex = rankBoundEl.elIndex;
       bound.rankObj.elType = macroElIndexTypeMap[rankBoundEl.elIndex];
       bound.rankObj.subObj = geoIndex;
       bound.rankObj.ithObj = rankBoundEl.ithObject;

       if (geoIndex == FACE) {
         for (int edgeNo = 0; edgeNo < 3; edgeNo++) {
           int edgeOfFace =
             bound.rankObj.el->getEdgeOfFace(bound.rankObj.ithObj, edgeNo);

           bound.rankObj.excludedSubstructures.push_back(make_pair(EDGE, edgeOfFace));
         }
       }

       if (owner == mpiRank) {
         for (map<int, ElementObjectData>::iterator it2 = objData.begin();
              it2 != objData.end(); ++it2) {
           if (it2->first == mpiRank)
             continue;

-          ElementObjectData& ownerBoundEl = objData[owner];
-
-          bound.neighObj.el = macroElIndexMap[ownerBoundEl.elIndex];
-          bound.neighObj.elIndex = ownerBoundEl.elIndex;
-          bound.neighObj.elType = -1;
+          bound.neighObj.el = macroElIndexMap[it2->second.elIndex];
+          bound.neighObj.elIndex = it2->second.elIndex;
+          bound.neighObj.elType = macroElIndexTypeMap[it2->second.elIndex];
           bound.neighObj.subObj = geoIndex;
-          bound.neighObj.ithObj = ownerBoundEl.ithObject;
+          bound.neighObj.ithObj = it2->second.ithObject;
           bound.type = INTERIOR;

-          AtomicBoundary& b = otherIntBoundary.getNewAtomic(owner);
+          AtomicBoundary& b = myIntBoundary.getNewAtomic(it2->first);
           b = bound;

           if (geoIndex == EDGE)
-            b.rankObj.reverseMode = elObjects.getEdgeReverseMode(rankBoundEl, ownerBoundEl);
+            b.neighObj.reverseMode = elObjects.getEdgeReverseMode(rankBoundEl, it2->second);
           if (geoIndex == FACE)
-            b.rankObj.reverseMode = elObjects.getFaceReverseMode(rankBoundEl, ownerBoundEl);
+            b.neighObj.reverseMode = elObjects.getFaceReverseMode(rankBoundEl, it2->second);
         }
       } else {
         TEST_EXIT_DBG(objData.count(owner) == 1)("Should not happen!\n");

         ElementObjectData& ownerBoundEl = objData[owner];

         bound.neighObj.el = macroElIndexMap[ownerBoundEl.elIndex];
         bound.neighObj.elIndex = ownerBoundEl.elIndex;
         bound.neighObj.elType = -1;
         bound.neighObj.subObj = geoIndex;
         bound.neighObj.ithObj = ownerBoundEl.ithObject;
         bound.type = INTERIOR;

         AtomicBoundary& b = otherIntBoundary.getNewAtomic(owner);
         b = bound;

         if (geoIndex == EDGE)
           b.rankObj.reverseMode = elObjects.getEdgeReverseMode(rankBoundEl, ownerBoundEl);
         if (geoIndex == FACE)
           b.rankObj.reverseMode = elObjects.getFaceReverseMode(rankBoundEl, ownerBoundEl);
       }
-      }
     }

     // === Create periodic boundary data structure. ===

     for (PerBoundMap<DegreeOfFreedom>::iterator it = elObjects.getPeriodicVertices().begin();
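The hunks above exchange mesh structure codes over the interior boundaries through the StdMpi wrapper and then compare the received codes with the locally computed ones. A minimal standalone sketch of that kind of neighbour exchange using raw MPI calls instead of StdMpi (every rank sends one integer "code" to every other rank, receives one back, and compares):

  #include <mpi.h>
  #include <cstdio>
  #include <vector>

  int main(int argc, char** argv)
  {
    MPI_Init(&argc, &argv);
    int rank = 0, size = 0;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    int myCode = 42;                       // stand-in for a mesh structure code
    std::vector<int> recvCodes(size, -1);

    // Pairwise exchange with every other rank; MPI_Sendrecv avoids deadlocks.
    for (int r = 0; r < size; r++) {
      if (r == rank)
        continue;
      MPI_Sendrecv(&myCode, 1, MPI_INT, r, 0,
                   &recvCodes[r], 1, MPI_INT, r, 0,
                   MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    }

    for (int r = 0; r < size; r++)
      if (r != rank && recvCodes[r] != myCode)
        std::printf("rank %d: code from rank %d does not fit (%d vs %d)\n",
                    rank, r, recvCodes[r], myCode);

    MPI_Finalize();
    return 0;
  }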
AMDiS/src/parallel/ParallelDebug.cc

@@ -835,6 +835,7 @@ namespace AMDiS {
     ElementFileWriter::writeFile(vec, pdb.mesh, filename);
   }

   void ParallelDebug::writePartitioningFile(string filename,
                                             int counter,
                                             FiniteElemSpace *feSpace)
@@ -851,4 +852,57 @@ namespace AMDiS {
     tmpa.set(MPI::COMM_WORLD.Get_rank());
     VtkWriter::writeFile(&tmpa, oss.str());
   }

+
+  bool ParallelDebug::followThisBound(int rankElIndex, int neighElIndex)
+  {
+    FUNCNAME("ParallelDebug::followThisBound()");
+
+    int el0 = std::min(rankElIndex, neighElIndex);
+    int el1 = std::max(rankElIndex, neighElIndex);
+
+    vector<int> els;
+    Parameters::get("parallel->debug->follow boundary", els);
+
+    if (els.size() != 2)
+      return false;
+
+    return (el0 == els[0] && el1 == els[1]);
+  }
+
+
+  void ParallelDebug::followBoundary(MeshDistributor &pdb)
+  {
+    FUNCNAME("ParallelDebug::followBoundary()");
+
+    for (InteriorBoundary::iterator it(pdb.myIntBoundary); !it.end(); ++it)
+      if (followThisBound(it->rankObj.elIndex, it->neighObj.elIndex))
+        debug::writeLocalElementDofs(pdb.mpiRank,
+                                     it->rankObj.elIndex,
+                                     pdb.feSpace);
+
+    for (InteriorBoundary::iterator it(pdb.otherIntBoundary); !it.end(); ++it)
+      if (followThisBound(it->rankObj.elIndex, it->neighObj.elIndex))
+        debug::writeLocalElementDofs(pdb.mpiRank,
+                                     it->rankObj.elIndex,
+                                     pdb.feSpace);
+  }
+
+
+  void ParallelDebug::followBoundary(Mesh *mesh,
+                                     AtomicBoundary &bound,
+                                     MeshStructure &code)
+  {
+    FUNCNAME("ParallelDebug::followBoundary()");
+
+    if (mesh->getDim() != bound.rankObj.subObj)
+      return;
+
+    if (!followThisBound(bound.rankObj.elIndex, bound.neighObj.elIndex))
+      return;
+
+    MSG("Mesh structure code of bound %d/%d <-> %d/%d: %s\n",
+        bound.rankObj.elIndex, mesh->getDim(),
+        bound.neighObj.elIndex, mesh->getDim(),
+        code.toStr().c_str());
+  }
 }
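The new followThisBound() helper normalises the two element indices with min/max, so the configured pair matches regardless of which side of the boundary happens to be the rank-local element. A small sketch of the same test, with the pair hard-coded instead of being read from the init file:

  #include <algorithm>
  #include <cstdio>
  #include <vector>

  bool followThisBound(int rankElIndex, int neighElIndex, const std::vector<int>& els)
  {
    if (els.size() != 2)
      return false;

    int el0 = std::min(rankElIndex, neighElIndex);
    int el1 = std::max(rankElIndex, neighElIndex);
    return (el0 == els[0] && el1 == els[1]);
  }

  int main()
  {
    std::vector<int> els = {3, 17};                     // pair of macro element indices to trace
    std::printf("%d\n", followThisBound(17, 3, els));   // 1: same pair, order swapped
    std::printf("%d\n", followThisBound(3, 18, els));   // 0: different pair
  }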
AMDiS/src/parallel/ParallelDebug.h

@@ -160,6 +160,15 @@ namespace AMDiS {
     static void writePartitioningFile(std::string filename,
                                       int counter,
                                       FiniteElemSpace *feSpace);

+    static bool followThisBound(int rankElIndex, int neighElIndex);
+
+    static void followBoundary(MeshDistributor &pdb);
+
+    static void followBoundary(Mesh *mesh,
+                               AtomicBoundary &bound,
+                               MeshStructure &code);
   };
 }
 // namespace AMDiS
AMDiS/src/parallel/PetscMultigridPrecon.cc (new file, 0 → 100644)

//
// Software License for AMDiS
//
// Copyright (c) 2010 Dresden University of Technology
// All rights reserved.
// Authors: Simon Vey, Thomas Witkowski et al.
//
// This file is part of AMDiS
//
// See also license.opensource.txt in the distribution.


#include "parallel/PetscMultigridPrecon.h"

namespace AMDiS {

#ifdef HAVE_PETSC_DEV

  using namespace std;

  PetscErrorCode multigridComputeRhs(DM da, Vec x, Vec b)
  {
  }


  PetscErrorCode multigridComputeMatrix(DM da, Vec x, Mat J, Mat jac, MatStructure *str)
  {
  }


  PetscMultigridPrecon::PetscMultigridPrecon()
  {}


  void PetscMultigridPrecon::init(KSP &ksp)
  {
    int globalDofX = 100;
    int globalDofY = 100;
    int nProc = sqrt(MPI::COMM_WORLD.Get_rank());
    int nDofsPerNode = 1000;

    DMDACreate2d(PETSC_COMM_WORLD,
                 DMDA_BOUNDARY_NONE, DMDA_BOUNDARY_NONE,
                 DMDA_STENCIL_STAR,
                 globalDofX, globalDofY,