Project: Aland, Sebastian / amdis

Commit d0041fed, authored Oct 27, 2009 by Thomas Witkowski

    First use of StdMpi to simplify parallelization code.

Parent: 90f46d51
Changes: 5 files
AMDiS/src/ParallelDomainBase.cc
@@ -17,6 +17,7 @@
 #include "StandardProblemIteration.h"
 #include "ElementFileWriter.h"
 #include "VertexVector.h"
+#include "StdMpi.h"
 #include "petscksp.h"
@@ -131,7 +132,7 @@ namespace AMDiS {
     // === Global refinements. ===
 
     int globalRefinement = 0;
-    GET_PARAMETER(0, "testgr", "%d", &globalRefinement);
+    GET_PARAMETER(0, mesh->getName() + "->global refinements", "%d", &globalRefinement);
 
     if (globalRefinement > 0) {
       refinementManager->globalRefine(mesh, globalRefinement);
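The effect of this change is that the refinement count is no longer read from the ad-hoc "testgr" key but from a key derived from the mesh name. Assuming the usual key: value init-file syntax and a mesh registered under the hypothetical name userMesh, the corresponding init-file entry would look like:

    userMesh->global refinements: 2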
@@ -781,6 +782,30 @@ namespace AMDiS {
   void ParallelDomainBase::synchVectors(SystemVector &vec)
   {
+#if 0
+    StdMpi<std::vector<double>, std::vector<double> > stdMpi(mpiComm);
+    stdMpi.prepareCommunication(false);
+
+    for (RankToDofContainer::iterator sendIt = sendDofs.begin();
+         sendIt != sendDofs.end(); ++sendIt, i++) {
+      std::vector<double> dofs;
+      int nSendDOFs = sendIt->second.size();
+      dofs.reserve(nComponents * sendIt->second.size());
+
+      for (int j = 0; j < nComponents; j++) {
+        DOFVector<double> *dofvec = vec.getDOFVector(j);
+        for (int k = 0; k < nSendDOFs; k++)
+          dofs.push_back((*dofvec)[*((sendIt->second)[k])]);
+      }
+
+      stdMpi.send(sendIt->first, dofs);
+    }
+
+    stdMpi.startCommunication();
+#endif
+
+#if 1
     std::vector<double*> sendBuffers(sendDofs.size());
     std::vector<double*> recvBuffers(recvDofs.size());
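The disabled #if 0 block only drafts the send side of a StdMpi-based synchVectors; the manual buffer code under #if 1 remains active. For orientation, a possible completion of the receive and unpack side, mirroring the component-major packing order of the draft, might look like the following. This is a sketch only: it assumes StdMpi overloads for std::vector<double> payloads, which this commit does not yet provide.

    // Hypothetical receive side for the StdMpi draft (not in this commit).
    for (RankToDofContainer::iterator recvIt = recvDofs.begin();
         recvIt != recvDofs.end(); ++recvIt)
      stdMpi.recv(recvIt->first, nComponents * recvIt->second.size());

    stdMpi.startCommunication();

    // Unpack in the same component-major order used when packing.
    for (RankToDofContainer::iterator recvIt = recvDofs.begin();
         recvIt != recvDofs.end(); ++recvIt) {
      std::vector<double> &dofs = stdMpi.getRecvData()[recvIt->first];
      int nRecvDOFs = recvIt->second.size();
      for (int j = 0; j < nComponents; j++) {
        DOFVector<double> *dofvec = vec.getDOFVector(j);
        for (int k = 0; k < nRecvDOFs; k++)
          (*dofvec)[*((recvIt->second)[k])] = dofs[j * nRecvDOFs + k];
      }
    }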
@@ -834,6 +859,7 @@ namespace AMDiS {
     for (int i = 0; i < static_cast<int>(sendBuffers.size()); i++)
       delete [] sendBuffers[i];
+#endif
   }
@@ -1268,7 +1294,7 @@ namespace AMDiS {
     // Stores to each rank a map from DOF pointers (which are all owned by the rank
     // and lie on an interior boundary) to new global DOF indices.
     std::map<int, DofIndexMap> sendNewDofs;
 
     // Maps to each rank the number of new DOF indices this rank will receive from.
     // All these DOFs are at an interior boundary on this rank, but are owned by
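For readers without the surrounding headers: judging from the buffer code below and the helper signatures in the new StdMpi.h, the two map types are presumably defined along these lines (an assumption, since the typedefs themselves are not part of this diff):

    // Presumed typedefs (not shown in this commit):
    // DofIndexMap: local DOF pointer -> new global DOF index
    // DofMapping:  old DOF index     -> new global DOF index
    typedef std::map<const DegreeOfFreedom*, DegreeOfFreedom> DofIndexMap;
    typedef std::map<DegreeOfFreedom, DegreeOfFreedom> DofMapping;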
@@ -1304,69 +1330,24 @@ namespace AMDiS {
     // === Send and receive the dof indices at boundary. ===
 
-    std::vector<int*> sendBuffers(sendNewDofs.size()), recvBuffers(recvNewDofs.size());
-
     sendDofs.clear();
     recvDofs.clear();
 
-    MPI::Request request[sendNewDofs.size() + recvNewDofs.size()];
-    int requestCounter = 0;
+#if 0
+    StlMpi stlMpi(mpiComm);
+    stlMpi.prepareCommunication(false);
+    std::map<
+    for (std::map<int, DofIndexMap >::iterator sendIt = sendNewDofs.begin();
+         sendIt != sendNewDofs.end(); ++sendIt, i++)
+      stlMpi.send(it->second, it->first);
+    for (std::map<int, int>::iterator recvIt = recvNewDofs.begin();
+         recvIt != recvNewDofs.end(); ++recvIt, i++)
+      stlMpi.recv(it->first, it->second * 2);
+    stlMpi.startCommunication();
+#endif
 
-    i = 0;
     for (std::map<int, DofIndexMap>::iterator sendIt = sendNewDofs.begin();
-         sendIt != sendNewDofs.end(); ++sendIt, i++) {
-      int nSendDofs = sendIt->second.size() * 2;
-      sendBuffers[i] = new int[nSendDofs];
-      int c = 0;
-
+         sendIt != sendNewDofs.end(); ++sendIt)
       for (DofIndexMap::iterator dofIt = sendIt->second.begin();
-           dofIt != sendIt->second.end(); ++dofIt) {
-        sendBuffers[i][c++] = *(dofIt->first);
-        sendBuffers[i][c++] = dofIt->second;
-
+           dofIt != sendIt->second.end(); ++dofIt)
         sendDofs[sendIt->first].push_back(dofIt->first);
-      }
-
-      request[requestCounter++] = mpiComm.Isend(sendBuffers[i], nSendDofs, MPI_INT, sendIt->first, 0);
-    }
 
-    i = 0;
+    StdMpi<DofIndexMap, DofMapping> stdMpi(mpiComm);
+    stdMpi.prepareCommunication(false);
+    stdMpi.send(sendNewDofs);
     for (std::map<int, int>::iterator recvIt = recvNewDofs.begin();
-         recvIt != recvNewDofs.end(); ++recvIt, i++) {
-      int nRecvDOFs = recvIt->second * 2;
-      recvBuffers[i] = new int[nRecvDOFs];
-
-      request[requestCounter++] = mpiComm.Irecv(recvBuffers[i], nRecvDOFs, MPI_INT, recvIt->first, 0);
-    }
-
-    MPI::Request::Waitall(requestCounter, request);
-
-    // === Delete send buffers. ===
-
-    for (int j = 0; j < static_cast<int>(sendBuffers.size()); j++)
-      delete [] sendBuffers[j];
+         recvIt != recvNewDofs.end(); ++recvIt, i++)
+      stdMpi.recv(recvIt->first, recvIt->second * 2);
+    stdMpi.startCommunication();
 
+    std::map<int, DofMapping> &dofMap = stdMpi.getRecvData();
 
     // === Change dof indices at boundary from other ranks. ===
 
     // Within this small data structure we track which dof index was already changed.
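Note the factor of two in stdMpi.recv(recvIt->first, recvIt->second * 2): StdMpi flattens each map entry into two ints, the old index and the new global index. A minimal stand-alone illustration of this wire format, using plain int keys instead of DOF pointers so it compiles without AMDiS headers (hypothetical values, no MPI involved):

    #include <map>

    int main()
    {
      typedef int DegreeOfFreedom;

      std::map<DegreeOfFreedom, DegreeOfFreedom> sendMap;
      sendMap[7]  = 102;   // old index 7  -> new global index 102
      sendMap[13] = 103;   // old index 13 -> new global index 103

      int buf[4];          // intSizeOf() == 2 * sendMap.size()
      int i = 0;
      for (std::map<int, int>::iterator it = sendMap.begin();
           it != sendMap.end(); ++it) {
        buf[i++] = it->first;    // like makeIntBuf(): even slot = old index
        buf[i++] = it->second;   //                    odd slot  = new index
      }

      // Like makeFromIntBuf(): the receiver rebuilds the map from the buffer.
      std::map<DegreeOfFreedom, DegreeOfFreedom> recvMap;
      for (int j = 0; j < 4; j += 2)
        recvMap[buf[j]] = buf[j + 1];

      return recvMap == sendMap ? 0 : 1;   // exit code 0: round trip succeeded
    }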
@@ -1377,43 +1358,42 @@ namespace AMDiS {
     // rule was applied, the dof pointer is set to false in this data structure and
     // is not allowed to be changed anymore.
     std::map<const DegreeOfFreedom*, bool> dofChanged;
     recvDofs.clear();
     for (DofToRank::iterator dofIt = boundaryDofs.begin();
          dofIt != boundaryDofs.end(); ++dofIt)
       dofChanged[dofIt->first] = false;
 
-    i = 0;
-    for (std::map<int, int>::iterator recvIt = recvNewDofs.begin();
-         recvIt != recvNewDofs.end(); ++recvIt, i++) {
-      for (int j = 0; j < recvIt->second; j++) {
-        DegreeOfFreedom oldDof = recvBuffers[i][j * 2];
-        DegreeOfFreedom newGlobalDof = recvBuffers[i][j * 2 + 1];
+    for (std::map<int, DofMapping>::iterator rankIt = dofMap.begin();
+         rankIt != dofMap.end(); ++rankIt) {
+      for (DofMapping::iterator recvDofIt = rankIt->second.begin();
+           recvDofIt != rankIt->second.end(); ++recvDofIt) {
+        DegreeOfFreedom oldDof = recvDofIt->first;
+        DegreeOfFreedom newGlobalDof = recvDofIt->second;
 
         bool found = false;
 
         // Iterate over all boundary dofs to find the dof which index we have to change.
         for (DofToRank::iterator dofIt = boundaryDofs.begin();
              dofIt != boundaryDofs.end(); ++dofIt) {
           if (*(dofIt->first) == oldDof && !dofChanged[dofIt->first]) {
             dofChanged[dofIt->first] = true;
 
-            recvDofs[recvIt->first].push_back(dofIt->first);
+            recvDofs[rankIt->first].push_back(dofIt->first);
             rankDofsNewGlobalIndex[dofIt->first] = newGlobalDof;
             isRankDof[rankDofsNewLocalIndex[dofIt->first]] = false;
 
             found = true;
             break;
           }
         }
 
         TEST_EXIT_DBG(found)("Should not happen!\n");
       }
-
-      delete [] recvBuffers[i];
     }
   }
AMDiS/src/ProblemScal.cc
@@ -227,7 +227,7 @@ namespace AMDiS {
     SolverMatrix<DOFMatrix> solverMatrix;
     solverMatrix.setMatrix(*systemMatrix);
 
-    int iter = solver->solveSystem(solverMatrix, *solution, *rhs);
+    solver->solveSystem(solverMatrix, *solution, *rhs);
 
 #ifdef _OPENMP
     INFO(info, 8)("solution of discrete system needed %.5f seconds system time / %.5f seconds wallclock time\n",
@@ -404,7 +404,11 @@ namespace AMDiS {
     // === do global refinements ===
 
     if (initFlag.isSet(INIT_GLOBAL_REFINES)) {
       int globalRefinements = 0;
+
+#ifndef HAVE_PARALLEL_DOMAIN_AMDIS
       GET_PARAMETER(0, mesh->getName() + "->global refinements", "%d", &globalRefinements);
+#endif
 
       refinementManager->globalRefine(mesh, globalRefinements);
     }
   }
AMDiS/src/ProblemVec.cc
@@ -173,8 +173,8 @@ namespace AMDiS {
                   &serializationFilename);
     TEST_EXIT(serializationFilename != "")("no serialization file\n");
 
-    // We AMDiS is compiled for parallel computations, the deserialization is
-    // controlled by the parallel problem definition object.
+    // If AMDiS is compiled for parallel computations, the deserialization is
+    // controlled by the parallel problem object.
 #ifndef HAVE_PARALLEL_DOMAIN_AMDIS
     std::ifstream in(serializationFilename.c_str());
     deserialize(in);
@@ -184,8 +184,14 @@ namespace AMDiS {
 #endif
     }
   } else {
     int globalRefinements = 0;
 
+    // If AMDiS is compiled for parallel computations, the global refinements are
+    // ignored here. Later, each rank will add the global refinements to its
+    // private mesh.
+#ifndef HAVE_PARALLEL_DOMAIN_AMDIS
     GET_PARAMETER(0, meshes[0]->getName() + "->global refinements", "%d", &globalRefinements);
+#endif
 
     // Initialize the meshes if there is no serialization file.
     for (int i = 0; i < static_cast<int>(meshes.size()); i++) {
@@ -487,7 +493,7 @@ namespace AMDiS {
 #endif
     clock_t first = clock();
 
-    int iter = solver->solveSystem(solverMatrix, *solution, *rhs);
+    solver->solveSystem(solverMatrix, *solution, *rhs);
 
 #ifdef _OPENMP
     INFO(info, 8)("solution of discrete system needed %.5f seconds system time / %.5f seconds wallclock time\n",
@@ -846,8 +852,7 @@ namespace AMDiS {
   void ProblemVec::addMatrixOperator(Operator *op,
                                      int i, int j,
                                      double *factor,
-                                     double *estFactor,
-                                     bool fake)
+                                     double *estFactor)
   {
     FUNCNAME("ProblemVec::addMatrixOperator()");
@@ -865,36 +870,31 @@ namespace AMDiS {
     (*systemMatrix)[i][j]->addOperator(op, factor, estFactor);
 
-    if (!fake) {
-      traverseInfo.getMatrix(i, j).setAuxFESpaces(op->getAuxFESpaces());
+    traverseInfo.getMatrix(i, j).setAuxFESpaces(op->getAuxFESpaces());
 
-      for (int k = 0; k < static_cast<int>(op->getAuxFESpaces().size()); k++) {
-        if ((op->getAuxFESpaces())[k]->getMesh() != componentSpaces[i]->getMesh() ||
-            (op->getAuxFESpaces())[k]->getMesh() != componentSpaces[j]->getMesh()) {
-          op->setNeedDualTraverse(true);
-          break;
-        }
+    for (int k = 0; k < static_cast<int>(op->getAuxFESpaces().size()); k++) {
+      if ((op->getAuxFESpaces())[k]->getMesh() != componentSpaces[i]->getMesh() ||
+          (op->getAuxFESpaces())[k]->getMesh() != componentSpaces[j]->getMesh()) {
+        op->setNeedDualTraverse(true);
+        break;
       }
     }
   }
 
   void ProblemVec::addVectorOperator(Operator *op, int i,
                                      double *factor,
-                                     double *estFactor,
-                                     bool fake)
+                                     double *estFactor)
   {
     FUNCNAME("ProblemVec::addVectorOperator()");
 
     rhs->getDOFVector(i)->addOperator(op, factor, estFactor);
 
-    if (!fake) {
-      traverseInfo.getVector(i).setAuxFESpaces(op->getAuxFESpaces());
+    traverseInfo.getVector(i).setAuxFESpaces(op->getAuxFESpaces());
 
-      for (int j = 0; j < static_cast<int>(op->getAuxFESpaces().size()); j++) {
-        if ((op->getAuxFESpaces())[j]->getMesh() != componentSpaces[i]->getMesh()) {
-          op->setNeedDualTraverse(true);
-          break;
-        }
+    for (int j = 0; j < static_cast<int>(op->getAuxFESpaces().size()); j++) {
+      if ((op->getAuxFESpaces())[j]->getMesh() != componentSpaces[i]->getMesh()) {
+        op->setNeedDualTraverse(true);
+        break;
       }
     }
   }
AMDiS/src/ProblemVec.h
@@ -211,14 +211,12 @@ namespace AMDiS {
     /// Adds an Operator to \ref A.
     void addMatrixOperator(Operator *op, int i, int j,
                            double *factor = NULL,
-                           double *estFactor = NULL,
-                           bool fake = false);
+                           double *estFactor = NULL);
 
     /// Adds an Operator to \ref rhs.
     void addVectorOperator(Operator *op, int i,
                            double *factor = NULL,
-                           double *estFactor = NULL,
-                           bool fake = false);
+                           double *estFactor = NULL);
 
     /// Adds dirichlet boundary conditions.
     virtual void addDirichletBC(BoundaryType type, int row, int col,
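With the bool fake parameter gone from both declarations, call sites simply drop the last argument. A sketch of the updated usage, assuming a ProblemVec instance prob and operators matrixOp and rhsOp defined elsewhere (hypothetical names):

    // Hypothetical call sites after the signature change.
    prob.addMatrixOperator(matrixOp, 0, 0);   // row 0, column 0; factors default to NULL
    prob.addVectorOperator(rhsOp, 0);         // rhs component 0

    double weight = 1.0;
    prob.addMatrixOperator(matrixOp, 0, 1, &weight, &weight);  // explicit factors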
AMDiS/src/StdMpi.h (new file, 0 → 100644)
// ============================================================================
// ==                                                                        ==
// ==  AMDiS - Adaptive multidimensional simulations                         ==
// ==                                                                        ==
// ============================================================================
// ==                                                                        ==
// ==  TU Dresden                                                            ==
// ==                                                                        ==
// ==  Institut für Wissenschaftliches Rechnen                               ==
// ==  Zellescher Weg 12-14                                                  ==
// ==  01069 Dresden                                                         ==
// ==  germany                                                               ==
// ==                                                                        ==
// ============================================================================
// ==                                                                        ==
// ==  https://gforge.zih.tu-dresden.de/projects/amdis/                      ==
// ==                                                                        ==
// ============================================================================

/** \file StdMpi.h */

#ifndef AMDIS_STDMPI_H
#define AMDIS_STDMPI_H

#include <map>
#include <vector>   // needed for std::vector in startCommunication()
#include "mpi.h"

namespace AMDiS {

  /// Number of ints a DOF map occupies on the wire: two per entry.
  int intSizeOf(std::map<const DegreeOfFreedom*, DegreeOfFreedom> &data)
  {
    return data.size() * 2;
  }

  /// Flatten a DOF map into an int buffer: even slots hold the dereferenced
  /// DOF pointer (old index), odd slots the new global index.
  void makeIntBuf(std::map<const DegreeOfFreedom*, DegreeOfFreedom> &data,
                  int *buf)
  {
    int i = 0;
    for (std::map<const DegreeOfFreedom*, DegreeOfFreedom>::iterator it = data.begin();
         it != data.end(); ++it) {
      buf[i++] = *(it->first);
      buf[i++] = it->second;
    }
  }

  /// Rebuild an index-to-index map from a flattened int buffer.
  void makeFromIntBuf(std::map<DegreeOfFreedom, DegreeOfFreedom> &data,
                      int *buf, int bufSize)
  {
    if (bufSize == 0)
      return;

    int i = 0;
    do {
      data[buf[i]] = buf[i + 1];
      i += 2;
    } while (i < bufSize);
  }

  template<typename SendT, typename RecvT>
  class StdMpi {
  public:
    StdMpi(MPI::Intracomm &comm)
      : mpiComm(comm),
        commPrepared(false),
        exchangeDataSize(true)
    {}

    void prepareCommunication(bool b)
    {
      sendData.clear();
      recvData.clear();
      exchangeDataSize = b;
      commPrepared = true;
    }

    void send(int toRank, SendT &data)
    {
      FUNCNAME("StdMpi::send()");
      TEST_EXIT_DBG(commPrepared)("Communication is not prepared!\n");

      sendData[toRank] = data;
      sendDataSize[toRank] = intSizeOf(data);
    }

    void send(std::map<int, SendT> &data)
    {
      FUNCNAME("StdMpi::send()");
      TEST_EXIT_DBG(commPrepared)("Communication is not prepared!\n");

      for (typename std::map<int, SendT>::iterator it = data.begin();
           it != data.end(); ++it) {
        sendData[it->first] = it->second;
        sendDataSize[it->first] = intSizeOf(it->second);
      }
    }

    void recv(int fromRank, int size = -1)
    {
      FUNCNAME("StdMpi::recv()");
      TEST_EXIT_DBG(commPrepared)("Communication is not prepared!\n");

      recvDataSize[fromRank] = size;
    }

    std::map<int, RecvT>& getRecvData()
    {
      return recvData;
    }

    void startCommunication()
    {
      FUNCNAME("StdMpi::startCommunication()");
      TEST_EXIT_DBG(commPrepared)("Communication is not prepared!\n");

      MPI::Request request[sendData.size() + recvDataSize.size()];
      int requestCounter = 0;
      std::vector<int*> sendBuffers, recvBuffers;

      for (typename std::map<int, SendT>::iterator sendIt = sendData.begin();
           sendIt != sendData.end(); ++sendIt) {
        int bufferSize = intSizeOf(sendIt->second);
        int *buf = new int[bufferSize];
        makeIntBuf(sendIt->second, buf);

        request[requestCounter++] =
          mpiComm.Isend(buf, bufferSize, MPI_INT, sendIt->first, 0);

        sendBuffers.push_back(buf);
      }

      for (std::map<int, int>::iterator recvIt = recvDataSize.begin();
           recvIt != recvDataSize.end(); ++recvIt) {
        int *buf = new int[recvIt->second];

        request[requestCounter++] =
          mpiComm.Irecv(buf, recvIt->second, MPI_INT, recvIt->first, 0);

        recvBuffers.push_back(buf);
      }

      MPI::Request::Waitall(requestCounter, request);

      for (int i = 0; i < static_cast<int>(sendBuffers.size()); i++)
        delete [] sendBuffers[i];
      sendBuffers.clear();

      int i = 0;
      for (std::map<int, int>::iterator recvIt = recvDataSize.begin();
           recvIt != recvDataSize.end(); ++recvIt) {
        makeFromIntBuf(recvData[recvIt->first], recvBuffers[i], recvIt->second);
        delete [] recvBuffers[i];
        i++;
      }
    }

  protected:
    /// The MPI communicator used for all messages.
    MPI::Intracomm mpiComm;

    /// Data to be sent, keyed by destination rank.
    std::map<int, SendT> sendData;

    /// Received data, keyed by source rank.
    std::map<int, RecvT> recvData;

    /// Size (in ints) of the message for each destination rank.
    std::map<int, int> sendDataSize;

    /// Expected size (in ints) of the message from each source rank.
    std::map<int, int> recvDataSize;

    /// True after prepareCommunication() has been called.
    bool commPrepared;

    /// If true, message sizes should be exchanged before the data
    /// (stored but not yet acted on in this first version).
    bool exchangeDataSize;
  };

}

#endif
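Putting the class to work, a minimal point-to-point exchange between two ranks could look like the sketch below (hypothetical ranks and data). Note that in this first version the exchangeDataSize flag is stored but never acted on in startCommunication(), so the receiver must always know the incoming size, here two ints per map entry:

    // Sketch: rank 0 sends a DOF index mapping to rank 1.
    typedef std::map<const DegreeOfFreedom*, DegreeOfFreedom> DofIndexMap;
    typedef std::map<DegreeOfFreedom, DegreeOfFreedom> DofMapping;

    void exchangeExample(MPI::Intracomm &comm, DofIndexMap &newIndices,
                         int nExpectedEntries)
    {
      StdMpi<DofIndexMap, DofMapping> stdMpi(comm);
      stdMpi.prepareCommunication(false);

      if (comm.Get_rank() == 0)
        stdMpi.send(1, newIndices);             // message to rank 1
      else if (comm.Get_rank() == 1)
        stdMpi.recv(0, nExpectedEntries * 2);   // 2 ints per map entry

      stdMpi.startCommunication();              // Isend/Irecv + Waitall + unpack

      if (comm.Get_rank() == 1) {
        DofMapping &fromRank0 = stdMpi.getRecvData()[0];
        // fromRank0[oldIndex] now yields the new global index.
      }
    }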