Skip to content
GitLab
Projects
Groups
Snippets
Help
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in
Toggle navigation
A
amdis
Project overview
Project overview
Details
Activity
Releases
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Issues
0
Issues
0
List
Boards
Labels
Service Desk
Milestones
Merge Requests
0
Merge Requests
0
CI / CD
CI / CD
Pipelines
Jobs
Schedules
Operations
Operations
Incidents
Environments
Packages & Registries
Packages & Registries
Container Registry
Analytics
Analytics
CI / CD
Repository
Value Stream
Wiki
Wiki
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Create a new issue
Jobs
Commits
Issue Boards
Open sidebar
Backofen, Rainer
amdis
Commits
2d5c10bf
Commit
2d5c10bf
authored
Feb 06, 2012
by
Thomas Witkowski
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
Starting point to make FETI-DP work with mixed finite elements.
parent
988aefe0
Changes
5
Hide whitespace changes
Inline
Side-by-side
Showing
5 changed files
with
443 additions
and
260 deletions
+443
-260
AMDiS/src/parallel/FeSpaceMapping.h
AMDiS/src/parallel/FeSpaceMapping.h
+158
-0
AMDiS/src/parallel/PetscSolver.h
AMDiS/src/parallel/PetscSolver.h
+11
-2
AMDiS/src/parallel/PetscSolverFeti.cc
AMDiS/src/parallel/PetscSolverFeti.cc
+142
-149
AMDiS/src/parallel/PetscSolverFeti.h
AMDiS/src/parallel/PetscSolverFeti.h
+11
-109
AMDiS/src/parallel/PetscSolverFetiStructs.h
AMDiS/src/parallel/PetscSolverFetiStructs.h
+121
-0
No files found.
AMDiS/src/parallel/FeSpaceMapping.h
0 → 100644
View file @
2d5c10bf
// ============================================================================
// == ==
// == AMDiS - Adaptive multidimensional simulations ==
// == ==
// == http://www.amdis-fem.org ==
// == ==
// ============================================================================
//
// Software License for AMDiS
//
// Copyright (c) 2010 Dresden University of Technology
// All rights reserved.
// Authors: Simon Vey, Thomas Witkowski et al.
//
// This file is part of AMDiS
//
// See also license.opensource.txt in the distribution.
/** \file FeSpaceMapping.h */
#include <map>
#include "parallel/MpiHelper.h"
#include "parallel/ParallelTypes.h"
#ifndef AMDIS_FE_SPACE_MAPPING_H
#define AMDIS_FE_SPACE_MAPPING_H
namespace
AMDiS
{
using
namespace
std
;
/// Maps local DOF indices to global DOF indices for one FE space, and
/// tracks the rank-local count, overall count, and the rank's starting
/// offset in the global numbering.
class GlobalDofMap
{
public:
  /// Constructor. Stores the MPI communicator used later by update();
  /// all counters start at zero.
  GlobalDofMap(MPI::Intracomm* m)
    : mpiComm(m),
      nRankDofs(0),
      nOverallDofs(0),
      rStartDofs(0)
  {}

  /// Resets the mapping and all counters to the empty state.
  void clear()
  {
    dofMap.clear();
    nRankDofs = 0;
    nOverallDofs = 0;
    rStartDofs = 0;
  }

  /// Returns the mapped (global) index of DOF d. In debug mode it is an
  /// error to query a DOF that has not been inserted.
  DegreeOfFreedom operator[](DegreeOfFreedom d)
  {
    TEST_EXIT_DBG(dofMap.count(d))("Should not happen!\n");

    return dofMap[d];
  }

  /// Inserts a DOF that is owned by this rank. If dof1 is omitted (or
  /// negative), the DOF is numbered consecutively with the rank-local
  /// counter; otherwise dof1 is used as the mapped value. In both cases
  /// nRankDofs is incremented (the DOF counts as rank-owned either way).
  void insertRankDof(DegreeOfFreedom dof0, DegreeOfFreedom dof1 = -1)
  {
    FUNCNAME("GlobalDofMap::insertRankDof()");

    TEST_EXIT_DBG(dofMap.count(dof0) == 0)("Should not happen!\n");

    dofMap[dof0] = (dof1 >= 0 ? dof1 : nRankDofs);
    nRankDofs++;
  }

  /// Inserts a mapping dof0 -> dof1 for a DOF NOT owned by this rank
  /// (nRankDofs is not changed). Double insertion is a debug error.
  void insert(DegreeOfFreedom dof0, DegreeOfFreedom dof1)
  {
    FUNCNAME("GlobalDofMap::insert()");

    TEST_EXIT_DBG(dofMap.count(dof0) == 0)("Should not happen!\n");

    dofMap[dof0] = dof1;
  }

  /// Returns true iff a mapping has been inserted for the given DOF.
  bool isSet(DegreeOfFreedom dof)
  {
    return static_cast<bool>(dofMap.count(dof));
  }

  /// Number of DOFs stored in the mapping (rank-owned plus foreign).
  unsigned int size()
  {
    return dofMap.size();
  }

  /// Direct access to the underlying local->global map.
  DofMapping& getMap()
  {
    return dofMap;
  }

  /// Computes the global numbering: via mpi::getDofNumbering the overall
  /// DOF count and this rank's displacement (rStartDofs) are determined
  /// from nRankDofs. If add is true, rStartDofs is added to every mapped
  /// value so the map holds global indices afterwards.
  void update(bool add = true)
  {
    nOverallDofs = 0;
    rStartDofs = 0;
    mpi::getDofNumbering(*mpiComm, nRankDofs, rStartDofs, nOverallDofs);

    if (add)
      addOffset(rStartDofs);
  }

  /// Adds a constant offset to every mapped value.
  void addOffset(int offset)
  {
    for (DofMapping::iterator it = dofMap.begin(); it != dofMap.end(); ++it)
      it->second += offset;
  }

private:
  /// Communicator used for the collective numbering in update().
  MPI::Intracomm* mpiComm;

  /// Mapping from local DOF indices to mapped (local or global) indices.
  DofMapping dofMap;

public:
  /// Number of rank-owned DOFs, overall number of DOFs across all ranks,
  /// and this rank's start offset in the global numbering.
  int nRankDofs, nOverallDofs, rStartDofs;
};
/// Associates one data object of type T (e.g. a GlobalDofMap) with each
/// registered finite element space.
template<typename T>
class FeSpaceData
{
public:
  FeSpaceData() {}

  /// Sets the MPI communicator that is handed to newly created T objects
  /// in addFeSpace(). Must be called before the first addFeSpace();
  /// otherwise an uninitialized pointer is passed on (no check here).
  void setMpiComm(MPI::Intracomm* m)
  {
    mpiComm = m;
  }

  /// Returns the data object for the given FE space. In debug mode it is
  /// an error to access an FE space that was never added.
  T& operator[](const FiniteElemSpace* feSpace)
  {
    FUNCNAME("FeSpaceData::operator[]()");

    TEST_EXIT_DBG(data.count(feSpace))("Should not happen!\n");

    return data.find(feSpace)->second;
  }

  /// Registers an FE space: if it is already known, its data object is
  /// cleared (reset for reuse); otherwise a new T is constructed with the
  /// stored communicator.
  void addFeSpace(const FiniteElemSpace* feSpace)
  {
    FUNCNAME("FeSpaceData::addFeSpace()");

    if (data.count(feSpace))
      data.find(feSpace)->second.clear();
    else
      data.insert(make_pair(feSpace, T(mpiComm)));
  }

private:
  /// Communicator forwarded to each newly constructed T.
  MPI::Intracomm* mpiComm;

  /// Per-FE-space data objects.
  map<const FiniteElemSpace*, T> data;
};
}
#endif
AMDiS/src/parallel/PetscSolver.h
View file @
2d5c10bf
...
...
@@ -57,6 +57,7 @@ namespace AMDiS {
{
meshDistributor
=
m
;
mpiRank
=
meshDistributor
->
getMpiRank
();
mpiComm
=
&
(
meshDistributor
->
getMpiComm
());
}
/** \brief
...
...
@@ -85,9 +86,15 @@ namespace AMDiS {
return
0
;
}
KSP
getSolver
()
{
return
solver
;
}
KSP
getSolver
()
{
return
solver
;
}
PC
getPc
()
{
return
pc
;
}
PC
getPc
()
{
return
pc
;
}
protected:
void
printSolutionInfo
(
AdaptInfo
*
adaptInfo
,
...
...
@@ -123,6 +130,8 @@ namespace AMDiS {
int
mpiRank
;
MPI
::
Intracomm
*
mpiComm
;
/// Petsc's matrix structure.
Mat
petscMatrix
;
...
...
AMDiS/src/parallel/PetscSolverFeti.cc
View file @
2d5c10bf
...
...
@@ -11,6 +11,7 @@
#include "parallel/PetscSolverFeti.h"
#include "parallel/PetscSolverFetiStructs.h"
#include "parallel/StdMpi.h"
#include "parallel/MpiHelper.h"
#include "io/VtkWriter.h"
...
...
@@ -248,32 +249,16 @@ namespace AMDiS {
// === Calculate the number of primals that are owned by the rank and ===
// === create local indices of the primals starting at zero. ===
globalPrimalIndex
.
clear
();
nRankPrimals
=
0
;
primalDofMap
.
addFeSpace
(
feSpace
);
for
(
DofIndexSet
::
iterator
it
=
primals
.
begin
();
it
!=
primals
.
end
();
++
it
)
if
(
meshDistributor
->
getIsRankDof
(
feSpace
,
*
it
))
{
globalPrimalIndex
[
*
it
]
=
nRankPrimals
;
nRankPrimals
++
;
}
// === Get overall number of primals and rank's displacement in the ===
// === numbering of the primals. ===
nOverallPrimals
=
0
;
rStartPrimals
=
0
;
mpi
::
getDofNumbering
(
meshDistributor
->
getMpiComm
(),
nRankPrimals
,
rStartPrimals
,
nOverallPrimals
);
// === Create global primal index for all primals. ===
if
(
meshDistributor
->
getIsRankDof
(
feSpace
,
*
it
))
primalDofMap
[
feSpace
].
insertRankDof
(
*
it
);
for
(
DofMapping
::
iterator
it
=
globalPrimalIndex
.
begin
();
it
!=
globalPrimalIndex
.
end
();
++
it
)
it
->
second
+=
rStartPrimals
;
primalDofMap
[
feSpace
].
update
();
MSG
(
"nRankPrimals = %d nOverallPrimals = %d
\n
"
,
nRankPrimals
,
nOverallPrimals
);
primalDofMap
[
feSpace
].
nRankDofs
,
primalDofMap
[
feSpace
].
nOverallDofs
);
// === Communicate primal's global index from ranks that own the ===
...
...
@@ -286,8 +271,10 @@ namespace AMDiS {
for
(
DofComm
::
Iterator
it
(
meshDistributor
->
getSendDofs
(),
feSpace
);
!
it
.
end
();
it
.
nextRank
())
for
(;
!
it
.
endDofIter
();
it
.
nextDof
())
if
(
globalPrimalIndex
.
count
(
it
.
getDofIndex
()))
stdMpi
.
getSendData
(
it
.
getRank
()).
push_back
(
globalPrimalIndex
[
it
.
getDofIndex
()]);
if
(
primalDofMap
[
feSpace
].
isSet
(
it
.
getDofIndex
()))
{
DegreeOfFreedom
d
=
primalDofMap
[
feSpace
][
it
.
getDofIndex
()];
stdMpi
.
getSendData
(
it
.
getRank
()).
push_back
(
d
);
}
stdMpi
.
updateSendDataSize
();
...
...
@@ -313,15 +300,16 @@ namespace AMDiS {
int
i
=
0
;
for
(;
!
it
.
endDofIter
();
it
.
nextDof
())
{
if
(
primals
.
count
(
it
.
getDofIndex
())
&&
meshDistributor
->
getIsRankDof
(
feSpace
,
it
.
getDofIndex
())
==
false
)
globalPrimalIndex
[
it
.
getDofIndex
()]
=
stdMpi
.
getRecvData
(
it
.
getRank
())[
i
++
];
meshDistributor
->
getIsRankDof
(
feSpace
,
it
.
getDofIndex
())
==
false
)
{
DegreeOfFreedom
d
=
stdMpi
.
getRecvData
(
it
.
getRank
())[
i
++
];
primalDofMap
[
feSpace
].
insert
(
it
.
getDofIndex
(),
d
);
}
}
}
TEST_EXIT_DBG
(
primals
.
size
()
==
globalPrimalIndex
.
size
())
TEST_EXIT_DBG
(
primals
.
size
()
==
primalDofMap
[
feSpace
]
.
size
())
(
"Number of primals %d, but number of global primals on this rank is %d!
\n
"
,
primals
.
size
(),
globalPrimalIndex
.
size
());
primals
.
size
(),
primalDofMap
[
feSpace
]
.
size
());
}
...
...
@@ -340,7 +328,7 @@ namespace AMDiS {
!
it
.
end
();
it
.
nextRank
())
for
(;
!
it
.
endDofIter
();
it
.
nextDof
())
{
// If DOF is not primal, i.e., its a dual node
if
(
globalPrimalIndex
.
count
(
it
.
getDofIndex
())
==
0
)
{
if
(
primalDofMap
[
feSpace
].
isSet
(
it
.
getDofIndex
())
==
false
)
{
boundaryDofRanks
[
it
.
getDofIndex
()].
insert
(
mpiRank
);
boundaryDofRanks
[
it
.
getDofIndex
()].
insert
(
it
.
getRank
());
}
...
...
@@ -355,7 +343,7 @@ namespace AMDiS {
for
(
DofComm
::
Iterator
it
(
meshDistributor
->
getSendDofs
(),
feSpace
);
!
it
.
end
();
it
.
nextRank
())
for
(;
!
it
.
endDofIter
();
it
.
nextDof
())
if
(
globalPrimalIndex
.
count
(
it
.
getDofIndex
())
==
0
)
if
(
primalDofMap
[
feSpace
].
isSet
(
it
.
getDofIndex
())
==
false
)
stdMpi
.
getSendData
(
it
.
getRank
()).
push_back
(
boundaryDofRanks
[
it
.
getDofIndex
()]);
stdMpi
.
updateSendDataSize
();
...
...
@@ -364,7 +352,7 @@ namespace AMDiS {
!
it
.
end
();
it
.
nextRank
())
{
bool
recvFromRank
=
false
;
for
(;
!
it
.
endDofIter
();
it
.
nextDof
())
{
if
(
globalPrimalIndex
.
count
(
it
.
getDofIndex
())
==
0
)
{
if
(
primalDofMap
[
feSpace
].
isSet
(
it
.
getDofIndex
())
==
false
)
{
recvFromRank
=
true
;
break
;
}
...
...
@@ -380,18 +368,17 @@ namespace AMDiS {
!
it
.
end
();
it
.
nextRank
())
{
int
i
=
0
;
for
(;
!
it
.
endDofIter
();
it
.
nextDof
())
if
(
globalPrimalIndex
.
count
(
it
.
getDofIndex
())
==
0
)
if
(
primalDofMap
[
feSpace
].
isSet
(
it
.
getDofIndex
())
==
false
)
boundaryDofRanks
[
it
.
getDofIndex
()]
=
stdMpi
.
getRecvData
(
it
.
getRank
())[
i
++
];
}
// === Create global index of the dual nodes on each rank. ===
duals
.
clear
();
globalDualIndex
.
clear
();
dualDofMap
.
addFeSpace
(
feSpace
);
int
nRankAllDofs
=
feSpace
->
getAdmin
()
->
getUsedDofs
();
nRankB
=
nRankAllDofs
-
globalPrimalIndex
.
size
();
nRankB
=
nRankAllDofs
-
primalDofMap
[
feSpace
]
.
size
();
nOverallB
=
0
;
rStartB
=
0
;
mpi
::
getDofNumbering
(
meshDistributor
->
getMpiComm
(),
...
...
@@ -400,21 +387,17 @@ namespace AMDiS {
meshDistributor
->
getAllBoundaryDofs
(
feSpace
,
allBoundaryDofs
);
int
nRankInteriorDofs
=
nRankAllDofs
-
allBoundaryDofs
.
size
();
int
nRankDuals
=
0
;
for
(
DofContainer
::
iterator
it
=
allBoundaryDofs
.
begin
();
it
!=
allBoundaryDofs
.
end
();
++
it
)
{
if
(
globalPrimalIndex
.
count
(
**
it
)
==
0
)
{
duals
.
insert
(
**
it
);
globalDualIndex
[
**
it
]
=
rStartB
+
nRankInteriorDofs
+
nRankDuals
;
nRankDuals
++
;
}
}
it
!=
allBoundaryDofs
.
end
();
++
it
)
if
(
primalDofMap
[
feSpace
].
isSet
(
**
it
)
==
false
)
dualDofMap
[
feSpace
].
insertRankDof
(
**
it
);
int
nOverallDuals
=
nRankDuals
;
mpi
::
globalAdd
(
nOverallDual
s
);
dualDofMap
[
feSpace
].
update
(
false
)
;
dualDofMap
[
feSpace
].
addOffset
(
rStartB
+
nRankInteriorDof
s
);
MSG
(
"nRankDuals = %d nOverallDuals = %d
\n
"
,
nRankDuals
,
nOverallDuals
);
dualDofMap
[
feSpace
].
nRankDofs
,
dualDofMap
[
feSpace
].
nOverallDofs
);
}
...
...
@@ -422,36 +405,27 @@ namespace AMDiS {
{
FUNCNAME
(
"PetscSolverFeti::createLagrange()"
);
const
FiniteElemSpace
*
feSpace
=
meshDistributor
->
getFeSpace
(
0
);
// === Reserve for each dual node, on the rank that owns this node, the ===
// === appropriate number of Lagrange constraints. ===
nRankLagrange
=
0
;
for
(
DofIndexSet
::
iterator
it
=
duals
.
begin
();
it
!=
duals
.
end
();
++
it
)
{
if
(
meshDistributor
->
getIsRankDof
(
feSpace
,
*
it
))
{
dofFirstLagrange
[
*
it
]
=
nRankLagrange
;
int
degree
=
boundaryDofRanks
[
*
it
].
size
();
const
FiniteElemSpace
*
feSpace
=
meshDistributor
->
getFeSpace
(
0
);
dofFirstLagrange
.
addFeSpace
(
feSpace
);
int
nRankLagrange
=
0
;
DofMapping
&
dualMap
=
dualDofMap
[
feSpace
].
getMap
();
for
(
DofMapping
::
iterator
it
=
dualMap
.
begin
();
it
!=
dualMap
.
end
();
++
it
)
{
if
(
meshDistributor
->
getIsRankDof
(
feSpace
,
it
->
first
))
{
dofFirstLagrange
[
feSpace
].
insert
(
it
->
first
,
nRankLagrange
);
int
degree
=
boundaryDofRanks
[
it
->
first
].
size
();
nRankLagrange
+=
(
degree
*
(
degree
-
1
))
/
2
;
}
}
// === Get the overall number of Lagrange constraints and create the ===
// === mapping dofFirstLagrange, that defines for each dual boundary ===
// === node the first Lagrange constraint global index. ===
nOverallLagrange
=
0
;
rStartLagrange
=
0
;
mpi
::
getDofNumbering
(
meshDistributor
->
getMpiComm
(),
nRankLagrange
,
rStartLagrange
,
nOverallLagrange
);
for
(
DofIndexSet
::
iterator
it
=
duals
.
begin
();
it
!=
duals
.
end
();
++
it
)
if
(
meshDistributor
->
getIsRankDof
(
feSpace
,
*
it
))
dofFirstLagrange
[
*
it
]
+=
rStartLagrange
;
dofFirstLagrange
[
feSpace
].
nRankDofs
=
nRankLagrange
;
dofFirstLagrange
[
feSpace
].
update
();
MSG
(
"nRankLagrange = %d nOverallLagrange = %d
\n
"
,
nRankLagrange
,
nOverallLagrange
);
dofFirstLagrange
[
feSpace
].
nRankDofs
,
dofFirstLagrange
[
feSpace
].
nOverallDofs
);
// === Communicate dofFirstLagrange to all other ranks. ===
...
...
@@ -461,10 +435,11 @@ namespace AMDiS {
for
(
DofComm
::
Iterator
it
(
meshDistributor
->
getSendDofs
(),
feSpace
);
!
it
.
end
();
it
.
nextRank
())
{
for
(;
!
it
.
endDofIter
();
it
.
nextDof
())
if
(
globalPrimalIndex
.
count
(
it
.
getDofIndex
())
==
0
)
{
TEST_EXIT_DBG
(
dofFirstLagrange
.
coun
t
(
it
.
getDofIndex
()))
if
(
primalDofMap
[
feSpace
].
isSet
(
it
.
getDofIndex
())
==
false
)
{
TEST_EXIT_DBG
(
dofFirstLagrange
[
feSpace
].
isSe
t
(
it
.
getDofIndex
()))
(
"Should not happen!
\n
"
);
stdMpi
.
getSendData
(
it
.
getRank
()).
push_back
(
dofFirstLagrange
[
it
.
getDofIndex
()]);
DegreeOfFreedom
d
=
dofFirstLagrange
[
feSpace
][
it
.
getDofIndex
()];
stdMpi
.
getSendData
(
it
.
getRank
()).
push_back
(
d
);
}
}
...
...
@@ -474,7 +449,7 @@ namespace AMDiS {
!
it
.
end
();
it
.
nextRank
())
{
bool
recvData
=
false
;
for
(;
!
it
.
endDofIter
();
it
.
nextDof
())
if
(
globalPrimalIndex
.
count
(
it
.
getDofIndex
())
==
0
)
{
if
(
primalDofMap
[
feSpace
].
isSet
(
it
.
getDofIndex
())
==
false
)
{
recvData
=
true
;
break
;
}
...
...
@@ -488,10 +463,12 @@ namespace AMDiS {
for
(
DofComm
::
Iterator
it
(
meshDistributor
->
getRecvDofs
(),
feSpace
);
!
it
.
end
();
it
.
nextRank
())
{
int
counter
=
0
;
for
(;
!
it
.
endDofIter
();
it
.
nextDof
())
if
(
globalPrimalIndex
.
count
(
it
.
getDofIndex
())
==
0
)
dofFirstLagrange
[
it
.
getDofIndex
()]
=
stdMpi
.
getRecvData
(
it
.
getRank
())[
counter
++
];
for
(;
!
it
.
endDofIter
();
it
.
nextDof
())
{
if
(
primalDofMap
[
feSpace
].
isSet
(
it
.
getDofIndex
())
==
false
)
{
DegreeOfFreedom
d
=
stdMpi
.
getRecvData
(
it
.
getRank
())[
counter
++
];
dofFirstLagrange
[
feSpace
].
insert
(
it
.
getDofIndex
(),
d
);
}
}
}
}
...
...
@@ -509,9 +486,10 @@ namespace AMDiS {
// === without defining a correct index. ===
for
(
int
i
=
0
;
i
<
admin
->
getUsedSize
();
i
++
)
if
(
admin
->
isDofFree
(
i
)
==
false
&&
globalPrimalIndex
.
count
(
i
)
==
0
)
if
(
duals
.
count
(
i
)
==
0
&&
globalPrimalIndex
.
count
(
i
)
==
0
)
localIndexB
[
i
]
=
-
1
;
if
(
admin
->
isDofFree
(
i
)
==
false
&&
primalDofMap
[
feSpace
].
isSet
(
i
)
==
false
&&
dualDofMap
[
feSpace
].
isSet
(
i
)
==
false
)
localIndexB
[
i
]
=
-
1
;
// === Get correct index for all interior nodes. ===
...
...
@@ -522,18 +500,20 @@ namespace AMDiS {
it
->
second
=
nLocalInterior
;
nLocalInterior
++
;
}
nLocalDuals
=
dual
s
.
size
();
nLocalDuals
=
dual
DofMap
[
feSpace
]
.
size
();
TEST_EXIT_DBG
(
nLocalInterior
+
globalPrimalIndex
.
size
()
+
duals
.
size
()
==
TEST_EXIT_DBG
(
nLocalInterior
+
primalDofMap
[
feSpace
].
size
()
+
dualDofMap
[
feSpace
].
size
()
==
static_cast
<
unsigned
int
>
(
admin
->
getUsedDofs
()))
(
"Should not happen!
\n
"
);
// === And finally, add the global indicies of all dual nodes. ===
for
(
Dof
IndexSet
::
iterator
it
=
duals
.
begin
();
it
!=
dual
s
.
end
();
++
it
)
localIndexB
[
*
it
]
=
globalDualIndex
[
*
it
]
-
rStartB
;
for
(
Dof
Mapping
::
iterator
it
=
dualDofMap
[
feSpace
].
getMap
()
.
begin
();
it
!=
dual
DofMap
[
feSpace
].
getMap
()
.
end
();
++
it
)
localIndexB
[
it
->
first
]
=
it
->
second
-
rStartB
;
}
...
...
@@ -541,12 +521,14 @@ namespace AMDiS {
{
FUNCNAME
(
"PetscSolverFeti::createMatLagrange()"
);
const
FiniteElemSpace
*
feSpace
=
meshDistributor
->
getFeSpace
(
0
);
// === Create distributed matrix for Lagrange constraints. ===
MatCreateMPIAIJ
(
PETSC_COMM_WORLD
,
nRankLagrange
*
nComponents
,
dofFirstLagrange
[
feSpace
].
nRankDofs
*
nComponents
,
nRankB
*
nComponents
,
nOverallLagrange
*
nComponents
,
dofFirstLagrange
[
feSpace
].
nOverallDofs
*
nComponents
,
nOverallB
*
nComponents
,
2
,
PETSC_NULL
,
2
,
PETSC_NULL
,
&
mat_lagrange
);
...
...
@@ -558,27 +540,28 @@ namespace AMDiS {
// === m == r, than the rank sets -1.0 for the corresponding ===
// === constraint. ===
for
(
DofIndexSet
::
iterator
it
=
duals
.
begin
();
it
!=
duals
.
end
();
++
it
)
{
TEST_EXIT_DBG
(
dofFirstLagrange
.
count
(
*
it
))(
"Should not happen!
\n
"
);
TEST_EXIT_DBG
(
boundaryDofRanks
.
count
(
*
it
))(
"Should not happen!
\n
"
);
DofMapping
&
dualMap
=
dualDofMap
[
feSpace
].
getMap
();
for
(
DofMapping
::
iterator
it
=
dualMap
.
begin
();
it
!=
dualMap
.
end
();
++
it
)
{
TEST_EXIT_DBG
(
dofFirstLagrange
[
feSpace
].
isSet
(
it
->
first
))
(
"Should not happen!
\n
"
);
TEST_EXIT_DBG
(
boundaryDofRanks
.
count
(
it
->
first
))
(
"Should not happen!
\n
"
);
// Global index of the first Lagrange constriant for this node.
int
index
=
dofFirstLagrange
[
*
i
t
];
int
index
=
dofFirstLagrange
[
feSpace
][
it
->
firs
t
];
// Copy set of all ranks that contain this dual node.
vector
<
int
>
W
(
boundaryDofRanks
[
*
it
].
begin
(),
boundaryDofRanks
[
*
it
].
end
());
vector
<
int
>
W
(
boundaryDofRanks
[
it
->
first
].
begin
(),
boundaryDofRanks
[
it
->
first
].
end
());
// Number of ranks that contain this dual node.
int
degree
=
W
.
size
();
TEST_EXIT_DBG
(
globalDualIndex
.
count
(
*
it
))(
"Should not happen!
\n
"
);
int
dualCol
=
globalDualIndex
[
*
it
];
for
(
int
i
=
0
;
i
<
degree
;
i
++
)
{
for
(
int
j
=
i
+
1
;
j
<
degree
;
j
++
)
{
if
(
W
[
i
]
==
mpiRank
||
W
[
j
]
==
mpiRank
)
{
// Set the constraint for all components of the system.
for
(
int
k
=
0
;
k
<
nComponents
;
k
++
)
{
int
rowIndex
=
index
*
nComponents
+
k
;
int
colIndex
=
dualCol
*
nComponents
+
k
;
int
colIndex
=
it
->
second
*
nComponents
+
k
;
double
value
=
(
W
[
i
]
==
mpiRank
?
1.0
:
-
1.0
);
MatSetValue
(
mat_lagrange
,
rowIndex
,
colIndex
,
value
,
INSERT_VALUES
);
...
...
@@ -599,6 +582,8 @@ namespace AMDiS {
{
FUNCNAME
(
"PetscSolverFeti::createSchurPrimal()"
);
const
FiniteElemSpace
*
feSpace
=
meshDistributor
->
getFeSpace
(
0
);
if
(
schurPrimalSolver
==
0
)
{
MSG
(
"Create iterative schur primal solver!
\n
"
);
...
...
@@ -611,12 +596,12 @@ namespace AMDiS {
nRankB
*
nComponents
,
nOverallB
*
nComponents
,
&
(
schurPrimalData
.
tmp_vec_b
));
VecCreateMPI
(
PETSC_COMM_WORLD
,
nRankPrimals
*
nComponents
,
nOverallPrimal
s
*
nComponents
,
primalDofMap
[
feSpace
].
nRankDofs
*
nComponents
,
primalDofMap
[
feSpace
].
nOverallDof
s
*
nComponents
,
&
(
schurPrimalData
.
tmp_vec_primal
));
MatCreateShell
(
PETSC_COMM_WORLD
,
nRankPrimals
*
nComponents
,
nRankPrimal
s
*
nComponents
,
nOverallPrimals
*
nComponents
,
nOverallPrimal
s
*
nComponents
,
primalDofMap
[
feSpace
].
nRankDofs
*
nComponents
,
primalDofMap
[
feSpace
].
nRankDof
s
*
nComponents
,
primalDofMap
[
feSpace
].
nOverallDofs
*
nComponents
,
primalDofMap
[
feSpace
].
nOverallDof
s
*
nComponents
,
&
schurPrimalData
,
&
mat_schur_primal
);
MatShellSetOperation
(
mat_schur_primal
,
MATOP_MULT
,
...
...
@@ -633,8 +618,8 @@ namespace AMDiS {
double
wtime
=
MPI
::
Wtime
();
int
nRowsRankPrimal
=
nRankPrimal
s
*
nComponents
;
int
nRowsOverallPrimal
=
nOverallPrimal
s
*
nComponents
;
int
nRowsRankPrimal
=
primalDofMap
[
feSpace
].
nRankDof
s
*
nComponents
;
int
nRowsOverallPrimal
=
primalDofMap
[
feSpace
].
nOverallDof
s
*
nComponents
;
int
nRowsRankB
=
nRankB
*
nComponents
;
int
nRowsOverallB
=
nOverallB
*
nComponents
;
...
...
@@ -649,7 +634,6 @@ namespace AMDiS {
const
PetscInt
*
cols
;
const
PetscScalar
*
values
;
int
nLocalPrimals
=
globalPrimalIndex
.
size
()
*
nComponents
;
map
<
int
,
vector
<
pair
<
int
,
double
>
>
>
mat_b_primal_cols
;
for
(
int
i
=
0
;
i
<
nRankB
*
nComponents
;
i
++
)
{
...
...
@@ -667,7 +651,7 @@ namespace AMDiS {
mpi
::
globalMax
(
maxLocalPrimal
);
TEST_EXIT
(
mat_b_primal_cols
.
size
()
==
globalPrimalIndex
.
size
()
*
nComponents
)(
"Should not happen!
\n
"
);
primalDofMap
[
feSpace
]
.
size
()
*
nComponents
)(
"Should not happen!
\n
"
);
for
(
map
<
int
,
vector
<
pair
<
int
,
double
>
>
>::
iterator
it
=
mat_b_primal_cols
.
begin
();
it
!=
mat_b_primal_cols
.
end
();
++
it
)
{
Vec
tmpVec
;
...
...
@@ -744,6 +728,8 @@ namespace AMDiS {
{
FUNCNAME
(
"PetscSolverFeti::createFetiKsp()"
);
const
FiniteElemSpace
*
feSpace
=
meshDistributor
->
getFeSpace
(
0
);
// === Create FETI-DP solver object. ===
fetiData
.
mat_primal_b
=
&
mat_primal_b
;
...
...
@@ -756,15 +742,18 @@ namespace AMDiS {
nRankB
*
nComponents
,
nOverallB
*
nComponents
,
&
(
fetiData
.
tmp_vec_b
));
VecCreateMPI
(
PETSC_COMM_WORLD
,
nRankLagrange
*
nComponents
,
nOverallLagrange
*
nComponents
,
dofFirstLagrange
[
feSpace
].
nRankDofs
*
nComponents
,
dofFirstLagrange
[
feSpace
].
nOverallDofs
*
nComponents
,
&
(
fetiData
.
tmp_vec_lagrange
));
VecCreateMPI
(
PETSC_COMM_WORLD
,
nRankPrimals
*
nComponents
,
nOverallPrimal
s
*
nComponents
,
primalDofMap
[
feSpace
].
nRankDofs
*
nComponents
,
primalDofMap
[
feSpace
].
nOverallDof
s
*
nComponents
,
&
(
fetiData
.
tmp_vec_primal
));