Commit d089b050, authored Jun 14, 2012 by Thomas Witkowski

Upgrade to PETSc 3.3, added basic support for fieldsplit preconditioner.

Parent: 602a62d3
Changes: 10 files
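
The commit contains two kinds of changes: adjustments to the PETSc 3.3 API (most visibly the renamed parallel matrix constructor) and a first, basic field-split preconditioner driven by PETSc index sets. For orientation, here is a minimal sketch of what registering such a split with PETSc amounts to; it is not part of the commit, and the split name "v" and the index set are placeholders:

    #include <petscpc.h>

    // Minimal sketch: mark a group of matrix rows, described by an existing
    // index set "is", as one field of a PCFIELDSPLIT preconditioner.
    static void registerSplit(PC pc, IS is)
    {
      PCSetType(pc, PCFIELDSPLIT);     // or select it at runtime via -pc_type fieldsplit
      PCFieldSplitSetIS(pc, "v", is);  // "v" is an arbitrary split name
    }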
AMDiS/src/parallel/MeshLevelData.h

@@ -20,16 +20,15 @@
 /** \file MeshLevelData.h */

+#ifndef AMDIS_MESH_LEVEL_DATA_H
+#define AMDIS_MESH_LEVEL_DATA_H
+
 #include <iostream>
 #include <set>
 #include <vector>
 #include <mpi.h>
 #include "Global.h"

-#ifndef AMDIS_MESH_LEVEL_DATA_H
-#define AMDIS_MESH_LEVEL_DATA_H
-
 namespace AMDiS {

   using namespace std;
AMDiS/src/parallel/ParMetisPartitioner.cc

@@ -288,8 +288,8 @@ namespace AMDiS {
     int ncon = 1;          // one weight at each vertex!
     int nparts = mpiSize;  // number of partitions
-    vector<float> tpwgts(mpiSize);
-    float ubvec = 1.05;
+    vector<double> tpwgts(mpiSize);
+    double ubvec = 1.05;
     int options[4] = {0, 0, 15, 1}; // default options
     int edgecut = -1;
     vector<int> part(nElements);
AMDiS/src/parallel/ParMetisPartitioner.h

@@ -182,7 +182,7 @@ namespace AMDiS {
     void createPartitionMap(map<int, int>& partitionMap);

-    void setItr(float value)
+    void setItr(double value)
     {
       itr = value;
     }

@@ -195,7 +195,7 @@ namespace AMDiS {
   protected:
     ParMetisMesh *parMetisMesh;

-    float itr;
+    double itr;
   };
 }
AMDiS/src/parallel/ParallelDofMapping.cc

@@ -475,5 +475,41 @@ namespace AMDiS {
       }
     }
   }

+  void ParallelDofMapping::createIndexSet(IS &is, int firstComponent, int nComponents)
+  {
+    FUNCNAME("ParallelDofMapping::createIndexSet()");
+
+    TEST_EXIT_DBG(firstComponent + nComponents <= feSpaces.size())
+      ("Should not happen!\n");
+    TEST_EXIT_DBG(data.count(feSpaces[firstComponent]))
+      ("No data for FE space at address %p!\n", feSpaces[firstComponent]);
+
+    int firstRankDof = -1;
+    FeSpaceDofMap &feMap = data.find(feSpaces[firstComponent])->second;
+    DofMap &dofMap = feMap.getMap();
+    for (DofMap::iterator it = dofMap.begin(); it != dofMap.end(); ++it) {
+      if (feMap.isRankDof(it->first)) {
+        if (needMatIndexFromGlobal)
+          firstRankDof = it->second.global;
+        else
+          firstRankDof = it->first;
+        break;
+      }
+    }
+
+    TEST_EXIT_DBG(firstRankDof >= 0)("No rank DOF found!\n");
+
+    int firstMatIndex = dofToMatIndex.get(firstComponent, firstRankDof);
+
+    int nRankRows = 0;
+    for (int i = firstComponent; i < firstComponent + nComponents; i++)
+      nRankRows += data.find(feSpaces[firstComponent])->second.nRankDofs;
+
+    ISCreateStride(mpiComm, nRankRows, firstMatIndex, 1, &is);
+  }
 }
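
createIndexSet() assumes that the matrix indices of the requested components form one contiguous range, so a single stride-1 index set describes the whole block. A standalone sketch of the underlying PETSc call; firstMatIndex and nRows are placeholders for the values the DOF mapping computes above:

    #include <petscis.h>

    // Sketch: an IS covering the contiguous rows
    // firstMatIndex, firstMatIndex + 1, ..., firstMatIndex + nRows - 1.
    static void makeContiguousRowSet(MPI_Comm comm, PetscInt firstMatIndex,
                                     PetscInt nRows, IS *is)
    {
      ISCreateStride(comm, nRows, firstMatIndex, 1, is);  // step 1 = contiguous block
    }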
AMDiS/src/parallel/ParallelDofMapping.h

@@ -29,6 +29,8 @@
 #include "parallel/ParallelTypes.h"
 #include "parallel/StdMpi.h"
+#include <petscis.h>
+
 #ifndef AMDIS_FE_SPACE_MAPPING_H
 #define AMDIS_FE_SPACE_MAPPING_H

@@ -414,6 +416,10 @@ namespace AMDiS {
     /// Compute local and global matrix indices.
     void computeMatIndex(bool globalIndex);

+    void createIndexSet(IS &is,
+                        int firstComponent,
+                        int nComponents);
+
   protected:
     /// Insert a new FE space DOF mapping for a given FE space.
     void addFeSpace(const FiniteElemSpace* feSpace);
AMDiS/src/parallel/PetscSolverFeti.cc

@@ -688,11 +688,11 @@ namespace AMDiS {

     // === Create distributed matrix for Lagrange constraints. ===

-    MatCreateMPIAIJ(mpiCommGlobal,
-                    lagrangeMap.getRankDofs(), localDofMap.getRankDofs(),
-                    lagrangeMap.getOverallDofs(), nGlobalOverallInterior,
-                    2, PETSC_NULL, 2, PETSC_NULL,
-                    &mat_lagrange);
+    MatCreateAIJ(mpiCommGlobal,
+                 lagrangeMap.getRankDofs(), localDofMap.getRankDofs(),
+                 lagrangeMap.getOverallDofs(), nGlobalOverallInterior,
+                 2, PETSC_NULL, 2, PETSC_NULL,
+                 &mat_lagrange);

     // === Create for all duals the corresponding Lagrange constraints. On ===
     // === each rank we traverse all pairs (n, m) of ranks, with n < m,    ===

@@ -789,10 +789,10 @@ namespace AMDiS {
     int nRowsRankB = localDofMap.getRankDofs();

     Mat matBPi;
-    MatCreateMPIAIJ(mpiCommGlobal,
-                    nRowsRankB, nRowsRankPrimal,
-                    nGlobalOverallInterior, nRowsOverallPrimal,
-                    30, PETSC_NULL, 30, PETSC_NULL, &matBPi);
+    MatCreateAIJ(mpiCommGlobal,
+                 nRowsRankB, nRowsRankPrimal,
+                 nGlobalOverallInterior, nRowsOverallPrimal,
+                 30, PETSC_NULL, 30, PETSC_NULL, &matBPi);

     Mat matPrimal;
     PetscInt nCols;
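
Both hunks above, like most of the matrix-creation hunks in the solver files below, are mechanical: PETSc 3.3 renamed MatCreateMPIAIJ() to MatCreateAIJ() and kept the argument list unchanged, so every call site simply loses the "MPI" infix. For reference, a sketch of the renamed call with placeholder sizes (not taken from the commit):

    #include <petscmat.h>

    // Sketch: distributed AIJ matrix with PETSc >= 3.3; MatCreateMPIAIJ() in
    // PETSc <= 3.2 took exactly the same arguments.
    static Mat createDistributed(MPI_Comm comm, PetscInt nRankRows, PetscInt nOverallRows)
    {
      Mat A;
      MatCreateAIJ(comm,
                   nRankRows, nRankRows,        // rows/columns owned by this rank
                   nOverallRows, nOverallRows,  // global rows/columns
                   30, PETSC_NULL,              // estimated nonzeros per row, diagonal block
                   30, PETSC_NULL,              // estimated nonzeros per row, off-diagonal block
                   &A);
      return A;
    }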
AMDiS/src/parallel/PetscSolverGlobalBlockMatrix.cc

@@ -54,12 +54,12 @@ namespace AMDiS {
     for (int i = 0; i < nBlocks; i++)
       for (int j = 0; j < nBlocks; j++)
-        MatCreateMPIAIJ(mpiCommGlobal,
-                        nRankRows * blockSize[i], nRankRows * blockSize[j],
-                        nOverallRows * blockSize[i], nOverallRows * blockSize[j],
-                        30 * blockSize[i], PETSC_NULL,
-                        30 * blockSize[j], PETSC_NULL,
-                        &(nestMat[i * nBlocks + j]));
+        MatCreateAIJ(mpiCommGlobal,
+                     nRankRows * blockSize[i], nRankRows * blockSize[j],
+                     nOverallRows * blockSize[i], nOverallRows * blockSize[j],
+                     30 * blockSize[i], PETSC_NULL,
+                     30 * blockSize[j], PETSC_NULL,
+                     &(nestMat[i * nBlocks + j]));

     for (int i = 0; i < nComponents; i++)
       for (int j = 0; j < nComponents; j++)
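
PetscSolverGlobalBlockMatrix stores its sub-matrices row-major in nestMat[i * nBlocks + j], which suggests they are later combined into a single MATNEST matrix; that step is outside this hunk. A hedged sketch of the corresponding PETSc call, under that assumption:

    #include <petscmat.h>
    #include <vector>

    // Sketch (assumption, not from this commit): combine an nBlocks x nBlocks
    // row-major array of sub-matrices into one MATNEST matrix. Passing PETSC_NULL
    // for the index sets lets PETSc derive the layouts from the sub-matrices.
    static void buildNestMatrix(MPI_Comm comm, int nBlocks,
                                std::vector<Mat> &nestMat, Mat *blockMat)
    {
      MatCreateNest(comm, nBlocks, PETSC_NULL, nBlocks, PETSC_NULL,
                    &nestMat[0], blockMat);
    }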
AMDiS/src/parallel/PetscSolverGlobalMatrix.cc

@@ -70,7 +70,7 @@ namespace AMDiS {

     // === Create PETSc matrix with the computed nnz data structure. ===

-    MatCreateMPIAIJ(mpiCommGlobal, nRankRows, nRankRows,
+    MatCreateAIJ(mpiCommGlobal, nRankRows, nRankRows,
                  nOverallRows, nOverallRows,
                  0, d_nnz, 0, o_nnz, &matIntInt);

@@ -119,6 +119,8 @@ namespace AMDiS {
     KSPSetFromOptions(kspInterior);
+    PCSetFromOptions(pcInterior);
+    createFieldSplit(pcInterior);

     // Do not delete the solution vector, use it for the initial guess.
     if (!zeroStartVector)
       KSPSetInitialGuessNonzero(kspInterior, PETSC_TRUE);

@@ -140,30 +142,30 @@ namespace AMDiS {
       MatCreateSeqAIJ(mpiCommLocal, nRowsRankInterior, nRowsRankInterior,
                       60, PETSC_NULL, &matIntInt);
     } else {
-      MatCreateMPIAIJ(mpiCommLocal,
-                      nRowsRankInterior, nRowsRankInterior,
-                      nRowsOverallInterior, nRowsOverallInterior,
-                      60, PETSC_NULL, 60, PETSC_NULL, &matIntInt);
+      MatCreateAIJ(mpiCommLocal,
+                   nRowsRankInterior, nRowsRankInterior,
+                   nRowsOverallInterior, nRowsOverallInterior,
+                   60, PETSC_NULL, 60, PETSC_NULL, &matIntInt);
     }

     if (coarseSpaceMap) {
       int nRowsRankCoarse = coarseSpaceMap->getRankDofs();
       int nRowsOverallCoarse = coarseSpaceMap->getOverallDofs();

-      MatCreateMPIAIJ(mpiCommGlobal,
-                      nRowsRankCoarse, nRowsRankCoarse,
-                      nRowsOverallCoarse, nRowsOverallCoarse,
-                      60, PETSC_NULL, 60, PETSC_NULL, &matCoarseCoarse);
+      MatCreateAIJ(mpiCommGlobal,
+                   nRowsRankCoarse, nRowsRankCoarse,
+                   nRowsOverallCoarse, nRowsOverallCoarse,
+                   60, PETSC_NULL, 60, PETSC_NULL, &matCoarseCoarse);

-      MatCreateMPIAIJ(mpiCommGlobal,
-                      nRowsRankCoarse, nRowsRankInterior,
-                      nRowsOverallCoarse, nGlobalOverallInterior,
-                      60, PETSC_NULL, 60, PETSC_NULL, &matCoarseInt);
+      MatCreateAIJ(mpiCommGlobal,
+                   nRowsRankCoarse, nRowsRankInterior,
+                   nRowsOverallCoarse, nGlobalOverallInterior,
+                   60, PETSC_NULL, 60, PETSC_NULL, &matCoarseInt);

-      MatCreateMPIAIJ(mpiCommGlobal,
-                      nRowsRankInterior, nRowsRankCoarse,
-                      nGlobalOverallInterior, nRowsOverallCoarse,
-                      60, PETSC_NULL, 60, PETSC_NULL, &matIntCoarse);
+      MatCreateAIJ(mpiCommGlobal,
+                   nRowsRankInterior, nRowsRankCoarse,
+                   nGlobalOverallInterior, nRowsOverallCoarse,
+                   60, PETSC_NULL, 60, PETSC_NULL, &matIntCoarse);
     }

     // === Prepare traverse of sequentially created matrices. ===

@@ -540,6 +542,42 @@ namespace AMDiS {
   }

+  void PetscSolverGlobalMatrix::createFieldSplit(PC pc)
+  {
+    FUNCNAME("PetscSolverGlobalMatrix::createFieldSplit()");
+
+    vector<string> isNames;
+    Parameters::get("parallel->solver->is blocks", isNames);
+
+    int nBlocks = isNames.size();
+    if (nBlocks == 0)
+      return;
+
+    for (int i = 0; i < nBlocks; i++) {
+      MSG("Create for block %s\n", isNames[i].c_str());
+
+      vector<int> blockComponents;
+      Parameters::get("parallel->solver->is block " + lexical_cast<string>(i),
+                      blockComponents);
+      int nComponents = static_cast<int>(blockComponents.size());
+
+      TEST_EXIT(nComponents > 0)("No is block for block %d defined!\n", i);
+
+      // Check if blocks are continous
+      for (int j = 0; j < nComponents; j++) {
+        TEST_EXIT(blockComponents[j] == blockComponents[0] + j)
+          ("Does not yet support not continous IS blocks! Block %s\n",
+           isNames[i].c_str());
+      }
+
+      IS is;
+      interiorMap->createIndexSet(is, blockComponents[0], nComponents);
+      PCFieldSplitSetIS(pc, isNames[i].c_str(), is);
+      ISDestroy(&is);
+    }
+  }

   void PetscSolverGlobalMatrix::setDofMatrix(DOFMatrix *mat,
                                              int nRowMat, int nColMat)
   {
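
To activate the new field split at runtime, createFieldSplit() reads the block names and their component indices from the parameter file, while the preconditioner type itself is still selected through the PETSc options picked up by KSPSetFromOptions()/PCSetFromOptions(). A hypothetical configuration (block names, component indices, and the exact list syntax are illustrative only, not taken from the commit) could look like:

    parallel->solver->is blocks:  velocity, pressure
    parallel->solver->is block 0: 0, 1, 2
    parallel->solver->is block 1: 3

combined with PETSc options such as -pc_type fieldsplit; each registered split can then be tuned through the usual -fieldsplit_<name>_* option prefix.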
AMDiS/src/parallel/PetscSolverGlobalMatrix.h

@@ -62,6 +62,8 @@ namespace AMDiS {
     void destroyVectorData();

+    void createFieldSplit(PC pc);
+
   protected:
     /// Creates a new non zero pattern structure for the PETSc matrix.
     void createPetscNnzStructure(Matrix<DOFMatrix*> *mat);
AMDiS/src/parallel/PetscSolverSchur.cc

@@ -190,25 +190,25 @@ namespace AMDiS {
     int nOverallBoundaryRows = nOverallBoundaryDofs * nComponents;

-    MatCreateMPIAIJ(mpiCommGlobal,
-                    nInteriorRows, nInteriorRows,
-                    nOverallInteriorRows, nOverallInteriorRows,
-                    100, PETSC_NULL, 100, PETSC_NULL, &matA11);
-    MatCreateMPIAIJ(mpiCommGlobal,
-                    nBoundaryRows, nBoundaryRows,
-                    nOverallBoundaryRows, nOverallBoundaryRows,
-                    100, PETSC_NULL, 100, PETSC_NULL, &matA22);
-    MatCreateMPIAIJ(mpiCommGlobal,
-                    nInteriorRows, nBoundaryRows,
-                    nOverallInteriorRows, nOverallBoundaryRows,
-                    100, PETSC_NULL, 100, PETSC_NULL, &matA12);
-    MatCreateMPIAIJ(mpiCommGlobal,
-                    nBoundaryRows, nInteriorRows,
-                    nOverallBoundaryRows, nOverallInteriorRows,
-                    100, PETSC_NULL, 100, PETSC_NULL, &matA21);
+    MatCreateAIJ(mpiCommGlobal,
+                 nInteriorRows, nInteriorRows,
+                 nOverallInteriorRows, nOverallInteriorRows,
+                 100, PETSC_NULL, 100, PETSC_NULL, &matA11);
+    MatCreateAIJ(mpiCommGlobal,
+                 nBoundaryRows, nBoundaryRows,
+                 nOverallBoundaryRows, nOverallBoundaryRows,
+                 100, PETSC_NULL, 100, PETSC_NULL, &matA22);
+    MatCreateAIJ(mpiCommGlobal,
+                 nInteriorRows, nBoundaryRows,
+                 nOverallInteriorRows, nOverallBoundaryRows,
+                 100, PETSC_NULL, 100, PETSC_NULL, &matA12);
+    MatCreateAIJ(mpiCommGlobal,
+                 nBoundaryRows, nInteriorRows,
+                 nOverallBoundaryRows, nOverallInteriorRows,
+                 100, PETSC_NULL, 100, PETSC_NULL, &matA21);

     for (int i = 0; i < nComponents; i++)
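
The four Schur blocks above keep a flat preallocation guess of 100 nonzeros per row for both the diagonal and the off-diagonal part, whereas PetscSolverGlobalMatrix passes exact per-row counts (the d_nnz/o_nnz arrays); when the array form is supplied, PETSc ignores the scalar estimate. A sketch of the exact-preallocation variant, with placeholder names:

    #include <petscmat.h>

    // Sketch: create the parallel AIJ matrix with exact per-row nonzero counts.
    // d_nnz[i]/o_nnz[i] give the nonzeros of local row i that fall into the
    // diagonal / off-diagonal block; the scalar estimates (here 0) are ignored.
    static void createWithExactNnz(MPI_Comm comm, PetscInt nLocal, PetscInt nGlobal,
                                   PetscInt *d_nnz, PetscInt *o_nnz, Mat *A)
    {
      MatCreateAIJ(comm, nLocal, nLocal, nGlobal, nGlobal,
                   0, d_nnz, 0, o_nnz, A);
    }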