Commit 66a86905 authored Aug 20, 2012 by Thomas Witkowski
Blub
parent 41a2b884
Showing 10 changed files with 227 additions and 128 deletions
AMDiS/src/io/ElementFileWriter.cc                    +5   -2
AMDiS/src/io/FileWriter.cc                           +9   -3
AMDiS/src/io/VtkWriter.cc                            +12  -6
AMDiS/src/io/VtkWriter.h                             +1   -1
AMDiS/src/parallel/PetscSolver.h                     +15  -9
AMDiS/src/parallel/PetscSolverFeti.cc                +5   -4
AMDiS/src/parallel/PetscSolverFeti.h                 +4   -1
AMDiS/src/parallel/PetscSolverGlobalBlockMatrix.cc   +17  -12
AMDiS/src/parallel/PetscSolverGlobalMatrix.cc        +143 -78
AMDiS/src/parallel/PetscSolverSchur.cc               +16  -12
AMDiS/src/io/ElementFileWriter.cc

@@ -438,9 +438,12 @@ namespace AMDiS {
     file << "</VTKFile>\n";
 
 #if HAVE_PARALLEL_DOMAIN_AMDIS
-    if (MPI::COMM_WORLD.Get_rank() == 0)
+    if (MPI::COMM_WORLD.Get_rank() == 0) {
+      vector<string> componentNames;
+      componentNames.push_back("elvalue");
       VtkWriter::writeParallelFile(fname + ".pvtu", MPI::COMM_WORLD.Get_size(),
-                                   fname, ".vtu", 1);
+                                   fname, ".vtu", componentNames);
+    }
 #endif
   }
 }
AMDiS/src/io/FileWriter.cc

@@ -140,11 +140,17 @@ namespace AMDiS {
       vtkWriter.writeFile(fn + paraviewFileExt);
 
 #if HAVE_PARALLEL_DOMAIN_AMDIS
-      if (MPI::COMM_WORLD.Get_rank() == 0)
+      if (MPI::COMM_WORLD.Get_rank() == 0) {
+        vector<string> componentNames;
+        for (unsigned int i = 0; i < dataCollectors.size(); i++)
+          componentNames.push_back(dataCollectors[i]->getValues()->getName());
+
         vtkWriter.writeParallelFile(paraFilename + paraviewParallelFileExt,
                                     MPI::COMM_WORLD.Get_size(),
-                                    filename, postfix, dataCollectors.size());
+                                    filename, postfix, componentNames);
+      }
 #endif
 
       MSG("ParaView file written to %s\n", (fn + paraviewFileExt).c_str());
AMDiS/src/io/VtkWriter.cc

@@ -68,7 +68,7 @@ namespace AMDiS {
 
   void VtkWriter::writeParallelFile(string name, int nRanks,
                                     string fnPrefix, string fnPostfix,
-                                    int nComponents)
+                                    vector<string> &componentNames)
   {
     FUNCNAME("VtkWriter::writeParallelFile()");
 
@@ -94,9 +94,10 @@ namespace AMDiS {
          << "  </PCells>\n";
 
     file << "  <PPointData>\n";
-    for (int i = 0; i < nComponents; i++)
-      file << "    <PDataArray type=\"Float32\" Name=\"value" << i << "\" format=\"ascii\"/>\n";
+    for (unsigned int i = 0; i < componentNames.size(); i++)
+      file << "    <PDataArray type=\"Float32\" Name=\"" << componentNames[i]
+           << "\" format=\"ascii\"/>\n";
 
     file << "  </PPointData>\n";
 
@@ -235,11 +236,16 @@ namespace AMDiS {
     TEST_EXIT(sPos >= 0)("Failed to find file postfix!\n");
 
     string name = filename.substr(0, sPos);
-    if (MPI::COMM_WORLD.Get_rank() == 0)
+    if (MPI::COMM_WORLD.Get_rank() == 0) {
+      vector<string> componentNames;
+      for (unsigned int i = 0; i < dcList.size(); i++)
+        componentNames.push_back(dcList[i]->getValues()->getName());
+
       writer.writeParallelFile(name + ".pvtu", MPI::COMM_WORLD.Get_size(),
                                name, ".vtu",
-                               static_cast<int>(dcList.size()));
+                               componentNames);
+    }
 
     filename = name + "-p" + lexical_cast<string>(MPI::COMM_WORLD.Get_rank()) + "-.vtu";
   }
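The net effect of the VtkWriter.cc hunks above is that the <PDataArray> entries in the .pvtu header are labelled with the actual component names rather than the generic value0, value1, ... A minimal, self-contained C++ sketch of what the new loop emits; the component names "velocity" and "pressure" are illustrative only and do not come from the commit:

#include <iostream>
#include <string>
#include <vector>

int main()
{
  std::vector<std::string> componentNames;
  componentNames.push_back("velocity");   // illustrative component name
  componentNames.push_back("pressure");   // illustrative component name

  // Mirrors the new loop in VtkWriter::writeParallelFile(): one <PDataArray>
  // tag per component, named after the component instead of "value<i>".
  for (unsigned int i = 0; i < componentNames.size(); i++)
    std::cout << "    <PDataArray type=\"Float32\" Name=\"" << componentNames[i]
              << "\" format=\"ascii\"/>\n";

  return 0;
}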
AMDiS/src/io/VtkWriter.h

@@ -55,7 +55,7 @@ namespace AMDiS {
     /// Writes a pvtu file, which contains the links to all the rank files.
     static void writeParallelFile(string name, int nRanks,
                                   string fnPrefix, string fnPostfix,
-                                  int nComponents);
+                                  vector<string> &componentNames);
 
     /// May be used to simply write ParaView files.
     static void writeFile(DOFVector<double> *values,
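Call sites have to follow the new declaration above: instead of passing a component count, they build a vector of component names and hand it to writeParallelFile(). A hedged usage sketch modeled on the adapted call in FileWriter.cc; the include path, the file names, and the component names here are assumptions made for illustration:

#include <string>
#include <vector>
#include "io/VtkWriter.h"   // assumed include path for the AMDiS header shown above

using namespace std;

void writeParallelExample(int nRanks)
{
  // One entry per solution component; these labels end up as the Name="..."
  // attributes of the <PDataArray> tags in the generated .pvtu file.
  vector<string> componentNames;
  componentNames.push_back("velocity");
  componentNames.push_back("pressure");

  // New signature: the component-name vector replaces the old int count.
  AMDiS::VtkWriter::writeParallelFile("solution.pvtu", nRanks,
                                      "solution", ".vtu",
                                      componentNames);
}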
AMDiS/src/parallel/PetscSolver.h

@@ -178,37 +178,41 @@ namespace AMDiS {
 
     inline Mat& getMatIntInt()
     {
-      return matIntInt;
+      return mat[0][0];
+      // return matIntInt;
     }
 
     inline Mat& getMatCoarseCoarse()
     {
       FUNCNAME("PetscSolver::getMatCoarseCoarse()");
 
-      TEST_EXIT_DBG(coarseSpaceMap.size())
+      TEST_EXIT_DBG(coarseSpaceMap.size() && mat.size() > 1)
         ("Subdomain solver does not contain a coarse space!\n");
 
-      return matCoarseCoarse;
+      return mat[1][1];
+      // return matCoarseCoarse;
     }
 
     inline Mat& getMatIntCoarse()
     {
       FUNCNAME("PetscSolver::getMatIntCoarse()");
 
-      TEST_EXIT_DBG(coarseSpaceMap.size())
+      TEST_EXIT_DBG(coarseSpaceMap.size() && mat.size() > 1)
         ("Subdomain solver does not contain a coarse space!\n");
 
-      return matIntCoarse;
+      return mat[0][1];
+      // return matIntCoarse;
     }
 
     inline Mat& getMatCoarseInt()
     {
       FUNCNAME("PetscSolver::getMatCoarseInt()");
 
-      TEST_EXIT_DBG(coarseSpaceMap.size())
+      TEST_EXIT_DBG(coarseSpaceMap.size() && mat.size() > 1)
         ("Subdomain solver does not contain a coarse space!\n");
 
-      return matCoarseInt;
+      return mat[1][0];
+      // return matCoarseInt;
     }
 
   protected:
 
@@ -251,7 +255,7 @@ namespace AMDiS {
 
     /// Parallel DOF mapping of the (optional) coarse space. Allows to define
     /// different coarse spaces for different components.
-    std::map<int, ParallelDofMapping*> coarseSpaceMap;
+    map<int, ParallelDofMapping*> coarseSpaceMap;
 
     int mpiRank;
 
@@ -260,7 +264,9 @@ namespace AMDiS {
     MPI::Intracomm mpiCommLocal;
 
     /// Petsc's matrix structure.
-    Mat matIntInt, matCoarseCoarse, matIntCoarse, matCoarseInt;
+    // Mat matIntInt, matCoarseCoarse, matIntCoarse, matCoarseInt;
+
+    vector<vector<Mat> > mat;
 
     /// PETSc's vector structures for the rhs vector, the solution vector and a
     /// temporary vector for calculating the final residuum.
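The header change above replaces the four individually named PETSc matrices with a single two-dimensional block table mat: block row/column 0 holds the interior space, rows/columns 1..n hold the coarse spaces, and the old getters now just index into that table. A small self-contained sketch of the indexing convention, using strings as stand-ins for the PETSc Mat handles (purely illustrative, not part of the commit):

#include <iostream>
#include <string>
#include <vector>

int main()
{
  // One coarse space plus the interior space gives a 2 x 2 block table.
  int nCoarseMap = 1;
  std::vector<std::vector<std::string> > mat(nCoarseMap + 1,
      std::vector<std::string>(nCoarseMap + 1));

  // Index 0 = interior space, indices 1..nCoarseMap = coarse spaces, so the
  // old named matrices correspond to fixed entries of the table:
  mat[0][0] = "matIntInt";        // returned by getMatIntInt()
  mat[1][1] = "matCoarseCoarse";  // returned by getMatCoarseCoarse()
  mat[0][1] = "matIntCoarse";     // returned by getMatIntCoarse()
  mat[1][0] = "matCoarseInt";     // returned by getMatCoarseInt()

  for (int i = 0; i <= nCoarseMap; i++)
    for (int j = 0; j <= nCoarseMap; j++)
      std::cout << "mat[" << i << "][" << j << "] = " << mat[i][j] << "\n";

  return 0;
}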
AMDiS/src/parallel/PetscSolverFeti.cc

@@ -338,14 +338,14 @@ namespace AMDiS {
     for (unsigned int i = 0; i < meshDistributor->getFeSpaces().size(); i++) {
       const FiniteElemSpace *feSpace = meshDistributor->getFeSpace(i);
 
       createPrimals(feSpace);
 
       createDuals(feSpace);
 
       createInterfaceNodes(feSpace);
 
-      createIndexB(feSpace);
+      createIndexB(feSpace);
     }
 
     primalDofMap.update();
 
@@ -1352,11 +1352,12 @@ namespace AMDiS {
 
     // === Create all sets and indices. ===
 
     vector<const FiniteElemSpace*> feSpaces = getFeSpaces(mat);
+    initialize(feSpaces);
 
     createFetiData();
 
     // === Create matrices for the FETI-DP method. ===
 
     if (printTimings) {
AMDiS/src/parallel/PetscSolverFeti.h

@@ -199,7 +199,10 @@ namespace AMDiS {
     inline bool isInterface(const FiniteElemSpace *feSpace, DegreeOfFreedom dof)
     {
-      return interfaceDofMap[feSpace].isSet(dof);
+      if (feSpace == fullInterface)
+        return interfaceDofMap[feSpace].isSet(dof);
+
+      return false;
     }
 
   protected:
AMDiS/src/parallel/PetscSolverGlobalBlockMatrix.cc

@@ -16,17 +16,22 @@
 
 namespace AMDiS {
 
-  void PetscSolverGlobalBlockMatrix::fillPetscMatrix(Matrix<DOFMatrix*> *mat)
+  void PetscSolverGlobalBlockMatrix::fillPetscMatrix(Matrix<DOFMatrix*> *seqMat)
   {
     FUNCNAME("PetscSolverGlobalBlockMatrix::fillPetscMatrix()");
 
     TEST_EXIT_DBG(meshDistributor)("No mesh distributor object defined!\n");
     TEST_EXIT_DBG(interiorMap)("No parallel mapping object defined!\n");
-    TEST_EXIT_DBG(mat)("No DOF matrix defined!\n");
+    TEST_EXIT_DBG(seqMat)("No DOF matrix defined!\n");
+
+    mat.resize(1);
+    mat[0].resize(1);
+    Mat &matIntInt = mat[0][0];
 
     double wtime = MPI::Wtime();
 
     const FiniteElemSpace *feSpace = meshDistributor->getFeSpace(0);
-    nComponents = mat->getNumRows();
+    nComponents = seqMat->getNumRows();
 
     int nRankRows = (*interiorMap)[feSpace].nRankDofs;
     int nOverallRows = (*interiorMap)[feSpace].nOverallDofs;
 
@@ -63,9 +68,9 @@ namespace AMDiS {
     for (int i = 0; i < nComponents; i++)
       for (int j = 0; j < nComponents; j++)
-        if ((*mat)[i][j]) {
+        if ((*seqMat)[i][j]) {
           int idx = componentInBlock[i] * nBlocks + componentInBlock[j];
-          setDofMatrix(nestMat[idx], (*mat)[i][j],
+          setDofMatrix(nestMat[idx], (*seqMat)[i][j],
                        compNthInBlock[i], compNthInBlock[j]);
         }
 
@@ -178,7 +183,7 @@ namespace AMDiS {
       if (nestMat[i] != PETSC_NULL)
         MatDestroy(&(nestMat[i]));
 
-    MatDestroy(&matIntInt);
+    MatDestroy(&mat[0][0]);
     KSPDestroy(&kspInterior);
   }
 
@@ -196,14 +201,14 @@ namespace AMDiS {
 
   void PetscSolverGlobalBlockMatrix::setDofMatrix(Mat& petscMat,
-                                                  DOFMatrix* mat,
+                                                  DOFMatrix* seqMat,
                                                   int dispRowBlock,
                                                   int dispColBlock)
   {
     FUNCNAME("PetscSolverGlobalBlockMatrix::setDofMatrix()");
 
-    TEST_EXIT(mat)("No DOFMatrix!\n");
     TEST_EXIT(petscMat)("No PETSc matrix!\n");
+    TEST_EXIT(seqMat)("No DOFMatrix!\n");
 
     const FiniteElemSpace *feSpace = meshDistributor->getFeSpace(0);
 
@@ -211,8 +216,8 @@ namespace AMDiS {
     namespace traits = mtl::traits;
     typedef DOFMatrix::base_matrix_type Matrix;
 
-    traits::col<Matrix>::type col(mat->getBaseMatrix());
-    traits::const_value<Matrix>::type value(mat->getBaseMatrix());
+    traits::col<Matrix>::type col(seqMat->getBaseMatrix());
+    traits::const_value<Matrix>::type value(seqMat->getBaseMatrix());
 
     typedef traits::range_generator<row, Matrix>::type cursor_type;
     typedef traits::range_generator<nz, cursor_type>::type icursor_type;
 
@@ -228,8 +233,8 @@ namespace AMDiS {
     // === Traverse all rows of the dof matrix and insert row wise the values ===
     // === to the PETSc matrix. ===
 
-    for (cursor_type cursor = begin<row>(mat->getBaseMatrix()),
-           cend = end<row>(mat->getBaseMatrix()); cursor != cend; ++cursor) {
+    for (cursor_type cursor = begin<row>(seqMat->getBaseMatrix()),
+           cend = end<row>(seqMat->getBaseMatrix()); cursor != cend; ++cursor) {
 
       // Global index of the current row DOF.
       int rowIndex = (*interiorMap)[feSpace][*cursor].global + dispRowIndex;
AMDiS/src/parallel/PetscSolverGlobalMatrix.cc

@@ -17,26 +17,30 @@
 
 namespace AMDiS {
 
-  void PetscSolverGlobalMatrix::fillPetscMatrix(Matrix<DOFMatrix*> *mat)
+  void PetscSolverGlobalMatrix::fillPetscMatrix(Matrix<DOFMatrix*> *seqMat)
   {
     FUNCNAME("PetscSolverGlobalMatrix::fillPetscMatrix()");
 
     if (coarseSpaceMap.size()) {
       updateSubdomainData();
-      fillPetscMatrixWithCoarseSpace(mat);
+      fillPetscMatrixWithCoarseSpace(seqMat);
       return;
     }
 
+    mat.resize(1);
+    mat[0].resize(1);
+    Mat &matIntInt = mat[0][0];
+
     TEST_EXIT_DBG(meshDistributor)("No mesh distributor object defined!\n");
     TEST_EXIT_DBG(interiorMap)("No parallel mapping object defined!\n");
-    TEST_EXIT_DBG(mat)("No DOF matrix defined!\n");
+    TEST_EXIT_DBG(seqMat)("No DOF matrix defined!\n");
 
     double wtime = MPI::Wtime();
 
     // === If required, recompute non zero structure of the matrix. ===
 
-    if (checkMeshChange(mat))
-      nnzInterior.create(mat, mpiCommGlobal, *interiorMap,
+    if (checkMeshChange(seqMat))
+      nnzInterior.create(seqMat, mpiCommGlobal, *interiorMap,
                          &(meshDistributor->getPeriodicMap()),
                          meshDistributor->getElementObjectDb());
 
@@ -73,11 +77,11 @@ namespace AMDiS {
 
     // === Transfer values from DOF matrices to the PETSc matrix. ===
 
-    int nComponents = mat->getNumRows();
+    int nComponents = seqMat->getNumRows();
     for (int i = 0; i < nComponents; i++)
       for (int j = 0; j < nComponents; j++)
-        if ((*mat)[i][j])
-          setDofMatrix((*mat)[i][j], i, j);
+        if ((*seqMat)[i][j])
+          setDofMatrix((*seqMat)[i][j], i, j);
 
 #if (DEBUG != 0)
     MSG("Fill petsc matrix 2 needed %.5f seconds\n", MPI::Wtime() - wtime);
 
@@ -133,13 +137,49 @@ namespace AMDiS {
   }
 
 
-  void PetscSolverGlobalMatrix::fillPetscMatrixWithCoarseSpace(Matrix<DOFMatrix*> *mat)
+  void PetscSolverGlobalMatrix::fillPetscMatrixWithCoarseSpace(Matrix<DOFMatrix*> *seqMat)
   {
     FUNCNAME("PetscSolverGlobalMatrix::fillPetscMatrixWithCoarseSpace()");
 
     TEST_EXIT_DBG(interiorMap)("Should not happen!\n");
 
-    vector<const FiniteElemSpace*> feSpaces = getFeSpaces(mat);
+    vector<const FiniteElemSpace*> feSpaces = getFeSpaces(seqMat);
+
+    vector<ParallelDofMapping*> uniqueCoarseMap;
+    if (coarseSpaceMap.size()) {
+      TEST_EXIT_DBG(coarseSpaceMap.size() == seqMat->getSize())
+        ("Wrong sizes %d %d\n", coarseSpaceMap.size(), seqMat->getSize());
+
+      std::set<ParallelDofMapping*> tmp;
+      for (map<int, ParallelDofMapping*>::iterator it = coarseSpaceMap.begin();
+           it != coarseSpaceMap.end(); ++it) {
+        if (tmp.count(it->second) == 0) {
+          tmp.insert(it->second);
+          uniqueCoarseMap.push_back(it->second);
+        }
+      }
+    }
+
+    int nCoarseMap = uniqueCoarseMap.size();
+    mat.resize(nCoarseMap + 1);
+    for (int i = 0; i < nCoarseMap + 1; i++)
+      mat[i].resize(nCoarseMap + 1);
+
+    vector<int> componentIthCoarseMap(coarseSpaceMap.size());
+    for (unsigned int i = 0; i < componentIthCoarseMap.size(); i++) {
+      bool found = false;
+      for (int j = 0; j < nCoarseMap; j++) {
+        if (coarseSpaceMap[i] == uniqueCoarseMap[j]) {
+          componentIthCoarseMap[i] = j;
+          found = true;
+          break;
+        }
+      }
+
+      TEST_EXIT_DBG(found)("Should not happen!\n");
+    }
+
     int nRowsRankInterior = interiorMap->getRankDofs();
     int nRowsOverallInterior = interiorMap->getOverallDofs();
 
@@ -147,8 +187,8 @@ namespace AMDiS {
     // === If required, recompute non zero structure of the matrix. ===
 
     bool localMatrix = (subdomainLevel == 0);
-    if (checkMeshChange(mat, localMatrix)) {
-      nnzInterior.create(mat, mpiCommGlobal, *interiorMap, NULL,
+    if (checkMeshChange(seqMat, localMatrix)) {
+      nnzInterior.create(seqMat, mpiCommGlobal, *interiorMap, NULL,
                          meshDistributor->getElementObjectDb(),
                          localMatrix);
 
@@ -162,40 +202,69 @@ namespace AMDiS {
       }
     }
 
+    mat.resize(nCoarseMap + 1);
     if (localMatrix) {
       MatCreateSeqAIJ(mpiCommLocal, nRowsRankInterior, nRowsRankInterior,
                       0, nnzInterior.dnnz,
-                      &matIntInt);
+                      &mat[0][0]);
     } else {
       MatCreateAIJ(mpiCommLocal,
                    nRowsRankInterior, nRowsRankInterior,
                    nRowsOverallInterior, nRowsOverallInterior,
                    0, nnzInterior.dnnz, 0, nnzInterior.onnz,
-                   &matIntInt);
+                   &mat[0][0]);
     }
 
     if (coarseSpaceMap.size()) {
-      int nRowsRankCoarse = coarseSpaceMap[0]->getRankDofs();
-      int nRowsOverallCoarse = coarseSpaceMap[0]->getOverallDofs();
-
-      MatCreateAIJ(mpiCommGlobal,
-                   nRowsRankCoarse, nRowsRankCoarse,
-                   nRowsOverallCoarse, nRowsOverallCoarse,
-                   0, nnzCoarse.dnnz, 0, nnzCoarse.onnz,
-                   &matCoarseCoarse);
-
-      MatCreateAIJ(mpiCommGlobal,
-                   nRowsRankCoarse, nRowsRankInterior,
-                   nRowsOverallCoarse, nGlobalOverallInterior,
-                   0, nnzCoarseInt.dnnz, 0, nnzCoarseInt.onnz,
-                   &matCoarseInt);
-
-      MatCreateAIJ(mpiCommGlobal,
-                   nRowsRankInterior, nRowsRankCoarse,
-                   nGlobalOverallInterior, nRowsOverallCoarse,
-                   0, nnzIntCoarse.dnnz, 0, nnzIntCoarse.onnz,
-                   &matIntCoarse);
+      for (int i = 0; i < nCoarseMap; i++) {
+        ParallelDofMapping* cMap = uniqueCoarseMap[i];
+
+        int nRowsRankCoarse = cMap->getRankDofs();
+        int nRowsOverallCoarse = cMap->getOverallDofs();
+
+        MatCreateAIJ(mpiCommGlobal,
+                     nRowsRankCoarse, nRowsRankCoarse,
+                     nRowsOverallCoarse, nRowsOverallCoarse,
+                     0, PETSC_NULL, 0, PETSC_NULL,
+                     &mat[i + 1][i + 1]);
+        MSG("REMOVE THIS LINE WHEN FINISHED!\n");
+        MatSetOption(mat[i + 1][i + 1], MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);
+
+        for (int j = 0; j < nCoarseMap + 1; j++) {
+          int nRowsRankMat = (j == 0 ? nRowsRankInterior : uniqueCoarseMap[j - 1]->getRankDofs());
+          int nRowsOverallMat = (j == 0 ? nGlobalOverallInterior : uniqueCoarseMap[j - 1]->getOverallDofs());
+
+          MatCreateAIJ(mpiCommGlobal,
+                       nRowsRankCoarse, nRowsRankMat,
+                       nRowsOverallCoarse, nRowsOverallMat,
+                       100, PETSC_NULL, 100, PETSC_NULL,
+                       &mat[i + 1][j]);
+          MSG("REMOVE THIS LINE WHEN FINISHED!\n");
+          MatSetOption(mat[i + 1][j], MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);
+
+          MatCreateAIJ(mpiCommGlobal,
+                       nRowsRankMat, nRowsRankCoarse,
+                       nRowsOverallMat, nRowsOverallCoarse,
+                       0, PETSC_NULL, 0, PETSC_NULL,
+                       &mat[j][i + 1]);
+          MSG("REMOVE THIS LINE WHEN FINISHED!\n");
+          MatSetOption(mat[j][i + 1], MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);
+        }
+
+        // MatCreateAIJ(mpiCommGlobal,
+        //              nRowsRankCoarse, nRowsRankInterior,
+        //              nRowsOverallCoarse, nGlobalOverallInterior,
+        //              0, nnzCoarseInt.dnnz, 0, nnzCoarseInt.onnz,
+        //              &matCoarseInt);
+
+        // MatCreateAIJ(mpiCommGlobal,
+        //              nRowsRankInterior, nRowsRankCoarse,
+        //              nGlobalOverallInterior, nRowsOverallCoarse,
+        //              0, nnzIntCoarse.dnnz, 0, nnzIntCoarse.onnz,
+        //              &matIntCoarse);
+      }
     }
 
@@ -218,21 +287,21 @@ namespace AMDiS {
     // === Traverse all sequentially created matrices and add the values to ===
     // === the global PETSc matrices. ===
 
-    int nComponents = mat->getSize();
+    int nComponents = seqMat->getSize();
     for (int i = 0; i < nComponents; i++) {
       for (int j = 0; j < nComponents; j++) {
-        if (!(*mat)[i][j])
+        if (!(*seqMat)[i][j])
          continue;
 
        ParallelDofMapping *rowCoarseSpace = coarseSpaceMap[i];
        ParallelDofMapping *colCoarseSpace = coarseSpaceMap[j];
 
-        traits::col<Matrix>::type col((*mat)[i][j]->getBaseMatrix());
-        traits::const_value<Matrix>::type value((*mat)[i][j]->getBaseMatrix());
+        traits::col<Matrix>::type col((*seqMat)[i][j]->getBaseMatrix());
+        traits::const_value<Matrix>::type value((*seqMat)[i][j]->getBaseMatrix());
 
        // Traverse all rows.
-        for (cursor_type cursor = begin<row>((*mat)[i][j]->getBaseMatrix()),
-               cend = end<row>((*mat)[i][j]->getBaseMatrix()); cursor != cend; ++cursor) {
+        for (cursor_type cursor = begin<row>((*seqMat)[i][j]->getBaseMatrix()),
+               cend = end<row>((*seqMat)[i][j]->getBaseMatrix()); cursor != cend; ++cursor) {
 
          bool isRowCoarse = isCoarseSpace(i, feSpaces[i], *cursor);
 
@@ -274,7 +343,9 @@ namespace AMDiS {
            for (unsigned int k = 0; k < cols.size(); k++)
              cols[k] = colCoarseSpace->getMatIndex(j, cols[k]);
 
-            MatSetValues(matCoarseCoarse, 1, &rowIndex, cols.size(),
+            // matCoarseCoarse
+            MatSetValues(mat[componentIthCoarseMap[i] + 1][componentIthCoarseMap[i] + 1],
+                         1, &rowIndex, cols.size(),
                         &(cols[0]), &(values[0]), ADD_VALUES);
 
            if (colsOther.size()) {
 
@@ -287,7 +358,9 @@ namespace AMDiS {
                  interiorMap->getMatIndex(j, colsOther[k]) + rStartInterior;
              }
 
-              MatSetValues(matCoarseInt, 1, &rowIndex, colsOther.size(),
+              // matCoarseInt
+              MatSetValues(mat[componentIthCoarseMap[i] + 1][0],
+                           1, &rowIndex, colsOther.size(),
                           &(colsOther[0]), &(valuesOther[0]), ADD_VALUES);
            }
          } else {
 
@@ -302,7 +375,8 @@ namespace AMDiS {
              cols[k] = interiorMap->getMatIndex(j, cols[k]);
            }
 
-            MatSetValues(matIntInt, 1, &localRowIndex, cols.size(),
+            // matIntInt
+            MatSetValues(mat[0][0], 1, &localRowIndex, cols.size(),
                         &(cols[0]), &(values[0]), ADD_VALUES);
 
            if (colsOther.size()) {
 
@@ -314,7 +388,9 @@ namespace AMDiS {
              for (unsigned int k = 0; k < colsOther.size(); k++)
                colsOther[k] = colCoarseSpace->getMatIndex(j, colsOther[k]);
 
-              MatSetValues(matIntCoarse, 1, &globalRowIndex, colsOther.size(),
+              // matIntCoarse
+              MatSetValues(mat[0][componentIthCoarseMap[i] + 1],
+                           1, &globalRowIndex, colsOther.size(),
                           &(colsOther[0]), &(valuesOther[0]), ADD_VALUES);
            }
          }
 
@@ -325,19 +401,11 @@ namespace AMDiS {
 
    // === Start global assembly procedure. ===
 
-    MatAssemblyBegin(matIntInt, MAT_FINAL_ASSEMBLY);
-    MatAssemblyEnd(matIntInt, MAT_FINAL_ASSEMBLY);
-
-    if (coarseSpaceMap.size()) {
-      MatAssemblyBegin(matCoarseCoarse, MAT_FINAL_ASSEMBLY);
-      MatAssemblyEnd(matCoarseCoarse, MAT_FINAL_ASSEMBLY);
-
-      MatAssemblyBegin(matIntCoarse, MAT_FINAL_ASSEMBLY);
-      MatAssemblyEnd(matIntCoarse, MAT_FINAL_ASSEMBLY);
-
-      MatAssemblyBegin(matCoarseInt, MAT_FINAL_ASSEMBLY);
-      MatAssemblyEnd(matCoarseInt, MAT_FINAL_ASSEMBLY);
+    for (int i = 0; i < nCoarseMap + 1; i++) {
+      for (int j = 0; j < nCoarseMap + 1; j++) {
+        MatAssemblyBegin(mat[i][j], MAT_FINAL_ASSEMBLY);
+        MatAssemblyEnd(mat[i][j], MAT_FINAL_ASSEMBLY);
+      }
    }
 
@@ -348,7 +416,7 @@ namespace AMDiS {
    // === Create solver for the non primal (thus local) variables. ===
 
    KSPCreate(mpiCommLocal, &kspInterior);
-    KSPSetOperators(kspInterior, matIntInt, matIntInt, SAME_NONZERO_PATTERN);
+    KSPSetOperators(kspInterior, mat[0][0], mat[0][0], SAME_NONZERO_PATTERN);
    KSPSetOptionsPrefix(kspInterior, "interior_");
    KSPSetType(kspInterior, KSPPREONLY);
    KSPGetPC(kspInterior, &pcInterior);
 
@@ -453,7 +521,7 @@ namespace AMDiS {
    MatNullSpaceCreate(mpiCommGlobal,
                       (hasConstantNullspace ? PETSC_TRUE : PETSC_FALSE),
                       1, &nullspaceBasis, &matNullspace);
-    MatMult(matIntInt, nullspaceBasis, petscSolVec);