Backofen, Rainer / amdis

Commit 0d0838bd authored Aug 21, 2012 by Thomas Witkowski

Blub

parent 42237fb0

Changes 8
AMDiS/src/parallel/ParallelCoarseSpaceMatVec.cc
@@ -19,7 +19,9 @@ namespace AMDiS {
   using namespace std;
 
   ParallelCoarseSpaceMatVec::ParallelCoarseSpaceMatVec()
-    : lastMeshNnz(0),
+    : rStartInterior(0),
+      nGlobalOverallInterior(0),
+      lastMeshNnz(0),
       alwaysCreateNnzStructure(false)
   {
     Parameters::get("parallel->always create nnz structure",
@@ -61,6 +63,9 @@ namespace AMDiS {
     for (int i = 0; i < nCoarseMap + 1; i++)
       mat[i].resize(nCoarseMap + 1);
 
+    vecSol.resize(nCoarseMap + 1);
+    vecRhs.resize(nCoarseMap + 1);
+
     componentIthCoarseMap.resize(coarseSpaceMap.size());
     for (unsigned int i = 0; i < componentIthCoarseMap.size(); i++) {
       bool found = false;
@@ -81,6 +86,7 @@ namespace AMDiS {
   {
     FUNCNAME("ParallelCoarseSpaceMatVec::create()");
 
+    updateSubdomainData();
 
     // === If required, recompute non zero structure of the matrix. ===
@@ -115,84 +121,77 @@ namespace AMDiS {
           nnz[i][j].create(seqMat, mpiCommGlobal, rowMap, colMap, NULL,
                            meshDistributor->getElementObjectDb());
 
           /*
           nnzCoarse.create(mat, mpiCommGlobal, *coarseSpaceMap, NULL, meshDistributor->getElementObjectDb());
           nnzCoarseInt.create(mat, mpiCommGlobal, *coarseSpaceMap, *interiorMap, NULL, meshDistributor->getElementObjectDb());
           nnzIntCoarse.create(mat, mpiCommGlobal, *interiorMap, *coarseSpaceMap, NULL, meshDistributor->getElementObjectDb());
           */
         }
       }
     }
   }
 
-    // === Create PETSc matrix with the computed nnz data structure. ===
+    // === Create PETSc matrices and PETSc vectors with the computed ===
+    // === nnz data structure.                                        ===
 
     int nRankRows = interiorMap->getRankDofs();
     int nOverallRows = interiorMap->getOverallDofs();
 
     if (localMatrix) {
-      MatCreateSeqAIJ(mpiCommLocal, nRankRows, nRankRows, 100, PETSC_NULL,
-                      &mat[0][0]);
+      MatCreateSeqAIJ(mpiCommLocal, nRankRows, nRankRows, 0, nnz[0][0].dnnz,
+                      &mat[0][0]);
       MatSetOption(mat[0][0], MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);
     } else {
       MatCreateAIJ(mpiCommGlobal, nRankRows, nRankRows,
                    nOverallRows, nOverallRows,
-                   100, PETSC_NULL, 100, PETSC_NULL, &mat[0][0]);
+                   0, nnz[0][0].dnnz, 0, nnz[0][0].onnz, &mat[0][0]);
       MatSetOption(mat[0][0], MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);
     }
 
+    VecCreateMPI(mpiCommGlobal, nRankRows, nOverallRows, &vecSol[0]);
+    VecCreateMPI(mpiCommGlobal, nRankRows, nOverallRows, &vecRhs[0]);
+
     if (coarseSpaceMap.size()) {
       int nCoarseMap = uniqueCoarseMap.size();
       for (int i = 0; i < nCoarseMap; i++) {
         ParallelDofMapping* cMap = uniqueCoarseMap[i];
         int nRowsRankCoarse = cMap->getRankDofs();
         int nRowsOverallCoarse = cMap->getOverallDofs();
 
-        MatCreateAIJ(mpiCommGlobal,
-                     nRowsRankCoarse, nRowsRankCoarse,
-                     nRowsOverallCoarse, nRowsOverallCoarse,
-                     100, PETSC_NULL, 100, PETSC_NULL,
-                     &mat[i + 1][i + 1]);
-        MSG("REMOVE THIS LINE WHEN FINISHED!\n");
-        MatSetOption(mat[i + 1][i + 1], MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);
-
-        for (int j = 0; j < nCoarseMap + 1; j++) {
-          int nRowsRankMat = (j == 0 ? nRankRows : uniqueCoarseMap[j - 1]->getRankDofs());
-          int nRowsOverallMat = (j == 0 ? nOverallRows : uniqueCoarseMap[j - 1]->getOverallDofs());
-
-          MatCreateAIJ(mpiCommGlobal,
-                       nRowsRankCoarse, nRowsRankMat,
-                       nRowsOverallCoarse, nRowsOverallMat,
-                       100, PETSC_NULL, 100, PETSC_NULL,
-                       &mat[i + 1][j]);
-          MSG("REMOVE THIS LINE WHEN FINISHED!\n");
-          MatSetOption(mat[i + 1][j], MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);
-
-          MatCreateAIJ(mpiCommGlobal,
-                       nRowsRankMat, nRowsRankCoarse,
-                       nRowsOverallMat, nRowsOverallCoarse,
-                       100, PETSC_NULL, 100, PETSC_NULL,
-                       &mat[j][i + 1]);
-          MSG("REMOVE THIS LINE WHEN FINISHED!\n");
-          MatSetOption(mat[j][i + 1], MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);
-        }
+        MatCreateAIJ(mpiCommGlobal,
+                     nRowsRankCoarse, nRowsRankCoarse,
+                     nRowsOverallCoarse, nRowsOverallCoarse,
+                     0, nnz[i + 1][i + 1].dnnz, 0, nnz[i + 1][i + 1].onnz,
+                     &mat[i + 1][i + 1]);
+
+        VecCreateMPI(mpiCommGlobal, nRowsRankCoarse, nRowsOverallCoarse, &vecSol[i + 1]);
+        VecCreateMPI(mpiCommGlobal, nRowsRankCoarse, nRowsOverallCoarse, &vecRhs[i + 1]);
+
+        for (int j = 0; j < nCoarseMap + 1; j++) {
+          int nRowsRankMat = (j == 0 ? nRankRows : uniqueCoarseMap[j - 1]->getRankDofs());
+          int nRowsOverallMat = (j == 0 ? nOverallRows : uniqueCoarseMap[j - 1]->getOverallDofs());
+
+          MatCreateAIJ(mpiCommGlobal,
+                       nRowsRankCoarse, nRowsRankMat,
+                       nRowsOverallCoarse, nRowsOverallMat,
+                       0, nnz[i + 1][j].dnnz, 0, nnz[i + 1][j].onnz,
+                       &mat[i + 1][j]);
+
+          MatCreateAIJ(mpiCommGlobal,
+                       nRowsRankMat, nRowsRankCoarse,
+                       nRowsOverallMat, nRowsOverallCoarse,
+                       0, nnz[j][i + 1].dnnz, 0, nnz[j][i + 1].onnz,
+                       &mat[j][i + 1]);
+        }
       }
     }
   }
 
-  void ParallelCoarseSpaceMatVec::destroy()
+  void ParallelCoarseSpaceMatVec::matDestroy()
   {
-    FUNCNAME("ParallelCoarseSpaceMatVec::destroy()");
+    FUNCNAME("ParallelCoarseSpaceMatVec::matDestroy()");
 
     int nMatrix = mat.size();
     for (int i = 0; i < nMatrix; i++)
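The create() hunk above switches both matrix paths from a fixed per-row guess (100 entries, PETSC_NULL) to the exact per-row counts stored in the MatrixNnzStructure objects (dnnz for the diagonal block, onnz for the off-diagonal block). The standalone sketch below only illustrates that PETSc preallocation idiom outside of AMDiS; all sizes, nnz arrays, and inserted values are invented for the example and are not taken from the commit.

// Illustrative sketch only: exact AIJ preallocation from per-row counts.
#include <petscmat.h>

int main(int argc, char **argv)
{
  PetscInitialize(&argc, &argv, PETSC_NULL, PETSC_NULL);

  // Each rank owns 4 rows of a 1D Laplacian-like stencil: at most 3 entries
  // per row fall into the diagonal block, at most 1 into the off-diagonal block.
  PetscInt nRankRows = 4;
  PetscInt dnnz[4] = {2, 3, 3, 2};   // per-row counts, diagonal block
  PetscInt onnz[4] = {1, 0, 0, 1};   // per-row counts, off-diagonal block

  Mat A;
  MatCreateAIJ(PETSC_COMM_WORLD,
               nRankRows, nRankRows,              // local rows and columns
               PETSC_DETERMINE, PETSC_DETERMINE,  // global sizes derived from local ones
               0, dnnz, 0, onnz,                  // exact preallocation, no flat fallback
               &A);

  // As in the commit: tolerate entries outside the preallocated pattern
  // instead of aborting, which is useful while the nnz computation is new.
  MatSetOption(A, MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);

  PetscInt rStart, rEnd, nGlobalRows;
  MatGetOwnershipRange(A, &rStart, &rEnd);
  MatGetSize(A, &nGlobalRows, PETSC_NULL);
  for (PetscInt i = rStart; i < rEnd; i++) {
    MatSetValue(A, i, i, 2.0, INSERT_VALUES);
    if (i > 0)
      MatSetValue(A, i, i - 1, -1.0, INSERT_VALUES);
    if (i < nGlobalRows - 1)
      MatSetValue(A, i, i + 1, -1.0, INSERT_VALUES);
  }

  MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY);
  MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY);

  MatDestroy(&A);
  PetscFinalize();
  return 0;
}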
@@ -201,9 +200,21 @@ namespace AMDiS {
   }
 
-  void ParallelCoarseSpaceMatVec::assembly()
+  void ParallelCoarseSpaceMatVec::vecDestroy()
   {
-    FUNCNAME("ParallelCoarseSpaceMatVec::assembly()");
+    FUNCNAME("ParallelCoarseSpaceMatVec::vecDestroy()");
+
+    int nVec = vecSol.size();
+    for (int i = 0; i < nVec; i++) {
+      VecDestroy(&vecSol[i]);
+      VecDestroy(&vecRhs[i]);
+    }
+  }
+
+  void ParallelCoarseSpaceMatVec::matAssembly()
+  {
+    FUNCNAME("ParallelCoarseSpaceMatVec::matAssembly()");
 
     int nMatrix = mat.size();
     for (int i = 0; i < nMatrix; i++) {
@@ -215,6 +226,30 @@ namespace AMDiS {
   }
 
+  void ParallelCoarseSpaceMatVec::vecRhsAssembly()
+  {
+    FUNCNAME("ParallelCoarseSpaceMatVec::vecRhsAssembly()");
+
+    int nVec = vecRhs.size();
+    for (int i = 0; i < nVec; i++) {
+      VecAssemblyBegin(vecRhs[i]);
+      VecAssemblyEnd(vecRhs[i]);
+    }
+  }
+
+  void ParallelCoarseSpaceMatVec::vecSolAssembly()
+  {
+    FUNCNAME("ParallelCoarseSpaceMatVec::vecSolAssembly()");
+
+    int nVec = vecRhs.size();
+    for (int i = 0; i < nVec; i++) {
+      VecAssemblyBegin(vecSol[i]);
+      VecAssemblyEnd(vecSol[i]);
+    }
+  }
+
   bool ParallelCoarseSpaceMatVec::checkMeshChange(Matrix<DOFMatrix*> &seqMat,
                                                   bool localMatrix)
   {
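The new vecRhsAssembly() and vecSolAssembly() helpers simply run PETSc's two-phase vector assembly over every block vector the class owns. A minimal standalone sketch of that underlying pattern on a single MPI vector follows; the sizes and values are invented and this is not AMDiS code.

// Illustrative sketch only: PETSc's two-phase vector assembly.
#include <petscvec.h>

int main(int argc, char **argv)
{
  PetscInitialize(&argc, &argv, PETSC_NULL, PETSC_NULL);

  // Every rank contributes 5 local entries; the global size is derived.
  Vec rhs;
  VecCreateMPI(PETSC_COMM_WORLD, 5, PETSC_DETERMINE, &rhs);

  PetscInt rStart, rEnd;
  VecGetOwnershipRange(rhs, &rStart, &rEnd);
  for (PetscInt i = rStart; i < rEnd; i++)
    VecSetValue(rhs, i, (PetscScalar) i, INSERT_VALUES);

  // Begin/End pair: off-process contributions are exchanged in between,
  // which is what vecRhsAssembly()/vecSolAssembly() loop over per block.
  VecAssemblyBegin(rhs);
  VecAssemblyEnd(rhs);

  VecDestroy(&rhs);
  PetscFinalize();
  return 0;
}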
@@ -231,7 +266,6 @@ namespace AMDiS {
       interiorMap->setComputeMatIndex(!localMatrix);
       interiorMap->update(feSpaces);
 
-      // updateSubdomainData();
       lastMeshNnz = meshDistributor->getLastMeshChangeIndex();
       return true;
@@ -240,4 +274,28 @@ namespace AMDiS {
     return false;
   }
 
+  void ParallelCoarseSpaceMatVec::updateSubdomainData()
+  {
+    FUNCNAME("ParallelCoarseSpaceMatVec::updateSubdomainData()");
+
+    if (mpiCommLocal.Get_size() == 1) {
+      rStartInterior = 0;
+      nGlobalOverallInterior = interiorMap->getOverallDofs();
+    } else {
+      int groupRowsInterior = 0;
+      if (mpiCommLocal.Get_rank() == 0)
+        groupRowsInterior = interiorMap->getOverallDofs();
+
+      mpi::getDofNumbering(mpiCommGlobal, groupRowsInterior,
+                           rStartInterior, nGlobalOverallInterior);
+
+      int tmp = 0;
+      if (mpiCommLocal.Get_rank() == 0)
+        tmp = rStartInterior;
+
+      mpiCommLocal.Allreduce(&tmp, &rStartInterior, 1, MPI_INT, MPI_SUM);
+    }
+  }
+
 }
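updateSubdomainData() fills rStartInterior and nGlobalOverallInterior, i.e. the global row offset of this rank group's interior block and the total interior size, via mpi::getDofNumbering() and an Allreduce over the local communicator. The sketch below shows the same kind of offset bookkeeping with plain MPI prefix sums; it only illustrates the idea and is not the AMDiS implementation.

// Illustrative sketch only: global row offset and overall row count with MPI.
#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
  MPI_Init(&argc, &argv);

  int rank;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  // Pretend this rank owns (rank + 1) * 10 interior rows.
  int nLocalRows = (rank + 1) * 10;

  // Exclusive prefix sum: first global row index owned by this rank.
  int rStart = 0;
  MPI_Exscan(&nLocalRows, &rStart, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
  if (rank == 0)
    rStart = 0;  // MPI_Exscan leaves the result undefined on rank 0

  // Total number of interior rows over all ranks.
  int nGlobalRows = 0;
  MPI_Allreduce(&nLocalRows, &nGlobalRows, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);

  printf("rank %d owns rows [%d, %d) of %d\n",
         rank, rStart, rStart + nLocalRows, nGlobalRows);

  MPI_Finalize();
  return 0;
}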
AMDiS/src/parallel/ParallelCoarseSpaceMatVec.h
@@ -57,10 +57,17 @@ namespace AMDiS {
     void create(Matrix<DOFMatrix*>& seqMat);
 
-    /// Run PETSc's assembly routines.
-    void assembly();
+    /// Run PETSc's matrix assembly routines.
+    void matAssembly();
 
-    void destroy();
+    /// Run PETSc's vector assembly routines.
+    void vecRhsAssembly();
+
+    void vecSolAssembly();
+
+    void matDestroy();
+
+    void vecDestroy();
 
     bool checkMeshChange(Matrix<DOFMatrix*> &mat,
                          bool localMatrix = false);
@@ -108,9 +115,39 @@ namespace AMDiS {
       return mat[matIndex][0];
     }
 
+    inline Vec& getInteriorVecRhs()
+    {
+      return vecRhs[0];
+    }
+
+    inline Vec& getCoarseVecRhs(int coarseSpace = 0)
+    {
+      return vecRhs[coarseSpace + 1];
+    }
+
+    inline Vec& getInteriorVecSol()
+    {
+      return vecSol[0];
+    }
+
+    inline Vec& getCoarseVecSol(int coarseSpace = 0)
+    {
+      return vecSol[coarseSpace + 1];
+    }
+
+    inline int getStartInterior() const
+    {
+      return rStartInterior;
+    }
+
+  protected:
+    void updateSubdomainData();
+
   private:
     vector<vector<Mat> > mat;
 
+    vector<Vec> vecSol, vecRhs;
+
     vector<vector<MatrixNnzStructure> > nnz;
 
     ParallelDofMapping *interiorMap;
@@ -131,6 +168,10 @@ namespace AMDiS {
     MPI::Intracomm mpiCommGlobal;
 
+    int rStartInterior;
+
+    int nGlobalOverallInterior;
+
     /// Stores the mesh change index of the mesh the nnz structure was created for.
     /// Therefore, if the mesh change index is higher than this value, we have to create
     /// a new nnz structure for PETSc matrices, because the mesh has been changed and
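The new accessors fix the block layout convention of the nested containers: index 0 is always the interior block and index i + 1 the i-th coarse space, so getCoarseVecSol(i) is vecSol[i + 1] and the coupling matrices live at mat[i + 1][j] and mat[j][i + 1]. A tiny sketch of that indexing convention with plain STL containers (strings stand in for the PETSc Mat/Vec handles; purely illustrative):

// Illustrative sketch only: index 0 = interior, index i + 1 = coarse space i.
#include <cstdio>
#include <string>
#include <vector>

int main()
{
  int nCoarseSpaces = 2;
  int nBlocks = nCoarseSpaces + 1;  // block 0 = interior, block i + 1 = coarse space i

  std::vector<std::vector<std::string> > mat(nBlocks,
                                             std::vector<std::string>(nBlocks));
  std::vector<std::string> vecSol(nBlocks), vecRhs(nBlocks);

  for (int i = 0; i < nBlocks; i++) {
    vecSol[i] = "sol block " + std::to_string(i);
    vecRhs[i] = "rhs block " + std::to_string(i);
    for (int j = 0; j < nBlocks; j++)
      mat[i][j] = "mat(" + std::to_string(i) + "," + std::to_string(j) + ")";
  }

  int coarseSpace = 1;
  // getInteriorVecSol()          -> vecSol[0]
  // getCoarseVecSol(coarseSpace) -> vecSol[coarseSpace + 1]
  std::printf("%s\n", vecSol[0].c_str());
  std::printf("%s\n", vecSol[coarseSpace + 1].c_str());
  // Coupling blocks of coarse space i with block j sit at mat[i+1][j] / mat[j][i+1].
  std::printf("%s\n", mat[coarseSpace + 1][0].c_str());
  return 0;
}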
AMDiS/src/parallel/PetscSolver.cc
@@ -60,30 +60,6 @@ namespace AMDiS {
   }
 
-  void PetscSolver::updateSubdomainData()
-  {
-    FUNCNAME("PetscSolver::updateSubdomainData()");
-
-    if (mpiCommLocal.Get_size() == 1) {
-      rStartInterior = 0;
-      nGlobalOverallInterior = interiorMap->getOverallDofs();
-    } else {
-      int groupRowsInterior = 0;
-      if (mpiCommLocal.Get_rank() == 0)
-        groupRowsInterior = interiorMap->getOverallDofs();
-
-      mpi::getDofNumbering(mpiCommGlobal, groupRowsInterior,
-                           rStartInterior, nGlobalOverallInterior);
-
-      int tmp = 0;
-      if (mpiCommLocal.Get_rank() == 0)
-        tmp = rStartInterior;
-
-      mpiCommLocal.Allreduce(&tmp, &rStartInterior, 1, MPI_INT, MPI_SUM);
-    }
-  }
-
   void PetscSolver::solve(Vec &rhs, Vec &sol)
   {
     FUNCNAME("PetscSolver::solve()");
AMDiS/src/parallel/PetscSolver.h
@@ -168,12 +168,27 @@ namespace AMDiS {
       TEST_EXIT_DBG(coarseSpaceMap.size())
         ("Subdomain solver does not contain a coarse space!\n");
 
-      return rhsCoarseSpace;
+      return petscData.getCoarseVecRhs();
     }
 
     inline Vec& getRhsInterior()
     {
-      return rhsInterior;
+      return petscData.getInteriorVecRhs();
     }
 
+    inline Vec& getSolCoarseSpace()
+    {
+      FUNCNAME("PetscSolver::getSolCoarseSpace()");
+
+      TEST_EXIT_DBG(coarseSpaceMap.size())
+        ("Subdomain solver does not contain a coarse space!\n");
+
+      return petscData.getCoarseVecSol();
+    }
+
+    inline Vec& getSolInterior()
+    {
+      return petscData.getInteriorVecSol();
+    }
+
     inline Mat& getMatIntInt()
@@ -232,17 +247,11 @@ namespace AMDiS {
     /// Returns a vector with the FE spaces of all components of a system vector.
     vector<const FiniteElemSpace*> getFeSpaces(SystemVector *vec);
 
-    void updateSubdomainData();
-
   protected:
     MeshDistributor *meshDistributor;
 
     int subdomainLevel;
 
-    int rStartInterior;
-
-    int nGlobalOverallInterior;
-
     ParallelDofMapping *interiorMap;
 
     /// Parallel DOF mapping of the (optional) coarse space. Allows to define
@@ -259,12 +268,6 @@ namespace AMDiS {
     /// a coarse space defined).
     ParallelCoarseSpaceMatVec petscData;
 
-    /// PETSc's vector structures for the rhs vector, the solution vector and a
-    /// temporary vector for calculating the final residuum.
-    Vec rhsInterior;
-
-    Vec rhsCoarseSpace;
-
     /// PETSc solver object
     KSP kspInterior;
AMDiS/src/parallel/PetscSolverGlobalBlockMatrix.cc
@@ -93,7 +93,7 @@ namespace AMDiS {
     MSG("Fill petsc matrix 2 needed %.5f seconds\n", MPI::Wtime() - wtime);
 #endif
 
-    petscData.assembly();
+    petscData.matAssembly();
 
     // === Init PETSc solver. ===
     KSPCreate(mpiCommGlobal, &kspInterior);
@@ -129,10 +129,9 @@ namespace AMDiS {
     }
 
     VecCreateNest(mpiCommGlobal, nComponents, PETSC_NULL,
-                  &(nestVec[0]), &rhsInterior);
+                  &(nestVec[0]), &(getRhsInterior()));
 
-    VecAssemblyBegin(rhsInterior);
-    VecAssemblyEnd(rhsInterior);
+    petscData.vecRhsAssembly();
   }
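In the block solver the per-component vectors are still wrapped into one logical PETSc vector with VecCreateNest; only the target handle now comes from getRhsInterior() and the final assembly is delegated to petscData.vecRhsAssembly(). A minimal standalone VecCreateNest sketch follows; the component sizes and values are invented and this is not the AMDiS code.

// Illustrative sketch only: nesting per-component vectors with VecCreateNest.
#include <petscvec.h>

int main(int argc, char **argv)
{
  PetscInitialize(&argc, &argv, PETSC_NULL, PETSC_NULL);

  // Two component vectors, e.g. one per solution component.
  Vec comp[2];
  VecCreateMPI(PETSC_COMM_WORLD, 4, PETSC_DETERMINE, &comp[0]);
  VecCreateMPI(PETSC_COMM_WORLD, 4, PETSC_DETERMINE, &comp[1]);
  VecSet(comp[0], 1.0);
  VecSet(comp[1], 2.0);

  // One logical vector over both components; PETSC_NULL lets PETSc
  // build the index sets that describe the sub-blocks.
  Vec nested;
  VecCreateNest(PETSC_COMM_WORLD, 2, PETSC_NULL, comp, &nested);

  VecAssemblyBegin(nested);
  VecAssemblyEnd(nested);

  VecDestroy(&nested);
  VecDestroy(&comp[0]);
  VecDestroy(&comp[1]);
  PetscFinalize();
  return 0;
}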
@@ -145,10 +144,10 @@ namespace AMDiS {
       setBlockPreconditioner(pcInterior);
 
     const FiniteElemSpace *feSpace = meshDistributor->getFeSpace(0);
-    VecDuplicate(rhsInterior, &petscSolVec);
+    VecDuplicate(getRhsInterior(), &petscSolVec);
 
     // PETSc.
-    solve(rhsInterior, petscSolVec);
+    solve(getRhsInterior(), petscSolVec);
 
     // === Transfere values from PETSc's solution vectors to the DOF vectors. ===
     for (int i = 0; i < nComponents; i++) {
@@ -184,7 +183,7 @@ namespace AMDiS {
       if (nestMat[i] != PETSC_NULL)
         MatDestroy(&(nestMat[i]));
 
-    petscData.destroy();
+    petscData.matDestroy();
 
     KSPDestroy(&kspInterior);
   }
@@ -194,9 +193,7 @@ namespace AMDiS {
   {
     FUNCNAME("PetscSolverGlobalBlockMatrix::destroyVectorData()");
 
-    VecDestroy(&rhsInterior);
     for (int i = 0; i < nComponents; i++)
       VecDestroy(&(nestVec[i]));
 
+    petscData.vecDestroy();
+
     VecDestroy(&petscSolVec);
   }
AMDiS/src/parallel/PetscSolverGlobalMatrix.cc
@@ -21,44 +21,28 @@ namespace AMDiS {
   {
     FUNCNAME("PetscSolverGlobalMatrix::fillPetscMatrix()");
 
-    TEST_EXIT_DBG(meshDistributor)("No mesh distributor object defined!\n");
-    TEST_EXIT_DBG(interiorMap)("No parallel mapping object defined!\n");
-    TEST_EXIT_DBG(seqMat)("No DOF matrix defined!\n");
-
-    double wtime = MPI::Wtime();
-
+    petscData.init(interiorMap, coarseSpaceMap,
+                   subdomainLevel, mpiCommLocal, mpiCommGlobal,
+                   meshDistributor);
+    petscData.create(*seqMat);
+
     if (coarseSpaceMap.size()) {
-      updateSubdomainData();
       fillPetscMatrixWithCoarseSpace(seqMat);
       return;
     }
 
+    TEST_EXIT_DBG(meshDistributor)("No mesh distributor object defined!\n");
+    TEST_EXIT_DBG(interiorMap)("No parallel mapping object defined!\n");
+    TEST_EXIT_DBG(seqMat)("No DOF matrix defined!\n");
+
+    double wtime = MPI::Wtime();
+
     // === Create PETSc vector (solution and a temporary vector). ===
 
     int nRankRows = interiorMap->getRankDofs();
     int nOverallRows = interiorMap->getOverallDofs();
 
     VecCreateMPI(mpiCommGlobal, nRankRows, nOverallRows, &petscSolVec);
 
 #if (DEBUG != 0)
     MSG("Fill petsc matrix 1 needed %.5f seconds\n", MPI::Wtime() - wtime);
 #endif
 
-#if (DEBUG != 0)
-    int a, b;
-    MatGetOwnershipRange(petscData.getInteriorMat(), &a, &b);
-    TEST_EXIT(a == interiorMap->getStartDofs())("Wrong matrix ownership range!\n");
-    TEST_EXIT(b == interiorMap->getStartDofs() + nRankRows)
-      ("Wrong matrix ownership range!\n");
-#endif
-
     // === Transfer values from DOF matrices to the PETSc matrix. ===
 
     int nComponents = seqMat->getNumRows();
@@ -71,7 +55,7 @@ namespace AMDiS {
     MSG("Fill petsc matrix 2 needed %.5f seconds\n", MPI::Wtime() - wtime);
 #endif
 
-    petscData.assembly();
+    petscData.matAssembly();
 
     if (printMatInfo) {
       MatInfo matInfo;
@@ -134,7 +118,7 @@ namespace AMDiS {
     int nRowsRankInterior = interiorMap->getRankDofs();
     int nRowsOverallInterior = interiorMap->getOverallDofs();
 
+    int rStartInterior = petscData.getStartInterior();
 
     // === Prepare traverse of sequentially created matrices. ===
@@ -265,7 +249,7 @@ namespace AMDiS {
       }
     }
 
-    petscData.assembly();
+    petscData.matAssembly();
 
     // === Create solver for the non primal (thus local) variables. ===
@@ -290,42 +274,19 @@ namespace AMDiS {
   {
     FUNCNAME("PetscSolverGlobalMatrix::fillPetscRhs()");
 
-    VecCreateMPI(mpiCommGlobal, interiorMap->getRankDofs(),
-                 nGlobalOverallInterior, &rhsInterior);
-
-    if (coarseSpaceMap.size())
-      VecCreateMPI(mpiCommGlobal,
-                   coarseSpaceMap[0]->getRankDofs(),
-                   coarseSpaceMap[0]->getOverallDofs(),
-                   &rhsCoarseSpace);
-
     TEST_EXIT_DBG(vec)("No DOF vector defined!\n");
     TEST_EXIT_DBG(interiorMap)("No parallel DOF map defined!\n");
 
     // === Transfer values from DOF vector to the PETSc vector. ===
     if (coarseSpaceMap.size()) {
       for (int i = 0; i < vec->getSize(); i++)
-        setDofVector(rhsInterior, rhsCoarseSpace,
-                     vec->getDOFVector(i), i);
+        setDofVector(getRhsInterior(), getRhsCoarseSpace(),
+                     vec->getDOFVector(i), i);
     } else {
       for (int i = 0; i < vec->getSize(); i++)
-        setDofVector(rhsInterior, vec->getDOFVector(i), i);
-    }
-
-    VecAssemblyBegin(rhsInterior);
-    VecAssemblyEnd(rhsInterior);
-
-    if (coarseSpaceMap.size()) {
-      VecAssemblyBegin(rhsCoarseSpace);
-      VecAssemblyEnd(rhsCoarseSpace);
+        setDofVector(getRhsInterior(), vec->getDOFVector(i), i);
     }
 
     // === Remove Dirichlet BC DOFs. ===
     // removeDirichletBcDofs(vec);
 
+    petscData.vecRhsAssembly();
   }
@@ -338,13 +299,14 @@ namespace AMDiS {
     // === Set old solution to be initiual guess for PETSc solver. ===
     if (!zeroStartVector) {
-      VecSet(petscSolVec, 0.0);
+      TEST_EXIT(coarseSpaceMap.size() == 0)("Not yet supported!\n");
+
+      VecSet(getSolInterior(), 0.0);
 
       for (int i = 0; i < nComponents; i++)
-        setDofVector(petscSolVec, vec.getDOFVector(i), i, true);
-
-      VecAssemblyBegin(petscSolVec);
-      VecAssemblyEnd(petscSolVec);
+        setDofVector(getSolInterior(), vec.getDOFVector(i), i, true);
+
+      petscData.vecSolAssembly();
     }
@@ -366,7 +328,7 @@ namespace AMDiS {
     }
 
     if (nullspace.size() > 0) {
-      VecDuplicate(petscSolVec, &nullspaceBasis);
+      VecDuplicate(getSolInterior(), &nullspaceBasis);