Commit 169b9d9a
authored Aug 22, 2012 by Thomas Witkowski
Added more features to FETI-DP, split into some more files.
parent ecba737f
Showing 21 changed files with 449 additions and 333 deletions (+449 -333).
AMDiS/CMakeLists.txt (+2 -0)
AMDiS/src/DOFVector.h (+1 -0)
AMDiS/src/LeafData.h (+2 -4)
AMDiS/src/parallel/DofComm.cc (+1 -0)
AMDiS/src/parallel/DofComm.h (+1 -0)
AMDiS/src/parallel/ElementObjectDatabase.cc (+1 -0)
AMDiS/src/parallel/InteriorBoundary.h (+0 -1)
AMDiS/src/parallel/MeshLevelData.h (+4 -3)
AMDiS/src/parallel/MpiHelper.cc (+5 -3)
AMDiS/src/parallel/MpiHelper.h (+3 -10)
AMDiS/src/parallel/ParallelCoarseSpaceMatVec.cc (+3 -14)
AMDiS/src/parallel/ParallelDofMapping.cc (+12 -0)
AMDiS/src/parallel/ParallelDofMapping.h (+24 -15)
AMDiS/src/parallel/PetscSolver.h (+4 -4)
AMDiS/src/parallel/PetscSolverFeti.cc (+44 -257)
AMDiS/src/parallel/PetscSolverFeti.h (+3 -21)
AMDiS/src/parallel/PetscSolverFetiOperators.cc (+206 -0)
AMDiS/src/parallel/PetscSolverFetiOperators.h (+44 -0)
AMDiS/src/parallel/PetscSolverFetiStructs.h (+12 -1)
AMDiS/src/parallel/PetscSolverFetiTimings.cc (+31 -0)
AMDiS/src/parallel/PetscSolverFetiTimings.h (+46 -0)
AMDiS/CMakeLists.txt

@@ -254,6 +254,8 @@ if(ENABLE_PARALLEL_DOMAIN)
 	${SOURCE_DIR}/parallel/PetscSolver.cc
 	${SOURCE_DIR}/parallel/PetscProblemStat.cc
 	${SOURCE_DIR}/parallel/PetscSolverFeti.cc
+	${SOURCE_DIR}/parallel/PetscSolverFetiOperators.cc
+	${SOURCE_DIR}/parallel/PetscSolverFetiTimings.cc
 	${SOURCE_DIR}/parallel/PetscSolverGlobalMatrix.cc
 	${SOURCE_DIR}/parallel/PetscSolverGlobalBlockMatrix.cc
 	${SOURCE_DIR}/parallel/PetscSolverSchur.cc
 )
AMDiS/src/DOFVector.h

@@ -43,6 +43,7 @@
 #include "FiniteElemSpace.h"
 #include "SurfaceQuadrature.h"
 #ifdef HAVE_PARALLEL_DOMAIN_AMDIS
 #include <mpi.h>
 #include "parallel/ParallelDofMapping.h"
 #endif
AMDiS/src/LeafData.h

@@ -67,10 +67,8 @@ namespace AMDiS {
       errorEstimate(0.0)
     {}
-    /** \brief
-     * Refinement of parent to child1 and child2.
-     * @return true: must this ElementData, else not allowed to delete it
-     */
+    /// Refinement of parent to child1 and child2.
+    /// @return true: must this ElementData, else not allowed to delete it
     bool refineElementData(Element* parent,
                            Element* child1,
                            Element* child2,
...
AMDiS/src/parallel/DofComm.cc

@@ -12,6 +12,7 @@
 #include "parallel/DofComm.h"
 #include "parallel/InteriorBoundary.h"
 #include "parallel/MeshLevelData.h"
 #include "FiniteElemSpace.h"

 namespace AMDiS {
AMDiS/src/parallel/DofComm.h

@@ -23,6 +23,7 @@
 #ifndef AMDIS_DOF_COMM_H
 #define AMDIS_DOF_COMM_H
 #include <mpi.h>
 #include <map>
 #include "parallel/ParallelTypes.h"
 #include "FiniteElemSpace.h"
AMDiS/src/parallel/ElementObjectDatabase.cc

@@ -12,6 +12,7 @@
 #include "VertexVector.h"
 #include "parallel/ElementObjectDatabase.h"
 #include "parallel/MeshLevelData.h"

 namespace AMDiS {
AMDiS/src/parallel/InteriorBoundary.h

@@ -28,7 +28,6 @@
 #include "AMDiS_fwd.h"
 #include "BoundaryObject.h"
 #include "parallel/MeshLevelData.h"
 #include "parallel/ParallelTypes.h"

 namespace AMDiS {
AMDiS/src/parallel/MeshLevelData.h

@@ -20,15 +20,16 @@
 /** \file MeshLevelData.h */
+#ifndef AMDIS_MESH_LEVEL_DATA_H
+#define AMDIS_MESH_LEVEL_DATA_H
 #include <iostream>
 #include <set>
 #include <vector>
+#include <mpi.h>
 #include "Global.h"
-#ifndef AMDIS_MESH_LEVEL_DATA_H
-#define AMDIS_MESH_LEVEL_DATA_H

 namespace AMDiS {

 using namespace std;
AMDiS/src/parallel/MpiHelper.cc

@@ -10,10 +10,9 @@
 // See also license.opensource.txt in the distribution.
 #include <mpi.h>
 #include "MpiHelper.h"
 #ifdef HAVE_PARALLEL_DOMAIN_AMDIS
 namespace AMDiS {
 namespace mpi {

@@ -54,7 +53,10 @@ namespace AMDiS {
       MPI::COMM_WORLD.Allreduce(&valCopy, &value, 1, MPI_INT, MPI_MAX);
     }
+
+    void startRand()
+    {
+      srand(time(NULL) * (MPI::COMM_WORLD.Get_rank() + 1));
+    }
   }
 }
 #endif
AMDiS/src/parallel/MpiHelper.h

@@ -23,12 +23,10 @@
 #ifndef AMDIS_MPIHELPER_H
 #define AMDIS_MPIHELPER_H
 #ifdef HAVE_PARALLEL_DOMAIN_AMDIS
-#include "Global.h"
-#include <mpi.h>
-#include <time.h>
-#include <stdlib.h>
+#include <mpi.h>
+#include "Global.h"

 namespace AMDiS {

@@ -63,10 +61,7 @@ namespace AMDiS {
       WARNING("Unknown type for globalMax. Can not determine maximal value of all processors!\n");
     }
-    inline void startRand()
-    {
-      srand(time(NULL) * (MPI::COMM_WORLD.Get_rank() + 1));
-    }
+    void startRand();

     /** \brief
      * In many situations a rank computes a number of local DOFs. Then all
...

@@ -97,5 +92,3 @@ namespace AMDiS {
 }
 #endif
 #endif
AMDiS/src/parallel/ParallelCoarseSpaceMatVec.cc

@@ -191,13 +191,8 @@ namespace AMDiS {
       nOverallCoarseRows, nOverallCoarseRows,
       0, nnz[i + 1][i + 1].dnnz,
       0, nnz[i + 1][i + 1].onnz,
       &mat[i + 1][i + 1]);
-    MSG("REMOVE THIS!\n");
-    MatSetOption(mat[i + 1][i + 1], MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);
-    VecCreateMPI(mpiCommGlobal, nRankCoarseRows, nOverallCoarseRows, &vecSol[i + 1]);
-    VecCreateMPI(mpiCommGlobal, nRankCoarseRows, nOverallCoarseRows, &vecRhs[i + 1]);
+    cMap->createVec(vecSol[i + 1]);
+    cMap->createVec(vecRhs[i + 1]);
   }

   for (int i = 0; i < nCoarseMap + 1; i++) {
...

@@ -215,18 +210,12 @@ namespace AMDiS {
       nRowsRankMat, nColsRankMat,
       nRowsOverallMat, nColsOverallMat,
       0, nnz[i][j].dnnz,
       0, nnz[i][j].onnz,
       &mat[i][j]);
-    MSG("REMOVE THIS!\n");
-    MatSetOption(mat[i][j], MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);
     MatCreateAIJ(mpiCommGlobal,
       nColsRankMat, nRowsRankMat,
       nColsOverallMat, nRowsOverallMat,
       0, nnz[j][i].dnnz,
       0, nnz[j][i].onnz,
       &mat[j][i]);
-    MSG("REMOVE THIS!\n");
-    MatSetOption(mat[j][i], MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);
   }
 }
...
AMDiS/src/parallel/ParallelDofMapping.cc

@@ -11,6 +11,7 @@
 #include "parallel/ParallelDofMapping.h"
 #include "parallel/MeshLevelData.h"
 #include "parallel/StdMpi.h"

 namespace AMDiS {

@@ -37,6 +38,17 @@ namespace AMDiS {
 }

+FeSpaceDofMap::FeSpaceDofMap(MeshLevelData* ld)
+  : levelData(ld),
+    dofComm(NULL),
+    feSpace(NULL),
+    needGlobalMapping(false),
+    isNonLocal(false)
+{
+  clear();
+}

 void FeSpaceDofMap::clear()
 {
   dofMap.clear();
...
AMDiS/src/parallel/ParallelDofMapping.h

@@ -20,20 +20,22 @@
 /** \file FeSpaceMapping.h */
+#ifndef AMDIS_FE_SPACE_MAPPING_H
+#define AMDIS_FE_SPACE_MAPPING_H
+#include <mpi.h>
 #include <vector>
 #include <map>
 #include <set>
+#include <petsc.h>
+#include <petscis.h>
 #include "AMDiS_fwd.h"
 #include "parallel/DofComm.h"
 #include "parallel/MeshLevelData.h"
 #include "parallel/MpiHelper.h"
 #include "parallel/ParallelTypes.h"
 #include "parallel/StdMpi.h"
-#include <petscis.h>
-#ifndef AMDIS_FE_SPACE_MAPPING_H
-#define AMDIS_FE_SPACE_MAPPING_H

 namespace AMDiS {

 using namespace std;

@@ -110,15 +112,7 @@ namespace AMDiS {
   }

   /// This is the only valid constructur to be used.
-  FeSpaceDofMap(MeshLevelData* ld)
-    : levelData(ld),
-      dofComm(NULL),
-      feSpace(NULL),
-      needGlobalMapping(false),
-      isNonLocal(false)
-  {
-    clear();
-  }
+  FeSpaceDofMap(MeshLevelData* ld);

   /// Clears all data of the mapping.
   void clear();

@@ -483,6 +477,21 @@ namespace AMDiS {
     int firstComponent, int nComponents);

+  /// Create a parallel distributed PETSc vector based on this mapping.
+  inline void createVec(Vec &vec)
+  {
+    VecCreateMPI(mpiComm, getRankDofs(), getOverallDofs(), &vec);
+  }
+
+  /// Create a parallel distributed PETsc vector based on this mapping but
+  /// with a different (larger) global size. This is used in multi-level
+  /// method to embed a local vector into a subdomain spaned by several
+  /// ranks.
+  inline void createVec(Vec &vec, int nGlobalRows)
+  {
+    VecCreateMPI(mpiComm, getRankDofs(), nGlobalRows, &vec);
+  }

 protected:
   /// Insert a new FE space DOF mapping for a given FE space.
   void addFeSpace(const FiniteElemSpace* feSpace);
...
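The new createVec helpers replace the repeated VecCreateMPI boilerplate at the call sites further below. A minimal usage sketch, assuming a ParallelDofMapping instance like the localDofMap/primalDofMap members used in PetscSolverFeti.cc (the function and variable names here are illustrative only, not part of the commit):

  #include <petsc.h>
  #include "parallel/ParallelDofMapping.h"

  // Create two temporary vectors sized by a DOF mapping: one with the
  // mapping's own global size, one embedded into a larger global vector.
  void createTmpVecs(AMDiS::ParallelDofMapping &dofMap, int nGlobalRows)
  {
    Vec tmp, tmpEmbedded;
    dofMap.createVec(tmp);                      // local: rank DOFs, global: overall DOFs
    dofMap.createVec(tmpEmbedded, nGlobalRows); // same local size, larger global size
    // ... use the vectors ...
    VecDestroy(&tmp);
    VecDestroy(&tmpEmbedded);
  }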
AMDiS/src/parallel/PetscSolver.h

@@ -26,6 +26,10 @@
 #include <set>
 #include <map>
 #include <mpi.h>
+#include <petsc.h>
+#include <petscsys.h>
+#include <petscao.h>
+#include <petscksp.h>
 #include "AMDiS_fwd.h"
 #include "Global.h"
...

@@ -33,10 +37,6 @@
 #include "DOFMatrix.h"
 #include "parallel/MeshDistributor.h"
 #include "parallel/ParallelCoarseSpaceMatVec.h"
-#include <petsc.h>
-#include <petscsys.h>
-#include <petscao.h>
-#include <petscksp.h>

 namespace AMDiS {
AMDiS/src/parallel/PetscSolverFeti.cc

@@ -14,6 +14,8 @@
 #include "MatrixVector.h"
 #include "parallel/PetscSolverFeti.h"
 #include "parallel/PetscSolverFetiStructs.h"
+#include "parallel/PetscSolverFetiOperators.h"
+#include "parallel/PetscSolverFetiTimings.h"
 #include "parallel/StdMpi.h"
 #include "parallel/MpiHelper.h"
 #include "parallel/PetscSolverGlobalMatrix.h"
@@ -23,201 +25,6 @@ namespace AMDiS {
 using namespace std;

(The timing counters and shell operator/preconditioner callbacks removed in this hunk reappear in the new files PetscSolverFetiOperators.cc and PetscSolverFetiTimings.cc.)

-double FetiTimings::fetiSolve = 0.0;
-double FetiTimings::fetiSolve01 = 0.0;
-double FetiTimings::fetiSolve02 = 0.0;
-double FetiTimings::fetiPreconditioner = 0.0;
-
-// y = mat * x
-int petscMultMatSchurPrimal(Mat mat, Vec x, Vec y)
-{
-  // S_PiPi = K_PiPi - K_PiB inv(K_BB) K_BPi
-  void *ctx;
-  MatShellGetContext(mat, &ctx);
-  SchurPrimalData* data = static_cast<SchurPrimalData*>(ctx);
-
-  MatMult(data->subSolver->getMatInteriorCoarse(), x, data->tmp_vec_b);
-  data->subSolver->solveGlobal(data->tmp_vec_b, data->tmp_vec_b);
-  MatMult(data->subSolver->getMatCoarseInterior(),
-          data->tmp_vec_b, data->tmp_vec_primal);
-  MatMult(data->subSolver->getMatCoarse(), x, y);
-  VecAXPBY(y, -1.0, 1.0, data->tmp_vec_primal);
-
-  return 0;
-}
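In the notation of the comment above, this shell matrix applies the primal Schur complement matrix-free,

\[
S_{\Pi\Pi} = K_{\Pi\Pi} - K_{\Pi B}\, K_{BB}^{-1} K_{B\Pi}, \qquad y = S_{\Pi\Pi}\, x,
\]

realized as one interior solve (solveGlobal) plus coarse-space multiplications and a final VecAXPBY.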
-// y = mat * x
-int petscMultMatFeti(Mat mat, Vec x, Vec y)
-{
-  FUNCNAME("petscMultMatFeti()");
-
-  // F = L inv(K_BB) trans(L) + L inv(K_BB) K_BPi inv(S_PiPi) K_PiB inv(K_BB) trans(L)
-  // => F = L [I + inv(K_BB) K_BPi inv(S_PiPi) K_PiB] inv(K_BB) trans(L)
-
-  double wtime = MPI::Wtime();
-
-  void *ctx;
-  MatShellGetContext(mat, &ctx);
-  FetiData* data = static_cast<FetiData*>(ctx);
-
-  MatMultTranspose(*(data->mat_lagrange), x, data->tmp_vec_b);
-
-  double wtime01 = MPI::Wtime();
-  data->subSolver->solveGlobal(data->tmp_vec_b, data->tmp_vec_b);
-  FetiTimings::fetiSolve01 += (MPI::Wtime() - wtime01);
-
-  MatMult(*(data->mat_lagrange), data->tmp_vec_b, data->tmp_vec_lagrange);
-  MatMult(data->subSolver->getMatCoarseInterior(),
-          data->tmp_vec_b, data->tmp_vec_primal);
-
-  wtime01 = MPI::Wtime();
-  KSPSolve(*(data->ksp_schur_primal), data->tmp_vec_primal, data->tmp_vec_primal);
-  FetiTimings::fetiSolve02 += (MPI::Wtime() - wtime01);
-
-  MatMult(data->subSolver->getMatInteriorCoarse(),
-          data->tmp_vec_primal, data->tmp_vec_b);
-
-  wtime01 = MPI::Wtime();
-  data->subSolver->solveGlobal(data->tmp_vec_b, data->tmp_vec_b);
-  FetiTimings::fetiSolve01 += (MPI::Wtime() - wtime01);
-
-  MatMult(*(data->mat_lagrange), data->tmp_vec_b, y);
-  VecAXPBY(y, 1.0, 1.0, data->tmp_vec_lagrange);
-
-  FetiTimings::fetiSolve += (MPI::Wtime() - wtime);
-
-  return 0;
-}
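Written out, the two comment lines above define the reduced FETI-DP interface operator applied by this callback, with L the Lagrange constraint (jump) matrix:

\[
F = L K_{BB}^{-1} L^{T} + L K_{BB}^{-1} K_{B\Pi}\, S_{\Pi\Pi}^{-1} K_{\Pi B} K_{BB}^{-1} L^{T}
  = L \bigl[ I + K_{BB}^{-1} K_{B\Pi}\, S_{\Pi\Pi}^{-1} K_{\Pi B} \bigr] K_{BB}^{-1} L^{T},
\]

where the inner S_{\Pi\Pi} solve is delegated to ksp_schur_primal.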
-// y = PC * x
-PetscErrorCode petscApplyFetiDirichletPrecon(PC pc, Vec x, Vec y)
-{
-  double wtime = MPI::Wtime();
-
-  // Get data for the preconditioner
-  void *ctx;
-  PCShellGetContext(pc, &ctx);
-  FetiDirichletPreconData* data = static_cast<FetiDirichletPreconData*>(ctx);
-
-  // Multiply with scaled Lagrange constraint matrix.
-  MatMultTranspose(*(data->mat_lagrange_scaled), x, data->tmp_vec_b);
-
-  // === Restriction of the B nodes to the boundary nodes. ===
-  int nLocalB;
-  int nLocalDuals;
-  VecGetLocalSize(data->tmp_vec_b, &nLocalB);
-  VecGetLocalSize(data->tmp_vec_duals0, &nLocalDuals);
-
-  PetscScalar *local_b, *local_duals;
-  VecGetArray(data->tmp_vec_b, &local_b);
-  VecGetArray(data->tmp_vec_duals0, &local_duals);
-
-  for (map<int, int>::iterator it = data->localToDualMap.begin();
-       it != data->localToDualMap.end(); ++it)
-    local_duals[it->second] = local_b[it->first];
-
-  VecRestoreArray(data->tmp_vec_b, &local_b);
-  VecRestoreArray(data->tmp_vec_duals0, &local_duals);
-
-  // === K_DD - K_DI inv(K_II) K_ID ===
-  MatMult(*(data->mat_duals_duals), data->tmp_vec_duals0, data->tmp_vec_duals1);
-
-  MatMult(*(data->mat_interior_duals), data->tmp_vec_duals0, data->tmp_vec_interior);
-  KSPSolve(*(data->ksp_interior), data->tmp_vec_interior, data->tmp_vec_interior);
-  MatMult(*(data->mat_duals_interior), data->tmp_vec_interior, data->tmp_vec_duals0);
-  VecAXPBY(data->tmp_vec_duals0, 1.0, -1.0, data->tmp_vec_duals1);
-
-  // === Prolongation from local dual nodes to B nodes.
-  VecGetArray(data->tmp_vec_b, &local_b);
-  VecGetArray(data->tmp_vec_duals0, &local_duals);
-
-  for (map<int, int>::iterator it = data->localToDualMap.begin();
-       it != data->localToDualMap.end(); ++it)
-    local_b[it->first] = local_duals[it->second];
-
-  VecRestoreArray(data->tmp_vec_b, &local_b);
-  VecRestoreArray(data->tmp_vec_duals0, &local_duals);
-
-  // Multiply with scaled Lagrange constraint matrix.
-  MatMult(*(data->mat_lagrange_scaled), data->tmp_vec_b, y);
-
-  FetiTimings::fetiPreconditioner += (MPI::Wtime() - wtime);
-
-  return 0;
-}
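Up to the restriction/prolongation between B nodes and dual nodes coded in the two loops, the Dirichlet preconditioner above applies (a sketch in the notation of its comments, with \(\tilde{L}\) the scaled Lagrange constraint matrix mat_lagrange_scaled, D the dual and I the interior unknowns):

\[
y = \tilde{L} \bigl( K_{DD} - K_{DI}\, K_{II}^{-1} K_{ID} \bigr) \tilde{L}^{T} x .
\]

The lumped variant below keeps only the K_{DD} term and therefore needs no interior KSP solve.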
-// y = PC * x
-PetscErrorCode petscApplyFetiLumpedPrecon(PC pc, Vec x, Vec y)
-{
-  // Get data for the preconditioner
-  void *ctx;
-  PCShellGetContext(pc, &ctx);
-  FetiLumpedPreconData* data = static_cast<FetiLumpedPreconData*>(ctx);
-
-  // Multiply with scaled Lagrange constraint matrix.
-  MatMultTranspose(*(data->mat_lagrange_scaled), x, data->tmp_vec_b);
-
-  // === Restriction of the B nodes to the boundary nodes. ===
-  int nLocalB;
-  int nLocalDuals;
-  VecGetLocalSize(data->tmp_vec_b, &nLocalB);
-  VecGetLocalSize(data->tmp_vec_duals0, &nLocalDuals);
-
-  PetscScalar *local_b, *local_duals;
-  VecGetArray(data->tmp_vec_b, &local_b);
-  VecGetArray(data->tmp_vec_duals0, &local_duals);
-
-  for (map<int, int>::iterator it = data->localToDualMap.begin();
-       it != data->localToDualMap.end(); ++it)
-    local_duals[it->second] = local_b[it->first];
-
-  VecRestoreArray(data->tmp_vec_b, &local_b);
-  VecRestoreArray(data->tmp_vec_duals0, &local_duals);
-
-  // === K_DD ===
-  MatMult(*(data->mat_duals_duals), data->tmp_vec_duals0, data->tmp_vec_duals1);
-
-  // === Prolongation from local dual nodes to B nodes.
-  VecGetArray(data->tmp_vec_b, &local_b);
-  VecGetArray(data->tmp_vec_duals1, &local_duals);
-
-  for (map<int, int>::iterator it = data->localToDualMap.begin();
-       it != data->localToDualMap.end(); ++it)
-    local_b[it->first] = local_duals[it->second];
-
-  VecRestoreArray(data->tmp_vec_b, &local_b);
-  VecRestoreArray(data->tmp_vec_duals0, &local_duals);
-
-  // Multiply with scaled Lagrange constraint matrix.
-  MatMult(*(data->mat_lagrange_scaled), data->tmp_vec_b, y);
-
-  return 0;
-}
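For orientation, a minimal sketch of how shell objects of this kind are typically wired to a PETSc KSP (illustrative only; comm, the sizes, and the context variables are assumptions, and KSPSetOperators is shown in its modern two-matrix form, while older PETSc versions took an additional MatStructure argument):

  // Matrix-free FETI operator: MatMult dispatches to petscMultMatFeti.
  Mat F;
  MatCreateShell(comm, nRankRows, nRankRows, nOverallRows, nOverallRows,
                 &fetiData, &F);
  MatShellSetOperation(F, MATOP_MULT, (void (*)(void)) petscMultMatFeti);

  // Shell preconditioner: PCApply dispatches to petscApplyFetiDirichletPrecon.
  KSP ksp;
  KSPCreate(comm, &ksp);
  KSPSetOperators(ksp, F, F);
  PC pc;
  KSPGetPC(ksp, &pc);
  PCSetType(pc, PCSHELL);
  PCShellSetContext(pc, &fetiDirichletPreconData);
  PCShellSetApply(pc, petscApplyFetiDirichletPrecon);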
 PetscSolverFeti::PetscSolverFeti()
   : PetscSolver(),
     schurPrimalSolver(0),
...
@@ -804,14 +611,8 @@ namespace AMDiS {
   schurPrimalData.subSolver = subdomain;
-  VecCreateMPI(mpiCommGlobal,
-    localDofMap.getRankDofs(), nGlobalOverallInterior,
-    &(schurPrimalData.tmp_vec_b));
-  VecCreateMPI(mpiCommGlobal,
-    primalDofMap.getRankDofs(), primalDofMap.getOverallDofs(),
-    &(schurPrimalData.tmp_vec_primal));
+  localDofMap.createVec(schurPrimalData.tmp_vec_b, nGlobalOverallInterior);
+  primalDofMap.createVec(schurPrimalData.tmp_vec_primal);
   MatCreateShell(mpiCommGlobal,
     primalDofMap.getRankDofs(),
...
@@ -967,18 +768,9 @@ namespace AMDiS {
   fetiData.subSolver = subdomain;
   fetiData.ksp_schur_primal = &ksp_schur_primal;
-  VecCreateMPI(mpiCommGlobal,
-    localDofMap.getRankDofs(), nGlobalOverallInterior,
-    &(fetiData.tmp_vec_b));
-  VecCreateMPI(mpiCommGlobal,
-    lagrangeMap.getRankDofs(), lagrangeMap.getOverallDofs(),
-    &(fetiData.tmp_vec_lagrange));
-  VecCreateMPI(mpiCommGlobal,
-    primalDofMap.getRankDofs(), primalDofMap.getOverallDofs(),
-    &(fetiData.tmp_vec_primal));
+  localDofMap.createVec(fetiData.tmp_vec_b, nGlobalOverallInterior);
+  lagrangeMap.createVec(fetiData.tmp_vec_lagrange);
+  primalDofMap.createVec(fetiData.tmp_vec_primal);
   MatCreateShell(mpiCommGlobal,
     lagrangeMap.getRankDofs(),
...
@@ -1027,10 +819,8 @@ namespace AMDiS {
   fetiDirichletPreconData.mat_duals_interior = &mat_duals_interior;
   fetiDirichletPreconData.ksp_interior = &ksp_interior;
-  VecCreateMPI(mpiCommGlobal,
-    localDofMap.getRankDofs(), nGlobalOverallInterior,
-    &(fetiDirichletPreconData.tmp_vec_b));
+  localDofMap.createVec(fetiDirichletPreconData.tmp_vec_b, nGlobalOverallInterior);
   MatGetVecs(mat_duals_duals, PETSC_NULL, &(fetiDirichletPreconData.tmp_vec_duals0));
   MatGetVecs(mat_duals_duals, PETSC_NULL,
...
@@ -1084,10 +874,7 @@ namespace AMDiS {
     }
   }
-  VecCreateMPI(mpiCommGlobal,
-    localDofMap.getRankDofs(), localDofMap.getOverallDofs(),
-    &(fetiLumpedPreconData.tmp_vec_b));
+  localDofMap.createVec(fetiLumpedPreconData.tmp_vec_b);
   MatGetVecs(mat_duals_duals, PETSC_NULL, &(fetiLumpedPreconData.tmp_vec_duals0));
   MatGetVecs(mat_duals_duals, PETSC_NULL,
...
@@ -1219,12 +1006,8 @@ namespace AMDiS {
     lagrangeMap.getOverallDofs(), PETSC_NULL, &fetiMat);
-  VecCreateMPI(mpiCommGlobal,
-    lagrangeMap.getRankDofs(), lagrangeMap.getOverallDofs(),
-    &unitVector);
-  VecCreateMPI(mpiCommGlobal,
-    lagrangeMap.getRankDofs(), lagrangeMap.getOverallDofs(),
-    &resultVector);
+  lagrangeMap.createVec(unitVector);
+  lagrangeMap.createVec(resultVector);

   PetscInt low, high;
   VecGetOwnershipRange(unitVector, &low, &high);
...
@@ -1631,39 +1414,43 @@
 {
   FUNCNAME("PetscSolverFeti::solveReducedFetiMatrix()");

   // RHS vector.
   Vec vec_rhs, vec_sol;
   // Some temporary vectors.
   Vec tmp_b0, tmp_b1, tmp_lagrange0, tmp_primal0, tmp_primal1;
+  localDofMap.createVec(tmp_b0, nGlobalOverallInterior);
+  localDofMap.createVec(tmp_b1, nGlobalOverallInterior);
+  primalDofMap.createVec(tmp_primal0);
+  primalDofMap.createVec(tmp_primal1);
-  VecCreateMPI(mpiCommGlobal,
-    localDofMap.getRankDofs(), nGlobalOverallInterior, &tmp_b0);
-  VecCreateMPI(mpiCommGlobal,
-    localDofMap.getRankDofs(), nGlobalOverallInterior, &tmp_b1);
-  VecCreateMPI(mpiCommGlobal,
-    primalDofMap.getRankDofs(), primalDofMap.getOverallDofs(), &tmp_primal0);
-  VecCreateMPI(mpiCommGlobal,
-    primalDofMap.getRankDofs(), primalDofMap.getOverallDofs(), &tmp_primal1);