Commit f1105a85 authored Jun 11, 2017 by Praetorius, Simon
synch vector reimplemented by index-based assignment
parent 6395187b
Changes 7
dune/dec/common/Operations.hpp
...
@@ -37,9 +37,9 @@ namespace Dec
   struct assign
   {
     template <class T, class S>
-    constexpr auto& operator()(T& lhs, S const& rhs) const
+    constexpr void operator()(T const& from, S& to) const
     {
-      return lhs = rhs;
+      to = from;
     }
   };
...
@@ -56,9 +56,9 @@ namespace Dec
   struct plus_assign
   {
     template <class T, class S>
-    constexpr auto& operator()(T& lhs, S const& rhs) const
+    constexpr void operator()(T const& from, S& to) const
     {
-      return lhs += rhs;
+      to += from;
     }
   };
...
@@ -75,9 +75,9 @@ namespace Dec
   struct minus_assign
   {
     template <class T, class S>
-    constexpr auto& operator()(T& lhs, S const& rhs) const
+    constexpr void operator()(T const& from, S& to) const
     {
-      return lhs -= rhs;
+      to -= from;
     }
   };
...
@@ -94,9 +94,9 @@ namespace Dec
   struct times_assign
   {
     template <class T, class S>
-    constexpr auto& operator()(T& lhs, S const& rhs) const
+    constexpr void operator()(T const& from, S& to) const
     {
-      return lhs *= rhs;
+      to *= from;
     }
   };
...
@@ -113,9 +113,9 @@ namespace Dec
   struct divides_assign
   {
     template <class T, class S>
-    constexpr auto& operator()(T& lhs, S const& rhs) const
+    constexpr void operator()(T const& from, S& to) const
     {
-      return lhs /= rhs;
+      to /= from;
     }
   };
...
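Note (not part of the commit): the assigners now take the value to read from first and the target second, and write through the second argument instead of returning a reference. A minimal standalone sketch of the new calling convention, using stand-in structs with the same shape as Dec::operation::assign and plus_assign:

    #include <cstddef>
    #include <vector>

    // stand-ins mirroring the reimplemented functors
    struct assign      { template <class T, class S> void operator()(T const& from, S& to) const { to  = from; } };
    struct plus_assign { template <class T, class S> void operator()(T const& from, S& to) const { to += from; } };

    int main()
    {
      std::vector<double> received{1.0, 2.0}, local{10.0, 20.0};
      plus_assign add;
      for (std::size_t i = 0; i < local.size(); ++i)
        add(received[i], local[i]);   // local[i] += received[i]
      // local now holds {11.0, 22.0}
    }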
dune/dec/parallel/DistributedMatrix.hpp
...
@@ -91,7 +91,7 @@ namespace Dec
   {
     DistributedVector<T,GridView,cd_row> r{b};
     r -= A*x;
-    return two_norm(r);
+    return two_norm(r); // TODO: combine two synchronizations
   }

 } // end namespace Dec
dune/dec/parallel/DistributedVector.hpp
...
@@ -3,6 +3,7 @@
 #include <dune/dec/LinearAlgebra.hpp>
 #include <dune/dec/common/Common.hpp>
 #include <dune/dec/common/Operations.hpp>
+#include <dune/dec/common/Joiner.hpp>
 #include <dune/dec/ranges/Filter.hpp>
 #include <dune/dec/ranges/IndexRange.hpp>
...
@@ -14,16 +15,17 @@ namespace Dec
     struct need_synchronization {};
   }

-  template <class BaseGridView, int cd, class T, class Assigner>
+  template <class BaseGridView, int cd>
   class SynchVectorHandle
-      : public Dune::CommDataHandleIF<SynchVectorHandle<BaseGridView,cd,T,Assigner>, T>
+      : public Dune::CommDataHandleIF<SynchVectorHandle<BaseGridView,cd>, int>
   {
-    using IndexType = typename HalfEdge::IndexType;
+    using id_type    = typename BaseGridView::Grid::GlobalIdSet::IdType;
+    using index_type = typename BaseGridView::IndexSet::IndexType;

   public:
-    SynchVectorHandle(BaseGridView const& gv, DOFVector<T>& vec)
+    SynchVectorHandle(BaseGridView const& gv)
       : gv_(gv)
-      , vec_(vec)
+      , map_(gv.comm().size())
     {}

     bool contains(int /*dim*/, int codim) const
...
@@ -45,22 +47,173 @@ namespace Dec
     template <class MessageBuffer, class BaseEntity>
     void gather(MessageBuffer& buff, BaseEntity const& e) const
     {
-      buff.write(vec_[gv_.indexSet().index(e)]);
+      buff.write(int(gv_.comm().rank()));
     }

     template <class MessageBuffer, class BaseEntity>
     void scatter(MessageBuffer& buff, BaseEntity const& e, std::size_t n)
     {
       assert(n == 1);
-      T value = 0.0;
-      buff.read(value);
-      assign(vec_[gv_.indexSet().index(e)], value);
+      int rank = -1;
+      buff.read(rank);
+      auto id = gv_.grid().globalIdSet().id(e);
+      map_[rank][id] = gv_.indexSet().index(e);
     }

+    void finish(std::vector<std::vector<std::size_t>>& out) const
+    {
+      out.resize(gv_.comm().size());
+      for (int r = 0; r < gv_.comm().size(); ++r) {
+        out[r].resize(map_[r].size());
+        std::transform(map_[r].begin(), map_[r].end(), out[r].begin(),
+                       [](auto const& pair) { return pair.second; });
+      }
+    }
+
   private:
     BaseGridView gv_;
-    DOFVector<T>& vec_;
-    Assigner assign;
+    std::vector<std::map<id_type,index_type>> map_;
   };
+
+
+  template <int cd, class T>
+  class SynchVector
+  {
+  public:
+    template <class GridView>
+    SynchVector(GridView const& gv)
+      : rank_(gv.comm().rank())
+      , size_(gv.comm().size())
+      , comm_(gv.comm())
+      , buff_in_(size_)
+      , request_in_(size_)
+      , request_out_(size_)
+      , finished_(size_, true)
+    {
+      SynchVectorHandle<GridView, cd> synchVector(gv);
+      gv.communicate(synchVector, Dune::InteriorBorder_All_Interface, Dune::ForwardCommunication);
+      synchVector.finish(map_);
+
+      std::size_t len = std::accumulate(map_.begin(), map_.end(), 0u,
+        [](std::size_t l, auto const& v) { return std::max(l, v.size()); });
+      buff_out_.resize(len);
+      for (int r = 0; r < size_; ++r)
+        buff_in_[r].resize(map_[r].size());
+    }
+
+    template <class Assigner>
+    void synch(DOFVector<T>& vec, Assigner assign)
+    {
+      int received = 0;
+      for (int r = 0; r < size_; ++r) {
+        if (r == rank_ || map_[r].empty())
+          continue;
+
+        // send data to neighbours
+        std::size_t i = 0;
+        for (auto const& j : map_[r])
+          buff_out_[i++] = vec[j];
+        MPI_Isend(buff_out_.data(), map_[r].size(), Dune::MPITraits<T>::getType(), r, tag_, comm_, &request_out_[r]);
+
+        // receive data from neighbours
+        MPI_Irecv(buff_in_[r].data(), map_[r].size(), Dune::MPITraits<T>::getType(), r, tag_, comm_, &request_in_[r]);
+        finished_[r] = false;
+        received++;
+      }
+
+      while (received > 0) {
+        for (int r = 0; r < size_; ++r) {
+          if (r == rank_ || map_[r].empty() || finished_[r])
+            continue;
+          int flag = -1;
+          MPI_Status status;
+          MPI_Test(&request_in_[r], &flag, &status);
+          if (flag) {
+            std::size_t i = 0;
+            for (auto const& j : map_[r])
+              assign(buff_in_[r][i++], vec[j]);
+            finished_[r] = true;
+            received--;
+          }
+        }
+      }
+    }
+
+    // the owner assigns its value to all ranks
+    void synch(DOFVector<T>& vec)
+    {
+      int received = 0;
+      for (int r = 0; r < size_; ++r) {
+        if (r == rank_ || map_[r].empty())
+          continue;
+
+        // send data to neighbours
+        if (r > rank_) {
+          std::size_t i = 0;
+          for (auto const& j : map_[r])
+            buff_out_[i++] = vec[j];
+          MPI_Isend(buff_out_.data(), map_[r].size(), Dune::MPITraits<T>::getType(), r, tag_, comm_, &request_out_[r]);
+        }
+        if (r < rank_) {
+          // receive data from neighbours
+          MPI_Irecv(buff_in_[r].data(), map_[r].size(), Dune::MPITraits<T>::getType(), r, tag_, comm_, &request_in_[r]);
+          finished_[r] = false;
+          received++;
+        } else {
+          finished_[r] = true;
+        }
+      }
+
+      // wait for all data to be received
+      while (received > 0) {
+        for (int r = 0; r < size_; ++r) {
+          if (r >= rank_ || map_[r].empty() || finished_[r])
+            continue;
+          int flag = -1;
+          MPI_Status status;
+          MPI_Test(&request_in_[r], &flag, &status);
+          if (flag) {
+            finished_[r] = true;
+            received--;
+          }
+        }
+      }
+
+      // assign values from largest rank to lowest
+      for (int r = rank_ - 1; r >= 0; --r) {
+        std::size_t i = 0;
+        for (auto const& j : map_[r])
+          vec[j] = buff_in_[r][i++];
+      }
+    }
+
+  private:
+    int rank_;        //< my rank
+    int size_;        //< the size of the MPI communicator
+    MPI_Comm comm_;   //< the MPI communicator
+
+    // data buffers for send and receive
+    std::vector<std::vector<T>> buff_in_;
+    std::vector<T> buff_out_;
+
+    // status of the communications
+    std::vector<MPI_Request> request_in_;
+    std::vector<MPI_Request> request_out_;
+
+    // sets for each rank whether the communication is finished
+    std::vector<bool> finished_;
+
+    // a mapping of indices, i.e. the order in which values are read from and written to a data-vector
+    std::vector<std::vector<std::size_t>> map_;
+
+    // a communication tag
+    int tag_ = 3579;
+  };
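Usage sketch (not part of the commit; assumes a parallel grid view gv and that Dec::DOFVector<double> is constructible from a size and indexable by std::size_t, as used elsewhere in this file): the communicator is built once from the grid view and then reused for index-based exchanges.

    // build the per-rank index maps once (one grid communication)
    Dec::SynchVector<2, double> comm(gv);

    Dec::DOFVector<double> values(gv.size(2));
    // ... fill local contributions into `values` ...

    // accumulate the contributions of all neighbouring ranks into `values`
    comm.synch(values, Dec::operation::plus_assign{});

    // or: the owner's value is copied to all other ranks (see the comment in the class above)
    comm.synch(values);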
...
...
@@ -86,12 +239,14 @@ namespace Dec
       : Super(gv.size(cd), "distributed")
       , gv_(gv)
       , localVector_(gv.size(cd))
+      , comm_(gv.base())
     {}

     DistributedVector(DistributedVector const& that, tag::ressource)
       : Super(that.size(), that.name())
       , gv_(that.gv_)
       , localVector_(gv_.size(that.codimension))
+      , comm_(that.comm_)
     {}
...
...
@@ -108,11 +263,14 @@ namespace Dec
     DistributedVector& operator=(std::pair<Expr, tag::need_synchronization> const& that)
     {
       localVector_ = that.first;
-      auto baseGridView = gv_.base();
-      using BaseGridView = decltype(baseGridView);
-      SynchVectorHandle<BaseGridView,cd,T,operation::plus_assign> synchVector(baseGridView, localVector_);
-      baseGridView.communicate(synchVector, Dune::InteriorBorder_All_Interface, Dune::ForwardCommunication);
+      comm_.synch(localVector_, operation::plus_assign{});
+      // auto baseGridView = gv_.base();
+      //
+      // using BaseGridView = decltype(baseGridView);
+      // SynchVectorHandle<BaseGridView,cd,T,operation::plus_assign> synchVector(baseGridView, localVector_);
+      // baseGridView.communicate(synchVector, Dune::InteriorBorder_All_Interface, Dune::ForwardCommunication);
       return *this;
     }
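Call-site sketch (assumption: a matrix-vector product of the distributed types yields a std::pair tagged with tag::need_synchronization, consistent with the residual computation in DistributedMatrix.hpp above):

    DistributedVector<double, GridView, 2> x(gv), b(gv);
    // evaluates the product into the local vector, then accumulates remote
    // contributions via comm_.synch(localVector_, operation::plus_assign{})
    x = A * b;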
...
...
@@ -134,11 +292,12 @@ namespace Dec
     DistributedVector& operator+=(std::pair<Expr, tag::need_synchronization> const& that)
     {
       DOFVector<T> tmp{that.first};
-      auto baseGridView = gv_.base();
-      using BaseGridView = decltype(baseGridView);
-      SynchVectorHandle<BaseGridView,cd,T,operation::plus_assign> synchVector(baseGridView, tmp);
-      baseGridView.communicate(synchVector, Dune::InteriorBorder_All_Interface, Dune::ForwardCommunication);
+      comm_.synch(tmp, operation::plus_assign{});
+      // auto baseGridView = gv_.base();
+      //
+      // using BaseGridView = decltype(baseGridView);
+      // SynchVectorHandle<BaseGridView,cd,T,operation::plus_assign> synchVector(baseGridView, tmp);
+      // baseGridView.communicate(synchVector, Dune::InteriorBorder_All_Interface, Dune::ForwardCommunication);
       localVector_ += tmp;
       return *this;
...
...
@@ -161,11 +320,12 @@ namespace Dec
     DistributedVector& operator-=(std::pair<Expr, tag::need_synchronization> const& that)
     {
       DOFVector<T> tmp{that.first};
-      auto baseGridView = gv_.base();
-      using BaseGridView = decltype(baseGridView);
-      SynchVectorHandle<BaseGridView,cd,T,operation::plus_assign> synchVector(baseGridView, tmp);
-      baseGridView.communicate(synchVector, Dune::InteriorBorder_All_Interface, Dune::ForwardCommunication);
+      comm_.synch(tmp, operation::plus_assign{});
+      // auto baseGridView = gv_.base();
+      //
+      // using BaseGridView = decltype(baseGridView);
+      // SynchVectorHandle<BaseGridView,cd,T,operation::plus_assign> synchVector(baseGridView, tmp);
+      // baseGridView.communicate(synchVector, Dune::InteriorBorder_All_Interface, Dune::ForwardCommunication);
       localVector_ -= tmp;
       return *this;
...
...
@@ -184,6 +344,11 @@ namespace Dec
     }

+    void synch()
+    {
+      comm_.synch(localVector_);
+    }
+
   public:
     // binary arithmetic operations
     friend auto operator+(DistributedVector<T,GridView,cd> const& lhs, DistributedVector<T,GridView,cd> const& rhs)
...
...
@@ -237,7 +402,7 @@ namespace Dec
     T x = 0;
     for (size_type i = 0; i < lhs.size(); ++i)
-      x += indexSet.template owns<cd>(i) ? lhs.vector()[i] * rhs.vector()[i] : 0;
+      x += indexSet.template owns<cd>(i) * lhs.vector()[i] * rhs.vector()[i];
     return lhs.gv_.comm().sum(x);
   }
...
...
@@ -248,7 +413,7 @@ namespace Dec
     T x = 0;
     for (size_type i = 0; i < vec.size(); ++i)
-      x += indexSet.template owns<cd>(i) ? sqr(vec.vector()[i]) : 0;
+      x += indexSet.template owns<cd>(i) * sqr(vec.vector()[i]);
     return vec.gv_.comm().sum(x);
   }
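Both reductions above drop the conditional in favour of multiplying with the 0/1 result of owns<cd>(i), so every globally shared entry is counted exactly once before comm().sum. A small standalone sketch of the masking idea (names are illustrative only, not from the library):

    #include <cstddef>
    #include <vector>

    // per-rank partial sum: entries owned by another rank are masked to zero;
    // a global MPI sum over all ranks would follow
    double masked_dot(std::vector<double> const& u, std::vector<double> const& v,
                      std::vector<int> const& owns)   // owns[i] is 0 or 1
    {
      double x = 0;
      for (std::size_t i = 0; i < u.size(); ++i)
        x += owns[i] * u[i] * v[i];
      return x;
    }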
...
...
@@ -276,6 +441,7 @@ namespace Dec
     GridView gv_;
     DOFVector<T> localVector_;
+    SynchVector<cd,T> comm_;
   };
...
...
dune/dec/parallel/ParallelDofMapper.hpp
...
...
@@ -7,6 +7,8 @@
 #include <dune/grid/common/gridenums.hh>
 #include <dune/grid/common/partitionset.hh>
 #include <dune/dec/common/Common.hpp>
+
+#include "EntityOwner.hpp"

 namespace Dec
...
...
@@ -45,9 +47,10 @@ namespace Dec
     /// Entity is owned by current rank
     template <class Entity,
-      REQUIRES(Entity::codimension == codim)>
+      class = Void_t<decltype(std::declval<Entity>().index())>>
     bool owns(Entity const& e) const
     {
+      static_assert(Entity::codimension == codim, "");
       return owns(e.index());
     }
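The overload constraint changes from a REQUIRES clause to Void_t-based detection of a member index(), with the codimension check kept as a static_assert. A self-contained sketch of the same detection pattern (Void_t defined locally here; assumed to be equivalent to the library's helper):

    #include <cstddef>
    #include <utility>

    template <class...>
    using Void_t = void;

    struct Edge { std::size_t index() const { return 7; } };

    // participates in overload resolution only if E provides e.index()
    template <class E, class = Void_t<decltype(std::declval<E>().index())>>
    bool owns(E const& e) { return e.index() % 2 == 0; }

    int main() { return owns(Edge{}) ? 0 : 1; }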
...
...
examples/CMakeLists.txt
...
...
@@ -11,6 +11,9 @@ target_compile_definitions(domain_decomposition PRIVATE DEC_HAS_MPI=1)
 add_dec_executable(entity_owner entity_owner.cpp)
 target_compile_definitions(entity_owner PRIVATE DEC_HAS_MPI=1)

+add_dec_executable(communicator communicator.cpp)
+target_compile_definitions(communicator PRIVATE DEC_HAS_MPI=1)
+
 add_dec_executable(alugrid ALBERTA alugrid.cpp)
 target_compile_definitions(alugrid PRIVATE DEC_HAS_MPI=1)
...
...
@@ -24,7 +27,7 @@ add_dec_executable(transfer ALBERTA transfer.cpp)
 add_dec_executable(vecellipt ALBERTA vecellipt.cpp)
 # add_dec_executable(weighted_triangulation ALBERTA weighted_triangulation.cpp)
 add_dec_executable(weighted_triangulation2 ALBERTA weighted_triangulation2.cpp)

-add_dependencies(examples geometry laplace laplace_operator simple_grid heat weighted_triangulation2 orientation transfer domain_decomposition entity_owner alugrid)
+add_dependencies(examples geometry laplace laplace_operator simple_grid heat weighted_triangulation2 orientation transfer domain_decomposition entity_owner alugrid communicator)

 add_dec_executable(helmholtz ALBERTA helmholtz.cpp)
 add_dec_executable(consistency ALBERTA consistency.cpp)
...
...
examples/alugrid.cpp
...
...
@@ -126,10 +126,16 @@ int main(int argc, char** argv)
   DistributedMatrix<double, GridView, 2, 2> A(gv);
   laplace.build(A);

-  DistributedVector<double, GridView, 2> x(gv), b(gv);
+  DistributedVector<double, GridView, 2> b(gv);
   b.vector() = 1.0;
-  x.vector() = 0.0;
+
+  DistributedVector<double, GridView, 2> x(b, tag::ressource{});
+  random(x.vector());
+  x.synch();
+
+  Dune::VTKWriter<GridView> vtkwriter(gv);
+  vtkwriter.addVertexData(x.vector(), "x");
+  vtkwriter.write("alugrid_x0");

   if (mpihelper.rank() == 0) {
     typename Grid::GlobalCoordinate x0;
...
...
@@ -159,8 +165,6 @@ int main(int argc, char** argv)
   msg("|x| = ", two_norm(x));

   t.reset();
-  Dune::VTKWriter<GridView> vtkwriter(gv);
-  vtkwriter.addVertexData(x.vector(), "x");
   vtkwriter.write("alugrid");
   msg("time(write file) = ", t.elapsed(), "sec");
...
...
examples/communicator.cpp
0 → 100644
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <array>
#include <iostream>
#include <memory>
#include <vector>

#include <dune/grid/utility/structuredgridfactory.hh>
#include <dune/grid/utility/parmetisgridpartitioner.hh>
#include <dune/grid/uggrid.hh>

#include <dune/common/parallel/indexset.hh>
#include <dune/common/parallel/plocalindex.hh>
#include <dune/common/parallel/remoteindices.hh>
#include <dune/common/parallel/interface.hh>
#include <dune/common/enumset.hh>

#include <dune/dec/parallel/ParallelDofMapper.hpp>

enum GridFlags {
  owner, overlap, border
};

template <class GridView, int cd>
class Handle
    : public Dune::CommDataHandleIF<Handle<GridView,cd>, std::size_t>
{
  using ParallelIndexSet = Dune::ParallelIndexSet<std::size_t, Dune::ParallelLocalIndex<GridFlags>>;

public:
  Handle(GridView const& gv, Dec::LocalCodimMapper<cd> const& localCodimMapper)
    : indexSet_(gv.indexSet())
    , idSet_(gv.grid().globalIdSet())
    , localCodimMapper_(localCodimMapper)
  {
    parallelIndexSet_.beginResize();
  }

  bool contains(int /*dim*/, int codim) const
  {
    return codim == cd;
  }

  bool fixedSize(int /*dim*/, int /*codim*/) const
  {
    return true;
  }

  template <class Entity>
  std::size_t size(Entity const& /*e*/) const
  {
    return 2u;
  }

  template <class MessageBuffer, class Entity>
  void gather(MessageBuffer& buff, Entity const& e) const
  {
    buff.write(std::size_t(indexSet_.index(e)));
    buff.write(std::size_t(idSet_.id(e)));
  }

  template <class MessageBuffer, class Entity>
  void scatter(MessageBuffer& buff, Entity const& e, std::size_t /*n*/)
  {
    assert(!finished);
    std::size_t local = 0, global = 0;
    buff.read(local);
    buff.read(global);

    GridFlags attribute = localCodimMapper_.owns(local) ? GridFlags::owner : GridFlags::overlap;
    Dune::ParallelLocalIndex<GridFlags> localIndex{local, attribute};
    parallelIndexSet_.add(global, localIndex);
  }

  ParallelIndexSet&& get()
  {
    parallelIndexSet_.endResize();
    finished = true;
    return std::move(parallelIndexSet_);
  }

private:
  typename GridView::IndexSet const& indexSet_;
  typename GridView::Grid::GlobalIdSet const& idSet_;
  Dec::LocalCodimMapper<cd> const& localCodimMapper_;

  ParallelIndexSet parallelIndexSet_;
  bool finished = false;
};


int main(int argc, char** argv)
{
  auto& mpihelper = Dune::MPIHelper::instance(argc, argv);

  // Create ug grid from structured grid
  std::array<unsigned int, 2> n = {{2, 2}};
  Dune::FieldVector<double, 2> lower = {{0, 0}};
  Dune::FieldVector<double, 2> upper = {{1, 1}};

  using GridBase = Dune::UGGrid<2>;
  std::shared_ptr<GridBase> gridBase = Dune::StructuredGridFactory<GridBase>::createSimplexGrid(lower, upper, n);

  using GridView = typename GridBase::LeafGridView;
  auto gv = gridBase->leafGridView();

  // Create initial partitioning using ParMETIS
  std::vector<unsigned> part(Dune::ParMetisGridPartitioner<GridView>::partition(gv, mpihelper));

  // Transfer partitioning from ParMETIS to our grid
  gridBase->loadBalance(part, 0);

  Dec::LocalCodimMapper<2> localCodimMapper(gv);
  Handle<GridView, 2> handle{gv, localCodimMapper};
  gv.communicate(handle, Dune::All_All_Interface, Dune::ForwardCommunication);

  auto parallelIndexSet = handle.get();

  Dune::RemoteIndices<decltype(parallelIndexSet)> remoteIndices{parallelIndexSet, parallelIndexSet, gridBase->comm()};
  remoteIndices.rebuild<true>();

  Dune::Interface interface{gridBase->comm()};
  interface.build(remoteIndices, Dune::EnumItem<GridFlags, owner>{}, Dune::EnumItem<GridFlags, overlap>{});
}
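To inspect what the new example builds, one could print the parallel index set on each rank before constructing the interface; a sketch (not part of the commit) using the IndexPair accessors from dune-common (global(), local(), attribute()), inserted after handle.get():

    // sketch only: one line per index pair stored on this rank
    for (auto const& p : parallelIndexSet)
      std::cout << "rank " << mpihelper.rank()
                << "  global=" << p.global()
                << "  local=" << p.local().local()
                << "  owner=" << (p.local().attribute() == GridFlags::owner) << '\n';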