Commit 77592d44 authored by Praetorius, Simon

Modified data type to allow the METIS-specific real type (real_t)

parent ee4ea74b
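The substantive change below swaps double for real_t in the values handed to ParMETIS: tpwgts and ubvec are consumed by the library as real_t*, a typedef whose width depends on how METIS was built (float by default, configurable via REALTYPEWIDTH in metis.h in METIS 5.x / ParMETIS 4.x). A minimal sketch of the corrected pattern, assuming ParMETIS 4.x headers; the helper name setupWeights is hypothetical:

#include <vector>
#include <parmetis.h>  // pulls in metis.h, which typedefs idx_t and real_t

// Target partition weights and the imbalance tolerance must use the
// library's real_t: if real_t is a 32-bit float and the caller passes
// an array of doubles, ParMETIS misinterprets the bytes.
void setupWeights(int nparts, std::vector<real_t>& tpwgts, real_t& ubvec)
{
  // equal target share for each of the nparts partitions
  tpwgts.assign(nparts, 1.0 / static_cast<real_t>(nparts));
  ubvec = 1.05;  // allow 5% load imbalance per vertex weight (ncon = 1)
}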
@@ -5,7 +5,7 @@
* Copyright (C) 2013 Dresden University of Technology. All Rights Reserved.
* Web: https://fusionforge.zih.tu-dresden.de/projects/amdis
*
* Authors:
* Simon Vey, Thomas Witkowski, Andreas Naumann, Simon Praetorius, et al.
*
* This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
@@ -15,7 +15,7 @@
* This file is part of AMDiS
*
* See also license.opensource.txt in the distribution.
*
******************************************************************************/
@@ -37,7 +37,7 @@ using namespace std;
namespace AMDiS { namespace Parallel {
ParMetisMesh::ParMetisMesh(Mesh *mesh, MPI::Intracomm *comm,
std::map<int, bool>& elementInRank,
DofMap *mapLocalGlobal)
: dim(mesh->getDim()),
@@ -54,7 +54,7 @@ namespace AMDiS { namespace Parallel {
ElInfo *elInfo = stack.traverseFirst(mesh, 0, Mesh::CALL_EL_LEVEL);
while (elInfo) {
if (elementInRank[elInfo->getElement()->getIndex()])
elementCounter++;
elInfo = stack.traverseNext(elInfo);
}
@@ -72,14 +72,14 @@ namespace AMDiS { namespace Parallel {
if (dim == dow)
xyz = new float[nElements * dim];
else
xyz = NULL;
eptr[0] = 0;
int *ptr_eptr = eptr + 1;
int *ptr_eind = eind;
float *ptr_xyz = xyz;
// gather element numbers and create elmdist
mpiComm->Allgather(&nElements, 1, MPI_INT, elmdist + 1, 1, MPI_INT);
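The fold after this call hides the rest of the elmdist construction; the usual pattern, sketched under that assumption, is a prefix sum over the gathered per-rank counts:

int nRanks = mpiComm->Get_size();
// After the Allgather, elmdist[1..nRanks] holds each rank's local element
// count; a running sum converts counts into global offsets, so that
// rank r owns elements [elmdist[r], elmdist[r+1]).
elmdist[0] = 0;
for (int i = 1; i <= nRanks; i++)
  elmdist[i] += elmdist[i - 1];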
@@ -115,7 +115,7 @@ namespace AMDiS { namespace Parallel {
if (mapLocalGlobal)
*ptr_eind = (*mapLocalGlobal)[element->getDof(i, 0)].global;
else
*ptr_eind = element->getDof(i, 0);
ptr_eind++;
}
@@ -141,17 +141,17 @@ namespace AMDiS { namespace Parallel {
{
if (eptr)
delete [] eptr;
if (eind)
delete [] eind;
if (elmdist)
delete [] elmdist;
if (xyz)
delete [] xyz;
if (elem_p2a)
delete [] elem_p2a;
}
@@ -168,7 +168,7 @@ namespace AMDiS { namespace Parallel {
int numflag = 0;
if (ncommonnodes == -1)
ncommonnodes = parMetisMesh->getDim();
MPI_Comm tmpComm = MPI_Comm(*comm);
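For orientation: numflag, ncommonnodes, and tmpComm are the scalar inputs to ParMETIS's mesh-to-dual-graph routine, which turns the distributed element mesh (elmdist/eptr/eind in CSR form) into the graph the partitioner works on. A hedged sketch of that call, using the file's int-based types and the standard ParMETIS_V3_Mesh2Dual signature; the two accessor names for eptr/eind are hypothetical stand-ins for whatever ParMetisMesh exposes:

// Elements become graph vertices; two elements sharing at least
// ncommonnodes mesh nodes (dim nodes for a simplex face) get an edge.
int *xadj = NULL;
int *adjncy = NULL;
ParMETIS_V3_Mesh2Dual(parMetisMesh->getElementDist(),
                      parMetisMesh->getElementPtr(),  // hypothetical accessor for eptr
                      parMetisMesh->getElementInd(),  // hypothetical accessor for eind
                      &numflag, &ncommonnodes,
                      &xadj, &adjncy,                 // output: dual graph, allocated by ParMETIS
                      &tmpComm);
// The ParMETIS manual says to release xadj/adjncy with METIS_Free
// once the partitioning is done.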
@@ -198,21 +198,21 @@ namespace AMDiS { namespace Parallel {
stringstream oss;
for (int i = 0; i <= MPI::COMM_WORLD.Get_size(); i++)
oss << parMetisMesh->getElementDist()[i] << " ";
MSG("Element dist = %s\n", oss.str().c_str());
int mpiRank = MPI::COMM_WORLD.Get_rank();
int nElements = parMetisMesh->getElementDist()[mpiRank + 1] -
parMetisMesh->getElementDist()[mpiRank];
MSG("nElements = %d in index range %d - %d\n",
nElements,
MSG("nElements = %d in index range %d - %d\n",
nElements,
parMetisMesh->getElementDist()[mpiRank],
parMetisMesh->getElementDist()[mpiRank + 1]);
oss.str("");
oss.clear();
for (int i = 0; i <= nElements; i++)
oss << xadj[i] << ", ";
@@ -221,7 +221,7 @@ namespace AMDiS { namespace Parallel {
oss.str("");
oss.clear();
for (int i = 0; i <= xadj[nElements] - 1; i++)
oss << adjncy[i] << ", ";
MSG("adjncy = {%s}\n", oss.str().c_str());
@@ -230,13 +230,13 @@ namespace AMDiS { namespace Parallel {
ParMetisPartitioner::~ParMetisPartitioner()
{
if (parMetisMesh)
delete parMetisMesh;
}
bool ParMetisPartitioner::partition(map<int, double> &elemWeights,
PartitionMode mode)
{
FUNCNAME("ParMetisPartitioner::partition()");
@@ -245,7 +245,7 @@ namespace AMDiS { namespace Parallel {
// === Create parmetis mesh ===
if (parMetisMesh)
delete parMetisMesh;
TEST_EXIT_DBG(elementInRank.size() != 0)("Should not happen!\n");
@@ -268,7 +268,7 @@ namespace AMDiS { namespace Parallel {
int index = elInfo->getElement()->getIndex();
if (elementInRank[index]) {
// get weight
float wgt = static_cast<float>(elemWeights[index]);
maxWgt = std::max(wgt, maxWgt);
@@ -298,18 +298,18 @@ namespace AMDiS { namespace Parallel {
int ncon = 1; // one weight at each vertex!
int nparts = mpiSize; // number of partitions
- vector<double> tpwgts(mpiSize);
- double ubvec = 1.05;
+ vector<real_t> tpwgts(mpiSize);
+ real_t ubvec = 1.05;
int options[4] = {0, 0, 15, PARMETIS_PSR_COUPLED}; // default options
int edgecut = -1;
vector<int> part(nElements);
// set tpwgts
for (int i = 0; i < mpiSize; i++)
- tpwgts[i] = 1.0 / static_cast<double>(nparts);
+ tpwgts[i] = 1.0 / static_cast<real_t>(nparts);
// float scale = 10000.0 / maxWgt;
for (int i = 0; i < nElements; i++)
wgts[i] = floatWgts[i];
// wgts[i] = static_cast<int>(floatWgts[i] * scale);
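These locals line up with the tail of the k-way partitioning call's parameter list, and tpwgts/ubvec are precisely the two real_t* arguments that motivated the data-type change. A sketch of the call shape, assuming the standard ParMETIS_V3_PartKway signature rather than quoting the file's verbatim code:

int wgtflag = 2;  // weights on vertices only, none on edges
int numflag = 0;  // 0-based vertex numbering
ParMETIS_V3_PartKway(parMetisMesh->getElementDist(), // per-rank vertex ranges
                     xadj, adjncy,          // dual graph from ParMETIS_V3_Mesh2Dual
                     &(wgts[0]), NULL,      // vertex weights; no edge weights
                     &wgtflag, &numflag, &ncon, &nparts,
                     &(tpwgts[0]), &ubvec,  // real_t*: the reason for this commit
                     options,               // options[0] = 0 -> library defaults
                     &edgecut, &(part[0]), &tmpComm);
// On return, part[i] is the partition assigned to the i-th local element
// and edgecut counts the dual-graph edges cut by that assignment.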
@@ -379,7 +379,7 @@ namespace AMDiS { namespace Parallel {
&tmpComm);
break;
default:
ERROR_EXIT("unknown partitioning mode\n");
}
@@ -417,11 +417,11 @@ namespace AMDiS { namespace Parallel {
// distribute partition elements
mpiComm->Allgatherv(parMetisMesh->getAMDiSIndices(),
nPartitionElements[mpiRank],
MPI_INT,
&(partitionElements[0]),
&(nPartitionElements[0]),
elmdist,
MPI_INT);
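// Sketch of the gather's effect: each rank contributes the AMDiS indices
// of its nPartitionElements[mpiRank] local elements, placed at the
// displacement elmdist[rank] in partitionElements, so afterwards every
// rank holds the complete index list and can fill partitionMap below.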
// fill partitionMap
@@ -433,7 +433,7 @@ namespace AMDiS { namespace Parallel {
}
bool ParMetisPartitioner::distributePartitioning(int *part)
{
FUNCNAME("ParMetisPartitioner::distributePartitioning()");
@@ -443,10 +443,10 @@ namespace AMDiS { namespace Parallel {
// nPartitionElements[i] is the number of elements for the i-th partition
int *nPartitionElements = new int[mpiSize];
for (int i = 0; i < mpiSize; i++)
nPartitionElements[i] = 0;
for (int i = 0; i < nElements; i++)
nPartitionElements[part[i]]++;
// collect number of partition elements from all ranks for this rank
int *nRankElements = new int[mpiSize];
@@ -460,11 +460,11 @@ namespace AMDiS { namespace Parallel {
// Test if there exists an empty partition
bool emptyPartition = false;
for (int i = 0; i < mpiSize; i++)
emptyPartition |= (sumPartitionElements[i] == 0);
if (emptyPartition)
return false;
// prepare distribution (fill partitionElements with AMDiS indices)
int *bufferOffset = new int[mpiSize];
@@ -497,7 +497,7 @@ namespace AMDiS { namespace Parallel {
for (int i = 1; i < mpiSize; i++)
recvBufferOffset[i] = recvBufferOffset[i - 1] + nRankElements[i - 1];
mpiComm->Alltoallv(partitionElements,
nPartitionElements,
bufferOffset,
MPI_INT,
@@ -505,7 +505,7 @@ namespace AMDiS { namespace Parallel {
nRankElements,
recvBufferOffset,
MPI_INT);
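// Sketch of the exchange's effect: rank r sends, for each rank i, the
// nPartitionElements[i] AMDiS indices (at offset bufferOffset[i]) of the
// elements it assigned to partition i, and receives from each rank j its
// nRankElements[j] new elements at recvBufferOffset[j]; the receive
// buffer then lists exactly the elements this rank owns afterwards,
// which is what the elementInRank update below consumes.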
TEST_EXIT(elementInRank.size() != 0)("Should not happen!\n");
for (map<int, bool>::iterator it = elementInRank.begin();
it != elementInRank.end(); ++it)
@@ -538,5 +538,5 @@ namespace AMDiS { namespace Parallel {
return true;
}
} }