Commit 80ce300b authored by Praetorius, Simon

some compiler warnings removed

parent f5c1cb21
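The hunks below touch several recurring warning classes: mixing && and || without parentheses, comparing signed counters against unsigned container sizes, unused variables and typedefs, member initializers written out of declaration order, run-time-sized stack arrays, debug-only statements, and code paths without a return value. The flag names used here (-Wparentheses, -Wsign-compare, -Wunused-variable, -Wreorder, -Wvla, -Wreturn-type) are the usual GCC names for these diagnostics; the commit itself does not say which compiler produced the warnings. One of the most frequent changes is switching counters to size_t, roughly as in this sketch (function and variable names are illustrative only):

#include <cstddef>
#include <vector>

// stripes.size() returns std::size_t; comparing it with an int counter triggers
// -Wsign-compare, so the loop variable is declared as size_t as well.
std::size_t countNonEmpty(const std::vector<std::vector<int> >& stripes)
{
  std::size_t n = 0;
  for (std::size_t stripe = 0; stripe < stripes.size(); stripe++)
    if (!stripes[stripe].empty())
      n++;
  return n;
}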
......@@ -129,8 +129,7 @@ namespace AMDiS {
int edgeNo1 = el->getEdgeOfChild(1, i, elInfo2->getType());
bool refineChildFirst =
- !(i > 0 &&
- (edgeNo0 >= 0 && !el2->getChild(0)->isLeaf()) ||
+ !((i > 0 && (edgeNo0 >= 0 && !el2->getChild(0)->isLeaf())) ||
(edgeNo1 >= 0 && !el2->getChild(1)->isLeaf()));
if (refineChildFirst) {
......
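The hunk above adds parentheses around the && sub-expression. Because && binds tighter than ||, GCC warns with -Wparentheses ("suggest parentheses around '&&' within '||'") when the two are mixed without explicit grouping; the new line keeps the original meaning and documents it. A minimal illustration with made-up conditions:

bool refineFirst(bool childCase, bool edge0Busy, bool edge1Busy)
{
  // '!(a && b || c)' parses as '!((a && b) || c)'; spelling the grouping out
  // keeps the meaning but removes the ambiguity -Wparentheses complains about.
  return !((childCase && edge0Busy) || edge1Busy);
}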
......@@ -218,7 +218,7 @@ namespace AMDiS {
int i = 0;
for (; i < refinementPathLength; i++) {
- elInfo[(i+1)%2]->fillElInfo(static_cast<int>((refinementPath & (1<<i)) == (1<<i)), elInfo[i%2]);
+ elInfo[(i+1)%2]->fillElInfo(static_cast<int>((refinementPath & (1<<i)) == static_cast<unsigned long>(1<<i)), elInfo[i%2]);
}
if (i%2 == 0)
*this = *elInfo[0];
......
......@@ -128,9 +128,9 @@ namespace AMDiS {
{
using mtl::tag::major; using mtl::tag::nz; using mtl::begin; using mtl::end;
namespace traits = mtl::traits;
- typedef DOFMatrix::base_matrix_type Matrix;
- typedef traits::range_generator<major, Matrix>::type cursor_type;
- typedef traits::range_generator<nz, cursor_type>::type icursor_type;
+ // typedef DOFMatrix::base_matrix_type Matrix;
+ // typedef traits::range_generator<major, Matrix>::type cursor_type;
+ // typedef traits::range_generator<nz, cursor_type>::type icursor_type;
// Create a map from coords of all DOFs, to the DOF indices in this problem.
CoordToDof coordToDof;
......
......@@ -686,7 +686,7 @@ namespace AMDiS {
// === of this edge to the refinement patch. ===
for (int edgeIndex = 0;
- edgeIndex < static_cast<unsigned int>(refineEdges.size()); edgeIndex++) {
+ edgeIndex < static_cast<int>(refineEdges.size()); edgeIndex++) {
Element *otherEl = refineEdges[edgeIndex].first;
TraverseStack stack2;
ElInfo *elInfo2 =
......
......@@ -129,14 +129,15 @@ namespace AMDiS {
/// DOFVector to be evaluated at quadrature points.
DOFVectorBase<double>* vec;
+ /// Function for c.
+ AbstractFunction<double, double> *f;
/// Constant factor of zero order term.
double factor;
/// Vector v at quadrature points.
mtl::dense_vector<double> vecAtQPs;
- /// Function for c.
- AbstractFunction<double, double> *f;
};
......@@ -176,6 +177,10 @@ namespace AMDiS {
/// DOFVectorBase to be evaluated at quadrature points.
DOFVectorBase<double>* vec1;
DOFVectorBase<double>* vec2;
+ /// Function for c.
+ AbstractFunction<double, double> *f1;
+ AbstractFunction<double, double> *f2;
/// Constant factor of zero order term.
double factor;
......@@ -183,10 +188,6 @@ namespace AMDiS {
/// Vector v at quadrature points.
mtl::dense_vector<double> vecAtQPs1;
mtl::dense_vector<double> vecAtQPs2;
- /// Function for c.
- AbstractFunction<double, double> *f1;
- AbstractFunction<double, double> *f2;
};
......@@ -232,6 +233,9 @@ namespace AMDiS {
DOFVectorBase<double>* vec1;
/// Second DOFVector to be evaluated at quadrature points.
DOFVectorBase<double>* vec2;
+ /// Function for c.
+ BinaryAbstractFunction<double, double, double> *f;
/// Constant factor of zero order term.
double factor;
......@@ -240,9 +244,6 @@ namespace AMDiS {
mtl::dense_vector<double> vecAtQPs1;
/// Values of the second DOFVector at the quadrature points.
mtl::dense_vector<double> vecAtQPs2;
- /// Function for c.
- BinaryAbstractFunction<double, double, double> *f;
};
......@@ -281,15 +282,15 @@ namespace AMDiS {
protected:
/// DOFVectors to be evaluated at quadrature points.
DOFVectorBase<double> *vec1, *vec2, *vec3;
+ /// Function for c.
+ TertiaryAbstractFunction<double, double, double, double> *f;
/// Constant factor of zero order term.
double factor;
/// Vectors at quadrature points.
mtl::dense_vector<double> vecAtQPs1, vecAtQPs2, vecAtQPs3;
- /// Function for c.
- TertiaryAbstractFunction<double, double, double, double> *f;
};
......
......@@ -68,15 +68,17 @@ namespace AMDiS { namespace Parallel {
elInfo = stack.traverseNext(elInfo);
}
- if (mesh->getDim() == 2)
+ if (mesh->getDim() == 2) {
TEST_EXIT(elCounter == 2 * mpiSize * mpiSize)
("The number of macro elements is %d, but must be %d for %d number of nodes!",
elCounter, 2 * mpiSize * mpiSize, mpiSize);
+ }
- if (mesh->getDim() == 3)
+ if (mesh->getDim() == 3) {
TEST_EXIT(elCounter == 6 * static_cast<int>(pow(mpiSize, 1.5)))
("The number of macro elements is %d, but must be %d for %d number of nodes!",
elCounter, 6 * static_cast<int>(pow(mpiSize, 1.5)), mpiSize);
+ }
}
......@@ -225,8 +227,8 @@ namespace AMDiS { namespace Parallel {
while (elInfo) {
TEST_EXIT(elInfo->getLevel() == 0)("Should not happen!\n");
- Element *el = elInfo->getElement();
- int elIndex = el->getIndex();
+ // Element *el = elInfo->getElement();
+ // int elIndex = el->getIndex();
int zeroCoordCounter = 0;
for (int i = 0; i < mesh->getGeo(VERTEX); i++)
......@@ -252,7 +254,7 @@ namespace AMDiS { namespace Parallel {
mpiSize, stripes.size());
int testElementCounter = 0;
- for (int stripe = 0; stripe < stripes.size(); stripe++) {
+ for (size_t stripe = 0; stripe < stripes.size(); stripe++) {
MacroElement *mel = stripes[stripe][0];
set<int> localDofs;
......@@ -321,8 +323,8 @@ namespace AMDiS { namespace Parallel {
TEST_EXIT(testElementCounter == nElements)("Should not happen!\n");
- int elsPerStripe = stripes[0].size();
- for (int i = 0; i < stripes.size(); i++) {
+ size_t elsPerStripe = stripes[0].size();
+ for (size_t i = 0; i < stripes.size(); i++) {
TEST_EXIT(stripes[i].size() == elsPerStripe)
("Should not happen!\n");
}
......@@ -330,18 +332,18 @@ namespace AMDiS { namespace Parallel {
// === Computing mapping from macro element indices to ranks ===
- int nStripes = elInStripe.size();
- int procPerStripe = mpiSize / nStripes;
- int elsPerRank = elsPerStripe / procPerStripe;
+ size_t nStripes = elInStripe.size();
+ size_t procPerStripe = mpiSize / nStripes;
+ size_t elsPerRank = elsPerStripe / procPerStripe;
TEST_EXIT(mpiSize % nStripes == 0)("Should not happen!\n");
TEST_EXIT(elsPerStripe % procPerStripe == 0)("Should not happen!\n");
elStripeInRank.clear();
int rankCount = 0;
- for (int i = 0; i < nStripes; i++) {
- for (int j = 0; j < procPerStripe; j++) {
- for (int k = 0; k < elsPerRank; k++)
+ for (size_t i = 0; i < nStripes; i++) {
+ for (size_t j = 0; j < procPerStripe; j++) {
+ for (size_t k = 0; k < elsPerRank; k++)
elStripeInRank[elInStripe[i][j * elsPerRank + k]] = rankCount;
rankCount++;
}
......
......@@ -105,14 +105,18 @@ namespace AMDiS { namespace Parallel {
DofContainerSet dofSet;
DofContainer dofVec;
- for (DataIter rankIt = data.begin(); rankIt != data.end(); ++rankIt)
+ for (DataIter rankIt = data.begin(); rankIt != data.end(); ++rankIt) {
for (FeMapIter feIt = rankIt->second.begin();
- feIt != rankIt->second.end(); ++feIt)
- if (feIt->first == feSpace)
- if (countDouble)
+ feIt != rankIt->second.end(); ++feIt) {
+ if (feIt->first == feSpace) {
+ if (countDouble) {
dofVec.insert(dofVec.end(), feIt->second.begin(), feIt->second.end());
- else
+ } else {
dofSet.insert(feIt->second.begin(), feIt->second.end());
+ }
+ }
+ }
+ }
if (countDouble)
return static_cast<int>(dofVec.size());
......
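This hunk, like several later ones, only adds braces to nested loops and if/else chains. When an if is nested inside another if without braces, GCC suggests explicit braces to avoid an ambiguous 'else'; adding them makes the association explicit without changing behaviour. A sketch with hypothetical names:

void countDof(bool matchesFeSpace, bool countDouble)
{
  if (matchesFeSpace) {
    if (countDouble) {
      // count every occurrence
    } else {
      // count each DOF once; the braces make it explicit that this 'else'
      // belongs to the inner 'if (countDouble)'.
    }
  }
}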
......@@ -703,9 +703,11 @@ namespace AMDiS { namespace Parallel {
case FACE:
return getOwner(faceElements[faceIter->first], level);
break;
+ default:
+ ERROR_EXIT("There is something reallllly wrong!\n");
+ return -1;
}
- ERROR_EXIT("There is something reallllly wrong!\n");
- return -1;
}
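Here the error exit moves from after the switch into a default: label, so every path through the function returns a value and enumerators without their own case are still caught by the compiler. A generic sketch with an invented enum:

enum GeoKind { VERTEX_KIND, EDGE_KIND, FACE_KIND };

int ownerOf(GeoKind kind)
{
  switch (kind) {
  case VERTEX_KIND: return 0;
  case EDGE_KIND:   return 1;
  case FACE_KIND:   return 2;
  default:
    // Unreachable for valid input, but guarantees a return value on every
    // path and covers enumerators that might be added later.
    return -1;
  }
}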
......@@ -1134,7 +1136,7 @@ namespace AMDiS { namespace Parallel {
const unsigned int vectorOverhead = sizeof(vector<int>);
const unsigned int mapOverhead = 48; //sizeof(_Rb_tree<int, int>);
const unsigned int flatMapOverhead = 24;
- const unsigned int mapEntryOverhead = 40; // sizeof(_Rb_tree_node_base);
+ // const unsigned int mapEntryOverhead = 40; // sizeof(_Rb_tree_node_base);
const unsigned int setOverhead = 48;
const unsigned int setEntryOverhead = 40;
......
......@@ -52,7 +52,7 @@ namespace AMDiS { namespace Parallel {
if (mpiComm == MPI::COMM_SELF)
return;
- int levelMpiRank = mpiComm.Get_rank();
+ // int levelMpiRank = mpiComm.Get_rank();
int globalMpiRank = MPI::COMM_WORLD.Get_rank();
std::set<int> levelRanks = levelData.getLevelRanks(level);
......
......@@ -72,7 +72,7 @@ namespace AMDiS { namespace Parallel {
typedef traits::range_generator<nz, cursor_type>::type icursor_type;
typedef vector<pair<int, int> > MatrixNnzEntry;
- typedef map<int, DofContainer> RankToDofContainer;
+ // typedef map<int, DofContainer> RankToDofContainer;
// Stores to each rank a list of nnz entries (i.e. pairs of row and column
// index) that this rank will send to. These nnz entries will be assembled
......@@ -229,7 +229,7 @@ namespace AMDiS { namespace Parallel {
vector<int> newCols;
perMap->mapDof(colFeSpace, colDofIndex.global, perColAsc, newCols);
- for (int aa = 0; aa < newCols.size(); aa++) {
+ for (size_t aa = 0; aa < newCols.size(); aa++) {
int petscColIdx = colDofMap.getMatIndex(colComp, newCols[aa]);
// The row DOF is a rank DOF, if also the column is a rank DOF,
......
......@@ -445,7 +445,7 @@ namespace AMDiS { namespace Parallel {
map<int, int> arhElInRank;
map<int, int> arhElCodeSize;
- int nProc = ArhReader::readMetaData(filename, arhElInRank, arhElCodeSize);
+ /*int nProc = */ArhReader::readMetaData(filename, arhElInRank, arhElCodeSize);
for (map<int, int>::iterator it = arhElCodeSize.begin();
it != arhElCodeSize.end(); ++it)
elemWeights[it->first] = it->second;
......@@ -1117,7 +1117,9 @@ namespace AMDiS { namespace Parallel {
bool meshChanged = false;
// === Check the boundaries and adapt mesh if necessary. ===
- MSG_DBG("Run checkAndAdaptBoundary ...\n");
+ #if (DEBUG != 0)
+ MSG("Run checkAndAdaptBoundary ...\n");
+ #endif
// Check for periodic boundaries within rank's subdomain.
for (InteriorBoundary::iterator it(intBoundary[0].getPeriodic());
......@@ -1170,7 +1172,9 @@ namespace AMDiS { namespace Parallel {
if (repartitioningFailed > 0) {
- MSG_DBG("Repartitioning not tried because it has failed in the past!\n");
+ #if (DEBUG != 0)
+ MSG("Repartitioning not tried because it has failed in the past!\n");
+ #endif
repartitioningFailed--;
} else if (tryRepartition &&
......
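The two hunks above replace MSG_DBG with an MSG call guarded by #if (DEBUG != 0), keeping the message out of release builds entirely. The same guard is used in later hunks around variables (an MPI rank, a timer) that are referenced only by debug output and would otherwise be reported as unused in release builds. A sketch, assuming a DEBUG macro set by the build system:

#include <cstdio>

#define DEBUG 0   // assumption: the build system defines this as 1 in debug builds

void initialize(int meshLevel)
{
#if (DEBUG != 0)
  // Declared and used only in debug builds, so release builds see neither the
  // variable nor an unused-variable warning for it.
  int rank = 0;
  std::printf("Init on mesh level %d (rank %d)\n", meshLevel, rank);
#else
  (void) meshLevel;  // avoid an unused-parameter warning in release builds
#endif
}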
......@@ -315,8 +315,10 @@ namespace AMDiS { namespace Parallel {
// Create traverse stack and traverse within the mesh until the element,
// which should be fitted to the mesh structure code, is reached.
TraverseStack stack;
+ #if (DEBUG != 0)
ElInfo *elInfo =
stack.traverseFirstOneMacro(mesh, boundEl.elIndex, -1, traverseFlag);
+ #endif
TEST_EXIT_DBG(elInfo->getElement() == boundEl.el)
("This should not happen!\n");
......
......@@ -307,7 +307,7 @@ namespace AMDiS { namespace Parallel {
for (int i = 0; i < mpiSize; i++)
tpwgts[i] = 1.0 / static_cast<double>(nparts);
- float scale = 10000.0 / maxWgt;
+ // float scale = 10000.0 / maxWgt;
for (int i = 0; i < nElements; i++)
wgts[i] = floatWgts[i];
// wgts[i] = static_cast<int>(floatWgts[i] * scale);
......
......@@ -29,11 +29,11 @@ namespace AMDiS { namespace Parallel {
ParallelCoarseSpaceSolver::ParallelCoarseSpaceSolver(string name)
: ParallelSolver(name, true),
- initFileStr(name),
lastMeshNnz(-1),
alwaysCreateNnzStructure(false),
rStartInterior(0),
- nGlobalOverallInterior(0)
+ nGlobalOverallInterior(0),
+ initFileStr(name)
{
Parameters::get("parallel->always create nnz structure",
alwaysCreateNnzStructure);
......
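This hunk only reorders the constructor's member-initializer list. Members are initialized in the order they are declared in the class, not in the order the list is written, and GCC flags the mismatch with -Wreorder ("'x' will be initialized after 'y'"); writing the list in declaration order, as done here for initFileStr, silences the warning without changing behaviour. Minimal illustration with an invented class:

class Solver
{
public:
  // 'nnz' is declared before 'name', so it must also come first in the
  // initializer list; the reverse order compiles but triggers -Wreorder.
  explicit Solver(const char* n) : nnz(-1), name(n) {}

private:
  int nnz;
  const char* name;
};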
......@@ -45,7 +45,7 @@ namespace AMDiS { namespace Parallel {
MPI::Intracomm &mpiComm = pdb.levelData.getMpiComm(0);
int mpiRank = mpiComm.Get_rank();
- MPI::Request request[pdb.intBoundary[0].own.size() +
+ MPI::Request *request = new MPI::Request[pdb.intBoundary[0].own.size() +
pdb.intBoundary[0].other.size() +
pdb.intBoundary[0].periodic.size() * 2];
int requestCounter = 0;
......@@ -152,6 +152,8 @@ namespace AMDiS { namespace Parallel {
delete [] recvBuffers[bufCounter++];
}
+ delete[] request;
}
......@@ -179,7 +181,6 @@ namespace AMDiS { namespace Parallel {
it != perMap.periodicDofAssociations[feSpace].end(); ++it) {
WorldVector<double> c;
pdb.mesh->getDofIndexCoords(it->first, pdb.feSpaces[0], c);
- int nAssoc = it->second.size();
}
......@@ -366,12 +367,12 @@ namespace AMDiS { namespace Parallel {
int mpiSize = mpiComm.Get_size();
std::set<int> &ranks = pdb.levelData.getLevelRanks(level);
- TEST_EXIT(mpiSize == ranks.size())
+ TEST_EXIT(mpiSize == static_cast<int>(ranks.size()))
("Wrong mpi sizes: Get_size() = %d ranks.size() = %d\n",
mpiSize, ranks.size());
/// Defines a mapping type from rank numbers to sets of DOFs.
- typedef map<int, DofContainer> RankToDofContainer;
+ // typedef map<int, DofContainer> RankToDofContainer;
// Maps to each neighbour rank an array of WorldVectors. This array contains the
// coordinates of all DOFs this rank shares on the interior boundary with the
......@@ -400,7 +401,7 @@ namespace AMDiS { namespace Parallel {
map<int, int> sendSize;
map<int, int> recvSize;
map<int, int> recvSizeBuffer;
- MPI::Request request[(mpiSize - 1) * 2];
+ MPI::Request *request = new MPI::Request[(mpiSize - 1) * 2];
int requestCounter = 0;
for (RankToCoords::iterator it = sendCoords.begin(); it != sendCoords.end(); ++it)
......@@ -441,6 +442,8 @@ namespace AMDiS { namespace Parallel {
mpi::globalAdd(foundError);
TEST_EXIT(foundError == 0)("Error found on at least on rank!\n");
+ delete[] request;
// === Now we know that the number of send and received DOFs fits together. ===
// === So we can check if also the coordinates of the communicated DOFs are ===
// === the same on both corresponding ranks. ===
......@@ -514,8 +517,8 @@ namespace AMDiS { namespace Parallel {
if (mpiComm == MPI::COMM_SELF)
continue;
- int mpiRank = mpiComm.Get_rank();
- int mpiSize = mpiComm.Get_size();
+ // int mpiRank = mpiComm.Get_rank();
+ // int mpiSize = mpiComm.Get_size();
typedef map<int, WorldVector<double> > CoordsIndexMap;
CoordsIndexMap coordsToIndex;
......
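The hunks above replace MPI::Request arrays whose length is only known at run time. A stack array with a run-time bound is a variable-length array, a C99 feature that C++ compilers accept only as an extension (GCC warns with -Wvla / -pedantic); the commit switches to new[] and pairs it with the delete[] calls added at the end of the affected functions. A sketch of the pattern with a stand-in type (std::vector would be an exception-safe alternative):

#include <cstddef>
#include <vector>

struct Request {};  // stand-in for MPI::Request

void exchange(std::size_t nNeighbours)
{
  // Request requests[nNeighbours];              // VLA: not standard C++ (-Wvla)
  Request* requests = new Request[nNeighbours];  // pattern used in the commit
  // ... post sends/receives and wait on them ...
  delete[] requests;                             // must pair with new[]

  // Alternative that avoids manual memory management:
  std::vector<Request> buffered(nNeighbours);
  (void) buffered;
}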
......@@ -108,8 +108,6 @@ namespace AMDiS { namespace Parallel {
TEST_EXIT_DBG(dofComm)("No DOF communicator defined!\n");
- typedef map<int, map<const FiniteElemSpace*, DofContainer> >::iterator it_type;
// === Send all global indices of DOFs that are owned by the rank to all ===
// === other ranks that also include this DOF. ===
......
......@@ -177,7 +177,9 @@ namespace AMDiS
int firstRow = firstRow0 + firstRow1;
int mpiSize = MPI::COMM_WORLD.Get_size();
+ #if (DEBUG != 0)
int mpiRank = MPI::COMM_WORLD.Get_rank();
+ #endif
vector<int> allFirstRow0(mpiSize + 1, 0);
vector<int> allFirstRow1(mpiSize + 1, 0);
MPI::COMM_WORLD.Allgather(&nRankRows0, 1, MPI_INT, &(allFirstRow0[1]), 1, MPI_INT);
......
......@@ -78,6 +78,7 @@ namespace AMDiS { namespace Parallel {
VecDestroy(&y1);
VecDestroy(&y2);
+ PetscFunctionReturn(0);
}
......
......@@ -43,6 +43,8 @@ namespace AMDiS { namespace Parallel {
KSPSolve(data->kspMplusK, b, y1);
MatMult(data->matMass, y1, y2);
KSPSolve(data->kspMplusK, y2, x);
+ PetscFunctionReturn(0);
}
/// solve Cahn-Hilliard Preconditioner
......@@ -79,6 +81,8 @@ namespace AMDiS { namespace Parallel {
MatDestroy(&S);
MatDestroy(&K);
KSPDestroy(&kspS);
+ PetscFunctionReturn(0);
}
......@@ -139,8 +143,6 @@ namespace AMDiS { namespace Parallel {
MPI::COMM_WORLD.Barrier();
double wtime = MPI::Wtime();
int dim = componentSpaces[0]->getMesh()->getDim();
- vector<int> chPotentialComponent;
- chPotentialComponent.push_back(0);
vector<int> chSchurComponent;
......
......@@ -49,15 +49,15 @@ namespace AMDiS { namespace Parallel {
schurPrimalSolver(0),
levelMode(1),
subDomainIsLocal(true),
+ subdomain(NULL),
+ massMatrixSolver(NULL),
printTimings(false),
augmentedLagrange(false),
nRankEdges(0),
nOverallEdges(0),
dirichletMode(0),
stokesMode(false),
- pressureComponent(-1),
- subdomain(NULL),
- massMatrixSolver(NULL)
+ pressureComponent(-1)
{
FUNCNAME("PetscSolverFeti::PetscSolverFeti()");
......@@ -147,7 +147,9 @@ namespace AMDiS { namespace Parallel {
{
FUNCNAME("PetscSolverFeti::initialize()");
- MSG_DBG("Init FETI-DP on mesh level %d\n", meshLevel);
+ #if (DEBUG != 0)
+ MSG("Init FETI-DP on mesh level %d\n", meshLevel);
+ #endif
TEST_EXIT_DBG(meshLevel + 2 <=
meshDistributor->getMeshLevelData().getNumberOfLevels())
......@@ -301,7 +303,7 @@ namespace AMDiS { namespace Parallel {
}
- for (unsigned int i = 0; i < componentSpaces.size(); i++) {
+ for (int i = 0; i < static_cast<int>(componentSpaces.size()); i++) {
const FiniteElemSpace *feSpace = componentSpaces[i];
MSG("FETI-DP data for %d-ith component (FE space %p):\n", i, feSpace);
......@@ -572,14 +574,16 @@ namespace AMDiS { namespace Parallel {
for (DofComm::Iterator it(meshDistributor->getDofComm(meshLevel).getRecvDofs(), feSpace);
!it.end(); it.nextRank()) {
int i = 0;
- for (; !it.endDofIter(); it.nextDof())
- if (!isPrimal(component, it.getDofIndex()))
- if (subDomainIsLocal || dofMapSubDomain[feSpace].isRankDof(it.getDofIndex()))
+ for (; !it.endDofIter(); it.nextDof()) {
+ if (!isPrimal(component, it.getDofIndex())) {
+ if (subDomainIsLocal || dofMapSubDomain[feSpace].isRankDof(it.getDofIndex())) {
boundaryDofRanks[feSpace][it.getDofIndex()] =
stdMpi.getRecvData(it.getRank())[i++];
- else {
+ } else {
lagrangeMap[component].insertNonRankDof(it.getDofIndex());
}
+ }
+ }
}
......@@ -957,7 +961,7 @@ namespace AMDiS { namespace Parallel {
int rowCounter = rStartEdges;
for (vector<vector<BoundaryObject> >::iterator it = allEdges.begin();
it != allEdges.end(); ++it) {
- for (int component = 0; component < componentSpaces.size(); component++) {
+ for (int component = 0; component < static_cast<int>(componentSpaces.size()); component++) {
for (vector<BoundaryObject>::iterator edgeIt = it->begin();
edgeIt != it->end(); ++edgeIt) {
......@@ -1488,7 +1492,7 @@ namespace AMDiS { namespace Parallel {
MatGetVecs(mat_duals_duals, PETSC_NULL,
&(lumpedData->tmp_vec_duals1));
- for (unsigned int component = 0; component < componentSpaces.size();
+ for (int component = 0; component < static_cast<int>(componentSpaces.size());
component++) {
if (stokesMode && component == pressureComponent)
continue;
......@@ -1967,7 +1971,7 @@ namespace AMDiS { namespace Parallel {
// === Create scatter to get solutions of all primal nodes that are ===
// === contained in rank's domain. ===
- int nComponents = vec.getSize();
+ unsigned int nComponents = vec.getSize();
vector<PetscInt> globalIsIndex, localIsIndex;
globalIsIndex.reserve(primalDofMap.getLocalDofs());
......@@ -2020,7 +2024,7 @@ namespace AMDiS { namespace Parallel {
// === And copy from PETSc local vectors to the DOF vectors. ===
int cnt = 0;
- for (int component = 0; component < nComponents; component++) {
+ for (unsigned int component = 0; component < nComponents; component++) {
DOFVector<double>& dofVec = *(vec.getDOFVector(component));
for (DofMap::iterator it = localDofMap[component].getMap().begin();
......@@ -2277,8 +2281,8 @@ namespace AMDiS { namespace Parallel {
colComponent == pressureComponent))
continue;
- const FiniteElemSpace *rowFeSpace = dofMat->getRowFeSpace();
- const FiniteElemSpace *colFeSpace = dofMat->getColFeSpace();
+ // const FiniteElemSpace *rowFeSpace = dofMat->getRowFeSpace();
+ // const FiniteElemSpace *colFeSpace = dofMat->getColFeSpace();
traits::col<Matrix>::type col(dofMat->getBaseMatrix());
traits::const_value<Matrix>::type value(dofMat->getBaseMatrix());
......
......@@ -227,7 +227,7 @@ namespace AMDiS { namespace Parallel {
nestMat[14] = PETSC_NULL;
nestMat[15] = PETSC_NULL;
- Mat nestFetiMat;
+ // Mat nestFetiMat;
MatCreateNest(feti.domainComm, 4, PETSC_NULL, 4, PETSC_NULL,
&(nestMat[0]), &mat);
} else {
......@@ -244,7 +244,7 @@ namespace AMDiS { namespace Parallel {
nestMat[7] = PETSC_NULL;
nestMat[8] = PETSC_NULL;
- Mat nestFetiMat;
+ // Mat nestFetiMat;
MatCreateNest(feti.domainComm, 3, PETSC_NULL, 3, PETSC_NULL,
&(nestMat[0]), &mat);
}
......
......@@ -387,6 +387,8 @@ namespace AMDiS { namespace Parallel {
VecDestroy(&tmpPrimal);
VecDestroy(&tmpInterior0);
VecDestroy(&tmpInterior1);
+ PetscFunctionReturn(0);
}
......
......@@ -204,7 +204,7 @@ namespace AMDiS { namespace Parallel {
Vec tmp;
VecNestGetSubVec(petscSolVec, i, &tmp);
- int nRankDofs = (*interiorMap)[feSpace].nRankDofs;
+ // int nRankDofs = (*interiorMap)[feSpace].nRankDofs;
PetscScalar *vecPointer;
VecGetArray(tmp, &vecPointer);
......
......@@ -92,7 +92,9 @@ namespace AMDiS { namespace Parallel {
TEST_EXIT_DBG(interiorMap)("No parallel mapping object defined!\n");
TEST_EXIT_DBG(seqMat)("No DOF matrix defined!\n");
+ #if (DEBUG != 0)
double wtime = MPI::Wtime();