
Commit 2497a645 authored by Siqi Ling

add function writeMetaData to Arh2Writer in parallel mode

parent 31edf520
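
For orientation before the diff: a minimal sketch of how the new entry point might be called from application code. The function signature and the HAVE_PARALLEL_DOMAIN_AMDIS guard come from the commit itself; the include path, helper name, and meta filename are assumptions for illustration. Note the call is collective, since every rank sends its data to rank 0.

#include "io/Arh2Writer.h"   // assumed include path

// Hypothetical helper: dump the per-rank mesh meta data for an ARH2 file.
void writeSolutionMeta(AMDiS::Mesh *mesh)
{
#ifdef HAVE_PARALLEL_DOMAIN_AMDIS
  // All ranks must enter this call: rank 0 gathers (macro element index,
  // element count) pairs from every rank and writes the meta file.
  AMDiS::io::Arh2Writer::writeMetaData(mesh, "solution.meta");
#endif
}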
@@ -163,6 +163,7 @@ SET(AMDIS_SRC ${SOURCE_DIR}/AdaptBase.cc
              ${SOURCE_DIR}/io/ArhReader.cc
              ${SOURCE_DIR}/io/detail/ArhReader.cc
              ${SOURCE_DIR}/io/Arh2Reader.cc
              ${SOURCE_DIR}/io/Arh2Writer.cc
              ${SOURCE_DIR}/io/detail/Arh2Reader.cc
              ${SOURCE_DIR}/io/detail/ArhWriter.cc
              ${SOURCE_DIR}/io/detail/Arh2Writer.cc

/******************************************************************************
 *
 * AMDiS - Adaptive multidimensional simulations
 *
 * Copyright (C) 2013 Dresden University of Technology. All Rights Reserved.
 * Web: https://fusionforge.zih.tu-dresden.de/projects/amdis
 *
 * Authors:
 * Simon Vey, Thomas Witkowski, Andreas Naumann, Simon Praetorius, et al.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 *
 * This file is part of AMDiS
 *
 * See also license.opensource.txt in the distribution.
 *
 ******************************************************************************/


#include "Arh2Writer.h"
#include "Mesh.h"
#include "MeshStructure.h"
#include "parallel/StdMpi.h"

namespace AMDiS { namespace io {

  using namespace std;
  using namespace AMDiS::Parallel;

  namespace Arh2Writer
  {
#ifdef HAVE_PARALLEL_DOMAIN_AMDIS
    void writeMetaData(Mesh *mesh, string metaFilename)
    {
      FUNCNAME("Arh2Writer::writeMetaData()");

      int mpiSize = MPI::COMM_WORLD.Get_size();
      vector<std::set<pair<int, int> > > overallData;
      std::set<pair<int, int> > data;

      // Calculate local data
      MeshStructure elementStructure;
      int macroElIndex = -1;

      TraverseStack stack;
      ElInfo *elInfo = stack.traverseFirst(mesh, -1, Mesh::CALL_EVERY_EL_PREORDER);
      while (elInfo) {
        if (elInfo->getLevel() == 0) {
          if (macroElIndex != -1) {
            elementStructure.commit();
            data.insert(make_pair(macroElIndex, elementStructure.getNumElements()));
          }
          elementStructure.clear();

          macroElIndex = elInfo->getElement()->getIndex();
        }
        elementStructure.insertElement(elInfo->getElement()->isLeaf());
        elInfo = stack.traverseNext(elInfo);
      }
      TEST_EXIT_DBG(macroElIndex != -1)("Should not happen!\n");
      elementStructure.commit();
      data.insert(make_pair(macroElIndex, elementStructure.getNumElements()));

      // Collect data from other processors
      StdMpi<std::set<pair<int, int> > > stdMpi(MPI::COMM_WORLD);

      if (MPI::COMM_WORLD.Get_rank() == 0) {
        for (int rank = 1; rank < mpiSize; rank++)
          stdMpi.recv(rank);
      } else {
        stdMpi.send(0, data);
      }
      stdMpi.startCommunication();

      if (MPI::COMM_WORLD.Get_rank() == 0) {
        overallData.push_back(data);

        for (int rank = 1; rank < mpiSize; rank++) {
          std::set<pair<int, int> >& recvData = stdMpi.getRecvData(rank);
          overallData.push_back(recvData);
        }

        // Write to meta file
        ofstream file;
        file.open(metaFilename.c_str());

        file << "METAARH\n";
        file << "" << "\n";
        file << mpiSize << "\n";
        for (int i = 0; i < mpiSize; i++) {
          file << i << " " << overallData[i].size() << "\n";
          for (std::set<pair<int, int> >::iterator it = overallData[i].begin();
               it != overallData[i].end(); ++it)
            file << it->first << " " << it->second << "\n";
        }
        file.close();
      }
    }
#endif
  }
} }
\ No newline at end of file
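
Judging from the write loop above, the meta file is line-oriented: the literal header METAARH, one empty line, the MPI size, then for each rank a line with the rank number and its pair count, followed by one "macroElementIndex numElements" pair per line. A hypothetical example for two ranks, where rank 0 owns macro elements 0 and 1 and rank 1 owns macro element 2:

METAARH

2
0 2
0 15
1 31
1 1
2 7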
@@ -97,7 +97,11 @@ namespace AMDiS { namespace io {
      std::vector<DOFVector<double>*> vecs;
      detail::write(filename, mesh, vecs, writeParallel);
    }

#ifdef HAVE_PARALLEL_DOMAIN_AMDIS
    void writeMetaData(Mesh *mesh, std::string filename);
#endif
  } // end namespace Arh2Writer
} } // end namespace io, AMDiS

@@ -29,6 +29,7 @@ namespace AMDiS { namespace Parallel {
  MPI_Datatype StdMpiHelper<vector<int> >::mpiDataType = MPI_INT;
  MPI_Datatype StdMpiHelper<std::set<int> >::mpiDataType = MPI_INT;
  MPI_Datatype StdMpiHelper<vector<std::set<int> > >::mpiDataType = MPI_INT;
  MPI_Datatype StdMpiHelper<std::set<std::pair<int, int> > >::mpiDataType = MPI_INT;
  MPI_Datatype StdMpiHelper<vector<double> >::mpiDataType = MPI_DOUBLE;
  MPI_Datatype StdMpiHelper<vector<vector<double> > >::mpiDataType = MPI_DOUBLE;
  MPI_Datatype StdMpiHelper<vector<MeshStructure> >::mpiDataType = MPI_UNSIGNED_LONG_LONG;
@@ -157,7 +158,35 @@ namespace AMDiS { namespace Parallel {
                        counter, bufSize);
  }


  // T = std::set<std::pair<int, int> >

  int StdMpiHelper<std::set<std::pair<int, int> > >::getBufferSize(std::set<std::pair<int, int> > &data)
  {
    return data.size() * 2;
  }

  void StdMpiHelper<std::set<std::pair<int, int> > >::createBuffer(std::set<std::pair<int, int> > &data,
                                                                   int *buf)
  {
    int i = 0;
    for (std::set<std::pair<int, int> >::iterator it = data.begin(); it != data.end(); ++it) {
      buf[i++] = it->first;
      buf[i++] = it->second;
    }
  }

  void StdMpiHelper<std::set<std::pair<int, int> > >::makeFromBuffer(std::set<std::pair<int, int> > &data,
                                                                     int *buf,
                                                                     int bufSize)
  {
    data.clear();
    for (int i = 0; i < bufSize; i += 2)
      data.insert(std::make_pair(buf[i], buf[i + 1]));
  }


  // T = vector<double>

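The three helpers implement a flatten-and-rebuild scheme: each pair occupies two consecutive ints in the MPI buffer, which is why a plain MPI_INT datatype suffices. A self-contained sketch of that round trip (a standalone test with hypothetical values, not part of the commit):

#include <cassert>
#include <set>
#include <utility>
#include <vector>

int main()
{
  std::set<std::pair<int, int> > data;
  data.insert(std::make_pair(3, 15));
  data.insert(std::make_pair(7, 31));

  // getBufferSize: two ints per pair.
  std::vector<int> buf(data.size() * 2);

  // createBuffer: flatten the pairs into consecutive ints.
  int i = 0;
  for (std::set<std::pair<int, int> >::iterator it = data.begin();
       it != data.end(); ++it) {
    buf[i++] = it->first;
    buf[i++] = it->second;
  }

  // makeFromBuffer: rebuild the set from consecutive int pairs.
  std::set<std::pair<int, int> > restored;
  for (int j = 0; j < static_cast<int>(buf.size()); j += 2)
    restored.insert(std::make_pair(buf[j], buf[j + 1]));

  assert(restored == data);
  return 0;
}
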
@@ -107,6 +107,20 @@ namespace AMDiS { namespace Parallel {
    static void makeFromBuffer(std::vector<std::set<int> > &data,
                               int *buf, int bufSize);
  };

  template<>
  struct StdMpiHelper<std::set<std::pair<int, int> > > {
    static MPI_Datatype mpiDataType;

    typedef int cppDataType;

    static int getBufferSize(std::set<std::pair<int, int> > &data);

    static void createBuffer(std::set<std::pair<int, int> > &data, int *buf);

    static void makeFromBuffer(std::set<std::pair<int, int> > &data,
                               int *buf, int bufSize);
  };

  template<>
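
With this declaration in place, StdMpi<std::set<std::pair<int, int> > >, as used by writeMetaData above, resolves to the new specialization. mpiDataType is MPI_INT because the set travels as a flat int array, two entries per pair, so no derived MPI datatype is needed; the cppDataType typedef presumably tells the StdMpi machinery which element type to use when sizing and allocating those buffers.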