Commit 92775cf7 authored by Naumann, Andreas

Merge from linearAlg branch: my parallelization needed a slightly more general linear solver interface and uses the pvd naming scheme from the PETSc parallelization.
parent 2daa8f2e
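Editor's note: the "pvd naming scheme" referred to above composes one piece filename per rank as "<prefix>-p<rank>-<postfix>"; that is exactly the loop the new writeParallelFile overload factors out below. A minimal standalone sketch of the scheme (makeRankFileNames is illustrative and not part of AMDiS; std::to_string stands in for the boost::lexical_cast used in the patch):

    #include <iostream>
    #include <string>
    #include <vector>

    // Build the per-rank piece names that a .pvtu/.pvd file refers to,
    // mirroring the "-p<rank>-" scheme used by writeParallelFile below.
    std::vector<std::string> makeRankFileNames(const std::string& fnPrefix,
                                               const std::string& fnPostfix,
                                               int nRanks)
    {
      std::vector<std::string> names(nRanks);
      for (int i = 0; i < nRanks; ++i)
        names[i] = fnPrefix + "-p" + std::to_string(i) + "-" + fnPostfix;
      return names;
    }

    int main()
    {
      // Prints solution-p0-00.500.vtu ... solution-p3-00.500.vtu
      for (const std::string& n : makeRankFileNames("solution", "00.500.vtu", 4))
        std::cout << n << '\n';
    }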
@@ -52,10 +52,10 @@ namespace AMDiS
 {
   template<>
   FileWriter<double>::FileWriter(std::string name_,
                                  Mesh *mesh_,
                                  SystemVector *vecs)
     : name(name_),
       mesh(mesh_)
   {
     initialize();
@@ -63,76 +63,76 @@ namespace AMDiS
     /*
      * Removed by Siqi. not sure.
      * for (int i = 0; i < static_cast<int>(vecs->getSize()); i++)
      *   TEST_EXIT(vecs->getDOFVector(0)->getFeSpace() == vecs->getDOFVector(i)->getFeSpace())
      *     ("All FeSpace have to be equal!\n");
      */
     feSpace = vecs->getDOFVector(0)->getFeSpace();

     solutionVecs.resize(vecs->getSize());
     for (int i = 0; i < static_cast<int>(vecs->getSize()); i++)
       solutionVecs[i] = vecs->getDOFVector(i);

     for (size_t i = 0; i < solutionVecs.size(); i++)
       solutionNames.push_back(solutionVecs[i]->getName());
   }

   template<>
   void FileWriter<double>::writeFiles(AdaptInfo *adaptInfo,
                                       bool force,
                                       int level,
                                       Flag flag,
                                       bool (*writeElem)(ElInfo*))
   {
     FUNCNAME("FileWriter<T>::writeFiles()");
     using namespace ::AMDiS::io;

     if (timeModulo > 0.0) {
       if ((lastWriteTime != 0.0 && adaptInfo->getTime() < lastWriteTime + timeModulo) && !force)
         return;
     } else {
       if ((adaptInfo->getTimestepNumber() % tsModulo != 0) && !force)
         return;
     }
     lastWriteTime = adaptInfo->getTime();

     //-----------------by Siqi---------------------//
     if (writeAMDiSFormat || writePeriodicFormat || writeParaViewFormat
         || writeParaViewVectorFormat || writeParaViewAnimation
         || writeDofFormat || (writeArhFormat && !writeArh2Format) || writePovrayFormat)
     {
       for (int i = 0; i < static_cast<int>(solutionVecs.size()); i++)
         TEST_EXIT(solutionVecs[0]->getFeSpace() == solutionVecs[i]->getFeSpace())
           ("All FeSpaces have to be equal!\n");
     }

     // Containers, which store the data to be written;
     std::vector<DataCollector<>*> dataCollectors(solutionVecs.size());

     if (writeElem) {
       for (int i = 0; i < static_cast<int>(dataCollectors.size()); i++)
         dataCollectors[i] = new DataCollector<>(feSpace, solutionVecs[i],
                                                 level, flag, writeElem);
     } else {
       for (int i = 0; i < static_cast<int>(dataCollectors.size()); i++)
         dataCollectors[i] = new DataCollector<>(feSpace, solutionVecs[i],
                                                 traverseLevel,
                                                 flag | traverseFlag,
                                                 writeElement);
     }

     std::string fn = filename;
     if (createParaViewSubDir) {
       using namespace boost::filesystem;
       path vtu_path = fn;
       path data_basedir("data");
       path vtu_filename = vtu_path.filename();
       vtu_path.remove_filename() /= data_basedir;
       try {
         create_directory(vtu_path);
         vtu_path /= vtu_filename;
         fn = vtu_path.string();
       } catch (...) {}
     }

 #if HAVE_PARALLEL_DOMAIN_AMDIS
@@ -142,25 +142,26 @@ namespace AMDiS
 #endif

+    ///TODO: use the getParaViewFilename.. consistent with the parallel domain??
     if (appendIndex) {
       TEST_EXIT(indexLength <= 99)("index length > 99\n");
       TEST_EXIT(indexDecimals <= 97)("index decimals > 97\n");
       TEST_EXIT(indexDecimals < indexLength)("index length <= index decimals\n");

       char formatStr[9];
       char timeStr[20];

       sprintf(formatStr, "%%0%d.%df", indexLength, indexDecimals);
       sprintf(timeStr, formatStr, adaptInfo ? adaptInfo->getTime() : 0.0);

       fn += timeStr;
 #if HAVE_PARALLEL_DOMAIN_AMDIS
       paraFilename += timeStr;
       postfix += timeStr + paraviewFileExt;
 #endif
     } else {
 #if HAVE_PARALLEL_DOMAIN_AMDIS
       postfix += paraviewFileExt;
 #endif
     }
@@ -172,102 +173,121 @@ namespace AMDiS
 #endif

     if (writeAMDiSFormat) {
       MacroWriter::writeMacro(dataCollectors[0],
                               const_cast<char*>((fn + amdisMeshExt).c_str()),
                               adaptInfo ? adaptInfo->getTime() : 0.0);
       MSG("macro file written to %s\n", (fn + amdisMeshExt).c_str());

       ValueWriter::writeValues(dataCollectors[0],
                                (fn + amdisDataExt).c_str(),
                                adaptInfo ? adaptInfo->getTime() : 0.0);
       MSG("value file written to %s\n", (fn + amdisDataExt).c_str());
     }

     if (writePeriodicFormat) {
       MacroWriter::writePeriodicFile(dataCollectors[0],
                                      (fn + periodicFileExt).c_str());
       MSG("periodic file written to %s\n", (fn + periodicFileExt).c_str());
     }

     if (writeParaViewFormat) {
       std::string vtu_file = fn + paraviewFileExt;
       VtkWriter::Aux vtkWriter(&dataCollectors,
                                solutionNames,
                                VtkWriter::Vtuformat(paraViewMode), (paraViewPrecision == 1), writeParaViewVectorFormat);
       vtkWriter.writeFile(vtu_file);

 #if HAVE_PARALLEL_DOMAIN_AMDIS
       if (MPI::COMM_WORLD.Get_rank() == 0) {
 //      vector<string> componentNames;
 //      for (unsigned int i = 0; i < dataCollectors.size(); i++)
 //        componentNames.push_back(dataCollectors[i]->getValues()->getName());
         VtkWriter::detail::writeParallelFile(paraFilename + paraviewParallelFileExt,
                                              MPI::COMM_WORLD.Get_size(),
                                              filename,
                                              postfix,
                                              solutionNames,
                                              VtkWriter::Vtuformat(paraViewMode),
                                              (paraViewPrecision == 1),
                                              writeParaViewVectorFormat);
       }
 #endif
       MSG("ParaView file written to %s\n", (fn + paraviewFileExt).c_str());
     }

     // write vtu-vector files
     if (writeParaViewVectorFormat && !writeParaViewFormat) {
       VtkVectorWriter::writeFile(solutionVecs, fn_ + paraviewFileExt, true, writeAs3dVector);
       MSG("ParaView file written to %s\n", (fn_ + paraviewFileExt).c_str());
     }

     if (writeParaViewAnimation) {
       std::string pvd_file = fn_ + paraviewFileExt;
 #if HAVE_PARALLEL_DOMAIN_AMDIS
       pvd_file = fn_ + paraviewParallelFileExt;
       if (MPI::COMM_WORLD.Get_rank() == 0)
 #endif
       {
         VtkWriter::detail::updateAnimationFile(adaptInfo,
                                                pvd_file,
                                                &paraviewAnimationFrames,
                                                filename + ".pvd");
       }
     }

     if (writeDofFormat) {
       DofWriter::writeFile(solutionVecs, fn + ".dof");
     }

     // write Arh files
     if (!writeArh2Format && writeArhFormat)
       ArhWriter::write(fn_ + ".arh", feSpace->getMesh(), solutionVecs);
     else if (writeArh2Format)
       Arh2Writer::writeFile(solutionVecs, fn_ + ".arh");

 #ifdef HAVE_PNG
     if (writePngFormat) {
       PngWriter pngWriter(dataCollectors[0]);
       pngWriter.writeFile(fn + ".png", pngType);
       MSG("PNG image file written to %s\n", (fn + ".png").c_str());
     }
 #endif

     if (writePovrayFormat) {
       PovrayWriter povrayWriter(dataCollectors[0]);
       povrayWriter.writeFile(fn + ".pov");
       MSG("Povray script written to %s\n", (fn + ".pov").c_str());
     }

     for (int i = 0; i < static_cast<int>(dataCollectors.size()); i++)
       delete dataCollectors[i];
   }

+  template<>
+  string FileWriter<double>::getParaViewFilename(AdaptInfo* adaptInfo) const
+  {
+    string ret(filename);
+    if (appendIndex) {
+      TEST_EXIT(indexLength <= 99)("index length > 99\n");
+      TEST_EXIT(indexDecimals <= 97)("index decimals > 97\n");
+      TEST_EXIT(indexDecimals < indexLength)("index length <= index decimals\n");
+
+      char formatStr[9];
+      char timeStr[20];
+
+      sprintf(formatStr, "%%0%d.%df", indexLength, indexDecimals);
+      sprintf(timeStr, formatStr, adaptInfo ? adaptInfo->getTime() : 0.0);
+
+      ret += timeStr;
+    }
+    return ret;
+  }
 } // end namespace detail
 } // end namespace AMDiS
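Editor's note: a standalone sketch of the time-stamp suffix the new getParaViewFilename helper appends; the values chosen for indexLength and indexDecimals are illustrative, not AMDiS defaults:

    #include <cstdio>

    int main()
    {
      // Same index formatting as getParaViewFilename above.
      int indexLength = 6, indexDecimals = 3;
      char formatStr[9];   // worst case "%099.97f" still fits
      char timeStr[20];
      std::sprintf(formatStr, "%%0%d.%df", indexLength, indexDecimals); // -> "%06.3f"
      std::sprintf(timeStr, formatStr, 1.5);                            // -> "01.500"
      std::printf("solution%s.pvd\n", timeStr);   // -> solution01.500.pvd
    }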
@@ -82,6 +82,11 @@ namespace AMDiS {
       return paraviewAnimationFrames;
     }

+    bool getWriteParaViewFormat() const { return writeParaViewFormat; }
+    std::string getParaViewFilename(AdaptInfo* info) const;
+
+    const std::vector< std::string >& getSolutionNames() const
+    { return solutionNames; }

   protected:
     /// Initialization of the filewriter.
     void initialize();
...
@@ -200,6 +200,8 @@ namespace AMDiS { namespace io {
     }

     void writeParallelFile(string name, int nRanks,
                            string fnPrefix, string fnPostfix,
                            vector<string> &componentNames,
@@ -207,6 +209,22 @@ namespace AMDiS { namespace io {
                            bool highPrecision,
                            bool writeAsVector
                            )
+    {
+      using boost::lexical_cast;
+      vector< string > fileNames(nRanks);
+      for (int i = 0; i < nRanks; i++) {
+        fileNames[i] = fnPrefix + "-p" + lexical_cast<string>(i) + "-" + fnPostfix;
+      }
+      writeParallelFile(name, nRanks, fileNames, componentNames, format, highPrecision, writeAsVector);
+    }
+
+    void writeParallelFile(string name, int nRanks,
+                           vector<string>& subNames,
+                           const vector<string> &componentNames,
+                           ::AMDiS::io::VtkWriter::Vtuformat format,
+                           bool highPrecision,
+                           bool writeAsVector
+                           )
     {
       FUNCNAME("writeParallelFile()");
@@ -277,8 +295,7 @@ namespace AMDiS { namespace io {
       file << "  </PPointData>\n";

       for (int i = 0; i < nRanks; i++) {
-        string pname(fnPrefix + "-p" + lexical_cast<string>(i) + "-" + fnPostfix);
-        boost::filesystem::path filepath(pname);
+        boost::filesystem::path filepath(subNames[i]);
         file << "    <Piece Source=\""
              << boost::filesystem::basename(filepath)
              << boost::filesystem::extension(filepath) << "\"/>\n";
...
@@ -292,7 +292,16 @@ namespace AMDiS { namespace io {
     /// Writes a pvtu file, which contains the links to all the rank files.
     void writeParallelFile(std::string name, int nRanks,
                            std::string fnPrefix, std::string fnPostfix,
-                           std::vector<std::string> &componentNames,
+                           const std::vector<std::string> &componentNames,
+                           ::AMDiS::io::VtkWriter::Vtuformat format = ::AMDiS::io::VtkWriter::ASCII,
+                           bool highPrecision = false,
+                           bool writeAsVector = false
+                           );
+
+    /// Writes a pvtu file which contains the links to the rank files in @subNames
+    void writeParallelFile(std::string name, int nRanks,
+                           std::vector<std::string>& subNames,
+                           const std::vector<std::string> &componentNames,
                            ::AMDiS::io::VtkWriter::Vtuformat format = ::AMDiS::io::VtkWriter::ASCII,
                            bool highPrecision = false,
                            bool writeAsVector = false
...
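Editor's note: how a caller might use the new subNames overload, e.g. to reproduce the PETSc-style rank-file naming mentioned in the commit message. The surrounding function and all file names are hypothetical; the call relies on the declaration above and assumes the AMDiS VtkWriter header is on the include path:

    #include <string>
    #include <vector>
    // plus the AMDiS VtkWriter detail header declaring writeParallelFile

    void writeCustomPvtu(int nRanks)
    {
      // Name each rank file explicitly instead of relying on the fixed
      // "<prefix>-p<rank>-<postfix>" scheme of the old overload.
      std::vector<std::string> subNames(nRanks);
      for (int i = 0; i < nRanks; ++i)
        subNames[i] = "solution-p" + std::to_string(i) + "-.vtu";

      std::vector<std::string> componentNames;
      componentNames.push_back("u");

      // format/highPrecision/writeAsVector keep their declared defaults.
      ::AMDiS::io::VtkWriter::detail::writeParallelFile(
          "solution.pvtu", nRanks, subNames, componentNames);
    }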
@@ -50,10 +50,10 @@ namespace AMDiS {
   /// Wrapper for template-argument dependent constructors
-  template < typename MatrixType, typename Enable = void >
+  template < typename MatrixType, typename Mapper_ = BlockMapper, typename Enable = void >
   struct LinearSolverBase : public LinearSolverInterface
   {
-    typedef BlockMapper Mapper;
+    typedef Mapper_ Mapper;

     LinearSolverBase(std::string name)
       : LinearSolverInterface(name) {}
@@ -63,25 +63,26 @@ namespace AMDiS {
       return matrix;
     }

-  protected:
     /// create a sequential BlockMapper
-    void initMapper(const SolverMatrix<Matrix<DOFMatrix*> >& A)
+    virtual void initMapper(const SolverMatrix<Matrix<DOFMatrix*> >& A)
     {
-      mapper = new BlockMapper(A);
+      mapper = new Mapper(A);
     }

-    void exitMapper()
+    virtual void exitMapper()
     {
       delete mapper;
     }

+  protected:
     MatrixType matrix;
     Mapper* mapper;
   };

 #ifdef HAVE_PARALLEL_MTL4
   template< typename MatrixType >
-  struct LinearSolverBase<MatrixType, typename boost::enable_if< mtl::traits::is_distributed<MatrixType> > >
+  struct LinearSolverBase<MatrixType, ParallelMapper, typename boost::enable_if< mtl::traits::is_distributed<MatrixType> > >
     : public ParallelSolver
   {
     typedef ParallelMapper Mapper;
@@ -94,18 +95,18 @@ namespace AMDiS {
       return matrix;
     }

-  protected:
     /// create a parallel mapper based on local-to-global mapping
-    void initMapper(const SolverMatrix<Matrix<DOFMatrix*> >& A)
+    virtual void initMapper(const SolverMatrix<Matrix<DOFMatrix*> >& A)
     {
       mapper = new ParallelMapper(*ParallelSolver::getDofMapping());
     }

-    void exitMapper()
+    virtual void exitMapper()
     {
       delete mapper;
     }

+  protected:
     MatrixType matrix;
     Mapper* mapper;
   };
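Editor's note: the point of the extra Mapper_ template parameter is that a parallelization can plug in its own mapper type while the sequential default stays BlockMapper, and the virtual initMapper()/exitMapper() let a specialization override the construction. A standalone analogue of that pattern (all types here are illustrative stand-ins, not the real AMDiS classes):

    #include <iostream>

    struct FakeMatrix {};

    struct BlockMapperSketch {
      explicit BlockMapperSketch(const FakeMatrix&) { std::cout << "sequential mapping\n"; }
    };

    // The mapper type is a template parameter with the old behaviour as the
    // default; initMapper()/exitMapper() are virtual so a specialization
    // (e.g. the distributed-MTL4 one above) can override them.
    template <typename MatrixType, typename Mapper_ = BlockMapperSketch>
    struct SolverBaseSketch {
      typedef Mapper_ Mapper;

      virtual void initMapper(const FakeMatrix& A) { mapper = new Mapper(A); }
      virtual void exitMapper() { delete mapper; mapper = nullptr; }
      virtual ~SolverBaseSketch() { exitMapper(); }

      MatrixType matrix;
      Mapper* mapper = nullptr;
    };

    int main()
    {
      SolverBaseSketch<FakeMatrix> solver;   // defaults to BlockMapperSketch
      solver.initMapper(FakeMatrix());
    }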