Commit 92775cf7 authored by Naumann, Andreas

merge from linearAlg-branch: my parallelization needed a slightly more general linear solver interface, and it uses the pvd naming scheme from the PETSc parallelization.
parent 2daa8f2e
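The gist of the solver change below: the mapper type becomes a template parameter (defaulting to BlockMapper) instead of being hard-coded, and initMapper()/exitMapper() become virtual. A minimal standalone sketch of that pattern, using simplified stand-in types rather than the real AMDiS classes:

  struct SystemMatrix {};  // stand-in for SolverMatrix<Matrix<DOFMatrix*> >

  struct BlockMapper    { explicit BlockMapper(const SystemMatrix&) {} };
  struct ParallelMapper { explicit ParallelMapper(const SystemMatrix&) {} };

  // Mapper_ is a template parameter with a sequential default, so a
  // parallel build can plug in its own mapping without a new base class.
  template <typename MatrixType, typename Mapper_ = BlockMapper>
  struct LinearSolverBase
  {
    typedef Mapper_ Mapper;
    virtual ~LinearSolverBase() {}

  protected:
    // virtual, so derived solvers can change how the mapper is created
    virtual void initMapper(const SystemMatrix& A) { mapper = new Mapper(A); }
    virtual void exitMapper()                      { delete mapper; }

    MatrixType matrix;
    Mapper* mapper;
  };

  int main()
  {
    LinearSolverBase<int>                 seq;  // BlockMapper by default
    LinearSolverBase<int, ParallelMapper> par;  // mapper chosen at compile time
  }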
......@@ -52,10 +52,10 @@ namespace AMDiS
{
template<>
FileWriter<double>::FileWriter(std::string name_,
                               Mesh *mesh_,
                               SystemVector *vecs)
  : name(name_),
    mesh(mesh_)
{
initialize();
......@@ -63,76 +63,76 @@ namespace AMDiS
* Removed by Siqi. Not sure.
* for (int i = 0; i < static_cast<int>(vecs->getSize()); i++)
*   TEST_EXIT(vecs->getDOFVector(0)->getFeSpace() == vecs->getDOFVector(i)->getFeSpace())
*     ("All FeSpaces have to be equal!\n");
*/
feSpace = vecs->getDOFVector(0)->getFeSpace();

solutionVecs.resize(vecs->getSize());
for (int i = 0; i < static_cast<int>(vecs->getSize()); i++)
  solutionVecs[i] = vecs->getDOFVector(i);

for (size_t i = 0; i < solutionVecs.size(); i++)
  solutionNames.push_back(solutionVecs[i]->getName());
}
template<>
void FileWriter<double>::writeFiles(AdaptInfo *adaptInfo,
                                    bool force,
                                    int level,
                                    Flag flag,
                                    bool (*writeElem)(ElInfo*))
{
FUNCNAME("FileWriter<T>::writeFiles()");
using namespace ::AMDiS::io;
if (timeModulo > 0.0) {
  if ((lastWriteTime != 0.0 && adaptInfo->getTime() < lastWriteTime + timeModulo) && !force)
    return;
} else {
  if ((adaptInfo->getTimestepNumber() % tsModulo != 0) && !force)
    return;
}
lastWriteTime = adaptInfo->getTime();
//-----------------by Siqi---------------------//
if (writeAMDiSFormat || writePeriodicFormat || writeParaViewFormat
    || writeParaViewVectorFormat || writeParaViewAnimation
    || writeDofFormat || (writeArhFormat && !writeArh2Format) || writePovrayFormat)
{
  for (int i = 0; i < static_cast<int>(solutionVecs.size()); i++)
    TEST_EXIT(solutionVecs[0]->getFeSpace() == solutionVecs[i]->getFeSpace())
      ("All FeSpaces have to be equal!\n");
}
// Containers that store the data to be written.
std::vector<DataCollector<>*> dataCollectors(solutionVecs.size());
if (writeElem) {
  for (int i = 0; i < static_cast<int>(dataCollectors.size()); i++)
    dataCollectors[i] = new DataCollector<>(feSpace, solutionVecs[i],
                                            level, flag, writeElem);
} else {
  for (int i = 0; i < static_cast<int>(dataCollectors.size()); i++)
    dataCollectors[i] = new DataCollector<>(feSpace, solutionVecs[i],
                                            traverseLevel,
                                            flag | traverseFlag,
                                            writeElement);
}
std::string fn = filename;
if (createParaViewSubDir) {
  using namespace boost::filesystem;
  path vtu_path = fn;
  path data_basedir("data");
  path vtu_filename = vtu_path.filename();
  vtu_path.remove_filename() /= data_basedir;

  try {
    create_directory(vtu_path);
    vtu_path /= vtu_filename;
    fn = vtu_path.string();
  } catch (...) {}
}
#if HAVE_PARALLEL_DOMAIN_AMDIS
......@@ -142,25 +142,26 @@ namespace AMDiS
#endif
/// TODO: use getParaViewFilename(), consistent with the parallel domain?
if (appendIndex) {
TEST_EXIT(indexLength <= 99)("index lenght > 99\n");
TEST_EXIT(indexDecimals <= 97)("index decimals > 97\n");
TEST_EXIT(indexDecimals < indexLength)("index length <= index decimals\n");
TEST_EXIT(indexLength <= 99)("index lenght > 99\n");
TEST_EXIT(indexDecimals <= 97)("index decimals > 97\n");
TEST_EXIT(indexDecimals < indexLength)("index length <= index decimals\n");
char formatStr[9];
char timeStr[20];
char formatStr[9];
char timeStr[20];
sprintf(formatStr, "%%0%d.%df", indexLength, indexDecimals);
sprintf(timeStr, formatStr, adaptInfo ? adaptInfo->getTime() : 0.0);
sprintf(formatStr, "%%0%d.%df", indexLength, indexDecimals);
sprintf(timeStr, formatStr, adaptInfo ? adaptInfo->getTime() : 0.0);
fn += timeStr;
fn += timeStr;
#if HAVE_PARALLEL_DOMAIN_AMDIS
  paraFilename += timeStr;
  postfix += timeStr + paraviewFileExt;
#endif
} else {
#if HAVE_PARALLEL_DOMAIN_AMDIS
  postfix += paraviewFileExt;
#endif
}
......@@ -172,102 +173,121 @@ namespace AMDiS
#endif
if (writeAMDiSFormat) {
  MacroWriter::writeMacro(dataCollectors[0],
                          const_cast<char*>((fn + amdisMeshExt).c_str()),
                          adaptInfo ? adaptInfo->getTime() : 0.0);
  MSG("macro file written to %s\n", (fn + amdisMeshExt).c_str());

  ValueWriter::writeValues(dataCollectors[0],
                           (fn + amdisDataExt).c_str(),
                           adaptInfo ? adaptInfo->getTime() : 0.0);
  MSG("value file written to %s\n", (fn + amdisDataExt).c_str());
}
if (writePeriodicFormat) {
  MacroWriter::writePeriodicFile(dataCollectors[0],
                                 (fn + periodicFileExt).c_str());
  MSG("periodic file written to %s\n", (fn + periodicFileExt).c_str());
}
if (writeParaViewFormat) {
  std::string vtu_file = fn + paraviewFileExt;
  VtkWriter::Aux vtkWriter(&dataCollectors,
                           solutionNames,
                           VtkWriter::Vtuformat(paraViewMode),
                           (paraViewPrecision == 1),
                           writeParaViewVectorFormat);
  vtkWriter.writeFile(vtu_file);
#if HAVE_PARALLEL_DOMAIN_AMDIS
  if (MPI::COMM_WORLD.Get_rank() == 0) {
    // vector<string> componentNames;
    // for (unsigned int i = 0; i < dataCollectors.size(); i++)
    //   componentNames.push_back(dataCollectors[i]->getValues()->getName());
    VtkWriter::detail::writeParallelFile(paraFilename + paraviewParallelFileExt,
                                         MPI::COMM_WORLD.Get_size(),
                                         filename,
                                         postfix,
                                         solutionNames,
                                         VtkWriter::Vtuformat(paraViewMode),
                                         (paraViewPrecision == 1),
                                         writeParaViewVectorFormat);
  }
#endif
MSG("ParaView file written to %s\n", (fn + paraviewFileExt).c_str());
MSG("ParaView file written to %s\n", (fn + paraviewFileExt).c_str());
}
// write vtu-vector files
if (writeParaViewVectorFormat && !writeParaViewFormat) {
  VtkVectorWriter::writeFile(solutionVecs, fn_ + paraviewFileExt, true, writeAs3dVector);
  MSG("ParaView file written to %s\n", (fn_ + paraviewFileExt).c_str());
}
if (writeParaViewAnimation) {
  std::string pvd_file = fn_ + paraviewFileExt;
#if HAVE_PARALLEL_DOMAIN_AMDIS
  pvd_file = fn_ + paraviewParallelFileExt;
  if (MPI::COMM_WORLD.Get_rank() == 0)
#endif
  {
    VtkWriter::detail::updateAnimationFile(adaptInfo,
                                           pvd_file,
                                           &paraviewAnimationFrames,
                                           filename + ".pvd");
  }
}
if (writeDofFormat) {
  DofWriter::writeFile(solutionVecs, fn + ".dof");
}
// write Arh files
if (!writeArh2Format && writeArhFormat)
  ArhWriter::write(fn_ + ".arh", feSpace->getMesh(), solutionVecs);
else if (writeArh2Format)
  Arh2Writer::writeFile(solutionVecs, fn_ + ".arh");
#ifdef HAVE_PNG
if (writePngFormat) {
  PngWriter pngWriter(dataCollectors[0]);
  pngWriter.writeFile(fn + ".png", pngType);
  MSG("PNG image file written to %s\n", (fn + ".png").c_str());
}
#endif
if (writePovrayFormat) {
  PovrayWriter povrayWriter(dataCollectors[0]);
  povrayWriter.writeFile(fn + ".pov");
  MSG("Povray script written to %s\n", (fn + ".pov").c_str());
}
for (int i = 0; i < static_cast<int>(dataCollectors.size()); i++)
  delete dataCollectors[i];
}
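For orientation, the createParaViewSubDir block in writeFiles() above splices a data/ subdirectory into the output path. A standalone sketch of that path handling, with a made-up example path:

  #include <boost/filesystem.hpp>
  #include <iostream>

  int main()
  {
    using namespace boost::filesystem;
    path vtu_path("output/solution.vtu");       // made-up example path
    path data_basedir("data");
    path vtu_filename = vtu_path.filename();    // "solution.vtu"
    vtu_path.remove_filename() /= data_basedir; // "output/data"
    try {
      create_directory(vtu_path);               // create output/data if missing
      vtu_path /= vtu_filename;                 // "output/data/solution.vtu"
      std::cout << vtu_path.string() << '\n';
    } catch (...) {}                            // as above: failures are ignored
  }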
template<>
string FileWriter<double>::getParaViewFilename(AdaptInfo* adaptInfo) const
{
string ret(filename);
if (appendIndex) {
TEST_EXIT(indexLength <= 99)("index lenght > 99\n");
TEST_EXIT(indexDecimals <= 97)("index decimals > 97\n");
TEST_EXIT(indexDecimals < indexLength)("index length <= index decimals\n");
char formatStr[9];
char timeStr[20];
sprintf(formatStr, "%%0%d.%df", indexLength, indexDecimals);
sprintf(timeStr, formatStr, adaptInfo ? adaptInfo->getTime() : 0.0);
ret += timeStr;
}
return ret;
}
} // end namespace detail
} // end namespace AMDiS
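The index formatting in writeFiles() and getParaViewFilename() builds a printf format string at runtime. For example, with indexLength = 6 and indexDecimals = 3 (example values, not AMDiS defaults), formatStr becomes "%06.3f" and a time of 1.5 is appended as "01.500". A minimal check of that sprintf logic:

  #include <cstdio>

  int main()
  {
    int indexLength = 6, indexDecimals = 3;  // example values
    char formatStr[9];
    char timeStr[20];
    std::sprintf(formatStr, "%%0%d.%df", indexLength, indexDecimals); // "%06.3f"
    std::sprintf(timeStr, formatStr, 1.5);                            // "01.500"
    std::puts(timeStr);
  }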
......@@ -82,6 +82,11 @@ namespace AMDiS {
return paraviewAnimationFrames;
}
bool getWriteParaViewFormat() const { return writeParaViewFormat; }
std::string getParaViewFilename(AdaptInfo* info) const;
const std::vector< std::string >& getSolutionNames() const
{ return solutionNames; }
protected:
/// Initialization of the filewriter.
void initialize();
......
......@@ -200,6 +200,8 @@ namespace AMDiS { namespace io {
}
void writeParallelFile(string name, int nRanks,
string fnPrefix, string fnPostfix,
vector<string> &componentNames,
......@@ -207,6 +209,22 @@ namespace AMDiS { namespace io {
bool highPrecision,
bool writeAsVector
)
{
using boost::lexical_cast;
vector< string > fileNames(nRanks);
for (int i = 0; i < nRanks; i++) {
fileNames[i] = fnPrefix + "-p" + lexical_cast<string>(i) + "-" + fnPostfix;
}
writeParallelFile(name, nRanks, fileNames, componentNames, format, highPrecision, writeAsVector);
}
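This convenience overload reproduces the existing per-rank naming, so existing callers keep the old file layout. A small sketch of the scheme with illustrative prefix/postfix values:

  #include <iostream>
  #include <string>
  #include <vector>

  int main()
  {
    int nRanks = 3;
    std::string fnPrefix = "solution", fnPostfix = "00.000.vtu"; // illustrative
    std::vector<std::string> fileNames(nRanks);
    for (int i = 0; i < nRanks; i++)
      fileNames[i] = fnPrefix + "-p" + std::to_string(i) + "-" + fnPostfix;
    for (std::size_t i = 0; i < fileNames.size(); i++)
      std::cout << fileNames[i] << '\n'; // solution-p0-00.000.vtu, solution-p1-..., ...
  }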
void writeParallelFile(string name, int nRanks,
vector<string>& subNames,
const vector<string> &componentNames,
::AMDiS::io::VtkWriter::Vtuformat format,
bool highPrecision,
bool writeAsVector
)
{
FUNCNAME("writeParallelFile()");
......@@ -277,8 +295,7 @@ namespace AMDiS { namespace io {
file << " </PPointData>\n";
for (int i = 0; i < nRanks; i++) {
boost::filesystem::path filepath(subNames[i]);
file << " <Piece Source=\""
<< boost::filesystem::basename(filepath)
<< boost::filesystem::extension(filepath) << "\"/>\n";
......
......@@ -292,7 +292,16 @@ namespace AMDiS { namespace io {
/// Writes a pvtu file, which contains the links to all the rank files.
void writeParallelFile(std::string name, int nRanks,
std::string fnPrefix, std::string fnPostfix,
const std::vector<std::string> &componentNames,
::AMDiS::io::VtkWriter::Vtuformat format = ::AMDiS::io::VtkWriter::ASCII,
bool highPrecision = false,
bool writeAsVector = false
);
/// Writes a pvtu file which contains the links to the rank files given in \p subNames.
void writeParallelFile(std::string name, int nRanks,
std::vector<std::string>& subNames,
const std::vector<std::string> &componentNames,
::AMDiS::io::VtkWriter::Vtuformat format = ::AMDiS::io::VtkWriter::ASCII,
bool highPrecision = false,
bool writeAsVector = false
......
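A hedged usage sketch of the new subNames overload; the file names and component name below are made up, and the call assumes the AMDiS::io::VtkWriter::detail namespace as used in FileWriter.cc above:

  // Not runnable outside AMDiS: illustrates the new overload only.
  std::vector<std::string> subNames;
  subNames.push_back("data/solution-p0-00.000.vtu"); // made-up rank files
  subNames.push_back("data/solution-p1-00.000.vtu");
  subNames.push_back("data/solution-p2-00.000.vtu");

  std::vector<std::string> componentNames;
  componentNames.push_back("u"); // made-up component name

  AMDiS::io::VtkWriter::detail::writeParallelFile(
      "solution-00.000.pvtu", 3, subNames, componentNames); // defaults: ASCII format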
......@@ -50,10 +50,10 @@ namespace AMDiS {
/// Wrapper for template-argument dependent constructors
template < typename MatrixType, typename Mapper_ = BlockMapper, typename Enable = void >
struct LinearSolverBase : public LinearSolverInterface
{
typedef Mapper_ Mapper;
LinearSolverBase(std::string name)
: LinearSolverInterface(name) {}
......@@ -63,25 +63,26 @@ namespace AMDiS {
return matrix;
}
protected:
/// create a sequential BlockMapper
virtual void initMapper(const SolverMatrix<Matrix<DOFMatrix*> >& A)
{
mapper = new Mapper(A);
}
virtual void exitMapper()
{
delete mapper;
}
protected:
MatrixType matrix;
Mapper* mapper;
};
#ifdef HAVE_PARALLEL_MTL4
template< typename MatrixType >
struct LinearSolverBase<MatrixType, ParallelMapper, typename boost::enable_if< mtl::traits::is_distributed<MatrixType> > >
: public ParallelSolver
{
typedef ParallelMapper Mapper;
......@@ -94,18 +95,18 @@ namespace AMDiS {
return matrix;
}
protected:
/// create a parallel mapper based on local-to-global mapping
virtual void initMapper(const SolverMatrix<Matrix<DOFMatrix*> >& A)
{
mapper = new ParallelMapper(*ParallelSolver::getDofMapping());
}
virtual void exitMapper()
{
delete mapper;
}
protected:
MatrixType matrix;
Mapper* mapper;
};
......@@ -120,15 +121,13 @@ namespace AMDiS {
* solvers where MTL4 provides an interface, can be assigned
* by different Runner objects.
**/
template< typename MatrixType, typename VectorType, typename Runner, typename Mapper_ = BlockMapper >
class LinearSolver : public LinearSolverBase<MatrixType, Mapper_>
{
protected:
  typedef LinearSolverBase<MatrixType, Mapper_> super;
  typedef LinearSolver<MatrixType, VectorType, Runner, Mapper_> self;
  typedef typename super::Mapper Mapper;
public:
/// Creator class used in the LinearSolverInterfaceMap.
......@@ -172,6 +171,7 @@ namespace AMDiS {
}
protected:
/// Implementation of \ref LinearSolverInterface::solveLinearSystem()
int solveLinearSystem(const SolverMatrix<Matrix<DOFMatrix*> >& A,
SystemVector& x,
......@@ -182,7 +182,7 @@ namespace AMDiS {
#ifdef HAVE_PARALLEL_DOMAIN_AMDIS
MPI::COMM_WORLD.Barrier();
#endif
this->initMapper(A);
Timer t;
if (createMatrixData) {
......@@ -206,7 +206,7 @@ namespace AMDiS {
if (!storeMatrixData)
runner.exit();
this->exitMapper();
return error;
}
......@@ -228,6 +228,9 @@ namespace AMDiS {
dispatch::initVector(target, super::matrix);
dispatch::fillVector(target, source, mapper.self());
}
Runner runner; // redirect the implementation to a runner
};
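With the fourth template parameter in place, a concrete solver can select its mapper at instantiation; a sketch with hypothetical matrix, vector, and runner type names:

  // Hypothetical type names, for illustration only.
  typedef LinearSolver<MTLMatrix, MTLVector, CGRunner>                 SeqSolver; // BlockMapper by default
  typedef LinearSolver<MTLMatrix, MTLVector, CGRunner, ParallelMapper> ParSolver; // explicit mapper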
}
......