From ab3182cb4e0b193c6598fa91e90c42a607b405f3 Mon Sep 17 00:00:00 2001
From: Thomas Witkowski <thomas.witkowski@gmx.de>
Date: Fri, 2 Jul 2010 11:37:40 +0000
Subject: [PATCH] Fix several small problems with periodic boundaries in
 parallel computations; rename parallel source files.

---
 AMDiS/CMakeLists.txt                          |   4 +-
 AMDiS/bin/Makefile.am                         |   4 +-
 AMDiS/bin/Makefile.in                         |  40 ++---
 AMDiS/libtool                                 |  64 ++++----
 AMDiS/src/DOFMatrix.cc                        |   5 +
 AMDiS/src/Element.h                           |  12 +-
 AMDiS/src/ProblemVec.cc                       |  10 +-
 AMDiS/src/VtkWriter.cc                        | 148 +++++++++---------
 AMDiS/src/VtkWriter.h                         |   7 +-
 AMDiS/src/VtkWriter.hh                        |   8 +-
 AMDiS/src/parallel/GlobalMatrixSolver.cc      |  57 ++++---
 AMDiS/src/parallel/GlobalMatrixSolver.h       |   2 +-
 ...rallelDomainBase.cc => MeshDistributor.cc} | 145 ++++++-----------
 ...ParallelDomainBase.h => MeshDistributor.h} |  15 +-
 ...{ParallelDomainDbg.cc => ParallelDebug.cc} | 105 +++++++++----
 .../{ParallelDomainDbg.h => ParallelDebug.h}  |  13 +-
 16 files changed, 329 insertions(+), 310 deletions(-)
 rename AMDiS/src/parallel/{ParallelDomainBase.cc => MeshDistributor.cc} (94%)
 rename AMDiS/src/parallel/{ParallelDomainBase.h => MeshDistributor.h} (98%)
 rename AMDiS/src/parallel/{ParallelDomainDbg.cc => ParallelDebug.cc} (80%)
 rename AMDiS/src/parallel/{ParallelDomainDbg.h => ParallelDebug.h} (95%)

diff --git a/AMDiS/CMakeLists.txt b/AMDiS/CMakeLists.txt
index af080f62..b8cfd3eb 100644
--- a/AMDiS/CMakeLists.txt
+++ b/AMDiS/CMakeLists.txt
@@ -155,9 +155,9 @@ endif(ENABLE_PARMETIS)
 if(ENABLE_PARALLEL_DOMAIN)
 	include_directories(${PETSC_DIR}/include ${PETSC_DIR}/${PETSC_ARCH}/include)
 	SET(PARALLEL_DOMAIN_AMDIS_SRC
-		${SOURCE_DIR}/parallel/ParallelDomainBase.cc 
+		${SOURCE_DIR}/parallel/MeshDistributor.cc 
 		${SOURCE_DIR}/parallel/StdMpi.cc
-		${SOURCE_DIR}/parallel/ParallelDomainDbg.cc
+		${SOURCE_DIR}/parallel/ParallelDebug.cc
 		${SOURCE_DIR}/parallel/GlobalMatrixSolver.cc
 		${SOURCE_DIR}/parallel/MpiHelper.cc)
 	SET(COMPILEFLAGS "${COMPILEFLAGS} -DHAVE_PARALLEL_DOMAIN_AMDIS=1")
diff --git a/AMDiS/bin/Makefile.am b/AMDiS/bin/Makefile.am
index 6aec26c1..470d86ac 100644
--- a/AMDiS/bin/Makefile.am
+++ b/AMDiS/bin/Makefile.am
@@ -29,8 +29,8 @@ endif
 if USE_PARALLEL_DOMAIN_AMDIS
   PARALLEL_AMDIS_SOURCES += \
   $(SOURCE_DIR)/parallel/StdMpi.h $(SOURCE_DIR)/parallel/StdMpi.cc \
-  $(SOURCE_DIR)/parallel/ParallelDomainBase.h $(SOURCE_DIR)/parallel/ParallelDomainBase.cc \
-  $(SOURCE_DIR)/parallel/ParallelDomainDbg.h $(SOURCE_DIR)/parallel/ParallelDomainDbg.cc \
+  $(SOURCE_DIR)/parallel/MeshDistributor.h $(SOURCE_DIR)/parallel/MeshDistributor.cc \
+  $(SOURCE_DIR)/parallel/ParallelDebug.h $(SOURCE_DIR)/parallel/ParallelDebug.cc \
   $(SOURCE_DIR)/parallel/GlobalMatrixSolver.h $(SOURCE_DIR)/parallel/GlobalMatrixSolver.cc \
   $(SOURCE_DIR)/parallel/MpiHelper.h $(SOURCE_DIR)/parallel/MpiHelper.cc 
   libamdis_la_CXXFLAGS += -DHAVE_PARALLEL_DOMAIN_AMDIS=1
diff --git a/AMDiS/bin/Makefile.in b/AMDiS/bin/Makefile.in
index e22b3803..0fc9dc80 100644
--- a/AMDiS/bin/Makefile.in
+++ b/AMDiS/bin/Makefile.in
@@ -39,8 +39,8 @@ host_triplet = @host@
 @USE_PARALLEL_AMDIS_TRUE@am__append_1 = -DHAVE_PARALLEL_AMDIS=1
 @USE_PARALLEL_DOMAIN_AMDIS_TRUE@am__append_2 = \
 @USE_PARALLEL_DOMAIN_AMDIS_TRUE@  $(SOURCE_DIR)/parallel/StdMpi.h $(SOURCE_DIR)/parallel/StdMpi.cc \
-@USE_PARALLEL_DOMAIN_AMDIS_TRUE@  $(SOURCE_DIR)/parallel/ParallelDomainBase.h $(SOURCE_DIR)/parallel/ParallelDomainBase.cc \
-@USE_PARALLEL_DOMAIN_AMDIS_TRUE@  $(SOURCE_DIR)/parallel/ParallelDomainDbg.h $(SOURCE_DIR)/parallel/ParallelDomainDbg.cc \
+@USE_PARALLEL_DOMAIN_AMDIS_TRUE@  $(SOURCE_DIR)/parallel/MeshDistributor.h $(SOURCE_DIR)/parallel/MeshDistributor.cc \
+@USE_PARALLEL_DOMAIN_AMDIS_TRUE@  $(SOURCE_DIR)/parallel/ParallelDebug.h $(SOURCE_DIR)/parallel/ParallelDebug.cc \
 @USE_PARALLEL_DOMAIN_AMDIS_TRUE@  $(SOURCE_DIR)/parallel/GlobalMatrixSolver.h $(SOURCE_DIR)/parallel/GlobalMatrixSolver.cc \
 @USE_PARALLEL_DOMAIN_AMDIS_TRUE@  $(SOURCE_DIR)/parallel/MpiHelper.h $(SOURCE_DIR)/parallel/MpiHelper.cc 
 
@@ -77,10 +77,10 @@ LTLIBRARIES = $(lib_LTLIBRARIES)
 libamdis_la_LIBADD =
 am__libamdis_la_SOURCES_DIST = $(SOURCE_DIR)/parallel/StdMpi.h \
 	$(SOURCE_DIR)/parallel/StdMpi.cc \
-	$(SOURCE_DIR)/parallel/ParallelDomainBase.h \
-	$(SOURCE_DIR)/parallel/ParallelDomainBase.cc \
-	$(SOURCE_DIR)/parallel/ParallelDomainDbg.h \
-	$(SOURCE_DIR)/parallel/ParallelDomainDbg.cc \
+	$(SOURCE_DIR)/parallel/MeshDistributor.h \
+	$(SOURCE_DIR)/parallel/MeshDistributor.cc \
+	$(SOURCE_DIR)/parallel/ParallelDebug.h \
+	$(SOURCE_DIR)/parallel/ParallelDebug.cc \
 	$(SOURCE_DIR)/parallel/GlobalMatrixSolver.h \
 	$(SOURCE_DIR)/parallel/GlobalMatrixSolver.cc \
 	$(SOURCE_DIR)/parallel/MpiHelper.h \
@@ -247,8 +247,8 @@ am__libamdis_la_SOURCES_DIST = $(SOURCE_DIR)/parallel/StdMpi.h \
 	$(SOURCE_DIR)/time/RosenbrockMethod.h \
 	$(SOURCE_DIR)/time/RosenbrockMethod.cc
 @USE_PARALLEL_DOMAIN_AMDIS_TRUE@am__objects_1 = libamdis_la-StdMpi.lo \
-@USE_PARALLEL_DOMAIN_AMDIS_TRUE@	libamdis_la-ParallelDomainBase.lo \
-@USE_PARALLEL_DOMAIN_AMDIS_TRUE@	libamdis_la-ParallelDomainDbg.lo \
+@USE_PARALLEL_DOMAIN_AMDIS_TRUE@	libamdis_la-MeshDistributor.lo \
+@USE_PARALLEL_DOMAIN_AMDIS_TRUE@	libamdis_la-ParallelDebug.lo \
 @USE_PARALLEL_DOMAIN_AMDIS_TRUE@	libamdis_la-GlobalMatrixSolver.lo \
 @USE_PARALLEL_DOMAIN_AMDIS_TRUE@	libamdis_la-MpiHelper.lo
 @USE_PARALLEL_AMDIS_FALSE@am__objects_2 = $(am__objects_1)
@@ -807,14 +807,14 @@ distclean-compile:
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libamdis_la-MacroWriter.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libamdis_la-Marker.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libamdis_la-Mesh.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libamdis_la-MeshDistributor.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libamdis_la-MeshStructure.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libamdis_la-MpiHelper.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libamdis_la-NonLinUpdater.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libamdis_la-Operator.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libamdis_la-OperatorTerm.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libamdis_la-ParMetisPartitioner.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libamdis_la-ParallelDomainBase.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libamdis_la-ParallelDomainDbg.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libamdis_la-ParallelDebug.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libamdis_la-ParallelProblem.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libamdis_la-Parameters.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libamdis_la-Parametric.Plo@am__quote@
@@ -901,19 +901,19 @@ libamdis_la-StdMpi.lo: $(SOURCE_DIR)/parallel/StdMpi.cc
 @AMDEP_TRUE@@am__fastdepCXX_FALSE@	DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
 @am__fastdepCXX_FALSE@	$(LIBTOOL) --tag=CXX --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libamdis_la_CXXFLAGS) $(CXXFLAGS) -c -o libamdis_la-StdMpi.lo `test -f '$(SOURCE_DIR)/parallel/StdMpi.cc' || echo '$(srcdir)/'`$(SOURCE_DIR)/parallel/StdMpi.cc
 
-libamdis_la-ParallelDomainBase.lo: $(SOURCE_DIR)/parallel/ParallelDomainBase.cc
-@am__fastdepCXX_TRUE@	if $(LIBTOOL) --tag=CXX --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libamdis_la_CXXFLAGS) $(CXXFLAGS) -MT libamdis_la-ParallelDomainBase.lo -MD -MP -MF "$(DEPDIR)/libamdis_la-ParallelDomainBase.Tpo" -c -o libamdis_la-ParallelDomainBase.lo `test -f '$(SOURCE_DIR)/parallel/ParallelDomainBase.cc' || echo '$(srcdir)/'`$(SOURCE_DIR)/parallel/ParallelDomainBase.cc; \
-@am__fastdepCXX_TRUE@	then mv -f "$(DEPDIR)/libamdis_la-ParallelDomainBase.Tpo" "$(DEPDIR)/libamdis_la-ParallelDomainBase.Plo"; else rm -f "$(DEPDIR)/libamdis_la-ParallelDomainBase.Tpo"; exit 1; fi
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@	source='$(SOURCE_DIR)/parallel/ParallelDomainBase.cc' object='libamdis_la-ParallelDomainBase.lo' libtool=yes @AMDEPBACKSLASH@
+libamdis_la-MeshDistributor.lo: $(SOURCE_DIR)/parallel/MeshDistributor.cc
+@am__fastdepCXX_TRUE@	if $(LIBTOOL) --tag=CXX --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libamdis_la_CXXFLAGS) $(CXXFLAGS) -MT libamdis_la-MeshDistributor.lo -MD -MP -MF "$(DEPDIR)/libamdis_la-MeshDistributor.Tpo" -c -o libamdis_la-MeshDistributor.lo `test -f '$(SOURCE_DIR)/parallel/MeshDistributor.cc' || echo '$(srcdir)/'`$(SOURCE_DIR)/parallel/MeshDistributor.cc; \
+@am__fastdepCXX_TRUE@	then mv -f "$(DEPDIR)/libamdis_la-MeshDistributor.Tpo" "$(DEPDIR)/libamdis_la-MeshDistributor.Plo"; else rm -f "$(DEPDIR)/libamdis_la-MeshDistributor.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@	source='$(SOURCE_DIR)/parallel/MeshDistributor.cc' object='libamdis_la-MeshDistributor.lo' libtool=yes @AMDEPBACKSLASH@
 @AMDEP_TRUE@@am__fastdepCXX_FALSE@	DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@	$(LIBTOOL) --tag=CXX --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libamdis_la_CXXFLAGS) $(CXXFLAGS) -c -o libamdis_la-ParallelDomainBase.lo `test -f '$(SOURCE_DIR)/parallel/ParallelDomainBase.cc' || echo '$(srcdir)/'`$(SOURCE_DIR)/parallel/ParallelDomainBase.cc
+@am__fastdepCXX_FALSE@	$(LIBTOOL) --tag=CXX --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libamdis_la_CXXFLAGS) $(CXXFLAGS) -c -o libamdis_la-MeshDistributor.lo `test -f '$(SOURCE_DIR)/parallel/MeshDistributor.cc' || echo '$(srcdir)/'`$(SOURCE_DIR)/parallel/MeshDistributor.cc
 
-libamdis_la-ParallelDomainDbg.lo: $(SOURCE_DIR)/parallel/ParallelDomainDbg.cc
-@am__fastdepCXX_TRUE@	if $(LIBTOOL) --tag=CXX --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libamdis_la_CXXFLAGS) $(CXXFLAGS) -MT libamdis_la-ParallelDomainDbg.lo -MD -MP -MF "$(DEPDIR)/libamdis_la-ParallelDomainDbg.Tpo" -c -o libamdis_la-ParallelDomainDbg.lo `test -f '$(SOURCE_DIR)/parallel/ParallelDomainDbg.cc' || echo '$(srcdir)/'`$(SOURCE_DIR)/parallel/ParallelDomainDbg.cc; \
-@am__fastdepCXX_TRUE@	then mv -f "$(DEPDIR)/libamdis_la-ParallelDomainDbg.Tpo" "$(DEPDIR)/libamdis_la-ParallelDomainDbg.Plo"; else rm -f "$(DEPDIR)/libamdis_la-ParallelDomainDbg.Tpo"; exit 1; fi
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@	source='$(SOURCE_DIR)/parallel/ParallelDomainDbg.cc' object='libamdis_la-ParallelDomainDbg.lo' libtool=yes @AMDEPBACKSLASH@
+libamdis_la-ParallelDebug.lo: $(SOURCE_DIR)/parallel/ParallelDebug.cc
+@am__fastdepCXX_TRUE@	if $(LIBTOOL) --tag=CXX --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libamdis_la_CXXFLAGS) $(CXXFLAGS) -MT libamdis_la-ParallelDebug.lo -MD -MP -MF "$(DEPDIR)/libamdis_la-ParallelDebug.Tpo" -c -o libamdis_la-ParallelDebug.lo `test -f '$(SOURCE_DIR)/parallel/ParallelDebug.cc' || echo '$(srcdir)/'`$(SOURCE_DIR)/parallel/ParallelDebug.cc; \
+@am__fastdepCXX_TRUE@	then mv -f "$(DEPDIR)/libamdis_la-ParallelDebug.Tpo" "$(DEPDIR)/libamdis_la-ParallelDebug.Plo"; else rm -f "$(DEPDIR)/libamdis_la-ParallelDebug.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@	source='$(SOURCE_DIR)/parallel/ParallelDebug.cc' object='libamdis_la-ParallelDebug.lo' libtool=yes @AMDEPBACKSLASH@
 @AMDEP_TRUE@@am__fastdepCXX_FALSE@	DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@	$(LIBTOOL) --tag=CXX --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libamdis_la_CXXFLAGS) $(CXXFLAGS) -c -o libamdis_la-ParallelDomainDbg.lo `test -f '$(SOURCE_DIR)/parallel/ParallelDomainDbg.cc' || echo '$(srcdir)/'`$(SOURCE_DIR)/parallel/ParallelDomainDbg.cc
+@am__fastdepCXX_FALSE@	$(LIBTOOL) --tag=CXX --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libamdis_la_CXXFLAGS) $(CXXFLAGS) -c -o libamdis_la-ParallelDebug.lo `test -f '$(SOURCE_DIR)/parallel/ParallelDebug.cc' || echo '$(srcdir)/'`$(SOURCE_DIR)/parallel/ParallelDebug.cc
 
 libamdis_la-GlobalMatrixSolver.lo: $(SOURCE_DIR)/parallel/GlobalMatrixSolver.cc
 @am__fastdepCXX_TRUE@	if $(LIBTOOL) --tag=CXX --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libamdis_la_CXXFLAGS) $(CXXFLAGS) -MT libamdis_la-GlobalMatrixSolver.lo -MD -MP -MF "$(DEPDIR)/libamdis_la-GlobalMatrixSolver.Tpo" -c -o libamdis_la-GlobalMatrixSolver.lo `test -f '$(SOURCE_DIR)/parallel/GlobalMatrixSolver.cc' || echo '$(srcdir)/'`$(SOURCE_DIR)/parallel/GlobalMatrixSolver.cc; \
diff --git a/AMDiS/libtool b/AMDiS/libtool
index dae42157..1a37a1c4 100755
--- a/AMDiS/libtool
+++ b/AMDiS/libtool
@@ -30,10 +30,10 @@
 # the same distribution terms that you use for the rest of that program.
 
 # A sed program that does not truncate output.
-SED="/bin/sed"
+SED="/usr/bin/sed"
 
 # Sed that helps us avoid accidentally triggering echo(1) options like -n.
-Xsed="/bin/sed -e 1s/^X//"
+Xsed="/usr/bin/sed -e 1s/^X//"
 
 # The HP-UX ksh and POSIX shell print the target directory to stdout
 # if CDPATH is set.
@@ -44,7 +44,7 @@ available_tags=" CXX F77"
 
 # ### BEGIN LIBTOOL CONFIG
 
-# Libtool was configured on host NWRW15:
+# Libtool was configured on host deimos102:
 
 # Shell to use when invoking shell scripts.
 SHELL="/bin/sh"
@@ -66,12 +66,12 @@ fast_install=yes
 
 # The host system.
 host_alias=
-host=i686-pc-linux-gnu
+host=x86_64-unknown-linux-gnu
 host_os=linux-gnu
 
 # The build system.
 build_alias=
-build=i686-pc-linux-gnu
+build=x86_64-unknown-linux-gnu
 build_os=linux-gnu
 
 # An echo program that does not interpret backslashes.
@@ -82,13 +82,13 @@ AR="ar"
 AR_FLAGS="cru"
 
 # A C compiler.
-LTCC="gcc"
+LTCC="/licsoft/libraries/openmpi/1.2.6/64bit/bin/mpicc"
 
 # LTCC compiler flags.
 LTCFLAGS="-g -O2"
 
 # A language-specific compiler.
-CC="gcc"
+CC="/licsoft/libraries/openmpi/1.2.6/64bit/bin/mpicc"
 
 # Is the compiler the GNU C compiler?
 with_gcc=yes
@@ -97,7 +97,7 @@ with_gcc=yes
 EGREP="grep -E"
 
 # The linker used to build libraries.
-LD="/usr/bin/ld"
+LD="/usr/x86_64-suse-linux/bin/ld -m elf_x86_64"
 
 # Whether we need hard or soft links.
 LN_S="ln -s"
@@ -171,7 +171,7 @@ dlopen_self=unknown
 dlopen_self_static=unknown
 
 # Compiler flag to prevent dynamic linking.
-link_static_flag="-static"
+link_static_flag=""
 
 # Compiler flag to turn off builtin functions.
 no_builtin_flag=" -fno-builtin"
@@ -325,10 +325,10 @@ variables_saved_for_relink="PATH LD_LIBRARY_PATH LD_RUN_PATH GCC_EXEC_PREFIX COM
 link_all_deplibs=unknown
 
 # Compile-time system search path for libraries
-sys_lib_search_path_spec=" /u/witkowski/local/lib/i386-redhat-linux/4.1.2/ /u/witkowski/local/lib/ /usr/lib/gcc/i386-redhat-linux/4.1.2/ /usr/lib/gcc/i386-redhat-linux/4.1.2/ /usr/lib/gcc/i386-redhat-linux/4.1.2/../../../../i386-redhat-linux/lib/i386-redhat-linux/4.1.2/ /usr/lib/gcc/i386-redhat-linux/4.1.2/../../../../i386-redhat-linux/lib/ /usr/lib/gcc/i386-redhat-linux/4.1.2/../../../i386-redhat-linux/4.1.2/ /usr/lib/gcc/i386-redhat-linux/4.1.2/../../../ /lib/i386-redhat-linux/4.1.2/ /lib/ /usr/lib/i386-redhat-linux/4.1.2/ /usr/lib/"
+sys_lib_search_path_spec=" /fastfs/wir/local/lib/x86_64-suse-linux/4.1.2/ /fastfs/wir/local/lib/../lib64/ /usr/lib64/gcc/x86_64-suse-linux/4.1.2/ /usr/lib/gcc/x86_64-suse-linux/4.1.2/ /usr/lib64/gcc/x86_64-suse-linux/4.1.2/../../../../x86_64-suse-linux/lib/x86_64-suse-linux/4.1.2/ /usr/lib64/gcc/x86_64-suse-linux/4.1.2/../../../../x86_64-suse-linux/lib/../lib64/ /usr/lib64/gcc/x86_64-suse-linux/4.1.2/../../../x86_64-suse-linux/4.1.2/ /usr/lib64/gcc/x86_64-suse-linux/4.1.2/../../../../lib64/ /lib/x86_64-suse-linux/4.1.2/ /lib/../lib64/ /usr/lib/x86_64-suse-linux/4.1.2/ /usr/lib/../lib64/ /fastfs/wir/local/lib/ /usr/lib64/gcc/x86_64-suse-linux/4.1.2/../../../../x86_64-suse-linux/lib/ /usr/lib64/gcc/x86_64-suse-linux/4.1.2/../../../ /lib/ /usr/lib/"
 
 # Run-time system search path for libraries
-sys_lib_dlsearch_path_spec="/lib /usr/lib /usr/lib/octave-2.9.9 /usr/lib/qt-3.3/lib /usr/lib/qt4/lib /usr/lib/xulrunner-1.9.2 "
+sys_lib_dlsearch_path_spec="/lib /usr/lib /usr/X11R6/lib64/Xaw3d /usr/X11R6/lib64 /usr/X11R6/lib/Xaw3d /usr/X11R6/lib /usr/x86_64-suse-linux/lib /usr/local/lib64 /usr/local/lib /opt/kde3/lib64 /opt/kde3/lib /opt/gnome/lib64 /opt/gnome/lib /lib64 /lib /usr/lib64 /usr/lib /opt/cluster/intel/cce/9.1.042/lib /opt/cluster/intel/fce/9.1.036/lib /opt/cluster/Pathscale3.0/lib/2.9.99 /opt/cluster/Pathscale3.0/lib/2.9.99/32 /work/licsoft/compilers/pgi/linux86-64/6.2/lib /work/licsoft/compilers/pgi/linux86-64/6.2/libso "
 
 # Fix the shell variable $srcfile for the compiler.
 fix_srcfile_path=""
@@ -6760,7 +6760,7 @@ build_old_libs=`case $build_libtool_libs in yes) $echo no;; *) $echo yes;; esac`
 # End:
 # ### BEGIN LIBTOOL TAG CONFIG: CXX
 
-# Libtool was configured on host NWRW15:
+# Libtool was configured on host deimos102:
 
 # Shell to use when invoking shell scripts.
 SHELL="/bin/sh"
@@ -6782,12 +6782,12 @@ fast_install=yes
 
 # The host system.
 host_alias=
-host=i686-pc-linux-gnu
+host=x86_64-unknown-linux-gnu
 host_os=linux-gnu
 
 # The build system.
 build_alias=
-build=i686-pc-linux-gnu
+build=x86_64-unknown-linux-gnu
 build_os=linux-gnu
 
 # An echo program that does not interpret backslashes.
@@ -6798,13 +6798,13 @@ AR="ar"
 AR_FLAGS="cru"
 
 # A C compiler.
-LTCC="gcc"
+LTCC="/licsoft/libraries/openmpi/1.2.6/64bit/bin/mpicc"
 
 # LTCC compiler flags.
 LTCFLAGS="-g -O2"
 
 # A language-specific compiler.
-CC="g++"
+CC="/licsoft/libraries/openmpi/1.2.6/64bit/bin/mpicxx"
 
 # Is the compiler the GNU C compiler?
 with_gcc=yes
@@ -6813,7 +6813,7 @@ with_gcc=yes
 EGREP="grep -E"
 
 # The linker used to build libraries.
-LD="/usr/bin/ld"
+LD="/usr/x86_64-suse-linux/bin/ld -m elf_x86_64"
 
 # Whether we need hard or soft links.
 LN_S="ln -s"
@@ -6887,7 +6887,7 @@ dlopen_self=unknown
 dlopen_self_static=unknown
 
 # Compiler flag to prevent dynamic linking.
-link_static_flag="-static"
+link_static_flag=""
 
 # Compiler flag to turn off builtin functions.
 no_builtin_flag=" -fno-builtin"
@@ -6942,11 +6942,11 @@ striplib="strip --strip-unneeded"
 
 # Dependencies to place before the objects being linked to create a
 # shared library.
-predep_objects="/usr/lib/gcc/i386-redhat-linux/4.1.2/../../../crti.o /usr/lib/gcc/i386-redhat-linux/4.1.2/crtbeginS.o"
+predep_objects="/usr/lib64/gcc/x86_64-suse-linux/4.1.2/../../../../lib64/crti.o /usr/lib64/gcc/x86_64-suse-linux/4.1.2/crtbeginS.o"
 
 # Dependencies to place after the objects being linked to create a
 # shared library.
-postdep_objects="/usr/lib/gcc/i386-redhat-linux/4.1.2/crtendS.o /usr/lib/gcc/i386-redhat-linux/4.1.2/../../../crtn.o"
+postdep_objects="/usr/lib64/gcc/x86_64-suse-linux/4.1.2/crtendS.o /usr/lib64/gcc/x86_64-suse-linux/4.1.2/../../../../lib64/crtn.o"
 
 # Dependencies to place before the objects being linked to create a
 # shared library.
@@ -6954,11 +6954,11 @@ predeps=""
 
 # Dependencies to place after the objects being linked to create a
 # shared library.
-postdeps="-lstdc++ -lm -lgcc_s -lc -lgcc_s"
+postdeps="-lmpi_cxx -lmpi -lopen-rte -lopen-pal -libverbs -lrt -lnuma -ldl -lnsl -lutil -ldl -lstdc++ -lm -lgcc_s -lpthread -lc -lgcc_s"
 
 # The library search path used internally by the compiler when linking
 # a shared library.
-compiler_lib_search_path="-L/u/witkowski/local/lib -L/usr/lib/gcc/i386-redhat-linux/4.1.2 -L/usr/lib/gcc/i386-redhat-linux/4.1.2 -L/usr/lib/gcc/i386-redhat-linux/4.1.2/../../.."
+compiler_lib_search_path="-L/usr/lib64 -L/licsoft/libraries/openmpi/1.2.6/64bit/lib -L/usr/lib64/gcc/x86_64-suse-linux/4.1.2 -L/usr/lib64/gcc/x86_64-suse-linux/4.1.2/../../../../lib64 -L/lib/../lib64 -L/usr/lib/../lib64 -L/fastfs/wir/local/lib -L/usr/lib64/gcc/x86_64-suse-linux/4.1.2/../../../../x86_64-suse-linux/lib -L/usr/lib64/gcc/x86_64-suse-linux/4.1.2/../../.."
 
 # Method to check whether dependent libraries are shared objects.
 deplibs_check_method="pass_all"
@@ -7038,10 +7038,10 @@ variables_saved_for_relink="PATH LD_LIBRARY_PATH LD_RUN_PATH GCC_EXEC_PREFIX COM
 link_all_deplibs=unknown
 
 # Compile-time system search path for libraries
-sys_lib_search_path_spec=" /u/witkowski/local/lib/i386-redhat-linux/4.1.2/ /u/witkowski/local/lib/ /usr/lib/gcc/i386-redhat-linux/4.1.2/ /usr/lib/gcc/i386-redhat-linux/4.1.2/ /usr/lib/gcc/i386-redhat-linux/4.1.2/../../../../i386-redhat-linux/lib/i386-redhat-linux/4.1.2/ /usr/lib/gcc/i386-redhat-linux/4.1.2/../../../../i386-redhat-linux/lib/ /usr/lib/gcc/i386-redhat-linux/4.1.2/../../../i386-redhat-linux/4.1.2/ /usr/lib/gcc/i386-redhat-linux/4.1.2/../../../ /lib/i386-redhat-linux/4.1.2/ /lib/ /usr/lib/i386-redhat-linux/4.1.2/ /usr/lib/"
+sys_lib_search_path_spec=" /fastfs/wir/local/lib/x86_64-suse-linux/4.1.2/ /fastfs/wir/local/lib/../lib64/ /usr/lib64/gcc/x86_64-suse-linux/4.1.2/ /usr/lib/gcc/x86_64-suse-linux/4.1.2/ /usr/lib64/gcc/x86_64-suse-linux/4.1.2/../../../../x86_64-suse-linux/lib/x86_64-suse-linux/4.1.2/ /usr/lib64/gcc/x86_64-suse-linux/4.1.2/../../../../x86_64-suse-linux/lib/../lib64/ /usr/lib64/gcc/x86_64-suse-linux/4.1.2/../../../x86_64-suse-linux/4.1.2/ /usr/lib64/gcc/x86_64-suse-linux/4.1.2/../../../../lib64/ /lib/x86_64-suse-linux/4.1.2/ /lib/../lib64/ /usr/lib/x86_64-suse-linux/4.1.2/ /usr/lib/../lib64/ /fastfs/wir/local/lib/ /usr/lib64/gcc/x86_64-suse-linux/4.1.2/../../../../x86_64-suse-linux/lib/ /usr/lib64/gcc/x86_64-suse-linux/4.1.2/../../../ /lib/ /usr/lib/"
 
 # Run-time system search path for libraries
-sys_lib_dlsearch_path_spec="/lib /usr/lib /usr/lib/octave-2.9.9 /usr/lib/qt-3.3/lib /usr/lib/qt4/lib /usr/lib/xulrunner-1.9.2 "
+sys_lib_dlsearch_path_spec="/lib /usr/lib /usr/X11R6/lib64/Xaw3d /usr/X11R6/lib64 /usr/X11R6/lib/Xaw3d /usr/X11R6/lib /usr/x86_64-suse-linux/lib /usr/local/lib64 /usr/local/lib /opt/kde3/lib64 /opt/kde3/lib /opt/gnome/lib64 /opt/gnome/lib /lib64 /lib /usr/lib64 /usr/lib /opt/cluster/intel/cce/9.1.042/lib /opt/cluster/intel/fce/9.1.036/lib /opt/cluster/Pathscale3.0/lib/2.9.99 /opt/cluster/Pathscale3.0/lib/2.9.99/32 /work/licsoft/compilers/pgi/linux86-64/6.2/lib /work/licsoft/compilers/pgi/linux86-64/6.2/libso "
 
 # Fix the shell variable $srcfile for the compiler.
 fix_srcfile_path=""
@@ -7065,7 +7065,7 @@ include_expsyms=""
 
 # ### BEGIN LIBTOOL TAG CONFIG: F77
 
-# Libtool was configured on host NWRW15:
+# Libtool was configured on host deimos102:
 
 # Shell to use when invoking shell scripts.
 SHELL="/bin/sh"
@@ -7087,12 +7087,12 @@ fast_install=yes
 
 # The host system.
 host_alias=
-host=i686-pc-linux-gnu
+host=x86_64-unknown-linux-gnu
 host_os=linux-gnu
 
 # The build system.
 build_alias=
-build=i686-pc-linux-gnu
+build=x86_64-unknown-linux-gnu
 build_os=linux-gnu
 
 # An echo program that does not interpret backslashes.
@@ -7103,7 +7103,7 @@ AR="ar"
 AR_FLAGS="cru"
 
 # A C compiler.
-LTCC="gcc"
+LTCC="/licsoft/libraries/openmpi/1.2.6/64bit/bin/mpicc"
 
 # LTCC compiler flags.
 LTCFLAGS="-g -O2"
@@ -7112,13 +7112,13 @@ LTCFLAGS="-g -O2"
 CC="g77"
 
 # Is the compiler the GNU C compiler?
-with_gcc=yes
+with_gcc=
 
 # An ERE matcher.
 EGREP="grep -E"
 
 # The linker used to build libraries.
-LD="/usr/bin/ld"
+LD="/usr/x86_64-suse-linux/bin/ld -m elf_x86_64"
 
 # Whether we need hard or soft links.
 LN_S="ln -s"
@@ -7346,10 +7346,10 @@ variables_saved_for_relink="PATH LD_LIBRARY_PATH LD_RUN_PATH GCC_EXEC_PREFIX COM
 link_all_deplibs=unknown
 
 # Compile-time system search path for libraries
-sys_lib_search_path_spec=" /u/witkowski/local/lib/i386-redhat-linux/3.4.6/ /u/witkowski/local/lib/ /usr/lib/gcc/i386-redhat-linux/3.4.6/ /usr/lib/gcc/i386-redhat-linux/3.4.6/ /usr/lib/gcc/i386-redhat-linux/3.4.6/../../../../i386-redhat-linux/lib/i386-redhat-linux/3.4.6/ /usr/lib/gcc/i386-redhat-linux/3.4.6/../../../../i386-redhat-linux/lib/ /usr/lib/gcc/i386-redhat-linux/3.4.6/../../../i386-redhat-linux/3.4.6/ /usr/lib/gcc/i386-redhat-linux/3.4.6/../../../ /lib/i386-redhat-linux/3.4.6/ /lib/ /usr/lib/i386-redhat-linux/3.4.6/ /usr/lib/"
+sys_lib_search_path_spec=" /fastfs/wir/local/lib/x86_64-suse-linux/3.3.5/ /fastfs/wir/local/lib/ /usr/lib64/gcc-lib/x86_64-suse-linux/3.3.5/ /usr/lib/gcc/x86_64-suse-linux/3.3.5/ /usr/lib64/gcc-lib/x86_64-suse-linux/3.3.5/../../../../x86_64-suse-linux/lib/x86_64-suse-linux/3.3.5/ /usr/lib64/gcc-lib/x86_64-suse-linux/3.3.5/../../../../x86_64-suse-linux/lib/ /usr/lib64/gcc-lib/x86_64-suse-linux/3.3.5/../../../x86_64-suse-linux/3.3.5/ /usr/lib64/gcc-lib/x86_64-suse-linux/3.3.5/../../../ /lib/x86_64-suse-linux/3.3.5/ /lib/ /usr/lib/x86_64-suse-linux/3.3.5/ /usr/lib/"
 
 # Run-time system search path for libraries
-sys_lib_dlsearch_path_spec="/lib /usr/lib /usr/lib/octave-2.9.9 /usr/lib/qt-3.3/lib /usr/lib/qt4/lib /usr/lib/xulrunner-1.9.2 "
+sys_lib_dlsearch_path_spec="/lib /usr/lib /usr/X11R6/lib64/Xaw3d /usr/X11R6/lib64 /usr/X11R6/lib/Xaw3d /usr/X11R6/lib /usr/x86_64-suse-linux/lib /usr/local/lib64 /usr/local/lib /opt/kde3/lib64 /opt/kde3/lib /opt/gnome/lib64 /opt/gnome/lib /lib64 /lib /usr/lib64 /usr/lib /opt/cluster/intel/cce/9.1.042/lib /opt/cluster/intel/fce/9.1.036/lib /opt/cluster/Pathscale3.0/lib/2.9.99 /opt/cluster/Pathscale3.0/lib/2.9.99/32 /work/licsoft/compilers/pgi/linux86-64/6.2/lib /work/licsoft/compilers/pgi/linux86-64/6.2/libso "
 
 # Fix the shell variable $srcfile for the compiler.
 fix_srcfile_path=""
diff --git a/AMDiS/src/DOFMatrix.cc b/AMDiS/src/DOFMatrix.cc
index 64b339ff..4533d159 100644
--- a/AMDiS/src/DOFMatrix.cc
+++ b/AMDiS/src/DOFMatrix.cc
@@ -219,6 +219,11 @@ namespace AMDiS {
       } else {
 	for (int j = 0; j < nCol; j++) {
 	  DegreeOfFreedom col = colIndices[j];
+
+// 	  if (MPI::COMM_WORLD.Get_rank() == 0  && row <= 10 && col <= 10) {
+// 	    MSG("%d/%d entry: %e\n", row, col, elMat[i][j]);
+// 	  }
+
 	  ins[row][col] += elMat[i][j];
 	}
       }
diff --git a/AMDiS/src/Element.h b/AMDiS/src/Element.h
index e1f06894..f1500f02 100644
--- a/AMDiS/src/Element.h
+++ b/AMDiS/src/Element.h
@@ -393,10 +393,8 @@ namespace AMDiS {
      * are assembled and put together to a list. 
      *
      * \param[in]  feSpace         FE space which is used to get the dofs.
-     * \param[in]  ith             Defines the edge/face on which all the vertex dofs
-     *                             are assembled.
-     * \param[in]  geoPos          Must be either EDGE or FACE. Defines whether an
-     *                             edge or a face (only in 3d) should be traversed.
+     * \param[in]  bound           Defines the edge/face of the element on which
+     *                             all vertex dofs are assembled.
      * \param[out] dofs            List of dofs, where the result is stored.
      */
     virtual void getVertexDofs(FiniteElemSpace* feSpace, 
@@ -409,10 +407,8 @@ namespace AMDiS {
      * are assembled and put together to a list.
      *
      * \param[in]  feSpace         FE space which is used to get the dofs.
-     * \param[in]  ith             Defines the edge/face on which all the non vertex
-     *                             dofs are assembled.
-     * \param[in]  geoPos          Must be either EDGE or FACE. Defines whether an
-     *                             edge or a face (only in 3d) should be traversed.
+     * \param[in]  bound           Defines the edge/face of the element on which
+     *                             all non vertex dofs are assembled.
      * \param[out] dofs            All dofs are put to this dof list.
      */
     virtual void getNonVertexDofs(FiniteElemSpace* feSpace, 
diff --git a/AMDiS/src/ProblemVec.cc b/AMDiS/src/ProblemVec.cc
index 1f9b8f49..b7787d01 100644
--- a/AMDiS/src/ProblemVec.cc
+++ b/AMDiS/src/ProblemVec.cc
@@ -451,8 +451,8 @@ namespace AMDiS {
       }
 
       fileWriters.push_back(new FileWriter(numberedName,
-					    componentMeshes[0],
-					    solutionList));
+					   componentMeshes[0],
+					   solutionList));
     }
 
     // Create own filewriters for each components of the problem
@@ -464,7 +464,7 @@ namespace AMDiS {
       if (filename != "")
 	fileWriters.push_back(new FileWriter(numberedName, 
 					     componentMeshes[i], 
-					     solution->getDOFVector(i)));
+					     solution->getDOFVector(i)));      
     }
 
     // Check for serializer
@@ -674,8 +674,8 @@ namespace AMDiS {
 
       for (int j = 0; j < nComponents; j++) {
 
-	if (writeAsmInfo)
-	  MSG("--------- %d %d -------------\n", i, j);
+	//	if (writeAsmInfo)
+	//	  MSG("--------- %d %d -------------\n", i, j);
 
 	// Only if this variable is true, the current matrix will be assembled.	
 	bool assembleMatrix = true;
diff --git a/AMDiS/src/VtkWriter.cc b/AMDiS/src/VtkWriter.cc
index 8b3f9917..2153ae00 100644
--- a/AMDiS/src/VtkWriter.cc
+++ b/AMDiS/src/VtkWriter.cc
@@ -128,94 +128,94 @@ namespace AMDiS {
     return 0;
   }
 
+
   void VtkWriter::writeFile(DOFVector<double> *values, 
-				std::string filename,
-				bool writeParallel)
+			    std::string filename,
+			    bool writeParallel)
   {
-	FUNCNAME("VtkWriter::writeFile()");
-	
-	DataCollector dc(values->getFeSpace(), values);
-	std::vector<DataCollector*> dcList(0);
-	dcList.push_back(&dc);
-	writeFile(dcList,filename,writeParallel);
-  };
+    FUNCNAME("VtkWriter::writeFile()");
+
+    DataCollector dc(values->getFeSpace(), values);
+    std::vector<DataCollector*> dcList(0);
+    dcList.push_back(&dc);
+    writeFile(dcList,filename,writeParallel);
+  }
+
 
   void VtkWriter::writeFile(std::vector<DOFVector<double>* > &values,
-				std::string filename,
-				bool writeParallel)
+			    std::string filename,
+			    bool writeParallel)
   {
-	std::vector<DataCollector*> dcList(0);
-	for(unsigned i=0; i<values.size(); ++i) {
-		dcList.push_back(new DataCollector(values[i]->getFeSpace(), values[i]));
-	}
-	writeFile(dcList,filename,writeParallel);
-	for(unsigned i=0; i<values.size(); ++i) {
-    		delete dcList[i];
-	}
-  };
+    std::vector<DataCollector*> dcList(0);
+    for (unsigned int i = 0; i < values.size(); i++)
+      dcList.push_back(new DataCollector(values[i]->getFeSpace(), values[i]));	
+    writeFile(dcList, filename, writeParallel);
+    for (unsigned int i = 0; i < values.size(); i++)
+      delete dcList[i];	
+  }
+
 
   void VtkWriter::writeFile(WorldVector<DOFVector<double>* > &values,
-				std::string filename,
-				bool writeParallel)
+			    std::string filename,
+			    bool writeParallel)
   {
-	std::vector<DataCollector*> dcList(0);
-	for(unsigned i=0; i<static_cast<unsigned>(values.getSize()); ++i) {
-		dcList.push_back(new DataCollector(values[i]->getFeSpace(), values[i]));
-	}
-	writeFile(dcList,filename,writeParallel);
-	for(unsigned i=0; i<static_cast<unsigned>(values.getSize()); ++i) {
-    		delete dcList[i];
-	}
-  };
+    std::vector<DataCollector*> dcList(0);
+    for (int i = 0; i < values.getSize(); i++)
+      dcList.push_back(new DataCollector(values[i]->getFeSpace(), values[i]));	
+    writeFile(dcList, filename, writeParallel);
+    for (int i = 0; i < values.getSize(); i++)
+      delete dcList[i];	
+  }
+
 
   void VtkWriter::writeFile(DOFVector<WorldVector<double> > *values,
-				std::string filename,
-				bool writeParallel)
+			    std::string filename,
+			    bool writeParallel)
   {
-	WorldVector<DOFVector<double>*> valuesWV;
-	for(unsigned i=0; i<static_cast<unsigned>(valuesWV.getSize()); ++i)
-		valuesWV[i] = new DOFVector<double>(values->getFeSpace(), "valuesWV_i");
-	transform(values, &valuesWV);
-	writeFile(valuesWV,filename,writeParallel);
-	for(unsigned i=0; i<static_cast<unsigned>(valuesWV.getSize()); ++i) {
-    		delete valuesWV[i];
-	}
-  };
+    WorldVector<DOFVector<double>*> valuesWV;
+    for (int i =0 ; i < valuesWV.getSize(); i++)
+      valuesWV[i] = new DOFVector<double>(values->getFeSpace(), "valuesWV_i");
+    transform(values, &valuesWV);
+    writeFile(valuesWV, filename, writeParallel);
+    for (int i = 0; i < valuesWV.getSize(); i++)
+      delete valuesWV[i];
+  }
 
+  
   void VtkWriter::writeFile(SystemVector *values, 
-				std::string filename,
-				bool writeParallel)
+			    std::string filename,
+			    bool writeParallel)
   {
-	std::vector<DataCollector*> dcList(0);
-	for(unsigned i=0; i<static_cast<unsigned>(values->getSize()); ++i) {
-		dcList.push_back(new DataCollector(values->getDOFVector(i)->getFeSpace(), values->getDOFVector(i)));
-	}
-	writeFile(dcList,filename,writeParallel);
-	for(unsigned i=0; i<static_cast<unsigned>(dcList.size()); ++i) {
-    		delete dcList[i];
-	}
-  };
-
+    std::vector<DataCollector*> dcList(0);
+    for (int i = 0; i < values->getSize(); i++)
+      dcList.push_back(new DataCollector(values->getDOFVector(i)->getFeSpace(), 
+					 values->getDOFVector(i)));    
+    writeFile(dcList, filename, writeParallel);
+    for (unsigned i = 0; i < dcList.size(); i++)
+      delete dcList[i];    
+  }
+  
+  
   void VtkWriter::writeFile(std::vector<DataCollector*> &dcList,
-				std::string filename,
-				bool writeParallel)
+			    std::string filename,
+			    bool writeParallel)
   {
-	VtkWriter writer(&dcList);
-
-	#ifdef HAVE_PARALLEL_DOMAIN_AMDIS
-	if (writeParallel) {
-		using boost::lexical_cast;
-		
-		int sPos = filename.find(".vtu");
-		TEST_EXIT(sPos >= 0)("Failed to find file postfix!\n");
-		std::string name = filename.substr(0, sPos);
-		
-		if (MPI::COMM_WORLD.Get_rank() == 0)
-			writer.writeParallelFile(name + ".pvtu", MPI::COMM_WORLD.Get_size(), name, ".vtu");    
-		
-		filename = name + "-p" + lexical_cast<std::string>(MPI::COMM_WORLD.Get_rank()) + "-.vtu";
-	}
-	#endif
-	writer.writeFile(filename);
-  };
+    VtkWriter writer(&dcList);
+    
+#ifdef HAVE_PARALLEL_DOMAIN_AMDIS
+    if (writeParallel) {
+      using boost::lexical_cast;
+      
+      int sPos = filename.find(".vtu");
+      TEST_EXIT(sPos >= 0)("Failed to find file postfix!\n");
+      std::string name = filename.substr(0, sPos);
+      
+      if (MPI::COMM_WORLD.Get_rank() == 0)
+	writer.writeParallelFile(name + ".pvtu", MPI::COMM_WORLD.Get_size(), name, ".vtu");    
+      
+      filename = name + "-p" + lexical_cast<std::string>(MPI::COMM_WORLD.Get_rank()) + "-.vtu";
+    }
+#endif
+    writer.writeFile(filename);
+  }
 }
diff --git a/AMDiS/src/VtkWriter.h b/AMDiS/src/VtkWriter.h
index f89271c3..c7be36da 100644
--- a/AMDiS/src/VtkWriter.h
+++ b/AMDiS/src/VtkWriter.h
@@ -72,17 +72,22 @@ namespace AMDiS {
     static void writeFile(std::vector<DOFVector<double>*> &values,
 			  std::string filename, 
 			  bool writeParallel = true);
+
     static void writeFile(WorldVector<DOFVector<double>*> &values,
 			  std::string filename, 
 			  bool writeParallel = true);
+
     static void writeFile(DOFVector<WorldVector<double> > *values,
 			  std::string filename, 
 			  bool writeParallel = true);
+
     static void writeFile(DOFVector<WorldVector<double> > &values,
 			  std::string filename, 
-			  bool writeParallel = true) {
+			  bool writeParallel = true) 
+    {
       writeFile(&values, filename, writeParallel);
     }
+
     static void writeFile(SystemVector *values,
 			  std::string filename, 
 			  bool writeParallel = true);
diff --git a/AMDiS/src/VtkWriter.hh b/AMDiS/src/VtkWriter.hh
index 721252ea..010a1c8d 100644
--- a/AMDiS/src/VtkWriter.hh
+++ b/AMDiS/src/VtkWriter.hh
@@ -81,6 +81,7 @@ namespace AMDiS {
     file << "  </UnstructuredGrid>\n";
     file << "</VTKFile>\n";
   }
+
   
   template<typename T>
   void VtkWriter::writeVertexCoords(T &file)
@@ -115,6 +116,7 @@ namespace AMDiS {
     }
   }
 
+
   template<typename T>
   void VtkWriter::writeVertexValues(T &file, int componentNo)
   {
@@ -149,12 +151,13 @@ namespace AMDiS {
 	   ++intPointIt, ++valueIt, ++interpCoordIt) {      
 	
 	if (*intPointIt >= 0) {
-	  for (int i = 0; i < static_cast<int>(interpCoordIt->size()); i++)
+	  for (unsigned int i = 0; i < interpCoordIt->size(); i++)
 	    file << " " << (fabs(*valueIt) < 1e-40 ? 0.0 : *valueIt) << "\n";
 	}
       }
     }    
   }
+
   
   template<typename T>
   void VtkWriter::writeConnectivity(T &file)
@@ -182,6 +185,7 @@ namespace AMDiS {
     }
   }
   
+
   template<typename T>
   void VtkWriter::writeConnectivity_dim2_degree2(T &file)
   {
@@ -215,6 +219,7 @@ namespace AMDiS {
     }
   }
 
+
   template<typename T>
   void VtkWriter::writeConnectivity_dim2_degree3(T &file)
   {
@@ -270,6 +275,7 @@ namespace AMDiS {
     }
   }
 
+
   template<typename T>
   void VtkWriter::writeConnectivity_dim2_degree4(T &file)
   {
diff --git a/AMDiS/src/parallel/GlobalMatrixSolver.cc b/AMDiS/src/parallel/GlobalMatrixSolver.cc
index 4ae8d8f9..0359b421 100644
--- a/AMDiS/src/parallel/GlobalMatrixSolver.cc
+++ b/AMDiS/src/parallel/GlobalMatrixSolver.cc
@@ -1,9 +1,9 @@
-#include "GlobalMatrixSolver.h"
+#include "parallel/GlobalMatrixSolver.h"
+#include "parallel/StdMpi.h"
+#include "parallel/ParallelDebug.h"
 #include "DOFVector.h"
 #include "Debug.h"
 #include "SystemVector.h"
-#include "parallel/StdMpi.h"
-#include "parallel/ParallelDomainDbg.h"
 
 #include "petscksp.h"
 
@@ -117,7 +117,7 @@ namespace AMDiS {
 	    // The value is assign to n matrix entries, therefore, every entry 
 	    // has only 1/n value of the original entry.
 	    double scalFactor = 
-	      1.0 / (meshDistributor->getPeriodicDof(globalColDof).size() + 1.0);
+	      1.0 / pow(2.0, meshDistributor->getPeriodicDof(globalColDof).size());
 
 	    // Insert original entry.
  	    cols.push_back(colIndex);
@@ -131,7 +131,7 @@ namespace AMDiS {
  	      values.push_back(value(*icursor) * scalFactor);
 	    }
  	  } else {
-	    // Neigher row nor column dof index is periodic, simple add entry.
+	    // The column DOF index is not periodic; simply add the entry.
 	    cols.push_back(colIndex);
 	    values.push_back(value(*icursor));
 	  }
@@ -149,34 +149,39 @@ namespace AMDiS {
 	// The row dof is periodic, so send dof to all the corresponding rows.
 
 	double scalFactor = 
-	  1.0 / (meshDistributor->getPeriodicDof(globalRowDof).size() + 1.0);
-	
-	int diagIndex = -1;
-	for (int i = 0; i < static_cast<int>(values.size()); i++) {
-	  // Change only the non diagonal values in the col. For the diagonal test
-	  // we compare the global dof indices of the dof matrix (not of the petsc
-	  // matrix!).
-	  if ((cols[i] - dispAddCol) / dispMult != globalRowDof)
-	    values[i] *= scalFactor;
-	  else
-	    diagIndex = i;
-	}
-	
+	  1.0 / pow(2.0, meshDistributor->getPeriodicDof(globalRowDof).size());
+	for (unsigned int i = 0; i < values.size(); i++)
+	  values[i] *= scalFactor;
+
 	// Send the main row to the petsc matrix.
 	MatSetValues(petscMatrix, 1, &rowIndex, cols.size(), 
 		     &(cols[0]), &(values[0]), ADD_VALUES);	
  
-	// Set diagonal element to zero, i.e., the diagonal element of the current
-	// row is not send to the periodic row indices.
-	if (diagIndex != -1)
-	  values[diagIndex] = 0.0;
+	std::vector<int> perCols;
+	perCols.reserve(300);
+	std::vector<double> perValues;
+	perValues.reserve(300);
+	for (unsigned int i = 0; i < cols.size(); i++) {
+	  int tmp = (cols[i] - dispAddCol) / dispMult;
+	  if (meshDistributor->getPeriodicDofMap().count(tmp) == 0) {
+	    perCols.push_back(cols[i]);
+	    perValues.push_back(values[i]);
+	  } else {
+	    for (std::set<DegreeOfFreedom>::iterator it = meshDistributor->getPeriodicDof(tmp).begin();
+		 it != meshDistributor->getPeriodicDof(tmp).end(); ++it) {
+	      perValues.push_back(values[i]);
+	      perCols.push_back((*it * dispMult) + dispAddCol);
+	    }
+	  }
+	}
 
 	// Send the row to all periodic row indices.
 	for (std::set<DegreeOfFreedom>::iterator it = meshDistributor->getPeriodicDof(globalRowDof).begin();
 	     it != meshDistributor->getPeriodicDof(globalRowDof).end(); ++it) {
 	  int perRowIndex = *it * dispMult + dispAddRow;
-	  MatSetValues(petscMatrix, 1, &perRowIndex, cols.size(), 
-		       &(cols[0]), &(values[0]), ADD_VALUES);
+
+	  MatSetValues(petscMatrix, 1, &perRowIndex, perCols.size(), 
+		       &(perCols[0]), &(perValues[0]), ADD_VALUES);
 	}
 
       } else {
@@ -372,7 +377,7 @@ namespace AMDiS {
 	    d_nnz[localRowIdx]++;
 	}
       }
-    }  
+    }
   }
 
 
@@ -436,7 +441,7 @@ namespace AMDiS {
     for (int i = 0; i < nComponents; i++)
       for (int j = 0; j < nComponents; j++)
 	if ((*mat)[i][j])
-	  setDofMatrix((*mat)[i][j], nComponents, i, j);	
+	  setDofMatrix((*mat)[i][j], nComponents, i, j);		
 
     INFO(info, 8)("Fill petsc matrix 2 needed %.5f seconds\n", TIME_USED(first, clock()));
 
diff --git a/AMDiS/src/parallel/GlobalMatrixSolver.h b/AMDiS/src/parallel/GlobalMatrixSolver.h
index 25228935..d6ca31e4 100644
--- a/AMDiS/src/parallel/GlobalMatrixSolver.h
+++ b/AMDiS/src/parallel/GlobalMatrixSolver.h
@@ -24,7 +24,7 @@
 
 #include "AMDiS_fwd.h"
 #include "Global.h"
-#include "ParallelDomainBase.h"
+#include "MeshDistributor.h"
 #include "ProblemVec.h"
 #include "ProblemInstat.h"
 
diff --git a/AMDiS/src/parallel/ParallelDomainBase.cc b/AMDiS/src/parallel/MeshDistributor.cc
similarity index 94%
rename from AMDiS/src/parallel/ParallelDomainBase.cc
rename to AMDiS/src/parallel/MeshDistributor.cc
index d909fc44..68c9f012 100644
--- a/AMDiS/src/parallel/ParallelDomainBase.cc
+++ b/AMDiS/src/parallel/MeshDistributor.cc
@@ -4,8 +4,8 @@
 #include <boost/lexical_cast.hpp>
 #include <boost/filesystem.hpp>
 
-#include "parallel/ParallelDomainBase.h"
-#include "parallel/ParallelDomainDbg.h"
+#include "parallel/MeshDistributor.h"
+#include "parallel/ParallelDebug.h"
 #include "parallel/StdMpi.h"
 #include "ParMetisPartitioner.h"
 #include "Mesh.h"
@@ -108,7 +108,7 @@ namespace AMDiS {
 	MSG("Skip write part mesh!\n");
     }
 
-    ParallelDomainDbg::testAllElements(*this);
+    ParallelDebug::testAllElements(*this);
 #endif
 
 
@@ -117,7 +117,7 @@ namespace AMDiS {
     createInteriorBoundaryInfo();
 
 #if (DEBUG != 0)
-    ParallelDomainDbg::printBoundaryInfo(*this);
+    ParallelDebug::printBoundaryInfo(*this);
 #endif
 
 
@@ -142,8 +142,8 @@ namespace AMDiS {
 #if (DEBUG != 0)
     MSG("AMDiS runs in debug mode, so make some test ...\n");
     debug::testSortedDofs(mesh, elMap);
-    ParallelDomainDbg::testInteriorBoundary(*this);
-    ParallelDomainDbg::testCommonDofs(*this, true);
+    ParallelDebug::testInteriorBoundary(*this);
+    ParallelDebug::testCommonDofs(*this, true);
     MSG("Debug mode tests finished!\n");
 
     debug::writeMesh(feSpace, -1, "macro_mesh");   
@@ -898,21 +898,20 @@ namespace AMDiS {
       // === indices. Now, we have to sort the corresponding list in this rank to   ===
       // === get the same order.                                                    ===
      
-      for (int j = 0; j < static_cast<int>(rankIt->second.size()); j++) {
+      for (unsigned int j = 0; j < rankIt->second.size(); j++) {
 
 	// If the expected object is not at place, search for it.
 
 	BoundaryObject &recvedBound = stdMpi.getRecvData()[rankIt->first][j].rankObj;
 
 	if ((rankIt->second)[j].neighObj != recvedBound) {
-	  int k = j + 1;
-	  for (; k < static_cast<int>(rankIt->second.size()); k++)
+	  unsigned int k = j + 1;
+	  for (; k < rankIt->second.size(); k++)
  	    if ((rankIt->second)[k].neighObj == recvedBound)
 	      break;
 
 	  // The element must always be found, because the list is just in another order.
-	  TEST_EXIT_DBG(k < static_cast<int>(rankIt->second.size()))
-	    ("Should never happen!\n");
+	  TEST_EXIT_DBG(k < rankIt->second.size())("Should never happen!\n");
 
 	  // Swap the current with the found element.
 	  AtomicBoundary tmpBound = (rankIt->second)[k];
@@ -949,20 +948,19 @@ namespace AMDiS {
 	if (rankIt->first <= mpiRank)
 	  continue;
 	  
-	for (int j = 0; j < static_cast<int>(rankIt->second.size()); j++) {
+	for (unsigned int j = 0; j < rankIt->second.size(); j++) {
 	  
 	  BoundaryObject &recvedBound = stdMpi.getRecvData()[rankIt->first][j].rankObj;
 	  
 	  if (periodicBoundary.boundary[rankIt->first][j].neighObj != recvedBound) {    
-	    int k = j + 1;	    
-	    for (; k < static_cast<int>(rankIt->second.size()); k++)
+	    unsigned int k = j + 1;	    
+	    for (; k < rankIt->second.size(); k++)
 	      if (periodicBoundary.boundary[rankIt->first][k].neighObj == recvedBound)
 		break;
 	    
 	    // The element must always be found, because the list is just in 
 	    // another order.
-	    TEST_EXIT_DBG(k < static_cast<int>(rankIt->second.size()))
-	      ("Should never happen!\n");
+	    TEST_EXIT_DBG(k < rankIt->second.size())("Should never happen!\n");
 	    
 	    // Swap the current with the found element.
 	    AtomicBoundary tmpBound = (rankIt->second)[k];
@@ -1366,11 +1364,6 @@ namespace AMDiS {
 	b.rankObj = rankDofs[it->second[i]];
 	b.neighObj = stdMpiObjs.getRecvData(it->first)[i];
 
-	if (mpiRank == 3 && it->first == 0) {	  
-	  MSG("ADDED VERTEX BOUNDARY WITH RANK 0!: %d %d %d\n", 
-	      it->second[i], b.rankObj.el->getIndex(), b.rankObj.ithObj);
-	}
-	
 	sendObjects[it->first].push_back(b.rankObj);
       }
     }
@@ -1682,7 +1675,7 @@ namespace AMDiS {
       it->rankObj.el->getVertexDofs(feSpace, it->rankObj, dofs);
       it->rankObj.el->getNonVertexDofs(feSpace, it->rankObj, dofs);
     
-      for (int i = 0; i < static_cast<int>(dofs.size()); i++)
+      for (unsigned int i = 0; i < dofs.size(); i++)
 	sendDofs[it.getRank()].push_back(dofs[i]);                  
     }
 
@@ -1692,7 +1685,7 @@ namespace AMDiS {
       it->rankObj.el->getVertexDofs(feSpace, it->rankObj, dofs);
       it->rankObj.el->getNonVertexDofs(feSpace, it->rankObj, dofs);
      
-      for (int i = 0; i < static_cast<int>(dofs.size()); i++) {
+      for (unsigned int i = 0; i < dofs.size(); i++) {
 	DofContainer::iterator eraseIt = 
 	  find(rankDofs.begin(), rankDofs.end(), dofs[i]);
 	if (eraseIt != rankDofs.end()) 
@@ -1730,7 +1723,7 @@ namespace AMDiS {
     // === Send new DOF indices. ===
 
 #if (DEBUG != 0)
-    ParallelDomainDbg::testDofContainerCommunication(*this, sendDofs, recvDofs);
+    ParallelDebug::testDofContainerCommunication(*this, sendDofs, recvDofs);
 #endif
     
     int i = 0;
@@ -1780,7 +1773,7 @@ namespace AMDiS {
     
 #if (DEBUG != 0)
     debug::testSortedDofs(mesh, elMap);
-    ParallelDomainDbg::testCommonDofs(*this, true);
+    ParallelDebug::testCommonDofs(*this, true);
     
 #if 0
     MSG("------------- Debug information -------------\n");
@@ -1967,112 +1960,70 @@ namespace AMDiS {
     // === Each rank traverse its periodic boundaries and sends the dof indices ===
     // === to the rank "on the other side" of the periodic boundary.            ===
 
+    RankToDofContainer rankPeriodicDofs;
+
     for (RankToBoundMap::iterator it = periodicBoundary.boundary.begin();
 	 it != periodicBoundary.boundary.end(); ++it) {
       TEST_EXIT_DBG(it->first != mpiRank)
 	("Periodic interior boundary within the rank itself is not possible!\n");
 
       // Create dof indices on the boundary. 
-      DofContainer dofs;
+      DofContainer& dofs = rankPeriodicDofs[it->first];
       for (std::vector<AtomicBoundary>::iterator boundIt = it->second.begin();
 	   boundIt != it->second.end(); ++boundIt) {
-	boundIt->rankObj.el->getVertexDofs(feSpace, boundIt->rankObj, dofs);
-	boundIt->rankObj.el->getNonVertexDofs(feSpace, boundIt->rankObj, dofs);
+	Element *el = boundIt->rankObj.el;
+
+	if (boundIt->rankObj.reverseMode) {
+	  for (int i = mesh->getDim() - 1; i >= 0; i--) {
+	    int localDofIndex = 
+	      el->getVertexOfPosition(boundIt->rankObj.subObj, boundIt->rankObj.ithObj, i);
+	    dofs.push_back(el->getDOF(localDofIndex));
+	  }
+	} else {
+	  for (int i = 0; i < mesh->getDim(); i++) {
+	    int localDofIndex = 
+	      el->getVertexOfPosition(boundIt->rankObj.subObj, boundIt->rankObj.ithObj, i);
+	    dofs.push_back(el->getDOF(localDofIndex));
+	  }
+	}
+	el->getVertexDofs(feSpace, boundIt->rankObj, dofs);
+	el->getNonVertexDofs(feSpace, boundIt->rankObj, dofs);
       }
 
       // Send the global indices to the rank on the other side.
       stdMpi.getSendData(it->first).reserve(dofs.size());
       for (unsigned int i = 0; i < dofs.size(); i++)
-	stdMpi.getSendData(it->first).push_back(mapLocalGlobalDofs[*(dofs[i])]);
+	stdMpi.getSendData(it->first).push_back(mapLocalGlobalDofs[*(dofs[i])]);      
 
       // Receive from this rank the same number of dofs.
-      stdMpi.recv(it->first, dofs.size());     
+      stdMpi.recv(it->first, dofs.size());
+      //      rankPeriodicDofs[it->first] = dofs;
     }
 
     stdMpi.updateSendDataSize();
     stdMpi.startCommunication<int>(MPI_INT);
 
+
     // === The rank has received the dofs from the rank on the other side of ===
     // === the boundary. Now it can use them to create the mapping between   ===
     // === the periodic dofs in this rank and the corresponding periodic     ===
     // === dofs from the other ranks.                                        ===
 
 
-    std::map<DegreeOfFreedom, std::set<int> > dofFromRank;
     for (RankToBoundMap::iterator it = periodicBoundary.boundary.begin();
 	 it != periodicBoundary.boundary.end(); ++it) {
-      DofContainer dofs;
-      
-      // Create the dofs on the boundary in inverse order.
-      for (std::vector<AtomicBoundary>::iterator boundIt = it->second.begin();
-	   boundIt != it->second.end(); ++boundIt) {
-	DofContainer tmpdofs;
-	boundIt->rankObj.el->getNonVertexDofs(feSpace, boundIt->rankObj, tmpdofs);
-	boundIt->rankObj.el->getVertexDofs(feSpace, boundIt->rankObj, tmpdofs);
-
-	for (unsigned int i = 0; i < tmpdofs.size(); i++)
-	  dofs.push_back(tmpdofs[i]);	
-      }
+      DofContainer& dofs = rankPeriodicDofs[it->first];
 
       // Added the received dofs to the mapping.
-      for (int i = 0; i < static_cast<int>(dofs.size()); i++) {
+      for (unsigned int i = 0; i < dofs.size(); i++) {
 	int globalDofIndex = mapLocalGlobalDofs[*(dofs[i])];
 	periodicDof[globalDofIndex].insert(stdMpi.getRecvData(it->first)[i]);
-	dofFromRank[globalDofIndex].insert(it->first);
-      }
-    }
-
-    if (dofFromRank.size() > 0) 
-      TEST_EXIT_DBG(mesh->getDim() == 2)
-	("Periodic boundary corner problem must be generalized to 3d!\n");
-
-    MPI::Request request[min(static_cast<int>(periodicBoundary.boundary.size() * 2), 4)];
-    int requestCounter = 0;
-    std::vector<int*> sendBuffers, recvBuffers;
-
-    for (std::map<DegreeOfFreedom, std::set<int> >::iterator it = dofFromRank.begin();
-	 it != dofFromRank.end(); ++it) {
-      if (it->second.size() == 2) {
-	TEST_EXIT_DBG(periodicDof[it->first].size() == 2)("Missing periodic dof!\n");
-	
-	int *sendbuf = new int[2];
-	sendbuf[0] = *(periodicDof[it->first].begin());
-	sendbuf[1] = *(++(periodicDof[it->first].begin()));
-	
-	request[requestCounter++] = 
-	  mpiComm.Isend(sendbuf, 2, MPI_INT, *(it->second.begin()), 0);
-	request[requestCounter++] = 
-	  mpiComm.Isend(sendbuf, 2, MPI_INT, *(++(it->second.begin())), 0);
-	
-	sendBuffers.push_back(sendbuf);
-
-	int *recvbuf1 = new int[2];
-	int *recvbuf2 = new int[2];
-
-	request[requestCounter++] = 
-	  mpiComm.Irecv(recvbuf1, 2, MPI_INT, *(it->second.begin()), 0);
-	request[requestCounter++] = 
-	  mpiComm.Irecv(recvbuf2, 2, MPI_INT, *(++(it->second.begin())), 0);
-
-	recvBuffers.push_back(recvbuf1);
-	recvBuffers.push_back(recvbuf2);
-      }
-    }
-
-    MPI::Request::Waitall(requestCounter, request);
-
-    int i = 0;
-    for (std::map<DegreeOfFreedom, std::set<int> >::iterator it = dofFromRank.begin();
-	 it != dofFromRank.end(); ++it) {
-      if (it->second.size() == 2) {
-	for (int k = 0; k < 2; k++)
-	  for (int j = 0; j < 2; j++)
-	    if (recvBuffers[i + k][j] != it->first)
-	      periodicDof[it->first].insert(recvBuffers[i + k][j]);
-
-	i++;
       }
     }
+       
+    for (PeriodicDofMap::iterator it = periodicDof.begin(); it != periodicDof.end(); ++it)
+      if (it->second.size() == 2)
+	periodicDof.erase(it++);          
   }
 
 
diff --git a/AMDiS/src/parallel/ParallelDomainBase.h b/AMDiS/src/parallel/MeshDistributor.h
similarity index 98%
rename from AMDiS/src/parallel/ParallelDomainBase.h
rename to AMDiS/src/parallel/MeshDistributor.h
index bd986ca6..fc42d49a 100644
--- a/AMDiS/src/parallel/ParallelDomainBase.h
+++ b/AMDiS/src/parallel/MeshDistributor.h
@@ -17,10 +17,10 @@
 // ==                                                                        ==
 // ============================================================================
 
-/** \file ParallelDomainBase.h */
+/** \file MeshDistributor.h */
 
-#ifndef AMDIS_PARALLELDOMAINBASE_H
-#define AMDIS_PARALLELDOMAINBASE_H
+#ifndef AMDIS_MESHDISTRIBUTOR_H
+#define AMDIS_MESHDISTRIBUTOR_H
 
 
 #include <map>
@@ -437,7 +437,7 @@ namespace AMDiS {
     /// Number of DOFs in the rank mesh.
     int nRankDofs;
 
-    ///
+    /// Number of DOFs in the whole domain.
     int nOverallDofs;
 
     /** \brief 
@@ -457,7 +457,8 @@ namespace AMDiS {
     InteriorBoundary otherIntBoundary;
 
     /** \brief
-     *
+     * Defines the periodic boundaries with other ranks. Periodic boundaries have
+     * no owner, as it is the case of interior boundaries.
      */
     InteriorBoundary periodicBoundary;
 
@@ -525,8 +526,8 @@ namespace AMDiS {
     /// may not be the case.
     bool macroElementStructureConsisten;
 
-    friend class ParallelDomainDbg;
+    friend class ParallelDebug;
   };
 }
 
-#endif // AMDIS_PARALLELDOMAINBASE_H
+#endif // AMDIS_MESHDISTRIBUTOR_H
diff --git a/AMDiS/src/parallel/ParallelDomainDbg.cc b/AMDiS/src/parallel/ParallelDebug.cc
similarity index 80%
rename from AMDiS/src/parallel/ParallelDomainDbg.cc
rename to AMDiS/src/parallel/ParallelDebug.cc
index 4391379c..98e0619e 100644
--- a/AMDiS/src/parallel/ParallelDomainDbg.cc
+++ b/AMDiS/src/parallel/ParallelDebug.cc
@@ -1,5 +1,5 @@
-#include "ParallelDomainDbg.h"
-#include "ParallelDomainBase.h"
+#include "ParallelDebug.h"
+#include "MeshDistributor.h"
 #include "PartitionElementData.h"
 #include "ProblemVec.h"
 #include "DOFVector.h"
@@ -10,18 +10,22 @@
 
 namespace AMDiS {
 
-  void ParallelDomainDbg::testInteriorBoundary(MeshDistributor &pdb)
+  void ParallelDebug::testInteriorBoundary(MeshDistributor &pdb)
   {
-    FUNCNAME("ParallelDomainDbg::testInteriorBoundary()");
+    FUNCNAME("ParallelDebug::testInteriorBoundary()");
 
     typedef MeshDistributor::RankToBoundMap RankToBoundMap;
 
     std::vector<int*> sendBuffers, recvBuffers;
 
     MPI::Request request[pdb.myIntBoundary.boundary.size() + 
-			 pdb.otherIntBoundary.boundary.size()];
+			 pdb.otherIntBoundary.boundary.size() +
+                         pdb.periodicBoundary.boundary.size() * 2];
     int requestCounter = 0;
 
+
+    // === Send rank's boundary information. ===
+
     for (RankToBoundMap::iterator rankIt = pdb.myIntBoundary.boundary.begin();
 	 rankIt != pdb.myIntBoundary.boundary.end(); ++rankIt) {
 
@@ -36,6 +40,9 @@ namespace AMDiS {
 	pdb.mpiComm.Isend(buffer, nSendInt, MPI_INT, rankIt->first, 0);
     }
 
+
+    // === Receive information from other ranks about the interior boundaries. ===
+
     for (RankToBoundMap::iterator rankIt = pdb.otherIntBoundary.boundary.begin();
 	 rankIt != pdb.otherIntBoundary.boundary.end(); ++rankIt) {
       int nRecvInt = rankIt->second.size();
@@ -46,19 +53,48 @@ namespace AMDiS {
 	pdb.mpiComm.Irecv(buffer, nRecvInt, MPI_INT, rankIt->first, 0);
     }
 
-    MPI::Request::Waitall(requestCounter, request);
 
+    // === Lastly, do the same for the periodic boundaries. ===
+
+    for (RankToBoundMap::iterator rankIt = pdb.periodicBoundary.boundary.begin();
+	 rankIt != pdb.periodicBoundary.boundary.end(); ++rankIt) {
+      int nValues = rankIt->second.size();
+      int* sBuffer = new int[nValues];
+      for (int i = 0; i < nValues; i++)
+	sBuffer[i] = (rankIt->second)[i].rankObj.elIndex;
+
+      sendBuffers.push_back(sBuffer);
+
+      request[requestCounter++] =
+	pdb.mpiComm.Isend(sBuffer, nValues, MPI_INT, rankIt->first, 0);
+
+      int *rBuffer = new int[nValues];
+      recvBuffers.push_back(rBuffer);
+
+      request[requestCounter++] = 
+	pdb.mpiComm.Irecv(rBuffer, nValues, MPI_INT, rankIt->first, 0);      
+    }
+
+    // === Finish communication and delete all send buffers. ===
+
+    MPI::Request::Waitall(requestCounter, request);
     for (int i = 0; i < static_cast<int>(sendBuffers.size()); i++)
       delete [] sendBuffers[i];
 
+
+    // === Finally, check the results, i.e., whether the element indices at    ===
+    // === the boundaries fit together. First check the interior boundaries,   ===
+    // === and after that the periodic ones.                                   ===
+
     int bufCounter = 0;
     for (RankToBoundMap::iterator rankIt = pdb.otherIntBoundary.boundary.begin();
 	 rankIt != pdb.otherIntBoundary.boundary.end(); ++rankIt) {
 
-      TEST_EXIT(rankIt->second.size() == pdb.otherIntBoundary.boundary[rankIt->first].size())
+      TEST_EXIT(rankIt->second.size() == 
+		pdb.otherIntBoundary.boundary[rankIt->first].size())
 	("Boundaries does not fit together!\n");      
 
-      for (int i = 0; i < static_cast<int>(rankIt->second.size()); i++) {
+      for (unsigned int i = 0; i < rankIt->second.size(); i++) {
 	int elIndex1 = recvBuffers[bufCounter][i];
 	int elIndex2 = pdb.otherIntBoundary.boundary[rankIt->first][i].neighObj.elIndex;
 
@@ -67,12 +103,25 @@ namespace AMDiS {
 
       delete [] recvBuffers[bufCounter++];
     }
+
+
+    for (RankToBoundMap::iterator rankIt = pdb.periodicBoundary.boundary.begin();
+	 rankIt != pdb.periodicBoundary.boundary.end(); ++rankIt) {
+      for (unsigned int i = 0; i < rankIt->second.size(); i++) {
+	int elIndex1 = recvBuffers[bufCounter][i];
+	int elIndex2 = pdb.periodicBoundary.boundary[rankIt->first][i].neighObj.elIndex;
+
+	TEST_EXIT(elIndex1 == elIndex2)("Wrong element index at periodic boundary!\n");
+      }
+
+      delete [] recvBuffers[bufCounter++];
+    }
   }
 
 
-  void ParallelDomainDbg::testCommonDofs(MeshDistributor &pdb, bool printCoords)
+  void ParallelDebug::testCommonDofs(MeshDistributor &pdb, bool printCoords)
   {
-    FUNCNAME("ParallelDomainDbg::testCommonDofs()");
+    FUNCNAME("ParallelDebug::testCommonDofs()");
 
     clock_t first = clock();
 
@@ -216,9 +265,9 @@ namespace AMDiS {
   }
 
 
-  void ParallelDomainDbg::testAllElements(MeshDistributor &pdb)
+  void ParallelDebug::testAllElements(MeshDistributor &pdb)
   {
-    FUNCNAME("ParallelDomainDbg::testAllElements()");
+    FUNCNAME("ParallelDebug::testAllElements()");
 
     TraverseStack stack;
     ElInfo *elInfo = stack.traverseFirst(pdb.mesh, -1, Mesh::CALL_LEAF_EL);
@@ -247,11 +296,11 @@ namespace AMDiS {
   }
 
 
-  void ParallelDomainDbg::testDofContainerCommunication(MeshDistributor &pdb, 
-							RankToDofContainer &sendDofs,
-							RankToDofContainer &recvDofs)
+  void ParallelDebug::testDofContainerCommunication(MeshDistributor &pdb, 
+						    RankToDofContainer &sendDofs,
+						    RankToDofContainer &recvDofs)
   {
-    FUNCNAME("ParallelDomainDbg::testDofContainerCommunication()");
+    FUNCNAME("ParallelDebug::testDofContainerCommunication()");
 
     std::map<int, int> sendNumber;
     for (RankToDofContainer::iterator it = sendDofs.begin(); it != sendDofs.end(); ++it)
@@ -278,7 +327,7 @@ namespace AMDiS {
   }
 
 
-  void ParallelDomainDbg::printMapLocalGlobal(MeshDistributor &pdb, int rank)
+  void ParallelDebug::printMapLocalGlobal(MeshDistributor &pdb, int rank)
   {    
     if (rank == -1 || pdb.mpiRank == rank) {
       std::cout << "====== DOF MAP LOCAL -> GLOBAL ====== " << std::endl;
@@ -321,9 +370,9 @@ namespace AMDiS {
   }
 
 
-  void ParallelDomainDbg::printMapPeriodic(MeshDistributor &pdb, int rank)
+  void ParallelDebug::printMapPeriodic(MeshDistributor &pdb, int rank)
   {
-    FUNCNAME("ParallelDomainDbg::printMapPeriodic()");
+    FUNCNAME("ParallelDebug::printMapPeriodic()");
 
     typedef std::map<DegreeOfFreedom, DegreeOfFreedom> DofMapping;
     typedef std::map<DegreeOfFreedom, std::set<DegreeOfFreedom> > PeriodicDofMap;
@@ -355,10 +404,10 @@ namespace AMDiS {
   }
 
   
-  void ParallelDomainDbg::printRankDofs(MeshDistributor &pdb, 
-					int rank, 
-					DofContainer& rankDofs,
-					DofContainer& rankAllDofs)
+  void ParallelDebug::printRankDofs(MeshDistributor &pdb, 
+				    int rank, 
+				    DofContainer& rankDofs,
+				    DofContainer& rankAllDofs)
   {
     if (rank == -1 || pdb.mpiRank == rank) {
       std::cout << "====== RANK DOF INFORMATION ====== " << std::endl;
@@ -384,9 +433,9 @@ namespace AMDiS {
   }
 
 
-  void ParallelDomainDbg::printBoundaryInfo(MeshDistributor &pdb)
+  void ParallelDebug::printBoundaryInfo(MeshDistributor &pdb)
   {
-    FUNCNAME("ParallelDomainDbg::printBoundaryInfo()");
+    FUNCNAME("ParallelDebug::printBoundaryInfo()");
 
     for (InteriorBoundary::iterator it(pdb.myIntBoundary); !it.end(); ++it) {
       MSG("Rank owned boundary with rank %d: \n", it.getRank());
@@ -406,10 +455,10 @@ namespace AMDiS {
   }
 
 
-  void ParallelDomainDbg::writeCoordsFile(MeshDistributor &pdb, 
-					  std::string prefix, std::string postfix)
+  void ParallelDebug::writeCoordsFile(MeshDistributor &pdb, 
+				      std::string prefix, std::string postfix)
   {
-    FUNCNAME("ParallelDomainDbg::writeCoordsFile()");
+    FUNCNAME("ParallelDebug::writeCoordsFile()");
 
     std::stringstream filename;
     filename << prefix << "-" << pdb.mpiRank << "." << postfix;
diff --git a/AMDiS/src/parallel/ParallelDomainDbg.h b/AMDiS/src/parallel/ParallelDebug.h
similarity index 95%
rename from AMDiS/src/parallel/ParallelDomainDbg.h
rename to AMDiS/src/parallel/ParallelDebug.h
index d5523b91..1318f442 100644
--- a/AMDiS/src/parallel/ParallelDomainDbg.h
+++ b/AMDiS/src/parallel/ParallelDebug.h
@@ -17,23 +17,24 @@
 // ==                                                                        ==
 // ============================================================================
 
-/** \file ParallelDomainDbg.h */
+/** \file ParallelDebug.h */
 
-#ifndef AMDIS_PARALLELDOMAINDBG_H
-#define AMDIS_PARALLELDOMAINDBG_H
+#ifndef AMDIS_PARALLELDEBUG_H
+#define AMDIS_PARALLELDEBUG_H
 
-#include "parallel/ParallelDomainBase.h"
+#include "parallel/MeshDistributor.h"
 
 namespace AMDiS {
    
-  class ParallelDomainDbg 
+  class ParallelDebug 
   {
   protected:
     typedef MeshDistributor::RankToDofContainer RankToDofContainer;
 
   public:
     /** \brief
-     * Tests the interior boundaries on all ranks if their order fits together.
+     * Tests, on all ranks, whether the ordering of the interior and periodic
+     * boundaries fits together across rank pairs.
      *
      * \param[in]  pdb   Parallel problem definition used for debugging.
      */
-- 
GitLab