// ============================================================================
// ==                                                                        ==
// == AMDiS - Adaptive multidimensional simulations                          ==
// ==                                                                        ==
// ==  http://www.amdis-fem.org                                              ==
// ==                                                                        ==
// ============================================================================
//
// Software License for AMDiS
//
// Copyright (c) 2010 Dresden University of Technology 
// All rights reserved.
// Authors: Simon Vey, Thomas Witkowski et al.
//
// This file is part of AMDiS
//
// See also license.opensource.txt in the distribution.



/** \file ParallelCoarseSpaceMatVec.h */

#ifndef AMDIS_PARALLEL_COARSE_SPACE_MAT_VEC_H
#define AMDIS_PARALLEL_COARSE_SPACE_MAT_VEC_H

#include <vector>
#include <map>
#include <petsc.h>
#include "AMDiS_fwd.h"

namespace AMDiS {

  /**
   * This class implements a block-structured PETSc matrix/vector that separates
   * the discretization of the interior of the subdomains from the discretization
   * of the coarse space. Thus, there is one matrix block for the interior and
   * one matrix block for the coarse space, plus the coupling blocks. Some notes:
   * - For a single level domain decomposition method (e.g., the standard
   *   FETI-DP method), the interior matrix is local to the current rank and the
   *   coarse space matrix is a globally distributed matrix.
   * - Different coarse spaces are possible for different components. In
   *   this case, there are as many blocks as there are different coarse spaces
   *   plus one block for the interior matrix.
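   *
   * A minimal usage sketch (interiorMap, coarseSpaceMap, seqMat, the MPI
   * communicators, and the mesh distributor are placeholders provided by the
   * calling solver code, and only a single coarse space is assumed):
   * \code
   * ParallelCoarseSpaceMatVec pcs;
   * pcs.init(&interiorMap, coarseSpaceMap, 0,
   *          mpiCommLocal, mpiCommGlobal, &meshDistributor);
   * pcs.create(seqMat);                          // allocate the PETSc blocks
   * Mat &matIntInt    = pcs.getInteriorMat();    // interior x interior block
   * Mat &matCoarse    = pcs.getCoarseMat();      // coarse x coarse block
   * Mat &matIntCoarse = pcs.getIntCoarseMat();   // interior x coarse coupling
   * // ... add the locally assembled values to the blocks ...
   * pcs.assembly();                              // run PETSc assembly on all blocks
   * pcs.destroy();
   * \endcode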
   */
  class ParallelCoarseSpaceMatVec {
  public:
    ParallelCoarseSpaceMatVec();

    /// Creates matrices and vectors with respect to the coarse space.
    void init(ParallelDofMapping *interiorMap,
	      map<int, ParallelDofMapping*> coarseSpaceMap,
	      int subdomainLevel,
	      MPI::Intracomm mpiCommLocal,
	      MPI::Intracomm mpiCommGlobal,
	      MeshDistributor *meshDistributor);
    
    /// Creates the PETSc block matrices corresponding to the given sequential matrix.
    void create(Matrix<DOFMatrix*>& seqMat);

    /// Run PETSc's assembly routines.
    void assembly();

    /// Destroys all created PETSc matrix objects.
    void destroy();

    /// Checks whether the mesh has been changed since the last creation of the
    /// matrix structures.
    bool checkMeshChange(Matrix<DOFMatrix*> &mat,
			 bool localMatrix = false);

    inline Mat& getInteriorMat()
    {
      TEST_EXIT_DBG(mat.size() > 0)("No matrix data!\n");
      return mat[0][0];
    }

    inline Mat& getCoarseMat(int coarseSpace0 = 0, int coarseSpace1 = 0)
    {
      TEST_EXIT_DBG(mat.size() > coarseSpace0 + 1)("No matrix data!\n");
      TEST_EXIT_DBG(mat.size() > coarseSpace1 + 1)("No matrix data!\n");
      return mat[coarseSpace0 + 1][coarseSpace1 + 1];
    }

    inline Mat& getIntCoarseMat(int coarseSpace = 0)
    {
      TEST_EXIT_DBG(mat.size() > coarseSpace + 1)("No matrix data!\n");
      return mat[0][coarseSpace + 1];
    }

    inline Mat& getCoarseIntMat(int coarseSpace = 0)
    {
      TEST_EXIT_DBG(mat.size() > coarseSpace + 1)("No matrix data!\n");
      return mat[coarseSpace + 1][0];
    }

    inline Mat& getCoarseMatComp(int comp)
    {
      int matIndex = componentIthCoarseMap[comp] + 1;
      return mat[matIndex][matIndex];
    }

    inline Mat& getIntCoarseMatComp(int comp)
    {
      int matIndex = componentIthCoarseMap[comp] + 1;
      return mat[0][matIndex];
    }

    inline Mat& getCoarseIntMatComp(int comp)
    {
      int matIndex = componentIthCoarseMap[comp] + 1;
      return mat[matIndex][0];
    }

  private:
    /// Matrix of PETSc matrices: mat[0][0] is the interior discretization
    /// matrix, mat[i + 1][i + 1] the matrix of the i-th coarse space, and the
    /// off-diagonal blocks are the corresponding coupling matrices.
    vector<vector<Mat> > mat;
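
    // Block layout of \ref mat for the case of two distinct coarse spaces,
    // mirroring the accessor functions above (index 0 is the interior block,
    // index i + 1 the i-th unique coarse space):
    //
    //               interior     coarse 0     coarse 1
    //   interior    mat[0][0]    mat[0][1]    mat[0][2]
    //   coarse 0    mat[1][0]    mat[1][1]    mat[1][2]
    //   coarse 1    mat[2][0]    mat[2][1]    mat[2][2]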

    /// Non-zero structure of the matrices in \ref mat.
    vector<vector<MatrixNnzStructure> > nnz;

    /// Parallel DOF mapping of the interior of the subdomains.
    ParallelDofMapping *interiorMap;

    /// Parallel DOF mapping of the (optional) coarse space. Allows one to
    /// define different coarse spaces for different components.
    map<int, ParallelDofMapping*> coarseSpaceMap;
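
    // Hedged sketch of how coarseSpaceMap could be filled by the calling solver
    // (primalDofMap and lagrangeMap are hypothetical names, not part of this
    // class):
    //
    //   map<int, ParallelDofMapping*> coarseSpaceMap;
    //   coarseSpaceMap[0] = &primalDofMap;   // component 0
    //   coarseSpaceMap[1] = &primalDofMap;   // component 1 shares the same coarse space
    //   coarseSpaceMap[2] = &lagrangeMap;    // component 2 uses a different one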

    /// Stores for each component the index of its coarse space mapping in
    /// \ref uniqueCoarseMap.
    vector<int> componentIthCoarseMap;

    /// Stores each distinct coarse space DOF mapping of \ref coarseSpaceMap
    /// exactly once.
    vector<ParallelDofMapping*> uniqueCoarseMap;

    /// Pointer to the mesh distributor of the parallel computation.
    MeshDistributor *meshDistributor;

    /// Level of the subdomain in a (possibly multilevel) domain decomposition.
    int subdomainLevel;

    /// MPI communicator on the local (subdomain) level.
    MPI::Intracomm mpiCommLocal;

    /// MPI communicator on the global level.
    MPI::Intracomm mpiCommGlobal;

    /// Stores the mesh change index of the mesh for which the nnz structure was
    /// created. If the current mesh change index is higher than this value, the
    /// mesh has been changed, and a new nnz structure must be created for the
    /// PETSc matrices because the assembled matrix structure has changed as well.
    int lastMeshNnz;
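
    // Sketch of the intended staleness check (the member name
    // getLastMeshChangeIndex() on the mesh distributor is an assumption here):
    //
    //   if (meshDistributor->getLastMeshChangeIndex() > lastMeshNnz)
    //     ;  // mesh has changed, so the nnz structure must be recreated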

    /// If this variable is set to true, the non-zero matrix structure is
    /// created from scratch each time by calling \ref createPetscNnzStructure.
    /// This can be necessary if the number of non-zeros in the matrix varies
    /// even though the mesh does not change. This may happen if many operators
    /// use DOFVectors from the old timestep that contain many zeros due to some
    /// phase fields.
    bool alwaysCreateNnzStructure;
  };
}

#endif