MeshPartitioner.hpp
1 //@HEADER
2 /*
3 *******************************************************************************
4 
5  Copyright (C) 2004, 2005, 2007 EPFL, Politecnico di Milano, INRIA
6  Copyright (C) 2010 EPFL, Politecnico di Milano, Emory University
7 
8  This file is part of LifeV.
9 
10  LifeV is free software; you can redistribute it and/or modify
11  it under the terms of the GNU Lesser General Public License as published by
12  the Free Software Foundation, either version 3 of the License, or
13  (at your option) any later version.
14 
15  LifeV is distributed in the hope that it will be useful,
16  but WITHOUT ANY WARRANTY; without even the implied warranty of
17  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  Lesser General Public License for more details.
19 
20  You should have received a copy of the GNU Lesser General Public License
21  along with LifeV. If not, see <http://www.gnu.org/licenses/>.
22 
23 *******************************************************************************
24 */
25 //@HEADER
26 
27 /*!
28  @file
29  @brief Class that handles mesh partitioning
30 
31  @date 08-12-2010
32  @author Gilles Fourestey <gilles.fourestey@epfl.ch>
33 
34  @contributor Radu Popescu <radu.popescu@epfl.ch>
35  @contributor Mauro Perego <mperego@fsu.edu>
36  @maintainer Radu Popescu <radu.popescu@epfl.ch>
37 */
38 
39 #ifndef MESH_PARTITIONER_H
40 #define MESH_PARTITIONER_H 1
41 
42 #include <fstream>
43 #include <sstream>
44 
45 
46 #include <parmetis.h>
47 #include <Epetra_MpiComm.h>
48 
49 
50 #include <lifev/core/LifeV.hpp>
51 #include <lifev/core/util/Switch.hpp>
52 #include <lifev/core/mesh/MeshElementMarked.hpp>
53 #include <lifev/core/util/LifeDebug.hpp>
54 #include <lifev/core/fem/DOF.hpp>
55 #include <lifev/core/mesh/MeshEntity.hpp>
56 #include <lifev/core/util/LifeChrono.hpp>
57 #include <lifev/core/array/GhostHandler.hpp>
58 
59 namespace LifeV
60 {
61 
62 /*!
63  @brief Class that handles mesh partitioning
64  @author Gilles Fourestey gilles.fourestey@epfl.ch
65  @contributor Radu Popescu radu.popescu@epfl.ch
66 
67  This class implements the partitioning of a mesh using (par)Metis. Currently
68  it is possible to do both online and offline partitioning. In online
69  partitioning, each MPI rank stores only its own mesh partition. In offline
70  partitioning (run as a single process, on a workstation), all the mesh
71  partitions are stored and can be saved to disk, using the HDF5 filter,
72  for later use during a parallel run.
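 
 A minimal online-partitioning sketch (illustrative only: the construction of
 the unpartitioned mesh and of the communicator is assumed to happen
 elsewhere, e.g. through the mesh readers):
 
 @code
 std::shared_ptr<Epetra_Comm> comm ( new Epetra_MpiComm ( MPI_COMM_WORLD ) );
 std::shared_ptr<RegionMesh<LinearTetra> > fullMesh ( new RegionMesh<LinearTetra> ( comm ) );
 // ... fill fullMesh with the unpartitioned mesh ...
 MeshPartitioner<RegionMesh<LinearTetra> > partitioner ( fullMesh, comm );
 std::shared_ptr<RegionMesh<LinearTetra> > localMesh = partitioner.meshPartition();
 @endcode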
73 */
74 template<typename MeshType>
75 class MeshPartitioner
76 {
77 public:
 78  //! \name Public Types
 //@{
 79  // Make the template's types available to the outside
80  typedef MeshType mesh_Type;
81  typedef std::shared_ptr<MeshType> meshPtr_Type;
82  typedef std::vector<Int> idList_Type;
83  typedef std::vector<idList_Type> graph_Type;
84  typedef std::shared_ptr<graph_Type> graphPtr_Type;
85  typedef std::vector<meshPtr_Type> partMesh_Type;
86  typedef std::shared_ptr<partMesh_Type> partMeshPtr_Type;
87  //@}
88  //! \name Constructors & Destructors
89  //@{
90  //! Default empty constructor
91  MeshPartitioner();
92 
93  //! Constructor
94  /*!
95  This is a non-empty constructor. It takes as parameters the
96  unpartitioned mesh (reference), the Epetra_Comm object in
97  use (reference) and pointers to the Epetra interface and
98  repeated interface maps. The constructor initializes the
99  data members and calls a private method
100  MeshPartitioner::execute which handles the mesh partitioning.
101  \param mesh - Mesh& - the unpartitioned mesh
102  \param _comm - Epetra_Comm& - Epetra communicator object
103  \param interfaceMap - Epetra_Map*
104  \param interfaceMapRep - Epetra_Map*
105  */
106  MeshPartitioner ( meshPtr_Type& mesh,
107  std::shared_ptr<Epetra_Comm>& comm,
108  Epetra_Map* interfaceMap = 0,
109  Epetra_Map* interfaceMapRep = 0 );
110 
111  //! Empty destructor
112  virtual ~MeshPartitioner() {}
113  //@}
114 
115  //! \name Public Methods
116  //@{
117  //! Partition the mesh.
118  /*!
119  It takes as parameters the unpartitioned mesh (reference),
120  the Epetra_Comm object in use (reference) and pointers to
121  the Epetra interface and repeated interface maps.
122  The method initializes the data members and calls a private method
123  MeshPartitioner::execute which handles the mesh partitioning.
124  @param mesh - Mesh& - the unpartitioned mesh
125  @param _comm - Epetra_Comm& - Epetra communicator object
126  @param interfaceMap - Epetra_Map*
127  @param interfaceMapRep - Epetra_Map*
128  @note This method is meant to be used with the empty constructor.
129  */
130  void doPartition ( meshPtr_Type& mesh,
131  std::shared_ptr<Epetra_Comm>& comm,
132  Epetra_Map* interfaceMap = 0,
133  Epetra_Map* interfaceMapRep = 0 );
134 
135 
 136  //! Setup the partitioner for offline partitioning. To be used with the empty constructor.
137  /*!
138  Loads the parameters of the partitioning process from the simulation data file.
139  Allocates and initializes data members according to the number of partitions specified in
140  the data file.
141  \param numPartitions - UInt - the number of partitions desired, in the offline case
142  \param _comm - Epetra_Comm& - reference of the Epetra communicator used
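 
 A sketch of the intended offline workflow (illustrative; numPartitions, comm
 and fullMesh are assumed to be defined, and exporting the partitions, e.g.
 with the HDF5 filter, is handled elsewhere):
 
 \code
 MeshPartitioner<mesh_Type> partitioner;
 partitioner.setup ( numPartitions, comm );
 partitioner.attachUnpartitionedMesh ( fullMesh );
 partitioner.doPartitionGraph();
 partitioner.doPartitionMesh();
 // ... save each partition (*partitioner.meshPartitions())[k] to disk ...
 partitioner.releaseUnpartitionedMesh();
 \endcode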
143  */
144  void setup (UInt numPartitions, std::shared_ptr<Epetra_Comm> comm);
145 
146  //! Call update() method after loading the graph, to rebuild all data structures
147  /*!
 148  This method is to be called after the partitioned graph is loaded from an HDF5 file.
 149  It sets M_numPartitions and rebuilds the M_graphVertexLocations data member, required to do
150  mesh partitioning.
151  !!! This method should be the first one to be called after loading the graph. !!!
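 
 A sketch of the expected call sequence (illustrative; numPartitions and comm
 are assumed to be defined):
 \code
 MeshPartitioner<mesh_Type> partitioner;
 partitioner.setup ( numPartitions, comm );
 // ... load the partitioned graph from the HDF5 file into partitioner.elementDomains() ...
 partitioner.update(); // first call after loading the graph
 \endcode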
152  */
153  void update();
154 
155  //! Stores a pointer to the unpartitioned mesh in the M_originalMesh data member.
156  /*!
157  Stores a pointer to the unpartitioned mesh in the M_originalMesh data member.
158  \param mesh - Mesh& - the unpartitioned mesh (passed by reference)
159  \param interfaceMap - Epetra_Map* - pointer to the interface map (default value 0)
160  \param interfaceMapRep - Epetra_Map* - pointer to the repeated interface map (default value 0)
161  */
162  void attachUnpartitionedMesh (meshPtr_Type& mesh, Epetra_Map* interfaceMap = 0,
163  Epetra_Map* interfaceMapRep = 0);
164 
165  //! Releases the original unpartitioned mesh
166  /*!
 167  Releases the unpartitioned mesh so that it can be deleted, freeing a large amount of memory
168  in some cases.
169  */
170  void releaseUnpartitionedMesh();
171 
172  //! Executes the ParMETIS graph partitioning
173  /*!
174  Executes the ParMETIS graph partitioning
175  */
176  void doPartitionGraph();
177 
178  //! Builds the partitioned mesh using the partitioned graph
179  /*!
180  Builds the partitioned mesh using the partitioned graph
181  */
182  void doPartitionMesh();
183 
184  //! Initialize M_entityPID
185  void fillEntityPID();
186 
187  // Next method should be renamed and become a regular method
188  //! Return a pointer to the mesh partition with rank k
189  const meshPtr_Type& LIFEV_DEPRECATED ( getPartition (Int k) const )
190  {
191  return (*M_meshPartitions) [k];
192  }
193 
194  //! Clean structures that are not needed after partitioning
195  void cleanUp();
196 
197  //! Prints information about the state (data) of the object
198  void showMe (std::ostream& output = std::cout) const;
199  //@}
200 
201  //! \name Get Methods
202  //@{
203  //! Return a reference to M_vertexDistribution
204  const std::vector<Int>& vertexDistribution() const
205  {
206  return M_vertexDistribution;
207  }
208  //! Return a const pointer to M_meshPartitions[0] - for parallel
209  const meshPtr_Type& meshPartition() const
210  {
211  return (*M_meshPartitions) [0];
212  }
213  meshPtr_Type& meshPartition()
214  {
215  return (*M_meshPartitions) [0];
216  }
217  //! Return a pointer to M_meshPartitions
218  const partMeshPtr_Type& meshPartitions() const
219  {
220  return M_meshPartitions;
221  }
222  //! Return a pointer to M_graphVertexLocations
223  const std::vector<Int>& graphVertexLocations() const
224  {
225  return M_graphVertexLocations;
226  }
227  //! Return a pointer to M_elementDomains
228  const graphPtr_Type& elementDomains() const
229  {
230  return M_elementDomains;
231  }
232  graphPtr_Type& elementDomains()
233  {
234  return M_elementDomains;
235  }
236  //! Return a pointer to the communicator M_comm
237  const std::shared_ptr<Epetra_Comm>& comm() const
238  {
239  return M_comm;
240  }
241  //@}
242 
 243  //! @name Set methods
244  //@{
245 
 246  //! Set M_partitionOverlap
247  void setPartitionOverlap ( UInt const overlap )
248  {
249  M_partitionOverlap = overlap;
250  }
251 
252  //@}
253 
254 private:
255  // Private copy constructor and assignment operator. No implementation
256  MeshPartitioner (const MeshPartitioner&);
257  MeshPartitioner& operator= (const MeshPartitioner&);
258  //! Private Methods
259  //@{
260 
 261  //! Initialize the parameters with default values.
262  void init ();
263 
264  //! Execute mesh partitioning using the configured MPI processes (online partitioning)
265  /*!
 266  Executes the mesh partitioning using the number of MPI processes as the number of partitions.
267  Sets current mesh element parameters: M_elementVertices, M_elementRidges, M_elementFacets, M_facetVertices
268  Updates: M_elementDomains (indirectly)
269  Other data members are changed indirectly by calling other private methods.
270  */
271  void execute();
272  //! Build the graph vertex distribution vector
273  /*!
274  Updates the member M_vertexDistribution according to the number of processors to be
 275  used by ParMETIS (the number of MPI processes started).
276  \param numElements - UInt - number of elements in the mesh
277  */
278  void distributeElements (UInt numElements);
279  //! Find faces on the boundaries between domains (FSI)
280  /*!
281  Identifies the element faces that are common to both the fluid and the solid
282  meshes and creates a map between the faces that reside on the boundary between
283  the two meshes and the processors used. Updates the members M_repeatedFacet and
284  M_isOnProc.
285  */
286  void findRepeatedFacesFSI();
287  //! Partition the connectivity graph using ParMETIS
288  /*!
289  Partitions the connectivity graph using ParMETIS. The result is stored in the
290  member M_graphVertexLocations: this is a vector of integer values; its size is the number of
291  elements in the unpartitioned mesh. Each value represents the number of the
292  partition to which the element was assigned. Also creates M_elementDomains, the
293  vector of elements in each subdomain.
294  Updates: M_graphVertexLocations, M_elementDomains
295  \param numParts - unsigned int - number of partitions for the graph cutting process
296  */
297  void partitionConnectivityGraph (UInt numParts);
298 
299  //! Updates the map between elements and processors in FSI
300  /*!
301  Updates M_elementDomains during FSI modeling.
302  */
303  void matchFluidPartitionsFSI();
304  //! Redistribute elements among processes
305  /*!
306  Redistributes elements among processes, when needed, after the connectivity
307  graph partitioning phase. Updates M_elementDomains
308  */
309  void redistributeElements();
310  //! Construct local mesh
311  /*!
312  Constructs the data structures for the local mesh partition.
313  Updates M_localNodes, M_localRidges, M_localFacets, M_localElements,
314  M_globalToLocalNode.
315  */
316  void constructLocalMesh();
317  //! Construct vertices
318  /*!
319  Adds vertices to the partitioned mesh object. Updates M_nBoundaryPoints,
320  M_meshPartitions.
321  */
322  void constructNodes();
323  //! Construct volumes
324  /*!
325  Adds volumes to the partitioned mesh object. Updates M_globalToLocalElement,
326  M_meshPartitions.
327  */
328  void constructElements();
329  //! Construct ridges
330  /*!
331  Adds ridges to the partitioned mesh object. Updates M_nBoundaryRidges,
332  M_meshPartitions.
333  */
334  void constructRidges();
335  //! Construct facets
336  /*!
337  Adds facets to the partitioned mesh object. Updates M_nBoundaryFacets,
338  M_meshPartitions.
339  */
340  void constructFacets();
341  //! Final setup of local mesh
342  /*!
343  Updates the partitioned mesh object data members after adding the mesh
344  elements (vertices, ridges, facets, volumes).
345  Updates M_meshPartitions.
346  */
347  void finalSetup();
348 
349  //! Mark ghost entities
350  /*!
351  Mark all ghost entities that have been added for overlapping partitions
352  with the EntityFlags::GHOST flag in order to properly build the dof
353  map in DOF::GlobalElements().
354  */
355  void markGhostEntities();
356 
357  //@}
358  //! Private Data Members
359  //@{
 360  UInt M_numPartitions; //!< 1 for online (parallel) partitioning, the number of desired partitions for offline (serial)
361  partMeshPtr_Type M_meshPartitions;
362  std::vector<Int> M_vertexDistribution;
363  std::vector<Int> M_adjacencyGraphKeys;
364  std::vector<Int> M_adjacencyGraphValues;
365  std::shared_ptr<Epetra_Comm> M_comm;
366  Int M_me;
367 
368  std::vector<std::vector<Int> > M_localNodes;
369  std::vector<std::set<Int> > M_localRidges;
370  std::vector<std::set<Int> > M_localFacets;
371  std::vector<std::vector<Int> > M_localElements;
372  std::vector<std::map<Int, Int> > M_globalToLocalNode;
373  std::vector<std::map<Int, Int> > M_globalToLocalElement;
374  std::vector<UInt> M_nBoundaryPoints;
375  std::vector<UInt> M_nBoundaryRidges;
376  std::vector<UInt> M_nBoundaryFacets;
377  // The following are utility variables used throughout the partitioning
378  // process
379  meshPtr_Type M_originalMesh;
380  Epetra_Map* M_interfaceMap;
381  Epetra_Map* M_interfaceMapRep;
 382  //! Element shape parameters, set in init() from the mesh element and facet shapes
383  UInt M_elementVertices;
384  UInt M_elementFacets;
385  UInt M_elementRidges;
386  UInt M_facetVertices;
387  std::shared_ptr<std::vector<Int> > M_repeatedFacet;
388  std::shared_ptr<std::vector<Int> > M_isOnProc;
389  std::vector<Int> M_graphVertexLocations;
390  graphPtr_Type M_elementDomains;
 391  bool M_serialMode; // true when running in serial (offline) partitioning mode
392  UInt M_partitionOverlap;
393 
394  //! Store ownership for each entity, subdivided by entity type
395  struct EntityPIDList
396  {
397  idList_Type elements;
398  idList_Type facets;
399  idList_Type ridges;
400  idList_Type points;
401  } M_entityPID;
402 
403  //@}
404 }; // class MeshPartitioner
405 
406 //
407 // IMPLEMENTATION
408 //
409 
410 
411 // =================================
412 // Constructors and destructor
413 // =================================
414 
415 template < typename MeshType >
416 MeshPartitioner < MeshType >::
417 MeshPartitioner()
418 {
419  init ();
420 } // constructor
421 
422 template < typename MeshType >
423 MeshPartitioner < MeshType >::
424 MeshPartitioner ( meshPtr_Type& mesh, std::shared_ptr<Epetra_Comm>& comm,
425  Epetra_Map* interfaceMap, Epetra_Map* interfaceMapRep)
426 {
427  init ();
428  doPartition ( mesh, comm, interfaceMap, interfaceMapRep );
429 } // constructor
430 
431 template < typename MeshType >
432 void
433 MeshPartitioner < MeshType >::
434 init ()
435 {
436  M_numPartitions = 1;
437  M_localNodes.resize ( M_numPartitions );
438  M_localRidges.resize ( M_numPartitions );
439  M_localFacets.resize ( M_numPartitions );
440  M_localElements.resize ( M_numPartitions );
441  M_globalToLocalNode.resize ( M_numPartitions );
442  M_globalToLocalElement.resize ( M_numPartitions );
443  M_nBoundaryPoints.resize ( M_numPartitions );
444  M_nBoundaryRidges.resize ( M_numPartitions );
445  M_nBoundaryFacets.resize ( M_numPartitions );
446  M_elementDomains.reset ( new graph_Type );
447  M_serialMode = false;
448  M_partitionOverlap = 0;
449 
450  /*
 451  Sets the element parameters (vertices, facets, ridges and number of vertices on
 452  each facet) according to the type of mesh element used (MeshType::elementShape_Type).
 453  Updates M_elementVertices, M_elementFacets, M_elementRidges, M_facetVertices.
454  */
455  M_elementVertices = MeshType::elementShape_Type::S_numVertices;
456  M_elementFacets = MeshType::elementShape_Type::S_numFacets;
457  M_elementRidges = MeshType::elementShape_Type::S_numRidges;
458  M_facetVertices = MeshType::facetShape_Type::S_numVertices;
459 
460 } // init
461 
462 template < typename MeshType >
463 void
464 MeshPartitioner < MeshType >::
465 doPartition ( meshPtr_Type& mesh, std::shared_ptr<Epetra_Comm>& comm,
466  Epetra_Map* interfaceMap, Epetra_Map* interfaceMapRep )
467 {
468  M_comm = comm;
469  M_originalMesh = mesh;
470  M_interfaceMap = interfaceMap;
471  M_interfaceMapRep = interfaceMapRep;
472 
473  M_me = M_comm->MyPID();
474 
475  meshPtr_Type newMesh ( new MeshType ( comm ) );
476  newMesh->setIsPartitioned ( true );
477  M_meshPartitions.reset ( new partMesh_Type ( M_numPartitions, newMesh ) );
478  newMesh.reset();
479 
480  execute();
481 
 482 } // doPartition
483 
484 // =================================
485 // Public methods
486 // =================================
487 
488 template<typename MeshType>
489 void MeshPartitioner<MeshType>::setup (UInt numPartitions, std::shared_ptr<Epetra_Comm> comm)
490 {
491  M_serialMode = true;
492  M_comm = comm;
493  M_me = M_comm->MyPID();
494 
495  M_numPartitions = numPartitions;
496 
497  M_meshPartitions.reset (new partMesh_Type);
498  meshPtr_Type newMesh;
499  for (UInt i = 0; i < M_numPartitions; ++i)
500  {
501  newMesh.reset ( new MeshType ( comm ) );
502  newMesh->setIsPartitioned ( true );
503  M_meshPartitions->push_back (newMesh);
504  }
505  newMesh.reset();
506 
507  M_elementDomains.reset (new graph_Type);
508 
509  M_localNodes.resize (M_numPartitions);
510  M_localRidges.resize (M_numPartitions);
511  M_localFacets.resize (M_numPartitions);
512  M_localElements.resize (M_numPartitions);
513  M_globalToLocalNode.resize (M_numPartitions);
514  M_globalToLocalElement.resize (M_numPartitions);
515  M_nBoundaryPoints.resize (M_numPartitions);
516  M_nBoundaryRidges.resize (M_numPartitions);
517  M_nBoundaryFacets.resize (M_numPartitions);
518 }
519 
520 template<typename MeshType>
521 void MeshPartitioner<MeshType>::update()
522 {
523  M_numPartitions = M_elementDomains->size();
524 
525  Int numElements = 0;
526 
527  for (UInt i = 0; i < M_numPartitions; ++i)
528  {
529  numElements += (*M_elementDomains) [i].size();
530  }
531 
532  // Rebuild M_graphVertexLocations
533  M_graphVertexLocations.resize (numElements);
534  for (std::vector<std::vector<Int> >::iterator it1 = M_elementDomains->begin();
535  it1 != M_elementDomains->end(); ++it1)
536  {
537  for (std::vector<Int>::iterator it2 = it1->begin();
538  it2 != it1->end(); ++it2)
539  {
540  M_graphVertexLocations[*it2] = static_cast<Int> ( (it1 - M_elementDomains->begin() ) );
541  }
542  }
543 }
544 
545 template<typename MeshType>
546 void MeshPartitioner<MeshType>::attachUnpartitionedMesh (meshPtr_Type& mesh,
547  Epetra_Map* interfaceMap,
548  Epetra_Map* interfaceMapRep)
549 {
550  M_originalMesh = mesh;
551  M_interfaceMap = interfaceMap;
552  M_interfaceMapRep = interfaceMapRep;
553 }
554 
555 template<typename MeshType>
556 void MeshPartitioner<MeshType>::releaseUnpartitionedMesh()
557 {
558  M_originalMesh.reset();
559  M_interfaceMap = 0;
560  M_interfaceMapRep = 0;
561 }
562 
563 template<typename MeshType>
564 void MeshPartitioner<MeshType>::doPartitionGraph()
565 {
566  distributeElements (M_originalMesh->numElements() );
567  if (M_interfaceMap)
568  {
569  findRepeatedFacesFSI();
570  }
571  partitionConnectivityGraph (M_numPartitions);
572  if (M_interfaceMap)
573  {
574  matchFluidPartitionsFSI();
575  }
576 }
577 
578 template<typename MeshType>
579 void MeshPartitioner<MeshType>::doPartitionMesh()
580 {
581 
582  // ***********************
583  // local mesh construction
584  // ***********************
585  constructLocalMesh();
586 
587  // ******************
588  // nodes construction
589  // ******************
590  constructNodes();
591 
592  // ********************
593  // element construction
594  // ********************
595  constructElements();
596 
597  // ******************
598  // ridges construction
599  // ******************
600  constructRidges();
601 
 602  // new facets can be built only after all local elements are complete, in order to get proper ghost facet data
603  M_comm->Barrier();
604 
605  // ******************
606  // faces construction
607  // ******************
608  constructFacets();
609 
610  // ******************
611  // final setup
612  // ******************
613  finalSetup();
614 
615  markGhostEntities();
616 }
617 
618 template<typename MeshType>
619 void MeshPartitioner<MeshType>::showMe (std::ostream& output) const
620 {
621  output << "Number of partitions: " << M_numPartitions << std::endl;
 622  output << "Serial mode: " << M_serialMode << std::endl;
623 }
624 
625 // =================================
626 // Private methods
627 // =================================
628 
629 template<typename MeshType>
630 void MeshPartitioner<MeshType>::distributeElements (UInt numElements)
631 {
632  // ParMETIS is able to work in parallel: how many processors does it have at hand?
633  Int numProcessors = M_comm->NumProc();
634  M_me = M_comm->MyPID();
635 
 636  // CAREFUL: ParMETIS works on a graph abstraction.
 637  // A graph is built over the data structure to be split, each graph vertex
 638  // being a mesh element; hereafter a "vertex" means a _graph_ vertex, i.e. a mesh element
639  M_vertexDistribution.resize (numProcessors + 1);
640  M_vertexDistribution[0] = 0;
641 
642  UInt k = numElements;
643 
644  // Evenly distributed graph vertices
645  for (Int i = 0; i < numProcessors; ++i)
646  {
647  UInt l = k / (numProcessors - i);
648  M_vertexDistribution[i + 1] = M_vertexDistribution[i] + l;
649  k -= l;
650  }
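 // Worked example: with numElements = 10 and numProcessors = 3 the loop above
 // yields M_vertexDistribution = [0, 3, 6, 10], i.e. graph vertices 0-2 go to
 // process 0, 3-5 to process 1 and 6-9 to process 2.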
651  ASSERT (k == 0, "At this point we should have 0 volumes left") ;
652 }
653 
654 template<typename MeshType>
655 void MeshPartitioner<MeshType>::findRepeatedFacesFSI()
656 {
657  std::vector<Int> myRepeatedFacet; // used for the solid partitioning
658  std::shared_ptr<std::vector<Int> > myIsOnProc; // used for the solid partitioning
659 
660  myIsOnProc.reset (new std::vector<Int> (M_originalMesh->numElements() ) );
661 
662  bool myFacetRep;
663  bool myFacet (false);
664  short count;
665  for (UInt h = 0; h < M_originalMesh->numElements(); ++h)
666  {
667  (*myIsOnProc) [h] = -1;
668  }
669 
 670  // This loop runs over the whole unpartitioned mesh,
 671  // so it is expensive and not scalable.
 672  // Ideally, this part should be done offline.
673 
674  for (UInt ie = 0; ie < M_originalMesh->numElements(); ++ie)
675  {
676  for (UInt ifacet = 0; ifacet < M_elementFacets; ++ifacet)
677  {
678  UInt facet = M_originalMesh->localFacetId (ie, ifacet);
679  UInt vol = M_originalMesh->facet (facet).firstAdjacentElementIdentity();
680  if (vol == ie)
681  {
682  vol = M_originalMesh->facet (facet).secondAdjacentElementIdentity();
683  }
684  if (vol != NotAnId)
685  {
686  myFacet = false;
687  myFacetRep = false;
688  count = 0;
689  for (Int ipoint = 0; ipoint < static_cast<Int> (M_facetVertices); ++ipoint) // vertex-based dofs
690  {
691  myFacetRep = ( (M_interfaceMap->LID ( static_cast<EpetraInt_Type> (M_originalMesh->facet (facet).point (ipoint).id() ) )
692  /* first is fluid */ == -1) &&
693  (M_interfaceMapRep->LID ( static_cast<EpetraInt_Type> (M_originalMesh->facet (facet).point (ipoint).id() ) )
694  /* first is fluid */ != -1) );
695  myFacet = myFacet ||
696  (M_interfaceMap->LID ( static_cast<EpetraInt_Type> (M_originalMesh->facet (facet).point (ipoint).id() ) ) != -1);
697  if (myFacetRep)
698  {
699  ++count;
700  }
701  }
702  if (count > 1)
703  {
704  myRepeatedFacet.push_back (1);
705  }
706  else
707  {
708  myRepeatedFacet.push_back (0);
709  }
710  }
711  if (myFacet)
712  {
713  (*myIsOnProc) [ie] = M_me;
714  }
715  }
716  }
717 
718  M_repeatedFacet.reset (new std::vector<Int> (myRepeatedFacet.size() ) );
719  M_isOnProc.reset (new std::vector<Int> (*myIsOnProc) );
720 
 721  // Lots of communication here!!
722  std::shared_ptr<Epetra_MpiComm> mpiComm = std::dynamic_pointer_cast <Epetra_MpiComm> ( M_comm );
723  MPI_Allreduce ( &myRepeatedFacet[0], & (*M_repeatedFacet) [0], myRepeatedFacet.size(),
724  MPI_INT, MPI_SUM, mpiComm->Comm() );
725  MPI_Allreduce ( & (*myIsOnProc) [0], & (*M_isOnProc) [0], myIsOnProc->size(),
726  MPI_INT, MPI_MAX, mpiComm->Comm() );
727 }
728 
729 template<typename MeshType>
730 void MeshPartitioner<MeshType>::partitionConnectivityGraph (UInt numParts)
731 {
 732  // This array's size is equal to the total number of graph vertices:
733  // at the end of the partitioning process, "M_graphVertexLocations" will contain the partitioning array:
734  // M_graphVertexLocations[m] = n; means that graph vertex m belongs to subdomain n
735  M_graphVertexLocations.resize ( M_vertexDistribution[M_comm->NumProc()] - M_vertexDistribution[0], M_comm->NumProc() );
736 
 737  // Now each processor will take care of its own graph vertices (i.e. mesh elements).
 738  // There is no guarantee on how neighboring elements are distributed across processors,
 739  // since so far we have just split the set of elements based on their IDs.
 740  // Here we build up the neighbor arrays.
741 
742  UInt localStart = M_vertexDistribution[M_me];
743  UInt localEnd = M_vertexDistribution[M_me + 1];
744 
745  // this vector contains the weights for the edges of the graph,
 746  // it is left empty if it is not used.
747  std::vector<Int> graphEdgeWeights;
748 
749  M_adjacencyGraphKeys.resize (0);
750  M_adjacencyGraphKeys.push_back (0);
751 
752  UInt sum = 0;
753 
754  for (UInt ie = localStart; ie < localEnd; ++ie)
755  {
756  for (UInt ifacet = 0; ifacet < M_elementFacets; ++ifacet)
757  {
758  // global ID of the ifacet-th facet in element ie
759  UInt facet = M_originalMesh->localFacetId (ie, ifacet);
760  // first adjacent element to face "facet"
761  UInt elem = M_originalMesh->facet (facet).firstAdjacentElementIdentity();
762  if (elem == ie)
763  {
764  elem = M_originalMesh->facet (facet).secondAdjacentElementIdentity();
765  }
766  if (elem != NotAnId)
767  {
 768  // this is the adjacency list:
 769  // for each graph vertex, simply push back the IDs of its neighbors
770  M_adjacencyGraphValues.push_back (elem);
771  ++sum;
772  if (M_interfaceMap) // if I'm partitioning the solid in FSI
773  {
774  if ( (*M_repeatedFacet) [sum])
775  {
776  graphEdgeWeights.push_back (0);
777  }
778  else
779  {
780  graphEdgeWeights.push_back (10);
781  }
782  }
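 // NOTE: a zero weight makes an edge cheap to cut, so the partitioner is
 // encouraged to place subdomain boundaries along facets repeated on the
 // FSI interface, while the weight 10 discourages cutting elsewhere.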
783  }
784  }
785  // this is the list of "keys" to access M_adjacencyGraphValues
786  // graph element i has neighbors M_adjacencyGraphValues[ k ],
787  // with M_adjacencyGraphKeys[i] <= k < M_adjacencyGraphKeys[i+1]
788  M_adjacencyGraphKeys.push_back (sum);
789  }
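 // Worked example of the CSR-like layout built above: if element 0 has
 // neighbors {1, 3} and element 1 has neighbors {0, 2}, then
 // M_adjacencyGraphValues = [1, 3, 0, 2] and M_adjacencyGraphKeys = [0, 2, 4].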
790 
791  // **************
792  // parMetis part
793 
794  // this array is to be used for weighted vertices on the graph:
795  // usually we will set it to NULL
796 
797  Int* weightVector = 0;
798 
799  Int weightFlag;
800  if (M_interfaceMap)
801  {
802  weightFlag = 1;
803  }
804  else
805  {
806  weightFlag = 0;
807  }
808 
809  Int ncon = 1;
810  Int numflag = 0;
811 
812  Int cutGraphEdges; // here will be stored the number of edges cut in the partitioning process
813 
814  // additional options
815  std::vector<Int> options (3, 0);
816  options[0] = 1; // means that additional options are actually passed
817  options[1] = 3; // level of information to be returned during execution (see ParMETIS's defs.h file)
818  options[2] = 1; // random number seed for the ParMETIS routine
819 
820  // fraction of vertex weight to be distributed to each subdomain.
821  // here we want the subdomains to be of the same size
822  std::vector<float> tpwgts (ncon * numParts, 1. / numParts);
823  // imbalance tolerance for each vertex weight
824  std::vector<float> ubvec (ncon, 1.05);
825 
826  std::shared_ptr<Epetra_MpiComm> mpiComm = std::dynamic_pointer_cast <Epetra_MpiComm> (M_comm);
827  MPI_Comm MPIcomm = mpiComm->Comm();
828 
829  Int nprocs;
830  MPI_Comm_size (MPIcomm, &nprocs);
831 
832  /*
833  (from ParMETIS v 3.1 manual)
834  This routine is used to compute a k-way partitioning of a graph
835  on p processors using the multilevel k-way multi-constraint
836  partitioning algorithm.
837  */
838 
839  Int numberParts = (Int) numParts;
840 
841  Int* adjwgtPtr (0);
842  if (graphEdgeWeights.size() > 0)
843  {
844  adjwgtPtr = static_cast<Int*> (&graphEdgeWeights[0]);
845  }
846  ParMETIS_V3_PartKway (static_cast<Int*> (&M_vertexDistribution[0]),
847  static_cast<Int*> (&M_adjacencyGraphKeys[0]),
848  static_cast<Int*> (&M_adjacencyGraphValues[0]),
849  weightVector, adjwgtPtr, &weightFlag, &numflag,
850  &ncon, &numberParts, &tpwgts[0], &ubvec[0],
851  &options[0], &cutGraphEdges, &M_graphVertexLocations[localStart],
852  &MPIcomm);
853 
854  M_comm->Barrier();
855 
856  Int nProc = M_comm->NumProc();
857 
858  // distribute the resulting partitioning stored in M_graphVertexLocations to all processors
859  for ( Int proc = 0; proc < nProc; proc++ )
860  {
861  UInt procStart = M_vertexDistribution[ proc ];
862  UInt procLength = M_vertexDistribution[ proc + 1 ] - M_vertexDistribution[ proc ];
863  M_comm->Broadcast ( &M_graphVertexLocations[ procStart ], procLength, proc );
864  }
865 
866  // this is a vector of subdomains: each component is
867  // the list of vertices belonging to the specific subdomain
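 // e.g. with M_graphVertexLocations = [0, 1, 0, 1] the loop below produces
 // (*M_elementDomains)[0] = {0, 2} and (*M_elementDomains)[1] = {1, 3}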
868  (*M_elementDomains).resize (numParts);
869 
870  // cycling on locally stored vertices
871  for (UInt ii = 0; ii < M_graphVertexLocations.size(); ++ii)
872  {
873  // here we are associating the vertex global ID to the subdomain ID
874  (*M_elementDomains) [ M_graphVertexLocations[ ii ] ].push_back ( ii );
875  }
876 }
877 
878 template<typename MeshType>
879 void MeshPartitioner<MeshType>::matchFluidPartitionsFSI()
880 {
881  std::shared_ptr<Epetra_MpiComm> mpiComm = std::dynamic_pointer_cast <Epetra_MpiComm> (M_comm);
882  MPI_Comm MPIcomm = mpiComm->Comm();
883  Int numProcesses;
884  MPI_Comm_size (MPIcomm, &numProcesses);
885 
886  std::vector<Int> procOrder (numProcesses);
887  std::vector<std::vector<UInt> > myMatchesForProc (numProcesses);
888  std::vector<std::vector<UInt> > matchesForProc (numProcesses);
889  std::vector<bool> orderingError (numProcesses);
890 
891  for (Int i = 0; i < numProcesses ; ++i)
892  {
893  orderingError[i] = false;
894  for (Int j = 0; j < numProcesses ; ++j)
895  {
896  myMatchesForProc[i].push_back (0);
897  matchesForProc[i].push_back (0);
898  }
899  }
900 
901  for (UInt kk = 0; kk < M_graphVertexLocations.size(); ++kk)
902  {
903  if ( (*M_isOnProc) [kk + M_vertexDistribution[M_me]] != -1)
904  {
905  ++myMatchesForProc[M_graphVertexLocations[kk]][ (*M_isOnProc) [kk + M_vertexDistribution[M_me]]];
906  }
907  }
908 
909  for (UInt j = 0; (Int) j < numProcesses; ++j)
910  {
911  MPI_Allreduce (&myMatchesForProc[j][0], &matchesForProc[j][0], numProcesses,
912  MPI_INT, MPI_SUM, MPIcomm);
913  }
914 
915  M_comm->Barrier();
916 
917  Int suitableProcess = -1;
918  UInt max = 0;
919 
920  for (Int ii = 0; ii < numProcesses; ++ii)
921  {
922  if (matchesForProc[M_me][ii] > max)
923  {
924  suitableProcess = ii;
925  max = matchesForProc[M_me][ii];
926  }
927  }
928 
929  ASSERT (suitableProcess != -1, "one partition is without interface nodes!");
930  procOrder[M_me] = suitableProcess;
931 
932  M_comm->Barrier();
933 
934  std::vector<UInt> maxs (numProcesses);
935  maxs[M_me] = max;
936  for (Int j = 0; j < numProcesses ; ++j) // Allgather
937  {
 938  MPI_Bcast (&maxs[j], 1, MPI_UNSIGNED, j, MPIcomm); // maxs holds UInt values
939  }
940 
941  std::vector<std::pair<UInt, Int> > procIndex (numProcesses);
942  for (Int k = 0; k < numProcesses; ++k)
943  {
944  procIndex[k] = std::make_pair ( maxs[k], k);
945  }
946 
947  std::sort (procIndex.begin(), procIndex.end() /*, &booleanCondition::reordering*/);
948 
 949  // distribute procOrder to all processes (an Allgather emulated with broadcasts)
 950  for (Int j = 0; j < numProcesses ; ++j)
 951  {
 952  MPI_Bcast ( &procOrder[j], 1, MPI_INT, j, MPIcomm);
 953  }
959 
960  std::vector< std::vector<Int> > locProc2 ( (*M_elementDomains) );
961  for (Int j = numProcesses - 1; j >= 0 ; --j)
962  {
963  if (orderingError[procOrder[procIndex[j].second]] == false)
964  {
965  (*M_elementDomains) [procOrder[procIndex[j].second]] = locProc2[procIndex[j].second];
966  }
967  else
968  {
 969  std::cout << "Ordering error when assigning processor "
 970  << M_me << " to the partition:" << std::endl
 971  << " ParMETIS did a bad job." << std::endl;
972  for (Int i = numProcesses - 1; i >= 0; --i)
973  {
974  if (orderingError[procIndex[i].second] == false) // means that i is the first
975  // proc not assigned
976  {
977  procOrder[procIndex[j].second] = procIndex[i].second;
978  (*M_elementDomains) [procIndex[i].second] = locProc2[procIndex[j].second];
979  break;
980  }
981  }
982  }
983  orderingError[procOrder[procIndex[j].second]] = true;
984  }
985 }
986 
987 template<typename MeshType>
988 void MeshPartitioner<MeshType>::redistributeElements()
989 {
990  std::shared_ptr<Epetra_MpiComm> mpiComm = std::dynamic_pointer_cast <Epetra_MpiComm> (M_comm);
991  MPI_Comm MPIcomm = mpiComm->Comm();
992  Int numProcesses;
993  MPI_Comm_size (MPIcomm, &numProcesses);
994 
995  Int maxInt (1000);
996  std::vector<Int> sendSize ( numProcesses );
997  std::vector<Int> receiveSize ( numProcesses );
998  // cycling on subdomains
999  // TODO: Matteo please comment this part :)
1000 
1001  MPI_Status status;
1002  Int size;
1003 
1004  // MPI_Status recv_status;
1005  // MPI_Request send_request;
1006 
1007  for (Int iproc = 0; iproc < numProcesses; ++iproc)
1008  {
1009  sendSize[iproc] = (*M_elementDomains) [iproc].size();
1010  }
1011  MPI_Alltoall ( &sendSize[ 0 ], 1, MPI_INT, &receiveSize[ 0 ], 1, MPI_INT, MPIcomm );
1012 
1013  for (Int iproc = 0; iproc < numProcesses; ++iproc)
1014  {
1015  if (static_cast<Int> (M_me) != iproc)
1016  {
1017  size = sendSize[iproc];
1018  // workaround for huge data to be passed
1019  if (size > maxInt)
1020  {
1021  Int incr = 1 ;
1022  Int pos = 0;
1023  Int sizePart = size;
1024 
1025  // divide the whole data set into smaller packets
1026  while (sizePart > maxInt)
1027  {
1028  incr += 1;
1029  sizePart = size / incr;
1030  }
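 // Worked example: size = 2500 with maxInt = 1000 gives incr = 3 and
 // sizePart = 833: three packets are sent starting at pos = 0, 833 and 1666,
 // followed by a remainder of 2500 % 3 = 1 entry.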
1031 
1032  MPI_Send (&incr, 1, MPI_INT, iproc, 20, MPIcomm);
1033  MPI_Send (&sizePart, 1, MPI_INT, iproc, 30, MPIcomm);
1034 
1035  for (Int kk = 0; kk < incr; ++kk)
1036  {
1037  MPI_Send (&pos, 1, MPI_INT, iproc, 100 + kk, MPIcomm);
1038  MPI_Send (& (*M_elementDomains) [iproc][pos], sizePart, MPI_INT, iproc,
1039  5000000 + kk, MPIcomm);
1040  pos = pos + sizePart;
1041  }
1042 
1043  Int resto = size % incr;
1044 
1045  MPI_Send (&resto, 1, MPI_INT, iproc, 80, MPIcomm);
1046 
1047  if (resto != 0)
1048  {
1049  MPI_Send (&pos, 1, MPI_INT, iproc, 40, MPIcomm);
1050  MPI_Send (& (*M_elementDomains) [iproc][pos], resto, MPI_INT, iproc, 50, MPIcomm);
1051  }
1052  }
1053  else
1054  {
1055  if (size != 0)
1056  {
1057  MPI_Send (& (*M_elementDomains) [iproc][0], size, MPI_INT, iproc, 60, MPIcomm);
1058  }
1059  }
1060  }
1061  else
1062  {
1063  for (Int jproc = 0; jproc < numProcesses; ++jproc)
1064  {
1065  if (jproc != iproc)
1066  {
1067  size = receiveSize[jproc];
1068  std::vector<Int> stack (size, 0);
1069 
1070  if (size > maxInt)
1071  {
1072  Int sizePart, pos, incr;
1073 
1074  MPI_Recv (&incr, 1, MPI_INT, jproc, 20, MPIcomm, &status);
1075  MPI_Recv (&sizePart, 1, MPI_INT, jproc, 30, MPIcomm, &status);
1076 
1077  for (Int kk = 0; kk < incr; ++kk)
1078  {
1079  MPI_Recv (&pos, 1, MPI_INT, jproc, 100 + kk, MPIcomm, &status);
1080  MPI_Recv (&stack[pos], sizePart , MPI_INT, jproc, 5000000 + kk, MPIcomm, &status);
1081  }
1082  Int resto = 0;
1083  MPI_Recv (&resto, 1, MPI_INT, jproc, 80, MPIcomm, &status);
1084 
1085  if (resto != 0)
1086  {
1087  MPI_Recv (&pos, 1, MPI_INT, jproc, 40, MPIcomm, &status);
1088  MPI_Recv (&stack[pos], resto, MPI_INT, jproc, 50, MPIcomm, &status);
1089  }
1090  }
1091  else
1092  {
1093  if (size != 0)
1094  {
1095  MPI_Recv (&stack[0], size , MPI_INT, jproc, 60, MPIcomm, &status);
1096  }
1097  }
1098  for (Int jj = 0; jj < size; ++jj)
1099  {
1100  (*M_elementDomains) [M_me].push_back (stack[jj]);
1101  }
1102  }
1103  }
1104  }
1105  }
1106 }
1107 
1108 template<typename MeshType>
1109 void MeshPartitioner<MeshType>::constructLocalMesh()
1110 {
1111  if (!M_me)
1112  {
1113  std::cout << "Building local mesh ..." << std::endl;
1114  }
1115 
1116  std::map<Int, Int>::iterator im;
1117 
1118  Int count = 0;
1119  UInt ielem;
1120  UInt inode;
1121 
1122  for (UInt i = 0; i < M_numPartitions; ++i)
1123  {
1124  count = 0;
1125  // cycle on local element's ID
1126 
1127  UInt me = M_serialMode ? i : M_me;
1128 
1129  for (UInt jj = 0; jj < (*M_elementDomains) [me].size(); ++jj)
1130  {
1131  ielem = (*M_elementDomains) [me][jj];
1132  M_localElements[i].push_back (ielem);
1133 
1134  // cycle on element's vertices
1135  for (UInt ii = 0; ii < M_elementVertices; ++ii)
1136  {
1137  inode = M_originalMesh->element (ielem).point (ii).id();
1138  im = M_globalToLocalNode[i].find (inode);
1139 
1140  // if the node is not yet present in the list of local vertices, then add it
1141  if (im == M_globalToLocalNode[i].end() )
1142  {
1143  M_globalToLocalNode[i].insert (std::make_pair (inode, count) );
1144  ++count;
1145  // store here the global numbering of the node
1146  M_localNodes[i].push_back (M_originalMesh->element (ielem).point (ii).id() );
1147  }
1148  }
1149 
1150  // cycle on element's ridges
1151  for (UInt ii = 0; ii < M_elementRidges; ++ii)
1152  {
1153  // store here the global numbering of the ridge
1154  M_localRidges[i].insert (M_originalMesh->localRidgeId (ielem, ii) );
1155  }
1156 
1157  // cycle on element's facets
1158  for (UInt ii = 0; ii < M_elementFacets; ++ii)
1159  {
1160  // store here the global numbering of the facet
1161  M_localFacets[i].insert (M_originalMesh->localFacetId (ielem, ii) );
1162  }
1163  }
1164  }
1165 }
1166 
1167 template<typename MeshType>
1168 void MeshPartitioner<MeshType>::constructNodes()
1169 {
1170  UInt inode;
1171  for (UInt i = 0; i < M_numPartitions; ++i)
1172  {
1173  std::vector<Int>::iterator it;
1174 
1175  M_nBoundaryPoints[i] = 0;
1176  (*M_meshPartitions) [i]->pointList.reserve (M_localNodes[i].size() );
 1177  // guess how many boundary points live on this processor, assuming they
 // are proportional to the local share of the mesh vertices
 1178  (*M_meshPartitions) [i]->_bPoints.reserve (M_originalMesh->numBPoints() * M_localNodes[i].size() /
 1179  M_originalMesh->numPoints() );
1180 
1181  inode = 0;
1182 
1183  typename MeshType::point_Type* pp = 0;
1184 
 1185  // loop over the list of local vertices:
1186  // in this loop inode is the local numbering of the points
1187  for (it = M_localNodes[i].begin(); it != M_localNodes[i].end(); ++it, ++inode)
1188  {
1189  // create a boundary point in the local mesh, if needed
1190  bool boundary = M_originalMesh->isBoundaryPoint (*it);
1191  M_nBoundaryPoints[i] += boundary;
1192 
1193  pp = & (*M_meshPartitions) [i]->addPoint ( boundary, false );
1194  *pp = M_originalMesh->point ( *it );
1195 
1196  pp->setLocalId ( inode );
1197  }
1198  }
1199 }
1200 
1201 
1202 template<typename MeshType>
1203 void MeshPartitioner<MeshType>::constructElements()
1204 {
1205  Int count;
1206  for (UInt i = 0; i < M_numPartitions; ++i)
1207  {
1208  std::map<Int, Int>::iterator im;
1209  std::vector<Int>::iterator it;
1210  count = 0;
1211  UInt inode;
1212 
1213  typename MeshType::element_Type* pv = 0;
1214 
1215  (*M_meshPartitions) [i]->elementList().reserve (M_localElements[i].size() );
1216 
 1217  // loop over the list of local elements
1218  // CAREFUL! in this loop inode is the global numbering of the points
1219  // We insert the local numbering of the vertices in the local volume list
1220  for (it = M_localElements[i].begin(); it != M_localElements[i].end(); ++it, ++count)
1221  {
1222  pv = & ( (*M_meshPartitions) [i]->addElement() );
1223  *pv = M_originalMesh->element (*it);
1224  pv->setLocalId (count);
1225 
1226  M_globalToLocalElement[i].insert (std::make_pair ( pv->id(), pv -> localId() ) );
1227 
1228  for (ID id = 0; id < M_elementVertices; ++id)
1229  {
1230  inode = M_originalMesh->element (*it).point (id).id();
1231  // im is an iterator to a map element
1232  // im->first is the key (i. e. the global ID "inode")
1233  // im->second is the value (i. e. the local ID "count")
1234  im = M_globalToLocalNode[i].find (inode);
1235  pv->setPoint (id, (*M_meshPartitions) [i]->point ( (*im).second ) );
1236  }
1237  }
1238  }
1239 }
1240 
1241 template<typename MeshType>
1242 void MeshPartitioner<MeshType>::constructRidges()
1243 {
1244  if (mesh_Type::S_geoDimensions == 2)
1245  {
1246  M_nBoundaryRidges = M_nBoundaryPoints;
1247  }
1248  else if (mesh_Type::S_geoDimensions == 3)
1249  {
1250  Int count;
1251  for (UInt i = 0; i < M_numPartitions; ++i)
1252  {
1253  std::map<Int, Int>::iterator im;
1254  std::set<Int>::iterator is;
1255 
1256  typename MeshType::ridge_Type* pe;
1257  UInt inode;
1258  count = 0;
1259 
1260  M_nBoundaryRidges[i] = 0;
1261  (*M_meshPartitions) [i]->ridgeList().reserve (M_localRidges[i].size() );
1262 
 1263  // loop over the list of local ridges
1264  for (is = M_localRidges[i].begin(); is != M_localRidges[i].end(); ++is, ++count)
1265  {
1266  // create a boundary ridge in the local mesh, if needed
1267  bool boundary = (M_originalMesh->isBoundaryRidge (*is) );
1268 
 1269  // count boundary ridges
1270  M_nBoundaryRidges[i] += boundary;
1271 
1272  pe = & (*M_meshPartitions) [i]->addRidge (boundary);
1273  *pe = M_originalMesh->ridge ( *is );
1274 
1275  pe->setLocalId (count);
1276 
1277  for (ID id = 0; id < 2; ++id)
1278  {
1279  inode = M_originalMesh->ridge (*is).point (id).id();
1280  // im is an iterator to a map element
1281  // im->first is the key (i. e. the global ID "inode")
1282  // im->second is the value (i. e. the local ID "count")
1283  im = M_globalToLocalNode[i].find (inode);
1284  pe->setPoint (id, (*M_meshPartitions) [i]->pointList ( (*im).second) );
1285  }
1286  }
1287  }
1288  }
1289 }
1290 
1291 
1292 template<typename MeshType>
1293 void MeshPartitioner<MeshType>::constructFacets()
1294 {
1295  Int count;
1296  for (UInt i = 0; i < M_numPartitions; ++i)
1297  {
1298  std::map<Int, Int>::iterator im;
1299  std::set<Int>::iterator is;
1300 
1301  typename MeshType::facet_Type* pf = 0;
1302 
1303  UInt inode;
1304  count = 0;
1305 
1306  M_nBoundaryFacets[i] = 0;
1307  (*M_meshPartitions) [i]->facetList().reserve (M_localFacets[i].size() );
1308 
 1309  // loop over the list of local facets
1310  for (is = M_localFacets[i].begin(); is != M_localFacets[i].end(); ++is, ++count)
1311  {
1312  // create a boundary facet in the local mesh, if needed
1313  bool boundary = (M_originalMesh->isBoundaryFacet (*is) );
1314 
1315  M_nBoundaryFacets[i] += boundary;
1316 
1317 
1318  Int elem1 = M_originalMesh->facet (*is).firstAdjacentElementIdentity();
1319  Int elem2 = M_originalMesh->facet (*is).secondAdjacentElementIdentity();
1320 
1321  // find the mesh elements adjacent to the facet
1322  im = M_globalToLocalElement[i].find (elem1);
1323 
1324  ID localElem1;
1325 
1326  if (im == M_globalToLocalElement[i].end() )
1327  {
1328  localElem1 = NotAnId;
1329  }
1330  else
1331  {
1332  localElem1 = (*im).second;
1333  }
1334 
1335  im = M_globalToLocalElement[i].find (elem2);
1336 
1337  ID localElem2;
1338  if (im == M_globalToLocalElement[i].end() )
1339  {
1340  localElem2 = NotAnId;
1341  }
1342  else
1343  {
1344  localElem2 = (*im).second;
1345  }
1346 
1347  pf = & (*M_meshPartitions) [i]->addFacet (boundary);
1348  *pf = M_originalMesh->facet ( *is );
1349 
1350  pf->setLocalId ( count );
1351 
1352  for (ID id = 0; id < M_originalMesh->facet (*is).S_numLocalVertices; ++id)
1353  {
1354  inode = pf->point (id).id();
1355  im = M_globalToLocalNode[i].find (inode);
1356  pf->setPoint (id, (*M_meshPartitions) [i]->pointList ( (*im).second) );
1357  }
1358 
 1359  // the adjacent element that is not owned locally (if any) is kept as ghost information
1360  ID ghostElem = ( localElem1 == NotAnId ) ? elem1 : elem2;
1361 
1362  if ( !boundary && ( localElem1 == NotAnId || localElem2 == NotAnId ) )
1363  {
1364  // set the flag for faces on the subdomain border
1365  pf->setFlag ( EntityFlags::SUBDOMAIN_INTERFACE );
1366  // set the flag for all points on that face
1367  for ( UInt pointOnFacet = 0; pointOnFacet < MeshType::facet_Type::S_numLocalPoints; pointOnFacet++ )
1368  {
1369  (*M_meshPartitions) [i]->point ( pf->point ( pointOnFacet ).localId() ).setFlag ( EntityFlags::SUBDOMAIN_INTERFACE );
1370  }
1371 
1372  }
1373 
 1374  // if this process does not own one of the two adjacent elements,
 1375  // the facet adjacency data must be fixed up consistently in the local mesh.
 1376  // Possible bug fixed: not only the adjacent elements, but also the facet
 1377  // positions should be set consistently; otherwise a pair (element, position)
 1378  // could be associated with different facets. This can lead to a wrong
 1379  // treatment of the dofPerFacet (of the dofPerRidge in 2D, as occurred with P2).
1381 
1382  ASSERT ( (localElem1 != NotAnId) || (localElem2 != NotAnId), "A hanging facet in mesh partitioner!");
1383 
1384  if ( localElem1 == NotAnId )
1385  {
1386  pf->firstAdjacentElementIdentity() = localElem2;
1387  pf->firstAdjacentElementPosition() = M_originalMesh->facet (*is).secondAdjacentElementPosition();
1388  pf->secondAdjacentElementIdentity() = ghostElem;
1389  pf->secondAdjacentElementPosition() = NotAnId;
1390  pf->reversePoints();
1391  }
1392  else if ( localElem2 == NotAnId )
1393  {
1394  pf->firstAdjacentElementIdentity() = localElem1;
1395  pf->firstAdjacentElementPosition() = M_originalMesh->facet (*is).firstAdjacentElementPosition();
1396  pf->secondAdjacentElementIdentity() = ghostElem;
1397  pf->secondAdjacentElementPosition() = NotAnId;
1398  }
1399  else
1400  {
1401  pf->firstAdjacentElementIdentity() = localElem1;
1402  pf->firstAdjacentElementPosition() = M_originalMesh->facet (*is).firstAdjacentElementPosition();
1403  pf->secondAdjacentElementIdentity() = localElem2;
1404  pf->secondAdjacentElementPosition() = M_originalMesh->facet (*is).secondAdjacentElementPosition();
1405  }
1406 
1407  }
1408  (*M_meshPartitions) [i]->setLinkSwitch ("HAS_ALL_FACETS");
1409  (*M_meshPartitions) [i]->setLinkSwitch ("FACETS_HAVE_ADIACENCY");
1410  }
1411 
1412 }
1413 
1414 template<typename MeshType>
1415 void MeshPartitioner<MeshType>::finalSetup()
1416 {
1417  for (UInt i = 0; i < M_numPartitions; ++i)
1418  {
1419  UInt nElements = M_localElements[i].size();
1420  UInt nNodes = M_localNodes[i].size();
1421  UInt nRidges = M_localRidges[i].size();
1422  UInt nFacets = M_localFacets[i].size();
1423 
1424  (*M_meshPartitions) [i]->setMaxNumPoints (nNodes, true);
1425  (*M_meshPartitions) [i]->setMaxNumRidges (nRidges, true);
1426  (*M_meshPartitions) [i]->setMaxNumFacets (nFacets, true);
1427  (*M_meshPartitions) [i]->setMaxNumElements ( nElements, true);
1428 
1429  (*M_meshPartitions) [i]->setMaxNumGlobalPoints (M_originalMesh->numPoints() );
1430  (*M_meshPartitions) [i]->setNumGlobalVertices (M_originalMesh->numVertices() );
1431  (*M_meshPartitions) [i]->setMaxNumGlobalRidges (M_originalMesh->numRidges() );
1432  (*M_meshPartitions) [i]->setMaxNumGlobalFacets (M_originalMesh->numFacets() );
1433 
1434  (*M_meshPartitions) [i]->setMaxNumGlobalElements (M_originalMesh->numElements() );
1435  (*M_meshPartitions) [i]->setNumBoundaryFacets (M_nBoundaryFacets[i]);
1436 
1437  (*M_meshPartitions) [i]->setNumBPoints (M_nBoundaryPoints[i]);
1438  (*M_meshPartitions) [i]->setNumBoundaryRidges (M_nBoundaryRidges[i]);
1439 
1440  (*M_meshPartitions) [i]->setNumVertices (nNodes );
1441  (*M_meshPartitions) [i]->setNumBVertices (M_nBoundaryPoints[i]);
1442 
1443  if (MeshType::S_geoDimensions == 3)
1444  {
1445  (*M_meshPartitions) [i]->updateElementRidges();
1446  }
1447 
1448  (*M_meshPartitions) [i]->updateElementFacets();
1449 
1450 #ifdef HAVE_LIFEV_DEBUG
1451  if (M_serialMode)
1452  {
1453  debugStream (4000) << "Created local mesh number " << i << "\n";
1454  }
1455  else
1456  {
1457  debugStream (4000) << "Rank " << M_me << " created local mesh.\n";
1458  }
1459 #endif
1460  }
1461 }
1462 
1463 template<typename MeshType>
1464 void MeshPartitioner<MeshType>::execute()
1465 {
1466 
1467  // Build graph vertex distribution vector. Graph vertex represents one element
1468  // in the mesh.
1469  distributeElements (M_originalMesh->numElements() );
1470 
1471 
1472  // In fluid-structure interaction:
1473  // * If the solid mesh is not partitioned the following part won't be
1474  // executed
1475  // * If the solid mesh is partitioned:
1476  // - The fluid is partitioned first
1477  // - The solid mesh partition tries to follow the partition of the fluid
 1478  // This is achieved by assigning weights to some edges of the graph.
1479  // The interface between two processors is the set of the vertices that for
1480  // at least one processor are on the repeated map and not on the unique map.
1481  // That's why the constructor needs both the unique and repeated maps
1482  // on the interface
1483 
1484 
1485  //////////////////// BEGIN OF SOLID PARTITION PART ////////////////////////
1486  if (M_interfaceMap)
1487  {
1488  findRepeatedFacesFSI();
1489  }
1490  //////////////////// END OF SOLID PARTITION PART ////////////////////////
1491 
1492  // Partition connectivity graph
1493  partitionConnectivityGraph (M_comm->NumProc() );
1494 
1495  //////////////// BEGIN OF SOLID PARTITION PART ////////////////
1496  if (M_interfaceMap)
1497  {
1498  matchFluidPartitionsFSI();
1499  }
1500  ////////////////// END OF SOLID PARTITION PART /////////////////////
1501 
1502 #ifdef HAVE_LIFEV_DEBUG
1503  debugStream (4000) << M_me << " has " << (*M_elementDomains) [M_me].size() << " elements.\n";
1504 #endif
1505 
1506  fillEntityPID ();
1507  if ( M_partitionOverlap > 0 )
1508  {
1509  GhostHandler<mesh_Type> gh ( M_originalMesh, M_comm );
1510  gh.extendGraphFE ( M_elementDomains, M_entityPID.points, M_partitionOverlap );
1511  }
1512 
1513  doPartitionMesh();
1514 
1515  // ***************************************
1516  // release the original unpartitioned mesh
1517  // allowing it to be deleted
1518  // ***************************************
1519  releaseUnpartitionedMesh();
1520 
1521  // ***************************************
1522  // clear all internal structures that are
1523  // not needed anymore
1524  // ***************************************
1525  cleanUp();
1526 }
1527 
1528 template<typename MeshType>
1529 void MeshPartitioner<MeshType>::fillEntityPID ()
1530 {
1531  Int numParts = (M_numPartitions > 1) ? M_numPartitions : M_comm->NumProc();
1532 
1533  // initialize entity PIDs to 0
1534  M_entityPID.points.resize ( M_originalMesh->numPoints(), 0 );
1535  M_entityPID.elements.resize ( M_originalMesh->numElements(), 0 );
1536  M_entityPID.facets.resize ( M_originalMesh->numFacets(), 0 );
1537  M_entityPID.ridges.resize ( M_originalMesh->numRidges(), 0 );
1538 
1539  // check: parallel algorithm seems to be slower for this
1540  // p = 0 can be skipped since M_entityPID is already initialized at that value
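 // e.g. a point shared by subdomains 0, 2 and 5 gets PID 5 (the maximum),
 // so exactly one partition owns it; the other partitions will flag their
 // copies as GHOST in markGhostEntities()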
1541  for ( Int p = 1; p < numParts; p++ )
1542  {
1543  for ( UInt e = 0; e < (*M_elementDomains) [ p ].size(); e++ )
1544  {
1545  // point block
1546  for ( UInt k = 0; k < mesh_Type::element_Type::S_numPoints; k++ )
1547  {
1548  const ID& pointID = M_originalMesh->element ( (*M_elementDomains) [ p ][ e ] ).point ( k ).id();
 1549  // the point PID is the maximum among the procs that own it
1550  M_entityPID.points[ pointID ] = std::max ( M_entityPID.points[ pointID ], p );
1551  }
1552 
1553  // elem block
1554  const ID& elemID = M_originalMesh->element ( (*M_elementDomains) [ p ][ e ] ).id();
 1555  // at this stage each element belongs to a single partition; overlap is not yet done.
1556  M_entityPID.elements[ elemID ] = p;
1557 
1558  // facet block
1559  for ( UInt k = 0; k < mesh_Type::element_Type::S_numFacets; k++ )
1560  {
1561  const ID& facetID = M_originalMesh->facet ( M_originalMesh->localFacetId ( elemID, k ) ).id();
 1562  // the facet PID is the maximum among the procs that own it
1563  M_entityPID.facets[ facetID ] = std::max ( M_entityPID.facets[ facetID ], p );
1564  }
1565 
1566  // ridge block
1567  for ( UInt k = 0; k < mesh_Type::element_Type::S_numRidges; k++ )
1568  {
1569  const ID& ridgeID = M_originalMesh->ridge ( M_originalMesh->localRidgeId ( elemID, k ) ).id();
 1570  // the ridge PID is the maximum among the procs that own it
1571  M_entityPID.ridges[ ridgeID ] = std::max ( M_entityPID.ridges[ ridgeID ], p );
1572  }
1573  }
1574  }
1575 }
1576 
1577 template<typename MeshType>
1578 void MeshPartitioner<MeshType>::markGhostEntities()
1579 {
1580  // mark ghost entities by each partition as described in M_entityPID
1581  //@todo: does not work for offline partitioning!
1582  //M_entityPID or flags should be exported and read back to make it work
1583  for (UInt i = 0; i < M_numPartitions; ++i)
1584  {
1585  Int const procId = (M_numPartitions > 1) ? i : M_me;
1586  for ( UInt e = 0; e < (*M_meshPartitions) [ i ]->numElements(); e++ )
1587  {
1588  typename MeshType::element_Type& element = (*M_meshPartitions) [ i ]->element ( e );
1589  if ( M_entityPID.elements[ element.id() ] != procId )
1590  {
1591  element.setFlag ( EntityFlags::GHOST );
1592  }
1593  }
1594 
1595  for ( UInt f = 0; f < (*M_meshPartitions) [ i ]->numFacets(); f++ )
1596  {
1597  typename MeshType::facet_Type& facet = (*M_meshPartitions) [ i ]->facet ( f );
1598  if ( M_entityPID.facets[ facet.id() ] != procId )
1599  {
1600  facet.setFlag ( EntityFlags::GHOST );
1601  }
1602  }
1603 
1604  for ( UInt r = 0; r < (*M_meshPartitions) [ i ]->numRidges(); r++ )
1605  {
1606  typename MeshType::ridge_Type& ridge = (*M_meshPartitions) [ i ]->ridge ( r );
1607  if ( M_entityPID.ridges[ ridge.id() ] != procId )
1608  {
1609  ridge.setFlag ( EntityFlags::GHOST );
1610  }
1611  }
1612 
1613  for ( UInt p = 0; p < (*M_meshPartitions) [ i ]->numPoints(); p++ )
1614  {
1615  typename MeshType::point_Type& point = (*M_meshPartitions) [ i ]->point ( p );
1616  if ( M_entityPID.points[ point.id() ] != procId )
1617  {
1618  point.setFlag ( EntityFlags::GHOST );
1619  }
1620  }
1621  }
1622  clearVector ( M_entityPID.elements );
1623  clearVector ( M_entityPID.facets );
1624  clearVector ( M_entityPID.ridges );
1625  clearVector ( M_entityPID.points );
1626 }
1627 
1628 template<typename MeshType>
1629 void MeshPartitioner<MeshType>::cleanUp()
1630 {
1631  clearVector ( M_vertexDistribution );
1632  clearVector ( M_adjacencyGraphKeys );
1633  clearVector ( M_adjacencyGraphValues );
1634  clearVector ( M_localNodes );
1635  clearVector ( M_localRidges );
1636  clearVector ( M_localFacets );
1637  clearVector ( M_localElements );
1638  clearVector ( M_nBoundaryPoints );
1639  clearVector ( M_nBoundaryRidges );
1640  clearVector ( M_nBoundaryFacets );
1641  clearVector ( M_graphVertexLocations );
1642  clearVector ( M_globalToLocalNode );
1643  clearVector ( M_globalToLocalElement );
1644 }
1645 
1646 }
1647 #endif // MESH_PARTITIONER_H