Mesh Oriented datABase  (version 5.5.1)
An array-based unstructured mesh library
ParallelComm.hpp
1 /**
2  * MOAB, a Mesh-Oriented datABase, is a software component for creating,
3  * storing and accessing finite element mesh data.
4  *
5  * Copyright 2004 Sandia Corporation. Under the terms of Contract
6  * DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government
7  * retains certain rights in this software.
8  *
9  * This library is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public
11  * License as published by the Free Software Foundation; either
12  * version 2.1 of the License, or (at your option) any later version.
13  *
14  */
15 
16 #ifndef MOAB_PARALLEL_COMM_HPP
17 #define MOAB_PARALLEL_COMM_HPP
18 
19 #include "moab/Forward.hpp"
20 #include "moab/Interface.hpp"
21 #include "moab/Range.hpp"
22 #include "moab/ProcConfig.hpp"
23 #include <map>
24 #include <set>
25 #include <vector>
26 #include <iostream>
27 #include <fstream>
28 #include <cassert>
29 #include <cstdlib>
30 #include <cmath>
31 #include "moab/TupleList.hpp"
32 
33 namespace moab
34 {
35 
36 class SequenceManager;
37 class Error;
38 template < typename KeyType, typename ValType, ValType NullVal >
39 class RangeMap;
40 typedef RangeMap< EntityHandle, EntityHandle, 0 > HandleMap;
41 class ParallelMergeMesh;
42 class DebugOutput;
43 class SharedSetData;
44 
45 #define MAX_SHARING_PROCS 64
46 
47 /**
48  * \brief Parallel communications in MOAB
49  * \author Tim Tautges
50  *
51  * This class implements methods to communicate mesh between processors
52  *
53  */
54 class ParallelComm
55 {
56  public:
57  friend class ParallelMergeMesh;
58 
59  // ==================================
60  // \section CONSTRUCTORS/DESTRUCTORS/PCOMM MANAGEMENT
61  // ==================================
62 
63  //! constructor
64  ParallelComm( Interface* impl, MPI_Comm comm, int* pcomm_id_out = 0 );
65 
66  //! constructor taking packed buffer, for testing
67  ParallelComm( Interface* impl, std::vector< unsigned char >& tmp_buff, MPI_Comm comm, int* pcomm_id_out = 0 );
68 
69  //! Get ID used to reference this PCOMM instance
70  int get_id() const
71  {
72  return pcommID;
73  }
74 
75  //! get the indexed pcomm object from the interface
76  static ParallelComm* get_pcomm( Interface* impl, const int index );
77 
78  //! Get ParallelComm instance associated with partition handle
79  //! Will create ParallelComm instance if a) one does not already
80  //! exist and b) a valid value for MPI_Comm is passed.
81  static ParallelComm* get_pcomm( Interface* impl, EntityHandle partitioning, const MPI_Comm* comm = 0 );
82 
83  static ErrorCode get_all_pcomm( Interface* impl, std::vector< ParallelComm* >& list );
84 
85  //! destructor
86  ~ParallelComm();
87 
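Editorial example (not part of the header): a minimal sketch of creating a ParallelComm for a MOAB instance and retrieving it again by id, using the constructor and get_pcomm() declared above; it assumes MPI_Init has already been called.

    #include "moab/Core.hpp"
    #include "moab/ParallelComm.hpp"
    #include <cassert>

    void setup_pcomm( moab::Interface* mb )
    {
        // Attach a new ParallelComm to MPI_COMM_WORLD; pcomm_id identifies it later.
        int pcomm_id = -1;
        moab::ParallelComm* pc = new moab::ParallelComm( mb, MPI_COMM_WORLD, &pcomm_id );

        // Retrieve the same instance by index, e.g. from another component.
        moab::ParallelComm* same = moab::ParallelComm::get_pcomm( mb, pcomm_id );
        assert( same == pc );
    }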
88  static unsigned char PROC_SHARED, PROC_OWNER;
89 
90  // ==================================
91  // \section GLOBAL IDS
92  // ==================================
93 
94  //! assign a global id space, for largest-dimension or all entities (and
95  //! in either case for vertices too)
96  //!\param owned_only If true, do not get global IDs for non-owned entities
97  //! from remote processors.
98  ErrorCode assign_global_ids( EntityHandle this_set,
99  const int dimension,
100  const int start_id = 1,
101  const bool largest_dim_only = true,
102  const bool parallel = true,
103  const bool owned_only = false );
104 
105  //! assign a global id space, for largest-dimension or all entities (and
106  //! in either case for vertices too)
107  ErrorCode assign_global_ids( Range entities[],
108  const int dimension,
109  const int start_id,
110  const bool parallel,
111  const bool owned_only );
112 
113  //! check for global ids; based only on tag handle being there or not;
114  //! if it's not there, create them for the specified dimensions
115  //!\param owned_only If true, do not get global IDs for non-owned entities
116  //! from remote processors.
117  ErrorCode check_global_ids( EntityHandle this_set,
118  const int dimension,
119  const int start_id = 1,
120  const bool largest_dim_only = true,
121  const bool parallel = true,
122  const bool owned_only = false );
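Editorial example: a sketch of assigning global ids over the whole mesh, assuming (as reconstructed above) that the first argument is the set to operate on, with 0 meaning the root set.

    moab::ErrorCode assign_ids( moab::ParallelComm* pc )
    {
        // dimension = 3: ids for the largest-dimension entities plus vertices,
        // starting at 1, assigned in parallel, including non-owned entities.
        return pc->assign_global_ids( 0, 3 );
    }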
123 
124  // ==================================
125  // \section HIGH-LEVEL COMMUNICATION (send/recv/bcast/scatter ents, exchange tags)
126  // ==================================
127 
128  /** \brief send entities to another processor, optionally waiting until it's done
129  *
130  * Send entities to another processor, with adjs, sets, and tags.
131  * If store_remote_handles is true, this call receives back handles assigned to
132  * entities sent to destination processor and stores them in sharedh_tag or
133  * sharedhs_tag.
134  * \param to_proc Destination processor
135  * \param orig_ents Entities requested to send
136  * \param adjs If true, send adjacencies for equiv entities (currently unsupported)
137  * \param tags If true, send tag values for all tags assigned to entities
138  * \param store_remote_handles If true, also recv message with handles on destination processor (currently unsupported)
139  * \param final_ents Range containing all entities sent
140  * \param incoming Keep track of whether any messages are coming to this processor (newly added)
141  * \param wait_all If true, wait until all messages received/sent complete
142  */
143  ErrorCode send_entities( const int to_proc,
144  Range& orig_ents,
145  const bool adjs,
146  const bool tags,
147  const bool store_remote_handles,
148  const bool is_iface,
149  Range& final_ents,
150  int& incoming1,
151  int& incoming2, // newly added
152  TupleList& entprocs, // newly added
153  std::vector< MPI_Request >& recv_remoteh_reqs, // newly added
154  bool wait_all = true );
155 
156  ErrorCode send_entities( std::vector< unsigned int >& send_procs,
157  std::vector< Range* >& send_ents,
158  int& incoming1,
159  int& incoming2,
160  const bool store_remote_handles );
161 
162  /** \brief Receive entities from another processor, optionally waiting until it's done
163  *
164  * Receive entities from another processor, with adjs, sets, and tags.
165  * If store_remote_handles is true, this call sends back handles assigned to
166  * the entities received.
167  * \param from_proc Source processor
168  * \param store_remote_handles If true, send message with new entity handles to source processor (currently unsupported)
169  * \param final_ents Range containing all entities received
170  * \param incoming Keep track of whether any messages are coming to this processor (newly added)
171  * \param wait_all If true, wait until all messages received/sent complete
172  */
173  ErrorCode recv_entities( const int from_proc,
174  const bool store_remote_handles,
175  const bool is_iface,
176  Range& final_ents,
177  int& incoming1,
178  int& incoming2,
179  std::vector< std::vector< EntityHandle > >& L1hloc,
180  std::vector< std::vector< EntityHandle > >& L1hrem,
181  std::vector< std::vector< int > >& L1p,
182  std::vector< EntityHandle >& L2hloc,
183  std::vector< EntityHandle >& L2hrem,
184  std::vector< unsigned int >& L2p,
185  std::vector< MPI_Request >& recv_remoteh_reqs,
186  bool wait_all = true );
187 
188  ErrorCode recv_entities( std::set< unsigned int >& recv_procs,
189  int incoming1,
190  int incoming2,
191  const bool store_remote_handles,
192  const bool migrate = false );
193 
194  /** \brief Receive messages from another processor in while loop
195  *
196  * Receive messages from another processor.
197  * \param from_proc Source processor
198  * \param store_remote_handles If true, send message with new entity handles to source processor (currently unsupported)
199  * \param final_ents Range containing all entities received
200  * \param incoming Keep track of whether any messages are coming to this processor (newly added)
201  */
202  ErrorCode recv_messages( const int from_proc,
203  const bool store_remote_handles,
204  const bool is_iface,
205  Range& final_ents,
206  int& incoming1,
207  int& incoming2,
208  std::vector< std::vector< EntityHandle > >& L1hloc,
209  std::vector< std::vector< EntityHandle > >& L1hrem,
210  std::vector< std::vector< int > >& L1p,
211  std::vector< EntityHandle >& L2hloc,
212  std::vector< EntityHandle >& L2hrem,
213  std::vector< unsigned int >& L2p,
214  std::vector< MPI_Request >& recv_remoteh_reqs );
215 
216  ErrorCode recv_remote_handle_messages( const int from_proc,
217  int& incoming2,
218  std::vector< EntityHandle >& L2hloc,
219  std::vector< EntityHandle >& L2hrem,
220  std::vector< unsigned int >& L2p,
221  std::vector< MPI_Request >& recv_remoteh_reqs );
222 
223  /** \brief Exchange ghost cells with neighboring procs
224  * Neighboring processors are those sharing an interface
225  * with this processor. All entities of dimension ghost_dim
226  * within num_layers of interface, measured going through bridge_dim,
227  * are exchanged. See MeshTopoUtil::get_bridge_adjacencies for description
228  * of bridge adjacencies. If wait_all is false and store_remote_handles
229  * is true, MPI_Request objects are available in the sendReqs[2*MAX_SHARING_PROCS]
230  * member array, with inactive requests marked as MPI_REQUEST_NULL. If
231  * store_remote_handles or wait_all is false, this function returns after
232  * all entities have been received and processed.
233  * \param ghost_dim Dimension of ghost entities to be exchanged
234  * \param bridge_dim Dimension of entities used to measure layers from interface
235  * \param num_layers Number of layers of ghosts requested
236  * \param addl_ents Dimension of additional adjacent entities to exchange with ghosts, 0 if none
237  * \param store_remote_handles If true, send message with new entity handles to source processor
238  * \param wait_all If true, function does not return until all send buffers
239  * are cleared.
240  */
241 
242  ErrorCode exchange_ghost_cells( int ghost_dim,
243  int bridge_dim,
244  int num_layers,
245  int addl_ents,
246  bool store_remote_handles,
247  bool wait_all = true,
248  EntityHandle* file_set = NULL );
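Editorial example: a sketch of a common ghosting call -- one layer of 3D elements bridged through vertices, storing remote handles and waiting for completion (the defaults declared above).

    moab::ErrorCode ghost_one_layer( moab::ParallelComm* pc )
    {
        return pc->exchange_ghost_cells( 3 /*ghost_dim*/, 0 /*bridge_dim*/,
                                         1 /*num_layers*/, 0 /*addl_ents*/,
                                         true /*store_remote_handles*/ );
    }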
249 
250  /** \brief Static version of exchange_ghost_cells, exchanging info through
251  * buffers rather than messages
252  */
253  static ErrorCode exchange_ghost_cells( ParallelComm** pcs,
254  unsigned int num_procs,
255  int ghost_dim,
256  int bridge_dim,
257  int num_layers,
258  int addl_ents,
259  bool store_remote_handles,
260  EntityHandle* file_sets = NULL );
261 
262  /** \brief Post "MPI_Irecv" before meshing
263  * \param exchange_procs processor vector exchanged
264  */
265  ErrorCode post_irecv( std::vector< unsigned int >& exchange_procs );
266 
267  ErrorCode post_irecv( std::vector< unsigned int >& shared_procs, std::set< unsigned int >& recv_procs );
268 
269  /** \brief Exchange owned mesh for input mesh entities and sets
270  * This function should be called collectively over the communicator for this ParallelComm.
271  * If this version is called, all shared exchanged entities should have a value for this
272  * tag (or the tag should have a default value).
273  * \param exchange_procs processor vector exchanged
274  * \param exchange_ents exchanged entities for each processor
275  * \param migrate whether the owner of the entities is changed or not
276  */
277  ErrorCode exchange_owned_meshs( std::vector< unsigned int >& exchange_procs,
278  std::vector< Range* >& exchange_ents,
279  std::vector< MPI_Request >& recv_ent_reqs,
280  std::vector< MPI_Request >& recv_remoteh_reqs,
281  bool store_remote_handles,
282  bool wait_all = true,
283  bool migrate = false,
284  int dim = 0 );
285 
286  /** \brief Exchange owned mesh for input mesh entities and sets
287  * This function is called twice by exchange_owned_meshs to exchange entities before sets
288  * \param migrate whether the owner of the entities is changed or not
289  */
290  ErrorCode exchange_owned_mesh( std::vector< unsigned int >& exchange_procs,
291  std::vector< Range* >& exchange_ents,
292  std::vector< MPI_Request >& recv_ent_reqs,
293  std::vector< MPI_Request >& recv_remoteh_reqs,
294  const bool recv_posted,
295  bool store_remote_handles,
296  bool wait_all,
297  bool migrate = false );
298 
299  /** \brief Exchange tags for all shared and ghosted entities
300  * This function should be called collectively over the communicator for this ParallelComm.
301  * If this version is called, all ghosted/shared entities should have a value for this
302  * tag (or the tag should have a default value). If the entities vector is empty, all shared
303  * entities participate in the exchange. If a proc has no owned entities this function must
304  * still be called since it is collective. \param src_tags Vector of tag handles to be exchanged
305  * \param dst_tags Tag handles to store the tags on the non-owning procs
306  * \param entities Entities for which tags are exchanged
307  */
308  ErrorCode exchange_tags( const std::vector< Tag >& src_tags,
309  const std::vector< Tag >& dst_tags,
310  const Range& entities );
311 
312  /** \brief Exchange tags for all shared and ghosted entities
313  * This function should be called collectively over the communicator for this ParallelComm.
314  * If the entities vector is empty, all shared entities
315  * participate in the exchange. If a proc has no owned entities this function must still be
316  * called since it is collective.
317  * \param tag_name Name of tag to be exchanged \param entities Entities for which tags are exchanged
318  */
319  ErrorCode exchange_tags( const char* tag_name, const Range& entities );
320 
321  /** \brief Exchange tags for all shared and ghosted entities
322  * This function should be called collectively over the communicator for this ParallelComm.
323  * If the entities vector is empty, all shared entities
324  * participate in the exchange. If a proc has no owned entities this function must still be
325  * called since it is collective.
326  * \param tagh Handle of tag to be exchanged \param entities Entities for which tags are exchanged
327  */
328  ErrorCode exchange_tags( Tag tagh, const Range& entities );
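Editorial example: a sketch of pushing owned tag values to ghosted/shared copies with the single-tag overload above; density_tag is a hypothetical handle obtained from Interface::tag_get_handle(). The call is collective, so every rank must make it even if it owns no entities.

    moab::ErrorCode sync_tag( moab::ParallelComm* pc, moab::Tag density_tag )
    {
        moab::Range ents;  // empty range => all shared entities participate
        return pc->exchange_tags( density_tag, ents );
    }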
329 
330  /** \brief Perform data reduction operation for all shared and ghosted entities
331  * This function should be called collectively over the communicator for this ParallelComm.
332  * If this version is called, all ghosted/shared entities should have a value for this
333  * tag (or the tag should have a default value). Operation is any MPI_Op, with result stored
334  * in destination tag.
335  * \param src_tags Vector of tag handles to be reduced
336  * \param dst_tags Vector of tag handles in which the answer will be stored
337  * \param mpi_op Operation type
338  * \param entities Entities on which reduction will be made; if empty, operates on all shared
339  * entities
340  */
341  ErrorCode reduce_tags( const std::vector< Tag >& src_tags,
342  const std::vector< Tag >& dst_tags,
343  const MPI_Op mpi_op,
344  const Range& entities );
345 
346  /** \brief Perform data reduction operation for all shared and ghosted entities
347  * Same as std::vector variant except for one tag specified by name
348  * \param tag_name Name of tag to be reduced
349  * \param mpi_op Operation type
350  * \param entities Entities on which reduction will be made; if empty, operates on all shared
351  * entities
352  */
353  ErrorCode reduce_tags( const char* tag_name, const MPI_Op mpi_op, const Range& entities );
354 
355  /** \brief Perform data reduction operation for all shared and ghosted entities
356  * Same as std::vector variant except for one tag specified by handle
357  * \param tag_handle Handle of tag to be reduced
358  * \param mpi_op Operation type
359  * \param entities Entities on which reduction will be made; if empty, operates on all shared
360  * entities
361  */
362  ErrorCode reduce_tags( Tag tag_handle, const MPI_Op mpi_op, const Range& entities );
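Editorial example: a sketch of summing a tag over all sharing processors with the handle-based overload above; flux_tag is a hypothetical tag handle.

    moab::ErrorCode sum_shared( moab::ParallelComm* pc, moab::Tag flux_tag )
    {
        moab::Range ents;  // empty range => reduce over all shared entities
        return pc->reduce_tags( flux_tag, MPI_SUM, ents );
    }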
363 
364  /** \brief Broadcast all entities resident on from_proc to other processors
365  * This function assumes remote handles are *not* being stored, since (usually)
366  * every processor will know about the whole mesh.
367  * \param from_proc Processor having the mesh to be broadcast
368  * \param entities On return, the entities sent or received in this call
369  * \param adjacencies If true, adjacencies are sent for equiv entities (currently unsupported)
370  * \param tags If true, all non-default-valued tags are sent for sent entities
371  */
372  ErrorCode broadcast_entities( const int from_proc,
373  Range& entities,
374  const bool adjacencies = false,
375  const bool tags = true );
376 
377  /** \brief Scatter entities on from_proc to other processors
378  * This function assumes remote handles are *not* being stored, since (usually)
379  * every processor will know about the whole mesh.
380  * \param from_proc Processor having the mesh to be scattered
381  * \param entities On return, the entities sent or received in this call
382  * \param adjacencies If true, adjacencies are sent for equiv entities (currently unsupported)
383  * \param tags If true, all non-default-valued tags are sent for sent entities
384  */
385  ErrorCode scatter_entities( const int from_proc,
386  std::vector< Range >& entities,
387  const bool adjacencies = false,
388  const bool tags = true );
389 
390  /////////////////////////////////////////////////////////////////////////////////
391  // Send and Receive routines for a sequence of entities: use case UMR
392  /////////////////////////////////////////////////////////////////////////////////
393 
394  /** \brief Send and receives data from a set of processors
395  */
396  ErrorCode send_recv_entities( std::vector< int >& send_procs,
397  std::vector< std::vector< int > >& msgsizes,
398  std::vector< std::vector< EntityHandle > >& senddata,
399  std::vector< std::vector< EntityHandle > >& recvdata );
400 
401  ErrorCode update_remote_data( EntityHandle entity,
402  std::vector< int >& procs,
403  std::vector< EntityHandle >& handles );
404 
405  ErrorCode get_remote_handles( EntityHandle* local_vec, EntityHandle* rem_vec, int num_ents, int to_proc );
406 
407  /////////////////////////////////////////////////////////////////////////////////
408 
409  // ==================================
410  // \section INITIALIZATION OF PARALLEL DATA (resolve_shared_ents, etc.)
411  // ==================================
412 
413  /** \brief Resolve shared entities between processors
414  *
415  * Resolve shared entities between processors for entities in proc_ents,
416  * by comparing global id tag values on vertices on skin of elements in
417  * proc_ents. Shared entities are assigned a tag that's either
418  * PARALLEL_SHARED_PROC_TAG_NAME, which is 1 integer in length, or
419  * PARALLEL_SHARED_PROCS_TAG_NAME, whose length depends on the maximum
420  * number of sharing processors. Values in these tags denote the ranks
421  * of sharing processors, and the list ends with the value -1.
422  *
423  * If shared_dim is input as -1 or not input, a value one less than the
424  * maximum dimension of entities in proc_ents is used.
425  *
426  * \param proc_ents Entities for which to resolve shared entities
427  * \param shared_dim Maximum dimension of shared entities to look for
428  */
429  ErrorCode resolve_shared_ents( EntityHandle this_set,
430  Range& proc_ents,
431  int resolve_dim = -1,
432  int shared_dim = -1,
433  Range* skin_ents = NULL,
434  const Tag* id_tag = 0 );
435 
436  /** \brief Resolve shared entities between processors
437  *
438  * Same as resolve_shared_ents(Range&), except works for
439  * all entities in instance of dimension dim.
440  *
441  * If shared_dim is input as -1 or not input, a value one less than the
442  * maximum dimension of entities is used.
443 
444  * \param dim Dimension of entities in the partition
445  * \param shared_dim Maximum dimension of shared entities to look for
446  */
447  ErrorCode resolve_shared_ents( EntityHandle this_set,
448  int resolve_dim = 3,
449  int shared_dim = -1,
450  const Tag* id_tag = 0 );
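Editorial example: a sketch of the usual call after each rank has loaded its part of the mesh, assuming (as reconstructed above) that the first argument is the set containing the partitioned entities.

    moab::ErrorCode resolve( moab::ParallelComm* pc, moab::EntityHandle fileset )
    {
        // Resolve sharing for 3D elements; shared_dim = -1 picks one less than
        // the maximum entity dimension, per the comment above.
        return pc->resolve_shared_ents( fileset, 3, -1 );
    }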
451 
452  static ErrorCode resolve_shared_ents( ParallelComm** pc,
453  const unsigned int np,
454  EntityHandle this_set,
455  const int to_dim );
456 
457  /** Resolve shared sets.
458  *
459  * Generates a list of candidate sets from those (directly)
460  * contained in passed set and passes them to the other version
461  * of \c resolve_shared_sets.
462  *\param this_set Set directly containing candidate sets (e.g. file set)
463  *\param id_tag Tag containing global IDs for entity sets.
464  */
465 
466  ErrorCode resolve_shared_sets( EntityHandle this_set, const Tag* id_tag = 0 );
467 
468  /** Resolve shared sets.
469  *
470  * Use values of id_tag to match sets across processes and populate
471  * sharing data for sets.
472  *\param candidate_sets Sets to consider as potentially shared.
473  *\param id_tag Tag containing global IDs for entity sets.
474  */
475  ErrorCode resolve_shared_sets( Range& candidate_sets, Tag id_tag );
476 
477  /** Extend shared sets with ghost entities
478  * After ghosting, ghost entities do not yet have information about
479  * the material set, partition set, Neumann or Dirichlet set they could
480  * belong to.
481  * This method assigns ghosted entities to those special entity sets.
482  * In some cases we may even have to create those sets, if they do not exist yet on
483  * the local processor.
484  *
485  * The special entity sets all have a unique identifier, in the form of an integer
486  * tag on the set.
487  * The shared sets data is not used, because we do not use the geometry sets, as they are
488  * not uniquely identified.
489  *
490  *
491  * \param file_set : file set used per application
492  *
493  */
494  ErrorCode augment_default_sets_with_ghosts( EntityHandle file_set );
495  // ==================================
496  // \section GET PARALLEL DATA (shared/owned/iface entities, etc.)
497  // ==================================
498 
499  /** \brief Get parallel status of an entity
500  * Returns the parallel status of an entity
501  *
502  * \param entity The entity being queried
503  * \param pstatus_val Parallel status of the entity
504  */
505  ErrorCode get_pstatus( EntityHandle entity, unsigned char& pstatus_val );
506 
507  /** \brief Get entities with the given pstatus bit(s) set
508  * Returns any entities whose pstatus tag value v satisfies (v & pstatus_val)
509  *
510  * \param dim Dimension of entities to be returned, or -1 if any
511  * \param pstatus_val pstatus value of desired entities
512  * \param pstatus_ents Entities returned from function
513  */
514  ErrorCode get_pstatus_entities( int dim, unsigned char pstatus_val, Range& pstatus_ents );
515 
516  /** \brief Return the rank of the entity owner
517  */
518  ErrorCode get_owner( EntityHandle entity, int& owner );
519 
520  /** \brief Return the owner processor and handle of a given entity
521  */
522  ErrorCode get_owner_handle( EntityHandle entity, int& owner, EntityHandle& handle );
523 
524  /** \brief Get the shared processors/handles for an entity
525  * Get the shared processors/handles for an entity. Arrays must
526  * be large enough to receive data for all sharing procs. Does *not* include
527  * this proc if only shared with one other proc.
528  * \param entity Entity being queried
529  * \param ps Pointer to sharing proc data
530  * \param hs Pointer to shared proc handle data
531  * \param pstat Reference to pstatus data returned from this function
532  */
533  ErrorCode get_sharing_data( const EntityHandle entity,
534  int* ps,
535  EntityHandle* hs,
536  unsigned char& pstat,
537  unsigned int& num_ps );
538 
539  /** \brief Get the shared processors/handles for an entity
540  * Same as other version but with int num_ps
541  * \param entity Entity being queried
542  * \param ps Pointer to sharing proc data
543  * \param hs Pointer to shared proc handle data
544  * \param pstat Reference to pstatus data returned from this function
545  */
546  ErrorCode get_sharing_data( const EntityHandle entity,
547  int* ps,
548  EntityHandle* hs,
549  unsigned char& pstat,
550  int& num_ps );
551 
552  /** \brief Get the intersection or union of all sharing processors
553  * Get the intersection or union of all sharing processors. Processor set
554  * is cleared as part of this function.
555  * \param entities Entity list ptr
556  * \param num_entities Number of entities
557  * \param procs Processors returned
558  * \param op Either Interface::UNION or Interface::INTERSECT
559  */
560  ErrorCode get_sharing_data( const EntityHandle* entities,
561  int num_entities,
562  std::set< int >& procs,
563  int op = Interface::INTERSECT );
564 
565  /** \brief Get the intersection or union of all sharing processors
566  * Same as previous variant but with range as input
567  */
568  ErrorCode get_sharing_data( const Range& entities, std::set< int >& procs, int op = Interface::INTERSECT );
569 
570  /** \brief Get shared entities of specified dimension
571  * If other_proc is -1, any shared entities are returned. If dim is -1,
572  * entities of all dimensions on interface are returned.
573  * \param other_proc Rank of processor for which interface entities are requested
574  * \param shared_ents Entities returned from function
575  * \param dim Dimension of interface entities requested
576  * \param iface If true, return only entities on the interface
577  * \param owned_filter If true, return only owned shared entities
578  */
579  ErrorCode get_shared_entities( int other_proc,
580  Range& shared_ents,
581  int dim = -1,
582  const bool iface = false,
583  const bool owned_filter = false );
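Editorial example: a sketch listing the owned interface vertices shared with any other processor, using the declaration above.

    moab::ErrorCode shared_iface_verts( moab::ParallelComm* pc, moab::Range& verts )
    {
        return pc->get_shared_entities( -1 /*any proc*/, verts, 0 /*vertices*/,
                                        true /*iface only*/, true /*owned only*/ );
    }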
584  /*
585  //! return partition sets; if tag_name is input, gets sets with
586  //! that tag name, otherwise uses PARALLEL_PARTITION tag
587  ErrorCode get_partition_sets(EntityHandle this_set,
588  Range &part_sets,
589  const char *tag_name = NULL);
590  */
591  //! get processors with which this processor shares an interface
592  ErrorCode get_interface_procs( std::set< unsigned int >& iface_procs, const bool get_buffs = false );
593 
594  //! get processors with which this processor communicates
595  ErrorCode get_comm_procs( std::set< unsigned int >& procs );
596 
597  // ==================================
598  // \section SHARED SETS
599  // ==================================
600 
601  //! Get array of process IDs sharing a set. Returns zero
602  //! and passes back NULL if set is not shared.
603  ErrorCode get_entityset_procs( EntityHandle entity_set, std::vector< unsigned >& ranks ) const;
604 
605  //! Get rank of the owner of a shared set.
606  //! Returns this proc if set is not shared.
607  //! Optionally returns handle on owning process for shared set.
608  ErrorCode get_entityset_owner( EntityHandle entity_set,
609  unsigned& owner_rank,
610  EntityHandle* remote_handle = 0 ) const;
611 
612  //! Given set owner and handle on owner, find local set handle
613  ErrorCode get_entityset_local_handle( unsigned owning_rank,
614  EntityHandle remote_handle,
615  EntityHandle& local_handle ) const;
616 
617  //! Get all shared sets
618  ErrorCode get_shared_sets( Range& result ) const;
619 
620  //! Get ranks of all processes that own at least one set that is
621  //! shared with this process. Will include the rank of this process
622  //! if this process owns any shared set.
623  ErrorCode get_entityset_owners( std::vector< unsigned >& ranks ) const;
624 
625  //! Get shared sets owned by process with specified rank.
626  ErrorCode get_owned_sets( unsigned owning_rank, Range& sets_out ) const;
627 
628  // ==================================
629  // \section LOW-LEVEL DATA (tags, sets on interface/partition, etc.)
630  // ==================================
631 
632  //! Get proc config for this communication object
633  const ProcConfig& proc_config() const
634  {
635  return procConfig;
636  }
637 
638  //! Get proc config for this communication object
639  ProcConfig& proc_config()
640  {
641  return procConfig;
642  }
643 
644  unsigned rank() const
645  {
646  return proc_config().proc_rank();
647  }
648  unsigned size() const
649  {
650  return proc_config().proc_size();
651  }
652  MPI_Comm comm() const
653  {
654  return proc_config().proc_comm();
655  }
656 
657  //! return the tags used to indicate shared procs and handles
658  ErrorCode get_shared_proc_tags( Tag& sharedp_tag,
659  Tag& sharedps_tag,
660  Tag& sharedh_tag,
661  Tag& sharedhs_tag,
662  Tag& pstatus_tag );
663 
664  //! return partition, interface set ranges
665  Range& partition_sets()
666  {
667  return partitionSets;
668  }
669  const Range& partition_sets() const
670  {
671  return partitionSets;
672  }
673  Range& interface_sets()
674  {
675  return interfaceSets;
676  }
677  const Range& interface_sets() const
678  {
679  return interfaceSets;
680  }
681 
682  //! return sharedp tag
683  Tag sharedp_tag();
684 
685  //! return sharedps tag
686  Tag sharedps_tag();
687 
688  //! return sharedh tag
689  Tag sharedh_tag();
690 
691  //! return sharedhs tag
692  Tag sharedhs_tag();
693 
694  //! return pstatus tag
695  Tag pstatus_tag();
696 
697  //! return pcomm tag; static because might not have a pcomm before going
698  //! to look for one on the interface
699  static Tag pcomm_tag( Interface* impl, bool create_if_missing = true );
700 
701  //! return partitions set tag
702  Tag partition_tag();
703  Tag part_tag()
704  {
705  return partition_tag();
706  }
707 
708  // ==================================
709  // \section DEBUGGING AIDS
710  // ==================================
711 
712  //! print contents of pstatus value in human-readable form
713  void print_pstatus( unsigned char pstat, std::string& ostr );
714 
715  //! print contents of pstatus value in human-readable form to std::cout
716  void print_pstatus( unsigned char pstat );
717 
718  // ==================================
719  // \section IMESHP-RELATED FUNCTIONS
720  // ==================================
721 
722  //! return all the entities in parts owned locally
723  ErrorCode get_part_entities( Range& ents, int dim = -1 );
724 
725  EntityHandle get_partitioning() const
726  {
727  return partitioningSet;
728  }
729  ErrorCode set_partitioning( EntityHandle h );
730  ErrorCode get_global_part_count( int& count_out ) const;
731  ErrorCode get_part_owner( int part_id, int& owner_out ) const;
732  ErrorCode get_part_id( EntityHandle part, int& id_out ) const;
733  ErrorCode get_part_handle( int id, EntityHandle& handle_out ) const;
734  ErrorCode create_part( EntityHandle& part_out );
735  ErrorCode destroy_part( EntityHandle part );
736  ErrorCode collective_sync_partition();
737  ErrorCode get_part_neighbor_ids( EntityHandle part, int neighbors_out[MAX_SHARING_PROCS], int& num_neighbors_out );
738  ErrorCode get_interface_sets( EntityHandle part, Range& iface_sets_out, int* adj_part_id = 0 );
739  ErrorCode get_owning_part( EntityHandle entity, int& owning_part_id_out, EntityHandle* owning_handle = 0 );
740  ErrorCode get_sharing_parts( EntityHandle entity,
741  int part_ids_out[MAX_SHARING_PROCS],
742  int& num_part_ids_out,
743  EntityHandle remote_handles[MAX_SHARING_PROCS] = 0 );
744 
745  /** Filter the entities by pstatus tag.
746  * op is one of PSTATUS_AND, PSTATUS_OR, or PSTATUS_NOT; an entity is output if:
747  * AND: all bits set in pstatus_val are also set on entity
748  * OR: any bits set in pstatus_val also set on entity
749  * NOT: any bits set in pstatus_val are not set on entity
750  *
751  * Results returned in input list, unless result_ents is passed in non-null,
752  * in which case results are returned in result_ents.
753  *
754  * If ents is passed in empty, filter is done on shared entities in this
755  * pcomm instance, i.e. contents of sharedEnts.
756  *
757  *\param ents Input entities to filter
758  *\param pstatus_val pstatus value to which entities are compared
759  *\param op Bitwise operation performed between pstatus values
760  *\param to_proc If non-negative and PSTATUS_SHARED is set on pstatus_val,
761  * only entities shared with to_proc are returned
762  *\param result_ents If non-null, results of filter are put in the
763  * pointed-to range
764  */
765  ErrorCode filter_pstatus( Range& ents,
766  const unsigned char pstatus_val,
767  const unsigned char op,
768  int to_proc = -1,
769  Range* returned_ents = NULL );
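Editorial example: a sketch that keeps only locally owned entities, assuming the declaration above is filter_pstatus() and that the PSTATUS_NOT_OWNED / PSTATUS_NOT constants from MBParallelConventions.h are in scope.

    #include "MBParallelConventions.h"

    moab::ErrorCode keep_owned( moab::ParallelComm* pc, moab::Range& ents, moab::Range& owned )
    {
        // NOT + NOT_OWNED: output entities are those without the NOT_OWNED bit set.
        return pc->filter_pstatus( ents, PSTATUS_NOT_OWNED, PSTATUS_NOT, -1, &owned );
    }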
770 
771  /** \brief Get entities on interfaces shared with another proc
772  *
773  * \param other_proc Other proc sharing the interface
774  * \param dim Dimension of entities to return, -1 if all dims
775  * \param iface_ents Returned entities
776  */
777  ErrorCode get_iface_entities( int other_proc, int dim, Range& iface_ents );
778 
779  Interface* get_moab() const
780  {
781  return mbImpl;
782  }
783 
784  ErrorCode clean_shared_tags( std::vector< Range* >& exchange_ents );
785 
786  class Buffer
787  {
788  public:
789  unsigned char* mem_ptr;
790  unsigned char* buff_ptr;
791  unsigned int alloc_size;
792 
793  Buffer( unsigned int sz = 0 );
794  Buffer( const Buffer& );
795  ~Buffer();
796  void reset_buffer( size_t buff_pos = 0 )
797  {
798  reset_ptr( buff_pos );
800  }
801  void reset_ptr( size_t buff_pos = 0 )
802  {
803  assert( ( !mem_ptr && !buff_pos ) || ( alloc_size >= buff_pos ) );
804  buff_ptr = mem_ptr + buff_pos;
805  }
806  inline void reserve( unsigned int new_size );
807  void set_stored_size()
808  {
809  *( (int*)mem_ptr ) = (int)( buff_ptr - mem_ptr );
810  }
811  int get_stored_size()
812  {
813  return *( (int*)mem_ptr );
814  }
815  int get_current_size()
816  {
817  return (int)( buff_ptr - mem_ptr );
818  }
819 
820  void check_space( unsigned int addl_space );
821  };
822 
823  //! public 'cuz we want to unit test these externally
824  ErrorCode pack_buffer( Range& orig_ents,
825  const bool adjacencies,
826  const bool tags,
827  const bool store_remote_handles,
828  const int to_proc,
829  Buffer* buff,
830  TupleList* entprocs = NULL,
831  Range* allsent = NULL );
832 
833  ErrorCode unpack_buffer( unsigned char* buff_ptr,
834  const bool store_remote_handles,
835  const int from_proc,
836  const int ind,
837  std::vector< std::vector< EntityHandle > >& L1hloc,
838  std::vector< std::vector< EntityHandle > >& L1hrem,
839  std::vector< std::vector< int > >& L1p,
840  std::vector< EntityHandle >& L2hloc,
841  std::vector< EntityHandle >& L2hrem,
842  std::vector< unsigned int >& L2p,
843  std::vector< EntityHandle >& new_ents,
844  const bool created_iface = false );
845 
846  ErrorCode pack_entities( Range& entities,
847  Buffer* buff,
848  const bool store_remote_handles,
849  const int to_proc,
850  const bool is_iface,
851  TupleList* entprocs = NULL,
852  Range* allsent = NULL );
853 
854  //! unpack entities in buff_ptr
855  ErrorCode unpack_entities( unsigned char*& buff_ptr,
856  const bool store_remote_handles,
857  const int from_ind,
858  const bool is_iface,
859  std::vector< std::vector< EntityHandle > >& L1hloc,
860  std::vector< std::vector< EntityHandle > >& L1hrem,
861  std::vector< std::vector< int > >& L1p,
862  std::vector< EntityHandle >& L2hloc,
863  std::vector< EntityHandle >& L2hrem,
864  std::vector< unsigned int >& L2p,
865  std::vector< EntityHandle >& new_ents,
866  const bool created_iface = false );
867 
868  //! Call exchange_all_shared_handles, then compare the results with tag data
869  //! on local shared entities.
870  ErrorCode check_all_shared_handles( bool print_em = false );
871 
872  static ErrorCode check_all_shared_handles( ParallelComm** pcs, int num_pcs );
873 
874  struct SharedEntityData
875  {
876  EntityHandle local;
877  EntityHandle remote;
878  EntityID owner;
879  };
880 
881  ErrorCode pack_shared_handles( std::vector< std::vector< SharedEntityData > >& send_data );
882 
883  // check consistency of sharedEnts against their tags and their
884  // vertices' tags
885  ErrorCode check_local_shared();
886 
887  // check contents of communicated shared entity data against tags
888  ErrorCode check_my_shared_handles( std::vector< std::vector< SharedEntityData > >& shents,
889  const char* prefix = NULL );
890 
891  //! set rank for this pcomm; USED FOR TESTING ONLY!
892  void set_rank( unsigned int r );
893 
894  //! set rank for this pcomm; USED FOR TESTING ONLY!
895  void set_size( unsigned int r );
896 
897  //! get (and possibly allocate) buffers for messages to/from to_proc; returns
898  //! index of to_proc in buffProcs vector; if is_new is non-NULL, sets to
899  //! whether new buffer was allocated
900  //! PUBLIC ONLY FOR TESTING!
901  int get_buffers( int to_proc, bool* is_new = NULL );
902 
903  //! get buff processor vector
904  const std::vector< unsigned int >& buff_procs() const;
905 
906  /* \brief Unpack message with remote handles
907  * PUBLIC ONLY FOR TESTING!
908  */
909  ErrorCode unpack_remote_handles( unsigned int from_proc,
910  unsigned char*& buff_ptr,
911  std::vector< EntityHandle >& L2hloc,
912  std::vector< EntityHandle >& L2hrem,
913  std::vector< unsigned int >& L2p );
914 
915  /* \brief Pack message with remote handles
916  * PUBLIC ONLY FOR TESTING!
917  */
918  ErrorCode pack_remote_handles( std::vector< EntityHandle >& L1hloc,
919  std::vector< EntityHandle >& L1hrem,
920  std::vector< int >& procs,
921  unsigned int to_proc,
922  Buffer* buff );
923 
924  // each entry in proc_nvecs contains a set of procs and the entities *possibly*
925  // on the interface between those procs; this function makes sets for each,
926  // and tags the set with the procs sharing it; interface sets are optionally
927  // returned; NOTE: a subsequent step is used to verify entities on the interface
928  // and remove them if they're not shared
929  ErrorCode create_interface_sets( std::map< std::vector< int >, std::vector< EntityHandle > >& proc_nvecs );
930 
931  // do the same but working straight from sharedEnts
932  ErrorCode create_interface_sets( EntityHandle this_set, int resolve_dim, int shared_dim );
933 
934  ErrorCode tag_shared_verts( TupleList& shared_ents,
935  std::map< std::vector< int >, std::vector< EntityHandle > >& proc_nvecs,
936  Range& proc_verts,
937  unsigned int i_extra = 1 );
938 
939  ErrorCode list_entities( const EntityHandle* ents, int num_ents );
940 
941  ErrorCode list_entities( const Range& ents );
942 
943  void set_send_request( int n_request ); // set send request array
944 
945  void set_recv_request( int n_request ); // set recv request array
946 
947  //! reset message buffers to their initial state
948  // changed to public function (HJK)
949  void reset_all_buffers();
950 
951  static const unsigned int INITIAL_BUFF_SIZE;
952 
953  //! set the verbosity level of output from this pcomm
954  void set_debug_verbosity( int verb );
955 
956  //! get the verbosity level of output from this pcomm
957  int get_debug_verbosity();
958 
959  /* \brief Gather tag value from entities down to a specified root proc
960  * This function gathers data from a domain-decomposed mesh onto a global mesh
961  * represented on the root processor. On the root, this gather mesh is distinct from
962  * the root's domain-decomposed subdomain. Entities are matched by global id, or by
963  * another tag if its handle is input. The dimension of all entities in gather_ents should
964  * be the same, since this is the dimension of entities in gather_set that are queried for
965  * matching global id tags.
966  * \param gather_ents (Local) entities from which to gather data
967  * \param tag_handle Tag whose values are being gathered
968  * \param id_tag Tag to use for matching entities (global id used by default)
969  * \param gather_set On root, set containing global mesh onto which to put data
970  * \param root_proc_rank Rank of the specified root processor (default rank is 0)
971  */
972  ErrorCode gather_data( Range& gather_ents,
973  Tag& tag_handle,
974  Tag id_tag = 0,
975  EntityHandle gather_set = 0,
976  int root_proc_rank = 0 );
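Editorial example: a sketch gathering a tag from the distributed mesh onto a gather set on rank 0, matching entities by GLOBAL_ID (the default when id_tag is 0); gather_set is assumed to hold the global mesh on the root.

    moab::ErrorCode gather_to_root( moab::ParallelComm* pc,
                                    moab::Range& local_ents,
                                    moab::Tag data_tag,
                                    moab::EntityHandle gather_set )
    {
        return pc->gather_data( local_ents, data_tag, 0 /*id_tag*/, gather_set, 0 /*root*/ );
    }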
977 
978  /* \brief Communicate positions of extra points on the boundary
979  * This function is called after the intersection of 2 meshes, to settle the
980  * positions of the intersection points on the boundary (interface).
981  * The initial mesh distributed on each processor is decomposed after
982  * intersection with another mesh, such that new points are created on the
983  * boundary. These points should match at the interface!
984  * We perform an extra step, as a precaution, to ensure the robustness of the
985  * intersection algorithm; only the extra nodes on shared edges
986  * actually need to be communicated, but we pass the whole extraNodesVec
987  * structure by reference, so we do
988  * not need to construct another data structure.
989  * The node positions on edges that are owned will be communicated to other
990  * processors.
991  *
992  * \param edges total range of entities
993  * \param shared_edges_owned edges for which to communicate data
994  * \param extraNodesVec handles of intersection vertices on all edges;
995  */
996  ErrorCode settle_intersection_points( Range& edges,
997  Range& shared_edges_owned,
998  std::vector< std::vector< EntityHandle >* >& extraNodesVec,
999  double tolerance );
1000 
1001  /* \brief Delete entities from the MOAB database
1002  * Will check the shared entities array, and clean it if necessary.
1003  *
1004  */
1005  ErrorCode delete_entities( Range& to_delete );
1006 
1007  /*
1008  * \brief correct multi-sharing info for thin layers
1009  *
1010  * Used for at least 3 processes, when there are thin ghost layers.
1011  * Right now it is public, to allow users to call it directly;
1012  * eventually it should become private and be called automatically.
1013  */
1014 
1015  ErrorCode correct_thin_ghost_layers();
1016 
1017  private:
1018  ErrorCode reduce_void( int tag_data_type, const MPI_Op mpi_op, int num_ents, void* old_vals, void* new_vals );
1019 
1020  template < class T >
1021  ErrorCode reduce( const MPI_Op mpi_op, int num_ents, void* old_vals, void* new_vals );
1022 
1023  void print_debug_isend( int from, int to, unsigned char* buff, int tag, int size );
1024 
1025  void print_debug_irecv( int to, int from, unsigned char* buff, int size, int tag, int incoming );
1026 
1027  void print_debug_recd( MPI_Status status );
1028 
1029  void print_debug_waitany( std::vector< MPI_Request >& reqs, int tag, int proc );
1030 
1031  // common initialization code, called from various constructors
1032  void initialize();
1033 
1034  ErrorCode set_sharing_data( EntityHandle ent,
1035  unsigned char pstatus,
1036  int old_nump,
1037  int new_nump,
1038  int* ps,
1039  EntityHandle* hs );
1040 
1041  ErrorCode check_clean_iface( Range& allsent );
1042 
1043  void define_mpe();
1044 
1045  ErrorCode get_sent_ents( const bool is_iface,
1046  const int bridge_dim,
1047  const int ghost_dim,
1048  const int num_layers,
1049  const int addl_ents,
1050  Range* sent_ents,
1051  Range& allsent,
1052  TupleList& entprocs );
1053 
1054  /** \brief Set pstatus values on entities
1055  *
1056  * \param pstatus_ents Entities to be set
1057  * \param pstatus_val Pstatus value to be set
1058  * \param lower_dim_ents If true, lower-dimensional ents (incl. vertices) set too
1059  * (and created if they don't exist)
1060  * \param verts_too If true, vertices also set
1061  * \param operation If UNION, pstatus_val is OR-d with existing value, otherwise
1062  * existing value is over-written
1063  */
1064  ErrorCode set_pstatus_entities( Range& pstatus_ents,
1065  unsigned char pstatus_val,
1066  bool lower_dim_ents = false,
1067  bool verts_too = true,
1068  int operation = Interface::UNION );
1069 
1070  /** \brief Set pstatus values on entities (vector-based function)
1071  *
1072  * \param pstatus_ents Entities to be set
1073  * \param pstatus_val Pstatus value to be set
1074  * \param lower_dim_ents If true, lower-dimensional ents (incl. vertices) set too
1075  * (and created if they don't exist)
1076  * \param verts_too If true, vertices also set
1077  * \param operation If UNION, pstatus_val is OR-d with existing value, otherwise
1078  * existing value is over-written
1079  */
1080  ErrorCode set_pstatus_entities( EntityHandle* pstatus_ents,
1081  int num_ents,
1082  unsigned char pstatus_val,
1083  bool lower_dim_ents = false,
1084  bool verts_too = true,
1085  int operation = Interface::UNION );
1086 
1087  //! estimate size required to pack entities
1088  int estimate_ents_buffer_size( Range& entities, const bool store_remote_handles );
1089 
1090  //! estimate size required to pack sets
1091  int estimate_sets_buffer_size( Range& entities, const bool store_remote_handles );
1092 
1093  //! send the indicated buffer, possibly sending size first
1094  ErrorCode send_buffer( const unsigned int to_proc,
1095  Buffer* send_buff,
1096  const int msg_tag,
1097  MPI_Request& send_req,
1098  MPI_Request& ack_recv_req,
1099  int* ack_buff,
1100  int& this_incoming,
1101  int next_mesg_tag = -1,
1102  Buffer* next_recv_buff = NULL,
1103  MPI_Request* next_recv_req = NULL,
1104  int* next_incoming = NULL );
1105 
1106  //! process incoming message; if longer than the initial size, post
1107  //! recv for next part then send ack; if ack, send second part; else
1108  //! indicate that we're done and buffer is ready for processing
1109  ErrorCode recv_buffer( int mesg_tag_expected,
1110  const MPI_Status& mpi_status,
1111  Buffer* recv_buff,
1112  MPI_Request& recv_2nd_req,
1113  MPI_Request& ack_req,
1114  int& this_incoming,
1115  Buffer* send_buff,
1116  MPI_Request& send_req,
1117  MPI_Request& sent_ack_req,
1118  bool& done,
1119  Buffer* next_buff = NULL,
1120  int next_tag = -1,
1121  MPI_Request* next_req = NULL,
1122  int* next_incoming = NULL );
1123 
1124  //! pack a range of entities with equal # verts per entity, along with
1125  //! the range on the sending proc
1126  ErrorCode pack_entity_seq( const int nodes_per_entity,
1127  const bool store_remote_handles,
1128  const int to_proc,
1129  Range& these_ents,
1130  std::vector< EntityHandle >& entities,
1131  Buffer* buff );
1132 
1133  ErrorCode print_buffer( unsigned char* buff_ptr, int mesg_type, int from_proc, bool sent );
1134 
1135  //! for all the entities in the received buffer; for each, save
1136  //! entities in this instance which match connectivity, or zero if none found
1137  ErrorCode unpack_iface_entities( unsigned char*& buff_ptr,
1138  const int from_proc,
1139  const int ind,
1140  std::vector< EntityHandle >& recd_ents );
1141 
1142  ErrorCode pack_sets( Range& entities, Buffer* buff, const bool store_handles, const int to_proc );
1143 
1144  ErrorCode unpack_sets( unsigned char*& buff_ptr,
1145  std::vector< EntityHandle >& entities,
1146  const bool store_handles,
1147  const int to_proc );
1148 
1149  ErrorCode pack_adjacencies( Range& entities,
1150  Range::const_iterator& start_rit,
1151  Range& whole_range,
1152  unsigned char*& buff_ptr,
1153  int& count,
1154  const bool just_count,
1155  const bool store_handles,
1156  const int to_proc );
1157 
1158  ErrorCode unpack_adjacencies( unsigned char*& buff_ptr,
1159  Range& entities,
1160  const bool store_handles,
1161  const int from_proc );
1162 
1163  /* \brief Unpack message with remote handles (const pointer to buffer)
1164  */
1165  ErrorCode unpack_remote_handles( unsigned int from_proc,
1166  const unsigned char* buff_ptr,
1167  std::vector< EntityHandle >& L2hloc,
1168  std::vector< EntityHandle >& L2hrem,
1169  std::vector< unsigned int >& L2p );
1170 
1171  //! given connectivity and type, find an existing entity, if there is one
1172  ErrorCode find_existing_entity( const bool is_iface,
1173  const int owner_p,
1174  const EntityHandle owner_h,
1175  const int num_ents,
1176  const EntityHandle* connect,
1177  const int num_connect,
1178  const EntityType this_type,
1179  std::vector< EntityHandle >& L2hloc,
1180  std::vector< EntityHandle >& L2hrem,
1181  std::vector< unsigned int >& L2p,
1182  EntityHandle& new_h );
1183 
1184  ErrorCode build_sharedhps_list( const EntityHandle entity,
1185  const unsigned char pstatus,
1186  const int sharedp,
1187  const std::set< unsigned int >& procs,
1188  unsigned int& num_ents,
1189  int* tmp_procs,
1190  EntityHandle* tmp_handles );
1191 
1192  /**\brief Get list of tags for which to exchange data
1193  *
1194  * Get tags and entities for which to exchange tag data. This function
1195  * was originally part of 'pack_tags' requested with the
1196  * 'all_possible_tags' parameter.
1197  *
1198  *\param all_entities Input. The set of entities for which data is to
1199  * be communicated.
1200  *\param all_tags Output. Populated with the handles of tags to be
1201  * sent.
1202  *\param tag_ranges Output. For each corresponding tag in all_tags, the
1203  * subset of 'all_entities' for which a tag value has
1204  * been set.
1205  */
1206  ErrorCode get_tag_send_list( const Range& all_entities,
1207  std::vector< Tag >& all_tags,
1208  std::vector< Range >& tag_ranges );
1209 
1210  /**\brief Serialize entity tag data
1211  *
1212  * This function operates in two passes. The first phase,
1213  * specified by 'just_count == true' calculates the necessary
1214  * buffer size for the serialized data. The second phase
1215  * writes the actual binary serialized representation of the
1216  * data to the passed buffer.
1217  *
1218  *\NOTE First two arguments are not used. (Legacy interface?)
1219  *
1220  *\param entities NOT USED
1221  *\param start_rit NOT USED
1222  *\param whole_range Should be the union of the sets of entities for
1223  * which tag values are to be serialized. Also
1224  * specifies ordering for indexes for tag values and
1225  * serves as the superset from which to compose entity
1226  * lists from individual tags if just_count and
1227  * all_possible_tags are both true.
1228  *\param buff_ptr Buffer into which to write binary serialized data
1229  *\param count Output: The size of the serialized data is added
1230  * to this parameter. NOTE: Should probably initialize
1231  * to zero before calling.
1232  *\param just_count If true, just calculate the buffer size required to
1233  * hold the serialized data. Will also append to
1234  * 'all_tags' and 'tag_ranges' if all_possible_tags
1235  * == true.
1236  *\param store_handles The data for each tag is preceded by a list of
1237  * EntityHandles designating the entity each of
1238  * the subsequent tag values corresponds to. This value
1239  * may be one of:
1240  * 1) If store_handles == false:
1241  * An invalid handle composed of {MBMAXTYPE,idx}, where
1242  * idx is the position of the entity in "whole_range".
1243  * 2) If store_handles == true and a valid remote
1244  * handle exists, the remote handle.
1245  * 3) If store_handles == true and no valid remote
1246  * handle is defined for the entity, the same as 1).
1247  *\param to_proc If 'store_handles' is true, the processor rank for
1248  * which to store the corresponding remote entity
1249  * handles.
1250  *\param all_tags List of tags to write
1251  *\param tag_ranges List of entities to serialize tag data, one
1252  * for each corresponding tag handle in 'all_tags'.
1253  */
1254  ErrorCode pack_tags( Range& entities,
1255  const std::vector< Tag >& src_tags,
1256  const std::vector< Tag >& dst_tags,
1257  const std::vector< Range >& tag_ranges,
1258  Buffer* buff,
1259  const bool store_handles,
1260  const int to_proc );
1261 
1262  /**\brief Calculate buffer size required to pack tag data
1263  *\param source_tag The tag for which data will be serialized
1264  *\param entities The entities for which tag values will be serialized
1265  *\param count_out Output: The required buffer size, in bytes.
1266  */
1267  ErrorCode packed_tag_size( Tag source_tag, const Range& entities, int& count_out );
1268 
1269  /**\brief Serialize tag data
1270  *\param source_tag The tag for which data will be serialized
1271  *\param destination_tag Tag in which to store unpacked tag data. Typically
1272  * the same as source_tag.
1273  *\param entities The entities for which tag values will be serialized
1274  *\param whole_range Calculate entity indices as location in this range
1275  *\param buff_ptr Input/Output: As input, pointer to the start of the
1276  * buffer in which to serialize data. As output, the
1277  * position just passed the serialized data.
1278  *\param count_out Output: The required buffer size, in bytes.
1279  *\param store_handles The data for each tag is preceded by a list of
1280  * EntityHandles designating the entity each of
1281  * the subsequent tag values corresponds to. This value
1282  * may be one of:
1283  * 1) If store_handles == false:
1284  * An invalid handle composed of {MBMAXTYPE,idx}, where
1285  * idx is the position of the entity in "whole_range".
1286  * 2) If store_handles == true and a valid remote
1287  * handle exists, the remote handle.
1288  * 3) If store_handles == true and no valid remote
1289  * handle is defined for the entity, the same as 1).
1290  *\param to_proc If 'store_handles' is true, the processor rank for
1291  * which to store the corresponding remote entity
1292  * handles.
1293  */
1294  ErrorCode pack_tag( Tag source_tag,
1295  Tag destination_tag,
1296  const Range& entities,
1297  const std::vector< EntityHandle >& whole_range,
1298  Buffer* buff,
1299  const bool store_remote_handles,
1300  const int to_proc );
1301 
1302  ErrorCode unpack_tags( unsigned char*& buff_ptr,
1303  std::vector< EntityHandle >& entities,
1304  const bool store_handles,
1305  const int to_proc,
1306  const MPI_Op* const mpi_op = NULL );
1307 
1308  ErrorCode tag_shared_verts( TupleList& shared_verts,
1309  Range* skin_ents,
1310  std::map< std::vector< int >, std::vector< EntityHandle > >& proc_nvecs,
1311  Range& proc_verts );
1312 
1313  ErrorCode get_proc_nvecs( int resolve_dim,
1314  int shared_dim,
1315  Range* skin_ents,
1316  std::map< std::vector< int >, std::vector< EntityHandle > >& proc_nvecs );
1317 
1318  // after verifying shared entities, now parent/child links between sets can be established
1319  ErrorCode create_iface_pc_links();
1320 
1321  //! pack a range map with keys in this_range and values a contiguous series
1322  //! of handles starting at actual_start
1323  ErrorCode pack_range_map( Range& this_range, EntityHandle actual_start, HandleMap& handle_map );
1324 
1325  //! returns true if the set is an interface shared with to_proc
1326  bool is_iface_proc( EntityHandle this_set, int to_proc );
1327 
1328  //! for any remote_handles set to zero, remove corresponding sent_ents from
1329  //! iface_sets corresponding to from_proc
1330  ErrorCode update_iface_sets( Range& sent_ents, std::vector< EntityHandle >& remote_handles, int from_proc );
1331 
1332  //! for specified bridge/ghost dimension, to_proc, and number
1333  //! of layers, get the entities to be ghosted, and info on additional procs
1334  //! needing to communicate with to_proc
1335  ErrorCode get_ghosted_entities( int bridge_dim,
1336  int ghost_dim,
1337  int to_proc,
1338  int num_layers,
1339  int addl_ents,
1340  Range& ghosted_ents );
1341 
1342  //! add vertices adjacent to entities in this list
1343  ErrorCode add_verts( Range& sent_ents );
1344 
1345  //! Every processor sends shared entity handle data to every other processor
1346  //! that it shares entities with. Passed back map is all received data,
1347  //! indexed by processor ID. This function is intended to be used for
1348  //! debugging.
1349  ErrorCode exchange_all_shared_handles( std::vector< std::vector< SharedEntityData > >& send_data,
1350  std::vector< std::vector< SharedEntityData > >& result );
1351 
1352  //! replace handles in from_vec with corresponding handles on
1353  //! to_proc (by checking shared[p/h]_tag and shared[p/h]s_tag;
1354  //! if no remote handle and new_ents is non-null, substitute
1355  //! instead CREATE_HANDLE(MBMAXTYPE, index) where index is handle's
1356  //! position in new_ents
1357  ErrorCode get_remote_handles( const bool store_remote_handles,
1358  EntityHandle* from_vec,
1359  EntityHandle* to_vec_tmp,
1360  int num_ents,
1361  int to_proc,
1362  const std::vector< EntityHandle >& new_ents );
1363 
1364  //! same as other version, except from_range and to_range should be
1365  //! different here
1366  ErrorCode get_remote_handles( const bool store_remote_handles,
1367  const Range& from_range,
1368  Range& to_range,
1369  int to_proc,
1370  const std::vector< EntityHandle >& new_ents );
1371 
1372  //! same as other version, except packs range into vector
1373  ErrorCode get_remote_handles( const bool store_remote_handles,
1374  const Range& from_range,
1375  EntityHandle* to_vec,
1376  int to_proc,
1377  const std::vector< EntityHandle >& new_ents );
1378 
1379  //! goes through from_vec, and for any with type MBMAXTYPE, replaces with
1380  //! new_ents value at index corresponding to id of entity in from_vec
1381  ErrorCode get_local_handles( EntityHandle* from_vec, int num_ents, const Range& new_ents );
1382 
1383  //! same as above except puts results in range
1384  ErrorCode get_local_handles( const Range& remote_handles,
1385  Range& local_handles,
1386  const std::vector< EntityHandle >& new_ents );
1387 
1388  //! same as above except gets new_ents from vector
1389  ErrorCode get_local_handles( EntityHandle* from_vec, int num_ents, const std::vector< EntityHandle >& new_ents );
1390 
1391  ErrorCode update_remote_data( Range& local_range,
1392  Range& remote_range,
1393  int other_proc,
1394  const unsigned char add_pstat );
1395 
1396  ErrorCode update_remote_data( const EntityHandle new_h,
1397  const int* ps,
1398  const EntityHandle* hs,
1399  const int num_ps,
1400  const unsigned char add_pstat );
1401 
1402  ErrorCode update_remote_data_old( const EntityHandle new_h,
1403  const int* ps,
1404  const EntityHandle* hs,
1405  const int num_ps,
1406  const unsigned char add_pstat );
1407 
1408  /** \brief Set pstatus tag interface bit on entities in sets passed in
1409  */
1410  ErrorCode tag_iface_entities();
1411 
1412  //! add a pc to the iface instance tag PARALLEL_COMM
1413  int add_pcomm( ParallelComm* pc );
1414 
1415  //! remove a pc from the iface instance tag PARALLEL_COMM
1416  void remove_pcomm( ParallelComm* pc );
1417 
1418  //! check entities to make sure there are no zero-valued remote handles
1419  //! where they shouldn't be
1420  ErrorCode check_sent_ents( Range& allsent );
1421 
1422  //! assign entities to the input processor part
1423  ErrorCode assign_entities_part( std::vector< EntityHandle >& entities, const int proc );
1424 
1425  //! remove entities from the input processor part
1426  ErrorCode remove_entities_part( Range& entities, const int proc );
1427 
1428  //! MB interface associated with this ParallelComm instance
1429  Interface* mbImpl;
1430 
1431  //! Proc config object, keeps info on parallel stuff
1432  ProcConfig procConfig;
1433 
1434  //! Sequence manager, to get more efficient access to entities
1435  SequenceManager* sequenceManager;
1436 
1437  //! Error handler
1438  Error* errorHandler;
1439 
1440  //! more data buffers, proc-specific
1441  std::vector< Buffer* > localOwnedBuffs, remoteOwnedBuffs;
1442 
1443  //! reset message buffers to their initial state
1444  // void reset_all_buffers();
1445 
1446  //! delete all buffers, freeing up any memory held by them
1447  void delete_all_buffers();
1448 
1449  //! send request objects; used if store_remote_handles is used
1450  std::vector< MPI_Request > sendReqs;
1451 
1452  //! receive request objects
1453  std::vector< MPI_Request > recvReqs, recvRemotehReqs;
1454 
1455  //! processor rank for each buffer index
1456  std::vector< unsigned int > buffProcs;
1457 
1458  //! the partition and interface sets for this communication instance
1459  Range partitionSets, interfaceSets;
1460 
1461  //! all local entities shared with others, whether ghost or ghosted
1462  std::set< EntityHandle > sharedEnts;
1463 
1464  //! tags used to save sharing procs and handles
1465  Tag sharedpTag, sharedpsTag, sharedhTag, sharedhsTag, pstatusTag, ifaceSetsTag, partitionTag;
1466 
1467  int globalPartCount; //!< Cache of global part count
1468 
1469  EntityHandle partitioningSet; //!< entity set containing all parts
1470 
1471  std::ofstream myFile;
1472 
1473  int pcommID;
1474 
1475  int ackbuff;
1476 
1477  //! used to set verbosity level and to report output
1478  DebugOutput* myDebug;
1479 
1480  //! Data about shared sets
1481  SharedSetData* sharedSetData;
1482 };
1483 
1484 inline ParallelComm::Buffer::Buffer( const Buffer& other_buff )
1485 {
1486  alloc_size = other_buff.alloc_size;
1487  mem_ptr = (unsigned char*)malloc( alloc_size );
1488  memcpy( mem_ptr, other_buff.mem_ptr, alloc_size );
1489  buff_ptr = mem_ptr + ( other_buff.buff_ptr - other_buff.mem_ptr );
1490 }
1491 
1492 inline ParallelComm::Buffer::Buffer( unsigned int new_size ) : mem_ptr( NULL ), buff_ptr( NULL ), alloc_size( 0 )
1493 {
1494  if( new_size ) this->reserve( new_size );
1495 }
1496 
1497 inline ParallelComm::Buffer::~Buffer()
1498 {
1499  if( mem_ptr )
1500  {
1501  free( mem_ptr );
1502  mem_ptr = NULL;
1503  }
1504 }
1505 
1506 #define DEBUG_BUFFER 0
1507 
1508 inline void ParallelComm::Buffer::reserve( unsigned int new_size )
1509 {
1510 
1511 #if DEBUG_BUFFER
1512  int tmp_pos = 0;
1513  if( mem_ptr )
1514  {
1515  tmp_pos = buff_ptr - mem_ptr;
1516  }
1517  buff_ptr = (unsigned char*)malloc( new_size );
1518  assert( 0 <= tmp_pos && tmp_pos <= (int)alloc_size );
1519  if( tmp_pos ) memcpy( buff_ptr, mem_ptr, tmp_pos );
1520  if( mem_ptr ) free( mem_ptr );
1521  mem_ptr = buff_ptr;
1522  alloc_size = new_size;
1523  buff_ptr = mem_ptr + tmp_pos;
1524 #else
1525  if( mem_ptr && alloc_size < new_size )
1526  {
1527  size_t tmp_pos = mem_ptr ? buff_ptr - mem_ptr : 0;
1528  mem_ptr = (unsigned char*)realloc( mem_ptr, new_size );
1529  alloc_size = new_size;
1530  buff_ptr = mem_ptr + tmp_pos;
1531  }
1532  else if( !mem_ptr )
1533  {
1534  mem_ptr = (unsigned char*)malloc( new_size );
1535  alloc_size = new_size;
1536  buff_ptr = mem_ptr;
1537  }
1538 #endif
1539 }
1540 
1541 inline void ParallelComm::Buffer::check_space( unsigned int addl_space )
1542 {
1543  assert( buff_ptr >= mem_ptr && buff_ptr <= mem_ptr + alloc_size );
1544  unsigned int new_size = buff_ptr - mem_ptr + addl_space;
1545  if( new_size > alloc_size ) reserve( 3 * new_size / 2 );
1546 }
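
// --- Illustrative sketch, not part of ParallelComm.hpp ----------------------
// check_space()/reserve() above implement a simple amortized growth policy:
// when the bytes already packed plus the requested additional space exceed the
// current allocation, the buffer grows to 1.5x the required size. The helper
// below shows the same arithmetic on a std::vector, purely for illustration.
#include <cstddef>
#include <vector>

inline void check_space_sketch( std::vector< unsigned char >& buf, std::size_t used, std::size_t addl_space )
{
    const std::size_t needed = used + addl_space;
    // over-allocate by 50% so repeated small pack calls do not each trigger a resize
    if( needed > buf.size() ) buf.resize( 3 * needed / 2 );
}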
1547 
1548 inline void ParallelComm::reset_all_buffers()
1549 {
1550  std::vector< Buffer* >::iterator vit;
1551  for( vit = localOwnedBuffs.begin(); vit != localOwnedBuffs.end(); ++vit )
1552  ( *vit )->reset_buffer();
1553  for( vit = remoteOwnedBuffs.begin(); vit != remoteOwnedBuffs.end(); ++vit )
1554  ( *vit )->reset_buffer();
1555 }
1556 
1557 inline void ParallelComm::delete_all_buffers()
1558 {
1559  std::vector< Buffer* >::iterator vit;
1560  for( vit = localOwnedBuffs.begin(); vit != localOwnedBuffs.end(); ++vit )
1561  delete( *vit );
1562  localOwnedBuffs.clear();
1563 
1564  for( vit = remoteOwnedBuffs.begin(); vit != remoteOwnedBuffs.end(); ++vit )
1565  delete( *vit );
1566  remoteOwnedBuffs.clear();
1567 }
1568 
1569 inline const std::vector< unsigned int >& ParallelComm::buff_procs() const
1570 {
1571  return buffProcs;
1572 }
1573 
1574 inline ErrorCode ParallelComm::get_shared_proc_tags( Tag& sharedp,
1575                                                      Tag& sharedps,
1576  Tag& sharedh,
1577  Tag& sharedhs,
1578  Tag& pstatus )
1579 {
1580  sharedp = sharedp_tag();
1581  sharedps = sharedps_tag();
1582  sharedh = sharedh_tag();
1583  sharedhs = sharedhs_tag();
1584  pstatus = pstatus_tag();
1585 
1586  return MB_SUCCESS;
1587 }
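
// --- Illustrative usage sketch, not part of ParallelComm.hpp ----------------
// Fetching the five sharing-related tag handles in one call; "pcomm" is an
// assumed, caller-provided ParallelComm instance.
#include "moab/ParallelComm.hpp"

inline moab::ErrorCode fetch_sharing_tags( moab::ParallelComm& pcomm )
{
    moab::Tag sharedp, sharedps, sharedh, sharedhs, pstatus;
    return pcomm.get_shared_proc_tags( sharedp, sharedps, sharedh, sharedhs, pstatus );
}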
1588 
1589 inline ErrorCode ParallelComm::exchange_tags( const char* tag_name, const Range& entities )
1590 {
1591  // get the tag handle
1592  std::vector< Tag > tags( 1 );
1593  ErrorCode result = mbImpl->tag_get_handle( tag_name, 0, MB_TYPE_OPAQUE, tags[0], MB_TAG_ANY );
1594  if( MB_SUCCESS != result )
1595  return result;
1596  else if( !tags[0] )
1597  return MB_TAG_NOT_FOUND;
1598 
1599  return exchange_tags( tags, tags, entities );
1600 }
1601 
1602 inline ErrorCode ParallelComm::exchange_tags( Tag tagh, const Range& entities )
1603 {
1604  // get the tag handle
1605  std::vector< Tag > tags;
1606  tags.push_back( tagh );
1607 
1608  return exchange_tags( tags, tags, entities );
1609 }
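
// --- Illustrative usage sketch, not part of ParallelComm.hpp ----------------
// Synchronizing one tag's values on shared/ghosted entities with the two
// convenience overloads above. "pcomm", "shared_ents", "data_tag" and the
// literal tag name are assumed/hypothetical; the tag must already exist on
// every participating processor.
#include "moab/ParallelComm.hpp"
#include "moab/Range.hpp"

inline moab::ErrorCode sync_tag_on_shared( moab::ParallelComm& pcomm,
                                           const moab::Range& shared_ents,
                                           moab::Tag data_tag )
{
    // by-handle overload
    moab::ErrorCode rval = pcomm.exchange_tags( data_tag, shared_ents );
    if( moab::MB_SUCCESS != rval ) return rval;

    // by-name overload; the handle is looked up internally
    return pcomm.exchange_tags( "data_tag_name", shared_ents );
}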
1610 
1611 inline ErrorCode ParallelComm::reduce_tags( const char* tag_name, const MPI_Op mpi_op, const Range& entities )
1612 {
1613  // get the tag handle
1614  std::vector< Tag > tags( 1 );
1615  ErrorCode result = mbImpl->tag_get_handle( tag_name, 0, MB_TYPE_OPAQUE, tags[0], MB_TAG_ANY );
1616  if( MB_SUCCESS != result )
1617  return result;
1618  else if( !tags[0] )
1619  return MB_TAG_NOT_FOUND;
1620 
1621  return reduce_tags( tags, tags, mpi_op, entities );
1622 }
1623 
1624 inline ErrorCode ParallelComm::reduce_tags( Tag tagh, const MPI_Op mpi_op, const Range& entities )
1625 {
1626  // get the tag handle
1627  std::vector< Tag > tags;
1628  tags.push_back( tagh );
1629 
1630  return reduce_tags( tags, tags, mpi_op, entities );
1631 }
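
// --- Illustrative usage sketch, not part of ParallelComm.hpp ----------------
// Reducing a tag across all sharing processors so every copy of a shared
// entity ends up with the combined value. "pcomm", "shared_ents" and
// "weight_tag" (assumed to be a numeric, e.g. double, tag) are hypothetical.
#include "moab/ParallelComm.hpp"
#include "moab/Range.hpp"

inline moab::ErrorCode accumulate_weights( moab::ParallelComm& pcomm,
                                           const moab::Range& shared_ents,
                                           moab::Tag weight_tag )
{
    // MPI_SUM adds the contributions from every sharing processor
    return pcomm.reduce_tags( weight_tag, MPI_SUM, shared_ents );
}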
1632 
1633 inline ErrorCode ParallelComm::get_comm_procs( std::set< unsigned int >& procs )
1634 {
1635  ErrorCode result = get_interface_procs( procs );
1636  if( MB_SUCCESS != result ) return result;
1637 
1638  std::copy( buffProcs.begin(), buffProcs.end(), std::inserter( procs, procs.begin() ) );
1639 
1640  return MB_SUCCESS;
1641 }
1642 
1643 inline ErrorCode ParallelComm::get_owner( EntityHandle entity, int& owner )
1644 {
1645  EntityHandle tmp_handle;
1646  return get_owner_handle( entity, owner, tmp_handle );
1647 }
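
// --- Illustrative usage sketch, not part of ParallelComm.hpp ----------------
// Counting how many of the given entities are owned by the local rank, using
// get_owner() above. "pcomm" and "ents" are assumed caller-provided names.
#include "moab/ParallelComm.hpp"
#include "moab/Range.hpp"

inline int count_owned( moab::ParallelComm& pcomm, const moab::Range& ents )
{
    int owned = 0;
    for( moab::Range::const_iterator it = ents.begin(); it != ents.end(); ++it )
    {
        int owner = -1;
        if( moab::MB_SUCCESS == pcomm.get_owner( *it, owner ) &&
            owner == (int)pcomm.proc_config().proc_rank() )
            ++owned;
    }
    return owned;
}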
1648 
1649 /** \brief Unpack message with remote handles (const pointer to buffer)
1650  */
1651 inline ErrorCode ParallelComm::unpack_remote_handles( unsigned int from_proc,
1652  const unsigned char* buff_ptr,
1653  std::vector< EntityHandle >& L2hloc,
1654  std::vector< EntityHandle >& L2hrem,
1655  std::vector< unsigned int >& L2p )
1656 {
1657  // cast away const-ness, we won't be passing back a modified ptr
1658  unsigned char* tmp_buff = const_cast< unsigned char* >( buff_ptr );
1659  return unpack_remote_handles( from_proc, tmp_buff, L2hloc, L2hrem, L2p );
1660 }
1661 
1662 inline void ParallelComm::set_rank( unsigned int r )
1663 {
1664  procConfig.proc_rank( r );
1665  if( procConfig.proc_size() < r ) procConfig.proc_size( r + 1 );
1666 }
1667 
1668 inline void ParallelComm::set_size( unsigned int s )
1669 {
1670  procConfig.proc_size( s );
1671 }
1672 
1673 inline ErrorCode ParallelComm::get_sharing_data( const EntityHandle* entities,
1674                                                  int num_entities,
1675  std::set< int >& procs,
1676  int op )
1677 {
1678  Range dum_range;
1679  // cast away constness because the entities array is passed as const
1680  EntityHandle* ents_cast = const_cast< EntityHandle* >( entities );
1681  std::copy( ents_cast, ents_cast + num_entities, range_inserter( dum_range ) );
1682  return get_sharing_data( dum_range, procs, op );
1683 }
1684 
1685 inline ErrorCode ParallelComm::get_sharing_data( const EntityHandle entity,
1686                                                  int* ps,
1687  EntityHandle* hs,
1688  unsigned char& pstat,
1689  int& num_ps )
1690 {
1691  unsigned int dum_ps;
1692  ErrorCode result = get_sharing_data( entity, ps, hs, pstat, dum_ps );
1693  if( MB_SUCCESS == result ) num_ps = dum_ps;
1694  return result;
1695 }
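
// --- Illustrative usage sketch, not part of ParallelComm.hpp ----------------
// Collecting the set of processors that share any of the given entities via
// the Range-based get_sharing_data(). "pcomm" and "ents" are assumed names;
// Interface::UNION requests the union over all entities rather than the
// intersection.
#include <set>
#include "moab/Interface.hpp"
#include "moab/ParallelComm.hpp"
#include "moab/Range.hpp"

inline moab::ErrorCode sharing_procs_of( moab::ParallelComm& pcomm,
                                         const moab::Range& ents,
                                         std::set< int >& procs )
{
    return pcomm.get_sharing_data( ents, procs, moab::Interface::UNION );
}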
1696 
1697 inline void ParallelComm::set_send_request( int n_request )
1698 {
1699  sendReqs.resize( n_request, MPI_REQUEST_NULL );
1700 }
1701 
1702 inline void ParallelComm::set_recv_request( int n_request )
1703 {
1704  recvReqs.resize( n_request, MPI_REQUEST_NULL );
1705 }
1706 } // namespace moab
1707 
1708 #endif