Mesh Oriented datABase  (version 5.5.1)
An array-based unstructured mesh library
ParallelComm.cpp
1 #include "moab/Interface.hpp"
2 #include "moab/ParallelComm.hpp"
4 #include "moab/ReadUtilIface.hpp"
5 #include "SequenceManager.hpp"
6 #include "moab/Error.hpp"
7 #include "EntitySequence.hpp"
8 #include "MBTagConventions.hpp"
9 #include "moab/Skinner.hpp"
10 #include "MBParallelConventions.h"
11 #include "moab/Core.hpp"
12 #include "ElementSequence.hpp"
13 #include "moab/CN.hpp"
14 #include "moab/RangeMap.hpp"
15 #include "moab/MeshTopoUtil.hpp"
16 #include "TagInfo.hpp"
17 #include "DebugOutput.hpp"
18 #include "SharedSetData.hpp"
19 #include "moab/ScdInterface.hpp"
20 #include "moab/TupleList.hpp"
21 #include "moab/gs.hpp"
22 
23 #include <iostream>
24 #include <sstream>
25 #include <algorithm>
26 #include <functional>
27 #include <numeric>
28 
29 #include <cmath>
30 #include <cstdlib>
31 #include <cassert>
32 
33 #ifdef MOAB_HAVE_MPI
34 #include "moab_mpi.h"
35 #endif
36 #ifdef MOAB_HAVE_MPE
37 #include "mpe.h"
38 int IFACE_START, IFACE_END;
39 int GHOST_START, GHOST_END;
40 int SHAREDV_START, SHAREDV_END;
41 int RESOLVE_START, RESOLVE_END;
42 int ENTITIES_START, ENTITIES_END;
43 int RHANDLES_START, RHANDLES_END;
44 int OWNED_START, OWNED_END;
45 #endif
46 
47 namespace moab
48 {
49 
50 const unsigned int ParallelComm::INITIAL_BUFF_SIZE = 1024;
51 
52 const int MAX_BCAST_SIZE = ( 1 << 28 );
53 
54 std::vector< ParallelComm::Buffer* > msgs;
55 unsigned int __PACK_num = 0, __UNPACK_num = 0, __PACK_count = 0, __UNPACK_count = 0;
56 std::string __PACK_string, __UNPACK_string;
57 
58 #ifdef DEBUG_PACKING_TIMES
59 #define PC( n, m ) \
60  { \
61  if( __PACK_num == (unsigned int)n && __PACK_string == m ) \
62  __PACK_count++; \
63  else \
64  { \
65  if( __PACK_count > 1 ) std::cerr << " (" << __PACK_count << "x)"; \
66  __PACK_count = 1; \
67  __PACK_string = m; \
68  __PACK_num = n; \
69  std::cerr << std::endl << "PACK: " << n << m; \
70  } \
71  }
72 #define UPC( n, m ) \
73  { \
74  if( __UNPACK_num == (unsigned int)n && __UNPACK_string == m ) \
75  __UNPACK_count++; \
76  else \
77  { \
78  if( __UNPACK_count > 1 ) std::cerr << "(" << __UNPACK_count << "x)"; \
79  __UNPACK_count = 1; \
80  __UNPACK_string = m; \
81  __UNPACK_num = n; \
82  std::cerr << std::endl << "UNPACK: " << n << m; \
83  } \
84  }
85 #else
86 #define PC( n, m )
87 #define UPC( n, m )
88 #endif
89 
90 template < typename T >
91 static inline void UNPACK( unsigned char*& buff, T* val, size_t count )
92 {
93  memcpy( val, buff, count * sizeof( T ) );
94  buff += count * sizeof( T );
95 }
96 
97 template < typename T >
98 static inline void PACK( unsigned char*& buff, const T* val, size_t count )
99 {
100  memcpy( buff, val, count * sizeof( T ) );
101  buff += count * sizeof( T );
102 }
103 
104 static inline void PACK_INTS( unsigned char*& buff, const int* int_val, size_t num )
105 {
106  PACK( buff, int_val, num );
107  PC( num, " ints" );
108 }
109 
110 static inline void PACK_INT( unsigned char*& buff, int int_val )
111 {
112  PACK_INTS( buff, &int_val, 1 );
113 }
114 
115 static inline void PACK_DBLS( unsigned char*& buff, const double* dbl_val, size_t num )
116 {
117  PACK( buff, dbl_val, num );
118  PC( num, " doubles" );
119 }
120 
121 // static inline
122 // void PACK_DBL(unsigned char*& buff, const double dbl_val)
123 //{ PACK_DBLS(buff, &dbl_val, 1); }
124 
125 static inline void PACK_EH( unsigned char*& buff, const EntityHandle* eh_val, size_t num )
126 {
127  PACK( buff, eh_val, num );
128  PC( num, " handles" );
129 }
130 
131 // static inline
132 // void PACK_CHAR_64(unsigned char*& buff, const char* str)
133 //{
134 // memcpy(buff, str, 64);
135 // buff += 64;
136 // PC(64, " chars");
137 //}
138 
139 static inline void PACK_VOID( unsigned char*& buff, const void* val, size_t num )
140 {
141  PACK( buff, reinterpret_cast< const unsigned char* >( val ), num );
142  PC( num, " void" );
143 }
144 
145 static inline void PACK_BYTES( unsigned char*& buff, const void* val, int num )
146 {
147  PACK_INT( buff, num );
148  PACK_VOID( buff, val, num );
149 }
150 
151 static inline void PACK_RANGE( unsigned char*& buff, const Range& rng )
152 {
153  PACK_INT( buff, rng.psize() );
154  Range::const_pair_iterator cit;
155  for( cit = rng.const_pair_begin(); cit != rng.const_pair_end(); ++cit )
156  {
157  EntityHandle eh[2] = { cit->first, cit->second };
158  PACK_EH( buff, eh, 2 );
159  }
160  PC( rng.psize(), "-subranged range" );
161 }
162 
163 static inline void UNPACK_INTS( unsigned char*& buff, int* int_val, size_t num )
164 {
165  UNPACK( buff, int_val, num );
166  UPC( num, " ints" );
167 }
168 
169 static inline void UNPACK_INT( unsigned char*& buff, int& int_val )
170 {
171  UNPACK_INTS( buff, &int_val, 1 );
172 }
173 
174 static inline void UNPACK_DBLS( unsigned char*& buff, double* dbl_val, size_t num )
175 {
176  UNPACK( buff, dbl_val, num );
177  UPC( num, " doubles" );
178 }
179 
180 static inline void UNPACK_DBL( unsigned char*& buff, double& dbl_val )
181 {
182  UNPACK_DBLS( buff, &dbl_val, 1 );
183 }
184 
185 static inline void UNPACK_EH( unsigned char*& buff, EntityHandle* eh_val, size_t num )
186 {
187  UNPACK( buff, eh_val, num );
188  UPC( num, " handles" );
189 }
190 
191 // static inline
192 // void UNPACK_CHAR_64(unsigned char*& buff, char* char_val)
193 //{
194 // memcpy(buff, char_val, 64);
195 // buff += 64;
196 // UPC(64, " chars");
197 //}
198 
199 static inline void UNPACK_VOID( unsigned char*& buff, void* val, size_t num )
200 {
201  UNPACK( buff, reinterpret_cast< unsigned char* >( val ), num );
202  UPC( num, " void" );
203 }
204 
205 static inline void UNPACK_TYPE( unsigned char*& buff, EntityType& type )
206 {
207  int int_type = MBMAXTYPE;
208  UNPACK_INT( buff, int_type );
209  type = static_cast< EntityType >( int_type );
210  assert( type >= MBVERTEX && type <= MBMAXTYPE );
211 }
212 
213 static inline void UNPACK_RANGE( unsigned char*& buff, Range& rng )
214 {
215  int num_subs;
216  EntityHandle eh[2];
217  UNPACK_INT( buff, num_subs );
218  for( int i = 0; i < num_subs; i++ )
219  {
220  UPC( num_subs, "-subranged range" );
221  UNPACK_EH( buff, eh, 2 );
222  rng.insert( eh[0], eh[1] );
223  }
224 }
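// --------------------------------------------------------------------------
// Editor's illustrative sketch (not part of the original MOAB source). It shows
// how the PACK_*/UNPACK_* helpers above are meant to be used as a pair: the
// writer advances the buffer pointer past each field, and the reader must walk
// the same layout in the same order. The function name and the fixed stack
// buffer are assumptions made only for this example.
static inline void example_pack_unpack_roundtrip()
{
    unsigned char storage[64];
    unsigned char* wr = storage;
    int nverts          = 8;
    double coords[3]    = { 0.0, 1.0, 2.0 };
    PACK_INT( wr, nverts );      // layout: one int ...
    PACK_DBLS( wr, coords, 3 );  // ... followed by three doubles

    unsigned char* rd = storage;
    int nverts_out    = 0;
    double coords_out[3];
    UNPACK_INT( rd, nverts_out );  // read back in exactly the same order
    UNPACK_DBLS( rd, coords_out, 3 );
    assert( nverts_out == nverts && coords_out[2] == coords[2] );
}
// --------------------------------------------------------------------------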
225 
226 enum MBMessageTag
227 {
228  MB_MESG_ANY = MPI_ANY_TAG,
229  MB_MESG_ENTS_ACK,
230  MB_MESG_ENTS_SIZE,
231  MB_MESG_ENTS_LARGE,
232  MB_MESG_REMOTEH_ACK,
233  MB_MESG_REMOTEH_SIZE,
234  MB_MESG_REMOTEH_LARGE,
235  MB_MESG_TAGS_ACK,
236  MB_MESG_TAGS_SIZE,
237  MB_MESG_TAGS_LARGE
238 };
239 
240 static inline size_t RANGE_SIZE( const Range& rng )
241 {
242  return 2 * sizeof( EntityHandle ) * rng.psize() + sizeof( int );
243 }
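// Editor's note (worked example, not in the original source): RANGE_SIZE counts
// one int for the subrange count plus a start/end handle pair per subrange. On a
// typical 64-bit build (sizeof(int) == 4, sizeof(EntityHandle) == 8 -- assumed
// sizes, not guarantees), a Range stored as 3 contiguous subranges needs
// 2*8*3 + 4 = 52 bytes of buffer space.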
244 
245 #define PRINT_DEBUG_ISEND( A, B, C, D, E ) print_debug_isend( ( A ), ( B ), ( C ), ( D ), ( E ) )
246 #define PRINT_DEBUG_IRECV( A, B, C, D, E, F ) print_debug_irecv( ( A ), ( B ), ( C ), ( D ), ( E ), ( F ) )
247 #define PRINT_DEBUG_RECD( A ) print_debug_recd( ( A ) )
248 #define PRINT_DEBUG_WAITANY( A, B, C ) print_debug_waitany( ( A ), ( B ), ( C ) )
249 
250 void ParallelComm::print_debug_isend( int from, int to, unsigned char* buff, int tag, int sz )
251 {
252  myDebug->tprintf( 3, "Isend, %d->%d, buffer ptr = %p, tag=%d, size=%d\n", from, to, (void*)buff, tag, sz );
253 }
254 
255 void ParallelComm::print_debug_irecv( int to, int from, unsigned char* buff, int sz, int tag, int incoming )
256 {
257  myDebug->tprintf( 3, "Irecv, %d<-%d, buffer ptr = %p, tag=%d, size=%d", to, from, (void*)buff, tag, sz );
258  if( tag < MB_MESG_REMOTEH_ACK )
259  myDebug->printf( 3, ", incoming1=%d\n", incoming );
260  else if( tag < MB_MESG_TAGS_ACK )
261  myDebug->printf( 3, ", incoming2=%d\n", incoming );
262  else
263  myDebug->printf( 3, ", incoming=%d\n", incoming );
264 }
265 
266 void ParallelComm::print_debug_recd( MPI_Status status )
267 {
268  if( myDebug->get_verbosity() == 3 )
269  {
270  int this_count;
271  int success = MPI_Get_count( &status, MPI_UNSIGNED_CHAR, &this_count );
272  if( MPI_SUCCESS != success ) this_count = -1;
273  myDebug->tprintf( 3, "Received from %d, count = %d, tag = %d\n", status.MPI_SOURCE, this_count,
274  status.MPI_TAG );
275  }
276 }
277 
278 void ParallelComm::print_debug_waitany( std::vector< MPI_Request >& reqs, int tag, int proc )
279 {
280  if( myDebug->get_verbosity() == 3 )
281  {
282  myDebug->tprintf( 3, "Waitany, p=%d, ", proc );
283  if( tag < MB_MESG_REMOTEH_ACK )
284  myDebug->print( 3, ", recv_ent_reqs=" );
285  else if( tag < MB_MESG_TAGS_ACK )
286  myDebug->print( 3, ", recv_remoteh_reqs=" );
287  else
288  myDebug->print( 3, ", recv_tag_reqs=" );
289  for( unsigned int i = 0; i < reqs.size(); i++ )
290  myDebug->printf( 3, " %p", (void*)(intptr_t)reqs[i] );
291  myDebug->print( 3, "\n" );
292  }
293 }
294 
295 /** Name of tag used to store ParallelComm Index on mesh partitioning sets */
296 const char* PARTITIONING_PCOMM_TAG_NAME = "__PRTN_PCOMM";
297 
298 /** \brief Tag storing parallel communication objects
299  *
300  * This tag stores pointers to ParallelComm communication
301  * objects; one of these is allocated for each different
302  * communicator used to read mesh. ParallelComm stores
303  * partition and interface sets corresponding to its parallel mesh.
304  * By default, a parallel read uses the first ParallelComm object
305  * on the interface instance; if instantiated with one, ReadParallel
306  * adds this object to the interface instance too.
307  *
308  * Tag type: opaque
309  * Tag size: MAX_SHARING_PROCS*sizeof(ParallelComm*)
310  */
311 #define PARALLEL_COMM_TAG_NAME "__PARALLEL_COMM"
312 
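// --------------------------------------------------------------------------
// Editor's illustrative sketch (not part of the original MOAB source): the
// typical pattern for creating a ParallelComm and recovering it later through
// the instance tag described above. The helper name is an assumption made for
// this example; the constructor and ParallelComm::get_pcomm() are the
// interfaces exercised below.
#ifdef MOAB_HAVE_MPI
static inline ParallelComm* example_create_and_find_pcomm( Interface* mb )
{
    int id = -1;
    // Constructing the object registers it in the tag stored on the root set
    ParallelComm* pcomm = new ParallelComm( mb, MPI_COMM_WORLD, &id );
    (void)pcomm;
    // Any later caller holding only the Interface* can look it up again by index
    return ParallelComm::get_pcomm( mb, id );
}
#endif
// --------------------------------------------------------------------------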
313 ParallelComm::ParallelComm( Interface* impl, MPI_Comm cm, int* id )
314  : mbImpl( impl ), procConfig( cm ), sharedpTag( 0 ), sharedpsTag( 0 ), sharedhTag( 0 ), sharedhsTag( 0 ),
315  pstatusTag( 0 ), ifaceSetsTag( 0 ), partitionTag( 0 ), globalPartCount( -1 ), partitioningSet( 0 ),
316  myDebug( NULL )
317 {
318  initialize();
319  sharedSetData = new SharedSetData( *impl, pcommID, procConfig.proc_rank() );
320  if( id ) *id = pcommID;
321 }
322 
323 ParallelComm::ParallelComm( Interface* impl, std::vector< unsigned char >& /*tmp_buff*/, MPI_Comm cm, int* id )
324  : mbImpl( impl ), procConfig( cm ), sharedpTag( 0 ), sharedpsTag( 0 ), sharedhTag( 0 ), sharedhsTag( 0 ),
325  pstatusTag( 0 ), ifaceSetsTag( 0 ), partitionTag( 0 ), globalPartCount( -1 ), partitioningSet( 0 ),
326  myDebug( NULL )
327 {
328  initialize();
329  sharedSetData = new SharedSetData( *impl, pcommID, procConfig.proc_rank() );
330  if( id ) *id = pcommID;
331 }
332 
333 ParallelComm::~ParallelComm()
334 {
335  remove_pcomm( this );
336  delete_all_buffers();
337  delete myDebug;
338  delete sharedSetData;
339 }
340 
341 void ParallelComm::initialize()
342 {
343  Core* core = dynamic_cast< Core* >( mbImpl );
344  sequenceManager = core->sequence_manager();
346 
347  // Initialize MPI, if necessary
348  int flag = 1;
349  int retval = MPI_Initialized( &flag );
350  if( MPI_SUCCESS != retval || !flag )
351  {
352  int argc = 0;
353  char** argv = NULL;
354 
355  // mpi not initialized yet - initialize here
356  retval = MPI_Init( &argc, &argv );
357  assert( MPI_SUCCESS == retval );
358  }
359 
360  // Reserve space for vectors
361  buffProcs.reserve( MAX_SHARING_PROCS );
362  localOwnedBuffs.reserve( MAX_SHARING_PROCS );
363  remoteOwnedBuffs.reserve( MAX_SHARING_PROCS );
364 
365  pcommID = add_pcomm( this );
366 
367  if( !myDebug )
368  {
369  myDebug = new DebugOutput( "ParallelComm", std::cerr );
370  myDebug->set_rank( procConfig.proc_rank() );
371  }
372 }
373 
374 int ParallelComm::add_pcomm( ParallelComm* pc )
375 {
376  // Add this pcomm to instance tag
377  std::vector< ParallelComm* > pc_array( MAX_SHARING_PROCS, (ParallelComm*)NULL );
378  Tag pc_tag = pcomm_tag( mbImpl, true );
379  assert( 0 != pc_tag );
380 
381  const EntityHandle root = 0;
382  ErrorCode result = mbImpl->tag_get_data( pc_tag, &root, 1, (void*)&pc_array[0] );
383  if( MB_SUCCESS != result && MB_TAG_NOT_FOUND != result ) return -1;
384  int index = 0;
385  while( index < MAX_SHARING_PROCS && pc_array[index] )
386  index++;
387  if( index == MAX_SHARING_PROCS )
388  {
389  index = -1;
390  assert( false );
391  }
392  else
393  {
394  pc_array[index] = pc;
395  mbImpl->tag_set_data( pc_tag, &root, 1, (void*)&pc_array[0] );
396  }
397  return index;
398 }
399 
400 void ParallelComm::remove_pcomm( ParallelComm* pc )
401 {
402  // Remove this pcomm from instance tag
403  std::vector< ParallelComm* > pc_array( MAX_SHARING_PROCS );
404  Tag pc_tag = pcomm_tag( mbImpl, true );
405 
406  const EntityHandle root = 0;
407  ErrorCode result = mbImpl->tag_get_data( pc_tag, &root, 1, (void*)&pc_array[0] );
408  std::vector< ParallelComm* >::iterator pc_it = std::find( pc_array.begin(), pc_array.end(), pc );
409  assert( MB_SUCCESS == result && pc_it != pc_array.end() );
410  // Empty if test to get around compiler warning about unused var
411  if( MB_SUCCESS == result )
412  {
413  }
414 
415  *pc_it = NULL;
416  mbImpl->tag_set_data( pc_tag, &root, 1, (void*)&pc_array[0] );
417 }
418 
419 //! Assign a global id space, for largest-dimension or all entities (and
420 //! in either case for vertices too)
421 ErrorCode ParallelComm::assign_global_ids( EntityHandle this_set,
422  const int dimension,
423  const int start_id,
424  const bool largest_dim_only,
425  const bool parallel,
426  const bool owned_only )
427 {
428  Range entities[4];
429  ErrorCode result;
430  std::vector< unsigned char > pstatus;
431  for( int dim = 0; dim <= dimension; dim++ )
432  {
433  if( dim == 0 || !largest_dim_only || dim == dimension )
434  {
435  result = mbImpl->get_entities_by_dimension( this_set, dim, entities[dim] );MB_CHK_SET_ERR( result, "Failed to get vertices in assign_global_ids" );
436  }
437 
438  // Need to filter out non-locally-owned entities!!!
439  pstatus.resize( entities[dim].size() );
440  result = mbImpl->tag_get_data( pstatus_tag(), entities[dim], &pstatus[0] );MB_CHK_SET_ERR( result, "Failed to get pstatus in assign_global_ids" );
441 
442  Range dum_range;
443  Range::iterator rit;
444  unsigned int i;
445  for( rit = entities[dim].begin(), i = 0; rit != entities[dim].end(); ++rit, i++ )
446  if( pstatus[i] & PSTATUS_NOT_OWNED ) dum_range.insert( *rit );
447  entities[dim] = subtract( entities[dim], dum_range );
448  }
449 
450  return assign_global_ids( entities, dimension, start_id, parallel, owned_only );
451 }
452 
453 //! Assign a global id space, for largest-dimension or all entities (and
454 //! in either case for vertices too)
455 ErrorCode ParallelComm::assign_global_ids( Range entities[],
456  const int dimension,
457  const int start_id,
458  const bool parallel,
459  const bool owned_only )
460 {
461  int local_num_elements[4];
462  ErrorCode result;
463  for( int dim = 0; dim <= dimension; dim++ )
464  {
465  local_num_elements[dim] = entities[dim].size();
466  }
467 
468  // Communicate numbers
469  std::vector< int > num_elements( procConfig.proc_size() * 4 );
470 #ifdef MOAB_HAVE_MPI
471  if( procConfig.proc_size() > 1 && parallel )
472  {
473  int retval =
474  MPI_Allgather( local_num_elements, 4, MPI_INT, &num_elements[0], 4, MPI_INT, procConfig.proc_comm() );
475  if( 0 != retval ) return MB_FAILURE;
476  }
477  else
478 #endif
479  for( int dim = 0; dim < 4; dim++ )
480  num_elements[dim] = local_num_elements[dim];
481 
482  // My entities start at one greater than total_elems[d]
483  int total_elems[4] = { start_id, start_id, start_id, start_id };
484 
485  for( unsigned int proc = 0; proc < procConfig.proc_rank(); proc++ )
486  {
487  for( int dim = 0; dim < 4; dim++ )
488  total_elems[dim] += num_elements[4 * proc + dim];
489  }
490 
491  // Assign global ids now
492  Tag gid_tag = mbImpl->globalId_tag();
493 
494  for( int dim = 0; dim < 4; dim++ )
495  {
496  if( entities[dim].empty() ) continue;
497  num_elements.resize( entities[dim].size() );
498  int i = 0;
499  for( Range::iterator rit = entities[dim].begin(); rit != entities[dim].end(); ++rit )
500  num_elements[i++] = total_elems[dim]++;
501 
502  result = mbImpl->tag_set_data( gid_tag, entities[dim], &num_elements[0] );MB_CHK_SET_ERR( result, "Failed to set global id tag in assign_global_ids" );
503  }
504 
505  if( owned_only ) return MB_SUCCESS;
506 
507  // Exchange tags
508  for( int dim = 1; dim < 4; dim++ )
509  entities[0].merge( entities[dim] );
510 
511  return exchange_tags( gid_tag, entities[0] );
512 }
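// --------------------------------------------------------------------------
// Editor's illustrative sketch (not part of the original MOAB source). The id
// assignment above is an exclusive prefix sum over the per-processor counts
// gathered with MPI_Allgather: rank r starts numbering dimension d at
// start_id + sum over p < r of num_elements[4*p + d]. A minimal serial version
// of that offset computation (the function name is an assumption):
static inline int example_global_id_offset( const std::vector< int >& gathered_counts,
                                            unsigned int my_rank,
                                            int dim,
                                            int start_id )
{
    int offset = start_id;
    for( unsigned int p = 0; p < my_rank; p++ )
        offset += gathered_counts[4 * p + dim];  // counts are stored 4 per rank, one per dimension
    return offset;
}
// --------------------------------------------------------------------------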
513 
514 int ParallelComm::get_buffers( int to_proc, bool* is_new )
515 {
516  int ind = -1;
517  std::vector< unsigned int >::iterator vit = std::find( buffProcs.begin(), buffProcs.end(), to_proc );
518  if( vit == buffProcs.end() )
519  {
520  assert( "shouldn't need buffer to myself" && to_proc != (int)procConfig.proc_rank() );
521  ind = buffProcs.size();
522  buffProcs.push_back( (unsigned int)to_proc );
523  localOwnedBuffs.push_back( new Buffer( INITIAL_BUFF_SIZE ) );
524  remoteOwnedBuffs.push_back( new Buffer( INITIAL_BUFF_SIZE ) );
525  if( is_new ) *is_new = true;
526  }
527  else
528  {
529  ind = vit - buffProcs.begin();
530  if( is_new ) *is_new = false;
531  }
532  assert( ind < MAX_SHARING_PROCS );
533  return ind;
534 }
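// Editor's note (clarifying comment, not in the original source): the index
// returned by get_buffers() is used consistently as the position of to_proc in
// buffProcs and in the parallel arrays localOwnedBuffs/remoteOwnedBuffs, as well
// as the base for the per-processor MPI request slots (2 or 3 requests per
// processor, depending on the communication pattern used below).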
535 
536 ErrorCode ParallelComm::broadcast_entities( const int from_proc,
537  Range& entities,
538  const bool adjacencies,
539  const bool tags )
540 {
541 #ifndef MOAB_HAVE_MPI
542  return MB_FAILURE;
543 #else
544 
545  ErrorCode result = MB_SUCCESS;
546  int success;
547  int buff_size;
548 
549  Buffer buff( INITIAL_BUFF_SIZE );
550  buff.reset_ptr( sizeof( int ) );
551  if( (int)procConfig.proc_rank() == from_proc )
552  {
553  result = add_verts( entities );MB_CHK_SET_ERR( result, "Failed to add adj vertices" );
554 
555  buff.reset_ptr( sizeof( int ) );
556  result = pack_buffer( entities, adjacencies, tags, false, -1, &buff );MB_CHK_SET_ERR( result, "Failed to compute buffer size in broadcast_entities" );
557  buff.set_stored_size();
558  buff_size = buff.buff_ptr - buff.mem_ptr;
559  }
560 
561  success = MPI_Bcast( &buff_size, 1, MPI_INT, from_proc, procConfig.proc_comm() );
562  if( MPI_SUCCESS != success )
563  {
564  MB_SET_ERR( MB_FAILURE, "MPI_Bcast of buffer size failed" );
565  }
566 
567  if( !buff_size ) // No data
568  return MB_SUCCESS;
569 
570  if( (int)procConfig.proc_rank() != from_proc ) buff.reserve( buff_size );
571 
572  size_t offset = 0;
573  while( buff_size )
574  {
575  int sz = std::min( buff_size, MAX_BCAST_SIZE );
576  success = MPI_Bcast( buff.mem_ptr + offset, sz, MPI_UNSIGNED_CHAR, from_proc, procConfig.proc_comm() );
577  if( MPI_SUCCESS != success )
578  {
579  MB_SET_ERR( MB_FAILURE, "MPI_Bcast of buffer failed" );
580  }
581 
582  offset += sz;
583  buff_size -= sz;
584  }
585 
586  if( (int)procConfig.proc_rank() != from_proc )
587  {
588  std::vector< std::vector< EntityHandle > > dum1a, dum1b;
589  std::vector< std::vector< int > > dum1p;
590  std::vector< EntityHandle > dum2, dum4;
591  std::vector< unsigned int > dum3;
592  buff.reset_ptr( sizeof( int ) );
593  result = unpack_buffer( buff.buff_ptr, false, from_proc, -1, dum1a, dum1b, dum1p, dum2, dum2, dum3, dum4 );MB_CHK_SET_ERR( result, "Failed to unpack buffer in broadcast_entities" );
594  std::copy( dum4.begin(), dum4.end(), range_inserter( entities ) );
595  }
596 
597  return MB_SUCCESS;
598 #endif
599 }
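// --------------------------------------------------------------------------
// Editor's illustrative sketch (not part of the original MOAB source).
// broadcast_entities() above splits the packed buffer into pieces of at most
// MAX_BCAST_SIZE bytes so that no single MPI_Bcast exceeds the int-sized count
// argument. A standalone version of that chunking loop (the function name is
// an assumption made for this example):
#ifdef MOAB_HAVE_MPI
static inline int example_chunked_bcast( unsigned char* data, int nbytes, int root, MPI_Comm comm )
{
    size_t offset = 0;
    while( nbytes )
    {
        int sz = std::min( nbytes, MAX_BCAST_SIZE );  // cap each broadcast at MAX_BCAST_SIZE bytes
        int rc = MPI_Bcast( data + offset, sz, MPI_UNSIGNED_CHAR, root, comm );
        if( MPI_SUCCESS != rc ) return rc;
        offset += sz;
        nbytes -= sz;
    }
    return MPI_SUCCESS;
}
#endif
// --------------------------------------------------------------------------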
600 
601 ErrorCode ParallelComm::scatter_entities( const int from_proc,
602  std::vector< Range >& entities,
603  const bool adjacencies,
604  const bool tags )
605 {
606 #ifndef MOAB_HAVE_MPI
607  return MB_FAILURE;
608 #else
609  ErrorCode result = MB_SUCCESS;
610  int i, success, buff_size, prev_size;
611  int nProcs = (int)procConfig.proc_size();
612  int* sendCounts = new int[nProcs];
613  int* displacements = new int[nProcs];
614  sendCounts[0] = sizeof( int );
615  displacements[0] = 0;
616  Buffer buff( INITIAL_BUFF_SIZE );
617  buff.reset_ptr( sizeof( int ) );
618  buff.set_stored_size();
619  unsigned int my_proc = procConfig.proc_rank();
620 
621  // Get buffer size array for each remote processor
622  if( my_proc == (unsigned int)from_proc )
623  {
624  for( i = 1; i < nProcs; i++ )
625  {
626  prev_size = buff.buff_ptr - buff.mem_ptr;
627  buff.reset_ptr( prev_size + sizeof( int ) );
628  result = add_verts( entities[i] );MB_CHK_SET_ERR( result, "Failed to add verts" );
629 
630  result = pack_buffer( entities[i], adjacencies, tags, false, -1, &buff );
631  if( MB_SUCCESS != result )
632  {
633  delete[] sendCounts;
634  delete[] displacements;
635  MB_SET_ERR( result, "Failed to pack buffer in scatter_entities" );
636  }
637 
638  buff_size = buff.buff_ptr - buff.mem_ptr - prev_size;
639  *( (int*)( buff.mem_ptr + prev_size ) ) = buff_size;
640  sendCounts[i] = buff_size;
641  }
642  }
643 
644  // Broadcast buffer size array
645  success = MPI_Bcast( sendCounts, nProcs, MPI_INT, from_proc, procConfig.proc_comm() );
646  if( MPI_SUCCESS != success )
647  {
648  delete[] sendCounts;
649  delete[] displacements;
650  MB_SET_ERR( MB_FAILURE, "MPI_Bcast of buffer size failed" );
651  }
652 
653  for( i = 1; i < nProcs; i++ )
654  {
655  displacements[i] = displacements[i - 1] + sendCounts[i - 1];
656  }
657 
658  Buffer rec_buff;
659  rec_buff.reserve( sendCounts[my_proc] );
660 
661  // Scatter actual geometry
662  success = MPI_Scatterv( buff.mem_ptr, sendCounts, displacements, MPI_UNSIGNED_CHAR, rec_buff.mem_ptr,
663  sendCounts[my_proc], MPI_UNSIGNED_CHAR, from_proc, procConfig.proc_comm() );
664 
665  if( MPI_SUCCESS != success )
666  {
667  delete[] sendCounts;
668  delete[] displacements;
669  MB_SET_ERR( MB_FAILURE, "MPI_Scatterv of buffer failed" );
670  }
671 
672  // Unpack in remote processors
673  if( my_proc != (unsigned int)from_proc )
674  {
675  std::vector< std::vector< EntityHandle > > dum1a, dum1b;
676  std::vector< std::vector< int > > dum1p;
677  std::vector< EntityHandle > dum2, dum4;
678  std::vector< unsigned int > dum3;
679  rec_buff.reset_ptr( sizeof( int ) );
680  result = unpack_buffer( rec_buff.buff_ptr, false, from_proc, -1, dum1a, dum1b, dum1p, dum2, dum2, dum3, dum4 );
681  if( MB_SUCCESS != result )
682  {
683  delete[] sendCounts;
684  delete[] displacements;
685  MB_SET_ERR( result, "Failed to unpack buffer in scatter_entities" );
686  }
687 
688  std::copy( dum4.begin(), dum4.end(), range_inserter( entities[my_proc] ) );
689  }
690 
691  delete[] sendCounts;
692  delete[] displacements;
693 
694  return MB_SUCCESS;
695 #endif
696 }
697 
698 ErrorCode ParallelComm::send_entities( const int to_proc,
699  Range& orig_ents,
700  const bool adjs,
701  const bool tags,
702  const bool store_remote_handles,
703  const bool is_iface,
704  Range& /*final_ents*/,
705  int& incoming1,
706  int& incoming2,
707  TupleList& entprocs,
708  std::vector< MPI_Request >& recv_remoteh_reqs,
709  bool /*wait_all*/ )
710 {
711 #ifndef MOAB_HAVE_MPI
712  return MB_FAILURE;
713 #else
714  // Pack entities to local buffer
715  int ind = get_buffers( to_proc );
716  localOwnedBuffs[ind]->reset_ptr( sizeof( int ) );
717 
718  // Add vertices
719  ErrorCode result = add_verts( orig_ents );MB_CHK_SET_ERR( result, "Failed to add verts in send_entities" );
720 
721  // Filter out entities already shared with destination
722  Range tmp_range;
723  result = filter_pstatus( orig_ents, PSTATUS_SHARED, PSTATUS_AND, to_proc, &tmp_range );MB_CHK_SET_ERR( result, "Failed to filter on owner" );
724  if( !tmp_range.empty() )
725  {
726  orig_ents = subtract( orig_ents, tmp_range );
727  }
728 
729  result = pack_buffer( orig_ents, adjs, tags, store_remote_handles, to_proc, localOwnedBuffs[ind], &entprocs );MB_CHK_SET_ERR( result, "Failed to pack buffer in send_entities" );
730 
731  // Send buffer
732  result = send_buffer( to_proc, localOwnedBuffs[ind], MB_MESG_ENTS_SIZE, sendReqs[2 * ind], recvReqs[2 * ind + 1],
733  (int*)( remoteOwnedBuffs[ind]->mem_ptr ),
734  //&ackbuff,
735  incoming1, MB_MESG_REMOTEH_SIZE,
736  ( !is_iface && store_remote_handles ? localOwnedBuffs[ind] : NULL ),
737  &recv_remoteh_reqs[2 * ind], &incoming2 );MB_CHK_SET_ERR( result, "Failed to send buffer" );
738 
739  return MB_SUCCESS;
740 #endif
741 }
742 
743 ErrorCode ParallelComm::send_entities( std::vector< unsigned int >& send_procs,
744  std::vector< Range* >& send_ents,
745  int& incoming1,
746  int& incoming2,
747  const bool store_remote_handles )
748 {
749 #ifdef MOAB_HAVE_MPE
750  if( myDebug->get_verbosity() == 2 )
751  {
752  MPE_Log_event( OWNED_START, procConfig.proc_rank(), "Starting send_entities." );
753  }
754 #endif
755  myDebug->tprintf( 1, "Entering send_entities\n" );
756  if( myDebug->get_verbosity() == 4 )
757  {
758  msgs.clear();
759  msgs.reserve( MAX_SHARING_PROCS );
760  }
761 
762  unsigned int i;
763  int ind;
764  ErrorCode result = MB_SUCCESS;
765 
766  // Set buffProcs with communicating procs
767  unsigned int n_proc = send_procs.size();
768  for( i = 0; i < n_proc; i++ )
769  {
770  ind = get_buffers( send_procs[i] );
771  result = add_verts( *send_ents[i] );MB_CHK_SET_ERR( result, "Failed to add verts" );
772 
773  // Filter out entities already shared with destination
774  Range tmp_range;
775  result = filter_pstatus( *send_ents[i], PSTATUS_SHARED, PSTATUS_AND, buffProcs[ind], &tmp_range );MB_CHK_SET_ERR( result, "Failed to filter on owner" );
776  if( !tmp_range.empty() )
777  {
778  *send_ents[i] = subtract( *send_ents[i], tmp_range );
779  }
780  }
781 
782  //===========================================
783  // Get entities to be sent to neighbors
784  // Need to get procs each entity is sent to
785  //===========================================
786  Range allsent, tmp_range;
787  int npairs = 0;
788  TupleList entprocs;
789  for( i = 0; i < n_proc; i++ )
790  {
791  int n_ents = send_ents[i]->size();
792  if( n_ents > 0 )
793  {
794  npairs += n_ents; // Get the total # of proc/handle pairs
795  allsent.merge( *send_ents[i] );
796  }
797  }
798 
799  // Allocate a TupleList of that size
800  entprocs.initialize( 1, 0, 1, 0, npairs );
801  entprocs.enableWriteAccess();
802 
803  // Put the proc/handle pairs in the list
804  for( i = 0; i < n_proc; i++ )
805  {
806  for( Range::iterator rit = send_ents[i]->begin(); rit != send_ents[i]->end(); ++rit )
807  {
808  entprocs.vi_wr[entprocs.get_n()] = send_procs[i];
809  entprocs.vul_wr[entprocs.get_n()] = *rit;
810  entprocs.inc_n();
811  }
812  }
813 
814  // Sort by handle
815  moab::TupleList::buffer sort_buffer;
816  sort_buffer.buffer_init( npairs );
817  entprocs.sort( 1, &sort_buffer );
818  entprocs.disableWriteAccess();
819  sort_buffer.reset();
820 
821  myDebug->tprintf( 1, "allsent ents compactness (size) = %f (%lu)\n", allsent.compactness(),
822  (unsigned long)allsent.size() );
823 
824  //===========================================
825  // Pack and send ents from this proc to others
826  //===========================================
827  for( i = 0; i < n_proc; i++ )
828  {
829  if( send_ents[i]->size() > 0 )
830  {
831  ind = get_buffers( send_procs[i] );
832  myDebug->tprintf( 1, "Sent ents compactness (size) = %f (%lu)\n", send_ents[i]->compactness(),
833  (unsigned long)send_ents[i]->size() );
834  // Reserve space on front for size and for initial buff size
835  localOwnedBuffs[ind]->reset_buffer( sizeof( int ) );
836  result = pack_buffer( *send_ents[i], false, true, store_remote_handles, buffProcs[ind],
837  localOwnedBuffs[ind], &entprocs, &allsent );
838 
839  if( myDebug->get_verbosity() == 4 )
840  {
841  msgs.resize( msgs.size() + 1 );
842  msgs.back() = new Buffer( *localOwnedBuffs[ind] );
843  }
844 
845  // Send the buffer (size stored in front in send_buffer)
846  result = send_buffer( send_procs[i], localOwnedBuffs[ind], MB_MESG_ENTS_SIZE, sendReqs[2 * ind],
847  recvReqs[2 * ind + 1], &ackbuff, incoming1, MB_MESG_REMOTEH_SIZE,
848  ( store_remote_handles ? localOwnedBuffs[ind] : NULL ), &recvRemotehReqs[2 * ind],
849  &incoming2 );MB_CHK_SET_ERR( result, "Failed to Isend in ghost send" );
850  }
851  }
852  entprocs.reset();
853 
854 #ifdef MOAB_HAVE_MPE
855  if( myDebug->get_verbosity() == 2 )
856  {
857  MPE_Log_event( ENTITIES_END, procConfig.proc_rank(), "Ending send_entities." );
858  }
859 #endif
860 
861  return MB_SUCCESS;
862 }
863 
864 /////////////////////////////////////////////////////////////////////////////////
865 // Send and Receive routines for a sequence of entities: use case UMR
866 /////////////////////////////////////////////////////////////////////////////////
867 void print_buff( unsigned char* ch, int size )
868 {
869  for( int i = 0; i < size; i++ )
870  std::cout << ch[i];
871  std::cout << "\n";
872 }
873 ErrorCode ParallelComm::send_recv_entities( std::vector< int >& send_procs,
874  std::vector< std::vector< int > >& msgsizes,
875  std::vector< std::vector< EntityHandle > >& senddata,
876  std::vector< std::vector< EntityHandle > >& recvdata )
877 {
878 #ifdef USE_MPE
879  if( myDebug->get_verbosity() == 2 )
880  {
881  MPE_Log_event( OWNED_START, procConfig.proc_rank(), "Starting send_recv_entities." );
882  }
883 #endif
884  myDebug->tprintf( 1, "Entering send_recv_entities\n" );
885  if( myDebug->get_verbosity() == 4 )
886  {
887  msgs.clear();
888  msgs.reserve( MAX_SHARING_PROCS );
889  }
890 
891  // unsigned int i;
892  int i, ind, success;
893  ErrorCode error;
894 
895  //===========================================
896  // Pack and send ents from this proc to others
897  //===========================================
898 
899  // std::cout<<"resetting all buffers"<<std::endl;
900 
901  reset_all_buffers();
902  sendReqs.resize( 3 * buffProcs.size(), MPI_REQUEST_NULL );
903  std::vector< MPI_Request > recv_ent_reqs( 3 * buffProcs.size(), MPI_REQUEST_NULL );
904  int ack_buff;
905  int incoming = 0;
906 
907  std::vector< unsigned int >::iterator sit;
908 
909  for( ind = 0, sit = buffProcs.begin(); sit != buffProcs.end(); ++sit, ind++ )
910  {
911  incoming++;
912  PRINT_DEBUG_IRECV( procConfig.proc_rank(), *sit, remoteOwnedBuffs[ind]->mem_ptr, INITIAL_BUFF_SIZE,
913  MB_MESG_ENTS_SIZE, incoming );
914 
915  success = MPI_Irecv( remoteOwnedBuffs[ind]->mem_ptr, INITIAL_BUFF_SIZE, MPI_UNSIGNED_CHAR, *sit,
916  MB_MESG_ENTS_SIZE, procConfig.proc_comm(), &recv_ent_reqs[3 * ind] );
917  if( success != MPI_SUCCESS )
918  {
919  MB_SET_ERR( MB_FAILURE, "Failed to post irecv in send_recv_entities" );
920  }
921  }
922 
923  // std::set<unsigned int>::iterator it;
924  for( i = 0; i < (int)send_procs.size(); i++ )
925  {
926  // Get index of the shared processor in the local buffer
927  ind = get_buffers( send_procs[i] );
928  localOwnedBuffs[ind]->reset_buffer( sizeof( int ) );
929 
930  int buff_size = msgsizes[i].size() * sizeof( int ) + senddata[i].size() * sizeof( EntityHandle );
931  localOwnedBuffs[ind]->check_space( buff_size );
932 
933  // Pack entities
934  std::vector< int > msg;
935  msg.insert( msg.end(), msgsizes[i].begin(), msgsizes[i].end() );
936  PACK_INTS( localOwnedBuffs[ind]->buff_ptr, &msg[0], msg.size() );
937 
938  std::vector< EntityHandle > entities;
939  entities.insert( entities.end(), senddata[i].begin(), senddata[i].end() );
940  PACK_EH( localOwnedBuffs[ind]->buff_ptr, &entities[0], entities.size() );
941  localOwnedBuffs[ind]->set_stored_size();
942 
943  if( myDebug->get_verbosity() == 4 )
944  {
945  msgs.resize( msgs.size() + 1 );
946  msgs.back() = new Buffer( *localOwnedBuffs[ind] );
947  }
948 
949  // Send the buffer (size stored in front in send_buffer)
950  error = send_buffer( send_procs[i], localOwnedBuffs[ind], MB_MESG_ENTS_SIZE, sendReqs[3 * ind],
951  recv_ent_reqs[3 * ind + 2], &ack_buff, incoming );MB_CHK_SET_ERR( error, "Failed to Isend in send_recv_entities" );
952  }
953 
954  //===========================================
955  // Receive and unpack ents from received data
956  //===========================================
957 
958  while( incoming )
959  {
960 
961  MPI_Status status;
962  int index_in_recv_requests;
963 
964  PRINT_DEBUG_WAITANY( recv_ent_reqs, MB_MESG_ENTS_SIZE, procConfig.proc_rank() );
965  success = MPI_Waitany( 3 * buffProcs.size(), &recv_ent_reqs[0], &index_in_recv_requests, &status );
966  if( MPI_SUCCESS != success )
967  {
968  MB_SET_ERR( MB_FAILURE, "Failed in waitany in send_recv_entities" );
969  }
970 
971  // Processor index in the list is divided by 3
972  ind = index_in_recv_requests / 3;
973 
974  PRINT_DEBUG_RECD( status );
975 
976  // OK, received something; decrement incoming counter
977  incoming--;
978 
979  bool done = false;
980 
981  error = recv_buffer( MB_MESG_ENTS_SIZE, status, remoteOwnedBuffs[ind],
982  recv_ent_reqs[3 * ind + 1], // This is for receiving the second message
983  recv_ent_reqs[3 * ind + 2], // This would be for ack, but it is not
984  // used; consider removing it
985  incoming, localOwnedBuffs[ind],
986  sendReqs[3 * ind + 1], // Send request for sending the second message
987  sendReqs[3 * ind + 2], // This is for sending the ack
988  done );MB_CHK_SET_ERR( error, "Failed to resize recv buffer" );
989 
990  if( done )
991  {
992  remoteOwnedBuffs[ind]->reset_ptr( sizeof( int ) );
993 
994  int from_proc = status.MPI_SOURCE;
995  int idx = std::find( send_procs.begin(), send_procs.end(), from_proc ) - send_procs.begin();
996 
997  int msg = msgsizes[idx].size();
998  std::vector< int > recvmsg( msg );
999  int ndata = senddata[idx].size();
1000  std::vector< EntityHandle > dum_vec( ndata );
1001 
1002  UNPACK_INTS( remoteOwnedBuffs[ind]->buff_ptr, &recvmsg[0], msg );
1003  UNPACK_EH( remoteOwnedBuffs[ind]->buff_ptr, &dum_vec[0], ndata );
1004 
1005  recvdata[idx].insert( recvdata[idx].end(), dum_vec.begin(), dum_vec.end() );
1006  }
1007  }
1008 
1009 #ifdef USE_MPE
1010  if( myDebug->get_verbosity() == 2 )
1011  {
1012  MPE_Log_event( ENTITIES_END, procConfig.proc_rank(), "Ending send_recv_entities." );
1013  }
1014 #endif
1015 
1016  return MB_SUCCESS;
1017 }
1018 
1019 ErrorCode ParallelComm::update_remote_data( EntityHandle entity,
1020  std::vector< int >& procs,
1021  std::vector< EntityHandle >& handles )
1022 {
1023  ErrorCode error;
1024  unsigned char pstatus = PSTATUS_INTERFACE;
1025 
1026  int procmin = *std::min_element( procs.begin(), procs.end() );
1027 
1028  if( (int)rank() > procmin )
1029  pstatus |= PSTATUS_NOT_OWNED;
1030  else
1031  procmin = rank();
1032 
1033  // DBG
1034  // std::cout<<"entity = "<<entity<<std::endl;
1035  // for (int j=0; j<procs.size(); j++)
1036  // std::cout<<"procs["<<j<<"] = "<<procs[j]<<", handles["<<j<<"] = "<<handles[j]<<std::endl;
1037  // DBG
1038 
1039  if( (int)procs.size() > 1 )
1040  {
1041  procs.push_back( rank() );
1042  handles.push_back( entity );
1043 
1044  int idx = std::find( procs.begin(), procs.end(), procmin ) - procs.begin();
1045 
1046  std::iter_swap( procs.begin(), procs.begin() + idx );
1047  std::iter_swap( handles.begin(), handles.begin() + idx );
1048 
1049  // DBG
1050  // std::cout<<"entity = "<<entity<<std::endl;
1051  // for (int j=0; j<procs.size(); j++)
1052  // std::cout<<"procs["<<j<<"] = "<<procs[j]<<", handles["<<j<<"] = "<<handles[j]<<std::endl;
1053  // DBG
1054  }
1055 
1056  // if ((entity == 10388) && (rank()==1))
1057  // std::cout<<"Here"<<std::endl;
1058 
1059  error = update_remote_data( entity, &procs[0], &handles[0], procs.size(), pstatus );MB_CHK_ERR( error );
1060 
1061  return MB_SUCCESS;
1062 }
1063 
1064 ErrorCode ParallelComm::get_remote_handles( EntityHandle* local_vec, EntityHandle* rem_vec, int num_ents, int to_proc )
1065 {
1066  ErrorCode error;
1067  std::vector< EntityHandle > newents;
1068  error = get_remote_handles( true, local_vec, rem_vec, num_ents, to_proc, newents );MB_CHK_ERR( error );
1069 
1070  return MB_SUCCESS;
1071 }
1072 
1073 //////////////////////////////////////////////////////////////////
1074 
1075 ErrorCode ParallelComm::recv_entities( const int from_proc,
1076  const bool store_remote_handles,
1077  const bool is_iface,
1078  Range& final_ents,
1079  int& incoming1,
1080  int& incoming2,
1081  std::vector< std::vector< EntityHandle > >& L1hloc,
1082  std::vector< std::vector< EntityHandle > >& L1hrem,
1083  std::vector< std::vector< int > >& L1p,
1084  std::vector< EntityHandle >& L2hloc,
1085  std::vector< EntityHandle >& L2hrem,
1086  std::vector< unsigned int >& L2p,
1087  std::vector< MPI_Request >& recv_remoteh_reqs,
1088  bool /*wait_all*/ )
1089 {
1090 #ifndef MOAB_HAVE_MPI
1091  return MB_FAILURE;
1092 #else
1093  // Non-blocking receive for the first message (having size info)
1094  int ind1 = get_buffers( from_proc );
1095  incoming1++;
1096  PRINT_DEBUG_IRECV( procConfig.proc_rank(), from_proc, remoteOwnedBuffs[ind1]->mem_ptr, INITIAL_BUFF_SIZE,
1097  MB_MESG_ENTS_SIZE, incoming1 );
1098  int success = MPI_Irecv( remoteOwnedBuffs[ind1]->mem_ptr, INITIAL_BUFF_SIZE, MPI_UNSIGNED_CHAR, from_proc,
1099  MB_MESG_ENTS_SIZE, procConfig.proc_comm(), &recvReqs[2 * ind1] );
1100  if( success != MPI_SUCCESS )
1101  {
1102  MB_SET_ERR( MB_FAILURE, "Failed to post irecv in ghost exchange" );
1103  }
1104 
1105  // Receive messages in while loop
1106  return recv_messages( from_proc, store_remote_handles, is_iface, final_ents, incoming1, incoming2, L1hloc, L1hrem,
1107  L1p, L2hloc, L2hrem, L2p, recv_remoteh_reqs );
1108 #endif
1109 }
1110 
1111 ErrorCode ParallelComm::recv_entities( std::set< unsigned int >& recv_procs,
1112  int incoming1,
1113  int incoming2,
1114  const bool store_remote_handles,
1115  const bool migrate )
1116 {
1117  //===========================================
1118  // Receive/unpack new entities
1119  //===========================================
1120  // Number of incoming messages is the number of procs we communicate with
1121  int success, ind, i;
1122  ErrorCode result;
1123  MPI_Status status;
1124  std::vector< std::vector< EntityHandle > > recd_ents( buffProcs.size() );
1125  std::vector< std::vector< EntityHandle > > L1hloc( buffProcs.size() ), L1hrem( buffProcs.size() );
1126  std::vector< std::vector< int > > L1p( buffProcs.size() );
1127  std::vector< EntityHandle > L2hloc, L2hrem;
1128  std::vector< unsigned int > L2p;
1129  std::vector< EntityHandle > new_ents;
1130 
1131  while( incoming1 )
1132  {
1133  // Wait for all recvs of ents before proceeding to sending remote handles,
1134  // b/c some procs may have sent to a 3rd proc ents owned by me;
1135  PRINT_DEBUG_WAITANY( recvReqs, MB_MESG_ENTS_SIZE, procConfig.proc_rank() );
1136 
1137  success = MPI_Waitany( 2 * buffProcs.size(), &recvReqs[0], &ind, &status );
1138  if( MPI_SUCCESS != success )
1139  {
1140  MB_SET_ERR( MB_FAILURE, "Failed in waitany in owned entity exchange" );
1141  }
1142 
1143  PRINT_DEBUG_RECD( status );
1144 
1145  // OK, received something; decrement incoming counter
1146  incoming1--;
1147  bool done = false;
1148 
1149  // In case ind is for ack, we need index of one before it
1150  unsigned int base_ind = 2 * ( ind / 2 );
1151  result = recv_buffer( MB_MESG_ENTS_SIZE, status, remoteOwnedBuffs[ind / 2], recvReqs[ind], recvReqs[ind + 1],
1152  incoming1, localOwnedBuffs[ind / 2], sendReqs[base_ind], sendReqs[base_ind + 1], done,
1153  ( store_remote_handles ? localOwnedBuffs[ind / 2] : NULL ), MB_MESG_REMOTEH_SIZE,
1154  &recvRemotehReqs[base_ind], &incoming2 );MB_CHK_SET_ERR( result, "Failed to receive buffer" );
1155 
1156  if( done )
1157  {
1158  if( myDebug->get_verbosity() == 4 )
1159  {
1160  msgs.resize( msgs.size() + 1 );
1161  msgs.back() = new Buffer( *remoteOwnedBuffs[ind / 2] );
1162  }
1163 
1164  // Message completely received - process buffer that was sent
1165  remoteOwnedBuffs[ind / 2]->reset_ptr( sizeof( int ) );
1166  result = unpack_buffer( remoteOwnedBuffs[ind / 2]->buff_ptr, store_remote_handles, buffProcs[ind / 2],
1167  ind / 2, L1hloc, L1hrem, L1p, L2hloc, L2hrem, L2p, new_ents, true );
1168  if( MB_SUCCESS != result )
1169  {
1170  std::cout << "Failed to unpack entities. Buffer contents:" << std::endl;
1171  print_buffer( remoteOwnedBuffs[ind / 2]->mem_ptr, MB_MESG_ENTS_SIZE, buffProcs[ind / 2], false );
1172  return result;
1173  }
1174 
1175  if( recvReqs.size() != 2 * buffProcs.size() )
1176  {
1177  // Post irecv's for remote handles from new proc
1178  recvRemotehReqs.resize( 2 * buffProcs.size(), MPI_REQUEST_NULL );
1179  for( i = recvReqs.size(); i < (int)( 2 * buffProcs.size() ); i += 2 )
1180  {
1181  localOwnedBuffs[i / 2]->reset_buffer();
1182  incoming2++;
1183  PRINT_DEBUG_IRECV( procConfig.proc_rank(), buffProcs[i / 2], localOwnedBuffs[i / 2]->mem_ptr,
1184  INITIAL_BUFF_SIZE, MB_MESG_REMOTEH_SIZE, incoming2 );
1185  success = MPI_Irecv( localOwnedBuffs[i / 2]->mem_ptr, INITIAL_BUFF_SIZE, MPI_UNSIGNED_CHAR,
1186  buffProcs[i / 2], MB_MESG_REMOTEH_SIZE, procConfig.proc_comm(),
1187  &recvRemotehReqs[i] );
1188  if( success != MPI_SUCCESS )
1189  {
1190  MB_SET_ERR( MB_FAILURE, "Failed to post irecv for remote handles in ghost exchange" );
1191  }
1192  }
1193  recvReqs.resize( 2 * buffProcs.size(), MPI_REQUEST_NULL );
1194  sendReqs.resize( 2 * buffProcs.size(), MPI_REQUEST_NULL );
1195  }
1196  }
1197  }
1198 
1199  // Assign and remove newly created elements from/to receive processor
1200  result = assign_entities_part( new_ents, procConfig.proc_rank() );MB_CHK_SET_ERR( result, "Failed to assign entities to part" );
1201  if( migrate )
1202  {
1203  // result = remove_entities_part(allsent, procConfig.proc_rank());MB_CHK_SET_ERR(ressult,
1204  // "Failed to remove entities to part");
1205  }
1206 
1207  // Add requests for any new addl procs
1208  if( recvReqs.size() != 2 * buffProcs.size() )
1209  {
1210  // Shouldn't get here...
1211  MB_SET_ERR( MB_FAILURE, "Requests length doesn't match proc count in entity exchange" );
1212  }
1213 
1214 #ifdef MOAB_HAVE_MPE
1215  if( myDebug->get_verbosity() == 2 )
1216  {
1217  MPE_Log_event( ENTITIES_END, procConfig.proc_rank(), "Ending recv entities." );
1218  }
1219 #endif
1220 
1221  //===========================================
1222  // Send local handles for new entity to owner
1223  //===========================================
1224  std::set< unsigned int >::iterator it = recv_procs.begin();
1225  std::set< unsigned int >::iterator eit = recv_procs.end();
1226  for( ; it != eit; ++it )
1227  {
1228  ind = get_buffers( *it );
1229  // Reserve space on front for size and for initial buff size
1230  remoteOwnedBuffs[ind]->reset_buffer( sizeof( int ) );
1231 
1232  result = pack_remote_handles( L1hloc[ind], L1hrem[ind], L1p[ind], buffProcs[ind], remoteOwnedBuffs[ind] );MB_CHK_SET_ERR( result, "Failed to pack remote handles" );
1233  remoteOwnedBuffs[ind]->set_stored_size();
1234 
1235  if( myDebug->get_verbosity() == 4 )
1236  {
1237  msgs.resize( msgs.size() + 1 );
1238  msgs.back() = new Buffer( *remoteOwnedBuffs[ind] );
1239  }
1240  result = send_buffer( buffProcs[ind], remoteOwnedBuffs[ind], MB_MESG_REMOTEH_SIZE, sendReqs[2 * ind],
1241  recvRemotehReqs[2 * ind + 1], &ackbuff, incoming2 );MB_CHK_SET_ERR( result, "Failed to send remote handles" );
1242  }
1243 
1244  //===========================================
1245  // Process remote handles of my ghosteds
1246  //===========================================
1247  while( incoming2 )
1248  {
1249  PRINT_DEBUG_WAITANY( recvRemotehReqs, MB_MESG_REMOTEH_SIZE, procConfig.proc_rank() );
1250  success = MPI_Waitany( 2 * buffProcs.size(), &recvRemotehReqs[0], &ind, &status );
1251  if( MPI_SUCCESS != success )
1252  {
1253  MB_SET_ERR( MB_FAILURE, "Failed in waitany in owned entity exchange" );
1254  }
1255 
1256  // OK, received something; decrement incoming counter
1257  incoming2--;
1258 
1259  PRINT_DEBUG_RECD( status );
1260  bool done = false;
1261  unsigned int base_ind = 2 * ( ind / 2 );
1262  result = recv_buffer( MB_MESG_REMOTEH_SIZE, status, localOwnedBuffs[ind / 2], recvRemotehReqs[ind],
1263  recvRemotehReqs[ind + 1], incoming2, remoteOwnedBuffs[ind / 2], sendReqs[base_ind],
1264  sendReqs[base_ind + 1], done );MB_CHK_SET_ERR( result, "Failed to receive remote handles" );
1265  if( done )
1266  {
1267  // Incoming remote handles
1268  if( myDebug->get_verbosity() == 4 )
1269  {
1270  msgs.resize( msgs.size() + 1 );
1271  msgs.back() = new Buffer( *localOwnedBuffs[ind / 2] ); // buffers are indexed per processor, so halve the request index
1272  }
1273 
1274  localOwnedBuffs[ind / 2]->reset_ptr( sizeof( int ) );
1275  result =
1276  unpack_remote_handles( buffProcs[ind / 2], localOwnedBuffs[ind / 2]->buff_ptr, L2hloc, L2hrem, L2p );MB_CHK_SET_ERR( result, "Failed to unpack remote handles" );
1277  }
1278  }
1279 
1280 #ifdef MOAB_HAVE_MPE
1281  if( myDebug->get_verbosity() == 2 )
1282  {
1283  MPE_Log_event( RHANDLES_END, procConfig.proc_rank(), "Ending remote handles." );
1284  MPE_Log_event( OWNED_END, procConfig.proc_rank(), "Ending recv entities (still doing checks)." );
1285  }
1286 #endif
1287  myDebug->tprintf( 1, "Exiting recv_entities.\n" );
1288 
1289  return MB_SUCCESS;
1290 }
1291 
1292 ErrorCode ParallelComm::recv_messages( const int from_proc,
1293  const bool store_remote_handles,
1294  const bool is_iface,
1295  Range& final_ents,
1296  int& incoming1,
1297  int& incoming2,
1298  std::vector< std::vector< EntityHandle > >& L1hloc,
1299  std::vector< std::vector< EntityHandle > >& L1hrem,
1300  std::vector< std::vector< int > >& L1p,
1301  std::vector< EntityHandle >& L2hloc,
1302  std::vector< EntityHandle >& L2hrem,
1303  std::vector< unsigned int >& L2p,
1304  std::vector< MPI_Request >& recv_remoteh_reqs )
1305 {
1306 #ifndef MOAB_HAVE_MPI
1307  return MB_FAILURE;
1308 #else
1309  MPI_Status status;
1310  ErrorCode result;
1311  int ind1 = get_buffers( from_proc );
1312  int success, ind2;
1313  std::vector< EntityHandle > new_ents;
1314 
1315  // Wait and receive messages
1316  while( incoming1 )
1317  {
1318  PRINT_DEBUG_WAITANY( recvReqs, MB_MESG_ENTS_SIZE, procConfig.proc_rank() );
1319  success = MPI_Waitany( 2, &recvReqs[2 * ind1], &ind2, &status );
1320  if( MPI_SUCCESS != success )
1321  {
1322  MB_SET_ERR( MB_FAILURE, "Failed in waitany in recv_messages" );
1323  }
1324 
1325  PRINT_DEBUG_RECD( status );
1326 
1327  // OK, received something; decrement incoming counter
1328  incoming1--;
1329  bool done = false;
1330 
1331  // In case ind is for ack, we need index of one before it
1332  ind2 += 2 * ind1;
1333  unsigned int base_ind = 2 * ( ind2 / 2 );
1334 
1335  result = recv_buffer( MB_MESG_ENTS_SIZE, status, remoteOwnedBuffs[ind2 / 2],
1336  // recvbuff,
1337  recvReqs[ind2], recvReqs[ind2 + 1], incoming1, localOwnedBuffs[ind2 / 2],
1338  sendReqs[base_ind], sendReqs[base_ind + 1], done,
1339  ( !is_iface && store_remote_handles ? localOwnedBuffs[ind2 / 2] : NULL ),
1340  MB_MESG_REMOTEH_SIZE, &recv_remoteh_reqs[base_ind], &incoming2 );MB_CHK_SET_ERR( result, "Failed to receive buffer" );
1341 
1342  if( done )
1343  {
1344  // If it is done, unpack buffer
1345  remoteOwnedBuffs[ind2 / 2]->reset_ptr( sizeof( int ) );
1346  result = unpack_buffer( remoteOwnedBuffs[ind2 / 2]->buff_ptr, store_remote_handles, from_proc, ind2 / 2,
1347  L1hloc, L1hrem, L1p, L2hloc, L2hrem, L2p, new_ents );MB_CHK_SET_ERR( result, "Failed to unpack buffer in recv_messages" );
1348 
1349  std::copy( new_ents.begin(), new_ents.end(), range_inserter( final_ents ) );
1350 
1351  // Send local handles for new elements to owner
1352  // Reserve space on front for size and for initial buff size
1353  remoteOwnedBuffs[ind2 / 2]->reset_buffer( sizeof( int ) );
1354 
1355  result = pack_remote_handles( L1hloc[ind2 / 2], L1hrem[ind2 / 2], L1p[ind2 / 2], from_proc,
1356  remoteOwnedBuffs[ind2 / 2] );MB_CHK_SET_ERR( result, "Failed to pack remote handles" );
1357  remoteOwnedBuffs[ind2 / 2]->set_stored_size();
1358 
1359  result = send_buffer( buffProcs[ind2 / 2], remoteOwnedBuffs[ind2 / 2], MB_MESG_REMOTEH_SIZE, sendReqs[ind2],
1360  recv_remoteh_reqs[ind2 + 1], (int*)( localOwnedBuffs[ind2 / 2]->mem_ptr ),
1361  //&ackbuff,
1362  incoming2 );MB_CHK_SET_ERR( result, "Failed to send remote handles" );
1363  }
1364  }
1365 
1366  return MB_SUCCESS;
1367 #endif
1368 }
1369 
1370 ErrorCode ParallelComm::recv_remote_handle_messages( const int from_proc,
1371  int& incoming2,
1372  std::vector< EntityHandle >& L2hloc,
1373  std::vector< EntityHandle >& L2hrem,
1374  std::vector< unsigned int >& L2p,
1375  std::vector< MPI_Request >& recv_remoteh_reqs )
1376 {
1377 #ifndef MOAB_HAVE_MPI
1378  return MB_FAILURE;
1379 #else
1380  MPI_Status status;
1381  ErrorCode result;
1382  int ind1 = get_buffers( from_proc );
1383  int success, ind2;
1384 
1385  while( incoming2 )
1386  {
1387  PRINT_DEBUG_WAITANY( recv_remoteh_reqs, MB_MESG_REMOTEH_SIZE, procConfig.proc_rank() );
1388  success = MPI_Waitany( 2, &recv_remoteh_reqs[2 * ind1], &ind2, &status );
1389  if( MPI_SUCCESS != success )
1390  {
1391  MB_SET_ERR( MB_FAILURE, "Failed in waitany in recv_remote_handle_messages" );
1392  }
1393 
1394  // OK, received something; decrement incoming counter
1395  incoming2--;
1396 
1397  PRINT_DEBUG_RECD( status );
1398 
1399  bool done = false;
1400  ind2 += 2 * ind1;
1401  unsigned int base_ind = 2 * ( ind2 / 2 );
1402  result = recv_buffer( MB_MESG_REMOTEH_SIZE, status, localOwnedBuffs[ind2 / 2], recv_remoteh_reqs[ind2],
1403  recv_remoteh_reqs[ind2 + 1], incoming2, remoteOwnedBuffs[ind2 / 2], sendReqs[base_ind],
1404  sendReqs[base_ind + 1], done );MB_CHK_SET_ERR( result, "Failed to receive remote handles" );
1405  if( done )
1406  {
1407  // Incoming remote handles
1408  localOwnedBuffs[ind2 / 2]->reset_ptr( sizeof( int ) );
1409  result =
1410  unpack_remote_handles( buffProcs[ind2 / 2], localOwnedBuffs[ind2 / 2]->buff_ptr, L2hloc, L2hrem, L2p );MB_CHK_SET_ERR( result, "Failed to unpack remote handles" );
1411  }
1412  }
1413 
1414  return MB_SUCCESS;
1415 #endif
1416 }
1417 
1418 ErrorCode ParallelComm::pack_buffer( Range& orig_ents,
1419  const bool /*adjacencies*/,
1420  const bool tags,
1421  const bool store_remote_handles,
1422  const int to_proc,
1423  Buffer* buff,
1424  TupleList* entprocs,
1425  Range* allsent )
1426 {
1427  // Pack the buffer with the entity ranges, adjacencies, and tags sections
1428  //
1429  // Note: new entities used in subsequent connectivity lists, sets, or tags,
1430  // are referred to as (MBMAXTYPE + index), where index is into vector
1431  // of new entities, 0-based
1432  ErrorCode result;
1433 
1434  Range set_range;
1435  std::vector< Tag > all_tags;
1436  std::vector< Range > tag_ranges;
1437 
1439 
1440  // Entities
1441  result = pack_entities( orig_ents, buff, store_remote_handles, to_proc, false, entprocs, allsent );MB_CHK_SET_ERR( result, "Packing entities failed" );
1442 
1443  // Sets
1444  result = pack_sets( orig_ents, buff, store_remote_handles, to_proc );MB_CHK_SET_ERR( result, "Packing sets (count) failed" );
1445 
1446  // Tags
1447  Range final_ents;
1448  if( tags )
1449  {
1450  result = get_tag_send_list( orig_ents, all_tags, tag_ranges );MB_CHK_SET_ERR( result, "Failed to get tagged entities" );
1451  result = pack_tags( orig_ents, all_tags, all_tags, tag_ranges, buff, store_remote_handles, to_proc );MB_CHK_SET_ERR( result, "Packing tags (count) failed" );
1452  }
1453  else
1454  { // Set tag size to 0
1455  buff->check_space( sizeof( int ) );
1456  PACK_INT( buff->buff_ptr, 0 );
1457  buff->set_stored_size();
1458  }
1459 
1460  return result;
1461 }
1462 
1463 ErrorCode ParallelComm::unpack_buffer( unsigned char* buff_ptr,
1464  const bool store_remote_handles,
1465  const int from_proc,
1466  const int ind,
1467  std::vector< std::vector< EntityHandle > >& L1hloc,
1468  std::vector< std::vector< EntityHandle > >& L1hrem,
1469  std::vector< std::vector< int > >& L1p,
1470  std::vector< EntityHandle >& L2hloc,
1471  std::vector< EntityHandle >& L2hrem,
1472  std::vector< unsigned int >& L2p,
1473  std::vector< EntityHandle >& new_ents,
1474  const bool created_iface )
1475 {
1476  unsigned char* tmp_buff = buff_ptr;
1477  ErrorCode result;
1478  result = unpack_entities( buff_ptr, store_remote_handles, ind, false, L1hloc, L1hrem, L1p, L2hloc, L2hrem, L2p,
1479  new_ents, created_iface );MB_CHK_SET_ERR( result, "Unpacking entities failed" );
1480  if( myDebug->get_verbosity() == 3 )
1481  {
1482  myDebug->tprintf( 4, "unpack_entities buffer space: %ld bytes.\n", (long int)( buff_ptr - tmp_buff ) );
1483  tmp_buff = buff_ptr;
1484  }
1485  result = unpack_sets( buff_ptr, new_ents, store_remote_handles, from_proc );MB_CHK_SET_ERR( result, "Unpacking sets failed" );
1486  if( myDebug->get_verbosity() == 3 )
1487  {
1488  myDebug->tprintf( 4, "unpack_sets buffer space: %ld bytes.\n", (long int)( buff_ptr - tmp_buff ) );
1489  tmp_buff = buff_ptr;
1490  }
1491  result = unpack_tags( buff_ptr, new_ents, store_remote_handles, from_proc );MB_CHK_SET_ERR( result, "Unpacking tags failed" );
1492  if( myDebug->get_verbosity() == 3 )
1493  {
1494  myDebug->tprintf( 4, "unpack_tags buffer space: %ld bytes.\n", (long int)( buff_ptr - tmp_buff ) );
1495  // tmp_buff = buff_ptr;
1496  }
1497 
1498  if( myDebug->get_verbosity() == 3 ) myDebug->print( 4, "\n" );
1499 
1500  return MB_SUCCESS;
1501 }
1502 
1503 int ParallelComm::estimate_ents_buffer_size( Range& entities, const bool store_remote_handles )
1504 {
1505  int buff_size = 0;
1506  std::vector< EntityHandle > dum_connect_vec;
1507  const EntityHandle* connect;
1508  int num_connect;
1509 
1510  int num_verts = entities.num_of_type( MBVERTEX );
1511  // # verts + coords + handles
1512  buff_size += 2 * sizeof( int ) + 3 * sizeof( double ) * num_verts;
1513  if( store_remote_handles ) buff_size += sizeof( EntityHandle ) * num_verts;
1514 
1515  // Do a rough count by looking at first entity of each type
1516  for( EntityType t = MBEDGE; t < MBENTITYSET; t++ )
1517  {
1518  const Range::iterator rit = entities.lower_bound( t );
1519  if( TYPE_FROM_HANDLE( *rit ) != t ) continue;
1520 
1521  ErrorCode result = mbImpl->get_connectivity( *rit, connect, num_connect, false, &dum_connect_vec );MB_CHK_SET_ERR_RET_VAL( result, "Failed to get connectivity to estimate buffer size", -1 );
1522 
1523  // Number, type, nodes per entity
1524  buff_size += 3 * sizeof( int );
1525  int num_ents = entities.num_of_type( t );
1526  // Connectivity, handle for each ent
1527  buff_size += ( num_connect + 1 ) * sizeof( EntityHandle ) * num_ents;
1528  }
1529 
1530  // Extra entity type at end, passed as int
1531  buff_size += sizeof( int );
1532 
1533  return buff_size;
1534 }
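// Editor's note (worked example, not in the original source): with typical
// 64-bit sizes (sizeof(int) == 4, sizeof(double) == 8, sizeof(EntityHandle) == 8
// -- assumptions, not guarantees), 1000 vertices packed with remote handles are
// estimated at 2*4 + 3*8*1000 + 8*1000 = 32008 bytes, before any
// higher-dimension entities or the trailing end-of-ranges marker are counted.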
1535 
1536 int ParallelComm::estimate_sets_buffer_size( Range& entities, const bool /*store_remote_handles*/ )
1537 {
1538  // Number of sets
1539  int buff_size = sizeof( int );
1540 
1541  // Do a rough count by looking at first entity of each type
1542  Range::iterator rit = entities.lower_bound( MBENTITYSET );
1543  ErrorCode result;
1544 
1545  for( ; rit != entities.end(); ++rit )
1546  {
1547  unsigned int options;
1548  result = mbImpl->get_meshset_options( *rit, options );MB_CHK_SET_ERR_RET_VAL( result, "Failed to get meshset options", -1 );
1549 
1550  buff_size += sizeof( int );
1551 
1552  Range set_range;
1553  if( options & MESHSET_SET )
1554  {
1555  // Range-based set; count the subranges
1556  result = mbImpl->get_entities_by_handle( *rit, set_range );MB_CHK_SET_ERR_RET_VAL( result, "Failed to get set entities", -1 );
1557 
1558  // Set range
1559  buff_size += RANGE_SIZE( set_range );
1560  }
1561  else if( options & MESHSET_ORDERED )
1562  {
1563  // Just get the number of entities in the set
1564  int num_ents;
1565  result = mbImpl->get_number_entities_by_handle( *rit, num_ents );MB_CHK_SET_ERR_RET_VAL( result, "Failed to get number entities in ordered set", -1 );
1566 
1567  // Set vec
1568  buff_size += sizeof( EntityHandle ) * num_ents + sizeof( int );
1569  }
1570 
1571  // Get numbers of parents/children
1572  int num_par, num_ch;
1573  result = mbImpl->num_child_meshsets( *rit, &num_ch );MB_CHK_SET_ERR_RET_VAL( result, "Failed to get num children", -1 );
1574  result = mbImpl->num_parent_meshsets( *rit, &num_par );MB_CHK_SET_ERR_RET_VAL( result, "Failed to get num parents", -1 );
1575 
1576  buff_size += ( num_ch + num_par ) * sizeof( EntityHandle ) + 2 * sizeof( int );
1577  }
1578 
1579  return buff_size;
1580 }
1581 
1582 ErrorCode ParallelComm::pack_entities( Range& entities,
1583  Buffer* buff,
1584  const bool store_remote_handles,
1585  const int to_proc,
1586  const bool /*is_iface*/,
1587  TupleList* entprocs,
1588  Range* /*allsent*/ )
1589 {
1590  // Packed information:
1591  // 1. # entities = E
1592  // 2. for e in E
1593  // a. # procs sharing e, incl. sender and receiver = P
1594  // b. for p in P (procs sharing e)
1595  // c. for p in P (handle for e on p) (Note1)
1596  // 3. vertex/entity info
1597 
1598  // Get an estimate of the buffer size & pre-allocate buffer size
1599  int buff_size = estimate_ents_buffer_size( entities, store_remote_handles );
1600  if( buff_size < 0 ) MB_SET_ERR( MB_FAILURE, "Failed to estimate ents buffer size" );
1601  buff->check_space( buff_size );
1602  myDebug->tprintf( 3, "estimate buffer size for %d entities: %d \n", (int)entities.size(), buff_size );
1603 
1604  unsigned int num_ents;
1605  ErrorCode result;
1606 
1607  std::vector< EntityHandle > entities_vec( entities.size() );
1608  std::copy( entities.begin(), entities.end(), entities_vec.begin() );
1609 
1610  // First pack procs/handles sharing this ent, not including this dest but including
1611  // others (with zero handles)
1612  if( store_remote_handles )
1613  {
1614  // Buff space is at least proc + handle for each entity; use avg of 4 other procs
1615  // to estimate buff size, but check later
1616  buff->check_space( sizeof( int ) + ( 5 * sizeof( int ) + sizeof( EntityHandle ) ) * entities.size() );
1617 
1618  // 1. # entities = E
1619  PACK_INT( buff->buff_ptr, entities.size() );
1620 
1621  Range::iterator rit;
1622 
1623  // Pre-fetch sharedp and pstatus
1624  std::vector< int > sharedp_vals( entities.size() );
1625  result = mbImpl->tag_get_data( sharedp_tag(), entities, &sharedp_vals[0] );MB_CHK_SET_ERR( result, "Failed to get sharedp tag data" );
1626  std::vector< char > pstatus_vals( entities.size() );
1627  result = mbImpl->tag_get_data( pstatus_tag(), entities, &pstatus_vals[0] );MB_CHK_SET_ERR( result, "Failed to get pstatus tag data" );
1628 
1629  unsigned int i;
1630  int tmp_procs[MAX_SHARING_PROCS];
1631  EntityHandle tmp_handles[MAX_SHARING_PROCS];
1632  std::set< unsigned int > dumprocs;
1633 
1634  // 2. for e in E
1635  for( rit = entities.begin(), i = 0; rit != entities.end(); ++rit, i++ )
1636  {
1637  unsigned int ind =
1638  std::lower_bound( entprocs->vul_rd, entprocs->vul_rd + entprocs->get_n(), *rit ) - entprocs->vul_rd;
1639  assert( ind < entprocs->get_n() );
1640 
1641  while( ind < entprocs->get_n() && entprocs->vul_rd[ind] == *rit )
1642  dumprocs.insert( entprocs->vi_rd[ind++] );
1643 
1644  result = build_sharedhps_list( *rit, pstatus_vals[i], sharedp_vals[i], dumprocs, num_ents, tmp_procs,
1645  tmp_handles );MB_CHK_SET_ERR( result, "Failed to build sharedhps" );
1646 
1647  dumprocs.clear();
1648 
1649  // Now pack them
1650  buff->check_space( ( num_ents + 1 ) * sizeof( int ) + num_ents * sizeof( EntityHandle ) );
1651  PACK_INT( buff->buff_ptr, num_ents );
1652  PACK_INTS( buff->buff_ptr, tmp_procs, num_ents );
1653  PACK_EH( buff->buff_ptr, tmp_handles, num_ents );
1654 
1655 #ifndef NDEBUG
1656  // Check for duplicates in proc list
1657  unsigned int dp = 0;
1658  for( ; dp < MAX_SHARING_PROCS && -1 != tmp_procs[dp]; dp++ )
1659  dumprocs.insert( tmp_procs[dp] );
1660  assert( dumprocs.size() == dp );
1661  dumprocs.clear();
1662 #endif
1663  }
1664  }
1665 
1666  // Pack vertices
1667  Range these_ents = entities.subset_by_type( MBVERTEX );
1668  num_ents = these_ents.size();
1669 
1670  if( num_ents )
1671  {
1672  buff_size = 2 * sizeof( int ) + 3 * num_ents * sizeof( double );
1673  buff->check_space( buff_size );
1674 
1675  // Type, # ents
1676  PACK_INT( buff->buff_ptr, ( (int)MBVERTEX ) );
1677  PACK_INT( buff->buff_ptr, ( (int)num_ents ) );
1678 
1679  std::vector< double > tmp_coords( 3 * num_ents );
1680  result = mbImpl->get_coords( these_ents, &tmp_coords[0] );MB_CHK_SET_ERR( result, "Failed to get vertex coordinates" );
1681  PACK_DBLS( buff->buff_ptr, &tmp_coords[0], 3 * num_ents );
1682 
1683  myDebug->tprintf( 4, "Packed %lu ents of type %s\n", (unsigned long)these_ents.size(),
1684  CN::EntityTypeName( TYPE_FROM_HANDLE( *these_ents.begin() ) ) );
1685  }
1686 
1687  // Now entities; go through range, packing by type and equal # verts per element
1688  Range::iterator start_rit = entities.find( *these_ents.rbegin() );
1689  ++start_rit;
1690  int last_nodes = -1;
1691  EntityType last_type = MBMAXTYPE;
1692  these_ents.clear();
1693  Range::iterator end_rit = start_rit;
1694  EntitySequence* seq;
1695  ElementSequence* eseq;
1696 
1697  while( start_rit != entities.end() || !these_ents.empty() )
1698  {
1699  // Cases:
1700  // A: !end, last_type == MBMAXTYPE, seq: save contig sequence in these_ents
1701  // B: !end, last type & nodes same, seq: save contig sequence in these_ents
1702  // C: !end, last type & nodes different: pack these_ents, then save contig sequence in these_ents
1703  // D: end: pack these_ents
1704 
1705  // Find the sequence holding current start entity, if we're not at end
1706  eseq = NULL;
1707  if( start_rit != entities.end() )
1708  {
1709  result = sequenceManager->find( *start_rit, seq );MB_CHK_SET_ERR( result, "Failed to find entity sequence" );
1710  if( NULL == seq ) return MB_FAILURE;
1711  eseq = dynamic_cast< ElementSequence* >( seq );
1712  }
1713 
1714  // Pack the last batch if at end or next one is different
1715  if( !these_ents.empty() &&
1716  ( !eseq || eseq->type() != last_type || last_nodes != (int)eseq->nodes_per_element() ) )
1717  {
1718  result = pack_entity_seq( last_nodes, store_remote_handles, to_proc, these_ents, entities_vec, buff );MB_CHK_SET_ERR( result, "Failed to pack entities from a sequence" );
1719  these_ents.clear();
1720  }
1721 
1722  if( eseq )
1723  {
1724  // Continuation of current range, just save these entities
1725  // Get position in entities list one past end of this sequence
1726  end_rit = entities.lower_bound( start_rit, entities.end(), eseq->end_handle() + 1 );
1727 
1728  // Put these entities in the range
1729  std::copy( start_rit, end_rit, range_inserter( these_ents ) );
1730 
1731  last_type = eseq->type();
1732  last_nodes = eseq->nodes_per_element();
1733  }
1734  else if( start_rit != entities.end() && TYPE_FROM_HANDLE( *start_rit ) == MBENTITYSET )
1735  break;
1736 
1737  start_rit = end_rit;
1738  }
1739 
1740  // Pack MBMAXTYPE to indicate end of ranges
1741  buff->check_space( sizeof( int ) );
1742  PACK_INT( buff->buff_ptr, ( (int)MBMAXTYPE ) );
1743 
1744  buff->set_stored_size();
1745  return MB_SUCCESS;
1746 }
1747 
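// Sketch of the ordering produced by build_sharedhps_list (illustrative, not
// part of the original source): the owner's rank/handle comes first, this proc
// follows if the entity is already shared, and the remaining ranks from the
// 'procs' argument are appended with a handle of 0 (unknown). For example, on
// rank 4, for an entity owned by rank 1 and also being sent to rank 7:
//   tmp_procs   = { 1, 4, 7, -1, ... }
//   tmp_handles = { h_on_1, h_on_4, 0, 0, ... }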
1748 ErrorCode ParallelComm::build_sharedhps_list( const EntityHandle entity,
1749  const unsigned char pstatus,
1750  const int
1751 #ifndef NDEBUG
1752  sharedp
1753 #endif
1754  ,
1755  const std::set< unsigned int >& procs,
1756  unsigned int& num_ents,
1757  int* tmp_procs,
1758  EntityHandle* tmp_handles )
1759 {
1760  num_ents = 0;
1761  unsigned char pstat;
1762  ErrorCode result = get_sharing_data( entity, tmp_procs, tmp_handles, pstat, num_ents );MB_CHK_SET_ERR( result, "Failed to get sharing data" );
1763  assert( pstat == pstatus );
1764 
1765  // Build shared proc/handle lists
1766  // Start with multi-shared, since if it is the owner will be first
1767  if( pstatus & PSTATUS_MULTISHARED )
1768  { // Nothing more to do: get_sharing_data already filled tmp_procs/tmp_handles with the full list, owner first
1769  }
1770  else if( pstatus & PSTATUS_NOT_OWNED )
1771  {
1772  // If not multishared and not owned, other sharing proc is owner, put that
1773  // one first
1774  assert( "If not owned, I should be shared too" && pstatus & PSTATUS_SHARED && 1 == num_ents );
1775  tmp_procs[1] = procConfig.proc_rank();
1776  tmp_handles[1] = entity;
1777  num_ents = 2;
1778  }
1779  else if( pstatus & PSTATUS_SHARED )
1780  {
1781  // If not multishared and owned, I'm owner
1782  assert( "shared and owned, should be only 1 sharing proc" && 1 == num_ents );
1783  tmp_procs[1] = tmp_procs[0];
1784  tmp_procs[0] = procConfig.proc_rank();
1785  tmp_handles[1] = tmp_handles[0];
1786  tmp_handles[0] = entity;
1787  num_ents = 2;
1788  }
1789  else
1790  {
1791  // Not shared yet, just add owner (me)
1792  tmp_procs[0] = procConfig.proc_rank();
1793  tmp_handles[0] = entity;
1794  num_ents = 1;
1795  }
1796 
1797 #ifndef NDEBUG
1798  int tmp_ps = num_ents;
1799 #endif
1800 
1801  // Now add others, with zero handle for now
1802  for( std::set< unsigned int >::iterator sit = procs.begin(); sit != procs.end(); ++sit )
1803  {
1804 #ifndef NDEBUG
1805  if( tmp_ps && std::find( tmp_procs, tmp_procs + tmp_ps, *sit ) != tmp_procs + tmp_ps )
1806  {
1807  std::cerr << "Trouble with something already in shared list on proc " << procConfig.proc_rank()
1808  << ". Entity:" << std::endl;
1809  list_entities( &entity, 1 );
1810  std::cerr << "pstatus = " << (int)pstatus << ", sharedp = " << sharedp << std::endl;
1811  std::cerr << "tmp_ps = ";
1812  for( int i = 0; i < tmp_ps; i++ )
1813  std::cerr << tmp_procs[i] << " ";
1814  std::cerr << std::endl;
1815  std::cerr << "procs = ";
1816  for( std::set< unsigned int >::iterator sit2 = procs.begin(); sit2 != procs.end(); ++sit2 )
1817  std::cerr << *sit2 << " ";
1818  assert( false );
1819  }
1820 #endif
1821  tmp_procs[num_ents] = *sit;
1822  tmp_handles[num_ents] = 0;
1823  num_ents++;
1824  }
1825 
1826  // Put -1 after procs and 0 after handles
1827  if( MAX_SHARING_PROCS > num_ents )
1828  {
1829  tmp_procs[num_ents] = -1;
1830  tmp_handles[num_ents] = 0;
1831  }
1832 
1833  return MB_SUCCESS;
1834 }
1835 
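// Layout written by pack_entity_seq for one batch of same-type, same-size
// elements (illustrative, not part of the original source):
//   [entity type] [# ents] [nodes per entity] [connectivity handles ...]
// where the connectivity has already been translated by get_remote_handles to
// the receiver's handles or to index placeholders.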
1836 ErrorCode ParallelComm::pack_entity_seq( const int nodes_per_entity,
1837  const bool store_remote_handles,
1838  const int to_proc,
1839  Range& these_ents,
1840  std::vector< EntityHandle >& entities_vec,
1841  Buffer* buff )
1842 {
1843  int tmp_space = 3 * sizeof( int ) + nodes_per_entity * these_ents.size() * sizeof( EntityHandle );
1844  buff->check_space( tmp_space );
1845 
1846  // Pack the entity type
1847  PACK_INT( buff->buff_ptr, ( (int)TYPE_FROM_HANDLE( *these_ents.begin() ) ) );
1848 
1849  // Pack # ents
1850  PACK_INT( buff->buff_ptr, these_ents.size() );
1851 
1852  // Pack the nodes per entity
1853  PACK_INT( buff->buff_ptr, nodes_per_entity );
1854  myDebug->tprintf( 3, "after some pack int %d \n", buff->get_current_size() );
1855 
1856  // Pack the connectivity
1857  std::vector< EntityHandle > connect;
1858  ErrorCode result = MB_SUCCESS;
1859  for( Range::const_iterator rit = these_ents.begin(); rit != these_ents.end(); ++rit )
1860  {
1861  connect.clear();
1862  result = mbImpl->get_connectivity( &( *rit ), 1, connect, false );MB_CHK_SET_ERR( result, "Failed to get connectivity" );
1863  assert( (int)connect.size() == nodes_per_entity );
1864  result =
1865  get_remote_handles( store_remote_handles, &connect[0], &connect[0], connect.size(), to_proc, entities_vec );MB_CHK_SET_ERR( result, "Failed in get_remote_handles" );
1866  PACK_EH( buff->buff_ptr, &connect[0], connect.size() );
1867  }
1868 
1869  myDebug->tprintf( 3, "Packed %lu ents of type %s\n", (unsigned long)these_ents.size(),
1870  CN::EntityTypeName( TYPE_FROM_HANDLE( *these_ents.begin() ) ) );
1871 
1872  return result;
1873 }
1874 
1875 ErrorCode ParallelComm::get_remote_handles( const bool store_remote_handles,
1876  EntityHandle* from_vec,
1877  EntityHandle* to_vec_tmp,
1878  int num_ents,
1879  int to_proc,
1880  const std::vector< EntityHandle >& new_ents )
1881 {
1882  // NOTE: THIS IMPLEMENTATION IS JUST LIKE THE RANGE-BASED VERSION, NO REUSE
1883  // AT THIS TIME, SO IF YOU FIX A BUG IN THIS VERSION, IT MAY BE IN THE
1884  // OTHER VERSION TOO!!!
1885  if( 0 == num_ents ) return MB_SUCCESS;
1886 
1887  // Use a local destination ptr in case we're doing an in-place copy
1888  std::vector< EntityHandle > tmp_vector;
1889  EntityHandle* to_vec = to_vec_tmp;
1890  if( to_vec == from_vec )
1891  {
1892  tmp_vector.resize( num_ents );
1893  to_vec = &tmp_vector[0];
1894  }
1895 
1896  if( !store_remote_handles )
1897  {
1898  int err;
1899  // In this case, substitute position in new_ents list
1900  for( int i = 0; i < num_ents; i++ )
1901  {
1902  int ind = std::lower_bound( new_ents.begin(), new_ents.end(), from_vec[i] ) - new_ents.begin();
1903  assert( new_ents[ind] == from_vec[i] );
1904  to_vec[i] = CREATE_HANDLE( MBMAXTYPE, ind, err );
1905  assert( to_vec[i] != 0 && !err && -1 != ind );
1906  }
1907  }
1908  else
1909  {
1910  Tag shp_tag, shps_tag, shh_tag, shhs_tag, pstat_tag;
1911  ErrorCode result = get_shared_proc_tags( shp_tag, shps_tag, shh_tag, shhs_tag, pstat_tag );MB_CHK_SET_ERR( result, "Failed to get shared proc tags" );
1912 
1913  // Get single-proc destination handles and shared procs
1914  std::vector< int > sharing_procs( num_ents );
1915  result = mbImpl->tag_get_data( shh_tag, from_vec, num_ents, to_vec );MB_CHK_SET_ERR( result, "Failed to get shared handle tag for remote_handles" );
1916  result = mbImpl->tag_get_data( shp_tag, from_vec, num_ents, &sharing_procs[0] );MB_CHK_SET_ERR( result, "Failed to get sharing proc tag in remote_handles" );
1917  for( int j = 0; j < num_ents; j++ )
1918  {
1919  if( to_vec[j] && sharing_procs[j] != to_proc ) to_vec[j] = 0;
1920  }
1921 
1922  EntityHandle tmp_handles[MAX_SHARING_PROCS];
1923  int tmp_procs[MAX_SHARING_PROCS];
1924  int i;
1925  // Go through results, and for 0-valued ones, look for multiple shared proc
1926  for( i = 0; i < num_ents; i++ )
1927  {
1928  if( !to_vec[i] )
1929  {
1930  result = mbImpl->tag_get_data( shps_tag, from_vec + i, 1, tmp_procs );
1931  if( MB_SUCCESS == result )
1932  {
1933  for( int j = 0; j < MAX_SHARING_PROCS; j++ )
1934  {
1935  if( -1 == tmp_procs[j] )
1936  break;
1937  else if( tmp_procs[j] == to_proc )
1938  {
1939  result = mbImpl->tag_get_data( shhs_tag, from_vec + i, 1, tmp_handles );MB_CHK_SET_ERR( result, "Failed to get sharedhs tag data" );
1940  to_vec[i] = tmp_handles[j];
1941  assert( to_vec[i] );
1942  break;
1943  }
1944  }
1945  }
1946  if( !to_vec[i] )
1947  {
1948  int j = std::lower_bound( new_ents.begin(), new_ents.end(), from_vec[i] ) - new_ents.begin();
1949  if( (int)new_ents.size() == j )
1950  {
1951  std::cout << "Failed to find new entity in send list, proc " << procConfig.proc_rank()
1952  << std::endl;
1953  for( int k = 0; k < num_ents; k++ )
1954  std::cout << k << ": " << from_vec[k] << " " << to_vec[k] << std::endl;
1955  MB_SET_ERR( MB_FAILURE, "Failed to find new entity in send list" );
1956  }
1957  int err;
1958  to_vec[i] = CREATE_HANDLE( MBMAXTYPE, j, err );
1959  if( err )
1960  {
1961  MB_SET_ERR( MB_FAILURE, "Failed to create handle in remote_handles" );
1962  }
1963  }
1964  }
1965  }
1966  }
1967 
1968  // memcpy over results if from_vec and to_vec are the same
1969  if( to_vec_tmp == from_vec ) memcpy( from_vec, to_vec, num_ents * sizeof( EntityHandle ) );
1970 
1971  return MB_SUCCESS;
1972 }
1973 
1974 ErrorCode ParallelComm::get_remote_handles( const bool store_remote_handles,
1975  const Range& from_range,
1976  EntityHandle* to_vec,
1977  int to_proc,
1978  const std::vector< EntityHandle >& new_ents )
1979 {
1980  // NOTE: THIS IMPLEMENTATION IS JUST LIKE THE VECTOR-BASED VERSION, NO REUSE
1981  // AT THIS TIME, SO IF YOU FIX A BUG IN THIS VERSION, IT MAY BE IN THE
1982  // OTHER VERSION TOO!!!
1983  if( from_range.empty() ) return MB_SUCCESS;
1984 
1985  if( !store_remote_handles )
1986  {
1987  int err;
1988  // In this case, substitute position in new_ents list
1989  Range::iterator rit;
1990  unsigned int i;
1991  for( rit = from_range.begin(), i = 0; rit != from_range.end(); ++rit, i++ )
1992  {
1993  int ind = std::lower_bound( new_ents.begin(), new_ents.end(), *rit ) - new_ents.begin();
1994  assert( new_ents[ind] == *rit );
1995  to_vec[i] = CREATE_HANDLE( MBMAXTYPE, ind, err );
1996  assert( to_vec[i] != 0 && !err && -1 != ind );
1997  }
1998  }
1999  else
2000  {
2001  Tag shp_tag, shps_tag, shh_tag, shhs_tag, pstat_tag;
2002  ErrorCode result = get_shared_proc_tags( shp_tag, shps_tag, shh_tag, shhs_tag, pstat_tag );MB_CHK_SET_ERR( result, "Failed to get shared proc tags" );
2003 
2004  // Get single-proc destination handles and shared procs
2005  std::vector< int > sharing_procs( from_range.size() );
2006  result = mbImpl->tag_get_data( shh_tag, from_range, to_vec );MB_CHK_SET_ERR( result, "Failed to get shared handle tag for remote_handles" );
2007  result = mbImpl->tag_get_data( shp_tag, from_range, &sharing_procs[0] );MB_CHK_SET_ERR( result, "Failed to get sharing proc tag in remote_handles" );
2008  for( unsigned int j = 0; j < from_range.size(); j++ )
2009  {
2010  if( to_vec[j] && sharing_procs[j] != to_proc ) to_vec[j] = 0;
2011  }
2012 
2013  EntityHandle tmp_handles[MAX_SHARING_PROCS];
2014  int tmp_procs[MAX_SHARING_PROCS];
2015  // Go through results, and for 0-valued ones, look for multiple shared proc
2016  Range::iterator rit;
2017  unsigned int i;
2018  for( rit = from_range.begin(), i = 0; rit != from_range.end(); ++rit, i++ )
2019  {
2020  if( !to_vec[i] )
2021  {
2022  result = mbImpl->tag_get_data( shhs_tag, &( *rit ), 1, tmp_handles );
2023  if( MB_SUCCESS == result )
2024  {
2025  result = mbImpl->tag_get_data( shps_tag, &( *rit ), 1, tmp_procs );MB_CHK_SET_ERR( result, "Failed to get sharedps tag data" );
2026  for( int j = 0; j < MAX_SHARING_PROCS; j++ )
2027  if( tmp_procs[j] == to_proc )
2028  {
2029  to_vec[i] = tmp_handles[j];
2030  break;
2031  }
2032  }
2033 
2034  if( !to_vec[i] )
2035  {
2036  int j = std::lower_bound( new_ents.begin(), new_ents.end(), *rit ) - new_ents.begin();
2037  if( (int)new_ents.size() == j )
2038  {
2039  MB_SET_ERR( MB_FAILURE, "Failed to find new entity in send list" );
2040  }
2041  int err;
2042  to_vec[i] = CREATE_HANDLE( MBMAXTYPE, j, err );
2043  if( err )
2044  {
2045  MB_SET_ERR( MB_FAILURE, "Failed to create handle in remote_handles" );
2046  }
2047  }
2048  }
2049  }
2050  }
2051 
2052  return MB_SUCCESS;
2053 }
2054 
2055 ErrorCode ParallelComm::get_remote_handles( const bool store_remote_handles,
2056  const Range& from_range,
2057  Range& to_range,
2058  int to_proc,
2059  const std::vector< EntityHandle >& new_ents )
2060 {
2061  std::vector< EntityHandle > to_vector( from_range.size() );
2062 
2063  ErrorCode result = get_remote_handles( store_remote_handles, from_range, &to_vector[0], to_proc, new_ents );MB_CHK_SET_ERR( result, "Failed to get remote handles" );
2064  std::copy( to_vector.begin(), to_vector.end(), range_inserter( to_range ) );
2065  return result;
2066 }
2067 
2068 ErrorCode ParallelComm::unpack_entities( unsigned char*& buff_ptr,
2069  const bool store_remote_handles,
2070  const int /*from_ind*/,
2071  const bool is_iface,
2072  std::vector< std::vector< EntityHandle > >& L1hloc,
2073  std::vector< std::vector< EntityHandle > >& L1hrem,
2074  std::vector< std::vector< int > >& L1p,
2075  std::vector< EntityHandle >& L2hloc,
2076  std::vector< EntityHandle >& L2hrem,
2077  std::vector< unsigned int >& L2p,
2078  std::vector< EntityHandle >& new_ents,
2079  const bool created_iface )
2080 {
2081  // General algorithm:
2082  // - unpack # entities
2083  // - save start of remote handle info, then scan forward to entity definition data
2084  // - for all vertices or entities w/ same # verts:
2085  // . get entity type, num ents, and (if !vert) # verts
2086  // . for each ent:
2087  // o get # procs/handles in remote handle info
2088  // o if # procs/handles > 2, check for already-created entity:
2089  // x get index of owner proc (1st in proc list), resize L1 list if nec
2090  // x look for already-arrived entity in L2 by owner handle
2091  // o if no existing entity:
2092  // x if iface, look for existing entity with same connect & type
2093  // x if none found, create vertex or element
2094  // x if !iface & multi-shared, save on L2
2095  // x if !iface, put new entity on new_ents list
2096  // o update proc/handle, pstatus tags, adjusting to put owner first if iface
2097  // o if !iface, save new handle on L1 for all sharing procs
2098 
2099  // Lists of handles/procs to return to sending/other procs
2100  // L1hloc[p], L1hrem[p]: handle pairs [h, h'], where h is the local proc handle
2101  // and h' is either the remote proc handle (if that is known) or
2102  // the owner proc handle (otherwise);
2103  // L1p[p]: indicates whether h is remote handle (= -1) or owner (rank of owner)
2104  // L2hloc, L2hrem: local/remote handles for entities shared by > 2 procs;
2105  // remote handles are on owning proc
2106  // L2p: owning procs for handles in L2hrem
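 //
 // Illustrative example (not part of the original source): suppose this proc
 // unpacks a ghost entity owned by proc 3 that is also shared with proc 7. The
 // new local handle h is recorded for both procs in the L1 lists; if the remote
 // handle on proc 7 is not yet known, the owner's handle is stored instead and
 // L1p marks it with the owner's rank (3) rather than -1. Because more than two
 // procs share it, (h, owner handle, 3) is also appended to (L2hloc, L2hrem,
 // L2p) so later-arriving messages can find it by owner handle.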
2107 
2108  ErrorCode result;
2109  bool done = false;
2110  ReadUtilIface* ru = NULL;
2111 
2112  result = mbImpl->query_interface( ru );MB_CHK_SET_ERR( result, "Failed to get ReadUtilIface" );
2113 
2114  // 1. # entities = E
2115  int num_ents = 0;
2116  unsigned char* buff_save = buff_ptr;
2117  int i, j;
2118 
2119  if( store_remote_handles )
2120  {
2121  UNPACK_INT( buff_ptr, num_ents );
2122 
2123  buff_save = buff_ptr;
2124 
2125  // Save place where remote handle info starts, then scan forward to ents
2126  for( i = 0; i < num_ents; i++ )
2127  {
2128  UNPACK_INT( buff_ptr, j );
2129  if( j < 0 )
2130  {
2131  std::cout << "Should be non-negative # proc/handles.";
2132  return MB_FAILURE;
2133  }
2134 
2135  buff_ptr += j * ( sizeof( int ) + sizeof( EntityHandle ) );
2136  }
2137  }
2138 
2139  std::vector< EntityHandle > msg_ents;
2140 
2141  while( !done )
2142  {
2143  EntityType this_type = MBMAXTYPE;
2144  UNPACK_TYPE( buff_ptr, this_type );
2145  assert( this_type != MBENTITYSET );
2146 
2147  // MBMAXTYPE signifies end of entities data
2148  if( MBMAXTYPE == this_type ) break;
2149 
2150  // Get the number of ents
2151  int num_ents2, verts_per_entity = 0;
2152  UNPACK_INT( buff_ptr, num_ents2 );
2153 
2154  // Unpack the nodes per entity
2155  if( MBVERTEX != this_type && num_ents2 )
2156  {
2157  UNPACK_INT( buff_ptr, verts_per_entity );
2158  }
2159 
2160  std::vector< int > ps( MAX_SHARING_PROCS, -1 );
2161  std::vector< EntityHandle > hs( MAX_SHARING_PROCS, 0 );
2162  for( int e = 0; e < num_ents2; e++ )
2163  {
2164  // Check for existing entity, otherwise make new one
2165  EntityHandle new_h = 0;
2166  EntityHandle connect[CN::MAX_NODES_PER_ELEMENT];
2167  double coords[3];
2168  int num_ps = -1;
2169 
2170  //=======================================
2171  // Unpack all the data at once, to make sure the buffer pointers
2172  // are tracked correctly
2173  //=======================================
2174  if( store_remote_handles )
2175  {
2176  // Pointers to other procs/handles
2177  UNPACK_INT( buff_save, num_ps );
2178  if( 0 >= num_ps )
2179  {
2180  std::cout << "Shouldn't ever be fewer than 1 proc here." << std::endl;
2181  return MB_FAILURE;
2182  }
2183 
2184  UNPACK_INTS( buff_save, &ps[0], num_ps );
2185  UNPACK_EH( buff_save, &hs[0], num_ps );
2186  }
2187 
2188  if( MBVERTEX == this_type )
2189  {
2190  UNPACK_DBLS( buff_ptr, coords, 3 );
2191  }
2192  else
2193  {
2194  assert( verts_per_entity <= CN::MAX_NODES_PER_ELEMENT );
2195  UNPACK_EH( buff_ptr, connect, verts_per_entity );
2196 
2197  // Update connectivity to local handles
2198  result = get_local_handles( connect, verts_per_entity, msg_ents );MB_CHK_SET_ERR( result, "Failed to get local handles" );
2199  }
2200 
2201  //=======================================
2202  // Now, process that data; begin by finding an identical
2203  // entity, if there is one
2204  //=======================================
2205  if( store_remote_handles )
2206  {
2207  result = find_existing_entity( is_iface, ps[0], hs[0], num_ps, connect, verts_per_entity, this_type,
2208  L2hloc, L2hrem, L2p, new_h );MB_CHK_SET_ERR( result, "Failed to get existing entity" );
2209  }
2210 
2211  //=======================================
2212  // If we didn't find one, we'll have to create one
2213  //=======================================
2214  bool created_here = false;
2215  if( !new_h && !is_iface )
2216  {
2217  if( MBVERTEX == this_type )
2218  {
2219  // Create a vertex
2220  result = mbImpl->create_vertex( coords, new_h );MB_CHK_SET_ERR( result, "Failed to make new vertex" );
2221  }
2222  else
2223  {
2224  // Create the element
2225  result = mbImpl->create_element( this_type, connect, verts_per_entity, new_h );MB_CHK_SET_ERR( result, "Failed to make new element" );
2226 
2227  // Update adjacencies
2228  result = ru->update_adjacencies( new_h, 1, verts_per_entity, connect );MB_CHK_SET_ERR( result, "Failed to update adjacencies" );
2229  }
2230 
2231  // Should have a new handle now
2232  assert( new_h );
2233 
2234  created_here = true;
2235  }
2236 
2237  //=======================================
2238  // Take care of sharing data
2239  //=======================================
2240 
2241  // Need to save entities found in order, for interpretation of
2242  // later parts of this message
2243  if( !is_iface )
2244  {
2245  assert( new_h );
2246  msg_ents.push_back( new_h );
2247  }
2248 
2249  if( created_here ) new_ents.push_back( new_h );
2250 
2251  if( new_h && store_remote_handles )
2252  {
2253  unsigned char new_pstat = 0x0;
2254  if( is_iface )
2255  {
2256  new_pstat = PSTATUS_INTERFACE;
2257  // Here, lowest rank proc should be first
2258  int idx = std::min_element( &ps[0], &ps[0] + num_ps ) - &ps[0];
2259  if( idx )
2260  {
2261  std::swap( ps[0], ps[idx] );
2262  std::swap( hs[0], hs[idx] );
2263  }
2264  // Set ownership based on lowest rank; can't be in update_remote_data, because
2265  // there we don't know whether it resulted from ghosting or not
2266  if( ( num_ps > 1 && ps[0] != (int)rank() ) ) new_pstat |= PSTATUS_NOT_OWNED;
2267  }
2268  else if( created_here )
2269  {
2270  if( created_iface )
2271  new_pstat = PSTATUS_NOT_OWNED;
2272  else
2273  new_pstat = PSTATUS_GHOST | PSTATUS_NOT_OWNED;
2274  }
2275 
2276  // Update sharing data and pstatus, adjusting order if iface
2277  result = update_remote_data( new_h, &ps[0], &hs[0], num_ps, new_pstat );MB_CHK_SET_ERR( result, "unpack_entities" );
2278 
2279  // If a new multi-shared entity, save owner for subsequent lookup in L2 lists
2280  if( store_remote_handles && !is_iface && num_ps > 2 )
2281  {
2282  L2hrem.push_back( hs[0] );
2283  L2hloc.push_back( new_h );
2284  L2p.push_back( ps[0] );
2285  }
2286 
2287  // Need to send this new handle to all sharing procs
2288  if( !is_iface )
2289  {
2290  for( j = 0; j < num_ps; j++ )
2291  {
2292  if( ps[j] == (int)procConfig.proc_rank() ) continue;
2293  int idx = get_buffers( ps[j] );
2294  if( idx == (int)L1hloc.size() )
2295  {
2296  L1hloc.resize( idx + 1 );
2297  L1hrem.resize( idx + 1 );
2298  L1p.resize( idx + 1 );
2299  }
2300 
2301  // Don't bother adding if it's already in the list
2302  std::vector< EntityHandle >::iterator vit =
2303  std::find( L1hloc[idx].begin(), L1hloc[idx].end(), new_h );
2304  if( vit != L1hloc[idx].end() )
2305  {
2306  // If it's in the list and the remote handle wasn't known before
2307  // but is known now, replace it in the list
2308  if( L1p[idx][vit - L1hloc[idx].begin()] != -1 && hs[j] )
2309  {
2310  L1hrem[idx][vit - L1hloc[idx].begin()] = hs[j];
2311  L1p[idx][vit - L1hloc[idx].begin()] = -1;
2312  }
2313  else
2314  continue;
2315  }
2316  else
2317  {
2318  if( !hs[j] )
2319  {
2320  assert( -1 != ps[0] && num_ps > 2 );
2321  L1p[idx].push_back( ps[0] );
2322  L1hrem[idx].push_back( hs[0] );
2323  }
2324  else
2325  {
2326  assert(
2327  "either this remote handle isn't in the remote list, or "
2328  "it's for another proc" &&
2329  ( std::find( L1hrem[idx].begin(), L1hrem[idx].end(), hs[j] ) == L1hrem[idx].end() ||
2330  L1p[idx][std::find( L1hrem[idx].begin(), L1hrem[idx].end(), hs[j] ) -
2331  L1hrem[idx].begin()] != -1 ) );
2332  L1p[idx].push_back( -1 );
2333  L1hrem[idx].push_back( hs[j] );
2334  }
2335  L1hloc[idx].push_back( new_h );
2336  }
2337  }
2338  }
2339 
2340  assert( "Shouldn't be here for non-shared entities" && -1 != num_ps );
2341  std::fill( &ps[0], &ps[num_ps], -1 );
2342  std::fill( &hs[0], &hs[num_ps], 0 );
2343  }
2344  }
2345 
2346  myDebug->tprintf( 4, "Unpacked %d ents of type %s", num_ents2, CN::EntityTypeName( this_type ) );
2347  }
2348 
2349  myDebug->tprintf( 4, "Done unpacking entities.\n" );
2350 
2351  // Need to sort here, to enable searching
2352  std::sort( new_ents.begin(), new_ents.end() );
2353 
2354  return MB_SUCCESS;
2355 }
2356 
2357 ErrorCode ParallelComm::print_buffer( unsigned char* buff_ptr, int mesg_tag, int from_proc, bool sent )
2358 {
2359  std::cerr << procConfig.proc_rank();
2360  if( sent )
2361  std::cerr << " sent";
2362  else
2363  std::cerr << " received";
2364  std::cerr << " message type " << mesg_tag << " to/from proc " << from_proc << "; contents:" << std::endl;
2365 
2366  int msg_length, num_ents;
2367  unsigned char* orig_ptr = buff_ptr;
2368  UNPACK_INT( buff_ptr, msg_length );
2369  std::cerr << msg_length << " bytes..." << std::endl;
2370 
2371  if( MB_MESG_ENTS_SIZE == mesg_tag || MB_MESG_ENTS_LARGE == mesg_tag )
2372  {
2373  // 1. # entities = E
2374  int i, j, k;
2375  std::vector< int > ps;
2376  std::vector< EntityHandle > hs;
2377 
2378  UNPACK_INT( buff_ptr, num_ents );
2379  std::cerr << num_ents << " entities..." << std::endl;
2380 
2381  // Save place where remote handle info starts, then scan forward to ents
2382  for( i = 0; i < num_ents; i++ )
2383  {
2384  UNPACK_INT( buff_ptr, j );
2385  if( 0 > j ) return MB_FAILURE;
2386  ps.resize( j );
2387  hs.resize( j );
2388  std::cerr << "Entity " << i << ", # procs = " << j << std::endl;
2389  UNPACK_INTS( buff_ptr, &ps[0], j );
2390  UNPACK_EH( buff_ptr, &hs[0], j );
2391  std::cerr << " Procs: ";
2392  for( k = 0; k < j; k++ )
2393  std::cerr << ps[k] << " ";
2394  std::cerr << std::endl;
2395  std::cerr << " Handles: ";
2396  for( k = 0; k < j; k++ )
2397  std::cerr << hs[k] << " ";
2398  std::cerr << std::endl;
2399 
2400  if( buff_ptr - orig_ptr > msg_length )
2401  {
2402  std::cerr << "End of buffer..." << std::endl;
2403  std::cerr.flush();
2404  return MB_FAILURE;
2405  }
2406  }
2407 
2408  while( true )
2409  {
2410  EntityType this_type = MBMAXTYPE;
2411  UNPACK_TYPE( buff_ptr, this_type );
2412  assert( this_type != MBENTITYSET );
2413 
2414  // MBMAXTYPE signifies end of entities data
2415  if( MBMAXTYPE == this_type ) break;
2416 
2417  // Get the number of ents
2418  int num_ents2, verts_per_entity = 0;
2419  UNPACK_INT( buff_ptr, num_ents2 );
2420 
2421  // Unpack the nodes per entity
2422  if( MBVERTEX != this_type && num_ents2 )
2423  {
2424  UNPACK_INT( buff_ptr, verts_per_entity );
2425  }
2426 
2427  std::cerr << "Type: " << CN::EntityTypeName( this_type ) << "; num_ents = " << num_ents2;
2428  if( MBVERTEX != this_type ) std::cerr << "; verts_per_ent = " << verts_per_entity;
2429  std::cerr << std::endl;
2430  if( num_ents2 < 0 || num_ents2 > msg_length )
2431  {
2432  std::cerr << "Wrong number of entities, returning." << std::endl;
2433  return MB_FAILURE;
2434  }
2435 
2436  for( int e = 0; e < num_ents2; e++ )
2437  {
2438  // Check for existing entity, otherwise make new one
2439  if( MBVERTEX == this_type )
2440  {
2441  double coords[3];
2442  UNPACK_DBLS( buff_ptr, coords, 3 );
2443  std::cerr << "xyz = " << coords[0] << ", " << coords[1] << ", " << coords[2] << std::endl;
2444  }
2445  else
2446  {
2447  EntityHandle connect[CN::MAX_NODES_PER_ELEMENT];
2448  assert( verts_per_entity <= CN::MAX_NODES_PER_ELEMENT );
2449  UNPACK_EH( buff_ptr, connect, verts_per_entity );
2450 
2451  // Update connectivity to local handles
2452  std::cerr << "Connectivity: ";
2453  for( k = 0; k < verts_per_entity; k++ )
2454  std::cerr << connect[k] << " ";
2455  std::cerr << std::endl;
2456  }
2457 
2458  if( buff_ptr - orig_ptr > msg_length )
2459  {
2460  std::cerr << "End of buffer..." << std::endl;
2461  std::cerr.flush();
2462  return MB_FAILURE;
2463  }
2464  }
2465  }
2466  }
2467  else if( MB_MESG_REMOTEH_SIZE == mesg_tag || MB_MESG_REMOTEH_LARGE == mesg_tag )
2468  {
2469  UNPACK_INT( buff_ptr, num_ents );
2470  std::cerr << num_ents << " entities..." << std::endl;
2471  if( 0 > num_ents || num_ents > msg_length )
2472  {
2473  std::cerr << "Wrong number of entities, returning." << std::endl;
2474  return MB_FAILURE;
2475  }
2476  std::vector< EntityHandle > L1hloc( num_ents ), L1hrem( num_ents );
2477  std::vector< int > L1p( num_ents );
2478  UNPACK_INTS( buff_ptr, &L1p[0], num_ents );
2479  UNPACK_EH( buff_ptr, &L1hrem[0], num_ents );
2480  UNPACK_EH( buff_ptr, &L1hloc[0], num_ents );
2481  std::cerr << num_ents << " Entity pairs; hremote/hlocal/proc: " << std::endl;
2482  for( int i = 0; i < num_ents; i++ )
2483  {
2484  EntityType etype = TYPE_FROM_HANDLE( L1hloc[i] );
2485  std::cerr << CN::EntityTypeName( etype ) << ID_FROM_HANDLE( L1hrem[i] ) << ", "
2486  << CN::EntityTypeName( etype ) << ID_FROM_HANDLE( L1hloc[i] ) << ", " << L1p[i] << std::endl;
2487  }
2488 
2489  if( buff_ptr - orig_ptr > msg_length )
2490  {
2491  std::cerr << "End of buffer..." << std::endl;
2492  std::cerr.flush();
2493  return MB_FAILURE;
2494  }
2495  }
2496  else if( mesg_tag == MB_MESG_TAGS_SIZE || mesg_tag == MB_MESG_TAGS_LARGE )
2497  {
2498  int num_tags, dum1, data_type, tag_size;
2499  UNPACK_INT( buff_ptr, num_tags );
2500  std::cerr << "Number of tags = " << num_tags << std::endl;
2501  for( int i = 0; i < num_tags; i++ )
2502  {
2503  std::cerr << "Tag " << i << ":" << std::endl;
2504  UNPACK_INT( buff_ptr, tag_size );
2505  UNPACK_INT( buff_ptr, dum1 );
2506  UNPACK_INT( buff_ptr, data_type );
2507  std::cerr << "Tag size, type, data type = " << tag_size << ", " << dum1 << ", " << data_type << std::endl;
2508  UNPACK_INT( buff_ptr, dum1 );
2509  std::cerr << "Default value size = " << dum1 << std::endl;
2510  buff_ptr += dum1;
2511  UNPACK_INT( buff_ptr, dum1 );
2512  std::string name( (char*)buff_ptr, dum1 );
2513  std::cerr << "Tag name = " << name.c_str() << std::endl;
2514  buff_ptr += dum1;
2515  UNPACK_INT( buff_ptr, num_ents );
2516  std::cerr << "Number of ents = " << num_ents << std::endl;
2517  std::vector< EntityHandle > tmp_buff( num_ents );
2518  UNPACK_EH( buff_ptr, &tmp_buff[0], num_ents );
2519  int tot_length = 0;
2520  for( int j = 0; j < num_ents; j++ )
2521  {
2522  EntityType etype = TYPE_FROM_HANDLE( tmp_buff[j] );
2523  std::cerr << CN::EntityTypeName( etype ) << " " << ID_FROM_HANDLE( tmp_buff[j] ) << ", tag = ";
2524  if( tag_size == MB_VARIABLE_LENGTH )
2525  {
2526  UNPACK_INT( buff_ptr, dum1 );
2527  tot_length += dum1;
2528  std::cerr << "(variable, length = " << dum1 << ")" << std::endl;
2529  }
2530  else if( data_type == MB_TYPE_DOUBLE )
2531  {
2532  double dum_dbl;
2533  UNPACK_DBL( buff_ptr, dum_dbl );
2534  std::cerr << dum_dbl << std::endl;
2535  }
2536  else if( data_type == MB_TYPE_INTEGER )
2537  {
2538  int dum_int;
2539  UNPACK_INT( buff_ptr, dum_int );
2540  std::cerr << dum_int << std::endl;
2541  }
2542  else if( data_type == MB_TYPE_OPAQUE )
2543  {
2544  std::cerr << "(opaque)" << std::endl;
2545  buff_ptr += tag_size;
2546  }
2547  else if( data_type == MB_TYPE_HANDLE )
2548  {
2549  EntityHandle dum_eh;
2550  UNPACK_EH( buff_ptr, &dum_eh, 1 );
2551  std::cerr << dum_eh << std::endl;
2552  }
2553  else if( data_type == MB_TYPE_BIT )
2554  {
2555  std::cerr << "(bit)" << std::endl;
2556  buff_ptr += tag_size;
2557  }
2558  }
2559  if( tag_size == MB_VARIABLE_LENGTH ) buff_ptr += tot_length;
2560  }
2561  }
2562  else
2563  {
2564  assert( false );
2565  return MB_FAILURE;
2566  }
2567 
2568  std::cerr.flush();
2569 
2570  return MB_SUCCESS;
2571 }
2572 
2573 ErrorCode ParallelComm::list_entities( const EntityHandle* ents, int num_ents )
2574 {
2575  if( NULL == ents )
2576  {
2577  Range shared_ents;
2578  std::copy( sharedEnts.begin(), sharedEnts.end(), range_inserter( shared_ents ) );
2579  shared_ents.print( "Shared entities:\n" );
2580  return MB_SUCCESS;
2581  }
2582 
2583  unsigned char pstat;
2584  EntityHandle tmp_handles[MAX_SHARING_PROCS];
2585  int tmp_procs[MAX_SHARING_PROCS];
2586  unsigned int num_ps;
2587  ErrorCode result;
2588 
2589  for( int i = 0; i < num_ents; i++ )
2590  {
2591  result = mbImpl->list_entities( ents + i, 1 );MB_CHK_ERR( result );
2592  double coords[3];
2593  result = mbImpl->get_coords( ents + i, 1, coords );
2594  std::cout << " coords: " << coords[0] << " " << coords[1] << " " << coords[2] << "\n";
2595 
2596  result = get_sharing_data( ents[i], tmp_procs, tmp_handles, pstat, num_ps );MB_CHK_SET_ERR( result, "Failed to get sharing data" );
2597 
2598  std::cout << "Pstatus: ";
2599  if( !num_ps )
2600  std::cout << "local " << std::endl;
2601  else
2602  {
2603  if( pstat & PSTATUS_NOT_OWNED ) std::cout << "NOT_OWNED; ";
2604  if( pstat & PSTATUS_SHARED ) std::cout << "SHARED; ";
2605  if( pstat & PSTATUS_MULTISHARED ) std::cout << "MULTISHARED; ";
2606  if( pstat & PSTATUS_INTERFACE ) std::cout << "INTERFACE; ";
2607  if( pstat & PSTATUS_GHOST ) std::cout << "GHOST; ";
2608  std::cout << std::endl;
2609  for( unsigned int j = 0; j < num_ps; j++ )
2610  {
2611  std::cout << " proc " << tmp_procs[j] << " id (handle) " << mbImpl->id_from_handle( tmp_handles[j] )
2612  << "(" << tmp_handles[j] << ")" << std::endl;
2613  }
2614  }
2615  std::cout << std::endl;
2616  }
2617 
2618  return MB_SUCCESS;
2619 }
2620 
2621 ErrorCode ParallelComm::list_entities( const Range& ents )
2622 {
2623  for( Range::iterator rit = ents.begin(); rit != ents.end(); ++rit )
2624  list_entities( &( *rit ), 1 );
2625 
2626  return MB_SUCCESS;
2627 }
2628 
2629 ErrorCode ParallelComm::update_remote_data( Range& local_range,
2630  Range& remote_range,
2631  int other_proc,
2632  const unsigned char add_pstat )
2633 {
2634  Range::iterator rit, rit2;
2635  ErrorCode result = MB_SUCCESS;
2636 
2637  // For each pair of local/remote handles:
2638  for( rit = local_range.begin(), rit2 = remote_range.begin(); rit != local_range.end(); ++rit, ++rit2 )
2639  {
2640  result = update_remote_data( *rit, &other_proc, &( *rit2 ), 1, add_pstat );MB_CHK_ERR( result );
2641  }
2642 
2643  return MB_SUCCESS;
2644 }
2645 
2646 ErrorCode ParallelComm::update_remote_data( const EntityHandle new_h,
2647  const int* ps,
2648  const EntityHandle* hs,
2649  const int num_ps,
2650  const unsigned char add_pstat
2651  // The following lines left in for future debugging, at least until I trust
2652  // this function; tjt, 10/4/2013
2653  // , int *new_ps,
2654  // EntityHandle *new_hs,
2655  // int &new_numps,
2656  // unsigned char &new_pstat
2657 )
2658 {
2659  // Get initial sharing data; tag_ps and tag_hs get terminated with -1 and 0
2660  // in this function, so no need to initialize; sharing data does not include
2661  // this proc if shared with only one other
2662 
2663  // Following variables declared here to avoid compiler errors
2664  int new_numps;
2665  unsigned char new_pstat;
2666  std::vector< int > new_ps( MAX_SHARING_PROCS, -1 );
2667  std::vector< EntityHandle > new_hs( MAX_SHARING_PROCS, 0 );
2668 
2669  new_numps = 0;
2670  ErrorCode result = get_sharing_data( new_h, &new_ps[0], &new_hs[0], new_pstat, new_numps );MB_CHK_SET_ERR( result, "Failed to get sharing data in update_remote_data" );
2671  int num_exist = new_numps;
2672 
2673  // Add new pstat info to the flag
2674  new_pstat |= add_pstat;
2675 
2676  /*
2677  #define plist(str, lst, siz) \
2678  std::cout << str << "("; \
2679  for (int i = 0; i < (int)siz; i++) std::cout << lst[i] << " "; \
2680  std::cout << ") "; \
2681 
2682  std::cout << "update_remote_data: rank = " << rank() << ", new_h = " << new_h << std::endl;
2683  std::string ostr;
2684  plist("ps", ps, num_ps);
2685  plist("hs", hs, num_ps);
2686  print_pstatus(add_pstat, ostr);
2687  std::cout << ", add_pstat = " << ostr.c_str() << std::endl;
2688  plist("tag_ps", new_ps, new_numps);
2689  plist("tag_hs", new_hs, new_numps);
2690  assert(new_numps <= size());
2691  print_pstatus(new_pstat, ostr);
2692  std::cout << ", tag_pstat=" << ostr.c_str() << std::endl;
2693  */
2694 
2695 #ifndef NDEBUG
2696  {
2697  // Check for duplicates in proc list
2698  std::set< unsigned int > dumprocs;
2699  unsigned int dp = 0;
2700  for( ; (int)dp < num_ps && -1 != ps[dp]; dp++ )
2701  dumprocs.insert( ps[dp] );
2702  assert( dp == dumprocs.size() );
2703  }
2704 #endif
2705 
2706  // If only one sharer and I'm the owner, insert myself in the list;
2707  // otherwise, my data is checked at the end
2708  if( 1 == new_numps && !( new_pstat & PSTATUS_NOT_OWNED ) )
2709  {
2710  new_hs[1] = new_hs[0];
2711  new_ps[1] = new_ps[0];
2712  new_hs[0] = new_h;
2713  new_ps[0] = rank();
2714  new_numps = 2;
2715  }
2716 
2717  // Now put passed-in data onto lists
2718  int idx;
2719  for( int i = 0; i < num_ps; i++ )
2720  {
2721  idx = std::find( &new_ps[0], &new_ps[0] + new_numps, ps[i] ) - &new_ps[0];
2722  if( idx < new_numps )
2723  {
2724  if( !new_hs[idx] && hs[i] )
2725  // h on list is 0 and passed-in h is non-zero, replace it
2726  new_hs[idx] = hs[i];
2727  else
2728  assert( !hs[i] || new_hs[idx] == hs[i] );
2729  }
2730  else
2731  {
2732  if( new_numps + 1 == MAX_SHARING_PROCS )
2733  {
2734  MB_SET_ERR( MB_FAILURE, "Exceeded MAX_SHARING_PROCS for "
2735  << CN::EntityTypeName( TYPE_FROM_HANDLE( new_h ) ) << ' '
2736  << ID_FROM_HANDLE( new_h ) << " in process " << rank() );
2737  }
2738  new_ps[new_numps] = ps[i];
2739  new_hs[new_numps] = hs[i];
2740  new_numps++;
2741  }
2742  }
2743 
2744  // Add myself, if it isn't there already
2745  idx = std::find( &new_ps[0], &new_ps[0] + new_numps, rank() ) - &new_ps[0];
2746  if( idx == new_numps )
2747  {
2748  new_ps[new_numps] = rank();
2749  new_hs[new_numps] = new_h;
2750  new_numps++;
2751  }
2752  else if( !new_hs[idx] && new_numps > 2 )
2753  new_hs[idx] = new_h;
2754 
2755  // Proc list is complete; update for shared, multishared
2756  if( new_numps > 1 )
2757  {
2758  if( new_numps > 2 ) new_pstat |= PSTATUS_MULTISHARED;
2759  new_pstat |= PSTATUS_SHARED;
2760  }
2761 
2762  /*
2763  plist("new_ps", new_ps, new_numps);
2764  plist("new_hs", new_hs, new_numps);
2765  print_pstatus(new_pstat, ostr);
2766  std::cout << ", new_pstat=" << ostr.c_str() << std::endl;
2767  std::cout << std::endl;
2768  */
2769 
2770  result = set_sharing_data( new_h, new_pstat, num_exist, new_numps, &new_ps[0], &new_hs[0] );MB_CHK_SET_ERR( result, "Failed to set sharing data in update_remote_data" );
2771 
2772  if( new_pstat & PSTATUS_SHARED ) sharedEnts.insert( new_h );
2773 
2774  return MB_SUCCESS;
2775 }
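// Example of the merge performed above (illustrative, not part of the original
// source; assumes this proc owns new_h): if new_h was previously shared only
// with proc 4 (handle h4) and a message now reports procs {4, 9}, the merged
// lists become
//   new_ps = { this rank, 4, 9 },  new_hs = { new_h, h4, h9-or-0 }
// and the pstatus gains PSTATUS_SHARED | PSTATUS_MULTISHARED, since more than
// two procs now share the entity.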
2776 
2777 ErrorCode ParallelComm::update_remote_data_old( const EntityHandle new_h,
2778  const int* ps,
2779  const EntityHandle* hs,
2780  const int num_ps,
2781  const unsigned char add_pstat )
2782 {
2783  EntityHandle tag_hs[MAX_SHARING_PROCS];
2784  int tag_ps[MAX_SHARING_PROCS];
2785  unsigned char pstat;
2786  // Get initial sharing data; tag_ps and tag_hs get terminated with -1 and 0
2787  // in this function, so no need to initialize
2788  unsigned int num_exist;
2789  ErrorCode result = get_sharing_data( new_h, tag_ps, tag_hs, pstat, num_exist );MB_CHK_ERR( result );
2790 
2791 #ifndef NDEBUG
2792  {
2793  // Check for duplicates in proc list
2794  std::set< unsigned int > dumprocs;
2795  unsigned int dp = 0;
2796  for( ; (int)dp < num_ps && -1 != ps[dp]; dp++ )
2797  dumprocs.insert( ps[dp] );
2798  assert( dp == dumprocs.size() );
2799  }
2800 #endif
2801 
2802  // Add any new sharing data
2803  bool changed = false;
2804  int idx;
2805  if( !num_exist )
2806  {
2807  // Just take what caller passed
2808  memcpy( tag_ps, ps, num_ps * sizeof( int ) );
2809  memcpy( tag_hs, hs, num_ps * sizeof( EntityHandle ) );
2810  num_exist = num_ps;
2811  // If it's only one, hopefully I'm not there yet...
2812  assert( "I shouldn't be the only proc there." && ( 1 != num_exist || ps[0] != (int)procConfig.proc_rank() ) );
2813  changed = true;
2814  }
2815  else
2816  {
2817  for( int i = 0; i < num_ps; i++ )
2818  {
2819  idx = std::find( tag_ps, tag_ps + num_exist, ps[i] ) - tag_ps;
2820  if( idx == (int)num_exist )
2821  {
2822  if( num_exist == MAX_SHARING_PROCS )
2823  {
2824  std::cerr << "Exceeded MAX_SHARING_PROCS for " << CN::EntityTypeName( TYPE_FROM_HANDLE( new_h ) )
2825  << ' ' << ID_FROM_HANDLE( new_h ) << " in process " << proc_config().proc_rank()
2826  << std::endl;
2827  std::cerr.flush();
2828  MPI_Abort( proc_config().proc_comm(), 66 );
2829  }
2830 
2831  // If there's only 1 sharing proc, and it's not me, then
2832  // we'll end up with 3; add me to the front
2833  if( !i && num_ps == 1 && num_exist == 1 && ps[0] != (int)procConfig.proc_rank() )
2834  {
2835  int j = 1;
2836  // If I own this entity, put me at front, otherwise after first
2837  if( !( pstat & PSTATUS_NOT_OWNED ) )
2838  {
2839  tag_ps[1] = tag_ps[0];
2840  tag_hs[1] = tag_hs[0];
2841  j = 0;
2842  }
2843  tag_ps[j] = procConfig.proc_rank();
2844  tag_hs[j] = new_h;
2845  num_exist++;
2846  }
2847 
2848  tag_ps[num_exist] = ps[i];
2849  tag_hs[num_exist] = hs[i];
2850  num_exist++;
2851  changed = true;
2852  }
2853  else if( 0 == tag_hs[idx] )
2854  {
2855  tag_hs[idx] = hs[i];
2856  changed = true;
2857  }
2858  else if( 0 != hs[i] )
2859  {
2860  assert( hs[i] == tag_hs[idx] );
2861  }
2862  }
2863  }
2864 
2865  // Adjust for interface layer if necessary
2866  if( add_pstat & PSTATUS_INTERFACE )
2867  {
2868  idx = std::min_element( tag_ps, tag_ps + num_exist ) - tag_ps;
2869  if( idx )
2870  {
2871  int tag_proc = tag_ps[idx];
2872  tag_ps[idx] = tag_ps[0];
2873  tag_ps[0] = tag_proc;
2874  EntityHandle tag_h = tag_hs[idx];
2875  tag_hs[idx] = tag_hs[0];
2876  tag_hs[0] = tag_h;
2877  changed = true;
2878  if( tag_ps[0] != (int)procConfig.proc_rank() ) pstat |= PSTATUS_NOT_OWNED;
2879  }
2880  }
2881 
2882  if( !changed ) return MB_SUCCESS;
2883 
2884  assert( "interface entities should have > 1 proc" && ( !( add_pstat & PSTATUS_INTERFACE ) || num_exist > 1 ) );
2885  assert( "ghost entities should have > 1 proc" && ( !( add_pstat & PSTATUS_GHOST ) || num_exist > 1 ) );
2886 
2887  // If it's multi-shared and we created the entity in this unpack,
2888  // local handle probably isn't in handle list yet
2889  if( num_exist > 2 )
2890  {
2891  idx = std::find( tag_ps, tag_ps + num_exist, procConfig.proc_rank() ) - tag_ps;
2892  assert( idx < (int)num_exist );
2893  if( !tag_hs[idx] ) tag_hs[idx] = new_h;
2894  }
2895 
2896  int tag_p;
2897  EntityHandle tag_h;
2898 
2899  // Update pstat
2900  pstat |= add_pstat;
2901 
2902  if( num_exist > 2 )
2903  pstat |= ( PSTATUS_MULTISHARED | PSTATUS_SHARED );
2904  else if( num_exist > 0 )
2905  pstat |= PSTATUS_SHARED;
2906 
2907  // compare_remote_data(new_h, num_ps, hs, ps, add_pstat,
2908  // num_exist, tag_hs, tag_ps, pstat);
2909 
2910  // Reset single shared proc/handle if was shared and moving to multi-shared
2911  if( num_exist > 2 && !( pstat & PSTATUS_MULTISHARED ) && ( pstat & PSTATUS_SHARED ) )
2912  {
2913  // Must remove sharedp/h first, which really means set to default value
2914  tag_p = -1;
2915  result = mbImpl->tag_set_data( sharedp_tag(), &new_h, 1, &tag_p );MB_CHK_SET_ERR( result, "Failed to set sharedp tag data" );
2916  tag_h = 0;
2917  result = mbImpl->tag_set_data( sharedh_tag(), &new_h, 1, &tag_h );MB_CHK_SET_ERR( result, "Failed to set sharedh tag data" );
2918  }
2919 
2920  // Set sharing tags
2921  if( num_exist > 2 )
2922  {
2923  std::fill( tag_ps + num_exist, tag_ps + MAX_SHARING_PROCS, -1 );
2924  std::fill( tag_hs + num_exist, tag_hs + MAX_SHARING_PROCS, 0 );
2925  result = mbImpl->tag_set_data( sharedps_tag(), &new_h, 1, tag_ps );MB_CHK_SET_ERR( result, "Failed to set sharedps tag data" );
2926  result = mbImpl->tag_set_data( sharedhs_tag(), &new_h, 1, tag_hs );MB_CHK_SET_ERR( result, "Failed to set sharedhs tag data" );
2927 
2928 #ifndef NDEBUG
2929  {
2930  // Check for duplicates in proc list
2931  std::set< unsigned int > dumprocs;
2932  unsigned int dp = 0;
2933  for( ; dp < num_exist && -1 != tag_ps[dp]; dp++ )
2934  dumprocs.insert( tag_ps[dp] );
2935  assert( dp == dumprocs.size() );
2936  }
2937 #endif
2938  }
2939  else if( num_exist == 2 || num_exist == 1 )
2940  {
2941  if( tag_ps[0] == (int)procConfig.proc_rank() )
2942  {
2943  assert( 2 == num_exist && tag_ps[1] != (int)procConfig.proc_rank() );
2944  tag_ps[0] = tag_ps[1];
2945  tag_hs[0] = tag_hs[1];
2946  }
2947  assert( tag_ps[0] != -1 && tag_hs[0] != 0 );
2948  result = mbImpl->tag_set_data( sharedp_tag(), &new_h, 1, tag_ps );MB_CHK_SET_ERR( result, "Failed to set sharedp tag data" );
2949  result = mbImpl->tag_set_data( sharedh_tag(), &new_h, 1, tag_hs );MB_CHK_SET_ERR( result, "Failed to set sharedh tag data" );
2950  }
2951 
2952  // Now set new pstatus
2953  result = mbImpl->tag_set_data( pstatus_tag(), &new_h, 1, &pstat );MB_CHK_SET_ERR( result, "Failed to set pstatus tag data" );
2954 
2955  if( pstat & PSTATUS_SHARED ) sharedEnts.insert( new_h );
2956 
2957  return MB_SUCCESS;
2958 }
2959 
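// Usage sketch (illustrative only; 'pc' stands for a ParallelComm instance and
// 'iface_ents' for any Range of entities):
//   std::set< int > procs;
//   ErrorCode rval = pc->get_sharing_data( iface_ents, procs, Interface::INTERSECT );
//   // 'procs' now holds the ranks that share every entity in iface_ents;
//   // Interface::UNION instead collects the ranks that share any of them.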
2960 ErrorCode ParallelComm::get_sharing_data( const Range& entities, std::set< int >& procs, int operation )
2961 {
2962  // Get the union or intersection of sharing data for multiple entities
2963  ErrorCode result;
2964  int sp2[MAX_SHARING_PROCS];
2965  int num_ps;
2966  unsigned char pstat;
2967  std::set< int > tmp_procs;
2968  procs.clear();
2969 
2970  for( Range::const_iterator rit = entities.begin(); rit != entities.end(); ++rit )
2971  {
2972  // Get sharing procs
2973  result = get_sharing_data( *rit, sp2, NULL, pstat, num_ps );MB_CHK_SET_ERR( result, "Failed to get sharing data in get_sharing_data" );
2974  if( !( pstat & PSTATUS_SHARED ) && Interface::INTERSECT == operation )
2975  {
2976  procs.clear();
2977  return MB_SUCCESS;
2978  }
2979 
2980  if( rit == entities.begin() )
2981  {
2982  std::copy( sp2, sp2 + num_ps, std::inserter( procs, procs.begin() ) );
2983  }
2984  else
2985  {
2986  std::sort( sp2, sp2 + num_ps );
2987  tmp_procs.clear();
2988  if( Interface::UNION == operation )
2989  std::set_union( procs.begin(), procs.end(), sp2, sp2 + num_ps,
2990  std::inserter( tmp_procs, tmp_procs.end() ) );
2991  else if( Interface::INTERSECT == operation )
2992  std::set_intersection( procs.begin(), procs.end(), sp2, sp2 + num_ps,
2993  std::inserter( tmp_procs, tmp_procs.end() ) );
2994  else
2995  {
2996  assert( "Unknown operation." && false );
2997  return MB_FAILURE;
2998  }
2999  procs.swap( tmp_procs );
3000  }
3001  if( Interface::INTERSECT == operation && procs.empty() ) return MB_SUCCESS;
3002  }
3003 
3004  return MB_SUCCESS;
3005 }
3006 
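// Minimal usage sketch (illustrative only; 'pc' stands for a ParallelComm
// instance and 'ent' for any entity handle):
//   int procs[MAX_SHARING_PROCS];
//   EntityHandle handles[MAX_SHARING_PROCS];
//   unsigned char pstat;
//   unsigned int nprocs;
//   ErrorCode rval = pc->get_sharing_data( ent, procs, handles, pstat, nprocs );
//   // On return, procs[0..nprocs-1] are the sharing ranks; for a SHARED but not
//   // MULTISHARED entity, nprocs is 1 and procs[1] is set to -1.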
3007 ErrorCode ParallelComm::get_sharing_data( const EntityHandle entity,
3008  int* ps,
3009  EntityHandle* hs,
3010  unsigned char& pstat,
3011  unsigned int& num_ps )
3012 {
3013  ErrorCode result = mbImpl->tag_get_data( pstatus_tag(), &entity, 1, &pstat );MB_CHK_SET_ERR( result, "Failed to get pstatus tag data" );
3014  if( pstat & PSTATUS_MULTISHARED )
3015  {
3016  result = mbImpl->tag_get_data( sharedps_tag(), &entity, 1, ps );MB_CHK_SET_ERR( result, "Failed to get sharedps tag data" );
3017  if( hs )
3018  {
3019  result = mbImpl->tag_get_data( sharedhs_tag(), &entity, 1, hs );MB_CHK_SET_ERR( result, "Failed to get sharedhs tag data" );
3020  }
3021  num_ps = std::find( ps, ps + MAX_SHARING_PROCS, -1 ) - ps;
3022  }
3023  else if( pstat & PSTATUS_SHARED )
3024  {
3025  result = mbImpl->tag_get_data( sharedp_tag(), &entity, 1, ps );MB_CHK_SET_ERR( result, "Failed to get sharedp tag data" );
3026  if( hs )
3027  {
3028  result = mbImpl->tag_get_data( sharedh_tag(), &entity, 1, hs );MB_CHK_SET_ERR( result, "Failed to get sharedh tag data" );
3029  hs[1] = 0;
3030  }
3031  // Initialize past end of data
3032  ps[1] = -1;
3033  num_ps = 1;
3034  }
3035  else
3036  {
3037  ps[0] = -1;
3038  if( hs ) hs[0] = 0;
3039  num_ps = 0;
3040  }
3041 
3042  assert( MAX_SHARING_PROCS >= num_ps );
3043 
3044  return MB_SUCCESS;
3045 }
3046 
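// Note (illustrative, not part of the original source): for multi-shared,
// non-interface entities find_existing_entity first looks up (owner rank,
// owner handle) in the L2 lists; otherwise it falls back to an adjacency query
// on the connectivity, so an element that already exists locally with the same
// vertices is reused rather than duplicated.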
3047 ErrorCode ParallelComm::find_existing_entity( const bool is_iface,
3048  const int owner_p,
3049  const EntityHandle owner_h,
3050  const int num_ps,
3051  const EntityHandle* connect,
3052  const int num_connect,
3053  const EntityType this_type,
3054  std::vector< EntityHandle >& L2hloc,
3055  std::vector< EntityHandle >& L2hrem,
3056  std::vector< unsigned int >& L2p,
3057  EntityHandle& new_h )
3058 {
3059  new_h = 0;
3060  if( !is_iface && num_ps > 2 )
3061  {
3062  for( unsigned int i = 0; i < L2hrem.size(); i++ )
3063  {
3064  if( L2hrem[i] == owner_h && owner_p == (int)L2p[i] )
3065  {
3066  new_h = L2hloc[i];
3067  return MB_SUCCESS;
3068  }
3069  }
3070  }
3071 
3072  // If we got here and it's a vertex, we don't need to look further
3073  if( MBVERTEX == this_type || !connect || !num_connect ) return MB_SUCCESS;
3074 
3075  Range tmp_range;
3076  ErrorCode result = mbImpl->get_adjacencies( connect, num_connect, CN::Dimension( this_type ), false, tmp_range );MB_CHK_SET_ERR( result, "Failed to get existing entity" );
3077  if( !tmp_range.empty() )
3078  {
3079  // Found a corresponding entity - return target
3080  new_h = *tmp_range.begin();
3081  }
3082  else
3083  {
3084  new_h = 0;
3085  }
3086 
3087  return MB_SUCCESS;
3088 }
3089 
3090 ErrorCode ParallelComm::get_local_handles( const Range& remote_handles,
3091  Range& local_handles,
3092  const std::vector< EntityHandle >& new_ents )
3093 {
3094  std::vector< EntityHandle > rh_vec;
3095  rh_vec.reserve( remote_handles.size() );
3096  std::copy( remote_handles.begin(), remote_handles.end(), std::back_inserter( rh_vec ) );
3097  ErrorCode result = get_local_handles( &rh_vec[0], remote_handles.size(), new_ents );
3098  std::copy( rh_vec.begin(), rh_vec.end(), range_inserter( local_handles ) );
3099  return result;
3100 }
3101 
3102 ErrorCode ParallelComm::get_local_handles( EntityHandle* from_vec, int num_ents, const Range& new_ents )
3103 {
3104  std::vector< EntityHandle > tmp_ents;
3105  std::copy( new_ents.begin(), new_ents.end(), std::back_inserter( tmp_ents ) );
3106  return get_local_handles( from_vec, num_ents, tmp_ents );
3107 }
3108 
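// Note on the translation performed below (illustrative, not part of the
// original source): connectivity packed without remote handles consists of
// CREATE_HANDLE(MBMAXTYPE, index) placeholders; each such placeholder is
// replaced by new_ents[index], i.e. the entity created at that position while
// unpacking the same message. Real handles are left untouched.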
3109 ErrorCode ParallelComm::get_local_handles( EntityHandle* from_vec,
3110  int num_ents,
3111  const std::vector< EntityHandle >& new_ents )
3112 {
3113  for( int i = 0; i < num_ents; i++ )
3114  {
3115  if( TYPE_FROM_HANDLE( from_vec[i] ) == MBMAXTYPE )
3116  {
3117  assert( ID_FROM_HANDLE( from_vec[i] ) < (int)new_ents.size() );
3118  from_vec[i] = new_ents[ID_FROM_HANDLE( from_vec[i] )];
3119  }
3120  }
3121 
3122  return MB_SUCCESS;
3123 }
3124 
3125 /*
3126 template <typename T> void
3127 insert_in_array(T* array, size_t array_size, size_t location, T value)
3128 {
3129  assert(location + 1 < array_size);
3130  for (size_t i = array_size - 1; i > location; i--)
3131  array[i] = array[i - 1];
3132  array[location] = value;
3133 }
3134 */
3135 
3136 ErrorCode ParallelComm::pack_range_map( Range& key_range, EntityHandle val_start, HandleMap& handle_map )
3137 {
3138  for( Range::const_pair_iterator key_it = key_range.const_pair_begin(); key_it != key_range.const_pair_end();
3139  ++key_it )
3140  {
3141  int tmp_num = ( *key_it ).second - ( *key_it ).first + 1;
3142  handle_map.insert( ( *key_it ).first, val_start, tmp_num );
3143  val_start += tmp_num;
3144  }
3145 
3146  return MB_SUCCESS;
3147 }
3148 
3149 ErrorCode ParallelComm::pack_sets( Range& entities, Buffer* buff, const bool store_remote_handles, const int to_proc )
3150 {
3151  // SETS:
3152  // . #sets
3153  // . for each set:
3154  // - options[#sets] (unsigned int)
3155  // - if (unordered) set range
3156  // - else if ordered
3157  // . #ents in set
3158  // . handles[#ents]
3159  // - #parents
3160  // - if (#parents) handles[#parents]
3161  // - #children
3162  // - if (#children) handles[#children]
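 //
 // Illustrative stream for two sets (not part of the original source; the
 // exact order follows the code below): set count, option words for all sets,
 // the PARALLEL_UNIQUE_ID block (a single 0 when no set carries that tag),
 // then for each set its member count and member handles, then parent/child
 // counts and handles (packed as zeros when remote handles are stored), and
 // finally, when remote handles are stored, the handles of the sets themselves.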
3163 
3164  // Now the sets; assume any sets the application wants to pass are in the entities list
3165  ErrorCode result;
3166  Range all_sets = entities.subset_by_type( MBENTITYSET );
3167 
3168  int buff_size = estimate_sets_buffer_size( all_sets, store_remote_handles );
3169  if( buff_size < 0 ) MB_SET_ERR( MB_FAILURE, "Failed to estimate sets buffer size" );
3170  buff->check_space( buff_size );
3171 
3172  // Number of sets
3173  PACK_INT( buff->buff_ptr, all_sets.size() );
3174 
3175  // Options for all sets
3176  std::vector< unsigned int > options( all_sets.size() );
3177  Range::iterator rit;
3178  std::vector< EntityHandle > members;
3179  int i;
3180  for( rit = all_sets.begin(), i = 0; rit != all_sets.end(); ++rit, i++ )
3181  {
3182  result = mbImpl->get_meshset_options( *rit, options[i] );MB_CHK_SET_ERR( result, "Failed to get meshset options" );
3183  }
3184  buff->check_space( all_sets.size() * sizeof( unsigned int ) );
3185  PACK_VOID( buff->buff_ptr, &options[0], all_sets.size() * sizeof( unsigned int ) );
3186 
3187  // Pack parallel geometry unique id
3188  if( !all_sets.empty() )
3189  {
3190  Tag uid_tag;
3191  int n_sets = all_sets.size();
3192  bool b_pack = false;
3193  std::vector< int > id_data( n_sets );
3194  result =
3195  mbImpl->tag_get_handle( "PARALLEL_UNIQUE_ID", 1, MB_TYPE_INTEGER, uid_tag, MB_TAG_SPARSE | MB_TAG_CREAT );MB_CHK_SET_ERR( result, "Failed to create parallel geometry unique id tag" );
3196 
3197  result = mbImpl->tag_get_data( uid_tag, all_sets, &id_data[0] );
3198  if( MB_TAG_NOT_FOUND != result )
3199  {
3200  if( MB_SUCCESS != result ) MB_SET_ERR( result, "Failed to get parallel geometry unique ids" );
3201  for( i = 0; i < n_sets; i++ )
3202  {
3203  if( id_data[i] != 0 )
3204  {
3205  b_pack = true;
3206  break;
3207  }
3208  }
3209  }
3210 
3211  if( b_pack )
3212  { // At least one set has a non-zero unique id; pack them
3213  buff->check_space( ( n_sets + 1 ) * sizeof( int ) );
3214  PACK_INT( buff->buff_ptr, n_sets );
3215  PACK_INTS( buff->buff_ptr, &id_data[0], n_sets );
3216  }
3217  else
3218  {
3219  buff->check_space( sizeof( int ) );
3220  PACK_INT( buff->buff_ptr, 0 );
3221  }
3222  }
3223 
3224  // Vectors/ranges
3225  std::vector< EntityHandle > entities_vec( entities.size() );
3226  std::copy( entities.begin(), entities.end(), entities_vec.begin() );
3227  for( rit = all_sets.begin(), i = 0; rit != all_sets.end(); ++rit, i++ )
3228  {
3229  members.clear();
3230  result = mbImpl->get_entities_by_handle( *rit, members );MB_CHK_SET_ERR( result, "Failed to get entities in ordered set" );
3231  result =
3232  get_remote_handles( store_remote_handles, &members[0], &members[0], members.size(), to_proc, entities_vec );MB_CHK_SET_ERR( result, "Failed in get_remote_handles" );
3233  buff->check_space( members.size() * sizeof( EntityHandle ) + sizeof( int ) );
3234  PACK_INT( buff->buff_ptr, members.size() );
3235  PACK_EH( buff->buff_ptr, &members[0], members.size() );
3236  }
3237 
3238  // Pack parent/child sets
3239  if( !store_remote_handles )
3240  { // Only done when not storing remote handles
3241  // Pack numbers of parents/children
3242  unsigned int tot_pch = 0;
3243  int num_pch;
3244  buff->check_space( 2 * all_sets.size() * sizeof( int ) );
3245  for( rit = all_sets.begin(), i = 0; rit != all_sets.end(); ++rit, i++ )
3246  {
3247  // Pack parents
3248  result = mbImpl->num_parent_meshsets( *rit, &num_pch );MB_CHK_SET_ERR( result, "Failed to get num parents" );
3249  PACK_INT( buff->buff_ptr, num_pch );
3250  tot_pch += num_pch;
3251  result = mbImpl->num_child_meshsets( *rit, &num_pch );MB_CHK_SET_ERR( result, "Failed to get num children" );
3252  PACK_INT( buff->buff_ptr, num_pch );
3253  tot_pch += num_pch;
3254  }
3255 
3256  // Now pack actual parents/children
3257  members.clear();
3258  members.reserve( tot_pch );
3259  std::vector< EntityHandle > tmp_pch;
3260  for( rit = all_sets.begin(), i = 0; rit != all_sets.end(); ++rit, i++ )
3261  {
3262  result = mbImpl->get_parent_meshsets( *rit, tmp_pch );MB_CHK_SET_ERR( result, "Failed to get parents" );
3263  std::copy( tmp_pch.begin(), tmp_pch.end(), std::back_inserter( members ) );
3264  tmp_pch.clear();
3265  result = mbImpl->get_child_meshsets( *rit, tmp_pch );MB_CHK_SET_ERR( result, "Failed to get children" );
3266  std::copy( tmp_pch.begin(), tmp_pch.end(), std::back_inserter( members ) );
3267  tmp_pch.clear();
3268  }
3269  assert( members.size() == tot_pch );
3270  if( !members.empty() )
3271  {
3272  result = get_remote_handles( store_remote_handles, &members[0], &members[0], members.size(), to_proc,
3273  entities_vec );MB_CHK_SET_ERR( result, "Failed to get remote handles for set parent/child sets" );
3274 #ifndef NDEBUG
3275  // Check that all handles are either sets or maxtype
3276  for( unsigned int __j = 0; __j < members.size(); __j++ )
3277  assert( ( TYPE_FROM_HANDLE( members[__j] ) == MBMAXTYPE &&
3278  ID_FROM_HANDLE( members[__j] ) < (int)entities.size() ) ||
3279  TYPE_FROM_HANDLE( members[__j] ) == MBENTITYSET );
3280 #endif
3281  buff->check_space( members.size() * sizeof( EntityHandle ) );
3282  PACK_EH( buff->buff_ptr, &members[0], members.size() );
3283  }
3284  }
3285  else
3286  {
3287  buff->check_space( 2 * all_sets.size() * sizeof( int ) );
3288  for( rit = all_sets.begin(); rit != all_sets.end(); ++rit )
3289  {
3290  PACK_INT( buff->buff_ptr, 0 );
3291  PACK_INT( buff->buff_ptr, 0 );
3292  }
3293  }
3294 
3295  // Pack the handles
3296  if( store_remote_handles && !all_sets.empty() )
3297  {
3298  buff_size = RANGE_SIZE( all_sets );
3299  buff->check_space( buff_size );
3300  PACK_RANGE( buff->buff_ptr, all_sets );
3301  }
3302 
3303  myDebug->tprintf( 4, "Done packing sets.\n" );
3304 
3305  buff->set_stored_size();
3306 
3307  return MB_SUCCESS;
3308 }
3309 
3310 ErrorCode ParallelComm::unpack_sets( unsigned char*& buff_ptr,
3311  std::vector< EntityHandle >& entities,
3312  const bool store_remote_handles,
3313  const int from_proc )
3314 {
3315  // Now the sets; assume any sets the application wants to pass are in the entities list
3316  ErrorCode result;
3317 
3318  bool no_sets = ( entities.empty() || ( mbImpl->type_from_handle( *entities.rbegin() ) == MBENTITYSET ) );
3319 
3320  Range new_sets;
3321  int num_sets;
3322  UNPACK_INT( buff_ptr, num_sets );
3323 
3324  if( !num_sets ) return MB_SUCCESS;
3325 
3326  int i;
3327  Range::iterator rit;
3328  std::vector< EntityHandle > members;
3329  int num_ents;
3330  std::vector< unsigned int > options_vec( num_sets );
3331  // Option value
3332  if( num_sets ) UNPACK_VOID( buff_ptr, &options_vec[0], num_sets * sizeof( unsigned int ) );
3333 
3334  // Unpack parallel geometry unique id
3335  int n_uid;
3336  UNPACK_INT( buff_ptr, n_uid );
3337  if( n_uid > 0 && n_uid != num_sets )
3338  {
3339  std::cerr << "The number of parallel geometry unique ids should match the number of sets." << std::endl;
3340  }
3341 
3342  if( n_uid > 0 )
3343  { // If parallel geometry unique id is packed
3344  std::vector< int > uids( n_uid );
3345  UNPACK_INTS( buff_ptr, &uids[0], n_uid );
3346 
3347  Tag uid_tag;
3348  result =
3349  mbImpl->tag_get_handle( "PARALLEL_UNIQUE_ID", 1, MB_TYPE_INTEGER, uid_tag, MB_TAG_SPARSE | MB_TAG_CREAT );MB_CHK_SET_ERR( result, "Failed to create parallel geometry unique id tag" );
3350 
3351  // Find existing sets
3352  for( i = 0; i < n_uid; i++ )
3353  {
3354  EntityHandle set_handle;
3355  Range temp_sets;
3356  void* tag_vals[] = { &uids[i] };
3357  if( uids[i] > 0 )
3358  {
3359  result = mbImpl->get_entities_by_type_and_tag( 0, MBENTITYSET, &uid_tag, tag_vals, 1, temp_sets );
3360  }
3361  if( !temp_sets.empty() )
3362  { // Existing set
3363  set_handle = *temp_sets.begin();
3364  }
3365  else
3366  { // Create a new set
3367  result = mbImpl->create_meshset( options_vec[i], set_handle );MB_CHK_SET_ERR( result, "Failed to create set in unpack" );
3368  result = mbImpl->tag_set_data( uid_tag, &set_handle, 1, &uids[i] );MB_CHK_SET_ERR( result, "Failed to set parallel geometry unique ids" );
3369  }
3370  new_sets.insert( set_handle );
3371  }
3372  }
3373  else
3374  {
3375  // Create sets
3376  for( i = 0; i < num_sets; i++ )
3377  {
3378  EntityHandle set_handle;
3379  result = mbImpl->create_meshset( options_vec[i], set_handle );MB_CHK_SET_ERR( result, "Failed to create set in unpack" );
3380 
3381  // Make sure new sets handles are monotonically increasing
3382  assert( new_sets.empty() || set_handle > *new_sets.rbegin() );
3383  new_sets.insert( set_handle );
3384  }
3385  }
3386 
3387  std::copy( new_sets.begin(), new_sets.end(), std::back_inserter( entities ) );
3388  // Only need to sort if we came in with no sets on the end
3389  if( !no_sets ) std::sort( entities.begin(), entities.end() );
3390 
3391  for( rit = new_sets.begin(), i = 0; rit != new_sets.end(); ++rit, i++ )
3392  {
3393  // Unpack entities as vector, with length
3394  UNPACK_INT( buff_ptr, num_ents );
3395  members.resize( num_ents );
3396  if( num_ents ) UNPACK_EH( buff_ptr, &members[0], num_ents );
3397  result = get_local_handles( &members[0], num_ents, entities );MB_CHK_SET_ERR( result, "Failed to get local handles for ordered set contents" );
3398  result = mbImpl->add_entities( *rit, &members[0], num_ents );MB_CHK_SET_ERR( result, "Failed to add ents to ordered set in unpack" );
3399  }
3400 
3401  std::vector< int > num_pch( 2 * new_sets.size() );
3402  std::vector< int >::iterator vit;
3403  int tot_pch = 0;
3404  for( vit = num_pch.begin(); vit != num_pch.end(); ++vit )
3405  {
3406  UNPACK_INT( buff_ptr, *vit );
3407  tot_pch += *vit;
3408  }
3409 
3410  members.resize( tot_pch );
3411  UNPACK_EH( buff_ptr, &members[0], tot_pch );
3412  result = get_local_handles( &members[0], tot_pch, entities );MB_CHK_SET_ERR( result, "Failed to get local handle for parent/child sets" );
3413 
3414  int num = 0;
3415  EntityHandle* mem_ptr = &members[0];
3416  for( rit = new_sets.begin(); rit != new_sets.end(); ++rit )
3417  {
3418  // Unpack parents/children
3419  int num_par = num_pch[num++], num_child = num_pch[num++];
3420  if( num_par + num_child )
3421  {
3422  for( i = 0; i < num_par; i++ )
3423  {
3424  assert( 0 != mem_ptr[i] );
3425  result = mbImpl->add_parent_meshset( *rit, mem_ptr[i] );MB_CHK_SET_ERR( result, "Failed to add parent to set in unpack" );
3426  }
3427  mem_ptr += num_par;
3428  for( i = 0; i < num_child; i++ )
3429  {
3430  assert( 0 != mem_ptr[i] );
3431  result = mbImpl->add_child_meshset( *rit, mem_ptr[i] );MB_CHK_SET_ERR( result, "Failed to add child to set in unpack" );
3432  }
3433  mem_ptr += num_child;
3434  }
3435  }
3436 
3437  // Unpack source handles
3438  Range dum_range;
3439  if( store_remote_handles && !new_sets.empty() )
3440  {
3441  UNPACK_RANGE( buff_ptr, dum_range );
3442  result = update_remote_data( new_sets, dum_range, from_proc, 0 );MB_CHK_SET_ERR( result, "Failed to set sharing data for sets" );
3443  }
3444 
3445  myDebug->tprintf( 4, "Done unpacking sets." );
3446 
3447  return MB_SUCCESS;
3448 }
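// ---------------------------------------------------------------------------
// Annotation (not part of the original source): the set sub-buffer consumed by
// unpack_sets() above mirrors what pack_sets() emits; roughly, in order:
//
//   int              num_sets
//   unsigned int[n]  set option flags (MESHSET_SET / MESHSET_ORDERED, ...)
//   int              n_uid, then int[n_uid] PARALLEL_UNIQUE_ID values (0 if none)
//   per set:         int num_ents, then EntityHandle[num_ents] contents
//   per set:         int num_parents, int num_children
//   EntityHandle[]   all parent/child handles, concatenated
//   packed Range     source set handles (only if store_remote_handles)
//
// Field names above are descriptive only; pack_sets() is the authoritative
// definition of the layout.
// ---------------------------------------------------------------------------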
3449 
3450 ErrorCode ParallelComm::pack_adjacencies( Range& /*entities*/,
3451  Range::const_iterator& /*start_rit*/,
3452  Range& /*whole_range*/,
3453  unsigned char*& /*buff_ptr*/,
3454  int& /*count*/,
3455  const bool /*just_count*/,
3456  const bool /*store_handles*/,
3457  const int /*to_proc*/ )
3458 {
3459  return MB_FAILURE;
3460 }
3461 
3462 ErrorCode ParallelComm::unpack_adjacencies( unsigned char*& /*buff_ptr*/,
3463  Range& /*entities*/,
3464  const bool /*store_handles*/,
3465  const int /*from_proc*/ )
3466 {
3467  return MB_FAILURE;
3468 }
3469 
3470 ErrorCode ParallelComm::pack_tags( Range& entities,
3471  const std::vector< Tag >& src_tags,
3472  const std::vector< Tag >& dst_tags,
3473  const std::vector< Range >& tag_ranges,
3474  Buffer* buff,
3475  const bool store_remote_handles,
3476  const int to_proc )
3477 {
3478  ErrorCode result;
3479  std::vector< Tag >::const_iterator tag_it, dst_it;
3480  std::vector< Range >::const_iterator rit;
3481  int count = 0;
3482 
3483  for( tag_it = src_tags.begin(), rit = tag_ranges.begin(); tag_it != src_tags.end(); ++tag_it, ++rit )
3484  {
3485  result = packed_tag_size( *tag_it, *rit, count );
3486  if( MB_SUCCESS != result ) return result;
3487  }
3488 
3489  // Number of tags
3490  count += sizeof( int );
3491 
3492  buff->check_space( count );
3493 
3494  PACK_INT( buff->buff_ptr, src_tags.size() );
3495 
3496  std::vector< EntityHandle > entities_vec( entities.size() );
3497  std::copy( entities.begin(), entities.end(), entities_vec.begin() );
3498 
3499  for( tag_it = src_tags.begin(), dst_it = dst_tags.begin(), rit = tag_ranges.begin(); tag_it != src_tags.end();
3500  ++tag_it, ++dst_it, ++rit )
3501  {
3502  result = pack_tag( *tag_it, *dst_it, *rit, entities_vec, buff, store_remote_handles, to_proc );
3503  if( MB_SUCCESS != result ) return result;
3504  }
3505 
3506  myDebug->tprintf( 4, "Done packing tags." );
3507 
3508  buff->set_stored_size();
3509 
3510  return MB_SUCCESS;
3511 }
3512 
3513 ErrorCode ParallelComm::packed_tag_size( Tag tag, const Range& tagged_entities, int& count )
3514 {
3515  // For dense tags, compute size assuming all entities have that tag
3516  // For sparse tags, get number of entities w/ that tag to compute size
3517 
3518  std::vector< int > var_len_sizes;
3519  std::vector< const void* > var_len_values;
3520 
3521  // Default value
3522  count += sizeof( int );
3523  if( NULL != tag->get_default_value() ) count += tag->get_default_value_size();
3524 
3525  // Size, type, data type
3526  count += 3 * sizeof( int );
3527 
3528  // Name
3529  count += sizeof( int );
3530  count += tag->get_name().size();
3531 
3532  // Range of tag
3533  count += sizeof( int ) + tagged_entities.size() * sizeof( EntityHandle );
3534 
3535  if( tag->get_size() == MB_VARIABLE_LENGTH )
3536  {
3537  const int num_ent = tagged_entities.size();
3538  // Send a tag size for each entity
3539  count += num_ent * sizeof( int );
3540  // Send tag data for each entity
3541  var_len_sizes.resize( num_ent );
3542  var_len_values.resize( num_ent );
3543  ErrorCode result =
3544  tag->get_data( sequenceManager, errorHandler, tagged_entities, &var_len_values[0], &var_len_sizes[0] );MB_CHK_SET_ERR( result, "Failed to get lengths of variable-length tag values" );
3545  count += std::accumulate( var_len_sizes.begin(), var_len_sizes.end(), 0 );
3546  }
3547  else
3548  {
3549  // Tag data values for range or vector
3550  count += tagged_entities.size() * tag->get_size();
3551  }
3552 
3553  return MB_SUCCESS;
3554 }
3555 
3556 ErrorCode ParallelComm::pack_tag( Tag src_tag,
3557  Tag dst_tag,
3558  const Range& tagged_entities,
3559  const std::vector< EntityHandle >& whole_vec,
3560  Buffer* buff,
3561  const bool store_remote_handles,
3562  const int to_proc )
3563 {
3564  ErrorCode result;
3565  std::vector< int > var_len_sizes;
3566  std::vector< const void* > var_len_values;
3567 
3568  if( src_tag != dst_tag )
3569  {
3570  if( dst_tag->get_size() != src_tag->get_size() ) return MB_TYPE_OUT_OF_RANGE;
3571  if( dst_tag->get_data_type() != src_tag->get_data_type() && dst_tag->get_data_type() != MB_TYPE_OPAQUE &&
3572  src_tag->get_data_type() != MB_TYPE_OPAQUE )
3573  return MB_TYPE_OUT_OF_RANGE;
3574  }
3575 
3576  // Size, type, data type
3577  buff->check_space( 3 * sizeof( int ) );
3578  PACK_INT( buff->buff_ptr, src_tag->get_size() );
3579  TagType this_type;
3580  result = mbImpl->tag_get_type( dst_tag, this_type );
3581  PACK_INT( buff->buff_ptr, (int)this_type );
3582  DataType data_type = src_tag->get_data_type();
3583  PACK_INT( buff->buff_ptr, (int)data_type );
3584  int type_size = TagInfo::size_from_data_type( data_type );
3585 
3586  // Default value
3587  if( NULL == src_tag->get_default_value() )
3588  {
3589  buff->check_space( sizeof( int ) );
3590  PACK_INT( buff->buff_ptr, 0 );
3591  }
3592  else
3593  {
3594  buff->check_space( src_tag->get_default_value_size() );
3595  PACK_BYTES( buff->buff_ptr, src_tag->get_default_value(), src_tag->get_default_value_size() );
3596  }
3597 
3598  // Name
3599  buff->check_space( src_tag->get_name().size() );
3600  PACK_BYTES( buff->buff_ptr, dst_tag->get_name().c_str(), dst_tag->get_name().size() );
3601 
3602  myDebug->tprintf( 4, "Packing tag \"%s\"", src_tag->get_name().c_str() );
3603  if( src_tag != dst_tag ) myDebug->tprintf( 4, " (as tag \"%s\")", dst_tag->get_name().c_str() );
3604  myDebug->tprintf( 4, "\n" );
3605 
3606  // Pack entities
3607  buff->check_space( tagged_entities.size() * sizeof( EntityHandle ) + sizeof( int ) );
3608  PACK_INT( buff->buff_ptr, tagged_entities.size() );
3609  std::vector< EntityHandle > dum_tagged_entities( tagged_entities.size() );
3610  result = get_remote_handles( store_remote_handles, tagged_entities, &dum_tagged_entities[0], to_proc, whole_vec );
3611  if( MB_SUCCESS != result )
3612  {
3613  if( myDebug->get_verbosity() == 3 )
3614  {
3615  std::cerr << "Failed to get remote handles for tagged entities:" << std::endl;
3616  tagged_entities.print( " " );
3617  }
3618  MB_SET_ERR( result, "Failed to get remote handles for tagged entities" );
3619  }
3620 
3621  PACK_EH( buff->buff_ptr, &dum_tagged_entities[0], dum_tagged_entities.size() );
3622 
3623  const size_t num_ent = tagged_entities.size();
3624  if( src_tag->get_size() == MB_VARIABLE_LENGTH )
3625  {
3626  var_len_sizes.resize( num_ent, 0 );
3627  var_len_values.resize( num_ent, 0 );
3628  result = mbImpl->tag_get_by_ptr( src_tag, tagged_entities, &var_len_values[0], &var_len_sizes[0] );MB_CHK_SET_ERR( result, "Failed to get variable-length tag data in pack_tags" );
3629  buff->check_space( num_ent * sizeof( int ) );
3630  PACK_INTS( buff->buff_ptr, &var_len_sizes[0], num_ent );
3631  for( unsigned int i = 0; i < num_ent; i++ )
3632  {
3633  buff->check_space( var_len_sizes[i] );
3634  PACK_VOID( buff->buff_ptr, var_len_values[i], type_size * var_len_sizes[i] );
3635  }
3636  }
3637  else
3638  {
3639  buff->check_space( num_ent * src_tag->get_size() );
3640  // Should be OK to read directly into buffer, since tags are untyped and
3641  // handled by memcpy
3642  result = mbImpl->tag_get_data( src_tag, tagged_entities, buff->buff_ptr );MB_CHK_SET_ERR( result, "Failed to get tag data in pack_tags" );
3643  buff->buff_ptr += num_ent * src_tag->get_size();
3644  PC( num_ent * src_tag->get_size(), " void" );
3645  }
3646 
3647  return MB_SUCCESS;
3648 }
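// ---------------------------------------------------------------------------
// Annotation (not part of the original source): each tag packed by pack_tag()
// above occupies the buffer roughly as follows (unpack_tags() reads it back in
// the same order):
//
//   int            tag size in bytes (or MB_VARIABLE_LENGTH)
//   int            storage type (TagType), int data type (DataType)
//   int            default-value size, followed by that many bytes (0 if none)
//   int            name length, followed by the name characters
//   int            number of tagged entities, then their remote EntityHandles
//   fixed-size:    num_ent * size bytes of tag values
//   variable-size: int[num_ent] per-entity value lengths, then the values
// ---------------------------------------------------------------------------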
3649 
3650 ErrorCode ParallelComm::get_tag_send_list( const Range& whole_range,
3651  std::vector< Tag >& all_tags,
3652  std::vector< Range >& tag_ranges )
3653 {
3654  std::vector< Tag > tmp_tags;
3655  ErrorCode result = mbImpl->tag_get_tags( tmp_tags );MB_CHK_SET_ERR( result, "Failed to get tags in pack_tags" );
3656 
3657  std::vector< Tag >::iterator tag_it;
3658  for( tag_it = tmp_tags.begin(); tag_it != tmp_tags.end(); ++tag_it )
3659  {
3660  std::string tag_name;
3661  result = mbImpl->tag_get_name( *tag_it, tag_name );
3662  if( tag_name.c_str()[0] == '_' && tag_name.c_str()[1] == '_' ) continue;
3663 
3664  Range tmp_range;
3665  result = ( *tag_it )->get_tagged_entities( sequenceManager, tmp_range );MB_CHK_SET_ERR( result, "Failed to get entities for tag in pack_tags" );
3666  tmp_range = intersect( tmp_range, whole_range );
3667 
3668  if( tmp_range.empty() ) continue;
3669 
3670  // OK, we'll be sending this tag
3671  all_tags.push_back( *tag_it );
3672  tag_ranges.push_back( Range() );
3673  tag_ranges.back().swap( tmp_range );
3674  }
3675 
3676  return MB_SUCCESS;
3677 }
3678 
3679 ErrorCode ParallelComm::unpack_tags( unsigned char*& buff_ptr,
3680  std::vector< EntityHandle >& entities,
3681  const bool /*store_remote_handles*/,
3682  const int /*from_proc*/,
3683  const MPI_Op* const mpi_op )
3684 {
3685  // Tags
3686  // Get all the tags
3687  // For dense tags, compute size assuming all entities have that tag
3688  // For sparse tags, get number of entities w/ that tag to compute size
3689 
3690  ErrorCode result;
3691 
3692  int num_tags;
3693  UNPACK_INT( buff_ptr, num_tags );
3694  std::vector< const void* > var_len_vals;
3695  std::vector< unsigned char > dum_vals;
3696  std::vector< EntityHandle > dum_ehvals;
3697 
3698  for( int i = 0; i < num_tags; i++ )
3699  {
3700  // Tag handle
3701  Tag tag_handle;
3702 
3703  // Size, data type
3704  int tag_size, tag_data_type, tag_type;
3705  UNPACK_INT( buff_ptr, tag_size );
3706  UNPACK_INT( buff_ptr, tag_type );
3707  UNPACK_INT( buff_ptr, tag_data_type );
3708 
3709  // Default value
3710  int def_val_size;
3711  UNPACK_INT( buff_ptr, def_val_size );
3712  void* def_val_ptr = NULL;
3713  if( def_val_size )
3714  {
3715  def_val_ptr = buff_ptr;
3716  buff_ptr += def_val_size;
3717  UPC( tag_size, " void" );
3718  }
3719 
3720  // Name
3721  int name_len;
3722  UNPACK_INT( buff_ptr, name_len );
3723  std::string tag_name( reinterpret_cast< char* >( buff_ptr ), name_len );
3724  buff_ptr += name_len;
3725  UPC( 64, " chars" );
3726 
3727  myDebug->tprintf( 4, "Unpacking tag %s\n", tag_name.c_str() );
3728 
3729  // Create the tag
3730  if( tag_size == MB_VARIABLE_LENGTH )
3731  result = mbImpl->tag_get_handle( tag_name.c_str(), def_val_size, (DataType)tag_data_type, tag_handle,
3732  MB_TAG_VARLEN | MB_TAG_CREAT | MB_TAG_BYTES | tag_type, def_val_ptr );
3733  else
3734  result = mbImpl->tag_get_handle( tag_name.c_str(), tag_size, (DataType)tag_data_type, tag_handle,
3735  MB_TAG_CREAT | MB_TAG_BYTES | tag_type, def_val_ptr );
3736  if( MB_SUCCESS != result ) return result;
3737 
3738  // Get handles and convert to local handles
3739  int num_ents;
3740  UNPACK_INT( buff_ptr, num_ents );
3741  std::vector< EntityHandle > dum_ents( num_ents );
3742  UNPACK_EH( buff_ptr, &dum_ents[0], num_ents );
3743 
3744  // In this case handles are indices into new entity range; need to convert
3745  // to local handles
3746  result = get_local_handles( &dum_ents[0], num_ents, entities );MB_CHK_SET_ERR( result, "Unable to convert to local handles" );
3747 
3748  // If it's a handle type, also convert tag vals in-place in buffer
3749  if( MB_TYPE_HANDLE == tag_type )
3750  {
3751  dum_ehvals.resize( num_ents );
3752  UNPACK_EH( buff_ptr, &dum_ehvals[0], num_ents );
3753  result = get_local_handles( &dum_ehvals[0], num_ents, entities );MB_CHK_SET_ERR( result, "Failed to get local handles for tag vals" );
3754  }
3755 
3756  DataType data_type;
3757  mbImpl->tag_get_data_type( tag_handle, data_type );
3758  int type_size = TagInfo::size_from_data_type( data_type );
3759 
3760  if( !dum_ents.empty() )
3761  {
3762  if( tag_size == MB_VARIABLE_LENGTH )
3763  {
3764  // Be careful of alignment here. If the integers are aligned
3765  // in the buffer, we can use them directly. Otherwise we must
3766  // copy them.
3767  std::vector< int > var_lengths( num_ents );
3768  UNPACK_INTS( buff_ptr, &var_lengths[0], num_ents );
3769  UPC( sizeof( int ) * num_ents, " void" );
3770 
3771  // Get pointers into buffer for each tag value
3772  var_len_vals.resize( num_ents );
3773  for( std::vector< EntityHandle >::size_type j = 0; j < (std::vector< EntityHandle >::size_type)num_ents;
3774  j++ )
3775  {
3776  var_len_vals[j] = buff_ptr;
3777  buff_ptr += var_lengths[j] * type_size;
3778  UPC( var_lengths[j], " void" );
3779  }
3780  result =
3781  mbImpl->tag_set_by_ptr( tag_handle, &dum_ents[0], num_ents, &var_len_vals[0], &var_lengths[0] );MB_CHK_SET_ERR( result, "Failed to set tag data when unpacking variable-length tag" );
3782  }
3783  else
3784  {
3785  // Get existing values of dst tag
3786  dum_vals.resize( tag_size * num_ents );
3787  if( mpi_op )
3788  {
3789  int tag_length;
3790  result = mbImpl->tag_get_length( tag_handle, tag_length );MB_CHK_SET_ERR( result, "Failed to get tag length" );
3791  result = mbImpl->tag_get_data( tag_handle, &dum_ents[0], num_ents, &dum_vals[0] );MB_CHK_SET_ERR( result, "Failed to get existing value of dst tag on entities" );
3792  result = reduce_void( tag_data_type, *mpi_op, tag_length * num_ents, &dum_vals[0], buff_ptr );MB_CHK_SET_ERR( result, "Failed to perform mpi op on dst tags" );
3793  }
3794  result = mbImpl->tag_set_data( tag_handle, &dum_ents[0], num_ents, buff_ptr );MB_CHK_SET_ERR( result, "Failed to set range-based tag data when unpacking tag" );
3795  buff_ptr += num_ents * tag_size;
3796  UPC( num_ents * tag_size, " void" );
3797  }
3798  }
3799  }
3800 
3801  myDebug->tprintf( 4, "Done unpacking tags.\n" );
3802 
3803  return MB_SUCCESS;
3804 }
3805 
3806 template < class T >
3807 T LAND( const T& arg1, const T& arg2 )
3808 {
3809  return arg1 && arg2;
3810 }
3811 template < class T >
3812 T LOR( const T& arg1, const T& arg2 )
3813 {
3814  return arg1 || arg2;
3815 }
3816 template < class T >
3817 T LXOR( const T& arg1, const T& arg2 )
3818 {
3819  return ( ( arg1 && !arg2 ) || ( !arg1 && arg2 ) );
3820 }
3821 template < class T >
3822 T MAX( const T& arg1, const T& arg2 )
3823 {
3824  return ( arg1 > arg2 ? arg1 : arg2 );
3825 }
3826 template < class T >
3827 T MIN( const T& arg1, const T& arg2 )
3828 {
3829  return ( arg1 < arg2 ? arg1 : arg2 );
3830 }
3831 template < class T >
3832 T ADD( const T& arg1, const T& arg2 )
3833 {
3834  return arg1 + arg2;
3835 }
3836 template < class T >
3837 T MULT( const T& arg1, const T& arg2 )
3838 {
3839  return arg1 * arg2;
3840 }
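// ---------------------------------------------------------------------------
// Annotation (not part of the original source): these small functors let
// reduce<T>() below map an MPI_Op onto an elementwise std::transform over the
// existing ("old") and incoming ("new") tag values. For example, with MPI_SUM:
//
//   double old_vals[] = { 1.0, 2.0, 3.0 };
//   double new_vals[] = { 10.0, 20.0, 30.0 };
//   std::transform( old_vals, old_vals + 3, new_vals, new_vals, ADD< double > );
//   // new_vals now holds { 11.0, 22.0, 33.0 }
// ---------------------------------------------------------------------------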
3841 
3842 template < class T >
3843 ErrorCode ParallelComm::reduce( const MPI_Op mpi_op, int num_ents, void* old_vals, void* new_vals )
3844 {
3845  T* old_tmp = reinterpret_cast< T* >( old_vals );
3846  // T *new_tmp = reinterpret_cast<T*>(new_vals);
3847  // new vals pointer needs to be aligned , some compilers will optimize and will shift
3848 
3849  std::vector< T > new_values;
3850  new_values.resize( num_ents );
3851  memcpy( &new_values[0], new_vals, num_ents * sizeof( T ) );
3852  T* new_tmp = &new_values[0];
3853 
3854  if( mpi_op == MPI_SUM )
3855  std::transform( old_tmp, old_tmp + num_ents, new_tmp, new_tmp, ADD< T > );
3856  else if( mpi_op == MPI_PROD )
3857  std::transform( old_tmp, old_tmp + num_ents, new_tmp, new_tmp, MULT< T > );
3858  else if( mpi_op == MPI_MAX )
3859  std::transform( old_tmp, old_tmp + num_ents, new_tmp, new_tmp, MAX< T > );
3860  else if( mpi_op == MPI_MIN )
3861  std::transform( old_tmp, old_tmp + num_ents, new_tmp, new_tmp, MIN< T > );
3862  else if( mpi_op == MPI_LAND )
3863  std::transform( old_tmp, old_tmp + num_ents, new_tmp, new_tmp, LAND< T > );
3864  else if( mpi_op == MPI_LOR )
3865  std::transform( old_tmp, old_tmp + num_ents, new_tmp, new_tmp, LOR< T > );
3866  else if( mpi_op == MPI_LXOR )
3867  std::transform( old_tmp, old_tmp + num_ents, new_tmp, new_tmp, LXOR< T > );
3868  else if( mpi_op == MPI_BAND || mpi_op == MPI_BOR || mpi_op == MPI_BXOR )
3869  {
3870  std::cerr << "Bitwise operations not allowed in tag reductions." << std::endl;
3871  return MB_FAILURE;
3872  }
3873  else if( mpi_op != MPI_OP_NULL )
3874  {
3875  std::cerr << "Unknown MPI operation type." << std::endl;
3876  return MB_TYPE_OUT_OF_RANGE;
3877  }
3878 
3879  // copy now the result back where it should be
3880  memcpy( new_vals, new_tmp, num_ents * sizeof( T ) );
3881  std::vector< T >().swap( new_values ); // way to release allocated vector
3882 
3883  return MB_SUCCESS;
3884 }
3885 
3886 ErrorCode ParallelComm::reduce_void( int tag_data_type,
3887  const MPI_Op mpi_op,
3888  int num_ents,
3889  void* old_vals,
3890  void* new_vals )
3891 {
3892  ErrorCode result;
3893  switch( tag_data_type )
3894  {
3895  case MB_TYPE_INTEGER:
3896  result = reduce< int >( mpi_op, num_ents, old_vals, new_vals );
3897  break;
3898  case MB_TYPE_DOUBLE:
3899  result = reduce< double >( mpi_op, num_ents, old_vals, new_vals );
3900  break;
3901  case MB_TYPE_BIT:
3902  result = reduce< unsigned char >( mpi_op, num_ents, old_vals, new_vals );
3903  break;
3904  default:
3905  result = MB_SUCCESS;
3906  break;
3907  }
3908 
3909  return result;
3910 }
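// ---------------------------------------------------------------------------
// Annotation (not part of the original source): unpack_tags() calls
// reduce_void() when an MPI_Op is supplied, so incoming ghost tag values are
// combined with the locally stored ones instead of overwriting them. A minimal
// sketch with illustrative values (the result lands in the last argument):
//
//   std::vector< double > local_vals = { 1.0, 2.0 };  // existing tag data
//   std::vector< double > incoming   = { 0.5, 0.5 };  // values from the buffer
//   reduce_void( MB_TYPE_DOUBLE, MPI_SUM, 2, &local_vals[0], &incoming[0] );
//   // incoming now holds { 1.5, 2.5 }
// ---------------------------------------------------------------------------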
3911 
3912 ErrorCode ParallelComm::resolve_shared_ents( EntityHandle this_set, int resolve_dim, int shared_dim, const Tag* id_tag )
3913 {
3914  ErrorCode result;
3915  Range proc_ents;
3916 
3917  // Check for structured mesh, and do it differently if it is
3918  ScdInterface* scdi;
3919  result = mbImpl->query_interface( scdi );
3920  if( scdi )
3921  {
3922  result = scdi->tag_shared_vertices( this, this_set );
3923  if( MB_SUCCESS == result )
3924  {
3925  myDebug->tprintf( 1, "Total number of shared entities = %lu.\n", (unsigned long)sharedEnts.size() );
3926  return result;
3927  }
3928  }
3929 
3930  if( 0 == this_set )
3931  {
3932  // Get the entities in the partition sets
3933  for( Range::iterator rit = partitionSets.begin(); rit != partitionSets.end(); ++rit )
3934  {
3935  Range tmp_ents;
3936  result = mbImpl->get_entities_by_handle( *rit, tmp_ents, true );
3937  if( MB_SUCCESS != result ) return result;
3938  proc_ents.merge( tmp_ents );
3939  }
3940  }
3941  else
3942  {
3943  result = mbImpl->get_entities_by_handle( this_set, proc_ents, true );
3944  if( MB_SUCCESS != result ) return result;
3945  }
3946 
3947  // Resolve dim is maximal dim of entities in proc_ents
3948  if( -1 == resolve_dim )
3949  {
3950  if( !proc_ents.empty() ) resolve_dim = mbImpl->dimension_from_handle( *proc_ents.rbegin() );
3951  }
3952 
3953  // proc_ents should all be of same dimension
3954  if( resolve_dim > shared_dim &&
3955  mbImpl->dimension_from_handle( *proc_ents.rbegin() ) != mbImpl->dimension_from_handle( *proc_ents.begin() ) )
3956  {
3957  Range::iterator lower = proc_ents.lower_bound( CN::TypeDimensionMap[0].first ),
3958  upper = proc_ents.upper_bound( CN::TypeDimensionMap[resolve_dim - 1].second );
3959  proc_ents.erase( lower, upper );
3960  }
3961 
3962  // Must call even if we don't have any entities, to make sure
3963  // collective comm'n works
3964  return resolve_shared_ents( this_set, proc_ents, resolve_dim, shared_dim, NULL, id_tag );
3965 }
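// ---------------------------------------------------------------------------
// Annotation (not part of the original source): a typical application-level
// use of this entry point after each rank has loaded its local part of a 3D
// mesh; "mb" and "pcomm" are illustrative names:
//
//   moab::Core mb;
//   moab::ParallelComm pcomm( &mb, MPI_COMM_WORLD );
//   // ... read or create the local mesh ...
//   moab::ErrorCode rval =
//       pcomm.resolve_shared_ents( 0 /*this_set*/, 3 /*resolve_dim*/,
//                                  2 /*shared_dim*/, NULL /*id_tag*/ );
//   // afterwards shared/interface entities carry pstatus and sharing tags
// ---------------------------------------------------------------------------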
3966 
3967 ErrorCode ParallelComm::resolve_shared_ents( EntityHandle this_set,
3968  Range& proc_ents,
3969  int resolve_dim,
3970  int shared_dim,
3971  Range* skin_ents,
3972  const Tag* id_tag )
3973 {
3974 #ifdef MOAB_HAVE_MPE
3975  if( myDebug->get_verbosity() == 2 )
3976  {
3977  define_mpe();
3978  MPE_Log_event( RESOLVE_START, procConfig.proc_rank(), "Entering resolve_shared_ents." );
3979  }
3980 #endif
3981 
3982  ErrorCode result;
3983  myDebug->tprintf( 1, "Resolving shared entities.\n" );
3984 
3985  if( resolve_dim < shared_dim )
3986  {
3987  MB_SET_ERR( MB_FAILURE, "MOAB does not support vertex-based partitions, only element-based ones" );
3988  }
3989 
3990  if( -1 == shared_dim )
3991  {
3992  if( !proc_ents.empty() )
3993  shared_dim = mbImpl->dimension_from_handle( *proc_ents.begin() ) - 1;
3994  else if( resolve_dim == 3 )
3995  shared_dim = 2;
3996  }
3997  int max_global_resolve_dim = -1;
3998  int err = MPI_Allreduce( &resolve_dim, &max_global_resolve_dim, 1, MPI_INT, MPI_MAX, proc_config().proc_comm() );
3999  if( MPI_SUCCESS != err )
4000  {
4001  MB_SET_ERR( MB_FAILURE, "Unable to guess global resolve_dim" );
4002  }
4003  if( shared_dim < 0 || resolve_dim < 0 )
4004  {
4005  // MB_SET_ERR(MB_FAILURE, "Unable to guess shared_dim or resolve_dim");
4006  resolve_dim = max_global_resolve_dim;
4007  shared_dim = resolve_dim - 1;
4008  }
4009 
4010  if( resolve_dim < 0 || shared_dim < 0 ) return MB_SUCCESS;
4011  // no task has any mesh, get out
4012 
4013  // Get the skin entities by dimension
4014  Range tmp_skin_ents[4];
4015 
4016  // Get the entities to be skinned
4017  // Find the skin
4018  int skin_dim = resolve_dim - 1;
4019  if( !skin_ents )
4020  {
4021  skin_ents = tmp_skin_ents;
4022  skin_ents[resolve_dim] = proc_ents;
4023  Skinner skinner( mbImpl );
4024  result =
4025  skinner.find_skin( this_set, skin_ents[skin_dim + 1], false, skin_ents[skin_dim], NULL, true, true, true );MB_CHK_SET_ERR( result, "Failed to find skin" );
4026  myDebug->tprintf( 1, "Found skin: skin_dim: %d resolve_dim: %d , now resolving.\n", skin_dim, resolve_dim );
4027  myDebug->tprintf( 3, "skin_ents[0].size(): %d skin_ents[1].size(): %d \n", (int)skin_ents[0].size(),
4028  (int)skin_ents[1].size() );
4029  // Get entities adjacent to skin ents from shared_dim down to zero
4030  for( int this_dim = skin_dim - 1; this_dim >= 0; this_dim-- )
4031  {
4032  result =
4033  mbImpl->get_adjacencies( skin_ents[skin_dim], this_dim, true, skin_ents[this_dim], Interface::UNION );MB_CHK_SET_ERR( result, "Failed to get skin adjacencies" );
4034 
4035  if( this_set && skin_dim == 2 && this_dim == 1 )
4036  {
4037  result = mbImpl->add_entities( this_set, skin_ents[this_dim] );MB_CHK_ERR( result );
4038  }
4039  }
4040  }
4041  else if( skin_ents[resolve_dim].empty() )
4042  skin_ents[resolve_dim] = proc_ents;
4043 
4044  // Global id tag
4045  Tag gid_tag;
4046  if( id_tag )
4047  gid_tag = *id_tag;
4048  else
4049  {
4050  bool tag_created = false;
4051  int def_val = -1;
4052  result = mbImpl->tag_get_handle( GLOBAL_ID_TAG_NAME, 1, MB_TYPE_INTEGER, gid_tag, MB_TAG_DENSE | MB_TAG_CREAT,
4053  &def_val, &tag_created );
4054  if( MB_ALREADY_ALLOCATED != result && MB_SUCCESS != result )
4055  {
4056  MB_SET_ERR( result, "Failed to create/get gid tag handle" );
4057  }
4058  else if( tag_created )
4059  {
4060  // Just created it, so we need global ids
4061  result = assign_global_ids( this_set, skin_dim + 1, true, true, true );MB_CHK_SET_ERR( result, "Failed to assign global ids" );
4062  }
4063  }
4064 
4065  DataType tag_type;
4066  result = mbImpl->tag_get_data_type( gid_tag, tag_type );MB_CHK_SET_ERR( result, "Failed to get tag data type" );
4067  int bytes_per_tag;
4068  result = mbImpl->tag_get_bytes( gid_tag, bytes_per_tag );MB_CHK_SET_ERR( result, "Failed to get number of bytes per tag" );
4069  // On 64 bits, long and int are different
4070  // On 32 bits, they are not; if size of long is 8, it is a 64 bit machine (really?)
4071 
4072  // Get gids for skin ents in a vector, to pass to gs
4073  std::vector< long > lgid_data( skin_ents[0].size() );
4074  // Size is either long or int
4075  // On 64 bit is 8 or 4
4076  if( sizeof( long ) == bytes_per_tag && ( ( MB_TYPE_HANDLE == tag_type ) || ( MB_TYPE_OPAQUE == tag_type ) ) )
4077  { // It is a special id tag
4078  result = mbImpl->tag_get_data( gid_tag, skin_ents[0], &lgid_data[0] );MB_CHK_SET_ERR( result, "Couldn't get gid tag for skin vertices" );
4079  }
4080  else if( 4 == bytes_per_tag )
4081  { // Must be GLOBAL_ID tag or 32 bits ...
4082  std::vector< int > gid_data( lgid_data.size() );
4083  result = mbImpl->tag_get_data( gid_tag, skin_ents[0], &gid_data[0] );MB_CHK_SET_ERR( result, "Failed to get gid tag for skin vertices" );
4084  std::copy( gid_data.begin(), gid_data.end(), lgid_data.begin() );
4085  }
4086  else
4087  {
4088  // Not supported flag
4089  MB_SET_ERR( MB_FAILURE, "Unsupported id tag" );
4090  }
4091 
4092  // Put handles in vector for passing to gs setup
4093  std::vector< Ulong > handle_vec; // Assumes that we can do conversion from Ulong to EntityHandle
4094  std::copy( skin_ents[0].begin(), skin_ents[0].end(), std::back_inserter( handle_vec ) );
4095 
4096 #ifdef MOAB_HAVE_MPE
4097  if( myDebug->get_verbosity() == 2 )
4098  {
4099  MPE_Log_event( SHAREDV_START, procConfig.proc_rank(), "Creating crystal router." );
4100  }
4101 #endif
4102 
4103  // Get a crystal router
4104  gs_data::crystal_data* cd = procConfig.crystal_router();
4105 
4106  /*
4107  // Get total number of entities; will overshoot highest global id, but
4108  // that's OK
4109  int num_total[2] = {0, 0}, num_local[2] = {0, 0};
4110  result = mbImpl->get_number_entities_by_dimension(this_set, 0, num_local);
4111  if (MB_SUCCESS != result)return result;
4112  int failure = MPI_Allreduce(num_local, num_total, 1,
4113  MPI_INT, MPI_SUM, procConfig.proc_comm());
4114  if (failure) {
4115  MB_SET_ERR(MB_FAILURE, "Allreduce for total number of shared ents failed");
4116  }
4117  */
4118  // Call gather-scatter to get shared ids & procs
4119  gs_data* gsd = new gs_data();
4120  // assert(sizeof(ulong_) == sizeof(EntityHandle));
4121  result = gsd->initialize( skin_ents[0].size(), &lgid_data[0], &handle_vec[0], 2, 1, 1, cd );MB_CHK_SET_ERR( result, "Failed to create gs data" );
4122 
4123  // Get shared proc tags
4124  Tag shp_tag, shps_tag, shh_tag, shhs_tag, pstat_tag;
4125  result = get_shared_proc_tags( shp_tag, shps_tag, shh_tag, shhs_tag, pstat_tag );MB_CHK_SET_ERR( result, "Failed to get shared proc tags" );
4126 
4127  // Load shared verts into a tuple, then sort by index
4128  TupleList shared_verts;
4129  shared_verts.initialize( 2, 0, 1, 0, skin_ents[0].size() * ( MAX_SHARING_PROCS + 1 ) );
4130  shared_verts.enableWriteAccess();
4131 
4132  unsigned int i = 0, j = 0;
4133  for( unsigned int p = 0; p < gsd->nlinfo->_np; p++ )
4134  for( unsigned int np = 0; np < gsd->nlinfo->_nshared[p]; np++ )
4135  {
4136  shared_verts.vi_wr[i++] = gsd->nlinfo->_sh_ind[j];
4137  shared_verts.vi_wr[i++] = gsd->nlinfo->_target[p];
4138  shared_verts.vul_wr[j] = gsd->nlinfo->_ulabels[j];
4139  j++;
4140  shared_verts.inc_n();
4141  }
4142 
4143  myDebug->tprintf( 3, " shared verts size %d \n", (int)shared_verts.get_n() );
4144 
4145  int max_size = skin_ents[0].size() * ( MAX_SHARING_PROCS + 1 );
4146  moab::TupleList::buffer sort_buffer;
4147  sort_buffer.buffer_init( max_size );
4148  shared_verts.sort( 0, &sort_buffer );
4149  sort_buffer.reset();
4150 
4151  // Set sharing procs and handles tags on skin ents
4152  int maxp = -1;
4153  std::vector< int > sharing_procs( MAX_SHARING_PROCS );
4154  std::fill( sharing_procs.begin(), sharing_procs.end(), maxp );
4155  j = 0;
4156  i = 0;
4157 
4158  // Get ents shared by 1 or n procs
4159  std::map< std::vector< int >, std::vector< EntityHandle > > proc_nvecs;
4160  Range proc_verts;
4161  result = mbImpl->get_adjacencies( proc_ents, 0, false, proc_verts, Interface::UNION );MB_CHK_SET_ERR( result, "Failed to get proc_verts" );
4162 
4163  myDebug->print( 3, " resolve shared ents: proc verts ", proc_verts );
4164  result = tag_shared_verts( shared_verts, skin_ents, proc_nvecs, proc_verts );MB_CHK_SET_ERR( result, "Failed to tag shared verts" );
4165 
4166 #ifdef MOAB_HAVE_MPE
4167  if( myDebug->get_verbosity() == 2 )
4168  {
4169  MPE_Log_event( SHAREDV_END, procConfig.proc_rank(), "Finished tag_shared_verts." );
4170  }
4171 #endif
4172 
4173  // Get entities shared by 1 or n procs
4174  result = get_proc_nvecs( resolve_dim, shared_dim, skin_ents, proc_nvecs );MB_CHK_SET_ERR( result, "Failed to tag shared entities" );
4175 
4176  shared_verts.reset();
4177 
4178  if( myDebug->get_verbosity() > 0 )
4179  {
4180  for( std::map< std::vector< int >, std::vector< EntityHandle > >::const_iterator mit = proc_nvecs.begin();
4181  mit != proc_nvecs.end(); ++mit )
4182  {
4183  myDebug->tprintf( 1, "Iface: " );
4184  for( std::vector< int >::const_iterator vit = ( mit->first ).begin(); vit != ( mit->first ).end(); ++vit )
4185  myDebug->printf( 1, " %d", *vit );
4186  myDebug->print( 1, "\n" );
4187  }
4188  }
4189 
4190  // Create the sets for each interface; store them as tags on
4191  // the interface instance
4192  Range iface_sets;
4193  result = create_interface_sets( proc_nvecs );MB_CHK_SET_ERR( result, "Failed to create interface sets" );
4194 
4195  // Establish comm procs and buffers for them
4196  std::set< unsigned int > procs;
4197  result = get_interface_procs( procs, true );MB_CHK_SET_ERR( result, "Failed to get interface procs" );
4198 
4199 #ifndef NDEBUG
4200  result = check_all_shared_handles( true );MB_CHK_SET_ERR( result, "Shared handle check failed after interface vertex exchange" );
4201 #endif
4202 
4203  // Resolve shared entity remote handles; implemented in ghost cell exchange
4204  // code because it's so similar
4205  result = exchange_ghost_cells( -1, -1, 0, 0, true, true );MB_CHK_SET_ERR( result, "Failed to resolve shared entity remote handles" );
4206 
4207  // Now build parent/child links for interface sets
4208  result = create_iface_pc_links();MB_CHK_SET_ERR( result, "Failed to create interface parent/child links" );
4209 
4210  gsd->reset();
4211  delete gsd;
4212 
4213 #ifdef MOAB_HAVE_MPE
4214  if( myDebug->get_verbosity() == 2 )
4215  {
4216  MPE_Log_event( RESOLVE_END, procConfig.proc_rank(), "Exiting resolve_shared_ents." );
4217  }
4218 #endif
4219 
4220  // std::ostringstream ent_str;
4221  // ent_str << "mesh." << procConfig.proc_rank() << ".h5m";
4222  // mbImpl->write_mesh(ent_str.str().c_str());
4223 
4224  // Done
4225  return result;
4226 }
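// ---------------------------------------------------------------------------
// Annotation (not part of the original source): in outline, the routine above
//   1. skins the owned elements and collects the skin vertices (plus lower-
//      dimensional adjacencies),
//   2. gathers a global id and a handle for every skin vertex,
//   3. runs the gs / crystal-router exchange so each rank learns which other
//      ranks hold a vertex with the same global id,
//   4. tags shared vertices and infers shared higher-dimensional entities,
//   5. builds interface sets, determines the interface procs, and resolves
//      remote handles through the ghost-exchange machinery.
// ---------------------------------------------------------------------------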
4227 
4228 void ParallelComm::define_mpe()
4229 {
4230 #ifdef MOAB_HAVE_MPE
4231  if( myDebug->get_verbosity() == 2 )
4232  {
4233  // Define mpe states used for logging
4234  int success;
4235  MPE_Log_get_state_eventIDs( &IFACE_START, &IFACE_END );
4236  MPE_Log_get_state_eventIDs( &GHOST_START, &GHOST_END );
4237  MPE_Log_get_state_eventIDs( &SHAREDV_START, &SHAREDV_END );
4238  MPE_Log_get_state_eventIDs( &RESOLVE_START, &RESOLVE_END );
4239  MPE_Log_get_state_eventIDs( &ENTITIES_START, &ENTITIES_END );
4240  MPE_Log_get_state_eventIDs( &RHANDLES_START, &RHANDLES_END );
4241  MPE_Log_get_state_eventIDs( &OWNED_START, &OWNED_END );
4242  success = MPE_Describe_state( IFACE_START, IFACE_END, "Resolve interface ents", "green" );
4243  assert( MPE_LOG_OK == success );
4244  success = MPE_Describe_state( GHOST_START, GHOST_END, "Exchange ghost ents", "red" );
4245  assert( MPE_LOG_OK == success );
4246  success = MPE_Describe_state( SHAREDV_START, SHAREDV_END, "Resolve interface vertices", "blue" );
4247  assert( MPE_LOG_OK == success );
4248  success = MPE_Describe_state( RESOLVE_START, RESOLVE_END, "Resolve shared ents", "purple" );
4249  assert( MPE_LOG_OK == success );
4250  success = MPE_Describe_state( ENTITIES_START, ENTITIES_END, "Exchange shared ents", "yellow" );
4251  assert( MPE_LOG_OK == success );
4252  success = MPE_Describe_state( RHANDLES_START, RHANDLES_END, "Remote handles", "cyan" );
4253  assert( MPE_LOG_OK == success );
4254  success = MPE_Describe_state( OWNED_START, OWNED_END, "Exchange owned ents", "black" );
4255  assert( MPE_LOG_OK == success );
4256  }
4257 #endif
4258 }
4259 
4260 ErrorCode ParallelComm::resolve_shared_ents( ParallelComm** pc,
4261  const unsigned int np,
4262  EntityHandle this_set,
4263  const int part_dim )
4264 {
4265  std::vector< Range > verts( np );
4266  int tot_verts = 0;
4267  unsigned int p, i, j, v;
4268  ErrorCode rval;
4269  for( p = 0; p < np; p++ )
4270  {
4271  Skinner skinner( pc[p]->get_moab() );
4272  Range part_ents, skin_ents;
4273  rval = pc[p]->get_moab()->get_entities_by_dimension( this_set, part_dim, part_ents );
4274  if( MB_SUCCESS != rval ) return rval;
4275  rval = skinner.find_skin( this_set, part_ents, false, skin_ents, 0, true, true, true );
4276  if( MB_SUCCESS != rval ) return rval;
4277  rval = pc[p]->get_moab()->get_adjacencies( skin_ents, 0, true, verts[p], Interface::UNION );
4278  if( MB_SUCCESS != rval ) return rval;
4279  tot_verts += verts[p].size();
4280  }
4281 
4282  TupleList shared_ents;
4283  shared_ents.initialize( 2, 0, 1, 0, tot_verts );
4284  shared_ents.enableWriteAccess();
4285 
4286  i = 0;
4287  j = 0;
4288  std::vector< int > gids;
4289  Range::iterator rit;
4290  Tag gid_tag;
4291  for( p = 0; p < np; p++ )
4292  {
4293  gid_tag = pc[p]->get_moab()->globalId_tag();
4294 
4295  gids.resize( verts[p].size() );
4296  rval = pc[p]->get_moab()->tag_get_data( gid_tag, verts[p], &gids[0] );
4297  if( MB_SUCCESS != rval ) return rval;
4298 
4299  for( v = 0, rit = verts[p].begin(); v < gids.size(); v++, ++rit )
4300  {
4301  shared_ents.vi_wr[i++] = gids[v];
4302  shared_ents.vi_wr[i++] = p;
4303  shared_ents.vul_wr[j] = *rit;
4304  j++;
4305  shared_ents.inc_n();
4306  }
4307  }
4308 
4309  moab::TupleList::buffer sort_buffer;
4310  sort_buffer.buffer_init( tot_verts );
4311  shared_ents.sort( 0, &sort_buffer );
4312  sort_buffer.reset();
4313 
4314  j = 0;
4315  i = 0;
4316  std::vector< EntityHandle > handles;
4317  std::vector< int > procs;
4318 
4319  while( i < shared_ents.get_n() )
4320  {
4321  handles.clear();
4322  procs.clear();
4323 
4324  // Count & accumulate sharing procs
4325  int this_gid = shared_ents.vi_rd[j];
4326  while( i < shared_ents.get_n() && shared_ents.vi_rd[j] == this_gid )
4327  {
4328  j++;
4329  procs.push_back( shared_ents.vi_rd[j++] );
4330  handles.push_back( shared_ents.vul_rd[i++] );
4331  }
4332  if( 1 == procs.size() ) continue;
4333 
4334  for( v = 0; v < procs.size(); v++ )
4335  {
4336  rval = pc[procs[v]]->update_remote_data( handles[v], &procs[0], &handles[0], procs.size(),
4337  ( procs[0] == (int)pc[procs[v]]->rank()
4338  ? PSTATUS_INTERFACE
4339  : ( PSTATUS_NOT_OWNED | PSTATUS_INTERFACE ) ) );
4340  if( MB_SUCCESS != rval ) return rval;
4341  }
4342  }
4343 
4344  std::set< unsigned int > psets;
4345  for( p = 0; p < np; p++ )
4346  {
4347  rval = pc[p]->create_interface_sets( this_set, part_dim, part_dim - 1 );
4348  if( MB_SUCCESS != rval ) return rval;
4349  // Establish comm procs and buffers for them
4350  psets.clear();
4351  rval = pc[p]->get_interface_procs( psets, true );
4352  if( MB_SUCCESS != rval ) return rval;
4353  }
4354 
4355  shared_ents.reset();
4356 
4357  return MB_SUCCESS;
4358 }
4359 
4360 ErrorCode ParallelComm::tag_iface_entities()
4361 {
4362  ErrorCode result = MB_SUCCESS;
4363  Range iface_ents, tmp_ents, rmv_ents;
4364  std::vector< unsigned char > pstat;
4365  unsigned char set_pstat;
4366  Range::iterator rit2;
4367  unsigned int i;
4368 
4369  for( Range::iterator rit = interfaceSets.begin(); rit != interfaceSets.end(); ++rit )
4370  {
4371  iface_ents.clear();
4372 
4373  result = mbImpl->get_entities_by_handle( *rit, iface_ents );MB_CHK_SET_ERR( result, "Failed to get interface set contents" );
4374  pstat.resize( iface_ents.size() );
4375  result = mbImpl->tag_get_data( pstatus_tag(), iface_ents, &pstat[0] );MB_CHK_SET_ERR( result, "Failed to get pstatus values for interface set entities" );
4376  result = mbImpl->tag_get_data( pstatus_tag(), &( *rit ), 1, &set_pstat );MB_CHK_SET_ERR( result, "Failed to get pstatus values for interface set" );
4377  rmv_ents.clear();
4378  for( rit2 = iface_ents.begin(), i = 0; rit2 != iface_ents.end(); ++rit2, i++ )
4379  {
4380  if( !( pstat[i] & PSTATUS_INTERFACE ) )
4381  {
4382  rmv_ents.insert( *rit2 );
4383  pstat[i] = 0x0;
4384  }
4385  }
4386  result = mbImpl->remove_entities( *rit, rmv_ents );MB_CHK_SET_ERR( result, "Failed to remove entities from interface set" );
4387 
4388  if( !( set_pstat & PSTATUS_NOT_OWNED ) ) continue;
4389  // If we're here, we need to set the notowned status on (remaining) set contents
4390 
4391  // Remove rmv_ents from the contents list
4392  iface_ents = subtract( iface_ents, rmv_ents );
4393  // Compress the pstat vector (removing 0x0's)
4394  std::remove_if( pstat.begin(), pstat.end(),
4395  std::bind( std::equal_to< unsigned char >(), std::placeholders::_1, 0x0 ) );
4396  // std::bind2nd(std::equal_to<unsigned char>(), 0x0));
4397  // https://stackoverflow.com/questions/32739018/a-replacement-for-stdbind2nd
4398  // Fold the not_owned bit into remaining values
4399  unsigned int sz = iface_ents.size();
4400  for( i = 0; i < sz; i++ )
4401  pstat[i] |= PSTATUS_NOT_OWNED;
4402 
4403  // Set the tag on the entities
4404  result = mbImpl->tag_set_data( pstatus_tag(), iface_ents, &pstat[0] );MB_CHK_SET_ERR( result, "Failed to set pstatus values for interface set entities" );
4405  }
4406 
4407  return MB_SUCCESS;
4408 }
4409 
4410 ErrorCode ParallelComm::set_pstatus_entities( Range& pstatus_ents,
4411  unsigned char pstatus_val,
4412  bool lower_dim_ents,
4413  bool verts_too,
4414  int operation )
4415 {
4416  std::vector< unsigned char > pstatus_vals( pstatus_ents.size() );
4417  Range all_ents, *range_ptr = &pstatus_ents;
4418  ErrorCode result;
4419  if( lower_dim_ents || verts_too )
4420  {
4421  all_ents = pstatus_ents;
4422  range_ptr = &all_ents;
4423  int start_dim = ( lower_dim_ents ? mbImpl->dimension_from_handle( *pstatus_ents.rbegin() ) - 1 : 0 );
4424  for( ; start_dim >= 0; start_dim-- )
4425  {
4426  result = mbImpl->get_adjacencies( all_ents, start_dim, true, all_ents, Interface::UNION );MB_CHK_SET_ERR( result, "Failed to get adjacencies for pstatus entities" );
4427  }
4428  }
4429  if( Interface::UNION == operation )
4430  {
4431  result = mbImpl->tag_get_data( pstatus_tag(), *range_ptr, &pstatus_vals[0] );MB_CHK_SET_ERR( result, "Failed to get pstatus tag data" );
4432  for( unsigned int i = 0; i < pstatus_vals.size(); i++ )
4433  pstatus_vals[i] |= pstatus_val;
4434  }
4435  else
4436  {
4437  for( unsigned int i = 0; i < pstatus_vals.size(); i++ )
4438  pstatus_vals[i] = pstatus_val;
4439  }
4440  result = mbImpl->tag_set_data( pstatus_tag(), *range_ptr, &pstatus_vals[0] );MB_CHK_SET_ERR( result, "Failed to set pstatus tag data" );
4441 
4442  return MB_SUCCESS;
4443 }
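// ---------------------------------------------------------------------------
// Annotation (not part of the original source): a minimal usage sketch. With
// Interface::UNION the bit is OR'd into existing pstatus values; any other
// operation overwrites them. "pcomm" and "my_ents" are illustrative names:
//
//   moab::Range my_ents;  // entities that should gain the SHARED bit
//   moab::ErrorCode rval =
//       pcomm.set_pstatus_entities( my_ents, PSTATUS_SHARED,
//                                   false /*lower_dim_ents*/, false /*verts_too*/,
//                                   moab::Interface::UNION );
// ---------------------------------------------------------------------------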
4444 
4445 ErrorCode ParallelComm::set_pstatus_entities( EntityHandle* pstatus_ents,
4446  int num_ents,
4447  unsigned char pstatus_val,
4448  bool lower_dim_ents,
4449  bool verts_too,
4450  int operation )
4451 {
4452  std::vector< unsigned char > pstatus_vals( num_ents );
4453  ErrorCode result;
4454  if( lower_dim_ents || verts_too )
4455  {
4456  // In this case, call the range-based version
4457  Range tmp_range;
4458  std::copy( pstatus_ents, pstatus_ents + num_ents, range_inserter( tmp_range ) );
4459  return set_pstatus_entities( tmp_range, pstatus_val, lower_dim_ents, verts_too, operation );
4460  }
4461 
4462  if( Interface::UNION == operation )
4463  {
4464  result = mbImpl->tag_get_data( pstatus_tag(), pstatus_ents, num_ents, &pstatus_vals[0] );MB_CHK_SET_ERR( result, "Failed to get pstatus tag data" );
4465  for( unsigned int i = 0; i < (unsigned int)num_ents; i++ )
4466  pstatus_vals[i] |= pstatus_val;
4467  }
4468  else
4469  {
4470  for( unsigned int i = 0; i < (unsigned int)num_ents; i++ )
4471  pstatus_vals[i] = pstatus_val;
4472  }
4473  result = mbImpl->tag_set_data( pstatus_tag(), pstatus_ents, num_ents, &pstatus_vals[0] );MB_CHK_SET_ERR( result, "Failed to set pstatus tag data" );
4474 
4475  return MB_SUCCESS;
4476 }
4477 
4478 static size_t choose_owner_idx( const std::vector< unsigned >& proc_list )
4479 {
4480  // Try to assign owners randomly so we get a good distribution,
4481  // (note: specifying the same seed on all procs is essential)
4482  unsigned val = 0;
4483  for( size_t i = 0; i < proc_list.size(); i++ )
4484  val ^= proc_list[i];
4485  srand( (int)( val ) );
4486  return rand() % proc_list.size();
4487 }
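// ---------------------------------------------------------------------------
// Annotation (not part of the original source): the seed is the XOR of the
// sharing-proc list, which is identical on every rank, so each rank draws the
// same pseudo-random index and all ranks agree on the owner:
//
//   std::vector< unsigned > procs = { 2, 5, 9 };   // same list on every rank
//   size_t owner_idx = choose_owner_idx( procs );  // same result everywhere
// ---------------------------------------------------------------------------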
4488 
4489 struct set_tuple
4490 {
4491  unsigned idx;
4492  unsigned proc;
4493  EntityHandle handle;
4494  inline bool operator<( set_tuple other ) const
4495  {
4496  return ( idx == other.idx ) ? ( proc < other.proc ) : ( idx < other.idx );
4497  }
4498 };
4499 
4500 ErrorCode ParallelComm::resolve_shared_sets( EntityHandle file, const Tag* idtag )
4501 {
4502  // Find all sets with any of the following tags:
4503  const char* const shared_set_tag_names[] = { GEOM_DIMENSION_TAG_NAME, MATERIAL_SET_TAG_NAME, DIRICHLET_SET_TAG_NAME,
4504  NEUMANN_SET_TAG_NAME, PARALLEL_PARTITION_TAG_NAME };
4505  int num_tags = sizeof( shared_set_tag_names ) / sizeof( shared_set_tag_names[0] );
4506  Range candidate_sets;
4507  ErrorCode result = MB_FAILURE;
4508 
4509  // If we're not given an ID tag to use to globally identify sets,
4510  // then fall back to using known tag values
4511  if( !idtag )
4512  {
4513  Tag gid, tag;
4514  gid = mbImpl->globalId_tag();
4515  if( NULL != gid ) result = mbImpl->tag_get_handle( GEOM_DIMENSION_TAG_NAME, 1, MB_TYPE_INTEGER, tag );
4516  if( MB_SUCCESS == result )
4517  {
4518  for( int d = 0; d < 4; d++ )
4519  {
4520  candidate_sets.clear();
4521  const void* vals[] = { &d };
4522  result = mbImpl->get_entities_by_type_and_tag( file, MBENTITYSET, &tag, vals, 1, candidate_sets );
4523  if( MB_SUCCESS == result ) resolve_shared_sets( candidate_sets, gid );
4524  }
4525  }
4526 
4527  for( int i = 1; i < num_tags; i++ )
4528  {
4529  result = mbImpl->tag_get_handle( shared_set_tag_names[i], 1, MB_TYPE_INTEGER, tag );
4530  if( MB_SUCCESS == result )
4531  {
4532  candidate_sets.clear();
4533  result = mbImpl->get_entities_by_type_and_tag( file, MBENTITYSET, &tag, 0, 1, candidate_sets );
4534  if( MB_SUCCESS == result ) resolve_shared_sets( candidate_sets, tag );
4535  }
4536  }
4537 
4538  return MB_SUCCESS;
4539  }
4540 
4541  for( int i = 0; i < num_tags; i++ )
4542  {
4543  Tag tag;
4544  result = mbImpl->tag_get_handle( shared_set_tag_names[i], 1, MB_TYPE_INTEGER, tag, MB_TAG_ANY );
4545  if( MB_SUCCESS != result ) continue;
4546 
4547  mbImpl->get_entities_by_type_and_tag( file, MBENTITYSET, &tag, 0, 1, candidate_sets, Interface::UNION );
4548  }
4549 
4550  // Find any additional sets that contain shared entities
4551  Range::iterator hint = candidate_sets.begin();
4552  Range all_sets;
4553  mbImpl->get_entities_by_type( file, MBENTITYSET, all_sets );
4554  all_sets = subtract( all_sets, candidate_sets );
4555  Range::iterator it = all_sets.begin();
4556  while( it != all_sets.end() )
4557  {
4558  Range contents;
4559  mbImpl->get_entities_by_handle( *it, contents );
4560  contents.erase( contents.lower_bound( MBENTITYSET ), contents.end() );
4561  filter_pstatus( contents, PSTATUS_SHARED, PSTATUS_OR );
4562  if( contents.empty() )
4563  {
4564  ++it;
4565  }
4566  else
4567  {
4568  hint = candidate_sets.insert( hint, *it );
4569  it = all_sets.erase( it );
4570  }
4571  }
4572 
4573  // Find any additional sets that contain or are parents of potential shared sets
4574  Range prev_list = candidate_sets;
4575  while( !prev_list.empty() )
4576  {
4577  it = all_sets.begin();
4578  Range new_list;
4579  hint = new_list.begin();
4580  while( it != all_sets.end() )
4581  {
4582  Range contents;
4583  mbImpl->get_entities_by_type( *it, MBENTITYSET, contents );
4584  if( !intersect( prev_list, contents ).empty() )
4585  {
4586  hint = new_list.insert( hint, *it );
4587  it = all_sets.erase( it );
4588  }
4589  else
4590  {
4591  new_list.clear();
4592  mbImpl->get_child_meshsets( *it, contents );
4593  if( !intersect( prev_list, contents ).empty() )
4594  {
4595  hint = new_list.insert( hint, *it );
4596  it = all_sets.erase( it );
4597  }
4598  else
4599  {
4600  ++it;
4601  }
4602  }
4603  }
4604 
4605  candidate_sets.merge( new_list );
4606  prev_list.swap( new_list );
4607  }
4608 
4609  return resolve_shared_sets( candidate_sets, *idtag );
4610 }
4611 
4612 #ifndef NDEBUG
4613 bool is_sorted_unique( std::vector< unsigned >& v )
4614 {
4615  for( size_t i = 1; i < v.size(); i++ )
4616  if( v[i - 1] >= v[i] ) return false;
4617  return true;
4618 }
4619 #endif
4620 
4621 ErrorCode ParallelComm::resolve_shared_sets( Range& sets, Tag idtag )
4622 {
4623  ErrorCode result;
4624  const unsigned rk = proc_config().proc_rank();
4625  MPI_Comm cm = proc_config().proc_comm();
4626 
4627  // Build sharing list for all sets
4628 
4629  // Get ids for sets in a vector, to pass to gs
4630  std::vector< long > larray; // Allocate sufficient space for longs
4631  std::vector< Ulong > handles;
4632  Range tmp_sets;
4633  // The id tag can be size 4 or size 8
4634  // Based on that, convert to int or to long, similarly to what we do
4635  // for resolving shared vertices;
4636  // This code must work on 32 bit too, where long is 4 bytes, also
4637  // so test first size 4, then we should be fine
4638  DataType tag_type;
4639  result = mbImpl->tag_get_data_type( idtag, tag_type );MB_CHK_SET_ERR( result, "Failed getting tag data type" );
4640  int bytes_per_tag;
4641  result = mbImpl->tag_get_bytes( idtag, bytes_per_tag );MB_CHK_SET_ERR( result, "Failed getting number of bytes per tag" );
4642  // On 64 bits, long and int are different
4643  // On 32 bits, they are not; if size of long is 8, it is a 64 bit machine (really?)
4644 
4645  for( Range::iterator rit = sets.begin(); rit != sets.end(); ++rit )
4646  {
4647  if( sizeof( long ) == bytes_per_tag && ( ( MB_TYPE_HANDLE == tag_type ) || ( MB_TYPE_OPAQUE == tag_type ) ) )
4648  { // It is a special id tag
4649  long dum;
4650  result = mbImpl->tag_get_data( idtag, &( *rit ), 1, &dum );
4651  if( MB_SUCCESS == result )
4652  {
4653  larray.push_back( dum );
4654  handles.push_back( *rit );
4655  tmp_sets.insert( tmp_sets.end(), *rit );
4656  }
4657  }
4658  else if( 4 == bytes_per_tag )
4659  { // Must be GLOBAL_ID tag or MATERIAL_ID, etc
4660  int dum;
4661  result = mbImpl->tag_get_data( idtag, &( *rit ), 1, &dum );
4662  if( MB_SUCCESS == result )
4663  {
4664  larray.push_back( dum );
4665  handles.push_back( *rit );
4666  tmp_sets.insert( tmp_sets.end(), *rit );
4667  }
4668  }
4669  }
4670 
4671  const size_t nsets = handles.size();
4672 
4673  // Get handle array for sets
4674  // This is not true on windows machine, 64 bits: entity handle is 64 bit, long is 32
4675  // assert(sizeof(EntityHandle) <= sizeof(unsigned long));
4676 
4677  // Do communication of data
4678  gs_data::crystal_data* cd = procConfig.crystal_router();
4679  gs_data* gsd = new gs_data();
4680  result = gsd->initialize( nsets, &larray[0], &handles[0], 2, 1, 1, cd );MB_CHK_SET_ERR( result, "Failed to create gs data" );
4681 
4682  // Convert from global IDs grouped by process rank to list
4683  // of <idx, rank> pairs so that we can sort primarily
4684  // by idx and secondarily by rank (we want lists of procs for each
4685  // idx, not lists if indices for each proc).
4686  size_t ntuple = 0;
4687  for( unsigned p = 0; p < gsd->nlinfo->_np; p++ )
4688  ntuple += gsd->nlinfo->_nshared[p];
4689  std::vector< set_tuple > tuples;
4690  tuples.reserve( ntuple );
4691  size_t j = 0;
4692  for( unsigned p = 0; p < gsd->nlinfo->_np; p++ )
4693  {
4694  for( unsigned np = 0; np < gsd->nlinfo->_nshared[p]; np++ )
4695  {
4696  set_tuple t;
4697  t.idx = gsd->nlinfo->_sh_ind[j];
4698  t.proc = gsd->nlinfo->_target[p];
4699  t.handle = gsd->nlinfo->_ulabels[j];
4700  tuples.push_back( t );
4701  j++;
4702  }
4703  }
4704  std::sort( tuples.begin(), tuples.end() );
4705 
4706  // Release crystal router stuff
4707  gsd->reset();
4708  delete gsd;
4709 
4710  // Storing sharing data for each set
4711  size_t ti = 0;
4712  unsigned idx = 0;
4713  std::vector< unsigned > procs;
4714  Range::iterator si = tmp_sets.begin();
4715  while( si != tmp_sets.end() && ti < tuples.size() )
4716  {
4717  assert( idx <= tuples[ti].idx );
4718  if( idx < tuples[ti].idx ) si += ( tuples[ti].idx - idx );
4719  idx = tuples[ti].idx;
4720 
4721  procs.clear();
4722  size_t ti_init = ti;
4723  while( ti < tuples.size() && tuples[ti].idx == idx )
4724  {
4725  procs.push_back( tuples[ti].proc );
4726  ++ti;
4727  }
4728  assert( is_sorted_unique( procs ) );
4729 
4730  result = sharedSetData->set_sharing_procs( *si, procs );
4731  if( MB_SUCCESS != result )
4732  {
4733  std::cerr << "Failure at " __FILE__ ":" << __LINE__ << std::endl;
4734  std::cerr.flush();
4735  MPI_Abort( cm, 1 );
4736  }
4737 
4738  // Add this proc to list of sharing procs in correct position
4739  // so that all procs select owner based on same list
4740  std::vector< unsigned >::iterator it = std::lower_bound( procs.begin(), procs.end(), rk );
4741  assert( it == procs.end() || *it > rk );
4742  procs.insert( it, rk );
4743  size_t owner_idx = choose_owner_idx( procs );
4744  EntityHandle owner_handle;
4745  if( procs[owner_idx] == rk )
4746  owner_handle = *si;
4747  else if( procs[owner_idx] > rk )
4748  owner_handle = tuples[ti_init + owner_idx - 1].handle;
4749  else
4750  owner_handle = tuples[ti_init + owner_idx].handle;
4751  result = sharedSetData->set_owner( *si, procs[owner_idx], owner_handle );
4752  if( MB_SUCCESS != result )
4753  {
4754  std::cerr << "Failure at " __FILE__ ":" << __LINE__ << std::endl;
4755  std::cerr.flush();
4756  MPI_Abort( cm, 1 );
4757  }
4758 
4759  ++si;
4760  ++idx;
4761  }
4762 
4763  return MB_SUCCESS;
4764 }
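// ---------------------------------------------------------------------------
// Annotation (not part of the original source): outline of the routine above.
// Set ids (from idtag) and set handles are exchanged through the gs crystal
// router; the returned (index, proc, remote handle) tuples are sorted per set;
// the sharing-proc list is recorded in sharedSetData; and a single owner is
// chosen consistently on every rank via choose_owner_idx().
// ---------------------------------------------------------------------------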
4765 // populate sets with ghost entities, if necessary
4766 ErrorCode ParallelComm::augment_default_sets_with_ghosts( EntityHandle file_set )
4767 {
4768  // gather all default sets we are interested in, material, neumann, etc
4769  // we will skip geometry sets, because they are not uniquely identified with their tag value
4770  // maybe we will add another tag, like category
4771 
4772  if( procConfig.proc_size() < 2 ) return MB_SUCCESS; // nothing to do in a serial run
4773  const char* const shared_set_tag_names[] = { MATERIAL_SET_TAG_NAME, DIRICHLET_SET_TAG_NAME, NEUMANN_SET_TAG_NAME,
4774  PARALLEL_PARTITION_TAG_NAME };
4775 
4776  int num_tags = sizeof( shared_set_tag_names ) / sizeof( shared_set_tag_names[0] );
4777 
4778  Range* rangeSets = new Range[num_tags];
4779  Tag* tags = new Tag[num_tags + 1]; // one extra for global id tag, which is an int, so far
4780 
4781  int my_rank = rank();
4782  int** tagVals = new int*[num_tags];
4783  for( int i = 0; i < num_tags; i++ )
4784  tagVals[i] = NULL;
4785  ErrorCode rval;
4786 
4787  // for each tag, we keep a local map, from the value to the actual set with that value
4788  // we assume that the tag values are unique, for a given set, otherwise we
4789  // do not know to which set to add the entity
4790 
4791  typedef std::map< int, EntityHandle > MVal;
4792  typedef std::map< int, EntityHandle >::iterator itMVal;
4793  MVal* localMaps = new MVal[num_tags];
4794 
4795  for( int i = 0; i < num_tags; i++ )
4796  {
4797 
4798  rval = mbImpl->tag_get_handle( shared_set_tag_names[i], 1, MB_TYPE_INTEGER, tags[i], MB_TAG_ANY );
4799  if( MB_SUCCESS != rval ) continue;
4800  rval = mbImpl->get_entities_by_type_and_tag( file_set, MBENTITYSET, &( tags[i] ), 0, 1, rangeSets[i],
4801  Interface::UNION );MB_CHK_SET_ERR( rval, "can't get sets with a tag" );
4802 
4803  if( rangeSets[i].size() > 0 )
4804  {
4805  tagVals[i] = new int[rangeSets[i].size()];
4806  // fill up with the tag values
4807  rval = mbImpl->tag_get_data( tags[i], rangeSets[i], tagVals[i] );MB_CHK_SET_ERR( rval, "can't get set tag values" );
4808  // now for inverse mapping:
4809  for( int j = 0; j < (int)rangeSets[i].size(); j++ )
4810  {
4811  localMaps[i][tagVals[i][j]] = rangeSets[i][j];
4812  }
4813  }
4814  }
4815  // get the global id tag too
4816  tags[num_tags] = mbImpl->globalId_tag();
4817 
4818  TupleList remoteEnts;
4819  // Tuple layout: processor to send to, type of tag (0-material, 1-dirichlet,
4820  // 2-neumann, 3-partition; a type equal to num_tags carries the global id),
4821  // tag value, remote handle
4822  //
4824  int initialSize = (int)sharedEnts.size(); // estimate that on average, each shared ent
4825  // will be sent to one processor, for one tag
4826  // we will actually send only entities that are owned locally, and from those
4827  // only those that do have a special tag (material, neumann, etc)
4828  // if we exceed the capacity, we resize the tuple
4829  remoteEnts.initialize( 3, 0, 1, 0, initialSize );
4830  remoteEnts.enableWriteAccess();
4831 
4832  // now, for each owned entity, get the remote handle(s) and Proc(s), and verify if it
4833  // belongs to one of the sets; if yes, create a tuple and append it
4834 
4835  std::set< EntityHandle > own_and_sha;
4836  int ir = 0, jr = 0;
4837  for( std::set< EntityHandle >::iterator vit = sharedEnts.begin(); vit != sharedEnts.end(); ++vit )
4838  {
4839  // ghosted eh
4840  EntityHandle geh = *vit;
4841  if( own_and_sha.find( geh ) != own_and_sha.end() ) // already encountered
4842  continue;
4843  int procs[MAX_SHARING_PROCS];
4844  EntityHandle handles[MAX_SHARING_PROCS];
4845  int nprocs;
4846  unsigned char pstat;
4847  rval = get_sharing_data( geh, procs, handles, pstat, nprocs );
4848  if( rval != MB_SUCCESS )
4849  {
4850  for( int i = 0; i < num_tags; i++ )
4851  delete[] tagVals[i];
4852  delete[] tagVals;
4853 
4854  MB_CHK_SET_ERR( rval, "Failed to get sharing data" );
4855  }
4856  if( pstat & PSTATUS_NOT_OWNED ) continue; // we will send info only for entities that we own
4857  own_and_sha.insert( geh );
4858  for( int i = 0; i < num_tags; i++ )
4859  {
4860  for( int j = 0; j < (int)rangeSets[i].size(); j++ )
4861  {
4862  EntityHandle specialSet = rangeSets[i][j]; // this set has tag i, value tagVals[i][j];
4863  if( mbImpl->contains_entities( specialSet, &geh, 1 ) )
4864  {
4865  // this ghosted entity is in a special set, so form the tuple
4866  // to send to the processors that do not own this
4867  for( int k = 0; k < nprocs; k++ )
4868  {
4869  if( procs[k] != my_rank )
4870  {
4871  if( remoteEnts.get_n() >= remoteEnts.get_max() - 1 )
4872  {
4873  // resize, so we do not overflow
4874  int oldSize = remoteEnts.get_max();
4875  // increase with 50% the capacity
4876  remoteEnts.resize( oldSize + oldSize / 2 + 1 );
4877  }
4878  remoteEnts.vi_wr[ir++] = procs[k]; // send to proc
4879  remoteEnts.vi_wr[ir++] = i; // for the tags [i] (0-3)
4880  remoteEnts.vi_wr[ir++] = tagVals[i][j]; // actual value of the tag
4881  remoteEnts.vul_wr[jr++] = handles[k];
4882  remoteEnts.inc_n();
4883  }
4884  }
4885  }
4886  }
4887  }
4888  // if the local entity has a global id, send it too, so we avoid
4889  // another "exchange_tags" for global id
4890  int gid;
4891  rval = mbImpl->tag_get_data( tags[num_tags], &geh, 1, &gid );MB_CHK_SET_ERR( rval, "Failed to get global id" );
4892  if( gid != 0 )
4893  {
4894  for( int k = 0; k < nprocs; k++ )
4895  {
4896  if( procs[k] != my_rank )
4897  {
4898  if( remoteEnts.get_n() >= remoteEnts.get_max() - 1 )
4899  {
4900  // resize, so we do not overflow
4901  int oldSize = remoteEnts.get_max();
4902  // increase with 50% the capacity
4903  remoteEnts.resize( oldSize + oldSize / 2 + 1 );
4904  }
4905  remoteEnts.vi_wr[ir++] = procs[k]; // send to proc
4906  remoteEnts.vi_wr[ir++] = num_tags; // the global id tag (index num_tags)
4907  remoteEnts.vi_wr[ir++] = gid; // actual value of the tag
4908  remoteEnts.vul_wr[jr++] = handles[k];
4909  remoteEnts.inc_n();
4910  }
4911  }
4912  }
4913  }
4914 
4915 #ifndef NDEBUG
4916  if( my_rank == 1 && 1 == get_debug_verbosity() ) remoteEnts.print( " on rank 1, before augment routing" );
4917  MPI_Barrier( procConfig.proc_comm() );
4918  int sentEnts = remoteEnts.get_n();
4919  assert( ( sentEnts == jr ) && ( 3 * sentEnts == ir ) );
4920 #endif
4921  // exchange the info now, sending each tuple to its destination processor
4922  gs_data::crystal_data* cd = procConfig.crystal_router();
4923  // All communication happens here; no other MPI calls
4924  // Also, this is a collective call
4925  rval = cd->gs_transfer( 1, remoteEnts, 0 );MB_CHK_SET_ERR( rval, "Error in tuple transfer" );
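  // the crystal router sends each tuple to the rank stored in the integer field selected
  // by the last argument (0, i.e. the destination proc written above); on return,
  // remoteEnts holds the tuples that were addressed to this rank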
4926 #ifndef NDEBUG
4927  if( my_rank == 0 && 1 == get_debug_verbosity() ) remoteEnts.print( " on rank 0, after augment routing" );
4928  MPI_Barrier( procConfig.proc_comm() );
4929 #endif
4930 
4931  // now process the data received from other processors
4932  int received = remoteEnts.get_n();
4933  for( int i = 0; i < received; i++ )
4934  {
4935  // int from = ents_to_delete.vi_rd[i];
4936  EntityHandle geh = (EntityHandle)remoteEnts.vul_rd[i];
4937  int from_proc = remoteEnts.vi_rd[3 * i];
4938  if( my_rank == from_proc )
4939  std::cout << " unexpected receive from my rank " << my_rank << " during augmenting with ghosts\n ";
4940  int tag_type = remoteEnts.vi_rd[3 * i + 1];
4941  assert( ( 0 <= tag_type ) && ( tag_type <= num_tags ) );
4942  int value = remoteEnts.vi_rd[3 * i + 2];
4943  if( tag_type == num_tags )
4944  {
4945  // it is global id
4946  rval = mbImpl->tag_set_data( tags[num_tags], &geh, 1, &value );MB_CHK_SET_ERR( rval, "Error in setting gid tag" );
4947  }
4948  else
4949  {
4950  // now, based on value and tag type, see if we have that value in the map
4951  MVal& lmap = localMaps[tag_type];
4952  itMVal itm = lmap.find( value );
4953  if( itm == lmap.end() )
4954  {
4955  // the value was not found yet in the local map, so we have to create the set
4956  EntityHandle newSet;
4957  rval = mbImpl->create_meshset( MESHSET_SET, newSet );MB_CHK_SET_ERR( rval, "can't create new set" );
4958  lmap[value] = newSet;
4959  // set the tag value
4960  rval = mbImpl->tag_set_data( tags[tag_type], &newSet, 1, &value );MB_CHK_SET_ERR( rval, "can't set tag for new set" );
4961 
4962  // we also need to add the newly created set to the file set, if not null
4963  if( file_set )
4964  {
4965  rval = mbImpl->add_entities( file_set, &newSet, 1 );MB_CHK_SET_ERR( rval, "can't add new set to the file set" );
4966  }
4967  }
4968  // add the entity to the set pointed to by the map
4969  rval = mbImpl->add_entities( lmap[value], &geh, 1 );MB_CHK_SET_ERR( rval, "can't add ghost ent to the set" );
4970  }
4971  }
4972 
4973  for( int i = 0; i < num_tags; i++ )
4974  delete[] tagVals[i];
4975  delete[] tagVals;
4976  delete[] rangeSets;
4977  delete[] tags;
4978  delete[] localMaps;
4979  return MB_SUCCESS;
4980 }
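 // A minimal usage sketch (not part of this file), assuming ghost layers were already
 // exchanged and that mb is an Interface* and file_set is the set the mesh was read into:
 //   moab::ParallelComm* pcomm = moab::ParallelComm::get_pcomm( mb, 0 );
 //   moab::ErrorCode rval = pcomm->augment_default_sets_with_ghosts( file_set );
 //   if( moab::MB_SUCCESS != rval ) return rval;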
4981 ErrorCode ParallelComm::create_interface_sets( EntityHandle this_set, int resolve_dim, int shared_dim )
4982 {
4983  std::map< std::vector< int >, std::vector< EntityHandle > > proc_nvecs;
4984 
4985  // Build up the list of shared entities
4986  int procs[MAX_SHARING_PROCS];
4987  EntityHandle handles[MAX_SHARING_PROCS];
4988  ErrorCode result;
4989  int nprocs;
4990  unsigned char pstat;
4991  for( std::set< EntityHandle >::iterator vit = sharedEnts.begin(); vit != sharedEnts.end(); ++vit )
4992  {
4993  if( shared_dim != -1 && mbImpl->dimension_from_handle( *vit ) > shared_dim ) continue;
4994  result = get_sharing_data( *vit, procs, handles, pstat, nprocs );MB_CHK_SET_ERR( result, "Failed to get sharing data" );
4995  std::sort( procs, procs + nprocs );
4996  std::vector< int > tmp_procs( procs, procs + nprocs );
4997  assert( tmp_procs.size() != 2 );
4998  proc_nvecs[tmp_procs].push_back( *vit );
4999  }
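  // proc_nvecs now groups the already-shared entities by their sorted list of sharing
  // processors; each distinct processor combination will become one interface set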
5000 
5001  Skinner skinner( mbImpl );
5002  Range skin_ents[4];
5003  result = mbImpl->get_entities_by_dimension( this_set, resolve_dim, skin_ents[resolve_dim] );MB_CHK_SET_ERR( result, "Failed to get skin entities by dimension" );
5004  result =
5005  skinner.find_skin( this_set, skin_ents[resolve_dim], false, skin_ents[resolve_dim - 1], 0, true, true, true );MB_CHK_SET_ERR( result, "Failed to find skin" );
5006  if( shared_dim > 1 )
5007  {
5008  result = mbImpl->get_adjacencies( skin_ents[resolve_dim - 1], resolve_dim - 2, true, skin_ents[resolve_dim - 2],
5009  Interface::UNION );MB_CHK_SET_ERR( result, "Failed to get skin adjacencies" );
5010  }
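  // skin_ents[d] now holds candidate interface entities per dimension: the entities of
  // resolve_dim, their skin, and (when shared_dim > 1) the lower-dimensional adjacencies;
  // get_proc_nvecs below uses these to extend proc_nvecs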
5011 
5012  result = get_proc_nvecs( resolve_dim, shared_dim, skin_ents, proc_nvecs );
5013 
5014  return create_interface_sets( proc_nvecs );
5015 }
5016 
5017 ErrorCode ParallelComm::create_interface_sets( std::map< std::vector< int >, std::vector< EntityHandle > >& proc_nvecs )
5018 {
5019  if( proc_nvecs.empty() ) return MB_SUCCESS;
5020 
5021  int proc_ids[MAX_SHARING_PROCS];
5022  EntityHandle proc_handles[MAX_SHARING_PROCS];
5023  Tag shp_tag, shps_tag, shh_tag, shhs_tag, pstat_tag;
5024  ErrorCode result = get_shared_proc_tags( shp_tag, shps_tag, shh_tag, shhs_tag, pstat_tag );MB_CHK_SET_ERR( result, "Failed to get shared proc tags in create_interface_sets" );
5025  Range::iterator rit;
5026 
5027  // Create interface sets, tag them, and tag their contents with iface set tag
5028  std::vector< unsigned char > pstatus;
5029  for( std::map< std::vector< int >, std::vector< EntityHandle > >::iterator vit = proc_nvecs.begin();
5030  vit != proc_nvecs.end(); ++vit )
5031  {
5032  // Create the set
5033  EntityHandle new_set;
5034  result = mbImpl->create_meshset( MESHSET_SET, new_set );MB_CHK_SET_ERR( result, "Failed to create interface set" );
5035  interfaceSets.insert( new_set );
5036 
5037  // Add entities
5038  assert( !vit->second.empty() );
5039  result = mbImpl->add_entities( new_set, &( vit->second )[0], ( vit->second ).size() );MB_CHK_SET_ERR( result, "Failed to add entities to interface set" );
5040  // Tag set with the proc rank(s)
5041  if( vit->first.size() == 1 )
5042  {
5043  assert( ( vit->first )[0] != (int)procConfig.proc_rank() );
5044  result = mbImpl->tag_set_data( shp_tag, &new_set, 1, &( vit->first )[0] );MB_CHK_SET_ERR( result, "Failed to tag interface set with procs" );
5045  proc_handles[0] = 0;
5046  result = mbImpl->tag_set_data( shh_tag, &new_set, 1, proc_handles );MB_CHK_SET_ERR( result, "Failed to tag interface set with procs" );
5047  }
5048  else
5049  {
5050  // Pad tag data out to MAX_SHARING_PROCS with -1
5051  if( vit->first.size() > MAX_SHARING_PROCS )
5052  {
5053  std::cerr << "Exceeded MAX_SHARING_PROCS for " << CN::EntityTypeName( TYPE_FROM_HANDLE( new_set ) )
5054  << ' ' << ID_FROM_HANDLE( new_set ) << " on process " << proc_config().proc_rank()
5055  << std::endl;
5056  std::cerr.flush();
5057  MPI_Abort( proc_config().proc_comm(), 66 );
5058  }
5059  // assert(vit->first.size() <= MAX_SHARING_PROCS);
5060  std::copy( vit->first.begin(), vit->first.end(), proc_ids );
5061  std::fill( proc_ids + vit->first.size(), proc_ids + MAX_SHARING_PROCS, -1 );
5062  result = mbImpl->tag_set_data( shps_tag, &new_set, 1, proc_ids );MB_CHK_SET_ERR( result, "Failed to tag interface set with procs" );
5063  unsigned int ind = std::find( proc_ids, proc_ids + vit->first.size(), procConfig.proc_rank() ) - proc_ids;
5064  assert( ind < vit->first.size() );
5065  std::fill( proc_handles, proc_handles + MAX_SHARING_PROCS, 0 );
5066  proc_handles[ind] = new_set;
5067  result = mbImpl->tag_set_data( shhs_tag, &new_set, 1, proc_handles );MB_CHK_SET_ERR( result, "Failed to tag interface set with procs" );
5068  }
5069 
5070  // Get the owning proc, then set the pstatus tag on iface set
5071  int min_proc = ( vit->first )[0];
5072  unsigned char pval = ( PSTATUS_SHARED | PSTATUS_INTERFACE );
5073  if( min_proc < (int)procConfig.proc_rank() ) pval |= PSTATUS_NOT_OWNED;
5074  if( vit->first.size() > 1 ) pval |= PSTATUS_MULTISHARED;
5075  result = mbImpl->tag_set_data( pstat_tag, &new_set, 1, &pval );MB_CHK_SET_ERR( result, "Failed to tag interface set with pstatus" );
5076 
5077  // Tag the vertices with the same thing
5078  pstatus.clear();
5079  std::vector< EntityHandle > verts;
5080  for( std::vector< EntityHandle >::iterator v2it = ( vit->second ).begin(); v2it != ( vit->second ).end();
5081  ++v2it )
5082  if( mbImpl->type_from_handle( *v2it ) == MBVERTEX ) verts.push_back( *v2it );
5083  pstatus.resize( verts.size(), pval );
5084  if( !verts.empty() )
5085  {
5086  result = mbImpl->tag_set_data( pstat_tag, &verts[0], verts.size(), &pstatus[0] );MB_CHK_SET_ERR( result, "Failed to tag interface set vertices with pstatus" );
5087  }
5088  }
5089 
5090  return MB_SUCCESS;
5091 }
5092 
5093 ErrorCode ParallelComm::create_iface_pc_links()
5094 {
5095  // Now that we've resolved the entities in the iface sets,
5096  // set parent/child links between the iface sets
5097 
5098  // First tag all entities in the iface sets
5099  Tag tmp_iface_tag;
5100  EntityHandle tmp_iface_set = 0;
5101  ErrorCode result = mbImpl->tag_get_handle( "__tmp_iface", 1, MB_TYPE_HANDLE, tmp_iface_tag,
5102  MB_TAG_DENSE | MB_TAG_CREAT, &tmp_iface_set );MB_CHK_SET_ERR( result, "Failed to create temporary interface set tag" );
5103 
5104  Range iface_ents;
5105  std::vector< EntityHandle > tag_vals;
5106  Range::iterator rit;
5107 
5108  for( rit = interfaceSets.begin(); rit != interfaceSets.end(); ++rit )
5109  {
5110  // tag entities with interface set
5111  iface_ents.clear();
5112  result = mbImpl->get_entities_by_handle( *rit, iface_ents );MB_CHK_SET_ERR( result, "Failed to get entities in interface set" );
5113 
5114  if( iface_ents.empty() ) continue;
5115 
5116  tag_vals.resize( iface_ents.size() );
5117  std::fill( tag_vals.begin(), tag_vals.end(), *rit );
5118  result = mbImpl->