Mesh Oriented datABase  (version 5.5.1)
An array-based unstructured mesh library
iMOAB.cpp
1 /** \file iMOAB.cpp
2  */
3 
4 #include "moab/MOABConfig.h"
5 #include "moab/Core.hpp"
6 
7 #ifdef MOAB_HAVE_MPI
8 #include "moab_mpi.h"
9 #include "moab/ParallelComm.hpp"
10 #include "moab/ParCommGraph.hpp"
13 #endif
14 #include "DebugOutput.hpp"
15 #include "moab/iMOAB.h"
16 
17 /* this is needed because of direct access to hdf5/mhdf */
18 #ifdef MOAB_HAVE_HDF5
19 #include "mhdf.h"
20 #include <H5Tpublic.h>
21 #endif
22 
23 #include "moab/CartVect.hpp"
24 #include "MBTagConventions.hpp"
25 #include "moab/MeshTopoUtil.hpp"
26 #include "moab/ReadUtilIface.hpp"
27 #include "moab/MergeMesh.hpp"
28 
29 #ifdef MOAB_HAVE_TEMPESTREMAP
30 #include "STLStringHelper.h"
32 
33 #include "moab/Remapping/TempestRemapper.hpp"
34 #include "moab/Remapping/TempestOnlineMap.hpp"
35 #endif
36 
37 // C++ includes
38 #include <cassert>
39 #include <sstream>
40 #include <iostream>
41 
42 using namespace moab;
43 
44 // #define VERBOSE
45 
46 // global variables ; should they be organized in a structure, for easier references?
47 // or how do we keep them global?
48 
49 #ifdef __cplusplus
50 extern "C" {
51 #endif
52 
53 #ifdef MOAB_HAVE_TEMPESTREMAP
54 struct TempestMapAppData
55 {
56  moab::TempestRemapper* remapper;
57  std::map< std::string, moab::TempestOnlineMap* > weightMaps;
58  iMOAB_AppID pid_src;
59  iMOAB_AppID pid_dest;
60  int num_src_ghost_layers; // number of ghost layers
61  int num_tgt_ghost_layers; // number of ghost layers
62 };
63 #endif
64 
65 struct appData
66 {
67  EntityHandle file_set; // primary file set containing all entities of interest
68  int global_id; // external component id, unique for application
69  std::string name; // name of the application
70  Range all_verts; // local vertices would be all_verts if no ghosting was required
71  Range local_verts; // it could include shared, but not owned at the interface
72  Range owned_verts; // owned_verts <= local_verts <= all_verts
73  Range ghost_vertices; // locally ghosted from other processors
74  Range primary_elems; // all primary entities (owned + ghosted)
75  Range owned_elems; // only owned entities
76  Range ghost_elems; // only ghosted entities (filtered)
77  int dimension; // 2 or 3, dimension of primary elements (redundant?)
78  long num_global_elements; // union of all elements in primary_elems; either from hdf5
79  // reading or from reduce
80  long num_global_vertices; // union of all nodes, after sharing is resolved; it could be
81  // determined from hdf5 reading
82  int num_ghost_layers; // number of ghost layers
83  Range mat_sets; // material sets (blocks)
84  std::map< int, int > matIndex; // map from global block id to index in mat_sets
85  Range neu_sets; // Neumann (surface BC) sets
86  Range diri_sets; // Dirichlet (vertex BC) sets
87  std::map< std::string, Tag > tagMap;
88  std::vector< Tag > tagList;
89  bool point_cloud; // true when the mesh has vertices but no primary elements
90  bool is_fortran;
91 
92 #ifdef MOAB_HAVE_MPI
93  ParallelComm* pcomm; // ParallelComm instance for this application
94  // constructor for this ParCommGraph takes the joint comm and the MPI groups for each
95  // application
96  std::map< int, ParCommGraph* > pgraph; // map from context () to the parcommgraph*
97 #endif
98 
99 #ifdef MOAB_HAVE_TEMPESTREMAP
100  EntityHandle secondary_file_set; // secondary file set (typically a child set like covering mesh)
101  // so we assume only one covering set for all maps on this intx app
102  // we can have multiple weightMaps, but only one coverage set for all maps
103  TempestMapAppData tempestData;
104  std::map< std::string, std::string > metadataMap;
105 #endif
106 };
107 
108 struct GlobalContext
109 {
110  // are there reasons to have multiple moab inits? Is ref count needed?
111  Interface* MBI;
112  // we should also have the default tags stored, initialized
113  Tag material_tag, neumann_tag, dirichlet_tag,
114  globalID_tag; // material, neumann, dirichlet, globalID
115  int refCountMB;
116  int iArgc;
117  iMOAB_String* iArgv;
118 
119  std::map< std::string, int > appIdMap; // from app string (uppercase) to app id
120  std::map< int, appData > appDatas; // the same order as pcomms
121  int globalrank, worldprocs;
122  bool MPI_initialized;
123 
124  GlobalContext()
125  {
126  MBI = 0;
127  refCountMB = 0;
128  }
129 };
130 
131 static struct GlobalContext context;
132 
133 ErrCode iMOAB_Initialize( int argc, iMOAB_String* argv )
134 {
135  if( argc ) IMOAB_CHECKPOINTER( argv, 1 );
136 
137  context.iArgc = argc;
138  context.iArgv = argv; // shallow copy
139 
140  if( 0 == context.refCountMB )
141  {
142  context.MBI = new( std::nothrow ) moab::Core;
143  // retrieve the default tags
144  const char* const shared_set_tag_names[] = { MATERIAL_SET_TAG_NAME, NEUMANN_SET_TAG_NAME,
145  DIRICHLET_SET_TAG_NAME, GLOBAL_ID_TAG_NAME };
146  // materials (blocks), surface-BC (neumann), vertex-BC (Dirichlet), global-id
147  Tag gtags[4];
148  for( int i = 0; i < 4; i++ )
149  {
150  MB_CHK_ERR(
151  context.MBI->tag_get_handle( shared_set_tag_names[i], 1, MB_TYPE_INTEGER, gtags[i], MB_TAG_ANY ) );
152  }
153  // now set the relevant (standard) tags for future use
154  context.material_tag = gtags[0];
155  context.neumann_tag = gtags[1];
156  context.dirichlet_tag = gtags[2];
157  context.globalID_tag = gtags[3];
158 
159  context.MPI_initialized = false;
160 #ifdef MOAB_HAVE_MPI
161  int flagInit;
162  MPI_Initialized( &flagInit );
163 
164  if( flagInit && !context.MPI_initialized )
165  {
166  MPI_Comm_size( MPI_COMM_WORLD, &context.worldprocs );
167  MPI_Comm_rank( MPI_COMM_WORLD, &context.globalrank );
168  context.MPI_initialized = true;
169  }
170 #endif
171  }
172 
173  context.refCountMB++;
174  return moab::MB_SUCCESS;
175 }
176 
177 ErrCode iMOAB_InitializeFortran()
178 {
179  return iMOAB_Initialize( 0, 0 );
180 }
181 
182 ErrCode iMOAB_Finalize()
183 {
184  context.refCountMB--;
185 
186  if( 0 == context.refCountMB )
187  {
188  delete context.MBI;
189  }
190 
191  return MB_SUCCESS;
192 }
193 
194 // A string to integer hash function that is fast and robust
195 // Based on the Fowler–Noll–Vo (or FNV) algorithm
196 static int apphash( const iMOAB_String str, int identifier = 0 )
197 {
198  // A minimal perfect hash function (MPHF)
199  std::string appstr( str );
200  // Fowler–Noll–Vo (or FNV) is a non-cryptographic hash function created
201  // by Glenn Fowler, Landon Curt Noll, and Kiem-Phong Vo.
202  // Ref: https://en.wikipedia.org/wiki/Fowler%E2%80%93Noll%E2%80%93Vo_hash_function
203  unsigned int h = 2166136261u; // FNV offset basis
204  for( char c : appstr )
205  {
206  h ^= static_cast< unsigned char >( c );
207  h *= 16777619u; // FNV prime
208  }
209  // Incorporate the integer into the hash
210  if( identifier )
211  {
212  h ^= static_cast< unsigned int >( identifier & 0xFFFFFFFF );
213  h *= 16777619u; // FNV prime again
214  }
215  return static_cast< int >( h & 0x7FFFFFFF ); // Mask to ensure positive value
216 }
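// Usage sketch (not part of the original source): how the FNV-based hash above turns an
// application name plus an external component id into a stable, positive application id.
// The names "ATM"/"OCN" and the ids 5/17 are made-up example values.
//
//   char atm_name[] = "ATM";
//   int atm_pid = apphash( atm_name, 5 );   // deterministic: the same (name, id) pair always maps to the same pid
//   char ocn_name[] = "OCN";
//   int ocn_pid = apphash( ocn_name, 17 );  // different inputs are very unlikely to collide
//   assert( atm_pid >= 0 && atm_pid == apphash( atm_name, 5 ) );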
217 
218 ErrCode iMOAB_RegisterApplication( const iMOAB_String app_name,
219 #ifdef MOAB_HAVE_MPI
220  MPI_Comm* comm,
221 #endif
222  int* compid,
223  iMOAB_AppID pid )
224 {
225  IMOAB_CHECKPOINTER( app_name, 1 );
226 #ifdef MOAB_HAVE_MPI
227  IMOAB_CHECKPOINTER( comm, 2 );
228  IMOAB_CHECKPOINTER( compid, 3 );
229 #else
230  IMOAB_CHECKPOINTER( compid, 2 );
231 #endif
232 
233  // will create a parallel comm for this application too, so there will be a
234  // mapping from *pid to file set and to parallel comm instances
235  std::string name( app_name );
236 
237  if( context.appIdMap.find( name ) != context.appIdMap.end() )
238  {
239  std::cout << " application " << name << " already registered \n";
240  return moab::MB_FAILURE;
241  }
242 
243  // trivial hash: just use the id that the user provided and assume it is unique
244  // *pid = *compid;
245  // hash: compute a unique hash as a combination of the appname and the component id
246  *pid = apphash( app_name, *compid );
247  // store map of application name and the ID we just generated
248  context.appIdMap[name] = *pid;
249  int rankHere = 0;
250 #ifdef MOAB_HAVE_MPI
251  MPI_Comm_rank( *comm, &rankHere );
252 #endif
253  if( !rankHere )
254  std::cout << " application " << name << " with ID = " << *pid << " and external id: " << *compid
255  << " is registered now \n";
256  if( *compid <= 0 )
257  {
258  std::cout << " convention for external application is to have its id positive \n";
259  return moab::MB_FAILURE;
260  }
261 
262  // create now the file set that will be used for loading the model in
263  EntityHandle file_set;
264  ErrorCode rval = context.MBI->create_meshset( MESHSET_SET, file_set );MB_CHK_ERR( rval );
265 
266  appData app_data;
267  app_data.file_set = file_set;
268  app_data.global_id = *compid; // will be used mostly for par comm graph
269  app_data.name = name; // save the name of application
270 
271 #ifdef MOAB_HAVE_TEMPESTREMAP
272  app_data.tempestData.remapper = nullptr; // Only allocate as needed
273  app_data.tempestData.num_src_ghost_layers = 0;
274  app_data.tempestData.num_tgt_ghost_layers = 0;
275 #endif
276 
277  // set some default values
278  app_data.num_ghost_layers = 0;
279  app_data.point_cloud = false;
280  app_data.is_fortran = false;
281 #ifdef MOAB_HAVE_TEMPESTREMAP
282  app_data.secondary_file_set = app_data.file_set;
283 #endif
284 
285 #ifdef MOAB_HAVE_MPI
286  if( *comm ) app_data.pcomm = new ParallelComm( context.MBI, *comm );
287 #endif
288  context.appDatas[*pid] = app_data; // app_data.file_set becomes the file set of interest for this *pid
289  return moab::MB_SUCCESS;
290 }
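// Usage sketch (not part of the original source): registering a component from a C/C++ driver,
// assuming an MPI-enabled build (the non-MPI build omits the communicator argument). The name
// "ATM", the external id 5 and the variable names are made-up examples.
//
//   char atm[] = "ATM";             // application name
//   int appID  = -1;                // receives the hashed application id
//   int compid = 5;                 // external component id; must be positive (checked above)
//   MPI_Comm comm = MPI_COMM_WORLD;
//   ErrCode rc = iMOAB_RegisterApplication( atm, &comm, &compid, &appID );
//   // from here on, &appID is the iMOAB_AppID handle passed to the other iMOAB_* calls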
291 
292 ErrCode iMOAB_RegisterApplicationFortran( const iMOAB_String app_name,
293 #ifdef MOAB_HAVE_MPI
294  int* comm,
295 #endif
296  int* compid,
297  iMOAB_AppID pid )
298 {
299  IMOAB_CHECKPOINTER( app_name, 1 );
300 #ifdef MOAB_HAVE_MPI
301  IMOAB_CHECKPOINTER( comm, 2 );
302  IMOAB_CHECKPOINTER( compid, 3 );
303 #else
304  IMOAB_CHECKPOINTER( compid, 2 );
305 #endif
306 
307  ErrCode err;
308  assert( app_name != nullptr );
309  std::string name( app_name );
310 
311 #ifdef MOAB_HAVE_MPI
312  MPI_Comm ccomm;
313  if( comm )
314  {
315  // Convert from Fortran communicator to a C communicator
316  // NOTE: see transfer of handles at
317  // http://www.mpi-forum.org/docs/mpi-2.2/mpi22-report/node361.htm
318  ccomm = MPI_Comm_f2c( (MPI_Fint)*comm );
319  }
320 #endif
321 
322  // now call C style registration function:
323  err = iMOAB_RegisterApplication( app_name,
324 #ifdef MOAB_HAVE_MPI
325  &ccomm,
326 #endif
327  compid, pid );
328 
329  // Now that we have created the application context, store that
330  // the application being registered is from a Fortran context
331  context.appDatas[*pid].is_fortran = true;
332 
333  return err;
334 }
335 
336 ErrCode iMOAB_DeregisterApplication( iMOAB_AppID pid )
337 {
338  // the file set and parallel comm are stored per application, indexed by *pid
339  // assume we did not delete anything yet
340  // *pid will not be reused if we register another application
341  auto appIterator = context.appDatas.find( *pid );
342  // ensure that the application exists before we attempt to delete it
343  if( appIterator == context.appDatas.end() ) return MB_FAILURE;
344 
345  // we found the application
346  appData& data = appIterator->second;
347  int rankHere = 0;
348 #ifdef MOAB_HAVE_MPI
349  rankHere = data.pcomm->rank();
350 #endif
351  if( !rankHere )
352  std::cout << " application with ID: " << *pid << " global id: " << data.global_id << " name: " << data.name
353  << " is de-registered now \n";
354 
355  EntityHandle fileSet = data.file_set;
356  // get all entities part of the file set
357  Range fileents;
358  MB_CHK_ERR( context.MBI->get_entities_by_handle( fileSet, fileents, /*recursive */ true ) );
359  fileents.insert( fileSet );
360  MB_CHK_ERR( context.MBI->get_entities_by_type( fileSet, MBENTITYSET, fileents ) ); // append all mesh sets
361 
362 #ifdef MOAB_HAVE_TEMPESTREMAP
363  if( data.tempestData.remapper ) delete data.tempestData.remapper;
364  if( data.tempestData.weightMaps.size() ) data.tempestData.weightMaps.clear();
365 #endif
366 
367 #ifdef MOAB_HAVE_MPI
368  // NOTE: we could get the pco also with the following workflow.
369  // ParallelComm * pcomm = ParallelComm::get_pcomm(context.MBI, *pid);
370 
371  auto& pargs = data.pgraph;
372  // free the parallel comm graphs associated with this app
373  for( auto mt = pargs.begin(); mt != pargs.end(); ++mt )
374  {
375  ParCommGraph* pgr = mt->second;
376  if( pgr != nullptr )
377  {
378  delete pgr;
379  pgr = nullptr;
380  }
381  }
382  // now free the ParallelComm resources
383  if( data.pcomm )
384  {
385  delete data.pcomm;
386  data.pcomm = nullptr;
387  }
388 #endif
389 
390  // delete first all except vertices
391  Range vertices = fileents.subset_by_type( MBVERTEX );
392  Range noverts = subtract( fileents, vertices );
393 
394  MB_CHK_ERR( context.MBI->delete_entities( noverts ) );
395  // now retrieve connected elements that still exist (maybe in other sets, pids?)
396  Range adj_ents_left;
397  MB_CHK_ERR( context.MBI->get_adjacencies( vertices, 1, false, adj_ents_left, Interface::UNION ) );
398  MB_CHK_ERR( context.MBI->get_adjacencies( vertices, 2, false, adj_ents_left, Interface::UNION ) );
399  MB_CHK_ERR( context.MBI->get_adjacencies( vertices, 3, false, adj_ents_left, Interface::UNION ) );
400 
401  if( !adj_ents_left.empty() )
402  {
403  Range conn_verts;
404  MB_CHK_ERR( context.MBI->get_connectivity( adj_ents_left, conn_verts ) );
405  vertices = subtract( vertices, conn_verts );
406  }
407 
408  MB_CHK_ERR( context.MBI->delete_entities( vertices ) );
409 
410  for( auto mit = context.appIdMap.begin(); mit != context.appIdMap.end(); ++mit )
411  {
412  if( *pid == mit->second )
413  {
414 #ifdef MOAB_HAVE_MPI
415  if( data.pcomm )
416  {
417  delete data.pcomm;
418  data.pcomm = nullptr;
419  }
420 #endif
421  context.appIdMap.erase( mit );
422  break;
423  }
424  }
425 
426  // now we can finally delete the application data itself
427  context.appDatas.erase( appIterator );
428 
429  return moab::MB_SUCCESS;
430 }
431 
432 ErrCode iMOAB_DeregisterApplicationFortran( iMOAB_AppID pid )
433 {
434  // release any Fortran specific allocations here before we pass it on
435  context.appDatas[*pid].is_fortran = false;
436 
437  // release all datastructure allocations
438  return iMOAB_DeregisterApplication( pid );
439 }
440 
441 ErrCode iMOAB_ReadHeaderInfo( const iMOAB_String filename,
442  int* num_global_vertices,
443  int* num_global_elements,
444  int* num_dimension,
445  int* num_parts )
446 {
447  IMOAB_CHECKPOINTER( filename, 1 );
448  IMOAB_ASSERT( strlen( filename ), "Invalid filename length." );
449 
450 #ifdef MOAB_HAVE_HDF5
451  std::string filen( filename );
452 
453  int edges = 0;
454  int faces = 0;
455  int regions = 0;
456  if( num_global_vertices ) *num_global_vertices = 0;
457  if( num_global_elements ) *num_global_elements = 0;
458  if( num_dimension ) *num_dimension = 0;
459  if( num_parts ) *num_parts = 0;
460 
461  mhdf_FileHandle file;
462  mhdf_Status status;
463  unsigned long max_id;
464  struct mhdf_FileDesc* data;
465 
466  file = mhdf_openFile( filen.c_str(), 0, &max_id, -1, &status );
467 
468  if( mhdf_isError( &status ) )
469  {
470  fprintf( stderr, "%s: %s\n", filename, mhdf_message( &status ) );
471  return moab::MB_FAILURE;
472  }
473 
474  data = mhdf_getFileSummary( file, H5T_NATIVE_ULONG, &status,
475  1 ); // will use extra set info; will get parallel partition tag info too!
476 
477  if( mhdf_isError( &status ) )
478  {
479  fprintf( stderr, "%s: %s\n", filename, mhdf_message( &status ) );
480  return moab::MB_FAILURE;
481  }
482 
483  if( num_dimension ) *num_dimension = data->nodes.vals_per_ent;
484  if( num_global_vertices ) *num_global_vertices = (int)data->nodes.count;
485 
486  for( int i = 0; i < data->num_elem_desc; i++ )
487  {
488  struct mhdf_ElemDesc* el_desc = &( data->elems[i] );
489  struct mhdf_EntDesc* ent_d = &( el_desc->desc );
490 
491  if( 0 == strcmp( el_desc->type, mhdf_EDGE_TYPE_NAME ) )
492  {
493  edges += ent_d->count;
494  }
495 
496  if( 0 == strcmp( el_desc->type, mhdf_TRI_TYPE_NAME ) )
497  {
498  faces += ent_d->count;
499  }
500 
501  if( 0 == strcmp( el_desc->type, mhdf_QUAD_TYPE_NAME ) )
502  {
503  faces += ent_d->count;
504  }
505 
506  if( 0 == strcmp( el_desc->type, mhdf_POLYGON_TYPE_NAME ) )
507  {
508  faces += ent_d->count;
509  }
510 
511  if( 0 == strcmp( el_desc->type, mhdf_TET_TYPE_NAME ) )
512  {
513  regions += ent_d->count;
514  }
515 
516  if( 0 == strcmp( el_desc->type, mhdf_PYRAMID_TYPE_NAME ) )
517  {
518  regions += ent_d->count;
519  }
520 
521  if( 0 == strcmp( el_desc->type, mhdf_PRISM_TYPE_NAME ) )
522  {
523  regions += ent_d->count;
524  }
525 
526  if( 0 == strcmp( el_desc->type, mdhf_KNIFE_TYPE_NAME ) )
527  {
528  regions += ent_d->count;
529  }
530 
531  if( 0 == strcmp( el_desc->type, mdhf_HEX_TYPE_NAME ) )
532  {
533  regions += ent_d->count;
534  }
535 
536  if( 0 == strcmp( el_desc->type, mhdf_POLYHEDRON_TYPE_NAME ) )
537  {
538  regions += ent_d->count;
539  }
540 
541  if( 0 == strcmp( el_desc->type, mhdf_SEPTAHEDRON_TYPE_NAME ) )
542  {
543  regions += ent_d->count;
544  }
545  }
546 
547  if( num_parts ) *num_parts = data->numEntSets[0];
548 
549  // is this required?
550  if( edges > 0 )
551  {
552  if( num_dimension ) *num_dimension = 1; // I don't think it will ever return 1
553  if( num_global_elements ) *num_global_elements = edges;
554  }
555 
556  if( faces > 0 )
557  {
558  if( num_dimension ) *num_dimension = 2;
559  if( num_global_elements ) *num_global_elements = faces;
560  }
561 
562  if( regions > 0 )
563  {
564  if( num_dimension ) *num_dimension = 3;
565  if( num_global_elements ) *num_global_elements = regions;
566  }
567 
568  mhdf_closeFile( file, &status );
569 
570  free( data );
571 
572 #else
573  std::cout << filename
574  << ": Please reconfigure with HDF5. Cannot retrieve header information for file "
575  "formats other than a h5m file.\n";
576  if( num_global_vertices ) *num_global_vertices = 0;
577  if( num_global_elements ) *num_global_elements = 0;
578  if( num_dimension ) *num_dimension = 0;
579  if( num_parts ) *num_parts = 0;
580 #endif
581 
582  return moab::MB_SUCCESS;
583 }
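// Usage sketch (not part of the original source): querying an h5m header before loading it,
// assuming an HDF5-enabled build; the file name is a placeholder. Any of the output pointers
// may be null, since each one is checked before being written above.
//
//   char h5file[] = "mesh.h5m";
//   int nverts = 0, nelems = 0, ndim = 0, nparts = 0;
//   ErrCode rc = iMOAB_ReadHeaderInfo( h5file, &nverts, &nelems, &ndim, &nparts );
//   // ndim reports the highest element dimension found; nparts comes from the partition sets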
584 
585 ErrCode iMOAB_LoadMesh( iMOAB_AppID pid,
586  const iMOAB_String filename,
587  const iMOAB_String read_options,
588  int* num_ghost_layers )
589 {
590  IMOAB_CHECKPOINTER( filename, 2 );
591  IMOAB_ASSERT( strlen( filename ), "Invalid filename length." );
592  IMOAB_CHECKPOINTER( num_ghost_layers, 4 );
593 
594  // make sure we use the file set and pcomm associated with the *pid
595  std::ostringstream newopts;
596  if( read_options ) newopts << read_options;
597 
598 #ifdef MOAB_HAVE_MPI
599 
600  if( context.MPI_initialized )
601  {
602  if( context.worldprocs > 1 )
603  {
604  std::string opts( ( read_options ? read_options : "" ) );
605  std::string pcid( "PARALLEL_COMM=" );
606  std::size_t found = opts.find( pcid );
607 
608  if( found != std::string::npos )
609  {
610  std::cerr << " cannot specify PARALLEL_COMM option, it is implicit \n";
611  return moab::MB_FAILURE;
612  }
613 
614  // in serial, apply PARALLEL_COMM option only for h5m files; it does not work for .g
615  // files (used in test_remapping)
616  std::string filen( filename );
617  std::string::size_type idx = filen.rfind( '.' );
618 
619  if( idx != std::string::npos )
620  {
621  ParallelComm* pco = context.appDatas[*pid].pcomm;
622  std::string extension = filen.substr( idx + 1 );
623  if( ( extension == std::string( "h5m" ) ) || ( extension == std::string( "nc" ) ) )
624  newopts << ";;PARALLEL_COMM=" << pco->get_id();
625  }
626 
627  if( *num_ghost_layers >= 1 )
628  {
629  // if we want ghosts, we also request additional entities (the trailing ".3" below),
630  // because those additional entities can be edges or faces that are part of the Neumann sets
631  std::string pcid2( "PARALLEL_GHOSTS=" );
632  std::size_t found2 = opts.find( pcid2 );
633 
634  if( found2 != std::string::npos )
635  {
636  std::cout << " PARALLEL_GHOSTS option is already specified, ignore passed "
637  "number of layers \n";
638  }
639  else
640  {
641  // dimension of primary entities is 3 here, but it could be 2 for climate
642  // meshes; we would need to pass PARALLEL_GHOSTS explicitly for 2d meshes, for
643  // example: ";PARALLEL_GHOSTS=2.0.1"
644  newopts << ";PARALLEL_GHOSTS=3.0." << *num_ghost_layers << ".3";
645  }
646  }
647  }
648  }
649 #else
650  IMOAB_ASSERT( *num_ghost_layers == 0, "Cannot provide ghost layers in serial." );
651 #endif
652 
653  // Now let us actually load the MOAB file with the appropriate read options
654  ErrorCode rval = context.MBI->load_file( filename, &context.appDatas[*pid].file_set, newopts.str().c_str() );MB_CHK_ERR( rval );
655 
656 #ifdef VERBOSE
657  // some debugging stuff
658  std::ostringstream outfile;
659 #ifdef MOAB_HAVE_MPI
660  ParallelComm* pco = context.appDatas[*pid].pcomm;
661  int rank = pco->rank();
662  int nprocs = pco->size();
663  outfile << "TaskMesh_n" << nprocs << "." << rank << ".h5m";
664 #else
665  outfile << "TaskMesh_n1.0.h5m";
666 #endif
667  // the mesh contains ghosts too, but they are not part of mat/neumann set
668  // write in serial the file, to see what tags are missing
669  rval = context.MBI->write_file( outfile.str().c_str() );MB_CHK_ERR( rval ); // everything on current task, written in serial
670 #endif
671 
672  // Update ghost layer information
673  context.appDatas[*pid].num_ghost_layers = *num_ghost_layers;
674 
675  // Update mesh information
676  return iMOAB_UpdateMeshInfo( pid );
677 }
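// Usage sketch (not part of the original source): loading a partitioned h5m file with one layer
// of ghost cells; appID and h5file continue the earlier sketches. The options below are typical
// MOAB parallel read options; PARALLEL_COMM and PARALLEL_GHOSTS are appended internally, as the
// code above shows, so they must not be passed by the caller.
//
//   char ropts[] = "PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION;PARALLEL_RESOLVE_SHARED_ENTS";
//   int nghosts = 1;
//   ErrCode rc = iMOAB_LoadMesh( &appID, h5file, ropts, &nghosts );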
678 
679 ErrCode internal_WriteMesh( iMOAB_AppID pid,
680  const iMOAB_String filename,
681  const iMOAB_String write_options,
682  bool primary_set = true )
683 {
684  IMOAB_CHECKPOINTER( filename, 2 );
685  IMOAB_ASSERT( strlen( filename ), "Invalid filename length." );
686 
687  appData& data = context.appDatas[*pid];
688  EntityHandle fileSet = data.file_set;
689 
690  std::ostringstream newopts;
691 #ifdef MOAB_HAVE_MPI
692  std::string write_opts( ( write_options ? write_options : "" ) );
693  std::string pcid( "PARALLEL_COMM=" );
694 
695  if( write_opts.find( pcid ) != std::string::npos )
696  {
697  std::cerr << " cannot specify PARALLEL_COMM option, it is implicit \n";
698  return moab::MB_FAILURE;
699  }
700 
701  // if write in parallel, add pc option, to be sure about which ParallelComm instance is used
702  std::string pw( "PARALLEL=WRITE_PART" );
703  if( write_opts.find( pw ) != std::string::npos )
704  {
705  ParallelComm* pco = data.pcomm;
706  newopts << "PARALLEL_COMM=" << pco->get_id() << ";";
707  }
708 
709 #endif
710 
711 #ifdef MOAB_HAVE_TEMPESTREMAP
712  if( !primary_set )
713  {
714  if( data.tempestData.remapper != nullptr )
715  fileSet = data.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh );
716  else if( data.file_set != data.secondary_file_set )
717  fileSet = data.secondary_file_set;
718  else
719  MB_CHK_SET_ERR( moab::MB_FAILURE, "Invalid secondary file set handle" );
720  }
721 #endif
722 
723  // append user write options to the one we have built
724  if( write_options ) newopts << write_options;
725 
726  std::vector< Tag > copyTagList = data.tagList;
727  // append Global ID and Parallel Partition
728  std::string gid_name_tag( "GLOBAL_ID" );
729 
730  // export global id tag, we need it always
731  if( data.tagMap.find( gid_name_tag ) == data.tagMap.end() )
732  {
733  Tag gid = context.MBI->globalId_tag();
734  copyTagList.push_back( gid );
735  }
736  // also Parallel_Partition PARALLEL_PARTITION
737  std::string pp_name_tag( "PARALLEL_PARTITION" );
738 
739  // write parallel part tag too, if it exists
740  if( data.tagMap.find( pp_name_tag ) == data.tagMap.end() )
741  {
742  Tag ptag;
743  context.MBI->tag_get_handle( pp_name_tag.c_str(), ptag );
744  if( ptag ) copyTagList.push_back( ptag );
745  }
746 
747  // Now let us actually write the file to disk with appropriate options
748  if( primary_set )
749  {
750  MB_CHK_ERR( context.MBI->write_file( filename, 0, newopts.str().c_str(), &fileSet, 1, copyTagList.data(),
751  copyTagList.size() ) );
752  }
753  else
754  {
755  MB_CHK_ERR( context.MBI->write_file( filename, 0, newopts.str().c_str(), &fileSet, 1 ) );
756  }
757 
758  return moab::MB_SUCCESS;
759 }
760 
761 ErrCode iMOAB_WriteMesh( iMOAB_AppID pid, const iMOAB_String filename, const iMOAB_String write_options )
762 {
763  return internal_WriteMesh( pid, filename, write_options );
764 }
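// Usage sketch (not part of the original source): writing the primary file set in parallel; the
// output name is a placeholder. When the options contain PARALLEL=WRITE_PART, internal_WriteMesh
// prepends the PARALLEL_COMM id of this application, so the caller should not specify it.
//
//   char outname[] = "out.h5m";
//   char wopts[]   = "PARALLEL=WRITE_PART";
//   ErrCode rc = iMOAB_WriteMesh( &appID, outname, wopts );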
765 
766 ErrCode iMOAB_WriteLocalMesh( iMOAB_AppID pid, const iMOAB_String prefix )
767 {
768  IMOAB_CHECKPOINTER( prefix, 2 );
769  IMOAB_ASSERT( strlen( prefix ), "Invalid prefix string length." );
770 
771  std::ostringstream file_name;
772  int rank = 0, size = 1;
773 #ifdef MOAB_HAVE_MPI
774  ParallelComm* pcomm = context.appDatas[*pid].pcomm;
775  rank = pcomm->rank();
776  size = pcomm->size();
777 #endif
778  file_name << prefix << "_" << size << "_" << rank << ".h5m";
779  // Now let us actually write the file to disk with appropriate options
780  ErrorCode rval = context.MBI->write_file( file_name.str().c_str(), 0, 0, &context.appDatas[*pid].file_set, 1 );MB_CHK_ERR( rval );
781 
782  return moab::MB_SUCCESS;
783 }
784 
785 ErrCode iMOAB_UpdateMeshInfo( iMOAB_AppID pid )
786 {
787  // this will include ghost elements info
788  appData& data = context.appDatas[*pid];
789  EntityHandle fileSet = data.file_set;
790 
791  // first clear all data ranges; this can be called after ghosting
792  data.all_verts.clear();
793  data.primary_elems.clear();
794  data.local_verts.clear();
795  data.owned_verts.clear();
796  data.ghost_vertices.clear();
797  data.owned_elems.clear();
798  data.ghost_elems.clear();
799  data.mat_sets.clear();
800  data.neu_sets.clear();
801  data.diri_sets.clear();
802 
803  // Let us get all the vertex entities
804  ErrorCode rval = context.MBI->get_entities_by_type( fileSet, MBVERTEX, data.all_verts, true );MB_CHK_ERR( rval ); // recursive
805 
806  // Let us check first entities of dimension = 3
807  data.dimension = 3;
808  rval = context.MBI->get_entities_by_dimension( fileSet, data.dimension, data.primary_elems, true );MB_CHK_ERR( rval ); // recursive
809 
810  if( data.primary_elems.empty() )
811  {
812  // Now 3-D elements. Let us check entities of dimension = 2
813  data.dimension = 2;
814  rval = context.MBI->get_entities_by_dimension( fileSet, data.dimension, data.primary_elems, true );MB_CHK_ERR( rval ); // recursive
815 
816  if( data.primary_elems.empty() )
817  {
818  // Now 3-D/2-D elements. Let us check entities of dimension = 1
819  data.dimension = 1;
820  rval = context.MBI->get_entities_by_dimension( fileSet, data.dimension, data.primary_elems, true );MB_CHK_ERR( rval ); // recursive
821 
822  if( data.primary_elems.empty() )
823  {
824  // no elements of dimension 1 or 2 or 3; it could happen for point clouds
825  data.dimension = 0;
826  }
827  }
828  }
829 
830  // check if the current mesh is just a point cloud
831  data.point_cloud = ( ( data.primary_elems.size() == 0 && data.all_verts.size() > 0 ) || data.dimension == 0 );
832 
833 #ifdef MOAB_HAVE_MPI
834 
835  if( context.MPI_initialized )
836  {
837  ParallelComm* pco = context.appDatas[*pid].pcomm;
838 
839  // filter ghost vertices, from local
840  rval = pco->filter_pstatus( data.all_verts, PSTATUS_GHOST, PSTATUS_NOT, -1, &data.local_verts );MB_CHK_ERR( rval );
841 
842  // Store handles for all ghosted entities
843  data.ghost_vertices = subtract( data.all_verts, data.local_verts );
844 
845  // filter ghost elements, from local
846  rval = pco->filter_pstatus( data.primary_elems, PSTATUS_GHOST, PSTATUS_NOT, -1, &data.owned_elems );MB_CHK_ERR( rval );
847 
848  data.ghost_elems = subtract( data.primary_elems, data.owned_elems );
849  // now update global number of primary cells and global number of vertices
850  // determine first number of owned vertices
851  // Get local owned vertices
852  rval = pco->filter_pstatus( data.all_verts, PSTATUS_NOT_OWNED, PSTATUS_NOT, -1, &data.owned_verts );MB_CHK_ERR( rval );
853  int local[2], global[2];
854  local[0] = data.owned_verts.size();
855  local[1] = data.owned_elems.size();
856  MPI_Allreduce( local, global, 2, MPI_INT, MPI_SUM, pco->comm() );
857  rval = iMOAB_SetGlobalInfo( pid, &( global[0] ), &( global[1] ) );MB_CHK_ERR( rval );
858  }
859  else
860  {
861  data.local_verts = data.all_verts;
862  data.owned_elems = data.primary_elems;
863  }
864 
865 #else
866 
867  data.local_verts = data.all_verts;
868  data.owned_elems = data.primary_elems;
869 
870 #endif
871 
872  // Get the references for some standard internal tags such as material blocks, BCs, etc
873  rval = context.MBI->get_entities_by_type_and_tag( fileSet, MBENTITYSET, &( context.material_tag ), 0, 1,
874  data.mat_sets, Interface::UNION );MB_CHK_ERR( rval );
875 
876  rval = context.MBI->get_entities_by_type_and_tag( fileSet, MBENTITYSET, &( context.neumann_tag ), 0, 1,
877  data.neu_sets, Interface::UNION );MB_CHK_ERR( rval );
878 
879  rval = context.MBI->get_entities_by_type_and_tag( fileSet, MBENTITYSET, &( context.dirichlet_tag ), 0, 1,
880  data.diri_sets, Interface::UNION );MB_CHK_ERR( rval );
881 
882  return moab::MB_SUCCESS;
883 }
884 
885 ErrCode iMOAB_GetMeshInfo( iMOAB_AppID pid,
886  int* num_visible_vertices,
887  int* num_visible_elements,
888  int* num_visible_blocks,
889  int* num_visible_surfaceBC,
890  int* num_visible_vertexBC )
891 {
892  ErrorCode rval;
893  appData& data = context.appDatas[*pid];
894  EntityHandle fileSet = data.file_set;
895 
896  // this will include ghost elements
897  // first clear all data ranges; this can be called after ghosting
898  if( num_visible_elements )
899  {
900  num_visible_elements[2] = static_cast< int >( data.primary_elems.size() );
901  // separate ghost and local/owned primary elements
902  num_visible_elements[0] = static_cast< int >( data.owned_elems.size() );
903  num_visible_elements[1] = static_cast< int >( data.ghost_elems.size() );
904  }
905  if( num_visible_vertices )
906  {
907  num_visible_vertices[2] = static_cast< int >( data.all_verts.size() );
908  num_visible_vertices[1] = static_cast< int >( data.ghost_vertices.size() );
909  // local are those that are not ghosts; they may include shared, not owned vertices
910  num_visible_vertices[0] = num_visible_vertices[2] - num_visible_vertices[1];
911  }
912 
913  if( num_visible_blocks )
914  {
915  rval = context.MBI->get_entities_by_type_and_tag( fileSet, MBENTITYSET, &( context.material_tag ), 0, 1,
916  data.mat_sets, Interface::UNION );MB_CHK_ERR( rval );
917 
918  num_visible_blocks[2] = data.mat_sets.size();
919  num_visible_blocks[0] = num_visible_blocks[2];
920  num_visible_blocks[1] = 0;
921  }
922 
923  if( num_visible_surfaceBC )
924  {
925  rval = context.MBI->get_entities_by_type_and_tag( fileSet, MBENTITYSET, &( context.neumann_tag ), 0, 1,
926  data.neu_sets, Interface::UNION );MB_CHK_ERR( rval );
927 
928  num_visible_surfaceBC[2] = 0;
929  // count how many faces are in each neu set, and how many regions are
930  // adjacent to them;
931  int numNeuSets = (int)data.neu_sets.size();
932 
933  for( int i = 0; i < numNeuSets; i++ )
934  {
935  Range subents;
936  EntityHandle nset = data.neu_sets[i];
937  rval = context.MBI->get_entities_by_dimension( nset, data.dimension - 1, subents );MB_CHK_ERR( rval );
938 
939  for( Range::iterator it = subents.begin(); it != subents.end(); ++it )
940  {
941  EntityHandle subent = *it;
942  Range adjPrimaryEnts;
943  rval = context.MBI->get_adjacencies( &subent, 1, data.dimension, false, adjPrimaryEnts );MB_CHK_ERR( rval );
944 
945  num_visible_surfaceBC[2] += (int)adjPrimaryEnts.size();
946  }
947  }
948 
949  num_visible_surfaceBC[0] = num_visible_surfaceBC[2];
950  num_visible_surfaceBC[1] = 0;
951  }
952 
953  if( num_visible_vertexBC )
954  {
955  rval = context.MBI->get_entities_by_type_and_tag( fileSet, MBENTITYSET, &( context.dirichlet_tag ), 0, 1,
956  data.diri_sets, Interface::UNION );MB_CHK_ERR( rval );
957 
958  num_visible_vertexBC[2] = 0;
959  int numDiriSets = (int)data.diri_sets.size();
960 
961  for( int i = 0; i < numDiriSets; i++ )
962  {
963  Range verts;
964  EntityHandle diset = data.diri_sets[i];
965  rval = context.MBI->get_entities_by_dimension( diset, 0, verts );MB_CHK_ERR( rval );
966 
967  num_visible_vertexBC[2] += (int)verts.size();
968  }
969 
970  num_visible_vertexBC[0] = num_visible_vertexBC[2];
971  num_visible_vertexBC[1] = 0;
972  }
973 
974  return moab::MB_SUCCESS;
975 }
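// Usage sketch (not part of the original source): every output array of iMOAB_GetMeshInfo has
// three entries, filled as [owned/local, ghost, total], matching the assignments above; the
// array names are made up.
//
//   int nverts[3], nelems[3], nblocks[3], nsbc[3], ndbc[3];
//   ErrCode rc = iMOAB_GetMeshInfo( &appID, nverts, nelems, nblocks, nsbc, ndbc );
//   // e.g. nelems[0] = owned cells, nelems[1] = ghost cells, nelems[2] = nelems[0] + nelems[1]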
976 
977 ErrCode iMOAB_GetVertexID( iMOAB_AppID pid, int* vertices_length, iMOAB_GlobalID* global_vertex_ID )
978 {
979  IMOAB_CHECKPOINTER( vertices_length, 2 );
980  IMOAB_CHECKPOINTER( global_vertex_ID, 3 );
981 
982  const Range& verts = context.appDatas[*pid].all_verts;
983  // check for problems with array length
984  IMOAB_ASSERT( *vertices_length == static_cast< int >( verts.size() ), "Invalid vertices length provided" );
985 
986  // global id tag is context.globalID_tag
987  return context.MBI->tag_get_data( context.globalID_tag, verts, global_vertex_ID );
988 }
989 
990 ErrCode iMOAB_GetVertexOwnership( iMOAB_AppID pid, int* vertices_length, int* visible_global_rank_ID )
991 {
992  assert( vertices_length && *vertices_length );
993  assert( visible_global_rank_ID );
994 
995  Range& verts = context.appDatas[*pid].all_verts;
996  int i = 0;
997 #ifdef MOAB_HAVE_MPI
998  ParallelComm* pco = context.appDatas[*pid].pcomm;
999 
1000  for( Range::iterator vit = verts.begin(); vit != verts.end(); vit++, i++ )
1001  {
1002  ErrorCode rval = pco->get_owner( *vit, visible_global_rank_ID[i] );MB_CHK_ERR( rval );
1003  }
1004 
1005  if( i != *vertices_length )
1006  {
1007  return moab::MB_FAILURE;
1008  } // warning array allocation problem
1009 
1010 #else
1011 
1012  /* everything owned by proc 0 */
1013  if( (int)verts.size() != *vertices_length )
1014  {
1015  return moab::MB_FAILURE;
1016  } // warning array allocation problem
1017 
1018  for( Range::iterator vit = verts.begin(); vit != verts.end(); vit++, i++ )
1019  {
1020  visible_global_rank_ID[i] = 0;
1021  } // all vertices are owned by processor 0, as this is serial run
1022 
1023 #endif
1024 
1025  return moab::MB_SUCCESS;
1026 }
1027 
1028 ErrCode iMOAB_GetVisibleVerticesCoordinates( iMOAB_AppID pid, int* coords_length, double* coordinates )
1029 {
1030  Range& verts = context.appDatas[*pid].all_verts;
1031 
1032  // interleaved coordinates, so that means deep copy anyway
1033  if( *coords_length != 3 * (int)verts.size() )
1034  {
1035  return moab::MB_FAILURE;
1036  }
1037 
1038  ErrorCode rval = context.MBI->get_coords( verts, coordinates );MB_CHK_ERR( rval );
1039 
1040  return moab::MB_SUCCESS;
1041 }
1042 
1043 ErrCode iMOAB_GetBlockID( iMOAB_AppID pid, int* block_length, iMOAB_GlobalID* global_block_IDs )
1044 {
1045  // local id blocks? they are counted from 0 to number of visible blocks ...
1046  // will actually return material set tag value for global
1047  Range& matSets = context.appDatas[*pid].mat_sets;
1048 
1049  if( *block_length != (int)matSets.size() )
1050  {
1051  return moab::MB_FAILURE;
1052  }
1053 
1054  // return the material set tag values; context.material_tag is the material set tag
1055  ErrorCode rval = context.MBI->tag_get_data( context.material_tag, matSets, global_block_IDs );MB_CHK_ERR( rval );
1056 
1057  // populate map with index
1058  std::map< int, int >& matIdx = context.appDatas[*pid].matIndex;
1059  for( unsigned i = 0; i < matSets.size(); i++ )
1060  {
1061  matIdx[global_block_IDs[i]] = i;
1062  }
1063 
1064  return moab::MB_SUCCESS;
1065 }
1066 
1067 ErrCode iMOAB_GetBlockInfo( iMOAB_AppID pid,
1068  iMOAB_GlobalID* global_block_ID,
1069  int* vertices_per_element,
1070  int* num_elements_in_block )
1071 {
1072  assert( global_block_ID );
1073 
1074  std::map< int, int >& matMap = context.appDatas[*pid].matIndex;
1075  std::map< int, int >::iterator it = matMap.find( *global_block_ID );
1076 
1077  if( it == matMap.end() )
1078  {
1079  return moab::MB_FAILURE;
1080  } // error in finding block with id
1081 
1082  int blockIndex = matMap[*global_block_ID];
1083  EntityHandle matMeshSet = context.appDatas[*pid].mat_sets[blockIndex];
1084  Range blo_elems;
1085  ErrorCode rval = context.MBI->get_entities_by_handle( matMeshSet, blo_elems );
1086 
1087  if( MB_SUCCESS != rval || blo_elems.empty() )
1088  {
1089  return moab::MB_FAILURE;
1090  }
1091 
1092  EntityType type = context.MBI->type_from_handle( blo_elems[0] );
1093 
1094  if( !blo_elems.all_of_type( type ) )
1095  {
1096  return moab::MB_FAILURE;
1097  } // not all of same type
1098 
1099  const EntityHandle* conn = nullptr;
1100  int num_verts = 0;
1101  rval = context.MBI->get_connectivity( blo_elems[0], conn, num_verts );MB_CHK_ERR( rval );
1102 
1103  *vertices_per_element = num_verts;
1104  *num_elements_in_block = (int)blo_elems.size();
1105 
1106  return moab::MB_SUCCESS;
1107 }
1108 
1109 ErrCode iMOAB_GetVisibleElementsInfo( iMOAB_AppID pid,
1110  int* num_visible_elements,
1111  iMOAB_GlobalID* element_global_IDs,
1112  int* ranks,
1113  iMOAB_GlobalID* block_IDs )
1114 {
1115  appData& data = context.appDatas[*pid];
1116 #ifdef MOAB_HAVE_MPI
1117  ParallelComm* pco = context.appDatas[*pid].pcomm;
1118 #endif
1119 
1120  ErrorCode rval = context.MBI->tag_get_data( context.globalID_tag, data.primary_elems, element_global_IDs );MB_CHK_ERR( rval );
1121 
1122  int i = 0;
1123 
1124  for( Range::iterator eit = data.primary_elems.begin(); eit != data.primary_elems.end(); ++eit, ++i )
1125  {
1126 #ifdef MOAB_HAVE_MPI
1127  rval = pco->get_owner( *eit, ranks[i] );MB_CHK_ERR( rval );
1128 
1129 #else
1130  /* everything owned by task 0 */
1131  ranks[i] = 0;
1132 #endif
1133  }
1134 
1135  for( Range::iterator mit = data.mat_sets.begin(); mit != data.mat_sets.end(); ++mit )
1136  {
1137  EntityHandle matMeshSet = *mit;
1138  Range elems;
1139  rval = context.MBI->get_entities_by_handle( matMeshSet, elems );MB_CHK_ERR( rval );
1140 
1141  int valMatTag;
1142  rval = context.MBI->tag_get_data( context.material_tag, &matMeshSet, 1, &valMatTag );MB_CHK_ERR( rval );
1143 
1144  for( Range::iterator eit = elems.begin(); eit != elems.end(); ++eit )
1145  {
1146  EntityHandle eh = *eit;
1147  int index = data.primary_elems.index( eh );
1148 
1149  if( -1 == index )
1150  {
1151  return moab::MB_FAILURE;
1152  }
1153 
1154  if( -1 >= *num_visible_elements )
1155  {
1156  return moab::MB_FAILURE;
1157  }
1158 
1159  block_IDs[index] = valMatTag;
1160  }
1161  }
1162 
1163  return moab::MB_SUCCESS;
1164 }
1165 
1166 ErrCode iMOAB_GetBlockElementConnectivities( iMOAB_AppID pid,
1167  iMOAB_GlobalID* global_block_ID,
1168  int* connectivity_length,
1169  int* element_connectivity )
1170 {
1171  assert( global_block_ID ); // ensure global block ID argument is not null
1172  assert( connectivity_length ); // ensure connectivity length argument is not null
1173 
1174  appData& data = context.appDatas[*pid];
1175  std::map< int, int >& matMap = data.matIndex;
1176  std::map< int, int >::iterator it = matMap.find( *global_block_ID );
1177 
1178  if( it == matMap.end() )
1179  {
1180  return moab::MB_FAILURE;
1181  } // error in finding block with id
1182 
1183  int blockIndex = matMap[*global_block_ID];
1184  EntityHandle matMeshSet = data.mat_sets[blockIndex];
1185  std::vector< EntityHandle > elems;
1186 
1187  ErrorCode rval = context.MBI->get_entities_by_handle( matMeshSet, elems );MB_CHK_ERR( rval );
1188 
1189  if( elems.empty() )
1190  {
1191  return moab::MB_FAILURE;
1192  }
1193 
1194  std::vector< EntityHandle > vconnect;
1195  rval = context.MBI->get_connectivity( &elems[0], elems.size(), vconnect );MB_CHK_ERR( rval );
1196 
1197  if( *connectivity_length != (int)vconnect.size() )
1198  {
1199  return moab::MB_FAILURE;
1200  } // mismatched sizes
1201 
1202  for( int i = 0; i < *connectivity_length; i++ )
1203  {
1204  int inx = data.all_verts.index( vconnect[i] );
1205 
1206  if( -1 == inx )
1207  {
1208  return moab::MB_FAILURE;
1209  } // error, vertex not in local range
1210 
1211  element_connectivity[i] = inx;
1212  }
1213 
1214  return moab::MB_SUCCESS;
1215 }
1216 
1217 ErrCode iMOAB_GetElementConnectivity( iMOAB_AppID pid,
1218  iMOAB_LocalID* elem_index,
1219  int* connectivity_length,
1220  int* element_connectivity )
1221 {
1222  assert( elem_index ); // ensure element index argument is not null
1223  assert( connectivity_length ); // ensure connectivity length argument is not null
1224 
1225  appData& data = context.appDatas[*pid];
1226  assert( ( *elem_index >= 0 ) && ( *elem_index < (int)data.primary_elems.size() ) );
1227 
1228  int num_nodes;
1229  const EntityHandle* conn;
1230 
1231  EntityHandle eh = data.primary_elems[*elem_index];
1232 
1233  ErrorCode rval = context.MBI->get_connectivity( eh, conn, num_nodes );MB_CHK_ERR( rval );
1234 
1235  if( *connectivity_length < num_nodes )
1236  {
1237  return moab::MB_FAILURE;
1238  } // wrong number of vertices
1239 
1240  for( int i = 0; i < num_nodes; i++ )
1241  {
1242  int index = data.all_verts.index( conn[i] );
1243 
1244  if( -1 == index )
1245  {
1246  return moab::MB_FAILURE;
1247  }
1248 
1249  element_connectivity[i] = index;
1250  }
1251 
1252  *connectivity_length = num_nodes;
1253 
1254  return moab::MB_SUCCESS;
1255 }
1256 
1257 ErrCode iMOAB_GetElementOwnership( iMOAB_AppID pid,
1258  iMOAB_GlobalID* global_block_ID,
1259  int* num_elements_in_block,
1260  int* element_ownership )
1261 {
1262  assert( global_block_ID ); // ensure global block ID argument is not null
1263  assert( num_elements_in_block ); // ensure number of elements in block argument is not null
1264 
1265  std::map< int, int >& matMap = context.appDatas[*pid].matIndex;
1266 
1267  std::map< int, int >::iterator it = matMap.find( *global_block_ID );
1268 
1269  if( it == matMap.end() )
1270  {
1271  return moab::MB_FAILURE;
1272  } // error in finding block with id
1273 
1274  int blockIndex = matMap[*global_block_ID];
1275  EntityHandle matMeshSet = context.appDatas[*pid].mat_sets[blockIndex];
1276  Range elems;
1277 
1278  ErrorCode rval = context.MBI->get_entities_by_handle( matMeshSet, elems );MB_CHK_ERR( rval );
1279 
1280  if( elems.empty() )
1281  {
1282  return moab::MB_FAILURE;
1283  }
1284 
1285  if( *num_elements_in_block != (int)elems.size() )
1286  {
1287  return moab::MB_FAILURE;
1288  } // bad memory allocation
1289 
1290  int i = 0;
1291 #ifdef MOAB_HAVE_MPI
1292  ParallelComm* pco = context.appDatas[*pid].pcomm;
1293 #endif
1294 
1295  for( Range::iterator vit = elems.begin(); vit != elems.end(); vit++, i++ )
1296  {
1297 #ifdef MOAB_HAVE_MPI
1298  rval = pco->get_owner( *vit, element_ownership[i] );MB_CHK_ERR( rval );
1299 #else
1300  element_ownership[i] = 0; /* owned by 0 */
1301 #endif
1302  }
1303 
1304  return moab::MB_SUCCESS;
1305 }
1306 
1307 ErrCode iMOAB_GetElementID( iMOAB_AppID pid,
1308  iMOAB_GlobalID* global_block_ID,
1309  int* num_elements_in_block,
1310  iMOAB_GlobalID* global_element_ID,
1311  iMOAB_LocalID* local_element_ID )
1312 {
1313  assert( global_block_ID ); // ensure global block ID argument is not null
1314  assert( num_elements_in_block ); // ensure number of elements in block argument is not null
1315 
1316  appData& data = context.appDatas[*pid];
1317  std::map< int, int >& matMap = data.matIndex;
1318 
1319  std::map< int, int >::iterator it = matMap.find( *global_block_ID );
1320 
1321  if( it == matMap.end() )
1322  {
1323  return moab::MB_FAILURE;
1324  } // error in finding block with id
1325 
1326  int blockIndex = matMap[*global_block_ID];
1327  EntityHandle matMeshSet = data.mat_sets[blockIndex];
1328  Range elems;
1329  ErrorCode rval = context.MBI->get_entities_by_handle( matMeshSet, elems );MB_CHK_ERR( rval );
1330 
1331  if( elems.empty() )
1332  {
1333  return moab::MB_FAILURE;
1334  }
1335 
1336  if( *num_elements_in_block != (int)elems.size() )
1337  {
1338  return moab::MB_FAILURE;
1339  } // bad memory allocation
1340 
1341  rval = context.MBI->tag_get_data( context.globalID_tag, elems, global_element_ID );MB_CHK_ERR( rval );
1342 
1343  // check that elems are among primary_elems in data
1344  for( int i = 0; i < *num_elements_in_block; i++ )
1345  {
1346  local_element_ID[i] = data.primary_elems.index( elems[i] );
1347 
1348  if( -1 == local_element_ID[i] )
1349  {
1350  return moab::MB_FAILURE;
1351  } // error, not in local primary elements
1352  }
1353 
1354  return moab::MB_SUCCESS;
1355 }
1356 
1357 ErrCode iMOAB_GetPointerToSurfaceBC( iMOAB_AppID pid,
1358  int* surface_BC_length,
1359  iMOAB_LocalID* local_element_ID,
1360  int* reference_surface_ID,
1361  int* boundary_condition_value )
1362 {
1363  // we have to fill BC data for the Neumann sets
1364  ErrorCode rval;
1365 
1366  // it was counted above, in GetMeshInfo
1367  appData& data = context.appDatas[*pid];
1368  int numNeuSets = (int)data.neu_sets.size();
1369 
1370  int index = 0; // index [0, surface_BC_length) for the arrays returned
1371 
1372  for( int i = 0; i < numNeuSets; i++ )
1373  {
1374  Range subents;
1375  EntityHandle nset = data.neu_sets[i];
1376  rval = context.MBI->get_entities_by_dimension( nset, data.dimension - 1, subents );MB_CHK_ERR( rval );
1377 
1378  int neuVal;
1379  rval = context.MBI->tag_get_data( context.neumann_tag, &nset, 1, &neuVal );MB_CHK_ERR( rval );
1380 
1381  for( Range::iterator it = subents.begin(); it != subents.end(); ++it )
1382  {
1383  EntityHandle subent = *it;
1384  Range adjPrimaryEnts;
1385  rval = context.MBI->get_adjacencies( &subent, 1, data.dimension, false, adjPrimaryEnts );MB_CHK_ERR( rval );
1386 
1387  // get global id of the primary ents, and side number of the quad/subentity
1388  // this is moab ordering
1389  for( Range::iterator pit = adjPrimaryEnts.begin(); pit != adjPrimaryEnts.end(); pit++ )
1390  {
1391  EntityHandle primaryEnt = *pit;
1392  // get global id
1393  /*int globalID;
1394  rval = context.MBI->tag_get_data(gtags[3], &primaryEnt, 1, &globalID);
1395  if (MB_SUCCESS!=rval)
1396  return moab::MB_FAILURE;
1397  global_element_ID[index] = globalID;*/
1398 
1399  // get local element id
1400  local_element_ID[index] = data.primary_elems.index( primaryEnt );
1401 
1402  if( -1 == local_element_ID[index] )
1403  {
1404  return moab::MB_FAILURE;
1405  } // did not find the element locally
1406 
1407  int side_number, sense, offset;
1408  rval = context.MBI->side_number( primaryEnt, subent, side_number, sense, offset );MB_CHK_ERR( rval );
1409 
1410  reference_surface_ID[index] = side_number + 1; // moab is from 0 to 5, it needs 1 to 6
1411  boundary_condition_value[index] = neuVal;
1412  index++;
1413  }
1414  }
1415  }
1416 
1417  if( index != *surface_BC_length )
1418  {
1419  return moab::MB_FAILURE;
1420  } // error in array allocations
1421 
1422  return moab::MB_SUCCESS;
1423 }
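// Usage sketch (not part of the original source): the arrays must be sized with the surface-BC
// count reported by iMOAB_GetMeshInfo (nsbc[2] in the earlier sketch); reference_surface_ID holds
// 1-based side numbers, since MOAB's side_number() is 0-based, as noted above.
//
//   std::vector< iMOAB_LocalID > bc_elems( nsbc[2] );
//   std::vector< int > bc_sides( nsbc[2] ), bc_vals( nsbc[2] );
//   int nbc = nsbc[2];
//   ErrCode rc = iMOAB_GetPointerToSurfaceBC( &appID, &nbc, bc_elems.data(), bc_sides.data(), bc_vals.data() );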
1424 
1425 ErrCode iMOAB_GetPointerToVertexBC( iMOAB_AppID pid,
1426  int* vertex_BC_length,
1427  iMOAB_LocalID* local_vertex_ID,
1428  int* boundary_condition_value )
1429 {
1430  ErrorCode rval;
1431 
1432  // it was counted above, in GetMeshInfo
1433  appData& data = context.appDatas[*pid];
1434  int numDiriSets = (int)data.diri_sets.size();
1435  int index = 0; // index [0, *vertex_BC_length) for the arrays returned
1436 
1437  for( int i = 0; i < numDiriSets; i++ )
1438  {
1439  Range verts;
1440  EntityHandle diset = data.diri_sets[i];
1441  rval = context.MBI->get_entities_by_dimension( diset, 0, verts );MB_CHK_ERR( rval );
1442 
1443  int diriVal;
1444  rval = context.MBI->tag_get_data( context.dirichlet_tag, &diset, 1, &diriVal );MB_CHK_ERR( rval );
1445 
1446  for( Range::iterator vit = verts.begin(); vit != verts.end(); ++vit )
1447  {
1448  EntityHandle vt = *vit;
1449  /*int vgid;
1450  rval = context.MBI->tag_get_data(gtags[3], &vt, 1, &vgid);
1451  if (MB_SUCCESS!=rval)
1452  return moab::MB_FAILURE;
1453  global_vertext_ID[index] = vgid;*/
1454  local_vertex_ID[index] = data.all_verts.index( vt );
1455 
1456  if( -1 == local_vertex_ID[index] )
1457  {
1458  return moab::MB_FAILURE;
1459  } // vertex was not found
1460 
1461  boundary_condition_value[index] = diriVal;
1462  index++;
1463  }
1464  }
1465 
1466  if( *vertex_BC_length != index )
1467  {
1468  return moab::MB_FAILURE;
1469  } // array allocation issue
1470 
1471  return moab::MB_SUCCESS;
1472 }
1473 
1474 // Utility function
1475 static void split_tag_names( std::string input_names,
1476  std::string& separator,
1477  std::vector< std::string >& list_tag_names )
1478 {
1479  size_t pos = 0;
1480  std::string token;
1481  while( ( pos = input_names.find( separator ) ) != std::string::npos )
1482  {
1483  token = input_names.substr( 0, pos );
1484  if( !token.empty() ) list_tag_names.push_back( token );
1485  // std::cout << token << std::endl;
1486  input_names.erase( 0, pos + separator.length() );
1487  }
1488  if( !input_names.empty() )
1489  {
1490  // if leftover something, or if not ended with delimiter
1491  list_tag_names.push_back( input_names );
1492  }
1493  return;
1494 }
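// Behavior sketch (not part of the original source): split_tag_names breaks a separator-delimited
// list into tokens and skips empty tokens produced by repeated separators; the tag names are
// made-up examples.
//
//   std::vector< std::string > names;
//   std::string sep( ":" );
//   split_tag_names( "Temperature:Velocity", sep, names );  // names == { "Temperature", "Velocity" }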
1495 
1496 ErrCode iMOAB_DefineTagStorage( iMOAB_AppID pid,
1497  const iMOAB_String tag_storage_name,
1498  int* tag_type,
1499  int* components_per_entity,
1500  int* tag_index )
1501 {
1502  // we have 6 types of tags supported so far
1503  // check if tag type is valid
1504  if( *tag_type < 0 || *tag_type > 5 ) return moab::MB_FAILURE;
1505 
1506  DataType tagDataType;
1507  TagType tagType;
1508  void* defaultVal = nullptr;
1509  int* defInt = new int[*components_per_entity];
1510  double* defDouble = new double[*components_per_entity];
1511  EntityHandle* defHandle = new EntityHandle[*components_per_entity];
1512 
1513  for( int i = 0; i < *components_per_entity; i++ )
1514  {
1515  defInt[i] = 0;
1516  defDouble[i] = -1e+10;
1517  defHandle[i] = static_cast< EntityHandle >( 0 );
1518  }
1519 
1520  switch( *tag_type )
1521  {
1522  case 0:
1523  tagDataType = MB_TYPE_INTEGER;
1524  tagType = MB_TAG_DENSE;
1525  defaultVal = defInt;
1526  break;
1527 
1528  case 1:
1529  tagDataType = MB_TYPE_DOUBLE;
1530  tagType = MB_TAG_DENSE;
1531  defaultVal = defDouble;
1532  break;
1533 
1534  case 2:
1535  tagDataType = MB_TYPE_HANDLE;
1536  tagType = MB_TAG_DENSE;
1537  defaultVal = defHandle;
1538  break;
1539 
1540  case 3:
1541  tagDataType = MB_TYPE_INTEGER;
1542  tagType = MB_TAG_SPARSE;
1543  defaultVal = defInt;
1544  break;
1545 
1546  case 4:
1547  tagDataType = MB_TYPE_DOUBLE;
1548  tagType = MB_TAG_SPARSE;
1549  defaultVal = defDouble;
1550  break;
1551 
1552  case 5:
1553  tagDataType = MB_TYPE_HANDLE;
1554  tagType = MB_TAG_SPARSE;
1555  defaultVal = defHandle;
1556  break;
1557 
1558  default: {
1559  delete[] defInt;
1560  delete[] defDouble;
1561  delete[] defHandle;
1562  return moab::MB_FAILURE;
1563  } // error
1564  }
1565 
1566  // split storage names if separated list
1567  std::string tag_name( tag_storage_name );
1568  // first separate the names of the tags
1569  // we assume that there are separators ":" between the tag names
1570  std::vector< std::string > tagNames;
1571  std::string separator( ":" );
1572  split_tag_names( tag_name, separator, tagNames );
1573 
1574  ErrorCode rval = moab::MB_SUCCESS; // assume success already :)
1575  appData& data = context.appDatas[*pid];
1576  int already_defined_tags = 0;
1577 
1578  Tag tagHandle;
1579  for( size_t i = 0; i < tagNames.size(); i++ )
1580  {
1581  rval = context.MBI->tag_get_handle( tagNames[i].c_str(), *components_per_entity, tagDataType, tagHandle,
1582  tagType | MB_TAG_EXCL | MB_TAG_CREAT, defaultVal );
1583 
1584  if( MB_ALREADY_ALLOCATED == rval )
1585  {
1586  std::map< std::string, Tag >& mTags = data.tagMap;
1587  std::map< std::string, Tag >::iterator mit = mTags.find( tagNames[i].c_str() );
1588 
1589  if( mit == mTags.end() )
1590  {
1591  // add it to the map
1592  mTags[tagNames[i]] = tagHandle;
1593  // push it to the list of tags, too
1594  *tag_index = (int)data.tagList.size();
1595  data.tagList.push_back( tagHandle );
1596  }
1597  rval = MB_SUCCESS;
1598  already_defined_tags++;
1599  }
1600  else if( MB_SUCCESS == rval )
1601  {
1602  data.tagMap[tagNames[i]] = tagHandle;
1603  *tag_index = (int)data.tagList.size();
1604  data.tagList.push_back( tagHandle );
1605  }
1606  else
1607  {
1608  rval = moab::MB_FAILURE; // some tags were not created
1609  }
1610  }
1611  // we don't need default values anymore, avoid leaks
1612  int rankHere = 0;
1613 #ifdef MOAB_HAVE_MPI
1614  ParallelComm* pco = context.appDatas[*pid].pcomm;
1615  rankHere = pco->rank();
1616 #endif
1617  if( !rankHere && already_defined_tags )
1618  std::cout << " application with ID: " << *pid << " global id: " << data.global_id << " name: " << data.name
1619  << " has " << already_defined_tags << " already defined tags out of " << tagNames.size()
1620  << " tags \n";
1621  delete[] defInt;
1622  delete[] defDouble;
1623  delete[] defHandle;
1624  return rval;
1625 }
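// Usage sketch (not part of the original source): tag_type encodes both data type and storage, as
// the switch above shows (0/1/2 = dense int/double/handle, 3/4/5 = the sparse variants). Several
// tags can be defined at once with a ":"-separated list; the tag names are made-up examples.
//
//   int tag_type  = 1;                        // dense, double-valued
//   int ncomp     = 1;                        // one component per entity
//   int tag_index = -1;
//   char tnames[] = "Temperature:Velocity";
//   ErrCode rc = iMOAB_DefineTagStorage( &appID, tnames, &tag_type, &ncomp, &tag_index );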
1626 
1627 ErrCode iMOAB_SetIntTagStorage( iMOAB_AppID pid,
1628  const iMOAB_String tag_storage_name,
1629  int* num_tag_storage_length,
1630  int* ent_type,
1631  int* tag_storage_data )
1632 {
1633  std::string tag_name( tag_storage_name );
1634 
1635  // Get the application data
1636  appData& data = context.appDatas[*pid];
1637  // check if tag is defined
1638  if( data.tagMap.find( tag_name ) == data.tagMap.end() ) return moab::MB_FAILURE;
1639 
1640  Tag tag = data.tagMap[tag_name];
1641 
1642  int tagLength = 0;
1643  MB_CHK_ERR( context.MBI->tag_get_length( tag, tagLength ) );
1644 
1645  DataType dtype;
1646  MB_CHK_ERR( context.MBI->tag_get_data_type( tag, dtype ) );
1647 
1648  if( dtype != MB_TYPE_INTEGER )
1649  {
1650  MB_CHK_SET_ERR( moab::MB_FAILURE, "The tag is not of integer type." );
1651  }
1652 
1653  // set it on a subset of entities, based on type and length
1654  // if *entity_type = 0, then use vertices; else elements
1655  Range* ents_to_set = ( *ent_type == 0 ? &data.all_verts : &data.primary_elems );
1656  int nents_to_be_set = *num_tag_storage_length / tagLength;
1657 
1658  if( nents_to_be_set > (int)ents_to_set->size() )
1659  {
1660  return moab::MB_FAILURE;
1661  } // too many entities to be set, or too few
1662 
1663  // now set the tag data
1664  MB_CHK_ERR( context.MBI->tag_set_data( tag, *ents_to_set, tag_storage_data ) );
1665 
1666  return moab::MB_SUCCESS; // no error
1667 }
1668 
1669 ErrCode iMOAB_GetIntTagStorage( iMOAB_AppID pid,
1670  const iMOAB_String tag_storage_name,
1671  int* num_tag_storage_length,
1672  int* ent_type,
1673  int* tag_storage_data )
1674 {
1675  ErrorCode rval;
1676  std::string tag_name( tag_storage_name );
1677 
1678  appData& data = context.appDatas[*pid];
1679 
1680  if( data.tagMap.find( tag_name ) == data.tagMap.end() )
1681  {
1682  return moab::MB_FAILURE;
1683  } // tag not defined
1684 
1685  Tag tag = data.tagMap[tag_name];
1686 
1687  int tagLength = 0;
1688  MB_CHK_ERR( context.MBI->tag_get_length( tag, tagLength ) );
1689 
1690  DataType dtype;
1691  MB_CHK_ERR( context.MBI->tag_get_data_type( tag, dtype ) );
1692 
1693  if( dtype != MB_TYPE_INTEGER )
1694  {
1695  MB_CHK_SET_ERR( moab::MB_FAILURE, "The tag is not of integer type." );
1696  }
1697 
1698  // set it on a subset of entities, based on type and length
1699  // if *entity_type = 0, then use vertices; else elements
1700  Range* ents_to_get = ( *ent_type == 0 ? &data.all_verts : &data.primary_elems );
1701  int nents_to_get = *num_tag_storage_length / tagLength;
1702 
1703  if( nents_to_get > (int)ents_to_get->size() )
1704  {
1705  return moab::MB_FAILURE;
1706  } // too many entities to get, or too few
1707 
1708  // now set the tag data
1709  rval = context.MBI->tag_get_data( tag, *ents_to_get, tag_storage_data );MB_CHK_ERR( rval );
1710 
1711  return moab::MB_SUCCESS; // no error
1712 }
1713 
1714 ErrCode iMOAB_SetDoubleTagStorage( iMOAB_AppID pid,
1715  const iMOAB_String tag_storage_names,
1716  int* num_tag_storage_length,
1717  int* ent_type,
1718  double* tag_storage_data )
1719 {
1720  ErrorCode rval;
1721  std::string tag_names( tag_storage_names );
1722  // exactly the same code as for int tag :) maybe should check the type of tag too
1723  std::vector< std::string > tagNames;
1724  std::vector< Tag > tagHandles;
1725  std::string separator( ":" );
1726  split_tag_names( tag_names, separator, tagNames );
1727 
1728  appData& data = context.appDatas[*pid];
1729  // set it on a subset of entities, based on type and length
1730  // if *entity_type = 0, then use vertices; else elements
1731  Range* ents_to_set = ( *ent_type == 0 ? &data.all_verts : &data.primary_elems );
1732 
1733  int nents_to_be_set = (int)( *ents_to_set ).size();
1734  int position = 0;
1735  for( size_t i = 0; i < tagNames.size(); i++ )
1736  {
1737  if( data.tagMap.find( tagNames[i] ) == data.tagMap.end() )
1738  {
1739  return moab::MB_FAILURE;
1740  } // some tag not defined yet in the app
1741 
1742  Tag tag = data.tagMap[tagNames[i]];
1743 
1744  int tagLength = 0;
1745  rval = context.MBI->tag_get_length( tag, tagLength );MB_CHK_ERR( rval );
1746 
1747  DataType dtype;
1748  rval = context.MBI->tag_get_data_type( tag, dtype );MB_CHK_ERR( rval );
1749 
1750  if( dtype != MB_TYPE_DOUBLE )
1751  {
1752  return moab::MB_FAILURE;
1753  }
1754 
1755  // set it on the subset of entities, based on type and length
1756  if( position + tagLength * nents_to_be_set > *num_tag_storage_length )
1757  return moab::MB_FAILURE; // too many entity values to be set
1758 
1759  rval = context.MBI->tag_set_data( tag, *ents_to_set, &tag_storage_data[position] );MB_CHK_ERR( rval );
1760  // increment position to next tag
1761  position = position + tagLength * nents_to_be_set;
1762  }
1763  return moab::MB_SUCCESS; // no error
1764 }
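// Usage sketch (not part of the original source): values for several tags travel in one flat array,
// blocked tag by tag rather than interleaved per entity, as the position bookkeeping above shows;
// tnames and nelems continue the earlier sketches.
//
//   int ent_type = 1;                          // nonzero selects primary elements, 0 selects vertices
//   int len      = 2 * nelems[2];              // two scalar tags over all visible cells
//   std::vector< double > vals( len, 0.0 );    // layout: [Temperature[0..n-1], Velocity[0..n-1]]
//   ErrCode rc = iMOAB_SetDoubleTagStorage( &appID, tnames, &len, &ent_type, vals.data() );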
1765 
1766 ErrCode iMOAB_SetDoubleTagStorageWithGid( iMOAB_AppID pid,
1767  const iMOAB_String tag_storage_names,
1768  int* num_tag_storage_length,
1769  int* ent_type,
1770  double* tag_storage_data,
1771  int* globalIds )
1772 {
1773  ErrorCode rval;
1774  std::string tag_names( tag_storage_names );
1775  // exactly the same code as for int tag :) maybe should check the type of tag too
1776  std::vector< std::string > tagNames;
1777  std::vector< Tag > tagHandles;
1778  std::string separator( ":" );
1779  split_tag_names( tag_names, separator, tagNames );
1780 
1781  appData& data = context.appDatas[*pid];
1782  // set it on a subset of entities, based on type and length
1783  // if *entity_type = 0, then use vertices; else elements
1784  Range* ents_to_set = ( *ent_type == 0 ? &data.all_verts : &data.primary_elems );
1785  int nents_to_be_set = (int)( *ents_to_set ).size();
1786 
1787  Tag gidTag = context.MBI->globalId_tag();
1788  std::vector< int > gids;
1789  gids.resize( nents_to_be_set );
1790  rval = context.MBI->tag_get_data( gidTag, *ents_to_set, &gids[0] );MB_CHK_ERR( rval );
1791 
1792  // so we will need to set the tags according to the global id passed;
1793  // so the order in tag_storage_data is the same as the order in globalIds, but the order
1794  // in local range is gids
1795  std::map< int, EntityHandle > eh_by_gid;
1796  int i = 0;
1797  for( Range::iterator it = ents_to_set->begin(); it != ents_to_set->end(); ++it, ++i )
1798  {
1799  eh_by_gid[gids[i]] = *it;
1800  }
1801  // TODO: allow for tags of different length
1802  int nbLocalVals = *num_tag_storage_length / ( (int)tagNames.size() ); // assumes all tags have the same length?
1803  // check global ids to have different values
1804  std::set< int > globalIdsSet;
1805  for( int j = 0; j < nbLocalVals; j++ )
1806  globalIdsSet.insert( globalIds[j] );
1807  if( globalIdsSet.size() < nbLocalVals )
1808  {
1809  std::cout << "iMOAB_SetDoubleTagStorageWithGid: for pid:" << *pid << " tags[0]:" << tagNames[0]
1810  << " global ids passed are not unique, major error\n";
1811  std::cout << " nbLocalVals:" << nbLocalVals << " globalIdsSet.size():" << globalIdsSet.size()
1812  << " first global id:" << globalIds[0] << "\n";
1813  return moab::MB_FAILURE;
1814  }
1815 
1816  std::vector< int > tagLengths( tagNames.size() );
1817  std::vector< Tag > tagList;
1818  size_t total_tag_len = 0;
1819  for( size_t i = 0; i < tagNames.size(); i++ )
1820  {
1821  if( data.tagMap.find( tagNames[i] ) == data.tagMap.end() )
1822  {
1823  MB_SET_ERR( moab::MB_FAILURE, "tag missing" );
1824  } // some tag not defined yet in the app
1825 
1826  Tag tag = data.tagMap[tagNames[i]];
1827  tagList.push_back( tag );
1828 
1829  int tagLength = 0;
1830  rval = context.MBI->tag_get_length( tag, tagLength );MB_CHK_ERR( rval );
1831 
1832  total_tag_len += tagLength;
1833  tagLengths[i] = tagLength;
1834  DataType dtype;
1835  rval = context.MBI->tag_get_data_type( tag, dtype );MB_CHK_ERR( rval );
1836 
1837  if( dtype != MB_TYPE_DOUBLE )
1838  {
1839  MB_SET_ERR( moab::MB_FAILURE, "tag not double type" );
1840  }
1841  }
1842  bool serial = true;
1843 #ifdef MOAB_HAVE_MPI
1844  ParallelComm* pco = context.appDatas[*pid].pcomm;
1845  unsigned num_procs = pco->size();
1846  if( num_procs > 1 ) serial = false;
1847 #endif
1848 
1849  if( serial )
1850  {
1851  // we do not assume anymore that the number of entities has to match
1852  // we will set only what matches, and skip entities that do not have corresponding global ids
1853  //assert( total_tag_len * nents_to_be_set - *num_tag_storage_length == 0 );
1854  // tags are unrolled, we loop over global ids first, then careful about tags
1855  for( int i = 0; i < nents_to_be_set; i++ )
1856  {
1857  int gid = globalIds[i];
1858  std::map< int, EntityHandle >::iterator mapIt = eh_by_gid.find( gid );
1859  if( mapIt == eh_by_gid.end() ) continue;
1860  EntityHandle eh = mapIt->second;
1861  // now loop over tags
1862  int indexInTagValues = 0; //
1863  for( size_t j = 0; j < tagList.size(); j++ )
1864  {
1865  indexInTagValues += i * tagLengths[j];
1866  rval = context.MBI->tag_set_data( tagList[j], &eh, 1, &tag_storage_data[indexInTagValues] );MB_CHK_ERR( rval );
1867  // advance the pointer/index
1868  indexInTagValues += ( nents_to_be_set - i ) * tagLengths[j]; // at the end of tag data
1869  }
1870  }
1871  }
1872 #ifdef MOAB_HAVE_MPI
1873  else // it can be not serial only if pco->size() > 1, parallel
1874  {
1875  // in this case, we have to use 2 crystal routers, to send data to the processor that needs it
1876  // we will first create a tuple list to the rendezvous points, then from there send to the processor that requested it
1877  // it is a 2-hop global gather scatter
1878  // we do not expect the sizes to match
1879  //assert( nbLocalVals * tagNames.size() - *num_tag_storage_length == 0 );
1880  TupleList TLsend;
1881  TLsend.initialize( 2, 0, 0, total_tag_len, nbLocalVals ); // to proc, marker(gid), total_tag_len doubles
1882  TLsend.enableWriteAccess();
1883  // the processor id that processes global_id in the rendezvous is global_id % num_procs
1884 
1885  int indexInRealLocal = 0;
1886  for( int i = 0; i < nbLocalVals; i++ )
1887  {
1888  // to proc, marker (global id), then the tag values for this entity
1889  int marker = globalIds[i];
1890  int to_proc = marker % num_procs;
1891  int n = TLsend.get_n();
1892  TLsend.vi_wr[2 * n] = to_proc; // send to processor
1893  TLsend.vi_wr[2 * n + 1] = marker;
1894  int indexInTagValues = 0;
1895  // collect the tag values, tag by tag, for this entity
1896  for( size_t j = 0; j < tagList.size(); j++ )
1897  {
1898  indexInTagValues += i * tagLengths[j];
1899  for( int k = 0; k < tagLengths[j]; k++ )
1900  {
1901  TLsend.vr_wr[indexInRealLocal++] = tag_storage_data[indexInTagValues + k];
1902  }
1903  indexInTagValues += ( nbLocalVals - i ) * tagLengths[j];
1904  }
1905  TLsend.inc_n();
1906  }
1907 
1908  //assert( nbLocalVals * total_tag_len - indexInRealLocal == 0 );
1909  // now send the requests, basically informing the rendezvous point who needs a particular global id
1910  // send the data to the other processors:
1911  ( pco->proc_config().crystal_router() )->gs_transfer( 1, TLsend, 0 );
1912  TupleList TLreq;
1913  TLreq.initialize( 2, 0, 0, 0, nents_to_be_set );
1914  TLreq.enableWriteAccess();
1915  for( int i = 0; i < nents_to_be_set; i++ )
1916  {
1917  // to proc, marker
1918  int marker = gids[i];
1919  int to_proc = marker % num_procs;
1920  int n = TLreq.get_n();
1921  TLreq.vi_wr[2 * n] = to_proc; // send to processor
1922  TLreq.vi_wr[2 * n + 1] = marker;
1923  // no tag data here; this is just a request for the given global id
1924  TLreq.inc_n();
1925  }
1926  ( pco->proc_config().crystal_router() )->gs_transfer( 1, TLreq, 0 );
1927 
1928  // we know now that process TLreq.vi_wr[2 * n] needs tags for gid TLreq.vi_wr[2 * n + 1]
1929  // we should first order by global id, and then build the new TL with send to proc, global id and
1930  // tags for it
1931  // sort by global ids the tuple lists
1932  moab::TupleList::buffer sort_buffer;
1933  sort_buffer.buffer_init( TLreq.get_n() );
1934  TLreq.sort( 1, &sort_buffer );
1935  sort_buffer.reset();
1936  sort_buffer.buffer_init( TLsend.get_n() );
1937  TLsend.sort( 1, &sort_buffer );
1938  sort_buffer.reset();
1939  // now send the tag values to the proc that requested it
1940  // in theory, for a full partition, TLreq and TLsend should have the same size, and
1941  // each dof should have exactly one target proc. Is that true or not in general?
1942  // how do we plan to use this? Is it better to store the comm graph for future use?
1943 
1944  // start copy from comm graph settle
1945  TupleList TLBack;
1946  TLBack.initialize( 3, 0, 0, total_tag_len, 0 ); // to proc, marker, tag from proc , tag values
1947  TLBack.enableWriteAccess();
1948 
1949  int n1 = TLreq.get_n();
1950  int n2 = TLsend.get_n();
1951 
1952  int indexInTLreq = 0;
1953  int indexInTLsend = 0; // advance both, according to the marker
1954  if( n1 > 0 && n2 > 0 )
1955  {
1956 
1957  while( indexInTLreq < n1 && indexInTLsend < n2 ) // if any is over, we are done
1958  {
1959  int currentValue1 = TLreq.vi_rd[2 * indexInTLreq + 1];
1960  int currentValue2 = TLsend.vi_rd[2 * indexInTLsend + 1];
1961  if( currentValue1 < currentValue2 )
1962  {
1963  // we have a big problem; basically, we are saying that
1964  // dof currentValue is on one model and not on the other
1965  // std::cout << " currentValue1:" << currentValue1 << " missing in comp2" << "\n";
1966  indexInTLreq++;
1967  continue;
1968  }
1969  if( currentValue1 > currentValue2 )
1970  {
1971  // std::cout << " currentValue2:" << currentValue2 << " missing in comp1" << "\n";
1972  indexInTLsend++;
1973  continue;
1974  }
1975  int size1 = 1;
1976  int size2 = 1;
1977  while( indexInTLreq + size1 < n1 && currentValue1 == TLreq.vi_rd[2 * ( indexInTLreq + size1 ) + 1] )
1978  size1++;
1979  while( indexInTLsend + size2 < n2 && currentValue2 == TLsend.vi_rd[2 * ( indexInTLsend + size2 ) + 1] )
1980  size2++;
1981  // must be found in both lists, find the start and end indices
1982  for( int i1 = 0; i1 < size1; i1++ )
1983  {
1984  for( int i2 = 0; i2 < size2; i2++ )
1985  {
1986  // send the info back to components
1987  int n = TLBack.get_n();
1988  TLBack.reserve();
1989  TLBack.vi_wr[3 * n] = TLreq.vi_rd[2 * ( indexInTLreq + i1 )]; // send back to the proc marker
1990  // came from, info from comp2
1991  TLBack.vi_wr[3 * n + 1] = currentValue1; // initial value (resend, just for verif ?)
1992  TLBack.vi_wr[3 * n + 2] = TLsend.vi_rd[2 * ( indexInTLsend + i2 )]; // from proc on comp2
1993  // also fill tag values
1994  for( size_t k = 0; k < total_tag_len; k++ )
1995  {
1996  TLBack.vr_rd[total_tag_len * n + k] =
1997  TLsend.vr_rd[total_tag_len * indexInTLsend + k]; // deep copy of tag values
1998  }
1999  }
2000  }
2001  indexInTLreq += size1;
2002  indexInTLsend += size2;
2003  }
2004  }
2005  ( pco->proc_config().crystal_router() )->gs_transfer( 1, TLBack, 0 );
2006  // end copy from comm graph
2007  // after we are done sending, we need to set those tag values, in a reverse process compared to send
2008  n1 = TLBack.get_n();
2009  double* ptrVal = &TLBack.vr_rd[0]; //
2010  for( int i = 0; i < n1; i++ )
2011  {
2012  int gid = TLBack.vi_rd[3 * i + 1]; // marker
2013  std::map< int, EntityHandle >::iterator mapIt = eh_by_gid.find( gid );
2014  if( mapIt == eh_by_gid.end() ) continue;
2015  EntityHandle eh = mapIt->second;
2016  // now loop over tags
2017 
2018  for( size_t j = 0; j < tagList.size(); j++ )
2019  {
2020  rval = context.MBI->tag_set_data( tagList[j], &eh, 1, (void*)ptrVal );MB_CHK_ERR( rval );
2021  // advance the pointer/index
2022  ptrVal += tagLengths[j]; // at the end of tag data per call
2023  }
2024  }
2025  }
2026 #endif
2027  return MB_SUCCESS;
2028 }
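// A minimal sketch (kept under #if 0) of the rendezvous rule used in the parallel branch above:
// the task that temporarily handles a global id on the joint communicator is gid % num_procs,
// so the owner of the data and the requester can both compute the meeting point independently,
// and two crystal-router hops (request, then reply) are enough to match values by global id.
#if 0
static inline int rendezvous_task( int global_id, int num_procs )
{
    return global_id % num_procs;  // same rule used when filling TLsend and TLreq above
}
#endif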
2029 ErrCode iMOAB_GetDoubleTagStorage( iMOAB_AppID pid,
2030  const iMOAB_String tag_storage_names,
2031  int* num_tag_storage_length,
2032  int* ent_type,
2033  double* tag_storage_data )
2034 {
2035  ErrorCode rval;
2036  // exactly the same code, except tag type check
2037  std::string tag_names( tag_storage_names );
2038  // exactly the same code as for int tag :) maybe should check the type of tag too
2039  std::vector< std::string > tagNames;
2040  std::vector< Tag > tagHandles;
2041  std::string separator( ":" );
2042  split_tag_names( tag_names, separator, tagNames );
2043 
2044  appData& data = context.appDatas[*pid];
2045 
2046  // set it on a subset of entities, based on type and length
2047  Range* ents_to_get = nullptr;
2048 
2049  if( *ent_type == 0 ) // vertices
2050  {
2051  ents_to_get = &data.all_verts;
2052  }
2053  else // any other value means primary elements; avoids leaving ents_to_get null below
2054  {
2055  ents_to_get = &data.primary_elems;
2056  }
2057  int nents_to_get = (int)ents_to_get->size();
2058  int position = 0;
2059  for( size_t i = 0; i < tagNames.size(); i++ )
2060  {
2061  if( data.tagMap.find( tagNames[i] ) == data.tagMap.end() )
2062  {
2063  return moab::MB_FAILURE;
2064  } // tag not defined
2065 
2066  Tag tag = data.tagMap[tagNames[i]];
2067 
2068  int tagLength = 0;
2069  rval = context.MBI->tag_get_length( tag, tagLength );MB_CHK_ERR( rval );
2070 
2071  DataType dtype;
2072  rval = context.MBI->tag_get_data_type( tag, dtype );MB_CHK_ERR( rval );
2073 
2074  if( dtype != MB_TYPE_DOUBLE )
2075  {
2076  return moab::MB_FAILURE;
2077  }
2078 
2079  if( position + nents_to_get * tagLength > *num_tag_storage_length )
2080  return moab::MB_FAILURE; // too many entity values to get
2081 
2082  rval = context.MBI->tag_get_data( tag, *ents_to_get, &tag_storage_data[position] );MB_CHK_ERR( rval );
2083  position = position + nents_to_get * tagLength;
2084  }
2085 
2086  return moab::MB_SUCCESS; // no error
2087 }
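// A minimal caller-side sketch (kept under #if 0) mirroring the set path: retrieving two
// hypothetical scalar vertex tags with iMOAB_GetDoubleTagStorage above. The returned buffer
// uses the same concatenated layout, all values of the first tag followed by all values of
// the second; the tag names are placeholders and are assumed to be already defined in the app.
#if 0
static ErrCode example_get_two_vertex_tags( iMOAB_AppID pid, int nverts, std::vector< double >& vals )
{
    vals.resize( 2 * nverts );
    int total_len = (int)vals.size();
    int ent_type  = 0;  // vertices
    char tnames[] = "Temperature:Salinity";
    return iMOAB_GetDoubleTagStorage( pid, tnames, &total_len, &ent_type, &vals[0] );
}
#endif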
2088 
2089 ErrCode iMOAB_SynchronizeTags( iMOAB_AppID pid, int* num_tag, int* tag_indices, int* ent_type )
2090 {
2091 #ifdef MOAB_HAVE_MPI
2092  appData& data = context.appDatas[*pid];
2093  Range ent_exchange;
2094  std::vector< Tag > tags;
2095 
2096  for( int i = 0; i < *num_tag; i++ )
2097  {
2098  if( tag_indices[i] < 0 || tag_indices[i] >= (int)data.tagList.size() )
2099  {
2100  return moab::MB_FAILURE;
2101  } // error in tag index
2102 
2103  tags.push_back( data.tagList[tag_indices[i]] );
2104  }
2105 
2106  if( *ent_type == 0 )
2107  {
2108  ent_exchange = data.all_verts;
2109  }
2110  else if( *ent_type == 1 )
2111  {
2112  ent_exchange = data.primary_elems;
2113  }
2114  else
2115  {
2116  return moab::MB_FAILURE;
2117  } // unexpected type
2118 
2119  ParallelComm* pco = context.appDatas[*pid].pcomm;
2120 
2121  ErrorCode rval = pco->exchange_tags( tags, tags, ent_exchange );MB_CHK_ERR( rval );
2122 
2123 #else
2124  /* do nothing if serial */
2125  // just silence the warning
2126  // do not call sync tags in serial!
2127  int k = *pid + *num_tag + *tag_indices + *ent_type;
2128  k++;
2129 #endif
2130 
2131  return moab::MB_SUCCESS;
2132 }
2133 
2134 ErrCode iMOAB_ReduceTagsMax( iMOAB_AppID pid, int* tag_index, int* ent_type )
2135 {
2136 #ifdef MOAB_HAVE_MPI
2137  appData& data = context.appDatas[*pid];
2138  Range ent_exchange;
2139 
2140  if( *tag_index < 0 || *tag_index >= (int)data.tagList.size() )
2141  {
2142  return moab::MB_FAILURE;
2143  } // error in tag index
2144 
2145  Tag tagh = data.tagList[*tag_index];
2146 
2147  if( *ent_type == 0 )
2148  {
2149  ent_exchange = data.all_verts;
2150  }
2151  else if( *ent_type == 1 )
2152  {
2153  ent_exchange = data.primary_elems;
2154  }
2155  else
2156  {
2157  return moab::MB_FAILURE;
2158  } // unexpected type
2159 
2160  ParallelComm* pco = context.appDatas[*pid].pcomm;
2161  // we could do different MPI_Op; do not bother now, we will call from fortran
2162  ErrorCode rval = pco->reduce_tags( tagh, MPI_MAX, ent_exchange );MB_CHK_ERR( rval );
2163 
2164 #else
2165  /* do nothing if serial */
2166  // just silence the warning
2167  // do not call sync tags in serial!
2168  int k = *pid + *tag_index + *ent_type;
2169  k++; // just do junk, to avoid complaints
2170 #endif
2171  return moab::MB_SUCCESS;
2172 }
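// A minimal sketch (kept under #if 0) of a typical use of the two collective operations above,
// assuming a tag was registered earlier with iMOAB_DefineTagStorage and its returned index kept
// by the caller; in serial builds both calls are harmless no-ops.
#if 0
static ErrCode example_sync_and_reduce( iMOAB_AppID pid, int tag_index )
{
    int ent_type = 1;  // primary elements
    int num_tag  = 1;
    // make ghost/shared copies consistent with the owning task
    ErrCode ierr = iMOAB_SynchronizeTags( pid, &num_tag, &tag_index, &ent_type );
    if( ierr ) return ierr;
    // then keep the maximum value over shared entities
    return iMOAB_ReduceTagsMax( pid, &tag_index, &ent_type );
}
#endif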
2173 
2174 ErrCode iMOAB_GetNeighborElements( iMOAB_AppID pid,
2175  iMOAB_LocalID* local_index,
2176  int* num_adjacent_elements,
2177  iMOAB_LocalID* adjacent_element_IDs )
2178 {
2179  ErrorCode rval;
2180 
2181  // one neighbor for each subentity of dimension-1
2182  MeshTopoUtil mtu( context.MBI );
2183  appData& data = context.appDatas[*pid];
2184  EntityHandle eh = data.primary_elems[*local_index];
2185  Range adjs;
2186  rval = mtu.get_bridge_adjacencies( eh, data.dimension - 1, data.dimension, adjs );MB_CHK_ERR( rval );
2187 
2188  if( *num_adjacent_elements < (int)adjs.size() )
2189  {
2190  return moab::MB_FAILURE;
2191  } // not dimensioned correctly
2192 
2193  *num_adjacent_elements = (int)adjs.size();
2194 
2195  for( int i = 0; i < *num_adjacent_elements; i++ )
2196  {
2197  adjacent_element_IDs[i] = data.primary_elems.index( adjs[i] );
2198  }
2199 
2200  return moab::MB_SUCCESS;
2201 }
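// A minimal caller-side sketch (kept under #if 0) for the neighbor query above: the caller
// passes the capacity of its output buffer and gets back the actual number of face neighbors;
// the buffer size of 10 is an arbitrary illustration.
#if 0
static ErrCode example_print_neighbors( iMOAB_AppID pid, iMOAB_LocalID cell )
{
    iMOAB_LocalID neighbors[10];
    int num_neighbors = 10;  // in: capacity of the buffer, out: number of bridge-adjacent elements
    ErrCode ierr      = iMOAB_GetNeighborElements( pid, &cell, &num_neighbors, neighbors );
    if( ierr ) return ierr;
    for( int i = 0; i < num_neighbors; i++ )
        std::cout << " neighbor local index: " << neighbors[i] << "\n";
    return moab::MB_SUCCESS;
}
#endif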
2202 
2203 #if 0
2204 
2205 ErrCode iMOAB_GetNeighborVertices ( iMOAB_AppID pid, iMOAB_LocalID* local_vertex_ID, int* num_adjacent_vertices, iMOAB_LocalID* adjacent_vertex_IDs )
2206 {
2207  return moab::MB_SUCCESS;
2208 }
2209 
2210 #endif
2211 
2212 ErrCode iMOAB_CreateVertices( iMOAB_AppID pid, int* coords_len, int* dim, double* coordinates )
2213 {
2214  ErrorCode rval;
2215  appData& data = context.appDatas[*pid];
2216 
2217  if( !data.local_verts.empty() ) // we should have no vertices in the app
2218  {
2219  return moab::MB_FAILURE;
2220  }
2221 
2222  int nverts = *coords_len / *dim;
2223 
2224  rval = context.MBI->create_vertices( coordinates, nverts, data.local_verts );MB_CHK_ERR( rval );
2225 
2226  rval = context.MBI->add_entities( data.file_set, data.local_verts );MB_CHK_ERR( rval );
2227 
2228  // also add the vertices to the all_verts range
2229  data.all_verts.merge( data.local_verts );
2230  return moab::MB_SUCCESS;
2231 }
2232 
2233 ErrCode iMOAB_CreateElements( iMOAB_AppID pid,
2234  int* num_elem,
2235  int* type,
2236  int* num_nodes_per_element,
2237  int* connectivity,
2238  int* block_ID )
2239 {
2240  // Create elements
2241  appData& data = context.appDatas[*pid];
2242 
2243  ReadUtilIface* read_iface;
2244  ErrorCode rval = context.MBI->query_interface( read_iface );MB_CHK_ERR( rval );
2245 
2246  EntityType mbtype = (EntityType)( *type );
2247  EntityHandle actual_start_handle;
2248  EntityHandle* array = nullptr;
2249  rval = read_iface->get_element_connect( *num_elem, *num_nodes_per_element, mbtype, 1, actual_start_handle, array );MB_CHK_ERR( rval );
2250 
2251  // fill up with actual connectivity from input; assume the vertices are in order, and start
2252  // vertex is the first in the current data vertex range
2253  EntityHandle firstVertex = data.local_verts[0];
2254 
2255  for( int j = 0; j < *num_elem * ( *num_nodes_per_element ); j++ )
2256  {
2257  array[j] = connectivity[j] + firstVertex - 1;
2258  } // assumes connectivity uses a 1-based array (from Fortran, mostly)
2259 
2260  Range new_elems( actual_start_handle, actual_start_handle + *num_elem - 1 );
2261 
2262  rval = context.MBI->add_entities( data.file_set, new_elems );MB_CHK_ERR( rval );
2263 
2264  data.primary_elems.merge( new_elems );
2265 
2266  // add to adjacency
2267  rval = read_iface->update_adjacencies( actual_start_handle, *num_elem, *num_nodes_per_element, array );MB_CHK_ERR( rval );
2268  // organize all new elements in a block, with the given block ID; if the block set does not
2269  // exist, create a new mesh set
2270  Range sets;
2271  int set_no = *block_ID;
2272  const void* setno_ptr = &set_no;
2273  rval = context.MBI->get_entities_by_type_and_tag( data.file_set, MBENTITYSET, &( context.material_tag ), &setno_ptr, 1,
2274  sets );
2275  EntityHandle block_set;
2276 
2277  if( MB_FAILURE == rval || sets.empty() )
2278  {
2279  // create a new set, with this block ID
2280  rval = context.MBI->create_meshset( MESHSET_SET, block_set );MB_CHK_ERR( rval );
2281 
2282  rval = context.MBI->tag_set_data( context.material_tag, &block_set, 1, &set_no );MB_CHK_ERR( rval );
2283 
2284  // add the material set to file set
2285  rval = context.MBI->add_entities( data.file_set, &block_set, 1 );MB_CHK_ERR( rval );
2286  }
2287  else
2288  {
2289  block_set = sets[0];
2290  } // first set is the one we want
2291 
2292  /// add the new ents to the block set
2293  rval = context.MBI->add_entities( block_set, new_elems );MB_CHK_ERR( rval );
2294 
2295  return moab::MB_SUCCESS;
2296 }
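// A minimal caller-side sketch (kept under #if 0) that builds one quad from scratch with the two
// routines above, for an application that does not have any mesh yet. Note the 1-based (Fortran
// style) connectivity assumed by iMOAB_CreateElements; the block id 100 is an arbitrary example.
#if 0
static ErrCode example_create_one_quad( iMOAB_AppID pid )
{
    double coords[] = { 0., 0., 0., 1., 0., 0., 1., 1., 0., 0., 1., 0. };  // 4 vertices, interleaved xyz
    int coords_len  = 12;
    int dim         = 3;
    ErrCode ierr    = iMOAB_CreateVertices( pid, &coords_len, &dim, coords );
    if( ierr ) return ierr;

    int conn[]         = { 1, 2, 3, 4 };  // 1-based indices into the vertices just created
    int num_elem       = 1;
    int type           = MBQUAD;  // MOAB entity type, cast back to EntityType inside
    int nodes_per_elem = 4;
    int block_id       = 100;
    return iMOAB_CreateElements( pid, &num_elem, &type, &nodes_per_elem, conn, &block_id );
}
#endif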
2297 
2298 ErrCode iMOAB_SetGlobalInfo( iMOAB_AppID pid, int* num_global_verts, int* num_global_elems )
2299 {
2300  appData& data = context.appDatas[*pid];
2301  data.num_global_vertices = *num_global_verts;
2302  data.num_global_elements = *num_global_elems;
2303  return moab::MB_SUCCESS;
2304 }
2305 
2306 ErrCode iMOAB_GetGlobalInfo( iMOAB_AppID pid, int* num_global_verts, int* num_global_elems )
2307 {
2308  appData& data = context.appDatas[*pid];
2309  if( nullptr != num_global_verts )
2310  {
2311  *num_global_verts = data.num_global_vertices;
2312  }
2313  if( nullptr != num_global_elems )
2314  {
2315  *num_global_elems = data.num_global_elements;
2316  }
2317 
2318  return moab::MB_SUCCESS;
2319 }
2320 
2321 #ifdef MOAB_HAVE_MPI
2322 
2323 // this makes sense only for parallel runs
2324 ErrCode iMOAB_ResolveSharedEntities( iMOAB_AppID pid, int* num_verts, int* marker )
2325 {
2326  appData& data = context.appDatas[*pid];
2327  ParallelComm* pco = context.appDatas[*pid].pcomm;
2328  EntityHandle cset = data.file_set;
2329  int dum_id = 0;
2330  ErrorCode rval;
2331  if( data.primary_elems.empty() )
2332  {
2333  // skip the actual resolve; assume vertices are already distributed,
2334  // no need to share them
2335  }
2336  else
2337  {
2338  // create an integer tag for resolving; maybe it can become a long tag in the future
2339  // (more than 2 billion vertices)
2340 
2341  Tag stag;
2342  rval = context.MBI->tag_get_handle( "__sharedmarker", 1, MB_TYPE_INTEGER, stag, MB_TAG_CREAT | MB_TAG_DENSE,
2343  &dum_id );MB_CHK_ERR( rval );
2344 
2345  if( *num_verts > (int)data.local_verts.size() )
2346  {
2347  return moab::MB_FAILURE;
2348  } // we are not setting the size
2349 
2350  rval = context.MBI->tag_set_data( stag, data.local_verts, (void*)marker );MB_CHK_ERR( rval ); // assumes integer tag
2351 
2352  rval = pco->resolve_shared_ents( cset, -1, -1, &stag );MB_CHK_ERR( rval );
2353 
2354  rval = context.MBI->tag_delete( stag );MB_CHK_ERR( rval );
2355  }
2356  // provide partition tag equal to rank
2357  Tag part_tag;
2358  dum_id = -1;
2359  rval = context.MBI->tag_get_handle( "PARALLEL_PARTITION", 1, MB_TYPE_INTEGER, part_tag,
2360  MB_TAG_CREAT | MB_TAG_SPARSE, &dum_id );
2361 
2362  if( part_tag == nullptr || ( ( rval != MB_SUCCESS ) && ( rval != MB_ALREADY_ALLOCATED ) ) )
2363  {
2364  std::cout << " can't get par part tag.\n";
2365  return moab::MB_FAILURE;
2366  }
2367 
2368  int rank = pco->rank();
2369  rval = context.MBI->tag_set_data( part_tag, &cset, 1, &rank );MB_CHK_ERR( rval );
2370 
2371  return moab::MB_SUCCESS;
2372 }
2373 
2374 // this assumes that this was not called before
2375 ErrCode iMOAB_DetermineGhostEntities( iMOAB_AppID pid, int* ghost_dim, int* num_ghost_layers, int* bridge_dim )
2376 {
2377  ErrorCode rval;
2378 
2379  // verify we have valid ghost layers input specified. If invalid, exit quick.
2380  if( num_ghost_layers && *num_ghost_layers <= 0 )
2381  {
2382  return moab::MB_SUCCESS;
2383  } // nothing to do
2384 
2385  appData& data = context.appDatas[*pid];
2386  ParallelComm* pco = context.appDatas[*pid].pcomm;
2387 
2388  // maybe we should be passing this too; most of the time we do not need additional ents (collective call)
2389  constexpr int addl_ents = 0;
2390  rval = pco->exchange_ghost_cells( *ghost_dim, *bridge_dim, 1, // get only one layer of ghost entities
2391  addl_ents, true, true, &data.file_set );MB_CHK_ERR( rval );
2392  for( int i = 2; i <= *num_ghost_layers; i++ )
2393  {
2394  rval = pco->correct_thin_ghost_layers();MB_CHK_ERR( rval ); // correct for thin layers
2395  rval = pco->exchange_ghost_cells( *ghost_dim, *bridge_dim, i, // iteratively get one extra layer
2396  addl_ents, true, true, &data.file_set );MB_CHK_ERR( rval );
2397  }
2398 
2399  // Update ghost layer information
2400  data.num_ghost_layers = *num_ghost_layers;
2401 
2402  // now re-establish all mesh info; will reconstruct mesh info, based solely on what is in the file set
2403  return iMOAB_UpdateMeshInfo( pid );
2404 }
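// A minimal sketch (kept under #if 0) of the usual sequence after creating a mesh in parallel:
// resolve the shared interface first (here the markers are assumed to be the vertex global ids
// already known to the caller), then request one layer of ghost cells bridged through vertices.
#if 0
static ErrCode example_resolve_and_ghost( iMOAB_AppID pid, int nverts, int* vertex_global_ids )
{
    ErrCode ierr = iMOAB_ResolveSharedEntities( pid, &nverts, vertex_global_ids );
    if( ierr ) return ierr;

    int ghost_dim  = 2;  // ghost cells of dimension 2
    int bridge_dim = 0;  // bridged through shared vertices
    int num_layers = 1;
    return iMOAB_DetermineGhostEntities( pid, &ghost_dim, &num_layers, &bridge_dim );
}
#endif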
2405 
2406 ErrCode iMOAB_SendMesh( iMOAB_AppID pid,
2407  MPI_Comm* joint_communicator,
2408  MPI_Group* receivingGroup,
2409  int* rcompid,
2410  int* method )
2411 {
2412  assert( joint_communicator != nullptr );
2413  assert( receivingGroup != nullptr );
2414  assert( rcompid != nullptr );
2415 
2416  ErrorCode rval;
2417  int ierr;
2418  appData& data = context.appDatas[*pid];
2419  ParallelComm* pco = context.appDatas[*pid].pcomm;
2420 
2421  MPI_Comm global = ( data.is_fortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( joint_communicator ) )
2422  : *joint_communicator );
2423  MPI_Group recvGroup =
2424  ( data.is_fortran ? MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( receivingGroup ) ) : *receivingGroup );
2425  MPI_Comm sender = pco->comm(); // the sender comm is obtained from parallel comm in moab; no need to pass it along
2426  // first see which processors are in each group; get the sender group too, from the sender communicator
2427  MPI_Group senderGroup;
2428  ierr = MPI_Comm_group( sender, &senderGroup );
2429  if( ierr != 0 ) return moab::MB_FAILURE;
2430 
2431  // instantiate the par comm graph
2432  // ParCommGraph::ParCommGraph(MPI_Comm joincomm, MPI_Group group1, MPI_Group group2, int coid1,
2433  // int coid2)
2434  ParCommGraph* cgraph =
2435  new ParCommGraph( global, senderGroup, recvGroup, context.appDatas[*pid].global_id, *rcompid );
2436  // we should search if we have another pcomm with the same comp ids in the list already
2437  // sort of check existing comm graphs in the map context.appDatas[*pid].pgraph
2438  context.appDatas[*pid].pgraph[*rcompid] = cgraph;
2439 
2440  int sender_rank = -1;
2441  MPI_Comm_rank( sender, &sender_rank );
2442 
2443  // decide how to distribute elements to each processor
2444  // now, get the entities on local processor, and pack them into a buffer for various processors
2445  // we will do trivial partition: first get the total number of elements from "sender"
2446  std::vector< int > number_elems_per_part;
2447  // how to distribute local elements to receiving tasks?
2448  // trivial partition: compute first the total number of elements that need to be sent
2449  Range owned = context.appDatas[*pid].owned_elems;
2450  if( owned.size() == 0 )
2451  {
2452  // then it must be vertices that we want to send
2453  owned = context.appDatas[*pid].local_verts;
2454  // we should have some vertices here
2455  }
2456 
2457  if( *method == 0 ) // trivial partitioning, old method
2458  {
2459  int local_owned_elem = (int)owned.size();
2460  int size = pco->size();
2461  int rank = pco->rank();
2462  number_elems_per_part.resize( size ); //
2463  number_elems_per_part[rank] = local_owned_elem;
2464 #if( MPI_VERSION >= 2 )
2465  // Use "in place" option
2466  ierr = MPI_Allgather( MPI_IN_PLACE, 0, MPI_DATATYPE_NULL, &number_elems_per_part[0], 1, MPI_INT, sender );
2467 #else
2468  {
2469  std::vector< int > all_tmp( size );
2470  ierr = MPI_Allgather( &number_elems_per_part[rank], 1, MPI_INT, &all_tmp[0], 1, MPI_INT, sender );
2471  number_elems_per_part = all_tmp;
2472  }
2473 #endif
2474 
2475  if( ierr != 0 )
2476  {
2477  return moab::MB_FAILURE;
2478  }
2479 
2480  // every sender computes the trivial partition, it is cheap, and we need to send it anyway
2481  // to each sender
2482  rval = cgraph->compute_trivial_partition( number_elems_per_part );MB_CHK_ERR( rval );
2483 
2484  rval = cgraph->send_graph( global );MB_CHK_ERR( rval );
2485  }
2486  else // *method != 0, so it is either graph or geometric, parallel
2487  {
2488  // owned are the primary elements on this app
2489  rval = cgraph->compute_partition( pco, owned, *method );MB_CHK_ERR( rval );
2490 
2491  // basically, send the graph to the receiver side, with unblocking send
2492  rval = cgraph->send_graph_partition( pco, global );MB_CHK_ERR( rval );
2493  }
2494  // pco is needed to pack, not for communication
2495  rval = cgraph->send_mesh_parts( global, pco, owned );MB_CHK_ERR( rval );
2496 
2497  // mark for deletion
2498  MPI_Group_free( &senderGroup );
2499  return moab::MB_SUCCESS;
2500 }
2501 
2502 ErrCode iMOAB_ReceiveMesh( iMOAB_AppID pid, MPI_Comm* joint_communicator, MPI_Group* sendingGroup, int* scompid )
2503 {
2504  assert( joint_communicator != nullptr );
2505  assert( sendingGroup != nullptr );
2506  assert( scompid != nullptr );
2507 
2508  ErrorCode rval;
2509  appData& data = context.appDatas[*pid];
2510  ParallelComm* pco = context.appDatas[*pid].pcomm;
2511  MPI_Comm receive = pco->comm();
2512  EntityHandle local_set = data.file_set;
2513 
2514  MPI_Comm global = ( data.is_fortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( joint_communicator ) )
2515  : *joint_communicator );
2516  MPI_Group sendGroup =
2517  ( data.is_fortran ? MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( sendingGroup ) ) : *sendingGroup );
2518 
2519  // first see which processors are in each group; get the sender group too, from the sender
2520  // communicator
2521  MPI_Group receiverGroup;
2522  int ierr = MPI_Comm_group( receive, &receiverGroup );CHK_MPI_ERR( ierr );
2523 
2524  // instantiate the par comm graph
2525  ParCommGraph* cgraph =
2526  new ParCommGraph( global, sendGroup, receiverGroup, *scompid, context.appDatas[*pid].global_id );
2527  // TODO we should search if we have another pcomm with the same comp ids in the list already
2528  // sort of check existing comm graphs in the map context.appDatas[*pid].pgraph
2529  context.appDatas[*pid].pgraph[*scompid] = cgraph;
2530 
2531  int receiver_rank = -1;
2532  MPI_Comm_rank( receive, &receiver_rank );
2533 
2534  // first, receive from sender_rank 0, the communication graph (matrix), so each receiver
2535  // knows what data to expect
2536  std::vector< int > pack_array;
2537  rval = cgraph->receive_comm_graph( global, pco, pack_array );MB_CHK_ERR( rval );
2538 
2539  // find the senders for the current receiver
2540  int current_receiver = cgraph->receiver( receiver_rank );
2541 
2542  std::vector< int > senders_local;
2543  size_t n = 0;
2544 
2545  while( n < pack_array.size() )
2546  {
2547  if( current_receiver == pack_array[n] )
2548  {
2549  for( int j = 0; j < pack_array[n + 1]; j++ )
2550  {
2551  senders_local.push_back( pack_array[n + 2 + j] );
2552  }
2553 
2554  break;
2555  }
2556 
2557  n = n + 2 + pack_array[n + 1];
2558  }
2559 
2560 #ifdef VERBOSE
2561  std::cout << " receiver " << current_receiver << " at rank " << receiver_rank << " will receive from "
2562  << senders_local.size() << " tasks: ";
2563 
2564  for( int k = 0; k < (int)senders_local.size(); k++ )
2565  {
2566  std::cout << " " << senders_local[k];
2567  }
2568 
2569  std::cout << "\n";
2570 #endif
2571 
2572  if( senders_local.empty() )
2573  {
2574  std::cout << " we do not have any senders for receiver rank " << receiver_rank << "\n";
2575  }
2576  rval = cgraph->receive_mesh( global, pco, local_set, senders_local );MB_CHK_ERR( rval );
2577 
2578  // after we are done, we could merge vertices that come from different senders, but
2579  // have the same global id
2580  Tag idtag;
2581  rval = context.MBI->tag_get_handle( "GLOBAL_ID", idtag );MB_CHK_ERR( rval );
2582 
2583  // data.point_cloud = false;
2584  Range local_ents;
2585  rval = context.MBI->get_entities_by_handle( local_set, local_ents );MB_CHK_ERR( rval );
2586 
2587  // do not do merge if point cloud
2588  if( !local_ents.all_of_type( MBVERTEX ) )
2589  {
2590  if( (int)senders_local.size() >= 2 ) // need to remove duplicate vertices
2591  // that might come from different senders
2592  {
2593 
2594  Range local_verts = local_ents.subset_by_type( MBVERTEX );
2595  Range local_elems = subtract( local_ents, local_verts );
2596 
2597  // remove from local set the vertices
2598  rval = context.MBI->remove_entities( local_set, local_verts );MB_CHK_ERR( rval );
2599 
2600 #ifdef VERBOSE
2601  std::cout << "current_receiver " << current_receiver << " local verts: " << local_verts.size() << "\n";
2602 #endif
2603  MergeMesh mm( context.MBI );
2604 
2605  rval = mm.merge_using_integer_tag( local_verts, idtag );MB_CHK_ERR( rval );
2606 
2607  Range new_verts; // local elems are local entities without vertices
2608  rval = context.MBI->get_connectivity( local_elems, new_verts );MB_CHK_ERR( rval );
2609 
2610 #ifdef VERBOSE
2611  std::cout << "after merging: new verts: " << new_verts.size() << "\n";
2612 #endif
2613  rval = context.MBI->add_entities( local_set, new_verts );MB_CHK_ERR( rval );
2614  }
2615  }
2616  else
2617  data.point_cloud = true;
2618 
2619  if( !data.point_cloud )
2620  {
2621  // still need to resolve shared entities (in this case, vertices )
2622  rval = pco->resolve_shared_ents( local_set, -1, -1, &idtag );MB_CHK_ERR( rval );
2623  }
2624  else
2625  {
2626  // if partition tag exists, set it to current rank; just to make it visible in VisIt
2627  Tag densePartTag;
2628  rval = context.MBI->tag_get_handle( "partition", densePartTag );
2629  if( nullptr != densePartTag && MB_SUCCESS == rval )
2630  {
2631  Range local_verts;
2632  rval = context.MBI->get_entities_by_dimension( local_set, 0, local_verts );MB_CHK_ERR( rval );
2633  std::vector< int > vals;
2634  int rank = pco->rank();
2635  vals.resize( local_verts.size(), rank );
2636  rval = context.MBI->tag_set_data( densePartTag, local_verts, &vals[0] );MB_CHK_ERR( rval );
2637  }
2638  }
2639  // set the parallel partition tag
2640  Tag part_tag;
2641  int dum_id = -1;
2642  rval = context.MBI->tag_get_handle( "PARALLEL_PARTITION", 1, MB_TYPE_INTEGER, part_tag,
2643  MB_TAG_CREAT | MB_TAG_SPARSE, &dum_id );
2644 
2645  if( part_tag == nullptr || ( ( rval != MB_SUCCESS ) && ( rval != MB_ALREADY_ALLOCATED ) ) )
2646  {
2647  std::cout << " can't get par part tag.\n";
2648  return moab::MB_FAILURE;
2649  }
2650 
2651  int rank = pco->rank();
2652  rval = context.MBI->tag_set_data( part_tag, &local_set, 1, &rank );MB_CHK_ERR( rval );
2653 
2654  // make sure that the GLOBAL_ID is defined as a tag
2655  int tagtype = 0; // dense, integer
2656  int numco = 1; // size
2657  int tagindex; // not used
2658  rval = iMOAB_DefineTagStorage( pid, "GLOBAL_ID", &tagtype, &numco, &tagindex );MB_CHK_ERR( rval );
2659  // populate the mesh with current data info
2660  rval = iMOAB_UpdateMeshInfo( pid );MB_CHK_ERR( rval );
2661 
2662  // mark for deletion
2663  MPI_Group_free( &receiverGroup );
2664 
2665  return moab::MB_SUCCESS;
2666 }
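// A minimal sketch (kept under #if 0) of how the two collective routines above are meant to be
// paired over a joint communicator: component (sender) tasks call iMOAB_SendMesh and coupler
// (receiver) tasks call iMOAB_ReceiveMesh; the groups, ids and flags are hypothetical placeholders.
#if 0
static ErrCode example_migrate_mesh( iMOAB_AppID pid_send, iMOAB_AppID pid_recv, MPI_Comm joint,
                                     MPI_Group send_group, MPI_Group recv_group, int send_comp_id,
                                     int recv_comp_id, bool on_sender_tasks, bool on_receiver_tasks )
{
    int method = 0;  // 0 = trivial partition; nonzero = graph/geometric partition
    if( on_sender_tasks )
    {
        ErrCode ierr = iMOAB_SendMesh( pid_send, &joint, &recv_group, &recv_comp_id, &method );
        if( ierr ) return ierr;
    }
    if( on_receiver_tasks )
    {
        ErrCode ierr = iMOAB_ReceiveMesh( pid_recv, &joint, &send_group, &send_comp_id );
        if( ierr ) return ierr;
    }
    return moab::MB_SUCCESS;
}
#endif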
2667 
2668 ErrCode iMOAB_SendElementTag( iMOAB_AppID pid,
2669  const iMOAB_String tag_storage_name,
2670  MPI_Comm* joint_communicator,
2671  int* context_id )
2672 {
2673  appData& data = context.appDatas[*pid];
2674  std::map< int, ParCommGraph* >::iterator mt = data.pgraph.find( *context_id );
2675  if( mt == data.pgraph.end() )
2676  {
2677  std::cout << " no par com graph for context_id:" << *context_id << " available contexts:";
2678  for( auto mit = data.pgraph.begin(); mit != data.pgraph.end(); mit++ )
2679  std::cout << " " << mit->first;
2680  std::cout << "\n";
2681  return moab::MB_FAILURE;
2682  }
2683 
2684  ParCommGraph* cgraph = mt->second;
2685  ParallelComm* pco = context.appDatas[*pid].pcomm;
2686  MPI_Comm global = ( data.is_fortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( joint_communicator ) )
2687  : *joint_communicator );
2688  Range owned = ( data.point_cloud ? data.local_verts : data.owned_elems );
2689 
2690 #ifdef MOAB_HAVE_TEMPESTREMAP
2691  if( data.tempestData.remapper != nullptr ) // this is the case where this app is part of an intx (intersection)
2692  {
2693  EntityHandle cover_set = data.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh );
2694  MB_CHK_ERR( context.MBI->get_entities_by_dimension( cover_set, 2, owned ) );
2695  }
2696 #else
2697  // now, in case we are sending from intx between ocn and atm, we involve coverage set
2698  // how do I know if this receiver already participated in an intersection driven by coupler?
2699  // also, what if this was the "source" mesh in intx?
2700  // in that case, the elements might have been instantiated in the coverage set locally;
2701  // the "owned" range can be different, since the elements are now in the tempestRemap coverage set
2702  EntityHandle cover_set = cgraph->get_cover_set(); // this will be non null only for intx app ?
2703  if( 0 != cover_set ) MB_CHK_ERR( context.MBI->get_entities_by_dimension( cover_set, 2, owned ) );
2704 #endif
2705 
2706  std::string tag_name( tag_storage_name );
2707 
2708  // basically, we assume everything is defined already on the tag,
2709  // and we can get the tags just by its name
2710  // we assume that there are separators ":" between the tag names
2711  std::vector< std::string > tagNames;
2712  std::vector< Tag > tagHandles;
2713  std::string separator( ":" );
2714  split_tag_names( tag_name, separator, tagNames );
2715  for( size_t i = 0; i < tagNames.size(); i++ )
2716  {
2717  Tag tagHandle;
2718  ErrorCode rval = context.MBI->tag_get_handle( tagNames[i].c_str(), tagHandle );
2719  if( MB_SUCCESS != rval || nullptr == tagHandle )
2720  {
2721  MB_CHK_SET_ERR( moab::MB_FAILURE,
2722  "can't get tag handle with name: " << tagNames[i].c_str() << " at index " << i );
2723  }
2724  tagHandles.push_back( tagHandle );
2725  }
2726 
2727  // pco is needed to pack, and for moab instance, not for communication!
2728  // still use nonblocking communication, over the joint comm
2729  MB_CHK_ERR( cgraph->send_tag_values( global, pco, owned, tagHandles ) );
2730  // now, send to each corr_tasks[i] tag data for corr_sizes[i] primary entities
2731 
2732  return moab::MB_SUCCESS;
2733 }
2734 
2735 ErrCode iMOAB_ReceiveElementTag( iMOAB_AppID pid,
2736  const iMOAB_String tag_storage_name,
2737  MPI_Comm* joint_communicator,
2738  int* context_id )
2739 {
2740  appData& data = context.appDatas[*pid];
2741  std::map< int, ParCommGraph* >::iterator mt = data.pgraph.find( *context_id );
2742  if( mt == data.pgraph.end() )
2743  {
2744  std::cout << " no par com graph for context_id:" << *context_id << " available contexts:";
2745  for( auto mit = data.pgraph.begin(); mit != data.pgraph.end(); mit++ )
2746  std::cout << " " << mit->first;
2747  std::cout << "\n";
2748  return moab::MB_FAILURE;
2749  }
2750 
2751  ParCommGraph* cgraph = mt->second;
2752  ParallelComm* pco = context.appDatas[*pid].pcomm;
2753  MPI_Comm global = ( data.is_fortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( joint_communicator ) )
2754  : *joint_communicator );
2755  Range owned = ( data.point_cloud ? data.local_verts : data.owned_elems );
2756 
2757  // how do I know if this receiver already participated in an intersection driven by coupler?
2758  // also, what if this was the "source" mesh in intx?
2759  // in that case, the elements might have been instantiated in the coverage set locally;
2760  // the "owned" range can be different, since the elements are now in the tempestRemap coverage set
2761  EntityHandle cover_set = cgraph->get_cover_set();
2762  if( 0 != cover_set ) MB_CHK_ERR( context.MBI->get_entities_by_dimension( cover_set, 2, owned ) );
2763 
2764  // another possibility is for par comm graph to be computed from iMOAB_ComputeCommGraph, for
2765  // after atm ocn intx, from phys (case from imoab_phatm_ocn_coupler.cpp) get then the cover set
2766  // from ints remapper
2767 #ifdef MOAB_HAVE_TEMPESTREMAP
2768  if( data.tempestData.remapper != nullptr ) // this is the case where this app is part of an intx (intersection)
2769  {
2770  cover_set = data.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh );
2771  MB_CHK_ERR( context.MBI->get_entities_by_dimension( cover_set, 2, owned ) );
2772  }
2773 #endif
2774 
2775  std::string tag_name( tag_storage_name );
2776  // we assume that there are separators ":" between the tag names
2777  std::vector< std::string > tagNames;
2778  std::vector< Tag > tagHandles;
2779  std::string separator( ":" );
2780  split_tag_names( tag_name, separator, tagNames );
2781  for( size_t i = 0; i < tagNames.size(); i++ )
2782  {
2783  Tag tagHandle;
2784  ErrorCode rval = context.MBI->tag_get_handle( tagNames[i].c_str(), tagHandle );
2785  if( MB_SUCCESS != rval || nullptr == tagHandle )
2786  {
2787  MB_CHK_SET_ERR( moab::MB_FAILURE,
2788  " can't get tag handle for tag named:" << tagNames[i].c_str() << " at index " << i );
2789  }
2790  tagHandles.push_back( tagHandle );
2791  }
2792 
2793 #ifdef VERBOSE
2794  std::cout << pco->rank() << ". Looking to receive data for tags: " << tag_name
2795  << " and file set = " << ( data.file_set ) << "\n";
2796 #endif
2797  // pco is needed to pack, and for moab instance, not for communication!
2798  // still use nonblocking communication
2799  MB_CHK_SET_ERR( cgraph->receive_tag_values( global, pco, owned, tagHandles ), "failed to receive tag values" );
2800 
2801 #ifdef VERBOSE
2802  std::cout << pco->rank() << ". Looking to receive data for tags: " << tag_name << "\n";
2803 #endif
2804 
2805  return moab::MB_SUCCESS;
2806 }
2807 
2808 ErrCode iMOAB_FreeSenderBuffers( iMOAB_AppID pid, int* context_id )
2809 {
2810  // need first to find the pgraph that holds the information we need
2811  // this will be called on sender side only
2812  appData& data = context.appDatas[*pid];
2813  std::map< int, ParCommGraph* >::iterator mt = data.pgraph.find( *context_id );
2814  if( mt == data.pgraph.end() ) return moab::MB_FAILURE; // error
2815 
2816  mt->second->release_send_buffers();
2817  return moab::MB_SUCCESS;
2818 }
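// A minimal sketch (kept under #if 0) of the tag exchange cycle once a par comm graph exists for
// a given context id (from mesh migration or iMOAB_ComputeCommGraph): send on one side, receive
// on the other, then release the nonblocking send buffers. The tag names and ids are placeholders.
#if 0
static ErrCode example_exchange_tags( iMOAB_AppID pid_send, iMOAB_AppID pid_recv, MPI_Comm joint,
                                      int context_on_sender, int context_on_receiver,
                                      bool on_sender_tasks, bool on_receiver_tasks )
{
    char tnames[] = "field1:field2";  // ":"-separated list, as in the routines above
    if( on_sender_tasks )
    {
        ErrCode ierr = iMOAB_SendElementTag( pid_send, tnames, &joint, &context_on_sender );
        if( ierr ) return ierr;
    }
    if( on_receiver_tasks )
    {
        ErrCode ierr = iMOAB_ReceiveElementTag( pid_recv, tnames, &joint, &context_on_receiver );
        if( ierr ) return ierr;
    }
    // the sender can now free the buffers used by its nonblocking sends, for this context
    if( on_sender_tasks ) return iMOAB_FreeSenderBuffers( pid_send, &context_on_sender );
    return moab::MB_SUCCESS;
}
#endif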
2819 
2820 //#define VERBOSE
2821 ErrCode iMOAB_ComputeCommGraph( iMOAB_AppID pid1,
2822  iMOAB_AppID pid2,
2823  MPI_Comm* joint_communicator,
2824  MPI_Group* group1,
2825  MPI_Group* group2,
2826  int* type1,
2827  int* type2,
2828  int* comp1,
2829  int* comp2 )
2830 {
2831  assert( joint_communicator );
2832  assert( group1 );
2833  assert( group2 );
2834  ErrorCode rval = MB_SUCCESS;
2835  int localRank = 0, numProcs = 1;
2836 
2837  bool isFortran = false;
2838  if( *pid1 >= 0 ) isFortran = isFortran || context.appDatas[*pid1].is_fortran;
2839  if( *pid2 >= 0 ) isFortran = isFortran || context.appDatas[*pid2].is_fortran;
2840 
2841  MPI_Comm global =
2842  ( isFortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( joint_communicator ) ) : *joint_communicator );
2843  MPI_Group srcGroup = ( isFortran ? MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( group1 ) ) : *group1 );
2844  MPI_Group tgtGroup = ( isFortran ? MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( group2 ) ) : *group2 );
2845 
2846  MPI_Comm_rank( global, &localRank );
2847  MPI_Comm_size( global, &numProcs );
2848  // instantiate the par comm graph
2849 
2850  // we should search if we have another pcomm with the same comp ids in the list already
2851  // sort of check existing comm graphs in the map context.appDatas[*pid].pgraph
2852  ParCommGraph* cgraph = nullptr;
2853  ParCommGraph* cgraph_rev = nullptr;
2854  if( *pid1 >= 0 )
2855  {
2856  appData& data = context.appDatas[*pid1];
2857  auto mt = data.pgraph.find( *comp2 );
2858  if( mt != data.pgraph.end() ) data.pgraph.erase( mt );
2859  // now let us compute the new communication graph
2860  cgraph = new ParCommGraph( global, srcGroup, tgtGroup, *comp1, *comp2 );
2861  context.appDatas[*pid1].pgraph[*comp2] = cgraph; // the context will be the other comp
2862  }
2863  if( *pid2 >= 0 )
2864  {
2865  appData& data = context.appDatas[*pid2];
2866  auto mt = data.pgraph.find( *comp1 );
2867  if( mt != data.pgraph.end() ) data.pgraph.erase( mt );
2868  // now let us compute the new communication graph
2869  cgraph_rev = new ParCommGraph( global, tgtGroup, srcGroup, *comp2, *comp1 );
2870  context.appDatas[*pid2].pgraph[*comp1] = cgraph_rev; // from 2 to 1
2871  }
2872 
2873  // each model has a list of global ids that will need to be sent by gs to rendezvous with the other
2874  // model on the joint comm
2875  TupleList TLcomp1;
2876  TLcomp1.initialize( 2, 0, 0, 0, 0 ); // to proc, marker
2877  TupleList TLcomp2;
2878  TLcomp2.initialize( 2, 0, 0, 0, 0 ); // to proc, marker
2879  // will push_back a new tuple, if needed
2880 
2881  TLcomp1.enableWriteAccess();
2882 
2883  // tags of interest are either GLOBAL_DOFS or GLOBAL_ID
2884  Tag gdsTag;
2885  // find the values on first cell
2886  int lenTagType1 = 1;
2887  if( 1 == *type1 || 1 == *type2 )
2888  {
2889  rval = context.MBI->tag_get_handle( "GLOBAL_DOFS", gdsTag );MB_CHK_ERR( rval );
2890  rval = context.MBI->tag_get_length( gdsTag, lenTagType1 );MB_CHK_ERR( rval ); // usually it is 16
2891  }
2892  Tag gidTag = context.MBI->globalId_tag();
2893 
2894  std::vector< int > valuesComp1;
2895  // populate first tuple
2896  if( *pid1 >= 0 )
2897  {
2898  appData& data1 = context.appDatas[*pid1];
2899  EntityHandle fset1 = data1.file_set;
2900  // in case of tempest remap, get the coverage set
2901 #ifdef MOAB_HAVE_TEMPESTREMAP
2902  if( data1.tempestData.remapper != nullptr ) // this is the case where this app is part of an intx
2903  fset1 = data1.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh );
2904 #endif
2905  Range ents_of_interest;
2906  if( *type1 == 1 )
2907  {
2908  assert( gdsTag );
2909  rval = context.MBI->get_entities_by_type( fset1, MBQUAD, ents_of_interest );MB_CHK_ERR( rval );
2910  valuesComp1.resize( ents_of_interest.size() * lenTagType1 );
2911  rval = context.MBI->tag_get_data( gdsTag, ents_of_interest, &valuesComp1[0] );MB_CHK_ERR( rval );
2912  }
2913  else if( *type1 == 2 )
2914  {
2915  rval = context.MBI->get_entities_by_type( fset1, MBVERTEX, ents_of_interest );MB_CHK_ERR( rval );
2916  valuesComp1.resize( ents_of_interest.size() );
2917  rval = context.MBI->tag_get_data( gidTag, ents_of_interest, &valuesComp1[0] );MB_CHK_ERR( rval ); // just global ids
2918  }
2919  else if( *type1 == 3 ) // for FV meshes, just get the global id of cell
2920  {
2921  rval = context.MBI->get_entities_by_dimension( fset1, 2, ents_of_interest );MB_CHK_ERR( rval );
2922  valuesComp1.resize( ents_of_interest.size() );
2923  rval = context.MBI->tag_get_data( gidTag, ents_of_interest, &valuesComp1[0] );MB_CHK_ERR( rval ); // just global ids
2924  }
2925  else
2926  {
2927  MB_CHK_ERR( MB_FAILURE ); // we know only type 1 or 2 or 3
2928  }
2929  // now fill the tuple list with info and markers
2930  // because we will send only the ids, order and compress the list
2931  std::set< int > uniq( valuesComp1.begin(), valuesComp1.end() );
2932  TLcomp1.resize( uniq.size() );
2933  for( std::set< int >::iterator sit = uniq.begin(); sit != uniq.end(); sit++ )
2934  {
2935  // to proc, marker (global dof id)
2936  int marker = *sit;
2937  int to_proc = marker % numProcs;
2938  int n = TLcomp1.get_n();
2939  TLcomp1.vi_wr[2 * n] = to_proc; // send to processor
2940  TLcomp1.vi_wr[2 * n + 1] = marker;
2941  TLcomp1.inc_n();
2942  }
2943  }
2944 
2945  ProcConfig pc( global ); // proc config does the crystal router
2946  pc.crystal_router()->gs_transfer( 1, TLcomp1,
2947  0 ); // communication towards joint tasks, with markers
2948  // sort by value (key 1)
2949 #ifdef VERBOSE
2950  std::stringstream ff1;
2951  ff1 << "TLcomp1_" << localRank << ".txt";
2952  TLcomp1.print_to_file( ff1.str().c_str() ); // it will append!
2953 #endif
2954  moab::TupleList::buffer sort_buffer;
2955  sort_buffer.buffer_init( TLcomp1.get_n() );
2956  TLcomp1.sort( 1, &sort_buffer );
2957  sort_buffer.reset();
2958 #ifdef VERBOSE
2959  // after sorting
2960  TLcomp1.print_to_file( ff1.str().c_str() ); // it will append!
2961 #endif
2962  // do the same, for the other component, number2, with type2
2963  // start copy
2964  TLcomp2.enableWriteAccess();
2965  // populate second tuple
2966  std::vector< int > valuesComp2;
2967  if( *pid2 >= 0 )
2968  {
2969  appData& data2 = context.appDatas[*pid2];
2970  EntityHandle fset2 = data2.file_set;
2971  // in case of tempest remap, get the coverage set
2972 #ifdef MOAB_HAVE_TEMPESTREMAP
2973  if( data2.tempestData.remapper != nullptr ) // this is the case where this app is part of an intx
2974  fset2 = data2.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh );
2975 #endif
2976 
2977  Range ents_of_interest;
2978  if( *type2 == 1 )
2979  {
2980  assert( gdsTag );
2981  rval = context.MBI->get_entities_by_type( fset2, MBQUAD, ents_of_interest );MB_CHK_ERR( rval );
2982  valuesComp2.resize( ents_of_interest.size() * lenTagType1 );
2983  rval = context.MBI->tag_get_data( gdsTag, ents_of_interest, &valuesComp2[0] );MB_CHK_ERR( rval );
2984  }
2985  else if( *type2 == 2 )
2986  {
2987  rval = context.MBI->get_entities_by_type( fset2, MBVERTEX, ents_of_interest );MB_CHK_ERR( rval );
2988  valuesComp2.resize( ents_of_interest.size() ); // stride is 1 here
2989  rval = context.MBI->tag_get_data( gidTag, ents_of_interest, &valuesComp2[0] );MB_CHK_ERR( rval ); // just global ids
2990  }
2991  else if( *type2 == 3 )
2992  {
2993  rval = context.MBI->get_entities_by_dimension( fset2, 2, ents_of_interest );MB_CHK_ERR( rval );
2994  valuesComp2.resize( ents_of_interest.size() ); // stride is 1 here
2995  rval = context.MBI->tag_get_data( gidTag, ents_of_interest, &valuesComp2[0] );MB_CHK_ERR( rval ); // just global ids
2996  }
2997  else
2998  {
2999  MB_CHK_ERR( MB_FAILURE ); // we know only type 1 or 2 or 3
3000  }
3001  // now fill the tuple list with info and markers
3002  std::set< int > uniq( valuesComp2.begin(), valuesComp2.end() );
3003  TLcomp2.resize( uniq.size() );
3004  for( std::set< int >::iterator sit = uniq.begin(); sit != uniq.end(); sit++ )
3005  {
3006  // to proc, marker (global dof id)
3007  int marker = *sit;
3008  int to_proc = marker % numProcs;
3009  int n = TLcomp2.get_n();
3010  TLcomp2.vi_wr[2 * n] = to_proc; // send to processor
3011  TLcomp2.vi_wr[2 * n + 1] = marker;
3012  TLcomp2.inc_n();
3013  }
3014  }
3015  pc.crystal_router()->gs_transfer( 1, TLcomp2,
3016  0 ); // communication towards joint tasks, with markers
3017  // sort by value (key 1)
3018 #ifdef VERBOSE
3019  std::stringstream ff2;
3020  ff2 << "TLcomp2_" << localRank << ".txt";
3021  TLcomp2.print_to_file( ff2.str().c_str() );
3022 #endif
3023  sort_buffer.buffer_reserve( TLcomp2.get_n() );
3024  TLcomp2.sort( 1, &sort_buffer );
3025  sort_buffer.reset();
3026  // end copy
3027 #ifdef VERBOSE
3028  TLcomp2.print_to_file( ff2.str().c_str() );
3029 #endif
3030  // need to send back the info, from the rendezvous point, for each of the values
3031  /* so go over each value, on the local process in the joint communicator;
3032 
3033  we now have to send back the info needed for communication;
3034  loop in sync over both TLcomp1 and TLcomp2, on the local process;
3035  so, build new tuple lists to send back to each component,
3036  and populate them at the same time, based on the marker, which is indexed
3037  */
3038 
3039  TupleList TLBackToComp1;
3040  TLBackToComp1.initialize( 3, 0, 0, 0, 0 ); // to proc, marker, from proc on comp2,
3041  TLBackToComp1.enableWriteAccess();
3042 
3043  TupleList TLBackToComp2;
3044  TLBackToComp2.initialize( 3, 0, 0, 0, 0 ); // to proc, marker, from proc,
3045  TLBackToComp2.enableWriteAccess();
3046 
3047  int n1 = TLcomp1.get_n();
3048  int n2 = TLcomp2.get_n();
3049 
3050  int indexInTLComp1 = 0;
3051  int indexInTLComp2 = 0; // advance both, according to the marker
3052  if( n1 > 0 && n2 > 0 )
3053  {
3054  while( indexInTLComp1 < n1 && indexInTLComp2 < n2 ) // if any is over, we are done
3055  {
3056  int currentValue1 = TLcomp1.vi_rd[2 * indexInTLComp1 + 1];
3057  int currentValue2 = TLcomp2.vi_rd[2 * indexInTLComp2 + 1];
3058  if( currentValue1 < currentValue2 )
3059  {
3060  // we have a big problem; basically, we are saying that
3061  // dof currentValue is on one model and not on the other
3062  // std::cout << " currentValue1:" << currentValue1 << " missing in comp2" << "\n";
3063  indexInTLComp1++;
3064  continue;
3065  }
3066  if( currentValue1 > currentValue2 )
3067  {
3068  // std::cout << " currentValue2:" << currentValue2 << " missing in comp1" << "\n";
3069  indexInTLComp2++;
3070  continue;
3071  }
3072  int size1 = 1;
3073  int size2 = 1;
3074  while( indexInTLComp1 + size1 < n1 && currentValue1 == TLcomp1.vi_rd[2 * ( indexInTLComp1 + size1 ) + 1] )
3075  size1++;
3076  while( indexInTLComp2 + size2 < n2 && currentValue2 == TLcomp2.vi_rd[2 * ( indexInTLComp2 + size2 ) + 1] )
3077  size2++;
3078  // must be found in both lists, find the start and end indices
3079  for( int i1 = 0; i1 < size1; i1++ )
3080  {
3081  for( int i2 = 0; i2 < size2; i2++ )
3082  {
3083  // send the info back to components
3084  int n = TLBackToComp1.get_n();
3085  TLBackToComp1.reserve();
3086  TLBackToComp1.vi_wr[3 * n] =
3087  TLcomp1.vi_rd[2 * ( indexInTLComp1 + i1 )]; // send back to the proc marker
3088  // came from, info from comp2
3089  TLBackToComp1.vi_wr[3 * n + 1] = currentValue1; // initial value (resend?)
3090  TLBackToComp1.vi_wr[3 * n + 2] = TLcomp2.vi_rd[2 * ( indexInTLComp2 + i2 )]; // from proc on comp2
3091  n = TLBackToComp2.get_n();
3092  TLBackToComp2.reserve();
3093  TLBackToComp2.vi_wr[3 * n] =
3094  TLcomp2.vi_rd[2 * ( indexInTLComp2 + i2 )]; // send back info to original
3095  TLBackToComp2.vi_wr[3 * n + 1] = currentValue1; // initial value (resend?)
3096  TLBackToComp2.vi_wr[3 * n + 2] = TLcomp1.vi_rd[2 * ( indexInTLComp1 + i1 )]; // from proc on comp1
3097  // what if there are repeated markers in TLcomp2? increase just index2
3098  }
3099  }
3100  indexInTLComp1 += size1;
3101  indexInTLComp2 += size2;
3102  }
3103  }
3104  pc.crystal_router()->gs_transfer( 1, TLBackToComp1, 0 ); // communication towards original tasks, with info about
3105  pc.crystal_router()->gs_transfer( 1, TLBackToComp2, 0 );
3106 
3107  if( *pid1 >= 0 )
3108  {
3109  // we are on original comp 1 tasks
3110  // before ordering
3111  // now for each value in TLBackToComp1.vi_rd[3*i+1], on current proc, we know the
3112  // processors it communicates with
3113 #ifdef VERBOSE
3114  std::stringstream f1;
3115  f1 << "TLBack1_" << localRank << ".txt";
3116  TLBackToComp1.print_to_file( f1.str().c_str() );
3117 #endif
3118  sort_buffer.buffer_reserve( TLBackToComp1.get_n() );
3119  TLBackToComp1.sort( 1, &sort_buffer );
3120  sort_buffer.reset();
3121 #ifdef VERBOSE
3122  TLBackToComp1.print_to_file( f1.str().c_str() );
3123 #endif
3124  // so we are now on pid1; we know now where each marker has to go
3125  // add a new method to ParCommGraph, to set up the involved_IDs_map
3126  cgraph->settle_comm_by_ids( *comp1, TLBackToComp1, valuesComp1 );
3127  }
3128  if( *pid2 >= 0 )
3129  {
3130  // we are on original comp 2 tasks
3131  // before ordering
3132  // now for each value in TLBackToComp2.vi_rd[3*i+1], on current proc, we know the
3133  // processors it communicates with
3134 #ifdef VERBOSE
3135  std::stringstream f2;
3136  f2 << "TLBack2_" << localRank << ".txt";
3137  TLBackToComp2.print_to_file( f2.str().c_str() );
3138 #endif
3139  sort_buffer.buffer_reserve( TLBackToComp2.get_n() );
3140  TLBackToComp2.sort( 2, &sort_buffer );
3141  sort_buffer.reset();
3142 #ifdef VERBOSE
3143  TLBackToComp2.print_to_file( f2.str().c_str() );
3144 #endif
3145  cgraph_rev->settle_comm_by_ids( *comp2, TLBackToComp2, valuesComp2 );
3146  }
3147  return moab::MB_SUCCESS;
3148 }
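// A minimal standalone sketch (kept under #if 0) of the matching step used above: both tuple
// lists are sorted by the marker (global dof id), and a two-pointer sweep pairs every occurrence
// of the same marker coming from the two components, skipping markers present on only one side.
// Plain vectors stand in here for the sorted marker columns of TLcomp1 and TLcomp2.
#if 0
static void match_sorted_markers( const std::vector< int >& comp1,
                                  const std::vector< int >& comp2,
                                  std::vector< std::pair< int, int > >& matched_index_pairs )
{
    size_t i1 = 0, i2 = 0;
    while( i1 < comp1.size() && i2 < comp2.size() )
    {
        if( comp1[i1] < comp2[i2] ) { i1++; continue; }  // marker missing in comp2
        if( comp1[i1] > comp2[i2] ) { i2++; continue; }  // marker missing in comp1
        size_t s1 = 1, s2 = 1;  // extents of the equal runs on each side
        while( i1 + s1 < comp1.size() && comp1[i1 + s1] == comp1[i1] ) s1++;
        while( i2 + s2 < comp2.size() && comp2[i2 + s2] == comp2[i2] ) s2++;
        for( size_t a = 0; a < s1; a++ )
            for( size_t b = 0; b < s2; b++ )
                matched_index_pairs.push_back( std::make_pair( (int)( i1 + a ), (int)( i2 + b ) ) );
        i1 += s1;
        i2 += s2;
    }
}
#endif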
3149 
3150 ErrCode iMOAB_MergeVertices( iMOAB_AppID pid )
3151 {
3152  appData& data = context.appDatas[*pid];
3153  ParallelComm* pco = context.appDatas[*pid].pcomm;
3154  // collapse vertices and transform cells into triangles/quads/polygons
3155  // tags we care about: area, frac, global id
3156  std::vector< Tag > tagsList;
3157  Tag tag;
3158  ErrorCode rval = context.MBI->tag_get_handle( "GLOBAL_ID", tag );
3159  if( !tag || rval != MB_SUCCESS ) return moab::MB_FAILURE; // fatal error, abort
3160  tagsList.push_back( tag );
3161  rval = context.MBI->tag_get_handle( "area", tag );
3162  if( tag && rval == MB_SUCCESS ) tagsList.push_back( tag );
3163  rval = context.MBI->tag_get_handle( "frac", tag );
3164  if( tag && rval == MB_SUCCESS ) tagsList.push_back( tag );
3165  double tol = 1.0e-9;
3166  rval = IntxUtils::remove_duplicate_vertices( context.MBI, data.file_set, tol, tagsList );MB_CHK_ERR( rval );
3167 
3168  // clean material sets of cells that were deleted
3169  rval = context.MBI->get_entities_by_type_and_tag( data.file_set, MBENTITYSET, &( context.material_tag ), 0, 1,
3170  data.mat_sets, Interface::UNION );MB_CHK_ERR( rval );
3171 
3172  if( !data.mat_sets.empty() )
3173  {
3174  EntityHandle matSet = data.mat_sets[0];
3175  Range elems;
3176  rval = context.MBI->get_entities_by_dimension( matSet, 2, elems );MB_CHK_ERR( rval );
3177  rval = context.MBI->remove_entities( matSet, elems );MB_CHK_ERR( rval );
3178  // put back only cells from data.file_set
3179  elems.clear();
3180  rval = context.MBI->get_entities_by_dimension( data.file_set, 2, elems );MB_CHK_ERR( rval );
3181  rval = context.MBI->add_entities( matSet, elems );MB_CHK_ERR( rval );
3182  }
3183  rval = iMOAB_UpdateMeshInfo( pid );MB_CHK_ERR( rval );
3184 
3185  ParallelMergeMesh pmm( pco, tol );
3186  rval = pmm.merge( data.file_set,
3187  /* do not do local merge*/ false,
3188  /* 2d cells*/ 2 );MB_CHK_ERR( rval );
3189 
3190  // assign global ids only for vertices, cells have them fine
3191  rval = pco->assign_global_ids( data.file_set, /*dim*/ 0 );MB_CHK_ERR( rval );
3192 
3193  // set the partition tag on the file set
3194  Tag part_tag;
3195  int dum_id = -1;
3196  rval = context.MBI->tag_get_handle( "PARALLEL_PARTITION", 1, MB_TYPE_INTEGER, part_tag,
3197  MB_TAG_CREAT | MB_TAG_SPARSE, &dum_id );
3198 
3199  if( part_tag == nullptr || ( ( rval != MB_SUCCESS ) && ( rval != MB_ALREADY_ALLOCATED ) ) )
3200  {
3201  std::cout << " can't get par part tag.\n";
3202  return moab::MB_FAILURE;
3203  }
3204 
3205  int rank = pco->rank();
3206  rval = context.MBI->tag_set_data( part_tag, &data.file_set, 1, &rank );MB_CHK_ERR( rval );
3207 
3208  return moab::MB_SUCCESS;
3209 }
3210 
3211 #ifdef MOAB_HAVE_TEMPESTREMAP
3212 
3213 // this call must be collective on the joint communicator
3214 // intersection tasks on the coupler will need to send to the component tasks the list of
3215 // element ids that are relevant: those source elements that intersected some of the target
3216 // elements in the intersection
3217 // (the target elements themselves are not needed here)
3218 ErrCode iMOAB_CoverageGraph( MPI_Comm* joint_communicator,
3219  iMOAB_AppID pid_src,
3220  iMOAB_AppID pid_migr,
3221  iMOAB_AppID pid_intx,
3222  int* source_id,
3223  int* migration_id,
3224  int* context_id )
3225 {
3226  // first, based on the scompid and migrcomp, find the parCommGraph corresponding to this exchange
3227  std::vector< int > srcSenders, receivers;
3228  ParCommGraph* sendGraph = nullptr;
3229  bool is_fortran_context = false;
3230 
3231  if( pid_src && *pid_src > 0 && context.appDatas.find( *pid_src ) == context.appDatas.end() )
3232  MB_CHK_SET_ERR( moab::MB_FAILURE, "Invalid source application ID specified: " << *pid_src );
3233  if( pid_migr && *pid_migr > 0 && context.appDatas.find( *pid_migr ) == context.appDatas.end() )
3234  MB_CHK_SET_ERR( moab::MB_FAILURE, "Invalid migration/coverage application ID specified: " << *pid_migr );
3235  if( pid_intx && *pid_intx > 0 && context.appDatas.find( *pid_intx ) == context.appDatas.end() )
3236  MB_CHK_SET_ERR( moab::MB_FAILURE, "Invalid intersection application ID specified: " << *pid_intx );
3237 
3238  // First, find the original graph between PE sets
3239  // And based on this one, we will build the newly modified one for coverage
3240  if( *pid_src >= 0 )
3241  {
3242  appData& dataSrc = context.appDatas[*pid_src];
3243  int default_context_id = *migration_id; // the other one
3244  assert( dataSrc.global_id == *source_id );
3245  is_fortran_context = dataSrc.is_fortran || is_fortran_context;
3246  if( dataSrc.pgraph.find( default_context_id ) != dataSrc.pgraph.end() )
3247  sendGraph = dataSrc.pgraph[default_context_id]; // maybe check if it does not exist
3248  else
3249  MB_CHK_SET_ERR( moab::MB_FAILURE, "Could not find source ParCommGraph with default migration context" );
3250 
3251  // report the sender and receiver tasks in the joint comm
3252  srcSenders = sendGraph->senders();
3253  receivers = sendGraph->receivers();
3254 #ifdef VERBOSE
3255  std::cout << "senders: " << srcSenders.size() << " first sender: " << srcSenders[0] << std::endl;
3256 #endif
3257  }
3258 
3259  ParCommGraph* recvGraph = nullptr; // will be non null on receiver tasks (intx tasks)
3260  if( *pid_migr >= 0 )
3261  {
3262  appData& dataMigr = context.appDatas[*pid_migr];
3263  is_fortran_context = dataMigr.is_fortran || is_fortran_context;
3264  // find the original one
3265  int default_context_id = *source_id;
3266  assert( dataMigr.global_id == *migration_id );
3267  if( dataMigr.pgraph.find( default_context_id ) != dataMigr.pgraph.end() )
3268  recvGraph = dataMigr.pgraph[default_context_id];
3269  else
3270  MB_CHK_SET_ERR( moab::MB_FAILURE,
3271  "Could not find coverage receiver ParCommGraph with default migration context" );
3272  // report the sender and receiver tasks in the joint comm, from migrated mesh pt of view
3273  srcSenders = recvGraph->senders();
3274  receivers = recvGraph->receivers();
3275 #ifdef VERBOSE
3276  std::cout << "receivers: " << receivers.size() << " first receiver: " << receivers[0] << std::endl;
3277 #endif
3278  }
3279 
3280  if( *pid_intx >= 0 ) is_fortran_context = context.appDatas[*pid_intx].is_fortran || is_fortran_context;
3281 
3282  // loop over pid_intx elements, to see what original processors in joint comm have sent the
3283  // coverage mesh; If we are on intx tasks, send coverage info towards original component tasks,
3284  // about needed cells
3285  TupleList TLcovIDs;
 3286  TLcovIDs.initialize( 2, 0, 0, 0, 0 ); // to proc, GLOBAL ID; start empty and grow as needed
 3287  // will push_back a new tuple, if needed
 3288  TLcovIDs.enableWriteAccess();
 3289  // the crystal router will send each cell ID back to the original source, on the component task
 3290  // if we are on intx tasks, loop over all intx elements and collect the needed source cell ids
3291 
3292  MPI_Comm global = ( is_fortran_context ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( joint_communicator ) )
3293  : *joint_communicator );
3294  int currentRankInJointComm = -1;CHK_MPI_ERR( MPI_Comm_rank( global, &currentRankInJointComm ) );
3295 
3296  // Global id of the coverage source elements
3297  Tag gidTag = context.MBI->globalId_tag();
3298  if( !gidTag ) return moab::MB_TAG_NOT_FOUND; // fatal error, abort
3299 
3300  // if currentRankInJointComm is in receivers list, it means that we are on intx tasks too, we
3301  // need to send information towards component tasks
3302  if( find( receivers.begin(), receivers.end(), currentRankInJointComm ) !=
3303  receivers.end() ) // we are on receivers tasks, we can request intx info
3304  {
3305  appData& dataIntx = context.appDatas[*pid_intx];
3306  EntityHandle intx_set = dataIntx.file_set;
3307  EntityHandle cover_set = dataIntx.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh );
3308 
3309  Tag orgSendProcTag;
3310  MB_CHK_ERR( context.MBI->tag_get_handle( "orig_sending_processor", orgSendProcTag ) );
3311  if( !orgSendProcTag ) return moab::MB_TAG_NOT_FOUND; // fatal error, abort
3312 
3313  Range intx_cells;
3314  // get all entities from the intersection set, and look at their SourceParent
3315  MB_CHK_ERR( context.MBI->get_entities_by_dimension( intx_set, 2, intx_cells ) );
 3316  // if we did not compute the intersection (perhaps it was loaded from disk),
 3317  // then just get the cells from the covering mesh (no need to filter to participating elements only)
3318 
3319  // send that info back to enhance parCommGraph cache
3320  std::map< int, std::set< int > > idsFromProcs;
3321  if( intx_cells.size() )
3322  {
3323  Tag parentTag;
3324  // global id of the source element corresponding to intersection cell
3325  MB_CHK_ERR( context.MBI->tag_get_handle( "SourceParent", parentTag ) );
3326  if( !parentTag ) return moab::MB_TAG_NOT_FOUND; // fatal error, abort
3327 
3328  for( auto it = intx_cells.begin(); it != intx_cells.end(); ++it )
3329  {
3330  EntityHandle intx_cell = *it;
3331 
3332  int origProc; // look at receivers
3333  MB_CHK_ERR( context.MBI->tag_get_data( orgSendProcTag, &intx_cell, 1, &origProc ) );
3334 
3335  // we have augmented the overlap set with ghost cells ; in that case, the
3336  // orig_sending_processor is not set so it will be -1;
3337  // if( origProc < 0 || currentRankInJointComm == origProc ) continue;
3338  if( origProc < 0 ) continue;
3339 
3340  int gidCell; // get the global id of the cell for association
3341  MB_CHK_ERR( context.MBI->tag_get_data( parentTag, &intx_cell, 1, &gidCell ) );
3342  idsFromProcs[origProc].insert( gidCell );
3343  // printf( "%d: active-intx: adding %d for proc %d\n", currentRankInJointComm, gidCell, origProc );
3344  }
3345  }
3346 
3347  // NOTE: if we have no intx cells, we fall back to coverage set anyway
3348  // In either case, let us iterate over the coverage mesh and compute
3349  // the right connections needed for the coverage graph
3350  {
3351  // get all entities from the source covering set
3352  Range cover_cells;
3353  MB_CHK_ERR( context.MBI->get_entities_by_dimension( cover_set, 2, cover_cells ) );
3354 
3355  // get all cells from coverage set
3356  Tag gidTag = context.MBI->globalId_tag();
3357 
3358  // look at their orig_sending_processor and associate to global ID
3359  for( auto it = cover_cells.begin(); it != cover_cells.end(); ++it )
3360  {
3361  const EntityHandle cov_cell = *it;
3362 
3363  int origProc; // look at receivers
3364  MB_CHK_ERR( context.MBI->tag_get_data( orgSendProcTag, &cov_cell, 1, &origProc ) );
3365 
3366  // we have augmented the overlap set with ghost cells ; in that case, the
3367  // orig_sending_processor is not set so it will be -1;
3368  if( origProc < 0 ) continue;
3369 
3370  int gidCell; // get the global id of the cell for association
3371  MB_CHK_ERR( context.MBI->tag_get_data( gidTag, &cov_cell, 1, &gidCell ) );
3372  assert( gidCell > 0 );
3373  idsFromProcs[origProc].insert( gidCell );
3374  }
3375  }
3376 
3377 #ifdef VERBOSE
3378  std::ofstream dbfile;
3379  std::stringstream outf;
3380  outf << "idsFromProc_0" << currentRankInJointComm << ".txt";
3381  dbfile.open( outf.str().c_str() );
3382  dbfile << "Writing this to a file.\n";
3383 
3384  dbfile << " map size:" << idsFromProcs.size()
3385  << std::endl; // on the receiver side, these show how much data to receive
3386  // from the sender (how many ids, and how much tag data later; we need to size up the
 3387  // receiver buffer); arrange in tuples, and use map iterators to send the ids
3388  for( std::map< int, std::set< int > >::iterator mt = idsFromProcs.begin(); mt != idsFromProcs.end(); mt++ )
3389  {
3390  std::set< int >& setIds = mt->second;
3391  dbfile << "from id: " << mt->first << " receive " << setIds.size() << " cells \n";
3392  int counter = 0;
3393  for( std::set< int >::iterator st = setIds.begin(); st != setIds.end(); st++ )
3394  {
3395  int valueID = *st;
3396  dbfile << " " << valueID;
3397  counter++;
3398  if( counter % 10 == 0 ) dbfile << "\n";
3399  }
3400  dbfile << "\n";
3401  }
3402  dbfile.close();
3403 #endif
3404 
3405  if( nullptr != recvGraph )
3406  {
3407  ParCommGraph* newRecvGraph = new ParCommGraph( *recvGraph ); // just copy
3408  newRecvGraph->set_context_id( *context_id );
3409  newRecvGraph->SetReceivingAfterCoverage( idsFromProcs );
3410  // this par comm graph will need to use the coverage set
3411  // so we are for sure on intx pes (the receiver is the coupler mesh)
3412  newRecvGraph->set_cover_set( cover_set );
3413  if( context.appDatas[*pid_migr].pgraph.find( *context_id ) == context.appDatas[*pid_migr].pgraph.end() )
3414  context.appDatas[*pid_migr].pgraph[*context_id] = newRecvGraph;
3415  else // possible memory leak if context_id is same
3416  MB_CHK_SET_ERR( moab::MB_FAILURE, "ParCommGraph for context_id="
3417  << *context_id << " already exists. Check the workflow" );
3418  }
3419  for( std::map< int, std::set< int > >::iterator mit = idsFromProcs.begin(); mit != idsFromProcs.end(); ++mit )
3420  {
3421  int procToSendTo = mit->first;
3422  std::set< int >& idSet = mit->second;
3423  for( std::set< int >::iterator sit = idSet.begin(); sit != idSet.end(); ++sit )
3424  {
3425  int n = TLcovIDs.get_n();
3426  TLcovIDs.reserve();
3427  TLcovIDs.vi_wr[2 * n] = procToSendTo; // send to processor
 3428  TLcovIDs.vi_wr[2 * n + 1] = *sit; // global id of the source cell needed from that processor
3429  }
3430  }
3431  }
3432 
3433  ProcConfig pc( global ); // proc config does the crystal router
3434  // communication towards component tasks, with what ids are needed
3435  // for each task from receiver
3436  pc.crystal_router()->gs_transfer( 1, TLcovIDs, 0 );
3437 
3438  // a test to know if we are on the sender tasks (original component, in this case, atmosphere)
3439  if( nullptr != sendGraph )
3440  {
3441  appData& dataSrc = context.appDatas[*pid_src];
3442  // collect TLcovIDs tuple, will set in a local map/set, the ids that are sent to each receiver task
3443  ParCommGraph* newSendGraph = new ParCommGraph( *sendGraph ); // just copy
3444  newSendGraph->set_context_id( *context_id );
3445  dataSrc.pgraph[*context_id] = newSendGraph;
3446  MB_CHK_ERR( newSendGraph->settle_send_graph( TLcovIDs ) );
3447  }
3448  return moab::MB_SUCCESS; // success
3449 }
3450 
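A hedged usage sketch (not part of iMOAB.cpp, assuming an MPI + TempestRemap build): iMOAB_CoverageGraph must be called collectively on the joint communicator by both the component and the coupler tasks. The application ids and external component ids below are assumed to come from the usual registration, migration and intersection steps.

#include "moab/iMOAB.h"
#include "moab_mpi.h"

// hedged sketch: collective call that builds the coverage ParCommGraph under `context_id`
static ErrCode build_coverage_graph( MPI_Comm joint_comm,
                                     iMOAB_AppID pid_src,   // source component app (valid on source tasks)
                                     iMOAB_AppID pid_migr,  // migrated/coverage app (valid on coupler tasks)
                                     iMOAB_AppID pid_intx,  // intersection app (valid on coupler tasks)
                                     int source_id,         // external id of the source component
                                     int migration_id,      // external id of the migrated mesh
                                     int context_id )       // context under which the new graph is stored
{
    return iMOAB_CoverageGraph( &joint_comm, pid_src, pid_migr, pid_intx,
                                &source_id, &migration_id, &context_id );
}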
3451 ErrCode iMOAB_DumpCommGraph( iMOAB_AppID pid, int* context_id, int* is_sender, const iMOAB_String prefix )
3452 {
3453  assert( prefix && strlen( prefix ) );
3454 
3455  ParCommGraph* cgraph = context.appDatas[*pid].pgraph[*context_id];
3456  std::string prefix_str( prefix );
3457 
3458  if( nullptr != cgraph )
3459  cgraph->dump_comm_information( prefix_str, *is_sender );
3460  else
3461  {
3462  std::cout << " cannot find ParCommGraph on app with pid " << *pid << " name: " << context.appDatas[*pid].name
3463  << " context: " << *context_id << "\n";
3464  }
3465  return moab::MB_SUCCESS;
3466 }
3467 
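A short hedged sketch of how the dump routine above could be driven from application code; the app id and context id are assumed to identify a ParCommGraph created earlier (for example by iMOAB_CoverageGraph above), and the file prefix is hypothetical.

#include "moab/iMOAB.h"

// hedged sketch: dump the comm graph stored under `context_id`, from the sender's point of view
static ErrCode dump_sender_graph( iMOAB_AppID pid, int context_id )
{
    int is_sender = 1;              // 1: dump sender-side info, 0: receiver-side
    char prefix[] = "covGraphSend"; // hypothetical file prefix
    return iMOAB_DumpCommGraph( pid, &context_id, &is_sender, prefix );
}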
3468 #endif // #ifdef MOAB_HAVE_TEMPESTREMAP
3469 
3470 #endif // #ifdef MOAB_HAVE_MPI
3471 
3472 #ifdef MOAB_HAVE_TEMPESTREMAP
3473 
3474 #ifdef MOAB_HAVE_NETCDF
3475 
3476 static ErrCode set_aream_from_trivial_distribution( iMOAB_AppID pid, int N, std::vector< double >& trvArea )
3477 {
 3478  // this needs several rounds of communication, because the mesh is distributed differently than the trivial distribution
 3479  // get all the cells from the pid application
 3480  // get the global ids of the cells
 3481  // trivial distribution: each task holds nL = N / size consecutive global ids
 3482  // except the last one, which also gets the remainder extraN = N % size, so nL += extraN there
 3483  // start id = rank * nL + 1; end id = ( rank + 1 ) * nL
 3484  // for the last task ( rank == size - 1 ), end id = N
 3485  // i.e. the last task gets the rest of the cells
 3486  // construct the global ids that correspond to the trvArea entries received on this task
3487  appData& data = context.appDatas[*pid];
3488  int size = 1, rank = 0;
3489 #ifdef MOAB_HAVE_MPI
3490  ParallelComm* pcomm = context.appDatas[*pid].pcomm;
3491  size = pcomm->size();
3492  rank = pcomm->rank();
3493 #endif
3494 
3495  /// The "aream" tag should be created already; error out if not
3496  /// NOTE: This is a bad assumption
3497  /// TODO: Fix it.
3498  Tag areaTag;
3499  MB_CHK_ERR( context.MBI->tag_get_handle( "aream", areaTag ) );
3500 
3501  // start copy
3502  const Range& ents_to_set = data.primary_elems;
3503  size_t nents_to_be_set = ents_to_set.size();
3504 
3505  Tag gidTag = context.MBI->globalId_tag();
3506  std::vector< int > globalIds( nents_to_be_set );
3507  MB_CHK_ERR( context.MBI->tag_get_data( gidTag, ents_to_set, &globalIds[0] ) );
3508 
3509  const bool serial = ( size == 1 );
3510  if( serial )
3511  {
3512  // we do not assume anymore that the number of entities has to match
3513  // we will set only what matches, and skip entities that do not have corresponding global ids
3514  //assert( total_tag_len * nents_to_be_set - *num_tag_storage_length == 0 );
3515  // tags are unrolled, we loop over global ids first, then careful about tags
3516  for( size_t i = 0; i < nents_to_be_set; i++ )
3517  {
3518  int gid = globalIds[i];
3519  int indexInVal =
3520  gid - 1; // assume the values are in order of global id, starting from 1 to number of cells
3521  assert( indexInVal < N );
3522  EntityHandle eh = ents_to_set[i];
3523  MB_CHK_ERR( context.MBI->tag_set_data( areaTag, &eh, 1, &trvArea[indexInVal] ) );
3524  }
3525  }
3526 #ifdef MOAB_HAVE_MPI
3527  else // it can be not serial only if size > 1, parallel
3528  {
3529  int nL = N / size; // how many global ids per task, except the last one
3530 
3531  // in this case, we have to use 2 crystal routers, to send data to the processor that needs it
 3532  // we will first create a tuple list to the rendezvous points, then from there send to the processor that requested it
3533  // it is a 2-hop global gather scatter
3534  // TODO: allow for tags of different length; this is wrong
3535  //assert( nbLocalVals * tagNames.size() - *num_tag_storage_length == 0 );
3536  TupleList TLreq;
3537  TLreq.initialize( 3, 0, 0, 0, nents_to_be_set ); // to proc, marker(gid),
3538  TLreq.enableWriteAccess();
3539  // the processor id that processes global_id is global_id / num_ents_per_proc
3540 
3541  // send requests to processor that has the global id
3542  for( size_t i = 0; i < nents_to_be_set; i++ )
3543  {
3544  // to proc, marker, element local index, index in el
3545  int marker = globalIds[i];
3546  int to_proc = ( marker - 1 ) / nL; // proc from 0 to size - 1
3547 
 3548  if( to_proc >= size ) to_proc = size - 1; // the last task also holds the remainder
3549  int n = TLreq.get_n();
3550  TLreq.vi_wr[3 * n] = to_proc; // send to processor
3551  TLreq.vi_wr[3 * n + 1] = marker;
3552  TLreq.vi_wr[3 * n + 2] = i; // local index for this global id
3553  TLreq.inc_n();
3554  }
3555 
3556  // send now requests, basically inform the rendez-vous point who needs a particular global id
3557  // send the data to the other processors:
3558  ( pcomm->proc_config().crystal_router() )->gs_transfer( 1, TLreq, 0 );
3559 
3560  int sizeBack = TLreq.get_n();
3561  // start copy from comm graph settle
3562  TupleList TLBack;
3563  TLBack.initialize( 3, 0, 0, 1, sizeBack ); // to proc, marker, tag from proc , tag values
3564  TLBack.enableWriteAccess();
3565  for( int i = 0; i < sizeBack; i++ )
3566  {
3567  int from_proc = TLreq.vi_wr[3 * i]; // send to processor
3568  TLBack.vi_wr[3 * i] = from_proc;
3569  int marker = TLreq.vi_wr[3 * i + 1]; // the actual global id
3570  TLBack.vi_wr[3 * i + 1] = marker;
3571  TLBack.vi_wr[3 * i + 2] = TLreq.vi_wr[3 * i + 2]; // the original index this came from
3572  // this marker should give an idea of what index is actually needed for value
3573  int index = marker - 1 - rank * nL;
3574  TLBack.vr_wr[i] = trvArea[index]; // !!! big assumptions about indices
3575  TLBack.inc_n();
3576  }
3577 
3578  ( pcomm->proc_config().crystal_router() )->gs_transfer( 1, TLBack, 0 );
3579 
3580  int n1 = TLBack.get_n(); // should be the number of nents_to_be_set
3581  for( int i = 0; i < n1; i++ )
3582  {
3583  // int gid = TLBack.vi_rd[3 * i + 1]; // marker
3584  int origIndex = TLBack.vi_rd[3 * i + 2];
3585  EntityHandle eh = ents_to_set[origIndex];
3586  MB_CHK_ERR( context.MBI->tag_set_data( areaTag, &eh, 1, &TLBack.vr_rd[i] ) );
3587  }
3588  }
3589 #endif
3590  // end copy
3591 
3592  return MB_SUCCESS;
3593 }
3594 
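The helper above assumes the map file was read with a trivial distribution of global ids 1..N (with N >= size): each of the `size` tasks holds nL = N / size consecutive ids, and the last task also absorbs the remainder. A minimal sketch (not part of iMOAB.cpp) of that ownership rule, mirroring the to_proc computation in the request loop above:

// hedged sketch: which task owns global id `gid` under the trivial distribution of 1..N over `size` tasks
static int trivial_owner( int gid, int N, int size )
{
    int nL = N / size;                        // ids per task, except the last one
    int to_proc = ( gid - 1 ) / nL;           // global ids start at 1
    if( to_proc >= size ) to_proc = size - 1; // the last task also holds the remainder N % size
    return to_proc;
}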
3595 ErrCode iMOAB_LoadMappingWeightsFromFile(
3596  iMOAB_AppID pid_source,
3597  iMOAB_AppID pid_target,
3598  iMOAB_AppID pid_intersection,
3599  int* srctype,
3600  int* tgttype,
3601  const iMOAB_String solution_weights_identifier, /* "scalar", "flux", "custom" */
3602  const iMOAB_String remap_weights_filename )
3603 {
3604  assert( srctype && tgttype );
3605 
3606  // get the local degrees of freedom, from the pid_cpl and type of mesh
3607  // Get the source and target data and pcomm objects
3608  appData& data_source = context.appDatas[*pid_source];
3609  appData& data_target = context.appDatas[*pid_target];
3610  appData& data_intx = context.appDatas[*pid_intersection];
3611  // do we need to check for tempest remap?
3612  // if we do not have it, do not do anything anyway
3613  //#ifdef MOAB_HAVE_TEMPESTREMAP
3614  TempestMapAppData& tdata = data_intx.tempestData;
3615 
 3616  // check if the remapper context is null; if so, we need to create it here
 3617  // open question: what if we both read a map and compute a map on the same iMOAB app (a2o, for example)?
 3618  // e.g., we compute an intersection map and also read a bilinear map
 3619  // this case is not handled correctly yet and needs to be fixed
3620  if( tdata.remapper == nullptr )
3621  {
3622  // do not compute coverage anymore in advance;
3623  // need to initialize the coverage set creation?
 3624  // or should we really use just one enclosing coverage set for all maps in here?
3625  // Now allocate and initialize the remapper object
3626 #ifdef MOAB_HAVE_MPI
3627  ParallelComm* pco_intx = data_intx.pcomm;
3628  tdata.remapper = new moab::TempestRemapper( context.MBI, pco_intx );
3629 #else
3630  tdata.remapper = new moab::TempestRemapper( context.MBI );
3631 #endif
3632  tdata.remapper->meshValidate = true;
3633  tdata.remapper->constructEdgeMap = true;
3634 
3635  // Do not create new filesets; Use the sets from our respective applications
3636  tdata.remapper->initialize( false );
3637  tdata.remapper->GetMeshSet( moab::Remapper::SourceMesh ) = data_source.file_set;
3638  tdata.remapper->GetMeshSet( moab::Remapper::TargetMesh ) = data_target.file_set;
3639  tdata.remapper->GetMeshSet( moab::Remapper::OverlapMesh ) = data_intx.file_set;
 3640  // create a unique coverage set; it will be populated later (when the coverage/source mesh is migrated)
3641  moab::EntityHandle covering_set_new;
3642  MB_CHK_SET_ERR( context.MBI->create_meshset( moab::MESHSET_SET, covering_set_new ), "Can't create new set" );
3643  tdata.remapper->GetMeshSet( moab::Remapper::CoveringMesh ) = covering_set_new;
3644  }
3645 
3646  // Setup loading of weights onto TempestOnlineMap
3647  // Set the context for the remapping weights computation
3648  tdata.weightMaps[std::string( solution_weights_identifier )] = new moab::TempestOnlineMap( tdata.remapper );
3649 
3650  // Now allocate and initialize the remapper object
3651  moab::TempestOnlineMap* weightMap = tdata.weightMaps[std::string( solution_weights_identifier )];
3652  assert( weightMap != nullptr );
3653 
3654  EntityHandle source_set = data_source.file_set;
3655  EntityHandle covering_set = tdata.remapper->GetMeshSet( Remapper::CoveringMesh );
3656  EntityHandle target_set = data_target.file_set; // default: row based partition
3657 
3658  int src_elem_dof_length = 1, tgt_elem_dof_length = 1; // default=1: FV - element average DoF value
3659  // tags of interest are either GLOBAL_DOFS (SE) or GLOBAL_ID (FV)
3660  Tag gdsTag = nullptr;
3661  if( *srctype == 1 || *tgttype == 1 )
3662  {
3663  MB_CHK_ERR( context.MBI->tag_get_handle( "GLOBAL_DOFS", gdsTag ) );
3664  assert( gdsTag );
3665  }
3666  //#endif
3667  // Find the DoF tag length
 3668  // (for spectral elements the GLOBAL_DOFS tag length is usually 16)
3669  // first check if we need to query the source
3670  if( *srctype == 1 ) // spectral element
3671  MB_CHK_ERR( context.MBI->tag_get_length( gdsTag, src_elem_dof_length ) );
3672  // find the DoF tag length
3673  // next check if we need to query the target
3674  if( *tgttype == 1 ) // spectral element
3675  MB_CHK_ERR( context.MBI->tag_get_length( gdsTag, tgt_elem_dof_length ) );
3676 
3677  Tag gidTag = context.MBI->globalId_tag();
3678  std::vector< int > tgtDofValues; // srcDofValues,
3679 
3680  // populate first tuple
3681  // will be filled with entities on coupler, from which we will get the DOFs, based on type
3682  Range tgt_ents_of_interest;
3683 
3684  if( *tgttype == 1 ) // spectral element
3685  {
3686  MB_CHK_ERR( context.MBI->tag_get_length( gdsTag, tgt_elem_dof_length ) );
3687  MB_CHK_ERR( context.MBI->get_entities_by_type( target_set, MBQUAD, tgt_ents_of_interest ) );
3688  tgtDofValues.resize( tgt_ents_of_interest.size() * tgt_elem_dof_length );
3689  MB_CHK_ERR( context.MBI->tag_get_data( gdsTag, tgt_ents_of_interest, &tgtDofValues[0] ) );
3690  }
3691  else if( *tgttype == 2 )
3692  {
3693  // vertex global ids
3694  MB_CHK_ERR( context.MBI->get_entities_by_type( target_set, MBVERTEX, tgt_ents_of_interest ) );
3695  tgtDofValues.resize( tgt_ents_of_interest.size() * tgt_elem_dof_length );
3696  MB_CHK_ERR( context.MBI->tag_get_data( gidTag, tgt_ents_of_interest, &tgtDofValues[0] ) );
3697  }
3698  else if( *tgttype == 3 ) // for FV meshes, just get the global id of cell
3699  {
3700  // element global ids
3701  MB_CHK_ERR( context.MBI->get_entities_by_dimension( target_set, 2, tgt_ents_of_interest ) );
3702  tgtDofValues.resize( tgt_ents_of_interest.size() * tgt_elem_dof_length );
3703  MB_CHK_ERR( context.MBI->tag_get_data( gidTag, tgt_ents_of_interest, &tgtDofValues[0] ) );
3704  }
3705  else
3706  {
3707  MB_CHK_ERR( MB_FAILURE ); // we know only type 1 or 2 or 3
3708  }
3709 
3710  // pass tgt ordered dofs, and unique
3711  // we need to read area_b and set aream tag on target cells, too
3712  std::vector< int > sortTgtDofs( tgtDofValues.begin(), tgtDofValues.end() );
3713  std::sort( sortTgtDofs.begin(), sortTgtDofs.end() );
3714  sortTgtDofs.erase( std::unique( sortTgtDofs.begin(), sortTgtDofs.end() ), sortTgtDofs.end() ); // remove duplicates
3715 
3716  /// the tag should be created already in the e3sm workflow; if not, create it here
3717  Tag areaTag;
3718  ErrorCode rval =
3720  if( MB_ALREADY_ALLOCATED == rval )
3721  {
3722 #ifdef MOAB_HAVE_MPI
3723  if( 0 == data_intx.pcomm->rank() )
3724 #endif
3725  std::cout << " aream tag already defined \n ";
3726  }
3727 
3728  std::vector< double > trvAreaA, trvAreaB; // passed by reference
3729  int nA, nB; // passed by reference, so returned
3730  MB_CHK_SET_ERR( weightMap->ReadParallelMap( remap_weights_filename, sortTgtDofs, trvAreaA, nA, trvAreaB, nB ),
3731  "reading map from disk failed" );
3732  // trivially distributed areaAs and areaBs will need to be set on their correct source and target cells, as an aream tag
3733 
3734  // if we are on target mesh (row based partition)
3735  tdata.pid_src = pid_source;
3736  tdata.pid_dest = pid_target;
3737 
3738  // TODO: now if coverage set exists, let us perform a filter based on
3739  // available source/target nnz data in the map. The idea is to look at the
3740  // source coverage and target meshes and to figure out only relevant elements
3741  // that need to participate in the meshes.
3742 
3743  //tdata.remapper->SetMeshSet( Remapper::SourceMesh, source_set, &srcc_ents_of_interest );
 3744  // we have read area A from the map file, and we will set it as an "aream" double tag on the source set, knowing that we
 3745  // read it trivially, with a trivial distribution by the global DOFs
 3746  // local, private helper method:
3747  MB_CHK_SET_ERR( set_aream_from_trivial_distribution( pid_source, nA, trvAreaA ), " fail to set aream on source " );
3748  MB_CHK_SET_ERR( set_aream_from_trivial_distribution( pid_target, nB, trvAreaB ), " fail to set aream on target " );
3749 
3750  //tdata.remapper->SetMeshSet( Remapper::CoveringMesh, covering_set, &src_ents_of_interest );
3751  weightMap->SetSourceNDofsPerElement( src_elem_dof_length );
3752  //weightMap->set_col_dc_dofs( srcDofValues ); // will set col_dtoc_dofmap
3753 
3754  tdata.remapper->SetMeshSet( Remapper::TargetMesh, target_set, &tgt_ents_of_interest );
3755  weightMap->SetDestinationNDofsPerElement( tgt_elem_dof_length );
3756  weightMap->set_row_dc_dofs( tgtDofValues ); // will set row_dtoc_dofmap
3757 
3758  // TODO: ideally, we should get this from the remap_weights_filename and just propagate it
3759  std::string metadataStr = std::string( remap_weights_filename ) + ";FV:1:GLOBAL_ID;FV:1:GLOBAL_ID";
3760 
3761  data_intx.metadataMap[std::string( solution_weights_identifier )] = metadataStr;
3762 
3763  return moab::MB_SUCCESS;
3764 }
3765 
3766 ErrCode iMOAB_WriteMappingWeightsToFile(
3767  iMOAB_AppID pid_intersection,
3768  const iMOAB_String solution_weights_identifier, /* "scalar", "flux", "custom" */
3769  const iMOAB_String remap_weights_filename )
3770 {
3771  assert( solution_weights_identifier && strlen( solution_weights_identifier ) );
3772  assert( remap_weights_filename && strlen( remap_weights_filename ) );
3773 
3774  ErrorCode rval;
3775 
3776  // Get the source and target data and pcomm objects
3777  appData& data_intx = context.appDatas[*pid_intersection];
3778  TempestMapAppData& tdata = data_intx.tempestData;
3779 
3780  // Get the handle to the remapper object
3781  assert( tdata.remapper != nullptr );
3782 
3783  // Now get online weights object and ensure it is valid
3784  moab::TempestOnlineMap* weightMap = tdata.weightMaps[std::string( solution_weights_identifier )];
3785  assert( weightMap != nullptr );
3786 
3787  std::string filename = std::string( remap_weights_filename );
3788 
3789  std::string metadataStr = data_intx.metadataMap[std::string( solution_weights_identifier )];
3790  std::map< std::string, std::string > attrMap;
3791  attrMap["title"] = "MOAB-TempestRemap Online Regridding Weight Generator";
3792  attrMap["normalization"] = "ovarea";
3793  attrMap["map_aPb"] = filename;
3794 
3795  // const std::string delim = ";";
3796  // size_t pos = 0, index = 0;
3797  // std::vector< std::string > stringAttr( 3 );
3798  // // use find() function to get the position of the delimiters
3799  // while( ( pos = metadataStr.find( delim ) ) != std::string::npos )
3800  // {
3801  // std::string token1 = metadataStr.substr( 0, pos ); // store the substring
3802  // if( token1.size() > 0 || index == 0 ) stringAttr[index++] = token1;
3803  // std::cout << "\t Found token: " << token1 << std::endl;
3804  // metadataStr.erase(
3805  // 0, pos + delim.length() ); /* erase() function store the current positon and move to next token. */
3806  // }
3807  // std::cout << "\t Found token: " << metadataStr << " -- and index = " << index << std::endl;
3808  // stringAttr[index] = metadataStr; // store the last token of the string.
3809  // assert( index == 2 );
3810  // attrMap["remap_options"] = stringAttr[0];
3811  // attrMap["methodorder_b"] = stringAttr[1];
3812  // attrMap["methodorder_a"] = stringAttr[2];
3813  attrMap["concave_a"] = "false"; // defaults
3814  attrMap["concave_b"] = "false"; // defaults
3815  attrMap["bubble"] = "true"; // defaults
3816  attrMap["MOABversion"] = std::string( MOAB_VERSION );
3817 
3818  // Write the map file to disk in parallel using either HDF5 or SCRIP interface
3819  rval = weightMap->WriteParallelMap( filename, attrMap );MB_CHK_ERR( rval );
3820 
3821  return moab::MB_SUCCESS;
3822 }
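A hedged usage sketch (not part of iMOAB.cpp) of the two routines above: read a map file into an intersection app and write it back out under another name. The three app ids are assumed to be registered already; srctype = tgttype = 3 assumes cell-centered (FV, GLOBAL_ID based) source and target meshes, and the file names are hypothetical.

#include "moab/iMOAB.h"

// hedged sketch: load mapping weights from disk, then write them back out
static ErrCode roundtrip_map( iMOAB_AppID pid_src, iMOAB_AppID pid_tgt, iMOAB_AppID pid_intx )
{
    int srctype = 3, tgttype = 3;              // FV on both sides (see the type handling above)
    char weights_id[] = "scalar";              // identifier under which the weights are stored
    char map_in[]     = "map_src_tgt.nc";      // hypothetical input map file
    char map_out[]    = "map_src_tgt_copy.nc"; // hypothetical output map file

    ErrCode rc = iMOAB_LoadMappingWeightsFromFile( pid_src, pid_tgt, pid_intx, &srctype, &tgttype,
                                                   weights_id, map_in );
    if( rc != 0 ) return rc;
    return iMOAB_WriteMappingWeightsToFile( pid_intx, weights_id, map_out );
}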
3823 //#define VERBOSE
3824 #ifdef MOAB_HAVE_MPI
3825 ErrCode iMOAB_MigrateMapMesh( iMOAB_AppID pid1,
3826  iMOAB_AppID pid2,
3827  MPI_Comm* jointcomm,
3828  MPI_Group* groupA,
3829  MPI_Group* groupB,
3830  int* type,
3831  int* comp1,
3832  int* comp2 )
3833 {
3834  assert( jointcomm );
3835  assert( groupA );
3836  assert( groupB );
3837  bool is_fortran = false;
3838  if( *pid1 >= 0 ) is_fortran = context.appDatas[*pid1].is_fortran || is_fortran;
3839  if( *pid2 >= 0 ) is_fortran = context.appDatas[*pid2].is_fortran || is_fortran;
3840 
3841  MPI_Comm joint_communicator =
3842  ( is_fortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( jointcomm ) ) : *jointcomm );
3843 
3844  ErrorCode rval = MB_SUCCESS;
3845  int localRank = 0, numProcs = 1;
3846 
3847  // Get the local rank and number of processes in the joint communicator
3848  MPI_Comm_rank( joint_communicator, &localRank );
3849  MPI_Comm_size( joint_communicator, &numProcs );
3850 
 3851  // each model has a list of global ids that will need to be sent via gs (crystal router) to rendezvous
 3852  // with the other model on the joint_communicator
3853  TupleList TLcomp1;
3854  TLcomp1.initialize( 2, 0, 0, 0, 0 ); // to proc, marker
3855  TupleList TLcomp2;
3856  TLcomp2.initialize( 2, 0, 0, 0, 0 ); // to proc, marker
3857 
3858  // will push_back a new tuple, if needed
3859  TLcomp1.enableWriteAccess();
3860 
3861  // tags of interest are either GLOBAL_DOFS or GLOBAL_ID
3862  Tag gdsTag;
3863 
3864  // find the values on first cell
3865  int lenTagType1 = 1;
3866  if( *type == 1 )
3867  {
3868  rval = context.MBI->tag_get_handle( "GLOBAL_DOFS", gdsTag );MB_CHK_ERR( rval );
3869  rval = context.MBI->tag_get_length( gdsTag, lenTagType1 );MB_CHK_ERR( rval ); // usually it is 16
3870  }
3871  Tag gidTag = context.MBI->globalId_tag();
3872 
3873  std::vector< int > valuesComp1;
3874 
3875  // populate first tuple
3876  Range ents_of_interest; // will be filled with entities on pid1, that need to be distributed,
3877  // rearranged in split_ranges map
3878  if( *pid1 >= 0 )
3879  {
3880  appData& data1 = context.appDatas[*pid1];
3881  EntityHandle fset1 = data1.file_set;
3882 
3883  if( *type == 1 )
3884  {
3885  assert( gdsTag );
3886  rval = context.MBI->get_entities_by_type( fset1, MBQUAD, ents_of_interest );MB_CHK_ERR( rval );
3887  valuesComp1.resize( ents_of_interest.size() * lenTagType1 );
3888  rval = context.MBI->tag_get_data( gdsTag, ents_of_interest, &valuesComp1[0] );MB_CHK_ERR( rval );
3889  }
3890  else if( *type == 2 )
3891  {
3892  rval = context.MBI->get_entities_by_type( fset1, MBVERTEX, ents_of_interest );MB_CHK_ERR( rval );
3893  valuesComp1.resize( ents_of_interest.size() );
3894  rval = context.MBI->tag_get_data( gidTag, ents_of_interest, &valuesComp1[0] );MB_CHK_ERR( rval ); // just global ids
3895  }
3896  else if( *type == 3 ) // for FV meshes, just get the global id of cell
3897  {
3898  rval = context.MBI->get_entities_by_dimension( fset1, 2, ents_of_interest );MB_CHK_ERR( rval );
3899  valuesComp1.resize( ents_of_interest.size() );
3900  rval = context.MBI->tag_get_data( gidTag, ents_of_interest, &valuesComp1[0] );MB_CHK_ERR( rval ); // just global ids
3901  }
3902  else
3903  {
3904  MB_CHK_ERR( MB_FAILURE ); // we know only type 1 or 2 or 3
3905  }
3906  // now fill the tuple list with info and markers
3907  // because we will send only the ids, order and compress the list
3908  std::set< int > uniq( valuesComp1.begin(), valuesComp1.end() );
3909  TLcomp1.resize( uniq.size() );
3910  for( std::set< int >::iterator sit = uniq.begin(); sit != uniq.end(); sit++ )
3911  {
3912  // to proc, marker, element local index, index in el
3913  int marker = *sit;
3914  int to_proc = marker % numProcs;
3915  int n = TLcomp1.get_n();
3916  TLcomp1.vi_wr[2 * n] = to_proc; // send to processor
3917  TLcomp1.vi_wr[2 * n + 1] = marker;
3918  TLcomp1.inc_n();
3919  }
3920  }
3921 
3922  ProcConfig pc( joint_communicator ); // proc config does the crystal router
3923  pc.crystal_router()->gs_transfer( 1, TLcomp1,
3924  0 ); // communication towards joint tasks, with markers
3925  // sort by value (key 1)
3926 #ifdef VERBOSE
3927  std::stringstream ff1;
3928  ff1 << "TLcomp1_" << localRank << ".txt";
3929  TLcomp1.print_to_file( ff1.str().c_str() ); // it will append!
3930 #endif
3931  moab::TupleList::buffer sort_buffer;
3932  sort_buffer.buffer_init( TLcomp1.get_n() );
3933  TLcomp1.sort( 1, &sort_buffer );
3934  sort_buffer.reset();
3935 #ifdef VERBOSE
3936  // after sorting
3937  TLcomp1.print_to_file( ff1.str().c_str() ); // it will append!
3938 #endif
3939  // do the same, for the other component, number2, with type2
3940  // start copy
3941  TLcomp2.enableWriteAccess();
3942 
3943  //moab::TempestOnlineMap* weightMap = nullptr; // declare it outside, but it will make sense only for *pid2 >= 0
 3944  // when *pid2 >= 0, we are on the coupler PEs, the read map exists, and coupler procs exist
3945  // populate second tuple with ids from read map: we need row_gdofmap and col_gdofmap
3946  std::vector< int > valuesComp2;
3947  if( *pid2 >= 0 ) // we are now on coupler, map side
3948  {
3949  appData& data2 = context.appDatas[*pid2];
3950  TempestMapAppData& tdata = data2.tempestData;
3951  // could be more than one map, read from file
3952  // get all column dofs, and create a union
3953  // assert( tdata.weightMaps.size() == 1 );
3954  // maybe we need to check it is the map we expect
3955  for( auto mapIt = tdata.weightMaps.begin(); mapIt != tdata.weightMaps.end(); ++mapIt )
3956  {
3957  moab::TempestOnlineMap* weightMap = mapIt->second;
3958  std::vector< int > valueDofs;
3959  rval = weightMap->fill_col_ids( valueDofs );MB_CHK_ERR( rval );
3960  valuesComp2.insert( valuesComp2.end(), valueDofs.begin(), valueDofs.end() );
3961  }
3962  // std::vector<int> ids_of_interest;
3963  // do a deep copy of the ids of interest: row ids
3964  // we are interested in col ids, source
3965  // new method from moab::TempestOnlineMap
3966 
3967  // now fill the tuple list with info and markers
3968  std::set< int > uniq( valuesComp2.begin(), valuesComp2.end() );
3969  TLcomp2.resize( uniq.size() );
3970  for( std::set< int >::iterator sit = uniq.begin(); sit != uniq.end(); sit++ )
3971  {
3972  // to proc, marker, element local index, index in el
3973  int marker = *sit;
3974  int to_proc = marker % numProcs;
3975  int n = TLcomp2.get_n();
3976  TLcomp2.vi_wr[2 * n] = to_proc; // send to processor
3977  TLcomp2.vi_wr[2 * n + 1] = marker;
3978  TLcomp2.inc_n();
3979  }
3980  }
3981  pc.crystal_router()->gs_transfer( 1, TLcomp2,
3982  0 ); // communication towards joint tasks, with markers
3983  // sort by value (key 1)
 3984  // in the rendezvous approach, matching markers meet at the rendezvous processor, marker % nprocs
3985 #ifdef VERBOSE
3986  std::stringstream ff2;
3987  ff2 << "TLcomp2_" << localRank << ".txt";
3988  TLcomp2.print_to_file( ff2.str().c_str() );
3989 #endif
3990  sort_buffer.buffer_reserve( TLcomp2.get_n() );
3991  TLcomp2.sort( 1, &sort_buffer );
3992  sort_buffer.reset();
3993  // end copy
3994 #ifdef VERBOSE
3995  TLcomp2.print_to_file( ff2.str().c_str() );
3996 #endif
3997  // need to send back the info, from the rendezvous point, for each of the values
 3998  /* go over each value on the local process in the joint communicator;
 3999 
 4000  we now have to send back the info needed for communication:
 4001  loop in sync over both TLcomp1 and TLcomp2 on the local process,
 4002  and build new tuple lists to send back, populating them at the same time,
 4003  based on the marker, which is indexed in both
 4004  */
4005 
4006  TupleList TLBackToComp1;
4007  TLBackToComp1.initialize( 3, 0, 0, 0, 0 ); // to proc, marker, from proc on comp2,
4008  TLBackToComp1.enableWriteAccess();
4009 
4010  int n1 = TLcomp1.get_n();
4011  int n2 = TLcomp2.get_n();
4012 
4013  int indexInTLComp1 = 0;
4014  int indexInTLComp2 = 0; // advance both, according to the marker
4015  if( n1 > 0 && n2 > 0 )
4016  {
4017 
4018  while( indexInTLComp1 < n1 && indexInTLComp2 < n2 ) // if any is over, we are done
4019  {
4020  int currentValue1 = TLcomp1.vi_rd[2 * indexInTLComp1 + 1];
4021  int currentValue2 = TLcomp2.vi_rd[2 * indexInTLComp2 + 1];
4022  if( currentValue1 < currentValue2 )
4023  {
4024  // we have a big problem; basically, we are saying that
4025  // dof currentValue is on one model and not on the other
4026  // std::cout << " currentValue1:" << currentValue1 << " missing in comp2" << "\n";
4027  indexInTLComp1++;
4028  continue;
4029  }
4030  if( currentValue1 > currentValue2 )
4031  {
4032  // std::cout << " currentValue2:" << currentValue2 << " missing in comp1" << "\n";
4033  indexInTLComp2++;
4034  continue;
4035  }
4036  int size1 = 1;
4037  int size2 = 1;
4038  while( indexInTLComp1 + size1 < n1 && currentValue1 == TLcomp1.vi_rd[2 * ( indexInTLComp1 + size1 ) + 1] )
4039  size1++;
4040  while( indexInTLComp2 + size2 < n2 && currentValue2 == TLcomp2.vi_rd[2 * ( indexInTLComp2 + size2 ) + 1] )
4041  size2++;
4042  // must be found in both lists, find the start and end indices
4043  for( int i1 = 0; i1 < size1; i1++ )
4044  {
4045  for( int i2 = 0; i2 < size2; i2++ )
4046  {
4047  // send the info back to components
4048  int n = TLBackToComp1.get_n();
4049  TLBackToComp1.reserve();
4050  TLBackToComp1.vi_wr[3 * n] =
4051  TLcomp1.vi_rd[2 * ( indexInTLComp1 + i1 )]; // send back to the proc marker
4052  // came from, info from comp2
4053  TLBackToComp1.vi_wr[3 * n + 1] = currentValue1; // initial value (resend?)
4054  TLBackToComp1.vi_wr[3 * n + 2] = TLcomp2.vi_rd[2 * ( indexInTLComp2 + i2 )]; // from proc on comp2
4055  }
4056  }
4057  indexInTLComp1 += size1;
4058  indexInTLComp2 += size2;
4059  }
4060  }
4061  pc.crystal_router()->gs_transfer( 1, TLBackToComp1, 0 ); // communication towards original tasks, with info about
4062 
4063  TupleList TLv; // vertices
4064  TupleList TLc; // cells if needed (not type 2)
4065 
4066  if( *pid1 >= 0 )
4067  {
4068  // we are on original comp 1 tasks
4069  // before ordering
4070  // now for each value in TLBackToComp1.vi_rd[3*i+1], on current proc, we know the
4071  // processors it communicates with
4072 #ifdef VERBOSE
4073  std::stringstream f1;
4074  f1 << "TLBack1_" << localRank << ".txt";
4075  TLBackToComp1.print_to_file( f1.str().c_str() );
4076 #endif
 4077  // so we are now on pid1; we now know, for each cell, where it has to go
4078  int n = TLBackToComp1.get_n();
4079  std::map< int, std::set< int > > uniqueIDs;
4080  for( int i = 0; i < n; i++ )
4081  {
4082  int to_proc = TLBackToComp1.vi_wr[3 * i + 2];
4083  int globalId = TLBackToComp1.vi_wr[3 * i + 1];
4084  uniqueIDs[to_proc].insert( globalId );
4085  }
4086  // gidTag is gid tag
4087  // gdsTag is GLOBAL_DOFS , used only for spectral (type 1)
4088  // (*type 1 is spectral, right now not used in E3SM)
4089 
4090  std::map< int, Range > splits;
4091  for( size_t i = 0; i < ents_of_interest.size(); i++ )
4092  {
4093  EntityHandle ent = ents_of_interest[i];
4094  for( int j = 0; j < lenTagType1; j++ )
4095  {
4096  int marker = valuesComp1[i * lenTagType1 + j];
4097  for( auto mit = uniqueIDs.begin(); mit != uniqueIDs.end(); mit++ )
4098  {
4099  int proc = mit->first;
4100  std::set< int >& setIds = mit->second;
4101  if( setIds.find( marker ) != setIds.end() )
4102  {
4103  splits[proc].insert( ent );
4104  }
4105  }
4106  }
4107  }
4108 
4109  std::map< int, Range > verts_to_proc;
4110  int numv = 0, numc = 0;
4111  for( auto it = splits.begin(); it != splits.end(); it++ )
4112  {
4113  int to_proc = it->first;
4114  Range verts;
4115  if( *type != 2 )
4116  {
4117  rval = context.MBI->get_connectivity( it->second, verts );MB_CHK_ERR( rval );
4118  numc += (int)it->second.size();
4119  }
4120  else
4121  verts = it->second;
4122  verts_to_proc[to_proc] = verts;
4123  numv += (int)verts.size();
4124  }
4125  // first vertices:
4126  TLv.initialize( 2, 0, 0, 3, numv ); // to proc, GLOBAL ID, 3 real coordinates
4127  TLv.enableWriteAccess();
4128  // use the global id of vertices for connectivity
4129  for( auto it = verts_to_proc.begin(); it != verts_to_proc.end(); it++ )
4130  {
4131  int to_proc = it->first;
4132  Range& verts = it->second;
4133  for( Range::iterator vit = verts.begin(); vit != verts.end(); ++vit )
4134  {
4135  EntityHandle v = *vit;
4136  int n = TLv.get_n(); // current size of tuple list
4137  TLv.vi_wr[2 * n] = to_proc; // send to processor
4138 
4139  rval = context.MBI->tag_get_data( gidTag, &v, 1, &( TLv.vi_wr[2 * n + 1] ) );MB_CHK_ERR( rval );
4140  rval = context.MBI->get_coords( &v, 1, &( TLv.vr_wr[3 * n] ) );MB_CHK_ERR( rval );
4141  TLv.inc_n(); // increment tuple list size
4142  }
4143  }
4144  if( *type != 2 )
4145  {
4146  // to proc, ID cell, gdsTag, nbv, id conn,
4147  int size_tuple =
4148  2 + ( ( *type != 1 ) ? 0 : lenTagType1 ) + 1 + 10; // 10 is the max number of vertices in cell
4149 
4150  std::vector< int > gdvals;
4151 
 4152  TLc.initialize( size_tuple, 0, 0, 0, numc ); // to proc, cell GLOBAL ID, optional GLOBAL_DOFS, nnodes, vertex global ids
4153  TLc.enableWriteAccess();
4154  for( auto it = splits.begin(); it != splits.end(); it++ )
4155  {
4156  int to_proc = it->first;
4157  Range& cells = it->second;
4158  for( Range::iterator cit = cells.begin(); cit != cells.end(); ++cit )
4159  {
4160  EntityHandle cell = *cit;
4161  int n = TLc.get_n(); // current size of tuple list
4162  TLc.vi_wr[size_tuple * n] = to_proc;
4163  int current_index = 2;
4164  rval = context.MBI->tag_get_data( gidTag, &cell, 1, &( TLc.vi_wr[size_tuple * n + 1] ) );MB_CHK_ERR( rval );
4165  if( 1 == *type )
4166  {
4167  rval = context.MBI->tag_get_data( gdsTag, &cell, 1,
4168  &( TLc.vi_wr[size_tuple * n + current_index] ) );MB_CHK_ERR( rval );
4169  current_index += lenTagType1;
4170  }
4171  // now get connectivity
4172  const EntityHandle* conn = NULL;
4173  int nnodes = 0;
4174  rval = context.MBI->get_connectivity( cell, conn, nnodes );MB_CHK_ERR( rval );
4175  // fill nnodes:
4176  TLc.vi_wr[size_tuple * n + current_index] = nnodes;
4177  rval = context.MBI->tag_get_data( gidTag, conn, nnodes,
4178  &( TLc.vi_wr[size_tuple * n + current_index + 1] ) );MB_CHK_ERR( rval );
4179  TLc.inc_n(); // increment tuple list size
4180  }
4181  }
4182  }
4183  }
4184  else if( *pid2 >= 0 ) // TLv and TLc should be able to receive if *pid2 >= 0
4185  // this case will not happen if pid1 and pid2 are both on the coupler side
4186  // we need to cover the case if map migrate was used directly
4187  // in one hop projection; right now, we prefer 2 hop projection
4188  {
4189  TLv.initialize( 2, 0, 0, 3, 0 ); // no vertices here yet, for sure
4190  TLv.enableWriteAccess(); // to be able to receive stuff, even if nothing is here yet, on this task
4191  if( *type != 2 ) // for point cloud, we do not need to initialize TLc (for cells)
4192  {
4193  // we still need to initialize the tuples with the right size, as in form_tuples_to_migrate_mesh
4194  int size_tuple = 2 + ( ( *type != 1 ) ? 0 : lenTagType1 ) + 1 +
4195  10; // 10 is the max number of vertices in cell; kind of arbitrary
4196  TLc.initialize( size_tuple, 0, 0, 0, 0 );
4197  TLc.enableWriteAccess();
4198  }
4199  }
4200  pc.crystal_router()->gs_transfer( 1, TLv, 0 ); // communication towards coupler tasks, with mesh vertices
4201  if( *type != 2 ) pc.crystal_router()->gs_transfer( 1, TLc, 0 ); // those are cells
4202 
 4203  if( *pid2 >= 0 ) // will receive the mesh (the coverage mesh) on coupler PEs
4204  {
4205  appData& dataIntx = context.appDatas[*pid2];
4206  TempestMapAppData& tdata = dataIntx.tempestData;
4207  Range primary_ents; // vertices for type 2, cells of dim 2 for type 1 or 3
 4208  std::vector< int > values_entities; // will be of size primary_ents.size() * lenTagType1
4209  EntityHandle fset3 = tdata.remapper->GetMeshSet( Remapper::CoveringMesh );
4210 
4211  // start copy
4212  std::map< int, EntityHandle > vertexMap; //
4213  Range verts;
4214  // always form vertices and add them to the fset3;
 4215  int n = TLv.get_n();
 4216  EntityHandle vertex;
4217  for( int i = 0; i < n; i++ )
4218  {
4219  int gid = TLv.vi_rd[2 * i + 1];
4220  if( vertexMap.find( gid ) == vertexMap.end() )
4221  {
4222  // need to form this vertex
4223  rval = context.MBI->create_vertex( &( TLv.vr_rd[3 * i] ), vertex );MB_CHK_ERR( rval );
4224  vertexMap[gid] = vertex;
4225  verts.insert( vertex );
4226  rval = context.MBI->tag_set_data( gidTag, &vertex, 1, &gid );MB_CHK_ERR( rval );
4227  }
4228  }
4229  rval = context.MBI->add_entities( fset3, verts );MB_CHK_ERR( rval );
4230  if( 2 == *type )
4231  {
4232  values_entities.resize( verts.size() ); // just get the ids of vertices
4233  rval = context.MBI->tag_get_data( gidTag, verts, &values_entities[0] );MB_CHK_ERR( rval );
4234  primary_ents = verts;
4235  //return MB_SUCCESS;
4236  }
4237  else
4238  {
4239  n = TLc.get_n();
4240  int size_tuple =
4241  2 + ( ( *type != 1 ) ? 0 : lenTagType1 ) + 1 + 10; // 10 is the max number of vertices in cell
4242 
4243  EntityHandle new_element;
4244 
4245  std::map< int, EntityHandle >
4246  cellMap; // do not create one if it already exists, maybe from other processes
4247  for( int i = 0; i < n; i++ )
4248  {
4249  int from_proc = TLc.vi_rd[size_tuple * i];
4250  int globalIdEl = TLc.vi_rd[size_tuple * i + 1];
4251  if( cellMap.find( globalIdEl ) == cellMap.end() ) // need to create the cell
4252  {
4253  int current_index = 2;
4254  if( 1 == *type ) current_index += lenTagType1;
4255  int nnodes = TLc.vi_rd[size_tuple * i + current_index];
4256  std::vector< EntityHandle > conn;
4257  conn.resize( nnodes );
4258  for( int j = 0; j < nnodes; j++ )
4259  {
4260  conn[j] = vertexMap[TLc.vi_rd[size_tuple * i + current_index + j + 1]];
4261  }
4262  //
4263  EntityType entType = MBQUAD;
4264  if( nnodes > 4 ) entType = MBPOLYGON;
4265  if( nnodes < 4 ) entType = MBTRI;
4266  rval = context.MBI->create_element( entType, &conn[0], nnodes, new_element );MB_CHK_SET_ERR( rval, "can't create new element " );
4267  primary_ents.insert( new_element );
4268  cellMap[globalIdEl] = new_element;
4269  rval = context.MBI->tag_set_data( gidTag, &new_element, 1, &globalIdEl );MB_CHK_SET_ERR( rval, "can't set global id tag on cell " );
4270  if( 1 == *type )
4271  {
4272  // set the gds tag
4273  rval = context.MBI->tag_set_data( gdsTag, &new_element, 1, &( TLc.vi_rd[size_tuple * i + 2] ) );MB_CHK_SET_ERR( rval, "can't set gds tag on cell " );
4274  }
4275  }
4276  }
4277  rval = context.MBI->add_entities( fset3, primary_ents );MB_CHK_ERR( rval );
4278  if( 1 == *type )
4279  {
4280  values_entities.resize( lenTagType1 * primary_ents.size() );
4281  rval = context.MBI->tag_get_data( gdsTag, primary_ents, &values_entities[0] );MB_CHK_ERR( rval );
4282  }
4283  else // *type == 3
4284  {
4285  values_entities.resize( primary_ents.size() ); // just get the global ids !
4286  rval = context.MBI->tag_get_data( gidTag, primary_ents, &values_entities[0] );MB_CHK_ERR( rval );
4287  }
4288  }
4289 
4290  int ndofPerEl = 1;
4291  if( 1 == *type ) ndofPerEl = (int)( sqrt( lenTagType1 ) );
4292 
4293  tdata.remapper->SetMeshSet( Remapper::CoveringMesh, fset3, &primary_ents );
4294  // dump covering mesh in a file, to look at it
4295  // should be one covering mesh per task, should cover the target mesh set
4296 #ifdef VERBOSE
4297  std::stringstream fcov;
4298  fcov << "MapCover_" << numProcs << "_" << localRank << ".h5m";
4299  context.MBI->write_file( fcov.str().c_str(), 0, 0, &fset3, 1 );
4300  EntityHandle fset2 = tdata.remapper->GetMeshSet( Remapper::TargetMesh );
4301  std::stringstream ftarg;
4302  ftarg << "TargMap_" << numProcs << "_" << localRank << ".h5m";
4303  context.MBI->write_file( ftarg.str().c_str(), 0, 0, &fset2, 1 );
4304 #endif
4305  for( auto mapIt = tdata.weightMaps.begin(); mapIt != tdata.weightMaps.end(); ++mapIt )
4306  {
4307  moab::TempestOnlineMap* weightMap = mapIt->second;
4308  weightMap->SetSourceNDofsPerElement( ndofPerEl );
4309  weightMap->set_col_dc_dofs( values_entities ); // will set col_dtoc_dofmap
4310  }
4311  }
4312 
4313  // compute par comm graph
4314  int ierr = iMOAB_ComputeCommGraph( pid1, pid2, jointcomm, groupA, groupB, type, type, comp1, comp2 );
4315  if( ierr != 0 ) return moab::MB_FAILURE;
4316 
4317  return moab::MB_SUCCESS;
4318 }
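A minimal sketch (not part of iMOAB.cpp) of the rendezvous rule used by the routine above: a DoF marker from either component is routed to rank marker % numProcs of the joint communicator, so matching markers from the two sides always meet on the same task before the ownership information is sent back.

// hedged sketch: rendezvous rank for a DoF marker, as used for both TLcomp1 and TLcomp2 above
static int rendezvous_rank( int marker, int numProcs )
{
    return marker % numProcs;
}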
4319 #undef VERBOSE
4320 #endif // #ifdef MOAB_HAVE_MPI
4321 
4322 #endif // #ifdef MOAB_HAVE_NETCDF
4323 
4324 #define USE_API
4325 static ErrCode ComputeSphereRadius( iMOAB_AppID pid, double* radius )
4326 {
4327  ErrorCode rval;
4328  moab::CartVect pos;
4329 
4330  Range& verts = context.appDatas[*pid].all_verts;
4331  *radius = 1.0;
4332  if( verts.empty() ) return moab::MB_SUCCESS;
4333  moab::EntityHandle firstVertex = ( verts[0] );
4334 
4335  // coordinate data
4336  rval = context.MBI->get_coords( &( firstVertex ), 1, (double*)&( pos[0] ) );MB_CHK_ERR( rval );
4337 
4338  // compute the distance from origin
4339  // TODO: we could do this in a loop to verify if the pid represents a spherical mesh
4340  *radius = pos.length();
4341  return moab::MB_SUCCESS;
4342 }
4343 
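A hedged sketch (not part of iMOAB.cpp) of the TODO noted above: loop over all local vertices and check that each radius agrees with the first one within a relative tolerance, to decide whether the mesh really lies on a sphere. It reuses only the MOAB calls already used in this file; the tolerance value is an assumption.

#include "moab/Core.hpp"
#include "moab/CartVect.hpp"
#include <cmath>

// hedged sketch: true if all vertices in `verts` are (approximately) at the same distance from the origin
static bool looks_spherical( moab::Interface* mbi, const moab::Range& verts, double rel_tol = 1.e-8 )
{
    double radius0 = -1.0;
    for( moab::Range::const_iterator vit = verts.begin(); vit != verts.end(); ++vit )
    {
        moab::EntityHandle v = *vit;
        moab::CartVect pos;
        if( moab::MB_SUCCESS != mbi->get_coords( &v, 1, pos.array() ) ) return false;
        if( radius0 < 0.0 )
            radius0 = pos.length(); // reference radius from the first vertex
        else if( std::fabs( pos.length() - radius0 ) > rel_tol * radius0 )
            return false;           // this vertex is off the reference sphere
    }
    return true; // empty ranges are trivially "spherical"
}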
4344 ErrCode iMOAB_SetMapGhostLayers( iMOAB_AppID pid, int* n_src_ghost_layers, int* n_tgt_ghost_layers )
4345 {
4346  appData& data = context.appDatas[*pid];
4347 
4348  // Set the number of ghost layers
4349  data.tempestData.num_src_ghost_layers = *n_src_ghost_layers; // number of ghost layers for source
 4350  data.tempestData.num_tgt_ghost_layers = *n_tgt_ghost_layers; // number of ghost layers for target
4351 
4352  return moab::MB_SUCCESS;
4353 }
4354 
4355 ErrCode iMOAB_ComputeCoverageMesh( iMOAB_AppID pid_src, iMOAB_AppID pid_tgt, iMOAB_AppID pid_intx )
4356 {
4357  // Default constant parameters
4358  constexpr bool validate = true;
4359  constexpr bool meshCleanup = true;
4360  bool gnomonic = true;
4361  constexpr double defaultradius = 1.0;
4362  constexpr double boxeps = 1.e-10;
4363 
4364  // Other constant parameters
4365  const double epsrel = ReferenceTolerance; // ReferenceTolerance is defined in Defines.h in tempestremap source ;
4366  double radius_source = 1.0;
4367  double radius_target = 1.0;
4368 
4369  // Error code definitions
4370  ErrorCode rval;
4371  ErrCode ierr;
4372 
4373  // Get the source and target data and pcomm objects
4374  appData& data_src = context.appDatas[*pid_src];
4375  appData& data_tgt = context.appDatas[*pid_tgt];
4376  appData& data_intx = context.appDatas[*pid_intx];
4377 #ifdef MOAB_HAVE_MPI
4378  ParallelComm* pco_intx = context.appDatas[*pid_intx].pcomm;
4379 #endif
4380 
4381  // Mesh intersection has already been computed; Return early.
4382  TempestMapAppData& tdata = data_intx.tempestData;
4383  if( tdata.remapper != nullptr ) return moab::MB_SUCCESS; // nothing to do
4384 
4385  int rank = 0;
4386 #ifdef MOAB_HAVE_MPI
4387  if( pco_intx )
4388  {
4389  rank = pco_intx->rank();
4390  rval = pco_intx->check_all_shared_handles();MB_CHK_ERR( rval );
4391  }
4392 #endif
4393 
4394  moab::DebugOutput outputFormatter( std::cout, rank, 0 );
4395  outputFormatter.set_prefix( "[iMOAB_ComputeCoverageMesh]: " );
4396 
4397  ierr = iMOAB_UpdateMeshInfo( pid_src );MB_CHK_ERR( ierr );
4398  ierr = iMOAB_UpdateMeshInfo( pid_tgt );MB_CHK_ERR( ierr );
4399 
4400  // Rescale the radius of both to compute the intersection
4401  rval = ComputeSphereRadius( pid_src, &radius_source );MB_CHK_ERR( rval );
4402  rval = ComputeSphereRadius( pid_tgt, &radius_target );MB_CHK_ERR( rval );
4403 #ifdef VERBOSE
4404  if( !rank )
4405  outputFormatter.printf( 0, "Radius of spheres: source = %12.14f, and target = %12.14f\n", radius_source,
4406  radius_target );
4407 #endif
4408 
 4409  /* Let us make sure that the radii match for the source and target meshes. If not, rescale now and
 4410  * unscale later. */
4411  if( fabs( radius_source - radius_target ) > 1e-10 )
4412  { /* the radii are different */
4413  rval = IntxUtils::ScaleToRadius( context.MBI, data_src.file_set, defaultradius );MB_CHK_ERR( rval );
4414  rval = IntxUtils::ScaleToRadius( context.MBI, data_tgt.file_set, defaultradius );MB_CHK_ERR( rval );
4415  }
4416 
4417  // Default area_method = lHuiller; Options: Girard, GaussQuadrature (if TR is available)
4418 #ifdef MOAB_HAVE_TEMPESTREMAP
4420 #else
4421  IntxAreaUtils areaAdaptor( IntxAreaUtils::lHuiller );
4422 #endif
4423 
4424  if( meshCleanup )
4425  {
4426  // Address issues for source mesh first
4427  // fixes to enforce positive orientation of the vertices (outward normal)
4428  MB_CHK_ERR(
4429  areaAdaptor.positive_orientation( context.MBI, data_src.file_set, defaultradius /*radius_source*/ ) );
4430 
4431  // fixes to clean up any degenerate quadrangular elements present in the mesh (RLL specifically?)
4433 
4434  // fixes to enforce convexity in case concave elements are present
4436 
 4437  // Now address issues for the target mesh
4438  // fixes to enforce positive orientation of the vertices (outward normal)
4439  MB_CHK_ERR(
4440  areaAdaptor.positive_orientation( context.MBI, data_tgt.file_set, defaultradius /*radius_target*/ ) );
4441 
4442  // fixes to clean up any degenerate quadrangular elements present in the mesh (RLL specifically?)
4444 
4445  // fixes to enforce convexity in case concave elements are present
4447  }
4448 
4449  // print verbosely about the problem setting
4450 #ifdef VERBOSE
4451  {
4452  moab::Range rintxverts, rintxelems;
4453  rval = context.MBI->get_entities_by_dimension( data_src.file_set, 0, rintxverts );MB_CHK_ERR( rval );
4454  rval = context.MBI->get_entities_by_dimension( data_src.file_set, data_src.dimension, rintxelems );MB_CHK_ERR( rval );
4455 
4456  moab::Range bintxverts, bintxelems;
4457  rval = context.MBI->get_entities_by_dimension( data_tgt.file_set, 0, bintxverts );MB_CHK_ERR( rval );
4458  rval = context.MBI->get_entities_by_dimension( data_tgt.file_set, data_tgt.dimension, bintxelems );MB_CHK_ERR( rval );
4459 
4460  if( !rank )
4461  {
4462  outputFormatter.printf( 0, "The source set contains %zu vertices and %zu elements\n", rintxverts.size(),
4463  rintxelems.size() );
4464  outputFormatter.printf( 0, "The target set contains %zu vertices and %zu elements\n", bintxverts.size(),
4465  bintxelems.size() );
4466  }
4467  }
4468 #endif
4469  // use_kdtree_search = ( srctgt_areas_glb[0] < srctgt_areas_glb[1] );
4470  // advancing front method will fail unless source mesh has no holes (area = 4 * pi) on unit sphere
4471  // use_kdtree_search = true;
4472 
4473  data_intx.dimension = data_tgt.dimension;
4474  // set the context for the source and destination applications
4475  tdata.pid_src = pid_src;
4476  tdata.pid_dest = pid_tgt;
4477 
4478  // Now allocate and initialize the remapper object
4479 #ifdef MOAB_HAVE_MPI
4480  tdata.remapper = new moab::TempestRemapper( context.MBI, pco_intx );
4481 #else
4482  tdata.remapper = new moab::TempestRemapper( context.MBI );
4483 #endif
4484  tdata.remapper->meshValidate = validate;
4485  tdata.remapper->constructEdgeMap = true;
4486 
4487  // Do not create new filesets; Use the sets from our respective applications
4488  tdata.remapper->initialize( false );
4489  tdata.remapper->GetMeshSet( moab::Remapper::SourceMesh ) = data_src.file_set;
4490  tdata.remapper->GetMeshSet( moab::Remapper::TargetMesh ) = data_tgt.file_set;
4491  tdata.remapper->GetMeshSet( moab::Remapper::OverlapMesh ) = data_intx.file_set;
4492 
4493  // First, compute the covering source set.
4494  if( tdata.num_src_ghost_layers >= 1 ) gnomonic = false; // do not use gnomonic when we need ghost layers;
4495  rval =
4496  tdata.remapper->ConstructCoveringSet( epsrel, 1.0, 1.0, boxeps, false, gnomonic, tdata.num_src_ghost_layers );MB_CHK_ERR( rval );
4497 
4498 #ifdef MOAB_HAVE_TEMPESTREMAP
 4499  // set the reference to the covering set in the intersection PID
4500  data_intx.secondary_file_set = tdata.remapper->GetMeshSet( moab::Remapper::CoveringMesh );
4501 #endif
4502 
4503  return moab::MB_SUCCESS;
4504 }
4505 #ifdef MOAB_HAVE_TEMPESTREMAP
4506 ErrCode iMOAB_WriteCoverageMesh( iMOAB_AppID pid, const iMOAB_String prefix )
4507 {
4508  IMOAB_CHECKPOINTER( prefix, 2 );
4509  IMOAB_ASSERT( strlen( prefix ), "Invalid prefix." );
4510 
4511  appData& data = context.appDatas[*pid];
4512  EntityHandle fileSet = data.file_set;
4513 
4514  if( data.tempestData.remapper != nullptr )
4515  fileSet = data.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh );
4516  else if( data.file_set != data.secondary_file_set )
4517  fileSet = data.secondary_file_set;
4518  else
4519  MB_CHK_SET_ERR( moab::MB_FAILURE, "Invalid secondary file set handle" );
4520 
4521  std::ostringstream file_name;
4522  int rank = 0, size = 1;
4523 
4524 #ifdef MOAB_HAVE_MPI
4525  ParallelComm* pcomm = data.pcomm;
4526  rank = pcomm->rank();
4527  size = pcomm->size();
4528 #endif
4529 
4530  file_name << prefix << "_" << size << "_" << rank << ".h5m";
4531  // Now let us actually write the file to disk with appropriate options
4532  ErrorCode rval = context.MBI->write_file( file_name.str().c_str(), 0, 0, &fileSet, 1 );MB_CHK_ERR( rval );
4533  return moab::MB_SUCCESS;
4534 }
4535 #endif
4536 
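A hedged usage sketch (not part of iMOAB.cpp) tying the last few routines together: request ghost layers, build the coverage mesh, and dump it for inspection. The app ids are assumed to be registered on the coupler tasks with source and target meshes already in place; the file prefix is hypothetical.

#include "moab/iMOAB.h"

// hedged sketch: set ghost layers, compute the coverage mesh, and write it out per task
static ErrCode build_and_dump_coverage( iMOAB_AppID pid_src, iMOAB_AppID pid_tgt, iMOAB_AppID pid_intx )
{
    int n_src_ghosts = 1, n_tgt_ghosts = 0; // one source ghost layer; note this disables gnomonic projection above
    ErrCode rc = iMOAB_SetMapGhostLayers( pid_intx, &n_src_ghosts, &n_tgt_ghosts );
    if( rc != 0 ) return rc;

    rc = iMOAB_ComputeCoverageMesh( pid_src, pid_tgt, pid_intx );
    if( rc != 0 ) return rc;

    char prefix[] = "covMesh"; // files are written as covMesh_<size>_<rank>.h5m (see iMOAB_WriteCoverageMesh)
    return iMOAB_WriteCoverageMesh( pid_intx, prefix );
}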
4537 ErrCode iMOAB_ComputeMeshIntersectionOnSphere( iMOAB_AppID pid_src, iMOAB_AppID pid_tgt, iMOAB_AppID pid_intx )
4538 {
4539  // Default constant parameters
4540  constexpr bool validate = true;
4541  constexpr bool use_kdtree_search = true;
4542  constexpr double defaultradius = 1.0;
4543 
4544  // Get the source and target data and pcomm objects
4545  appData& data_src = context.appDatas[*pid_src];
4546  appData& data_tgt = context.appDatas[*pid_tgt];
4547  appData& data_intx = context.appDatas[*pid_intx];
4548 #ifdef MOAB_HAVE_MPI
4549  ParallelComm* pco_intx = context.appDatas[*pid_intx].pcomm;
4550 #endif
4551 
4552  // Mesh intersection has already been computed; Return early.
4553  TempestMapAppData& tdata = data_intx.tempestData;
4554 
4555  if( tdata.remapper == nullptr )
4556  {
4557  // user has not called the coverage mesh computation routine -- so explicitly call it now
4558  // this check supports the traditional workflow of directly computing mesh intersection
4559  // and letting this routine compute coverage mesh as needed
4560  MB_CHK_ERR( iMOAB_ComputeCoverageMesh( pid_src, pid_tgt, pid_intx ) );
4561  }
4562 
4563  int rank = 0;
4564 #ifdef MOAB_HAVE_MPI
4565  rank = pco_intx->rank();
4566 #endif
4567  moab::DebugOutput outputFormatter( std::cout, rank, 0 );
4568  outputFormatter.set_prefix( "[iMOAB_ComputeMeshIntersectionOnSphere]: " );
4569 
4570  // Next, compute intersections with MOAB.
4571  // for bilinear maps, this is overkill
4572  MB_CHK_ERR( tdata.remapper->ComputeOverlapMesh( use_kdtree_search, false ) );
4573 
4574  // Mapping computation done
4575  if( validate )
4576  {
4577  // Default area_method = lHuiller; Options: Girard, GaussQuadrature (if TR is available)
4578  IntxAreaUtils areaAdaptor( IntxAreaUtils::lHuiller );
4579  double local_areas[3] = { 0.0, 0.0, 0.0 }, global_areas[3] = { 0.0, 0.0, 0.0 };
4580  local_areas[0] = areaAdaptor.area_on_sphere( context.MBI, data_src.file_set, defaultradius /*radius_source*/ );
4581  local_areas[1] = areaAdaptor.area_on_sphere( context.MBI, data_tgt.file_set, defaultradius /*radius_target*/ );
4582  local_areas[2] = areaAdaptor.area_on_sphere( context.MBI, data_intx.file_set, defaultradius );
4583 #ifdef MOAB_HAVE_MPI
4584  global_areas[0] = global_areas[1] = global_areas[2] = 0.0;
4585  MPI_Reduce( &local_areas[0], &global_areas[0], 3, MPI_DOUBLE, MPI_SUM, 0, pco_intx->comm() );
4586 #else
4587  global_areas[0] = local_areas[0];
4588  global_areas[1] = local_areas[1];
4589  global_areas[2] = local_areas[2];
4590 #endif
4591 
4592  if( rank == 0 )
4593  {
4594  outputFormatter.printf( 0,
4595  "initial area: source mesh = %12.14f, target mesh = %12.14f, "
4596  "overlap mesh = %12.14f\n",
4597  global_areas[0], global_areas[1], global_areas[2] );
4598  outputFormatter.printf( 0, " relative error w.r.t source = %12.14e, and target = %12.14e\n",
4599  fabs( global_areas[0] - global_areas[2] ) / global_areas[0],
4600  fabs( global_areas[1] - global_areas[2] ) / global_areas[1] );
4601  }
4602  }
4603 
4604  return moab::MB_SUCCESS;
4605 }
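A minimal sketch of driving the intersection computation above; if iMOAB_ComputeCoverageMesh was not called beforehand, the routine calls it internally (see the nullptr check on the remapper). The helper name is hypothetical.

    // Sketch: compute the overlap mesh between two registered sphere meshes.
    static ErrCode example_compute_intersection( iMOAB_AppID pid_src, iMOAB_AppID pid_tgt, iMOAB_AppID pid_intx )
    {
        MB_CHK_ERR( iMOAB_ComputeMeshIntersectionOnSphere( pid_src, pid_tgt, pid_intx ) );
        return moab::MB_SUCCESS;
    }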
4606 
4607 ErrCode iMOAB_ComputePointDoFIntersection( iMOAB_AppID pid_src, iMOAB_AppID pid_tgt, iMOAB_AppID pid_intx )
4608 {
4609  ErrorCode rval;
4610  ErrCode ierr;
4611 
4612  double radius_source = 1.0;
4613  double radius_target = 1.0;
4614  const double epsrel = ReferenceTolerance; // ReferenceTolerance is defined in Defines.h in the TempestRemap sources
4615  const double boxeps = 1.e-8;
4616 
4617  // Get the source and target data and pcomm objects
4618  appData& data_src = context.appDatas[*pid_src];
4619  appData& data_tgt = context.appDatas[*pid_tgt];
4620  appData& data_intx = context.appDatas[*pid_intx];
4621 #ifdef MOAB_HAVE_MPI
4622  ParallelComm* pco_intx = context.appDatas[*pid_intx].pcomm;
4623 #endif
4624 
4625  // Mesh intersection has already been computed; Return early.
4626  TempestMapAppData& tdata = data_intx.tempestData;
4627  if( tdata.remapper != nullptr ) return moab::MB_SUCCESS; // nothing to do
4628 
4629 #ifdef MOAB_HAVE_MPI
4630  if( pco_intx )
4631  {
4632  rval = pco_intx->check_all_shared_handles();MB_CHK_ERR( rval );
4633  }
4634 #endif
4635 
4636  ierr = iMOAB_UpdateMeshInfo( pid_src );MB_CHK_ERR( ierr );
4637  ierr = iMOAB_UpdateMeshInfo( pid_tgt );MB_CHK_ERR( ierr );
4638 
4639  // Rescale the radius of both to compute the intersection
4640  ComputeSphereRadius( pid_src, &radius_source );
4641  ComputeSphereRadius( pid_tgt, &radius_target );
4642 
4643  IntxAreaUtils areaAdaptor;
4644  // print verbosely about the problem setting
4645  {
4646  moab::Range rintxverts, rintxelems;
4647  rval = context.MBI->get_entities_by_dimension( data_src.file_set, 0, rintxverts );MB_CHK_ERR( rval );
4648  rval = context.MBI->get_entities_by_dimension( data_src.file_set, 2, rintxelems );MB_CHK_ERR( rval );
4649  rval = IntxUtils::fix_degenerate_quads( context.MBI, data_src.file_set );MB_CHK_ERR( rval );
4650  rval = areaAdaptor.positive_orientation( context.MBI, data_src.file_set, radius_source );MB_CHK_ERR( rval );
4651 #ifdef VERBOSE
4652  std::cout << "The red set contains " << rintxverts.size() << " vertices and " << rintxelems.size()
4653  << " elements \n";
4654 #endif
4655 
4656  moab::Range bintxverts, bintxelems;
4657  rval = context.MBI->get_entities_by_dimension( data_tgt.file_set, 0, bintxverts );MB_CHK_ERR( rval );
4658  rval = context.MBI->get_entities_by_dimension( data_tgt.file_set, 2, bintxelems );MB_CHK_ERR( rval );
4659  rval = IntxUtils::fix_degenerate_quads( context.MBI, data_tgt.file_set );MB_CHK_ERR( rval );
4660  rval = areaAdaptor.positive_orientation( context.MBI, data_tgt.file_set, radius_target );MB_CHK_ERR( rval );
4661 #ifdef VERBOSE
4662  std::cout << "The blue set contains " << bintxverts.size() << " vertices and " << bintxelems.size()
4663  << " elements \n";
4664 #endif
4665  }
4666 
4667  data_intx.dimension = data_tgt.dimension;
4668  // set the context for the source and destination applications
4670  tdata.pid_src = pid_src;
4671  tdata.pid_dest = pid_tgt;
4672  data_intx.point_cloud = ( data_src.point_cloud || data_tgt.point_cloud );
4673  assert( data_intx.point_cloud == true );
4674 
4675  // Now allocate and initialize the remapper object
4676 #ifdef MOAB_HAVE_MPI
4677  tdata.remapper = new moab::TempestRemapper( context.MBI, pco_intx );
4678 #else
4679  tdata.remapper = new moab::TempestRemapper( context.MBI );
4680 #endif
4681  tdata.remapper->meshValidate = true;
4682  tdata.remapper->constructEdgeMap = true;
4683 
4684  // Do not create new filesets; Use the sets from our respective applications
4685  tdata.remapper->initialize( false );
4686  tdata.remapper->GetMeshSet( moab::Remapper::SourceMesh ) = data_src.file_set;
4687  tdata.remapper->GetMeshSet( moab::Remapper::TargetMesh ) = data_tgt.file_set;
4688  tdata.remapper->GetMeshSet( moab::Remapper::OverlapMesh ) = data_intx.file_set;
4689 
4690  /* Let us make sure that the radii match for the source and target meshes. If not, rescale now and
4691  * unscale later. */
4692  if( fabs( radius_source - radius_target ) > 1e-10 )
4693  { /* the radii are different */
4694  rval = IntxUtils::ScaleToRadius( context.MBI, data_src.file_set, 1.0 );MB_CHK_ERR( rval );
4695  rval = IntxUtils::ScaleToRadius( context.MBI, data_tgt.file_set, 1.0 );MB_CHK_ERR( rval );
4696  }
4697 
4698  rval = tdata.remapper->ConvertMeshToTempest( moab::Remapper::SourceMesh );MB_CHK_ERR( rval );
4699  rval = tdata.remapper->ConvertMeshToTempest( moab::Remapper::TargetMesh );MB_CHK_ERR( rval );
4700 
4701  // First, compute the covering source set.
4702  rval = tdata.remapper->ConstructCoveringSet( epsrel, 1.0, 1.0, boxeps, false );MB_CHK_ERR( rval );
4703 
4704 #ifdef MOAB_HAVE_MPI
4705  /* VSM: This context should be set on the data_src but it would overwrite the source
4706  covering set context in case it is coupled to another APP as well.
4707  This needs a redesign. */
4708  // data_intx.covering_set = tdata.remapper->GetCoveringSet();
4709  // data_src.covering_set = tdata.remapper->GetCoveringSet();
4710 #endif
4711 
4712  // Now let us re-convert the MOAB mesh back to Tempest representation
4713  // rval = tdata.remapper->ComputeGlobalLocalMaps();MB_CHK_ERR( rval );
4714 
4715  return moab::MB_SUCCESS;
4716 }
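A minimal sketch for the point-cloud path above, where no geometric overlap is computed and the call only prepares the remapper contexts for DoF-based matching; the helper name is hypothetical.

    static ErrCode example_setup_pointcloud_coupling( iMOAB_AppID pid_src, iMOAB_AppID pid_tgt, iMOAB_AppID pid_intx )
    {
        MB_CHK_ERR( iMOAB_ComputePointDoFIntersection( pid_src, pid_tgt, pid_intx ) );
        return moab::MB_SUCCESS;
    }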
4717 
4718 ErrCode iMOAB_ComputeScalarProjectionWeights(
4719  iMOAB_AppID pid_intx,
4720  const iMOAB_String solution_weights_identifier, /* "scalar", "flux", "custom" */
4721  const iMOAB_String disc_method_source,
4722  int* disc_order_source,
4723  const iMOAB_String disc_method_target,
4724  int* disc_order_target,
4725  const iMOAB_String fv_method,
4726  int* fNoBubble,
4727  int* fMonotoneTypeID,
4728  int* fVolumetric,
4729  int* fInverseDistanceMap,
4730  int* fNoConservation,
4731  int* fValidate,
4732  const iMOAB_String source_solution_tag_dof_name,
4733  const iMOAB_String target_solution_tag_dof_name )
4734 {
4735  assert( disc_order_source && disc_order_target && *disc_order_source > 0 && *disc_order_target > 0 );
4736  assert( solution_weights_identifier && strlen( solution_weights_identifier ) );
4737  assert( disc_method_source && strlen( disc_method_source ) );
4738  assert( disc_method_target && strlen( disc_method_target ) );
4739  assert( source_solution_tag_dof_name && strlen( source_solution_tag_dof_name ) );
4740  assert( target_solution_tag_dof_name && strlen( target_solution_tag_dof_name ) );
4741 
4742  // Get the source and target data and pcomm objects
4743  appData& data_intx = context.appDatas[*pid_intx];
4744  TempestMapAppData& tdata = data_intx.tempestData;
4745 
4746  // Setup computation of weights
4747  // Set the context for the remapping weights computation
4748  tdata.weightMaps[std::string( solution_weights_identifier )] = new moab::TempestOnlineMap( tdata.remapper );
4749 
4750  // Call to generate the remap weights with the tempest meshes
4751  moab::TempestOnlineMap* weightMap = tdata.weightMaps[std::string( solution_weights_identifier )];
4752  assert( weightMap != nullptr );
4753 
4754  GenerateOfflineMapAlgorithmOptions mapOptions;
4755  mapOptions.nPin = *disc_order_source;
4756  mapOptions.nPout = *disc_order_target;
4757  mapOptions.fSourceConcave = false;
4758  mapOptions.fTargetConcave = false;
4759 
4760  mapOptions.strMethod = "";
4761  if( fv_method ) mapOptions.strMethod += std::string( fv_method ) + ";";
4762  if( fMonotoneTypeID )
4763  {
4764  switch( *fMonotoneTypeID )
4765  {
4766  case 0:
4767  mapOptions.fMonotone = false;
4768  break;
4769  case 3:
4770  mapOptions.strMethod += "mono3;";
4771  break;
4772  case 2:
4773  mapOptions.strMethod += "mono2;";
4774  break;
4775  default:
4776  mapOptions.fMonotone = true;
4777  }
4778  }
4779  else
4780  mapOptions.fMonotone = false;
4781 
4782  mapOptions.fNoBubble = ( fNoBubble ? *fNoBubble : false );
4783  mapOptions.fNoConservation = ( fNoConservation ? *fNoConservation > 0 : false );
4784  mapOptions.fNoCorrectAreas = false;
4785  // mapOptions.fNoCheck = !( fValidate ? *fValidate : true );
4786  mapOptions.fNoCheck = true;
4787  if( fVolumetric && *fVolumetric ) mapOptions.strMethod += "volumetric;";
4788  if( fInverseDistanceMap && *fInverseDistanceMap ) mapOptions.strMethod += "invdist;";
4789 
4790  std::string metadataStr = mapOptions.strMethod + ";" + std::string( disc_method_source ) + ":" +
4791  std::to_string( *disc_order_source ) + ":" + std::string( source_solution_tag_dof_name ) +
4792  ";" + std::string( disc_method_target ) + ":" + std::to_string( *disc_order_target ) +
4793  ":" + std::string( target_solution_tag_dof_name );
4794 
4795  data_intx.metadataMap[std::string( solution_weights_identifier )] = metadataStr;
4796 
4797  // Now let us compute the local-global mapping and store it in the context
4798  // We need this mapping when computing matvec products and to do reductions in parallel
4799  // Additionally, the call below will also compute weights with TempestRemap
4800  MB_CHK_ERR( weightMap->GenerateRemappingWeights(
4801  std::string( disc_method_source ), // const std::string& strInputType
4802  std::string( disc_method_target ), // const std::string& strOutputType
4803  mapOptions, // GenerateOfflineMapAlgorithmOptions& mapOptions
4804  std::string( source_solution_tag_dof_name ), // const std::string& srcDofTagName = "GLOBAL_ID"
4805  std::string( target_solution_tag_dof_name ) // const std::string& tgtDofTagName = "GLOBAL_ID"
4806  ) );
4807 
4808  // print some map statistics
4809  weightMap->PrintMapStatistics();
4810 
4811  if( fValidate && *fValidate )
4812  {
4813  const double dNormalTolerance = 1.0E-8;
4814  const double dStrictTolerance = 1.0E-12;
4815  weightMap->CheckMap( true, true, ( fMonotoneTypeID && *fMonotoneTypeID ), dNormalTolerance, dStrictTolerance );
4816  }
4817 
4818  return moab::MB_SUCCESS;
4819 }
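A sketch of generating first-order conservative FV-to-FV weights with the routine above. The identifier "scalar", the "fv" discretization strings, and the GLOBAL_ID DoF tag names follow the conventions noted in the argument comments, but the concrete values and the helper name are only illustrative.

    // Sketch: FV->FV map named "scalar", with validation enabled.
    static ErrCode example_compute_fv_weights( iMOAB_AppID pid_intx )
    {
        int disc_order_src = 1, disc_order_tgt = 1;
        int fNoBubble = 1, fMonotoneTypeID = 0, fVolumetric = 0;
        int fInverseDistanceMap = 0, fNoConservation = 0, fValidate = 1;
        char weights_id[] = "scalar";
        char disc_src[]   = "fv";
        char disc_tgt[]   = "fv";
        char dof_src[]    = "GLOBAL_ID";
        char dof_tgt[]    = "GLOBAL_ID";
        return iMOAB_ComputeScalarProjectionWeights( pid_intx, weights_id, disc_src, &disc_order_src, disc_tgt,
                                                     &disc_order_tgt, nullptr /*fv_method*/, &fNoBubble,
                                                     &fMonotoneTypeID, &fVolumetric, &fInverseDistanceMap,
                                                     &fNoConservation, &fValidate, dof_src, dof_tgt );
    }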
4820 
4821 ErrCode iMOAB_ApplyScalarProjectionWeights(
4822  iMOAB_AppID pid_intersection,
4823  int* filter_type,
4824  const iMOAB_String solution_weights_identifier, /* "scalar", "flux", "custom" */
4825  const iMOAB_String source_solution_tag_name,
4826  const iMOAB_String target_solution_tag_name )
4827 {
4828  assert( solution_weights_identifier && strlen( solution_weights_identifier ) );
4829  assert( source_solution_tag_name && strlen( source_solution_tag_name ) );
4830  assert( target_solution_tag_name && strlen( target_solution_tag_name ) );
4831 
4832  moab::ErrorCode rval;
4833 
4834  // Get the source and target data and pcomm objects
4835  appData& data_intx = context.appDatas[*pid_intersection];
4836  TempestMapAppData& tdata = data_intx.tempestData;
4837 
4838  if( !tdata.weightMaps.count( std::string( solution_weights_identifier ) ) ) // key does not exist
4839  return moab::MB_FAILURE;
4840  moab::TempestOnlineMap* weightMap = tdata.weightMaps[std::string( solution_weights_identifier )];
4841 
4842  // we assume that there are separators ":" between the tag names
4843  std::vector< std::string > srcNames;
4844  std::vector< std::string > tgtNames;
4845  std::vector< Tag > srcTagHandles;
4846  std::vector< Tag > tgtTagHandles;
4847  std::string separator( ":" );
4848  std::string src_name( source_solution_tag_name );
4849  std::string tgt_name( target_solution_tag_name );
4850  split_tag_names( src_name, separator, srcNames );
4851  split_tag_names( tgt_name, separator, tgtNames );
4852  if( srcNames.size() != tgtNames.size() )
4853  {
4854  std::cout << " error in parsing source and target tag names. \n";
4855  return moab::MB_FAILURE;
4856  }
4857 
4858  // get both source and target tag handles
4859  for( size_t i = 0; i < srcNames.size(); i++ )
4860  {
4861  Tag tagHandle;
4862  // get source tag handles and arrange in order
4863  rval = context.MBI->tag_get_handle( srcNames[i].c_str(), tagHandle );
4864  if( MB_SUCCESS != rval || nullptr == tagHandle )
4865  {
4866  return moab::MB_TAG_NOT_FOUND;
4867  }
4868  srcTagHandles.push_back( tagHandle );
4869 
4870  // get corresponding target handles and arrange in the same order
4871  rval = context.MBI->tag_get_handle( tgtNames[i].c_str(), tagHandle );
4872  if( MB_SUCCESS != rval || nullptr == tagHandle )
4873  {
4874  return moab::MB_TAG_NOT_FOUND;
4875  }
4876  tgtTagHandles.push_back( tagHandle );
4877  }
4878 
4879 #ifdef VERBOSE
4880  // Now get reference to the remapper object
4881  moab::TempestRemapper* remapper = tdata.remapper;
4882  std::vector< double > solSTagVals;
4883  std::vector< double > solTTagVals;
4884 
4885  moab::Range sents, tents;
4886  if( data_intx.point_cloud )
4887  {
4888  appData& data_src = context.appDatas[*tdata.pid_src];
4889  appData& data_tgt = context.appDatas[*tdata.pid_dest];
4890  if( data_src.point_cloud )
4891  {
4892  moab::Range& covSrcEnts = remapper->GetMeshVertices( moab::Remapper::CoveringMesh );
4893  solSTagVals.resize( covSrcEnts.size(), 0. );
4894  sents = covSrcEnts;
4895  }
4896  else
4897  {
4898  moab::Range& covSrcEnts = remapper->GetMeshEntities( moab::Remapper::CoveringMesh );
4899  solSTagVals.resize(
4900  covSrcEnts.size() * weightMap->GetSourceNDofsPerElement() * weightMap->GetSourceNDofsPerElement(), 0. );
4901  sents = covSrcEnts;
4902  }
4903  if( data_tgt.point_cloud )
4904  {
4905  moab::Range& tgtEnts = remapper->GetMeshVertices( moab::Remapper::TargetMesh );
4906  solTTagVals.resize( tgtEnts.size(), 0. );
4907  tents = tgtEnts;
4908  }
4909  else
4910  {
4911  moab::Range& tgtEnts = remapper->GetMeshEntities( moab::Remapper::TargetMesh );
4912  solTTagVals.resize( tgtEnts.size() * weightMap->GetDestinationNDofsPerElement() *
4913  weightMap->GetDestinationNDofsPerElement(),
4914  0. );
4915  tents = tgtEnts;
4916  }
4917  }
4918  else
4919  {
4920  moab::Range& covSrcEnts = remapper->GetMeshEntities( moab::Remapper::CoveringMesh );
4921  moab::Range& tgtEnts = remapper->GetMeshEntities( moab::Remapper::TargetMesh );
4922  solSTagVals.resize(
4923  covSrcEnts.size() * weightMap->GetSourceNDofsPerElement() * weightMap->GetSourceNDofsPerElement(), -1.0 );
4924  solTTagVals.resize( tgtEnts.size() * weightMap->GetDestinationNDofsPerElement() *
4925  weightMap->GetDestinationNDofsPerElement(),
4926  0. );
4927 
4928  sents = covSrcEnts;
4929  tents = tgtEnts;
4930  }
4931 #endif
4932 
4933  moab::TempestOnlineMap::CAASType caasType = moab::TempestOnlineMap::CAAS_NONE;
4934  if( filter_type )
4935  {
4936  switch( *filter_type )
4937  {
4938  case 1:
4939  caasType = moab::TempestOnlineMap::CAAS_GLOBAL;
4940  break;
4941  case 2:
4942  caasType = moab::TempestOnlineMap::CAAS_LOCAL;
4943  break;
4944  case 3:
4945  caasType = moab::TempestOnlineMap::CAAS_LOCAL_ADJACENT;
4946  break;
4947  default:
4948  caasType = moab::TempestOnlineMap::CAAS_NONE;
4949  }
4950  }
4951 
4952  for( size_t i = 0; i < srcTagHandles.size(); i++ )
4953  {
4954  // Compute the application of weights on the source solution data and store it in the
4955  // destination solution vector data. Optionally, the transpose application of the weight
4956  // matrix can also be performed; set the 3rd argument to true if this is needed
4957  rval = weightMap->ApplyWeights( srcTagHandles[i], tgtTagHandles[i], false, caasType );MB_CHK_ERR( rval );
4958  }
4959 
4960 // #define VERBOSE
4961 #ifdef VERBOSE
4962  ParallelComm* pco_intx = context.appDatas[*pid_intersection].pcomm;
4963 
4964  int ivar = 0;
4965  {
4966  Tag ssolnTag = srcTagHandles[ivar];
4967  std::stringstream sstr;
4968  sstr << "covsrcTagData_" << *pid_intersection << "_" << ivar << "_" << pco_intx->rank() << ".txt";
4969  std::ofstream output_file( sstr.str().c_str() );
4970  for( unsigned i = 0; i < sents.size(); ++i )
4971  {
4972  EntityHandle elem = sents[i];
4973  std::vector< double > locsolSTagVals( 16 );
4974  rval = context.MBI->tag_get_data( ssolnTag, &elem, 1, &locsolSTagVals[0] );MB_CHK_ERR( rval );
4975  output_file << "\n" << remapper->GetGlobalID( Remapper::CoveringMesh, i ) << "-- \n\t";
4976  for( unsigned j = 0; j < 16; ++j )
4977  output_file << locsolSTagVals[j] << " ";
4978  }
4979  output_file.flush(); // required here
4980  output_file.close();
4981  }
4982  {
4983  std::stringstream sstr;
4984  sstr << "outputSrcDest_" << *pid_intersection << "_" << ivar << "_" << pco_intx->rank() << ".h5m";
4985  EntityHandle sets[2] = { context.appDatas[*tdata.pid_src].file_set,
4986  context.appDatas[*tdata.pid_dest].file_set };
4987  rval = context.MBI->write_file( sstr.str().c_str(), nullptr, "", sets, 2 );MB_CHK_ERR( rval );
4988  }
4989  {
4990  std::stringstream sstr;
4991  sstr << "outputCovSrcDest_" << *pid_intersection << "_" << ivar << "_" << pco_intx->rank() << ".h5m";
4992  // EntityHandle sets[2] = {data_intx.file_set, data_intx.covering_set};
4993  EntityHandle covering_set = remapper->GetCoveringSet();
4994  EntityHandle sets[2] = { covering_set, context.appDatas[*tdata.pid_dest].file_set };
4995  rval = context.MBI->write_file( sstr.str().c_str(), nullptr, "", sets, 2 );MB_CHK_ERR( rval );
4996  }
4997  {
4998  std::stringstream sstr;
4999  sstr << "covMesh_" << *pid_intersection << "_" << pco_intx->rank() << ".vtk";
5000  // EntityHandle sets[2] = {data_intx.file_set, data_intx.covering_set};
5001  EntityHandle covering_set = remapper->GetCoveringSet();
5002  rval = context.MBI->write_file( sstr.str().c_str(), nullptr, "", &covering_set, 1 );MB_CHK_ERR( rval );
5003  }
5004  {
5005  std::stringstream sstr;
5006  sstr << "tgtMesh_" << *pid_intersection << "_" << pco_intx->rank() << ".vtk";
5007  // EntityHandle sets[2] = {data_intx.file_set, data_intx.covering_set};
5008  EntityHandle target_set = remapper->GetMeshSet( Remapper::TargetMesh );
5009  rval = context.MBI->write_file( sstr.str().c_str(), nullptr, "", &target_set, 1 );MB_CHK_ERR( rval );
5010  }
5011  {
5012  std::stringstream sstr;
5013  sstr << "colvector_" << *pid_intersection << "_" << ivar << "_" << pco_intx->rank() << ".txt";
5014  std::ofstream output_file( sstr.str().c_str() );
5015  for( unsigned i = 0; i < solSTagVals.size(); ++i )
5016  output_file << i << " " << weightMap->col_dofmap[i] << " " << weightMap->col_gdofmap[i] << " "
5017  << solSTagVals[i] << "\n";
5018  output_file.flush(); // required here
5019  output_file.close();
5020  }
5021 #endif
5022  // #undef VERBOSE
5023 
5024  return moab::MB_SUCCESS;
5025 }
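Finally, a sketch of applying the generated weights to project a field with the routine above. The weight identifier must match the one used at generation time; the tag names and the helper name are hypothetical, and the tags must already be defined on the source (covering) and target meshes.

    static ErrCode example_apply_weights( iMOAB_AppID pid_intx )
    {
        int filter_type = 0;  // 0: no CAAS filtering (default branch of the switch above)
        char weights_id[] = "scalar";
        char src_tag[]    = "a2oTbot";       // hypothetical source field tag
        char tgt_tag[]    = "a2oTbot_proj";  // hypothetical target field tag
        return iMOAB_ApplyScalarProjectionWeights( pid_intx, &filter_type, weights_id, src_tag, tgt_tag );
    }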
5026 
5027 #endif // MOAB_HAVE_TEMPESTREMAP
5028 
5029 #ifdef __cplusplus
5030 }
5031 #endif