Mesh Oriented datABase  (version 5.5.1)
An array-based unstructured mesh library
iMOAB.cpp
1 /** \file iMOAB.cpp
2  */
3 
4 #include "moab/MOABConfig.h"
5 #include "moab/Core.hpp"
6 
7 #ifdef MOAB_HAVE_MPI
8 #include "moab_mpi.h"
9 #include "moab/ParallelComm.hpp"
10 #include "moab/ParCommGraph.hpp"
13 #endif
14 #include "DebugOutput.hpp"
15 #include "moab/iMOAB.h"
16 
17 /* this is needed because of direct access to hdf5/mhdf */
18 #ifdef MOAB_HAVE_HDF5
19 #include "mhdf.h"
20 #include <H5Tpublic.h>
21 #endif
22 
23 #include "moab/CartVect.hpp"
24 #include "MBTagConventions.hpp"
25 #include "moab/MeshTopoUtil.hpp"
26 #include "moab/ReadUtilIface.hpp"
27 #include "moab/MergeMesh.hpp"
28 
29 #ifdef MOAB_HAVE_TEMPESTREMAP
30 #include "STLStringHelper.h"
32 
35 #endif
36 
37 // C++ includes
38 #include <cassert>
39 #include <sstream>
40 #include <iostream>
41 
42 using namespace moab;
43 
44 // #define VERBOSE
45 
46 // global variables; should they be organized in a structure for easier reference,
47 // or how do we keep them global?
48 
49 #ifdef __cplusplus
50 extern "C" {
51 #endif
52 
53 #ifdef MOAB_HAVE_TEMPESTREMAP
54 struct TempestMapAppData
55 {
56  moab::TempestRemapper* remapper;
57  std::map< std::string, moab::TempestOnlineMap* > weightMaps;
58  iMOAB_AppID pid_src;
59  iMOAB_AppID pid_dest;
60 };
61 #endif
62 
63 struct appData
64 {
65  EntityHandle file_set;
66  int global_id; // external component id, unique for application
67  std::string name;
68  Range all_verts;
69  // local vertices would be all_verts if no ghosting was required
70  Range local_verts; // it could include shared, but not owned at the interface
71  Range owned_verts; // owned_verts <= local_verts <= all_verts
72  Range ghost_vertices; // locally ghosted from other processors
73  Range primary_elems;
74  Range owned_elems;
75  Range ghost_elems;
76  int dimension; // 2 or 3, dimension of primary elements (redundant?)
77  long num_global_elements; // reunion of all elements in primary_elements; either from hdf5
78  // reading or from reduce
79  long num_global_vertices; // reunion of all nodes, after sharing is resolved; it could be
80  // determined from hdf5 reading
81  int num_ghost_layers; // number of ghost layers
82  Range mat_sets;
83  std::map< int, int > matIndex; // map from global block id to index in mat_sets
84  Range neu_sets;
85  Range diri_sets;
86  std::map< std::string, Tag > tagMap;
87  std::vector< Tag > tagList;
88  bool point_cloud;
89  bool is_fortran;
90 
91 #ifdef MOAB_HAVE_MPI
92  // constructor for this ParCommGraph takes the joint comm and the MPI groups for each
93  // application
94  std::map< int, ParCommGraph* > pgraph; // map from context () to the parcommgraph*
95 #endif
96 
97 #ifdef MOAB_HAVE_TEMPESTREMAP
98  TempestMapAppData tempestData;
99  std::map< std::string, std::string > metadataMap;
100 #endif
101 };
102 
103 struct GlobalContext
104 {
105  // are there reasons to have multiple moab inits? Is ref count needed?
106  Interface* MBI;
107  // we should also have the default tags stored, initialized
108  Tag material_tag, neumann_tag, dirichlet_tag,
109  globalID_tag; // material, neumann, dirichlet, globalID
110  int refCountMB;
111  int iArgc;
112  iMOAB_String* iArgv;
113  int unused_pid;
114 
115  std::map< std::string, int > appIdMap; // from app string (uppercase) to app id
116  std::map< int, int > appIdCompMap; // from component id to app id
117 
118 #ifdef MOAB_HAVE_MPI
119  std::vector< ParallelComm* > pcomms; // created in order of applications, one moab::ParallelComm for each
120 #endif
121 
122  std::vector< appData > appDatas; // the same order as pcomms
123  int globalrank, worldprocs;
124  bool MPI_initialized;
125 
126  GlobalContext()
127  {
128  MBI = 0;
129  refCountMB = 0;
130  unused_pid = 0;
131  }
132 };
133 
134 static struct GlobalContext context;
135 
136 ErrCode iMOAB_Initialize( int argc, iMOAB_String* argv )
137 {
138  if( argc ) IMOAB_CHECKPOINTER( argv, 1 );
139 
140  context.iArgc = argc;
141  context.iArgv = argv; // shallow copy
142 
143  if( 0 == context.refCountMB )
144  {
145  context.MBI = new( std::nothrow ) moab::Core;
146  // retrieve the default tags
147  const char* const shared_set_tag_names[] = { MATERIAL_SET_TAG_NAME, NEUMANN_SET_TAG_NAME,
148  DIRICHLET_SET_TAG_NAME, GLOBAL_ID_TAG_NAME };
149  // blocks, visible surfaceBC(neumann), vertexBC (Dirichlet), global id, parallel partition
150  Tag gtags[4];
151 
152  for( int i = 0; i < 4; i++ )
153  {
154 
155  ErrorCode rval =
156  context.MBI->tag_get_handle( shared_set_tag_names[i], 1, MB_TYPE_INTEGER, gtags[i], MB_TAG_ANY );MB_CHK_ERR( rval );
157  }
158 
159  context.material_tag = gtags[0];
160  context.neumann_tag = gtags[1];
161  context.dirichlet_tag = gtags[2];
162  context.globalID_tag = gtags[3];
163  }
164 
165  context.MPI_initialized = false;
166 #ifdef MOAB_HAVE_MPI
167  int flagInit;
168  MPI_Initialized( &flagInit );
169 
170  if( flagInit && !context.MPI_initialized )
171  {
172  MPI_Comm_size( MPI_COMM_WORLD, &context.worldprocs );
173  MPI_Comm_rank( MPI_COMM_WORLD, &context.globalrank );
174  context.MPI_initialized = true;
175  }
176 #endif
177 
178  context.refCountMB++;
179  return moab::MB_SUCCESS;
180 }
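// Usage sketch (not part of this file; assumes a C/C++ driver linked against iMOAB):
// the library is initialized once per process, applications are registered and
// deregistered, and iMOAB_Finalize releases the shared moab::Core instance when
// the reference count drops back to zero.
//
//   iMOAB_Initialize( argc, argv );   // or iMOAB_InitializeFortran() from Fortran
//   ... register applications, load meshes, exchange tag data ...
//   iMOAB_Finalize();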
181 
182 ErrCode iMOAB_InitializeFortran()
183 {
184  return iMOAB_Initialize( 0, 0 );
185 }
186 
187 ErrCode iMOAB_Finalize()
188 {
189  context.refCountMB--;
190 
191  if( 0 == context.refCountMB )
192  {
193  delete context.MBI;
194  }
195 
196  return MB_SUCCESS;
197 }
198 
199 ErrCode iMOAB_RegisterApplication( const iMOAB_String app_name,
200 #ifdef MOAB_HAVE_MPI
201  MPI_Comm* comm,
202 #endif
203  int* compid,
204  iMOAB_AppID pid )
205 {
206  IMOAB_CHECKPOINTER( app_name, 1 );
207 #ifdef MOAB_HAVE_MPI
208  IMOAB_CHECKPOINTER( comm, 2 );
209  IMOAB_CHECKPOINTER( compid, 3 );
210 #else
211  IMOAB_CHECKPOINTER( compid, 2 );
212 #endif
213 
214  // will create a parallel comm for this application too, so there will be a
215  // mapping from *pid to file set and to parallel comm instances
216  std::string name( app_name );
217 
218  if( context.appIdMap.find( name ) != context.appIdMap.end() )
219  {
220  std::cout << " application " << name << " already registered \n";
221  return moab::MB_FAILURE;
222  }
223 
224  *pid = context.unused_pid++;
225  context.appIdMap[name] = *pid;
226  int rankHere = 0;
227 #ifdef MOAB_HAVE_MPI
228  MPI_Comm_rank( *comm, &rankHere );
229 #endif
230  if( !rankHere ) std::cout << " application " << name << " with ID = " << *pid << " is registered now \n";
231  if( *compid <= 0 )
232  {
233  std::cout << " convention for external application is to have its id positive \n";
234  return moab::MB_FAILURE;
235  }
236 
237  if( context.appIdCompMap.find( *compid ) != context.appIdCompMap.end() )
238  {
239  std::cout << " external application with comp id " << *compid << " is already registered\n";
240  return moab::MB_FAILURE;
241  }
242 
243  context.appIdCompMap[*compid] = *pid;
244 
245  // now create ParallelComm and a file set for this application
246 #ifdef MOAB_HAVE_MPI
247  if( *comm )
248  {
249  ParallelComm* pco = new ParallelComm( context.MBI, *comm );
250 
251 #ifndef NDEBUG
252  int index = pco->get_id(); // it could be useful to get app id from pcomm instance ...
253  assert( index == *pid );
254  // here, we assert that the pid is the same as the id of the ParallelComm instance
255  // useful for writing in parallel
256 #endif
257  context.pcomms.push_back( pco );
258  }
259  else
260  {
261  context.pcomms.push_back( 0 );
262  }
263 #endif
264 
265  // create now the file set that will be used for loading the model in
266  EntityHandle file_set;
267  ErrorCode rval = context.MBI->create_meshset( MESHSET_SET, file_set );MB_CHK_ERR( rval );
268 
269  appData app_data;
270  app_data.file_set = file_set;
271  app_data.global_id = *compid; // will be used mostly for par comm graph
272  app_data.name = name; // save the name of application
273 
274 #ifdef MOAB_HAVE_TEMPESTREMAP
275  app_data.tempestData.remapper = NULL; // Only allocate as needed
276 #endif
277 
278  app_data.num_ghost_layers = 0;
279  app_data.point_cloud = false;
280  app_data.is_fortran = false;
281 
282  context.appDatas.push_back(
283  app_data ); // it will correspond to app_FileSets[*pid], which will be the file set of interest
284  return moab::MB_SUCCESS;
285 }
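// Usage sketch (hypothetical component name and id; assumes an MPI build):
//
//   int pid, compid = 10;                  // external component id, must be positive
//   MPI_Comm comm = MPI_COMM_WORLD;
//   iMOAB_RegisterApplication( "ATMOSPHERE", &comm, &compid, &pid );
//   ...
//   iMOAB_DeregisterApplication( &pid );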
286 
287 ErrCode iMOAB_RegisterApplicationFortran( const iMOAB_String app_name,
288 #ifdef MOAB_HAVE_MPI
289  int* comm,
290 #endif
291  int* compid,
292  iMOAB_AppID pid )
293 {
294  IMOAB_CHECKPOINTER( app_name, 1 );
295 #ifdef MOAB_HAVE_MPI
296  IMOAB_CHECKPOINTER( comm, 2 );
297  IMOAB_CHECKPOINTER( compid, 3 );
298 #else
299  IMOAB_CHECKPOINTER( compid, 2 );
300 #endif
301 
302  ErrCode err;
303  assert( app_name != nullptr );
304  std::string name( app_name );
305 
306 #ifdef MOAB_HAVE_MPI
307  MPI_Comm ccomm;
308  if( comm )
309  {
310  // convert from Fortran communicator to a C communicator
311  // see transfer of handles
312  // http://www.mpi-forum.org/docs/mpi-2.2/mpi22-report/node361.htm
313  ccomm = MPI_Comm_f2c( (MPI_Fint)*comm );
314  }
315 #endif
316 
317  // now call C style registration function:
318  err = iMOAB_RegisterApplication( app_name,
319 #ifdef MOAB_HAVE_MPI
320  &ccomm,
321 #endif
322  compid, pid );
323 
324  // Now that we have created the application context, store that
325  // the application being registered is from a Fortran context
326  context.appDatas[*pid].is_fortran = true;
327 
328  return err;
329 }
330 
331 ErrCode iMOAB_DeregisterApplication( iMOAB_AppID pid )
332 {
333  // the file set and parallel comm are in vectors indexed by *pid
334  // assume we did not delete anything yet
335  // *pid will not be reused if we register another application
336  appData& data = context.appDatas[*pid];
337  int rankHere = 0;
338 #ifdef MOAB_HAVE_MPI
339  ParallelComm* pco = context.pcomms[*pid];
340  rankHere = pco->rank();
341 #endif
342  if( !rankHere )
343  std::cout << " application with ID: " << *pid << " global id: " << data.global_id << " name: " << data.name
344  << " is de-registered now \n";
345 
346  EntityHandle fileSet = data.file_set;
347  // get all entities part of the file set
348  Range fileents;
349  ErrorCode rval = context.MBI->get_entities_by_handle( fileSet, fileents, /*recursive */ true );MB_CHK_ERR( rval );
350 
351  fileents.insert( fileSet );
352 
353  rval = context.MBI->get_entities_by_type( fileSet, MBENTITYSET, fileents );MB_CHK_ERR( rval ); // append all mesh sets
354 
355 #ifdef MOAB_HAVE_TEMPESTREMAP
356  if( data.tempestData.remapper ) delete data.tempestData.remapper;
357  if( data.tempestData.weightMaps.size() ) data.tempestData.weightMaps.clear();
358 #endif
359 
360 #ifdef MOAB_HAVE_MPI
361 
362  // we could get the pco also with
363  // ParallelComm * pcomm = ParallelComm::get_pcomm(context.MBI, *pid);
364 
365  std::map< int, ParCommGraph* >& pargs = data.pgraph;
366 
367  // free the parallel comm graphs associated with this app
368  for( std::map< int, ParCommGraph* >::iterator mt = pargs.begin(); mt != pargs.end(); mt++ )
369  {
370  ParCommGraph* pgr = mt->second;
371  delete pgr;
372  pgr = NULL;
373  }
374  if( pco ) delete pco;
375 #endif
376 
377  // delete first all except vertices
378  Range vertices = fileents.subset_by_type( MBVERTEX );
379  Range noverts = subtract( fileents, vertices );
380 
381  rval = context.MBI->delete_entities( noverts );MB_CHK_ERR( rval );
382  // now retrieve connected elements that still exist (maybe in other sets, pids?)
383  Range adj_ents_left;
384  rval = context.MBI->get_adjacencies( vertices, 1, false, adj_ents_left, Interface::UNION );MB_CHK_ERR( rval );
385  rval = context.MBI->get_adjacencies( vertices, 2, false, adj_ents_left, Interface::UNION );MB_CHK_ERR( rval );
386  rval = context.MBI->get_adjacencies( vertices, 3, false, adj_ents_left, Interface::UNION );MB_CHK_ERR( rval );
387 
388  if( !adj_ents_left.empty() )
389  {
390  Range conn_verts;
391  rval = context.MBI->get_connectivity( adj_ents_left, conn_verts );MB_CHK_ERR( rval );
392  vertices = subtract( vertices, conn_verts );
393  }
394 
395  rval = context.MBI->delete_entities( vertices );MB_CHK_ERR( rval );
396 
397  std::map< std::string, int >::iterator mit;
398 
399  for( mit = context.appIdMap.begin(); mit != context.appIdMap.end(); mit++ )
400  {
401  int pidx = mit->second;
402 
403  if( *pid == pidx )
404  {
405  break;
406  }
407  }
408 
409  context.appIdMap.erase( mit );
410  std::map< int, int >::iterator mit1;
411 
412  for( mit1 = context.appIdCompMap.begin(); mit1 != context.appIdCompMap.end(); mit1++ )
413  {
414  int pidx = mit1->second;
415 
416  if( *pid == pidx )
417  {
418  break;
419  }
420  }
421 
422  context.appIdCompMap.erase( mit1 );
423 
424  context.unused_pid--; // we have to go backwards always TODO
425  context.appDatas.pop_back();
426 #ifdef MOAB_HAVE_MPI
427  context.pcomms.pop_back();
428 #endif
429  return moab::MB_SUCCESS;
430 }
431 
432 ErrCode iMOAB_DeregisterApplicationFortran( iMOAB_AppID pid )
433 {
434  // release any Fortran specific allocations here before we pass it on
435  context.appDatas[*pid].is_fortran = false;
436 
437  // release all datastructure allocations
438  return iMOAB_DeregisterApplication( pid );
439 }
440 
441 // Utility function
442 static void split_tag_names( std::string input_names,
443  std::string& separator,
444  std::vector< std::string >& list_tag_names )
445 {
446  size_t pos = 0;
447  std::string token;
448  while( ( pos = input_names.find( separator ) ) != std::string::npos )
449  {
450  token = input_names.substr( 0, pos );
451  if( !token.empty() ) list_tag_names.push_back( token );
452  // std::cout << token << std::endl;
453  input_names.erase( 0, pos + separator.length() );
454  }
455  if( !input_names.empty() )
456  {
457  // if something is left over, or the string did not end with the delimiter
458  list_tag_names.push_back( input_names );
459  }
460  return;
461 }
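// Behavior sketch for split_tag_names (illustrative only):
//
//   std::vector< std::string > names;
//   std::string sep( ":" );
//   split_tag_names( "BOLUS:TEMPERATURE:SALINITY", sep, names );
//   // names == { "BOLUS", "TEMPERATURE", "SALINITY" }; empty tokens are skipped,
//   // and a trailing piece with no separator is still appended.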
462 
463 ErrCode iMOAB_ReadHeaderInfo( const iMOAB_String filename,
464  int* num_global_vertices,
465  int* num_global_elements,
466  int* num_dimension,
467  int* num_parts )
468 {
469  IMOAB_CHECKPOINTER( filename, 1 );
470  IMOAB_ASSERT( strlen( filename ), "Invalid filename length." );
471 
472 #ifdef MOAB_HAVE_HDF5
473  std::string filen( filename );
474 
475  int edges = 0;
476  int faces = 0;
477  int regions = 0;
478  if( num_global_vertices ) *num_global_vertices = 0;
479  if( num_global_elements ) *num_global_elements = 0;
480  if( num_dimension ) *num_dimension = 0;
481  if( num_parts ) *num_parts = 0;
482 
483  mhdf_FileHandle file;
484  mhdf_Status status;
485  unsigned long max_id;
486  struct mhdf_FileDesc* data;
487 
488  file = mhdf_openFile( filen.c_str(), 0, &max_id, -1, &status );
489 
490  if( mhdf_isError( &status ) )
491  {
492  fprintf( stderr, "%s: %s\n", filename, mhdf_message( &status ) );
493  return moab::MB_FAILURE;
494  }
495 
496  data = mhdf_getFileSummary( file, H5T_NATIVE_ULONG, &status,
497  1 ); // will use extra set info; will get parallel partition tag info too!
498 
499  if( mhdf_isError( &status ) )
500  {
501  fprintf( stderr, "%s: %s\n", filename, mhdf_message( &status ) );
502  return moab::MB_FAILURE;
503  }
504 
505  if( num_dimension ) *num_dimension = data->nodes.vals_per_ent;
506  if( num_global_vertices ) *num_global_vertices = (int)data->nodes.count;
507 
508  for( int i = 0; i < data->num_elem_desc; i++ )
509  {
510  struct mhdf_ElemDesc* el_desc = &( data->elems[i] );
511  struct mhdf_EntDesc* ent_d = &( el_desc->desc );
512 
513  if( 0 == strcmp( el_desc->type, mhdf_EDGE_TYPE_NAME ) )
514  {
515  edges += ent_d->count;
516  }
517 
518  if( 0 == strcmp( el_desc->type, mhdf_TRI_TYPE_NAME ) )
519  {
520  faces += ent_d->count;
521  }
522 
523  if( 0 == strcmp( el_desc->type, mhdf_QUAD_TYPE_NAME ) )
524  {
525  faces += ent_d->count;
526  }
527 
528  if( 0 == strcmp( el_desc->type, mhdf_POLYGON_TYPE_NAME ) )
529  {
530  faces += ent_d->count;
531  }
532 
533  if( 0 == strcmp( el_desc->type, mhdf_TET_TYPE_NAME ) )
534  {
535  regions += ent_d->count;
536  }
537 
538  if( 0 == strcmp( el_desc->type, mhdf_PYRAMID_TYPE_NAME ) )
539  {
540  regions += ent_d->count;
541  }
542 
543  if( 0 == strcmp( el_desc->type, mhdf_PRISM_TYPE_NAME ) )
544  {
545  regions += ent_d->count;
546  }
547 
548  if( 0 == strcmp( el_desc->type, mdhf_KNIFE_TYPE_NAME ) )
549  {
550  regions += ent_d->count;
551  }
552 
553  if( 0 == strcmp( el_desc->type, mdhf_HEX_TYPE_NAME ) )
554  {
555  regions += ent_d->count;
556  }
557 
558  if( 0 == strcmp( el_desc->type, mhdf_POLYHEDRON_TYPE_NAME ) )
559  {
560  regions += ent_d->count;
561  }
562 
563  if( 0 == strcmp( el_desc->type, mhdf_SEPTAHEDRON_TYPE_NAME ) )
564  {
565  regions += ent_d->count;
566  }
567  }
568 
569  if( num_parts ) *num_parts = data->numEntSets[0];
570 
571  // is this required?
572  if( edges > 0 )
573  {
574  if( num_dimension ) *num_dimension = 1; // I don't think it will ever return 1
575  if( num_global_elements ) *num_global_elements = edges;
576  }
577 
578  if( faces > 0 )
579  {
580  if( num_dimension ) *num_dimension = 2;
581  if( num_global_elements ) *num_global_elements = faces;
582  }
583 
584  if( regions > 0 )
585  {
586  if( num_dimension ) *num_dimension = 3;
587  if( num_global_elements ) *num_global_elements = regions;
588  }
589 
590  mhdf_closeFile( file, &status );
591 
592  free( data );
593 
594 #else
595  std::cout << filename
596  << ": Please reconfigure with HDF5. Cannot retrieve header information for file "
597  "formats other than an h5m file.\n";
598  if( num_global_vertices ) *num_global_vertices = 0;
599  if( num_global_elements ) *num_global_elements = 0;
600  if( num_dimension ) *num_dimension = 0;
601  if( num_parts ) *num_parts = 0;
602 #endif
603 
604  return moab::MB_SUCCESS;
605 }
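// Usage sketch (hypothetical h5m file; requires an HDF5-enabled build):
//
//   int nverts, nelems, dim, nparts;
//   iMOAB_ReadHeaderInfo( "ocean_mesh.h5m", &nverts, &nelems, &dim, &nparts );
//   // dim reflects the highest-dimension elements found (3 if regions exist,
//   // else 2 for faces, else 1 for edges); nparts comes from the partition sets.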
606 
607 ErrCode iMOAB_LoadMesh( iMOAB_AppID pid,
608  const iMOAB_String filename,
609  const iMOAB_String read_options,
610  int* num_ghost_layers )
611 {
612  IMOAB_CHECKPOINTER( filename, 2 );
613  IMOAB_ASSERT( strlen( filename ), "Invalid filename length." );
614  IMOAB_CHECKPOINTER( num_ghost_layers, 4 );
615 
616  // make sure we use the file set and pcomm associated with the *pid
617  std::ostringstream newopts;
618  if( read_options ) newopts << read_options;
619 
620 #ifdef MOAB_HAVE_MPI
621 
622  if( context.MPI_initialized )
623  {
624  if( context.worldprocs > 1 )
625  {
626  std::string opts( ( read_options ? read_options : "" ) );
627  std::string pcid( "PARALLEL_COMM=" );
628  std::size_t found = opts.find( pcid );
629 
630  if( found != std::string::npos )
631  {
632  std::cerr << " cannot specify PARALLEL_COMM option, it is implicit \n";
633  return moab::MB_FAILURE;
634  }
635 
636  // in serial, apply PARALLEL_COMM option only for h5m files; it does not work for .g
637  // files (used in test_remapping)
638  std::string filen( filename );
639  std::string::size_type idx = filen.rfind( '.' );
640 
641  if( idx != std::string::npos )
642  {
643  std::string extension = filen.substr( idx + 1 );
644  if( ( extension == std::string( "h5m" ) ) || ( extension == std::string( "nc" ) ) )
645  newopts << ";;PARALLEL_COMM=" << *pid;
646  }
647 
648  if( *num_ghost_layers >= 1 )
649  {
650  // if we want ghosts, we will want additional entities, hence the trailing .3 below,
651  // because the addl ents can be edges and faces that are part of the neumann sets
652  std::string pcid2( "PARALLEL_GHOSTS=" );
653  std::size_t found2 = opts.find( pcid2 );
654 
655  if( found2 != std::string::npos )
656  {
657  std::cout << " PARALLEL_GHOSTS option is already specified, ignore passed "
658  "number of layers \n";
659  }
660  else
661  {
662  // dimension of primary entities is 3 here, but it could be 2 for climate
663  // meshes; we would need to pass PARALLEL_GHOSTS explicitly for 2d meshes, for
664  // example: ";PARALLEL_GHOSTS=2.0.1"
665  newopts << ";PARALLEL_GHOSTS=3.0." << *num_ghost_layers << ".3";
666  }
667  }
668  }
669  }
670 #else
671  IMOAB_ASSERT( *num_ghost_layers == 0, "Cannot provide ghost layers in serial." );
672 #endif
673 
674  // Now let us actually load the MOAB file with the appropriate read options
675  ErrorCode rval = context.MBI->load_file( filename, &context.appDatas[*pid].file_set, newopts.str().c_str() );MB_CHK_ERR( rval );
676 
677 #ifdef VERBOSE
678  // some debugging stuff
679  std::ostringstream outfile;
680 #ifdef MOAB_HAVE_MPI
681  int rank = context.pcomms[*pid]->rank();
682  int nprocs = context.pcomms[*pid]->size();
683  outfile << "TaskMesh_n" << nprocs << "." << rank << ".h5m";
684 #else
685  outfile << "TaskMesh_n1.0.h5m";
686 #endif
687  // the mesh contains ghosts too, but they are not part of mat/neumann set
688  // write in serial the file, to see what tags are missing
689  rval = context.MBI->write_file( outfile.str().c_str() );MB_CHK_ERR( rval ); // everything on current task, written in serial
690 #endif
691 
692  // Update ghost layer information
693  context.appDatas[*pid].num_ghost_layers = *num_ghost_layers;
694 
695  // Update mesh information
696  return iMOAB_UpdateMeshInfo( pid );
697 }
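// Usage sketch (hypothetical file and read options; PARALLEL_COMM is appended
// internally, so it must not appear in read_options):
//
//   int nghosts = 1;
//   iMOAB_LoadMesh( &pid, "ocean_mesh.h5m",
//                   "PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION;PARALLEL_RESOLVE_SHARED_ENTS",
//                   &nghosts );   // also appends PARALLEL_GHOSTS=3.0.1.3 for one ghost layer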
698 
699 ErrCode iMOAB_WriteMesh( iMOAB_AppID pid, const iMOAB_String filename, const iMOAB_String write_options )
700 {
701  IMOAB_CHECKPOINTER( filename, 2 );
702  IMOAB_ASSERT( strlen( filename ), "Invalid filename length." );
703 
704  appData& data = context.appDatas[*pid];
705  EntityHandle fileSet = data.file_set;
706 
707  std::ostringstream newopts;
708 #ifdef MOAB_HAVE_MPI
709  std::string write_opts( ( write_options ? write_options : "" ) );
710  std::string pcid( "PARALLEL_COMM=" );
711  std::size_t found = write_opts.find( pcid );
712 
713  if( found != std::string::npos )
714  {
715  std::cerr << " cannot specify PARALLEL_COMM option, it is implicit \n";
716  return moab::MB_FAILURE;
717  }
718 
719  // if write in parallel, add pc option, to be sure about which ParallelComm instance is used
720  std::string pw( "PARALLEL=WRITE_PART" );
721  found = write_opts.find( pw );
722 
723  if( found != std::string::npos )
724  {
725  newopts << "PARALLEL_COMM=" << *pid << ";";
726  }
727 
728 #endif
729 
730  if( write_options ) newopts << write_options;
731 
732  std::vector< Tag > copyTagList = data.tagList;
733  // append Global ID and Parallel Partition
734  std::string gid_name_tag( "GLOBAL_ID" );
735 
736  // export global id tag, we need it always
737  if( data.tagMap.find( gid_name_tag ) == data.tagMap.end() )
738  {
739  Tag gid = context.MBI->globalId_tag();
740  copyTagList.push_back( gid );
741  }
742  // also Parallel_Partition PARALLEL_PARTITION
743  std::string pp_name_tag( "PARALLEL_PARTITION" );
744 
745  // write parallel part tag too, if it exists
746  if( data.tagMap.find( pp_name_tag ) == data.tagMap.end() )
747  {
748  Tag ptag;
749  context.MBI->tag_get_handle( pp_name_tag.c_str(), ptag );
750  if( ptag ) copyTagList.push_back( ptag );
751  }
752 
753  // Now let us actually write the file to disk with appropriate options
754  ErrorCode rval = context.MBI->write_file( filename, 0, newopts.str().c_str(), &fileSet, 1, &copyTagList[0],
755  (int)copyTagList.size() );MB_CHK_ERR( rval );
756 
757  return moab::MB_SUCCESS;
758 }
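// Usage sketch (hypothetical output name): in parallel, passing "PARALLEL=WRITE_PART"
// makes the matching PARALLEL_COMM option be added automatically, as done above.
//
//   iMOAB_WriteMesh( &pid, "ocean_out.h5m", "PARALLEL=WRITE_PART" );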
759 
760 ErrCode iMOAB_WriteLocalMesh( iMOAB_AppID pid, iMOAB_String prefix )
761 {
762  IMOAB_CHECKPOINTER( prefix, 2 );
763  IMOAB_ASSERT( strlen( prefix ), "Invalid prefix string length." );
764 
765  std::ostringstream file_name;
766  int rank = 0, size = 1;
767 #ifdef MOAB_HAVE_MPI
768  rank = context.pcomms[*pid]->rank();
769  size = context.pcomms[*pid]->size();
770 #endif
771  file_name << prefix << "_" << size << "_" << rank << ".h5m";
772  // Now let us actually write the file to disk with appropriate options
773  ErrorCode rval = context.MBI->write_file( file_name.str().c_str(), 0, 0, &context.appDatas[*pid].file_set, 1 );MB_CHK_ERR( rval );
774 
775  return moab::MB_SUCCESS;
776 }
777 
778 ErrCode iMOAB_UpdateMeshInfo( iMOAB_AppID pid )
779 {
780  // this will include ghost elements info
781  appData& data = context.appDatas[*pid];
782  EntityHandle fileSet = data.file_set;
783 
784  // first clear all data ranges; this can be called after ghosting
785  data.all_verts.clear();
786  data.primary_elems.clear();
787  data.local_verts.clear();
788  data.owned_verts.clear();
789  data.ghost_vertices.clear();
790  data.owned_elems.clear();
791  data.ghost_elems.clear();
792  data.mat_sets.clear();
793  data.neu_sets.clear();
794  data.diri_sets.clear();
795 
796  // Let us get all the vertex entities
797  ErrorCode rval = context.MBI->get_entities_by_type( fileSet, MBVERTEX, data.all_verts, true );MB_CHK_ERR( rval ); // recursive
798 
799  // Let us check first entities of dimension = 3
800  data.dimension = 3;
801  rval = context.MBI->get_entities_by_dimension( fileSet, data.dimension, data.primary_elems, true );MB_CHK_ERR( rval ); // recursive
802 
803  if( data.primary_elems.empty() )
804  {
805  // Now 3-D elements. Let us check entities of dimension = 2
806  data.dimension = 2;
807  rval = context.MBI->get_entities_by_dimension( fileSet, data.dimension, data.primary_elems, true );MB_CHK_ERR( rval ); // recursive
808 
809  if( data.primary_elems.empty() )
810  {
811  // Now 3-D/2-D elements. Let us check entities of dimension = 1
812  data.dimension = 1;
813  rval = context.MBI->get_entities_by_dimension( fileSet, data.dimension, data.primary_elems, true );MB_CHK_ERR( rval ); // recursive
814 
815  if( data.primary_elems.empty() )
816  {
817  // no elements of dimension 1 or 2 or 3; it could happen for point clouds
818  data.dimension = 0;
819  }
820  }
821  }
822 
823  // check if the current mesh is just a point cloud
824  data.point_cloud = ( ( data.primary_elems.size() == 0 && data.all_verts.size() > 0 ) || data.dimension == 0 );
825 
826 #ifdef MOAB_HAVE_MPI
827 
828  if( context.MPI_initialized )
829  {
830  ParallelComm* pco = context.pcomms[*pid];
831 
832  // filter ghost vertices, from local
833  rval = pco->filter_pstatus( data.all_verts, PSTATUS_GHOST, PSTATUS_NOT, -1, &data.local_verts );MB_CHK_ERR( rval );
834 
835  // Store handles for all ghosted entities
836  data.ghost_vertices = subtract( data.all_verts, data.local_verts );
837 
838  // filter ghost elements, from local
839  rval = pco->filter_pstatus( data.primary_elems, PSTATUS_GHOST, PSTATUS_NOT, -1, &data.owned_elems );MB_CHK_ERR( rval );
840 
841  data.ghost_elems = subtract( data.primary_elems, data.owned_elems );
842  // now update global number of primary cells and global number of vertices
843  // determine first number of owned vertices
844  // Get local owned vertices
845  rval = pco->filter_pstatus( data.all_verts, PSTATUS_NOT_OWNED, PSTATUS_NOT, -1, &data.owned_verts );MB_CHK_ERR( rval );
846  int local[2], global[2];
847  local[0] = data.owned_verts.size();
848  local[1] = data.owned_elems.size();
849  MPI_Allreduce( local, global, 2, MPI_INT, MPI_SUM, pco->comm() );
850  rval = iMOAB_SetGlobalInfo( pid, &( global[0] ), &( global[1] ) );MB_CHK_ERR( rval );
851  }
852  else
853  {
854  data.local_verts = data.all_verts;
855  data.owned_elems = data.primary_elems;
856  }
857 
858 #else
859 
860  data.local_verts = data.all_verts;
861  data.owned_elems = data.primary_elems;
862 
863 #endif
864 
865  // Get the references for some standard internal tags such as material blocks, BCs, etc
866  rval = context.MBI->get_entities_by_type_and_tag( fileSet, MBENTITYSET, &( context.material_tag ), 0, 1,
867  data.mat_sets, Interface::UNION );MB_CHK_ERR( rval );
868 
869  rval = context.MBI->get_entities_by_type_and_tag( fileSet, MBENTITYSET, &( context.neumann_tag ), 0, 1,
870  data.neu_sets, Interface::UNION );MB_CHK_ERR( rval );
871 
872  rval = context.MBI->get_entities_by_type_and_tag( fileSet, MBENTITYSET, &( context.dirichlet_tag ), 0, 1,
873  data.diri_sets, Interface::UNION );MB_CHK_ERR( rval );
874 
875  return moab::MB_SUCCESS;
876 }
877 
878 ErrCode iMOAB_GetMeshInfo( iMOAB_AppID pid,
879  int* num_visible_vertices,
880  int* num_visible_elements,
881  int* num_visible_blocks,
882  int* num_visible_surfaceBC,
883  int* num_visible_vertexBC )
884 {
885  ErrorCode rval;
886  appData& data = context.appDatas[*pid];
887  EntityHandle fileSet = data.file_set;
888 
889  // this will include ghost elements
890  // first clear all data ranges; this can be called after ghosting
891  if( num_visible_elements )
892  {
893  num_visible_elements[2] = static_cast< int >( data.primary_elems.size() );
894  // separate ghost and local/owned primary elements
895  num_visible_elements[0] = static_cast< int >( data.owned_elems.size() );
896  num_visible_elements[1] = static_cast< int >( data.ghost_elems.size() );
897  }
898  if( num_visible_vertices )
899  {
900  num_visible_vertices[2] = static_cast< int >( data.all_verts.size() );
901  num_visible_vertices[1] = static_cast< int >( data.ghost_vertices.size() );
902  // local are those that are not ghosts; they may include shared, not owned vertices
903  num_visible_vertices[0] = num_visible_vertices[2] - num_visible_vertices[1];
904  }
905 
906  if( num_visible_blocks )
907  {
908  rval = context.MBI->get_entities_by_type_and_tag( fileSet, MBENTITYSET, &( context.material_tag ), 0, 1,
909  data.mat_sets, Interface::UNION );MB_CHK_ERR( rval );
910 
911  num_visible_blocks[2] = data.mat_sets.size();
912  num_visible_blocks[0] = num_visible_blocks[2];
913  num_visible_blocks[1] = 0;
914  }
915 
916  if( num_visible_surfaceBC )
917  {
918  rval = context.MBI->get_entities_by_type_and_tag( fileSet, MBENTITYSET, &( context.neumann_tag ), 0, 1,
919  data.neu_sets, Interface::UNION );MB_CHK_ERR( rval );
920 
921  num_visible_surfaceBC[2] = 0;
922  // count how many faces are in each neu set, and how many regions are
923  // adjacent to them;
924  int numNeuSets = (int)data.neu_sets.size();
925 
926  for( int i = 0; i < numNeuSets; i++ )
927  {
928  Range subents;
929  EntityHandle nset = data.neu_sets[i];
930  rval = context.MBI->get_entities_by_dimension( nset, data.dimension - 1, subents );MB_CHK_ERR( rval );
931 
932  for( Range::iterator it = subents.begin(); it != subents.end(); ++it )
933  {
934  EntityHandle subent = *it;
935  Range adjPrimaryEnts;
936  rval = context.MBI->get_adjacencies( &subent, 1, data.dimension, false, adjPrimaryEnts );MB_CHK_ERR( rval );
937 
938  num_visible_surfaceBC[2] += (int)adjPrimaryEnts.size();
939  }
940  }
941 
942  num_visible_surfaceBC[0] = num_visible_surfaceBC[2];
943  num_visible_surfaceBC[1] = 0;
944  }
945 
946  if( num_visible_vertexBC )
947  {
948  rval = context.MBI->get_entities_by_type_and_tag( fileSet, MBENTITYSET, &( context.dirichlet_tag ), 0, 1,
949  data.diri_sets, Interface::UNION );MB_CHK_ERR( rval );
950 
951  num_visible_vertexBC[2] = 0;
952  int numDiriSets = (int)data.diri_sets.size();
953 
954  for( int i = 0; i < numDiriSets; i++ )
955  {
956  Range verts;
957  EntityHandle diset = data.diri_sets[i];
958  rval = context.MBI->get_entities_by_dimension( diset, 0, verts );MB_CHK_ERR( rval );
959 
960  num_visible_vertexBC[2] += (int)verts.size();
961  }
962 
963  num_visible_vertexBC[0] = num_visible_vertexBC[2];
964  num_visible_vertexBC[1] = 0;
965  }
966 
967  return moab::MB_SUCCESS;
968 }
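// Usage sketch: each output array has three entries, [0] owned/local, [1] ghost,
// [2] total visible on this process.
//
//   int nverts[3], nelems[3], nblocks[3], nsbc[3], nvbc[3];
//   iMOAB_GetMeshInfo( &pid, nverts, nelems, nblocks, nsbc, nvbc );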
969 
970 ErrCode iMOAB_GetVertexID( iMOAB_AppID pid, int* vertices_length, iMOAB_GlobalID* global_vertex_ID )
971 {
972  IMOAB_CHECKPOINTER( vertices_length, 2 );
973  IMOAB_CHECKPOINTER( global_vertex_ID, 3 );
974 
975  const Range& verts = context.appDatas[*pid].all_verts;
976  // check for problems with array length
977  IMOAB_ASSERT( *vertices_length == static_cast< int >( verts.size() ), "Invalid vertices length provided" );
978 
979  // global id tag is context.globalID_tag
980  return context.MBI->tag_get_data( context.globalID_tag, verts, global_vertex_ID );
981 }
982 
983 ErrCode iMOAB_GetVertexOwnership( iMOAB_AppID pid, int* vertices_length, int* visible_global_rank_ID )
984 {
985  assert( vertices_length && *vertices_length );
986  assert( visible_global_rank_ID );
987 
988  Range& verts = context.appDatas[*pid].all_verts;
989  int i = 0;
990 #ifdef MOAB_HAVE_MPI
991  ParallelComm* pco = context.pcomms[*pid];
992 
993  for( Range::iterator vit = verts.begin(); vit != verts.end(); vit++, i++ )
994  {
995  ErrorCode rval = pco->get_owner( *vit, visible_global_rank_ID[i] );MB_CHK_ERR( rval );
996  }
997 
998  if( i != *vertices_length )
999  {
1000  return moab::MB_FAILURE;
1001  } // warning array allocation problem
1002 
1003 #else
1004 
1005  /* everything owned by proc 0 */
1006  if( (int)verts.size() != *vertices_length )
1007  {
1008  return moab::MB_FAILURE;
1009  } // warning array allocation problem
1010 
1011  for( Range::iterator vit = verts.begin(); vit != verts.end(); vit++, i++ )
1012  {
1013  visible_global_rank_ID[i] = 0;
1014  } // all vertices are owned by processor 0, as this is serial run
1015 
1016 #endif
1017 
1018  return moab::MB_SUCCESS;
1019 }
1020 
1021 ErrCode iMOAB_GetVisibleVerticesCoordinates( iMOAB_AppID pid, int* coords_length, double* coordinates )
1022 {
1023  Range& verts = context.appDatas[*pid].all_verts;
1024 
1025  // interleaved coordinates, so that means deep copy anyway
1026  if( *coords_length != 3 * (int)verts.size() )
1027  {
1028  return moab::MB_FAILURE;
1029  }
1030 
1031  ErrorCode rval = context.MBI->get_coords( verts, coordinates );MB_CHK_ERR( rval );
1032 
1033  return moab::MB_SUCCESS;
1034 }
1035 
1036 ErrCode iMOAB_GetBlockID( iMOAB_AppID pid, int* block_length, iMOAB_GlobalID* global_block_IDs )
1037 {
1038  // local id blocks? they are counted from 0 to number of visible blocks ...
1039  // will actually return material set tag value for global
1040  Range& matSets = context.appDatas[*pid].mat_sets;
1041 
1042  if( *block_length != (int)matSets.size() )
1043  {
1044  return moab::MB_FAILURE;
1045  }
1046 
1047  // return the material set tag value (gtags[0] is the material set tag)
1048  ErrorCode rval = context.MBI->tag_get_data( context.material_tag, matSets, global_block_IDs );MB_CHK_ERR( rval );
1049 
1050  // populate map with index
1051  std::map< int, int >& matIdx = context.appDatas[*pid].matIndex;
1052  for( unsigned i = 0; i < matSets.size(); i++ )
1053  {
1054  matIdx[global_block_IDs[i]] = i;
1055  }
1056 
1057  return moab::MB_SUCCESS;
1058 }
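// Usage sketch (sizes taken from iMOAB_GetMeshInfo above):
//
//   std::vector< iMOAB_GlobalID > blockIDs( nblocks[2] );
//   iMOAB_GetBlockID( &pid, &nblocks[2], blockIDs.data() );
//   // blockIDs now holds the MATERIAL_SET values; matIndex maps each value back
//   // to its position in mat_sets for the per-block queries below.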
1059 
1060 ErrCode iMOAB_GetBlockInfo( iMOAB_AppID pid,
1061  iMOAB_GlobalID* global_block_ID,
1062  int* vertices_per_element,
1063  int* num_elements_in_block )
1064 {
1065  assert( global_block_ID );
1066 
1067  std::map< int, int >& matMap = context.appDatas[*pid].matIndex;
1068  std::map< int, int >::iterator it = matMap.find( *global_block_ID );
1069 
1070  if( it == matMap.end() )
1071  {
1072  return moab::MB_FAILURE;
1073  } // error in finding block with id
1074 
1075  int blockIndex = matMap[*global_block_ID];
1076  EntityHandle matMeshSet = context.appDatas[*pid].mat_sets[blockIndex];
1077  Range blo_elems;
1078  ErrorCode rval = context.MBI->get_entities_by_handle( matMeshSet, blo_elems );
1079 
1080  if( MB_SUCCESS != rval || blo_elems.empty() )
1081  {
1082  return moab::MB_FAILURE;
1083  }
1084 
1085  EntityType type = context.MBI->type_from_handle( blo_elems[0] );
1086 
1087  if( !blo_elems.all_of_type( type ) )
1088  {
1089  return moab::MB_FAILURE;
1090  } // not all of same type
1091 
1092  const EntityHandle* conn = NULL;
1093  int num_verts = 0;
1094  rval = context.MBI->get_connectivity( blo_elems[0], conn, num_verts );MB_CHK_ERR( rval );
1095 
1096  *vertices_per_element = num_verts;
1097  *num_elements_in_block = (int)blo_elems.size();
1098 
1099  return moab::MB_SUCCESS;
1100 }
1101 
1102 ErrCode iMOAB_GetVisibleElementsInfo( iMOAB_AppID pid,
1103  int* num_visible_elements,
1104  iMOAB_GlobalID* element_global_IDs,
1105  int* ranks,
1106  iMOAB_GlobalID* block_IDs )
1107 {
1108  appData& data = context.appDatas[*pid];
1109 #ifdef MOAB_HAVE_MPI
1110  ParallelComm* pco = context.pcomms[*pid];
1111 #endif
1112 
1113  ErrorCode rval = context.MBI->tag_get_data( context.globalID_tag, data.primary_elems, element_global_IDs );MB_CHK_ERR( rval );
1114 
1115  int i = 0;
1116 
1117  for( Range::iterator eit = data.primary_elems.begin(); eit != data.primary_elems.end(); ++eit, ++i )
1118  {
1119 #ifdef MOAB_HAVE_MPI
1120  rval = pco->get_owner( *eit, ranks[i] );MB_CHK_ERR( rval );
1121 
1122 #else
1123  /* everything owned by task 0 */
1124  ranks[i] = 0;
1125 #endif
1126  }
1127 
1128  for( Range::iterator mit = data.mat_sets.begin(); mit != data.mat_sets.end(); ++mit )
1129  {
1130  EntityHandle matMeshSet = *mit;
1131  Range elems;
1132  rval = context.MBI->get_entities_by_handle( matMeshSet, elems );MB_CHK_ERR( rval );
1133 
1134  int valMatTag;
1135  rval = context.MBI->tag_get_data( context.material_tag, &matMeshSet, 1, &valMatTag );MB_CHK_ERR( rval );
1136 
1137  for( Range::iterator eit = elems.begin(); eit != elems.end(); ++eit )
1138  {
1139  EntityHandle eh = *eit;
1140  int index = data.primary_elems.index( eh );
1141 
1142  if( -1 == index )
1143  {
1144  return moab::MB_FAILURE;
1145  }
1146 
1147  if( -1 >= *num_visible_elements )
1148  {
1149  return moab::MB_FAILURE;
1150  }
1151 
1152  block_IDs[index] = valMatTag;
1153  }
1154  }
1155 
1156  return moab::MB_SUCCESS;
1157 }
1158 
1159 ErrCode iMOAB_GetBlockElementConnectivities( iMOAB_AppID pid,
1160  iMOAB_GlobalID* global_block_ID,
1161  int* connectivity_length,
1162  int* element_connectivity )
1163 {
1164  assert( global_block_ID ); // ensure global block ID argument is not null
1165  assert( connectivity_length ); // ensure connectivity length argument is not null
1166 
1167  appData& data = context.appDatas[*pid];
1168  std::map< int, int >& matMap = data.matIndex;
1169  std::map< int, int >::iterator it = matMap.find( *global_block_ID );
1170 
1171  if( it == matMap.end() )
1172  {
1173  return moab::MB_FAILURE;
1174  } // error in finding block with id
1175 
1176  int blockIndex = matMap[*global_block_ID];
1177  EntityHandle matMeshSet = data.mat_sets[blockIndex];
1178  std::vector< EntityHandle > elems;
1179 
1180  ErrorCode rval = context.MBI->get_entities_by_handle( matMeshSet, elems );MB_CHK_ERR( rval );
1181 
1182  if( elems.empty() )
1183  {
1184  return moab::MB_FAILURE;
1185  }
1186 
1187  std::vector< EntityHandle > vconnect;
1188  rval = context.MBI->get_connectivity( &elems[0], elems.size(), vconnect );MB_CHK_ERR( rval );
1189 
1190  if( *connectivity_length != (int)vconnect.size() )
1191  {
1192  return moab::MB_FAILURE;
1193  } // mismatched sizes
1194 
1195  for( int i = 0; i < *connectivity_length; i++ )
1196  {
1197  int inx = data.all_verts.index( vconnect[i] );
1198 
1199  if( -1 == inx )
1200  {
1201  return moab::MB_FAILURE;
1202  } // error, vertex not in local range
1203 
1204  element_connectivity[i] = inx;
1205  }
1206 
1207  return moab::MB_SUCCESS;
1208 }
1209 
1210 ErrCode iMOAB_GetElementConnectivity( iMOAB_AppID pid,
1211  iMOAB_LocalID* elem_index,
1212  int* connectivity_length,
1213  int* element_connectivity )
1214 {
1215  assert( elem_index ); // ensure element index argument is not null
1216  assert( connectivity_length ); // ensure connectivity length argument is not null
1217 
1218  appData& data = context.appDatas[*pid];
1219  assert( ( *elem_index >= 0 ) && ( *elem_index < (int)data.primary_elems.size() ) );
1220 
1221  int num_nodes;
1222  const EntityHandle* conn;
1223 
1224  EntityHandle eh = data.primary_elems[*elem_index];
1225 
1226  ErrorCode rval = context.MBI->get_connectivity( eh, conn, num_nodes );MB_CHK_ERR( rval );
1227 
1228  if( *connectivity_length < num_nodes )
1229  {
1230  return moab::MB_FAILURE;
1231  } // wrong number of vertices
1232 
1233  for( int i = 0; i < num_nodes; i++ )
1234  {
1235  int index = data.all_verts.index( conn[i] );
1236 
1237  if( -1 == index )
1238  {
1239  return moab::MB_FAILURE;
1240  }
1241 
1242  element_connectivity[i] = index;
1243  }
1244 
1245  *connectivity_length = num_nodes;
1246 
1247  return moab::MB_SUCCESS;
1248 }
1249 
1250 ErrCode iMOAB_GetElementOwnership( iMOAB_AppID pid,
1251  iMOAB_GlobalID* global_block_ID,
1252  int* num_elements_in_block,
1253  int* element_ownership )
1254 {
1255  assert( global_block_ID ); // ensure global block ID argument is not null
1256  assert( num_elements_in_block ); // ensure number of elements in block argument is not null
1257 
1258  std::map< int, int >& matMap = context.appDatas[*pid].matIndex;
1259 
1260  std::map< int, int >::iterator it = matMap.find( *global_block_ID );
1261 
1262  if( it == matMap.end() )
1263  {
1264  return moab::MB_FAILURE;
1265  } // error in finding block with id
1266 
1267  int blockIndex = matMap[*global_block_ID];
1268  EntityHandle matMeshSet = context.appDatas[*pid].mat_sets[blockIndex];
1269  Range elems;
1270 
1271  ErrorCode rval = context.MBI->get_entities_by_handle( matMeshSet, elems );MB_CHK_ERR( rval );
1272 
1273  if( elems.empty() )
1274  {
1275  return moab::MB_FAILURE;
1276  }
1277 
1278  if( *num_elements_in_block != (int)elems.size() )
1279  {
1280  return moab::MB_FAILURE;
1281  } // bad memory allocation
1282 
1283  int i = 0;
1284 #ifdef MOAB_HAVE_MPI
1285  ParallelComm* pco = context.pcomms[*pid];
1286 #endif
1287 
1288  for( Range::iterator vit = elems.begin(); vit != elems.end(); vit++, i++ )
1289  {
1290 #ifdef MOAB_HAVE_MPI
1291  rval = pco->get_owner( *vit, element_ownership[i] );MB_CHK_ERR( rval );
1292 #else
1293  element_ownership[i] = 0; /* owned by 0 */
1294 #endif
1295  }
1296 
1297  return moab::MB_SUCCESS;
1298 }
1299 
1300 ErrCode iMOAB_GetElementID( iMOAB_AppID pid,
1301  iMOAB_GlobalID* global_block_ID,
1302  int* num_elements_in_block,
1303  iMOAB_GlobalID* global_element_ID,
1304  iMOAB_LocalID* local_element_ID )
1305 {
1306  assert( global_block_ID ); // ensure global block ID argument is not null
1307  assert( num_elements_in_block ); // ensure number of elements in block argument is not null
1308 
1309  appData& data = context.appDatas[*pid];
1310  std::map< int, int >& matMap = data.matIndex;
1311 
1312  std::map< int, int >::iterator it = matMap.find( *global_block_ID );
1313 
1314  if( it == matMap.end() )
1315  {
1316  return moab::MB_FAILURE;
1317  } // error in finding block with id
1318 
1319  int blockIndex = matMap[*global_block_ID];
1320  EntityHandle matMeshSet = data.mat_sets[blockIndex];
1321  Range elems;
1322  ErrorCode rval = context.MBI->get_entities_by_handle( matMeshSet, elems );MB_CHK_ERR( rval );
1323 
1324  if( elems.empty() )
1325  {
1326  return moab::MB_FAILURE;
1327  }
1328 
1329  if( *num_elements_in_block != (int)elems.size() )
1330  {
1331  return moab::MB_FAILURE;
1332  } // bad memory allocation
1333 
1334  rval = context.MBI->tag_get_data( context.globalID_tag, elems, global_element_ID );MB_CHK_ERR( rval );
1335 
1336  // check that elems are among primary_elems in data
1337  for( int i = 0; i < *num_elements_in_block; i++ )
1338  {
1339  local_element_ID[i] = data.primary_elems.index( elems[i] );
1340 
1341  if( -1 == local_element_ID[i] )
1342  {
1343  return moab::MB_FAILURE;
1344  } // error, not in local primary elements
1345  }
1346 
1347  return moab::MB_SUCCESS;
1348 }
1349 
1350 ErrCode iMOAB_GetPointerToSurfaceBC( iMOAB_AppID pid,
1351  int* surface_BC_length,
1352  iMOAB_LocalID* local_element_ID,
1353  int* reference_surface_ID,
1354  int* boundary_condition_value )
1355 {
1356  // we have to fill bc data for neumann sets
1357  ErrorCode rval;
1358 
1359  // it was counted above, in GetMeshInfo
1360  appData& data = context.appDatas[*pid];
1361  int numNeuSets = (int)data.neu_sets.size();
1362 
1363  int index = 0; // index [0, surface_BC_length) for the arrays returned
1364 
1365  for( int i = 0; i < numNeuSets; i++ )
1366  {
1367  Range subents;
1368  EntityHandle nset = data.neu_sets[i];
1369  rval = context.MBI->get_entities_by_dimension( nset, data.dimension - 1, subents );MB_CHK_ERR( rval );
1370 
1371  int neuVal;
1372  rval = context.MBI->tag_get_data( context.neumann_tag, &nset, 1, &neuVal );MB_CHK_ERR( rval );
1373 
1374  for( Range::iterator it = subents.begin(); it != subents.end(); ++it )
1375  {
1376  EntityHandle subent = *it;
1377  Range adjPrimaryEnts;
1378  rval = context.MBI->get_adjacencies( &subent, 1, data.dimension, false, adjPrimaryEnts );MB_CHK_ERR( rval );
1379 
1380  // get global id of the primary ents, and side number of the quad/subentity
1381  // this is moab ordering
1382  for( Range::iterator pit = adjPrimaryEnts.begin(); pit != adjPrimaryEnts.end(); pit++ )
1383  {
1384  EntityHandle primaryEnt = *pit;
1385  // get global id
1386  /*int globalID;
1387  rval = context.MBI->tag_get_data(gtags[3], &primaryEnt, 1, &globalID);
1388  if (MB_SUCCESS!=rval)
1389  return moab::MB_FAILURE;
1390  global_element_ID[index] = globalID;*/
1391 
1392  // get local element id
1393  local_element_ID[index] = data.primary_elems.index( primaryEnt );
1394 
1395  if( -1 == local_element_ID[index] )
1396  {
1397  return moab::MB_FAILURE;
1398  } // did not find the element locally
1399 
1400  int side_number, sense, offset;
1401  rval = context.MBI->side_number( primaryEnt, subent, side_number, sense, offset );MB_CHK_ERR( rval );
1402 
1403  reference_surface_ID[index] = side_number + 1; // moab is from 0 to 5, it needs 1 to 6
1404  boundary_condition_value[index] = neuVal;
1405  index++;
1406  }
1407  }
1408  }
1409 
1410  if( index != *surface_BC_length )
1411  {
1412  return moab::MB_FAILURE;
1413  } // error in array allocations
1414 
1415  return moab::MB_SUCCESS;
1416 }
1417 
1418 ErrCode iMOAB_GetPointerToVertexBC( iMOAB_AppID pid,
1419  int* vertex_BC_length,
1420  iMOAB_LocalID* local_vertex_ID,
1421  int* boundary_condition_value )
1422 {
1423  ErrorCode rval;
1424 
1425  // it was counted above, in GetMeshInfo
1426  appData& data = context.appDatas[*pid];
1427  int numDiriSets = (int)data.diri_sets.size();
1428  int index = 0; // index [0, *vertex_BC_length) for the arrays returned
1429 
1430  for( int i = 0; i < numDiriSets; i++ )
1431  {
1432  Range verts;
1433  EntityHandle diset = data.diri_sets[i];
1434  rval = context.MBI->get_entities_by_dimension( diset, 0, verts );MB_CHK_ERR( rval );
1435 
1436  int diriVal;
1437  rval = context.MBI->tag_get_data( context.dirichlet_tag, &diset, 1, &diriVal );MB_CHK_ERR( rval );
1438 
1439  for( Range::iterator vit = verts.begin(); vit != verts.end(); ++vit )
1440  {
1441  EntityHandle vt = *vit;
1442  /*int vgid;
1443  rval = context.MBI->tag_get_data(gtags[3], &vt, 1, &vgid);
1444  if (MB_SUCCESS!=rval)
1445  return moab::MB_FAILURE;
1446  global_vertext_ID[index] = vgid;*/
1447  local_vertex_ID[index] = data.all_verts.index( vt );
1448 
1449  if( -1 == local_vertex_ID[index] )
1450  {
1451  return moab::MB_FAILURE;
1452  } // vertex was not found
1453 
1454  boundary_condition_value[index] = diriVal;
1455  index++;
1456  }
1457  }
1458 
1459  if( *vertex_BC_length != index )
1460  {
1461  return moab::MB_FAILURE;
1462  } // array allocation issue
1463 
1464  return moab::MB_SUCCESS;
1465 }
1466 
1467 ErrCode iMOAB_DefineTagStorage( iMOAB_AppID pid,
1468  const iMOAB_String tag_storage_name,
1469  int* tag_type,
1470  int* components_per_entity,
1471  int* tag_index )
1472 {
1473  // see if the tag is already existing, and if yes, check the type, length
1474  if( *tag_type < 0 || *tag_type > 5 )
1475  {
1476  return moab::MB_FAILURE;
1477  } // we have 6 types of tags supported so far
1478 
1479  DataType tagDataType;
1480  TagType tagType;
1481  void* defaultVal = NULL;
1482  int* defInt = new int[*components_per_entity];
1483  double* defDouble = new double[*components_per_entity];
1484  EntityHandle* defHandle = new EntityHandle[*components_per_entity];
1485 
1486  for( int i = 0; i < *components_per_entity; i++ )
1487  {
1488  defInt[i] = 0;
1489  defDouble[i] = -1e+10;
1490  defHandle[i] = (EntityHandle)0;
1491  }
1492 
1493  switch( *tag_type )
1494  {
1495  case 0:
1496  tagDataType = MB_TYPE_INTEGER;
1497  tagType = MB_TAG_DENSE;
1498  defaultVal = defInt;
1499  break;
1500 
1501  case 1:
1502  tagDataType = MB_TYPE_DOUBLE;
1503  tagType = MB_TAG_DENSE;
1504  defaultVal = defDouble;
1505  break;
1506 
1507  case 2:
1508  tagDataType = MB_TYPE_HANDLE;
1509  tagType = MB_TAG_DENSE;
1510  defaultVal = defHandle;
1511  break;
1512 
1513  case 3:
1514  tagDataType = MB_TYPE_INTEGER;
1515  tagType = MB_TAG_SPARSE;
1516  defaultVal = defInt;
1517  break;
1518 
1519  case 4:
1520  tagDataType = MB_TYPE_DOUBLE;
1521  tagType = MB_TAG_SPARSE;
1522  defaultVal = defDouble;
1523  break;
1524 
1525  case 5:
1526  tagDataType = MB_TYPE_HANDLE;
1527  tagType = MB_TAG_SPARSE;
1528  defaultVal = defHandle;
1529  break;
1530 
1531  default: {
1532  delete[] defInt;
1533  delete[] defDouble;
1534  delete[] defHandle;
1535  return moab::MB_FAILURE;
1536  } // error
1537  }
1538 
1539  Tag tagHandle;
1540  // split storage names if separated list
1541 
1542  std::string tag_name( tag_storage_name );
1543 
1544  // first separate the names of the tags
1545  // we assume that there are separators ":" between the tag names
1546  std::vector< std::string > tagNames;
1547  std::string separator( ":" );
1548  split_tag_names( tag_name, separator, tagNames );
1549 
1550  ErrorCode rval = moab::MB_SUCCESS; // assume success already :)
1551  appData& data = context.appDatas[*pid];
1552  int already_defined_tags = 0;
1553 
1554  for( size_t i = 0; i < tagNames.size(); i++ )
1555  {
1556  rval = context.MBI->tag_get_handle( tagNames[i].c_str(), *components_per_entity, tagDataType, tagHandle,
1557  tagType | MB_TAG_EXCL | MB_TAG_CREAT, defaultVal );
1558 
1559  if( MB_ALREADY_ALLOCATED == rval )
1560  {
1561  std::map< std::string, Tag >& mTags = data.tagMap;
1562  std::map< std::string, Tag >::iterator mit = mTags.find( tagNames[i].c_str() );
1563 
1564  if( mit == mTags.end() )
1565  {
1566  // add it to the map
1567  mTags[tagNames[i]] = tagHandle;
1568  // push it to the list of tags, too
1569  *tag_index = (int)data.tagList.size();
1570  data.tagList.push_back( tagHandle );
1571  }
1572  rval = MB_SUCCESS;
1573  already_defined_tags++;
1574  }
1575  else if( MB_SUCCESS == rval )
1576  {
1577  data.tagMap[tagNames[i]] = tagHandle;
1578  *tag_index = (int)data.tagList.size();
1579  data.tagList.push_back( tagHandle );
1580  }
1581  else
1582  {
1583  rval = moab::MB_FAILURE; // some tags were not created
1584  }
1585  }
1586  // we don't need default values anymore, avoid leaks
1587  int rankHere = 0;
1588 #ifdef MOAB_HAVE_MPI
1589  ParallelComm* pco = context.pcomms[*pid];
1590  rankHere = pco->rank();
1591 #endif
1592  if( !rankHere && already_defined_tags )
1593  std::cout << " application with ID: " << *pid << " global id: " << data.global_id << " name: " << data.name
1594  << " has " << already_defined_tags << " already defined tags out of " << tagNames.size()
1595  << " tags \n";
1596  delete[] defInt;
1597  delete[] defDouble;
1598  delete[] defHandle;
1599  return rval;
1600 }
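// Usage sketch (hypothetical tag names): tag_type 1 requests dense, double-precision
// tags; a ":"-separated list defines several tags with the same type and length.
//
//   int tag_type = 1, ncomp = 1, tag_index = 0;
//   iMOAB_DefineTagStorage( &pid, "a2oTbot:a2oUbot:a2oVbot", &tag_type, &ncomp, &tag_index );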
1601 
1602 ErrCode iMOAB_SetIntTagStorage( iMOAB_AppID pid,
1603  const iMOAB_String tag_storage_name,
1604  int* num_tag_storage_length,
1605  int* ent_type,
1606  int* tag_storage_data )
1607 {
1608  std::string tag_name( tag_storage_name );
1609 
1610  // Get the application data
1611  appData& data = context.appDatas[*pid];
1612 
1613  if( data.tagMap.find( tag_name ) == data.tagMap.end() )
1614  {
1615  return moab::MB_FAILURE;
1616  } // tag not defined
1617 
1618  Tag tag = data.tagMap[tag_name];
1619 
1620  int tagLength = 0;
1621  ErrorCode rval = context.MBI->tag_get_length( tag, tagLength );MB_CHK_ERR( rval );
1622 
1623  DataType dtype;
1624  rval = context.MBI->tag_get_data_type( tag, dtype );
1625 
1626  if( MB_SUCCESS != rval || dtype != MB_TYPE_INTEGER )
1627  {
1628  return moab::MB_FAILURE;
1629  }
1630 
1631  // set it on a subset of entities, based on type and length
1632  Range* ents_to_set;
1633 
1634  if( *ent_type == 0 ) // vertices
1635  {
1636  ents_to_set = &data.all_verts;
1637  }
1638  else // if (*ent_type == 1) // *ent_type can be 0 (vertices) or 1 (elements)
1639  {
1640  ents_to_set = &data.primary_elems;
1641  }
1642 
1643  int nents_to_be_set = *num_tag_storage_length / tagLength;
1644 
1645  if( nents_to_be_set > (int)ents_to_set->size() || nents_to_be_set < 1 )
1646  {
1647  return moab::MB_FAILURE;
1648  } // too many entities to be set, or too few
1649 
1650  // restrict the range; everything is contiguous; or not?
1651 
1652  // Range contig_range ( * ( ents_to_set->begin() ), * ( ents_to_set->begin() + nents_to_be_set -
1653  // 1 ) );
1654  rval = context.MBI->tag_set_data( tag, *ents_to_set, tag_storage_data );MB_CHK_ERR( rval );
1655 
1656  return moab::MB_SUCCESS; // no error
1657 }
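// Usage sketch (hypothetical integer tag, one component, stored on primary elements):
//
//   int ent_type = 1;                      // 0 = vertices, 1 = primary elements
//   int len = nelems[2];                   // components_per_entity * number of entities
//   std::vector< int > vals( len, 7 );
//   iMOAB_SetIntTagStorage( &pid, "INT_FIELD", &len, &ent_type, vals.data() );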
1658 
1659 ErrCode iMOAB_GetIntTagStorage( iMOAB_AppID pid,
1660  const iMOAB_String tag_storage_name,
1661  int* num_tag_storage_length,
1662  int* ent_type,
1663  int* tag_storage_data )
1664 {
1665  ErrorCode rval;
1666  std::string tag_name( tag_storage_name );
1667 
1668  appData& data = context.appDatas[*pid];
1669 
1670  if( data.tagMap.find( tag_name ) == data.tagMap.end() )
1671  {
1672  return moab::MB_FAILURE;
1673  } // tag not defined
1674 
1675  Tag tag = data.tagMap[tag_name];
1676 
1677  int tagLength = 0;
1678  rval = context.MBI->tag_get_length( tag, tagLength );MB_CHK_ERR( rval );
1679 
1680  DataType dtype;
1681  rval = context.MBI->tag_get_data_type( tag, dtype );MB_CHK_ERR( rval );
1682 
1683  if( dtype != MB_TYPE_INTEGER )
1684  {
1685  return moab::MB_FAILURE;
1686  }
1687 
1688  // set it on a subset of entities, based on type and length
1689  Range* ents_to_get;
1690 
1691  if( *ent_type == 0 ) // vertices
1692  {
1693  ents_to_get = &data.all_verts;
1694  }
1695  else // if (*ent_type == 1)
1696  {
1697  ents_to_get = &data.primary_elems;
1698  }
1699 
1700  int nents_to_get = *num_tag_storage_length / tagLength;
1701 
1702  if( nents_to_get > (int)ents_to_get->size() || nents_to_get < 1 )
1703  {
1704  return moab::MB_FAILURE;
1705  } // too many entities to get, or too few
1706 
1707  // restrict the range; everything is contiguous; or not?
1708  // Range contig_range ( * ( ents_to_get->begin() ), * ( ents_to_get->begin() + nents_to_get - 1
1709  // ) );
1710 
1711  rval = context.MBI->tag_get_data( tag, *ents_to_get, tag_storage_data );MB_CHK_ERR( rval );
1712 
1713  return moab::MB_SUCCESS; // no error
1714 }
1715 
1716 ErrCode iMOAB_SetDoubleTagStorage( iMOAB_AppID pid,
1717  const iMOAB_String tag_storage_names,
1718  int* num_tag_storage_length,
1719  int* ent_type,
1720  double* tag_storage_data )
1721 {
1722  ErrorCode rval;
1723  std::string tag_names( tag_storage_names );
1724  // exactly the same code as for int tag :) maybe should check the type of tag too
1725  std::vector< std::string > tagNames;
1726  std::vector< Tag > tagHandles;
1727  std::string separator( ":" );
1728  split_tag_names( tag_names, separator, tagNames );
1729 
1730  appData& data = context.appDatas[*pid];
1731  Range* ents_to_set = NULL;
1732 
1733  if( *ent_type == 0 ) // vertices
1734  {
1735  ents_to_set = &data.all_verts;
1736  }
1737  else if( *ent_type == 1 )
1738  {
1739  ents_to_set = &data.primary_elems;
1740  }
1741 
1742  int nents_to_be_set = (int)( *ents_to_set ).size();
1743  int position = 0;
1744 
1745  for( size_t i = 0; i < tagNames.size(); i++ )
1746  {
1747  if( data.tagMap.find( tagNames[i] ) == data.tagMap.end() )
1748  {
1749  return moab::MB_FAILURE;
1750  } // some tag not defined yet in the app
1751 
1752  Tag tag = data.tagMap[tagNames[i]];
1753 
1754  int tagLength = 0;
1755  rval = context.MBI->tag_get_length( tag, tagLength );MB_CHK_ERR( rval );
1756 
1757  DataType dtype;
1758  rval = context.MBI->tag_get_data_type( tag, dtype );MB_CHK_ERR( rval );
1759 
1760  if( dtype != MB_TYPE_DOUBLE )
1761  {
1762  return moab::MB_FAILURE;
1763  }
1764 
1765  // set it on the subset of entities, based on type and length
1766  if( position + tagLength * nents_to_be_set > *num_tag_storage_length )
1767  return moab::MB_FAILURE; // too many entity values to be set
1768 
1769  rval = context.MBI->tag_set_data( tag, *ents_to_set, &tag_storage_data[position] );MB_CHK_ERR( rval );
1770  // increment position to next tag
1771  position = position + tagLength * nents_to_be_set;
1772  }
1773  return moab::MB_SUCCESS; // no error
1774 }
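// Usage sketch (hypothetical tags): values for the ":"-separated tags are passed back
// to back in one array, one block of tagLength * nents values per tag, in entity order.
//
//   int ent_type = 0;                              // set on vertices
//   int len = 3 * nverts[2];                       // three 1-component tags
//   std::vector< double > vals( len, 0.0 );
//   iMOAB_SetDoubleTagStorage( &pid, "a2oTbot:a2oUbot:a2oVbot", &len, &ent_type, vals.data() );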
1775 
1776 ErrCode iMOAB_SetDoubleTagStorageWithGid( iMOAB_AppID pid,
1777  const iMOAB_String tag_storage_names,
1778  int* num_tag_storage_length,
1779  int* ent_type,
1780  double* tag_storage_data,
1781  int* globalIds )
1782 {
1783  ErrorCode rval;
1784  std::string tag_names( tag_storage_names );
1785  // exactly the same code as for int tag :) maybe should check the type of tag too
1786  std::vector< std::string > tagNames;
1787  std::vector< Tag > tagHandles;
1788  std::string separator( ":" );
1789  split_tag_names( tag_names, separator, tagNames );
1790 
1791  appData& data = context.appDatas[*pid];
1792  Range* ents_to_set = NULL;
1793 
1794  if( *ent_type == 0 ) // vertices
1795  {
1796  ents_to_set = &data.all_verts;
1797  }
1798  else if( *ent_type == 1 )
1799  {
1800  ents_to_set = &data.primary_elems;
1801  }
1802 
1803  int nents_to_be_set = (int)( *ents_to_set ).size();
1804 
1805  Tag gidTag = context.MBI->globalId_tag();
1806  std::vector< int > gids;
1807  gids.resize( nents_to_be_set );
1808  rval = context.MBI->tag_get_data( gidTag, *ents_to_set, &gids[0] );MB_CHK_ERR( rval );
1809 
1810  // so we will need to set the tags according to the global id passed;
1811  // so the order in tag_storage_data is the same as the order in globalIds, but the order
1812  // in local range is gids
1813  std::map< int, EntityHandle > eh_by_gid;
1814  int i = 0;
1815  for( Range::iterator it = ents_to_set->begin(); it != ents_to_set->end(); ++it, ++i )
1816  {
1817  eh_by_gid[gids[i]] = *it;
1818  }
1819 
1820  std::vector< int > tagLengths( tagNames.size() );
1821  std::vector< Tag > tagList;
1822  size_t total_tag_len = 0;
1823  for( size_t i = 0; i < tagNames.size(); i++ )
1824  {
1825  if( data.tagMap.find( tagNames[i] ) == data.tagMap.end() )
1826  {
1827  MB_SET_ERR( moab::MB_FAILURE, "tag missing" );
1828  } // some tag not defined yet in the app
1829 
1830  Tag tag = data.tagMap[tagNames[i]];
1831  tagList.push_back( tag );
1832 
1833  int tagLength = 0;
1834  rval = context.MBI->tag_get_length( tag, tagLength );MB_CHK_ERR( rval );
1835 
1836  total_tag_len += tagLength;
1837  tagLengths[i] = tagLength;
1838  DataType dtype;
1839  rval = context.MBI->tag_get_data_type( tag, dtype );MB_CHK_ERR( rval );
1840 
1841  if( dtype != MB_TYPE_DOUBLE )
1842  {
1843  MB_SET_ERR( moab::MB_FAILURE, "tag not double type" );
1844  }
1845  }
1846  bool serial = true;
1847 #ifdef MOAB_HAVE_MPI
1848  ParallelComm* pco = context.pcomms[*pid];
1849  unsigned num_procs = pco->size();
1850  if( num_procs > 1 ) serial = false;
1851 #endif
1852 
1853  if( serial )
1854  {
1855  // we do not assume anymore that the number of entities has to match
1856  // we will set only what matches, and skip entities that do not have corresponding global ids
1857  //assert( total_tag_len * nents_to_be_set - *num_tag_storage_length == 0 );
1858  // tags are unrolled, we loop over global ids first, then careful about tags
1859  for( int i = 0; i < nents_to_be_set; i++ )
1860  {
1861  int gid = globalIds[i];
1862  std::map< int, EntityHandle >::iterator mapIt = eh_by_gid.find( gid );
1863  if( mapIt == eh_by_gid.end() ) continue;
1864  EntityHandle eh = mapIt->second;
1865  // now loop over tags
1866  int indexInTagValues = 0; //
1867  for( size_t j = 0; j < tagList.size(); j++ )
1868  {
1869  indexInTagValues += i * tagLengths[j];
1870  rval = context.MBI->tag_set_data( tagList[j], &eh, 1, &tag_storage_data[indexInTagValues] );MB_CHK_ERR( rval );
1871  // advance the pointer/index
1872  indexInTagValues += ( nents_to_be_set - i ) * tagLengths[j]; // at the end of tag data
1873  }
1874  }
1875  }
1876 #ifdef MOAB_HAVE_MPI
1877  else // it can be not serial only if pco->size() > 1, parallel
1878  {
1879  // in this case, we have to use 2 crystal routers, to send data to the processor that needs it
1880  // we will first create a tuple list to the rendezvous points, then from there send to the processor that requested it
1881  // it is a 2-hop global gather scatter
1882  // TODO: allow for tags of different length; this is wrong
1883  int nbLocalVals = *num_tag_storage_length / ( (int)tagNames.size() ); // assumes all tags have the same length?
1884  // we do not expect the sizes to match
1885  //assert( nbLocalVals * tagNames.size() - *num_tag_storage_length == 0 );
1886  TupleList TLsend;
1887  TLsend.initialize( 2, 0, 0, total_tag_len, nbLocalVals ); // to proc, marker(gid), total_tag_len doubles
1888  TLsend.enableWriteAccess();
1889  // the rendezvous processor for a given global_id is global_id % num_procs
1890 
1891  int indexInRealLocal = 0;
1892  for( int i = 0; i < nbLocalVals; i++ )
1893  {
1894  // to proc, marker, element local index, index in el
1895  int marker = globalIds[i];
1896  int to_proc = marker % num_procs;
1897  int n = TLsend.get_n();
1898  TLsend.vi_wr[2 * n] = to_proc; // send to processor
1899  TLsend.vi_wr[2 * n + 1] = marker;
1900  int indexInTagValues = 0;
1901  // tag data collected tag by tag
1902  for( size_t j = 0; j < tagList.size(); j++ )
1903  {
1904  indexInTagValues += i * tagLengths[j];
1905  for( int k = 0; k < tagLengths[j]; k++ )
1906  {
1907  TLsend.vr_wr[indexInRealLocal++] = tag_storage_data[indexInTagValues + k];
1908  }
1909  indexInTagValues += ( nbLocalVals - i ) * tagLengths[j];
1910  }
1911  TLsend.inc_n();
1912  }
1913 
1914  //assert( nbLocalVals * total_tag_len - indexInRealLocal == 0 );
1915  // send now requests, basically inform the rendez-vous point who needs a particular global id
1916  // send the data to the other processors:
1917  ( pco->proc_config().crystal_router() )->gs_transfer( 1, TLsend, 0 );
1918  TupleList TLreq;
1919  TLreq.initialize( 2, 0, 0, 0, nents_to_be_set );
1920  TLreq.enableWriteAccess();
1921  for( int i = 0; i < nents_to_be_set; i++ )
1922  {
1923  // to proc, marker
1924  int marker = gids[i];
1925  int to_proc = marker % num_procs;
1926  int n = TLreq.get_n();
1927  TLreq.vi_wr[2 * n] = to_proc; // send to processor
1928  TLreq.vi_wr[2 * n + 1] = marker;
1929  // only the marker is needed here; the tag values stay on the sending side
1930  TLreq.inc_n();
1931  }
1932  ( pco->proc_config().crystal_router() )->gs_transfer( 1, TLreq, 0 );
1933 
1934  // we know now that process TLreq.vi_wr[2 * n] needs tags for gid TLreq.vi_wr[2 * n + 1]
1935  // we should first order by global id, and then build the new TL with send to proc, global id and
1936  // tags for it
1937  // sort by global ids the tuple lists
1938  moab::TupleList::buffer sort_buffer;
1939  sort_buffer.buffer_init( TLreq.get_n() );
1940  TLreq.sort( 1, &sort_buffer );
1941  sort_buffer.reset();
1942  sort_buffer.buffer_init( TLsend.get_n() );
1943  TLsend.sort( 1, &sort_buffer );
1944  sort_buffer.reset();
1945  // now send the tag values to the proc that requested it
1946  // in theory, for a full partition, TLreq and TLsend should have the same size, and
1947  // each dof should have exactly one target proc. Is that true or not in general ?
1948  // how do we plan to use this? Is it better to store the comm graph for future use?
1949 
1950  // start copy from comm graph settle
1951  TupleList TLBack;
1952  TLBack.initialize( 3, 0, 0, total_tag_len, 0 ); // to proc, marker, tag from proc , tag values
1953  TLBack.enableWriteAccess();
1954 
1955  int n1 = TLreq.get_n();
1956  int n2 = TLsend.get_n();
1957 
1958  int indexInTLreq = 0;
1959  int indexInTLsend = 0; // advance both, according to the marker
1960  if( n1 > 0 && n2 > 0 )
1961  {
1962 
1963  while( indexInTLreq < n1 && indexInTLsend < n2 ) // if any is over, we are done
1964  {
1965  int currentValue1 = TLreq.vi_rd[2 * indexInTLreq + 1];
1966  int currentValue2 = TLsend.vi_rd[2 * indexInTLsend + 1];
1967  if( currentValue1 < currentValue2 )
1968  {
1969  // we have a big problem; basically, we are saying that
1970  // dof currentValue is on one model and not on the other
1971  // std::cout << " currentValue1:" << currentValue1 << " missing in comp2" << "\n";
1972  indexInTLreq++;
1973  continue;
1974  }
1975  if( currentValue1 > currentValue2 )
1976  {
1977  // std::cout << " currentValue2:" << currentValue2 << " missing in comp1" << "\n";
1978  indexInTLsend++;
1979  continue;
1980  }
1981  int size1 = 1;
1982  int size2 = 1;
1983  while( indexInTLreq + size1 < n1 && currentValue1 == TLreq.vi_rd[2 * ( indexInTLreq + size1 ) + 1] )
1984  size1++;
1985  while( indexInTLsend + size2 < n2 && currentValue2 == TLsend.vi_rd[2 * ( indexInTLsend + size2 ) + 1] )
1986  size2++;
1987  // must be found in both lists, find the start and end indices
1988  for( int i1 = 0; i1 < size1; i1++ )
1989  {
1990  for( int i2 = 0; i2 < size2; i2++ )
1991  {
1992  // send the info back to components
1993  int n = TLBack.get_n();
1994  TLBack.reserve();
1995  TLBack.vi_wr[3 * n] = TLreq.vi_rd[2 * ( indexInTLreq + i1 )]; // send back to the proc marker
1996  // came from, info from comp2
1997  TLBack.vi_wr[3 * n + 1] = currentValue1; // initial value (resend, just for verif ?)
1998  TLBack.vi_wr[3 * n + 2] = TLsend.vi_rd[2 * ( indexInTLsend + i2 )]; // from proc on comp2
1999  // also fill tag values
2000  for( size_t k = 0; k < total_tag_len; k++ )
2001  {
2002  TLBack.vr_rd[total_tag_len * n + k] =
2003  TLsend.vr_rd[total_tag_len * indexInTLsend + k]; // deep copy of tag values
2004  }
2005  }
2006  }
2007  indexInTLreq += size1;
2008  indexInTLsend += size2;
2009  }
2010  }
2011  ( pco->proc_config().crystal_router() )->gs_transfer( 1, TLBack, 0 );
2012  // end copy from comm graph
2013  // after we are done sending, we need to set those tag values, in a reverse process compared to send
2014  n1 = TLBack.get_n();
2015  double* ptrVal = &TLBack.vr_rd[0]; //
2016  for( int i = 0; i < n1; i++ )
2017  {
2018  int gid = TLBack.vi_rd[3 * i + 1]; // marker
2019  std::map< int, EntityHandle >::iterator mapIt = eh_by_gid.find( gid );
2020  if( mapIt == eh_by_gid.end() ) continue;
2021  EntityHandle eh = mapIt->second;
2022  // now loop over tags
2023 
2024  for( size_t j = 0; j < tagList.size(); j++ )
2025  {
2026  rval = context.MBI->tag_set_data( tagList[j], &eh, 1, (void*)ptrVal );MB_CHK_ERR( rval );
2027  // advance the pointer/index
2028  ptrVal += tagLengths[j]; // at the end of tag data per call
2029  }
2030  }
2031  }
2032 #endif
2033  return MB_SUCCESS;
2034 }
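// Illustrative sketch (not part of the build): how a caller might use the global-id based
// setter above (assumed here to be iMOAB_SetDoubleTagStorageWithGid, as in the iMOAB API).
// The tag name "Temperature" is a placeholder for a tag already defined on the app; values
// are matched to local entities by GLOBAL_ID, so their ordering does not have to follow the
// local entity range, and ids not present locally are simply skipped.
#if 0
static ErrCode example_set_by_gid( iMOAB_AppID pid )
{
    char tag_name[] = "Temperature";  // placeholder tag name
    int ent_type    = 1;              // primary elements
    int storage_len = 3;              // 3 values for a tag of length 1
    double vals[3]  = { 280.0, 281.5, 279.2 };
    int gids[3]     = { 7, 3, 11 };   // global ids, in the same order as vals
    return iMOAB_SetDoubleTagStorageWithGid( pid, tag_name, &storage_len, &ent_type, vals, gids );
}
#endif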
2035 ErrCode iMOAB_GetDoubleTagStorage( iMOAB_AppID pid,
2036  const iMOAB_String tag_storage_names,
2037  int* num_tag_storage_length,
2038  int* ent_type,
2039  double* tag_storage_data )
2040 {
2041  ErrorCode rval;
2042  // exactly the same code, except tag type check
2043  std::string tag_names( tag_storage_names );
2044  // exactly the same code as for int tag :) maybe should check the type of tag too
2045  std::vector< std::string > tagNames;
2046  std::vector< Tag > tagHandles;
2047  std::string separator( ":" );
2048  split_tag_names( tag_names, separator, tagNames );
2049 
2050  appData& data = context.appDatas[*pid];
2051 
2052  // set it on a subset of entities, based on type and length
2053  Range* ents_to_get = NULL;
2054 
2055  if( *ent_type == 0 ) // vertices
2056  {
2057  ents_to_get = &data.all_verts;
2058  }
2059  else if( *ent_type == 1 )
2060  {
2061  ents_to_get = &data.primary_elems;
2062  }
2063  int nents_to_get = (int)ents_to_get->size();
2064  int position = 0;
2065  for( size_t i = 0; i < tagNames.size(); i++ )
2066  {
2067  if( data.tagMap.find( tagNames[i] ) == data.tagMap.end() )
2068  {
2069  return moab::MB_FAILURE;
2070  } // tag not defined
2071 
2072  Tag tag = data.tagMap[tagNames[i]];
2073 
2074  int tagLength = 0;
2075  rval = context.MBI->tag_get_length( tag, tagLength );MB_CHK_ERR( rval );
2076 
2077  DataType dtype;
2078  rval = context.MBI->tag_get_data_type( tag, dtype );MB_CHK_ERR( rval );
2079 
2080  if( dtype != MB_TYPE_DOUBLE )
2081  {
2082  return moab::MB_FAILURE;
2083  }
2084 
2085  if( position + nents_to_get * tagLength > *num_tag_storage_length )
2086  return moab::MB_FAILURE; // too many entity values to get
2087 
2088  rval = context.MBI->tag_get_data( tag, *ents_to_get, &tag_storage_data[position] );MB_CHK_ERR( rval );
2089  position = position + nents_to_get * tagLength;
2090  }
2091 
2092  return moab::MB_SUCCESS; // no error
2093 }
2094 
2095 ErrCode iMOAB_SynchronizeTags( iMOAB_AppID pid, int* num_tag, int* tag_indices, int* ent_type )
2096 {
2097 #ifdef MOAB_HAVE_MPI
2098  appData& data = context.appDatas[*pid];
2099  Range ent_exchange;
2100  std::vector< Tag > tags;
2101 
2102  for( int i = 0; i < *num_tag; i++ )
2103  {
2104  if( tag_indices[i] < 0 || tag_indices[i] >= (int)data.tagList.size() )
2105  {
2106  return moab::MB_FAILURE;
2107  } // error in tag index
2108 
2109  tags.push_back( data.tagList[tag_indices[i]] );
2110  }
2111 
2112  if( *ent_type == 0 )
2113  {
2114  ent_exchange = data.all_verts;
2115  }
2116  else if( *ent_type == 1 )
2117  {
2118  ent_exchange = data.primary_elems;
2119  }
2120  else
2121  {
2122  return moab::MB_FAILURE;
2123  } // unexpected type
2124 
2125  ParallelComm* pco = context.pcomms[*pid];
2126 
2127  ErrorCode rval = pco->exchange_tags( tags, tags, ent_exchange );MB_CHK_ERR( rval );
2128 
2129 #else
2130  /* do nothing if serial */
2131  // just silence the warning
2132  // do not call sync tags in serial!
2133  int k = *pid + *num_tag + *tag_indices + *ent_type;
2134  k++;
2135 #endif
2136 
2137  return moab::MB_SUCCESS;
2138 }
2139 
2140 ErrCode iMOAB_ReduceTagsMax( iMOAB_AppID pid, int* tag_index, int* ent_type )
2141 {
2142 #ifdef MOAB_HAVE_MPI
2143  appData& data = context.appDatas[*pid];
2144  Range ent_exchange;
2145 
2146  if( *tag_index < 0 || *tag_index >= (int)data.tagList.size() )
2147  {
2148  return moab::MB_FAILURE;
2149  } // error in tag index
2150 
2151  Tag tagh = data.tagList[*tag_index];
2152 
2153  if( *ent_type == 0 )
2154  {
2155  ent_exchange = data.all_verts;
2156  }
2157  else if( *ent_type == 1 )
2158  {
2159  ent_exchange = data.primary_elems;
2160  }
2161  else
2162  {
2163  return moab::MB_FAILURE;
2164  } // unexpected type
2165 
2166  ParallelComm* pco = context.pcomms[*pid];
2167  // we could do different MPI_Op; do not bother now, we will call from fortran
2168  ErrorCode rval = pco->reduce_tags( tagh, MPI_MAX, ent_exchange );MB_CHK_ERR( rval );
2169 
2170 #else
2171  /* do nothing if serial */
2172  // just silence the warning
2173  // do not call sync tags in serial!
2174  int k = *pid + *tag_index + *ent_type;
2175  k++; // just do junk, to avoid complaints
2176 #endif
2177  return moab::MB_SUCCESS;
2178 }
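// Illustrative sketch (not part of the build): a typical pairing of the two collective tag
// operations above. The tag index 0 is a placeholder for the index assigned when the tag was
// defined (iMOAB_DefineTagStorage); in serial builds both calls are no-ops.
#if 0
static ErrCode example_sync_and_reduce( iMOAB_AppID pid )
{
    int tag_index = 0; // placeholder: index assigned at tag definition time
    int num_tag   = 1;
    int ent_type  = 1; // 0 = vertices, 1 = primary elements
    // exchange the owner's values onto shared/ghost copies
    ErrCode rc = iMOAB_SynchronizeTags( pid, &num_tag, &tag_index, &ent_type );
    if( rc ) return rc;
    // then reduce with MPI_MAX so every copy ends up with the same (maximum) value
    return iMOAB_ReduceTagsMax( pid, &tag_index, &ent_type );
}
#endif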
2179 
2180 ErrCode iMOAB_GetNeighborElements( iMOAB_AppID pid,
2181  iMOAB_LocalID* local_index,
2182  int* num_adjacent_elements,
2183  iMOAB_LocalID* adjacent_element_IDs )
2184 {
2185  ErrorCode rval;
2186 
2187  // one neighbor for each subentity of dimension-1
2188  MeshTopoUtil mtu( context.MBI );
2189  appData& data = context.appDatas[*pid];
2190  EntityHandle eh = data.primary_elems[*local_index];
2191  Range adjs;
2192  rval = mtu.get_bridge_adjacencies( eh, data.dimension - 1, data.dimension, adjs );MB_CHK_ERR( rval );
2193 
2194  if( *num_adjacent_elements < (int)adjs.size() )
2195  {
2196  return moab::MB_FAILURE;
2197  } // not dimensioned correctly
2198 
2199  *num_adjacent_elements = (int)adjs.size();
2200 
2201  for( int i = 0; i < *num_adjacent_elements; i++ )
2202  {
2203  adjacent_element_IDs[i] = data.primary_elems.index( adjs[i] );
2204  }
2205 
2206  return moab::MB_SUCCESS;
2207 }
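// Illustrative sketch (not part of the build): querying the neighbors of one local element
// with the routine above (assumed here to be iMOAB_GetNeighborElements, as in the iMOAB API).
// The capacity of 6 is an arbitrary assumption; the call fails if the element has more
// bridge-adjacent neighbors than the caller allowed for.
#if 0
static ErrCode example_element_neighbors( iMOAB_AppID pid, iMOAB_LocalID elem_index )
{
    int num_adj = 6; // capacity on input, actual neighbor count on output
    iMOAB_LocalID adj_ids[6];
    ErrCode rc = iMOAB_GetNeighborElements( pid, &elem_index, &num_adj, adj_ids );
    if( rc ) return rc;
    // adj_ids[0 .. num_adj-1] are local indices into the primary element range
    return moab::MB_SUCCESS;
}
#endif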
2208 
2209 #if 0
2210 
2211 ErrCode iMOAB_GetNeighborVertices ( iMOAB_AppID pid, iMOAB_LocalID* local_vertex_ID, int* num_adjacent_vertices, iMOAB_LocalID* adjacent_vertex_IDs )
2212 {
2213  return moab::MB_SUCCESS;
2214 }
2215 
2216 #endif
2217 
2218 ErrCode iMOAB_CreateVertices( iMOAB_AppID pid, int* coords_len, int* dim, double* coordinates )
2219 {
2220  ErrorCode rval;
2221  appData& data = context.appDatas[*pid];
2222 
2223  if( !data.local_verts.empty() ) // we should have no vertices in the app
2224  {
2225  return moab::MB_FAILURE;
2226  }
2227 
2228  int nverts = *coords_len / *dim;
2229 
2230  rval = context.MBI->create_vertices( coordinates, nverts, data.local_verts );MB_CHK_ERR( rval );
2231 
2232  rval = context.MBI->add_entities( data.file_set, data.local_verts );MB_CHK_ERR( rval );
2233 
2234  // also add the vertices to the all_verts range
2235  data.all_verts.merge( data.local_verts );
2236  return moab::MB_SUCCESS;
2237 }
2238 
2239 ErrCode iMOAB_CreateElements( iMOAB_AppID pid,
2240  int* num_elem,
2241  int* type,
2242  int* num_nodes_per_element,
2243  int* connectivity,
2244  int* block_ID )
2245 {
2246  // Create elements
2247  appData& data = context.appDatas[*pid];
2248 
2249  ReadUtilIface* read_iface;
2250  ErrorCode rval = context.MBI->query_interface( read_iface );MB_CHK_ERR( rval );
2251 
2252  EntityType mbtype = (EntityType)( *type );
2253  EntityHandle actual_start_handle;
2254  EntityHandle* array = NULL;
2255  rval = read_iface->get_element_connect( *num_elem, *num_nodes_per_element, mbtype, 1, actual_start_handle, array );MB_CHK_ERR( rval );
2256 
2257  // fill up with actual connectivity from input; assume the vertices are in order, and the start
2258  // vertex is the first in the current data vertex range
2259  EntityHandle firstVertex = data.local_verts[0];
2260 
2261  for( int j = 0; j < *num_elem * ( *num_nodes_per_element ); j++ )
2262  {
2263  array[j] = connectivity[j] + firstVertex - 1;
2264  } // assumes connectivity uses 1 based array (from fortran, mostly)
2265 
2266  Range new_elems( actual_start_handle, actual_start_handle + *num_elem - 1 );
2267 
2268  rval = context.MBI->add_entities( data.file_set, new_elems );MB_CHK_ERR( rval );
2269 
2270  data.primary_elems.merge( new_elems );
2271 
2272  // add to adjacency
2273  rval = read_iface->update_adjacencies( actual_start_handle, *num_elem, *num_nodes_per_element, array );MB_CHK_ERR( rval );
2274  // organize all new elements in a block, with the given block ID; if the block set does not
2275  // exist, create a new mesh set;
2276  Range sets;
2277  int set_no = *block_ID;
2278  const void* setno_ptr = &set_no;
2279  rval = context.MBI->get_entities_by_type_and_tag( data.file_set, MBENTITYSET, &context.material_tag, &setno_ptr, 1,
2280  sets );
2281  EntityHandle block_set;
2282 
2283  if( MB_FAILURE == rval || sets.empty() )
2284  {
2285  // create a new set, with this block ID
2286  rval = context.MBI->create_meshset( MESHSET_SET, block_set );MB_CHK_ERR( rval );
2287 
2288  rval = context.MBI->tag_set_data( context.material_tag, &block_set, 1, &set_no );MB_CHK_ERR( rval );
2289 
2290  // add the material set to file set
2291  rval = context.MBI->add_entities( data.file_set, &block_set, 1 );MB_CHK_ERR( rval );
2292  }
2293  else
2294  {
2295  block_set = sets[0];
2296  } // first set is the one we want
2297 
2298  /// add the new ents to the block set
2299  rval = context.MBI->add_entities( block_set, new_elems );MB_CHK_ERR( rval );
2300 
2301  return moab::MB_SUCCESS;
2302 }
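// Illustrative sketch (not part of the build): building a one-quad mesh with the two creation
// routines above (the element routine is assumed to be iMOAB_CreateElements, as in the iMOAB
// API). The block id 100 is arbitrary; connectivity is 1-based, matching the conversion done
// above when the element connectivity array is filled.
#if 0
static ErrCode example_create_one_quad( iMOAB_AppID pid )
{
    // four vertices of a unit square in the z = 0 plane
    double coords[12] = { 0., 0., 0., 1., 0., 0., 1., 1., 0., 0., 1., 0. };
    int coords_len = 12, dim = 3;
    ErrCode rc = iMOAB_CreateVertices( pid, &coords_len, &dim, coords );
    if( rc ) return rc;

    int num_elem       = 1;
    int type           = (int)moab::MBQUAD; // moab entity type, passed as an int
    int nodes_per_elem = 4;
    int conn[4]        = { 1, 2, 3, 4 };    // 1-based vertex indices
    int block_id       = 100;               // arbitrary material/block id
    return iMOAB_CreateElements( pid, &num_elem, &type, &nodes_per_elem, conn, &block_id );
}
#endif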
2303 
2304 ErrCode iMOAB_SetGlobalInfo( iMOAB_AppID pid, int* num_global_verts, int* num_global_elems )
2305 {
2306  appData& data = context.appDatas[*pid];
2307  data.num_global_vertices = *num_global_verts;
2308  data.num_global_elements = *num_global_elems;
2309  return moab::MB_SUCCESS;
2310 }
2311 
2312 ErrCode iMOAB_GetGlobalInfo( iMOAB_AppID pid, int* num_global_verts, int* num_global_elems )
2313 {
2314  appData& data = context.appDatas[*pid];
2315  if( NULL != num_global_verts )
2316  {
2317  *num_global_verts = data.num_global_vertices;
2318  }
2319  if( NULL != num_global_elems )
2320  {
2321  *num_global_elems = data.num_global_elements;
2322  }
2323 
2324  return moab::MB_SUCCESS;
2325 }
2326 
2327 #ifdef MOAB_HAVE_MPI
2328 
2329 // this makes sense only for parallel runs
2330 ErrCode iMOAB_ResolveSharedEntities( iMOAB_AppID pid, int* num_verts, int* marker )
2331 {
2332  appData& data = context.appDatas[*pid];
2333  ParallelComm* pco = context.pcomms[*pid];
2334  EntityHandle cset = data.file_set;
2335  int dum_id = 0;
2336  ErrorCode rval;
2337  if( data.primary_elems.empty() )
2338  {
2339  // skip the actual resolve; assume vertices are already distributed,
2340  // no need to share them
2341  }
2342  else
2343  {
2344  // create an integer tag for resolving; maybe it can be a long tag in the future
2345  // (more than 2B vertices)
2346 
2347  Tag stag;
2348  rval = context.MBI->tag_get_handle( "__sharedmarker", 1, MB_TYPE_INTEGER, stag, MB_TAG_CREAT | MB_TAG_DENSE,
2349  &dum_id );MB_CHK_ERR( rval );
2350 
2351  if( *num_verts > (int)data.local_verts.size() )
2352  {
2353  return moab::MB_FAILURE;
2354  } // we are not setting the size
2355 
2356  rval = context.MBI->tag_set_data( stag, data.local_verts, (void*)marker );MB_CHK_ERR( rval ); // assumes integer tag
2357 
2358  rval = pco->resolve_shared_ents( cset, -1, -1, &stag );MB_CHK_ERR( rval );
2359 
2360  rval = context.MBI->tag_delete( stag );MB_CHK_ERR( rval );
2361  }
2362  // provide partition tag equal to rank
2363  Tag part_tag;
2364  dum_id = -1;
2365  rval = context.MBI->tag_get_handle( "PARALLEL_PARTITION", 1, MB_TYPE_INTEGER, part_tag,
2366  MB_TAG_CREAT | MB_TAG_SPARSE, &dum_id );
2367 
2368  if( part_tag == NULL || ( ( rval != MB_SUCCESS ) && ( rval != MB_ALREADY_ALLOCATED ) ) )
2369  {
2370  std::cout << " can't get par part tag.\n";
2371  return moab::MB_FAILURE;
2372  }
2373 
2374  int rank = pco->rank();
2375  rval = context.MBI->tag_set_data( part_tag, &cset, 1, &rank );MB_CHK_ERR( rval );
2376 
2377  return moab::MB_SUCCESS;
2378 }
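// Illustrative sketch (not part of the build): resolving shared vertices after a mesh was
// created locally on each task (e.g. through the creation routines above). The marker array is
// assumed to hold the global id of each local vertex, in the order of the local vertex range;
// equal markers on different tasks identify the same physical vertex.
#if 0
static ErrCode example_resolve_shared( iMOAB_AppID pid, int num_local_verts, int* vertex_global_ids )
{
    // vertex_global_ids[i] == global id of local vertex i (caller-provided)
    return iMOAB_ResolveSharedEntities( pid, &num_local_verts, vertex_global_ids );
}
#endif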
2379 
2380 // this assumes that this was not called before
2381 ErrCode iMOAB_DetermineGhostEntities( iMOAB_AppID pid, int* ghost_dim, int* num_ghost_layers, int* bridge_dim )
2382 {
2383  ErrorCode rval;
2384 
2385  // verify we have valid ghost layers input specified. If invalid, exit quick.
2386  if( num_ghost_layers && *num_ghost_layers <= 0 )
2387  {
2388  return moab::MB_SUCCESS;
2389  } // nothing to do
2390 
2391  appData& data = context.appDatas[*pid];
2392  ParallelComm* pco = context.pcomms[*pid];
2393 
2394  // maybe we should be passing this too; most of the time we do not need additional ents (collective call)
2395  constexpr int addl_ents = 0;
2396  rval = pco->exchange_ghost_cells( *ghost_dim, *bridge_dim, 1, // get only one layer of ghost entities
2397  addl_ents, true, true, &data.file_set );MB_CHK_ERR( rval );
2398  for( int i = 2; i <= *num_ghost_layers; i++ )
2399  {
2400  rval = pco->correct_thin_ghost_layers();MB_CHK_ERR( rval ); // correct for thin layers
2401  rval = pco->exchange_ghost_cells( *ghost_dim, *bridge_dim, i, // iteratively get one extra layer
2402  addl_ents, true, true, &data.file_set );MB_CHK_ERR( rval );
2403  }
2404 
2405  // Update ghost layer information
2406  data.num_ghost_layers = *num_ghost_layers;
2407 
2408  // now re-establish all mesh info; will reconstruct mesh info, based solely on what is in the file set
2409  return iMOAB_UpdateMeshInfo( pid );
2410 }
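// Illustrative sketch (not part of the build): requesting two layers of ghost cells once shared
// entities have been resolved. The dimensions are assumptions for a 2d surface mesh: ghost the
// cells (dim 2) and treat two cells as neighbors when they share a vertex (bridge dim 0).
#if 0
static ErrCode example_ghost_layers( iMOAB_AppID pid )
{
    int ghost_dim  = 2; // dimension of the entities to ghost
    int bridge_dim = 0; // adjacency through shared vertices
    int num_layers = 2; // layers are added one at a time, as in the loop above
    return iMOAB_DetermineGhostEntities( pid, &ghost_dim, &num_layers, &bridge_dim );
}
#endif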
2411 
2412 ErrCode iMOAB_SendMesh( iMOAB_AppID pid, MPI_Comm* join, MPI_Group* receivingGroup, int* rcompid, int* method )
2413 {
2414  assert( join != nullptr );
2415  assert( receivingGroup != nullptr );
2416  assert( rcompid != nullptr );
2417 
2418  ErrorCode rval;
2419  int ierr;
2420  appData& data = context.appDatas[*pid];
2421  ParallelComm* pco = context.pcomms[*pid];
2422 
2423  MPI_Comm global = ( data.is_fortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( join ) ) : *join );
2424  MPI_Group recvGroup =
2425  ( data.is_fortran ? MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( receivingGroup ) ) : *receivingGroup );
2426  MPI_Comm sender = pco->comm(); // the sender comm is obtained from parallel comm in moab; no need to pass it along
2427  // first see which processors are in each group; get the sender group too, from the sender communicator
2428  MPI_Group senderGroup;
2429  ierr = MPI_Comm_group( sender, &senderGroup );
2430  if( ierr != 0 ) return moab::MB_FAILURE;
2431 
2432  // instantiate the par comm graph
2433  // ParCommGraph::ParCommGraph(MPI_Comm joincomm, MPI_Group group1, MPI_Group group2, int coid1,
2434  // int coid2)
2435  ParCommGraph* cgraph =
2436  new ParCommGraph( global, senderGroup, recvGroup, context.appDatas[*pid].global_id, *rcompid );
2437  // we should search if we have another pcomm with the same comp ids in the list already
2438  // sort of check existing comm graphs in the map context.appDatas[*pid].pgraph
2439  context.appDatas[*pid].pgraph[*rcompid] = cgraph;
2440 
2441  int sender_rank = -1;
2442  MPI_Comm_rank( sender, &sender_rank );
2443 
2444  // decide how to distribute elements to each processor
2445  // now, get the entities on local processor, and pack them into a buffer for various processors
2446  // we will do trivial partition: first get the total number of elements from "sender"
2447  std::vector< int > number_elems_per_part;
2448  // how to distribute local elements to receiving tasks?
2449  // trivial partition: compute first the total number of elements that need to be sent
2450  Range owned = context.appDatas[*pid].owned_elems;
2451  if( owned.size() == 0 )
2452  {
2453  // then it must be vertices that we want to send
2454  owned = context.appDatas[*pid].local_verts;
2455  // we should have some vertices here
2456  }
2457 
2458  if( *method == 0 ) // trivial partitioning, old method
2459  {
2460  int local_owned_elem = (int)owned.size();
2461  int size = pco->size();
2462  int rank = pco->rank();
2463  number_elems_per_part.resize( size ); //
2464  number_elems_per_part[rank] = local_owned_elem;
2465 #if( MPI_VERSION >= 2 )
2466  // Use "in place" option
2467  ierr = MPI_Allgather( MPI_IN_PLACE, 0, MPI_DATATYPE_NULL, &number_elems_per_part[0], 1, MPI_INT, sender );
2468 #else
2469  {
2470  std::vector< int > all_tmp( size );
2471  ierr = MPI_Allgather( &number_elems_per_part[rank], 1, MPI_INT, &all_tmp[0], 1, MPI_INT, sender );
2472  number_elems_per_part = all_tmp;
2473  }
2474 #endif
2475 
2476  if( ierr != 0 )
2477  {
2478  return moab::MB_FAILURE;
2479  }
2480 
2481  // every sender computes the trivial partition; it is cheap, and every sender
2482  // needs it anyway
2483  rval = cgraph->compute_trivial_partition( number_elems_per_part );MB_CHK_ERR( rval );
2484 
2485  rval = cgraph->send_graph( global );MB_CHK_ERR( rval );
2486  }
2487  else // *method != 0, so it is either graph or geometric, parallel
2488  {
2489  // owned are the primary elements on this app
2490  rval = cgraph->compute_partition( pco, owned, *method );MB_CHK_ERR( rval );
2491 
2492  // basically, send the graph to the receiver side, with unblocking send
2493  rval = cgraph->send_graph_partition( pco, global );MB_CHK_ERR( rval );
2494  }
2495  // pco is needed to pack, not for communication
2496  rval = cgraph->send_mesh_parts( global, pco, owned );MB_CHK_ERR( rval );
2497 
2498  // mark for deletion
2499  MPI_Group_free( &senderGroup );
2500  return moab::MB_SUCCESS;
2501 }
2502 
2503 ErrCode iMOAB_ReceiveMesh( iMOAB_AppID pid, MPI_Comm* join, MPI_Group* sendingGroup, int* scompid )
2504 {
2505  assert( join != nullptr );
2506  assert( sendingGroup != nullptr );
2507  assert( scompid != nullptr );
2508 
2509  ErrorCode rval;
2510  appData& data = context.appDatas[*pid];
2511  ParallelComm* pco = context.pcomms[*pid];
2512  MPI_Comm receive = pco->comm();
2513  EntityHandle local_set = data.file_set;
2514 
2515  MPI_Comm global = ( data.is_fortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( join ) ) : *join );
2516  MPI_Group sendGroup =
2517  ( data.is_fortran ? MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( sendingGroup ) ) : *sendingGroup );
2518 
2519  // first see which processors are in each group; get the sender group too, from the sender
2520  // communicator
2521  MPI_Group receiverGroup;
2522  int ierr = MPI_Comm_group( receive, &receiverGroup );CHK_MPI_ERR( ierr );
2523 
2524  // instantiate the par comm graph
2525  ParCommGraph* cgraph =
2526  new ParCommGraph( global, sendGroup, receiverGroup, *scompid, context.appDatas[*pid].global_id );
2527  // TODO we should search if we have another pcomm with the same comp ids in the list already
2528  // sort of check existing comm graphs in the map context.appDatas[*pid].pgraph
2529  context.appDatas[*pid].pgraph[*scompid] = cgraph;
2530 
2531  int receiver_rank = -1;
2532  MPI_Comm_rank( receive, &receiver_rank );
2533 
2534  // first, receive from sender_rank 0, the communication graph (matrix), so each receiver
2535  // knows what data to expect
2536  std::vector< int > pack_array;
2537  rval = cgraph->receive_comm_graph( global, pco, pack_array );MB_CHK_ERR( rval );
2538 
2539  // senders for the current receiver
2540  int current_receiver = cgraph->receiver( receiver_rank );
2541 
2542  std::vector< int > senders_local;
2543  size_t n = 0;
2544 
2545  while( n < pack_array.size() )
2546  {
2547  if( current_receiver == pack_array[n] )
2548  {
2549  for( int j = 0; j < pack_array[n + 1]; j++ )
2550  {
2551  senders_local.push_back( pack_array[n + 2 + j] );
2552  }
2553 
2554  break;
2555  }
2556 
2557  n = n + 2 + pack_array[n + 1];
2558  }
2559 
2560 #ifdef VERBOSE
2561  std::cout << " receiver " << current_receiver << " at rank " << receiver_rank << " will receive from "
2562  << senders_local.size() << " tasks: ";
2563 
2564  for( int k = 0; k < (int)senders_local.size(); k++ )
2565  {
2566  std::cout << " " << senders_local[k];
2567  }
2568 
2569  std::cout << "\n";
2570 #endif
2571 
2572  if( senders_local.empty() )
2573  {
2574  std::cout << " we do not have any senders for receiver rank " << receiver_rank << "\n";
2575  }
2576  rval = cgraph->receive_mesh( global, pco, local_set, senders_local );MB_CHK_ERR( rval );
2577 
2578  // after we are done, we could merge vertices that come from different senders, but
2579  // have the same global id
2580  Tag idtag;
2581  rval = context.MBI->tag_get_handle( "GLOBAL_ID", idtag );MB_CHK_ERR( rval );
2582 
2583  // data.point_cloud = false;
2584  Range local_ents;
2585  rval = context.MBI->get_entities_by_handle( local_set, local_ents );MB_CHK_ERR( rval );
2586 
2587  // do not do merge if point cloud
2588  if( !local_ents.all_of_type( MBVERTEX ) )
2589  {
2590  if( (int)senders_local.size() >= 2 ) // need to remove duplicate vertices
2591  // that might come from different senders
2592  {
2593 
2594  Range local_verts = local_ents.subset_by_type( MBVERTEX );
2595  Range local_elems = subtract( local_ents, local_verts );
2596 
2597  // remove from local set the vertices
2598  rval = context.MBI->remove_entities( local_set, local_verts );MB_CHK_ERR( rval );
2599 
2600 #ifdef VERBOSE
2601  std::cout << "current_receiver " << current_receiver << " local verts: " << local_verts.size() << "\n";
2602 #endif
2603  MergeMesh mm( context.MBI );
2604 
2605  rval = mm.merge_using_integer_tag( local_verts, idtag );MB_CHK_ERR( rval );
2606 
2607  Range new_verts; // vertices used by the remaining local elements, gathered after the merge
2608  rval = context.MBI->get_connectivity( local_elems, new_verts );MB_CHK_ERR( rval );
2609 
2610 #ifdef VERBOSE
2611  std::cout << "after merging: new verts: " << new_verts.size() << "\n";
2612 #endif
2613  rval = context.MBI->add_entities( local_set, new_verts );MB_CHK_ERR( rval );
2614  }
2615  }
2616  else
2617  data.point_cloud = true;
2618 
2619  if( !data.point_cloud )
2620  {
2621  // still need to resolve shared entities (in this case, vertices )
2622  rval = pco->resolve_shared_ents( local_set, -1, -1, &idtag );MB_CHK_ERR( rval );
2623  }
2624  else
2625  {
2626  // if partition tag exists, set it to current rank; just to make it visible in VisIt
2627  Tag densePartTag;
2628  rval = context.MBI->tag_get_handle( "partition", densePartTag );
2629  if( NULL != densePartTag && MB_SUCCESS == rval )
2630  {
2631  Range local_verts;
2632  rval = context.MBI->get_entities_by_dimension( local_set, 0, local_verts );MB_CHK_ERR( rval );
2633  std::vector< int > vals;
2634  int rank = pco->rank();
2635  vals.resize( local_verts.size(), rank );
2636  rval = context.MBI->tag_set_data( densePartTag, local_verts, &vals[0] );MB_CHK_ERR( rval );
2637  }
2638  }
2639  // set the parallel partition tag
2640  Tag part_tag;
2641  int dum_id = -1;
2642  rval = context.MBI->tag_get_handle( "PARALLEL_PARTITION", 1, MB_TYPE_INTEGER, part_tag,
2643  MB_TAG_CREAT | MB_TAG_SPARSE, &dum_id );
2644 
2645  if( part_tag == NULL || ( ( rval != MB_SUCCESS ) && ( rval != MB_ALREADY_ALLOCATED ) ) )
2646  {
2647  std::cout << " can't get par part tag.\n";
2648  return moab::MB_FAILURE;
2649  }
2650 
2651  int rank = pco->rank();
2652  rval = context.MBI->tag_set_data( part_tag, &local_set, 1, &rank );MB_CHK_ERR( rval );
2653 
2654  // make sure that the GLOBAL_ID is defined as a tag
2655  int tagtype = 0; // dense, integer
2656  int numco = 1; // size
2657  int tagindex; // not used
2658  rval = iMOAB_DefineTagStorage( pid, "GLOBAL_ID", &tagtype, &numco, &tagindex );MB_CHK_ERR( rval );
2659  // populate the mesh with current data info
2660  rval = iMOAB_UpdateMeshInfo( pid );MB_CHK_ERR( rval );
2661 
2662  // mark for deletion
2663  MPI_Group_free( &receiverGroup );
2664 
2665  return moab::MB_SUCCESS;
2666 }
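// Illustrative sketch (not part of the build): pairing the two migration calls above over a
// joint communicator. Each task calls exactly one of them, depending on whether it hosts the
// sending or the receiving application; the component ids (17, 35) are placeholders and must
// match the ids used at registration time.
#if 0
static ErrCode example_migrate_mesh( iMOAB_AppID pid_send, iMOAB_AppID pid_recv, MPI_Comm join,
                                     MPI_Group send_group, MPI_Group recv_group, int on_sender_side )
{
    int send_comp = 17, recv_comp = 35; // placeholder external component ids
    if( on_sender_side )
    {
        int method = 0; // 0 = trivial partition; nonzero = graph/geometric partition (see above)
        return iMOAB_SendMesh( pid_send, &join, &recv_group, &recv_comp, &method );
    }
    return iMOAB_ReceiveMesh( pid_recv, &join, &send_group, &send_comp );
}
#endif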
2667 
2668 ErrCode iMOAB_SendElementTag( iMOAB_AppID pid, const iMOAB_String tag_storage_name, MPI_Comm* join, int* context_id )
2669 {
2670  appData& data = context.appDatas[*pid];
2671  std::map< int, ParCommGraph* >::iterator mt = data.pgraph.find( *context_id );
2672  if( mt == data.pgraph.end() )
2673  {
2674  return moab::MB_FAILURE;
2675  }
2676  ParCommGraph* cgraph = mt->second;
2677  ParallelComm* pco = context.pcomms[*pid];
2678  Range owned = data.owned_elems;
2679  ErrorCode rval;
2680  EntityHandle cover_set;
2681 
2682  MPI_Comm global = ( data.is_fortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( join ) ) : *join );
2683  if( data.point_cloud )
2684  {
2685  owned = data.local_verts;
2686  }
2687 
2688  // another possibility is for the par comm graph to be computed from iMOAB_ComputeCommGraph,
2689  // for after atm ocn intx, from phys (case from imoab_phatm_ocn_coupler.cpp); get then the cover set
2690  // from the intx remapper
2691 #ifdef MOAB_HAVE_TEMPESTREMAP
2692  if( data.tempestData.remapper != NULL ) // this is the case when this is part of intx
2693  {
2694  cover_set = data.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh );
2695  rval = context.MBI->get_entities_by_dimension( cover_set, 2, owned );MB_CHK_ERR( rval );
2696  // should still have only quads ?
2697  }
2698 #else
2699  // now, in case we are sending from intx between ocn and atm, we involve coverage set
2700  // how do I know if this receiver already participated in an intersection driven by coupler?
2701  // also, what if this was the "source" mesh in intx?
2702  // in that case, the elements might have been instantiated in the coverage set locally; the
2703  // "owned" range can be different, because the elements are now in the tempestRemap coverage_set
2704  cover_set = cgraph->get_cover_set(); // this will be non null only for intx app ?
2705 
2706  if( 0 != cover_set )
2707  {
2708  rval = context.MBI->get_entities_by_dimension( cover_set, 2, owned );MB_CHK_ERR( rval );
2709  }
2710 #endif
2711 
2712  std::string tag_name( tag_storage_name );
2713 
2714  // basically, we assume everything is defined already on the tag,
2715  // and we can get the tags just by their names
2716  // we assume that there are separators ":" between the tag names
2717  std::vector< std::string > tagNames;
2718  std::vector< Tag > tagHandles;
2719  std::string separator( ":" );
2720  split_tag_names( tag_name, separator, tagNames );
2721  for( size_t i = 0; i < tagNames.size(); i++ )
2722  {
2723  Tag tagHandle;
2724  rval = context.MBI->tag_get_handle( tagNames[i].c_str(), tagHandle );
2725  if( MB_SUCCESS != rval || NULL == tagHandle )
2726  {
2727  std::cout << " can't get tag handle for tag named:" << tagNames[i].c_str() << " at index " << i << "\n";MB_CHK_SET_ERR( rval, "can't get tag handle" );
2728  }
2729  tagHandles.push_back( tagHandle );
2730  }
2731 
2732  // pco is needed to pack, and for moab instance, not for communication!
2733  // still use nonblocking communication, over the joint comm
2734  rval = cgraph->send_tag_values( global, pco, owned, tagHandles );MB_CHK_ERR( rval );
2735  // now, send to each corr_tasks[i] tag data for corr_sizes[i] primary entities
2736 
2737  return moab::MB_SUCCESS;
2738 }
2739 
2740 ErrCode iMOAB_ReceiveElementTag( iMOAB_AppID pid, const iMOAB_String tag_storage_name, MPI_Comm* join, int* context_id )
2741 {
2742  appData& data = context.appDatas[*pid];
2743 
2744  std::map< int, ParCommGraph* >::iterator mt = data.pgraph.find( *context_id );
2745  if( mt == data.pgraph.end() )
2746  {
2747  return moab::MB_FAILURE;
2748  }
2749  ParCommGraph* cgraph = mt->second;
2750 
2751  MPI_Comm global = ( data.is_fortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( join ) ) : *join );
2752  ParallelComm* pco = context.pcomms[*pid];
2753  Range owned = data.owned_elems;
2754 
2755  // how do I know if this receiver already participated in an intersection driven by coupler?
2756  // also, what if this was the "source" mesh in intx?
2757  // in that case, the elements might have been instantiated in the coverage set locally; the
2758  // "owned" range can be different, because the elements are now in the tempestRemap coverage_set
2759  EntityHandle cover_set = cgraph->get_cover_set();
2760  ErrorCode rval;
2761  if( 0 != cover_set )
2762  {
2763  rval = context.MBI->get_entities_by_dimension( cover_set, 2, owned );MB_CHK_ERR( rval );
2764  }
2765  if( data.point_cloud )
2766  {
2767  owned = data.local_verts;
2768  }
2769  // another possibility is for the par comm graph to be computed from iMOAB_ComputeCommGraph,
2770  // for after atm ocn intx, from phys (case from imoab_phatm_ocn_coupler.cpp); get then the cover set
2771  // from the intx remapper
2772 #ifdef MOAB_HAVE_TEMPESTREMAP
2773  if( data.tempestData.remapper != NULL ) // this is the case when this is part of intx
2774  {
2775  cover_set = data.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh );
2776  rval = context.MBI->get_entities_by_dimension( cover_set, 2, owned );MB_CHK_ERR( rval );
2777  // should still have only quads ?
2778  }
2779 #endif
2780  /*
2781  * data_intx.remapper exists though only on the intersection application
2782  * how do we get from here ( we know the pid that receives, and the commgraph used by migrate
2783  * mesh )
2784  */
2785 
2786  std::string tag_name( tag_storage_name );
2787 
2788  // we assume that there are separators ":" between the tag names
2789  std::vector< std::string > tagNames;
2790  std::vector< Tag > tagHandles;
2791  std::string separator( ":" );
2792  split_tag_names( tag_name, separator, tagNames );
2793  for( size_t i = 0; i < tagNames.size(); i++ )
2794  {
2795  Tag tagHandle;
2796  rval = context.MBI->tag_get_handle( tagNames[i].c_str(), tagHandle );
2797  if( MB_SUCCESS != rval || NULL == tagHandle )
2798  {
2799  std::cout << " can't get tag handle for tag named:" << tagNames[i].c_str() << " at index " << i << "\n";MB_CHK_SET_ERR( rval, "can't get tag handle" );
2800  }
2801  tagHandles.push_back( tagHandle );
2802  }
2803 
2804 #ifdef VERBOSE
2805  std::cout << pco->rank() << ". Looking to receive data for tags: " << tag_name
2806  << " and file set = " << ( data.file_set ) << "\n";
2807 #endif
2808  // pco is needed to pack, and for moab instance, not for communication!
2809  // still use nonblocking communication
2810  rval = cgraph->receive_tag_values( global, pco, owned, tagHandles );MB_CHK_ERR( rval );
2811 
2812 #ifdef VERBOSE
2813  std::cout << pco->rank() << ". Looking to receive data for tags: " << tag_name << "\n";
2814 #endif
2815 
2816  return moab::MB_SUCCESS;
2817 }
2818 
2819 ErrCode iMOAB_FreeSenderBuffers( iMOAB_AppID pid, int* context_id )
2820 {
2821  // need first to find the pgraph that holds the information we need
2822  // this will be called on sender side only
2823  appData& data = context.appDatas[*pid];
2824  std::map< int, ParCommGraph* >::iterator mt = data.pgraph.find( *context_id );
2825  if( mt == data.pgraph.end() ) return moab::MB_FAILURE; // error
2826 
2827  mt->second->release_send_buffers();
2828  return moab::MB_SUCCESS;
2829 }
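// Illustrative sketch (not part of the build): one tag-migration cycle over a comm graph that
// was established earlier (for instance by the send/receive mesh pair or by
// iMOAB_ComputeCommGraph). The tag names and the context id are placeholders; several tags can
// be packed in one call by joining their names with ':'.
#if 0
static ErrCode example_migrate_tags( iMOAB_AppID pid_send, iMOAB_AppID pid_recv, MPI_Comm join,
                                     int context_id, int on_sender_side )
{
    char tag_names[] = "a2oTbot:a2oUbot"; // placeholder tag names, ':'-separated
    if( on_sender_side )
    {
        ErrCode rc = iMOAB_SendElementTag( pid_send, tag_names, &join, &context_id );
        if( rc ) return rc;
        // release the nonblocking send buffers once it is safe to do so
        return iMOAB_FreeSenderBuffers( pid_send, &context_id );
    }
    return iMOAB_ReceiveElementTag( pid_recv, tag_names, &join, &context_id );
}
#endif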
2830 
2831 /**
2832 \brief compute a comm graph between 2 moab apps, based on ID matching
2833 <B>Operations:</B> Collective
2834 */
2835 //#define VERBOSE
2836 ErrCode iMOAB_ComputeCommGraph( iMOAB_AppID pid1,
2837  iMOAB_AppID pid2,
2838  MPI_Comm* join,
2839  MPI_Group* group1,
2840  MPI_Group* group2,
2841  int* type1,
2842  int* type2,
2843  int* comp1,
2844  int* comp2 )
2845 {
2846  assert( join );
2847  assert( group1 );
2848  assert( group2 );
2849  ErrorCode rval = MB_SUCCESS;
2850  int localRank = 0, numProcs = 1;
2851 
2852  bool isFortran = false;
2853  if( *pid1 >= 0 ) isFortran = isFortran || context.appDatas[*pid1].is_fortran;
2854  if( *pid2 >= 0 ) isFortran = isFortran || context.appDatas[*pid2].is_fortran;
2855 
2856  MPI_Comm global = ( isFortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( join ) ) : *join );
2857  MPI_Group srcGroup = ( isFortran ? MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( group1 ) ) : *group1 );
2858  MPI_Group tgtGroup = ( isFortran ? MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( group2 ) ) : *group2 );
2859 
2860  MPI_Comm_rank( global, &localRank );
2861  MPI_Comm_size( global, &numProcs );
2862  // instantiate the par comm graph
2863 
2864  // we should search if we have another pcomm with the same comp ids in the list already
2865  // sort of check existing comm graphs in the map context.appDatas[*pid].pgraph
2866  bool already_exists = false;
2867  if( *pid1 >= 0 )
2868  {
2869  appData& data = context.appDatas[*pid1];
2870  std::map< int, ParCommGraph* >::iterator mt = data.pgraph.find( *comp2 );
2871  if( mt != data.pgraph.end() ) already_exists = true;
2872  }
2873  if( *pid2 >= 0 )
2874  {
2875  appData& data = context.appDatas[*pid2];
2876  std::map< int, ParCommGraph* >::iterator mt = data.pgraph.find( *comp1 );
2877  if( mt != data.pgraph.end() ) already_exists = true;
2878  }
2879  // nothing to do if it already exists
2880  if( already_exists )
2881  {
2882 #ifdef VERBOSE
2883  if( !localRank )
2884  std::cout << " parcomgraph already existing between components " << *comp1 << " and " << *comp2
2885  << ". Do not compute again\n";
2886 #endif
2887  return moab::MB_SUCCESS;
2888  }
2889 
2890  // ParCommGraph::ParCommGraph(MPI_Comm joincomm, MPI_Group group1, MPI_Group group2, int coid1,
2891  // int coid2)
2892  ParCommGraph* cgraph = NULL;
2893  if( *pid1 >= 0 ) cgraph = new ParCommGraph( global, srcGroup, tgtGroup, *comp1, *comp2 );
2894  ParCommGraph* cgraph_rev = NULL;
2895  if( *pid2 >= 0 ) cgraph_rev = new ParCommGraph( global, tgtGroup, srcGroup, *comp2, *comp1 );
2896 
2897  if( *pid1 >= 0 ) context.appDatas[*pid1].pgraph[*comp2] = cgraph; // the context will be the other comp
2898  if( *pid2 >= 0 ) context.appDatas[*pid2].pgraph[*comp1] = cgraph_rev; // from 2 to 1
2899  // each model has a list of global ids that will need to be sent through the gs (crystal router)
2900  // to rendezvous with the other model on the joint comm
2901  TupleList TLcomp1;
2902  TLcomp1.initialize( 2, 0, 0, 0, 0 ); // to proc, marker
2903  TupleList TLcomp2;
2904  TLcomp2.initialize( 2, 0, 0, 0, 0 ); // to proc, marker
2905  // will push_back a new tuple, if needed
2906 
2907  TLcomp1.enableWriteAccess();
2908 
2909  // tags of interest are either GLOBAL_DOFS or GLOBAL_ID
2910  Tag gdsTag;
2911  // find the values on first cell
2912  int lenTagType1 = 1;
2913  if( 1 == *type1 || 1 == *type2 )
2914  {
2915  rval = context.MBI->tag_get_handle( "GLOBAL_DOFS", gdsTag );MB_CHK_ERR( rval );
2916  rval = context.MBI->tag_get_length( gdsTag, lenTagType1 );MB_CHK_ERR( rval ); // usually it is 16
2917  }
2918  Tag tagType2 = context.MBI->globalId_tag();
2919 
2920  std::vector< int > valuesComp1;
2921  // populate first tuple
2922  if( *pid1 >= 0 )
2923  {
2924  appData& data1 = context.appDatas[*pid1];
2925  EntityHandle fset1 = data1.file_set;
2926  // in case of tempest remap, get the coverage set
2927 #ifdef MOAB_HAVE_TEMPESTREMAP
2928  if( data1.tempestData.remapper != NULL ) // this is the case when this is part of intx
2929  {
2930  fset1 = data1.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh );
2931  // should still have only quads ?
2932  }
2933 #endif
2934  Range ents_of_interest;
2935  if( *type1 == 1 )
2936  {
2937  assert( gdsTag );
2938  rval = context.MBI->get_entities_by_type( fset1, MBQUAD, ents_of_interest );MB_CHK_ERR( rval );
2939  valuesComp1.resize( ents_of_interest.size() * lenTagType1 );
2940  rval = context.MBI->tag_get_data( gdsTag, ents_of_interest, &valuesComp1[0] );MB_CHK_ERR( rval );
2941  }
2942  else if( *type1 == 2 )
2943  {
2944  rval = context.MBI->get_entities_by_type( fset1, MBVERTEX, ents_of_interest );MB_CHK_ERR( rval );
2945  valuesComp1.resize( ents_of_interest.size() );
2946  rval = context.MBI->tag_get_data( tagType2, ents_of_interest, &valuesComp1[0] );MB_CHK_ERR( rval ); // just global ids
2947  }
2948  else if( *type1 == 3 ) // for FV meshes, just get the global id of cell
2949  {
2950  rval = context.MBI->get_entities_by_dimension( fset1, 2, ents_of_interest );MB_CHK_ERR( rval );
2951  valuesComp1.resize( ents_of_interest.size() );
2952  rval = context.MBI->tag_get_data( tagType2, ents_of_interest, &valuesComp1[0] );MB_CHK_ERR( rval ); // just global ids
2953  }
2954  else
2955  {
2956  MB_CHK_ERR( MB_FAILURE ); // we know only type 1 or 2 or 3
2957  }
2958  // now fill the tuple list with info and markers
2959  // because we will send only the ids, order and compress the list
2960  std::set< int > uniq( valuesComp1.begin(), valuesComp1.end() );
2961  TLcomp1.resize( uniq.size() );
2962  for( std::set< int >::iterator sit = uniq.begin(); sit != uniq.end(); sit++ )
2963  {
2964  // to proc, marker, element local index, index in el
2965  int marker = *sit;
2966  int to_proc = marker % numProcs;
2967  int n = TLcomp1.get_n();
2968  TLcomp1.vi_wr[2 * n] = to_proc; // send to processor
2969  TLcomp1.vi_wr[2 * n + 1] = marker;
2970  TLcomp1.inc_n();
2971  }
2972  }
2973 
2974  ProcConfig pc( global ); // proc config does the crystal router
2975  pc.crystal_router()->gs_transfer( 1, TLcomp1,
2976  0 ); // communication towards joint tasks, with markers
2977  // sort by value (key 1)
2978 #ifdef VERBOSE
2979  std::stringstream ff1;
2980  ff1 << "TLcomp1_" << localRank << ".txt";
2981  TLcomp1.print_to_file( ff1.str().c_str() ); // it will append!
2982 #endif
2983  moab::TupleList::buffer sort_buffer;
2984  sort_buffer.buffer_init( TLcomp1.get_n() );
2985  TLcomp1.sort( 1, &sort_buffer );
2986  sort_buffer.reset();
2987 #ifdef VERBOSE
2988  // after sorting
2989  TLcomp1.print_to_file( ff1.str().c_str() ); // it will append!
2990 #endif
2991  // do the same, for the other component, number2, with type2
2992  // start copy
2993  TLcomp2.enableWriteAccess();
2994  // populate second tuple
2995  std::vector< int > valuesComp2;
2996  if( *pid2 >= 0 )
2997  {
2998  appData& data2 = context.appDatas[*pid2];
2999  EntityHandle fset2 = data2.file_set;
3000  // in case of tempest remap, get the coverage set
3001 #ifdef MOAB_HAVE_TEMPESTREMAP
3002  if( data2.tempestData.remapper != NULL ) // this is the case when this is part of intx
3003  {
3004  fset2 = data2.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh );
3005  // should still have only quads ?
3006  }
3007 #endif
3008 
3009  Range ents_of_interest;
3010  if( *type2 == 1 )
3011  {
3012  assert( gdsTag );
3013  rval = context.MBI->get_entities_by_type( fset2, MBQUAD, ents_of_interest );MB_CHK_ERR( rval );
3014  valuesComp2.resize( ents_of_interest.size() * lenTagType1 );
3015  rval = context.MBI->tag_get_data( gdsTag, ents_of_interest, &valuesComp2[0] );MB_CHK_ERR( rval );
3016  }
3017  else if( *type2 == 2 )
3018  {
3019  rval = context.MBI->get_entities_by_type( fset2, MBVERTEX, ents_of_interest );MB_CHK_ERR( rval );
3020  valuesComp2.resize( ents_of_interest.size() ); // stride is 1 here
3021  rval = context.MBI->tag_get_data( tagType2, ents_of_interest, &valuesComp2[0] );MB_CHK_ERR( rval ); // just global ids
3022  }
3023  else if( *type2 == 3 )
3024  {
3025  rval = context.MBI->get_entities_by_dimension( fset2, 2, ents_of_interest );MB_CHK_ERR( rval );
3026  valuesComp2.resize( ents_of_interest.size() ); // stride is 1 here
3027  rval = context.MBI->tag_get_data( tagType2, ents_of_interest, &valuesComp2[0] );MB_CHK_ERR( rval ); // just global ids
3028  }
3029  else
3030  {
3031  MB_CHK_ERR( MB_FAILURE ); // we know only type 1 or 2 or 3
3032  }
3033  // now fill the tuple list with info and markers
3034  std::set< int > uniq( valuesComp2.begin(), valuesComp2.end() );
3035  TLcomp2.resize( uniq.size() );
3036  for( std::set< int >::iterator sit = uniq.begin(); sit != uniq.end(); sit++ )
3037  {
3038  // to proc, marker, element local index, index in el
3039  int marker = *sit;
3040  int to_proc = marker % numProcs;
3041  int n = TLcomp2.get_n();
3042  TLcomp2.vi_wr[2 * n] = to_proc; // send to processor
3043  TLcomp2.vi_wr[2 * n + 1] = marker;
3044  TLcomp2.inc_n();
3045  }
3046  }
3047  pc.crystal_router()->gs_transfer( 1, TLcomp2,
3048  0 ); // communication towards joint tasks, with markers
3049  // sort by value (key 1)
3050 #ifdef VERBOSE
3051  std::stringstream ff2;
3052  ff2 << "TLcomp2_" << localRank << ".txt";
3053  TLcomp2.print_to_file( ff2.str().c_str() );
3054 #endif
3055  sort_buffer.buffer_reserve( TLcomp2.get_n() );
3056  TLcomp2.sort( 1, &sort_buffer );
3057  sort_buffer.reset();
3058  // end copy
3059 #ifdef VERBOSE
3060  TLcomp2.print_to_file( ff2.str().c_str() );
3061 #endif
3062  // need to send back the info, from the rendezvous point, for each of the values
3063  /* so go over each value, on local process in joint communicator
3064 
3065  now have to send back the info needed for communication;
3066  loop in sync over both TLComp1 and TLComp2, in local process;
3067  So, build new tuple lists, to send synchronous communication
3068  populate them at the same time, based on marker, that is indexed
3069  */
3070 
3071  TupleList TLBackToComp1;
3072  TLBackToComp1.initialize( 3, 0, 0, 0, 0 ); // to proc, marker, from proc on comp2,
3073  TLBackToComp1.enableWriteAccess();
3074 
3075  TupleList TLBackToComp2;
3076  TLBackToComp2.initialize( 3, 0, 0, 0, 0 ); // to proc, marker, from proc,
3077  TLBackToComp2.enableWriteAccess();
3078 
3079  int n1 = TLcomp1.get_n();
3080  int n2 = TLcomp2.get_n();
3081 
3082  int indexInTLComp1 = 0;
3083  int indexInTLComp2 = 0; // advance both, according to the marker
3084  if( n1 > 0 && n2 > 0 )
3085  {
3086 
3087  while( indexInTLComp1 < n1 && indexInTLComp2 < n2 ) // if any is over, we are done
3088  {
3089  int currentValue1 = TLcomp1.vi_rd[2 * indexInTLComp1 + 1];
3090  int currentValue2 = TLcomp2.vi_rd[2 * indexInTLComp2 + 1];
3091  if( currentValue1 < currentValue2 )
3092  {
3093  // we have a big problem; basically, we are saying that
3094  // dof currentValue is on one model and not on the other
3095  // std::cout << " currentValue1:" << currentValue1 << " missing in comp2" << "\n";
3096  indexInTLComp1++;
3097  continue;
3098  }
3099  if( currentValue1 > currentValue2 )
3100  {
3101  // std::cout << " currentValue2:" << currentValue2 << " missing in comp1" << "\n";
3102  indexInTLComp2++;
3103  continue;
3104  }
3105  int size1 = 1;
3106  int size2 = 1;
3107  while( indexInTLComp1 + size1 < n1 && currentValue1 == TLcomp1.vi_rd[2 * ( indexInTLComp1 + size1 ) + 1] )
3108  size1++;
3109  while( indexInTLComp2 + size2 < n2 && currentValue2 == TLcomp2.vi_rd[2 * ( indexInTLComp2 + size2 ) + 1] )
3110  size2++;
3111  // must be found in both lists, find the start and end indices
3112  for( int i1 = 0; i1 < size1; i1++ )
3113  {
3114  for( int i2 = 0; i2 < size2; i2++ )
3115  {
3116  // send the info back to components
3117  int n = TLBackToComp1.get_n();
3118  TLBackToComp1.reserve();
3119  TLBackToComp1.vi_wr[3 * n] =
3120  TLcomp1.vi_rd[2 * ( indexInTLComp1 + i1 )]; // send back to the proc marker
3121  // came from, info from comp2
3122  TLBackToComp1.vi_wr[3 * n + 1] = currentValue1; // initial value (resend?)
3123  TLBackToComp1.vi_wr[3 * n + 2] = TLcomp2.vi_rd[2 * ( indexInTLComp2 + i2 )]; // from proc on comp2
3124  n = TLBackToComp2.get_n();
3125  TLBackToComp2.reserve();
3126  TLBackToComp2.vi_wr[3 * n] =
3127  TLcomp2.vi_rd[2 * ( indexInTLComp2 + i2 )]; // send back info to original
3128  TLBackToComp2.vi_wr[3 * n + 1] = currentValue1; // initial value (resend?)
3129  TLBackToComp2.vi_wr[3 * n + 2] = TLcomp1.vi_rd[2 * ( indexInTLComp1 + i1 )]; // from proc on comp1
3130  // what if there are repeated markers in TLcomp2? increase just index2
3131  }
3132  }
3133  indexInTLComp1 += size1;
3134  indexInTLComp2 += size2;
3135  }
3136  }
3137  pc.crystal_router()->gs_transfer( 1, TLBackToComp1, 0 ); // communication towards original tasks, with info about
3138  pc.crystal_router()->gs_transfer( 1, TLBackToComp2, 0 );
3139 
3140  if( *pid1 >= 0 )
3141  {
3142  // we are on original comp 1 tasks
3143  // before ordering
3144  // now for each value in TLBackToComp1.vi_rd[3*i+1], on current proc, we know the
3145  // processors it communicates with
3146 #ifdef VERBOSE
3147  std::stringstream f1;
3148  f1 << "TLBack1_" << localRank << ".txt";
3149  TLBackToComp1.print_to_file( f1.str().c_str() );
3150 #endif
3151  sort_buffer.buffer_reserve( TLBackToComp1.get_n() );
3152  TLBackToComp1.sort( 1, &sort_buffer );
3153  sort_buffer.reset();
3154 #ifdef VERBOSE
3155  TLBackToComp1.print_to_file( f1.str().c_str() );
3156 #endif
3157  // so we are now on pid1; we know now where each marker has to go
3158  // add a new method to ParCommGraph, to set up the involved_IDs_map
3159  cgraph->settle_comm_by_ids( *comp1, TLBackToComp1, valuesComp1 );
3160  }
3161  if( *pid2 >= 0 )
3162  {
3163  // we are on original comp 2 tasks
3164  // before ordering
3165  // now for each value in TLBackToComp2.vi_rd[3*i+1], on current proc, we know the
3166  // processors it communicates with
3167 #ifdef VERBOSE
3168  std::stringstream f2;
3169  f2 << "TLBack2_" << localRank << ".txt";
3170  TLBackToComp2.print_to_file( f2.str().c_str() );
3171 #endif
3172  sort_buffer.buffer_reserve( TLBackToComp2.get_n() );
3173  TLBackToComp2.sort( 2, &sort_buffer );
3174  sort_buffer.reset();
3175 #ifdef VERBOSE
3176  TLBackToComp2.print_to_file( f2.str().c_str() );
3177 #endif
3178  cgraph_rev->settle_comm_by_ids( *comp2, TLBackToComp2, valuesComp2 );
3179  //
3180  }
3181  return moab::MB_SUCCESS;
3182 }
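// Illustrative sketch (not part of the build): setting up an ID-based comm graph between a
// component mesh and its migrated copy, matching cells by GLOBAL_ID (type 3 on both sides).
// The component ids are placeholders; a task that does not host one of the applications
// passes a pid pointing to a negative value, exactly as handled above.
#if 0
static ErrCode example_compute_comm_graph( iMOAB_AppID pid_comp, iMOAB_AppID pid_copy, MPI_Comm join,
                                           MPI_Group comp_group, MPI_Group copy_group )
{
    int type1 = 3, type2 = 3;   // FV cells identified by GLOBAL_ID
    int comp1 = 17, comp2 = 35; // placeholder external component ids
    return iMOAB_ComputeCommGraph( pid_comp, pid_copy, &join, &comp_group, &copy_group,
                                   &type1, &type2, &comp1, &comp2 );
}
#endif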
3183 
3184 //#undef VERBOSE
3185 
3186 ErrCode iMOAB_MergeVertices( iMOAB_AppID pid )
3187 {
3188  appData& data = context.appDatas[*pid];
3189  ParallelComm* pco = context.pcomms[*pid];
3190  // collapse vertices and transform cells into triangles/quads/polys
3191  // tags we care about: area, frac, global id
3192  std::vector< Tag > tagsList;
3193  Tag tag;
3194  ErrorCode rval = context.MBI->tag_get_handle( "GLOBAL_ID", tag );
3195  if( !tag || rval != MB_SUCCESS ) return moab::MB_FAILURE; // fatal error, abort
3196  tagsList.push_back( tag );
3197  rval = context.MBI->tag_get_handle( "area", tag );
3198  if( tag && rval == MB_SUCCESS ) tagsList.push_back( tag );
3199  rval = context.MBI->tag_get_handle( "frac", tag );
3200  if( tag && rval == MB_SUCCESS ) tagsList.push_back( tag );
3201  double tol = 1.0e-9;
3202  rval = IntxUtils::remove_duplicate_vertices( context.MBI, data.file_set, tol, tagsList );MB_CHK_ERR( rval );
3203 
3204  // clean material sets of cells that were deleted
3205  rval = context.MBI->get_entities_by_type_and_tag( data.file_set, MBENTITYSET, &( context.material_tag ), 0, 1,
3206  data.mat_sets, Interface::UNION );MB_CHK_ERR( rval );
3207 
3208  if( !data.mat_sets.empty() )
3209  {
3210  EntityHandle matSet = data.mat_sets[0];
3211  Range elems;
3212  rval = context.MBI->get_entities_by_dimension( matSet, 2, elems );MB_CHK_ERR( rval );
3213  rval = context.MBI->remove_entities( matSet, elems );MB_CHK_ERR( rval );
3214  // put back only cells from data.file_set
3215  elems.clear();
3216  rval = context.MBI->get_entities_by_dimension( data.file_set, 2, elems );MB_CHK_ERR( rval );
3217  rval = context.MBI->add_entities( matSet, elems );MB_CHK_ERR( rval );
3218  }
3219  rval = iMOAB_UpdateMeshInfo( pid );MB_CHK_ERR( rval );
3220 
3221  ParallelMergeMesh pmm( pco, tol );
3222  rval = pmm.merge( data.file_set,
3223  /* do not do local merge*/ false,
3224  /* 2d cells*/ 2 );MB_CHK_ERR( rval );
3225 
3226  // assign global ids only for vertices, cells have them fine
3227  rval = pco->assign_global_ids( data.file_set, /*dim*/ 0 );MB_CHK_ERR( rval );
3228 
3229  // set the partition tag on the file set
3230  Tag part_tag;
3231  int dum_id = -1;
3232  rval = context.MBI->tag_get_handle( "PARALLEL_PARTITION", 1, MB_TYPE_INTEGER, part_tag,
3233  MB_TAG_CREAT | MB_TAG_SPARSE, &dum_id );
3234 
3235  if( part_tag == NULL || ( ( rval != MB_SUCCESS ) && ( rval != MB_ALREADY_ALLOCATED ) ) )
3236  {
3237  std::cout << " can't get par part tag.\n";
3238  return moab::MB_FAILURE;
3239  }
3240 
3241  int rank = pco->rank();
3242  rval = context.MBI->tag_set_data( part_tag, &data.file_set, 1, &rank );MB_CHK_ERR( rval );
3243 
3244  return moab::MB_SUCCESS;
3245 }
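// Illustrative sketch (not part of the build): typical call site for the merge above, on an
// application that received mesh parts from several senders (e.g. after iMOAB_ReceiveMesh),
// so that coincident vertices coming from different senders are collapsed and the mesh info
// is refreshed.
#if 0
static ErrCode example_merge_after_receive( iMOAB_AppID pid_recv )
{
    return iMOAB_MergeVertices( pid_recv );
}
#endif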
3246 
3247 #ifdef MOAB_HAVE_TEMPESTREMAP
3248 
3249 // this call must be collective on the joint communicator
3250 // intersection tasks on the coupler will need to send to the component tasks the list of
3251 // ids of the source elements that are relevant, i.e. the ones that intersected some of the
3252 // target elements (the target elements themselves are not needed here) when the overlap
3253 // mesh was computed
3254 ErrCode iMOAB_CoverageGraph( MPI_Comm* join,
3255  iMOAB_AppID pid_src,
3256  iMOAB_AppID pid_migr,
3257  iMOAB_AppID pid_intx,
3258  int* src_id,
3259  int* migr_id,
3260  int* context_id )
3261 {
3262  // first, based on the scompid and migrcomp, find the parCommGraph corresponding to this
3263  // exchange
3264  ErrorCode rval;
3265  std::vector< int > srcSenders;
3266  std::vector< int > receivers;
3267  ParCommGraph* sendGraph = NULL;
3268  int ierr;
3269  int default_context_id = -1;
3270  bool is_fortran_context = false;
3271 
3272  // First, find the original graph between PE sets
3273  // And based on this one, we will build the newly modified one for coverage
3274  if( *pid_src >= 0 )
3275  {
3276  default_context_id = *migr_id; // the other one
3277  assert( context.appDatas[*pid_src].global_id == *src_id );
3278  is_fortran_context = context.appDatas[*pid_src].is_fortran || is_fortran_context;
3279  sendGraph = context.appDatas[*pid_src].pgraph[default_context_id]; // maybe check if it does not exist
3280 
3281  // report the sender and receiver tasks in the joint comm
3282  srcSenders = sendGraph->senders();
3283  receivers = sendGraph->receivers();
3284 #ifdef VERBOSE
3285  std::cout << "senders: " << srcSenders.size() << " first sender: " << srcSenders[0] << std::endl;
3286 #endif
3287  }
3288 
3289  ParCommGraph* recvGraph = NULL; // will be non null on receiver tasks (intx tasks)
3290  if( *pid_migr >= 0 )
3291  {
3292  is_fortran_context = context.appDatas[*pid_migr].is_fortran || is_fortran_context;
3293  // find the original one
3294  default_context_id = *src_id;
3295  assert( context.appDatas[*pid_migr].global_id == *migr_id );
3296  recvGraph = context.appDatas[*pid_migr].pgraph[default_context_id];
3297  // report the sender and receiver tasks in the joint comm, from migrated mesh pt of view
3298  srcSenders = recvGraph->senders();
3299  receivers = recvGraph->receivers();
3300 #ifdef VERBOSE
3301  std::cout << "receivers: " << receivers.size() << " first receiver: " << receivers[0] << std::endl;
3302 #endif
3303  }
3304 
3305  if( *pid_intx >= 0 ) is_fortran_context = context.appDatas[*pid_intx].is_fortran || is_fortran_context;
3306 
3307  // loop over pid_intx elements, to see what original processors in joint comm have sent the
3308  // coverage mesh;
3309  // If we are on intx tasks, send coverage info towards original component tasks,
3310  // about needed cells
3311  TupleList TLcovIDs;
3312  TLcovIDs.initialize( 2, 0, 0, 0, 0 ); // to proc, GLOBAL ID; estimate about 100 IDs to be sent
3313  // will push_back a new tuple, if needed
3314  TLcovIDs.enableWriteAccess();
3315  // the crystal router will send the cell IDs back to the original source, on the component tasks
3316  // if we are on intx tasks, loop over all intx elements and collect the ids that are needed
3317 
3318  MPI_Comm global = ( is_fortran_context ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( join ) ) : *join );
3319  int currentRankInJointComm = -1;
3320  ierr = MPI_Comm_rank( global, &currentRankInJointComm );CHK_MPI_ERR( ierr );
3321 
3322  // if currentRankInJointComm is in receivers list, it means that we are on intx tasks too, we
3323  // need to send information towards component tasks
3324  if( find( receivers.begin(), receivers.end(), currentRankInJointComm ) !=
3325  receivers.end() ) // we are on receivers tasks, we can request intx info
3326  {
3327  // find the pcomm for the intx pid
3328  if( *pid_intx >= (int)context.appDatas.size() ) return moab::MB_FAILURE;
3329 
3330  appData& dataIntx = context.appDatas[*pid_intx];
3331  Tag parentTag, orgSendProcTag;
3332 
3333  rval = context.MBI->tag_get_handle( "SourceParent", parentTag );MB_CHK_ERR( rval ); // global id of the blue, source element
3334  if( !parentTag ) return moab::MB_FAILURE; // fatal error, abort
3335 
3336  rval = context.MBI->tag_get_handle( "orig_sending_processor", orgSendProcTag );MB_CHK_ERR( rval );
3337  if( !orgSendProcTag ) return moab::MB_FAILURE; // fatal error, abort
3338 
3339  // find the file set, red parents for intx cells, and put them in tuples
3340  EntityHandle intxSet = dataIntx.file_set;
3341  Range cells;
3342  // get all entities from the set, and look at their RedParent
3343  rval = context.MBI->get_entities_by_dimension( intxSet, 2, cells );MB_CHK_ERR( rval );
3344 
3345  std::map< int, std::set< int > > idsFromProcs; // send that info back to enhance parCommGraph cache
3346  for( Range::iterator it = cells.begin(); it != cells.end(); it++ )
3347  {
3348  EntityHandle intx_cell = *it;
3349  int gidCell, origProc; // look at receivers
3350 
3351  rval = context.MBI->tag_get_data( parentTag, &intx_cell, 1, &gidCell );MB_CHK_ERR( rval );
3352  rval = context.MBI->tag_get_data( orgSendProcTag, &intx_cell, 1, &origProc );MB_CHK_ERR( rval );
3353  // we have augmented the overlap set with ghost cells ; in that case, the
3354  // orig_sending_processor is not set so it will be -1;
3355  if( origProc < 0 ) continue;
3356 
3357  std::set< int >& setInts = idsFromProcs[origProc];
3358  setInts.insert( gidCell );
3359  }
3360 
3361  // if we have no intersection cells, it means we are on point clouds; as a quick fix, just use
3362  // all cells from the coverage set (the guard below is intentionally left disabled)
3363  // if( cells.empty() )
3364  {
3365  cells.clear();
3366  // get coverage set
3367  assert( *pid_intx >= 0 );
3368  appData& dataIntx = context.appDatas[*pid_intx];
3369  EntityHandle cover_set = dataIntx.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh );
3370 
3371  // get all cells from coverage set
3372  Tag gidTag;
3373  rval = context.MBI->tag_get_handle( "GLOBAL_ID", gidTag );MB_CHK_ERR( rval );
3374  rval = context.MBI->get_entities_by_dimension( cover_set, 2, cells );MB_CHK_ERR( rval );
3375  // look at their orig_sending_processor
3376  for( Range::iterator it = cells.begin(); it != cells.end(); it++ )
3377  {
3378  EntityHandle covCell = *it;
3379  int gidCell, origProc; // look at orig_sending_processor
3380 
3381  rval = context.MBI->tag_get_data( gidTag, &covCell, 1, &gidCell );MB_CHK_ERR( rval );
3382  rval = context.MBI->tag_get_data( orgSendProcTag, &covCell, 1, &origProc );MB_CHK_ERR( rval );
3383  // we have augmented the overlap set with ghost cells ; in that case, the
3384  // orig_sending_processor is not set so it will be -1;
3385  if( origProc < 0 ) // it should not be negative here, but guard anyway
3386  continue;
3387  std::set< int >& setInts = idsFromProcs[origProc];
3388  setInts.insert( gidCell );
3389  }
3390  }
3391 
3392 #ifdef VERBOSE
3393  std::ofstream dbfile;
3394  std::stringstream outf;
3395  outf << "idsFromProc_0" << currentRankInJointComm << ".txt";
3396  dbfile.open( outf.str().c_str() );
3397  dbfile << "Writing this to a file.\n";
3398 
3399  dbfile << " map size:" << idsFromProcs.size()
3400  << std::endl; // on the receiver side, these show how much data to receive
3401  // from the sender (how many ids, and how much tag data later; we need to size up the
3402  // receiver buffer); arrange in tuples and use map iterators to send the ids
3403  for( std::map< int, std::set< int > >::iterator mt = idsFromProcs.begin(); mt != idsFromProcs.end(); mt++ )
3404  {
3405  std::set< int >& setIds = mt->second;
3406  dbfile << "from id: " << mt->first << " receive " << setIds.size() << " cells \n";
3407  int counter = 0;
3408  for( std::set< int >::iterator st = setIds.begin(); st != setIds.end(); st++ )
3409  {
3410  int valueID = *st;
3411  dbfile << " " << valueID;
3412  counter++;
3413  if( counter % 10 == 0 ) dbfile << "\n";
3414  }
3415  dbfile << "\n";
3416  }
3417  dbfile.close();
3418 #endif
3419  if( NULL != recvGraph )
3420  {
3421  ParCommGraph* recvGraph1 = new ParCommGraph( *recvGraph ); // just copy
3422  recvGraph1->set_context_id( *context_id );
3423  recvGraph1->SetReceivingAfterCoverage( idsFromProcs );
3424  // this par comm graph will need to use the coverage set
3425  // so we are for sure on intx pes (the receiver is the coupler mesh)
3426  assert( *pid_intx >= 0 );
3427  appData& dataIntx = context.appDatas[*pid_intx];
3428  EntityHandle cover_set = dataIntx.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh );
3429  recvGraph1->set_cover_set( cover_set );
3430  context.appDatas[*pid_migr].pgraph[*context_id] = recvGraph1; // possible memory leak if context_id is same
3431  }
3432  for( std::map< int, std::set< int > >::iterator mit = idsFromProcs.begin(); mit != idsFromProcs.end(); mit++ )
3433  {
3434  int procToSendTo = mit->first;
3435  std::set< int >& idSet = mit->second;
3436  for( std::set< int >::iterator sit = idSet.begin(); sit != idSet.end(); sit++ )
3437  {
3438  int n = TLcovIDs.get_n();
3439  TLcovIDs.reserve();
3440  TLcovIDs.vi_wr[2 * n] = procToSendTo; // send to processor
3441  TLcovIDs.vi_wr[2 * n + 1] = *sit; // global id of the cell needed from this sender
3442  }
3443  }
3444  }
3445 
3446  ProcConfig pc( global ); // proc config does the crystal router
3447  pc.crystal_router()->gs_transfer( 1, TLcovIDs,
3448  0 ); // communication towards component tasks, with what ids are needed
3449  // for each task from receiver
3450 
3451  // a test to know if we are on the sender tasks (original component, in this case, atmosphere)
3452  if( NULL != sendGraph )
3453  {
3454  // collect TLcovIDs tuple, will set in a local map/set, the ids that are sent to each
3455  // receiver task
3456  ParCommGraph* sendGraph1 = new ParCommGraph( *sendGraph ); // just copy
3457  sendGraph1->set_context_id( *context_id );
3458  context.appDatas[*pid_src].pgraph[*context_id] = sendGraph1;
3459  rval = sendGraph1->settle_send_graph( TLcovIDs );MB_CHK_ERR( rval );
3460  }
3461  return moab::MB_SUCCESS; // success
3462 }
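
// Illustrative usage sketch (not part of iMOAB.cpp): how a coupler driver might invoke
// iMOAB_CoverageGraph collectively on the joint communicator, after the intersection has been
// computed. The pid variables, component ids and context id below are hypothetical placeholders
// that would come from earlier iMOAB_RegisterApplication and migration calls.
//
//   MPI_Comm jointComm;                      // assumed: joint component-coupler communicator
//   int cmpAtmPID, cplAtmPID, cplAtmOcnPID;  // assumed: application ids from registration
//   int atmCompID = 5, atmCplCompID = 6, contextID = 6; // hypothetical component/context ids
//   ErrCode rc = iMOAB_CoverageGraph( &jointComm, &cmpAtmPID, &cplAtmPID, &cplAtmOcnPID,
//                                     &atmCompID, &atmCplCompID, &contextID );
//   // on success, the coverage ParCommGraph is stored under contextID in the pgraph maps of
//   // the source and migrated applications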
3463 
3464 ErrCode iMOAB_DumpCommGraph( iMOAB_AppID pid, int* context_id, int* is_sender, const iMOAB_String prefix )
3465 {
3466  assert( prefix && strlen( prefix ) );
3467 
3468  ParCommGraph* cgraph = context.appDatas[*pid].pgraph[*context_id];
3469  std::string prefix_str( prefix );
3470 
3471  if( NULL != cgraph )
3472  cgraph->dump_comm_information( prefix_str, *is_sender );
3473  else
3474  {
3475  std::cout << " cannot find ParCommGraph on app with pid " << *pid << " name: " << context.appDatas[*pid].name
3476  << " context: " << *context_id << "\n";
3477  }
3478  return moab::MB_SUCCESS;
3479 }
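
// Illustrative usage sketch (not part of iMOAB.cpp): dumping the communication graph stored for
// a given context; the pid, context id and prefix are hypothetical placeholders.
//
//   int isSender = 1;    // 1 on the sending side, 0 on the receiving side
//   int contextID = 6;   // hypothetical context id used when the graph was created
//   ErrCode rc = iMOAB_DumpCommGraph( &cmpAtmPID, &contextID, &isSender, "atm_to_cpl" );
//   // asks the stored ParCommGraph to dump its communication information using the given prefix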
3480 
3481 #endif // #ifdef MOAB_HAVE_TEMPESTREMAP
3482 
3483 #endif // #ifdef MOAB_HAVE_MPI
3484 
3485 #ifdef MOAB_HAVE_TEMPESTREMAP
3486 
3487 #ifdef MOAB_HAVE_NETCDF
3488 ErrCode iMOAB_LoadMappingWeightsFromFile(
3489  iMOAB_AppID pid_intersection,
3490  iMOAB_AppID pid_cpl,
3491  int* col_or_row,
3492  int* type,
3493  const iMOAB_String solution_weights_identifier, /* "scalar", "flux", "custom" */
3494  const iMOAB_String remap_weights_filename )
3495 {
3496  ErrorCode rval;
3497  bool row_based_partition = true;
3498  if( *col_or_row == 1 ) row_based_partition = false; // do a column based partition;
3499 
3500  // get the local degrees of freedom, from the pid_cpl and type of mesh
3501 
3502  // Get the source and target data and pcomm objects
3503  appData& data_intx = context.appDatas[*pid_intersection];
3504  TempestMapAppData& tdata = data_intx.tempestData;
3505 
3506  // Get the handle to the remapper object
3507  if( tdata.remapper == NULL )
3508  {
3509  // Now allocate and initialize the remapper object
3510 #ifdef MOAB_HAVE_MPI
3511  ParallelComm* pco = context.pcomms[*pid_intersection];
3512  tdata.remapper = new moab::TempestRemapper( context.MBI, pco );
3513 #else
3514  tdata.remapper = new moab::TempestRemapper( context.MBI );
3515 #endif
3516  tdata.remapper->meshValidate = true;
3517  tdata.remapper->constructEdgeMap = true;
3518  // Do not create new filesets; Use the sets from our respective applications
3519  tdata.remapper->initialize( false );
3520  tdata.remapper->GetMeshSet( moab::Remapper::OverlapMesh ) = data_intx.file_set;
3521  }
3522 
3523  // Setup loading of weights onto TempestOnlineMap
3524  // Set the context for the remapping weights computation
3525  tdata.weightMaps[std::string( solution_weights_identifier )] = new moab::TempestOnlineMap( tdata.remapper );
3526 
3527  // Now allocate and initialize the remapper object
3528  moab::TempestOnlineMap* weightMap = tdata.weightMaps[std::string( solution_weights_identifier )];
3529  assert( weightMap != NULL );
3530 
3531  if( *pid_cpl >= 0 ) // it means we are looking for how to distribute the degrees of freedom, new map reader
3532  {
3533  appData& data1 = context.appDatas[*pid_cpl];
3534  EntityHandle fset1 = data1.file_set; // this is source or target, depending on direction
3535 
3536  // tags of interest are either GLOBAL_DOFS or GLOBAL_ID
3537  Tag gdsTag;
3538 
3539  // find the values on first cell
3540  int lenTagType1 = 1;
3541  if( *type == 1 )
3542  {
3543  rval = context.MBI->tag_get_handle( "GLOBAL_DOFS", gdsTag );MB_CHK_ERR( rval );
3544  rval = context.MBI->tag_get_length( gdsTag, lenTagType1 );MB_CHK_ERR( rval ); // usually it is 16
3545  }
3546  Tag tagType2 = context.MBI->globalId_tag();
3547 
3548  std::vector< int > dofValues;
3549 
3550  // populate first tuple
3551  // will be filled with entities on the coupler side, from which we will get the DOFs, based on type
3552  Range ents_of_interest;
3553  int ndofPerEl = 1;
3554 
3555  if( *type == 1 )
3556  {
3557  assert( gdsTag );
3558  rval = context.MBI->get_entities_by_type( fset1, MBQUAD, ents_of_interest );MB_CHK_ERR( rval );
3559  dofValues.resize( ents_of_interest.size() * lenTagType1 );
3560  rval = context.MBI->tag_get_data( gdsTag, ents_of_interest, &dofValues[0] );MB_CHK_ERR( rval );
3561  ndofPerEl = lenTagType1;
3562  }
3563  else if( *type == 2 )
3564  {
3565  rval = context.MBI->get_entities_by_type( fset1, MBVERTEX, ents_of_interest );MB_CHK_ERR( rval );
3566  dofValues.resize( ents_of_interest.size() );
3567  rval = context.MBI->tag_get_data( tagType2, ents_of_interest, &dofValues[0] );MB_CHK_ERR( rval ); // just global ids
3568  }
3569  else if( *type == 3 ) // for FV meshes, just get the global id of cell
3570  {
3571  rval = context.MBI->get_entities_by_dimension( fset1, 2, ents_of_interest );MB_CHK_ERR( rval );
3572  dofValues.resize( ents_of_interest.size() );
3573  rval = context.MBI->tag_get_data( tagType2, ents_of_interest, &dofValues[0] );MB_CHK_ERR( rval ); // just global ids
3574  }
3575  else
3576  {
3577  MB_CHK_ERR( MB_FAILURE ); // we know only type 1 or 2 or 3
3578  }
3579  // pass ordered dofs, and unique
3580  std::vector< int > orderDofs( dofValues.begin(), dofValues.end() );
3581 
3582  std::sort( orderDofs.begin(), orderDofs.end() );
3583  orderDofs.erase( std::unique( orderDofs.begin(), orderDofs.end() ), orderDofs.end() ); // remove duplicates
3584 
3585  rval = weightMap->ReadParallelMap( remap_weights_filename, orderDofs, row_based_partition );MB_CHK_ERR( rval );
3586 
3587  // if we are on target mesh (row based partition)
3588  if( row_based_partition )
3589  {
3590  tdata.pid_dest = pid_cpl;
3591  tdata.remapper->SetMeshSet( Remapper::TargetMesh, fset1, &ents_of_interest );
3592  weightMap->SetDestinationNDofsPerElement( ndofPerEl );
3593  weightMap->set_row_dc_dofs( dofValues ); // will set row_dtoc_dofmap
3594  }
3595  else
3596  {
3597  tdata.pid_src = pid_cpl;
3598  tdata.remapper->SetMeshSet( Remapper::SourceMesh, fset1, &ents_of_interest );
3599  weightMap->SetSourceNDofsPerElement( ndofPerEl );
3600  weightMap->set_col_dc_dofs( dofValues ); // will set col_dtoc_dofmap
3601  }
3602  }
3603  else // old reader, trivial distribution by row
3604  {
3605  std::vector< int > tmp_owned_ids; // this will do a trivial row distribution
3606  rval = weightMap->ReadParallelMap( remap_weights_filename, tmp_owned_ids, row_based_partition );MB_CHK_ERR( rval );
3607  }
3608 
3609  return moab::MB_SUCCESS;
3610 }
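
// Illustrative usage sketch (not part of iMOAB.cpp): loading a previously generated map file onto
// the coupler, distributed by rows (target decomposition). The pids, identifier and file name are
// hypothetical placeholders.
//
//   int colOrRow = 0;   // 1 selects a column-based (source) partition; otherwise row-based (target)
//   int dofType  = 3;   // 3: FV meshes, DOFs are the cell GLOBAL_IDs
//   ErrCode rc = iMOAB_LoadMappingWeightsFromFile( &cplAtmOcnPID, &cplOcnPID, &colOrRow, &dofType,
//                                                  "scalar", "atm_ocn_map.nc" );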
3611 
3612 ErrCode iMOAB_WriteMappingWeightsToFile(
3613  iMOAB_AppID pid_intersection,
3614  const iMOAB_String solution_weights_identifier, /* "scalar", "flux", "custom" */
3615  const iMOAB_String remap_weights_filename )
3616 {
3617  assert( solution_weights_identifier && strlen( solution_weights_identifier ) );
3618  assert( remap_weights_filename && strlen( remap_weights_filename ) );
3619 
3620  ErrorCode rval;
3621 
3622  // Get the source and target data and pcomm objects
3623  appData& data_intx = context.appDatas[*pid_intersection];
3624  TempestMapAppData& tdata = data_intx.tempestData;
3625 
3626  // Get the handle to the remapper object
3627  assert( tdata.remapper != NULL );
3628 
3629  // Now get online weights object and ensure it is valid
3630  moab::TempestOnlineMap* weightMap = tdata.weightMaps[std::string( solution_weights_identifier )];
3631  assert( weightMap != NULL );
3632 
3633  std::string filename = std::string( remap_weights_filename );
3634 
3635  std::string metadataStr = data_intx.metadataMap[std::string( solution_weights_identifier )];
3636  std::map< std::string, std::string > attrMap;
3637  attrMap["title"] = "MOAB-TempestRemap Online Regridding Weight Generator";
3638  attrMap["normalization"] = "ovarea";
3639  attrMap["map_aPb"] = filename;
3640 
3641  const std::string delim = ";";
3642  size_t pos = 0, index = 0;
3643  std::vector< std::string > stringAttr( 3 );
3644  // use the find() function to get the position of each delimiter
3645  while( ( pos = metadataStr.find( delim ) ) != std::string::npos )
3646  {
3647  std::string token1 = metadataStr.substr( 0, pos ); // store the substring
3648  if( token1.size() > 0 || index == 0 ) stringAttr[index++] = token1;
3649  metadataStr.erase(
3650  0, pos + delim.length() ); /* erase the processed token (and its delimiter) and move on to the next one */
3651  }
3652  stringAttr[index] = metadataStr; // store the last token of the string.
3653  assert( index == 2 );
3654  attrMap["remap_options"] = stringAttr[0];
3655  attrMap["methodorder_b"] = stringAttr[1];
3656  attrMap["methodorder_a"] = stringAttr[2];
3657  attrMap["concave_a"] = "false"; // defaults
3658  attrMap["concave_b"] = "false"; // defaults
3659  attrMap["bubble"] = "true"; // defaults
3660  attrMap["MOABversion"] = std::string( MOAB_VERSION );
3661 
3662  // Write the map file to disk in parallel using either HDF5 or SCRIP interface
3663  rval = weightMap->WriteParallelMap( filename, attrMap );MB_CHK_ERR( rval );
3664 
3665  return moab::MB_SUCCESS;
3666 }
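
// Illustrative usage sketch (not part of iMOAB.cpp): writing the weights computed or loaded under
// the "scalar" identifier to disk; the pid and file name are hypothetical placeholders.
//
//   ErrCode rc = iMOAB_WriteMappingWeightsToFile( &cplAtmOcnPID, "scalar", "atm_ocn_map.nc" );
//   // the metadata recorded by iMOAB_ComputeScalarProjectionWeights (remap_options,
//   // methodorder_a/b) is written as global attributes of the map file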
3667 
3668 #ifdef MOAB_HAVE_MPI
3669 ErrCode iMOAB_MigrateMapMesh( iMOAB_AppID pid1,
3670  iMOAB_AppID pid2,
3671  iMOAB_AppID pid3,
3672  MPI_Comm* jointcomm,
3673  MPI_Group* groupA,
3674  MPI_Group* groupB,
3675  int* type,
3676  int* comp1,
3677  int* comp2,
3678  int* direction )
3679 {
3680  assert( jointcomm );
3681  assert( groupA );
3682  assert( groupB );
3683  bool is_fortran = false;
3684  if( *pid1 >= 0 ) is_fortran = context.appDatas[*pid1].is_fortran || is_fortran;
3685  if( *pid2 >= 0 ) is_fortran = context.appDatas[*pid2].is_fortran || is_fortran;
3686  if( *pid3 >= 0 ) is_fortran = context.appDatas[*pid3].is_fortran || is_fortran;
3687 
3688  MPI_Comm joint_communicator =
3689  ( is_fortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( jointcomm ) ) : *jointcomm );
3690  MPI_Group group_first = ( is_fortran ? MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( groupA ) ) : *groupA );
3691  MPI_Group group_second = ( is_fortran ? MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( groupB ) ) : *groupB );
3692 
3693  ErrorCode rval = MB_SUCCESS;
3694  int localRank = 0, numProcs = 1;
3695 
3696  // Get the local rank and number of processes in the joint communicator
3697  MPI_Comm_rank( joint_communicator, &localRank );
3698  MPI_Comm_size( joint_communicator, &numProcs );
3699 
3700  // Next instantiate the par comm graph
3701  // here we need to look at direction
3702  // this direction is good for atm source -> ocn target coupler example
3703  ParCommGraph* cgraph = NULL;
3704  if( *pid1 >= 0 ) cgraph = new ParCommGraph( joint_communicator, group_first, group_second, *comp1, *comp2 );
3705 
3706  ParCommGraph* cgraph_rev = NULL;
3707  if( *pid3 >= 0 ) cgraph_rev = new ParCommGraph( joint_communicator, group_second, group_first, *comp2, *comp1 );
3708 
3709  // we should search if we have another pcomm with the same comp ids in the list already
3710  // sort of check existing comm graphs in the map context.appDatas[*pid].pgraph
3711  if( *pid1 >= 0 ) context.appDatas[*pid1].pgraph[*comp2] = cgraph; // the context will be the other comp
3712  if( *pid3 >= 0 ) context.appDatas[*pid3].pgraph[*comp1] = cgraph_rev; // from 2 to 1
3713 
3714  // each model has a list of global ids that will need to be sent by gs to rendezvous the other
3715  // model on the joint_communicator
3716  TupleList TLcomp1;
3717  TLcomp1.initialize( 2, 0, 0, 0, 0 ); // to proc, marker
3718  TupleList TLcomp2;
3719  TLcomp2.initialize( 2, 0, 0, 0, 0 ); // to proc, marker
3720 
3721  // will push_back a new tuple, if needed
3722  TLcomp1.enableWriteAccess();
3723 
3724  // tags of interest are either GLOBAL_DOFS or GLOBAL_ID
3725  Tag gdsTag;
3726 
3727  // find the values on first cell
3728  int lenTagType1 = 1;
3729  if( *type == 1 )
3730  {
3731  rval = context.MBI->tag_get_handle( "GLOBAL_DOFS", gdsTag );MB_CHK_ERR( rval );
3732  rval = context.MBI->tag_get_length( gdsTag, lenTagType1 );MB_CHK_ERR( rval ); // usually it is 16
3733  }
3734  Tag tagType2 = context.MBI->globalId_tag();
3735 
3736  std::vector< int > valuesComp1;
3737 
3738  // populate first tuple
3739  Range ents_of_interest; // will be filled with entities on pid1, that need to be distributed,
3740  // rearranged in split_ranges map
3741  if( *pid1 >= 0 )
3742  {
3743  appData& data1 = context.appDatas[*pid1];
3744  EntityHandle fset1 = data1.file_set;
3745 
3746  if( *type == 1 )
3747  {
3748  assert( gdsTag );
3749  rval = context.MBI->get_entities_by_type( fset1, MBQUAD, ents_of_interest );MB_CHK_ERR( rval );
3750  valuesComp1.resize( ents_of_interest.size() * lenTagType1 );
3751  rval = context.MBI->tag_get_data( gdsTag, ents_of_interest, &valuesComp1[0] );MB_CHK_ERR( rval );
3752  }
3753  else if( *type == 2 )
3754  {
3755  rval = context.MBI->get_entities_by_type( fset1, MBVERTEX, ents_of_interest );MB_CHK_ERR( rval );
3756  valuesComp1.resize( ents_of_interest.size() );
3757  rval = context.MBI->tag_get_data( tagType2, ents_of_interest, &valuesComp1[0] );MB_CHK_ERR( rval ); // just global ids
3758  }
3759  else if( *type == 3 ) // for FV meshes, just get the global id of cell
3760  {
3761  rval = context.MBI->get_entities_by_dimension( fset1, 2, ents_of_interest );MB_CHK_ERR( rval );
3762  valuesComp1.resize( ents_of_interest.size() );
3763  rval = context.MBI->tag_get_data( tagType2, ents_of_interest, &valuesComp1[0] );MB_CHK_ERR( rval ); // just global ids
3764  }
3765  else
3766  {
3767  MB_CHK_ERR( MB_FAILURE ); // we know only type 1 or 2 or 3
3768  }
3769  // now fill the tuple list with info and markers
3770  // because we will send only the ids, order and compress the list
3771  std::set< int > uniq( valuesComp1.begin(), valuesComp1.end() );
3772  TLcomp1.resize( uniq.size() );
3773  for( std::set< int >::iterator sit = uniq.begin(); sit != uniq.end(); sit++ )
3774  {
3775  // to proc, marker, element local index, index in el
3776  int marker = *sit;
3777  int to_proc = marker % numProcs;
3778  int n = TLcomp1.get_n();
3779  TLcomp1.vi_wr[2 * n] = to_proc; // send to processor
3780  TLcomp1.vi_wr[2 * n + 1] = marker;
3781  TLcomp1.inc_n();
3782  }
3783  }
3784 
3785  ProcConfig pc( joint_communicator ); // proc config does the crystal router
3786  pc.crystal_router()->gs_transfer( 1, TLcomp1,
3787  0 ); // communication towards joint tasks, with markers
3788  // sort by value (key 1)
3789 #ifdef VERBOSE
3790  std::stringstream ff1;
3791  ff1 << "TLcomp1_" << localRank << ".txt";
3792  TLcomp1.print_to_file( ff1.str().c_str() ); // it will append!
3793 #endif
3794  moab::TupleList::buffer sort_buffer;
3795  sort_buffer.buffer_init( TLcomp1.get_n() );
3796  TLcomp1.sort( 1, &sort_buffer );
3797  sort_buffer.reset();
3798 #ifdef VERBOSE
3799  // after sorting
3800  TLcomp1.print_to_file( ff1.str().c_str() ); // it will append!
3801 #endif
3802  // do the same, for the other component, number2, with type2
3803  // start copy
3804  TLcomp2.enableWriteAccess();
3805 
3806  moab::TempestOnlineMap* weightMap = NULL; // declared outside the block below; it is meaningful only when
3807  // *pid2 >= 0, i.e. on the coupler PEs, where the read map and the coupler processes exist
3808  // populate second tuple with ids from read map: we need row_gdofmap and col_gdofmap
3809  std::vector< int > valuesComp2;
3810  if( *pid2 >= 0 ) // we are now on coupler, map side
3811  {
3812  appData& data2 = context.appDatas[*pid2];
3813  TempestMapAppData& tdata = data2.tempestData;
3814  // should be only one map, read from file
3815  assert( tdata.weightMaps.size() == 1 );
3816  // maybe we need to check it is the map we expect
3817  weightMap = tdata.weightMaps.begin()->second;
3818  // std::vector<int> ids_of_interest;
3819  // do a deep copy of the ids of interest: row ids or col ids, target or source direction
3820  if( *direction == 1 )
3821  {
3822  // we are interested in col ids, source
3823  // new method from moab::TempestOnlineMap
3824  rval = weightMap->fill_col_ids( valuesComp2 );MB_CHK_ERR( rval );
3825  }
3826  else if( *direction == 2 )
3827  {
3828  // we are interested in row ids, for target ids
3829  rval = weightMap->fill_row_ids( valuesComp2 );MB_CHK_ERR( rval );
3830  }
3831  //
3832 
3833  // now fill the tuple list with info and markers
3834  std::set< int > uniq( valuesComp2.begin(), valuesComp2.end() );
3835  TLcomp2.resize( uniq.size() );
3836  for( std::set< int >::iterator sit = uniq.begin(); sit != uniq.end(); sit++ )
3837  {
3838  // to proc, marker, element local index, index in el
3839  int marker = *sit;
3840  int to_proc = marker % numProcs;
3841  int n = TLcomp2.get_n();
3842  TLcomp2.vi_wr[2 * n] = to_proc; // send to processor
3843  TLcomp2.vi_wr[2 * n + 1] = marker;
3844  TLcomp2.inc_n();
3845  }
3846  }
3847  pc.crystal_router()->gs_transfer( 1, TLcomp2,
3848  0 ); // communication towards joint tasks, with markers
3849  // sort by value (key 1)
3850  // in the rendezvous approach, markers from both components meet on the rendezvous processor, rank = marker % numProcs
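//   e.g., with numProcs = 4 tasks in the joint communicator, a global DOF marker 10 coming from
//   either component is routed to joint rank 10 % 4 = 2, where the two sorted lists are matched
//   entry by entry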
3851 #ifdef VERBOSE
3852  std::stringstream ff2;
3853  ff2 << "TLcomp2_" << localRank << ".txt";
3854  TLcomp2.print_to_file( ff2.str().c_str() );
3855 #endif
3856  sort_buffer.buffer_reserve( TLcomp2.get_n() );
3857  TLcomp2.sort( 1, &sort_buffer );
3858  sort_buffer.reset();
3859  // end copy
3860 #ifdef VERBOSE
3861  TLcomp2.print_to_file( ff2.str().c_str() );
3862 #endif
3863  // need to send back the info, from the rendezvous point, for each of the values
3864  /* go over each value, on the local process in the joint communicator;
3865 
3866  we now have to send back the information needed for communication:
3867  loop in sync over both TLcomp1 and TLcomp2 on the local process, and
3868  build new tuple lists to send back; populate them at the same time,
3869  keyed on the marker value by which both lists are sorted
3870  */
3871 
3872  TupleList TLBackToComp1;
3873  TLBackToComp1.initialize( 3, 0, 0, 0, 0 ); // to proc, marker, from proc on comp2,
3874  TLBackToComp1.enableWriteAccess();
3875 
3876  TupleList TLBackToComp2;
3877  TLBackToComp2.initialize( 3, 0, 0, 0, 0 ); // to proc, marker, from proc,
3878  TLBackToComp2.enableWriteAccess();
3879 
3880  int n1 = TLcomp1.get_n();
3881  int n2 = TLcomp2.get_n();
3882 
3883  int indexInTLComp1 = 0;
3884  int indexInTLComp2 = 0; // advance both, according to the marker
3885  if( n1 > 0 && n2 > 0 )
3886  {
3887 
3888  while( indexInTLComp1 < n1 && indexInTLComp2 < n2 ) // if any is over, we are done
3889  {
3890  int currentValue1 = TLcomp1.vi_rd[2 * indexInTLComp1 + 1];
3891  int currentValue2 = TLcomp2.vi_rd[2 * indexInTLComp2 + 1];
3892  if( currentValue1 < currentValue2 )
3893  {
3894  // we have a big problem; basically, we are saying that
3895  // dof currentValue is on one model and not on the other
3896  // std::cout << " currentValue1:" << currentValue1 << " missing in comp2" << "\n";
3897  indexInTLComp1++;
3898  continue;
3899  }
3900  if( currentValue1 > currentValue2 )
3901  {
3902  // std::cout << " currentValue2:" << currentValue2 << " missing in comp1" << "\n";
3903  indexInTLComp2++;
3904  continue;
3905  }
3906  int size1 = 1;
3907  int size2 = 1;
3908  while( indexInTLComp1 + size1 < n1 && currentValue1 == TLcomp1.vi_rd[2 * ( indexInTLComp1 + size1 ) + 1] )
3909  size1++;
3910  while( indexInTLComp2 + size2 < n2 && currentValue2 == TLcomp2.vi_rd[2 * ( indexInTLComp2 + size2 ) + 1] )
3911  size2++;
3912  // must be found in both lists, find the start and end indices
3913  for( int i1 = 0; i1 < size1; i1++ )
3914  {
3915  for( int i2 = 0; i2 < size2; i2++ )
3916  {
3917  // send the info back to components
3918  int n = TLBackToComp1.get_n();
3919  TLBackToComp1.reserve();
3920  TLBackToComp1.vi_wr[3 * n] =
3921  TLcomp1.vi_rd[2 * ( indexInTLComp1 + i1 )]; // send back to the proc marker
3922  // came from, info from comp2
3923  TLBackToComp1.vi_wr[3 * n + 1] = currentValue1; // initial value (resend?)
3924  TLBackToComp1.vi_wr[3 * n + 2] = TLcomp2.vi_rd[2 * ( indexInTLComp2 + i2 )]; // from proc on comp2
3925  n = TLBackToComp2.get_n();
3926  TLBackToComp2.reserve();
3927  TLBackToComp2.vi_wr[3 * n] =
3928  TLcomp2.vi_rd[2 * ( indexInTLComp2 + i2 )]; // send back info to original
3929  TLBackToComp2.vi_wr[3 * n + 1] = currentValue1; // initial value (resend?)
3930  TLBackToComp2.vi_wr[3 * n + 2] = TLcomp1.vi_rd[2 * ( indexInTLComp1 + i1 )]; // from proc on comp1
3931  // what if there are repeated markers in TLcomp2? increase just index2
3932  }
3933  }
3934  indexInTLComp1 += size1;
3935  indexInTLComp2 += size2;
3936  }
3937  }
3938  pc.crystal_router()->gs_transfer( 1, TLBackToComp1, 0 ); // communication towards original tasks, with info about
3939  pc.crystal_router()->gs_transfer( 1, TLBackToComp2, 0 );
3940 
3941  TupleList TLv; // vertices
3942  TupleList TLc; // cells if needed (not type 2)
3943 
3944  if( *pid1 >= 0 )
3945  {
3946  // we are on original comp 1 tasks
3947  // before ordering
3948  // now for each value in TLBackToComp1.vi_rd[3*i+1], on current proc, we know the
3949  // processors it communicates with
3950 #ifdef VERBOSE
3951  std::stringstream f1;
3952  f1 << "TLBack1_" << localRank << ".txt";
3953  TLBackToComp1.print_to_file( f1.str().c_str() );
3954 #endif
3955  // so we are now on pid1; we now know, for each marker, where it has to go
3956  // add a new method to ParCommGraph, to set up the split_ranges and involved_IDs_map
3957  rval = cgraph->set_split_ranges( *comp1, TLBackToComp1, valuesComp1, lenTagType1, ents_of_interest, *type );MB_CHK_ERR( rval );
3958  // we can just send vertices and elements, with crystal routers;
3959  // on the receiving end, make sure they are not duplicated, by looking at the global id
3960  // if *type is 1, also send global_dofs tag in the element tuple
3961  rval = cgraph->form_tuples_to_migrate_mesh( context.MBI, TLv, TLc, *type, lenTagType1 );MB_CHK_ERR( rval );
3962  }
3963  else
3964  {
3965  TLv.initialize( 2, 0, 0, 3, 0 ); // no vertices here, for sure
3966  TLv.enableWriteAccess(); // to be able to receive stuff, even if nothing is here yet, on this task
3967  if( *type != 2 ) // for point cloud, we do not need to initialize TLc (for cells)
3968  {
3969  // we still need to initialize the tuples with the right size, as in form_tuples_to_migrate_mesh
3970  int size_tuple = 2 + ( ( *type != 1 ) ? 0 : lenTagType1 ) + 1 +
3971  10; // 10 is the max number of vertices in cell; kind of arbitrary
3972  TLc.initialize( size_tuple, 0, 0, 0, 0 );
3973  TLc.enableWriteAccess();
3974  }
3975  }
3976  pc.crystal_router()->gs_transfer( 1, TLv, 0 ); // communication towards coupler tasks, with mesh vertices
3977  if( *type != 2 ) pc.crystal_router()->gs_transfer( 1, TLc, 0 ); // those are cells
3978 
3979  if( *pid3 >= 0 ) // will receive the mesh, on coupler pes!
3980  {
3981  appData& data3 = context.appDatas[*pid3];
3982  EntityHandle fset3 = data3.file_set;
3983  Range primary_ents3; // vertices for type 2, cells of dim 2 for type 1 or 3
3984  std::vector< int > values_entities; // will be the size of primary_ents3 * lenTagType1
3985  rval = cgraph_rev->form_mesh_from_tuples( context.MBI, TLv, TLc, *type, lenTagType1, fset3, primary_ents3,
3986  values_entities );MB_CHK_ERR( rval );
3987  iMOAB_UpdateMeshInfo( pid3 );
3988  int ndofPerEl = 1;
3989  if( 1 == *type ) ndofPerEl = (int)( sqrt( lenTagType1 ) );
3990  // because we are on the coupler, we know that the read map pid2 exists
3991  assert( *pid2 >= 0 );
3992  appData& dataIntx = context.appDatas[*pid2];
3993  TempestMapAppData& tdata = dataIntx.tempestData;
3994 
3995  // if we are on source coverage, direction 1, we can set covering mesh, covering cells
3996  if( 1 == *direction )
3997  {
3998  tdata.pid_src = pid3;
3999  tdata.remapper->SetMeshSet( Remapper::CoveringMesh, fset3, &primary_ents3 );
4000  weightMap->SetSourceNDofsPerElement( ndofPerEl );
4001  weightMap->set_col_dc_dofs( values_entities ); // will set col_dtoc_dofmap
4002  }
4003  // if we are on target, we can set the target cells
4004  else
4005  {
4006  tdata.pid_dest = pid3;
4007  tdata.remapper->SetMeshSet( Remapper::TargetMesh, fset3, &primary_ents3 );
4008  weightMap->SetDestinationNDofsPerElement( ndofPerEl );
4009  weightMap->set_row_dc_dofs( values_entities ); // will set row_dtoc_dofmap
4010  }
4011  }
4012 
4013  return moab::MB_SUCCESS;
4014 }
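
// Illustrative usage sketch (not part of iMOAB.cpp): migrating the source component mesh to the
// coupler according to the column distribution of a map already loaded with
// iMOAB_LoadMappingWeightsFromFile. All pids, MPI groups and ids below are hypothetical.
//
//   int dofType = 3, atmCompID = 5, atmCplCompID = 6;
//   int direction = 1;   // 1: migrate the source (column) side, 2: the target (row) side
//   ErrCode rc = iMOAB_MigrateMapMesh( &cmpAtmPID, &cplAtmOcnPID, &cplAtmPID, &jointComm,
//                                      &atmGroup, &cplGroup, &dofType, &atmCompID,
//                                      &atmCplCompID, &direction );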
4015 
4016 #endif // #ifdef MOAB_HAVE_MPI
4017 
4018 #endif // #ifdef MOAB_HAVE_NETCDF
4019 
4020 #define USE_API
4021 static ErrCode ComputeSphereRadius( iMOAB_AppID pid, double* radius )
4022 {
4023  ErrorCode rval;
4024  moab::CartVect pos;
4025 
4026  Range& verts = context.appDatas[*pid].all_verts;
4027  moab::EntityHandle firstVertex = ( verts[0] );
4028 
4029  // coordinate data
4030  rval = context.MBI->get_coords( &( firstVertex ), 1, (double*)&( pos[0] ) );MB_CHK_ERR( rval );
4031 
4032  // compute the distance from origin
4033  // TODO: we could do this in a loop to verify if the pid represents a spherical mesh
4034  *radius = pos.length();
4035  return moab::MB_SUCCESS;
4036 }
4037 
4038 ErrCode iMOAB_SetGhostLayers( iMOAB_AppID pid, int* nghost_layers )
4039 {
4040  appData& data = context.appDatas[*pid];
4041 
4042  // Set the number of ghost layers
4043  data.num_ghost_layers = *nghost_layers; // number of ghost layers
4044 
4045  return moab::MB_SUCCESS;
4046 }
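
// Illustrative usage sketch (not part of iMOAB.cpp): requesting one layer of ghost cells for an
// application before computing intersections; the pid is a hypothetical placeholder.
//
//   int nghosts = 1;
//   ErrCode rc = iMOAB_SetGhostLayers( &cmpAtmPID, &nghosts );
//   // the value is only recorded here; the ghost entities themselves are generated later, e.g.
//   // in iMOAB_ComputeMeshIntersectionOnSphere via TempestRemapper::GhostLayers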
4047 
4048 ErrCode iMOAB_ComputeMeshIntersectionOnSphere( iMOAB_AppID pid_src, iMOAB_AppID pid_tgt, iMOAB_AppID pid_intx )
4049 {
4050  // Default constant parameters
4051  constexpr bool validate = true;
4052  constexpr bool enforceConvexity = true;
4053  constexpr bool use_kdtree_search = true;
4054  constexpr bool gnomonic = true;
4055  constexpr double defaultradius = 1.0;
4056  constexpr double boxeps = 1.e-10;
4057 
4058  // Other constant parameters
4059  const double epsrel = ReferenceTolerance; // ReferenceTolerance is defined in Defines.h in tempestremap source ;
4060  double radius_source = 1.0;
4061  double radius_target = 1.0;
4062 
4063  // Error code definitions
4064  ErrorCode rval;
4065  ErrCode ierr;
4066 
4067  // Get the source and target data and pcomm objects
4068  appData& data_src = context.appDatas[*pid_src];
4069  appData& data_tgt = context.appDatas[*pid_tgt];
4070  appData& data_intx = context.appDatas[*pid_intx];
4071 #ifdef MOAB_HAVE_MPI
4072  ParallelComm* pco_intx = context.pcomms[*pid_intx];
4073 #endif
4074 
4075  // Mesh intersection has already been computed; Return early.
4076  TempestMapAppData& tdata = data_intx.tempestData;
4077  if( tdata.remapper != NULL ) return moab::MB_SUCCESS; // nothing to do
4078 
4079  bool is_parallel = false, is_root = true;
4080  int rank = 0;
4081 #ifdef MOAB_HAVE_MPI
4082  if( pco_intx )
4083  {
4084  rank = pco_intx->rank();
4085  is_parallel = ( pco_intx->size() > 1 );
4086  is_root = ( rank == 0 );
4087  rval = pco_intx->check_all_shared_handles();MB_CHK_ERR( rval );
4088  }
4089 #endif
4090 
4091  moab::DebugOutput outputFormatter( std::cout, rank, 0 );
4092  outputFormatter.set_prefix( "[iMOAB_ComputeMeshIntersectionOnSphere]: " );
4093 
4094  ierr = iMOAB_UpdateMeshInfo( pid_src );MB_CHK_ERR( ierr );
4095  ierr = iMOAB_UpdateMeshInfo( pid_tgt );MB_CHK_ERR( ierr );
4096 
4097  // Rescale the radius of both to compute the intersection
4098  rval = ComputeSphereRadius( pid_src, &radius_source );MB_CHK_ERR( rval );
4099  rval = ComputeSphereRadius( pid_tgt, &radius_target );MB_CHK_ERR( rval );
4100 #ifdef VERBOSE
4101  if( is_root )
4102  outputFormatter.printf( 0, "Radius of spheres: source = %12.14f, and target = %12.14f\n", radius_source,
4103  radius_target );
4104 #endif
4105 
4106  /* Let us make sure that the radii of the source and target meshes match. If not, rescale now and
4107  * unscale later. */
4108  if( fabs( radius_source - radius_target ) > 1e-10 )
4109  { /* the radii are different */
4110  rval = IntxUtils::ScaleToRadius( context.MBI, data_src.file_set, defaultradius );MB_CHK_ERR( rval );
4111  rval = IntxUtils::ScaleToRadius( context.MBI, data_tgt.file_set, defaultradius );MB_CHK_ERR( rval );
4112  }
4113 
4114  // Default area_method = lHuiller; Options: Girard, GaussQuadrature (if TR is available)
4115 #ifdef MOAB_HAVE_TEMPESTREMAP
4116  IntxAreaUtils areaAdaptor( IntxAreaUtils::GaussQuadrature );
4117 #else
4118  IntxAreaUtils areaAdaptor( IntxAreaUtils::lHuiller );
4119 #endif
4120 
4121  // print verbosely about the problem setting
4122  rval = areaAdaptor.positive_orientation( context.MBI, data_src.file_set, defaultradius /*radius_source*/ );MB_CHK_ERR( rval );
4123  rval = areaAdaptor.positive_orientation( context.MBI, data_tgt.file_set, defaultradius /*radius_target*/ );MB_CHK_ERR( rval );
4124 #ifdef VERBOSE
4125  {
4126  moab::Range rintxverts, rintxelems;
4127  rval = context.MBI->get_entities_by_dimension( data_src.file_set, 0, rintxverts );MB_CHK_ERR( rval );
4128  rval = context.MBI->get_entities_by_dimension( data_src.file_set, data_src.dimension, rintxelems );MB_CHK_ERR( rval );
4129  rval = IntxUtils::fix_degenerate_quads( context.MBI, data_src.file_set );MB_CHK_ERR( rval );
4130  if( enforceConvexity )
4131  {
4132  rval = moab::IntxUtils::enforce_convexity( context.MBI, data_src.file_set, rank );MB_CHK_ERR( rval );
4133  }
4134  rval = areaAdaptor.positive_orientation( context.MBI, data_src.file_set, defaultradius );MB_CHK_ERR( rval );
4135 
4136  moab::Range bintxverts, bintxelems;
4137  rval = context.MBI->get_entities_by_dimension( data_tgt.file_set, 0, bintxverts );MB_CHK_ERR( rval );
4138  rval = context.MBI->get_entities_by_dimension( data_tgt.file_set, data_tgt.dimension, bintxelems );MB_CHK_ERR( rval );
4139  rval = IntxUtils::fix_degenerate_quads( context.MBI, data_tgt.file_set );MB_CHK_ERR( rval );
4140  if( enforceConvexity )
4141  {
4142  rval = moab::IntxUtils::enforce_convexity( context.MBI, data_tgt.file_set, rank );MB_CHK_ERR( rval );
4143  }
4144  rval = areaAdaptor.positive_orientation( context.MBI, data_tgt.file_set, defaultradius );MB_CHK_ERR( rval );
4145 
4146  if( is_root )
4147  {
4148  outputFormatter.printf( 0, "The source set contains %d vertices and %d elements \n", rintxverts.size(),
4149  rintxelems.size() );
4150  outputFormatter.printf( 0, "The target set contains %d vertices and %d elements \n", bintxverts.size(),
4151  bintxelems.size() );
4152  }
4153  // use_kdtree_search = ( srctgt_areas_glb[0] < srctgt_areas_glb[1] );
4154  // advancing front method will fail unless source mesh has no holes (area = 4 * pi) on unit sphere
4155  // use_kdtree_search = true;
4156  }
4157 #endif
4158 
4159  data_intx.dimension = data_tgt.dimension;
4160  // set the context for the source and destination applications
4161  tdata.pid_src = pid_src;
4162  tdata.pid_dest = pid_tgt;
4163 
4164  // Now allocate and initialize the remapper object
4165 #ifdef MOAB_HAVE_MPI
4166  tdata.remapper = new moab::TempestRemapper( context.MBI, pco_intx );
4167 #else
4168  tdata.remapper = new moab::TempestRemapper( context.MBI );
4169 #endif
4170  tdata.remapper->meshValidate = validate;
4171  tdata.remapper->constructEdgeMap = true;
4172 
4173  // Do not create new filesets; Use the sets from our respective applications
4174  tdata.remapper->initialize( false );
4175  tdata.remapper->GetMeshSet( moab::Remapper::SourceMesh ) = data_src.file_set;
4176  tdata.remapper->GetMeshSet( moab::Remapper::TargetMesh ) = data_tgt.file_set;
4177  tdata.remapper->GetMeshSet( moab::Remapper::OverlapMesh ) = data_intx.file_set;
4178 
4179 #ifdef MOAB_HAVE_MPI
4180  if( is_parallel && data_src.num_ghost_layers )
4181  {
4182  if( is_root )
4183  outputFormatter.printf( 0, "Generating %d ghost layers for the source mesh\n", data_src.num_ghost_layers );
4184  moab::EntityHandle augmentedSourceSet;
4185  // get (order - 1) ghost layers; in practice the number should be decided by the mesh:
4186  // if the mesh has holes, more layers could be needed
4187  rval = tdata.remapper->GhostLayers( data_src.file_set, data_src.num_ghost_layers, augmentedSourceSet );MB_CHK_ERR( rval );
4188  tdata.remapper->SetMeshSet( moab::Remapper::SourceMeshWithGhosts, augmentedSourceSet );
4189  }
4190  else
4191  {
4192  // this one needs to be initialized too with source set
4193  tdata.remapper->GetMeshSet( moab::Remapper::SourceMeshWithGhosts ) = data_src.file_set;
4194  }
4195 #else
4196  // this one needs to be initialized too with source set
4197  tdata.remapper->GetMeshSet( moab::Remapper::SourceMeshWithGhosts ) = data_src.file_set;
4198 #endif
4199 
4200 #ifdef MOAB_HAVE_MPI
4201  if( is_parallel && data_tgt.num_ghost_layers )
4202  {
4203  if( is_root )
4204  outputFormatter.printf( 0, "Generating %d ghost layers for the target mesh\n", data_tgt.num_ghost_layers );
4205  moab::EntityHandle augmentedTargetSet;
4206  // get (order - 1) ghost layers; in practice the number should be decided by the mesh:
4207  // if the mesh has holes, more layers could be needed
4208  rval = tdata.remapper->GhostLayers( data_tgt.file_set, data_tgt.num_ghost_layers, augmentedTargetSet );MB_CHK_ERR( rval );
4209  tdata.remapper->SetMeshSet( moab::Remapper::TargetMeshWithGhosts, augmentedTargetSet );
4210  }
4211  else
4212  {
4213  // this one needs to be initialized too, with the target set
4214  tdata.remapper->GetMeshSet( moab::Remapper::TargetMeshWithGhosts ) = data_tgt.file_set;
4215  }
4216 #else
4217  // this one needs to be initialized too, with the target set
4218  tdata.remapper->GetMeshSet( moab::Remapper::TargetMeshWithGhosts ) = data_tgt.file_set;
4219 #endif
4220 
4221  rval = tdata.remapper->ConvertMeshToTempest( moab::Remapper::SourceMesh );MB_CHK_ERR( rval );
4222  rval = tdata.remapper->ConvertMeshToTempest( moab::Remapper::TargetMesh );MB_CHK_ERR( rval );
4223 
4224  // First, compute the covering source set.
4225  rval = tdata.remapper->ConstructCoveringSet( epsrel, 1.0, 1.0, boxeps, false, gnomonic, data_src.num_ghost_layers );MB_CHK_ERR( rval );
4226 
4227  // Next, compute intersections with MOAB.
4228  // for bilinear, this is an overkill
4229  rval = tdata.remapper->ComputeOverlapMesh( use_kdtree_search, false, data_src.num_ghost_layers );MB_CHK_ERR( rval );
4230 
4231  // Mapping computation done
4232  if( validate )
4233  {
4234  double local_areas[3], global_areas[3];
4235  local_areas[0] = areaAdaptor.area_on_sphere( context.MBI, data_src.file_set, defaultradius /*radius_source*/ );
4236  local_areas[1] = areaAdaptor.area_on_sphere( context.MBI, data_tgt.file_set, defaultradius /*radius_target*/ );
4237  local_areas[2] = areaAdaptor.area_on_sphere( context.MBI, data_intx.file_set, radius_source );
4238 #ifdef MOAB_HAVE_MPI
4239  MPI_Reduce( &local_areas[0], &global_areas[0], 3, MPI_DOUBLE, MPI_SUM, 0, pco_intx->comm() );
4240 #else
4241  global_areas[0] = local_areas[0];
4242  global_areas[1] = local_areas[1];
4243  global_areas[2] = local_areas[2];
4244 #endif
4245 
4246  if( is_root )
4247  {
4248  outputFormatter.printf( 0,
4249  "initial area: source mesh = %12.14f, target mesh = %12.14f, "
4250  "overlap mesh = %12.14f\n",
4251  global_areas[0], global_areas[1], global_areas[2] );
4252  outputFormatter.printf( 0, " relative error w.r.t source = %12.14e, and target = %12.14e\n",
4253  fabs( global_areas[0] - global_areas[2] ) / global_areas[0],
4254  fabs( global_areas[1] - global_areas[2] ) / global_areas[1] );
4255  }
4256  }
4257 
4258  return moab::MB_SUCCESS;
4259 }
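
// Illustrative usage sketch (not part of iMOAB.cpp): computing the overlap mesh between a source
// and a target application registered on the coupler; the pids are hypothetical placeholders.
//
//   ErrCode rc = iMOAB_ComputeMeshIntersectionOnSphere( &cplAtmPID, &cplOcnPID, &cplAtmOcnPID );
//   // on return, the intersection application owns a TempestRemapper whose Source, Target and
//   // Overlap mesh sets alias the file sets of the three applications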
4260 
4261 ErrCode iMOAB_ComputePointDoFIntersection( iMOAB_AppID pid_src, iMOAB_AppID pid_tgt, iMOAB_AppID pid_intx )
4262 {
4263  ErrorCode rval;
4264  ErrCode ierr;
4265 
4266  double radius_source = 1.0;
4267  double radius_target = 1.0;
4268  const double epsrel = ReferenceTolerance; // ReferenceTolerance is defined in Defines.h in tempestremap source ;
4269  const double boxeps = 1.e-8;
4270 
4271  // Get the source and target data and pcomm objects
4272  appData& data_src = context.appDatas[*pid_src];
4273  appData& data_tgt = context.appDatas[*pid_tgt];
4274  appData& data_intx = context.appDatas[*pid_intx];
4275 #ifdef MOAB_HAVE_MPI
4276  ParallelComm* pco_intx = context.pcomms[*pid_intx];
4277 #endif
4278 
4279  // Mesh intersection has already been computed; Return early.
4280  TempestMapAppData& tdata = data_intx.tempestData;
4281  if( tdata.remapper != NULL ) return moab::MB_SUCCESS; // nothing to do
4282 
4283 #ifdef MOAB_HAVE_MPI
4284  if( pco_intx )
4285  {
4286  rval = pco_intx->check_all_shared_handles();MB_CHK_ERR( rval );
4287  }
4288 #endif
4289 
4290  ierr = iMOAB_UpdateMeshInfo( pid_src );MB_CHK_ERR( ierr );
4291  ierr = iMOAB_UpdateMeshInfo( pid_tgt );MB_CHK_ERR( ierr );
4292 
4293  // Rescale the radius of both to compute the intersection
4294  ComputeSphereRadius( pid_src, &radius_source );
4295  ComputeSphereRadius( pid_tgt, &radius_target );
4296 
4297  IntxAreaUtils areaAdaptor;
4298  // print verbosely about the problem setting
4299  {
4300  moab::Range rintxverts, rintxelems;
4301  rval = context.MBI->get_entities_by_dimension( data_src.file_set, 0, rintxverts );MB_CHK_ERR( rval );
4302  rval = context.MBI->get_entities_by_dimension( data_src.file_set, 2, rintxelems );MB_CHK_ERR( rval );
4303  rval = IntxUtils::fix_degenerate_quads( context.MBI, data_src.file_set );MB_CHK_ERR( rval );
4304  rval = areaAdaptor.positive_orientation( context.MBI, data_src.file_set, radius_source );MB_CHK_ERR( rval );
4305 #ifdef VERBOSE
4306  std::cout << "The red set contains " << rintxverts.size() << " vertices and " << rintxelems.size()
4307  << " elements \n";
4308 #endif
4309 
4310  moab::Range bintxverts, bintxelems;
4311  rval = context.MBI->get_entities_by_dimension( data_tgt.file_set, 0, bintxverts );MB_CHK_ERR( rval );
4312  rval = context.MBI->get_entities_by_dimension( data_tgt.file_set, 2, bintxelems );MB_CHK_ERR( rval );
4313  rval = IntxUtils::fix_degenerate_quads( context.MBI, data_tgt.file_set );MB_CHK_ERR( rval );
4314  rval = areaAdaptor.positive_orientation( context.MBI, data_tgt.file_set, radius_target );MB_CHK_ERR( rval );
4315 #ifdef VERBOSE
4316  std::cout << "The blue set contains " << bintxverts.size() << " vertices and " << bintxelems.size()
4317  << " elements \n";
4318 #endif
4319  }
4320 
4321  data_intx.dimension = data_tgt.dimension;
4322  // set the context for the source and destination applications
4324  tdata.pid_src = pid_src;
4325  tdata.pid_dest = pid_tgt;
4326  data_intx.point_cloud = ( data_src.point_cloud || data_tgt.point_cloud );
4327  assert( data_intx.point_cloud == true );
4328 
4329  // Now allocate and initialize the remapper object
4330 #ifdef MOAB_HAVE_MPI
4331  tdata.remapper = new moab::TempestRemapper( context.MBI, pco_intx );
4332 #else
4333  tdata.remapper = new moab::TempestRemapper( context.MBI );
4334 #endif
4335  tdata.remapper->meshValidate = true;
4336  tdata.remapper->constructEdgeMap = true;
4337 
4338  // Do not create new filesets; Use the sets from our respective applications
4339  tdata.remapper->initialize( false );
4340  tdata.remapper->GetMeshSet( moab::Remapper::SourceMesh ) = data_src.file_set;
4341  tdata.remapper->GetMeshSet( moab::Remapper::TargetMesh ) = data_tgt.file_set;
4342  tdata.remapper->GetMeshSet( moab::Remapper::OverlapMesh ) = data_intx.file_set;
4343 
4344  // needed in parallel
4345  tdata.remapper->GetMeshSet( moab::Remapper::SourceMeshWithGhosts ) = data_src.file_set;
4346 
4347  /* Let us make sure that the radii of the source and target meshes match. If not, rescale now and
4348  * unscale later. */
4349  if( fabs( radius_source - radius_target ) > 1e-10 )
4350  { /* the radii are different */
4351  rval = IntxUtils::ScaleToRadius( context.MBI, data_src.file_set, 1.0 );MB_CHK_ERR( rval );
4352  rval = IntxUtils::ScaleToRadius( context.MBI, data_tgt.file_set, 1.0 );MB_CHK_ERR( rval );
4353  }
4354 
4355  rval = tdata.remapper->ConvertMeshToTempest( moab::Remapper::SourceMesh );MB_CHK_ERR( rval );
4356  rval = tdata.remapper->ConvertMeshToTempest( moab::Remapper::TargetMesh );MB_CHK_ERR( rval );
4357 
4358  // First, compute the covering source set.
4359  rval = tdata.remapper->ConstructCoveringSet( epsrel, 1.0, 1.0, boxeps, false );MB_CHK_ERR( rval );
4360 
4361 #ifdef MOAB_HAVE_MPI
4362  /* VSM: This context should be set on the data_src but it would overwrite the source
4363  covering set context in case it is coupled to another APP as well.
4364  This needs a redesign. */
4365  // data_intx.covering_set = tdata.remapper->GetCoveringSet();
4366  // data_src.covering_set = tdata.remapper->GetCoveringSet();
4367 #endif
4368 
4369  // Now let us re-convert the MOAB mesh back to Tempest representation
4370  rval = tdata.remapper->ComputeGlobalLocalMaps();MB_CHK_ERR( rval );
4371 
4372  return moab::MB_SUCCESS;
4373 }
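
// Illustrative usage sketch (not part of iMOAB.cpp): the point-cloud variant, used when at least
// one of the applications is a point cloud (no cells); the pids are hypothetical placeholders.
//
//   ErrCode rc = iMOAB_ComputePointDoFIntersection( &cplLndPID, &cplOcnPID, &cplLndOcnPID );
//   // no overlap cells are computed here; only the covering set and the global/local maps needed
//   // for a later DOF-based projection are set up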
4374 
4375 ErrCode iMOAB_ComputeScalarProjectionWeights(
4376  iMOAB_AppID pid_intx,
4377  const iMOAB_String solution_weights_identifier, /* "scalar", "flux", "custom" */
4378  const iMOAB_String disc_method_source,
4379  int* disc_order_source,
4380  const iMOAB_String disc_method_target,
4381  int* disc_order_target,
4382  const iMOAB_String fv_method,
4383  int* fNoBubble,
4384  int* fMonotoneTypeID,
4385  int* fVolumetric,
4386  int* fInverseDistanceMap,
4387  int* fNoConservation,
4388  int* fValidate,
4389  const iMOAB_String source_solution_tag_dof_name,
4390  const iMOAB_String target_solution_tag_dof_name )
4391 {
4392  moab::ErrorCode rval;
4393 
4394  assert( disc_order_source && disc_order_target && *disc_order_source > 0 && *disc_order_target > 0 );
4395  assert( solution_weights_identifier && strlen( solution_weights_identifier ) );
4396  assert( disc_method_source && strlen( disc_method_source ) );
4397  assert( disc_method_target && strlen( disc_method_target ) );
4398  assert( source_solution_tag_dof_name && strlen( source_solution_tag_dof_name ) );
4399  assert( target_solution_tag_dof_name && strlen( target_solution_tag_dof_name ) );
4400 
4401  // Get the source and target data and pcomm objects
4402  appData& data_intx = context.appDatas[*pid_intx];
4403  TempestMapAppData& tdata = data_intx.tempestData;
4404 
4405  // Setup computation of weights
4406  // Set the context for the remapping weights computation
4407  tdata.weightMaps[std::string( solution_weights_identifier )] = new moab::TempestOnlineMap( tdata.remapper );
4408 
4409  // Call to generate the remap weights with the tempest meshes
4410  moab::TempestOnlineMap* weightMap = tdata.weightMaps[std::string( solution_weights_identifier )];
4411  assert( weightMap != NULL );
4412 
4413  GenerateOfflineMapAlgorithmOptions mapOptions;
4414  mapOptions.nPin = *disc_order_source;
4415  mapOptions.nPout = *disc_order_target;
4416  mapOptions.fSourceConcave = false;
4417  mapOptions.fTargetConcave = false;
4418 
4419  mapOptions.strMethod = "";
4420  if( fv_method ) mapOptions.strMethod += std::string( fv_method ) + ";";
4421  if( fMonotoneTypeID )
4422  {
4423  switch( *fMonotoneTypeID )
4424  {
4425  case 0:
4426  mapOptions.fMonotone = false;
4427  break;
4428  case 3:
4429  mapOptions.strMethod += "mono3;";
4430  break;
4431  case 2:
4432  mapOptions.strMethod += "mono2;";
4433  break;
4434  default:
4435  mapOptions.fMonotone = true;
4436  }
4437  }
4438  else
4439  mapOptions.fMonotone = false;
4440 
4441  mapOptions.fNoBubble = ( fNoBubble ? *fNoBubble : false );
4442  mapOptions.fNoConservation = ( fNoConservation ? *fNoConservation > 0 : false );
4443  mapOptions.fNoCorrectAreas = false;
4444  // mapOptions.fNoCheck = !( fValidate ? *fValidate : true );
4445  mapOptions.fNoCheck = true;
4446  if( fVolumetric && *fVolumetric ) mapOptions.strMethod += "volumetric;";
4447  if( fInverseDistanceMap && *fInverseDistanceMap ) mapOptions.strMethod += "invdist;";
4448 
4449  std::string metadataStr = mapOptions.strMethod + ";" + std::string( disc_method_source ) + ":" +
4450  std::to_string( *disc_order_source ) + ":" + std::string( source_solution_tag_dof_name ) +
4451  ";" + std::string( disc_method_target ) + ":" + std::to_string( *disc_order_target ) +
4452  ":" + std::string( target_solution_tag_dof_name );
4453 
4454  data_intx.metadataMap[std::string( solution_weights_identifier )] = metadataStr;
4455 
4456  // Now let us compute the local-global mapping and store it in the context
4457  // We need this mapping when computing matvec products and to do reductions in parallel
4458  // Additionally, the call below will also compute weights with TempestRemap
4459  rval = weightMap->GenerateRemappingWeights(
4460  std::string( disc_method_source ), // const std::string& strInputType
4461  std::string( disc_method_target ), // const std::string& strOutputType
4462  mapOptions, // GenerateOfflineMapAlgorithmOptions& mapOptions
4463  std::string( source_solution_tag_dof_name ), // const std::string& srcDofTagName = "GLOBAL_ID"
4464  std::string( target_solution_tag_dof_name ) // const std::string& tgtDofTagName = "GLOBAL_ID"
4465  );MB_CHK_ERR( rval );
4466 
4467  // print some map statistics
4468  weightMap->PrintMapStatistics();
4469 
4470  if( fValidate && *fValidate )
4471  {
4472  const double dNormalTolerance = 1.0E-8;
4473  const double dStrictTolerance = 1.0E-12;
4474  weightMap->CheckMap( true, true, ( fMonotoneTypeID && *fMonotoneTypeID ), dNormalTolerance, dStrictTolerance );
4475  }
4476 
4477  return moab::MB_SUCCESS;
4478 }
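
// Illustrative usage sketch (not part of iMOAB.cpp): generating conservative FV->FV projection
// weights on the intersection application; the pid, orders, options and DOF tag names below are
// hypothetical placeholders.
//
//   int orderSrc = 1, orderTgt = 1;
//   int noBubble = 1, monotone = 0, volumetric = 0, invDist = 0, noConserve = 0, validate = 1;
//   ErrCode rc = iMOAB_ComputeScalarProjectionWeights( &cplAtmOcnPID, "scalar",
//                    "fv", &orderSrc, "fv", &orderTgt, "", &noBubble, &monotone,
//                    &volumetric, &invDist, &noConserve, &validate,
//                    "GLOBAL_ID", "GLOBAL_ID" );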
4479 
4480 ErrCode iMOAB_ApplyScalarProjectionWeights(
4481  iMOAB_AppID pid_intersection,
4482  int* filter_type,
4483  const iMOAB_String solution_weights_identifier, /* "scalar", "flux", "custom" */
4484  const iMOAB_String source_solution_tag_name,
4485  const iMOAB_String target_solution_tag_name )
4486 {
4487  assert( solution_weights_identifier && strlen( solution_weights_identifier ) );
4488  assert( source_solution_tag_name && strlen( source_solution_tag_name ) );
4489  assert( target_solution_tag_name && strlen( target_solution_tag_name ) );
4490 
4491  moab::ErrorCode rval;
4492 
4493  // Get the source and target data and pcomm objects
4494  appData& data_intx = context.appDatas[*pid_intersection];
4495  TempestMapAppData& tdata = data_intx.tempestData;
4496 
4497  // Now allocate and initialize the remapper object
4498  moab::TempestRemapper* remapper = tdata.remapper;
4499  if( !tdata.weightMaps.count( std::string( solution_weights_identifier ) ) ) // key does not exist
4500  return moab::MB_FAILURE; // the weights for this identifier must be computed or loaded first
4501  moab::TempestOnlineMap* weightMap = tdata.weightMaps[std::string( solution_weights_identifier )];
4502 
4503  // we assume that there are separators ";" between the tag names
4504  std::vector< std::string > srcNames;
4505  std::vector< std::string > tgtNames;
4506  std::vector< Tag > srcTagHandles;
4507  std::vector< Tag > tgtTagHandles;
4508  std::string separator( ":" );
4509  std::string src_name( source_solution_tag_name );
4510  std::string tgt_name( target_solution_tag_name );
4511  split_tag_names( src_name, separator, srcNames );
4512  split_tag_names( tgt_name, separator, tgtNames );
4513  if( srcNames.size() != tgtNames.size() )
4514  {
4515  std::cout << " error in parsing source and target tag names. \n";
4516  return moab::MB_FAILURE;
4517  }
4518 
4519  for( size_t i = 0; i < srcNames.size(); i++ )
4520  {
4521  Tag tagHandle;
4522  rval = context.MBI->tag_get_handle( srcNames[i].c_str(), tagHandle );
4523  if( MB_SUCCESS != rval || NULL == tagHandle )
4524  {
4525  return moab::MB_FAILURE;
4526  }
4527  srcTagHandles.push_back( tagHandle );
4528  rval = context.MBI->tag_get_handle( tgtNames[i].c_str(), tagHandle );
4529  if( MB_SUCCESS != rval || NULL == tagHandle )
4530  {
4531  return moab::MB_FAILURE;
4532  }
4533  tgtTagHandles.push_back( tagHandle );
4534  }
4535 
4536  std::vector< double > solSTagVals;
4537  std::vector< double > solTTagVals;
4538 
4539  moab::Range sents, tents;
4540  if( data_intx.point_cloud )
4541  {
4542  appData& data_src = context.appDatas[*tdata.pid_src];
4543  appData& data_tgt = context.appDatas[*tdata.pid_dest];
4544  if( data_src.point_cloud )
4545  {
4546  moab::Range& covSrcEnts = remapper->GetMeshVertices( moab::Remapper::CoveringMesh );
4547  solSTagVals.resize( covSrcEnts.size(), 0. );
4548  sents = covSrcEnts;
4549  }
4550  else
4551  {
4552  moab::Range& covSrcEnts = remapper->GetMeshEntities( moab::Remapper::CoveringMesh );
4553  solSTagVals.resize(
4554  covSrcEnts.size() * weightMap->GetSourceNDofsPerElement() * weightMap->GetSourceNDofsPerElement(), 0. );
4555  sents = covSrcEnts;
4556  }
4557  if( data_tgt.point_cloud )
4558  {
4559  moab::Range& tgtEnts = remapper->GetMeshVertices( moab::Remapper::TargetMesh );
4560  solTTagVals.resize( tgtEnts.size(), 0. );
4561  tents = tgtEnts;
4562  }
4563  else
4564  {
4565  moab::Range& tgtEnts = remapper->GetMeshEntities( moab::Remapper::TargetMesh );
4566  solTTagVals.resize( tgtEnts.size() * weightMap->GetDestinationNDofsPerElement() *
4567  weightMap->GetDestinationNDofsPerElement(),
4568  0. );
4569  tents = tgtEnts;
4570  }
4571  }
4572  else
4573  {
4574  moab::Range& covSrcEnts = remapper->GetMeshEntities( moab::Remapper::CoveringMesh );
4575  moab::Range& tgtEnts = remapper->GetMeshEntities( moab::Remapper::TargetMesh );
4576  solSTagVals.resize(
4577  covSrcEnts.size() * weightMap->GetSourceNDofsPerElement() * weightMap->GetSourceNDofsPerElement(), -1.0 );
4578  solTTagVals.resize( tgtEnts.size() * weightMap->GetDestinationNDofsPerElement() *
4579  weightMap->GetDestinationNDofsPerElement(),
4580  0. );
4581 
4582  sents = covSrcEnts;
4583  tents = tgtEnts;
4584  }
4585 
4586  moab::TempestOnlineMap::CAASType caasType = moab::TempestOnlineMap::CAAS_NONE; // CAAS filter used by ApplyWeights below
4587  if( filter_type )
4588  {
4589  switch( *filter_type )
4590  {
4591  case 1:
4592  caasType = moab::TempestOnlineMap::CAAS_GLOBAL;
4593  break;
4594  case 2:
4595  caasType = moab::TempestOnlineMap::CAAS_LOCAL;
4596  break;
4597  case 3:
4598  caasType = moab::TempestOnlineMap::CAAS_LOCAL_ADJACENT;
4599  break;
4600  default:
4601  caasType = moab::TempestOnlineMap::CAAS_NONE;
4602  }
4603  }
4604 
4605  for( size_t i = 0; i < srcTagHandles.size(); i++ )
4606  {
4607  // Compute the application of the weights on the source solution data and store it in the
4608  // destination solution vector data. Optionally, the transpose application of the weight
4609  // matrix can be performed instead; set the 3rd argument to true if that is needed.
4610  rval = weightMap->ApplyWeights( srcTagHandles[i], tgtTagHandles[i], false, caasType );MB_CHK_ERR( rval );
4611  }
4612 
4613 // #define VERBOSE
4614 #ifdef VERBOSE
4615  ParallelComm* pco_intx = context.pcomms[*pid_intersection];
4616 
4617  int ivar = 0;
4618  {
4619  Tag ssolnTag = srcTagHandles[ivar];
4620  std::stringstream sstr;
4621  sstr << "covsrcTagData_" << *pid_intersection << "_" << ivar << "_" << pco_intx->rank() << ".txt";
4622  std::ofstream output_file( sstr.str().c_str() );
4623  for( unsigned i = 0; i < sents.size(); ++i )
4624  {
4625  EntityHandle elem = sents[i];
4626  std::vector< double > locsolSTagVals( 16 );
4627  rval = context.MBI->tag_get_data( ssolnTag, &elem, 1, &locsolSTagVals[0] );MB_CHK_ERR( rval );
4628  output_file << "\n" << remapper->GetGlobalID( Remapper::CoveringMesh, i ) << "-- \n\t";
4629  for( unsigned j = 0; j < 16; ++j )
4630  output_file << locsolSTagVals[j] << " ";
4631  }
4632  output_file.flush(); // required here
4633  output_file.close();
4634  }
4635  {
4636  std::stringstream sstr;
4637  sstr << "outputSrcDest_" << *pid_intersection << "_" << ivar << "_" << pco_intx->rank() << ".h5m";
4638  EntityHandle sets[2] = { context.appDatas[*tdata.pid_src].file_set,
4639  context.appDatas[*tdata.pid_dest].file_set };
4640  rval = context.MBI->write_file( sstr.str().c_str(), NULL, "", sets, 2 );MB_CHK_ERR( rval );
4641  }
4642  {
4643  std::stringstream sstr;
4644  sstr << "outputCovSrcDest_" << *pid_intersection << "_" << ivar << "_" << pco_intx->rank() << ".h5m";
4645  // EntityHandle sets[2] = {data_intx.file_set, data_intx.covering_set};
4646  EntityHandle covering_set = remapper->GetCoveringSet();
4647  EntityHandle sets[2] = { covering_set, context.appDatas[*tdata.pid_dest].file_set };
4648  rval = context.MBI->write_file( sstr.str().c_str(), NULL, "", sets, 2 );MB_CHK_ERR( rval );
4649  }
4650  {
4651  std::stringstream sstr;
4652  sstr << "covMesh_" << *pid_intersection << "_" << pco_intx->rank() << ".vtk";
4653  // EntityHandle sets[2] = {data_intx.file_set, data_intx.covering_set};
4654  EntityHandle covering_set = remapper->GetCoveringSet();
4655  rval = context.MBI->write_file( sstr.str().c_str(), NULL, "", &covering_set, 1 );MB_CHK_ERR( rval );
4656  }
4657  {
4658  std::stringstream sstr;
4659  sstr << "tgtMesh_" << *pid_intersection << "_" << pco_intx->rank() << ".vtk";
4660  // EntityHandle sets[2] = {data_intx.file_set, data_intx.covering_set};
4661  EntityHandle target_set = remapper->GetMeshSet( Remapper::TargetMesh );
4662  rval = context.MBI->write_file( sstr.str().c_str(), NULL, "", &target_set, 1 );MB_CHK_ERR( rval );
4663  }
4664  {
4665  std::stringstream sstr;
4666  sstr << "colvector_" << *pid_intersection << "_" << ivar << "_" << pco_intx->rank() << ".txt";
4667  std::ofstream output_file( sstr.str().c_str() );
4668  for( unsigned i = 0; i < solSTagVals.size(); ++i )
4669  output_file << i << " " << weightMap->col_dofmap[i] << " " << weightMap->col_gdofmap[i] << " "
4670  << solSTagVals[i] << "\n";
4671  output_file.flush(); // required here
4672  output_file.close();
4673  }
4674 #endif
4675  // #undef VERBOSE
4676 
4677  return moab::MB_SUCCESS;
4678 }
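
// Illustrative usage sketch (not part of iMOAB.cpp): applying the previously computed "scalar"
// weights to project one or more tags (names separated by ':') from the coverage source mesh onto
// the target mesh; the pid and tag names are hypothetical placeholders.
//
//   int filterType = 0;   // assumed here: no CAAS filtering of the projected field
//   ErrCode rc = iMOAB_ApplyScalarProjectionWeights( &cplAtmOcnPID, &filterType, "scalar",
//                                                    "a2oTbot:a2oUbot", "a2oTbot_proj:a2oUbot_proj" );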
4679 
4680 #endif // MOAB_HAVE_TEMPESTREMAP
4681 
4682 #ifdef __cplusplus
4683 }
4684 #endif