Mesh Oriented datABase  (version 5.5.0)
An array-based unstructured mesh library
iMOAB.cpp
1 /** \file iMOAB.cpp
2  */
3 
4 #include "moab/MOABConfig.h"
5 #include "moab/Core.hpp"
6 
7 #ifdef MOAB_HAVE_MPI
8 #include "moab_mpi.h"
9 #include "moab/ParallelComm.hpp"
10 #include "moab/ParCommGraph.hpp"
13 #endif
14 #include "DebugOutput.hpp"
15 #include "moab/iMOAB.h"
16 
17 /* this is needed because of direct access to hdf5/mhdf */
18 #ifdef MOAB_HAVE_HDF5
19 #include "mhdf.h"
20 #include <H5Tpublic.h>
21 #endif
22 
23 #include "moab/CartVect.hpp"
24 #include "MBTagConventions.hpp"
25 #include "moab/MeshTopoUtil.hpp"
26 #include "moab/ReadUtilIface.hpp"
27 #include "moab/MergeMesh.hpp"
28 
29 #ifdef MOAB_HAVE_TEMPESTREMAP
30 #include "STLStringHelper.h"
31 #include "moab/Remapping/TempestRemapper.hpp"
32 #include "moab/Remapping/TempestOnlineMap.hpp"
35 #endif
36 
37 // C++ includes
38 #include <cassert>
39 #include <sstream>
40 #include <iostream>
41 
42 using namespace moab;
43 
44 // #define VERBOSE
45 
46 // global variables ; should they be organized in a structure, for easier references?
47 // or how do we keep them global?
48 
49 #ifdef __cplusplus
50 extern "C" {
51 #endif
52 
53 #ifdef MOAB_HAVE_TEMPESTREMAP
54 struct TempestMapAppData
55 {
56  moab::TempestRemapper* remapper;
57  std::map< std::string, moab::TempestOnlineMap* > weightMaps;
58  iMOAB_AppID pid_src;
59  iMOAB_AppID pid_dest;
60 };
61 #endif
62 
63 struct appData
64 {
65  EntityHandle file_set;
66  int global_id; // external component id, unique for application
67  std::string name;
68  Range all_verts; // all visible vertices, including ghosts
69  // local vertices would be all_verts if no ghosting was required
70  Range local_verts; // it could include shared, but not owned at the interface
71  Range owned_verts; // owned_verts <= local_verts <= all_verts
72  Range ghost_vertices; // locally ghosted from other processors
73  Range primary_elems; // all visible primary elements, including ghosts
74  Range owned_elems; // owned primary elements
75  Range ghost_elems; // ghosted primary elements
76  int dimension; // 2 or 3, dimension of primary elements (redundant?)
77  long num_global_elements; // union of all elements in primary_elems; either from hdf5
78  // reading or from reduce
79  long num_global_vertices; // union of all nodes, after sharing is resolved; it could be
80  // determined from hdf5 reading
81  Range mat_sets; // material sets (blocks)
82  std::map< int, int > matIndex; // map from global block id to index in mat_sets
83  Range neu_sets; // Neumann sets (surface boundary conditions)
84  Range diri_sets; // Dirichlet sets (vertex boundary conditions)
85  std::map< std::string, Tag > tagMap;
86  std::vector< Tag > tagList;
87  bool point_cloud;
88  bool is_fortran;
89 
90 #ifdef MOAB_HAVE_MPI
91  // constructor for this ParCommGraph takes the joint comm and the MPI groups for each
92  // application
93  std::map< int, ParCommGraph* > pgraph; // map from context () to the parcommgraph*
94 #endif
95 
96 #ifdef MOAB_HAVE_TEMPESTREMAP
97  TempestMapAppData tempestData;
98  std::map< std::string, std::string > metadataMap;
99 #endif
100 };
101 
102 struct GlobalContext
103 {
104  // are there reasons to have multiple moab inits? Is ref count needed?
105  Interface* MBI; // the single MOAB instance shared by all registered applications
106  // we should also have the default tags stored, initialized
107  Tag material_tag, neumann_tag, dirichlet_tag,
108  globalID_tag; // material, neumann, dirichlet, globalID
109  int refCountMB;
110  int iArgc;
111  iMOAB_String* iArgv;
112  int unused_pid;
113 
114  std::map< std::string, int > appIdMap; // from app string (uppercase) to app id
115  std::map< int, int > appIdCompMap; // from component id to app id
116 
117 #ifdef MOAB_HAVE_MPI
118  std::vector< ParallelComm* > pcomms; // created in order of applications, one moab::ParallelComm for each
119 #endif
120 
121  std::vector< appData > appDatas; // the same order as pcomms
122  int globalrank, worldprocs;
123  bool MPI_initialized;
124 
125  GlobalContext()
126  {
127  MBI = 0;
128  refCountMB = 0;
129  unused_pid = 0;
130  }
131 };
132 
133 static struct GlobalContext context;
134 
135 ErrCode iMOAB_Initialize( int argc, iMOAB_String* argv )
136 {
137  if( argc ) IMOAB_CHECKPOINTER( argv, 1 );
138 
139  context.iArgc = argc;
140  context.iArgv = argv; // shallow copy
141 
142  if( 0 == context.refCountMB )
143  {
144  context.MBI = new( std::nothrow ) moab::Core;
145  // retrieve the default tags
146  const char* const shared_set_tag_names[] = { MATERIAL_SET_TAG_NAME, NEUMANN_SET_TAG_NAME,
147  DIRICHLET_SET_TAG_NAME, GLOBAL_ID_TAG_NAME };
148  // blocks, visible surfaceBC(neumann), vertexBC (Dirichlet), global id, parallel partition
149  Tag gtags[4];
150 
151  for( int i = 0; i < 4; i++ )
152  {
153 
154  ErrorCode rval =
155  context.MBI->tag_get_handle( shared_set_tag_names[i], 1, MB_TYPE_INTEGER, gtags[i], MB_TAG_ANY );MB_CHK_ERR( rval );
156  }
157 
158  context.material_tag = gtags[0];
159  context.neumann_tag = gtags[1];
160  context.dirichlet_tag = gtags[2];
161  context.globalID_tag = gtags[3];
162  }
163 
164  context.MPI_initialized = false;
165 #ifdef MOAB_HAVE_MPI
166  int flagInit;
167  MPI_Initialized( &flagInit );
168 
169  if( flagInit && !context.MPI_initialized )
170  {
171  MPI_Comm_size( MPI_COMM_WORLD, &context.worldprocs );
172  MPI_Comm_rank( MPI_COMM_WORLD, &context.globalrank );
173  context.MPI_initialized = true;
174  }
175 #endif
176 
177  context.refCountMB++;
178  return moab::MB_SUCCESS;
179 }
180 
181 ErrCode iMOAB_InitializeFortran()
182 {
183  return iMOAB_Initialize( 0, 0 );
184 }
185 
186 ErrCode iMOAB_Finalize()
187 {
188  context.refCountMB--;
189 
190  if( 0 == context.refCountMB )
191  {
192  delete context.MBI;
193  }
194 
195  return MB_SUCCESS;
196 }
197 
198 ErrCode iMOAB_RegisterApplication( const iMOAB_String app_name,
199 #ifdef MOAB_HAVE_MPI
200  MPI_Comm* comm,
201 #endif
202  int* compid,
203  iMOAB_AppID pid )
204 {
205  IMOAB_CHECKPOINTER( app_name, 1 );
206 #ifdef MOAB_HAVE_MPI
207  IMOAB_CHECKPOINTER( comm, 2 );
208  IMOAB_CHECKPOINTER( compid, 3 );
209 #else
210  IMOAB_CHECKPOINTER( compid, 2 );
211 #endif
212 
213  // will create a parallel comm for this application too, so there will be a
214  // mapping from *pid to file set and to parallel comm instances
215  std::string name( app_name );
216 
217  if( context.appIdMap.find( name ) != context.appIdMap.end() )
218  {
219  std::cout << " application " << name << " already registered \n";
220  return moab::MB_FAILURE;
221  }
222 
223  *pid = context.unused_pid++;
224  context.appIdMap[name] = *pid;
225  int rankHere = 0;
226 #ifdef MOAB_HAVE_MPI
227  MPI_Comm_rank( *comm, &rankHere );
228 #endif
229  if( !rankHere ) std::cout << " application " << name << " with ID = " << *pid << " is registered now \n";
230  if( *compid <= 0 )
231  {
232  std::cout << " convention for external application is to have its id positive \n";
233  return moab::MB_FAILURE;
234  }
235 
236  if( context.appIdCompMap.find( *compid ) != context.appIdCompMap.end() )
237  {
238  std::cout << " external application with comp id " << *compid << " is already registered\n";
239  return moab::MB_FAILURE;
240  }
241 
242  context.appIdCompMap[*compid] = *pid;
243 
244  // now create ParallelComm and a file set for this application
245 #ifdef MOAB_HAVE_MPI
246  if( *comm )
247  {
248  ParallelComm* pco = new ParallelComm( context.MBI, *comm );
249 
250 #ifndef NDEBUG
251  int index = pco->get_id(); // it could be useful to get app id from pcomm instance ...
252  assert( index == *pid );
253  // here, we assert that the pid is the same as the id of the ParallelComm instance
254  // useful for writing in parallel
255 #endif
256  context.pcomms.push_back( pco );
257  }
258  else
259  {
260  context.pcomms.push_back( 0 );
261  }
262 #endif
263 
264  // create now the file set that will be used for loading the model in
265  EntityHandle file_set;
266  ErrorCode rval = context.MBI->create_meshset( MESHSET_SET, file_set );MB_CHK_ERR( rval );
267 
268  appData app_data;
269  app_data.file_set = file_set;
270  app_data.global_id = *compid; // will be used mostly for par comm graph
271  app_data.name = name; // save the name of application
272 
273 #ifdef MOAB_HAVE_TEMPESTREMAP
274  app_data.tempestData.remapper = NULL; // Only allocate as needed
275 #endif
276 
277  app_data.point_cloud = false;
278  app_data.is_fortran = false;
279 
280  context.appDatas.push_back(
281  app_data ); // it will correspond to app_FileSets[*pid] will be the file set of interest
282  return moab::MB_SUCCESS;
283 }
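For orientation, a minimal calling sequence for the registration API above, written as a standalone sketch rather than as part of iMOAB.cpp. It assumes an MPI build; the application name "MyCompName" and component id 10 are arbitrary placeholders.

// Sketch (not part of iMOAB.cpp): register one application and tear it down again.
#include "moab/iMOAB.h"
#include "moab_mpi.h"

int main( int argc, char** argv )
{
    MPI_Init( &argc, &argv );
    iMOAB_Initialize( argc, argv );

    int appID       = -1;
    iMOAB_AppID pid = &appID;          // iMOAB_AppID is a pointer to the application id
    MPI_Comm comm   = MPI_COMM_WORLD;
    int compid      = 10;              // external component id; must be positive
    char name[]     = "MyCompName";    // placeholder application name

    ErrCode rc = iMOAB_RegisterApplication( name, &comm, &compid, pid );

    // ... load a mesh, define and exchange tags, etc. ...

    rc = iMOAB_DeregisterApplication( pid );
    rc = iMOAB_Finalize();
    MPI_Finalize();
    return rc;
}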
284 
285 ErrCode iMOAB_RegisterApplicationFortran( const iMOAB_String app_name,
286 #ifdef MOAB_HAVE_MPI
287  int* comm,
288 #endif
289  int* compid,
290  iMOAB_AppID pid )
291 {
292  IMOAB_CHECKPOINTER( app_name, 1 );
293 #ifdef MOAB_HAVE_MPI
294  IMOAB_CHECKPOINTER( comm, 2 );
295  IMOAB_CHECKPOINTER( compid, 3 );
296 #else
297  IMOAB_CHECKPOINTER( compid, 2 );
298 #endif
299 
300  ErrCode err;
301  assert( app_name != nullptr );
302  std::string name( app_name );
303 
304 #ifdef MOAB_HAVE_MPI
305  MPI_Comm ccomm;
306  if( comm )
307  {
308  // convert from Fortran communicator to a C communicator
309  // see transfer of handles
310  // http://www.mpi-forum.org/docs/mpi-2.2/mpi22-report/node361.htm
311  ccomm = MPI_Comm_f2c( (MPI_Fint)*comm );
312  }
313 #endif
314 
315  // now call C style registration function:
316  err = iMOAB_RegisterApplication( app_name,
317 #ifdef MOAB_HAVE_MPI
318  &ccomm,
319 #endif
320  compid, pid );
321 
322  // Now that we have created the application context, store that
323  // the application being registered is from a Fortran context
324  context.appDatas[*pid].is_fortran = true;
325 
326  return err;
327 }
328 
329 ErrCode iMOAB_DeregisterApplication( iMOAB_AppID pid )
330 {
331  // the file set , parallel comm are all in vectors indexed by *pid
332  // assume we did not delete anything yet
333  // *pid will not be reused if we register another application
334  appData& data = context.appDatas[*pid];
335  int rankHere = 0;
336 #ifdef MOAB_HAVE_MPI
337  ParallelComm* pco = context.pcomms[*pid];
338  rankHere = pco->rank();
339 #endif
340  if( !rankHere )
341  std::cout << " application with ID: " << *pid << " global id: " << data.global_id << " name: " << data.name
342  << " is de-registered now \n";
343 
344  EntityHandle fileSet = data.file_set;
345  // get all entities part of the file set
346  Range fileents;
347  ErrorCode rval = context.MBI->get_entities_by_handle( fileSet, fileents, /*recursive */ true );MB_CHK_ERR( rval );
348 
349  fileents.insert( fileSet );
350 
351  rval = context.MBI->get_entities_by_type( fileSet, MBENTITYSET, fileents );MB_CHK_ERR( rval ); // append all mesh sets
352 
353 #ifdef MOAB_HAVE_TEMPESTREMAP
354  if( data.tempestData.remapper ) delete data.tempestData.remapper;
355  if( data.tempestData.weightMaps.size() ) data.tempestData.weightMaps.clear();
356 #endif
357 
358 #ifdef MOAB_HAVE_MPI
359 
360  // we could get the pco also with
361  // ParallelComm * pcomm = ParallelComm::get_pcomm(context.MBI, *pid);
362 
363  std::map< int, ParCommGraph* >& pargs = data.pgraph;
364 
365  // free the parallel comm graphs associated with this app
366  for( std::map< int, ParCommGraph* >::iterator mt = pargs.begin(); mt != pargs.end(); mt++ )
367  {
368  ParCommGraph* pgr = mt->second;
369  delete pgr;
370  pgr = NULL;
371  }
372  if( pco ) delete pco;
373 #endif
374 
375  // delete first all except vertices
376  Range vertices = fileents.subset_by_type( MBVERTEX );
377  Range noverts = subtract( fileents, vertices );
378 
379  rval = context.MBI->delete_entities( noverts );MB_CHK_ERR( rval );
380  // now retrieve connected elements that still exist (maybe in other sets, pids?)
381  Range adj_ents_left;
382  rval = context.MBI->get_adjacencies( vertices, 1, false, adj_ents_left, Interface::UNION );MB_CHK_ERR( rval );
383  rval = context.MBI->get_adjacencies( vertices, 2, false, adj_ents_left, Interface::UNION );MB_CHK_ERR( rval );
384  rval = context.MBI->get_adjacencies( vertices, 3, false, adj_ents_left, Interface::UNION );MB_CHK_ERR( rval );
385 
386  if( !adj_ents_left.empty() )
387  {
388  Range conn_verts;
389  rval = context.MBI->get_connectivity( adj_ents_left, conn_verts );MB_CHK_ERR( rval );
390  vertices = subtract( vertices, conn_verts );
391  }
392 
393  rval = context.MBI->delete_entities( vertices );MB_CHK_ERR( rval );
394 
395  std::map< std::string, int >::iterator mit;
396 
397  for( mit = context.appIdMap.begin(); mit != context.appIdMap.end(); mit++ )
398  {
399  int pidx = mit->second;
400 
401  if( *pid == pidx )
402  {
403  break;
404  }
405  }
406 
407  context.appIdMap.erase( mit );
408  std::map< int, int >::iterator mit1;
409 
410  for( mit1 = context.appIdCompMap.begin(); mit1 != context.appIdCompMap.end(); mit1++ )
411  {
412  int pidx = mit1->second;
413 
414  if( *pid == pidx )
415  {
416  break;
417  }
418  }
419 
420  context.appIdCompMap.erase( mit1 );
421 
422  context.unused_pid--; // we have to go backwards always TODO
423  context.appDatas.pop_back();
424 #ifdef MOAB_HAVE_MPI
425  context.pcomms.pop_back();
426 #endif
427  return moab::MB_SUCCESS;
428 }
429 
430 ErrCode iMOAB_DeregisterApplicationFortran( iMOAB_AppID pid )
431 {
432  // release any Fortran specific allocations here before we pass it on
433  context.appDatas[*pid].is_fortran = false;
434 
435  // release all datastructure allocations
436  return iMOAB_DeregisterApplication( pid );
437 }
438 
439 // Utility function
440 static void split_tag_names( std::string input_names,
441  std::string& separator,
442  std::vector< std::string >& list_tag_names )
443 {
444  size_t pos = 0;
445  std::string token;
446  while( ( pos = input_names.find( separator ) ) != std::string::npos )
447  {
448  token = input_names.substr( 0, pos );
449  if( !token.empty() ) list_tag_names.push_back( token );
450  // std::cout << token << std::endl;
451  input_names.erase( 0, pos + separator.length() );
452  }
453  if( !input_names.empty() )
454  {
455  // if leftover something, or if not ended with delimiter
456  list_tag_names.push_back( input_names );
457  }
458  return;
459 }
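To illustrate the helper above: with the ":" separator used throughout this file, a concatenated name string splits into individual tag names (the names below are placeholders).

// Sketch: "a:b:c" style tag-name strings are split on ":" into individual names.
std::vector< std::string > names;
std::string sep( ":" );
split_tag_names( std::string( "Temperature:Salinity:u" ), sep, names );
// names now holds { "Temperature", "Salinity", "u" }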
460 
461 ErrCode iMOAB_ReadHeaderInfo( const iMOAB_String filename,
462  int* num_global_vertices,
463  int* num_global_elements,
464  int* num_dimension,
465  int* num_parts )
466 {
467  IMOAB_CHECKPOINTER( filename, 1 );
468  IMOAB_ASSERT( strlen( filename ), "Invalid filename length." );
469 
470 #ifdef MOAB_HAVE_HDF5
471  std::string filen( filename );
472 
473  int edges = 0;
474  int faces = 0;
475  int regions = 0;
476  if( num_global_vertices ) *num_global_vertices = 0;
477  if( num_global_elements ) *num_global_elements = 0;
478  if( num_dimension ) *num_dimension = 0;
479  if( num_parts ) *num_parts = 0;
480 
481  mhdf_FileHandle file;
482  mhdf_Status status;
483  unsigned long max_id;
484  struct mhdf_FileDesc* data;
485 
486  file = mhdf_openFile( filen.c_str(), 0, &max_id, -1, &status );
487 
488  if( mhdf_isError( &status ) )
489  {
490  fprintf( stderr, "%s: %s\n", filename, mhdf_message( &status ) );
491  return moab::MB_FAILURE;
492  }
493 
494  data = mhdf_getFileSummary( file, H5T_NATIVE_ULONG, &status,
495  1 ); // will use extra set info; will get parallel partition tag info too!
496 
497  if( mhdf_isError( &status ) )
498  {
499  fprintf( stderr, "%s: %s\n", filename, mhdf_message( &status ) );
500  return moab::MB_FAILURE;
501  }
502 
503  if( num_dimension ) *num_dimension = data->nodes.vals_per_ent;
504  if( num_global_vertices ) *num_global_vertices = (int)data->nodes.count;
505 
506  for( int i = 0; i < data->num_elem_desc; i++ )
507  {
508  struct mhdf_ElemDesc* el_desc = &( data->elems[i] );
509  struct mhdf_EntDesc* ent_d = &( el_desc->desc );
510 
511  if( 0 == strcmp( el_desc->type, mhdf_EDGE_TYPE_NAME ) )
512  {
513  edges += ent_d->count;
514  }
515 
516  if( 0 == strcmp( el_desc->type, mhdf_TRI_TYPE_NAME ) )
517  {
518  faces += ent_d->count;
519  }
520 
521  if( 0 == strcmp( el_desc->type, mhdf_QUAD_TYPE_NAME ) )
522  {
523  faces += ent_d->count;
524  }
525 
526  if( 0 == strcmp( el_desc->type, mhdf_POLYGON_TYPE_NAME ) )
527  {
528  faces += ent_d->count;
529  }
530 
531  if( 0 == strcmp( el_desc->type, mhdf_TET_TYPE_NAME ) )
532  {
533  regions += ent_d->count;
534  }
535 
536  if( 0 == strcmp( el_desc->type, mhdf_PYRAMID_TYPE_NAME ) )
537  {
538  regions += ent_d->count;
539  }
540 
541  if( 0 == strcmp( el_desc->type, mhdf_PRISM_TYPE_NAME ) )
542  {
543  regions += ent_d->count;
544  }
545 
546  if( 0 == strcmp( el_desc->type, mdhf_KNIFE_TYPE_NAME ) )
547  {
548  regions += ent_d->count;
549  }
550 
551  if( 0 == strcmp( el_desc->type, mdhf_HEX_TYPE_NAME ) )
552  {
553  regions += ent_d->count;
554  }
555 
556  if( 0 == strcmp( el_desc->type, mhdf_POLYHEDRON_TYPE_NAME ) )
557  {
558  regions += ent_d->count;
559  }
560 
561  if( 0 == strcmp( el_desc->type, mhdf_SEPTAHEDRON_TYPE_NAME ) )
562  {
563  regions += ent_d->count;
564  }
565  }
566 
567  if( num_parts ) *num_parts = data->numEntSets[0];
568 
569  // is this required?
570  if( edges > 0 )
571  {
572  if( num_dimension ) *num_dimension = 1; // I don't think it will ever return 1
573  if( num_global_elements ) *num_global_elements = edges;
574  }
575 
576  if( faces > 0 )
577  {
578  if( num_dimension ) *num_dimension = 2;
579  if( num_global_elements ) *num_global_elements = faces;
580  }
581 
582  if( regions > 0 )
583  {
584  if( num_dimension ) *num_dimension = 3;
585  if( num_global_elements ) *num_global_elements = regions;
586  }
587 
588  mhdf_closeFile( file, &status );
589 
590  free( data );
591 
592 #else
593  std::cout << filename
594  << ": Please reconfigure with HDF5. Cannot retrieve header information for file "
595  "formats other than a h5m file.\n";
596  if( num_global_vertices ) *num_global_vertices = 0;
597  if( num_global_elements ) *num_global_elements = 0;
598  if( num_dimension ) *num_dimension = 0;
599  if( num_parts ) *num_parts = 0;
600 #endif
601 
602  return moab::MB_SUCCESS;
603 }
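As a usage sketch (not part of the file), a caller can query the header of an h5m file before deciding how to load it; "ocean.h5m" is a placeholder path.

// Sketch: read global counts from an h5m header.
char meshfile[] = "ocean.h5m";   // placeholder file name
int nverts = 0, nelems = 0, ndim = 0, nparts = 0;
ErrCode rc = iMOAB_ReadHeaderInfo( meshfile, &nverts, &nelems, &ndim, &nparts );
// ndim is the dimension of the highest-dimensional elements in the file;
// nparts is the number of parallel partitions stored in it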
604 
605 ErrCode iMOAB_LoadMesh( iMOAB_AppID pid,
606  const iMOAB_String filename,
607  const iMOAB_String read_options,
608  int* num_ghost_layers )
609 {
610  IMOAB_CHECKPOINTER( filename, 2 );
611  IMOAB_ASSERT( strlen( filename ), "Invalid filename length." );
612 
613  // make sure we use the file set and pcomm associated with the *pid
614  std::ostringstream newopts;
615  if( read_options ) newopts << read_options;
616 
617 #ifdef MOAB_HAVE_MPI
618 
619  if( context.MPI_initialized )
620  {
621  if( context.worldprocs > 1 )
622  {
623  std::string opts( ( read_options ? read_options : "" ) );
624  std::string pcid( "PARALLEL_COMM=" );
625  std::size_t found = opts.find( pcid );
626 
627  if( found != std::string::npos )
628  {
629  std::cerr << " cannot specify PARALLEL_COMM option, it is implicit \n";
630  return moab::MB_FAILURE;
631  }
632 
633  // in serial, apply PARALLEL_COMM option only for h5m files; it does not work for .g
634  // files (used in test_remapping)
635  std::string filen( filename );
636  std::string::size_type idx = filen.rfind( '.' );
637 
638  if( idx != std::string::npos )
639  {
640  std::string extension = filen.substr( idx + 1 );
641  if( ( extension == std::string( "h5m" ) ) || ( extension == std::string( "nc" ) ) )
642  newopts << ";;PARALLEL_COMM=" << *pid;
643  }
644 
645  if( *num_ghost_layers >= 1 )
646  {
647  // if we want ghosts, we will want additional entities, the last .1
648  // because the addl ents can be edges, faces that are part of the neumann sets
649  std::string pcid2( "PARALLEL_GHOSTS=" );
650  std::size_t found2 = opts.find( pcid2 );
651 
652  if( found2 != std::string::npos )
653  {
654  std::cout << " PARALLEL_GHOSTS option is already specified, ignore passed "
655  "number of layers \n";
656  }
657  else
658  {
659  // dimension of primary entities is 3 here, but it could be 2 for climate
660  // meshes; we would need to pass PARALLEL_GHOSTS explicitly for 2d meshes, for
661  // example: ";PARALLEL_GHOSTS=2.0.1"
662  newopts << ";PARALLEL_GHOSTS=3.0." << *num_ghost_layers << ".3";
663  }
664  }
665  }
666  }
667 #else
668  IMOAB_ASSERT( *num_ghost_layers == 0, "Cannot provide ghost layers in serial." );
669 #endif
670 
671  // Now let us actually load the MOAB file with the appropriate read options
672  ErrorCode rval = context.MBI->load_file( filename, &context.appDatas[*pid].file_set, newopts.str().c_str() );MB_CHK_ERR( rval );
673 
674 #ifdef VERBOSE
675  // some debugging stuff
676  std::ostringstream outfile;
677 #ifdef MOAB_HAVE_MPI
678  int rank = context.pcomms[*pid]->rank();
679  int nprocs = context.pcomms[*pid]->size();
680  outfile << "TaskMesh_n" << nprocs << "." << rank << ".h5m";
681 #else
682  outfile << "TaskMesh_n1.0.h5m";
683 #endif
684  // the mesh contains ghosts too, but they are not part of mat/neumann set
685  // write in serial the file, to see what tags are missing
686  rval = context.MBI->write_file( outfile.str().c_str() );MB_CHK_ERR( rval ); // everything on current task, written in serial
687 #endif
688 
689  // Update mesh information
690  return iMOAB_UpdateMeshInfo( pid );
691 }
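A typical parallel load call for the function above might look like the following sketch; it assumes a pid registered as in the earlier registration sketch, the file name is a placeholder, and the options string follows MOAB's standard parallel read conventions.

// Sketch: load a partitioned h5m file with one layer of ghost cells.
char meshfile[] = "ocean.h5m";   // placeholder file name
char readopts[] = "PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION;PARALLEL_RESOLVE_SHARED_ENTS";
int nghost = 1;                  // request one ghost layer; PARALLEL_GHOSTS is appended above
ErrCode rc = iMOAB_LoadMesh( pid, meshfile, readopts, &nghost );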
692 
693 ErrCode iMOAB_WriteMesh( iMOAB_AppID pid, const iMOAB_String filename, const iMOAB_String write_options )
694 {
695  IMOAB_CHECKPOINTER( filename, 2 );
696  IMOAB_ASSERT( strlen( filename ), "Invalid filename length." );
697 
698  appData& data = context.appDatas[*pid];
699  EntityHandle fileSet = data.file_set;
700 
701  std::ostringstream newopts;
702 #ifdef MOAB_HAVE_MPI
703  std::string write_opts( ( write_options ? write_options : "" ) );
704  std::string pcid( "PARALLEL_COMM=" );
705  std::size_t found = write_opts.find( pcid );
706 
707  if( found != std::string::npos )
708  {
709  std::cerr << " cannot specify PARALLEL_COMM option, it is implicit \n";
710  return moab::MB_FAILURE;
711  }
712 
713  // if write in parallel, add pc option, to be sure about which ParallelComm instance is used
714  std::string pw( "PARALLEL=WRITE_PART" );
715  found = write_opts.find( pw );
716 
717  if( found != std::string::npos )
718  {
719  newopts << "PARALLEL_COMM=" << *pid << ";";
720  }
721 
722 #endif
723 
724  if( write_options ) newopts << write_options;
725 
726  std::vector< Tag > copyTagList = data.tagList;
727  // append Global ID and Parallel Partition
728  std::string gid_name_tag( "GLOBAL_ID" );
729 
730  // export global id tag, we need it always
731  if( data.tagMap.find( gid_name_tag ) == data.tagMap.end() )
732  {
733  Tag gid = context.MBI->globalId_tag();
734  copyTagList.push_back( gid );
735  }
736  // also Parallel_Partition PARALLEL_PARTITION
737  std::string pp_name_tag( "PARALLEL_PARTITION" );
738 
739  // write parallel part tag too, if it exists
740  if( data.tagMap.find( pp_name_tag ) == data.tagMap.end() )
741  {
742  Tag ptag;
743  context.MBI->tag_get_handle( pp_name_tag.c_str(), ptag );
744  if( ptag ) copyTagList.push_back( ptag );
745  }
746 
747  // Now let us actually write the file to disk with appropriate options
748  ErrorCode rval = context.MBI->write_file( filename, 0, newopts.str().c_str(), &fileSet, 1, &copyTagList[0],
749  (int)copyTagList.size() );MB_CHK_ERR( rval );
750 
751  return moab::MB_SUCCESS;
752 }
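Correspondingly, a sketch of a parallel write of the application mesh and its registered tags; the output name is a placeholder.

// Sketch: write the mesh associated with pid to a single h5m file.
char outfile[]   = "output.h5m";           // placeholder output name
char writeopts[] = "PARALLEL=WRITE_PART";  // triggers the PARALLEL_COMM injection above
ErrCode rc = iMOAB_WriteMesh( pid, outfile, writeopts );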
753 
754 ErrCode iMOAB_WriteLocalMesh( iMOAB_AppID pid, const iMOAB_String prefix )
755 {
756  IMOAB_CHECKPOINTER( prefix, 2 );
757  IMOAB_ASSERT( strlen( prefix ), "Invalid prefix string length." );
758 
759  std::ostringstream file_name;
760  int rank = 0, size = 1;
761 #ifdef MOAB_HAVE_MPI
762  rank = context.pcomms[*pid]->rank();
763  size = context.pcomms[*pid]->size();
764 #endif
765  file_name << prefix << "_" << size << "_" << rank << ".h5m";
766  // Now let us actually write the file to disk with appropriate options
767  ErrorCode rval = context.MBI->write_file( file_name.str().c_str(), 0, 0, &context.appDatas[*pid].file_set, 1 );MB_CHK_ERR( rval );
768 
769  return moab::MB_SUCCESS;
770 }
771 
772 ErrCode iMOAB_UpdateMeshInfo( iMOAB_AppID pid )
773 {
774  // this will include ghost elements info
775  appData& data = context.appDatas[*pid];
776  EntityHandle fileSet = data.file_set;
777 
778  // first clear all data ranges; this can be called after ghosting
779  data.all_verts.clear();
780  data.primary_elems.clear();
781  data.local_verts.clear();
782  data.owned_verts.clear();
783  data.ghost_vertices.clear();
784  data.owned_elems.clear();
785  data.ghost_elems.clear();
786  data.mat_sets.clear();
787  data.neu_sets.clear();
788  data.diri_sets.clear();
789 
790  // Let us get all the vertex entities
791  ErrorCode rval = context.MBI->get_entities_by_type( fileSet, MBVERTEX, data.all_verts, true );MB_CHK_ERR( rval ); // recursive
792 
793  // Let us check first entities of dimension = 3
794  data.dimension = 3;
795  rval = context.MBI->get_entities_by_dimension( fileSet, data.dimension, data.primary_elems, true );MB_CHK_ERR( rval ); // recursive
796 
797  if( data.primary_elems.empty() )
798  {
799  // Now 3-D elements. Let us check entities of dimension = 2
800  data.dimension = 2;
801  rval = context.MBI->get_entities_by_dimension( fileSet, data.dimension, data.primary_elems, true );MB_CHK_ERR( rval ); // recursive
802 
803  if( data.primary_elems.empty() )
804  {
805  // Now 3-D/2-D elements. Let us check entities of dimension = 1
806  data.dimension = 1;
807  rval = context.MBI->get_entities_by_dimension( fileSet, data.dimension, data.primary_elems, true );MB_CHK_ERR( rval ); // recursive
808 
809  if( data.primary_elems.empty() )
810  {
811  // no elements of dimension 1 or 2 or 3; it could happen for point clouds
812  data.dimension = 0;
813  }
814  }
815  }
816 
817  // check if the current mesh is just a point cloud
818  data.point_cloud = ( ( data.primary_elems.size() == 0 && data.all_verts.size() > 0 ) || data.dimension == 0 );
819 
820 #ifdef MOAB_HAVE_MPI
821 
822  if( context.MPI_initialized )
823  {
824  ParallelComm* pco = context.pcomms[*pid];
825 
826  // filter ghost vertices, from local
827  rval = pco->filter_pstatus( data.all_verts, PSTATUS_GHOST, PSTATUS_NOT, -1, &data.local_verts );MB_CHK_ERR( rval );
828 
829  // Store handles for all ghosted entities
830  data.ghost_vertices = subtract( data.all_verts, data.local_verts );
831 
832  // filter ghost elements, from local
833  rval = pco->filter_pstatus( data.primary_elems, PSTATUS_GHOST, PSTATUS_NOT, -1, &data.owned_elems );MB_CHK_ERR( rval );
834 
835  data.ghost_elems = subtract( data.primary_elems, data.owned_elems );
836  // now update global number of primary cells and global number of vertices
837  // determine first number of owned vertices
838  // Get local owned vertices
839  rval = pco->filter_pstatus( data.all_verts, PSTATUS_NOT_OWNED, PSTATUS_NOT, -1, &data.owned_verts );MB_CHK_ERR( rval );
840  int local[2], global[2];
841  local[0] = data.owned_verts.size();
842  local[1] = data.owned_elems.size();
843  MPI_Allreduce( local, global, 2, MPI_INT, MPI_SUM, pco->comm() );
844  rval = iMOAB_SetGlobalInfo( pid, &( global[0] ), &( global[1] ) );MB_CHK_ERR( rval );
845  }
846  else
847  {
848  data.local_verts = data.all_verts;
849  data.owned_elems = data.primary_elems;
850  }
851 
852 #else
853 
854  data.local_verts = data.all_verts;
855  data.owned_elems = data.primary_elems;
856 
857 #endif
858 
859  // Get the references for some standard internal tags such as material blocks, BCs, etc
860  rval = context.MBI->get_entities_by_type_and_tag( fileSet, MBENTITYSET, &( context.material_tag ), 0, 1,
861  data.mat_sets, Interface::UNION );MB_CHK_ERR( rval );
862 
863  rval = context.MBI->get_entities_by_type_and_tag( fileSet, MBENTITYSET, &( context.neumann_tag ), 0, 1,
864  data.neu_sets, Interface::UNION );MB_CHK_ERR( rval );
865 
866  rval = context.MBI->get_entities_by_type_and_tag( fileSet, MBENTITYSET, &( context.dirichlet_tag ), 0, 1,
867  data.diri_sets, Interface::UNION );MB_CHK_ERR( rval );
868 
869  return moab::MB_SUCCESS;
870 }
871 
872 ErrCode iMOAB_GetMeshInfo( iMOAB_AppID pid,
873  int* num_visible_vertices,
874  int* num_visible_elements,
875  int* num_visible_blocks,
876  int* num_visible_surfaceBC,
877  int* num_visible_vertexBC )
878 {
879  ErrorCode rval;
880  appData& data = context.appDatas[*pid];
881  EntityHandle fileSet = data.file_set;
882 
883  // this will include ghost elements
884  // first clear all data ranges; this can be called after ghosting
885  if( num_visible_elements )
886  {
887  num_visible_elements[2] = static_cast< int >( data.primary_elems.size() );
888  // separate ghost and local/owned primary elements
889  num_visible_elements[0] = static_cast< int >( data.owned_elems.size() );
890  num_visible_elements[1] = static_cast< int >( data.ghost_elems.size() );
891  }
892  if( num_visible_vertices )
893  {
894  num_visible_vertices[2] = static_cast< int >( data.all_verts.size() );
895  num_visible_vertices[1] = static_cast< int >( data.ghost_vertices.size() );
896  // local are those that are not ghosts; they may include shared, not owned vertices
897  num_visible_vertices[0] = num_visible_vertices[2] - num_visible_vertices[1];
898  }
899 
900  if( num_visible_blocks )
901  {
902  rval = context.MBI->get_entities_by_type_and_tag( fileSet, MBENTITYSET, &( context.material_tag ), 0, 1,
903  data.mat_sets, Interface::UNION );MB_CHK_ERR( rval );
904 
905  num_visible_blocks[2] = data.mat_sets.size();
906  num_visible_blocks[0] = num_visible_blocks[2];
907  num_visible_blocks[1] = 0;
908  }
909 
910  if( num_visible_surfaceBC )
911  {
912  rval = context.MBI->get_entities_by_type_and_tag( fileSet, MBENTITYSET, &( context.neumann_tag ), 0, 1,
913  data.neu_sets, Interface::UNION );MB_CHK_ERR( rval );
914 
915  num_visible_surfaceBC[2] = 0;
916  // count how many faces are in each neu set, and how many regions are
917  // adjacent to them;
918  int numNeuSets = (int)data.neu_sets.size();
919 
920  for( int i = 0; i < numNeuSets; i++ )
921  {
922  Range subents;
923  EntityHandle nset = data.neu_sets[i];
924  rval = context.MBI->get_entities_by_dimension( nset, data.dimension - 1, subents );MB_CHK_ERR( rval );
925 
926  for( Range::iterator it = subents.begin(); it != subents.end(); ++it )
927  {
928  EntityHandle subent = *it;
929  Range adjPrimaryEnts;
930  rval = context.MBI->get_adjacencies( &subent, 1, data.dimension, false, adjPrimaryEnts );MB_CHK_ERR( rval );
931 
932  num_visible_surfaceBC[2] += (int)adjPrimaryEnts.size();
933  }
934  }
935 
936  num_visible_surfaceBC[0] = num_visible_surfaceBC[2];
937  num_visible_surfaceBC[1] = 0;
938  }
939 
940  if( num_visible_vertexBC )
941  {
942  rval = context.MBI->get_entities_by_type_and_tag( fileSet, MBENTITYSET, &( context.dirichlet_tag ), 0, 1,
943  data.diri_sets, Interface::UNION );MB_CHK_ERR( rval );
944 
945  num_visible_vertexBC[2] = 0;
946  int numDiriSets = (int)data.diri_sets.size();
947 
948  for( int i = 0; i < numDiriSets; i++ )
949  {
950  Range verts;
951  EntityHandle diset = data.diri_sets[i];
952  rval = context.MBI->get_entities_by_dimension( diset, 0, verts );MB_CHK_ERR( rval );
953 
954  num_visible_vertexBC[2] += (int)verts.size();
955  }
956 
957  num_visible_vertexBC[0] = num_visible_vertexBC[2];
958  num_visible_vertexBC[1] = 0;
959  }
960 
961  return moab::MB_SUCCESS;
962 }
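Each counter argument of iMOAB_GetMeshInfo is a 3-entry array. A sketch of how a caller reads them, following the owned/ghost/total layout filled in above (pid as in the earlier sketches):

// Sketch: query visible mesh counts for the application pid.
int nverts[3], nelems[3], nblocks[3], nsbc[3], ndbc[3];
ErrCode rc = iMOAB_GetMeshInfo( pid, nverts, nelems, nblocks, nsbc, ndbc );
// nelems[0] = owned, nelems[1] = ghost, nelems[2] = total visible primary elements
// nverts[0] = local (non-ghost), nverts[1] = ghost, nverts[2] = all visible vertices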
963 
964 ErrCode iMOAB_GetVertexID( iMOAB_AppID pid, int* vertices_length, iMOAB_GlobalID* global_vertex_ID )
965 {
966  IMOAB_CHECKPOINTER( vertices_length, 2 );
967  IMOAB_CHECKPOINTER( global_vertex_ID, 3 );
968 
969  const Range& verts = context.appDatas[*pid].all_verts;
970  // check for problems with array length
971  IMOAB_ASSERT( *vertices_length == static_cast< int >( verts.size() ), "Invalid vertices length provided" );
972 
973  // global id tag is context.globalID_tag
974  return context.MBI->tag_get_data( context.globalID_tag, verts, global_vertex_ID );
975 }
976 
977 ErrCode iMOAB_GetVertexOwnership( iMOAB_AppID pid, int* vertices_length, int* visible_global_rank_ID )
978 {
979  assert( vertices_length && *vertices_length );
980  assert( visible_global_rank_ID );
981 
982  Range& verts = context.appDatas[*pid].all_verts;
983  int i = 0;
984 #ifdef MOAB_HAVE_MPI
985  ParallelComm* pco = context.pcomms[*pid];
986 
987  for( Range::iterator vit = verts.begin(); vit != verts.end(); vit++, i++ )
988  {
989  ErrorCode rval = pco->get_owner( *vit, visible_global_rank_ID[i] );MB_CHK_ERR( rval );
990  }
991 
992  if( i != *vertices_length )
993  {
994  return moab::MB_FAILURE;
995  } // warning array allocation problem
996 
997 #else
998 
999  /* everything owned by proc 0 */
1000  if( (int)verts.size() != *vertices_length )
1001  {
1002  return moab::MB_FAILURE;
1003  } // warning array allocation problem
1004 
1005  for( Range::iterator vit = verts.begin(); vit != verts.end(); vit++, i++ )
1006  {
1007  visible_global_rank_ID[i] = 0;
1008  } // all vertices are owned by processor 0, as this is serial run
1009 
1010 #endif
1011 
1012  return moab::MB_SUCCESS;
1013 }
1014 
1015 ErrCode iMOAB_GetVisibleVerticesCoordinates( iMOAB_AppID pid, int* coords_length, double* coordinates )
1016 {
1017  Range& verts = context.appDatas[*pid].all_verts;
1018 
1019  // interleaved coordinates, so that means deep copy anyway
1020  if( *coords_length != 3 * (int)verts.size() )
1021  {
1022  return moab::MB_FAILURE;
1023  }
1024 
1025  ErrorCode rval = context.MBI->get_coords( verts, coordinates );MB_CHK_ERR( rval );
1026 
1027  return moab::MB_SUCCESS;
1028 }
1029 
1030 ErrCode iMOAB_GetBlockID( iMOAB_AppID pid, int* block_length, iMOAB_GlobalID* global_block_IDs )
1031 {
1032  // local id blocks? they are counted from 0 to number of visible blocks ...
1033  // will actually return material set tag value for global
1034  Range& matSets = context.appDatas[*pid].mat_sets;
1035 
1036  if( *block_length != (int)matSets.size() )
1037  {
1038  return moab::MB_FAILURE;
1039  }
1040 
1041  // return material set tag gtags[0 is material set tag
1042  ErrorCode rval = context.MBI->tag_get_data( context.material_tag, matSets, global_block_IDs );MB_CHK_ERR( rval );
1043 
1044  // populate map with index
1045  std::map< int, int >& matIdx = context.appDatas[*pid].matIndex;
1046  for( unsigned i = 0; i < matSets.size(); i++ )
1047  {
1048  matIdx[global_block_IDs[i]] = i;
1049  }
1050 
1051  return moab::MB_SUCCESS;
1052 }
1053 
1054 ErrCode iMOAB_GetBlockInfo( iMOAB_AppID pid,
1055  iMOAB_GlobalID* global_block_ID,
1056  int* vertices_per_element,
1057  int* num_elements_in_block )
1058 {
1059  assert( global_block_ID );
1060 
1061  std::map< int, int >& matMap = context.appDatas[*pid].matIndex;
1062  std::map< int, int >::iterator it = matMap.find( *global_block_ID );
1063 
1064  if( it == matMap.end() )
1065  {
1066  return moab::MB_FAILURE;
1067  } // error in finding block with id
1068 
1069  int blockIndex = matMap[*global_block_ID];
1070  EntityHandle matMeshSet = context.appDatas[*pid].mat_sets[blockIndex];
1071  Range blo_elems;
1072  ErrorCode rval = context.MBI->get_entities_by_handle( matMeshSet, blo_elems );
1073 
1074  if( MB_SUCCESS != rval || blo_elems.empty() )
1075  {
1076  return moab::MB_FAILURE;
1077  }
1078 
1079  EntityType type = context.MBI->type_from_handle( blo_elems[0] );
1080 
1081  if( !blo_elems.all_of_type( type ) )
1082  {
1083  return moab::MB_FAILURE;
1084  } // not all of same type
1085 
1086  const EntityHandle* conn = NULL;
1087  int num_verts = 0;
1088  rval = context.MBI->get_connectivity( blo_elems[0], conn, num_verts );MB_CHK_ERR( rval );
1089 
1090  *vertices_per_element = num_verts;
1091  *num_elements_in_block = (int)blo_elems.size();
1092 
1093  return moab::MB_SUCCESS;
1094 }
1095 
1096 ErrCode iMOAB_GetVisibleElementsInfo( iMOAB_AppID pid,
1097  int* num_visible_elements,
1098  iMOAB_GlobalID* element_global_IDs,
1099  int* ranks,
1100  iMOAB_GlobalID* block_IDs )
1101 {
1102  appData& data = context.appDatas[*pid];
1103 #ifdef MOAB_HAVE_MPI
1104  ParallelComm* pco = context.pcomms[*pid];
1105 #endif
1106 
1107  ErrorCode rval = context.MBI->tag_get_data( context.globalID_tag, data.primary_elems, element_global_IDs );MB_CHK_ERR( rval );
1108 
1109  int i = 0;
1110 
1111  for( Range::iterator eit = data.primary_elems.begin(); eit != data.primary_elems.end(); ++eit, ++i )
1112  {
1113 #ifdef MOAB_HAVE_MPI
1114  rval = pco->get_owner( *eit, ranks[i] );MB_CHK_ERR( rval );
1115 
1116 #else
1117  /* everything owned by task 0 */
1118  ranks[i] = 0;
1119 #endif
1120  }
1121 
1122  for( Range::iterator mit = data.mat_sets.begin(); mit != data.mat_sets.end(); ++mit )
1123  {
1124  EntityHandle matMeshSet = *mit;
1125  Range elems;
1126  rval = context.MBI->get_entities_by_handle( matMeshSet, elems );MB_CHK_ERR( rval );
1127 
1128  int valMatTag;
1129  rval = context.MBI->tag_get_data( context.material_tag, &matMeshSet, 1, &valMatTag );MB_CHK_ERR( rval );
1130 
1131  for( Range::iterator eit = elems.begin(); eit != elems.end(); ++eit )
1132  {
1133  EntityHandle eh = *eit;
1134  int index = data.primary_elems.index( eh );
1135 
1136  if( -1 == index )
1137  {
1138  return moab::MB_FAILURE;
1139  }
1140 
1141  if( -1 >= *num_visible_elements )
1142  {
1143  return moab::MB_FAILURE;
1144  }
1145 
1146  block_IDs[index] = valMatTag;
1147  }
1148  }
1149 
1150  return moab::MB_SUCCESS;
1151 }
1152 
1153 ErrCode iMOAB_GetBlockElementConnectivities( iMOAB_AppID pid,
1154  iMOAB_GlobalID* global_block_ID,
1155  int* connectivity_length,
1156  int* element_connectivity )
1157 {
1158  assert( global_block_ID ); // ensure global block ID argument is not null
1159  assert( connectivity_length ); // ensure connectivity length argument is not null
1160 
1161  appData& data = context.appDatas[*pid];
1162  std::map< int, int >& matMap = data.matIndex;
1163  std::map< int, int >::iterator it = matMap.find( *global_block_ID );
1164 
1165  if( it == matMap.end() )
1166  {
1167  return moab::MB_FAILURE;
1168  } // error in finding block with id
1169 
1170  int blockIndex = matMap[*global_block_ID];
1171  EntityHandle matMeshSet = data.mat_sets[blockIndex];
1172  std::vector< EntityHandle > elems;
1173 
1174  ErrorCode rval = context.MBI->get_entities_by_handle( matMeshSet, elems );MB_CHK_ERR( rval );
1175 
1176  if( elems.empty() )
1177  {
1178  return moab::MB_FAILURE;
1179  }
1180 
1181  std::vector< EntityHandle > vconnect;
1182  rval = context.MBI->get_connectivity( &elems[0], elems.size(), vconnect );MB_CHK_ERR( rval );
1183 
1184  if( *connectivity_length != (int)vconnect.size() )
1185  {
1186  return moab::MB_FAILURE;
1187  } // mismatched sizes
1188 
1189  for( int i = 0; i < *connectivity_length; i++ )
1190  {
1191  int inx = data.all_verts.index( vconnect[i] );
1192 
1193  if( -1 == inx )
1194  {
1195  return moab::MB_FAILURE;
1196  } // error, vertex not in local range
1197 
1198  element_connectivity[i] = inx;
1199  }
1200 
1201  return moab::MB_SUCCESS;
1202 }
1203 
1204 ErrCode iMOAB_GetElementConnectivity( iMOAB_AppID pid,
1205  iMOAB_LocalID* elem_index,
1206  int* connectivity_length,
1207  int* element_connectivity )
1208 {
1209  assert( elem_index ); // ensure element index argument is not null
1210  assert( connectivity_length ); // ensure connectivity length argument is not null
1211 
1212  appData& data = context.appDatas[*pid];
1213  assert( ( *elem_index >= 0 ) && ( *elem_index < (int)data.primary_elems.size() ) );
1214 
1215  int num_nodes;
1216  const EntityHandle* conn;
1217 
1218  EntityHandle eh = data.primary_elems[*elem_index];
1219 
1220  ErrorCode rval = context.MBI->get_connectivity( eh, conn, num_nodes );MB_CHK_ERR( rval );
1221 
1222  if( *connectivity_length < num_nodes )
1223  {
1224  return moab::MB_FAILURE;
1225  } // wrong number of vertices
1226 
1227  for( int i = 0; i < num_nodes; i++ )
1228  {
1229  int index = data.all_verts.index( conn[i] );
1230 
1231  if( -1 == index )
1232  {
1233  return moab::MB_FAILURE;
1234  }
1235 
1236  element_connectivity[i] = index;
1237  }
1238 
1239  *connectivity_length = num_nodes;
1240 
1241  return moab::MB_SUCCESS;
1242 }
1243 
1244 ErrCode iMOAB_GetElementOwnership( iMOAB_AppID pid,
1245  iMOAB_GlobalID* global_block_ID,
1246  int* num_elements_in_block,
1247  int* element_ownership )
1248 {
1249  assert( global_block_ID ); // ensure global block ID argument is not null
1250  assert( num_elements_in_block ); // ensure number of elements in block argument is not null
1251 
1252  std::map< int, int >& matMap = context.appDatas[*pid].matIndex;
1253 
1254  std::map< int, int >::iterator it = matMap.find( *global_block_ID );
1255 
1256  if( it == matMap.end() )
1257  {
1258  return moab::MB_FAILURE;
1259  } // error in finding block with id
1260 
1261  int blockIndex = matMap[*global_block_ID];
1262  EntityHandle matMeshSet = context.appDatas[*pid].mat_sets[blockIndex];
1263  Range elems;
1264 
1265  ErrorCode rval = context.MBI->get_entities_by_handle( matMeshSet, elems );MB_CHK_ERR( rval );
1266 
1267  if( elems.empty() )
1268  {
1269  return moab::MB_FAILURE;
1270  }
1271 
1272  if( *num_elements_in_block != (int)elems.size() )
1273  {
1274  return moab::MB_FAILURE;
1275  } // bad memory allocation
1276 
1277  int i = 0;
1278 #ifdef MOAB_HAVE_MPI
1279  ParallelComm* pco = context.pcomms[*pid];
1280 #endif
1281 
1282  for( Range::iterator vit = elems.begin(); vit != elems.end(); vit++, i++ )
1283  {
1284 #ifdef MOAB_HAVE_MPI
1285  rval = pco->get_owner( *vit, element_ownership[i] );MB_CHK_ERR( rval );
1286 #else
1287  element_ownership[i] = 0; /* owned by 0 */
1288 #endif
1289  }
1290 
1291  return moab::MB_SUCCESS;
1292 }
1293 
1294 ErrCode iMOAB_GetElementID( iMOAB_AppID pid,
1295  iMOAB_GlobalID* global_block_ID,
1296  int* num_elements_in_block,
1297  iMOAB_GlobalID* global_element_ID,
1298  iMOAB_LocalID* local_element_ID )
1299 {
1300  assert( global_block_ID ); // ensure global block ID argument is not null
1301  assert( num_elements_in_block ); // ensure number of elements in block argument is not null
1302 
1303  appData& data = context.appDatas[*pid];
1304  std::map< int, int >& matMap = data.matIndex;
1305 
1306  std::map< int, int >::iterator it = matMap.find( *global_block_ID );
1307 
1308  if( it == matMap.end() )
1309  {
1310  return moab::MB_FAILURE;
1311  } // error in finding block with id
1312 
1313  int blockIndex = matMap[*global_block_ID];
1314  EntityHandle matMeshSet = data.mat_sets[blockIndex];
1315  Range elems;
1316  ErrorCode rval = context.MBI->get_entities_by_handle( matMeshSet, elems );MB_CHK_ERR( rval );
1317 
1318  if( elems.empty() )
1319  {
1320  return moab::MB_FAILURE;
1321  }
1322 
1323  if( *num_elements_in_block != (int)elems.size() )
1324  {
1325  return moab::MB_FAILURE;
1326  } // bad memory allocation
1327 
1328  rval = context.MBI->tag_get_data( context.globalID_tag, elems, global_element_ID );MB_CHK_ERR( rval );
1329 
1330  // check that elems are among primary_elems in data
1331  for( int i = 0; i < *num_elements_in_block; i++ )
1332  {
1333  local_element_ID[i] = data.primary_elems.index( elems[i] );
1334 
1335  if( -1 == local_element_ID[i] )
1336  {
1337  return moab::MB_FAILURE;
1338  } // error, not in local primary elements
1339  }
1340 
1341  return moab::MB_SUCCESS;
1342 }
1343 
1344 ErrCode iMOAB_GetPointerToSurfaceBC( iMOAB_AppID pid,
1345  int* surface_BC_length,
1346  iMOAB_LocalID* local_element_ID,
1347  int* reference_surface_ID,
1348  int* boundary_condition_value )
1349 {
1350  // we have to fill bc data for neumann sets
1351  ErrorCode rval;
1352 
1353  // it was counted above, in GetMeshInfo
1354  appData& data = context.appDatas[*pid];
1355  int numNeuSets = (int)data.neu_sets.size();
1356 
1357  int index = 0; // index [0, surface_BC_length) for the arrays returned
1358 
1359  for( int i = 0; i < numNeuSets; i++ )
1360  {
1361  Range subents;
1362  EntityHandle nset = data.neu_sets[i];
1363  rval = context.MBI->get_entities_by_dimension( nset, data.dimension - 1, subents );MB_CHK_ERR( rval );
1364 
1365  int neuVal;
1366  rval = context.MBI->tag_get_data( context.neumann_tag, &nset, 1, &neuVal );MB_CHK_ERR( rval );
1367 
1368  for( Range::iterator it = subents.begin(); it != subents.end(); ++it )
1369  {
1370  EntityHandle subent = *it;
1371  Range adjPrimaryEnts;
1372  rval = context.MBI->get_adjacencies( &subent, 1, data.dimension, false, adjPrimaryEnts );MB_CHK_ERR( rval );
1373 
1374  // get global id of the primary ents, and side number of the quad/subentity
1375  // this is moab ordering
1376  for( Range::iterator pit = adjPrimaryEnts.begin(); pit != adjPrimaryEnts.end(); pit++ )
1377  {
1378  EntityHandle primaryEnt = *pit;
1379  // get global id
1380  /*int globalID;
1381  rval = context.MBI->tag_get_data(gtags[3], &primaryEnt, 1, &globalID);
1382  if (MB_SUCCESS!=rval)
1383  return moab::MB_FAILURE;
1384  global_element_ID[index] = globalID;*/
1385 
1386  // get local element id
1387  local_element_ID[index] = data.primary_elems.index( primaryEnt );
1388 
1389  if( -1 == local_element_ID[index] )
1390  {
1391  return moab::MB_FAILURE;
1392  } // did not find the element locally
1393 
1394  int side_number, sense, offset;
1395  rval = context.MBI->side_number( primaryEnt, subent, side_number, sense, offset );MB_CHK_ERR( rval );
1396 
1397  reference_surface_ID[index] = side_number + 1; // moab is from 0 to 5, it needs 1 to 6
1398  boundary_condition_value[index] = neuVal;
1399  index++;
1400  }
1401  }
1402  }
1403 
1404  if( index != *surface_BC_length )
1405  {
1406  return moab::MB_FAILURE;
1407  } // error in array allocations
1408 
1409  return moab::MB_SUCCESS;
1410 }
1411 
1412 ErrCode iMOAB_GetPointerToVertexBC( iMOAB_AppID pid,
1413  int* vertex_BC_length,
1414  iMOAB_LocalID* local_vertex_ID,
1415  int* boundary_condition_value )
1416 {
1417  ErrorCode rval;
1418 
1419  // it was counted above, in GetMeshInfo
1420  appData& data = context.appDatas[*pid];
1421  int numDiriSets = (int)data.diri_sets.size();
1422  int index = 0; // index [0, *vertex_BC_length) for the arrays returned
1423 
1424  for( int i = 0; i < numDiriSets; i++ )
1425  {
1426  Range verts;
1427  EntityHandle diset = data.diri_sets[i];
1428  rval = context.MBI->get_entities_by_dimension( diset, 0, verts );MB_CHK_ERR( rval );
1429 
1430  int diriVal;
1431  rval = context.MBI->tag_get_data( context.dirichlet_tag, &diset, 1, &diriVal );MB_CHK_ERR( rval );
1432 
1433  for( Range::iterator vit = verts.begin(); vit != verts.end(); ++vit )
1434  {
1435  EntityHandle vt = *vit;
1436  /*int vgid;
1437  rval = context.MBI->tag_get_data(gtags[3], &vt, 1, &vgid);
1438  if (MB_SUCCESS!=rval)
1439  return moab::MB_FAILURE;
1440  global_vertext_ID[index] = vgid;*/
1441  local_vertex_ID[index] = data.all_verts.index( vt );
1442 
1443  if( -1 == local_vertex_ID[index] )
1444  {
1445  return moab::MB_FAILURE;
1446  } // vertex was not found
1447 
1448  boundary_condition_value[index] = diriVal;
1449  index++;
1450  }
1451  }
1452 
1453  if( *vertex_BC_length != index )
1454  {
1455  return moab::MB_FAILURE;
1456  } // array allocation issue
1457 
1458  return moab::MB_SUCCESS;
1459 }
1460 
1461 ErrCode iMOAB_DefineTagStorage( iMOAB_AppID pid,
1462  const iMOAB_String tag_storage_name,
1463  int* tag_type,
1464  int* components_per_entity,
1465  int* tag_index )
1466 {
1467  // see if the tag is already existing, and if yes, check the type, length
1468  if( *tag_type < 0 || *tag_type > 5 )
1469  {
1470  return moab::MB_FAILURE;
1471  } // we have 6 types of tags supported so far
1472 
1473  DataType tagDataType;
1474  TagType tagType;
1475  void* defaultVal = NULL;
1476  int* defInt = new int[*components_per_entity];
1477  double* defDouble = new double[*components_per_entity];
1478  EntityHandle* defHandle = new EntityHandle[*components_per_entity];
1479 
1480  for( int i = 0; i < *components_per_entity; i++ )
1481  {
1482  defInt[i] = 0;
1483  defDouble[i] = -1e+10;
1484  defHandle[i] = (EntityHandle)0;
1485  }
1486 
1487  switch( *tag_type )
1488  {
1489  case 0:
1490  tagDataType = MB_TYPE_INTEGER;
1491  tagType = MB_TAG_DENSE;
1492  defaultVal = defInt;
1493  break;
1494 
1495  case 1:
1496  tagDataType = MB_TYPE_DOUBLE;
1497  tagType = MB_TAG_DENSE;
1498  defaultVal = defDouble;
1499  break;
1500 
1501  case 2:
1502  tagDataType = MB_TYPE_HANDLE;
1503  tagType = MB_TAG_DENSE;
1504  defaultVal = defHandle;
1505  break;
1506 
1507  case 3:
1508  tagDataType = MB_TYPE_INTEGER;
1509  tagType = MB_TAG_SPARSE;
1510  defaultVal = defInt;
1511  break;
1512 
1513  case 4:
1514  tagDataType = MB_TYPE_DOUBLE;
1515  tagType = MB_TAG_SPARSE;
1516  defaultVal = defDouble;
1517  break;
1518 
1519  case 5:
1520  tagDataType = MB_TYPE_HANDLE;
1521  tagType = MB_TAG_SPARSE;
1522  defaultVal = defHandle;
1523  break;
1524 
1525  default: {
1526  delete[] defInt;
1527  delete[] defDouble;
1528  delete[] defHandle;
1529  return moab::MB_FAILURE;
1530  } // error
1531  }
1532 
1533  Tag tagHandle;
1534  // split storage names if separated list
1535 
1536  std::string tag_name( tag_storage_name );
1537 
1538  // first separate the names of the tags
1539  // we assume that there are separators ":" between the tag names
1540  std::vector< std::string > tagNames;
1541  std::string separator( ":" );
1542  split_tag_names( tag_name, separator, tagNames );
1543 
1544  ErrorCode rval = moab::MB_SUCCESS; // assume success already :)
1545  appData& data = context.appDatas[*pid];
1546  int already_defined_tags = 0;
1547 
1548  for( size_t i = 0; i < tagNames.size(); i++ )
1549  {
1550  rval = context.MBI->tag_get_handle( tagNames[i].c_str(), *components_per_entity, tagDataType, tagHandle,
1551  tagType | MB_TAG_EXCL | MB_TAG_CREAT, defaultVal );
1552 
1553  if( MB_ALREADY_ALLOCATED == rval )
1554  {
1555  std::map< std::string, Tag >& mTags = data.tagMap;
1556  std::map< std::string, Tag >::iterator mit = mTags.find( tagNames[i].c_str() );
1557 
1558  if( mit == mTags.end() )
1559  {
1560  // add it to the map
1561  mTags[tagNames[i]] = tagHandle;
1562  // push it to the list of tags, too
1563  *tag_index = (int)data.tagList.size();
1564  data.tagList.push_back( tagHandle );
1565  }
1566  rval = MB_SUCCESS;
1567  already_defined_tags++;
1568  }
1569  else if( MB_SUCCESS == rval )
1570  {
1571  data.tagMap[tagNames[i]] = tagHandle;
1572  *tag_index = (int)data.tagList.size();
1573  data.tagList.push_back( tagHandle );
1574  }
1575  else
1576  {
1577  rval = moab::MB_FAILURE; // some tags were not created
1578  }
1579  }
1580  // we don't need default values anymore, avoid leaks
1581  int rankHere = 0;
1582 #ifdef MOAB_HAVE_MPI
1583  ParallelComm* pco = context.pcomms[*pid];
1584  rankHere = pco->rank();
1585 #endif
1586  if( !rankHere && already_defined_tags )
1587  std::cout << " application with ID: " << *pid << " global id: " << data.global_id << " name: " << data.name
1588  << " has " << already_defined_tags << " already defined tags out of " << tagNames.size()
1589  << " tags \n";
1590  delete[] defInt;
1591  delete[] defDouble;
1592  delete[] defHandle;
1593  return rval;
1594 }
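For example, defining a dense, double-valued tag with one component per entity (a sketch; the tag name is a placeholder, and tag_type 1 selects MB_TYPE_DOUBLE with MB_TAG_DENSE in the switch above):

// Sketch: define a dense double tag on the application pid.
char tagname[] = "Temperature";   // placeholder; ":"-separated lists are also accepted
int tag_type   = 1;               // dense, double-valued
int ncomp      = 1;               // components per entity
int tag_index  = -1;
ErrCode rc = iMOAB_DefineTagStorage( pid, tagname, &tag_type, &ncomp, &tag_index );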
1595 
1596 ErrCode iMOAB_SetIntTagStorage( iMOAB_AppID pid,
1597  const iMOAB_String tag_storage_name,
1598  int* num_tag_storage_length,
1599  int* ent_type,
1600  int* tag_storage_data )
1601 {
1602  std::string tag_name( tag_storage_name );
1603 
1604  // Get the application data
1605  appData& data = context.appDatas[*pid];
1606 
1607  if( data.tagMap.find( tag_name ) == data.tagMap.end() )
1608  {
1609  return moab::MB_FAILURE;
1610  } // tag not defined
1611 
1612  Tag tag = data.tagMap[tag_name];
1613 
1614  int tagLength = 0;
1615  ErrorCode rval = context.MBI->tag_get_length( tag, tagLength );MB_CHK_ERR( rval );
1616 
1617  DataType dtype;
1618  rval = context.MBI->tag_get_data_type( tag, dtype );
1619 
1620  if( MB_SUCCESS != rval || dtype != MB_TYPE_INTEGER )
1621  {
1622  return moab::MB_FAILURE;
1623  }
1624 
1625  // set it on a subset of entities, based on type and length
1626  Range* ents_to_set;
1627 
1628  if( *ent_type == 0 ) // vertices
1629  {
1630  ents_to_set = &data.all_verts;
1631  }
1632  else // if (*ent_type == 1) // *ent_type can be 0 (vertices) or 1 (elements)
1633  {
1634  ents_to_set = &data.primary_elems;
1635  }
1636 
1637  int nents_to_be_set = *num_tag_storage_length / tagLength;
1638 
1639  if( nents_to_be_set > (int)ents_to_set->size() || nents_to_be_set < 1 )
1640  {
1641  return moab::MB_FAILURE;
1642  } // too many entities to be set or too few
1643 
1644  // restrict the range; everything is contiguous; or not?
1645 
1646  // Range contig_range ( * ( ents_to_set->begin() ), * ( ents_to_set->begin() + nents_to_be_set -
1647  // 1 ) );
1648  rval = context.MBI->tag_set_data( tag, *ents_to_set, tag_storage_data );MB_CHK_ERR( rval );
1649 
1650  return moab::MB_SUCCESS; // no error
1651 }
1652 
1653 ErrCode iMOAB_GetIntTagStorage( iMOAB_AppID pid,
1654  const iMOAB_String tag_storage_name,
1655  int* num_tag_storage_length,
1656  int* ent_type,
1657  int* tag_storage_data )
1658 {
1659  ErrorCode rval;
1660  std::string tag_name( tag_storage_name );
1661 
1662  appData& data = context.appDatas[*pid];
1663 
1664  if( data.tagMap.find( tag_name ) == data.tagMap.end() )
1665  {
1666  return moab::MB_FAILURE;
1667  } // tag not defined
1668 
1669  Tag tag = data.tagMap[tag_name];
1670 
1671  int tagLength = 0;
1672  rval = context.MBI->tag_get_length( tag, tagLength );MB_CHK_ERR( rval );
1673 
1674  DataType dtype;
1675  rval = context.MBI->tag_get_data_type( tag, dtype );MB_CHK_ERR( rval );
1676 
1677  if( dtype != MB_TYPE_INTEGER )
1678  {
1679  return moab::MB_FAILURE;
1680  }
1681 
1682  // set it on a subset of entities, based on type and length
1683  Range* ents_to_get;
1684 
1685  if( *ent_type == 0 ) // vertices
1686  {
1687  ents_to_get = &data.all_verts;
1688  }
1689  else // if (*ent_type == 1)
1690  {
1691  ents_to_get = &data.primary_elems;
1692  }
1693 
1694  int nents_to_get = *num_tag_storage_length / tagLength;
1695 
1696  if( nents_to_get > (int)ents_to_get->size() || nents_to_get < 1 )
1697  {
1698  return moab::MB_FAILURE;
1699  } // too many entities to get, or too few
1700 
1701  // restrict the range; everything is contiguous; or not?
1702  // Range contig_range ( * ( ents_to_get->begin() ), * ( ents_to_get->begin() + nents_to_get - 1
1703  // ) );
1704 
1705  rval = context.MBI->tag_get_data( tag, *ents_to_get, tag_storage_data );MB_CHK_ERR( rval );
1706 
1707  return moab::MB_SUCCESS; // no error
1708 }
1709 
1710 ErrCode iMOAB_SetDoubleTagStorage( iMOAB_AppID pid,
1711  const iMOAB_String tag_storage_names,
1712  int* num_tag_storage_length,
1713  int* ent_type,
1714  double* tag_storage_data )
1715 {
1716  ErrorCode rval;
1717  std::string tag_names( tag_storage_names );
1718  // exactly the same code as for int tag :) maybe should check the type of tag too
1719  std::vector< std::string > tagNames;
1720  std::vector< Tag > tagHandles;
1721  std::string separator( ":" );
1722  split_tag_names( tag_names, separator, tagNames );
1723 
1724  appData& data = context.appDatas[*pid];
1725  Range* ents_to_set = NULL;
1726 
1727  if( *ent_type == 0 ) // vertices
1728  {
1729  ents_to_set = &data.all_verts;
1730  }
1731  else if( *ent_type == 1 )
1732  {
1733  ents_to_set = &data.primary_elems;
1734  }
1735 
1736  int nents_to_be_set = (int)( *ents_to_set ).size();
1737  int position = 0;
1738 
1739  for( size_t i = 0; i < tagNames.size(); i++ )
1740  {
1741  if( data.tagMap.find( tagNames[i] ) == data.tagMap.end() )
1742  {
1743  return moab::MB_FAILURE;
1744  } // some tag not defined yet in the app
1745 
1746  Tag tag = data.tagMap[tagNames[i]];
1747 
1748  int tagLength = 0;
1749  rval = context.MBI->tag_get_length( tag, tagLength );MB_CHK_ERR( rval );
1750 
1751  DataType dtype;
1752  rval = context.MBI->tag_get_data_type( tag, dtype );MB_CHK_ERR( rval );
1753 
1754  if( dtype != MB_TYPE_DOUBLE )
1755  {
1756  return moab::MB_FAILURE;
1757  }
1758 
1759  // set it on the subset of entities, based on type and length
1760  if( position + tagLength * nents_to_be_set > *num_tag_storage_length )
1761  return moab::MB_FAILURE; // too many entity values to be set
1762 
1763  rval = context.MBI->tag_set_data( tag, *ents_to_set, &tag_storage_data[position] );MB_CHK_ERR( rval );
1764  // increment position to next tag
1765  position = position + tagLength * nents_to_be_set;
1766  }
1767  return moab::MB_SUCCESS; // no error
1768 }
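A matching sketch for setting the values of such a tag on all visible vertices (ent_type 0); it assumes pid, tagname and nverts from the earlier sketches, and the data values are placeholders. The total length must equal the tag length times the number of entities.

// Sketch: set one double value per visible vertex for the tag defined above.
std::vector< double > vals( nverts[2], 273.15 );  // placeholder data, one value per vertex
int ent_type = 0;                                 // 0 = vertices, 1 = primary elements
int length   = (int)vals.size();                  // tag length * number of entities
ErrCode rc = iMOAB_SetDoubleTagStorage( pid, tagname, &length, &ent_type, &vals[0] );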
1769 
1770 ErrCode iMOAB_SetDoubleTagStorageWithGid( iMOAB_AppID pid,
1771  const iMOAB_String tag_storage_names,
1772  int* num_tag_storage_length,
1773  int* ent_type,
1774  double* tag_storage_data,
1775  int* globalIds )
1776 {
1777  ErrorCode rval;
1778  std::string tag_names( tag_storage_names );
1779  // exactly the same code as for int tag :) maybe should check the type of tag too
1780  std::vector< std::string > tagNames;
1781  std::vector< Tag > tagHandles;
1782  std::string separator( ":" );
1783  split_tag_names( tag_names, separator, tagNames );
1784 
1785  appData& data = context.appDatas[*pid];
1786  Range* ents_to_set = NULL;
1787 
1788  if( *ent_type == 0 ) // vertices
1789  {
1790  ents_to_set = &data.all_verts;
1791  }
1792  else if( *ent_type == 1 )
1793  {
1794  ents_to_set = &data.primary_elems;
1795  }
1796 
1797  int nents_to_be_set = (int)( *ents_to_set ).size();
1798 
1799  Tag gidTag = context.MBI->globalId_tag();
1800  std::vector< int > gids;
1801  gids.resize( nents_to_be_set );
1802  rval = context.MBI->tag_get_data( gidTag, *ents_to_set, &gids[0] );MB_CHK_ERR( rval );
1803 
1804  // so we will need to set the tags according to the global id passed;
1805  // so the order in tag_storage_data is the same as the order in globalIds, but the order
1806  // in local range is gids
1807  std::map< int, EntityHandle > eh_by_gid;
1808  int i = 0;
1809  for( Range::iterator it = ents_to_set->begin(); it != ents_to_set->end(); ++it, ++i )
1810  {
1811  eh_by_gid[gids[i]] = *it;
1812  }
1813 
1814  std::vector< int > tagLengths( tagNames.size() );
1815  std::vector< Tag > tagList;
1816  size_t total_tag_len = 0;
1817  for( size_t i = 0; i < tagNames.size(); i++ )
1818  {
1819  if( data.tagMap.find( tagNames[i] ) == data.tagMap.end() )
1820  {
1821  MB_SET_ERR( moab::MB_FAILURE, "tag missing" );
1822  } // some tag not defined yet in the app
1823 
1824  Tag tag = data.tagMap[tagNames[i]];
1825  tagList.push_back( tag );
1826 
1827  int tagLength = 0;
1828  rval = context.MBI->tag_get_length( tag, tagLength );MB_CHK_ERR( rval );
1829 
1830  total_tag_len += tagLength;
1831  tagLengths[i] = tagLength;
1832  DataType dtype;
1833  rval = context.MBI->tag_get_data_type( tag, dtype );MB_CHK_ERR( rval );
1834 
1835  if( dtype != MB_TYPE_DOUBLE )
1836  {
1837  MB_SET_ERR( moab::MB_FAILURE, "tag not double type" );
1838  }
1839  }
1840  bool serial = true;
1841 #ifdef MOAB_HAVE_MPI
1842  ParallelComm* pco = context.pcomms[*pid];
1843  unsigned num_procs = pco->size();
1844  if( num_procs > 1 ) serial = false;
1845 #endif
1846 
1847  if( serial )
1848  {
1849  assert( total_tag_len * nents_to_be_set - *num_tag_storage_length == 0 );
1850  // tags are unrolled, we loop over global ids first, then careful about tags
1851  for( int i = 0; i < nents_to_be_set; i++ )
1852  {
1853  int gid = globalIds[i];
1854  EntityHandle eh = eh_by_gid[gid];
1855  // now loop over tags
1856  int indexInTagValues = 0; //
1857  for( size_t j = 0; j < tagList.size(); j++ )
1858  {
1859  indexInTagValues += i * tagLengths[j];
1860  rval = context.MBI->tag_set_data( tagList[j], &eh, 1, &tag_storage_data[indexInTagValues] );MB_CHK_ERR( rval );
1861  // advance the pointer/index
1862  indexInTagValues += ( nents_to_be_set - i ) * tagLengths[j]; // at the end of tag data
1863  }
1864  }
1865  }
1866 #ifdef MOAB_HAVE_MPI
1867  else // it can be not serial only if pco->size() > 1, parallel
1868  {
1869  // in this case, we have to use 2 crystal routers, to send data to the processor that needs it
1870  // we first send tuples to rendezvous points, and from there forward them to the processors that requested them
1871  // it is a 2-hop global gather-scatter
1872  int nbLocalVals = *num_tag_storage_length / ( (int)tagNames.size() );
1873  assert( nbLocalVals * tagNames.size() - *num_tag_storage_length == 0 );
1874  TupleList TLsend;
1875  TLsend.initialize( 2, 0, 0, total_tag_len, nbLocalVals ); // to proc, marker(gid), total_tag_len doubles
1876  TLsend.enableWriteAccess();
1877  // the processor id that handles global_id is global_id % num_procs (trivial rendezvous distribution)
1878 
1879  int indexInRealLocal = 0;
1880  for( int i = 0; i < nbLocalVals; i++ )
1881  {
1882  // to proc, marker (global id)
1883  int marker = globalIds[i];
1884  int to_proc = marker % num_procs;
1885  int n = TLsend.get_n();
1886  TLsend.vi_wr[2 * n] = to_proc; // send to processor
1887  TLsend.vi_wr[2 * n + 1] = marker;
1888  int indexInTagValues = 0;
1889  // collect the tag data, tag by tag
1890  for( size_t j = 0; j < tagList.size(); j++ )
1891  {
1892  indexInTagValues += i * tagLengths[j];
1893  for( int k = 0; k < tagLengths[j]; k++ )
1894  {
1895  TLsend.vr_wr[indexInRealLocal++] = tag_storage_data[indexInTagValues + k];
1896  }
1897  indexInTagValues += ( nbLocalVals - i ) * tagLengths[j];
1898  }
1899  TLsend.inc_n();
1900  }
1901 
1902  assert( nbLocalVals * total_tag_len - indexInRealLocal == 0 );
1903  // send now requests, basically inform the rendez-vous point who needs a particular global id
1904  // send the data to the other processors:
1905  ( pco->proc_config().crystal_router() )->gs_transfer( 1, TLsend, 0 );
1906  TupleList TLreq;
1907  TLreq.initialize( 2, 0, 0, 0, nents_to_be_set );
1908  TLreq.enableWriteAccess();
1909  for( int i = 0; i < nents_to_be_set; i++ )
1910  {
1911  // to proc, marker
1912  int marker = gids[i];
1913  int to_proc = marker % num_procs;
1914  int n = TLreq.get_n();
1915  TLreq.vi_wr[2 * n] = to_proc; // send to processor
1916  TLreq.vi_wr[2 * n + 1] = marker;
1917  // no tag data here; this tuple only requests the values for this marker
1918  TLreq.inc_n();
1919  }
1920  ( pco->proc_config().crystal_router() )->gs_transfer( 1, TLreq, 0 );
1921 
1922  // we know now that process TLreq.vi_wr[2 * n] needs tags for gid TLreq.vi_wr[2 * n + 1]
1923  // we should first order by global id, and then build the new TL with send to proc, global id and
1924  // tags for it
1925  // sort by global ids the tuple lists
1926  moab::TupleList::buffer sort_buffer;
1927  sort_buffer.buffer_init( TLreq.get_n() );
1928  TLreq.sort( 1, &sort_buffer );
1929  sort_buffer.reset();
1930  sort_buffer.buffer_init( TLsend.get_n() );
1931  TLsend.sort( 1, &sort_buffer );
1932  sort_buffer.reset();
1933  // now send the tag values to the proc that requested it
1934  // in theory, for a full partition, TLreq and TLsend should have the same size, and
1935  // each dof should have exactly one target proc. Is that true or not in general?
1936  // how do we plan to use this? Is it better to store the comm graph for future use?
1937 
1938  // start copy from comm graph settle
1939  TupleList TLBack;
1940  TLBack.initialize( 3, 0, 0, total_tag_len, 0 ); // to proc, marker, tag from proc , tag values
1941  TLBack.enableWriteAccess();
1942 
1943  int n1 = TLreq.get_n();
1944  int n2 = TLsend.get_n();
1945 
1946  int indexInTLreq = 0;
1947  int indexInTLsend = 0; // advance both, according to the marker
1948  if( n1 > 0 && n2 > 0 )
1949  {
1950 
1951  while( indexInTLreq < n1 && indexInTLsend < n2 ) // if any is over, we are done
1952  {
1953  int currentValue1 = TLreq.vi_rd[2 * indexInTLreq + 1];
1954  int currentValue2 = TLsend.vi_rd[2 * indexInTLsend + 1];
1955  if( currentValue1 < currentValue2 )
1956  {
1957  // we have a big problem; basically, we are saying that
1958  // dof currentValue is on one model and not on the other
1959  // std::cout << " currentValue1:" << currentValue1 << " missing in comp2" << "\n";
1960  indexInTLreq++;
1961  continue;
1962  }
1963  if( currentValue1 > currentValue2 )
1964  {
1965  // std::cout << " currentValue2:" << currentValue2 << " missing in comp1" << "\n";
1966  indexInTLsend++;
1967  continue;
1968  }
1969  int size1 = 1;
1970  int size2 = 1;
1971  while( indexInTLreq + size1 < n1 && currentValue1 == TLreq.vi_rd[2 * ( indexInTLreq + size1 ) + 1] )
1972  size1++;
1973  while( indexInTLsend + size2 < n2 && currentValue2 == TLsend.vi_rd[2 * ( indexInTLsend + size2 ) + 1] )
1974  size2++;
1975  // must be found in both lists, find the start and end indices
1976  for( int i1 = 0; i1 < size1; i1++ )
1977  {
1978  for( int i2 = 0; i2 < size2; i2++ )
1979  {
1980  // send the info back to components
1981  int n = TLBack.get_n();
1982  TLBack.reserve();
1983  TLBack.vi_wr[3 * n] = TLreq.vi_rd[2 * ( indexInTLreq + i1 )]; // send back to the proc marker
1984  // came from, info from comp2
1985  TLBack.vi_wr[3 * n + 1] = currentValue1; // initial value (resend, just for verif ?)
1986  TLBack.vi_wr[3 * n + 2] = TLsend.vi_rd[2 * ( indexInTLsend + i2 )]; // from proc on comp2
1987  // also fill tag values
1988  for( size_t k = 0; k < total_tag_len; k++ )
1989  {
1990  TLBack.vr_rd[total_tag_len * n + k] =
1991  TLsend.vr_rd[total_tag_len * indexInTLsend + k]; // deep copy of tag values
1992  }
1993  }
1994  }
1995  indexInTLreq += size1;
1996  indexInTLsend += size2;
1997  }
1998  }
1999  ( pco->proc_config().crystal_router() )->gs_transfer( 1, TLBack, 0 );
2000  // end copy from comm graph
2001  // after we are done sending, we need to set those tag values, in a reverse process compared to send
2002  n1 = TLBack.get_n();
2003  double* ptrVal = &TLBack.vr_rd[0]; //
2004  for( int i = 0; i < n1; i++ )
2005  {
2006  int gid = TLBack.vi_rd[3 * i + 1]; // marker
2007  EntityHandle eh = eh_by_gid[gid];
2008  // now loop over tags
2009 
2010  for( size_t j = 0; j < tagList.size(); j++ )
2011  {
2012  rval = context.MBI->tag_set_data( tagList[j], &eh, 1, (void*)ptrVal );MB_CHK_ERR( rval );
2013  // advance the pointer/index
2014  ptrVal += tagLengths[j]; // at the end of tag data per call
2015  }
2016  }
2017  }
2018 #endif
2019  return MB_SUCCESS;
2020 }
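// Illustrative usage sketch (not part of iMOAB.cpp): a hypothetical caller of
// iMOAB_SetDoubleTagStorageWithGid, assuming an application has been registered under pid and
// a dense double tag named "Density" (one component per cell) was defined beforehand with
// iMOAB_DefineTagStorage. The tag name, sizes and ids are made up for the example.
#if 0
#include "moab/iMOAB.h"
#include <vector>

static ErrCode example_set_density_by_gid( iMOAB_AppID pid, std::vector< int >& myGlobalIds )
{
    int nCells  = (int)myGlobalIds.size();
    int entType = 1;       // 1 = primary elements (cells); 0 would be vertices
    int length  = nCells;  // one tag with one component => nCells values in total
    std::vector< double > density( nCells, 1.0 );
    // values are matched to cells through the global ids, not through the local ordering
    return iMOAB_SetDoubleTagStorageWithGid( pid, "Density", &length, &entType, density.data(),
                                             myGlobalIds.data() );
}
#endif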
2021 ErrCode iMOAB_GetDoubleTagStorage( iMOAB_AppID pid,
2022  const iMOAB_String tag_storage_names,
2023  int* num_tag_storage_length,
2024  int* ent_type,
2025  double* tag_storage_data )
2026 {
2027  ErrorCode rval;
2028  // very similar to the integer tag getter, except for the tag data type check
2029  std::string tag_names( tag_storage_names );
2030  // multiple tag names may be passed, separated by ":"
2031  std::vector< std::string > tagNames;
2032  std::vector< Tag > tagHandles;
2033  std::string separator( ":" );
2034  split_tag_names( tag_names, separator, tagNames );
2035 
2036  appData& data = context.appDatas[*pid];
2037 
2038  // set it on a subset of entities, based on type and length
2039  Range* ents_to_get = NULL;
2040 
2041  if( *ent_type == 0 ) // vertices
2042  {
2043  ents_to_get = &data.all_verts;
2044  }
2045  else if( *ent_type == 1 )
2046  {
2047  ents_to_get = &data.primary_elems;
2048  }
2049  int nents_to_get = (int)ents_to_get->size();
2050  int position = 0;
2051  for( size_t i = 0; i < tagNames.size(); i++ )
2052  {
2053  if( data.tagMap.find( tagNames[i] ) == data.tagMap.end() )
2054  {
2055  return moab::MB_FAILURE;
2056  } // tag not defined
2057 
2058  Tag tag = data.tagMap[tagNames[i]];
2059 
2060  int tagLength = 0;
2061  rval = context.MBI->tag_get_length( tag, tagLength );MB_CHK_ERR( rval );
2062 
2063  DataType dtype;
2064  rval = context.MBI->tag_get_data_type( tag, dtype );MB_CHK_ERR( rval );
2065 
2066  if( dtype != MB_TYPE_DOUBLE )
2067  {
2068  return moab::MB_FAILURE;
2069  }
2070 
2071  if( position + nents_to_get * tagLength > *num_tag_storage_length )
2072  return moab::MB_FAILURE; // too many entity values to get
2073 
2074  rval = context.MBI->tag_get_data( tag, *ents_to_get, &tag_storage_data[position] );MB_CHK_ERR( rval );
2075  position = position + nents_to_get * tagLength;
2076  }
2077 
2078  return moab::MB_SUCCESS; // no error
2079 }
2080 
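// Illustrative usage sketch (not part of iMOAB.cpp): reading back two previously defined double
// tags in one call; the names "Temperature:Pressure" and the sizes are hypothetical. The output
// buffer holds the values tag after tag, in the unrolled layout used throughout these routines.
#if 0
#include "moab/iMOAB.h"
#include <vector>

static ErrCode example_get_two_tags( iMOAB_AppID pid, int nCells )
{
    int entType = 1;           // cells
    int length  = 2 * nCells;  // two tags, one component each
    std::vector< double > values( length, 0.0 );
    ErrCode ierr = iMOAB_GetDoubleTagStorage( pid, "Temperature:Pressure", &length, &entType, values.data() );
    // on success, values[0 .. nCells-1] holds Temperature and values[nCells .. 2*nCells-1] holds Pressure
    return ierr;
}
#endif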
2081 ErrCode iMOAB_SynchronizeTags( iMOAB_AppID pid, int* num_tag, int* tag_indices, int* ent_type )
2082 {
2083 #ifdef MOAB_HAVE_MPI
2084  appData& data = context.appDatas[*pid];
2085  Range ent_exchange;
2086  std::vector< Tag > tags;
2087 
2088  for( int i = 0; i < *num_tag; i++ )
2089  {
2090  if( tag_indices[i] < 0 || tag_indices[i] >= (int)data.tagList.size() )
2091  {
2092  return moab::MB_FAILURE;
2093  } // error in tag index
2094 
2095  tags.push_back( data.tagList[tag_indices[i]] );
2096  }
2097 
2098  if( *ent_type == 0 )
2099  {
2100  ent_exchange = data.all_verts;
2101  }
2102  else if( *ent_type == 1 )
2103  {
2104  ent_exchange = data.primary_elems;
2105  }
2106  else
2107  {
2108  return moab::MB_FAILURE;
2109  } // unexpected type
2110 
2111  ParallelComm* pco = context.pcomms[*pid];
2112 
2113  ErrorCode rval = pco->exchange_tags( tags, tags, ent_exchange );MB_CHK_ERR( rval );
2114 
2115 #else
2116  /* do nothing if serial */
2117  // just silence the warning
2118  // do not call sync tags in serial!
2119  int k = *pid + *num_tag + *tag_indices + *ent_type;
2120  k++;
2121 #endif
2122 
2123  return moab::MB_SUCCESS;
2124 }
2125 
2126 ErrCode iMOAB_ReduceTagsMax( iMOAB_AppID pid, int* tag_index, int* ent_type )
2127 {
2128 #ifdef MOAB_HAVE_MPI
2129  appData& data = context.appDatas[*pid];
2130  Range ent_exchange;
2131 
2132  if( *tag_index < 0 || *tag_index >= (int)data.tagList.size() )
2133  {
2134  return moab::MB_FAILURE;
2135  } // error in tag index
2136 
2137  Tag tagh = data.tagList[*tag_index];
2138 
2139  if( *ent_type == 0 )
2140  {
2141  ent_exchange = data.all_verts;
2142  }
2143  else if( *ent_type == 1 )
2144  {
2145  ent_exchange = data.primary_elems;
2146  }
2147  else
2148  {
2149  return moab::MB_FAILURE;
2150  } // unexpected type
2151 
2152  ParallelComm* pco = context.pcomms[*pid];
2153  // we could do different MPI_Op; do not bother now, we will call from fortran
2154  ErrorCode rval = pco->reduce_tags( tagh, MPI_MAX, ent_exchange );MB_CHK_ERR( rval );
2155 
2156 #else
2157  /* do nothing if serial */
2158  // just silence the warning
2159  // do not call sync tags in serial!
2160  int k = *pid + *tag_index + *ent_type;
2161  k++; // just do junk, to avoid complaints
2162 #endif
2163  return moab::MB_SUCCESS;
2164 }
2165 
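// Illustrative usage sketch (not part of iMOAB.cpp): a typical pattern for the two calls above.
// The tag index is the one returned by iMOAB_DefineTagStorage; in a serial build both calls are
// no-ops, as the #else branches show. The helper name and arguments are placeholders.
#if 0
#include "moab/iMOAB.h"

static ErrCode example_sync_and_reduce( iMOAB_AppID pid, int tagIndex )
{
    int entType = 1;  // cells
    int numTags = 1;
    // exchange the tag values with the sharing/ghosting tasks
    ErrCode ierr = iMOAB_SynchronizeTags( pid, &numTags, &tagIndex, &entType );
    if( ierr != 0 ) return ierr;
    // or keep the maximum over all tasks that share each entity
    return iMOAB_ReduceTagsMax( pid, &tagIndex, &entType );
}
#endif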
2166 ErrCode iMOAB_GetNeighborElements( iMOAB_AppID pid,
2167  iMOAB_LocalID* local_index,
2168  int* num_adjacent_elements,
2169  iMOAB_LocalID* adjacent_element_IDs )
2170 {
2171  ErrorCode rval;
2172 
2173  // one neighbor for each subentity of dimension-1
2174  MeshTopoUtil mtu( context.MBI );
2175  appData& data = context.appDatas[*pid];
2176  EntityHandle eh = data.primary_elems[*local_index];
2177  Range adjs;
2178  rval = mtu.get_bridge_adjacencies( eh, data.dimension - 1, data.dimension, adjs );MB_CHK_ERR( rval );
2179 
2180  if( *num_adjacent_elements < (int)adjs.size() )
2181  {
2182  return moab::MB_FAILURE;
2183  } // not dimensioned correctly
2184 
2185  *num_adjacent_elements = (int)adjs.size();
2186 
2187  for( int i = 0; i < *num_adjacent_elements; i++ )
2188  {
2189  adjacent_element_IDs[i] = data.primary_elems.index( adjs[i] );
2190  }
2191 
2192  return moab::MB_SUCCESS;
2193 }
2194 
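// Illustrative usage sketch (not part of iMOAB.cpp): querying the face neighbors of one local
// cell. The caller sizes the output array; on input num_adjacent_elements is the capacity, on
// output it is the actual count. The bound of 6 is an assumption that fits quads and hexes.
#if 0
#include "moab/iMOAB.h"
#include <cstdio>

static ErrCode example_print_neighbors( iMOAB_AppID pid, iMOAB_LocalID cell )
{
    iMOAB_LocalID neighbors[6];
    int numNeighbors = 6;
    ErrCode ierr = iMOAB_GetNeighborElements( pid, &cell, &numNeighbors, neighbors );
    if( ierr != 0 ) return ierr;
    for( int i = 0; i < numNeighbors; i++ )
        std::printf( "neighbor %d: local id %d\n", i, neighbors[i] );
    return 0;
}
#endif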
2195 #if 0
2196 
2197 ErrCode iMOAB_GetNeighborVertices ( iMOAB_AppID pid, iMOAB_LocalID* local_vertex_ID, int* num_adjacent_vertices, iMOAB_LocalID* adjacent_vertex_IDs )
2198 {
2199  return moab::MB_SUCCESS;
2200 }
2201 
2202 #endif
2203 
2204 ErrCode iMOAB_CreateVertices( iMOAB_AppID pid, int* coords_len, int* dim, double* coordinates )
2205 {
2206  ErrorCode rval;
2207  appData& data = context.appDatas[*pid];
2208 
2209  if( !data.local_verts.empty() ) // we should have no vertices in the app
2210  {
2211  return moab::MB_FAILURE;
2212  }
2213 
2214  int nverts = *coords_len / *dim;
2215 
2216  rval = context.MBI->create_vertices( coordinates, nverts, data.local_verts );MB_CHK_ERR( rval );
2217 
2218  rval = context.MBI->add_entities( data.file_set, data.local_verts );MB_CHK_ERR( rval );
2219 
2220  // also add the vertices to the all_verts range
2221  data.all_verts.merge( data.local_verts );
2222  return moab::MB_SUCCESS;
2223 }
2224 
2225 ErrCode iMOAB_CreateElements( iMOAB_AppID pid,
2226  int* num_elem,
2227  int* type,
2228  int* num_nodes_per_element,
2229  int* connectivity,
2230  int* block_ID )
2231 {
2232  // Create elements
2233  appData& data = context.appDatas[*pid];
2234 
2235  ReadUtilIface* read_iface;
2236  ErrorCode rval = context.MBI->query_interface( read_iface );MB_CHK_ERR( rval );
2237 
2238  EntityType mbtype = (EntityType)( *type );
2239  EntityHandle actual_start_handle;
2240  EntityHandle* array = NULL;
2241  rval = read_iface->get_element_connect( *num_elem, *num_nodes_per_element, mbtype, 1, actual_start_handle, array );MB_CHK_ERR( rval );
2242 
2243  // fill up with actual connectivity from input; assume the vertices are in order, and start
2244  // vertex is the first in the current data vertex range
2245  EntityHandle firstVertex = data.local_verts[0];
2246 
2247  for( int j = 0; j < *num_elem * ( *num_nodes_per_element ); j++ )
2248  {
2249  array[j] = connectivity[j] + firstVertex - 1;
2250  } // assumes connectivity uses 1 based array (from fortran, mostly)
2251 
2252  Range new_elems( actual_start_handle, actual_start_handle + *num_elem - 1 );
2253 
2254  rval = context.MBI->add_entities( data.file_set, new_elems );MB_CHK_ERR( rval );
2255 
2256  data.primary_elems.merge( new_elems );
2257 
2258  // add to adjacency
2259  rval = read_iface->update_adjacencies( actual_start_handle, *num_elem, *num_nodes_per_element, array );MB_CHK_ERR( rval );
2260  // organize all new elements in a block, with the given block ID; if the block set does
2261  // not exist yet, create a new mesh set
2262  Range sets;
2263  int set_no = *block_ID;
2264  const void* setno_ptr = &set_no;
2265  rval = context.MBI->get_entities_by_type_and_tag( data.file_set, MBENTITYSET, &context.material_tag, &setno_ptr, 1,
2266  sets );
2267  EntityHandle block_set;
2268 
2269  if( MB_FAILURE == rval || sets.empty() )
2270  {
2271  // create a new set, with this block ID
2272  rval = context.MBI->create_meshset( MESHSET_SET, block_set );MB_CHK_ERR( rval );
2273 
2274  rval = context.MBI->tag_set_data( context.material_tag, &block_set, 1, &set_no );MB_CHK_ERR( rval );
2275 
2276  // add the material set to file set
2277  rval = context.MBI->add_entities( data.file_set, &block_set, 1 );MB_CHK_ERR( rval );
2278  }
2279  else
2280  {
2281  block_set = sets[0];
2282  } // first set is the one we want
2283 
2284  // add the new ents to the block set
2285  rval = context.MBI->add_entities( block_set, new_elems );MB_CHK_ERR( rval );
2286 
2287  return moab::MB_SUCCESS;
2288 }
2289 
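// Illustrative usage sketch (not part of iMOAB.cpp): building a single quad from scratch with
// iMOAB_CreateVertices and iMOAB_CreateElements. Note the 1-based connectivity expected by
// iMOAB_CreateElements and the moab::EntityType value passed as the element type; the block
// id 100 is arbitrary.
#if 0
#include "moab/iMOAB.h"
#include "moab/EntityType.hpp"

static ErrCode example_create_one_quad( iMOAB_AppID pid )
{
    double coords[] = { 0., 0., 0., 1., 0., 0., 1., 1., 0., 0., 1., 0. };
    int coordsLen = 12, dim = 3;
    ErrCode ierr = iMOAB_CreateVertices( pid, &coordsLen, &dim, coords );
    if( ierr != 0 ) return ierr;

    int conn[]       = { 1, 2, 3, 4 };  // 1-based, relative to the vertices just created
    int numElem      = 1;
    int elemType     = (int)moab::MBQUAD;
    int nodesPerElem = 4;
    int blockID      = 100;
    return iMOAB_CreateElements( pid, &numElem, &elemType, &nodesPerElem, conn, &blockID );
}
#endif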
2290 ErrCode iMOAB_SetGlobalInfo( iMOAB_AppID pid, int* num_global_verts, int* num_global_elems )
2291 {
2292  appData& data = context.appDatas[*pid];
2293  data.num_global_vertices = *num_global_verts;
2294  data.num_global_elements = *num_global_elems;
2295  return moab::MB_SUCCESS;
2296 }
2297 
2298 ErrCode iMOAB_GetGlobalInfo( iMOAB_AppID pid, int* num_global_verts, int* num_global_elems )
2299 {
2300  appData& data = context.appDatas[*pid];
2301  if( NULL != num_global_verts )
2302  {
2303  *num_global_verts = data.num_global_vertices;
2304  }
2305  if( NULL != num_global_elems )
2306  {
2307  *num_global_elems = data.num_global_elements;
2308  }
2309 
2310  return moab::MB_SUCCESS;
2311 }
2312 
2313 #ifdef MOAB_HAVE_MPI
2314 
2315 // this makes sense only for parallel runs
2316 ErrCode iMOAB_ResolveSharedEntities( iMOAB_AppID pid, int* num_verts, int* marker )
2317 {
2318  appData& data = context.appDatas[*pid];
2319  ParallelComm* pco = context.pcomms[*pid];
2320  EntityHandle cset = data.file_set;
2321  int dum_id = 0;
2322  ErrorCode rval;
2323  if( data.primary_elems.empty() )
2324  {
2325  // skip the actual resolve; assume vertices are already distributed,
2326  // no need to share them
2327  }
2328  else
2329  {
2330  // create an integer tag for resolving; maybe it can become a long tag in the future
2331  // (more than 2B vertices)
2332 
2333  Tag stag;
2334  rval = context.MBI->tag_get_handle( "__sharedmarker", 1, MB_TYPE_INTEGER, stag, MB_TAG_CREAT | MB_TAG_DENSE,
2335  &dum_id );MB_CHK_ERR( rval );
2336 
2337  if( *num_verts > (int)data.local_verts.size() )
2338  {
2339  return moab::MB_FAILURE;
2340  } // we are not setting the size
2341 
2342  rval = context.MBI->tag_set_data( stag, data.local_verts, (void*)marker );MB_CHK_ERR( rval ); // assumes integer tag
2343 
2344  rval = pco->resolve_shared_ents( cset, -1, -1, &stag );MB_CHK_ERR( rval );
2345 
2346  rval = context.MBI->tag_delete( stag );MB_CHK_ERR( rval );
2347  }
2348  // provide partition tag equal to rank
2349  Tag part_tag;
2350  dum_id = -1;
2351  rval = context.MBI->tag_get_handle( "PARALLEL_PARTITION", 1, MB_TYPE_INTEGER, part_tag,
2352  MB_TAG_CREAT | MB_TAG_SPARSE, &dum_id );
2353 
2354  if( part_tag == NULL || ( ( rval != MB_SUCCESS ) && ( rval != MB_ALREADY_ALLOCATED ) ) )
2355  {
2356  std::cout << " can't get par part tag.\n";
2357  return moab::MB_FAILURE;
2358  }
2359 
2360  int rank = pco->rank();
2361  rval = context.MBI->tag_set_data( part_tag, &cset, 1, &rank );MB_CHK_ERR( rval );
2362 
2363  return moab::MB_SUCCESS;
2364 }
2365 
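// Illustrative usage sketch (not part of iMOAB.cpp): after creating vertices and elements on
// each task, shared interface vertices are resolved by passing one marker per local vertex,
// typically its global id (the same value on every task that holds a copy of that vertex).
#if 0
#include "moab/iMOAB.h"
#include <vector>

static ErrCode example_resolve_shared( iMOAB_AppID pid, std::vector< int >& vertexGlobalIds )
{
    int numVerts = (int)vertexGlobalIds.size();  // one marker per local vertex
    return iMOAB_ResolveSharedEntities( pid, &numVerts, vertexGlobalIds.data() );
}
#endif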
2366 // this assumes that this was not called before
2367 ErrCode iMOAB_DetermineGhostEntities( iMOAB_AppID pid, int* ghost_dim, int* num_ghost_layers, int* bridge_dim )
2368 {
2369  ErrorCode rval;
2370 
2371  // verify we have valid ghost layers input specified. If invalid, exit quick.
2372  if( *num_ghost_layers <= 0 )
2373  {
2374  return moab::MB_SUCCESS;
2375  } // nothing to do
2376 
2377  appData& data = context.appDatas[*pid];
2378  ParallelComm* pco = context.pcomms[*pid];
2379 
2380  int addl_ents = 0;  // maybe we should be passing this too; most of the time we do not
2381  // need additional entities (the exchange below is a collective call)
2382  rval =
2383  pco->exchange_ghost_cells( *ghost_dim, *bridge_dim, *num_ghost_layers, addl_ents, true, true, &data.file_set );MB_CHK_ERR( rval );
2384 
2385  // now re-establish all mesh info; will reconstruct mesh info, based solely on what is in the file set
2386  return iMOAB_UpdateMeshInfo( pid );
2387 }
2388 
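// Illustrative usage sketch (not part of iMOAB.cpp): requesting a single layer of ghost cells
// for a 2d mesh, using vertices (dimension 0) as the bridge between neighboring cells.
#if 0
#include "moab/iMOAB.h"

static ErrCode example_one_ghost_layer( iMOAB_AppID pid )
{
    int ghostDim = 2, bridgeDim = 0, numLayers = 1;
    return iMOAB_DetermineGhostEntities( pid, &ghostDim, &numLayers, &bridgeDim );
}
#endif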
2389 ErrCode iMOAB_SendMesh( iMOAB_AppID pid, MPI_Comm* join, MPI_Group* receivingGroup, int* rcompid, int* method )
2390 {
2391  assert( join != nullptr );
2392  assert( receivingGroup != nullptr );
2393  assert( rcompid != nullptr );
2394 
2395  ErrorCode rval;
2396  int ierr;
2397  appData& data = context.appDatas[*pid];
2398  ParallelComm* pco = context.pcomms[*pid];
2399 
2400  MPI_Comm global = ( data.is_fortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( join ) ) : *join );
2401  MPI_Group recvGroup =
2402  ( data.is_fortran ? MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( receivingGroup ) ) : *receivingGroup );
2403  MPI_Comm sender = pco->comm(); // the sender comm is obtained from parallel comm in moab; no need to pass it along
2404  // first see what are the processors in each group; get the sender group too, from the sender communicator
2405  MPI_Group senderGroup;
2406  ierr = MPI_Comm_group( sender, &senderGroup );
2407  if( ierr != 0 ) return moab::MB_FAILURE;
2408 
2409  // instantiate the par comm graph
2410  // ParCommGraph::ParCommGraph(MPI_Comm joincomm, MPI_Group group1, MPI_Group group2, int coid1,
2411  // int coid2)
2412  ParCommGraph* cgraph =
2413  new ParCommGraph( global, senderGroup, recvGroup, context.appDatas[*pid].global_id, *rcompid );
2414  // we should search if we have another pcomm with the same comp ids in the list already
2415  // sort of check existing comm graphs in the map context.appDatas[*pid].pgraph
2416  context.appDatas[*pid].pgraph[*rcompid] = cgraph;
2417 
2418  int sender_rank = -1;
2419  MPI_Comm_rank( sender, &sender_rank );
2420 
2421  // decide how to distribute elements to each processor
2422  // now, get the entities on local processor, and pack them into a buffer for various processors
2423  // we will do trivial partition: first get the total number of elements from "sender"
2424  std::vector< int > number_elems_per_part;
2425  // how to distribute local elements to receiving tasks?
2426  // trivial partition: first compute the total number of elements that need to be sent
2427  Range owned = context.appDatas[*pid].owned_elems;
2428  if( owned.size() == 0 )
2429  {
2430  // must be vertices that we want to send then
2431  owned = context.appDatas[*pid].local_verts;
2432  // we should have some vertices here
2433  }
2434 
2435  if( *method == 0 ) // trivial partitioning, old method
2436  {
2437  int local_owned_elem = (int)owned.size();
2438  int size = pco->size();
2439  int rank = pco->rank();
2440  number_elems_per_part.resize( size ); //
2441  number_elems_per_part[rank] = local_owned_elem;
2442 #if( MPI_VERSION >= 2 )
2443  // Use "in place" option
2444  ierr = MPI_Allgather( MPI_IN_PLACE, 0, MPI_DATATYPE_NULL, &number_elems_per_part[0], 1, MPI_INT, sender );
2445 #else
2446  {
2447  std::vector< int > all_tmp( size );
2448  ierr = MPI_Allgather( &number_elems_per_part[rank], 1, MPI_INT, &all_tmp[0], 1, MPI_INT, sender );
2449  number_elems_per_part = all_tmp;
2450  }
2451 #endif
2452 
2453  if( ierr != 0 )
2454  {
2455  return moab::MB_FAILURE;
2456  }
2457 
2458  // every sender computes the trivial partition; it is cheap, and we need to send it anyway
2459  // to the receivers
2460  rval = cgraph->compute_trivial_partition( number_elems_per_part );MB_CHK_ERR( rval );
2461 
2462  rval = cgraph->send_graph( global );MB_CHK_ERR( rval );
2463  }
2464  else // *method != 0, so it is either graph or geometric, parallel
2465  {
2466  // owned are the primary elements on this app
2467  rval = cgraph->compute_partition( pco, owned, *method );MB_CHK_ERR( rval );
2468 
2469  // basically, send the graph to the receiver side, with a non-blocking send
2470  rval = cgraph->send_graph_partition( pco, global );MB_CHK_ERR( rval );
2471  }
2472  // pco is needed to pack, not for communication
2473  rval = cgraph->send_mesh_parts( global, pco, owned );MB_CHK_ERR( rval );
2474 
2475  // mark for deletion
2476  MPI_Group_free( &senderGroup );
2477  return moab::MB_SUCCESS;
2478 }
2479 
2480 ErrCode iMOAB_ReceiveMesh( iMOAB_AppID pid, MPI_Comm* join, MPI_Group* sendingGroup, int* scompid )
2481 {
2482  assert( join != nullptr );
2483  assert( sendingGroup != nullptr );
2484  assert( scompid != nullptr );
2485 
2486  ErrorCode rval;
2487  appData& data = context.appDatas[*pid];
2488  ParallelComm* pco = context.pcomms[*pid];
2489  MPI_Comm receive = pco->comm();
2490  EntityHandle local_set = data.file_set;
2491 
2492  MPI_Comm global = ( data.is_fortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( join ) ) : *join );
2493  MPI_Group sendGroup =
2494  ( data.is_fortran ? MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( sendingGroup ) ) : *sendingGroup );
2495 
2496  // first see what the processors in each group are; get the receiver group from the receiver
2497  // communicator
2498  MPI_Group receiverGroup;
2499  int ierr = MPI_Comm_group( receive, &receiverGroup );CHK_MPI_ERR( ierr );
2500 
2501  // instantiate the par comm graph
2502  ParCommGraph* cgraph =
2503  new ParCommGraph( global, sendGroup, receiverGroup, *scompid, context.appDatas[*pid].global_id );
2504  // TODO we should search if we have another pcomm with the same comp ids in the list already
2505  // sort of check existing comm graphs in the map context.appDatas[*pid].pgraph
2506  context.appDatas[*pid].pgraph[*scompid] = cgraph;
2507 
2508  int receiver_rank = -1;
2509  MPI_Comm_rank( receive, &receiver_rank );
2510 
2511  // first, receive from sender_rank 0, the communication graph (matrix), so each receiver
2512  // knows what data to expect
2513  std::vector< int > pack_array;
2514  rval = cgraph->receive_comm_graph( global, pco, pack_array );MB_CHK_ERR( rval );
2515 
2516  // find the senders for the current receiver
2517  int current_receiver = cgraph->receiver( receiver_rank );
2518 
2519  std::vector< int > senders_local;
2520  size_t n = 0;
2521 
2522  while( n < pack_array.size() )
2523  {
2524  if( current_receiver == pack_array[n] )
2525  {
2526  for( int j = 0; j < pack_array[n + 1]; j++ )
2527  {
2528  senders_local.push_back( pack_array[n + 2 + j] );
2529  }
2530 
2531  break;
2532  }
2533 
2534  n = n + 2 + pack_array[n + 1];
2535  }
2536 
2537 #ifdef VERBOSE
2538  std::cout << " receiver " << current_receiver << " at rank " << receiver_rank << " will receive from "
2539  << senders_local.size() << " tasks: ";
2540 
2541  for( int k = 0; k < (int)senders_local.size(); k++ )
2542  {
2543  std::cout << " " << senders_local[k];
2544  }
2545 
2546  std::cout << "\n";
2547 #endif
2548 
2549  if( senders_local.empty() )
2550  {
2551  std::cout << " we do not have any senders for receiver rank " << receiver_rank << "\n";
2552  }
2553  rval = cgraph->receive_mesh( global, pco, local_set, senders_local );MB_CHK_ERR( rval );
2554 
2555  // after we are done, we could merge vertices that come from different senders, but
2556  // have the same global id
2557  Tag idtag;
2558  rval = context.MBI->tag_get_handle( "GLOBAL_ID", idtag );MB_CHK_ERR( rval );
2559 
2560  // data.point_cloud = false;
2561  Range local_ents;
2562  rval = context.MBI->get_entities_by_handle( local_set, local_ents );MB_CHK_ERR( rval );
2563 
2564  // do not do merge if point cloud
2565  if( !local_ents.all_of_type( MBVERTEX ) )
2566  {
2567  if( (int)senders_local.size() >= 2 ) // need to remove duplicate vertices
2568  // that might come from different senders
2569  {
2570 
2571  Range local_verts = local_ents.subset_by_type( MBVERTEX );
2572  Range local_elems = subtract( local_ents, local_verts );
2573 
2574  // remove from local set the vertices
2575  rval = context.MBI->remove_entities( local_set, local_verts );MB_CHK_ERR( rval );
2576 
2577 #ifdef VERBOSE
2578  std::cout << "current_receiver " << current_receiver << " local verts: " << local_verts.size() << "\n";
2579 #endif
2580  MergeMesh mm( context.MBI );
2581 
2582  rval = mm.merge_using_integer_tag( local_verts, idtag );MB_CHK_ERR( rval );
2583 
2584  Range new_verts; // local elems are local entities without vertices
2585  rval = context.MBI->get_connectivity( local_elems, new_verts );MB_CHK_ERR( rval );
2586 
2587 #ifdef VERBOSE
2588  std::cout << "after merging: new verts: " << new_verts.size() << "\n";
2589 #endif
2590  rval = context.MBI->add_entities( local_set, new_verts );MB_CHK_ERR( rval );
2591  }
2592  }
2593  else
2594  data.point_cloud = true;
2595 
2596  if( !data.point_cloud )
2597  {
2598  // still need to resolve shared entities (in this case, vertices )
2599  rval = pco->resolve_shared_ents( local_set, -1, -1, &idtag );MB_CHK_ERR( rval );
2600  }
2601  else
2602  {
2603  // if partition tag exists, set it to current rank; just to make it visible in VisIt
2604  Tag densePartTag;
2605  rval = context.MBI->tag_get_handle( "partition", densePartTag );
2606  if( NULL != densePartTag && MB_SUCCESS == rval )
2607  {
2608  Range local_verts;
2609  rval = context.MBI->get_entities_by_dimension( local_set, 0, local_verts );MB_CHK_ERR( rval );
2610  std::vector< int > vals;
2611  int rank = pco->rank();
2612  vals.resize( local_verts.size(), rank );
2613  rval = context.MBI->tag_set_data( densePartTag, local_verts, &vals[0] );MB_CHK_ERR( rval );
2614  }
2615  }
2616  // set the parallel partition tag
2617  Tag part_tag;
2618  int dum_id = -1;
2619  rval = context.MBI->tag_get_handle( "PARALLEL_PARTITION", 1, MB_TYPE_INTEGER, part_tag,
2620  MB_TAG_CREAT | MB_TAG_SPARSE, &dum_id );
2621 
2622  if( part_tag == NULL || ( ( rval != MB_SUCCESS ) && ( rval != MB_ALREADY_ALLOCATED ) ) )
2623  {
2624  std::cout << " can't get par part tag.\n";
2625  return moab::MB_FAILURE;
2626  }
2627 
2628  int rank = pco->rank();
2629  rval = context.MBI->tag_set_data( part_tag, &local_set, 1, &rank );MB_CHK_ERR( rval );
2630 
2631  // make sure that the GLOBAL_ID is defined as a tag
2632  int tagtype = 0; // dense, integer
2633  int numco = 1; // size
2634  int tagindex; // not used
2635  rval = iMOAB_DefineTagStorage( pid, "GLOBAL_ID", &tagtype, &numco, &tagindex );MB_CHK_ERR( rval );
2636  // populate the mesh with current data info
2637  rval = iMOAB_UpdateMeshInfo( pid );MB_CHK_ERR( rval );
2638 
2639  // mark for deletion
2640  MPI_Group_free( &receiverGroup );
2641 
2642  return moab::MB_SUCCESS;
2643 }
2644 
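// Illustrative usage sketch (not part of iMOAB.cpp): the send/receive pair is collective over a
// joint communicator; sender tasks call iMOAB_SendMesh and receiver tasks call iMOAB_ReceiveMesh
// with the peer component id. The component ids (11, 21) and the use of the trivial partition
// (method 0) are placeholders for the example.
#if 0
#include "moab/iMOAB.h"
#include "moab_mpi.h"

static ErrCode example_migrate_mesh( iMOAB_AppID pidSender, iMOAB_AppID pidReceiver,
                                     MPI_Comm joint, MPI_Group senderGroup, MPI_Group receiverGroup,
                                     bool amISender )
{
    int senderCompId = 11, receiverCompId = 21;
    int method = 0;  // 0 = trivial partition; nonzero selects graph/geometric partitioning
    if( amISender )
        return iMOAB_SendMesh( pidSender, &joint, &receiverGroup, &receiverCompId, &method );
    else
        return iMOAB_ReceiveMesh( pidReceiver, &joint, &senderGroup, &senderCompId );
}
#endif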
2645 ErrCode iMOAB_SendElementTag( iMOAB_AppID pid, const iMOAB_String tag_storage_name, MPI_Comm* join, int* context_id )
2646 {
2647  appData& data = context.appDatas[*pid];
2648  std::map< int, ParCommGraph* >::iterator mt = data.pgraph.find( *context_id );
2649  if( mt == data.pgraph.end() )
2650  {
2651  return moab::MB_FAILURE;
2652  }
2653  ParCommGraph* cgraph = mt->second;
2654  ParallelComm* pco = context.pcomms[*pid];
2655  Range owned = data.owned_elems;
2656  ErrorCode rval;
2657  EntityHandle cover_set;
2658 
2659  MPI_Comm global = ( data.is_fortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( join ) ) : *join );
2660  if( data.point_cloud )
2661  {
2662  owned = data.local_verts;
2663  }
2664 
2665  // another possibility is for the par comm graph to be computed from iMOAB_ComputeCommGraph,
2666  // e.g. after atm-ocn intx, from phys (case from imoab_phatm_ocn_coupler.cpp); then get the cover
2667  // set from the intx remapper
2668 #ifdef MOAB_HAVE_TEMPESTREMAP
2669  if( data.tempestData.remapper != NULL ) // this is the case this is part of intx;;
2670  {
2671  cover_set = data.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh );
2672  rval = context.MBI->get_entities_by_dimension( cover_set, 2, owned );MB_CHK_ERR( rval );
2673  // should still have only quads ?
2674  }
2675 #else
2676  // now, in case we are sending from intx between ocn and atm, we involve coverage set
2677  // how do I know if this receiver already participated in an intersection driven by coupler?
2678  // also, what if this was the "source" mesh in intx?
2679  // in that case, the elements might have been instantiated locally in the coverage set; the
2680  // "owned" range can differ, because the elements are now in the tempestRemap coverage_set
2681  cover_set = cgraph->get_cover_set(); // this will be non null only for intx app ?
2682 
2683  if( 0 != cover_set )
2684  {
2685  rval = context.MBI->get_entities_by_dimension( cover_set, 2, owned );MB_CHK_ERR( rval );
2686  }
2687 #endif
2688 
2689  std::string tag_name( tag_storage_name );
2690 
2691  // basically, we assume everything is defined already on the tag,
2692  // and we can get the tags just by its name
2693  // we assume that there are separators ":" between the tag names
2694  std::vector< std::string > tagNames;
2695  std::vector< Tag > tagHandles;
2696  std::string separator( ":" );
2697  split_tag_names( tag_name, separator, tagNames );
2698  for( size_t i = 0; i < tagNames.size(); i++ )
2699  {
2700  Tag tagHandle;
2701  rval = context.MBI->tag_get_handle( tagNames[i].c_str(), tagHandle );
2702  if( MB_SUCCESS != rval || NULL == tagHandle )
2703  {
2704  std::cout << " can't get tag handle for tag named:" << tagNames[i].c_str() << " at index " << i << "\n";MB_CHK_SET_ERR( rval, "can't get tag handle" );
2705  }
2706  tagHandles.push_back( tagHandle );
2707  }
2708 
2709  // pco is needed to pack, and for moab instance, not for communication!
2710  // still use nonblocking communication, over the joint comm
2711  rval = cgraph->send_tag_values( global, pco, owned, tagHandles );MB_CHK_ERR( rval );
2712  // now, send to each corr_tasks[i] tag data for corr_sizes[i] primary entities
2713 
2714  return moab::MB_SUCCESS;
2715 }
2716 
2717 ErrCode iMOAB_ReceiveElementTag( iMOAB_AppID pid, const iMOAB_String tag_storage_name, MPI_Comm* join, int* context_id )
2718 {
2719  appData& data = context.appDatas[*pid];
2720 
2721  std::map< int, ParCommGraph* >::iterator mt = data.pgraph.find( *context_id );
2722  if( mt == data.pgraph.end() )
2723  {
2724  return moab::MB_FAILURE;
2725  }
2726  ParCommGraph* cgraph = mt->second;
2727 
2728  MPI_Comm global = ( data.is_fortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( join ) ) : *join );
2729  ParallelComm* pco = context.pcomms[*pid];
2730  Range owned = data.owned_elems;
2731 
2732  // how do I know if this receiver already participated in an intersection driven by coupler?
2733  // also, what if this was the "source" mesh in intx?
2734  // in that case, the elements might have been instantiated locally in the coverage set; the
2735  // "owned" range can differ, because the elements are now in the tempestRemap coverage_set
2736  EntityHandle cover_set = cgraph->get_cover_set();
2737  ErrorCode rval;
2738  if( 0 != cover_set )
2739  {
2740  rval = context.MBI->get_entities_by_dimension( cover_set, 2, owned );MB_CHK_ERR( rval );
2741  }
2742  if( data.point_cloud )
2743  {
2744  owned = data.local_verts;
2745  }
2746  // another possibility is for the par comm graph to be computed from iMOAB_ComputeCommGraph,
2747  // e.g. after atm-ocn intx, from phys (case from imoab_phatm_ocn_coupler.cpp); then get the cover
2748  // set from the intx remapper
2749 #ifdef MOAB_HAVE_TEMPESTREMAP
2750  if( data.tempestData.remapper != NULL ) // this is the case this is part of intx;;
2751  {
2752  cover_set = data.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh );
2753  rval = context.MBI->get_entities_by_dimension( cover_set, 2, owned );MB_CHK_ERR( rval );
2754  // should still have only quads ?
2755  }
2756 #endif
2757  /*
2758  * data_intx.remapper exists though only on the intersection application
2759  * how do we get from here ( we know the pid that receives, and the commgraph used by migrate
2760  * mesh )
2761  */
2762 
2763  std::string tag_name( tag_storage_name );
2764 
2765  // we assume that there are separators ":" between the tag names
2766  std::vector< std::string > tagNames;
2767  std::vector< Tag > tagHandles;
2768  std::string separator( ":" );
2769  split_tag_names( tag_name, separator, tagNames );
2770  for( size_t i = 0; i < tagNames.size(); i++ )
2771  {
2772  Tag tagHandle;
2773  rval = context.MBI->tag_get_handle( tagNames[i].c_str(), tagHandle );
2774  if( MB_SUCCESS != rval || NULL == tagHandle )
2775  {
2776  std::cout << " can't get tag handle for tag named:" << tagNames[i].c_str() << " at index " << i << "\n";MB_CHK_SET_ERR( rval, "can't get tag handle" );
2777  }
2778  tagHandles.push_back( tagHandle );
2779  }
2780 
2781 #ifdef VERBOSE
2782  std::cout << pco->rank() << ". Looking to receive data for tags: " << tag_name
2783  << " and file set = " << ( data.file_set ) << "\n";
2784 #endif
2785  // pco is needed to pack, and for moab instance, not for communication!
2786  // still use nonblocking communication
2787  rval = cgraph->receive_tag_values( global, pco, owned, tagHandles );MB_CHK_ERR( rval );
2788 
2789 #ifdef VERBOSE
2790  std::cout << pco->rank() << ". Looking to receive data for tags: " << tag_name << "\n";
2791 #endif
2792 
2793  return moab::MB_SUCCESS;
2794 }
2795 
2796 ErrCode iMOAB_FreeSenderBuffers( iMOAB_AppID pid, int* context_id )
2797 {
2798  // need first to find the pgraph that holds the information we need
2799  // this will be called on sender side only
2800  appData& data = context.appDatas[*pid];
2801  std::map< int, ParCommGraph* >::iterator mt = data.pgraph.find( *context_id );
2802  if( mt == data.pgraph.end() ) return moab::MB_FAILURE; // error
2803 
2804  mt->second->release_send_buffers();
2805  return moab::MB_SUCCESS;
2806 }
2807 
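// Illustrative usage sketch (not part of iMOAB.cpp): once a mesh has been migrated (or a comm
// graph computed), tag values travel over the same graph. The sender posts non-blocking sends,
// the receiver completes them, and the sender releases its buffers afterwards. The tag name
// "Density" and the context id are placeholders.
#if 0
#include "moab/iMOAB.h"
#include "moab_mpi.h"

static ErrCode example_exchange_tag( iMOAB_AppID pidSend, iMOAB_AppID pidRecv,
                                     MPI_Comm joint, int contextId, bool amISender )
{
    ErrCode ierr;
    if( amISender )
    {
        ierr = iMOAB_SendElementTag( pidSend, "Density", &joint, &contextId );
        if( ierr != 0 ) return ierr;
        // only the sender holds buffers from the non-blocking sends
        ierr = iMOAB_FreeSenderBuffers( pidSend, &contextId );
    }
    else
    {
        ierr = iMOAB_ReceiveElementTag( pidRecv, "Density", &joint, &contextId );
    }
    return ierr;
}
#endif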
2808 /**
2809 \brief compute a comm graph between 2 moab apps, based on ID matching
2810 <B>Operations:</B> Collective
2811 */
2812 //#define VERBOSE
2813 ErrCode iMOAB_ComputeCommGraph( iMOAB_AppID pid1,
2814  iMOAB_AppID pid2,
2815  MPI_Comm* join,
2816  MPI_Group* group1,
2817  MPI_Group* group2,
2818  int* type1,
2819  int* type2,
2820  int* comp1,
2821  int* comp2 )
2822 {
2823  assert( join );
2824  assert( group1 );
2825  assert( group2 );
2826  ErrorCode rval = MB_SUCCESS;
2827  int localRank = 0, numProcs = 1;
2828 
2829  bool isFortran = false;
2830  if( *pid1 >= 0 ) isFortran = isFortran || context.appDatas[*pid1].is_fortran;
2831  if( *pid2 >= 0 ) isFortran = isFortran || context.appDatas[*pid2].is_fortran;
2832 
2833  MPI_Comm global = ( isFortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( join ) ) : *join );
2834  MPI_Group srcGroup = ( isFortran ? MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( group1 ) ) : *group1 );
2835  MPI_Group tgtGroup = ( isFortran ? MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( group2 ) ) : *group2 );
2836 
2837  MPI_Comm_rank( global, &localRank );
2838  MPI_Comm_size( global, &numProcs );
2839  // instantiate the par comm graph
2840 
2841  // we should search if we have another pcomm with the same comp ids in the list already
2842  // sort of check existing comm graphs in the map context.appDatas[*pid].pgraph
2843  bool already_exists = false;
2844  if( *pid1 >= 0 )
2845  {
2846  appData& data = context.appDatas[*pid1];
2847  std::map< int, ParCommGraph* >::iterator mt = data.pgraph.find( *comp2 );
2848  if( mt != data.pgraph.end() ) already_exists = true;
2849  }
2850  if( *pid2 >= 0 )
2851  {
2852  appData& data = context.appDatas[*pid2];
2853  std::map< int, ParCommGraph* >::iterator mt = data.pgraph.find( *comp1 );
2854  if( mt != data.pgraph.end() ) already_exists = true;
2855  }
2856  // nothing to do if it already exists
2857  if( already_exists )
2858  {
2859 #ifdef VERBOSE
2860  if( !localRank )
2861  std::cout << " ParCommGraph already exists between components " << *comp1 << " and " << *comp2
2862  << ". Do not compute it again\n";
2863 #endif
2864  return moab::MB_SUCCESS;
2865  }
2866 
2867  // ParCommGraph::ParCommGraph(MPI_Comm joincomm, MPI_Group group1, MPI_Group group2, int coid1,
2868  // int coid2)
2869  ParCommGraph* cgraph = NULL;
2870  if( *pid1 >= 0 ) cgraph = new ParCommGraph( global, srcGroup, tgtGroup, *comp1, *comp2 );
2871  ParCommGraph* cgraph_rev = NULL;
2872  if( *pid2 >= 0 ) cgraph_rev = new ParCommGraph( global, tgtGroup, srcGroup, *comp2, *comp1 );
2873 
2874  if( *pid1 >= 0 ) context.appDatas[*pid1].pgraph[*comp2] = cgraph; // the context will be the other comp
2875  if( *pid2 >= 0 ) context.appDatas[*pid2].pgraph[*comp1] = cgraph_rev; // from 2 to 1
2876  // each model has a list of global ids that will need to be sent by gs to rendezvous with the
2877  // other model on the joint comm
2878  TupleList TLcomp1;
2879  TLcomp1.initialize( 2, 0, 0, 0, 0 ); // to proc, marker
2880  TupleList TLcomp2;
2881  TLcomp2.initialize( 2, 0, 0, 0, 0 ); // to proc, marker
2882  // will push_back a new tuple, if needed
2883 
2884  TLcomp1.enableWriteAccess();
2885 
2886  // tags of interest are either GLOBAL_DOFS or GLOBAL_ID
2887  Tag gdsTag;
2888  // find the values on first cell
2889  int lenTagType1 = 1;
2890  if( 1 == *type1 || 1 == *type2 )
2891  {
2892  rval = context.MBI->tag_get_handle( "GLOBAL_DOFS", gdsTag );MB_CHK_ERR( rval );
2893  rval = context.MBI->tag_get_length( gdsTag, lenTagType1 );MB_CHK_ERR( rval ); // usually it is 16
2894  }
2895  Tag tagType2 = context.MBI->globalId_tag();
2896 
2897  std::vector< int > valuesComp1;
2898  // populate first tuple
2899  if( *pid1 >= 0 )
2900  {
2901  appData& data1 = context.appDatas[*pid1];
2902  EntityHandle fset1 = data1.file_set;
2903  // in case of tempest remap, get the coverage set
2904 #ifdef MOAB_HAVE_TEMPESTREMAP
2905  if( data1.tempestData.remapper != NULL ) // this is the case this is part of intx;;
2906  {
2907  fset1 = data1.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh );
2908  // should still have only quads ?
2909  }
2910 #endif
2911  Range ents_of_interest;
2912  if( *type1 == 1 )
2913  {
2914  assert( gdsTag );
2915  rval = context.MBI->get_entities_by_type( fset1, MBQUAD, ents_of_interest );MB_CHK_ERR( rval );
2916  valuesComp1.resize( ents_of_interest.size() * lenTagType1 );
2917  rval = context.MBI->tag_get_data( gdsTag, ents_of_interest, &valuesComp1[0] );MB_CHK_ERR( rval );
2918  }
2919  else if( *type1 == 2 )
2920  {
2921  rval = context.MBI->get_entities_by_type( fset1, MBVERTEX, ents_of_interest );MB_CHK_ERR( rval );
2922  valuesComp1.resize( ents_of_interest.size() );
2923  rval = context.MBI->tag_get_data( tagType2, ents_of_interest, &valuesComp1[0] );MB_CHK_ERR( rval ); // just global ids
2924  }
2925  else if( *type1 == 3 ) // for FV meshes, just get the global id of cell
2926  {
2927  rval = context.MBI->get_entities_by_dimension( fset1, 2, ents_of_interest );MB_CHK_ERR( rval );
2928  valuesComp1.resize( ents_of_interest.size() );
2929  rval = context.MBI->tag_get_data( tagType2, ents_of_interest, &valuesComp1[0] );MB_CHK_ERR( rval ); // just global ids
2930  }
2931  else
2932  {
2933  MB_CHK_ERR( MB_FAILURE ); // we know only type 1 or 2 or 3
2934  }
2935  // now fill the tuple list with info and markers
2936  // because we will send only the ids, order and compress the list
2937  std::set< int > uniq( valuesComp1.begin(), valuesComp1.end() );
2938  TLcomp1.resize( uniq.size() );
2939  for( std::set< int >::iterator sit = uniq.begin(); sit != uniq.end(); sit++ )
2940  {
2941  // to proc, marker
2942  int marker = *sit;
2943  int to_proc = marker % numProcs;
2944  int n = TLcomp1.get_n();
2945  TLcomp1.vi_wr[2 * n] = to_proc; // send to processor
2946  TLcomp1.vi_wr[2 * n + 1] = marker;
2947  TLcomp1.inc_n();
2948  }
2949  }
2950 
2951  ProcConfig pc( global ); // proc config does the crystal router
2952  pc.crystal_router()->gs_transfer( 1, TLcomp1,
2953  0 ); // communication towards joint tasks, with markers
2954  // sort by value (key 1)
2955 #ifdef VERBOSE
2956  std::stringstream ff1;
2957  ff1 << "TLcomp1_" << localRank << ".txt";
2958  TLcomp1.print_to_file( ff1.str().c_str() ); // it will append!
2959 #endif
2960  moab::TupleList::buffer sort_buffer;
2961  sort_buffer.buffer_init( TLcomp1.get_n() );
2962  TLcomp1.sort( 1, &sort_buffer );
2963  sort_buffer.reset();
2964 #ifdef VERBOSE
2965  // after sorting
2966  TLcomp1.print_to_file( ff1.str().c_str() ); // it will append!
2967 #endif
2968  // do the same, for the other component, number2, with type2
2969  // start copy
2970  TLcomp2.enableWriteAccess();
2971  // populate second tuple
2972  std::vector< int > valuesComp2;
2973  if( *pid2 >= 0 )
2974  {
2975  appData& data2 = context.appDatas[*pid2];
2976  EntityHandle fset2 = data2.file_set;
2977  // in case of tempest remap, get the coverage set
2978 #ifdef MOAB_HAVE_TEMPESTREMAP
2979  if( data2.tempestData.remapper != NULL ) // this is the case this is part of intx;;
2980  {
2981  fset2 = data2.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh );
2982  // should still have only quads ?
2983  }
2984 #endif
2985 
2986  Range ents_of_interest;
2987  if( *type2 == 1 )
2988  {
2989  assert( gdsTag );
2990  rval = context.MBI->get_entities_by_type( fset2, MBQUAD, ents_of_interest );MB_CHK_ERR( rval );
2991  valuesComp2.resize( ents_of_interest.size() * lenTagType1 );
2992  rval = context.MBI->tag_get_data( gdsTag, ents_of_interest, &valuesComp2[0] );MB_CHK_ERR( rval );
2993  }
2994  else if( *type2 == 2 )
2995  {
2996  rval = context.MBI->get_entities_by_type( fset2, MBVERTEX, ents_of_interest );MB_CHK_ERR( rval );
2997  valuesComp2.resize( ents_of_interest.size() ); // stride is 1 here
2998  rval = context.MBI->tag_get_data( tagType2, ents_of_interest, &valuesComp2[0] );MB_CHK_ERR( rval ); // just global ids
2999  }
3000  else if( *type2 == 3 )
3001  {
3002  rval = context.MBI->get_entities_by_dimension( fset2, 2, ents_of_interest );MB_CHK_ERR( rval );
3003  valuesComp2.resize( ents_of_interest.size() ); // stride is 1 here
3004  rval = context.MBI->tag_get_data( tagType2, ents_of_interest, &valuesComp2[0] );MB_CHK_ERR( rval ); // just global ids
3005  }
3006  else
3007  {
3008  MB_CHK_ERR( MB_FAILURE ); // we know only type 1, 2 or 3
3009  }
3010  // now fill the tuple list with info and markers
3011  std::set< int > uniq( valuesComp2.begin(), valuesComp2.end() );
3012  TLcomp2.resize( uniq.size() );
3013  for( std::set< int >::iterator sit = uniq.begin(); sit != uniq.end(); sit++ )
3014  {
3015  // to proc, marker
3016  int marker = *sit;
3017  int to_proc = marker % numProcs;
3018  int n = TLcomp2.get_n();
3019  TLcomp2.vi_wr[2 * n] = to_proc; // send to processor
3020  TLcomp2.vi_wr[2 * n + 1] = marker;
3021  TLcomp2.inc_n();
3022  }
3023  }
3024  pc.crystal_router()->gs_transfer( 1, TLcomp2,
3025  0 ); // communication towards joint tasks, with markers
3026  // sort by value (key 1)
3027 #ifdef VERBOSE
3028  std::stringstream ff2;
3029  ff2 << "TLcomp2_" << localRank << ".txt";
3030  TLcomp2.print_to_file( ff2.str().c_str() );
3031 #endif
3032  sort_buffer.buffer_reserve( TLcomp2.get_n() );
3033  TLcomp2.sort( 1, &sort_buffer );
3034  sort_buffer.reset();
3035  // end copy
3036 #ifdef VERBOSE
3037  TLcomp2.print_to_file( ff2.str().c_str() );
3038 #endif
3039  // need to send back the info, from the rendezvous point, for each of the values
3040  /* so go over each value, on local process in joint communicator
3041 
3042  now have to send back the info needed for communication;
3043  loop in in sync over both TLComp1 and TLComp2, in local process;
3044  So, build new tuple lists, to send synchronous communication
3045  populate them at the same time, based on marker, that is indexed
3046  */
3047 
3048  TupleList TLBackToComp1;
3049  TLBackToComp1.initialize( 3, 0, 0, 0, 0 ); // to proc, marker, from proc on comp2,
3050  TLBackToComp1.enableWriteAccess();
3051 
3052  TupleList TLBackToComp2;
3053  TLBackToComp2.initialize( 3, 0, 0, 0, 0 ); // to proc, marker, from proc,
3054  TLBackToComp2.enableWriteAccess();
3055 
3056  int n1 = TLcomp1.get_n();
3057  int n2 = TLcomp2.get_n();
3058 
3059  int indexInTLComp1 = 0;
3060  int indexInTLComp2 = 0; // advance both, according to the marker
3061  if( n1 > 0 && n2 > 0 )
3062  {
3063 
3064  while( indexInTLComp1 < n1 && indexInTLComp2 < n2 ) // if any is over, we are done
3065  {
3066  int currentValue1 = TLcomp1.vi_rd[2 * indexInTLComp1 + 1];
3067  int currentValue2 = TLcomp2.vi_rd[2 * indexInTLComp2 + 1];
3068  if( currentValue1 < currentValue2 )
3069  {
3070  // we have a big problem; basically, we are saying that
3071  // dof currentValue is on one model and not on the other
3072  // std::cout << " currentValue1:" << currentValue1 << " missing in comp2" << "\n";
3073  indexInTLComp1++;
3074  continue;
3075  }
3076  if( currentValue1 > currentValue2 )
3077  {
3078  // std::cout << " currentValue2:" << currentValue2 << " missing in comp1" << "\n";
3079  indexInTLComp2++;
3080  continue;
3081  }
3082  int size1 = 1;
3083  int size2 = 1;
3084  while( indexInTLComp1 + size1 < n1 && currentValue1 == TLcomp1.vi_rd[2 * ( indexInTLComp1 + size1 ) + 1] )
3085  size1++;
3086  while( indexInTLComp2 + size2 < n2 && currentValue2 == TLcomp2.vi_rd[2 * ( indexInTLComp2 + size2 ) + 1] )
3087  size2++;
3088  // must be found in both lists, find the start and end indices
3089  for( int i1 = 0; i1 < size1; i1++ )
3090  {
3091  for( int i2 = 0; i2 < size2; i2++ )
3092  {
3093  // send the info back to components
3094  int n = TLBackToComp1.get_n();
3095  TLBackToComp1.reserve();
3096  TLBackToComp1.vi_wr[3 * n] =
3097  TLcomp1.vi_rd[2 * ( indexInTLComp1 + i1 )]; // send back to the proc marker
3098  // came from, info from comp2
3099  TLBackToComp1.vi_wr[3 * n + 1] = currentValue1; // initial value (resend?)
3100  TLBackToComp1.vi_wr[3 * n + 2] = TLcomp2.vi_rd[2 * ( indexInTLComp2 + i2 )]; // from proc on comp2
3101  n = TLBackToComp2.get_n();
3102  TLBackToComp2.reserve();
3103  TLBackToComp2.vi_wr[3 * n] =
3104  TLcomp2.vi_rd[2 * ( indexInTLComp2 + i2 )]; // send back info to original
3105  TLBackToComp2.vi_wr[3 * n + 1] = currentValue1; // initial value (resend?)
3106  TLBackToComp2.vi_wr[3 * n + 2] = TLcomp1.vi_rd[2 * ( indexInTLComp1 + i1 )]; // from proc on comp1
3107  // what if there are repeated markers in TLcomp2? increase just index2
3108  }
3109  }
3110  indexInTLComp1 += size1;
3111  indexInTLComp2 += size2;
3112  }
3113  }
3114  pc.crystal_router()->gs_transfer( 1, TLBackToComp1, 0 ); // communication towards original tasks, with info about
3115  pc.crystal_router()->gs_transfer( 1, TLBackToComp2, 0 );
3116 
3117  if( *pid1 >= 0 )
3118  {
3119  // we are on original comp 1 tasks
3120  // before ordering
3121  // now for each value in TLBackToComp1.vi_rd[3*i+1], on current proc, we know the
3122  // processors it communicates with
3123 #ifdef VERBOSE
3124  std::stringstream f1;
3125  f1 << "TLBack1_" << localRank << ".txt";
3126  TLBackToComp1.print_to_file( f1.str().c_str() );
3127 #endif
3128  sort_buffer.buffer_reserve( TLBackToComp1.get_n() );
3129  TLBackToComp1.sort( 1, &sort_buffer );
3130  sort_buffer.reset();
3131 #ifdef VERBOSE
3132  TLBackToComp1.print_to_file( f1.str().c_str() );
3133 #endif
3134  // so we are now on pid1; we now know, for each marker, where it has to go
3135  // add a new method to ParCommGraph, to set up the involved_IDs_map
3136  cgraph->settle_comm_by_ids( *comp1, TLBackToComp1, valuesComp1 );
3137  }
3138  if( *pid2 >= 0 )
3139  {
3140  // we are on original comp 2 tasks
3141  // before ordering
3142  // now for each value in TLBackToComp2.vi_rd[3*i+1], on the current proc, we know the
3143  // processors it communicates with
3144 #ifdef VERBOSE
3145  std::stringstream f2;
3146  f2 << "TLBack2_" << localRank << ".txt";
3147  TLBackToComp2.print_to_file( f2.str().c_str() );
3148 #endif
3149  sort_buffer.buffer_reserve( TLBackToComp2.get_n() );
3150  TLBackToComp2.sort( 2, &sort_buffer );
3151  sort_buffer.reset();
3152 #ifdef VERBOSE
3153  TLBackToComp2.print_to_file( f2.str().c_str() );
3154 #endif
3155  cgraph_rev->settle_comm_by_ids( *comp2, TLBackToComp2, valuesComp2 );
3156  //
3157  }
3158  return moab::MB_SUCCESS;
3159 }
3160 
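// Illustrative usage sketch (not part of iMOAB.cpp): computing a comm graph between a component
// and its migrated copy on the coupler, matching cells by GLOBAL_ID (type 3; type 1 would match
// by GLOBAL_DOFS on spectral quads, type 2 by vertex GLOBAL_ID). Tasks that do not hold one of
// the two applications pass a negative app id. The component ids below are placeholders.
#if 0
#include "moab/iMOAB.h"
#include "moab_mpi.h"

static ErrCode example_compute_comm_graph( iMOAB_AppID pidComp, iMOAB_AppID pidCoupler,
                                           MPI_Comm joint, MPI_Group compGroup, MPI_Group couplerGroup )
{
    int type1 = 3, type2 = 3;    // FV cells on both sides, matched by GLOBAL_ID
    int comp1 = 11, comp2 = 12;  // external ids of the component and of its coupler-side copy
    return iMOAB_ComputeCommGraph( pidComp, pidCoupler, &joint, &compGroup, &couplerGroup,
                                   &type1, &type2, &comp1, &comp2 );
}
#endif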
3161 //#undef VERBOSE
3162 
3163 ErrCode iMOAB_MergeVertices( iMOAB_AppID pid )
3164 {
3165  appData& data = context.appDatas[*pid];
3166  ParallelComm* pco = context.pcomms[*pid];
3167  // collapse vertices and transform cells into triangles/quads /polys
3168  // tags we care about: area, frac, global id
3169  std::vector< Tag > tagsList;
3170  Tag tag;
3171  ErrorCode rval = context.MBI->tag_get_handle( "GLOBAL_ID", tag );
3172  if( !tag || rval != MB_SUCCESS ) return moab::MB_FAILURE; // fatal error, abort
3173  tagsList.push_back( tag );
3174  rval = context.MBI->tag_get_handle( "area", tag );
3175  if( tag && rval == MB_SUCCESS ) tagsList.push_back( tag );
3176  rval = context.MBI->tag_get_handle( "frac", tag );
3177  if( tag && rval == MB_SUCCESS ) tagsList.push_back( tag );
3178  double tol = 1.0e-9;
3179  rval = IntxUtils::remove_duplicate_vertices( context.MBI, data.file_set, tol, tagsList );MB_CHK_ERR( rval );
3180 
3181  // clean material sets of cells that were deleted
3182  rval = context.MBI->get_entities_by_type_and_tag( data.file_set, MBENTITYSET, &( context.material_tag ), 0, 1,
3183  data.mat_sets, Interface::UNION );MB_CHK_ERR( rval );
3184 
3185  if( !data.mat_sets.empty() )
3186  {
3187  EntityHandle matSet = data.mat_sets[0];
3188  Range elems;
3189  rval = context.MBI->get_entities_by_dimension( matSet, 2, elems );MB_CHK_ERR( rval );
3190  rval = context.MBI->remove_entities( matSet, elems );MB_CHK_ERR( rval );
3191  // put back only cells from data.file_set
3192  elems.clear();
3193  rval = context.MBI->get_entities_by_dimension( data.file_set, 2, elems );MB_CHK_ERR( rval );
3194  rval = context.MBI->add_entities( matSet, elems );MB_CHK_ERR( rval );
3195  }
3196  rval = iMOAB_UpdateMeshInfo( pid );MB_CHK_ERR( rval );
3197 
3198  ParallelMergeMesh pmm( pco, tol );
3199  rval = pmm.merge( data.file_set,
3200  /* do not do local merge*/ false,
3201  /* 2d cells*/ 2 );MB_CHK_ERR( rval );
3202 
3203  // assign global ids only for vertices, cells have them fine
3204  rval = pco->assign_global_ids( data.file_set, /*dim*/ 0 );MB_CHK_ERR( rval );
3205 
3206  // set the partition tag on the file set
3207  Tag part_tag;
3208  int dum_id = -1;
3209  rval = context.MBI->tag_get_handle( "PARALLEL_PARTITION", 1, MB_TYPE_INTEGER, part_tag,
3210  MB_TAG_CREAT | MB_TAG_SPARSE, &dum_id );
3211 
3212  if( part_tag == NULL || ( ( rval != MB_SUCCESS ) && ( rval != MB_ALREADY_ALLOCATED ) ) )
3213  {
3214  std::cout << " can't get par part tag.\n";
3215  return moab::MB_FAILURE;
3216  }
3217 
3218  int rank = pco->rank();
3219  rval = context.MBI->tag_set_data( part_tag, &data.file_set, 1, &rank );MB_CHK_ERR( rval );
3220 
3221  return moab::MB_SUCCESS;
3222 }
3223 
3224 #ifdef MOAB_HAVE_TEMPESTREMAP
3225 
3226 // this call must be collective on the joint communicator
3227 // intersection tasks on the coupler will need to send to the component tasks the list of
3228 // element ids that are relevant in the intersection: they intersected some of the target
3229 // elements (which are not needed here)
3230 //
3231 ErrCode iMOAB_CoverageGraph( MPI_Comm* join,
3232  iMOAB_AppID pid_src,
3233  iMOAB_AppID pid_migr,
3234  iMOAB_AppID pid_intx,
3235  int* src_id,
3236  int* migr_id,
3237  int* context_id )
3238 {
3239  // first, based on the scompid and migrcomp, find the parCommGraph corresponding to this
3240  // exchange
3241  ErrorCode rval;
3242  std::vector< int > srcSenders;
3243  std::vector< int > receivers;
3244  ParCommGraph* sendGraph = NULL;
3245  int ierr;
3246  int default_context_id = -1;
3247  bool is_fortran_context = false;
3248 
3249  // First, find the original graph between PE sets
3250  // And based on this one, we will build the newly modified one for coverage
3251  if( *pid_src >= 0 )
3252  {
3253  default_context_id = *migr_id; // the other one
3254  assert( context.appDatas[*pid_src].global_id == *src_id );
3255  is_fortran_context = context.appDatas[*pid_src].is_fortran || is_fortran_context;
3256  sendGraph = context.appDatas[*pid_src].pgraph[default_context_id]; // maybe check if it does not exist
3257 
3258  // report the sender and receiver tasks in the joint comm
3259  srcSenders = sendGraph->senders();
3260  receivers = sendGraph->receivers();
3261 #ifdef VERBOSE
3262  std::cout << "senders: " << srcSenders.size() << " first sender: " << srcSenders[0] << std::endl;
3263 #endif
3264  }
3265 
3266  ParCommGraph* recvGraph = NULL; // will be non null on receiver tasks (intx tasks)
3267  if( *pid_migr >= 0 )
3268  {
3269  is_fortran_context = context.appDatas[*pid_migr].is_fortran || is_fortran_context;
3270  // find the original one
3271  default_context_id = *src_id;
3272  assert( context.appDatas[*pid_migr].global_id == *migr_id );
3273  recvGraph = context.appDatas[*pid_migr].pgraph[default_context_id];
3274  // report the sender and receiver tasks in the joint comm, from migrated mesh pt of view
3275  srcSenders = recvGraph->senders();
3276  receivers = recvGraph->receivers();
3277 #ifdef VERBOSE
3278  std::cout << "receivers: " << receivers.size() << " first receiver: " << receivers[0] << std::endl;
3279 #endif
3280  }
3281 
3282  if( *pid_intx >= 0 ) is_fortran_context = context.appDatas[*pid_intx].is_fortran || is_fortran_context;
3283 
3284  // loop over pid_intx elements, to see what original processors in joint comm have sent the
3285  // coverage mesh;
3286  // If we are on intx tasks, send coverage info towards original component tasks,
3287  // about needed cells
3288  TupleList TLcovIDs;
3289  TLcovIDs.initialize( 2, 0, 0, 0, 0 ); // to proc, GLOBAL ID; estimate about 100 IDs to be sent
3290  // will push_back a new tuple, if needed
3291  TLcovIDs.enableWriteAccess();
3292  // the crystal router will send ID cell to the original source, on the component task
3293  // if we are on intx tasks, loop over all intx elements and
3294 
3295  MPI_Comm global = ( is_fortran_context ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( join ) ) : *join );
3296  int currentRankInJointComm = -1;
3297  ierr = MPI_Comm_rank( global, &currentRankInJointComm );CHK_MPI_ERR( ierr );
3298 
3299  // if currentRankInJointComm is in receivers list, it means that we are on intx tasks too, we
3300  // need to send information towards component tasks
3301  if( find( receivers.begin(), receivers.end(), currentRankInJointComm ) !=
3302  receivers.end() ) // we are on receivers tasks, we can request intx info
3303  {
3304  // find the pcomm for the intx pid
3305  if( *pid_intx >= (int)context.appDatas.size() ) return moab::MB_FAILURE;
3306 
3307  appData& dataIntx = context.appDatas[*pid_intx];
3308  Tag parentTag, orgSendProcTag;
3309 
3310  rval = context.MBI->tag_get_handle( "SourceParent", parentTag );MB_CHK_ERR( rval ); // global id of the blue, source element
3311  if( !parentTag ) return moab::MB_FAILURE; // fatal error, abort
3312 
3313  rval = context.MBI->tag_get_handle( "orig_sending_processor", orgSendProcTag );MB_CHK_ERR( rval );
3314  if( !orgSendProcTag ) return moab::MB_FAILURE; // fatal error, abort
3315 
3316  // find the file set, red parents for intx cells, and put them in tuples
3317  EntityHandle intxSet = dataIntx.file_set;
3318  Range cells;
3319  // get all entities from the set, and look at their RedParent
3320  rval = context.MBI->get_entities_by_dimension( intxSet, 2, cells );MB_CHK_ERR( rval );
3321 
3322  std::map< int, std::set< int > > idsFromProcs; // send that info back to enhance parCommGraph cache
3323  for( Range::iterator it = cells.begin(); it != cells.end(); it++ )
3324  {
3325  EntityHandle intx_cell = *it;
3326  int gidCell, origProc; // look at receivers
3327 
3328  rval = context.MBI->tag_get_data( parentTag, &intx_cell, 1, &gidCell );MB_CHK_ERR( rval );
3329  rval = context.MBI->tag_get_data( orgSendProcTag, &intx_cell, 1, &origProc );MB_CHK_ERR( rval );
3330  // we have augmented the overlap set with ghost cells; in that case, the
3331  // orig_sending_processor tag is not set, so it defaults to -1
3332  if( origProc < 0 ) continue;
3333 
3334  std::set< int >& setInts = idsFromProcs[origProc];
3335  setInts.insert( gidCell );
3336  }
3337 
3338  // if we have no intx cells, it means we are on point clouds; as a quick fix, just use all cells
3339  // from the coverage set (the condition below is intentionally commented out, so this block always runs)
3340  // if( cells.empty() )
3341  {
3342  cells.clear();
3343  // get coverage set
3344  assert( *pid_intx >= 0 );
3345  appData& dataIntx = context.appDatas[*pid_intx];
3346  EntityHandle cover_set = dataIntx.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh );
3347 
3348  // get all cells from coverage set
3349  Tag gidTag;
3350  rval = context.MBI->tag_get_handle( "GLOBAL_ID", gidTag );MB_CHK_ERR( rval );
3351  rval = context.MBI->get_entities_by_dimension( cover_set, 2, cells );MB_CHK_ERR( rval );
3352  // look at their orig_sending_processor
3353  for( Range::iterator it = cells.begin(); it != cells.end(); it++ )
3354  {
3355  EntityHandle covCell = *it;
3356  int gidCell, origProc; // global id and original sending processor
3357 
3358  rval = context.MBI->tag_get_data( gidTag, &covCell, 1, &gidCell );MB_CHK_ERR( rval );
3359  rval = context.MBI->tag_get_data( orgSendProcTag, &covCell, 1, &origProc );MB_CHK_ERR( rval );
3360  // we have augmented the overlap set with ghost cells; in that case, the
3361  // orig_sending_processor tag is not set, so it defaults to -1
3362  if( origProc < 0 ) // it should not be negative here, but skip it defensively
3363  continue;
3364  std::set< int >& setInts = idsFromProcs[origProc];
3365  setInts.insert( gidCell );
3366  }
3367  }
3368 
3369 #ifdef VERBOSE
3370  std::ofstream dbfile;
3371  std::stringstream outf;
3372  outf << "idsFromProc_0" << currentRankInJointComm << ".txt";
3373  dbfile.open( outf.str().c_str() );
3374  dbfile << "Writing this to a file.\n";
3375 
3376  dbfile << " map size:" << idsFromProcs.size()
3377  << std::endl; // on the receiver side, these sizes show how much data to receive
3378  // from each sender (how many ids, and how much tag data later; we need to size up the
3379  // receiver buffer); arrange the ids in tuples and use map iterators to send them
3380  for( std::map< int, std::set< int > >::iterator mt = idsFromProcs.begin(); mt != idsFromProcs.end(); mt++ )
3381  {
3382  std::set< int >& setIds = mt->second;
3383  dbfile << "from id: " << mt->first << " receive " << setIds.size() << " cells \n";
3384  int counter = 0;
3385  for( std::set< int >::iterator st = setIds.begin(); st != setIds.end(); st++ )
3386  {
3387  int valueID = *st;
3388  dbfile << " " << valueID;
3389  counter++;
3390  if( counter % 10 == 0 ) dbfile << "\n";
3391  }
3392  dbfile << "\n";
3393  }
3394  dbfile.close();
3395 #endif
3396  if( NULL != recvGraph )
3397  {
3398  ParCommGraph* recvGraph1 = new ParCommGraph( *recvGraph ); // just copy
3399  recvGraph1->set_context_id( *context_id );
3400  recvGraph1->SetReceivingAfterCoverage( idsFromProcs );
3401  // this par comm graph will need to use the coverage set
3402  // so we are for sure on intx pes (the receiver is the coupler mesh)
3403  assert( *pid_intx >= 0 );
3404  appData& dataIntx = context.appDatas[*pid_intx];
3405  EntityHandle cover_set = dataIntx.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh );
3406  recvGraph1->set_cover_set( cover_set );
3407  context.appDatas[*pid_migr].pgraph[*context_id] = recvGraph1; // possible memory leak if context_id is same
3408  }
3409  for( std::map< int, std::set< int > >::iterator mit = idsFromProcs.begin(); mit != idsFromProcs.end(); mit++ )
3410  {
3411  int procToSendTo = mit->first;
3412  std::set< int >& idSet = mit->second;
3413  for( std::set< int >::iterator sit = idSet.begin(); sit != idSet.end(); sit++ )
3414  {
3415  int n = TLcovIDs.get_n();
3416  TLcovIDs.reserve();
3417  TLcovIDs.vi_wr[2 * n] = procToSendTo; // send to processor
3418  TLcovIDs.vi_wr[2 * n + 1] = *sit; // global id of the cell needed from that processor
3419  }
3420  }
3421  }
3422 
3423  ProcConfig pc( global ); // proc config owns the crystal router
3424  pc.crystal_router()->gs_transfer( 1, TLcovIDs,
3425  0 ); // communication towards component tasks, telling them which ids are needed
3426  // by each receiver task
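// A minimal sketch of the TupleList + crystal-router exchange pattern used above and throughout
// this file (illustrative only; `pc`, `TL`, `destRank`, and `payload` are placeholder names):
//
//   TupleList TL;
//   TL.initialize( 2, 0, 0, 0, 0 );   // two integers per tuple: (destination rank, payload)
//   TL.enableWriteAccess();
//   int n = TL.get_n();               // index of the next tuple to write
//   TL.reserve();                     // make room for one more tuple (as done for TLcovIDs above)
//   TL.vi_wr[2 * n]     = destRank;   // first field: the rank this tuple is routed to
//   TL.vi_wr[2 * n + 1] = payload;    // second field: the value to deliver
//   pc.crystal_router()->gs_transfer( 1, TL, 0 );   // afterwards, TL holds the tuples addressed to this rank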
3427 
3428  // a test to know if we are on the sender tasks (original component, in this case, atmosphere)
3429  if( NULL != sendGraph )
3430  {
3431  // collect TLcovIDs tuple, will set in a local map/set, the ids that are sent to each
3432  // receiver task
3433  ParCommGraph* sendGraph1 = new ParCommGraph( *sendGraph ); // just copy
3434  sendGraph1->set_context_id( *context_id );
3435  context.appDatas[*pid_src].pgraph[*context_id] = sendGraph1;
3436  rval = sendGraph1->settle_send_graph( TLcovIDs );MB_CHK_ERR( rval );
3437  }
3438  return moab::MB_SUCCESS; // success
3439 }
3440 
3441 ErrCode iMOAB_DumpCommGraph( iMOAB_AppID pid, int* context_id, int* is_sender, const iMOAB_String prefix )
3442 {
3443  assert( prefix && strlen( prefix ) );
3444 
3445  ParCommGraph* cgraph = context.appDatas[*pid].pgraph[*context_id];
3446  std::string prefix_str( prefix );
3447 
3448  if( NULL != cgraph )
3449  cgraph->dump_comm_information( prefix_str, *is_sender );
3450  else
3451  {
3452  std::cout << " cannot find ParCommGraph on app with pid " << *pid << " name: " << context.appDatas[*pid].name
3453  << " context: " << *context_id << "\n";
3454  }
3455  return moab::MB_SUCCESS;
3456 }
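// Illustrative usage (hypothetical values): dumping an existing communication graph from the
// sender side, for an application handle `pid` registered elsewhere.
//
//   int context_id = 5;   // hypothetical: the id of the other component in the pgraph map
//   int is_sender  = 1;   // dump from the sender's point of view
//   ErrCode rc = iMOAB_DumpCommGraph( pid, &context_id, &is_sender, "commgraph_atm" );
//   // each task writes its communication information to files prefixed with "commgraph_atm"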
3457 
3458 #endif // #ifdef MOAB_HAVE_TEMPESTREMAP
3459 
3460 #endif // #ifdef MOAB_HAVE_MPI
3461 
3462 #ifdef MOAB_HAVE_TEMPESTREMAP
3463 
3464 #ifdef MOAB_HAVE_NETCDF
3465 ErrCode iMOAB_LoadMappingWeightsFromFile(
3466  iMOAB_AppID pid_intersection,
3467  iMOAB_AppID pid_cpl,
3468  int* col_or_row,
3469  int* type,
3470  const iMOAB_String solution_weights_identifier, /* "scalar", "flux", "custom" */
3471  const iMOAB_String remap_weights_filename )
3472 {
3473  ErrorCode rval;
3474  bool row_based_partition = true;
3475  if( *col_or_row == 1 ) row_based_partition = false; // do a column based partition;
3476 
3477  // get the local degrees of freedom, from the pid_cpl and type of mesh
3478 
3479  // Get the source and target data and pcomm objects
3480  appData& data_intx = context.appDatas[*pid_intersection];
3481  TempestMapAppData& tdata = data_intx.tempestData;
3482 
3483  // Get the handle to the remapper object
3484  if( tdata.remapper == NULL )
3485  {
3486  // Now allocate and initialize the remapper object
3487 #ifdef MOAB_HAVE_MPI
3488  ParallelComm* pco = context.pcomms[*pid_intersection];
3489  tdata.remapper = new moab::TempestRemapper( context.MBI, pco );
3490 #else
3491  tdata.remapper = new moab::TempestRemapper( context.MBI );
3492 #endif
3493  tdata.remapper->meshValidate = true;
3494  tdata.remapper->constructEdgeMap = true;
3495  // Do not create new filesets; Use the sets from our respective applications
3496  tdata.remapper->initialize( false );
3497  tdata.remapper->GetMeshSet( moab::Remapper::OverlapMesh ) = data_intx.file_set;
3498  }
3499 
3500  // Setup loading of weights onto TempestOnlineMap
3501  // Set the context for the remapping weights computation
3502  tdata.weightMaps[std::string( solution_weights_identifier )] = new moab::TempestOnlineMap( tdata.remapper );
3503 
3504  // Retrieve the weight map object we just allocated
3505  moab::TempestOnlineMap* weightMap = tdata.weightMaps[std::string( solution_weights_identifier )];
3506  assert( weightMap != NULL );
3507 
3508  if( *pid_cpl >= 0 ) // we need to decide how to distribute the degrees of freedom (new map reader path)
3509  {
3510  appData& data1 = context.appDatas[*pid_cpl];
3511  EntityHandle fset1 = data1.file_set; // this is source or target, depending on direction
3512 
3513  // tags of interest are either GLOBAL_DOFS or GLOBAL_ID
3514  Tag gdsTag;
3515 
3516  // find the values on first cell
3517  int lenTagType1 = 1;
3518  if( *type == 1 )
3519  {
3520  rval = context.MBI->tag_get_handle( "GLOBAL_DOFS", gdsTag );MB_CHK_ERR( rval );
3521  rval = context.MBI->tag_get_length( gdsTag, lenTagType1 );MB_CHK_ERR( rval ); // usually it is 16
3522  }
3523  Tag tagType2 = context.MBI->globalId_tag();
3524 
3525  std::vector< int > dofValues;
3526 
3527  // populate first tuple
3528  Range
3529  ents_of_interest; // will be filled with entities on coupler, from which we will get the DOFs, based on type
3530  int ndofPerEl = 1;
3531 
3532  if( *type == 1 )
3533  {
3534  assert( gdsTag );
3535  rval = context.MBI->get_entities_by_type( fset1, MBQUAD, ents_of_interest );MB_CHK_ERR( rval );
3536  dofValues.resize( ents_of_interest.size() * lenTagType1 );
3537  rval = context.MBI->tag_get_data( gdsTag, ents_of_interest, &dofValues[0] );MB_CHK_ERR( rval );
3538  ndofPerEl = lenTagType1;
3539  }
3540  else if( *type == 2 )
3541  {
3542  rval = context.MBI->get_entities_by_type( fset1, MBVERTEX, ents_of_interest );MB_CHK_ERR( rval );
3543  dofValues.resize( ents_of_interest.size() );
3544  rval = context.MBI->tag_get_data( tagType2, ents_of_interest, &dofValues[0] );MB_CHK_ERR( rval ); // just global ids
3545  }
3546  else if( *type == 3 ) // for FV meshes, just get the global id of cell
3547  {
3548  rval = context.MBI->get_entities_by_dimension( fset1, 2, ents_of_interest );MB_CHK_ERR( rval );
3549  dofValues.resize( ents_of_interest.size() );
3550  rval = context.MBI->tag_get_data( tagType2, ents_of_interest, &dofValues[0] );MB_CHK_ERR( rval ); // just global ids
3551  }
3552  else
3553  {
3554  MB_CHK_ERR( MB_FAILURE ); // we know only type 1 or 2 or 3
3555  }
3556  // pass ordered dofs, and unique
3557  std::vector< int > orderDofs( dofValues.begin(), dofValues.end() );
3558 
3559  std::sort( orderDofs.begin(), orderDofs.end() );
3560  orderDofs.erase( std::unique( orderDofs.begin(), orderDofs.end() ), orderDofs.end() ); // remove duplicates
3561 
3562  rval = weightMap->ReadParallelMap( remap_weights_filename, orderDofs, row_based_partition );MB_CHK_ERR( rval );
3563 
3564  // if we are on target mesh (row based partition)
3565  if( row_based_partition )
3566  {
3567  tdata.pid_dest = pid_cpl;
3568  tdata.remapper->SetMeshSet( Remapper::TargetMesh, fset1, ents_of_interest );
3569  weightMap->SetDestinationNDofsPerElement( ndofPerEl );
3570  weightMap->set_row_dc_dofs( dofValues ); // will set row_dtoc_dofmap
3571  }
3572  else
3573  {
3574  tdata.pid_src = pid_cpl;
3575  tdata.remapper->SetMeshSet( Remapper::SourceMesh, fset1, ents_of_interest );
3576  weightMap->SetSourceNDofsPerElement( ndofPerEl );
3577  weightMap->set_col_dc_dofs( dofValues ); // will set col_dtoc_dofmap
3578  }
3579  }
3580  else // old reader, trivial distribution by row
3581  {
3582  std::vector< int > tmp_owned_ids; // this will do a trivial row distribution
3583  rval = weightMap->ReadParallelMap( remap_weights_filename, tmp_owned_ids, row_based_partition );MB_CHK_ERR( rval );
3584  }
3585 
3586  return moab::MB_SUCCESS;
3587 }
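// Illustrative usage (hypothetical handles and file name): loading map weights on coupler tasks,
// with a row-based (target) partition of the degrees of freedom for an FV mesh.
//
//   int col_or_row = 0;   // anything other than 1 selects the row-based (target) partition
//   int type       = 3;   // FV mesh: the DOFs are the cell GLOBAL_IDs
//   ErrCode rc = iMOAB_LoadMappingWeightsFromFile( pid_map, pid_cpl_ocn, &col_or_row, &type,
//                                                  "scalar", "atm_ocn_map.nc" );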
3588 
3589 ErrCode iMOAB_WriteMappingWeightsToFile(
3590  iMOAB_AppID pid_intersection,
3591  const iMOAB_String solution_weights_identifier, /* "scalar", "flux", "custom" */
3592  const iMOAB_String remap_weights_filename )
3593 {
3594  assert( solution_weights_identifier && strlen( solution_weights_identifier ) );
3595  assert( remap_weights_filename && strlen( remap_weights_filename ) );
3596 
3597  ErrorCode rval;
3598 
3599  // Get the source and target data and pcomm objects
3600  appData& data_intx = context.appDatas[*pid_intersection];
3601  TempestMapAppData& tdata = data_intx.tempestData;
3602 
3603  // Get the handle to the remapper object
3604  assert( tdata.remapper != NULL );
3605 
3606  // Now get online weights object and ensure it is valid
3607  moab::TempestOnlineMap* weightMap = tdata.weightMaps[std::string( solution_weights_identifier )];
3608  assert( weightMap != NULL );
3609 
3610  std::string filename = std::string( remap_weights_filename );
3611 
3612  std::string metadataStr = data_intx.metadataMap[std::string( solution_weights_identifier )];
3613  std::map<std::string, std::string> attrMap;
3614  attrMap["title"] = "MOAB-TempestRemap Online Regridding Weight Generator";
3615  attrMap["normalization"] = "ovarea";
3616  attrMap["map_aPb"] = filename;
3617 
3618  const std::string delim = ";";
3619  size_t pos = 0, index = 0;
3620  std::vector<std::string> stringAttr(3);
3621  // use find() function to get the position of the delimiters
3622  while (( pos = metadataStr.find (delim)) != std::string::npos)
3623  {
3624  std::string token1 = metadataStr.substr(0, pos); // store the substring
3625  if ( token1.size() > 0 || index == 0 )
3626  stringAttr[index++] = token1;
3627  metadataStr.erase(0, pos + delim.length()); /* erase the processed token (and its delimiter) and move to the next one */
3628  }
3629  stringAttr[index] = metadataStr; // store the last token of the string.
3630  assert(index == 2);
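// Worked example of the parsing above (hypothetical method/order/tag names): a metadata string
// produced by iMOAB_ComputeScalarProjectionWeights with no extra FV options looks like
//   ";cgll:4:GLOBAL_DOFS;fv:1:GLOBAL_ID"
// and is split into stringAttr = { "", "cgll:4:GLOBAL_DOFS", "fv:1:GLOBAL_ID" }, i.e. the
// remap options (possibly empty), the source descriptor, and the target descriptor.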
3631  attrMap["remap_options"] = stringAttr[0];
3632  attrMap["methodorder_b"] = stringAttr[1];
3633  attrMap["methodorder_a"] = stringAttr[2];
3634  attrMap["concave_a"] = "false"; // defaults
3635  attrMap["concave_b"] = "false"; // defaults
3636  attrMap["bubble"] = "true"; // defaults
3637  attrMap["MOABversion"] = std::string(MOAB_VERSION);
3638 
3639  // Write the map file to disk in parallel using either HDF5 or SCRIP interface
3640  rval = weightMap->WriteParallelMap( filename, attrMap );MB_CHK_ERR( rval );
3641 
3642  return moab::MB_SUCCESS;
3643 }
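// Illustrative usage (hypothetical handle and file name): writing previously computed weights,
// assuming a map named "scalar" was generated earlier with iMOAB_ComputeScalarProjectionWeights
// (so its metadata string exists in metadataMap).
//
//   ErrCode rc = iMOAB_WriteMappingWeightsToFile( pid_map, "scalar", "atm_ocn_map_out.nc" );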
3644 
3645 #ifdef MOAB_HAVE_MPI
3646 ErrCode iMOAB_MigrateMapMesh( iMOAB_AppID pid1,
3647  iMOAB_AppID pid2,
3648  iMOAB_AppID pid3,
3649  MPI_Comm* jointcomm,
3650  MPI_Group* groupA,
3651  MPI_Group* groupB,
3652  int* type,
3653  int* comp1,
3654  int* comp2,
3655  int* direction )
3656 {
3657  assert( jointcomm );
3658  assert( groupA );
3659  assert( groupB );
3660  bool is_fortran = false;
3661  if( *pid1 >= 0 ) is_fortran = context.appDatas[*pid1].is_fortran || is_fortran;
3662  if( *pid2 >= 0 ) is_fortran = context.appDatas[*pid2].is_fortran || is_fortran;
3663  if( *pid3 >= 0 ) is_fortran = context.appDatas[*pid3].is_fortran || is_fortran;
3664 
3665  MPI_Comm joint_communicator =
3666  ( is_fortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( jointcomm ) ) : *jointcomm );
3667  MPI_Group group_first = ( is_fortran ? MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( groupA ) ) : *groupA );
3668  MPI_Group group_second = ( is_fortran ? MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( groupB ) ) : *groupB );
3669 
3670  ErrorCode rval = MB_SUCCESS;
3671  int localRank = 0, numProcs = 1;
3672 
3673  // Get the local rank and number of processes in the joint communicator
3674  MPI_Comm_rank( joint_communicator, &localRank );
3675  MPI_Comm_size( joint_communicator, &numProcs );
3676 
3677  // Next instantiate the par comm graph
3678  // here we need to look at direction
3679  // this direction is good for atm source -> ocn target coupler example
3680  ParCommGraph* cgraph = NULL;
3681  if( *pid1 >= 0 ) cgraph = new ParCommGraph( joint_communicator, group_first, group_second, *comp1, *comp2 );
3682 
3683  ParCommGraph* cgraph_rev = NULL;
3684  if( *pid3 >= 0 ) cgraph_rev = new ParCommGraph( joint_communicator, group_second, group_first, *comp2, *comp1 );
3685 
3686  // we should check whether a comm graph with the same comp ids already exists in the list,
3687  // i.e., search the existing comm graphs in the map context.appDatas[*pid].pgraph
3688  if( *pid1 >= 0 ) context.appDatas[*pid1].pgraph[*comp2] = cgraph; // the context will be the other comp
3689  if( *pid3 >= 0 ) context.appDatas[*pid3].pgraph[*comp1] = cgraph_rev; // from 2 to 1
3690 
3691  // each model has a list of global ids that will need to be sent by gs_transfer to rendezvous with the
3692  // other model on the joint_communicator
3693  TupleList TLcomp1;
3694  TLcomp1.initialize( 2, 0, 0, 0, 0 ); // to proc, marker
3695  TupleList TLcomp2;
3696  TLcomp2.initialize( 2, 0, 0, 0, 0 ); // to proc, marker
3697 
3698  // will push_back a new tuple, if needed
3699  TLcomp1.enableWriteAccess();
3700 
3701  // tags of interest are either GLOBAL_DOFS or GLOBAL_ID
3702  Tag gdsTag;
3703 
3704  // find the values on first cell
3705  int lenTagType1 = 1;
3706  if( *type == 1 )
3707  {
3708  rval = context.MBI->tag_get_handle( "GLOBAL_DOFS", gdsTag );MB_CHK_ERR( rval );
3709  rval = context.MBI->tag_get_length( gdsTag, lenTagType1 );MB_CHK_ERR( rval ); // usually it is 16
3710  }
3711  Tag tagType2 = context.MBI->globalId_tag();
3712 
3713  std::vector< int > valuesComp1;
3714 
3715  // populate first tuple
3716  Range ents_of_interest; // will be filled with entities on pid1, that need to be distributed,
3717  // rearranged in split_ranges map
3718  if( *pid1 >= 0 )
3719  {
3720  appData& data1 = context.appDatas[*pid1];
3721  EntityHandle fset1 = data1.file_set;
3722 
3723  if( *type == 1 )
3724  {
3725  assert( gdsTag );
3726  rval = context.MBI->get_entities_by_type( fset1, MBQUAD, ents_of_interest );MB_CHK_ERR( rval );
3727  valuesComp1.resize( ents_of_interest.size() * lenTagType1 );
3728  rval = context.MBI->tag_get_data( gdsTag, ents_of_interest, &valuesComp1[0] );MB_CHK_ERR( rval );
3729  }
3730  else if( *type == 2 )
3731  {
3732  rval = context.MBI->get_entities_by_type( fset1, MBVERTEX, ents_of_interest );MB_CHK_ERR( rval );
3733  valuesComp1.resize( ents_of_interest.size() );
3734  rval = context.MBI->tag_get_data( tagType2, ents_of_interest, &valuesComp1[0] );MB_CHK_ERR( rval ); // just global ids
3735  }
3736  else if( *type == 3 ) // for FV meshes, just get the global id of cell
3737  {
3738  rval = context.MBI->get_entities_by_dimension( fset1, 2, ents_of_interest );MB_CHK_ERR( rval );
3739  valuesComp1.resize( ents_of_interest.size() );
3740  rval = context.MBI->tag_get_data( tagType2, ents_of_interest, &valuesComp1[0] );MB_CHK_ERR( rval ); // just global ids
3741  }
3742  else
3743  {
3744  MB_CHK_ERR( MB_FAILURE ); // we know only type 1 or 2 or 3
3745  }
3746  // now fill the tuple list with info and markers
3747  // because we will send only the ids, order and compress the list
3748  std::set< int > uniq( valuesComp1.begin(), valuesComp1.end() );
3749  TLcomp1.resize( uniq.size() );
3750  for( std::set< int >::iterator sit = uniq.begin(); sit != uniq.end(); sit++ )
3751  {
3752  // to proc, marker, element local index, index in el
3753  int marker = *sit;
3754  int to_proc = marker % numProcs;
3755  int n = TLcomp1.get_n();
3756  TLcomp1.vi_wr[2 * n] = to_proc; // send to processor
3757  TLcomp1.vi_wr[2 * n + 1] = marker;
3758  TLcomp1.inc_n();
3759  }
3760  }
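// The rendezvous rule above is simply marker % numProcs. For example (hypothetical sizes), with
// numProcs = 4 in the joint communicator, global DOF 37 is routed to task 37 % 4 = 1 by whichever
// component owns it, so the entries from both components meet on the same rendezvous task.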
3761 
3762  ProcConfig pc( joint_communicator ); // proc config does the crystal router
3763  pc.crystal_router()->gs_transfer( 1, TLcomp1,
3764  0 ); // communication towards joint tasks, with markers
3765  // sort by value (key 1)
3766 #ifdef VERBOSE
3767  std::stringstream ff1;
3768  ff1 << "TLcomp1_" << localRank << ".txt";
3769  TLcomp1.print_to_file( ff1.str().c_str() ); // it will append!
3770 #endif
3771  moab::TupleList::buffer sort_buffer;
3772  sort_buffer.buffer_init( TLcomp1.get_n() );
3773  TLcomp1.sort( 1, &sort_buffer );
3774  sort_buffer.reset();
3775 #ifdef VERBOSE
3776  // after sorting
3777  TLcomp1.print_to_file( ff1.str().c_str() ); // it will append!
3778 #endif
3779  // do the same, for the other component, number2, with type2
3780  // start copy
3781  TLcomp2.enableWriteAccess();
3782 
3783  moab::TempestOnlineMap* weightMap = NULL; // declare it outside, but it is meaningful only when *pid2 >= 0,
3784  // i.e., on the coupler PEs, where the map read from file and the coupler procs exist
3785  // populate second tuple with ids from read map: we need row_gdofmap and col_gdofmap
3786  std::vector< int > valuesComp2;
3787  if( *pid2 >= 0 ) // we are now on coupler, map side
3788  {
3789  appData& data2 = context.appDatas[*pid2];
3790  TempestMapAppData& tdata = data2.tempestData;
3791  // should be only one map, read from file
3792  assert( tdata.weightMaps.size() == 1 );
3793  // maybe we need to check it is the map we expect
3794  weightMap = tdata.weightMaps.begin()->second;
3795  // std::vector<int> ids_of_interest;
3796  // do a deep copy of the ids of interest: row ids or col ids, target or source direction
3797  if( *direction == 1 )
3798  {
3799  // we are interested in col ids, source
3800  // new method from moab::TempestOnlineMap
3801  rval = weightMap->fill_col_ids( valuesComp2 );MB_CHK_ERR( rval );
3802  }
3803  else if( *direction == 2 )
3804  {
3805  // we are interested in row ids, for target ids
3806  rval = weightMap->fill_row_ids( valuesComp2 );MB_CHK_ERR( rval );
3807  }
3808  //
3809 
3810  // now fill the tuple list with info and markers
3811  std::set< int > uniq( valuesComp2.begin(), valuesComp2.end() );
3812  TLcomp2.resize( uniq.size() );
3813  for( std::set< int >::iterator sit = uniq.begin(); sit != uniq.end(); sit++ )
3814  {
3815  // to proc, marker, element local index, index in el
3816  int marker = *sit;
3817  int to_proc = marker % numProcs;
3818  int n = TLcomp2.get_n();
3819  TLcomp2.vi_wr[2 * n] = to_proc; // send to processor
3820  TLcomp2.vi_wr[2 * n + 1] = marker;
3821  TLcomp2.inc_n();
3822  }
3823  }
3824  pc.crystal_router()->gs_transfer( 1, TLcomp2,
3825  0 ); // communication towards joint tasks, with markers
3826  // sort by value (key 1)
3827  // in the rendezvous approach, matching markers meet at the rendezvous processor, given by marker % numProcs
3828 #ifdef VERBOSE
3829  std::stringstream ff2;
3830  ff2 << "TLcomp2_" << localRank << ".txt";
3831  TLcomp2.print_to_file( ff2.str().c_str() );
3832 #endif
3833  sort_buffer.buffer_reserve( TLcomp2.get_n() );
3834  TLcomp2.sort( 1, &sort_buffer );
3835  sort_buffer.reset();
3836  // end copy
3837 #ifdef VERBOSE
3838  TLcomp2.print_to_file( ff2.str().c_str() );
3839 #endif
3840  // need to send back the info, from the rendezvous point, for each of the values
3841  /* Go over each value on the local process in the joint communicator;
3842 
3843  we now have to send back the info needed for communication.
3844  Loop in sync over both TLcomp1 and TLcomp2 on the local process, and
3845  build new tuple lists to send back to the components, populating
3846  both at the same time, keyed by the (sorted) marker values.
3847  */
3848 
3849  TupleList TLBackToComp1;
3850  TLBackToComp1.initialize( 3, 0, 0, 0, 0 ); // to proc, marker, from proc on comp2,
3851  TLBackToComp1.enableWriteAccess();
3852 
3853  TupleList TLBackToComp2;
3854  TLBackToComp2.initialize( 3, 0, 0, 0, 0 ); // to proc, marker, from proc,
3855  TLBackToComp2.enableWriteAccess();
3856 
3857  int n1 = TLcomp1.get_n();
3858  int n2 = TLcomp2.get_n();
3859 
3860  int indexInTLComp1 = 0;
3861  int indexInTLComp2 = 0; // advance both, according to the marker
3862  if( n1 > 0 && n2 > 0 )
3863  {
3864 
3865  while( indexInTLComp1 < n1 && indexInTLComp2 < n2 ) // if any is over, we are done
3866  {
3867  int currentValue1 = TLcomp1.vi_rd[2 * indexInTLComp1 + 1];
3868  int currentValue2 = TLcomp2.vi_rd[2 * indexInTLComp2 + 1];
3869  if( currentValue1 < currentValue2 )
3870  {
3871  // we have a big problem; basically, we are saying that
3872  // dof currentValue is on one model and not on the other
3873  // std::cout << " currentValue1:" << currentValue1 << " missing in comp2" << "\n";
3874  indexInTLComp1++;
3875  continue;
3876  }
3877  if( currentValue1 > currentValue2 )
3878  {
3879  // std::cout << " currentValue2:" << currentValue2 << " missing in comp1" << "\n";
3880  indexInTLComp2++;
3881  continue;
3882  }
3883  int size1 = 1;
3884  int size2 = 1;
3885  while( indexInTLComp1 + size1 < n1 && currentValue1 == TLcomp1.vi_rd[2 * ( indexInTLComp1 + size1 ) + 1] )
3886  size1++;
3887  while( indexInTLComp2 + size2 < n2 && currentValue2 == TLcomp2.vi_rd[2 * ( indexInTLComp2 + size2 ) + 1] )
3888  size2++;
3889  // must be found in both lists, find the start and end indices
3890  for( int i1 = 0; i1 < size1; i1++ )
3891  {
3892  for( int i2 = 0; i2 < size2; i2++ )
3893  {
3894  // send the info back to components
3895  int n = TLBackToComp1.get_n();
3896  TLBackToComp1.reserve();
3897  TLBackToComp1.vi_wr[3 * n] =
3898  TLcomp1.vi_rd[2 * ( indexInTLComp1 + i1 )]; // send back to the proc marker
3899  // came from, info from comp2
3900  TLBackToComp1.vi_wr[3 * n + 1] = currentValue1; // initial value (resend?)
3901  TLBackToComp1.vi_wr[3 * n + 2] = TLcomp2.vi_rd[2 * ( indexInTLComp2 + i2 )]; // from proc on comp2
3902  n = TLBackToComp2.get_n();
3903  TLBackToComp2.reserve();
3904  TLBackToComp2.vi_wr[3 * n] =
3905  TLcomp2.vi_rd[2 * ( indexInTLComp2 + i2 )]; // send back info to original
3906  TLBackToComp2.vi_wr[3 * n + 1] = currentValue1; // initial value (resend?)
3907  TLBackToComp2.vi_wr[3 * n + 2] = TLcomp1.vi_rd[2 * ( indexInTLComp1 + i1 )]; // from proc on comp1
3908  // what if there are repeated markers in TLcomp2? increase just index2
3909  }
3910  }
3911  indexInTLComp1 += size1;
3912  indexInTLComp2 += size2;
3913  }
3914  }
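// The loop above is a merge of two sorted marker lists. For example (hypothetical values), if
// TLcomp1 holds markers {3, 5, 5, 9} and TLcomp2 holds {5, 7}, only marker 5 matches; with
// size1 = 2 and size2 = 1, two tuples are appended to TLBackToComp1 and two to TLBackToComp2,
// while markers 3, 9 (comp1 only) and 7 (comp2 only) are skipped.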
3915  pc.crystal_router()->gs_transfer( 1, TLBackToComp1, 0 ); // communication towards original tasks, with info about
3916  pc.crystal_router()->gs_transfer( 1, TLBackToComp2, 0 );
3917 
3918  TupleList TLv; // vertices
3919  TupleList TLc; // cells if needed (not type 2)
3920 
3921  if( *pid1 >= 0 )
3922  {
3923  // we are on original comp 1 tasks
3924  // before ordering
3925  // now for each value in TLBackToComp1.vi_rd[3*i+1], on current proc, we know the
3926  // processors it communicates with
3927 #ifdef VERBOSE
3928  std::stringstream f1;
3929  f1 << "TLBack1_" << localRank << ".txt";
3930  TLBackToComp1.print_to_file( f1.str().c_str() );
3931 #endif
3932  // so we are now on pid1, and we now know where each marker has to go
3933  // add a new method to ParCommGraph, to set up the split_ranges and involved_IDs_map
3934  rval = cgraph->set_split_ranges( *comp1, TLBackToComp1, valuesComp1, lenTagType1, ents_of_interest, *type );MB_CHK_ERR( rval );
3935  // we can just send vertices and elements, with crystal routers;
3936  // on the receiving end, make sure they are not duplicated, by looking at the global id
3937  // if *type is 1, also send global_dofs tag in the element tuple
3938  rval = cgraph->form_tuples_to_migrate_mesh( context.MBI, TLv, TLc, *type, lenTagType1 );MB_CHK_ERR( rval );
3939  }
3940  else
3941  {
3942  TLv.initialize( 2, 0, 0, 3, 0 ); // no vertices here, for sure
3943  TLv.enableWriteAccess(); // to be able to receive stuff, even if nothing is here yet, on this task
3944  if( *type != 2 ) // for point cloud, we do not need to initialize TLc (for cells)
3945  {
3946  // we still need to initialize the tuples with the right size, as in form_tuples_to_migrate_mesh
3947  int size_tuple = 2 + ( ( *type != 1 ) ? 0 : lenTagType1 ) + 1 +
3948  10; // 10 is the max number of vertices in cell; kind of arbitrary
3949  TLc.initialize( size_tuple, 0, 0, 0, 0 );
3950  TLc.enableWriteAccess();
3951  }
3952  }
3953  pc.crystal_router()->gs_transfer( 1, TLv, 0 ); // communication towards coupler tasks, with mesh vertices
3954  if( *type != 2 ) pc.crystal_router()->gs_transfer( 1, TLc, 0 ); // those are cells
3955 
3956  if( *pid3 >= 0 ) // will receive the mesh, on coupler pes!
3957  {
3958  appData& data3 = context.appDatas[*pid3];
3959  EntityHandle fset3 = data3.file_set;
3960  Range primary_ents3; // vertices for type 2, cells of dim 2 for type 1 or 3
3961  std::vector< int > values_entities; // will be the size of primary_ents3 * lenTagType1
3962  rval = cgraph_rev->form_mesh_from_tuples( context.MBI, TLv, TLc, *type, lenTagType1, fset3, primary_ents3,
3963  values_entities );MB_CHK_ERR( rval );
3964  iMOAB_UpdateMeshInfo( pid3 );
3965  int ndofPerEl = 1;
3966  if( 1 == *type ) ndofPerEl = (int)( sqrt( lenTagType1 ) );
3967  // because we are on the coupler, we know that the read map pid2 exists
3968  assert( *pid2 >= 0 );
3969  appData& dataIntx = context.appDatas[*pid2];
3970  TempestMapAppData& tdata = dataIntx.tempestData;
3971 
3972  // if we are on source coverage, direction 1, we can set covering mesh, covering cells
3973  if( 1 == *direction )
3974  {
3975  tdata.pid_src = pid3;
3976  tdata.remapper->SetMeshSet( Remapper::CoveringMesh, fset3, primary_ents3 );
3977  weightMap->SetSourceNDofsPerElement( ndofPerEl );
3978  weightMap->set_col_dc_dofs( values_entities ); // will set col_dtoc_dofmap
3979  }
3980  // if we are on target, we can set the target cells
3981  else
3982  {
3983  tdata.pid_dest = pid3;
3984  tdata.remapper->SetMeshSet( Remapper::TargetMesh, fset3, primary_ents3 );
3985  weightMap->SetDestinationNDofsPerElement( ndofPerEl );
3986  weightMap->set_row_dc_dofs( values_entities ); // will set row_dtoc_dofmap
3987  }
3988  }
3989 
3990  return moab::MB_SUCCESS;
3991 }
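// Illustrative usage (hypothetical handles, groups, and component ids): migrating the source
// component mesh onto the coupler tasks that hold a map read from file, in the source direction.
//
//   int type = 3, comp_atm = 5, comp_cpl_atm = 6, direction = 1;   // FV mesh, source side
//   ErrCode rc = iMOAB_MigrateMapMesh( pid_atm, pid_map, pid_cpl_atm, &joint_comm,
//                                      &atm_group, &cpl_group, &type, &comp_atm,
//                                      &comp_cpl_atm, &direction );
//   // on coupler tasks (*pid3 >= 0) the migrated mesh becomes the covering mesh of the read map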
3992 
3993 #endif // #ifdef MOAB_HAVE_MPI
3994 
3995 #endif // #ifdef MOAB_HAVE_NETCDF
3996 
3997 #define USE_API
3998 static ErrCode ComputeSphereRadius( iMOAB_AppID pid, double* radius )
3999 {
4000  ErrorCode rval;
4001  moab::CartVect pos;
4002 
4003  Range& verts = context.appDatas[*pid].all_verts;
4004  moab::EntityHandle firstVertex = ( verts[0] );
4005 
4006  // coordinate data
4007  rval = context.MBI->get_coords( &( firstVertex ), 1, (double*)&( pos[0] ) );MB_CHK_ERR( rval );
4008 
4009  // compute the distance from origin
4010  // TODO: we could do this in a loop to verify if the pid represents a spherical mesh
4011  *radius = pos.length();
4012  return moab::MB_SUCCESS;
4013 }
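// A possible way to address the TODO above (illustrative sketch only, not the current behavior):
// verify that every vertex is at (approximately) the same distance from the origin.
//
//   for( Range::iterator vit = verts.begin(); vit != verts.end(); ++vit )
//   {
//       EntityHandle vert = *vit;
//       rval = context.MBI->get_coords( &vert, 1, (double*)&( pos[0] ) );MB_CHK_ERR( rval );
//       if( fabs( pos.length() - *radius ) > 1e-10 ) return moab::MB_FAILURE; // not a sphere
//   }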
4014 
4015 ErrCode iMOAB_ComputeMeshIntersectionOnSphere( iMOAB_AppID pid_src, iMOAB_AppID pid_tgt, iMOAB_AppID pid_intx )
4016 {
4017  ErrorCode rval;
4018  ErrCode ierr;
4019  bool validate = true;
4020 
4021  double radius_source = 1.0;
4022  double radius_target = 1.0;
4023  const double epsrel = ReferenceTolerance; // ReferenceTolerance is defined in Defines.h in tempestremap source ;
4024  constexpr double boxeps = 1.e-1;
4025  constexpr bool gnomonic = false;
4026 
4027  // Get the source and target data and pcomm objects
4028  appData& data_src = context.appDatas[*pid_src];
4029  appData& data_tgt = context.appDatas[*pid_tgt];
4030  appData& data_intx = context.appDatas[*pid_intx];
4031 #ifdef MOAB_HAVE_MPI
4032  ParallelComm* pco_intx = context.pcomms[*pid_intx];
4033 #endif
4034 
4035  // Mesh intersection has already been computed; Return early.
4036  TempestMapAppData& tdata = data_intx.tempestData;
4037  if( tdata.remapper != NULL ) return moab::MB_SUCCESS; // nothing to do
4038 
4039  bool is_parallel = false, is_root = true;
4040  int rank = 0;
4041 #ifdef MOAB_HAVE_MPI
4042  if( pco_intx )
4043  {
4044  rank = pco_intx->rank();
4045  is_parallel = ( pco_intx->size() > 1 );
4046  is_root = ( rank == 0 );
4047  rval = pco_intx->check_all_shared_handles();MB_CHK_ERR( rval );
4048  }
4049 #endif
4050 
4051  moab::DebugOutput outputFormatter( std::cout, rank, 0 );
4052  outputFormatter.set_prefix( "[iMOAB_ComputeMeshIntersectionOnSphere]: " );
4053 
4054  ierr = iMOAB_UpdateMeshInfo( pid_src );MB_CHK_ERR( ierr );
4055  ierr = iMOAB_UpdateMeshInfo( pid_tgt );MB_CHK_ERR( ierr );
4056 
4057  // Rescale the radius of both to compute the intersection
4058  ComputeSphereRadius( pid_src, &radius_source );
4059  ComputeSphereRadius( pid_tgt, &radius_target );
4060 #ifdef VERBOSE
4061  if( is_root )
4062  outputFormatter.printf( 0, "Radius of spheres: source = %12.14f, and target = %12.14f\n", radius_source,
4063  radius_target );
4064 #endif
4065 
4066  /* Let us make sure that the radii match for the source and target meshes. If not, rescale now and
4067  * unscale later. */
4068  double defaultradius = 1.0;
4069  if( fabs( radius_source - radius_target ) > 1e-10 )
4070  { /* the radii are different */
4071  rval = IntxUtils::ScaleToRadius( context.MBI, data_src.file_set, defaultradius );MB_CHK_ERR( rval );
4072  rval = IntxUtils::ScaleToRadius( context.MBI, data_tgt.file_set, defaultradius );MB_CHK_ERR( rval );
4073  }
4074 
4075  // Default area_method = lHuiller; Options: Girard, GaussQuadrature (if TR is available)
4076 #ifdef MOAB_HAVE_TEMPESTREMAP
4077  IntxAreaUtils areaAdaptor( IntxAreaUtils::GaussQuadrature );
4078 #else
4079  IntxAreaUtils areaAdaptor( IntxAreaUtils::lHuiller );
4080 #endif
4081 
4082  // print verbosely about the problem setting
4083  bool use_kdtree_search = false;
4084  double srctgt_areas[2], srctgt_areas_glb[2];
4085  {
4086  moab::Range rintxverts, rintxelems;
4087  rval = context.MBI->get_entities_by_dimension( data_src.file_set, 0, rintxverts );MB_CHK_ERR( rval );
4088  rval = context.MBI->get_entities_by_dimension( data_src.file_set, data_src.dimension, rintxelems );MB_CHK_ERR( rval );
4089  rval = IntxUtils::fix_degenerate_quads( context.MBI, data_src.file_set );MB_CHK_ERR( rval );
4090  rval = areaAdaptor.positive_orientation( context.MBI, data_src.file_set, defaultradius /*radius_source*/ );MB_CHK_ERR( rval );
4091  srctgt_areas[0] = areaAdaptor.area_on_sphere( context.MBI, data_src.file_set, defaultradius /*radius_source*/ );
4092 #ifdef VERBOSE
4093  if( is_root )
4094  outputFormatter.printf( 0, "The red set contains %d vertices and %d elements \n", rintxverts.size(),
4095  rintxelems.size() );
4096 #endif
4097 
4098  moab::Range bintxverts, bintxelems;
4099  rval = context.MBI->get_entities_by_dimension( data_tgt.file_set, 0, bintxverts );MB_CHK_ERR( rval );
4100  rval = context.MBI->get_entities_by_dimension( data_tgt.file_set, data_tgt.dimension, bintxelems );MB_CHK_ERR( rval );
4101  rval = IntxUtils::fix_degenerate_quads( context.MBI, data_tgt.file_set );MB_CHK_ERR( rval );
4102  rval = areaAdaptor.positive_orientation( context.MBI, data_tgt.file_set, defaultradius /*radius_target*/ );MB_CHK_ERR( rval );
4103  srctgt_areas[1] = areaAdaptor.area_on_sphere( context.MBI, data_tgt.file_set, defaultradius /*radius_target*/ );
4104 #ifdef VERBOSE
4105  if( is_root )
4106  outputFormatter.printf( 0, "The blue set contains %d vertices and %d elements \n", bintxverts.size(),
4107  bintxelems.size() );
4108 #endif
4109 #ifdef MOAB_HAVE_MPI
4110  MPI_Allreduce( &srctgt_areas[0], &srctgt_areas_glb[0], 2, MPI_DOUBLE, MPI_SUM, pco_intx->comm() );
4111 #else
4112  srctgt_areas_glb[0] = srctgt_areas[0];
4113  srctgt_areas_glb[1] = srctgt_areas[1];
4114 #endif
4115  use_kdtree_search = ( srctgt_areas_glb[0] < srctgt_areas_glb[1] );
4116  }
4117 
4118  data_intx.dimension = data_tgt.dimension;
4119  // set the context for the source and destination applications
4120  tdata.pid_src = pid_src;
4121  tdata.pid_dest = pid_tgt;
4122 
4123  // Now allocate and initialize the remapper object
4124 #ifdef MOAB_HAVE_MPI
4125  tdata.remapper = new moab::TempestRemapper( context.MBI, pco_intx );
4126 #else
4127  tdata.remapper = new moab::TempestRemapper( context.MBI );
4128 #endif
4129  tdata.remapper->meshValidate = true;
4130  tdata.remapper->constructEdgeMap = true;
4131 
4132  // Do not create new filesets; Use the sets from our respective applications
4133  tdata.remapper->initialize( false );
4134  tdata.remapper->GetMeshSet( moab::Remapper::SourceMesh ) = data_src.file_set;
4135  tdata.remapper->GetMeshSet( moab::Remapper::TargetMesh ) = data_tgt.file_set;
4136  tdata.remapper->GetMeshSet( moab::Remapper::OverlapMesh ) = data_intx.file_set;
4137 
4138  rval = tdata.remapper->ConvertMeshToTempest( moab::Remapper::SourceMesh );MB_CHK_ERR( rval );
4139  rval = tdata.remapper->ConvertMeshToTempest( moab::Remapper::TargetMesh );MB_CHK_ERR( rval );
4140 
4141  // First, compute the covering source set.
4142  rval = tdata.remapper->ConstructCoveringSet( epsrel, 1.0, 1.0, boxeps, false, gnomonic );MB_CHK_ERR( rval );
4143 
4144  // Next, compute intersections with MOAB.
4145  // for bilinear maps, this is overkill
4146  rval = tdata.remapper->ComputeOverlapMesh( use_kdtree_search, false );MB_CHK_ERR( rval );
4147 
4148  // Mapping computation done
4149  if( validate )
4150  {
4151  double local_area,
4152  global_areas[3]; // Array for Initial area, and through Method 1 and Method 2
4153  local_area = areaAdaptor.area_on_sphere( context.MBI, data_intx.file_set, radius_source );
4154 
4155  global_areas[0] = srctgt_areas_glb[0];
4156  global_areas[1] = srctgt_areas_glb[1];
4157 
4158 #ifdef MOAB_HAVE_MPI
4159  if( is_parallel )
4160  {
4161  MPI_Reduce( &local_area, &global_areas[2], 1, MPI_DOUBLE, MPI_SUM, 0, pco_intx->comm() );
4162  }
4163  else
4164  {
4165  global_areas[2] = local_area;
4166  }
4167 #else
4168  global_areas[2] = local_area;
4169 #endif
4170  if( is_root )
4171  {
4172  outputFormatter.printf( 0,
4173  "initial area: source mesh = %12.14f, target mesh = %12.14f, "
4174  "overlap mesh = %12.14f\n",
4175  global_areas[0], global_areas[1], global_areas[2] );
4176  outputFormatter.printf( 0, " relative error w.r.t source = %12.14e, and target = %12.14e\n",
4177  fabs( global_areas[0] - global_areas[2] ) / global_areas[0],
4178  fabs( global_areas[1] - global_areas[2] ) / global_areas[1] );
4179  }
4180  }
4181 
4182  return moab::MB_SUCCESS;
4183 }
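// Illustrative usage (hypothetical handles registered elsewhere): on coupler tasks, the
// intersection is computed once before generating any weights.
//
//   ErrCode rc = iMOAB_ComputeMeshIntersectionOnSphere( pid_cpl_atm, pid_cpl_ocn, pid_intx );
//   // afterwards, call iMOAB_ComputeScalarProjectionWeights on pid_intx (see below)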
4184 
4185 ErrCode iMOAB_ComputePointDoFIntersection( iMOAB_AppID pid_src, iMOAB_AppID pid_tgt, iMOAB_AppID pid_intx )
4186 {
4187  ErrorCode rval;
4188  ErrCode ierr;
4189 
4190  double radius_source = 1.0;
4191  double radius_target = 1.0;
4192  const double epsrel = ReferenceTolerance; // ReferenceTolerance is defined in Defines.h in tempestremap source ;
4193  const double boxeps = 1.e-8;
4194 
4195  // Get the source and target data and pcomm objects
4196  appData& data_src = context.appDatas[*pid_src];
4197  appData& data_tgt = context.appDatas[*pid_tgt];
4198  appData& data_intx = context.appDatas[*pid_intx];
4199 #ifdef MOAB_HAVE_MPI
4200  ParallelComm* pco_intx = context.pcomms[*pid_intx];
4201 #endif
4202 
4203  // Mesh intersection has already been computed; Return early.
4204  TempestMapAppData& tdata = data_intx.tempestData;
4205  if( tdata.remapper != NULL ) return moab::MB_SUCCESS; // nothing to do
4206 
4207 #ifdef MOAB_HAVE_MPI
4208  if( pco_intx )
4209  {
4210  rval = pco_intx->check_all_shared_handles();MB_CHK_ERR( rval );
4211  }
4212 #endif
4213 
4214  ierr = iMOAB_UpdateMeshInfo( pid_src );MB_CHK_ERR( ierr );
4215  ierr = iMOAB_UpdateMeshInfo( pid_tgt );MB_CHK_ERR( ierr );
4216 
4217  // Rescale the radius of both to compute the intersection
4218  ComputeSphereRadius( pid_src, &radius_source );
4219  ComputeSphereRadius( pid_tgt, &radius_target );
4220 
4221  IntxAreaUtils areaAdaptor;
4222  // print verbosely about the problem setting
4223  {
4224  moab::Range rintxverts, rintxelems;
4225  rval = context.MBI->get_entities_by_dimension( data_src.file_set, 0, rintxverts );MB_CHK_ERR( rval );
4226  rval = context.MBI->get_entities_by_dimension( data_src.file_set, 2, rintxelems );MB_CHK_ERR( rval );
4227  rval = IntxUtils::fix_degenerate_quads( context.MBI, data_src.file_set );MB_CHK_ERR( rval );
4228  rval = areaAdaptor.positive_orientation( context.MBI, data_src.file_set, radius_source );MB_CHK_ERR( rval );
4229 #ifdef VERBOSE
4230  std::cout << "The red set contains " << rintxverts.size() << " vertices and " << rintxelems.size()
4231  << " elements \n";
4232 #endif
4233 
4234  moab::Range bintxverts, bintxelems;
4235  rval = context.MBI->get_entities_by_dimension( data_tgt.file_set, 0, bintxverts );MB_CHK_ERR( rval );
4236  rval = context.MBI->get_entities_by_dimension( data_tgt.file_set, 2, bintxelems );MB_CHK_ERR( rval );
4237  rval = IntxUtils::fix_degenerate_quads( context.MBI, data_tgt.file_set );MB_CHK_ERR( rval );
4238  rval = areaAdaptor.positive_orientation( context.MBI, data_tgt.file_set, radius_target );MB_CHK_ERR( rval );
4239 #ifdef VERBOSE
4240  std::cout << "The blue set contains " << bintxverts.size() << " vertices and " << bintxelems.size()
4241  << " elements \n";
4242 #endif
4243  }
4244 
4245  data_intx.dimension = data_tgt.dimension;
4246  // set the context for the source and destination applications
4248  tdata.pid_src = pid_src;
4249  tdata.pid_dest = pid_tgt;
4250  data_intx.point_cloud = ( data_src.point_cloud || data_tgt.point_cloud );
4251  assert( data_intx.point_cloud == true );
4252 
4253  // Now allocate and initialize the remapper object
4254 #ifdef MOAB_HAVE_MPI
4255  tdata.remapper = new moab::TempestRemapper( context.MBI, pco_intx );
4256 #else
4257  tdata.remapper = new moab::TempestRemapper( context.MBI );
4258 #endif
4259  tdata.remapper->meshValidate = true;
4260  tdata.remapper->constructEdgeMap = true;
4261 
4262  // Do not create new filesets; Use the sets from our respective applications
4263  tdata.remapper->initialize( false );
4264  tdata.remapper->GetMeshSet( moab::Remapper::SourceMesh ) = data_src.file_set;
4265  tdata.remapper->GetMeshSet( moab::Remapper::TargetMesh ) = data_tgt.file_set;
4266  tdata.remapper->GetMeshSet( moab::Remapper::OverlapMesh ) = data_intx.file_set;
4267 
4268  /* Let us make sure that the radii match for the source and target meshes. If not, rescale now and
4269  * unscale later. */
4270  if( fabs( radius_source - radius_target ) > 1e-10 )
4271  { /* the radii are different */
4272  rval = IntxUtils::ScaleToRadius( context.MBI, data_src.file_set, 1.0 );MB_CHK_ERR( rval );
4273  rval = IntxUtils::ScaleToRadius( context.MBI, data_tgt.file_set, 1.0 );MB_CHK_ERR( rval );
4274  }
4275 
4276  rval = tdata.remapper->ConvertMeshToTempest( moab::Remapper::SourceMesh );MB_CHK_ERR( rval );
4277  rval = tdata.remapper->ConvertMeshToTempest( moab::Remapper::TargetMesh );MB_CHK_ERR( rval );
4278 
4279  // First, compute the covering source set.
4280  rval = tdata.remapper->ConstructCoveringSet( epsrel, 1.0, 1.0, boxeps, false );MB_CHK_ERR( rval );
4281 
4282 #ifdef MOAB_HAVE_MPI
4283  /* VSM: This context should be set on the data_src but it would overwrite the source
4284  covering set context in case it is coupled to another APP as well.
4285  This needs a redesign. */
4286  // data_intx.covering_set = tdata.remapper->GetCoveringSet();
4287  // data_src.covering_set = tdata.remapper->GetCoveringSet();
4288 #endif
4289 
4290  // Now let us re-convert the MOAB mesh back to Tempest representation
4291  rval = tdata.remapper->ComputeGlobalLocalMaps();MB_CHK_ERR( rval );
4292 
4293  return moab::MB_SUCCESS;
4294 }
4295 
4296 ErrCode iMOAB_ComputeScalarProjectionWeights(
4297  iMOAB_AppID pid_intx,
4298  const iMOAB_String solution_weights_identifier, /* "scalar", "flux", "custom" */
4299  const iMOAB_String disc_method_source,
4300  int* disc_order_source,
4301  const iMOAB_String disc_method_target,
4302  int* disc_order_target,
4303  const iMOAB_String fv_method,
4304  int* fNoBubble,
4305  int* fMonotoneTypeID,
4306  int* fVolumetric,
4307  int* fInverseDistanceMap,
4308  int* fNoConservation,
4309  int* fValidate,
4310  const iMOAB_String source_solution_tag_dof_name,
4311  const iMOAB_String target_solution_tag_dof_name )
4312 {
4313  moab::ErrorCode rval;
4314 
4315  assert( disc_order_source && disc_order_target && *disc_order_source > 0 && *disc_order_target > 0 );
4316  assert( solution_weights_identifier && strlen( solution_weights_identifier ) );
4317  assert( disc_method_source && strlen( disc_method_source ) );
4318  assert( disc_method_target && strlen( disc_method_target ) );
4319  assert( source_solution_tag_dof_name && strlen( source_solution_tag_dof_name ) );
4320  assert( target_solution_tag_dof_name && strlen( target_solution_tag_dof_name ) );
4321 
4322  // Get the source and target data and pcomm objects
4323  appData& data_intx = context.appDatas[*pid_intx];
4324  TempestMapAppData& tdata = data_intx.tempestData;
4325 
4326  // Setup computation of weights
4327  // Set the context for the remapping weights computation
4328  tdata.weightMaps[std::string( solution_weights_identifier )] = new moab::TempestOnlineMap( tdata.remapper );
4329 
4330  // Call to generate the remap weights with the tempest meshes
4331  moab::TempestOnlineMap* weightMap = tdata.weightMaps[std::string( solution_weights_identifier )];
4332  assert( weightMap != NULL );
4333 
4334  GenerateOfflineMapAlgorithmOptions mapOptions;
4335  mapOptions.nPin = *disc_order_source;
4336  mapOptions.nPout = *disc_order_target;
4337  mapOptions.fSourceConcave = false;
4338  mapOptions.fTargetConcave = false;
4339 
4340  mapOptions.strMethod = "";
4341  if( fv_method ) mapOptions.strMethod += std::string( fv_method ) + ";";
4342  if( fMonotoneTypeID )
4343  {
4344  switch( *fMonotoneTypeID )
4345  {
4346  case 0:
4347  mapOptions.fMonotone = false;
4348  break;
4349  case 3:
4350  mapOptions.strMethod += "mono3;";
4351  break;
4352  case 2:
4353  mapOptions.strMethod += "mono2;";
4354  break;
4355  default:
4356  mapOptions.fMonotone = true;
4357  }
4358  }
4359  else
4360  mapOptions.fMonotone = false;
4361 
4362  mapOptions.fNoBubble = ( fNoBubble ? *fNoBubble : false );
4363  mapOptions.fNoConservation = ( fNoConservation ? *fNoConservation > 0 : false );
4364  mapOptions.fNoCorrectAreas = false;
4365  mapOptions.fNoCheck = !( fValidate ? *fValidate : true );
4366  if( fVolumetric && *fVolumetric ) mapOptions.strMethod += "volumetric;";
4367  if( fInverseDistanceMap && *fInverseDistanceMap ) mapOptions.strMethod += "invdist;";
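// For example (hypothetical inputs), with fv_method == NULL, *fMonotoneTypeID == 2 and
// *fVolumetric != 0 (all other options off), the string assembled above is "mono2;volumetric;".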
4368 
4369  std::string metadataStr = mapOptions.strMethod + ";" +
4370  std::string( disc_method_source ) + ":" + std::to_string(*disc_order_source) + ":" + std::string( source_solution_tag_dof_name ) + ";" +
4371  std::string( disc_method_target ) + ":" + std::to_string(*disc_order_target) + ":" + std::string( target_solution_tag_dof_name );
4372 
4373  data_intx.metadataMap[std::string( solution_weights_identifier )] = metadataStr;
4374 
4375  // Now let us compute the local-global mapping and store it in the context
4376  // We need this mapping when computing matvec products and to do reductions in parallel
4377  // Additionally, the call below will also compute weights with TempestRemap
4378  rval = weightMap->GenerateRemappingWeights(
4379  std::string( disc_method_source ), // const std::string& strInputType
4380  std::string( disc_method_target ), // const std::string& strOutputType
4381  mapOptions, // GenerateOfflineMapAlgorithmOptions& mapOptions
4382  std::string( source_solution_tag_dof_name ), // const std::string& srcDofTagName = "GLOBAL_ID"
4383  std::string( target_solution_tag_dof_name ) // const std::string& tgtDofTagName = "GLOBAL_ID"
4384  );MB_CHK_ERR( rval );
4385 
4386  return moab::MB_SUCCESS;
4387 }
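// Illustrative usage (hypothetical handle and placeholder values): generating first-order
// FV->FV weights after the intersection on pid_intx has been computed.
//
//   int order_src = 1, order_tgt = 1;
//   int noBubble = 1, monotone = 0, volumetric = 0, invDist = 0, noConserve = 0, validate = 1;
//   ErrCode rc = iMOAB_ComputeScalarProjectionWeights( pid_intx, "scalar", "fv", &order_src,
//                                                      "fv", &order_tgt, NULL, &noBubble,
//                                                      &monotone, &volumetric, &invDist,
//                                                      &noConserve, &validate,
//                                                      "GLOBAL_ID", "GLOBAL_ID" );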
4388 
4389 ErrCode iMOAB_ApplyScalarProjectionWeights(
4390  iMOAB_AppID pid_intersection,
4391  const iMOAB_String solution_weights_identifier, /* "scalar", "flux", "custom" */
4392  const iMOAB_String source_solution_tag_name,
4393  const iMOAB_String target_solution_tag_name )
4394 {
4395  assert( solution_weights_identifier && strlen( solution_weights_identifier ) );
4396  assert( source_solution_tag_name && strlen( source_solution_tag_name ) );
4397  assert( target_solution_tag_name && strlen( target_solution_tag_name ) );
4398 
4399  moab::ErrorCode rval;
4400 
4401  // Get the source and target data and pcomm objects
4402  appData& data_intx = context.appDatas[*pid_intersection];
4403  TempestMapAppData& tdata = data_intx.tempestData;
4404 
4405  // Get the handle to the existing remapper object
4406  moab::TempestRemapper* remapper = tdata.remapper;
4407  if( !tdata.weightMaps.count( std::string( solution_weights_identifier ) ) ) // key does not exist
4408  return moab::MB_FAILURE;
4409  moab::TempestOnlineMap* weightMap = tdata.weightMaps[std::string( solution_weights_identifier )];
4410 
4411  // we assume that there are separators ":" between the tag names (see the separator string below)
4412  std::vector< std::string > srcNames;
4413  std::vector< std::string > tgtNames;
4414  std::vector< Tag > srcTagHandles;
4415  std::vector< Tag > tgtTagHandles;
4416  std::string separator( ":" );
4417  std::string src_name( source_solution_tag_name );
4418  std::string tgt_name( target_solution_tag_name );
4419  split_tag_names( src_name, separator, srcNames );
4420  split_tag_names( tgt_name, separator, tgtNames );
4421  if( srcNames.size() != tgtNames.size() )
4422  {
4423  std::cout << " error in parsing source and target tag names. \n";
4424  return moab::MB_FAILURE;
4425  }
4426 
4427  for( size_t i = 0; i < srcNames.size(); i++ )
4428  {
4429  Tag tagHandle;
4430  rval = context.MBI->tag_get_handle( srcNames[i].c_str(), tagHandle );
4431  if( MB_SUCCESS != rval || NULL == tagHandle )
4432  {
4433  return moab::MB_FAILURE;
4434  }
4435  srcTagHandles.push_back( tagHandle );
4436  rval = context.MBI->tag_get_handle( tgtNames[i].c_str(), tagHandle );
4437  if( MB_SUCCESS != rval || NULL == tagHandle )
4438  {
4439  return moab::MB_FAILURE;
4440  }
4441  tgtTagHandles.push_back( tagHandle );
4442  }
4443 
4444  std::vector< double > solSTagVals;
4445  std::vector< double > solTTagVals;
4446 
4447  moab::Range sents, tents;
4448  if( data_intx.point_cloud )
4449  {
4450  appData& data_src = context.appDatas[*tdata.pid_src];
4451  appData& data_tgt = context.appDatas[*tdata.pid_dest];
4452  if( data_src.point_cloud )
4453  {
4454  moab::Range& covSrcEnts = remapper->GetMeshVertices( moab::Remapper::CoveringMesh );
4455  solSTagVals.resize( covSrcEnts.size(), -1.0 );
4456  sents = covSrcEnts;
4457  }
4458  else
4459  {
4460  moab::Range& covSrcEnts = remapper->GetMeshEntities( moab::Remapper::CoveringMesh );
4461  solSTagVals.resize( covSrcEnts.size() * weightMap->GetSourceNDofsPerElement() *
4462  weightMap->GetSourceNDofsPerElement(),
4463  -1.0 );
4464  sents = covSrcEnts;
4465  }
4466  if( data_tgt.point_cloud )
4467  {
4468  moab::Range& tgtEnts = remapper->GetMeshVertices( moab::Remapper::TargetMesh );
4469  solTTagVals.resize( tgtEnts.size(), -1.0 );
4470  tents = tgtEnts;
4471  }
4472  else
4473  {
4474  moab::Range& tgtEnts = remapper->GetMeshEntities( moab::Remapper::TargetMesh );
4475  solTTagVals.resize( tgtEnts.size() * weightMap->GetDestinationNDofsPerElement() *
4476  weightMap->GetDestinationNDofsPerElement(),
4477  -1.0 );
4478  tents = tgtEnts;
4479  }
4480  }
4481  else
4482  {
4483  moab::Range& covSrcEnts = remapper->GetMeshEntities( moab::Remapper::CoveringMesh );
4484  moab::Range& tgtEnts = remapper->GetMeshEntities( moab::Remapper::TargetMesh );
4485  solSTagVals.resize(
4486  covSrcEnts.size() * weightMap->GetSourceNDofsPerElement() * weightMap->GetSourceNDofsPerElement(), -1.0 );
4487  solTTagVals.resize( tgtEnts.size() * weightMap->GetDestinationNDofsPerElement() *
4488  weightMap->GetDestinationNDofsPerElement(),
4489  -1.0 );
4490 
4491  sents = covSrcEnts;
4492  tents = tgtEnts;
4493  }
4494 
4495  for( size_t i = 0; i < srcTagHandles.size(); i++ )
4496  {
4497  // The tag data is np*np*n_el_src
4498  Tag ssolnTag = srcTagHandles[i];
4499  Tag tsolnTag = tgtTagHandles[i];
4500  rval = context.MBI->tag_get_data( ssolnTag, sents, &solSTagVals[0] );MB_CHK_ERR( rval );
4501 
4502  // Compute the application of weights on the source solution data and store it in the
4503  // destination solution vector data. Optionally, the transpose application of the weight
4504  // matrix can also be performed; set the 3rd argument to true if this is needed.
4505  rval = weightMap->ApplyWeights( solSTagVals, solTTagVals, false );MB_CHK_ERR( rval );
4506 
4507  // The tag data is np*np*n_el_dest
4508  rval = context.MBI->tag_set_data( tsolnTag, tents, &solTTagVals[0] );MB_CHK_ERR( rval );
4509  }
4510 
4511 // #define VERBOSE
4512 #ifdef VERBOSE
4513  ParallelComm* pco_intx = context.pcomms[*pid_intersection];
4514 
4515  int ivar = 0;
4516  {
4517  Tag ssolnTag = srcTagHandles[ivar];
4518  std::stringstream sstr;
4519  sstr << "covsrcTagData_" << *pid_intersection << "_" << ivar << "_" << pco_intx->rank() << ".txt";
4520  std::ofstream output_file( sstr.str().c_str() );
4521  for( unsigned i = 0; i < sents.size(); ++i )
4522  {
4523  EntityHandle elem = sents[i];
4524  std::vector< double > locsolSTagVals( 16 );
4525  rval = context.MBI->tag_get_data( ssolnTag, &elem, 1, &locsolSTagVals[0] );MB_CHK_ERR( rval );
4526  output_file << "\n" << remapper->GetGlobalID( Remapper::CoveringMesh, i ) << "-- \n\t";
4527  for( unsigned j = 0; j < 16; ++j )
4528  output_file << locsolSTagVals[j] << " ";
4529  }
4530  output_file.flush(); // required here
4531  output_file.close();
4532  }
4533  {
4534  std::stringstream sstr;
4535  sstr << "outputSrcDest_" << *pid_intersection << "_" << ivar << "_" << pco_intx->rank() << ".h5m";
4536  EntityHandle sets[2] = { context.appDatas[*tdata.pid_src].file_set,
4537  context.appDatas[*tdata.pid_dest].file_set };
4538  rval = context.MBI->write_file( sstr.str().c_str(), NULL, "", sets, 2 );MB_CHK_ERR( rval );
4539  }
4540  {
4541  std::stringstream sstr;
4542  sstr << "outputCovSrcDest_" << *pid_intersection << "_" << ivar << "_" << pco_intx->rank() << ".h5m";
4543  // EntityHandle sets[2] = {data_intx.file_set, data_intx.covering_set};
4544  EntityHandle covering_set = remapper->GetCoveringSet();
4545  EntityHandle sets[2] = { covering_set, context.appDatas[*tdata.pid_dest].file_set };
4546  rval = context.MBI->write_file( sstr.str().c_str(), NULL, "", sets, 2 );MB_CHK_ERR( rval );
4547  }
4548  {
4549  std::stringstream sstr;
4550  sstr << "covMesh_" << *pid_intersection << "_" << pco_intx->rank() << ".vtk";
4551  // EntityHandle sets[2] = {data_intx.file_set, data_intx.covering_set};
4552  EntityHandle covering_set = remapper->GetCoveringSet();
4553  rval = context.MBI->write_file( sstr.str().c_str(), NULL, "", &covering_set, 1 );MB_CHK_ERR( rval );
4554  }
4555  {
4556  std::stringstream sstr;
4557  sstr << "tgtMesh_" << *pid_intersection << "_" << pco_intx->rank() << ".vtk";
4558  // EntityHandle sets[2] = {data_intx.file_set, data_intx.covering_set};
4559  EntityHandle target_set = remapper->GetMeshSet( Remapper::TargetMesh );
4560  rval = context.MBI->write_file( sstr.str().c_str(), NULL, "", &target_set, 1 );MB_CHK_ERR( rval );
4561  }
4562  {
4563  std::stringstream sstr;
4564  sstr << "colvector_" << *pid_intersection << "_" << ivar << "_" << pco_intx->rank() << ".txt";
4565  std::ofstream output_file( sstr.str().c_str() );
4566  for( unsigned i = 0; i < solSTagVals.size(); ++i )
4567  output_file << i << " " << weightMap->col_dofmap[i] << " " << weightMap->col_gdofmap[i] << " "
4568  << solSTagVals[i] << "\n";
4569  output_file.flush(); // required here
4570  output_file.close();
4571  }
4572 #endif
4573  // #undef VERBOSE
4574 
4575  return moab::MB_SUCCESS;
4576 }
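// Illustrative usage (hypothetical handle and tag names): applying the "scalar" weights to three
// fields at once; multiple tag names are separated by ":" (see split_tag_names above).
//
//   ErrCode rc = iMOAB_ApplyScalarProjectionWeights( pid_intx, "scalar",
//                                                    "T_ph:u_ph:v_ph",     // tags on the source (coverage) mesh
//                                                    "T_ph:u_ph:v_ph" );   // corresponding tags on the target mesh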
4577 
4578 #endif // MOAB_HAVE_TEMPESTREMAP
4579 
4580 #ifdef __cplusplus
4581 }
4582 #endif