Mesh Oriented datABase  (version 5.5.1)
An array-based unstructured mesh library
iMOAB.cpp
1 /** \file iMOAB.cpp
2  */
3 
4 #include "moab/MOABConfig.h"
5 #include "moab/Core.hpp"
6 
7 #ifdef MOAB_HAVE_MPI
8 #include "moab_mpi.h"
9 #include "moab/ParallelComm.hpp"
10 #include "moab/ParCommGraph.hpp"
13 #endif
14 #include "DebugOutput.hpp"
15 #include "moab/iMOAB.h"
16 
17 /* this is needed because of direct access to hdf5/mhdf */
18 #ifdef MOAB_HAVE_HDF5
19 #include "mhdf.h"
20 #include <H5Tpublic.h>
21 #endif
22 
23 #include "moab/CartVect.hpp"
24 #include "MBTagConventions.hpp"
25 #include "moab/MeshTopoUtil.hpp"
26 #include "moab/ReadUtilIface.hpp"
27 #include "moab/MergeMesh.hpp"
28 
29 #ifdef MOAB_HAVE_TEMPESTREMAP
30 #include "STLStringHelper.h"
32 
35 #endif
36 
37 // C++ includes
38 #include <cassert>
39 #include <sstream>
40 #include <iostream>
41 
42 using namespace moab;
43 
44 // #define VERBOSE
45 
46 // global variables ; should they be organized in a structure, for easier references?
47 // or how do we keep them global?
48 
49 #ifdef __cplusplus
50 extern "C" {
51 #endif
52 
53 #ifdef MOAB_HAVE_TEMPESTREMAP
54 struct TempestMapAppData
55 {
56  moab::TempestRemapper* remapper;
57  std::map< std::string, moab::TempestOnlineMap* > weightMaps;
58  iMOAB_AppID pid_src;
59  iMOAB_AppID pid_dest;
60  int num_src_ghost_layers; // number of ghost layers
61  int num_tgt_ghost_layers; // number of ghost layers
62 };
63 #endif
64 
65 struct appData
66 {
67  EntityHandle file_set;
68  int global_id; // external component id, unique for application
69  std::string name;
70  Range all_verts;
71  // local vertices would be all_verts if no ghosting was required
72  Range local_verts; // it could include shared, but not owned at the interface
73  Range owned_verts; // owned_verts <= local_verts <= all_verts
74  Range ghost_vertices; // locally ghosted from other processors
75  Range primary_elems;
76  Range owned_elems;
77  Range ghost_elems;
78  int dimension; // 2 or 3, dimension of primary elements (redundant?)
79  long num_global_elements; // reunion of all elements in primary_elements; either from hdf5
80  // reading or from reduce
81  long num_global_vertices; // reunion of all nodes, after sharing is resolved; it could be
82  // determined from hdf5 reading
83  int num_ghost_layers; // number of ghost layers
84  Range mat_sets;
85  std::map< int, int > matIndex; // map from global block id to index in mat_sets
86  Range neu_sets;
87  Range diri_sets;
88  std::map< std::string, Tag > tagMap;
89  std::vector< Tag > tagList;
90  bool point_cloud;
91  bool is_fortran;
92 
93 #ifdef MOAB_HAVE_MPI
94  // constructor for this ParCommGraph takes the joint comm and the MPI groups for each
95  // application
96  std::map< int, ParCommGraph* > pgraph; // map from context () to the parcommgraph*
97 #endif
98 
99 #ifdef MOAB_HAVE_TEMPESTREMAP
100  TempestMapAppData tempestData;
101  std::map< std::string, std::string > metadataMap;
102 #endif
103 };
104 
105 struct GlobalContext
106 {
107  // are there reasons to have multiple moab inits? Is ref count needed?
108  Interface* MBI;
109  // we should also have the default tags stored, initialized
110  Tag material_tag, neumann_tag, dirichlet_tag,
111  globalID_tag; // material, neumann, dirichlet, globalID
112  int refCountMB;
113  int iArgc;
114  iMOAB_String* iArgv;
115  int unused_pid;
116 
117  std::map< std::string, int > appIdMap; // from app string (uppercase) to app id
118  std::map< int, int > appIdCompMap; // from component id to app id
119 
120 #ifdef MOAB_HAVE_MPI
121  std::vector< ParallelComm* > pcomms; // created in order of applications, one moab::ParallelComm for each
122 #endif
123 
124  std::vector< appData > appDatas; // the same order as pcomms
125  int globalrank, worldprocs;
126  bool MPI_initialized;
127 
128  GlobalContext()
129  {
130  MBI = 0;
131  refCountMB = 0;
132  unused_pid = 0;
133  }
134 };
135 
136 static struct GlobalContext context;
137 
138 ErrCode iMOAB_Initialize( int argc, iMOAB_String* argv )
139 {
140  if( argc ) IMOAB_CHECKPOINTER( argv, 1 );
141 
142  context.iArgc = argc;
143  context.iArgv = argv; // shallow copy
144 
145  if( 0 == context.refCountMB )
146  {
147  context.MBI = new( std::nothrow ) moab::Core;
148  // retrieve the default tags
149  const char* const shared_set_tag_names[] = { MATERIAL_SET_TAG_NAME, NEUMANN_SET_TAG_NAME,
150  DIRICHLET_SET_TAG_NAME, GLOBAL_ID_TAG_NAME };
151  // blocks, visible surfaceBC(neumann), vertexBC (Dirichlet), global id, parallel partition
152  Tag gtags[4];
153 
154  for( int i = 0; i < 4; i++ )
155  {
156 
157  ErrorCode rval =
158  context.MBI->tag_get_handle( shared_set_tag_names[i], 1, MB_TYPE_INTEGER, gtags[i], MB_TAG_ANY );MB_CHK_ERR( rval );
159  }
160 
161  context.material_tag = gtags[0];
162  context.neumann_tag = gtags[1];
163  context.dirichlet_tag = gtags[2];
164  context.globalID_tag = gtags[3];
165  }
166 
167  context.MPI_initialized = false;
168 #ifdef MOAB_HAVE_MPI
169  int flagInit;
170  MPI_Initialized( &flagInit );
171 
172  if( flagInit && !context.MPI_initialized )
173  {
174  MPI_Comm_size( MPI_COMM_WORLD, &context.worldprocs );
175  MPI_Comm_rank( MPI_COMM_WORLD, &context.globalrank );
176  context.MPI_initialized = true;
177  }
178 #endif
179 
180  context.refCountMB++;
181  return moab::MB_SUCCESS;
182 }
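A minimal usage sketch of the initialize/finalize pair (editor's illustration, not part of iMOAB.cpp; it assumes the public C API declared in moab/iMOAB.h and omits error checking):

    #include "moab/iMOAB.h"

    int main( int argc, char** argv )
    {
        // first call creates the global moab::Core instance and caches the default set tags
        iMOAB_Initialize( argc, argv );

        // ... register applications, load meshes, define and exchange tags ...

        // tears down the global context once the reference count drops to zero
        iMOAB_Finalize();
        return 0;
    }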
183 
184 ErrCode iMOAB_InitializeFortran()
185 {
186  return iMOAB_Initialize( 0, 0 );
187 }
188 
189 ErrCode iMOAB_Finalize()
190 {
191  context.refCountMB--;
192 
193  if( 0 == context.refCountMB )
194  {
195  delete context.MBI;
196  }
197 
198  return MB_SUCCESS;
199 }
200 
201 ErrCode iMOAB_RegisterApplication( const iMOAB_String app_name,
202 #ifdef MOAB_HAVE_MPI
203  MPI_Comm* comm,
204 #endif
205  int* compid,
206  iMOAB_AppID pid )
207 {
208  IMOAB_CHECKPOINTER( app_name, 1 );
209 #ifdef MOAB_HAVE_MPI
210  IMOAB_CHECKPOINTER( comm, 2 );
211  IMOAB_CHECKPOINTER( compid, 3 );
212 #else
213  IMOAB_CHECKPOINTER( compid, 2 );
214 #endif
215 
216  // will create a parallel comm for this application too, so there will be a
217  // mapping from *pid to file set and to parallel comm instances
218  std::string name( app_name );
219 
220  if( context.appIdMap.find( name ) != context.appIdMap.end() )
221  {
222  std::cout << " application " << name << " already registered \n";
223  return moab::MB_FAILURE;
224  }
225 
226  *pid = context.unused_pid++;
227  context.appIdMap[name] = *pid;
228  int rankHere = 0;
229 #ifdef MOAB_HAVE_MPI
230  MPI_Comm_rank( *comm, &rankHere );
231 #endif
232  if( !rankHere ) std::cout << " application " << name << " with ID = " << *pid << " is registered now \n";
233  if( *compid <= 0 )
234  {
235  std::cout << " convention for external application is to have its id positive \n";
236  return moab::MB_FAILURE;
237  }
238 
239  if( context.appIdCompMap.find( *compid ) != context.appIdCompMap.end() )
240  {
241  std::cout << " external application with comp id " << *compid << " is already registered\n";
242  return moab::MB_FAILURE;
243  }
244 
245  context.appIdCompMap[*compid] = *pid;
246 
247  // now create ParallelComm and a file set for this application
248 #ifdef MOAB_HAVE_MPI
249  if( *comm )
250  {
251  ParallelComm* pco = new ParallelComm( context.MBI, *comm );
252 
253 #ifndef NDEBUG
254  int index = pco->get_id(); // it could be useful to get app id from pcomm instance ...
255  assert( index == *pid );
256  // here, we assert that the pid is the same as the id of the ParallelComm instance
257  // useful for writing in parallel
258 #endif
259  context.pcomms.push_back( pco );
260  }
261  else
262  {
263  context.pcomms.push_back( 0 );
264  }
265 #endif
266 
267  // create now the file set that will be used for loading the model in
268  EntityHandle file_set;
269  ErrorCode rval = context.MBI->create_meshset( MESHSET_SET, file_set );MB_CHK_ERR( rval );
270 
271  appData app_data;
272  app_data.file_set = file_set;
273  app_data.global_id = *compid; // will be used mostly for par comm graph
274  app_data.name = name; // save the name of application
275 
276 #ifdef MOAB_HAVE_TEMPESTREMAP
277  app_data.tempestData.remapper = NULL; // Only allocate as needed
278  app_data.tempestData.num_src_ghost_layers = 0;
279  app_data.tempestData.num_tgt_ghost_layers = 0;
280 #endif
281 
282  app_data.num_ghost_layers = 0;
283  app_data.point_cloud = false;
284  app_data.is_fortran = false;
285 
286  context.appDatas.push_back(
287  app_data ); // app_FileSets[*pid] will be the file set of interest
288  return moab::MB_SUCCESS;
289 }
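A registration sketch (editor's illustration for an MPI build; the component name "OCEAN" and id 100 are placeholders):

    MPI_Comm comm = MPI_COMM_WORLD;
    char appname[] = "OCEAN";   // must be unique per registered application
    int compid = 100;           // external component id, must be positive and unique
    int appIdx = -1;            // filled with the internal application index
    iMOAB_AppID pid = &appIdx;
    ErrCode rc = iMOAB_RegisterApplication( appname, &comm, &compid, pid );
    // ... use pid in all subsequent iMOAB calls for this application ...
    rc = iMOAB_DeregisterApplication( pid );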
290 
291 ErrCode iMOAB_RegisterApplicationFortran( const iMOAB_String app_name,
292 #ifdef MOAB_HAVE_MPI
293  int* comm,
294 #endif
295  int* compid,
296  iMOAB_AppID pid )
297 {
298  IMOAB_CHECKPOINTER( app_name, 1 );
299 #ifdef MOAB_HAVE_MPI
300  IMOAB_CHECKPOINTER( comm, 2 );
301  IMOAB_CHECKPOINTER( compid, 3 );
302 #else
303  IMOAB_CHECKPOINTER( compid, 2 );
304 #endif
305 
306  ErrCode err;
307  assert( app_name != nullptr );
308  std::string name( app_name );
309 
310 #ifdef MOAB_HAVE_MPI
311  MPI_Comm ccomm;
312  if( comm )
313  {
314  // convert from Fortran communicator to a C communicator
315  // see transfer of handles
316  // http://www.mpi-forum.org/docs/mpi-2.2/mpi22-report/node361.htm
317  ccomm = MPI_Comm_f2c( (MPI_Fint)*comm );
318  }
319 #endif
320 
321  // now call C style registration function:
322  err = iMOAB_RegisterApplication( app_name,
323 #ifdef MOAB_HAVE_MPI
324  &ccomm,
325 #endif
326  compid, pid );
327 
328  // Now that we have created the application context, store that
329  // the application being registered is from a Fortran context
330  context.appDatas[*pid].is_fortran = true;
331 
332  return err;
333 }
334 
335 ErrCode iMOAB_DeregisterApplication( iMOAB_AppID pid )
336 {
337  // the file set , parallel comm are all in vectors indexed by *pid
338  // assume we did not delete anything yet
339  // *pid will not be reused if we register another application
340  appData& data = context.appDatas[*pid];
341  int rankHere = 0;
342 #ifdef MOAB_HAVE_MPI
343  ParallelComm* pco = context.pcomms[*pid];
344  rankHere = pco->rank();
345 #endif
346  if( !rankHere )
347  std::cout << " application with ID: " << *pid << " global id: " << data.global_id << " name: " << data.name
348  << " is de-registered now \n";
349 
350  EntityHandle fileSet = data.file_set;
351  // get all entities part of the file set
352  Range fileents;
353  ErrorCode rval = context.MBI->get_entities_by_handle( fileSet, fileents, /*recursive */ true );MB_CHK_ERR( rval );
354 
355  fileents.insert( fileSet );
356 
357  rval = context.MBI->get_entities_by_type( fileSet, MBENTITYSET, fileents );MB_CHK_ERR( rval ); // append all mesh sets
358 
359 #ifdef MOAB_HAVE_TEMPESTREMAP
360  if( data.tempestData.remapper ) delete data.tempestData.remapper;
361  if( data.tempestData.weightMaps.size() ) data.tempestData.weightMaps.clear();
362 #endif
363 
364 #ifdef MOAB_HAVE_MPI
365 
366  // we could get the pco also with
367  // ParallelComm * pcomm = ParallelComm::get_pcomm(context.MBI, *pid);
368 
369  std::map< int, ParCommGraph* >& pargs = data.pgraph;
370 
371  // free the parallel comm graphs associated with this app
372  for( std::map< int, ParCommGraph* >::iterator mt = pargs.begin(); mt != pargs.end(); mt++ )
373  {
374  ParCommGraph* pgr = mt->second;
375  delete pgr;
376  pgr = NULL;
377  }
378  if( pco ) delete pco;
379 #endif
380 
381  // delete first all except vertices
382  Range vertices = fileents.subset_by_type( MBVERTEX );
383  Range noverts = subtract( fileents, vertices );
384 
385  rval = context.MBI->delete_entities( noverts );MB_CHK_ERR( rval );
386  // now retrieve connected elements that still exist (maybe in other sets, pids?)
387  Range adj_ents_left;
388  rval = context.MBI->get_adjacencies( vertices, 1, false, adj_ents_left, Interface::UNION );MB_CHK_ERR( rval );
389  rval = context.MBI->get_adjacencies( vertices, 2, false, adj_ents_left, Interface::UNION );MB_CHK_ERR( rval );
390  rval = context.MBI->get_adjacencies( vertices, 3, false, adj_ents_left, Interface::UNION );MB_CHK_ERR( rval );
391 
392  if( !adj_ents_left.empty() )
393  {
394  Range conn_verts;
395  rval = context.MBI->get_connectivity( adj_ents_left, conn_verts );MB_CHK_ERR( rval );
396  vertices = subtract( vertices, conn_verts );
397  }
398 
399  rval = context.MBI->delete_entities( vertices );MB_CHK_ERR( rval );
400 
401  std::map< std::string, int >::iterator mit;
402 
403  for( mit = context.appIdMap.begin(); mit != context.appIdMap.end(); mit++ )
404  {
405  int pidx = mit->second;
406 
407  if( *pid == pidx )
408  {
409  break;
410  }
411  }
412 
413  context.appIdMap.erase( mit );
414  std::map< int, int >::iterator mit1;
415 
416  for( mit1 = context.appIdCompMap.begin(); mit1 != context.appIdCompMap.end(); mit1++ )
417  {
418  int pidx = mit1->second;
419 
420  if( *pid == pidx )
421  {
422  break;
423  }
424  }
425 
426  context.appIdCompMap.erase( mit1 );
427 
428  context.unused_pid--; // we have to go backwards always TODO
429  context.appDatas.pop_back();
430 #ifdef MOAB_HAVE_MPI
431  context.pcomms.pop_back();
432 #endif
433  return moab::MB_SUCCESS;
434 }
435 
436 ErrCode iMOAB_DeregisterApplicationFortran( iMOAB_AppID pid )
437 {
438  // release any Fortran specific allocations here before we pass it on
439  context.appDatas[*pid].is_fortran = false;
440 
441  // release all datastructure allocations
442  return iMOAB_DeregisterApplication( pid );
443 }
444 
445 // Utility function
446 static void split_tag_names( std::string input_names,
447  std::string& separator,
448  std::vector< std::string >& list_tag_names )
449 {
450  size_t pos = 0;
451  std::string token;
452  while( ( pos = input_names.find( separator ) ) != std::string::npos )
453  {
454  token = input_names.substr( 0, pos );
455  if( !token.empty() ) list_tag_names.push_back( token );
456  // std::cout << token << std::endl;
457  input_names.erase( 0, pos + separator.length() );
458  }
459  if( !input_names.empty() )
460  {
461  // if leftover something, or if not ended with delimiter
462  list_tag_names.push_back( input_names );
463  }
464  return;
465 }
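For illustration (editor's sketch; the tag names are placeholders), split_tag_names breaks a ":"-separated list into its non-empty tokens:

    std::vector< std::string > names;
    std::string sep( ":" );
    split_tag_names( "bolus_u:bolus_v:bolus_w", sep, names );
    // names == { "bolus_u", "bolus_v", "bolus_w" }; empty tokens and a trailing separator are ignored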
466 
467 ErrCode iMOAB_ReadHeaderInfo( const iMOAB_String filename,
468  int* num_global_vertices,
469  int* num_global_elements,
470  int* num_dimension,
471  int* num_parts )
472 {
473  IMOAB_CHECKPOINTER( filename, 1 );
474  IMOAB_ASSERT( strlen( filename ), "Invalid filename length." );
475 
476 #ifdef MOAB_HAVE_HDF5
477  std::string filen( filename );
478 
479  int edges = 0;
480  int faces = 0;
481  int regions = 0;
482  if( num_global_vertices ) *num_global_vertices = 0;
483  if( num_global_elements ) *num_global_elements = 0;
484  if( num_dimension ) *num_dimension = 0;
485  if( num_parts ) *num_parts = 0;
486 
487  mhdf_FileHandle file;
488  mhdf_Status status;
489  unsigned long max_id;
490  struct mhdf_FileDesc* data;
491 
492  file = mhdf_openFile( filen.c_str(), 0, &max_id, -1, &status );
493 
494  if( mhdf_isError( &status ) )
495  {
496  fprintf( stderr, "%s: %s\n", filename, mhdf_message( &status ) );
497  return moab::MB_FAILURE;
498  }
499 
500  data = mhdf_getFileSummary( file, H5T_NATIVE_ULONG, &status,
501  1 ); // will use extra set info; will get parallel partition tag info too!
502 
503  if( mhdf_isError( &status ) )
504  {
505  fprintf( stderr, "%s: %s\n", filename, mhdf_message( &status ) );
506  return moab::MB_FAILURE;
507  }
508 
509  if( num_dimension ) *num_dimension = data->nodes.vals_per_ent;
510  if( num_global_vertices ) *num_global_vertices = (int)data->nodes.count;
511 
512  for( int i = 0; i < data->num_elem_desc; i++ )
513  {
514  struct mhdf_ElemDesc* el_desc = &( data->elems[i] );
515  struct mhdf_EntDesc* ent_d = &( el_desc->desc );
516 
517  if( 0 == strcmp( el_desc->type, mhdf_EDGE_TYPE_NAME ) )
518  {
519  edges += ent_d->count;
520  }
521 
522  if( 0 == strcmp( el_desc->type, mhdf_TRI_TYPE_NAME ) )
523  {
524  faces += ent_d->count;
525  }
526 
527  if( 0 == strcmp( el_desc->type, mhdf_QUAD_TYPE_NAME ) )
528  {
529  faces += ent_d->count;
530  }
531 
532  if( 0 == strcmp( el_desc->type, mhdf_POLYGON_TYPE_NAME ) )
533  {
534  faces += ent_d->count;
535  }
536 
537  if( 0 == strcmp( el_desc->type, mhdf_TET_TYPE_NAME ) )
538  {
539  regions += ent_d->count;
540  }
541 
542  if( 0 == strcmp( el_desc->type, mhdf_PYRAMID_TYPE_NAME ) )
543  {
544  regions += ent_d->count;
545  }
546 
547  if( 0 == strcmp( el_desc->type, mhdf_PRISM_TYPE_NAME ) )
548  {
549  regions += ent_d->count;
550  }
551 
552  if( 0 == strcmp( el_desc->type, mdhf_KNIFE_TYPE_NAME ) )
553  {
554  regions += ent_d->count;
555  }
556 
557  if( 0 == strcmp( el_desc->type, mdhf_HEX_TYPE_NAME ) )
558  {
559  regions += ent_d->count;
560  }
561 
562  if( 0 == strcmp( el_desc->type, mhdf_POLYHEDRON_TYPE_NAME ) )
563  {
564  regions += ent_d->count;
565  }
566 
567  if( 0 == strcmp( el_desc->type, mhdf_SEPTAHEDRON_TYPE_NAME ) )
568  {
569  regions += ent_d->count;
570  }
571  }
572 
573  if( num_parts ) *num_parts = data->numEntSets[0];
574 
575  // is this required?
576  if( edges > 0 )
577  {
578  if( num_dimension ) *num_dimension = 1; // I don't think it will ever return 1
579  if( num_global_elements ) *num_global_elements = edges;
580  }
581 
582  if( faces > 0 )
583  {
584  if( num_dimension ) *num_dimension = 2;
585  if( num_global_elements ) *num_global_elements = faces;
586  }
587 
588  if( regions > 0 )
589  {
590  if( num_dimension ) *num_dimension = 3;
591  if( num_global_elements ) *num_global_elements = regions;
592  }
593 
594  mhdf_closeFile( file, &status );
595 
596  free( data );
597 
598 #else
599  std::cout << filename
600  << ": Please reconfigure with HDF5. Cannot retrieve header information for file "
601  "formats other than a h5m file.\n";
602  if( num_global_vertices ) *num_global_vertices = 0;
603  if( num_global_elements ) *num_global_elements = 0;
604  if( num_dimension ) *num_dimension = 0;
605  if( num_parts ) *num_parts = 0;
606 #endif
607 
608  return moab::MB_SUCCESS;
609 }
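A header-query sketch (editor's illustration; requires an HDF5-enabled build, and the file name is a placeholder):

    char h5mfile[] = "ocean_mesh.h5m";
    int nverts = 0, nelems = 0, ndim = 0, nparts = 0;
    ErrCode rc = iMOAB_ReadHeaderInfo( h5mfile, &nverts, &nelems, &ndim, &nparts );
    // reports global counts and the mesh dimension without actually loading the mesh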
610 
611 ErrCode iMOAB_LoadMesh( iMOAB_AppID pid,
612  const iMOAB_String filename,
613  const iMOAB_String read_options,
614  int* num_ghost_layers )
615 {
616  IMOAB_CHECKPOINTER( filename, 2 );
617  IMOAB_ASSERT( strlen( filename ), "Invalid filename length." );
618  IMOAB_CHECKPOINTER( num_ghost_layers, 4 );
619 
620  // make sure we use the file set and pcomm associated with the *pid
621  std::ostringstream newopts;
622  if( read_options ) newopts << read_options;
623 
624 #ifdef MOAB_HAVE_MPI
625 
626  if( context.MPI_initialized )
627  {
628  if( context.worldprocs > 1 )
629  {
630  std::string opts( ( read_options ? read_options : "" ) );
631  std::string pcid( "PARALLEL_COMM=" );
632  std::size_t found = opts.find( pcid );
633 
634  if( found != std::string::npos )
635  {
636  std::cerr << " cannot specify PARALLEL_COMM option, it is implicit \n";
637  return moab::MB_FAILURE;
638  }
639 
640  // in serial, apply PARALLEL_COMM option only for h5m files; it does not work for .g
641  // files (used in test_remapping)
642  std::string filen( filename );
643  std::string::size_type idx = filen.rfind( '.' );
644 
645  if( idx != std::string::npos )
646  {
647  std::string extension = filen.substr( idx + 1 );
648  if( ( extension == std::string( "h5m" ) ) || ( extension == std::string( "nc" ) ) )
649  newopts << ";;PARALLEL_COMM=" << *pid;
650  }
651 
652  if( *num_ghost_layers >= 1 )
653  {
654  // if we want ghosts, we will want additional entities, the last .1
655  // because the addl ents can be edges, faces that are part of the neumann sets
656  std::string pcid2( "PARALLEL_GHOSTS=" );
657  std::size_t found2 = opts.find( pcid2 );
658 
659  if( found2 != std::string::npos )
660  {
661  std::cout << " PARALLEL_GHOSTS option is already specified, ignore passed "
662  "number of layers \n";
663  }
664  else
665  {
666  // dimension of primary entities is 3 here, but it could be 2 for climate
667  // meshes; we would need to pass PARALLEL_GHOSTS explicitly for 2d meshes, for
668  // example: ";PARALLEL_GHOSTS=2.0.1"
669  newopts << ";PARALLEL_GHOSTS=3.0." << *num_ghost_layers << ".3";
670  }
671  }
672  }
673  }
674 #else
675  IMOAB_ASSERT( *num_ghost_layers == 0, "Cannot provide ghost layers in serial." );
676 #endif
677 
678  // Now let us actually load the MOAB file with the appropriate read options
679  ErrorCode rval = context.MBI->load_file( filename, &context.appDatas[*pid].file_set, newopts.str().c_str() );MB_CHK_ERR( rval );
680 
681 #ifdef VERBOSE
682  // some debugging stuff
683  std::ostringstream outfile;
684 #ifdef MOAB_HAVE_MPI
685  int rank = context.pcomms[*pid]->rank();
686  int nprocs = context.pcomms[*pid]->size();
687  outfile << "TaskMesh_n" << nprocs << "." << rank << ".h5m";
688 #else
689  outfile << "TaskMesh_n1.0.h5m";
690 #endif
691  // the mesh contains ghosts too, but they are not part of mat/neumann set
692  // write in serial the file, to see what tags are missing
693  rval = context.MBI->write_file( outfile.str().c_str() );MB_CHK_ERR( rval ); // everything on current task, written in serial
694 #endif
695 
696  // Update ghost layer information
697  context.appDatas[*pid].num_ghost_layers = *num_ghost_layers;
698 
699  // Update mesh information
700  return iMOAB_UpdateMeshInfo( pid );
701 }
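A parallel load sketch (editor's illustration; pid comes from iMOAB_RegisterApplication, the file name is a placeholder, and the read options shown are one typical MOAB parallel option string):

    char meshfile[] = "wholeATM_T.h5m";
    char readopts[] = "PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION;PARALLEL_RESOLVE_SHARED_ENTS";
    int nghlay = 1;   // request one layer of ghost cells
    ErrCode rc = iMOAB_LoadMesh( pid, meshfile, readopts, &nghlay );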
702 
703 ErrCode iMOAB_WriteMesh( iMOAB_AppID pid, const iMOAB_String filename, const iMOAB_String write_options )
704 {
705  IMOAB_CHECKPOINTER( filename, 2 );
706  IMOAB_ASSERT( strlen( filename ), "Invalid filename length." );
707 
708  appData& data = context.appDatas[*pid];
709  EntityHandle fileSet = data.file_set;
710 
711  std::ostringstream newopts;
712 #ifdef MOAB_HAVE_MPI
713  std::string write_opts( ( write_options ? write_options : "" ) );
714  std::string pcid( "PARALLEL_COMM=" );
715  std::size_t found = write_opts.find( pcid );
716 
717  if( found != std::string::npos )
718  {
719  std::cerr << " cannot specify PARALLEL_COMM option, it is implicit \n";
720  return moab::MB_FAILURE;
721  }
722 
723  // if write in parallel, add pc option, to be sure about which ParallelComm instance is used
724  std::string pw( "PARALLEL=WRITE_PART" );
725  found = write_opts.find( pw );
726 
727  if( found != std::string::npos )
728  {
729  newopts << "PARALLEL_COMM=" << *pid << ";";
730  }
731 
732 #endif
733 
734  if( write_options ) newopts << write_options;
735 
736  std::vector< Tag > copyTagList = data.tagList;
737  // append Global ID and Parallel Partition
738  std::string gid_name_tag( "GLOBAL_ID" );
739 
740  // export global id tag, we need it always
741  if( data.tagMap.find( gid_name_tag ) == data.tagMap.end() )
742  {
743  Tag gid = context.MBI->globalId_tag();
744  copyTagList.push_back( gid );
745  }
746  // also Parallel_Partition PARALLEL_PARTITION
747  std::string pp_name_tag( "PARALLEL_PARTITION" );
748 
749  // write parallel part tag too, if it exists
750  if( data.tagMap.find( pp_name_tag ) == data.tagMap.end() )
751  {
752  Tag ptag;
753  context.MBI->tag_get_handle( pp_name_tag.c_str(), ptag );
754  if( ptag ) copyTagList.push_back( ptag );
755  }
756 
757  // Now let us actually write the file to disk with appropriate options
758  ErrorCode rval = context.MBI->write_file( filename, 0, newopts.str().c_str(), &fileSet, 1, &copyTagList[0],
759  (int)copyTagList.size() );MB_CHK_ERR( rval );
760 
761  return moab::MB_SUCCESS;
762 }
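A parallel write sketch (editor's illustration; the output name is a placeholder; as coded above, the right PARALLEL_COMM is appended automatically when PARALLEL=WRITE_PART is requested):

    char outfile[] = "out.h5m";
    char writeopts[] = "PARALLEL=WRITE_PART";
    ErrCode rc = iMOAB_WriteMesh( pid, outfile, writeopts );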
763 
764 ErrCode iMOAB_WriteLocalMesh( iMOAB_AppID pid, const iMOAB_String prefix )
765 {
766  IMOAB_CHECKPOINTER( prefix, 2 );
767  IMOAB_ASSERT( strlen( prefix ), "Invalid prefix string length." );
768 
769  std::ostringstream file_name;
770  int rank = 0, size = 1;
771 #ifdef MOAB_HAVE_MPI
772  rank = context.pcomms[*pid]->rank();
773  size = context.pcomms[*pid]->size();
774 #endif
775  file_name << prefix << "_" << size << "_" << rank << ".h5m";
776  // Now let us actually write the file to disk with appropriate options
777  ErrorCode rval = context.MBI->write_file( file_name.str().c_str(), 0, 0, &context.appDatas[*pid].file_set, 1 );MB_CHK_ERR( rval );
778 
779  return moab::MB_SUCCESS;
780 }
781 
782 ErrCode iMOAB_UpdateMeshInfo( iMOAB_AppID pid )
783 {
784  // this will include ghost elements info
785  appData& data = context.appDatas[*pid];
786  EntityHandle fileSet = data.file_set;
787 
788  // first clear all data ranges; this can be called after ghosting
789  data.all_verts.clear();
790  data.primary_elems.clear();
791  data.local_verts.clear();
792  data.owned_verts.clear();
793  data.ghost_vertices.clear();
794  data.owned_elems.clear();
795  data.ghost_elems.clear();
796  data.mat_sets.clear();
797  data.neu_sets.clear();
798  data.diri_sets.clear();
799 
800  // Let us get all the vertex entities
801  ErrorCode rval = context.MBI->get_entities_by_type( fileSet, MBVERTEX, data.all_verts, true );MB_CHK_ERR( rval ); // recursive
802 
803  // Let us check first entities of dimension = 3
804  data.dimension = 3;
805  rval = context.MBI->get_entities_by_dimension( fileSet, data.dimension, data.primary_elems, true );MB_CHK_ERR( rval ); // recursive
806 
807  if( data.primary_elems.empty() )
808  {
809  // Now 3-D elements. Let us check entities of dimension = 2
810  data.dimension = 2;
811  rval = context.MBI->get_entities_by_dimension( fileSet, data.dimension, data.primary_elems, true );MB_CHK_ERR( rval ); // recursive
812 
813  if( data.primary_elems.empty() )
814  {
815  // Now 3-D/2-D elements. Let us check entities of dimension = 1
816  data.dimension = 1;
817  rval = context.MBI->get_entities_by_dimension( fileSet, data.dimension, data.primary_elems, true );MB_CHK_ERR( rval ); // recursive
818 
819  if( data.primary_elems.empty() )
820  {
821  // no elements of dimension 1 or 2 or 3; it could happen for point clouds
822  data.dimension = 0;
823  }
824  }
825  }
826 
827  // check if the current mesh is just a point cloud
828  data.point_cloud = ( ( data.primary_elems.size() == 0 && data.all_verts.size() > 0 ) || data.dimension == 0 );
829 
830 #ifdef MOAB_HAVE_MPI
831 
832  if( context.MPI_initialized )
833  {
834  ParallelComm* pco = context.pcomms[*pid];
835 
836  // filter ghost vertices, from local
837  rval = pco->filter_pstatus( data.all_verts, PSTATUS_GHOST, PSTATUS_NOT, -1, &data.local_verts );MB_CHK_ERR( rval );
838 
839  // Store handles for all ghosted entities
840  data.ghost_vertices = subtract( data.all_verts, data.local_verts );
841 
842  // filter ghost elements, from local
843  rval = pco->filter_pstatus( data.primary_elems, PSTATUS_GHOST, PSTATUS_NOT, -1, &data.owned_elems );MB_CHK_ERR( rval );
844 
845  data.ghost_elems = subtract( data.primary_elems, data.owned_elems );
846  // now update global number of primary cells and global number of vertices
847  // determine first number of owned vertices
848  // Get local owned vertices
849  rval = pco->filter_pstatus( data.all_verts, PSTATUS_NOT_OWNED, PSTATUS_NOT, -1, &data.owned_verts );MB_CHK_ERR( rval );
850  int local[2], global[2];
851  local[0] = data.owned_verts.size();
852  local[1] = data.owned_elems.size();
853  MPI_Allreduce( local, global, 2, MPI_INT, MPI_SUM, pco->comm() );
854  rval = iMOAB_SetGlobalInfo( pid, &( global[0] ), &( global[1] ) );MB_CHK_ERR( rval );
855  }
856  else
857  {
858  data.local_verts = data.all_verts;
859  data.owned_elems = data.primary_elems;
860  }
861 
862 #else
863 
864  data.local_verts = data.all_verts;
865  data.owned_elems = data.primary_elems;
866 
867 #endif
868 
869  // Get the references for some standard internal tags such as material blocks, BCs, etc
870  rval = context.MBI->get_entities_by_type_and_tag( fileSet, MBENTITYSET, &( context.material_tag ), 0, 1,
871  data.mat_sets, Interface::UNION );MB_CHK_ERR( rval );
872 
873  rval = context.MBI->get_entities_by_type_and_tag( fileSet, MBENTITYSET, &( context.neumann_tag ), 0, 1,
874  data.neu_sets, Interface::UNION );MB_CHK_ERR( rval );
875 
876  rval = context.MBI->get_entities_by_type_and_tag( fileSet, MBENTITYSET, &( context.dirichlet_tag ), 0, 1,
877  data.diri_sets, Interface::UNION );MB_CHK_ERR( rval );
878 
879  return moab::MB_SUCCESS;
880 }
881 
882 ErrCode iMOAB_GetMeshInfo( iMOAB_AppID pid,
883  int* num_visible_vertices,
884  int* num_visible_elements,
885  int* num_visible_blocks,
886  int* num_visible_surfaceBC,
887  int* num_visible_vertexBC )
888 {
889  ErrorCode rval;
890  appData& data = context.appDatas[*pid];
891  EntityHandle fileSet = data.file_set;
892 
893  // this will include ghost elements
894  // first clear all data ranges; this can be called after ghosting
895  if( num_visible_elements )
896  {
897  num_visible_elements[2] = static_cast< int >( data.primary_elems.size() );
898  // separate ghost and local/owned primary elements
899  num_visible_elements[0] = static_cast< int >( data.owned_elems.size() );
900  num_visible_elements[1] = static_cast< int >( data.ghost_elems.size() );
901  }
902  if( num_visible_vertices )
903  {
904  num_visible_vertices[2] = static_cast< int >( data.all_verts.size() );
905  num_visible_vertices[1] = static_cast< int >( data.ghost_vertices.size() );
906  // local are those that are not ghosts; they may include shared, not owned vertices
907  num_visible_vertices[0] = num_visible_vertices[2] - num_visible_vertices[1];
908  }
909 
910  if( num_visible_blocks )
911  {
912  rval = context.MBI->get_entities_by_type_and_tag( fileSet, MBENTITYSET, &( context.material_tag ), 0, 1,
913  data.mat_sets, Interface::UNION );MB_CHK_ERR( rval );
914 
915  num_visible_blocks[2] = data.mat_sets.size();
916  num_visible_blocks[0] = num_visible_blocks[2];
917  num_visible_blocks[1] = 0;
918  }
919 
920  if( num_visible_surfaceBC )
921  {
922  rval = context.MBI->get_entities_by_type_and_tag( fileSet, MBENTITYSET, &( context.neumann_tag ), 0, 1,
923  data.neu_sets, Interface::UNION );MB_CHK_ERR( rval );
924 
925  num_visible_surfaceBC[2] = 0;
926  // count how many faces are in each neu set, and how many regions are
927  // adjacent to them;
928  int numNeuSets = (int)data.neu_sets.size();
929 
930  for( int i = 0; i < numNeuSets; i++ )
931  {
932  Range subents;
933  EntityHandle nset = data.neu_sets[i];
934  rval = context.MBI->get_entities_by_dimension( nset, data.dimension - 1, subents );MB_CHK_ERR( rval );
935 
936  for( Range::iterator it = subents.begin(); it != subents.end(); ++it )
937  {
938  EntityHandle subent = *it;
939  Range adjPrimaryEnts;
940  rval = context.MBI->get_adjacencies( &subent, 1, data.dimension, false, adjPrimaryEnts );MB_CHK_ERR( rval );
941 
942  num_visible_surfaceBC[2] += (int)adjPrimaryEnts.size();
943  }
944  }
945 
946  num_visible_surfaceBC[0] = num_visible_surfaceBC[2];
947  num_visible_surfaceBC[1] = 0;
948  }
949 
950  if( num_visible_vertexBC )
951  {
952  rval = context.MBI->get_entities_by_type_and_tag( fileSet, MBENTITYSET, &( context.dirichlet_tag ), 0, 1,
953  data.diri_sets, Interface::UNION );MB_CHK_ERR( rval );
954 
955  num_visible_vertexBC[2] = 0;
956  int numDiriSets = (int)data.diri_sets.size();
957 
958  for( int i = 0; i < numDiriSets; i++ )
959  {
960  Range verts;
961  EntityHandle diset = data.diri_sets[i];
962  rval = context.MBI->get_entities_by_dimension( diset, 0, verts );MB_CHK_ERR( rval );
963 
964  num_visible_vertexBC[2] += (int)verts.size();
965  }
966 
967  num_visible_vertexBC[0] = num_visible_vertexBC[2];
968  num_visible_vertexBC[1] = 0;
969  }
970 
971  return moab::MB_SUCCESS;
972 }
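A mesh-info sketch (editor's illustration): each output array has three entries, [0] owned, [1] ghost, [2] total visible, as filled in by the code above:

    int nverts[3], nelems[3], nblocks[3], nsbc[3], ndbc[3];
    ErrCode rc = iMOAB_GetMeshInfo( pid, nverts, nelems, nblocks, nsbc, ndbc );
    // e.g. nelems[0] owned elements, nelems[1] ghost elements, nelems[2] = nelems[0] + nelems[1]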
973 
974 ErrCode iMOAB_GetVertexID( iMOAB_AppID pid, int* vertices_length, iMOAB_GlobalID* global_vertex_ID )
975 {
976  IMOAB_CHECKPOINTER( vertices_length, 2 );
977  IMOAB_CHECKPOINTER( global_vertex_ID, 3 );
978 
979  const Range& verts = context.appDatas[*pid].all_verts;
980  // check for problems with array length
981  IMOAB_ASSERT( *vertices_length == static_cast< int >( verts.size() ), "Invalid vertices length provided" );
982 
983  // global id tag is context.globalID_tag
984  return context.MBI->tag_get_data( context.globalID_tag, verts, global_vertex_ID );
985 }
986 
987 ErrCode iMOAB_GetVertexOwnership( iMOAB_AppID pid, int* vertices_length, int* visible_global_rank_ID )
988 {
989  assert( vertices_length && *vertices_length );
990  assert( visible_global_rank_ID );
991 
992  Range& verts = context.appDatas[*pid].all_verts;
993  int i = 0;
994 #ifdef MOAB_HAVE_MPI
995  ParallelComm* pco = context.pcomms[*pid];
996 
997  for( Range::iterator vit = verts.begin(); vit != verts.end(); vit++, i++ )
998  {
999  ErrorCode rval = pco->get_owner( *vit, visible_global_rank_ID[i] );MB_CHK_ERR( rval );
1000  }
1001 
1002  if( i != *vertices_length )
1003  {
1004  return moab::MB_FAILURE;
1005  } // warning array allocation problem
1006 
1007 #else
1008 
1009  /* everything owned by proc 0 */
1010  if( (int)verts.size() != *vertices_length )
1011  {
1012  return moab::MB_FAILURE;
1013  } // warning array allocation problem
1014 
1015  for( Range::iterator vit = verts.begin(); vit != verts.end(); vit++, i++ )
1016  {
1017  visible_global_rank_ID[i] = 0;
1018  } // all vertices are owned by processor 0, as this is serial run
1019 
1020 #endif
1021 
1022  return moab::MB_SUCCESS;
1023 }
1024 
1025 ErrCode iMOAB_GetVisibleVerticesCoordinates( iMOAB_AppID pid, int* coords_length, double* coordinates )
1026 {
1027  Range& verts = context.appDatas[*pid].all_verts;
1028 
1029  // interleaved coordinates, so that means deep copy anyway
1030  if( *coords_length != 3 * (int)verts.size() )
1031  {
1032  return moab::MB_FAILURE;
1033  }
1034 
1035  ErrorCode rval = context.MBI->get_coords( verts, coordinates );MB_CHK_ERR( rval );
1036 
1037  return moab::MB_SUCCESS;
1038 }
1039 
1040 ErrCode iMOAB_GetBlockID( iMOAB_AppID pid, int* block_length, iMOAB_GlobalID* global_block_IDs )
1041 {
1042  // local id blocks? they are counted from 0 to number of visible blocks ...
1043  // will actually return material set tag value for global
1044  Range& matSets = context.appDatas[*pid].mat_sets;
1045 
1046  if( *block_length != (int)matSets.size() )
1047  {
1048  return moab::MB_FAILURE;
1049  }
1050 
1051  // return material set tag gtags[0 is material set tag
1052  ErrorCode rval = context.MBI->tag_get_data( context.material_tag, matSets, global_block_IDs );MB_CHK_ERR( rval );
1053 
1054  // populate map with index
1055  std::map< int, int >& matIdx = context.appDatas[*pid].matIndex;
1056  for( unsigned i = 0; i < matSets.size(); i++ )
1057  {
1058  matIdx[global_block_IDs[i]] = i;
1059  }
1060 
1061  return moab::MB_SUCCESS;
1062 }
1063 
1064 ErrCode iMOAB_GetBlockInfo( iMOAB_AppID pid,
1065  iMOAB_GlobalID* global_block_ID,
1066  int* vertices_per_element,
1067  int* num_elements_in_block )
1068 {
1069  assert( global_block_ID );
1070 
1071  std::map< int, int >& matMap = context.appDatas[*pid].matIndex;
1072  std::map< int, int >::iterator it = matMap.find( *global_block_ID );
1073 
1074  if( it == matMap.end() )
1075  {
1076  return moab::MB_FAILURE;
1077  } // error in finding block with id
1078 
1079  int blockIndex = matMap[*global_block_ID];
1080  EntityHandle matMeshSet = context.appDatas[*pid].mat_sets[blockIndex];
1081  Range blo_elems;
1082  ErrorCode rval = context.MBI->get_entities_by_handle( matMeshSet, blo_elems );
1083 
1084  if( MB_SUCCESS != rval || blo_elems.empty() )
1085  {
1086  return moab::MB_FAILURE;
1087  }
1088 
1089  EntityType type = context.MBI->type_from_handle( blo_elems[0] );
1090 
1091  if( !blo_elems.all_of_type( type ) )
1092  {
1093  return moab::MB_FAILURE;
1094  } // not all of same type
1095 
1096  const EntityHandle* conn = NULL;
1097  int num_verts = 0;
1098  rval = context.MBI->get_connectivity( blo_elems[0], conn, num_verts );MB_CHK_ERR( rval );
1099 
1100  *vertices_per_element = num_verts;
1101  *num_elements_in_block = (int)blo_elems.size();
1102 
1103  return moab::MB_SUCCESS;
1104 }
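A block-query sketch (editor's illustration, continuing the iMOAB_GetMeshInfo sketch above for the nblocks array):

    std::vector< iMOAB_GlobalID > blockIDs( nblocks[2] );
    ErrCode rc = iMOAB_GetBlockID( pid, &nblocks[2], &blockIDs[0] );
    int verts_per_elem = 0, nelem_in_block = 0;
    rc = iMOAB_GetBlockInfo( pid, &blockIDs[0], &verts_per_elem, &nelem_in_block );
    // verts_per_elem * nelem_in_block sizes the connectivity array for this block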
1105 
1106 ErrCode iMOAB_GetVisibleElementsInfo( iMOAB_AppID pid,
1107  int* num_visible_elements,
1108  iMOAB_GlobalID* element_global_IDs,
1109  int* ranks,
1110  iMOAB_GlobalID* block_IDs )
1111 {
1112  appData& data = context.appDatas[*pid];
1113 #ifdef MOAB_HAVE_MPI
1114  ParallelComm* pco = context.pcomms[*pid];
1115 #endif
1116 
1117  ErrorCode rval = context.MBI->tag_get_data( context.globalID_tag, data.primary_elems, element_global_IDs );MB_CHK_ERR( rval );
1118 
1119  int i = 0;
1120 
1121  for( Range::iterator eit = data.primary_elems.begin(); eit != data.primary_elems.end(); ++eit, ++i )
1122  {
1123 #ifdef MOAB_HAVE_MPI
1124  rval = pco->get_owner( *eit, ranks[i] );MB_CHK_ERR( rval );
1125 
1126 #else
1127  /* everything owned by task 0 */
1128  ranks[i] = 0;
1129 #endif
1130  }
1131 
1132  for( Range::iterator mit = data.mat_sets.begin(); mit != data.mat_sets.end(); ++mit )
1133  {
1134  EntityHandle matMeshSet = *mit;
1135  Range elems;
1136  rval = context.MBI->get_entities_by_handle( matMeshSet, elems );MB_CHK_ERR( rval );
1137 
1138  int valMatTag;
1139  rval = context.MBI->tag_get_data( context.material_tag, &matMeshSet, 1, &valMatTag );MB_CHK_ERR( rval );
1140 
1141  for( Range::iterator eit = elems.begin(); eit != elems.end(); ++eit )
1142  {
1143  EntityHandle eh = *eit;
1144  int index = data.primary_elems.index( eh );
1145 
1146  if( -1 == index )
1147  {
1148  return moab::MB_FAILURE;
1149  }
1150 
1151  if( -1 >= *num_visible_elements )
1152  {
1153  return moab::MB_FAILURE;
1154  }
1155 
1156  block_IDs[index] = valMatTag;
1157  }
1158  }
1159 
1160  return moab::MB_SUCCESS;
1161 }
1162 
1163 ErrCode iMOAB_GetBlockElementConnectivities( iMOAB_AppID pid,
1164  iMOAB_GlobalID* global_block_ID,
1165  int* connectivity_length,
1166  int* element_connectivity )
1167 {
1168  assert( global_block_ID ); // ensure global block ID argument is not null
1169  assert( connectivity_length ); // ensure connectivity length argument is not null
1170 
1171  appData& data = context.appDatas[*pid];
1172  std::map< int, int >& matMap = data.matIndex;
1173  std::map< int, int >::iterator it = matMap.find( *global_block_ID );
1174 
1175  if( it == matMap.end() )
1176  {
1177  return moab::MB_FAILURE;
1178  } // error in finding block with id
1179 
1180  int blockIndex = matMap[*global_block_ID];
1181  EntityHandle matMeshSet = data.mat_sets[blockIndex];
1182  std::vector< EntityHandle > elems;
1183 
1184  ErrorCode rval = context.MBI->get_entities_by_handle( matMeshSet, elems );MB_CHK_ERR( rval );
1185 
1186  if( elems.empty() )
1187  {
1188  return moab::MB_FAILURE;
1189  }
1190 
1191  std::vector< EntityHandle > vconnect;
1192  rval = context.MBI->get_connectivity( &elems[0], elems.size(), vconnect );MB_CHK_ERR( rval );
1193 
1194  if( *connectivity_length != (int)vconnect.size() )
1195  {
1196  return moab::MB_FAILURE;
1197  } // mismatched sizes
1198 
1199  for( int i = 0; i < *connectivity_length; i++ )
1200  {
1201  int inx = data.all_verts.index( vconnect[i] );
1202 
1203  if( -1 == inx )
1204  {
1205  return moab::MB_FAILURE;
1206  } // error, vertex not in local range
1207 
1208  element_connectivity[i] = inx;
1209  }
1210 
1211  return moab::MB_SUCCESS;
1212 }
1213 
1214 ErrCode iMOAB_GetElementConnectivity( iMOAB_AppID pid,
1215  iMOAB_LocalID* elem_index,
1216  int* connectivity_length,
1217  int* element_connectivity )
1218 {
1219  assert( elem_index ); // ensure element index argument is not null
1220  assert( connectivity_length ); // ensure connectivity length argument is not null
1221 
1222  appData& data = context.appDatas[*pid];
1223  assert( ( *elem_index >= 0 ) && ( *elem_index < (int)data.primary_elems.size() ) );
1224 
1225  int num_nodes;
1226  const EntityHandle* conn;
1227 
1228  EntityHandle eh = data.primary_elems[*elem_index];
1229 
1230  ErrorCode rval = context.MBI->get_connectivity( eh, conn, num_nodes );MB_CHK_ERR( rval );
1231 
1232  if( *connectivity_length < num_nodes )
1233  {
1234  return moab::MB_FAILURE;
1235  } // wrong number of vertices
1236 
1237  for( int i = 0; i < num_nodes; i++ )
1238  {
1239  int index = data.all_verts.index( conn[i] );
1240 
1241  if( -1 == index )
1242  {
1243  return moab::MB_FAILURE;
1244  }
1245 
1246  element_connectivity[i] = index;
1247  }
1248 
1249  *connectivity_length = num_nodes;
1250 
1251  return moab::MB_SUCCESS;
1252 }
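A per-element connectivity sketch (editor's illustration; assumes the application has at least one visible primary element):

    iMOAB_LocalID elem = 0;   // index into the visible primary elements
    int conn_len = 27;        // upper bound; the call shrinks it to the actual vertex count
    int conn[27];
    ErrCode rc = iMOAB_GetElementConnectivity( pid, &elem, &conn_len, conn );
    // conn[0 .. conn_len-1] holds local vertex indices into the visible vertex range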
1253 
1254 ErrCode iMOAB_GetElementOwnership( iMOAB_AppID pid,
1255  iMOAB_GlobalID* global_block_ID,
1256  int* num_elements_in_block,
1257  int* element_ownership )
1258 {
1259  assert( global_block_ID ); // ensure global block ID argument is not null
1260  assert( num_elements_in_block ); // ensure number of elements in block argument is not null
1261 
1262  std::map< int, int >& matMap = context.appDatas[*pid].matIndex;
1263 
1264  std::map< int, int >::iterator it = matMap.find( *global_block_ID );
1265 
1266  if( it == matMap.end() )
1267  {
1268  return moab::MB_FAILURE;
1269  } // error in finding block with id
1270 
1271  int blockIndex = matMap[*global_block_ID];
1272  EntityHandle matMeshSet = context.appDatas[*pid].mat_sets[blockIndex];
1273  Range elems;
1274 
1275  ErrorCode rval = context.MBI->get_entities_by_handle( matMeshSet, elems );MB_CHK_ERR( rval );
1276 
1277  if( elems.empty() )
1278  {
1279  return moab::MB_FAILURE;
1280  }
1281 
1282  if( *num_elements_in_block != (int)elems.size() )
1283  {
1284  return moab::MB_FAILURE;
1285  } // bad memory allocation
1286 
1287  int i = 0;
1288 #ifdef MOAB_HAVE_MPI
1289  ParallelComm* pco = context.pcomms[*pid];
1290 #endif
1291 
1292  for( Range::iterator vit = elems.begin(); vit != elems.end(); vit++, i++ )
1293  {
1294 #ifdef MOAB_HAVE_MPI
1295  rval = pco->get_owner( *vit, element_ownership[i] );MB_CHK_ERR( rval );
1296 #else
1297  element_ownership[i] = 0; /* owned by 0 */
1298 #endif
1299  }
1300 
1301  return moab::MB_SUCCESS;
1302 }
1303 
1304 ErrCode iMOAB_GetElementID( iMOAB_AppID pid,
1305  iMOAB_GlobalID* global_block_ID,
1306  int* num_elements_in_block,
1307  iMOAB_GlobalID* global_element_ID,
1308  iMOAB_LocalID* local_element_ID )
1309 {
1310  assert( global_block_ID ); // ensure global block ID argument is not null
1311  assert( num_elements_in_block ); // ensure number of elements in block argument is not null
1312 
1313  appData& data = context.appDatas[*pid];
1314  std::map< int, int >& matMap = data.matIndex;
1315 
1316  std::map< int, int >::iterator it = matMap.find( *global_block_ID );
1317 
1318  if( it == matMap.end() )
1319  {
1320  return moab::MB_FAILURE;
1321  } // error in finding block with id
1322 
1323  int blockIndex = matMap[*global_block_ID];
1324  EntityHandle matMeshSet = data.mat_sets[blockIndex];
1325  Range elems;
1326  ErrorCode rval = context.MBI->get_entities_by_handle( matMeshSet, elems );MB_CHK_ERR( rval );
1327 
1328  if( elems.empty() )
1329  {
1330  return moab::MB_FAILURE;
1331  }
1332 
1333  if( *num_elements_in_block != (int)elems.size() )
1334  {
1335  return moab::MB_FAILURE;
1336  } // bad memory allocation
1337 
1338  rval = context.MBI->tag_get_data( context.globalID_tag, elems, global_element_ID );MB_CHK_ERR( rval );
1339 
1340  // check that elems are among primary_elems in data
1341  for( int i = 0; i < *num_elements_in_block; i++ )
1342  {
1343  local_element_ID[i] = data.primary_elems.index( elems[i] );
1344 
1345  if( -1 == local_element_ID[i] )
1346  {
1347  return moab::MB_FAILURE;
1348  } // error, not in local primary elements
1349  }
1350 
1351  return moab::MB_SUCCESS;
1352 }
1353 
1354 ErrCode iMOAB_GetPointerToSurfaceBC( iMOAB_AppID pid,
1355  int* surface_BC_length,
1356  iMOAB_LocalID* local_element_ID,
1357  int* reference_surface_ID,
1358  int* boundary_condition_value )
1359 {
1360  // we have to fill bc data for neumann sets
1361  ErrorCode rval;
1362 
1363  // it was counted above, in GetMeshInfo
1364  appData& data = context.appDatas[*pid];
1365  int numNeuSets = (int)data.neu_sets.size();
1366 
1367  int index = 0; // index [0, surface_BC_length) for the arrays returned
1368 
1369  for( int i = 0; i < numNeuSets; i++ )
1370  {
1371  Range subents;
1372  EntityHandle nset = data.neu_sets[i];
1373  rval = context.MBI->get_entities_by_dimension( nset, data.dimension - 1, subents );MB_CHK_ERR( rval );
1374 
1375  int neuVal;
1376  rval = context.MBI->tag_get_data( context.neumann_tag, &nset, 1, &neuVal );MB_CHK_ERR( rval );
1377 
1378  for( Range::iterator it = subents.begin(); it != subents.end(); ++it )
1379  {
1380  EntityHandle subent = *it;
1381  Range adjPrimaryEnts;
1382  rval = context.MBI->get_adjacencies( &subent, 1, data.dimension, false, adjPrimaryEnts );MB_CHK_ERR( rval );
1383 
1384  // get global id of the primary ents, and side number of the quad/subentity
1385  // this is moab ordering
1386  for( Range::iterator pit = adjPrimaryEnts.begin(); pit != adjPrimaryEnts.end(); pit++ )
1387  {
1388  EntityHandle primaryEnt = *pit;
1389  // get global id
1390  /*int globalID;
1391  rval = context.MBI->tag_get_data(gtags[3], &primaryEnt, 1, &globalID);
1392  if (MB_SUCCESS!=rval)
1393  return moab::MB_FAILURE;
1394  global_element_ID[index] = globalID;*/
1395 
1396  // get local element id
1397  local_element_ID[index] = data.primary_elems.index( primaryEnt );
1398 
1399  if( -1 == local_element_ID[index] )
1400  {
1401  return moab::MB_FAILURE;
1402  } // did not find the element locally
1403 
1404  int side_number, sense, offset;
1405  rval = context.MBI->side_number( primaryEnt, subent, side_number, sense, offset );MB_CHK_ERR( rval );
1406 
1407  reference_surface_ID[index] = side_number + 1; // moab is from 0 to 5, it needs 1 to 6
1408  boundary_condition_value[index] = neuVal;
1409  index++;
1410  }
1411  }
1412  }
1413 
1414  if( index != *surface_BC_length )
1415  {
1416  return moab::MB_FAILURE;
1417  } // error in array allocations
1418 
1419  return moab::MB_SUCCESS;
1420 }
1421 
1422 ErrCode iMOAB_GetPointerToVertexBC( iMOAB_AppID pid,
1423  int* vertex_BC_length,
1424  iMOAB_LocalID* local_vertex_ID,
1425  int* boundary_condition_value )
1426 {
1427  ErrorCode rval;
1428 
1429  // it was counted above, in GetMeshInfo
1430  appData& data = context.appDatas[*pid];
1431  int numDiriSets = (int)data.diri_sets.size();
1432  int index = 0; // index [0, *vertex_BC_length) for the arrays returned
1433 
1434  for( int i = 0; i < numDiriSets; i++ )
1435  {
1436  Range verts;
1437  EntityHandle diset = data.diri_sets[i];
1438  rval = context.MBI->get_entities_by_dimension( diset, 0, verts );MB_CHK_ERR( rval );
1439 
1440  int diriVal;
1441  rval = context.MBI->tag_get_data( context.dirichlet_tag, &diset, 1, &diriVal );MB_CHK_ERR( rval );
1442 
1443  for( Range::iterator vit = verts.begin(); vit != verts.end(); ++vit )
1444  {
1445  EntityHandle vt = *vit;
1446  /*int vgid;
1447  rval = context.MBI->tag_get_data(gtags[3], &vt, 1, &vgid);
1448  if (MB_SUCCESS!=rval)
1449  return moab::MB_FAILURE;
1450  global_vertext_ID[index] = vgid;*/
1451  local_vertex_ID[index] = data.all_verts.index( vt );
1452 
1453  if( -1 == local_vertex_ID[index] )
1454  {
1455  return moab::MB_FAILURE;
1456  } // vertex was not found
1457 
1458  boundary_condition_value[index] = diriVal;
1459  index++;
1460  }
1461  }
1462 
1463  if( *vertex_BC_length != index )
1464  {
1465  return moab::MB_FAILURE;
1466  } // array allocation issue
1467 
1468  return moab::MB_SUCCESS;
1469 }
1470 
1471 ErrCode iMOAB_DefineTagStorage( iMOAB_AppID pid,
1472  const iMOAB_String tag_storage_name,
1473  int* tag_type,
1474  int* components_per_entity,
1475  int* tag_index )
1476 {
1477  // see if the tag is already existing, and if yes, check the type, length
1478  if( *tag_type < 0 || *tag_type > 5 )
1479  {
1480  return moab::MB_FAILURE;
1481  } // we have 6 types of tags supported so far
1482 
1483  DataType tagDataType;
1484  TagType tagType;
1485  void* defaultVal = NULL;
1486  int* defInt = new int[*components_per_entity];
1487  double* defDouble = new double[*components_per_entity];
1488  EntityHandle* defHandle = new EntityHandle[*components_per_entity];
1489 
1490  for( int i = 0; i < *components_per_entity; i++ )
1491  {
1492  defInt[i] = 0;
1493  defDouble[i] = -1e+10;
1494  defHandle[i] = (EntityHandle)0;
1495  }
1496 
1497  switch( *tag_type )
1498  {
1499  case 0:
1500  tagDataType = MB_TYPE_INTEGER;
1501  tagType = MB_TAG_DENSE;
1502  defaultVal = defInt;
1503  break;
1504 
1505  case 1:
1506  tagDataType = MB_TYPE_DOUBLE;
1507  tagType = MB_TAG_DENSE;
1508  defaultVal = defDouble;
1509  break;
1510 
1511  case 2:
1512  tagDataType = MB_TYPE_HANDLE;
1513  tagType = MB_TAG_DENSE;
1514  defaultVal = defHandle;
1515  break;
1516 
1517  case 3:
1518  tagDataType = MB_TYPE_INTEGER;
1519  tagType = MB_TAG_SPARSE;
1520  defaultVal = defInt;
1521  break;
1522 
1523  case 4:
1524  tagDataType = MB_TYPE_DOUBLE;
1525  tagType = MB_TAG_SPARSE;
1526  defaultVal = defDouble;
1527  break;
1528 
1529  case 5:
1530  tagDataType = MB_TYPE_HANDLE;
1531  tagType = MB_TAG_SPARSE;
1532  defaultVal = defHandle;
1533  break;
1534 
1535  default: {
1536  delete[] defInt;
1537  delete[] defDouble;
1538  delete[] defHandle;
1539  return moab::MB_FAILURE;
1540  } // error
1541  }
1542 
1543  Tag tagHandle;
1544  // split storage names if separated list
1545 
1546  std::string tag_name( tag_storage_name );
1547 
1548  // first separate the names of the tags
1549  // we assume that there are separators ":" between the tag names
1550  std::vector< std::string > tagNames;
1551  std::string separator( ":" );
1552  split_tag_names( tag_name, separator, tagNames );
1553 
1554  ErrorCode rval = moab::MB_SUCCESS; // assume success already :)
1555  appData& data = context.appDatas[*pid];
1556  int already_defined_tags = 0;
1557 
1558  for( size_t i = 0; i < tagNames.size(); i++ )
1559  {
1560  rval = context.MBI->tag_get_handle( tagNames[i].c_str(), *components_per_entity, tagDataType, tagHandle,
1561  tagType | MB_TAG_EXCL | MB_TAG_CREAT, defaultVal );
1562 
1563  if( MB_ALREADY_ALLOCATED == rval )
1564  {
1565  std::map< std::string, Tag >& mTags = data.tagMap;
1566  std::map< std::string, Tag >::iterator mit = mTags.find( tagNames[i].c_str() );
1567 
1568  if( mit == mTags.end() )
1569  {
1570  // add it to the map
1571  mTags[tagNames[i]] = tagHandle;
1572  // push it to the list of tags, too
1573  *tag_index = (int)data.tagList.size();
1574  data.tagList.push_back( tagHandle );
1575  }
1576  rval = MB_SUCCESS;
1577  already_defined_tags++;
1578  }
1579  else if( MB_SUCCESS == rval )
1580  {
1581  data.tagMap[tagNames[i]] = tagHandle;
1582  *tag_index = (int)data.tagList.size();
1583  data.tagList.push_back( tagHandle );
1584  }
1585  else
1586  {
1587  rval = moab::MB_FAILURE; // some tags were not created
1588  }
1589  }
1590  // we don't need default values anymore, avoid leaks
1591  int rankHere = 0;
1592 #ifdef MOAB_HAVE_MPI
1593  ParallelComm* pco = context.pcomms[*pid];
1594  rankHere = pco->rank();
1595 #endif
1596  if( !rankHere && already_defined_tags )
1597  std::cout << " application with ID: " << *pid << " global id: " << data.global_id << " name: " << data.name
1598  << " has " << already_defined_tags << " already defined tags out of " << tagNames.size()
1599  << " tags \n";
1600  delete[] defInt;
1601  delete[] defDouble;
1602  delete[] defHandle;
1603  return rval;
1604 }
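A tag-definition sketch (editor's illustration; the field name is a placeholder and tag_type follows the 0..5 convention of the switch above, here 1 = dense, double-valued):

    char tagname[] = "a2oTbot";   // ":"-separated lists of names are also accepted
    int tag_type = 1;             // dense double tag
    int ncomp = 1;                // components per entity
    int tag_index = -1;
    ErrCode rc = iMOAB_DefineTagStorage( pid, tagname, &tag_type, &ncomp, &tag_index );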
1605 
1606 ErrCode iMOAB_SetIntTagStorage( iMOAB_AppID pid,
1607  const iMOAB_String tag_storage_name,
1608  int* num_tag_storage_length,
1609  int* ent_type,
1610  int* tag_storage_data )
1611 {
1612  std::string tag_name( tag_storage_name );
1613 
1614  // Get the application data
1615  appData& data = context.appDatas[*pid];
1616 
1617  if( data.tagMap.find( tag_name ) == data.tagMap.end() )
1618  {
1619  return moab::MB_FAILURE;
1620  } // tag not defined
1621 
1622  Tag tag = data.tagMap[tag_name];
1623 
1624  int tagLength = 0;
1625  ErrorCode rval = context.MBI->tag_get_length( tag, tagLength );MB_CHK_ERR( rval );
1626 
1627  DataType dtype;
1628  rval = context.MBI->tag_get_data_type( tag, dtype );
1629 
1630  if( MB_SUCCESS != rval || dtype != MB_TYPE_INTEGER )
1631  {
1632  return moab::MB_FAILURE;
1633  }
1634 
1635  // set it on a subset of entities, based on type and length
1636  Range* ents_to_set;
1637 
1638  if( *ent_type == 0 ) // vertices
1639  {
1640  ents_to_set = &data.all_verts;
1641  }
1642  else // if (*ent_type == 1) // *ent_type can be 0 (vertices) or 1 (elements)
1643  {
1644  ents_to_set = &data.primary_elems;
1645  }
1646 
1647  int nents_to_be_set = *num_tag_storage_length / tagLength;
1648 
1649  if( nents_to_be_set > (int)ents_to_set->size() )
1650  {
1651  return moab::MB_FAILURE;
1652  } // too many entities to be set, or too few
1653 
1654  // restrict the range; everything is contiguous; or not?
1655 
1656  // Range contig_range ( * ( ents_to_set->begin() ), * ( ents_to_set->begin() + nents_to_be_set -
1657  // 1 ) );
1658  rval = context.MBI->tag_set_data( tag, *ents_to_set, tag_storage_data );MB_CHK_ERR( rval );
1659 
1660  return moab::MB_SUCCESS; // no error
1661 }
1662 
1663 ErrCode iMOAB_GetIntTagStorage( iMOAB_AppID pid,
1664  const iMOAB_String tag_storage_name,
1665  int* num_tag_storage_length,
1666  int* ent_type,
1667  int* tag_storage_data )
1668 {
1669  ErrorCode rval;
1670  std::string tag_name( tag_storage_name );
1671 
1672  appData& data = context.appDatas[*pid];
1673 
1674  if( data.tagMap.find( tag_name ) == data.tagMap.end() )
1675  {
1676  return moab::MB_FAILURE;
1677  } // tag not defined
1678 
1679  Tag tag = data.tagMap[tag_name];
1680 
1681  int tagLength = 0;
1682  rval = context.MBI->tag_get_length( tag, tagLength );MB_CHK_ERR( rval );
1683 
1684  DataType dtype;
1685  rval = context.MBI->tag_get_data_type( tag, dtype );MB_CHK_ERR( rval );
1686 
1687  if( dtype != MB_TYPE_INTEGER )
1688  {
1689  return moab::MB_FAILURE;
1690  }
1691 
1692  // set it on a subset of entities, based on type and length
1693  Range* ents_to_get;
1694 
1695  if( *ent_type == 0 ) // vertices
1696  {
1697  ents_to_get = &data.all_verts;
1698  }
1699  else // if (*ent_type == 1)
1700  {
1701  ents_to_get = &data.primary_elems;
1702  }
1703 
1704  int nents_to_get = *num_tag_storage_length / tagLength;
1705 
1706  if( nents_to_get > (int)ents_to_get->size() )
1707  {
1708  return moab::MB_FAILURE;
1709  } // too many entities to get, or too few
1710 
1711  // restrict the range; everything is contiguous; or not?
1712  // Range contig_range ( * ( ents_to_get->begin() ), * ( ents_to_get->begin() + nents_to_get - 1
1713  // ) );
1714 
1715  rval = context.MBI->tag_get_data( tag, *ents_to_get, tag_storage_data );MB_CHK_ERR( rval );
1716 
1717  return moab::MB_SUCCESS; // no error
1718 }
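A set/get round trip for an integer tag on vertices (editor's illustration; the tag name is a placeholder and must have been defined with an integer tag_type; nverts comes from the iMOAB_GetMeshInfo sketch above):

    char itagname[] = "VERTEX_MARKER";
    int ent_type = 0;                      // 0 = vertices, 1 = primary elements
    int nvals = nverts[2];                 // visible vertices times one component
    std::vector< int > ivals( nvals, 7 );  // placeholder values
    ErrCode rc = iMOAB_SetIntTagStorage( pid, itagname, &nvals, &ent_type, &ivals[0] );
    rc = iMOAB_GetIntTagStorage( pid, itagname, &nvals, &ent_type, &ivals[0] );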
1719 
1720 ErrCode iMOAB_SetDoubleTagStorage( iMOAB_AppID pid,
1721  const iMOAB_String tag_storage_names,
1722  int* num_tag_storage_length,
1723  int* ent_type,
1724  double* tag_storage_data )
1725 {
1726  ErrorCode rval;
1727  std::string tag_names( tag_storage_names );
1728  // exactly the same code as for int tag :) maybe should check the type of tag too
1729  std::vector< std::string > tagNames;
1730  std::vector< Tag > tagHandles;
1731  std::string separator( ":" );
1732  split_tag_names( tag_names, separator, tagNames );
1733 
1734  appData& data = context.appDatas[*pid];
1735  Range* ents_to_set = NULL;
1736 
1737  if( *ent_type == 0 ) // vertices
1738  {
1739  ents_to_set = &data.all_verts;
1740  }
1741  else if( *ent_type == 1 )
1742  {
1743  ents_to_set = &data.primary_elems;
1744  }
1745 
1746  int nents_to_be_set = (int)( *ents_to_set ).size();
1747  int position = 0;
1748 
1749  for( size_t i = 0; i < tagNames.size(); i++ )
1750  {
1751  if( data.tagMap.find( tagNames[i] ) == data.tagMap.end() )
1752  {
1753  return moab::MB_FAILURE;
1754  } // some tag not defined yet in the app
1755 
1756  Tag tag = data.tagMap[tagNames[i]];
1757 
1758  int tagLength = 0;
1759  rval = context.MBI->tag_get_length( tag, tagLength );MB_CHK_ERR( rval );
1760 
1761  DataType dtype;
1762  rval = context.MBI->tag_get_data_type( tag, dtype );MB_CHK_ERR( rval );
1763 
1764  if( dtype != MB_TYPE_DOUBLE )
1765  {
1766  return moab::MB_FAILURE;
1767  }
1768 
1769  // set it on the subset of entities, based on type and length
1770  if( position + tagLength * nents_to_be_set > *num_tag_storage_length )
1771  return moab::MB_FAILURE; // too many entity values to be set
1772 
1773  rval = context.MBI->tag_set_data( tag, *ents_to_set, &tag_storage_data[position] );MB_CHK_ERR( rval );
1774  // increment position to next tag
1775  position = position + tagLength * nents_to_be_set;
1776  }
1777  return moab::MB_SUCCESS; // no error
1778 }
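A multi-tag sketch (editor's illustration; the tag names are placeholders for previously defined double tags, and nelems comes from the iMOAB_GetMeshInfo sketch above); the values are unrolled tag by tag over the chosen entity range:

    char dtagnames[] = "bolus_u:bolus_v";       // two tags joined with ":"
    int ent_type = 1;                           // set on primary elements
    int nvals = 2 * nelems[2];                  // two tags, one component each, all visible elements
    std::vector< double > dvals( nvals, 0.0 );
    ErrCode rc = iMOAB_SetDoubleTagStorage( pid, dtagnames, &nvals, &ent_type, &dvals[0] );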
1779 
1780 ErrCode iMOAB_SetDoubleTagStorageWithGid( iMOAB_AppID pid,
1781  const iMOAB_String tag_storage_names,
1782  int* num_tag_storage_length,
1783  int* ent_type,
1784  double* tag_storage_data,
1785  int* globalIds )
1786 {
1787  ErrorCode rval;
1788  std::string tag_names( tag_storage_names );
1789  // exactly the same code as for int tag :) maybe should check the type of tag too
1790  std::vector< std::string > tagNames;
1791  std::vector< Tag > tagHandles;
1792  std::string separator( ":" );
1793  split_tag_names( tag_names, separator, tagNames );
1794 
1795  appData& data = context.appDatas[*pid];
1796  Range* ents_to_set = NULL;
1797 
1798  if( *ent_type == 0 ) // vertices
1799  {
1800  ents_to_set = &data.all_verts;
1801  }
1802  else if( *ent_type == 1 )
1803  {
1804  ents_to_set = &data.primary_elems;
1805  }
1806 
1807  int nents_to_be_set = (int)( *ents_to_set ).size();
1808 
1809  Tag gidTag = context.MBI->globalId_tag();
1810  std::vector< int > gids;
1811  gids.resize( nents_to_be_set );
1812  rval = context.MBI->tag_get_data( gidTag, *ents_to_set, &gids[0] );MB_CHK_ERR( rval );
1813 
1814  // so we will need to set the tags according to the global id passed;
1815  // so the order in tag_storage_data is the same as the order in globalIds, but the order
1816  // in local range is gids
1817  std::map< int, EntityHandle > eh_by_gid;
1818  int i = 0;
1819  for( Range::iterator it = ents_to_set->begin(); it != ents_to_set->end(); ++it, ++i )
1820  {
1821  eh_by_gid[gids[i]] = *it;
1822  }
1823 
1824  std::vector< int > tagLengths( tagNames.size() );
1825  std::vector< Tag > tagList;
1826  size_t total_tag_len = 0;
1827  for( size_t i = 0; i < tagNames.size(); i++ )
1828  {
1829  if( data.tagMap.find( tagNames[i] ) == data.tagMap.end() )
1830  {
1831  MB_SET_ERR( moab::MB_FAILURE, "tag missing" );
1832  } // some tag not defined yet in the app
1833 
1834  Tag tag = data.tagMap[tagNames[i]];
1835  tagList.push_back( tag );
1836 
1837  int tagLength = 0;
1838  rval = context.MBI->tag_get_length( tag, tagLength );MB_CHK_ERR( rval );
1839 
1840  total_tag_len += tagLength;
1841  tagLengths[i] = tagLength;
1842  DataType dtype;
1843  rval = context.MBI->tag_get_data_type( tag, dtype );MB_CHK_ERR( rval );
1844 
1845  if( dtype != MB_TYPE_DOUBLE )
1846  {
1847  MB_SET_ERR( moab::MB_FAILURE, "tag not double type" );
1848  }
1849  }
1850  bool serial = true;
1851 #ifdef MOAB_HAVE_MPI
1852  ParallelComm* pco = context.pcomms[*pid];
1853  unsigned num_procs = pco->size();
1854  if( num_procs > 1 ) serial = false;
1855 #endif
1856 
1857  if( serial )
1858  {
1859  // we do not assume anymore that the number of entities has to match
1860  // we will set only what matches, and skip entities that do not have corresponding global ids
1861  //assert( total_tag_len * nents_to_be_set - *num_tag_storage_length == 0 );
1862  // tags are unrolled, we loop over global ids first, then careful about tags
1863  for( int i = 0; i < nents_to_be_set; i++ )
1864  {
1865  int gid = globalIds[i];
1866  std::map< int, EntityHandle >::iterator mapIt = eh_by_gid.find( gid );
1867  if( mapIt == eh_by_gid.end() ) continue;
1868  EntityHandle eh = mapIt->second;
1869  // now loop over tags
1870  int indexInTagValues = 0; //
1871  for( size_t j = 0; j < tagList.size(); j++ )
1872  {
1873  indexInTagValues += i * tagLengths[j];
1874  rval = context.MBI->tag_set_data( tagList[j], &eh, 1, &tag_storage_data[indexInTagValues] );MB_CHK_ERR( rval );
1875  // advance the pointer/index
1876  indexInTagValues += ( nents_to_be_set - i ) * tagLengths[j]; // at the end of tag data
1877  }
1878  }
1879  }
1880 #ifdef MOAB_HAVE_MPI
1881  else // it can be not serial only if pco->size() > 1, parallel
1882  {
1883  // in this case, we have to use 2 crystal routers, to send data to the processor that needs it
1884  // we will first create a tuple list sent to the rendezvous points, and from there forward the data to the processor that requested it
1885  // it is a 2-hop global gather scatter
1886  // TODO: allow for tags of different length; this is wrong
1887  int nbLocalVals = *num_tag_storage_length / ( (int)tagNames.size() ); // assumes all tags have the same length?
1888  // we do not expect the sizes to match
1889  //assert( nbLocalVals * tagNames.size() - *num_tag_storage_length == 0 );
1890  TupleList TLsend;
1891  TLsend.initialize( 2, 0, 0, total_tag_len, nbLocalVals ); // to proc, marker(gid), total_tag_len doubles
1892  TLsend.enableWriteAccess();
1893  // the processor that handles a given global_id at the rendezvous point is global_id % num_procs
1894 
1895  int indexInRealLocal = 0;
1896  for( int i = 0; i < nbLocalVals; i++ )
1897  {
1898  // to proc, marker, element local index, index in el
1899  int marker = globalIds[i];
1900  int to_proc = marker % num_procs;
1901  int n = TLsend.get_n();
1902  TLsend.vi_wr[2 * n] = to_proc; // send to processor
1903  TLsend.vi_wr[2 * n + 1] = marker;
1904  int indexInTagValues = 0;
1905  // tag data collect by number of tags
1906  for( size_t j = 0; j < tagList.size(); j++ )
1907  {
1908  indexInTagValues += i * tagLengths[j];
1909  for( int k = 0; k < tagLengths[j]; k++ )
1910  {
1911  TLsend.vr_wr[indexInRealLocal++] = tag_storage_data[indexInTagValues + k];
1912  }
1913  indexInTagValues += ( nbLocalVals - i ) * tagLengths[j];
1914  }
1915  TLsend.inc_n();
1916  }
1917 
1918  //assert( nbLocalVals * total_tag_len - indexInRealLocal == 0 );
1919  // send now requests, basically inform the rendez-vous point who needs a particular global id
1920  // send the data to the other processors:
1921  ( pco->proc_config().crystal_router() )->gs_transfer( 1, TLsend, 0 );
1922  TupleList TLreq;
1923  TLreq.initialize( 2, 0, 0, 0, nents_to_be_set );
1924  TLreq.enableWriteAccess();
1925  for( int i = 0; i < nents_to_be_set; i++ )
1926  {
1927  // to proc, marker
1928  int marker = gids[i];
1929  int to_proc = marker % num_procs;
1930  int n = TLreq.get_n();
1931  TLreq.vi_wr[2 * n] = to_proc; // send to processor
1932  TLreq.vi_wr[2 * n + 1] = marker;
1933  // tag data collect by number of tags
1934  TLreq.inc_n();
1935  }
1936  ( pco->proc_config().crystal_router() )->gs_transfer( 1, TLreq, 0 );
1937 
1938  // we know now that process TLreq.vi_wr[2 * n] needs tags for gid TLreq.vi_wr[2 * n + 1]
1939  // we should first order by global id, and then build the new TL with send to proc, global id and
1940  // tags for it
1941  // sort by global ids the tuple lists
1942  moab::TupleList::buffer sort_buffer;
1943  sort_buffer.buffer_init( TLreq.get_n() );
1944  TLreq.sort( 1, &sort_buffer );
1945  sort_buffer.reset();
1946  sort_buffer.buffer_init( TLsend.get_n() );
1947  TLsend.sort( 1, &sort_buffer );
1948  sort_buffer.reset();
1949  // now send the tag values to the proc that requested it
1950  // in theory, for a full partition, TLreq and TLsend should have the same size, and
1951  // each dof should have exactly one target proc. Is that true or not in general ?
1952  // how do we plan to use this? Is it better to store the comm graph for future use?
1953 
1954  // start copy from comm graph settle
1955  TupleList TLBack;
1956  TLBack.initialize( 3, 0, 0, total_tag_len, 0 ); // to proc, marker, tag from proc , tag values
1957  TLBack.enableWriteAccess();
1958 
1959  int n1 = TLreq.get_n();
1960  int n2 = TLsend.get_n();
1961 
1962  int indexInTLreq = 0;
1963  int indexInTLsend = 0; // advance both, according to the marker
1964  if( n1 > 0 && n2 > 0 )
1965  {
1966 
1967  while( indexInTLreq < n1 && indexInTLsend < n2 ) // if any is over, we are done
1968  {
1969  int currentValue1 = TLreq.vi_rd[2 * indexInTLreq + 1];
1970  int currentValue2 = TLsend.vi_rd[2 * indexInTLsend + 1];
1971  if( currentValue1 < currentValue2 )
1972  {
1973  // we have a big problem; basically, we are saying that
1974  // dof currentValue is on one model and not on the other
1975  // std::cout << " currentValue1:" << currentValue1 << " missing in comp2" << "\n";
1976  indexInTLreq++;
1977  continue;
1978  }
1979  if( currentValue1 > currentValue2 )
1980  {
1981  // std::cout << " currentValue2:" << currentValue2 << " missing in comp1" << "\n";
1982  indexInTLsend++;
1983  continue;
1984  }
1985  int size1 = 1;
1986  int size2 = 1;
1987  while( indexInTLreq + size1 < n1 && currentValue1 == TLreq.vi_rd[2 * ( indexInTLreq + size1 ) + 1] )
1988  size1++;
1989  while( indexInTLsend + size2 < n2 && currentValue2 == TLsend.vi_rd[2 * ( indexInTLsend + size2 ) + 1] )
1990  size2++;
1991  // must be found in both lists, find the start and end indices
1992  for( int i1 = 0; i1 < size1; i1++ )
1993  {
1994  for( int i2 = 0; i2 < size2; i2++ )
1995  {
1996  // send the info back to components
1997  int n = TLBack.get_n();
1998  TLBack.reserve();
1999  TLBack.vi_wr[3 * n] = TLreq.vi_rd[2 * ( indexInTLreq + i1 )]; // send back to the proc marker
2000  // came from, info from comp2
2001  TLBack.vi_wr[3 * n + 1] = currentValue1; // initial value (resend, just for verif ?)
2002  TLBack.vi_wr[3 * n + 2] = TLsend.vi_rd[2 * ( indexInTLsend + i2 )]; // from proc on comp2
2003  // also fill tag values
2004  for( size_t k = 0; k < total_tag_len; k++ )
2005  {
2006  TLBack.vr_rd[total_tag_len * n + k] =
2007  TLsend.vr_rd[total_tag_len * indexInTLsend + k]; // deep copy of tag values
2008  }
2009  }
2010  }
2011  indexInTLreq += size1;
2012  indexInTLsend += size2;
2013  }
2014  }
2015  ( pco->proc_config().crystal_router() )->gs_transfer( 1, TLBack, 0 );
2016  // end copy from comm graph
2017  // after we are done sending, we need to set those tag values, in a reverse process compared to send
2018  n1 = TLBack.get_n();
2019  double* ptrVal = &TLBack.vr_rd[0]; //
2020  for( int i = 0; i < n1; i++ )
2021  {
2022  int gid = TLBack.vi_rd[3 * i + 1]; // marker
2023  std::map< int, EntityHandle >::iterator mapIt = eh_by_gid.find( gid );
2024  if( mapIt == eh_by_gid.end() ) continue;
2025  EntityHandle eh = mapIt->second;
2026  // now loop over tags
2027 
2028  for( size_t j = 0; j < tagList.size(); j++ )
2029  {
2030  rval = context.MBI->tag_set_data( tagList[j], &eh, 1, (void*)ptrVal );MB_CHK_ERR( rval );
2031  // advance the pointer/index
2032  ptrVal += tagLengths[j]; // at the end of tag data per call
2033  }
2034  }
2035  }
2036 #endif
2037  return MB_SUCCESS;
2038 }
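// --- Illustrative usage sketch (not part of the original iMOAB.cpp) ---
// A component that stores field values ordered by its own list of global IDs could
// push them onto the mesh with the routine above. `pid`, `gids` and the tag name
// "a2oTbot" below are hypothetical caller-side names; a single scalar tag is assumed,
// with the values ordered exactly like the globalIds array:
//
//   int ent_type = 1;                          // 1 = primary elements (cells)
//   int nvals    = (int)gids.size();           // one double per global id
//   std::vector< double > vals( nvals, 0.0 );  // values, in the same order as gids
//   ErrCode ierr = iMOAB_SetDoubleTagStorageWithGid( pid, "a2oTbot", &nvals,
//                                                    &ent_type, vals.data(),
//                                                    gids.data() );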
2039 ErrCode iMOAB_GetDoubleTagStorage( iMOAB_AppID pid,
2040  const iMOAB_String tag_storage_names,
2041  int* num_tag_storage_length,
2042  int* ent_type,
2043  double* tag_storage_data )
2044 {
2045  ErrorCode rval;
2046  // mirrors the set routine; the data type of each tag is checked below
2047  std::string tag_names( tag_storage_names );
2048  // the tag names are separated by ":"
2049  std::vector< std::string > tagNames;
2050  std::vector< Tag > tagHandles;
2051  std::string separator( ":" );
2052  split_tag_names( tag_names, separator, tagNames );
2053 
2054  appData& data = context.appDatas[*pid];
2055 
2056  // get it from a subset of entities, based on type and length
2057  Range* ents_to_get = NULL;
2058 
2059  if( *ent_type == 0 ) // vertices
2060  {
2061  ents_to_get = &data.all_verts;
2062  }
2063  else if( *ent_type == 1 )
2064  {
2065  ents_to_get = &data.primary_elems;
2066  }
2067  int nents_to_get = (int)ents_to_get->size();
2068  int position = 0;
2069  for( size_t i = 0; i < tagNames.size(); i++ )
2070  {
2071  if( data.tagMap.find( tagNames[i] ) == data.tagMap.end() )
2072  {
2073  return moab::MB_FAILURE;
2074  } // tag not defined
2075 
2076  Tag tag = data.tagMap[tagNames[i]];
2077 
2078  int tagLength = 0;
2079  rval = context.MBI->tag_get_length( tag, tagLength );MB_CHK_ERR( rval );
2080 
2081  DataType dtype;
2082  rval = context.MBI->tag_get_data_type( tag, dtype );MB_CHK_ERR( rval );
2083 
2084  if( dtype != MB_TYPE_DOUBLE )
2085  {
2086  return moab::MB_FAILURE;
2087  }
2088 
2089  if( position + nents_to_get * tagLength > *num_tag_storage_length )
2090  return moab::MB_FAILURE; // too many entity values to get
2091 
2092  rval = context.MBI->tag_get_data( tag, *ents_to_get, &tag_storage_data[position] );MB_CHK_ERR( rval );
2093  position = position + nents_to_get * tagLength;
2094  }
2095 
2096  return moab::MB_SUCCESS; // no error
2097 }
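// --- Illustrative usage sketch (not part of the original iMOAB.cpp) ---
// Reading tags back is symmetric: the caller sizes the output buffer as
// (number of entities) x (sum of tag lengths) and passes the ":"-separated tag
// names; values are returned tag by tag (all entities for the first tag, then the
// second, ...). `pid`, `nverts` and the tag names are hypothetical:
//
//   int ent_type    = 0;                       // 0 = vertices
//   int storage_len = nverts * 2;              // two scalar tags, length 1 each
//   std::vector< double > vals( storage_len );
//   ErrCode ierr = iMOAB_GetDoubleTagStorage( pid, "Temperature:Salinity",
//                                             &storage_len, &ent_type, vals.data() );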
2098 
2099 ErrCode iMOAB_SynchronizeTags( iMOAB_AppID pid, int* num_tag, int* tag_indices, int* ent_type )
2100 {
2101 #ifdef MOAB_HAVE_MPI
2102  appData& data = context.appDatas[*pid];
2103  Range ent_exchange;
2104  std::vector< Tag > tags;
2105 
2106  for( int i = 0; i < *num_tag; i++ )
2107  {
2108  if( tag_indices[i] < 0 || tag_indices[i] >= (int)data.tagList.size() )
2109  {
2110  return moab::MB_FAILURE;
2111  } // error in tag index
2112 
2113  tags.push_back( data.tagList[tag_indices[i]] );
2114  }
2115 
2116  if( *ent_type == 0 )
2117  {
2118  ent_exchange = data.all_verts;
2119  }
2120  else if( *ent_type == 1 )
2121  {
2122  ent_exchange = data.primary_elems;
2123  }
2124  else
2125  {
2126  return moab::MB_FAILURE;
2127  } // unexpected type
2128 
2129  ParallelComm* pco = context.pcomms[*pid];
2130 
2131  ErrorCode rval = pco->exchange_tags( tags, tags, ent_exchange );MB_CHK_ERR( rval );
2132 
2133 #else
2134  /* do nothing if serial */
2135  // just silence the warning
2136  // do not call sync tags in serial!
2137  int k = *pid + *num_tag + *tag_indices + *ent_type;
2138  k++;
2139 #endif
2140 
2141  return moab::MB_SUCCESS;
2142 }
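// --- Illustrative usage sketch (not part of the original iMOAB.cpp) ---
// After owned values have been updated, the ghost/shared copies can be refreshed
// with one collective call; tag_indices holds indices returned earlier by
// iMOAB_DefineTagStorage. `pid` is a hypothetical, already registered application:
//
//   int num_tag        = 1;
//   int tag_indices[1] = { 0 };   // first tag defined on this application
//   int ent_type       = 0;       // 0 = vertices, 1 = primary elements
//   ErrCode ierr = iMOAB_SynchronizeTags( pid, &num_tag, tag_indices, &ent_type );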
2143 
2144 ErrCode iMOAB_ReduceTagsMax( iMOAB_AppID pid, int* tag_index, int* ent_type )
2145 {
2146 #ifdef MOAB_HAVE_MPI
2147  appData& data = context.appDatas[*pid];
2148  Range ent_exchange;
2149 
2150  if( *tag_index < 0 || *tag_index >= (int)data.tagList.size() )
2151  {
2152  return moab::MB_FAILURE;
2153  } // error in tag index
2154 
2155  Tag tagh = data.tagList[*tag_index];
2156 
2157  if( *ent_type == 0 )
2158  {
2159  ent_exchange = data.all_verts;
2160  }
2161  else if( *ent_type == 1 )
2162  {
2163  ent_exchange = data.primary_elems;
2164  }
2165  else
2166  {
2167  return moab::MB_FAILURE;
2168  } // unexpected type
2169 
2170  ParallelComm* pco = context.pcomms[*pid];
2171  // we could do different MPI_Op; do not bother now, we will call from fortran
2172  ErrorCode rval = pco->reduce_tags( tagh, MPI_MAX, ent_exchange );MB_CHK_ERR( rval );
2173 
2174 #else
2175  /* do nothing if serial */
2176  // just silence the warning
2177  // do not call sync tags in serial!
2178  int k = *pid + *tag_index + *ent_type;
2179  k++; // just do junk, to avoid complaints
2180 #endif
2181  return moab::MB_SUCCESS;
2182 }
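// --- Illustrative usage sketch (not part of the original iMOAB.cpp) ---
// iMOAB_ReduceTagsMax combines the shared copies of one tag with MPI_MAX, which is
// useful for integer or double flags that must end up identical on every sharing
// process; `pid` and the tag index are hypothetical:
//
//   int tag_index = 0;            // index of the flag tag in this app's tag list
//   int ent_type  = 1;            // reduce over primary elements
//   ErrCode ierr  = iMOAB_ReduceTagsMax( pid, &tag_index, &ent_type );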
2183 
2184 ErrCode iMOAB_GetNeighborElements( iMOAB_AppID pid,
2185  iMOAB_LocalID* local_index,
2186  int* num_adjacent_elements,
2187  iMOAB_LocalID* adjacent_element_IDs )
2188 {
2189  ErrorCode rval;
2190 
2191  // one neighbor for each subentity of dimension-1
2192  MeshTopoUtil mtu( context.MBI );
2193  appData& data = context.appDatas[*pid];
2194  EntityHandle eh = data.primary_elems[*local_index];
2195  Range adjs;
2196  rval = mtu.get_bridge_adjacencies( eh, data.dimension - 1, data.dimension, adjs );MB_CHK_ERR( rval );
2197 
2198  if( *num_adjacent_elements < (int)adjs.size() )
2199  {
2200  return moab::MB_FAILURE;
2201  } // not dimensioned correctly
2202 
2203  *num_adjacent_elements = (int)adjs.size();
2204 
2205  for( int i = 0; i < *num_adjacent_elements; i++ )
2206  {
2207  adjacent_element_IDs[i] = data.primary_elems.index( adjs[i] );
2208  }
2209 
2210  return moab::MB_SUCCESS;
2211 }
2212 
2213 #if 0
2214 
2215 ErrCode iMOAB_GetNeighborVertices ( iMOAB_AppID pid, iMOAB_LocalID* local_vertex_ID, int* num_adjacent_vertices, iMOAB_LocalID* adjacent_vertex_IDs )
2216 {
2217  return moab::MB_SUCCESS;
2218 }
2219 
2220 #endif
2221 
2222 ErrCode iMOAB_CreateVertices( iMOAB_AppID pid, int* coords_len, int* dim, double* coordinates )
2223 {
2224  ErrorCode rval;
2225  appData& data = context.appDatas[*pid];
2226 
2227  if( !data.local_verts.empty() ) // we should have no vertices in the app
2228  {
2229  return moab::MB_FAILURE;
2230  }
2231 
2232  int nverts = *coords_len / *dim;
2233 
2234  rval = context.MBI->create_vertices( coordinates, nverts, data.local_verts );MB_CHK_ERR( rval );
2235 
2236  rval = context.MBI->add_entities( data.file_set, data.local_verts );MB_CHK_ERR( rval );
2237 
2238  // also add the vertices to the all_verts range
2239  data.all_verts.merge( data.local_verts );
2240  return moab::MB_SUCCESS;
2241 }
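// --- Illustrative usage sketch (not part of the original iMOAB.cpp) ---
// Coordinates are passed as one flat, interleaved array (x0,y0,z0, x1,y1,z1, ...);
// the sketch below creates two vertices on a hypothetical application `pid`:
//
//   double coords[6] = { 0.0, 0.0, 0.0,
//                        1.0, 0.0, 0.0 };
//   int coords_len = 6;
//   int dim        = 3;
//   ErrCode ierr = iMOAB_CreateVertices( pid, &coords_len, &dim, coords );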
2242 
2243 ErrCode iMOAB_CreateElements( iMOAB_AppID pid,
2244  int* num_elem,
2245  int* type,
2246  int* num_nodes_per_element,
2247  int* connectivity,
2248  int* block_ID )
2249 {
2250  // Create elements
2251  appData& data = context.appDatas[*pid];
2252 
2253  ReadUtilIface* read_iface;
2254  ErrorCode rval = context.MBI->query_interface( read_iface );MB_CHK_ERR( rval );
2255 
2256  EntityType mbtype = (EntityType)( *type );
2257  EntityHandle actual_start_handle;
2258  EntityHandle* array = NULL;
2259  rval = read_iface->get_element_connect( *num_elem, *num_nodes_per_element, mbtype, 1, actual_start_handle, array );MB_CHK_ERR( rval );
2260 
2261  // fill up with actual connectivity from input; assume the vertices are in order, and start
2262  // vertex is the first in the current data vertex range
2263  EntityHandle firstVertex = data.local_verts[0];
2264 
2265  for( int j = 0; j < *num_elem * ( *num_nodes_per_element ); j++ )
2266  {
2267  array[j] = connectivity[j] + firstVertex - 1;
2268  } // assumes connectivity uses 1 based array (from fortran, mostly)
2269 
2270  Range new_elems( actual_start_handle, actual_start_handle + *num_elem - 1 );
2271 
2272  rval = context.MBI->add_entities( data.file_set, new_elems );MB_CHK_ERR( rval );
2273 
2274  data.primary_elems.merge( new_elems );
2275 
2276  // add to adjacency
2277  rval = read_iface->update_adjacencies( actual_start_handle, *num_elem, *num_nodes_per_element, array );MB_CHK_ERR( rval );
2278  // organize all new elements in a block, with the given block ID; if the block set does not
2279  // exist, create a new mesh set
2280  Range sets;
2281  int set_no = *block_ID;
2282  const void* setno_ptr = &set_no;
2283  rval = context.MBI->get_entities_by_type_and_tag( data.file_set, MBENTITYSET, &context.material_tag, &setno_ptr, 1,
2284  sets );
2285  EntityHandle block_set;
2286 
2287  if( MB_FAILURE == rval || sets.empty() )
2288  {
2289  // create a new set, with this block ID
2290  rval = context.MBI->create_meshset( MESHSET_SET, block_set );MB_CHK_ERR( rval );
2291 
2292  rval = context.MBI->tag_set_data( context.material_tag, &block_set, 1, &set_no );MB_CHK_ERR( rval );
2293 
2294  // add the material set to file set
2295  rval = context.MBI->add_entities( data.file_set, &block_set, 1 );MB_CHK_ERR( rval );
2296  }
2297  else
2298  {
2299  block_set = sets[0];
2300  } // first set is the one we want
2301 
2302  /// add the new ents to the block set
2303  rval = context.MBI->add_entities( block_set, new_elems );MB_CHK_ERR( rval );
2304 
2305  return moab::MB_SUCCESS;
2306 }
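// --- Illustrative usage sketch (not part of the original iMOAB.cpp) ---
// Connectivity is 1-based and refers to the vertices created with iMOAB_CreateVertices,
// in creation order; `type` is a moab::EntityType value cast to int. The block id 100
// and application `pid` are hypothetical:
//
//   int num_elem       = 1;
//   int type           = (int)moab::MBQUAD;   // one quadrilateral
//   int nodes_per_elem = 4;
//   int conn[4]        = { 1, 2, 3, 4 };      // 1-based indices into created vertices
//   int block_id       = 100;                 // material set receiving the element
//   ErrCode ierr = iMOAB_CreateElements( pid, &num_elem, &type, &nodes_per_elem,
//                                        conn, &block_id );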
2307 
2308 ErrCode iMOAB_SetGlobalInfo( iMOAB_AppID pid, int* num_global_verts, int* num_global_elems )
2309 {
2310  appData& data = context.appDatas[*pid];
2311  data.num_global_vertices = *num_global_verts;
2312  data.num_global_elements = *num_global_elems;
2313  return moab::MB_SUCCESS;
2314 }
2315 
2316 ErrCode iMOAB_GetGlobalInfo( iMOAB_AppID pid, int* num_global_verts, int* num_global_elems )
2317 {
2318  appData& data = context.appDatas[*pid];
2319  if( NULL != num_global_verts )
2320  {
2321  *num_global_verts = data.num_global_vertices;
2322  }
2323  if( NULL != num_global_elems )
2324  {
2325  *num_global_elems = data.num_global_elements;
2326  }
2327 
2328  return moab::MB_SUCCESS;
2329 }
2330 
2331 #ifdef MOAB_HAVE_MPI
2332 
2333 // this makes sense only for parallel runs
2334 ErrCode iMOAB_ResolveSharedEntities( iMOAB_AppID pid, int* num_verts, int* marker )
2335 {
2336  appData& data = context.appDatas[*pid];
2337  ParallelComm* pco = context.pcomms[*pid];
2338  EntityHandle cset = data.file_set;
2339  int dum_id = 0;
2340  ErrorCode rval;
2341  if( data.primary_elems.empty() )
2342  {
2343  // skip the actual resolve; assume the vertices are already distributed and
2344  // there is no need to share them
2345  }
2346  else
2347  {
2348  // create an integer tag for resolving ; maybe it can be a long tag in the future
2349  // (more than 2 B vertices;)
2350 
2351  Tag stag;
2352  rval = context.MBI->tag_get_handle( "__sharedmarker", 1, MB_TYPE_INTEGER, stag, MB_TAG_CREAT | MB_TAG_DENSE,
2353  &dum_id );MB_CHK_ERR( rval );
2354 
2355  if( *num_verts > (int)data.local_verts.size() )
2356  {
2357  return moab::MB_FAILURE;
2358  } // we are not setting the size
2359 
2360  rval = context.MBI->tag_set_data( stag, data.local_verts, (void*)marker );MB_CHK_ERR( rval ); // assumes integer tag
2361 
2362  rval = pco->resolve_shared_ents( cset, -1, -1, &stag );MB_CHK_ERR( rval );
2363 
2364  rval = context.MBI->tag_delete( stag );MB_CHK_ERR( rval );
2365  }
2366  // provide partition tag equal to rank
2367  Tag part_tag;
2368  dum_id = -1;
2369  rval = context.MBI->tag_get_handle( "PARALLEL_PARTITION", 1, MB_TYPE_INTEGER, part_tag,
2370  MB_TAG_CREAT | MB_TAG_SPARSE, &dum_id );
2371 
2372  if( part_tag == NULL || ( ( rval != MB_SUCCESS ) && ( rval != MB_ALREADY_ALLOCATED ) ) )
2373  {
2374  std::cout << " can't get par part tag.\n";
2375  return moab::MB_FAILURE;
2376  }
2377 
2378  int rank = pco->rank();
2379  rval = context.MBI->tag_set_data( part_tag, &cset, 1, &rank );MB_CHK_ERR( rval );
2380 
2381  return moab::MB_SUCCESS;
2382 }
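// --- Illustrative usage sketch (not part of the original iMOAB.cpp) ---
// The `marker` array gives one integer per local vertex (typically its global id);
// vertices with equal markers on different ranks become shared. Assuming the mesh was
// just built with iMOAB_CreateVertices / iMOAB_CreateElements, and that
// `vertex_global_ids` is a hypothetical caller-side vector:
//
//   int num_verts = (int)vertex_global_ids.size();
//   ErrCode ierr  = iMOAB_ResolveSharedEntities( pid, &num_verts,
//                                                vertex_global_ids.data() );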
2383 
2384 // this assumes that this was not called before
2385 ErrCode iMOAB_DetermineGhostEntities( iMOAB_AppID pid, int* ghost_dim, int* num_ghost_layers, int* bridge_dim )
2386 {
2387  ErrorCode rval;
2388 
2389  // verify we have valid ghost layers input specified. If invalid, exit quick.
2390  if( num_ghost_layers && *num_ghost_layers <= 0 )
2391  {
2392  return moab::MB_SUCCESS;
2393  } // nothing to do
2394 
2395  appData& data = context.appDatas[*pid];
2396  ParallelComm* pco = context.pcomms[*pid];
2397 
2398  // maybe addl_ents should be passed in too; most of the time no additional entities are needed (this is a collective call)
2399  constexpr int addl_ents = 0;
2400  rval = pco->exchange_ghost_cells( *ghost_dim, *bridge_dim, 1, // get only one layer of ghost entities
2401  addl_ents, true, true, &data.file_set );MB_CHK_ERR( rval );
2402  for( int i = 2; i <= *num_ghost_layers; i++ )
2403  {
2404  rval = pco->correct_thin_ghost_layers();MB_CHK_ERR( rval ); // correct for thin layers
2405  rval = pco->exchange_ghost_cells( *ghost_dim, *bridge_dim, i, // iteratively get one extra layer
2406  addl_ents, true, true, &data.file_set );MB_CHK_ERR( rval );
2407  }
2408 
2409  // Update ghost layer information
2410  data.num_ghost_layers = *num_ghost_layers;
2411 
2412  // now re-establish all mesh info; will reconstruct mesh info, based solely on what is in the file set
2413  return iMOAB_UpdateMeshInfo( pid );
2414 }
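// --- Illustrative usage sketch (not part of the original iMOAB.cpp) ---
// A typical request for one layer of ghost cells on a 2d mesh, with adjacency
// established through shared vertices (bridge dimension 0); `pid` is hypothetical:
//
//   int ghost_dim  = 2;   // ghost the cells
//   int num_layers = 1;   // one layer of ghosts
//   int bridge_dim = 0;   // adjacency through vertices
//   ErrCode ierr = iMOAB_DetermineGhostEntities( pid, &ghost_dim, &num_layers,
//                                                &bridge_dim );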
2415 
2416 ErrCode iMOAB_SendMesh( iMOAB_AppID pid, MPI_Comm* join, MPI_Group* receivingGroup, int* rcompid, int* method )
2417 {
2418  assert( join != nullptr );
2419  assert( receivingGroup != nullptr );
2420  assert( rcompid != nullptr );
2421 
2422  ErrorCode rval;
2423  int ierr;
2424  appData& data = context.appDatas[*pid];
2425  ParallelComm* pco = context.pcomms[*pid];
2426 
2427  MPI_Comm global = ( data.is_fortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( join ) ) : *join );
2428  MPI_Group recvGroup =
2429  ( data.is_fortran ? MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( receivingGroup ) ) : *receivingGroup );
2430  MPI_Comm sender = pco->comm(); // the sender comm is obtained from parallel comm in moab; no need to pass it along
2431  // first see what are the processors in each group; get the sender group too, from the sender communicator
2432  MPI_Group senderGroup;
2433  ierr = MPI_Comm_group( sender, &senderGroup );
2434  if( ierr != 0 ) return moab::MB_FAILURE;
2435 
2436  // instantiate the par comm graph
2437  // ParCommGraph::ParCommGraph(MPI_Comm joincomm, MPI_Group group1, MPI_Group group2, int coid1,
2438  // int coid2)
2439  ParCommGraph* cgraph =
2440  new ParCommGraph( global, senderGroup, recvGroup, context.appDatas[*pid].global_id, *rcompid );
2441  // we should search if we have another pcomm with the same comp ids in the list already
2442  // sort of check existing comm graphs in the map context.appDatas[*pid].pgraph
2443  context.appDatas[*pid].pgraph[*rcompid] = cgraph;
2444 
2445  int sender_rank = -1;
2446  MPI_Comm_rank( sender, &sender_rank );
2447 
2448  // decide how to distribute elements to each processor
2449  // now, get the entities on local processor, and pack them into a buffer for various processors
2450  // we will do trivial partition: first get the total number of elements from "sender"
2451  std::vector< int > number_elems_per_part;
2452  // how to distribute local elements to receiving tasks?
2453  // trivial partition: compute first the total number of elements need to be sent
2454  Range owned = context.appDatas[*pid].owned_elems;
2455  if( owned.size() == 0 )
2456  {
2457  // no owned elements, so we must be sending vertices (point cloud)
2458  owned = context.appDatas[*pid].local_verts;
2459  // we should have some vertices here
2460  }
2461 
2462  if( *method == 0 ) // trivial partitioning, old method
2463  {
2464  int local_owned_elem = (int)owned.size();
2465  int size = pco->size();
2466  int rank = pco->rank();
2467  number_elems_per_part.resize( size ); //
2468  number_elems_per_part[rank] = local_owned_elem;
2469 #if( MPI_VERSION >= 2 )
2470  // Use "in place" option
2471  ierr = MPI_Allgather( MPI_IN_PLACE, 0, MPI_DATATYPE_NULL, &number_elems_per_part[0], 1, MPI_INT, sender );
2472 #else
2473  {
2474  std::vector< int > all_tmp( size );
2475  ierr = MPI_Allgather( &number_elems_per_part[rank], 1, MPI_INT, &all_tmp[0], 1, MPI_INT, sender );
2476  number_elems_per_part = all_tmp;
2477  }
2478 #endif
2479 
2480  if( ierr != 0 )
2481  {
2482  return moab::MB_FAILURE;
2483  }
2484 
2485  // every sender computes the trivial partition, it is cheap, and we need to send it anyway
2486  // to the receivers
2487  rval = cgraph->compute_trivial_partition( number_elems_per_part );MB_CHK_ERR( rval );
2488 
2489  rval = cgraph->send_graph( global );MB_CHK_ERR( rval );
2490  }
2491  else // *method != 0, so it is either graph or geometric, parallel
2492  {
2493  // owned are the primary elements on this app
2494  rval = cgraph->compute_partition( pco, owned, *method );MB_CHK_ERR( rval );
2495 
2496  // basically, send the graph to the receiver side, with unblocking send
2497  rval = cgraph->send_graph_partition( pco, global );MB_CHK_ERR( rval );
2498  }
2499  // pco is needed to pack, not for communication
2500  rval = cgraph->send_mesh_parts( global, pco, owned );MB_CHK_ERR( rval );
2501 
2502  // mark for deletion
2503  MPI_Group_free( &senderGroup );
2504  return moab::MB_SUCCESS;
2505 }
2506 
2507 ErrCode iMOAB_ReceiveMesh( iMOAB_AppID pid, MPI_Comm* join, MPI_Group* sendingGroup, int* scompid )
2508 {
2509  assert( join != nullptr );
2510  assert( sendingGroup != nullptr );
2511  assert( scompid != nullptr );
2512 
2513  ErrorCode rval;
2514  appData& data = context.appDatas[*pid];
2515  ParallelComm* pco = context.pcomms[*pid];
2516  MPI_Comm receive = pco->comm();
2517  EntityHandle local_set = data.file_set;
2518 
2519  MPI_Comm global = ( data.is_fortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( join ) ) : *join );
2520  MPI_Group sendGroup =
2521  ( data.is_fortran ? MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( sendingGroup ) ) : *sendingGroup );
2522 
2523  // first see what are the processors in each group; get the sender group too, from the sender
2524  // communicator
2525  MPI_Group receiverGroup;
2526  int ierr = MPI_Comm_group( receive, &receiverGroup );CHK_MPI_ERR( ierr );
2527 
2528  // instantiate the par comm graph
2529  ParCommGraph* cgraph =
2530  new ParCommGraph( global, sendGroup, receiverGroup, *scompid, context.appDatas[*pid].global_id );
2531  // TODO we should search if we have another pcomm with the same comp ids in the list already
2532  // sort of check existing comm graphs in the map context.appDatas[*pid].pgraph
2533  context.appDatas[*pid].pgraph[*scompid] = cgraph;
2534 
2535  int receiver_rank = -1;
2536  MPI_Comm_rank( receive, &receiver_rank );
2537 
2538  // first, receive from sender_rank 0, the communication graph (matrix), so each receiver
2539  // knows what data to expect
2540  std::vector< int > pack_array;
2541  rval = cgraph->receive_comm_graph( global, pco, pack_array );MB_CHK_ERR( rval );
2542 
2543  // find the senders for the current receiver
2544  int current_receiver = cgraph->receiver( receiver_rank );
2545 
2546  std::vector< int > senders_local;
2547  size_t n = 0;
2548 
2549  while( n < pack_array.size() )
2550  {
2551  if( current_receiver == pack_array[n] )
2552  {
2553  for( int j = 0; j < pack_array[n + 1]; j++ )
2554  {
2555  senders_local.push_back( pack_array[n + 2 + j] );
2556  }
2557 
2558  break;
2559  }
2560 
2561  n = n + 2 + pack_array[n + 1];
2562  }
2563 
2564 #ifdef VERBOSE
2565  std::cout << " receiver " << current_receiver << " at rank " << receiver_rank << " will receive from "
2566  << senders_local.size() << " tasks: ";
2567 
2568  for( int k = 0; k < (int)senders_local.size(); k++ )
2569  {
2570  std::cout << " " << senders_local[k];
2571  }
2572 
2573  std::cout << "\n";
2574 #endif
2575 
2576  if( senders_local.empty() )
2577  {
2578  std::cout << " we do not have any senders for receiver rank " << receiver_rank << "\n";
2579  }
2580  rval = cgraph->receive_mesh( global, pco, local_set, senders_local );MB_CHK_ERR( rval );
2581 
2582  // after we are done, we could merge vertices that come from different senders, but
2583  // have the same global id
2584  Tag idtag;
2585  rval = context.MBI->tag_get_handle( "GLOBAL_ID", idtag );MB_CHK_ERR( rval );
2586 
2587  // data.point_cloud = false;
2588  Range local_ents;
2589  rval = context.MBI->get_entities_by_handle( local_set, local_ents );MB_CHK_ERR( rval );
2590 
2591  // do not do merge if point cloud
2592  if( !local_ents.all_of_type( MBVERTEX ) )
2593  {
2594  if( (int)senders_local.size() >= 2 ) // need to remove duplicate vertices
2595  // that might come from different senders
2596  {
2597 
2598  Range local_verts = local_ents.subset_by_type( MBVERTEX );
2599  Range local_elems = subtract( local_ents, local_verts );
2600 
2601  // remove from local set the vertices
2602  rval = context.MBI->remove_entities( local_set, local_verts );MB_CHK_ERR( rval );
2603 
2604 #ifdef VERBOSE
2605  std::cout << "current_receiver " << current_receiver << " local verts: " << local_verts.size() << "\n";
2606 #endif
2607  MergeMesh mm( context.MBI );
2608 
2609  rval = mm.merge_using_integer_tag( local_verts, idtag );MB_CHK_ERR( rval );
2610 
2611  Range new_verts; // local elems are local entities without vertices
2612  rval = context.MBI->get_connectivity( local_elems, new_verts );MB_CHK_ERR( rval );
2613 
2614 #ifdef VERBOSE
2615  std::cout << "after merging: new verts: " << new_verts.size() << "\n";
2616 #endif
2617  rval = context.MBI->add_entities( local_set, new_verts );MB_CHK_ERR( rval );
2618  }
2619  }
2620  else
2621  data.point_cloud = true;
2622 
2623  if( !data.point_cloud )
2624  {
2625  // still need to resolve shared entities (in this case, vertices )
2626  rval = pco->resolve_shared_ents( local_set, -1, -1, &idtag );MB_CHK_ERR( rval );
2627  }
2628  else
2629  {
2630  // if partition tag exists, set it to current rank; just to make it visible in VisIt
2631  Tag densePartTag;
2632  rval = context.MBI->tag_get_handle( "partition", densePartTag );
2633  if( NULL != densePartTag && MB_SUCCESS == rval )
2634  {
2635  Range local_verts;
2636  rval = context.MBI->get_entities_by_dimension( local_set, 0, local_verts );MB_CHK_ERR( rval );
2637  std::vector< int > vals;
2638  int rank = pco->rank();
2639  vals.resize( local_verts.size(), rank );
2640  rval = context.MBI->tag_set_data( densePartTag, local_verts, &vals[0] );MB_CHK_ERR( rval );
2641  }
2642  }
2643  // set the parallel partition tag
2644  Tag part_tag;
2645  int dum_id = -1;
2646  rval = context.MBI->tag_get_handle( "PARALLEL_PARTITION", 1, MB_TYPE_INTEGER, part_tag,
2647  MB_TAG_CREAT | MB_TAG_SPARSE, &dum_id );
2648 
2649  if( part_tag == NULL || ( ( rval != MB_SUCCESS ) && ( rval != MB_ALREADY_ALLOCATED ) ) )
2650  {
2651  std::cout << " can't get par part tag.\n";
2652  return moab::MB_FAILURE;
2653  }
2654 
2655  int rank = pco->rank();
2656  rval = context.MBI->tag_set_data( part_tag, &local_set, 1, &rank );MB_CHK_ERR( rval );
2657 
2658  // make sure that the GLOBAL_ID is defined as a tag
2659  int tagtype = 0; // dense, integer
2660  int numco = 1; // size
2661  int tagindex; // not used
2662  rval = iMOAB_DefineTagStorage( pid, "GLOBAL_ID", &tagtype, &numco, &tagindex );MB_CHK_ERR( rval );
2663  // populate the mesh with current data info
2664  rval = iMOAB_UpdateMeshInfo( pid );MB_CHK_ERR( rval );
2665 
2666  // mark for deletion
2667  MPI_Group_free( &receiverGroup );
2668 
2669  return moab::MB_SUCCESS;
2670 }
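// --- Illustrative usage sketch (not part of the original iMOAB.cpp) ---
// iMOAB_SendMesh and iMOAB_ReceiveMesh are paired collectives over a joint
// communicator: the component that owns the mesh calls the first, the coupler tasks
// call the second. `joinComm`, `compGroup`, `couplerGroup`, `compId`, `couplerId`,
// `pidComp` and `pidCoupler` are hypothetical handles set up by the driver:
//
//   ErrCode ierr;
//   if( is_component_task )
//   {
//       int method = 0;   // 0 = trivial partition; nonzero = graph/geometric partition
//       ierr = iMOAB_SendMesh( pidComp, &joinComm, &couplerGroup, &couplerId, &method );
//   }
//   if( is_coupler_task )
//   {
//       ierr = iMOAB_ReceiveMesh( pidCoupler, &joinComm, &compGroup, &compId );
//   }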
2671 
2672 ErrCode iMOAB_SendElementTag( iMOAB_AppID pid, const iMOAB_String tag_storage_name, MPI_Comm* join, int* context_id )
2673 {
2674  appData& data = context.appDatas[*pid];
2675  std::map< int, ParCommGraph* >::iterator mt = data.pgraph.find( *context_id );
2676  if( mt == data.pgraph.end() )
2677  {
2678  return moab::MB_FAILURE;
2679  }
2680  ParCommGraph* cgraph = mt->second;
2681  ParallelComm* pco = context.pcomms[*pid];
2682  Range owned = data.owned_elems;
2683  ErrorCode rval;
2684  EntityHandle cover_set;
2685 
2686  MPI_Comm global = ( data.is_fortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( join ) ) : *join );
2687  if( data.point_cloud )
2688  {
2689  owned = data.local_verts;
2690  }
2691 
2692  // another possibility is for the par comm graph to be computed from iMOAB_ComputeCommGraph,
2693  // e.g. after atm-ocn intx, from phys (case from imoab_phatm_ocn_coupler.cpp); then get the cover set
2694  // from the intx remapper
2695 #ifdef MOAB_HAVE_TEMPESTREMAP
2696  if( data.tempestData.remapper != NULL ) // this is the case this is part of intx;;
2697  {
2698  cover_set = data.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh );
2699  rval = context.MBI->get_entities_by_dimension( cover_set, 2, owned );MB_CHK_ERR( rval );
2700  // should still have only quads ?
2701  }
2702 #else
2703  // now, in case we are sending from intx between ocn and atm, we involve coverage set
2704  // how do I know if this receiver already participated in an intersection driven by coupler?
2705  // also, what if this was the "source" mesh in intx?
2706  // in that case, the elements might have been instantiated in the coverage set locally; the
2707  // "owned" range can be different, because the elements are now in the tempestRemap coverage_set
2708  cover_set = cgraph->get_cover_set(); // this will be non null only for intx app ?
2709 
2710  if( 0 != cover_set )
2711  {
2712  rval = context.MBI->get_entities_by_dimension( cover_set, 2, owned );MB_CHK_ERR( rval );
2713  }
2714 #endif
2715 
2716  std::string tag_name( tag_storage_name );
2717 
2718  // basically, we assume the tags are already defined,
2719  // and we can get them just by name
2720  // we assume that there are separators ":" between the tag names
2721  std::vector< std::string > tagNames;
2722  std::vector< Tag > tagHandles;
2723  std::string separator( ":" );
2724  split_tag_names( tag_name, separator, tagNames );
2725  for( size_t i = 0; i < tagNames.size(); i++ )
2726  {
2727  Tag tagHandle;
2728  rval = context.MBI->tag_get_handle( tagNames[i].c_str(), tagHandle );
2729  if( MB_SUCCESS != rval || NULL == tagHandle )
2730  {
2731  std::cout << " can't get tag handle for tag named:" << tagNames[i].c_str() << " at index " << i << "\n";MB_CHK_SET_ERR( rval, "can't get tag handle" );
2732  }
2733  tagHandles.push_back( tagHandle );
2734  }
2735 
2736  // pco is needed to pack, and for moab instance, not for communication!
2737  // still use nonblocking communication, over the joint comm
2738  rval = cgraph->send_tag_values( global, pco, owned, tagHandles );MB_CHK_ERR( rval );
2739  // now, send to each corr_tasks[i] tag data for corr_sizes[i] primary entities
2740 
2741  return moab::MB_SUCCESS;
2742 }
2743 
2744 ErrCode iMOAB_ReceiveElementTag( iMOAB_AppID pid, const iMOAB_String tag_storage_name, MPI_Comm* join, int* context_id )
2745 {
2746  appData& data = context.appDatas[*pid];
2747 
2748  std::map< int, ParCommGraph* >::iterator mt = data.pgraph.find( *context_id );
2749  if( mt == data.pgraph.end() )
2750  {
2751  return moab::MB_FAILURE;
2752  }
2753  ParCommGraph* cgraph = mt->second;
2754 
2755  MPI_Comm global = ( data.is_fortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( join ) ) : *join );
2756  ParallelComm* pco = context.pcomms[*pid];
2757  Range owned = data.owned_elems;
2758 
2759  // how do I know if this receiver already participated in an intersection driven by coupler?
2760  // also, what if this was the "source" mesh in intx?
2761  // in that case, the elements might have been instantiated in the coverage set locally; the
2762  // "owned" range can be different, because the elements are now in the tempestRemap coverage_set
2763  EntityHandle cover_set = cgraph->get_cover_set();
2764  ErrorCode rval;
2765  if( 0 != cover_set )
2766  {
2767  rval = context.MBI->get_entities_by_dimension( cover_set, 2, owned );MB_CHK_ERR( rval );
2768  }
2769  if( data.point_cloud )
2770  {
2771  owned = data.local_verts;
2772  }
2773  // another possibility is for the par comm graph to be computed from iMOAB_ComputeCommGraph,
2774  // e.g. after atm-ocn intx, from phys (case from imoab_phatm_ocn_coupler.cpp); then get the cover set
2775  // from the intx remapper
2776 #ifdef MOAB_HAVE_TEMPESTREMAP
2777  if( data.tempestData.remapper != NULL ) // this is the case this is part of intx;;
2778  {
2779  cover_set = data.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh );
2780  rval = context.MBI->get_entities_by_dimension( cover_set, 2, owned );MB_CHK_ERR( rval );
2781  // should still have only quads ?
2782  }
2783 #endif
2784  /*
2785  * data_intx.remapper exists though only on the intersection application
2786  * how do we get from here ( we know the pid that receives, and the commgraph used by migrate
2787  * mesh )
2788  */
2789 
2790  std::string tag_name( tag_storage_name );
2791 
2792  // we assume that there are separators ":" between the tag names
2793  std::vector< std::string > tagNames;
2794  std::vector< Tag > tagHandles;
2795  std::string separator( ":" );
2796  split_tag_names( tag_name, separator, tagNames );
2797  for( size_t i = 0; i < tagNames.size(); i++ )
2798  {
2799  Tag tagHandle;
2800  rval = context.MBI->tag_get_handle( tagNames[i].c_str(), tagHandle );
2801  if( MB_SUCCESS != rval || NULL == tagHandle )
2802  {
2803  std::cout << " can't get tag handle for tag named:" << tagNames[i].c_str() << " at index " << i << "\n";MB_CHK_SET_ERR( rval, "can't get tag handle" );
2804  }
2805  tagHandles.push_back( tagHandle );
2806  }
2807 
2808 #ifdef VERBOSE
2809  std::cout << pco->rank() << ". Looking to receive data for tags: " << tag_name
2810  << " and file set = " << ( data.file_set ) << "\n";
2811 #endif
2812  // pco is needed to pack, and for moab instance, not for communication!
2813  // still use nonblocking communication
2814  rval = cgraph->receive_tag_values( global, pco, owned, tagHandles );MB_CHK_ERR( rval );
2815 
2816 #ifdef VERBOSE
2817  std::cout << pco->rank() << ". Looking to receive data for tags: " << tag_name << "\n";
2818 #endif
2819 
2820  return moab::MB_SUCCESS;
2821 }
2822 
2822 
2823 ErrCode iMOAB_FreeSenderBuffers( iMOAB_AppID pid, int* context_id )
2824 {
2825  // need first to find the pgraph that holds the information we need
2826  // this will be called on sender side only
2827  appData& data = context.appDatas[*pid];
2828  std::map< int, ParCommGraph* >::iterator mt = data.pgraph.find( *context_id );
2829  if( mt == data.pgraph.end() ) return moab::MB_FAILURE; // error
2830 
2831  mt->second->release_send_buffers();
2832  return moab::MB_SUCCESS;
2833 }
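// --- Illustrative usage sketch (not part of the original iMOAB.cpp) ---
// Tag migration follows the same pattern as mesh migration: the sender posts
// nonblocking sends, the receiver posts matching receives, and the sender then
// releases its buffers. "T_ph:u_ph:v_ph" is a hypothetical ":"-separated list of tag
// names; `context_id` identifies the graph (usually the other component's id):
//
//   ErrCode ierr;
//   if( is_sender_task )
//       ierr = iMOAB_SendElementTag( pidSrc, "T_ph:u_ph:v_ph", &joinComm, &context_id );
//   if( is_receiver_task )
//       ierr = iMOAB_ReceiveElementTag( pidTgt, "T_ph:u_ph:v_ph", &joinComm, &context_id );
//   if( is_sender_task )
//       ierr = iMOAB_FreeSenderBuffers( pidSrc, &context_id );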
2834 
2835 /**
2836 \brief compute a comm graph between 2 moab apps, based on ID matching
2837 <B>Operations:</B> Collective
2838 */
2839 //#define VERBOSE
2840 ErrCode iMOAB_ComputeCommGraph( iMOAB_AppID pid1,
2841  iMOAB_AppID pid2,
2842  MPI_Comm* join,
2843  MPI_Group* group1,
2844  MPI_Group* group2,
2845  int* type1,
2846  int* type2,
2847  int* comp1,
2848  int* comp2 )
2849 {
2850  assert( join );
2851  assert( group1 );
2852  assert( group2 );
2853  ErrorCode rval = MB_SUCCESS;
2854  int localRank = 0, numProcs = 1;
2855 
2856  bool isFortran = false;
2857  if( *pid1 >= 0 ) isFortran = isFortran || context.appDatas[*pid1].is_fortran;
2858  if( *pid2 >= 0 ) isFortran = isFortran || context.appDatas[*pid2].is_fortran;
2859 
2860  MPI_Comm global = ( isFortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( join ) ) : *join );
2861  MPI_Group srcGroup = ( isFortran ? MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( group1 ) ) : *group1 );
2862  MPI_Group tgtGroup = ( isFortran ? MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( group2 ) ) : *group2 );
2863 
2864  MPI_Comm_rank( global, &localRank );
2865  MPI_Comm_size( global, &numProcs );
2866  // instantiate the par comm graph
2867 
2868  // we should search if we have another pcomm with the same comp ids in the list already
2869  // sort of check existing comm graphs in the map context.appDatas[*pid].pgraph
2870  bool already_exists = false;
2871  if( *pid1 >= 0 )
2872  {
2873  appData& data = context.appDatas[*pid1];
2874  std::map< int, ParCommGraph* >::iterator mt = data.pgraph.find( *comp2 );
2875  if( mt != data.pgraph.end() ) already_exists = true;
2876  }
2877  if( *pid2 >= 0 )
2878  {
2879  appData& data = context.appDatas[*pid2];
2880  std::map< int, ParCommGraph* >::iterator mt = data.pgraph.find( *comp1 );
2881  if( mt != data.pgraph.end() ) already_exists = true;
2882  }
2883  // nothing to do if it already exists
2884  if( already_exists )
2885  {
2886 #ifdef VERBOSE
2887  if( !localRank )
2888  std::cout << " ParCommGraph already exists between components " << *comp1 << " and " << *comp2
2889  << ". Do not compute again\n";
2890 #endif
2891  return moab::MB_SUCCESS;
2892  }
2893 
2894  // ParCommGraph::ParCommGraph(MPI_Comm joincomm, MPI_Group group1, MPI_Group group2, int coid1,
2895  // int coid2)
2896  ParCommGraph* cgraph = NULL;
2897  if( *pid1 >= 0 ) cgraph = new ParCommGraph( global, srcGroup, tgtGroup, *comp1, *comp2 );
2898  ParCommGraph* cgraph_rev = NULL;
2899  if( *pid2 >= 0 ) cgraph_rev = new ParCommGraph( global, tgtGroup, srcGroup, *comp2, *comp1 );
2900 
2901  if( *pid1 >= 0 ) context.appDatas[*pid1].pgraph[*comp2] = cgraph; // the context will be the other comp
2902  if( *pid2 >= 0 ) context.appDatas[*pid2].pgraph[*comp1] = cgraph_rev; // from 2 to 1
2903  // each model has a list of global ids that will need to be sent by gs to rendezvous the other
2904  // model on the joint comm
2905  TupleList TLcomp1;
2906  TLcomp1.initialize( 2, 0, 0, 0, 0 ); // to proc, marker
2907  TupleList TLcomp2;
2908  TLcomp2.initialize( 2, 0, 0, 0, 0 ); // to proc, marker
2909  // will push_back a new tuple, if needed
2910 
2911  TLcomp1.enableWriteAccess();
2912 
2913  // tags of interest are either GLOBAL_DOFS or GLOBAL_ID
2914  Tag gdsTag;
2915  // find the values on first cell
2916  int lenTagType1 = 1;
2917  if( 1 == *type1 || 1 == *type2 )
2918  {
2919  rval = context.MBI->tag_get_handle( "GLOBAL_DOFS", gdsTag );MB_CHK_ERR( rval );
2920  rval = context.MBI->tag_get_length( gdsTag, lenTagType1 );MB_CHK_ERR( rval ); // usually it is 16
2921  }
2922  Tag tagType2 = context.MBI->globalId_tag();
2923 
2924  std::vector< int > valuesComp1;
2925  // populate first tuple
2926  if( *pid1 >= 0 )
2927  {
2928  appData& data1 = context.appDatas[*pid1];
2929  EntityHandle fset1 = data1.file_set;
2930  // in case of tempest remap, get the coverage set
2931 #ifdef MOAB_HAVE_TEMPESTREMAP
2932  if( data1.tempestData.remapper != NULL ) // this is the case this is part of intx;;
2933  {
2934  fset1 = data1.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh );
2935  // should still have only quads ?
2936  }
2937 #endif
2938  Range ents_of_interest;
2939  if( *type1 == 1 )
2940  {
2941  assert( gdsTag );
2942  rval = context.MBI->get_entities_by_type( fset1, MBQUAD, ents_of_interest );MB_CHK_ERR( rval );
2943  valuesComp1.resize( ents_of_interest.size() * lenTagType1 );
2944  rval = context.MBI->tag_get_data( gdsTag, ents_of_interest, &valuesComp1[0] );MB_CHK_ERR( rval );
2945  }
2946  else if( *type1 == 2 )
2947  {
2948  rval = context.MBI->get_entities_by_type( fset1, MBVERTEX, ents_of_interest );MB_CHK_ERR( rval );
2949  valuesComp1.resize( ents_of_interest.size() );
2950  rval = context.MBI->tag_get_data( tagType2, ents_of_interest, &valuesComp1[0] );MB_CHK_ERR( rval ); // just global ids
2951  }
2952  else if( *type1 == 3 ) // for FV meshes, just get the global id of cell
2953  {
2954  rval = context.MBI->get_entities_by_dimension( fset1, 2, ents_of_interest );MB_CHK_ERR( rval );
2955  valuesComp1.resize( ents_of_interest.size() );
2956  rval = context.MBI->tag_get_data( tagType2, ents_of_interest, &valuesComp1[0] );MB_CHK_ERR( rval ); // just global ids
2957  }
2958  else
2959  {
2960  MB_CHK_ERR( MB_FAILURE ); // we know only type 1 or 2 or 3
2961  }
2962  // now fill the tuple list with info and markers
2963  // because we will send only the ids, order and compress the list
2964  std::set< int > uniq( valuesComp1.begin(), valuesComp1.end() );
2965  TLcomp1.resize( uniq.size() );
2966  for( std::set< int >::iterator sit = uniq.begin(); sit != uniq.end(); sit++ )
2967  {
2968  // to proc, marker, element local index, index in el
2969  int marker = *sit;
2970  int to_proc = marker % numProcs;
2971  int n = TLcomp1.get_n();
2972  TLcomp1.vi_wr[2 * n] = to_proc; // send to processor
2973  TLcomp1.vi_wr[2 * n + 1] = marker;
2974  TLcomp1.inc_n();
2975  }
2976  }
2977 
2978  ProcConfig pc( global ); // proc config does the crystal router
2979  pc.crystal_router()->gs_transfer( 1, TLcomp1,
2980  0 ); // communication towards joint tasks, with markers
2981  // sort by value (key 1)
2982 #ifdef VERBOSE
2983  std::stringstream ff1;
2984  ff1 << "TLcomp1_" << localRank << ".txt";
2985  TLcomp1.print_to_file( ff1.str().c_str() ); // it will append!
2986 #endif
2987  moab::TupleList::buffer sort_buffer;
2988  sort_buffer.buffer_init( TLcomp1.get_n() );
2989  TLcomp1.sort( 1, &sort_buffer );
2990  sort_buffer.reset();
2991 #ifdef VERBOSE
2992  // after sorting
2993  TLcomp1.print_to_file( ff1.str().c_str() ); // it will append!
2994 #endif
2995  // do the same, for the other component, number2, with type2
2996  // start copy
2997  TLcomp2.enableWriteAccess();
2998  // populate second tuple
2999  std::vector< int > valuesComp2;
3000  if( *pid2 >= 0 )
3001  {
3002  appData& data2 = context.appDatas[*pid2];
3003  EntityHandle fset2 = data2.file_set;
3004  // in case of tempest remap, get the coverage set
3005 #ifdef MOAB_HAVE_TEMPESTREMAP
3006  if( data2.tempestData.remapper != NULL ) // this is the case this is part of intx;;
3007  {
3008  fset2 = data2.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh );
3009  // should still have only quads ?
3010  }
3011 #endif
3012 
3013  Range ents_of_interest;
3014  if( *type2 == 1 )
3015  {
3016  assert( gdsTag );
3017  rval = context.MBI->get_entities_by_type( fset2, MBQUAD, ents_of_interest );MB_CHK_ERR( rval );
3018  valuesComp2.resize( ents_of_interest.size() * lenTagType1 );
3019  rval = context.MBI->tag_get_data( gdsTag, ents_of_interest, &valuesComp2[0] );MB_CHK_ERR( rval );
3020  }
3021  else if( *type2 == 2 )
3022  {
3023  rval = context.MBI->get_entities_by_type( fset2, MBVERTEX, ents_of_interest );MB_CHK_ERR( rval );
3024  valuesComp2.resize( ents_of_interest.size() ); // stride is 1 here
3025  rval = context.MBI->tag_get_data( tagType2, ents_of_interest, &valuesComp2[0] );MB_CHK_ERR( rval ); // just global ids
3026  }
3027  else if( *type2 == 3 )
3028  {
3029  rval = context.MBI->get_entities_by_dimension( fset2, 2, ents_of_interest );MB_CHK_ERR( rval );
3030  valuesComp2.resize( ents_of_interest.size() ); // stride is 1 here
3031  rval = context.MBI->tag_get_data( tagType2, ents_of_interest, &valuesComp2[0] );MB_CHK_ERR( rval ); // just global ids
3032  }
3033  else
3034  {
3035  MB_CHK_ERR( MB_FAILURE ); // we know only type 1 or 2 or 3
3036  }
3037  // now fill the tuple list with info and markers
3038  std::set< int > uniq( valuesComp2.begin(), valuesComp2.end() );
3039  TLcomp2.resize( uniq.size() );
3040  for( std::set< int >::iterator sit = uniq.begin(); sit != uniq.end(); sit++ )
3041  {
3042  // to proc, marker, element local index, index in el
3043  int marker = *sit;
3044  int to_proc = marker % numProcs;
3045  int n = TLcomp2.get_n();
3046  TLcomp2.vi_wr[2 * n] = to_proc; // send to processor
3047  TLcomp2.vi_wr[2 * n + 1] = marker;
3048  TLcomp2.inc_n();
3049  }
3050  }
3051  pc.crystal_router()->gs_transfer( 1, TLcomp2,
3052  0 ); // communication towards joint tasks, with markers
3053  // sort by value (key 1)
3054 #ifdef VERBOSE
3055  std::stringstream ff2;
3056  ff2 << "TLcomp2_" << localRank << ".txt";
3057  TLcomp2.print_to_file( ff2.str().c_str() );
3058 #endif
3059  sort_buffer.buffer_reserve( TLcomp2.get_n() );
3060  TLcomp2.sort( 1, &sort_buffer );
3061  sort_buffer.reset();
3062  // end copy
3063 #ifdef VERBOSE
3064  TLcomp2.print_to_file( ff2.str().c_str() );
3065 #endif
3066  // need to send back the info, from the rendezvous point, for each of the values
3067  /* so go over each value, on local process in joint communicator
3068 
3069  now we have to send back the info needed for communication;
3070  loop in sync over both TLComp1 and TLComp2, in the local process;
3071  so, build new tuple lists to send the synchronization info back,
3072  populating them at the same time, based on the marker, which is indexed
3073  */
3074 
3075  TupleList TLBackToComp1;
3076  TLBackToComp1.initialize( 3, 0, 0, 0, 0 ); // to proc, marker, from proc on comp2,
3077  TLBackToComp1.enableWriteAccess();
3078 
3079  TupleList TLBackToComp2;
3080  TLBackToComp2.initialize( 3, 0, 0, 0, 0 ); // to proc, marker, from proc,
3081  TLBackToComp2.enableWriteAccess();
3082 
3083  int n1 = TLcomp1.get_n();
3084  int n2 = TLcomp2.get_n();
3085 
3086  int indexInTLComp1 = 0;
3087  int indexInTLComp2 = 0; // advance both, according to the marker
3088  if( n1 > 0 && n2 > 0 )
3089  {
3090 
3091  while( indexInTLComp1 < n1 && indexInTLComp2 < n2 ) // if any is over, we are done
3092  {
3093  int currentValue1 = TLcomp1.vi_rd[2 * indexInTLComp1 + 1];
3094  int currentValue2 = TLcomp2.vi_rd[2 * indexInTLComp2 + 1];
3095  if( currentValue1 < currentValue2 )
3096  {
3097  // we have a big problem; basically, we are saying that
3098  // dof currentValue is on one model and not on the other
3099  // std::cout << " currentValue1:" << currentValue1 << " missing in comp2" << "\n";
3100  indexInTLComp1++;
3101  continue;
3102  }
3103  if( currentValue1 > currentValue2 )
3104  {
3105  // std::cout << " currentValue2:" << currentValue2 << " missing in comp1" << "\n";
3106  indexInTLComp2++;
3107  continue;
3108  }
3109  int size1 = 1;
3110  int size2 = 1;
3111  while( indexInTLComp1 + size1 < n1 && currentValue1 == TLcomp1.vi_rd[2 * ( indexInTLComp1 + size1 ) + 1] )
3112  size1++;
3113  while( indexInTLComp2 + size2 < n2 && currentValue2 == TLcomp2.vi_rd[2 * ( indexInTLComp2 + size2 ) + 1] )
3114  size2++;
3115  // must be found in both lists, find the start and end indices
3116  for( int i1 = 0; i1 < size1; i1++ )
3117  {
3118  for( int i2 = 0; i2 < size2; i2++ )
3119  {
3120  // send the info back to components
3121  int n = TLBackToComp1.get_n();
3122  TLBackToComp1.reserve();
3123  TLBackToComp1.vi_wr[3 * n] =
3124  TLcomp1.vi_rd[2 * ( indexInTLComp1 + i1 )]; // send back to the proc marker
3125  // came from, info from comp2
3126  TLBackToComp1.vi_wr[3 * n + 1] = currentValue1; // initial value (resend?)
3127  TLBackToComp1.vi_wr[3 * n + 2] = TLcomp2.vi_rd[2 * ( indexInTLComp2 + i2 )]; // from proc on comp2
3128  n = TLBackToComp2.get_n();
3129  TLBackToComp2.reserve();
3130  TLBackToComp2.vi_wr[3 * n] =
3131  TLcomp2.vi_rd[2 * ( indexInTLComp2 + i2 )]; // send back info to original
3132  TLBackToComp2.vi_wr[3 * n + 1] = currentValue1; // initial value (resend?)
3133  TLBackToComp2.vi_wr[3 * n + 2] = TLcomp1.vi_rd[2 * ( indexInTLComp1 + i1 )]; // from proc on comp1
3134  // what if there are repeated markers in TLcomp2? increase just index2
3135  }
3136  }
3137  indexInTLComp1 += size1;
3138  indexInTLComp2 += size2;
3139  }
3140  }
3141  pc.crystal_router()->gs_transfer( 1, TLBackToComp1, 0 ); // communication towards original tasks, with info about
3142  pc.crystal_router()->gs_transfer( 1, TLBackToComp2, 0 );
3143 
3144  if( *pid1 >= 0 )
3145  {
3146  // we are on original comp 1 tasks
3147  // before ordering
3148  // now for each value in TLBackToComp1.vi_rd[3*i+1], on current proc, we know the
3149  // processors it communicates with
3150 #ifdef VERBOSE
3151  std::stringstream f1;
3152  f1 << "TLBack1_" << localRank << ".txt";
3153  TLBackToComp1.print_to_file( f1.str().c_str() );
3154 #endif
3155  sort_buffer.buffer_reserve( TLBackToComp1.get_n() );
3156  TLBackToComp1.sort( 1, &sort_buffer );
3157  sort_buffer.reset();
3158 #ifdef VERBOSE
3159  TLBackToComp1.print_to_file( f1.str().c_str() );
3160 #endif
3161  // so we are now on pid1; we now know, for each marker, where it has to go
3162  // add a new method to ParCommGraph, to set up the involved_IDs_map
3163  cgraph->settle_comm_by_ids( *comp1, TLBackToComp1, valuesComp1 );
3164  }
3165  if( *pid2 >= 0 )
3166  {
3167  // we are on original comp 2 tasks
3168  // before ordering
3169  // now for each value in TLBackToComp2.vi_rd[3*i+1], on current proc, we know the
3170  // processors it communicates with
3171 #ifdef VERBOSE
3172  std::stringstream f2;
3173  f2 << "TLBack2_" << localRank << ".txt";
3174  TLBackToComp2.print_to_file( f2.str().c_str() );
3175 #endif
3176  sort_buffer.buffer_reserve( TLBackToComp2.get_n() );
3177  TLBackToComp2.sort( 2, &sort_buffer );
3178  sort_buffer.reset();
3179 #ifdef VERBOSE
3180  TLBackToComp2.print_to_file( f2.str().c_str() );
3181 #endif
3182  cgraph_rev->settle_comm_by_ids( *comp2, TLBackToComp2, valuesComp2 );
3183  //
3184  }
3185  return moab::MB_SUCCESS;
3186 }
3187 
3188 //#undef VERBOSE
3189 
3190 ErrCode iMOAB_MergeVertices( iMOAB_AppID pid )
3191 {
3192  appData& data = context.appDatas[*pid];
3193  ParallelComm* pco = context.pcomms[*pid];
3194  // collapse vertices and transform cells into triangles/quads /polys
3195  // tags we care about: area, frac, global id
3196  std::vector< Tag > tagsList;
3197  Tag tag;
3198  ErrorCode rval = context.MBI->tag_get_handle( "GLOBAL_ID", tag );
3199  if( !tag || rval != MB_SUCCESS ) return moab::MB_FAILURE; // fatal error, abort
3200  tagsList.push_back( tag );
3201  rval = context.MBI->tag_get_handle( "area", tag );
3202  if( tag && rval == MB_SUCCESS ) tagsList.push_back( tag );
3203  rval = context.MBI->tag_get_handle( "frac", tag );
3204  if( tag && rval == MB_SUCCESS ) tagsList.push_back( tag );
3205  double tol = 1.0e-9;
3206  rval = IntxUtils::remove_duplicate_vertices( context.MBI, data.file_set, tol, tagsList );MB_CHK_ERR( rval );
3207 
3208  // clean material sets of cells that were deleted
3209  rval = context.MBI->get_entities_by_type_and_tag( data.file_set, MBENTITYSET, &context.material_tag, 0, 1,
3210  data.mat_sets, Interface::UNION );MB_CHK_ERR( rval );
3211 
3212  if( !data.mat_sets.empty() )
3213  {
3214  EntityHandle matSet = data.mat_sets[0];
3215  Range elems;
3216  rval = context.MBI->get_entities_by_dimension( matSet, 2, elems );MB_CHK_ERR( rval );
3217  rval = context.MBI->remove_entities( matSet, elems );MB_CHK_ERR( rval );
3218  // put back only cells from data.file_set
3219  elems.clear();
3220  rval = context.MBI->get_entities_by_dimension( data.file_set, 2, elems );MB_CHK_ERR( rval );
3221  rval = context.MBI->add_entities( matSet, elems );MB_CHK_ERR( rval );
3222  }
3223  rval = iMOAB_UpdateMeshInfo( pid );MB_CHK_ERR( rval );
3224 
3225  ParallelMergeMesh pmm( pco, tol );
3226  rval = pmm.merge( data.file_set,
3227  /* do not do local merge*/ false,
3228  /* 2d cells*/ 2 );MB_CHK_ERR( rval );
3229 
3230  // assign global ids only for vertices, cells have them fine
3231  rval = pco->assign_global_ids( data.file_set, /*dim*/ 0 );MB_CHK_ERR( rval );
3232 
3233  // set the partition tag on the file set
3234  Tag part_tag;
3235  int dum_id = -1;
3236  rval = context.MBI->tag_get_handle( "PARALLEL_PARTITION", 1, MB_TYPE_INTEGER, part_tag,
3237  MB_TAG_CREAT | MB_TAG_SPARSE, &dum_id );
3238 
3239  if( part_tag == NULL || ( ( rval != MB_SUCCESS ) && ( rval != MB_ALREADY_ALLOCATED ) ) )
3240  {
3241  std::cout << " can't get par part tag.\n";
3242  return moab::MB_FAILURE;
3243  }
3244 
3245  int rank = pco->rank();
3246  rval = context.MBI->tag_set_data( part_tag, &data.file_set, 1, &rank );MB_CHK_ERR( rval );
3247 
3248  return moab::MB_SUCCESS;
3249 }
3250 
3251 #ifdef MOAB_HAVE_TEMPESTREMAP
3252 
3253 // this call must be collective on the joint communicator
3254 // intersection tasks on coupler will need to send to the components tasks the list of
3255 // id elements that are relevant: they intersected some of the target elements (which are not needed
3256 // here)
3257 // in the intersection
3258 ErrCode iMOAB_CoverageGraph( MPI_Comm* join,
3259  iMOAB_AppID pid_src,
3260  iMOAB_AppID pid_migr,
3261  iMOAB_AppID pid_intx,
3262  int* src_id,
3263  int* migr_id,
3264  int* context_id )
3265 {
3266  // first, based on the scompid and migrcomp, find the parCommGraph corresponding to this
3267  // exchange
3268  ErrorCode rval;
3269  std::vector< int > srcSenders;
3270  std::vector< int > receivers;
3271  ParCommGraph* sendGraph = NULL;
3272  int ierr;
3273  int default_context_id = -1;
3274  bool is_fortran_context = false;
3275 
3276  // First, find the original graph between PE sets
3277  // And based on this one, we will build the newly modified one for coverage
3278  if( *pid_src >= 0 )
3279  {
3280  default_context_id = *migr_id; // the other one
3281  assert( context.appDatas[*pid_src].global_id == *src_id );
3282  is_fortran_context = context.appDatas[*pid_src].is_fortran || is_fortran_context;
3283  sendGraph = context.appDatas[*pid_src].pgraph[default_context_id]; // maybe check if it does not exist
3284 
3285  // report the sender and receiver tasks in the joint comm
3286  srcSenders = sendGraph->senders();
3287  receivers = sendGraph->receivers();
3288 #ifdef VERBOSE
3289  std::cout << "senders: " << srcSenders.size() << " first sender: " << srcSenders[0] << std::endl;
3290 #endif
3291  }
3292 
3293  ParCommGraph* recvGraph = NULL; // will be non null on receiver tasks (intx tasks)
3294  if( *pid_migr >= 0 )
3295  {
3296  is_fortran_context = context.appDatas[*pid_migr].is_fortran || is_fortran_context;
3297  // find the original one
3298  default_context_id = *src_id;
3299  assert( context.appDatas[*pid_migr].global_id == *migr_id );
3300  recvGraph = context.appDatas[*pid_migr].pgraph[default_context_id];
3301  // report the sender and receiver tasks in the joint comm, from migrated mesh pt of view
3302  srcSenders = recvGraph->senders();
3303  receivers = recvGraph->receivers();
3304 #ifdef VERBOSE
3305  std::cout << "receivers: " << receivers.size() << " first receiver: " << receivers[0] << std::endl;
3306 #endif
3307  }
3308 
3309  if( *pid_intx >= 0 ) is_fortran_context = context.appDatas[*pid_intx].is_fortran || is_fortran_context;
3310 
3311  // loop over pid_intx elements, to see what original processors in joint comm have sent the
3312  // coverage mesh;
3313  // If we are on intx tasks, send coverage info towards original component tasks,
3314  // about needed cells
3315  TupleList TLcovIDs;
3316  TLcovIDs.initialize( 2, 0, 0, 0, 0 ); // to proc, GLOBAL ID; estimate about 100 IDs to be sent
3317  // will push_back a new tuple, if needed
3318  TLcovIDs.enableWriteAccess();
3319  // the crystal router will send the cell IDs back to the original source, on the component task
3320  // if we are on intx tasks, loop over all intx elements and collect the needed ids per original proc
3321 
3322  MPI_Comm global = ( is_fortran_context ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( join ) ) : *join );
3323  int currentRankInJointComm = -1;
3324  ierr = MPI_Comm_rank( global, &currentRankInJointComm );CHK_MPI_ERR( ierr );
3325 
3326  // if currentRankInJointComm is in receivers list, it means that we are on intx tasks too, we
3327  // need to send information towards component tasks
3328  if( find( receivers.begin(), receivers.end(), currentRankInJointComm ) !=
3329  receivers.end() ) // we are on receivers tasks, we can request intx info
3330  {
3331  // find the pcomm for the intx pid
3332  if( *pid_intx >= (int)context.appDatas.size() ) return moab::MB_FAILURE;
3333 
3334  appData& dataIntx = context.appDatas[*pid_intx];
3335  Tag parentTag, orgSendProcTag;
3336 
3337  rval = context.MBI->tag_get_handle( "SourceParent", parentTag );MB_CHK_ERR( rval ); // global id of the blue, source element
3338  if( !parentTag ) return moab::MB_FAILURE; // fatal error, abort
3339 
3340  rval = context.MBI->tag_get_handle( "orig_sending_processor", orgSendProcTag );MB_CHK_ERR( rval );
3341  if( !orgSendProcTag ) return moab::MB_FAILURE; // fatal error, abort
3342 
3343  // find the file set, red parents for intx cells, and put them in tuples
3344  EntityHandle intxSet = dataIntx.file_set;
3345  Range cells;
3346  // get all entities from the set, and look at their RedParent
3347  rval = context.MBI->get_entities_by_dimension( intxSet, 2, cells );MB_CHK_ERR( rval );
3348 
3349  std::map< int, std::set< int > > idsFromProcs; // send that info back to enhance parCommGraph cache
3350  for( Range::iterator it = cells.begin(); it != cells.end(); it++ )
3351  {
3352  EntityHandle intx_cell = *it;
3353  int gidCell, origProc; // look at receivers
3354 
3355  rval = context.MBI->tag_get_data( parentTag, &intx_cell, 1, &gidCell );MB_CHK_ERR( rval );
3356  rval = context.MBI->tag_get_data( orgSendProcTag, &intx_cell, 1, &origProc );MB_CHK_ERR( rval );
3357  // we have augmented the overlap set with ghost cells ; in that case, the
3358  // orig_sending_processor is not set so it will be -1;
3359  if( origProc < 0 ) continue;
3360 
3361  std::set< int >& setInts = idsFromProcs[origProc];
3362  setInts.insert( gidCell );
3363  }
3364 
3365  // if we have no intx cells, it means we are on point clouds; quick fix just use all cells
3366  // from coverage set
3367  // if( cells.empty() )
3368  {
3369  cells.clear();
3370  // get coverage set
3371  assert( *pid_intx >= 0 );
3372  appData& dataIntx = context.appDatas[*pid_intx];
3373  EntityHandle cover_set = dataIntx.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh );
3374 
3375  // get all cells from coverage set
3376  Tag gidTag;
3377  rval = context.MBI->tag_get_handle( "GLOBAL_ID", gidTag );MB_CHK_ERR( rval );
3378  rval = context.MBI->get_entities_by_dimension( cover_set, 2, cells );MB_CHK_ERR( rval );
3379  // look at their orig_sending_processor
3380  for( Range::iterator it = cells.begin(); it != cells.end(); it++ )
3381  {
3382  EntityHandle covCell = *it;
3383  int gidCell, origProc; // global id and original sending processor
3384 
3385  rval = context.MBI->tag_get_data( gidTag, &covCell, 1, &gidCell );MB_CHK_ERR( rval );
3386  rval = context.MBI->tag_get_data( orgSendProcTag, &covCell, 1, &origProc );MB_CHK_ERR( rval );
3387  // we have augmented the overlap set with ghost cells ; in that case, the
3388  // orig_sending_processor is not set so it will be -1;
3389  if( origProc < 0 ) // it should not be < 0 here, but guard anyway
3390  continue;
3391  std::set< int >& setInts = idsFromProcs[origProc];
3392  setInts.insert( gidCell );
3393  }
3394  }
3395 
3396 #ifdef VERBOSE
3397  std::ofstream dbfile;
3398  std::stringstream outf;
3399  outf << "idsFromProc_0" << currentRankInJointComm << ".txt";
3400  dbfile.open( outf.str().c_str() );
3401  dbfile << "Writing this to a file.\n";
3402 
3403  dbfile << " map size:" << idsFromProcs.size()
3404  << std::endl; // on the receiver side, these show how much data to receive
3405  // from the sender (how many ids, and how much tag data later; we need to size up the
3406  // receiver buffer); arrange in tuples, and use map iterators to send the ids
3407  for( std::map< int, std::set< int > >::iterator mt = idsFromProcs.begin(); mt != idsFromProcs.end(); mt++ )
3408  {
3409  std::set< int >& setIds = mt->second;
3410  dbfile << "from id: " << mt->first << " receive " << setIds.size() << " cells \n";
3411  int counter = 0;
3412  for( std::set< int >::iterator st = setIds.begin(); st != setIds.end(); st++ )
3413  {
3414  int valueID = *st;
3415  dbfile << " " << valueID;
3416  counter++;
3417  if( counter % 10 == 0 ) dbfile << "\n";
3418  }
3419  dbfile << "\n";
3420  }
3421  dbfile.close();
3422 #endif
3423  if( NULL != recvGraph )
3424  {
3425  ParCommGraph* recvGraph1 = new ParCommGraph( *recvGraph ); // just copy
3426  recvGraph1->set_context_id( *context_id );
3427  recvGraph1->SetReceivingAfterCoverage( idsFromProcs );
3428  // this par comm graph will need to use the coverage set
3429  // so we are for sure on intx pes (the receiver is the coupler mesh)
3430  assert( *pid_intx >= 0 );
3431  appData& dataIntx = context.appDatas[*pid_intx];
3432  EntityHandle cover_set = dataIntx.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh );
3433  recvGraph1->set_cover_set( cover_set );
3434  context.appDatas[*pid_migr].pgraph[*context_id] = recvGraph1; // possible memory leak if context_id is same
3435  }
3436  for( std::map< int, std::set< int > >::iterator mit = idsFromProcs.begin(); mit != idsFromProcs.end(); mit++ )
3437  {
3438  int procToSendTo = mit->first;
3439  std::set< int >& idSet = mit->second;
3440  for( std::set< int >::iterator sit = idSet.begin(); sit != idSet.end(); sit++ )
3441  {
3442  int n = TLcovIDs.get_n();
3443  TLcovIDs.reserve();
3444  TLcovIDs.vi_wr[2 * n] = procToSendTo; // send to processor
3445  TLcovIDs.vi_wr[2 * n + 1] = *sit; // global id needs index in the local_verts range
3446  }
3447  }
3448  }
3449 
3450  ProcConfig pc( global ); // proc config does the crystal router
3451  pc.crystal_router()->gs_transfer( 1, TLcovIDs,
3452  0 ); // communication towards component tasks, with what ids are needed
3453  // for each task from receiver
3454 
3455  // a test to know if we are on the sender tasks (original component, in this case, atmosphere)
3456  if( NULL != sendGraph )
3457  {
3458  // collect TLcovIDs tuple, will set in a local map/set, the ids that are sent to each
3459  // receiver task
3460  ParCommGraph* sendGraph1 = new ParCommGraph( *sendGraph ); // just copy
3461  sendGraph1->set_context_id( *context_id );
3462  context.appDatas[*pid_src].pgraph[*context_id] = sendGraph1;
3463  rval = sendGraph1->settle_send_graph( TLcovIDs );MB_CHK_ERR( rval );
3464  }
3465  return moab::MB_SUCCESS; // success
3466 }
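/*
 * Usage sketch for iMOAB_CoverageGraph (illustrative only; the communicator, pids and
 * component ids below are hypothetical placeholders, not defined by iMOAB). The call is
 * collective on the joint communicator; tasks where an application is not registered
 * pass a pid pointing to a negative value.
 *
 *   int atmCompId = 5, atmCplCompId = 6, ctxId = 6; // placeholder component ids
 *   ErrCode ierr = iMOAB_CoverageGraph( &jointComm, pidAtm, pidAtmOnCpl, pidIntxAtmOcn,
 *                                       &atmCompId, &atmCplCompId, &ctxId );
 */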
3467 
3468 ErrCode iMOAB_DumpCommGraph( iMOAB_AppID pid, int* context_id, int* is_sender, const iMOAB_String prefix )
3469 {
3470  assert( prefix && strlen( prefix ) );
3471 
3472  ParCommGraph* cgraph = context.appDatas[*pid].pgraph[*context_id];
3473  std::string prefix_str( prefix );
3474 
3475  if( NULL != cgraph )
3476  cgraph->dump_comm_information( prefix_str, *is_sender );
3477  else
3478  {
3479  std::cout << " cannot find ParCommGraph on app with pid " << *pid << " name: " << context.appDatas[*pid].name
3480  << " context: " << *context_id << "\n";
3481  }
3482  return moab::MB_SUCCESS;
3483 }
3484 
3485 #endif // #ifdef MOAB_HAVE_TEMPESTREMAP
3486 
3487 #endif // #ifdef MOAB_HAVE_MPI
3488 
3489 #ifdef MOAB_HAVE_TEMPESTREMAP
3490 
3491 #ifdef MOAB_HAVE_NETCDF
3492 ErrCode iMOAB_LoadMappingWeightsFromFile(
3493  iMOAB_AppID pid_intersection,
3494  iMOAB_AppID pid_cpl,
3495  int* col_or_row,
3496  int* type,
3497  const iMOAB_String solution_weights_identifier, /* "scalar", "flux", "custom" */
3498  const iMOAB_String remap_weights_filename )
3499 {
3500  ErrorCode rval;
3501  bool row_based_partition = true;
3502  if( *col_or_row == 1 ) row_based_partition = false; // do a column based partition;
3503 
3504  // get the local degrees of freedom, from the pid_cpl and type of mesh
3505 
3506  // Get the source and target data and pcomm objects
3507  appData& data_intx = context.appDatas[*pid_intersection];
3508  TempestMapAppData& tdata = data_intx.tempestData;
3509 
3510  // Get the handle to the remapper object
3511  if( tdata.remapper == NULL )
3512  {
3513  // Now allocate and initialize the remapper object
3514 #ifdef MOAB_HAVE_MPI
3515  ParallelComm* pco = context.pcomms[*pid_intersection];
3516  tdata.remapper = new moab::TempestRemapper( context.MBI, pco );
3517 #else
3518  tdata.remapper = new moab::TempestRemapper( context.MBI );
3519 #endif
3520  tdata.remapper->meshValidate = true;
3521  tdata.remapper->constructEdgeMap = true;
3522  // Do not create new filesets; Use the sets from our respective applications
3523  tdata.remapper->initialize( false );
3524  tdata.remapper->GetMeshSet( moab::Remapper::OverlapMesh ) = data_intx.file_set;
3525  }
3526 
3527  // Setup loading of weights onto TempestOnlineMap
3528  // Set the context for the remapping weights computation
3529  tdata.weightMaps[std::string( solution_weights_identifier )] = new moab::TempestOnlineMap( tdata.remapper );
3530 
3531  // Now allocate and initialize the remapper object
3532  moab::TempestOnlineMap* weightMap = tdata.weightMaps[std::string( solution_weights_identifier )];
3533  assert( weightMap != NULL );
3534 
3535  if( *pid_cpl >= 0 ) // it means we are looking for how to distribute the degrees of freedom, new map reader
3536  {
3537  appData& data1 = context.appDatas[*pid_cpl];
3538  EntityHandle fset1 = data1.file_set; // this is source or target, depending on direction
3539 
3540  // tags of interest are either GLOBAL_DOFS or GLOBAL_ID
3541  Tag gdsTag;
3542 
3543  // find the values on first cell
3544  int lenTagType1 = 1;
3545  if( *type == 1 )
3546  {
3547  rval = context.MBI->tag_get_handle( "GLOBAL_DOFS", gdsTag );MB_CHK_ERR( rval );
3548  rval = context.MBI->tag_get_length( gdsTag, lenTagType1 );MB_CHK_ERR( rval ); // usually it is 16
3549  }
3550  Tag tagType2 = context.MBI->globalId_tag();
3551 
3552  std::vector< int > dofValues;
3553 
3554  // populate first tuple
3555  Range
3556  ents_of_interest; // will be filled with entities on coupler, from which we will get the DOFs, based on type
3557  int ndofPerEl = 1;
3558 
3559  if( *type == 1 )
3560  {
3561  assert( gdsTag );
3562  rval = context.MBI->get_entities_by_type( fset1, MBQUAD, ents_of_interest );MB_CHK_ERR( rval );
3563  dofValues.resize( ents_of_interest.size() * lenTagType1 );
3564  rval = context.MBI->tag_get_data( gdsTag, ents_of_interest, &dofValues[0] );MB_CHK_ERR( rval );
3565  ndofPerEl = lenTagType1;
3566  }
3567  else if( *type == 2 )
3568  {
3569  rval = context.MBI->get_entities_by_type( fset1, MBVERTEX, ents_of_interest );MB_CHK_ERR( rval );
3570  dofValues.resize( ents_of_interest.size() );
3571  rval = context.MBI->tag_get_data( tagType2, ents_of_interest, &dofValues[0] );MB_CHK_ERR( rval ); // just global ids
3572  }
3573  else if( *type == 3 ) // for FV meshes, just get the global id of cell
3574  {
3575  rval = context.MBI->get_entities_by_dimension( fset1, 2, ents_of_interest );MB_CHK_ERR( rval );
3576  dofValues.resize( ents_of_interest.size() );
3577  rval = context.MBI->tag_get_data( tagType2, ents_of_interest, &dofValues[0] );MB_CHK_ERR( rval ); // just global ids
3578  }
3579  else
3580  {
3581  MB_CHK_ERR( MB_FAILURE ); // we know only type 1 or 2 or 3
3582  }
3583  // pass ordered dofs, and unique
3584  std::vector< int > orderDofs( dofValues.begin(), dofValues.end() );
3585 
3586  std::sort( orderDofs.begin(), orderDofs.end() );
3587  orderDofs.erase( std::unique( orderDofs.begin(), orderDofs.end() ), orderDofs.end() ); // remove duplicates
3588 
3589  rval = weightMap->ReadParallelMap( remap_weights_filename, orderDofs, row_based_partition );MB_CHK_ERR( rval );
3590 
3591  // if we are on target mesh (row based partition)
3592  if( row_based_partition )
3593  {
3594  tdata.pid_dest = pid_cpl;
3595  tdata.remapper->SetMeshSet( Remapper::TargetMesh, fset1, &ents_of_interest );
3596  weightMap->SetDestinationNDofsPerElement( ndofPerEl );
3597  weightMap->set_row_dc_dofs( dofValues ); // will set row_dtoc_dofmap
3598  }
3599  else
3600  {
3601  tdata.pid_src = pid_cpl;
3602  tdata.remapper->SetMeshSet( Remapper::SourceMesh, fset1, &ents_of_interest );
3603  weightMap->SetSourceNDofsPerElement( ndofPerEl );
3604  weightMap->set_col_dc_dofs( dofValues ); // will set col_dtoc_dofmap
3605  }
3606  }
3607  else // old reader, trivial distribution by row
3608  {
3609  std::vector< int > tmp_owned_ids; // this will do a trivial row distribution
3610  rval = weightMap->ReadParallelMap( remap_weights_filename, tmp_owned_ids, row_based_partition );MB_CHK_ERR( rval );
3611  }
3612 
3613  return moab::MB_SUCCESS;
3614 }
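/*
 * Usage sketch for iMOAB_LoadMappingWeightsFromFile (illustrative only; the pids and the
 * map file name are hypothetical). type follows the convention used above: 1 = spectral
 * elements carrying a GLOBAL_DOFS tag, 2 = point cloud (vertices, GLOBAL_ID), 3 = FV cells
 * (GLOBAL_ID); col_or_row == 1 requests a column-based partition, otherwise the partition
 * is row based.
 *
 *   int colOrRow = 0, type = 3; // row-based partition, FV mesh on the coupler
 *   ErrCode ierr = iMOAB_LoadMappingWeightsFromFile( pidIntxAtmOcn, pidOcnOnCpl, &colOrRow,
 *                                                    &type, "scalar", "map_atm_to_ocn.nc" );
 */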
3615 
3616 ErrCode iMOAB_WriteMappingWeightsToFile(
3617  iMOAB_AppID pid_intersection,
3618  const iMOAB_String solution_weights_identifier, /* "scalar", "flux", "custom" */
3619  const iMOAB_String remap_weights_filename )
3620 {
3621  assert( solution_weights_identifier && strlen( solution_weights_identifier ) );
3622  assert( remap_weights_filename && strlen( remap_weights_filename ) );
3623 
3624  ErrorCode rval;
3625 
3626  // Get the source and target data and pcomm objects
3627  appData& data_intx = context.appDatas[*pid_intersection];
3628  TempestMapAppData& tdata = data_intx.tempestData;
3629 
3630  // Get the handle to the remapper object
3631  assert( tdata.remapper != NULL );
3632 
3633  // Now get online weights object and ensure it is valid
3634  moab::TempestOnlineMap* weightMap = tdata.weightMaps[std::string( solution_weights_identifier )];
3635  assert( weightMap != NULL );
3636 
3637  std::string filename = std::string( remap_weights_filename );
3638 
3639  std::string metadataStr = data_intx.metadataMap[std::string( solution_weights_identifier )];
3640  std::map< std::string, std::string > attrMap;
3641  attrMap["title"] = "MOAB-TempestRemap Online Regridding Weight Generator";
3642  attrMap["normalization"] = "ovarea";
3643  attrMap["map_aPb"] = filename;
3644 
3645  const std::string delim = ";";
3646  size_t pos = 0, index = 0;
3647  std::vector< std::string > stringAttr( 3 );
3648  // use find() function to get the position of the delimiters
3649  while( ( pos = metadataStr.find( delim ) ) != std::string::npos )
3650  {
3651  std::string token1 = metadataStr.substr( 0, pos ); // store the substring
3652  if( token1.size() > 0 || index == 0 ) stringAttr[index++] = token1;
3653  metadataStr.erase(
3654  0, pos + delim.length() ); /* erase the processed token and move on to the next one */
3655  }
3656  stringAttr[index] = metadataStr; // store the last token of the string.
3657  assert( index == 2 );
3658  attrMap["remap_options"] = stringAttr[0];
3659  attrMap["methodorder_b"] = stringAttr[1];
3660  attrMap["methodorder_a"] = stringAttr[2];
3661  attrMap["concave_a"] = "false"; // defaults
3662  attrMap["concave_b"] = "false"; // defaults
3663  attrMap["bubble"] = "true"; // defaults
3664  attrMap["MOABversion"] = std::string( MOAB_VERSION );
3665 
3666  // Write the map file to disk in parallel using either HDF5 or SCRIP interface
3667  rval = weightMap->WriteParallelMap( filename, attrMap );MB_CHK_ERR( rval );
3668 
3669  return moab::MB_SUCCESS;
3670 }
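/*
 * Note on the metadata string parsed above (a sketch of the expected layout, derived from how
 * iMOAB_ComputeScalarProjectionWeights assembles it): ";"-separated fields of the form
 * "<remap options>;<source disc>:<order>:<dof tag>;<target disc>:<order>:<dof tag>", for
 * example "fv;fv:1:GLOBAL_ID;fv:1:GLOBAL_ID". Empty fields are skipped by the loop above;
 * the first surviving field becomes the remap_options attribute, the second (source
 * descriptor) methodorder_b, and the third (target descriptor) methodorder_a.
 */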
3671 
3672 #ifdef MOAB_HAVE_MPI
3673 ErrCode iMOAB_MigrateMapMesh( iMOAB_AppID pid1,
3674  iMOAB_AppID pid2,
3675  iMOAB_AppID pid3,
3676  MPI_Comm* jointcomm,
3677  MPI_Group* groupA,
3678  MPI_Group* groupB,
3679  int* type,
3680  int* comp1,
3681  int* comp2,
3682  int* direction )
3683 {
3684  assert( jointcomm );
3685  assert( groupA );
3686  assert( groupB );
3687  bool is_fortran = false;
3688  if( *pid1 >= 0 ) is_fortran = context.appDatas[*pid1].is_fortran || is_fortran;
3689  if( *pid2 >= 0 ) is_fortran = context.appDatas[*pid2].is_fortran || is_fortran;
3690  if( *pid3 >= 0 ) is_fortran = context.appDatas[*pid3].is_fortran || is_fortran;
3691 
3692  MPI_Comm joint_communicator =
3693  ( is_fortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( jointcomm ) ) : *jointcomm );
3694  MPI_Group group_first = ( is_fortran ? MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( groupA ) ) : *groupA );
3695  MPI_Group group_second = ( is_fortran ? MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( groupB ) ) : *groupB );
3696 
3697  ErrorCode rval = MB_SUCCESS;
3698  int localRank = 0, numProcs = 1;
3699 
3700  // Get the local rank and number of processes in the joint communicator
3701  MPI_Comm_rank( joint_communicator, &localRank );
3702  MPI_Comm_size( joint_communicator, &numProcs );
3703 
3704  // Next instantiate the par comm graph
3705  // here we need to look at direction
3706  // this direction is good for atm source -> ocn target coupler example
3707  ParCommGraph* cgraph = NULL;
3708  if( *pid1 >= 0 ) cgraph = new ParCommGraph( joint_communicator, group_first, group_second, *comp1, *comp2 );
3709 
3710  ParCommGraph* cgraph_rev = NULL;
3711  if( *pid3 >= 0 ) cgraph_rev = new ParCommGraph( joint_communicator, group_second, group_first, *comp2, *comp1 );
3712 
3713  // we should search if we have another pcomm with the same comp ids in the list already
3714  // sort of check existing comm graphs in the map context.appDatas[*pid].pgraph
3715  if( *pid1 >= 0 ) context.appDatas[*pid1].pgraph[*comp2] = cgraph; // the context will be the other comp
3716  if( *pid3 >= 0 ) context.appDatas[*pid3].pgraph[*comp1] = cgraph_rev; // from 2 to 1
3717 
3718  // each model has a list of global ids that will need to be sent by gs to rendezvous the other
3719  // model on the joint_communicator
3720  TupleList TLcomp1;
3721  TLcomp1.initialize( 2, 0, 0, 0, 0 ); // to proc, marker
3722  TupleList TLcomp2;
3723  TLcomp2.initialize( 2, 0, 0, 0, 0 ); // to proc, marker
3724 
3725  // will push_back a new tuple, if needed
3726  TLcomp1.enableWriteAccess();
3727 
3728  // tags of interest are either GLOBAL_DOFS or GLOBAL_ID
3729  Tag gdsTag;
3730 
3731  // find the values on first cell
3732  int lenTagType1 = 1;
3733  if( *type == 1 )
3734  {
3735  rval = context.MBI->tag_get_handle( "GLOBAL_DOFS", gdsTag );MB_CHK_ERR( rval );
3736  rval = context.MBI->tag_get_length( gdsTag, lenTagType1 );MB_CHK_ERR( rval ); // usually it is 16
3737  }
3738  Tag tagType2 = context.MBI->globalId_tag();
3739 
3740  std::vector< int > valuesComp1;
3741 
3742  // populate first tuple
3743  Range ents_of_interest; // will be filled with entities on pid1, that need to be distributed,
3744  // rearranged in split_ranges map
3745  if( *pid1 >= 0 )
3746  {
3747  appData& data1 = context.appDatas[*pid1];
3748  EntityHandle fset1 = data1.file_set;
3749 
3750  if( *type == 1 )
3751  {
3752  assert( gdsTag );
3753  rval = context.MBI->get_entities_by_type( fset1, MBQUAD, ents_of_interest );MB_CHK_ERR( rval );
3754  valuesComp1.resize( ents_of_interest.size() * lenTagType1 );
3755  rval = context.MBI->tag_get_data( gdsTag, ents_of_interest, &valuesComp1[0] );MB_CHK_ERR( rval );
3756  }
3757  else if( *type == 2 )
3758  {
3759  rval = context.MBI->get_entities_by_type( fset1, MBVERTEX, ents_of_interest );MB_CHK_ERR( rval );
3760  valuesComp1.resize( ents_of_interest.size() );
3761  rval = context.MBI->tag_get_data( tagType2, ents_of_interest, &valuesComp1[0] );MB_CHK_ERR( rval ); // just global ids
3762  }
3763  else if( *type == 3 ) // for FV meshes, just get the global id of cell
3764  {
3765  rval = context.MBI->get_entities_by_dimension( fset1, 2, ents_of_interest );MB_CHK_ERR( rval );
3766  valuesComp1.resize( ents_of_interest.size() );
3767  rval = context.MBI->tag_get_data( tagType2, ents_of_interest, &valuesComp1[0] );MB_CHK_ERR( rval ); // just global ids
3768  }
3769  else
3770  {
3771  MB_CHK_ERR( MB_FAILURE ); // we know only type 1 or 2 or 3
3772  }
3773  // now fill the tuple list with info and markers
3774  // because we will send only the ids, order and compress the list
3775  std::set< int > uniq( valuesComp1.begin(), valuesComp1.end() );
3776  TLcomp1.resize( uniq.size() );
3777  for( std::set< int >::iterator sit = uniq.begin(); sit != uniq.end(); sit++ )
3778  {
3779  // to proc, marker, element local index, index in el
3780  int marker = *sit;
3781  int to_proc = marker % numProcs;
3782  int n = TLcomp1.get_n();
3783  TLcomp1.vi_wr[2 * n] = to_proc; // send to processor
3784  TLcomp1.vi_wr[2 * n + 1] = marker;
3785  TLcomp1.inc_n();
3786  }
3787  }
3788 
3789  ProcConfig pc( joint_communicator ); // proc config does the crystal router
3790  pc.crystal_router()->gs_transfer( 1, TLcomp1,
3791  0 ); // communication towards joint tasks, with markers
3792  // sort by value (key 1)
3793 #ifdef VERBOSE
3794  std::stringstream ff1;
3795  ff1 << "TLcomp1_" << localRank << ".txt";
3796  TLcomp1.print_to_file( ff1.str().c_str() ); // it will append!
3797 #endif
3798  moab::TupleList::buffer sort_buffer;
3799  sort_buffer.buffer_init( TLcomp1.get_n() );
3800  TLcomp1.sort( 1, &sort_buffer );
3801  sort_buffer.reset();
3802 #ifdef VERBOSE
3803  // after sorting
3804  TLcomp1.print_to_file( ff1.str().c_str() ); // it will append!
3805 #endif
3806  // do the same, for the other component, number2, with type2
3807  // start copy
3808  TLcomp2.enableWriteAccess();
3809 
3810  moab::TempestOnlineMap* weightMap = NULL; // declare it outside, but it is meaningful only for *pid2 >= 0,
3811  // i.e. on the coupler PEs, where the read map and the coupler procs exist
3812  // populate second tuple with ids from read map: we need row_gdofmap and col_gdofmap
3813  std::vector< int > valuesComp2;
3814  if( *pid2 >= 0 ) // we are now on coupler, map side
3815  {
3816  appData& data2 = context.appDatas[*pid2];
3817  TempestMapAppData& tdata = data2.tempestData;
3818  // should be only one map, read from file
3819  assert( tdata.weightMaps.size() == 1 );
3820  // maybe we need to check it is the map we expect
3821  weightMap = tdata.weightMaps.begin()->second;
3822  // std::vector<int> ids_of_interest;
3823  // do a deep copy of the ids of interest: row ids or col ids, target or source direction
3824  if( *direction == 1 )
3825  {
3826  // we are interested in col ids, source
3827  // new method from moab::TempestOnlineMap
3828  rval = weightMap->fill_col_ids( valuesComp2 );MB_CHK_ERR( rval );
3829  }
3830  else if( *direction == 2 )
3831  {
3832  // we are interested in row ids, for target ids
3833  rval = weightMap->fill_row_ids( valuesComp2 );MB_CHK_ERR( rval );
3834  }
3835  //
3836 
3837  // now fill the tuple list with info and markers
3838  std::set< int > uniq( valuesComp2.begin(), valuesComp2.end() );
3839  TLcomp2.resize( uniq.size() );
3840  for( std::set< int >::iterator sit = uniq.begin(); sit != uniq.end(); sit++ )
3841  {
3842  // to proc, marker, element local index, index in el
3843  int marker = *sit;
3844  int to_proc = marker % numProcs;
3845  int n = TLcomp2.get_n();
3846  TLcomp2.vi_wr[2 * n] = to_proc; // send to processor
3847  TLcomp2.vi_wr[2 * n + 1] = marker;
3848  TLcomp2.inc_n();
3849  }
3850  }
3851  pc.crystal_router()->gs_transfer( 1, TLcomp2,
3852  0 ); // communication towards joint tasks, with markers
3853  // sort by value (key 1)
3854  // in the rendez-vous approach, markers meet at meet point (rendez-vous) processor marker % nprocs
3855 #ifdef VERBOSE
3856  std::stringstream ff2;
3857  ff2 << "TLcomp2_" << localRank << ".txt";
3858  TLcomp2.print_to_file( ff2.str().c_str() );
3859 #endif
3860  sort_buffer.buffer_reserve( TLcomp2.get_n() );
3861  TLcomp2.sort( 1, &sort_buffer );
3862  sort_buffer.reset();
3863  // end copy
3864 #ifdef VERBOSE
3865  TLcomp2.print_to_file( ff2.str().c_str() );
3866 #endif
3867  // need to send back the info, from the rendezvous point, for each of the values
3868  /* so go over each value, on local process in joint communicator,
3869  /* go over each value on the local process in the joint communicator;
3870 
3871  we now have to send back the info needed for communication:
3872  loop in sync over both TLcomp1 and TLcomp2 on the local process, and
3873  build the new tuple lists to send back for synchronous communication,
3874  populating them at the same time, based on the marker, which is indexed
3875 
3876  TupleList TLBackToComp1;
3877  TLBackToComp1.initialize( 3, 0, 0, 0, 0 ); // to proc, marker, from proc on comp2,
3878  TLBackToComp1.enableWriteAccess();
3879 
3880  TupleList TLBackToComp2;
3881  TLBackToComp2.initialize( 3, 0, 0, 0, 0 ); // to proc, marker, from proc,
3882  TLBackToComp2.enableWriteAccess();
3883 
3884  int n1 = TLcomp1.get_n();
3885  int n2 = TLcomp2.get_n();
3886 
3887  int indexInTLComp1 = 0;
3888  int indexInTLComp2 = 0; // advance both, according to the marker
3889  if( n1 > 0 && n2 > 0 )
3890  {
3891 
3892  while( indexInTLComp1 < n1 && indexInTLComp2 < n2 ) // if any is over, we are done
3893  {
3894  int currentValue1 = TLcomp1.vi_rd[2 * indexInTLComp1 + 1];
3895  int currentValue2 = TLcomp2.vi_rd[2 * indexInTLComp2 + 1];
3896  if( currentValue1 < currentValue2 )
3897  {
3898  // we have a big problem; basically, we are saying that
3899  // dof currentValue is on one model and not on the other
3900  // std::cout << " currentValue1:" << currentValue1 << " missing in comp2" << "\n";
3901  indexInTLComp1++;
3902  continue;
3903  }
3904  if( currentValue1 > currentValue2 )
3905  {
3906  // std::cout << " currentValue2:" << currentValue2 << " missing in comp1" << "\n";
3907  indexInTLComp2++;
3908  continue;
3909  }
3910  int size1 = 1;
3911  int size2 = 1;
3912  while( indexInTLComp1 + size1 < n1 && currentValue1 == TLcomp1.vi_rd[2 * ( indexInTLComp1 + size1 ) + 1] )
3913  size1++;
3914  while( indexInTLComp2 + size2 < n2 && currentValue2 == TLcomp2.vi_rd[2 * ( indexInTLComp2 + size2 ) + 1] )
3915  size2++;
3916  // must be found in both lists, find the start and end indices
3917  for( int i1 = 0; i1 < size1; i1++ )
3918  {
3919  for( int i2 = 0; i2 < size2; i2++ )
3920  {
3921  // send the info back to components
3922  int n = TLBackToComp1.get_n();
3923  TLBackToComp1.reserve();
3924  TLBackToComp1.vi_wr[3 * n] =
3925  TLcomp1.vi_rd[2 * ( indexInTLComp1 + i1 )]; // send back to the proc marker
3926  // came from, info from comp2
3927  TLBackToComp1.vi_wr[3 * n + 1] = currentValue1; // initial value (resend?)
3928  TLBackToComp1.vi_wr[3 * n + 2] = TLcomp2.vi_rd[2 * ( indexInTLComp2 + i2 )]; // from proc on comp2
3929  n = TLBackToComp2.get_n();
3930  TLBackToComp2.reserve();
3931  TLBackToComp2.vi_wr[3 * n] =
3932  TLcomp2.vi_rd[2 * ( indexInTLComp2 + i2 )]; // send back info to original
3933  TLBackToComp2.vi_wr[3 * n + 1] = currentValue1; // initial value (resend?)
3934  TLBackToComp2.vi_wr[3 * n + 2] = TLcomp1.vi_rd[2 * ( indexInTLComp1 + i1 )]; // from proc on comp1
3935  // what if there are repeated markers in TLcomp2? increase just index2
3936  }
3937  }
3938  indexInTLComp1 += size1;
3939  indexInTLComp2 += size2;
3940  }
3941  }
3942  pc.crystal_router()->gs_transfer( 1, TLBackToComp1, 0 ); // communication towards original tasks, with info about
3943  pc.crystal_router()->gs_transfer( 1, TLBackToComp2, 0 );
3944 
3945  TupleList TLv; // vertices
3946  TupleList TLc; // cells if needed (not type 2)
3947 
3948  if( *pid1 >= 0 )
3949  {
3950  // we are on original comp 1 tasks
3951  // before ordering
3952  // now for each value in TLBackToComp1.vi_rd[3*i+1], on current proc, we know the
3953  // processors it communicates with
3954 #ifdef VERBOSE
3955  std::stringstream f1;
3956  f1 << "TLBack1_" << localRank << ".txt";
3957  TLBackToComp1.print_to_file( f1.str().c_str() );
3958 #endif
3959  // so we are now on pid1; we now know where each marker has to go
3960  // add a new method to ParCommGraph, to set up the split_ranges and involved_IDs_map
3961  rval = cgraph->set_split_ranges( *comp1, TLBackToComp1, valuesComp1, lenTagType1, ents_of_interest, *type );MB_CHK_ERR( rval );
3962  // we can just send vertices and elements, with crystal routers;
3963  // on the receiving end, make sure they are not duplicated, by looking at the global id
3964  // if *type is 1, also send global_dofs tag in the element tuple
3965  rval = cgraph->form_tuples_to_migrate_mesh( context.MBI, TLv, TLc, *type, lenTagType1 );MB_CHK_ERR( rval );
3966  }
3967  else
3968  {
3969  TLv.initialize( 2, 0, 0, 3, 0 ); // no vertices here, for sure
3970  TLv.enableWriteAccess(); // to be able to receive stuff, even if nothing is here yet, on this task
3971  if( *type != 2 ) // for point cloud, we do not need to initialize TLc (for cells)
3972  {
3973  // we still need to initialize the tuples with the right size, as in form_tuples_to_migrate_mesh
3974  int size_tuple = 2 + ( ( *type != 1 ) ? 0 : lenTagType1 ) + 1 +
3975  10; // 10 is the max number of vertices in cell; kind of arbitrary
3976  TLc.initialize( size_tuple, 0, 0, 0, 0 );
3977  TLc.enableWriteAccess();
3978  }
3979  }
3980  pc.crystal_router()->gs_transfer( 1, TLv, 0 ); // communication towards coupler tasks, with mesh vertices
3981  if( *type != 2 ) pc.crystal_router()->gs_transfer( 1, TLc, 0 ); // those are cells
3982 
3983  if( *pid3 >= 0 ) // will receive the mesh, on coupler pes!
3984  {
3985  appData& data3 = context.appDatas[*pid3];
3986  EntityHandle fset3 = data3.file_set;
3987  Range primary_ents3; // vertices for type 2, cells of dim 2 for type 1 or 3
3988  std::vector< int > values_entities; // will be the size of primary_ents3 * lenTagType1
3989  rval = cgraph_rev->form_mesh_from_tuples( context.MBI, TLv, TLc, *type, lenTagType1, fset3, primary_ents3,
3990  values_entities );MB_CHK_ERR( rval );
3991  iMOAB_UpdateMeshInfo( pid3 );
3992  int ndofPerEl = 1;
3993  if( 1 == *type ) ndofPerEl = (int)( sqrt( lenTagType1 ) );
3994  // because we are on the coupler, we know that the read map pid2 exists
3995  assert( *pid2 >= 0 );
3996  appData& dataIntx = context.appDatas[*pid2];
3997  TempestMapAppData& tdata = dataIntx.tempestData;
3998 
3999  // if we are on source coverage, direction 1, we can set covering mesh, covering cells
4000  if( 1 == *direction )
4001  {
4002  tdata.pid_src = pid3;
4003  tdata.remapper->SetMeshSet( Remapper::CoveringMesh, fset3, &primary_ents3 );
4004  weightMap->SetSourceNDofsPerElement( ndofPerEl );
4005  weightMap->set_col_dc_dofs( values_entities ); // will set col_dtoc_dofmap
4006  }
4007  // if we are on target, we can set the target cells
4008  else
4009  {
4010  tdata.pid_dest = pid3;
4011  tdata.remapper->SetMeshSet( Remapper::TargetMesh, fset3, &primary_ents3 );
4012  weightMap->SetDestinationNDofsPerElement( ndofPerEl );
4013  weightMap->set_row_dc_dofs( values_entities ); // will set row_dtoc_dofmap
4014  }
4015  }
4016 
4017  return moab::MB_SUCCESS;
4018 }
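/*
 * Rendezvous rule used above (a short worked example): each global DOF id ("marker") is
 * routed to the joint-communicator task  marker % numProcs,  so both components send the
 * same id to the same meeting task without knowing each other's partition. For instance,
 * with numProcs = 4, the ids {3, 7, 10} meet on tasks {3, 3, 2}, where the matching of
 * TLcomp1 against TLcomp2 is resolved and the result is routed back.
 */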
4019 
4020 #endif // #ifdef MOAB_HAVE_MPI
4021 
4022 #endif // #ifdef MOAB_HAVE_NETCDF
4023 
4024 #define USE_API
4025 static ErrCode ComputeSphereRadius( iMOAB_AppID pid, double* radius )
4026 {
4027  ErrorCode rval;
4028  moab::CartVect pos;
4029 
4030  Range& verts = context.appDatas[*pid].all_verts;
4031  *radius = 1.0;
4032  if( verts.empty() ) return moab::MB_SUCCESS;
4033  moab::EntityHandle firstVertex = ( verts[0] );
4034 
4035  // coordinate data
4036  rval = context.MBI->get_coords( &( firstVertex ), 1, (double*)&( pos[0] ) );MB_CHK_ERR( rval );
4037 
4038  // compute the distance from origin
4039  // TODO: we could do this in a loop to verify if the pid represents a spherical mesh
4040  *radius = pos.length();
4041  return moab::MB_SUCCESS;
4042 }
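/*
 * A minimal sketch of the loop the TODO above refers to (checking that the pid really
 * represents a spherical mesh), under the assumption that every vertex should be
 * equidistant from the origin; this is not part of the current implementation:
 *
 *   for( Range::iterator it = verts.begin(); it != verts.end(); ++it )
 *   {
 *       EntityHandle v = *it;
 *       rval = context.MBI->get_coords( &v, 1, pos.array() );MB_CHK_ERR( rval );
 *       if( fabs( pos.length() - *radius ) > 1.e-10 * ( *radius ) ) return moab::MB_FAILURE;
 *   }
 */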
4043 
4044 ErrCode iMOAB_SetMapGhostLayers( iMOAB_AppID pid, int* n_src_ghost_layers, int* n_tgt_ghost_layers )
4045 {
4046  appData& data = context.appDatas[*pid];
4047 
4048  // Set the number of ghost layers
4049  data.tempestData.num_src_ghost_layers = *n_src_ghost_layers; // number of ghost layers for source
4050  data.tempestData.num_tgt_ghost_layers = *n_tgt_ghost_layers; // number of ghost layers for target
4051 
4052  return moab::MB_SUCCESS;
4053 }
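/*
 * Usage sketch (illustrative only; the pid is hypothetical): request one ghost layer on the
 * source side and none on the target side, on the intersection application, before calling
 * iMOAB_ComputeMeshIntersectionOnSphere (which forwards num_src_ghost_layers to
 * ConstructCoveringSet):
 *
 *   int nSrcGhost = 1, nTgtGhost = 0;
 *   ErrCode ierr = iMOAB_SetMapGhostLayers( pidIntxAtmOcn, &nSrcGhost, &nTgtGhost );
 */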
4054 
4055 ErrCode iMOAB_ComputeMeshIntersectionOnSphere( iMOAB_AppID pid_src, iMOAB_AppID pid_tgt, iMOAB_AppID pid_intx )
4056 {
4057  // Default constant parameters
4058  constexpr bool validate = true;
4059  constexpr bool enforceConvexity = true;
4060  constexpr bool use_kdtree_search = true;
4061  constexpr bool gnomonic = true;
4062  constexpr double defaultradius = 1.0;
4063  constexpr double boxeps = 1.e-10;
4064 
4065  // Other constant parameters
4066  const double epsrel = ReferenceTolerance; // ReferenceTolerance is defined in Defines.h in tempestremap source ;
4067  double radius_source = 1.0;
4068  double radius_target = 1.0;
4069 
4070  // Error code definitions
4071  ErrorCode rval;
4072  ErrCode ierr;
4073 
4074  // Get the source and target data and pcomm objects
4075  appData& data_src = context.appDatas[*pid_src];
4076  appData& data_tgt = context.appDatas[*pid_tgt];
4077  appData& data_intx = context.appDatas[*pid_intx];
4078 #ifdef MOAB_HAVE_MPI
4079  ParallelComm* pco_intx = context.pcomms[*pid_intx];
4080 #endif
4081 
4082  // Mesh intersection has already been computed; Return early.
4083  TempestMapAppData& tdata = data_intx.tempestData;
4084  if( tdata.remapper != NULL ) return moab::MB_SUCCESS; // nothing to do
4085 
4086  bool is_parallel = false, is_root = true;
4087  int rank = 0;
4088 #ifdef MOAB_HAVE_MPI
4089  if( pco_intx )
4090  {
4091  rank = pco_intx->rank();
4092  is_parallel = ( pco_intx->size() > 1 );
4093  is_root = ( rank == 0 );
4094  rval = pco_intx->check_all_shared_handles();MB_CHK_ERR( rval );
4095  }
4096 #endif
4097 
4098  moab::DebugOutput outputFormatter( std::cout, rank, 0 );
4099  outputFormatter.set_prefix( "[iMOAB_ComputeMeshIntersectionOnSphere]: " );
4100 
4101  ierr = iMOAB_UpdateMeshInfo( pid_src );MB_CHK_ERR( ierr );
4102  ierr = iMOAB_UpdateMeshInfo( pid_tgt );MB_CHK_ERR( ierr );
4103 
4104  // Rescale the radius of both to compute the intersection
4105  rval = ComputeSphereRadius( pid_src, &radius_source );MB_CHK_ERR( rval );
4106  rval = ComputeSphereRadius( pid_tgt, &radius_target );MB_CHK_ERR( rval );
4107 #ifdef VERBOSE
4108  if( is_root )
4109  outputFormatter.printf( 0, "Radius of spheres: source = %12.14f, and target = %12.14f\n", radius_source,
4110  radius_target );
4111 #endif
4112 
4113  /* Let us make sure that the radii match for the source and target meshes. If not, rescale now
4114  * and unscale later. */
4115  if( fabs( radius_source - radius_target ) > 1e-10 )
4116  { /* the radii are different */
4117  rval = IntxUtils::ScaleToRadius( context.MBI, data_src.file_set, defaultradius );MB_CHK_ERR( rval );
4118  rval = IntxUtils::ScaleToRadius( context.MBI, data_tgt.file_set, defaultradius );MB_CHK_ERR( rval );
4119  }
4120 
4121  // Default area_method = lHuiller; Options: Girard, GaussQuadrature (if TR is available)
4122 #ifdef MOAB_HAVE_TEMPESTREMAP
4123  IntxAreaUtils areaAdaptor( IntxAreaUtils::GaussQuadrature ); // Gauss quadrature areas when TempestRemap is available
4124 #else
4125  IntxAreaUtils areaAdaptor( IntxAreaUtils::lHuiller );
4126 #endif
4127 
4128  // fixes to enforce positive orientation of the vertices (outward normal)
4129  // fixes to clean up any degenerate quadrangular elements present in the mesh (RLL specifically?)
4130  // fixes to enforce convexity in case concave elements are present
4131  rval = areaAdaptor.positive_orientation( context.MBI, data_src.file_set, defaultradius /*radius_source*/ );MB_CHK_ERR( rval );
4132  rval = IntxUtils::fix_degenerate_quads( context.MBI, data_src.file_set );MB_CHK_ERR( rval );
4133  if( enforceConvexity )
4134  {
4135  rval = moab::IntxUtils::enforce_convexity( context.MBI, data_src.file_set, rank );MB_CHK_ERR( rval );
4136  }
4137 
4138  rval = areaAdaptor.positive_orientation( context.MBI, data_tgt.file_set, defaultradius /*radius_target*/ );MB_CHK_ERR( rval );
4139  rval = IntxUtils::fix_degenerate_quads( context.MBI, data_tgt.file_set );MB_CHK_ERR( rval );
4140  if( enforceConvexity )
4141  {
4142  rval = moab::IntxUtils::enforce_convexity( context.MBI, data_tgt.file_set, rank );MB_CHK_ERR( rval );
4143  }
4144 
4145  // print verbosely about the problem setting
4146 #ifdef VERBOSE
4147  {
4148  moab::Range rintxverts, rintxelems;
4149  rval = context.MBI->get_entities_by_dimension( data_src.file_set, 0, rintxverts );MB_CHK_ERR( rval );
4150  rval = context.MBI->get_entities_by_dimension( data_src.file_set, data_src.dimension, rintxelems );MB_CHK_ERR( rval );
4151 
4152  moab::Range bintxverts, bintxelems;
4153  rval = context.MBI->get_entities_by_dimension( data_tgt.file_set, 0, bintxverts );MB_CHK_ERR( rval );
4154  rval = context.MBI->get_entities_by_dimension( data_tgt.file_set, data_tgt.dimension, bintxelems );MB_CHK_ERR( rval );
4155 
4156  if( is_root )
4157  {
4158  outputFormatter.printf( 0, "The source set contains %d vertices and %d elements \n", rintxverts.size(),
4159  rintxelems.size() );
4160  outputFormatter.printf( 0, "The target set contains %d vertices and %d elements \n", bintxverts.size(),
4161  bintxelems.size() );
4162  }
4163  }
4164 #endif
4165  // use_kdtree_search = ( srctgt_areas_glb[0] < srctgt_areas_glb[1] );
4166  // advancing front method will fail unless source mesh has no holes (area = 4 * pi) on unit sphere
4167  // use_kdtree_search = true;
4168 
4169  data_intx.dimension = data_tgt.dimension;
4170  // set the context for the source and destination applications
4171  tdata.pid_src = pid_src;
4172  tdata.pid_dest = pid_tgt;
4173 
4174  // Now allocate and initialize the remapper object
4175 #ifdef MOAB_HAVE_MPI
4176  tdata.remapper = new moab::TempestRemapper( context.MBI, pco_intx );
4177 #else
4178  tdata.remapper = new moab::TempestRemapper( context.MBI );
4179 #endif
4180  tdata.remapper->meshValidate = validate;
4181  tdata.remapper->constructEdgeMap = true;
4182 
4183  // Do not create new filesets; Use the sets from our respective applications
4184  tdata.remapper->initialize( false );
4185  tdata.remapper->GetMeshSet( moab::Remapper::SourceMesh ) = data_src.file_set;
4186  tdata.remapper->GetMeshSet( moab::Remapper::TargetMesh ) = data_tgt.file_set;
4187  tdata.remapper->GetMeshSet( moab::Remapper::OverlapMesh ) = data_intx.file_set;
4188 
4189  rval = tdata.remapper->ConvertMeshToTempest( moab::Remapper::SourceMesh );MB_CHK_ERR( rval );
4190  rval = tdata.remapper->ConvertMeshToTempest( moab::Remapper::TargetMesh );MB_CHK_ERR( rval );
4191 
4192  // First, compute the covering source set.
4193  rval = tdata.remapper->ConstructCoveringSet( epsrel, 1.0, 1.0, boxeps, false, gnomonic, tdata.num_src_ghost_layers );MB_CHK_ERR( rval );
4194 
4195  // Next, compute intersections with MOAB.
4196  // for bilinear, this is overkill
4197  rval = tdata.remapper->ComputeOverlapMesh( use_kdtree_search, false );MB_CHK_ERR( rval );
4198 
4199  // Mapping computation done
4200  if( validate )
4201  {
4202  double local_areas[3], global_areas[3];
4203  local_areas[0] = areaAdaptor.area_on_sphere( context.MBI, data_src.file_set, defaultradius /*radius_source*/ );
4204  local_areas[1] = areaAdaptor.area_on_sphere( context.MBI, data_tgt.file_set, defaultradius /*radius_target*/ );
4205  local_areas[2] = areaAdaptor.area_on_sphere( context.MBI, data_intx.file_set, radius_source );
4206 #ifdef MOAB_HAVE_MPI
4207  global_areas[0] = global_areas[1] = global_areas[2] = 0.0;
4208  MPI_Reduce( &local_areas[0], &global_areas[0], 3, MPI_DOUBLE, MPI_SUM, 0, pco_intx->comm() );
4209 #else
4210  global_areas[0] = local_areas[0];
4211  global_areas[1] = local_areas[1];
4212  global_areas[2] = local_areas[2];
4213 #endif
4214 
4215  if( is_root )
4216  {
4217  outputFormatter.printf( 0,
4218  "initial area: source mesh = %12.14f, target mesh = %12.14f, "
4219  "overlap mesh = %12.14f\n",
4220  global_areas[0], global_areas[1], global_areas[2] );
4221  outputFormatter.printf( 0, " relative error w.r.t source = %12.14e, and target = %12.14e\n",
4222  fabs( global_areas[0] - global_areas[2] ) / global_areas[0],
4223  fabs( global_areas[1] - global_areas[2] ) / global_areas[1] );
4224  }
4225  }
4226 
4227  return moab::MB_SUCCESS;
4228 }
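/*
 * Typical online remapping sequence built on the call above (a sketch; the pids, orders and
 * tag names are placeholders, not defined here):
 *
 *   ierr = iMOAB_ComputeMeshIntersectionOnSphere( pidSrc, pidTgt, pidIntx );
 *   int order = 1, fNoBubble = 1, fMonotone = 0, fVolumetric = 0, fInvDist = 0,
 *       fNoConserve = 0, fValidate = 1;
 *   ierr = iMOAB_ComputeScalarProjectionWeights( pidIntx, "scalar", "fv", &order, "fv", &order,
 *                                                "", &fNoBubble, &fMonotone, &fVolumetric,
 *                                                &fInvDist, &fNoConserve, &fValidate,
 *                                                "GLOBAL_ID", "GLOBAL_ID" );
 *   int filterType = 0;
 *   ierr = iMOAB_ApplyScalarProjectionWeights( pidIntx, &filterType, "scalar",
 *                                              "srcTag", "tgtTag" );
 */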
4229 
4230 ErrCode iMOAB_ComputePointDoFIntersection( iMOAB_AppID pid_src, iMOAB_AppID pid_tgt, iMOAB_AppID pid_intx )
4231 {
4232  ErrorCode rval;
4233  ErrCode ierr;
4234 
4235  double radius_source = 1.0;
4236  double radius_target = 1.0;
4237  const double epsrel = ReferenceTolerance; // ReferenceTolerance is defined in Defines.h in tempestremap source ;
4238  const double boxeps = 1.e-8;
4239 
4240  // Get the source and target data and pcomm objects
4241  appData& data_src = context.appDatas[*pid_src];
4242  appData& data_tgt = context.appDatas[*pid_tgt];
4243  appData& data_intx = context.appDatas[*pid_intx];
4244 #ifdef MOAB_HAVE_MPI
4245  ParallelComm* pco_intx = context.pcomms[*pid_intx];
4246 #endif
4247 
4248  // Mesh intersection has already been computed; Return early.
4249  TempestMapAppData& tdata = data_intx.tempestData;
4250  if( tdata.remapper != NULL ) return moab::MB_SUCCESS; // nothing to do
4251 
4252 #ifdef MOAB_HAVE_MPI
4253  if( pco_intx )
4254  {
4255  rval = pco_intx->check_all_shared_handles();MB_CHK_ERR( rval );
4256  }
4257 #endif
4258 
4259  ierr = iMOAB_UpdateMeshInfo( pid_src );MB_CHK_ERR( ierr );
4260  ierr = iMOAB_UpdateMeshInfo( pid_tgt );MB_CHK_ERR( ierr );
4261 
4262  // Rescale the radius of both to compute the intersection
4263  ComputeSphereRadius( pid_src, &radius_source );
4264  ComputeSphereRadius( pid_tgt, &radius_target );
4265 
4266  IntxAreaUtils areaAdaptor;
4267  // print verbosely about the problem setting
4268  {
4269  moab::Range rintxverts, rintxelems;
4270  rval = context.MBI->get_entities_by_dimension( data_src.file_set, 0, rintxverts );MB_CHK_ERR( rval );
4271  rval = context.MBI->get_entities_by_dimension( data_src.file_set, 2, rintxelems );MB_CHK_ERR( rval );
4272  rval = IntxUtils::fix_degenerate_quads( context.MBI, data_src.file_set );MB_CHK_ERR( rval );
4273  rval = areaAdaptor.positive_orientation( context.MBI, data_src.file_set, radius_source );MB_CHK_ERR( rval );
4274 #ifdef VERBOSE
4275  std::cout << "The red set contains " << rintxverts.size() << " vertices and " << rintxelems.size()
4276  << " elements \n";
4277 #endif
4278 
4279  moab::Range bintxverts, bintxelems;
4280  rval = context.MBI->get_entities_by_dimension( data_tgt.file_set, 0, bintxverts );MB_CHK_ERR( rval );
4281  rval = context.MBI->get_entities_by_dimension( data_tgt.file_set, 2, bintxelems );MB_CHK_ERR( rval );
4282  rval = IntxUtils::fix_degenerate_quads( context.MBI, data_tgt.file_set );MB_CHK_ERR( rval );
4283  rval = areaAdaptor.positive_orientation( context.MBI, data_tgt.file_set, radius_target );MB_CHK_ERR( rval );
4284 #ifdef VERBOSE
4285  std::cout << "The blue set contains " << bintxverts.size() << " vertices and " << bintxelems.size()
4286  << " elements \n";
4287 #endif
4288  }
4289 
4290  data_intx.dimension = data_tgt.dimension;
4291  // set the context for the source and destination applications
4293  tdata.pid_src = pid_src;
4294  tdata.pid_dest = pid_tgt;
4295  data_intx.point_cloud = ( data_src.point_cloud || data_tgt.point_cloud );
4296  assert( data_intx.point_cloud == true );
4297 
4298  // Now allocate and initialize the remapper object
4299 #ifdef MOAB_HAVE_MPI
4300  tdata.remapper = new moab::TempestRemapper( context.MBI, pco_intx );
4301 #else
4302  tdata.remapper = new moab::TempestRemapper( context.MBI );
4303 #endif
4304  tdata.remapper->meshValidate = true;
4305  tdata.remapper->constructEdgeMap = true;
4306 
4307  // Do not create new filesets; Use the sets from our respective applications
4308  tdata.remapper->initialize( false );
4309  tdata.remapper->GetMeshSet( moab::Remapper::SourceMesh ) = data_src.file_set;
4310  tdata.remapper->GetMeshSet( moab::Remapper::TargetMesh ) = data_tgt.file_set;
4311  tdata.remapper->GetMeshSet( moab::Remapper::OverlapMesh ) = data_intx.file_set;
4312 
4313  /* Let us make sure that the radii match for the source and target meshes. If not, rescale now
4314  * and unscale later. */
4315  if( fabs( radius_source - radius_target ) > 1e-10 )
4316  { /* the radii are different */
4317  rval = IntxUtils::ScaleToRadius( context.MBI, data_src.file_set, 1.0 );MB_CHK_ERR( rval );
4318  rval = IntxUtils::ScaleToRadius( context.MBI, data_tgt.file_set, 1.0 );MB_CHK_ERR( rval );
4319  }
4320 
4321  rval = tdata.remapper->ConvertMeshToTempest( moab::Remapper::SourceMesh );MB_CHK_ERR( rval );
4322  rval = tdata.remapper->ConvertMeshToTempest( moab::Remapper::TargetMesh );MB_CHK_ERR( rval );
4323 
4324  // First, compute the covering source set.
4325  rval = tdata.remapper->ConstructCoveringSet( epsrel, 1.0, 1.0, boxeps, false );MB_CHK_ERR( rval );
4326 
4327 #ifdef MOAB_HAVE_MPI
4328  /* VSM: This context should be set on the data_src but it would overwrite the source
4329  covering set context in case it is coupled to another APP as well.
4330  This needs a redesign. */
4331  // data_intx.covering_set = tdata.remapper->GetCoveringSet();
4332  // data_src.covering_set = tdata.remapper->GetCoveringSet();
4333 #endif
4334 
4335  // Now let us re-convert the MOAB mesh back to Tempest representation
4336  rval = tdata.remapper->ComputeGlobalLocalMaps();MB_CHK_ERR( rval );
4337 
4338  return moab::MB_SUCCESS;
4339 }
4340 
4341 ErrCode iMOAB_ComputeScalarProjectionWeights(
4342  iMOAB_AppID pid_intx,
4343  const iMOAB_String solution_weights_identifier, /* "scalar", "flux", "custom" */
4344  const iMOAB_String disc_method_source,
4345  int* disc_order_source,
4346  const iMOAB_String disc_method_target,
4347  int* disc_order_target,
4348  const iMOAB_String fv_method,
4349  int* fNoBubble,
4350  int* fMonotoneTypeID,
4351  int* fVolumetric,
4352  int* fInverseDistanceMap,
4353  int* fNoConservation,
4354  int* fValidate,
4355  const iMOAB_String source_solution_tag_dof_name,
4356  const iMOAB_String target_solution_tag_dof_name )
4357 {
4358  moab::ErrorCode rval;
4359 
4360  assert( disc_order_source && disc_order_target && *disc_order_source > 0 && *disc_order_target > 0 );
4361  assert( solution_weights_identifier && strlen( solution_weights_identifier ) );
4362  assert( disc_method_source && strlen( disc_method_source ) );
4363  assert( disc_method_target && strlen( disc_method_target ) );
4364  assert( source_solution_tag_dof_name && strlen( source_solution_tag_dof_name ) );
4365  assert( target_solution_tag_dof_name && strlen( target_solution_tag_dof_name ) );
4366 
4367  // Get the source and target data and pcomm objects
4368  appData& data_intx = context.appDatas[*pid_intx];
4369  TempestMapAppData& tdata = data_intx.tempestData;
4370 
4371  // Setup computation of weights
4372  // Set the context for the remapping weights computation
4373  tdata.weightMaps[std::string( solution_weights_identifier )] = new moab::TempestOnlineMap( tdata.remapper );
4374 
4375  // Call to generate the remap weights with the tempest meshes
4376  moab::TempestOnlineMap* weightMap = tdata.weightMaps[std::string( solution_weights_identifier )];
4377  assert( weightMap != NULL );
4378 
4379  GenerateOfflineMapAlgorithmOptions mapOptions;
4380  mapOptions.nPin = *disc_order_source;
4381  mapOptions.nPout = *disc_order_target;
4382  mapOptions.fSourceConcave = false;
4383  mapOptions.fTargetConcave = false;
4384 
4385  mapOptions.strMethod = "";
4386  if( fv_method ) mapOptions.strMethod += std::string( fv_method ) + ";";
4387  if( fMonotoneTypeID )
4388  {
4389  switch( *fMonotoneTypeID )
4390  {
4391  case 0:
4392  mapOptions.fMonotone = false;
4393  break;
4394  case 3:
4395  mapOptions.strMethod += "mono3;";
4396  break;
4397  case 2:
4398  mapOptions.strMethod += "mono2;";
4399  break;
4400  default:
4401  mapOptions.fMonotone = true;
4402  }
4403  }
4404  else
4405  mapOptions.fMonotone = false;
4406 
4407  mapOptions.fNoBubble = ( fNoBubble ? *fNoBubble : false );
4408  mapOptions.fNoConservation = ( fNoConservation ? *fNoConservation > 0 : false );
4409  mapOptions.fNoCorrectAreas = false;
4410  // mapOptions.fNoCheck = !( fValidate ? *fValidate : true );
4411  mapOptions.fNoCheck = true;
4412  if( fVolumetric && *fVolumetric ) mapOptions.strMethod += "volumetric;";
4413  if( fInverseDistanceMap && *fInverseDistanceMap ) mapOptions.strMethod += "invdist;";
4414 
4415  std::string metadataStr = mapOptions.strMethod + ";" + std::string( disc_method_source ) + ":" +
4416  std::to_string( *disc_order_source ) + ":" + std::string( source_solution_tag_dof_name ) +
4417  ";" + std::string( disc_method_target ) + ":" + std::to_string( *disc_order_target ) +
4418  ":" + std::string( target_solution_tag_dof_name );
4419 
4420  data_intx.metadataMap[std::string( solution_weights_identifier )] = metadataStr;
4421 
4422  // Now let us compute the local-global mapping and store it in the context
4423  // We need this mapping when computing matvec products and to do reductions in parallel
4424  // Additionally, the call below will also compute weights with TempestRemap
4425  rval = weightMap->GenerateRemappingWeights(
4426  std::string( disc_method_source ), // const std::string& strInputType
4427  std::string( disc_method_target ), // const std::string& strOutputType
4428  mapOptions, // GenerateOfflineMapAlgorithmOptions& mapOptions
4429  std::string( source_solution_tag_dof_name ), // const std::string& srcDofTagName = "GLOBAL_ID"
4430  std::string( target_solution_tag_dof_name ) // const std::string& tgtDofTagName = "GLOBAL_ID"
4431  );MB_CHK_ERR( rval );
4432 
4433  // print some map statistics
4434  weightMap->PrintMapStatistics();
4435 
4436  if( fValidate && *fValidate )
4437  {
4438  const double dNormalTolerance = 1.0E-8;
4439  const double dStrictTolerance = 1.0E-12;
4440  weightMap->CheckMap( true, true, ( fMonotoneTypeID && *fMonotoneTypeID ), dNormalTolerance, dStrictTolerance );
4441  }
4442 
4443  return moab::MB_SUCCESS;
4444 }
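/*
 * Example of the option string assembled above: with fv_method = "fv", fMonotoneTypeID = 2
 * and fVolumetric nonzero, strMethod becomes "fv;mono2;volumetric;". This option string is
 * then prepended to the metadata stored in metadataMap for the given weight identifier and
 * later consumed by iMOAB_WriteMappingWeightsToFile.
 */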
4445 
4446 ErrCode iMOAB_ApplyScalarProjectionWeights(
4447  iMOAB_AppID pid_intersection,
4448  int* filter_type,
4449  const iMOAB_String solution_weights_identifier, /* "scalar", "flux", "custom" */
4450  const iMOAB_String source_solution_tag_name,
4451  const iMOAB_String target_solution_tag_name )
4452 {
4453  assert( solution_weights_identifier && strlen( solution_weights_identifier ) );
4454  assert( source_solution_tag_name && strlen( source_solution_tag_name ) );
4455  assert( target_solution_tag_name && strlen( target_solution_tag_name ) );
4456 
4457  moab::ErrorCode rval;
4458 
4459  // Get the source and target data and pcomm objects
4460  appData& data_intx = context.appDatas[*pid_intersection];
4461  TempestMapAppData& tdata = data_intx.tempestData;
4462 
4463  // Now allocate and initialize the remapper object
4464  moab::TempestRemapper* remapper = tdata.remapper;
4465  if( !tdata.weightMaps.count( std::string( solution_weights_identifier ) ) ) // key does not exist
4466  return moab::MB_FAILURE; // no weight map was computed or loaded under this identifier
4467  moab::TempestOnlineMap* weightMap = tdata.weightMaps[std::string( solution_weights_identifier )];
4468 
4469  // we assume that there are ":" separators between the tag names
4470  std::vector< std::string > srcNames;
4471  std::vector< std::string > tgtNames;
4472  std::vector< Tag > srcTagHandles;
4473  std::vector< Tag > tgtTagHandles;
4474  std::string separator( ":" );
4475  std::string src_name( source_solution_tag_name );
4476  std::string tgt_name( target_solution_tag_name );
4477  split_tag_names( src_name, separator, srcNames );
4478  split_tag_names( tgt_name, separator, tgtNames );
4479  if( srcNames.size() != tgtNames.size() )
4480  {
4481  std::cout << " error in parsing source and target tag names. \n";
4482  return moab::MB_FAILURE;
4483  }
4484 
4485  for( size_t i = 0; i < srcNames.size(); i++ )
4486  {
4487  Tag tagHandle;
4488  rval = context.MBI->tag_get_handle( srcNames[i].c_str(), tagHandle );
4489  if( MB_SUCCESS != rval || NULL == tagHandle )
4490  {
4491  return moab::MB_FAILURE;
4492  }
4493  srcTagHandles.push_back( tagHandle );
4494  rval = context.MBI->tag_get_handle( tgtNames[i].c_str(), tagHandle );
4495  if( MB_SUCCESS != rval || NULL == tagHandle )
4496  {
4497  return moab::MB_FAILURE;
4498  }
4499  tgtTagHandles.push_back( tagHandle );
4500  }
4501 
4502  std::vector< double > solSTagVals;
4503  std::vector< double > solTTagVals;
4504 
4505  moab::Range sents, tents;
4506  if( data_intx.point_cloud )
4507  {
4508  appData& data_src = context.appDatas[*tdata.pid_src];
4509  appData& data_tgt = context.appDatas[*tdata.pid_dest];
4510  if( data_src.point_cloud )
4511  {
4512  moab::Range& covSrcEnts = remapper->GetMeshVertices( moab::Remapper::CoveringMesh );
4513  solSTagVals.resize( covSrcEnts.size(), 0. );
4514  sents = covSrcEnts;
4515  }
4516  else
4517  {
4518  moab::Range& covSrcEnts = remapper->GetMeshEntities( moab::Remapper::CoveringMesh );
4519  solSTagVals.resize(
4520  covSrcEnts.size() * weightMap->GetSourceNDofsPerElement() * weightMap->GetSourceNDofsPerElement(), 0. );
4521  sents = covSrcEnts;
4522  }
4523  if( data_tgt.point_cloud )
4524  {
4525  moab::Range& tgtEnts = remapper->GetMeshVertices( moab::Remapper::TargetMesh );
4526  solTTagVals.resize( tgtEnts.size(), 0. );
4527  tents = tgtEnts;
4528  }
4529  else
4530  {
4531  moab::Range& tgtEnts = remapper->GetMeshEntities( moab::Remapper::TargetMesh );
4532  solTTagVals.resize( tgtEnts.size() * weightMap->GetDestinationNDofsPerElement() *
4533  weightMap->GetDestinationNDofsPerElement(),
4534  0. );
4535  tents = tgtEnts;
4536  }
4537  }
4538  else
4539  {
4540  moab::Range& covSrcEnts = remapper->GetMeshEntities( moab::Remapper::CoveringMesh );
4541  moab::Range& tgtEnts = remapper->GetMeshEntities( moab::Remapper::TargetMesh );
4542  solSTagVals.resize(
4543  covSrcEnts.size() * weightMap->GetSourceNDofsPerElement() * weightMap->GetSourceNDofsPerElement(), -1.0 );
4544  solTTagVals.resize( tgtEnts.size() * weightMap->GetDestinationNDofsPerElement() *
4545  weightMap->GetDestinationNDofsPerElement(),
4546  0. );
4547 
4548  sents = covSrcEnts;
4549  tents = tgtEnts;
4550  }
4551 
4552  moab::CAAS_TYPE caasType = moab::CAAS_NONE; // CAAS filter selection; enum spelling assumed from TempestOnlineMap.hpp
4553  if( filter_type )
4554  {
4555  switch( *filter_type )
4556  {
4557  case 1:
4558  caasType = moab::CAAS_GLOBAL;
4559  break;
4560  case 2:
4561  caasType = moab::CAAS_LOCAL;
4562  break;
4563  case 3:
4564  caasType = moab::CAAS_LOCAL_ADJACENT;
4565  break;
4566  default:
4567  caasType = moab::CAAS_NONE;
4568  }
4569  }
4570 
4571  for( size_t i = 0; i < srcTagHandles.size(); i++ )
4572  {
4573  // Compute the application of weights on the source solution data and store it in the
4574  // destination solution vector data. Optionally, the transpose application of the weight
4575  // matrix can also be performed; set the 3rd argument to true if this is needed
4576  rval = weightMap->ApplyWeights( srcTagHandles[i], tgtTagHandles[i], false, caasType );MB_CHK_ERR( rval );
4577  }
4578 
4579 // #define VERBOSE
4580 #ifdef VERBOSE
4581  ParallelComm* pco_intx = context.pcomms[*pid_intersection];
4582 
4583  int ivar = 0;
4584  {
4585  Tag ssolnTag = srcTagHandles[ivar];
4586  std::stringstream sstr;
4587  sstr << "covsrcTagData_" << *pid_intersection << "_" << ivar << "_" << pco_intx->rank() << ".txt";
4588  std::ofstream output_file( sstr.str().c_str() );
4589  for( unsigned i = 0; i < sents.size(); ++i )
4590  {
4591  EntityHandle elem = sents[i];
4592  std::vector< double > locsolSTagVals( 16 );
4593  rval = context.MBI->tag_get_data( ssolnTag, &elem, 1, &locsolSTagVals[0] );MB_CHK_ERR( rval );
4594  output_file << "\n" << remapper->GetGlobalID( Remapper::CoveringMesh, i ) << "-- \n\t";
4595  for( unsigned j = 0; j < 16; ++j )
4596  output_file << locsolSTagVals[j] << " ";
4597  }
4598  output_file.flush(); // required here
4599  output_file.close();
4600  }
4601  {
4602  std::stringstream sstr;
4603  sstr << "outputSrcDest_" << *pid_intersection << "_" << ivar << "_" << pco_intx->rank() << ".h5m";
4604  EntityHandle sets[2] = { context.appDatas[*tdata.pid_src].file_set,
4605  context.appDatas[*tdata.pid_dest].file_set };
4606  rval = context.MBI->write_file( sstr.str().c_str(), NULL, "", sets, 2 );MB_CHK_ERR( rval );
4607  }
4608  {
4609  std::stringstream sstr;
4610  sstr << "outputCovSrcDest_" << *pid_intersection << "_" << ivar << "_" << pco_intx->rank() << ".h5m";
4611  // EntityHandle sets[2] = {data_intx.file_set, data_intx.covering_set};
4612  EntityHandle covering_set = remapper->GetCoveringSet();
4613  EntityHandle sets[2] = { covering_set, context.appDatas[*tdata.pid_dest].file_set };
4614  rval = context.MBI->write_file( sstr.str().c_str(), NULL, "", sets, 2 );MB_CHK_ERR( rval );
4615  }
4616  {
4617  std::stringstream sstr;
4618  sstr << "covMesh_" << *pid_intersection << "_" << pco_intx->rank() << ".vtk";
4619  // EntityHandle sets[2] = {data_intx.file_set, data_intx.covering_set};
4620  EntityHandle covering_set = remapper->GetCoveringSet();
4621  rval = context.MBI->write_file( sstr.str().c_str(), NULL, "", &covering_set, 1 );MB_CHK_ERR( rval );
4622  }
4623  {
4624  std::stringstream sstr;
4625  sstr << "tgtMesh_" << *pid_intersection << "_" << pco_intx->rank() << ".vtk";
4626  // EntityHandle sets[2] = {data_intx.file_set, data_intx.covering_set};
4627  EntityHandle target_set = remapper->GetMeshSet( Remapper::TargetMesh );
4628  rval = context.MBI->write_file( sstr.str().c_str(), NULL, "", &target_set, 1 );MB_CHK_ERR( rval );
4629  }
4630  {
4631  std::stringstream sstr;
4632  sstr << "colvector_" << *pid_intersection << "_" << ivar << "_" << pco_intx->rank() << ".txt";
4633  std::ofstream output_file( sstr.str().c_str() );
4634  for( unsigned i = 0; i < solSTagVals.size(); ++i )
4635  output_file << i << " " << weightMap->col_dofmap[i] << " " << weightMap->col_gdofmap[i] << " "
4636  << solSTagVals[i] << "\n";
4637  output_file.flush(); // required here
4638  output_file.close();
4639  }
4640 #endif
4641  // #undef VERBOSE
4642 
4643  return moab::MB_SUCCESS;
4644 }
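/*
 * Usage sketch (the tag names are hypothetical): several fields can be projected in one call
 * by joining tag names with ":" in both arguments; the lists are split and matched pair-wise:
 *
 *   int filterType = 0; // no CAAS filtering
 *   ErrCode ierr = iMOAB_ApplyScalarProjectionWeights( pidIntxAtmOcn, &filterType, "scalar",
 *                                                      "a2oTbot:a2oUbot:a2oVbot",
 *                                                      "a2oTbot_proj:a2oUbot_proj:a2oVbot_proj" );
 */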
4645 
4646 #endif // MOAB_HAVE_TEMPESTREMAP
4647 
4648 #ifdef __cplusplus
4649 }
4650 #endif