1
3
4 #include "moab/MOABConfig.h"
5 #include "moab/Core.hpp"
6
7 #ifdef MOAB_HAVE_MPI
8 #include "moab_mpi.h"
9 #include "moab/ParallelComm.hpp"
10 #include "moab/ParCommGraph.hpp"
11 #include "moab/ParallelMergeMesh.hpp"
12 #include "moab/IntxMesh/IntxUtils.hpp"
13 #endif
14 #include "DebugOutput.hpp"
15 #include "moab/iMOAB.h"
16
17
18 #ifdef MOAB_HAVE_HDF5
19 #include "mhdf.h"
20 #include <H5Tpublic.h>
21 #endif
22
23 #include "moab/CartVect.hpp"
24 #include "MBTagConventions.hpp"
25 #include "moab/MeshTopoUtil.hpp"
26 #include "moab/ReadUtilIface.hpp"
27 #include "moab/MergeMesh.hpp"
28
29 #ifdef MOAB_HAVE_TEMPESTREMAP
30 #include "STLStringHelper.h"
31 #include "moab/IntxMesh/IntxUtils.hpp"
32
33 #include "moab/Remapping/TempestRemapper.hpp"
34 #include "moab/Remapping/TempestOnlineMap.hpp"
35 #endif
36
37
38 #include <cassert>
39 #include <sstream>
40 #include <iostream>
41
42 using namespace moab;
43
44
45
46
47
48
49 #ifdef __cplusplus
50 extern "C" {
51 #endif
52
53 #ifdef MOAB_HAVE_TEMPESTREMAP
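// Per-application TempestRemap state: the remapper instance, the online weight maps computed so
// far (keyed by name), the iMOAB ids of the source and target applications, and the number of
// ghost layers requested on the source and target sides.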
54 struct TempestMapAppData
55 {
56 moab::TempestRemapper* remapper;
57 std::map< std::string, moab::TempestOnlineMap* > weightMaps;
58 iMOAB_AppID pid_src;
59 iMOAB_AppID pid_dest;
60 int num_src_ghost_layers;
61 int num_tgt_ghost_layers;
62 };
63 #endif
64
65 struct appData
66 {
67 EntityHandle file_set;
68 int global_id;
69 std::string name;
70 Range all_verts;
71 Range local_verts;
72 Range owned_verts;
73 Range ghost_vertices;
74 Range primary_elems;
75 Range owned_elems;
76 Range ghost_elems;
77 int dimension;
78 long num_global_elements;
79
80 long num_global_vertices;
81
82 int num_ghost_layers;
83 Range mat_sets;
84 std::map< int, int > matIndex;
85 Range neu_sets;
86 Range diri_sets;
87 std::map< std::string, Tag > tagMap;
88 std::vector< Tag > tagList;
89 bool point_cloud;
90 bool is_fortran;
91
92 #ifdef MOAB_HAVE_MPI
93 ParallelComm* pcomm;
94
95
96 std::map< int, ParCommGraph* > pgraph;
97 #endif
98
99 #ifdef MOAB_HAVE_TEMPESTREMAP
100 EntityHandle secondary_file_set;
101 TempestMapAppData tempestData;
102 std::map< std::string, std::string > metadataMap;
103 #endif
104 };
105
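// Process-wide singleton state shared by all registered applications: the single moab::Core
// instance, handles to the conventional MATERIAL/NEUMANN/DIRICHLET/GLOBAL_ID set tags, a
// reference count for Initialize/Finalize pairing, the saved argc/argv, the name -> pid and
// pid -> appData registries, the world rank/size, and a flag recording whether MPI was found
// to be initialized.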
106 struct GlobalContext
107 {
108
109 Interface* MBI;
110
111 Tag material_tag, neumann_tag, dirichlet_tag,
112 globalID_tag;
113 int refCountMB;
114 int iArgc;
115 iMOAB_String* iArgv;
116
117 std::map< std::string, int > appIdMap;
118 std::map< int, appData > appDatas;
119 int globalrank, worldprocs;
120 bool MPI_initialized;
121
122 GlobalContext()
123 {
124 MBI = 0;
125 refCountMB = 0;
126 }
127 };
128
129 static struct GlobalContext context;
130
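// Reference-counted initialization: the first call creates the global moab::Core instance,
// caches the conventional set tags, and checks whether MPI has already been initialized
// (recording the world rank and size); subsequent calls only bump the reference count, so
// several components in the same executable can call Initialize/Finalize independently.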
131 ErrCode iMOAB_Initialize( int argc, iMOAB_String* argv )
132 {
133 if( argc ) IMOAB_CHECKPOINTER( argv, 1 );
134
135 context.iArgc = argc;
136 context.iArgv = argv;
137
138 if( 0 == context.refCountMB )
139 {
140 context.MBI = new( std::nothrow ) moab::Core;
141
142 const char* const shared_set_tag_names[] = { MATERIAL_SET_TAG_NAME, NEUMANN_SET_TAG_NAME,
143 DIRICHLET_SET_TAG_NAME, GLOBAL_ID_TAG_NAME };
144
145 Tag gtags[4];
146 for( int i = 0; i < 4; i++ )
147 {
148 MB_CHK_ERR(
149 context.MBI->tag_get_handle( shared_set_tag_names[i], 1, MB_TYPE_INTEGER, gtags[i], MB_TAG_ANY ) );
150 }
151
152 context.material_tag = gtags[0];
153 context.neumann_tag = gtags[1];
154 context.dirichlet_tag = gtags[2];
155 context.globalID_tag = gtags[3];
156
157 context.MPI_initialized = false;
158 #ifdef MOAB_HAVE_MPI
159 int flagInit;
160 MPI_Initialized( &flagInit );
161
162 if( flagInit && !context.MPI_initialized )
163 {
164 MPI_Comm_size( MPI_COMM_WORLD, &context.worldprocs );
165 MPI_Comm_rank( MPI_COMM_WORLD, &context.globalrank );
166 context.MPI_initialized = true;
167 }
168 #endif
169 }
170
171 context.refCountMB++;
172 return moab::MB_SUCCESS;
173 }
174
175 ErrCode iMOAB_InitializeFortran()
176 {
177 return iMOAB_Initialize( 0, 0 );
178 }
179
180 ErrCode iMOAB_Finalize()
181 {
182 context.refCountMB--;
183
184 if( 0 == context.refCountMB )
185 {
186 delete context.MBI;
187 }
188
189 return MB_SUCCESS;
190 }
191
192
193
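// Derive a deterministic application id from the application name and (optionally) the external
// component id, using an FNV-1a style hash (offset basis 2166136261, prime 16777619) masked to a
// non-negative int. The same (name, id) pair therefore hashes to the same pid on every process;
// distinct names can in principle collide.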
194 static int apphash( const iMOAB_String str, int identifier = 0 )
195 {
196
197 std::string appstr( str );
198
199
200
201 unsigned int h = 2166136261u;
202 for( char c : appstr )
203 {
204 h ^= static_cast< unsigned char >( c );
205 h *= 16777619u;
206 }
207
208 if( identifier )
209 {
210 h ^= static_cast< unsigned int >( identifier & 0xFFFFFFFF );
211 h *= 16777619u;
212 }
213 return static_cast< int >( h & 0x7FFFFFFF );
214 }
215
216 ErrCode iMOAB_RegisterApplication( const iMOAB_String app_name,
217 #ifdef MOAB_HAVE_MPI
218 MPI_Comm* comm,
219 #endif
220 int* compid,
221 iMOAB_AppID pid )
222 {
223 IMOAB_CHECKPOINTER( app_name, 1 );
224 #ifdef MOAB_HAVE_MPI
225 IMOAB_CHECKPOINTER( comm, 2 );
226 IMOAB_CHECKPOINTER( compid, 3 );
227 #else
228 IMOAB_CHECKPOINTER( compid, 2 );
229 #endif
230
231
232
233 std::string name( app_name );
234
235 if( context.appIdMap.find( name ) != context.appIdMap.end() )
236 {
237 std::cout << " application " << name << " already registered \n";
238 return moab::MB_FAILURE;
239 }
240
241
242
243
244 *pid = apphash( app_name, *compid );
245
246 context.appIdMap[name] = *pid;
247 int rankHere = 0;
248 #ifdef MOAB_HAVE_MPI
249 MPI_Comm_rank( *comm, &rankHere );
250 #endif
251 if( !rankHere )
252 std::cout << " application " << name << " with ID = " << *pid << " and external id: " << *compid
253 << " is registered now \n";
254 if( *compid <= 0 )
255 {
256 std::cout << " convention for external application is to have its id positive \n";
257 return moab::MB_FAILURE;
258 }
259
260
261 EntityHandle file_set;
262 ErrorCode rval = context.MBI->create_meshset( MESHSET_SET, file_set );MB_CHK_ERR( rval );
263
264 appData app_data;
265 app_data.file_set = file_set;
266 app_data.global_id = *compid;
267 app_data.name = name;
268
269 #ifdef MOAB_HAVE_TEMPESTREMAP
270 app_data.tempestData.remapper = nullptr;
271 app_data.tempestData.num_src_ghost_layers = 0;
272 app_data.tempestData.num_tgt_ghost_layers = 0;
273 #endif
274
275
276 app_data.num_ghost_layers = 0;
277 app_data.point_cloud = false;
278 app_data.is_fortran = false;
279 #ifdef MOAB_HAVE_TEMPESTREMAP
280 app_data.secondary_file_set = app_data.file_set;
281 #endif
282
283 #ifdef MOAB_HAVE_MPI
284 if( *comm ) app_data.pcomm = new ParallelComm( context.MBI, *comm );
285 #endif
286 context.appDatas[*pid] = app_data;
287 return moab::MB_SUCCESS;
288 }
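/*
 * Minimal usage sketch (illustrative only; the application name, component id, and communicator
 * are assumptions made for the example, not taken from this file):
 *
 *   int compid = 7, pid;
 *   iMOAB_Initialize( argc, argv );
 * #ifdef MOAB_HAVE_MPI
 *   MPI_Comm comm = MPI_COMM_WORLD;
 *   iMOAB_RegisterApplication( "OCEAN", &comm, &compid, &pid );
 * #else
 *   iMOAB_RegisterApplication( "OCEAN", &compid, &pid );
 * #endif
 *   // ... load or create a mesh, define tags, etc. ...
 *   iMOAB_DeregisterApplication( &pid );
 *   iMOAB_Finalize();
 */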
289
290 ErrCode iMOAB_RegisterApplicationFortran( const iMOAB_String app_name,
291 #ifdef MOAB_HAVE_MPI
292 int* comm,
293 #endif
294 int* compid,
295 iMOAB_AppID pid )
296 {
297 IMOAB_CHECKPOINTER( app_name, 1 );
298 #ifdef MOAB_HAVE_MPI
299 IMOAB_CHECKPOINTER( comm, 2 );
300 IMOAB_CHECKPOINTER( compid, 3 );
301 #else
302 IMOAB_CHECKPOINTER( compid, 2 );
303 #endif
304
305 ErrCode err;
306 assert( app_name != nullptr );
307 std::string name( app_name );
308
309 #ifdef MOAB_HAVE_MPI
310 MPI_Comm ccomm;
311 if( comm )
312 {
313
314
315
316 ccomm = MPI_Comm_f2c( (MPI_Fint)*comm );
317 }
318 #endif
319
320
321 err = iMOAB_RegisterApplication( app_name,
322 #ifdef MOAB_HAVE_MPI
323 &ccomm,
324 #endif
325 compid, pid );
326
327
328
329 if( moab::MB_SUCCESS == err ) context.appDatas[*pid].is_fortran = true;

330
331 return err;
332 }
333
334 ErrCode iMOAB_DeregisterApplication( iMOAB_AppID pid )
335 {
336
337
338
339 auto appIterator = context.appDatas.find( *pid );
340
341 if( appIterator == context.appDatas.end() ) return MB_FAILURE;
342
343
344 appData& data = appIterator->second;
345 int rankHere = 0;
346 #ifdef MOAB_HAVE_MPI
347 rankHere = data.pcomm->rank();
348 #endif
349 if( !rankHere )
350 std::cout << " application with ID: " << *pid << " global id: " << data.global_id << " name: " << data.name
351 << " is de-registered now \n";
352
353 EntityHandle fileSet = data.file_set;
354
355 Range fileents;
356 MB_CHK_ERR( context.MBI->get_entities_by_handle( fileSet, fileents, true ) );
357 fileents.insert( fileSet );
358 MB_CHK_ERR( context.MBI->get_entities_by_type( fileSet, MBENTITYSET, fileents ) );
359
360 #ifdef MOAB_HAVE_TEMPESTREMAP
361 if( data.tempestData.remapper ) delete data.tempestData.remapper;
362 if( data.tempestData.weightMaps.size() ) data.tempestData.weightMaps.clear();
363 #endif
364
365 #ifdef MOAB_HAVE_MPI
366
367
368
369 auto& pargs = data.pgraph;
370
371 for( auto mt = pargs.begin(); mt != pargs.end(); ++mt )
372 {
373 ParCommGraph* pgr = mt->second;
374 if( pgr != nullptr )
375 {
376 delete pgr;
377 mt->second = nullptr;
378 }
379 }
380
381 if( data.pcomm )
382 {
383 delete data.pcomm;
384 data.pcomm = nullptr;
385 }
386 #endif
387
388
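// Delete this application's entities in two passes: first everything that is not a vertex
// (including the file set itself, which was added to 'fileents' above), then only those
// vertices that are no longer referenced by remaining entities (for example, entities of
// other registered applications).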
389 Range vertices = fileents.subset_by_type( MBVERTEX );
390 Range noverts = subtract( fileents, vertices );
391
392 MB_CHK_ERR( context.MBI->delete_entities( noverts ) );
393
394 Range adj_ents_left;
395 MB_CHK_ERR( context.MBI->get_adjacencies( vertices, 1, false, adj_ents_left, Interface::UNION ) );
396 MB_CHK_ERR( context.MBI->get_adjacencies( vertices, 2, false, adj_ents_left, Interface::UNION ) );
397 MB_CHK_ERR( context.MBI->get_adjacencies( vertices, 3, false, adj_ents_left, Interface::UNION ) );
398
399 if( !adj_ents_left.empty() )
400 {
401 Range conn_verts;
402 MB_CHK_ERR( context.MBI->get_connectivity( adj_ents_left, conn_verts ) );
403 vertices = subtract( vertices, conn_verts );
404 }
405
406 MB_CHK_ERR( context.MBI->delete_entities( vertices ) );
407
408 for( auto mit = context.appIdMap.begin(); mit != context.appIdMap.end(); ++mit )
409 {
410 if( *pid == mit->second )
411 {
412 #ifdef MOAB_HAVE_MPI
413 if( data.pcomm )
414 {
415 delete data.pcomm;
416 data.pcomm = nullptr;
417 }
418 #endif
419 context.appIdMap.erase( mit );
420 break;
421 }
422 }
423
424
425 context.appDatas.erase( appIterator );
426
427 return moab::MB_SUCCESS;
428 }
429
430 ErrCode iMOAB_DeregisterApplicationFortran( iMOAB_AppID pid )
431 {
432
433 context.appDatas[*pid].is_fortran = false;
434
435
436 return iMOAB_DeregisterApplication( pid );
437 }
438
439 ErrCode iMOAB_ReadHeaderInfo( const iMOAB_String filename,
440 int* num_global_vertices,
441 int* num_global_elements,
442 int* num_dimension,
443 int* num_parts )
444 {
445 IMOAB_CHECKPOINTER( filename, 1 );
446 IMOAB_ASSERT( strlen( filename ), "Invalid filename length." );
447
448 #ifdef MOAB_HAVE_HDF5
449 std::string filen( filename );
450
451 int edges = 0;
452 int faces = 0;
453 int regions = 0;
454 if( num_global_vertices ) *num_global_vertices = 0;
455 if( num_global_elements ) *num_global_elements = 0;
456 if( num_dimension ) *num_dimension = 0;
457 if( num_parts ) *num_parts = 0;
458
459 mhdf_FileHandle file;
460 mhdf_Status status;
461 unsigned long max_id;
462 struct mhdf_FileDesc* data;
463
464 file = mhdf_openFile( filen.c_str(), 0, &max_id, -1, &status );
465
466 if( mhdf_isError( &status ) )
467 {
468 fprintf( stderr, "%s: %s\n", filename, mhdf_message( &status ) );
469 return moab::MB_FAILURE;
470 }
471
472 data = mhdf_getFileSummary( file, H5T_NATIVE_ULONG, &status,
473 1 );
474
475 if( mhdf_isError( &status ) )
476 {
477 fprintf( stderr, "%s: %s\n", filename, mhdf_message( &status ) );
478 return moab::MB_FAILURE;
479 }
480
481 if( num_dimension ) *num_dimension = data->nodes.vals_per_ent;
482 if( num_global_vertices ) *num_global_vertices = (int)data->nodes.count;
483
484 for( int i = 0; i < data->num_elem_desc; i++ )
485 {
486 struct mhdf_ElemDesc* el_desc = &( data->elems[i] );
487 struct mhdf_EntDesc* ent_d = &( el_desc->desc );
488
489 if( 0 == strcmp( el_desc->type, mhdf_EDGE_TYPE_NAME ) )
490 {
491 edges += ent_d->count;
492 }
493
494 if( 0 == strcmp( el_desc->type, mhdf_TRI_TYPE_NAME ) )
495 {
496 faces += ent_d->count;
497 }
498
499 if( 0 == strcmp( el_desc->type, mhdf_QUAD_TYPE_NAME ) )
500 {
501 faces += ent_d->count;
502 }
503
504 if( 0 == strcmp( el_desc->type, mhdf_POLYGON_TYPE_NAME ) )
505 {
506 faces += ent_d->count;
507 }
508
509 if( 0 == strcmp( el_desc->type, mhdf_TET_TYPE_NAME ) )
510 {
511 regions += ent_d->count;
512 }
513
514 if( 0 == strcmp( el_desc->type, mhdf_PYRAMID_TYPE_NAME ) )
515 {
516 regions += ent_d->count;
517 }
518
519 if( 0 == strcmp( el_desc->type, mhdf_PRISM_TYPE_NAME ) )
520 {
521 regions += ent_d->count;
522 }
523
524 if( 0 == strcmp( el_desc->type, mdhf_KNIFE_TYPE_NAME ) )
525 {
526 regions += ent_d->count;
527 }
528
529 if( 0 == strcmp( el_desc->type, mdhf_HEX_TYPE_NAME ) )
530 {
531 regions += ent_d->count;
532 }
533
534 if( 0 == strcmp( el_desc->type, mhdf_POLYHEDRON_TYPE_NAME ) )
535 {
536 regions += ent_d->count;
537 }
538
539 if( 0 == strcmp( el_desc->type, mhdf_SEPTAHEDRON_TYPE_NAME ) )
540 {
541 regions += ent_d->count;
542 }
543 }
544
545 if( num_parts ) *num_parts = data->numEntSets[0];
546
547
548 if( edges > 0 )
549 {
550 if( num_dimension ) *num_dimension = 1;
551 if( num_global_elements ) *num_global_elements = edges;
552 }
553
554 if( faces > 0 )
555 {
556 if( num_dimension ) *num_dimension = 2;
557 if( num_global_elements ) *num_global_elements = faces;
558 }
559
560 if( regions > 0 )
561 {
562 if( num_dimension ) *num_dimension = 3;
563 if( num_global_elements ) *num_global_elements = regions;
564 }
565
566 mhdf_closeFile( file, &status );
567
568 free( data );
569
570 #else
571 std::cout << filename
572 << ": Please reconfigure with HDF5. Cannot retrieve header information for file "
573 "formats other than a h5m file.\n";
574 if( num_global_vertices ) *num_global_vertices = 0;
575 if( num_global_elements ) *num_global_elements = 0;
576 if( num_dimension ) *num_dimension = 0;
577 if( num_parts ) *num_parts = 0;
578 #endif
579
580 return moab::MB_SUCCESS;
581 }
582
583 ErrCode iMOAB_LoadMesh( iMOAB_AppID pid,
584 const iMOAB_String filename,
585 const iMOAB_String read_options,
586 int* num_ghost_layers )
587 {
588 IMOAB_CHECKPOINTER( filename, 2 );
589 IMOAB_ASSERT( strlen( filename ), "Invalid filename length." );
590 IMOAB_CHECKPOINTER( num_ghost_layers, 4 );
591
592
593 std::ostringstream newopts;
594 if( read_options ) newopts << read_options;
595
596 #ifdef MOAB_HAVE_MPI
597
598 if( context.MPI_initialized )
599 {
600 if( context.worldprocs > 1 )
601 {
602 std::string opts( ( read_options ? read_options : "" ) );
603 std::string pcid( "PARALLEL_COMM=" );
604 std::size_t found = opts.find( pcid );
605
606 if( found != std::string::npos )
607 {
608 std::cerr << " cannot specify PARALLEL_COMM option, it is implicit \n";
609 return moab::MB_FAILURE;
610 }
611
612
613
614 std::string filen( filename );
615 std::string::size_type idx = filen.rfind( '.' );
616
617 if( idx != std::string::npos )
618 {
619 ParallelComm* pco = context.appDatas[*pid].pcomm;
620 std::string extension = filen.substr( idx + 1 );
621 if( ( extension == std::string( "h5m" ) ) || ( extension == std::string( "nc" ) ) )
622 newopts << ";;PARALLEL_COMM=" << pco->get_id();
623 }
624
625 if( *num_ghost_layers >= 1 )
626 {
627
628
629 std::string pcid2( "PARALLEL_GHOSTS=" );
630 std::size_t found2 = opts.find( pcid2 );
631
632 if( found2 != std::string::npos )
633 {
634 std::cout << " PARALLEL_GHOSTS option is already specified, ignore passed "
635 "number of layers \n";
636 }
637 else
638 {
639
640
641
642 newopts << ";PARALLEL_GHOSTS=3.0." << *num_ghost_layers << ".3";
643 }
644 }
645 }
646 }
647 #else
648 IMOAB_ASSERT( *num_ghost_layers == 0, "Cannot provide ghost layers in serial." );
649 #endif
650
651
652 ErrorCode rval = context.MBI->load_file( filename, &context.appDatas[*pid].file_set, newopts.str().c_str() );MB_CHK_ERR( rval );
653
654 #ifdef VERBOSE
655
656 std::ostringstream outfile;
657 #ifdef MOAB_HAVE_MPI
658 ParallelComm* pco = context.appDatas[*pid].pcomm;
659 int rank = pco->rank();
660 int nprocs = pco->size();
661 outfile << "TaskMesh_n" << nprocs << "." << rank << ".h5m";
662 #else
663 outfile << "TaskMesh_n1.0.h5m";
664 #endif
665
666
667 rval = context.MBI->write_file( outfile.str().c_str() );MB_CHK_ERR( rval );
668 #endif
669
670
671 context.appDatas[*pid].num_ghost_layers = *num_ghost_layers;
672
673
674 return iMOAB_UpdateMeshInfo( pid );
675 }
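/*
 * Illustrative call (a sketch, not taken from this file): read a partitioned h5m file with one
 * ghost layer. The options shown are typical MOAB parallel reader options, and the file name is
 * an assumption made for the example.
 *
 *   int nghost = 1;
 *   iMOAB_LoadMesh( &pid, "ocean_mesh.h5m",
 *                   "PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION;PARALLEL_RESOLVE_SHARED_ENTS",
 *                   &nghost );
 *
 * PARALLEL_COMM is appended automatically for h5m/nc files and must not appear in the options
 * passed in; PARALLEL_GHOSTS is appended when nghost >= 1 unless the caller already specified it.
 */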
676
677 static ErrCode internal_WriteMesh( iMOAB_AppID pid,
678 const iMOAB_String filename,
679 const iMOAB_String write_options,
680 bool primary_set = true )
681 {
682 IMOAB_CHECKPOINTER( filename, 2 );
683 IMOAB_ASSERT( strlen( filename ), "Invalid filename length." );
684
685 appData& data = context.appDatas[*pid];
686 EntityHandle fileSet = data.file_set;
687
688 std::ostringstream newopts;
689 #ifdef MOAB_HAVE_MPI
690 std::string write_opts( ( write_options ? write_options : "" ) );
691 std::string pcid( "PARALLEL_COMM=" );
692
693 if( write_opts.find( pcid ) != std::string::npos )
694 {
695 std::cerr << " cannot specify PARALLEL_COMM option, it is implicit \n";
696 return moab::MB_FAILURE;
697 }
698
699
700 std::string pw( "PARALLEL=WRITE_PART" );
701 if( write_opts.find( pw ) != std::string::npos )
702 {
703 ParallelComm* pco = data.pcomm;
704 newopts << "PARALLEL_COMM=" << pco->get_id() << ";";
705 }
706
707 #endif
708
709 #ifdef MOAB_HAVE_TEMPESTREMAP
710 if( !primary_set )
711 {
712 if( data.tempestData.remapper != nullptr )
713 fileSet = data.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh );
714 else if( data.file_set != data.secondary_file_set )
715 fileSet = data.secondary_file_set;
716 else
717 MB_CHK_SET_ERR( moab::MB_FAILURE, "Invalid secondary file set handle" );
718 }
719 #endif
720
721
722 if( write_options ) newopts << write_options;
723
724 std::vector< Tag > copyTagList = data.tagList;
725
726 std::string gid_name_tag( "GLOBAL_ID" );
727
728
729 if( data.tagMap.find( gid_name_tag ) == data.tagMap.end() )
730 {
731 Tag gid = context.MBI->globalId_tag();
732 copyTagList.push_back( gid );
733 }
734
735 std::string pp_name_tag( "PARALLEL_PARTITION" );
736
737
738 if( data.tagMap.find( pp_name_tag ) == data.tagMap.end() )
739 {
740 Tag ptag = nullptr;
741 context.MBI->tag_get_handle( pp_name_tag.c_str(), ptag );
742 if( ptag ) copyTagList.push_back( ptag );
743 }
744
745
746
747
748 MB_CHK_ERR( context.MBI->write_file( filename, 0, newopts.str().c_str(), &fileSet, 1 ) );
749
750 return moab::MB_SUCCESS;
751 }
752
753 ErrCode iMOAB_WriteMesh( iMOAB_AppID pid, const iMOAB_String filename, const iMOAB_String write_options )
754 {
755 return internal_WriteMesh( pid, filename, write_options );
756 }
757
758 ErrCode iMOAB_WriteLocalMesh( iMOAB_AppID pid, iMOAB_String prefix )
759 {
760 IMOAB_CHECKPOINTER( prefix, 2 );
761 IMOAB_ASSERT( strlen( prefix ), "Invalid prefix string length." );
762
763 std::ostringstream file_name;
764 int rank = 0, size = 1;
765 #ifdef MOAB_HAVE_MPI
766 ParallelComm* pcomm = context.appDatas[*pid].pcomm;
767 rank = pcomm->rank();
768 size = pcomm->size();
769 #endif
770 file_name << prefix << "_" << size << "_" << rank << ".h5m";
771
772 ErrorCode rval = context.MBI->write_file( file_name.str().c_str(), 0, 0, &context.appDatas[*pid].file_set, 1 );MB_CHK_ERR( rval );
773
774 return moab::MB_SUCCESS;
775 }
776
777 ErrCode iMOAB_UpdateMeshInfo( iMOAB_AppID pid )
778 {
779
780 appData& data = context.appDatas[*pid];
781 EntityHandle fileSet = data.file_set;
782
783
784 data.all_verts.clear();
785 data.primary_elems.clear();
786 data.local_verts.clear();
787 data.owned_verts.clear();
788 data.ghost_vertices.clear();
789 data.owned_elems.clear();
790 data.ghost_elems.clear();
791 data.mat_sets.clear();
792 data.neu_sets.clear();
793 data.diri_sets.clear();
794
795
796 ErrorCode rval = context.MBI->get_entities_by_type( fileSet, MBVERTEX, data.all_verts, true );MB_CHK_ERR( rval );
797
798
799 data.dimension = 3;
800 rval = context.MBI->get_entities_by_dimension( fileSet, data.dimension, data.primary_elems, true );MB_CHK_ERR( rval );
801
802 if( data.primary_elems.empty() )
803 {
804
805 data.dimension = 2;
806 rval = context.MBI->get_entities_by_dimension( fileSet, data.dimension, data.primary_elems, true );MB_CHK_ERR( rval );
807
808 if( data.primary_elems.empty() )
809 {
810
811 data.dimension = 1;
812 rval = context.MBI->get_entities_by_dimension( fileSet, data.dimension, data.primary_elems, true );MB_CHK_ERR( rval );
813
814 if( data.primary_elems.empty() )
815 {
816
817 data.dimension = 0;
818 }
819 }
820 }
821
822
823 data.point_cloud = ( ( data.primary_elems.size() == 0 && data.all_verts.size() > 0 ) || data.dimension == 0 );
824
825 #ifdef MOAB_HAVE_MPI
826
827 if( context.MPI_initialized )
828 {
829 ParallelComm* pco = context.appDatas[*pid].pcomm;
830
831
832 rval = pco->filter_pstatus( data.all_verts, PSTATUS_GHOST, PSTATUS_NOT, -1, &data.local_verts );MB_CHK_ERR( rval );
833
834
835 data.ghost_vertices = subtract( data.all_verts, data.local_verts );
836
837
838 rval = pco->filter_pstatus( data.primary_elems, PSTATUS_GHOST, PSTATUS_NOT, -1, &data.owned_elems );MB_CHK_ERR( rval );
839
840 data.ghost_elems = subtract( data.primary_elems, data.owned_elems );
841
842
843
844 rval = pco->filter_pstatus( data.all_verts, PSTATUS_NOT_OWNED, PSTATUS_NOT, -1, &data.owned_verts );MB_CHK_ERR( rval );
845 int local[2], global[2];
846 local[0] = data.owned_verts.size();
847 local[1] = data.owned_elems.size();
848 MPI_Allreduce( local, global, 2, MPI_INT, MPI_SUM, pco->comm() );
849 rval = iMOAB_SetGlobalInfo( pid, &( global[0] ), &( global[1] ) );MB_CHK_ERR( rval );
850 }
851 else
852 {
853 data.local_verts = data.all_verts;
854 data.owned_elems = data.primary_elems;
855 }
856
857 #else
858
859 data.local_verts = data.all_verts;
860 data.owned_elems = data.primary_elems;
861
862 #endif
863
864
865 rval = context.MBI->get_entities_by_type_and_tag( fileSet, MBENTITYSET, &( context.material_tag ), 0, 1,
866 data.mat_sets, Interface::UNION );MB_CHK_ERR( rval );
867
868 rval = context.MBI->get_entities_by_type_and_tag( fileSet, MBENTITYSET, &( context.neumann_tag ), 0, 1,
869 data.neu_sets, Interface::UNION );MB_CHK_ERR( rval );
870
871 rval = context.MBI->get_entities_by_type_and_tag( fileSet, MBENTITYSET, &( context.dirichlet_tag ), 0, 1,
872 data.diri_sets, Interface::UNION );MB_CHK_ERR( rval );
873
874 return moab::MB_SUCCESS;
875 }
876
877 ErrCode iMOAB_GetMeshInfo( iMOAB_AppID pid,
878 int* num_visible_vertices,
879 int* num_visible_elements,
880 int* num_visible_blocks,
881 int* num_visible_surfaceBC,
882 int* num_visible_vertexBC )
883 {
884 ErrorCode rval;
885 appData& data = context.appDatas[*pid];
886 EntityHandle fileSet = data.file_set;
887
888
889
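// Each output array has three slots, filled as [0] = owned, [1] = ghost, [2] = total visible;
// for blocks and boundary-condition counts the ghost slot is reported as 0.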
890 if( num_visible_elements )
891 {
892 num_visible_elements[2] = static_cast< int >( data.primary_elems.size() );
893
894 num_visible_elements[0] = static_cast< int >( data.owned_elems.size() );
895 num_visible_elements[1] = static_cast< int >( data.ghost_elems.size() );
896 }
897 if( num_visible_vertices )
898 {
899 num_visible_vertices[2] = static_cast< int >( data.all_verts.size() );
900 num_visible_vertices[1] = static_cast< int >( data.ghost_vertices.size() );
901
902 num_visible_vertices[0] = num_visible_vertices[2] - num_visible_vertices[1];
903 }
904
905 if( num_visible_blocks )
906 {
907 rval = context.MBI->get_entities_by_type_and_tag( fileSet, MBENTITYSET, &( context.material_tag ), 0, 1,
908 data.mat_sets, Interface::UNION );MB_CHK_ERR( rval );
909
910 num_visible_blocks[2] = data.mat_sets.size();
911 num_visible_blocks[0] = num_visible_blocks[2];
912 num_visible_blocks[1] = 0;
913 }
914
915 if( num_visible_surfaceBC )
916 {
917 rval = context.MBI->get_entities_by_type_and_tag( fileSet, MBENTITYSET, &( context.neumann_tag ), 0, 1,
918 data.neu_sets, Interface::UNION );MB_CHK_ERR( rval );
919
920 num_visible_surfaceBC[2] = 0;
921
922
923 int numNeuSets = (int)data.neu_sets.size();
924
925 for( int i = 0; i < numNeuSets; i++ )
926 {
927 Range subents;
928 EntityHandle nset = data.neu_sets[i];
929 rval = context.MBI->get_entities_by_dimension( nset, data.dimension - 1, subents );MB_CHK_ERR( rval );
930
931 for( Range::iterator it = subents.begin(); it != subents.end(); ++it )
932 {
933 EntityHandle subent = *it;
934 Range adjPrimaryEnts;
935 rval = context.MBI->get_adjacencies( &subent, 1, data.dimension, false, adjPrimaryEnts );MB_CHK_ERR( rval );
936
937 num_visible_surfaceBC[2] += (int)adjPrimaryEnts.size();
938 }
939 }
940
941 num_visible_surfaceBC[0] = num_visible_surfaceBC[2];
942 num_visible_surfaceBC[1] = 0;
943 }
944
945 if( num_visible_vertexBC )
946 {
947 rval = context.MBI->get_entities_by_type_and_tag( fileSet, MBENTITYSET, &( context.dirichlet_tag ), 0, 1,
948 data.diri_sets, Interface::UNION );MB_CHK_ERR( rval );
949
950 num_visible_vertexBC[2] = 0;
951 int numDiriSets = (int)data.diri_sets.size();
952
953 for( int i = 0; i < numDiriSets; i++ )
954 {
955 Range verts;
956 EntityHandle diset = data.diri_sets[i];
957 rval = context.MBI->get_entities_by_dimension( diset, 0, verts );MB_CHK_ERR( rval );
958
959 num_visible_vertexBC[2] += (int)verts.size();
960 }
961
962 num_visible_vertexBC[0] = num_visible_vertexBC[2];
963 num_visible_vertexBC[1] = 0;
964 }
965
966 return moab::MB_SUCCESS;
967 }
968
969 ErrCode iMOAB_GetVertexID( iMOAB_AppID pid, int* vertices_length, iMOAB_GlobalID* global_vertex_ID )
970 {
971 IMOAB_CHECKPOINTER( vertices_length, 2 );
972 IMOAB_CHECKPOINTER( global_vertex_ID, 3 );
973
974 const Range& verts = context.appDatas[*pid].all_verts;
975
976 IMOAB_ASSERT( *vertices_length == static_cast< int >( verts.size() ), "Invalid vertices length provided" );
977
978
979 return context.MBI->tag_get_data( context.globalID_tag, verts, global_vertex_ID );
980 }
981
982 ErrCode iMOAB_GetVertexOwnership( iMOAB_AppID pid, int* vertices_length, int* visible_global_rank_ID )
983 {
984 assert( vertices_length && *vertices_length );
985 assert( visible_global_rank_ID );
986
987 Range& verts = context.appDatas[*pid].all_verts;
988 int i = 0;
989 #ifdef MOAB_HAVE_MPI
990 ParallelComm* pco = context.appDatas[*pid].pcomm;
991
992 for( Range::iterator vit = verts.begin(); vit != verts.end(); vit++, i++ )
993 {
994 ErrorCode rval = pco->get_owner( *vit, visible_global_rank_ID[i] );MB_CHK_ERR( rval );
995 }
996
997 if( i != *vertices_length )
998 {
999 return moab::MB_FAILURE;
1000 }
1001
1002 #else
1003
1004
1005 if( (int)verts.size() != *vertices_length )
1006 {
1007 return moab::MB_FAILURE;
1008 }
1009
1010 for( Range::iterator vit = verts.begin(); vit != verts.end(); vit++, i++ )
1011 {
1012 visible_global_rank_ID[i] = 0;
1013 }
1014
1015 #endif
1016
1017 return moab::MB_SUCCESS;
1018 }
1019
1020 ErrCode iMOAB_GetVisibleVerticesCoordinates( iMOAB_AppID pid, int* coords_length, double* coordinates )
1021 {
1022 Range& verts = context.appDatas[*pid].all_verts;
1023
1024
1025 if( *coords_length != 3 * (int)verts.size() )
1026 {
1027 return moab::MB_FAILURE;
1028 }
1029
1030 ErrorCode rval = context.MBI->get_coords( verts, coordinates );MB_CHK_ERR( rval );
1031
1032 return moab::MB_SUCCESS;
1033 }
1034
1035 ErrCode iMOAB_GetBlockID( iMOAB_AppID pid, int* block_length, iMOAB_GlobalID* global_block_IDs )
1036 {
1037
1038
1039 Range& matSets = context.appDatas[*pid].mat_sets;
1040
1041 if( *block_length != (int)matSets.size() )
1042 {
1043 return moab::MB_FAILURE;
1044 }
1045
1046
1047 ErrorCode rval = context.MBI->tag_get_data( context.material_tag, matSets, global_block_IDs );MB_CHK_ERR( rval );
1048
1049
1050 std::map< int, int >& matIdx = context.appDatas[*pid].matIndex;
1051 for( unsigned i = 0; i < matSets.size(); i++ )
1052 {
1053 matIdx[global_block_IDs[i]] = i;
1054 }
1055
1056 return moab::MB_SUCCESS;
1057 }
1058
1059 ErrCode iMOAB_GetBlockInfo( iMOAB_AppID pid,
1060 iMOAB_GlobalID* global_block_ID,
1061 int* vertices_per_element,
1062 int* num_elements_in_block )
1063 {
1064 assert( global_block_ID );
1065
1066 std::map< int, int >& matMap = context.appDatas[*pid].matIndex;
1067 std::map< int, int >::iterator it = matMap.find( *global_block_ID );
1068
1069 if( it == matMap.end() )
1070 {
1071 return moab::MB_FAILURE;
1072 }
1073
1074 int blockIndex = matMap[*global_block_ID];
1075 EntityHandle matMeshSet = context.appDatas[*pid].mat_sets[blockIndex];
1076 Range blo_elems;
1077 ErrorCode rval = context.MBI->get_entities_by_handle( matMeshSet, blo_elems );
1078
1079 if( MB_SUCCESS != rval || blo_elems.empty() )
1080 {
1081 return moab::MB_FAILURE;
1082 }
1083
1084 EntityType type = context.MBI->type_from_handle( blo_elems[0] );
1085
1086 if( !blo_elems.all_of_type( type ) )
1087 {
1088 return moab::MB_FAILURE;
1089 }
1090
1091 const EntityHandle* conn = nullptr;
1092 int num_verts = 0;
1093 rval = context.MBI->get_connectivity( blo_elems[0], conn, num_verts );MB_CHK_ERR( rval );
1094
1095 *vertices_per_element = num_verts;
1096 *num_elements_in_block = (int)blo_elems.size();
1097
1098 return moab::MB_SUCCESS;
1099 }
1100
1101 ErrCode iMOAB_GetVisibleElementsInfo( iMOAB_AppID pid,
1102 int* num_visible_elements,
1103 iMOAB_GlobalID* element_global_IDs,
1104 int* ranks,
1105 iMOAB_GlobalID* block_IDs )
1106 {
1107 appData& data = context.appDatas[*pid];
1108 #ifdef MOAB_HAVE_MPI
1109 ParallelComm* pco = context.appDatas[*pid].pcomm;
1110 #endif
1111
1112 ErrorCode rval = context.MBI->tag_get_data( context.globalID_tag, data.primary_elems, element_global_IDs );MB_CHK_ERR( rval );
1113
1114 int i = 0;
1115
1116 for( Range::iterator eit = data.primary_elems.begin(); eit != data.primary_elems.end(); ++eit, ++i )
1117 {
1118 #ifdef MOAB_HAVE_MPI
1119 rval = pco->get_owner( *eit, ranks[i] );MB_CHK_ERR( rval );
1120
1121 #else
1122
1123 ranks[i] = 0;
1124 #endif
1125 }
1126
1127 for( Range::iterator mit = data.mat_sets.begin(); mit != data.mat_sets.end(); ++mit )
1128 {
1129 EntityHandle matMeshSet = *mit;
1130 Range elems;
1131 rval = context.MBI->get_entities_by_handle( matMeshSet, elems );MB_CHK_ERR( rval );
1132
1133 int valMatTag;
1134 rval = context.MBI->tag_get_data( context.material_tag, &matMeshSet, 1, &valMatTag );MB_CHK_ERR( rval );
1135
1136 for( Range::iterator eit = elems.begin(); eit != elems.end(); ++eit )
1137 {
1138 EntityHandle eh = *eit;
1139 int index = data.primary_elems.index( eh );
1140
1141 if( -1 == index )
1142 {
1143 return moab::MB_FAILURE;
1144 }
1145
1146 if( -1 >= *num_visible_elements )
1147 {
1148 return moab::MB_FAILURE;
1149 }
1150
1151 block_IDs[index] = valMatTag;
1152 }
1153 }
1154
1155 return moab::MB_SUCCESS;
1156 }
1157
1158 ErrCode iMOAB_GetBlockElementConnectivities( iMOAB_AppID pid,
1159 iMOAB_GlobalID* global_block_ID,
1160 int* connectivity_length,
1161 int* element_connectivity )
1162 {
1163 assert( global_block_ID );
1164 assert( connectivity_length );
1165
1166 appData& data = context.appDatas[*pid];
1167 std::map< int, int >& matMap = data.matIndex;
1168 std::map< int, int >::iterator it = matMap.find( *global_block_ID );
1169
1170 if( it == matMap.end() )
1171 {
1172 return moab::MB_FAILURE;
1173 }
1174
1175 int blockIndex = matMap[*global_block_ID];
1176 EntityHandle matMeshSet = data.mat_sets[blockIndex];
1177 std::vector< EntityHandle > elems;
1178
1179 ErrorCode rval = context.MBI->get_entities_by_handle( matMeshSet, elems );MB_CHK_ERR( rval );
1180
1181 if( elems.empty() )
1182 {
1183 return moab::MB_FAILURE;
1184 }
1185
1186 std::vector< EntityHandle > vconnect;
1187 rval = context.MBI->get_connectivity( &elems[0], elems.size(), vconnect );MB_CHK_ERR( rval );
1188
1189 if( *connectivity_length != (int)vconnect.size() )
1190 {
1191 return moab::MB_FAILURE;
1192 }
1193
1194 for( int i = 0; i < *connectivity_length; i++ )
1195 {
1196 int inx = data.all_verts.index( vconnect[i] );
1197
1198 if( -1 == inx )
1199 {
1200 return moab::MB_FAILURE;
1201 }
1202
1203 element_connectivity[i] = inx;
1204 }
1205
1206 return moab::MB_SUCCESS;
1207 }
1208
1209 ErrCode iMOAB_GetElementConnectivity( iMOAB_AppID pid,
1210 iMOAB_LocalID* elem_index,
1211 int* connectivity_length,
1212 int* element_connectivity )
1213 {
1214 assert( elem_index );
1215 assert( connectivity_length );
1216
1217 appData& data = context.appDatas[*pid];
1218 assert( ( *elem_index >= 0 ) && ( *elem_index < (int)data.primary_elems.size() ) );
1219
1220 int num_nodes;
1221 const EntityHandle* conn;
1222
1223 EntityHandle eh = data.primary_elems[*elem_index];
1224
1225 ErrorCode rval = context.MBI->get_connectivity( eh, conn, num_nodes );MB_CHK_ERR( rval );
1226
1227 if( *connectivity_length < num_nodes )
1228 {
1229 return moab::MB_FAILURE;
1230 }
1231
1232 for( int i = 0; i < num_nodes; i++ )
1233 {
1234 int index = data.all_verts.index( conn[i] );
1235
1236 if( -1 == index )
1237 {
1238 return moab::MB_FAILURE;
1239 }
1240
1241 element_connectivity[i] = index;
1242 }
1243
1244 *connectivity_length = num_nodes;
1245
1246 return moab::MB_SUCCESS;
1247 }
1248
1249 ErrCode iMOAB_GetElementOwnership( iMOAB_AppID pid,
1250 iMOAB_GlobalID* global_block_ID,
1251 int* num_elements_in_block,
1252 int* element_ownership )
1253 {
1254 assert( global_block_ID );
1255 assert( num_elements_in_block );
1256
1257 std::map< int, int >& matMap = context.appDatas[*pid].matIndex;
1258
1259 std::map< int, int >::iterator it = matMap.find( *global_block_ID );
1260
1261 if( it == matMap.end() )
1262 {
1263 return moab::MB_FAILURE;
1264 }
1265
1266 int blockIndex = matMap[*global_block_ID];
1267 EntityHandle matMeshSet = context.appDatas[*pid].mat_sets[blockIndex];
1268 Range elems;
1269
1270 ErrorCode rval = context.MBI->get_entities_by_handle( matMeshSet, elems );MB_CHK_ERR( rval );
1271
1272 if( elems.empty() )
1273 {
1274 return moab::MB_FAILURE;
1275 }
1276
1277 if( *num_elements_in_block != (int)elems.size() )
1278 {
1279 return moab::MB_FAILURE;
1280 }
1281
1282 int i = 0;
1283 #ifdef MOAB_HAVE_MPI
1284 ParallelComm* pco = context.appDatas[*pid].pcomm;
1285 #endif
1286
1287 for( Range::iterator vit = elems.begin(); vit != elems.end(); vit++, i++ )
1288 {
1289 #ifdef MOAB_HAVE_MPI
1290 rval = pco->get_owner( *vit, element_ownership[i] );MB_CHK_ERR( rval );
1291 #else
1292 element_ownership[i] = 0;
1293 #endif
1294 }
1295
1296 return moab::MB_SUCCESS;
1297 }
1298
1299 ErrCode iMOAB_GetElementID( iMOAB_AppID pid,
1300 iMOAB_GlobalID* global_block_ID,
1301 int* num_elements_in_block,
1302 iMOAB_GlobalID* global_element_ID,
1303 iMOAB_LocalID* local_element_ID )
1304 {
1305 assert( global_block_ID );
1306 assert( num_elements_in_block );
1307
1308 appData& data = context.appDatas[*pid];
1309 std::map< int, int >& matMap = data.matIndex;
1310
1311 std::map< int, int >::iterator it = matMap.find( *global_block_ID );
1312
1313 if( it == matMap.end() )
1314 {
1315 return moab::MB_FAILURE;
1316 }
1317
1318 int blockIndex = matMap[*global_block_ID];
1319 EntityHandle matMeshSet = data.mat_sets[blockIndex];
1320 Range elems;
1321 ErrorCode rval = context.MBI->get_entities_by_handle( matMeshSet, elems );MB_CHK_ERR( rval );
1322
1323 if( elems.empty() )
1324 {
1325 return moab::MB_FAILURE;
1326 }
1327
1328 if( *num_elements_in_block != (int)elems.size() )
1329 {
1330 return moab::MB_FAILURE;
1331 }
1332
1333 rval = context.MBI->tag_get_data( context.globalID_tag, elems, global_element_ID );MB_CHK_ERR( rval );
1334
1335
1336 for( int i = 0; i < *num_elements_in_block; i++ )
1337 {
1338 local_element_ID[i] = data.primary_elems.index( elems[i] );
1339
1340 if( -1 == local_element_ID[i] )
1341 {
1342 return moab::MB_FAILURE;
1343 }
1344 }
1345
1346 return moab::MB_SUCCESS;
1347 }
1348
1349 ErrCode iMOAB_GetPointerToSurfaceBC( iMOAB_AppID pid,
1350 int* surface_BC_length,
1351 iMOAB_LocalID* local_element_ID,
1352 int* reference_surface_ID,
1353 int* boundary_condition_value )
1354 {
1355
1356 ErrorCode rval;
1357
1358
1359 appData& data = context.appDatas[*pid];
1360 int numNeuSets = (int)data.neu_sets.size();
1361
1362 int index = 0;
1363
1364 for( int i = 0; i < numNeuSets; i++ )
1365 {
1366 Range subents;
1367 EntityHandle nset = data.neu_sets[i];
1368 rval = context.MBI->get_entities_by_dimension( nset, data.dimension - 1, subents );MB_CHK_ERR( rval );
1369
1370 int neuVal;
1371 rval = context.MBI->tag_get_data( context.neumann_tag, &nset, 1, &neuVal );MB_CHK_ERR( rval );
1372
1373 for( Range::iterator it = subents.begin(); it != subents.end(); ++it )
1374 {
1375 EntityHandle subent = *it;
1376 Range adjPrimaryEnts;
1377 rval = context.MBI->get_adjacencies( &subent, 1, data.dimension, false, adjPrimaryEnts );MB_CHK_ERR( rval );
1378
1379
1380
1381 for( Range::iterator pit = adjPrimaryEnts.begin(); pit != adjPrimaryEnts.end(); pit++ )
1382 {
1383 EntityHandle primaryEnt = *pit;
1384
1385
1390
1391
1392 local_element_ID[index] = data.primary_elems.index( primaryEnt );
1393
1394 if( -1 == local_element_ID[index] )
1395 {
1396 return moab::MB_FAILURE;
1397 }
1398
1399 int side_number, sense, offset;
1400 rval = context.MBI->side_number( primaryEnt, subent, side_number, sense, offset );MB_CHK_ERR( rval );
1401
1402 reference_surface_ID[index] = side_number + 1;
1403 boundary_condition_value[index] = neuVal;
1404 index++;
1405 }
1406 }
1407 }
1408
1409 if( index != *surface_BC_length )
1410 {
1411 return moab::MB_FAILURE;
1412 }
1413
1414 return moab::MB_SUCCESS;
1415 }
1416
1417 ErrCode iMOAB_GetPointerToVertexBC( iMOAB_AppID pid,
1418 int* vertex_BC_length,
1419 iMOAB_LocalID* local_vertex_ID,
1420 int* boundary_condition_value )
1421 {
1422 ErrorCode rval;
1423
1424
1425 appData& data = context.appDatas[*pid];
1426 int numDiriSets = (int)data.diri_sets.size();
1427 int index = 0;
1428
1429 for( int i = 0; i < numDiriSets; i++ )
1430 {
1431 Range verts;
1432 EntityHandle diset = data.diri_sets[i];
1433 rval = context.MBI->get_entities_by_dimension( diset, 0, verts );MB_CHK_ERR( rval );
1434
1435 int diriVal;
1436 rval = context.MBI->tag_get_data( context.dirichlet_tag, &diset, 1, &diriVal );MB_CHK_ERR( rval );
1437
1438 for( Range::iterator vit = verts.begin(); vit != verts.end(); ++vit )
1439 {
1440 EntityHandle vt = *vit;
1441
1446 local_vertex_ID[index] = data.all_verts.index( vt );
1447
1448 if( -1 == local_vertex_ID[index] )
1449 {
1450 return moab::MB_FAILURE;
1451 }
1452
1453 boundary_condition_value[index] = diriVal;
1454 index++;
1455 }
1456 }
1457
1458 if( *vertex_BC_length != index )
1459 {
1460 return moab::MB_FAILURE;
1461 }
1462
1463 return moab::MB_SUCCESS;
1464 }
1465
1466
1467 static void split_tag_names( std::string input_names,
1468 std::string& separator,
1469 std::vector< std::string >& list_tag_names )
1470 {
1471 size_t pos = 0;
1472 std::string token;
1473 while( ( pos = input_names.find( separator ) ) != std::string::npos )
1474 {
1475 token = input_names.substr( 0, pos );
1476 if( !token.empty() ) list_tag_names.push_back( token );
1477
1478 input_names.erase( 0, pos + separator.length() );
1479 }
1480 if( !input_names.empty() )
1481 {
1482
1483 list_tag_names.push_back( input_names );
1484 }
1485 return;
1486 }
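// Example: split_tag_names( "TAG_A:TAG_B", ":", names ) leaves names == { "TAG_A", "TAG_B" };
// empty tokens are skipped, and a trailing remainder without a separator is appended as the
// final token.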
1487
1488 ErrCode iMOAB_DefineTagStorage( iMOAB_AppID pid,
1489 const iMOAB_String tag_storage_name,
1490 int* tag_type,
1491 int* components_per_entity,
1492 int* tag_index )
1493 {
1494
1495
1496 if( *tag_type < 0 || *tag_type > 5 ) return moab::MB_FAILURE;
1497
1498 DataType tagDataType;
1499 TagType tagType;
1500 void* defaultVal = nullptr;
1501 int* defInt = new int[*components_per_entity];
1502 double* defDouble = new double[*components_per_entity];
1503 EntityHandle* defHandle = new EntityHandle[*components_per_entity];
1504
1505 for( int i = 0; i < *components_per_entity; i++ )
1506 {
1507 defInt[i] = 0;
1508 defDouble[i] = -1e+10;
1509 defHandle[i] = static_cast< EntityHandle >( 0 );
1510 }
1511
1512 switch( *tag_type )
1513 {
1514 case 0:
1515 tagDataType = MB_TYPE_INTEGER;
1516 tagType = MB_TAG_DENSE;
1517 defaultVal = defInt;
1518 break;
1519
1520 case 1:
1521 tagDataType = MB_TYPE_DOUBLE;
1522 tagType = MB_TAG_DENSE;
1523 defaultVal = defDouble;
1524 break;
1525
1526 case 2:
1527 tagDataType = MB_TYPE_HANDLE;
1528 tagType = MB_TAG_DENSE;
1529 defaultVal = defHandle;
1530 break;
1531
1532 case 3:
1533 tagDataType = MB_TYPE_INTEGER;
1534 tagType = MB_TAG_SPARSE;
1535 defaultVal = defInt;
1536 break;
1537
1538 case 4:
1539 tagDataType = MB_TYPE_DOUBLE;
1540 tagType = MB_TAG_SPARSE;
1541 defaultVal = defDouble;
1542 break;
1543
1544 case 5:
1545 tagDataType = MB_TYPE_HANDLE;
1546 tagType = MB_TAG_SPARSE;
1547 defaultVal = defHandle;
1548 break;
1549
1550 default: {
1551 delete[] defInt;
1552 delete[] defDouble;
1553 delete[] defHandle;
1554 return moab::MB_FAILURE;
1555 }
1556 }
1557
1558
1559 std::string tag_name( tag_storage_name );
1560
1561
1562 std::vector< std::string > tagNames;
1563 std::string separator( ":" );
1564 split_tag_names( tag_name, separator, tagNames );
1565
1566 ErrorCode rval = moab::MB_SUCCESS;
1567 appData& data = context.appDatas[*pid];
1568 int already_defined_tags = 0;
1569
1570 Tag tagHandle;
1571 for( size_t i = 0; i < tagNames.size(); i++ )
1572 {
1573 rval = context.MBI->tag_get_handle( tagNames[i].c_str(), *components_per_entity, tagDataType, tagHandle,
1574 tagType | MB_TAG_EXCL | MB_TAG_CREAT, defaultVal );
1575
1576 if( MB_ALREADY_ALLOCATED == rval )
1577 {
1578 std::map< std::string, Tag >& mTags = data.tagMap;
1579 std::map< std::string, Tag >::iterator mit = mTags.find( tagNames[i].c_str() );
1580
1581 if( mit == mTags.end() )
1582 {
1583
1584 mTags[tagNames[i]] = tagHandle;
1585
1586 *tag_index = (int)data.tagList.size();
1587 data.tagList.push_back( tagHandle );
1588 }
1589 rval = MB_SUCCESS;
1590 already_defined_tags++;
1591 }
1592 else if( MB_SUCCESS == rval )
1593 {
1594 data.tagMap[tagNames[i]] = tagHandle;
1595 *tag_index = (int)data.tagList.size();
1596 data.tagList.push_back( tagHandle );
1597 }
1598 else
1599 {
1600 rval = moab::MB_FAILURE;
1601 }
1602 }
1603
1604 int rankHere = 0;
1605 #ifdef MOAB_HAVE_MPI
1606 ParallelComm* pco = context.appDatas[*pid].pcomm;
1607 rankHere = pco->rank();
1608 #endif
1609 if( !rankHere && already_defined_tags )
1610 std::cout << " application with ID: " << *pid << " global id: " << data.global_id << " name: " << data.name
1611 << " has " << already_defined_tags << " already defined tags out of " << tagNames.size()
1612 << " tags \n";
1613 delete[] defInt;
1614 delete[] defDouble;
1615 delete[] defHandle;
1616 return rval;
1617 }
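/*
 * tag_type encodes both the MOAB data type and the storage kind:
 *   0 = dense int, 1 = dense double, 2 = dense entity handle,
 *   3 = sparse int, 4 = sparse double, 5 = sparse entity handle.
 * Illustrative sketch (the tag name is an assumption for the example): define a dense,
 * double-precision tag with one value per entity; several tags can be defined in one call by
 * joining their names with ':'.
 *
 *   int tag_type = 1, ncomp = 1, tag_index;
 *   iMOAB_DefineTagStorage( &pid, "a2oTbot", &tag_type, &ncomp, &tag_index );
 */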
1618
1619 ErrCode iMOAB_SetIntTagStorage( iMOAB_AppID pid,
1620 const iMOAB_String tag_storage_name,
1621 int* num_tag_storage_length,
1622 int* ent_type,
1623 int* tag_storage_data )
1624 {
1625 std::string tag_name( tag_storage_name );
1626
1627
1628 appData& data = context.appDatas[*pid];
1629
1630 if( data.tagMap.find( tag_name ) == data.tagMap.end() ) return moab::MB_FAILURE;
1631
1632 Tag tag = data.tagMap[tag_name];
1633
1634 int tagLength = 0;
1635 MB_CHK_ERR( context.MBI->tag_get_length( tag, tagLength ) );
1636
1637 DataType dtype;
1638 MB_CHK_ERR( context.MBI->tag_get_data_type( tag, dtype ) );
1639
1640 if( dtype != MB_TYPE_INTEGER )
1641 {
1642 MB_CHK_SET_ERR( moab::MB_FAILURE, "The tag is not of integer type." );
1643 }
1644
1645
1646
1647 Range* ents_to_set = ( *ent_type == 0 ? &data.all_verts : &data.primary_elems );
1648 int nents_to_be_set = *num_tag_storage_length / tagLength;
1649
1650 if( nents_to_be_set > (int)ents_to_set->size() )
1651 {
1652 return moab::MB_FAILURE;
1653 }
1654
1655
1656 MB_CHK_ERR( context.MBI->tag_set_data( tag, *ents_to_set, tag_storage_data ) );
1657
1658 return moab::MB_SUCCESS;
1659 }
1660
1661 ErrCode iMOAB_GetIntTagStorage( iMOAB_AppID pid,
1662 const iMOAB_String tag_storage_name,
1663 int* num_tag_storage_length,
1664 int* ent_type,
1665 int* tag_storage_data )
1666 {
1667 ErrorCode rval;
1668 std::string tag_name( tag_storage_name );
1669
1670 appData& data = context.appDatas[*pid];
1671
1672 if( data.tagMap.find( tag_name ) == data.tagMap.end() )
1673 {
1674 return moab::MB_FAILURE;
1675 }
1676
1677 Tag tag = data.tagMap[tag_name];
1678
1679 int tagLength = 0;
1680 MB_CHK_ERR( context.MBI->tag_get_length( tag, tagLength ) );
1681
1682 DataType dtype;
1683 MB_CHK_ERR( context.MBI->tag_get_data_type( tag, dtype ) );
1684
1685 if( dtype != MB_TYPE_INTEGER )
1686 {
1687 MB_CHK_SET_ERR( moab::MB_FAILURE, "The tag is not of integer type." );
1688 }
1689
1690
1691
1692 Range* ents_to_get = ( *ent_type == 0 ? &data.all_verts : &data.primary_elems );
1693 int nents_to_get = *num_tag_storage_length / tagLength;
1694
1695 if( nents_to_get > (int)ents_to_get->size() )
1696 {
1697 return moab::MB_FAILURE;
1698 }
1699
1700
1701 rval = context.MBI->tag_get_data( tag, *ents_to_get, tag_storage_data );MB_CHK_ERR( rval );
1702
1703 return moab::MB_SUCCESS;
1704 }
1705
1706 ErrCode iMOAB_SetDoubleTagStorage( iMOAB_AppID pid,
1707 const iMOAB_String tag_storage_names,
1708 int* num_tag_storage_length,
1709 int* ent_type,
1710 double* tag_storage_data )
1711 {
1712 ErrorCode rval;
1713 std::string tag_names( tag_storage_names );
1714
1715 std::vector< std::string > tagNames;
1716 std::vector< Tag > tagHandles;
1717 std::string separator( ":" );
1718 split_tag_names( tag_names, separator, tagNames );
1719
1720 appData& data = context.appDatas[*pid];
1721
1722
1723 Range* ents_to_set = ( *ent_type == 0 ? &data.all_verts : &data.primary_elems );
1724
1725 int nents_to_be_set = (int)( *ents_to_set ).size();
1726 int position = 0;
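// tag_storage_data is expected tag-by-tag: all values for the first tag over every entity in
// ents_to_set, immediately followed by all values for the second tag, and so on; 'position'
// tracks the start of the current tag's block.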
1727 for( size_t i = 0; i < tagNames.size(); i++ )
1728 {
1729 if( data.tagMap.find( tagNames[i] ) == data.tagMap.end() )
1730 {
1731 return moab::MB_FAILURE;
1732 }
1733
1734 Tag tag = data.tagMap[tagNames[i]];
1735
1736 int tagLength = 0;
1737 rval = context.MBI->tag_get_length( tag, tagLength );MB_CHK_ERR( rval );
1738
1739 DataType dtype;
1740 rval = context.MBI->tag_get_data_type( tag, dtype );MB_CHK_ERR( rval );
1741
1742 if( dtype != MB_TYPE_DOUBLE )
1743 {
1744 return moab::MB_FAILURE;
1745 }
1746
1747
1748 if( position + tagLength * nents_to_be_set > *num_tag_storage_length )
1749 return moab::MB_FAILURE;
1750
1751 rval = context.MBI->tag_set_data( tag, *ents_to_set, &tag_storage_data[position] );MB_CHK_ERR( rval );
1752
1753 position = position + tagLength * nents_to_be_set;
1754 }
1755 return moab::MB_SUCCESS;
1756 }
1757
1758 ErrCode iMOAB_SetDoubleTagStorageWithGid( iMOAB_AppID pid,
1759 const iMOAB_String tag_storage_names,
1760 int* num_tag_storage_length,
1761 int* ent_type,
1762 double* tag_storage_data,
1763 int* globalIds )
1764 {
1765 ErrorCode rval;
1766 std::string tag_names( tag_storage_names );
1767
1768 std::vector< std::string > tagNames;
1769 std::vector< Tag > tagHandles;
1770 std::string separator( ":" );
1771 split_tag_names( tag_names, separator, tagNames );
1772
1773 appData& data = context.appDatas[*pid];
1774
1775
1776 Range* ents_to_set = ( *ent_type == 0 ? &data.all_verts : &data.primary_elems );
1777 int nents_to_be_set = (int)( *ents_to_set ).size();
1778
1779 Tag gidTag = context.MBI->globalId_tag();
1780 std::vector< int > gids;
1781 gids.resize( nents_to_be_set );
1782 rval = context.MBI->tag_get_data( gidTag, *ents_to_set, &gids[0] );MB_CHK_ERR( rval );
1783
1784
1785
1786
1787 std::map< int, EntityHandle > eh_by_gid;
1788 int i = 0;
1789 for( Range::iterator it = ents_to_set->begin(); it != ents_to_set->end(); ++it, ++i )
1790 {
1791 eh_by_gid[gids[i]] = *it;
1792 }
1793
1794 std::vector< int > tagLengths( tagNames.size() );
1795 std::vector< Tag > tagList;
1796 size_t total_tag_len = 0;
1797 for( size_t i = 0; i < tagNames.size(); i++ )
1798 {
1799 if( data.tagMap.find( tagNames[i] ) == data.tagMap.end() )
1800 {
1801 MB_SET_ERR( moab::MB_FAILURE, "tag missing" );
1802 }
1803
1804 Tag tag = data.tagMap[tagNames[i]];
1805 tagList.push_back( tag );
1806
1807 int tagLength = 0;
1808 rval = context.MBI->tag_get_length( tag, tagLength );MB_CHK_ERR( rval );
1809
1810 total_tag_len += tagLength;
1811 tagLengths[i] = tagLength;
1812 DataType dtype;
1813 rval = context.MBI->tag_get_data_type( tag, dtype );MB_CHK_ERR( rval );
1814
1815 if( dtype != MB_TYPE_DOUBLE )
1816 {
1817 MB_SET_ERR( moab::MB_FAILURE, "tag not double type" );
1818 }
1819 }
1820 bool serial = true;
1821 #ifdef MOAB_HAVE_MPI
1822 ParallelComm* pco = context.appDatas[*pid].pcomm;
1823 unsigned num_procs = pco->size();
1824 if( num_procs > 1 ) serial = false;
1825 #endif
1826
1827 if( serial )
1828 {
1829
1830
1831
1832
1833 for( int i = 0; i < nents_to_be_set; i++ )
1834 {
1835 int gid = globalIds[i];
1836 std::map< int, EntityHandle >::iterator mapIt = eh_by_gid.find( gid );
1837 if( mapIt == eh_by_gid.end() ) continue;
1838 EntityHandle eh = mapIt->second;
1839
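// The input is laid out tag-by-tag: all entries for tag j form one contiguous block of
// nents_to_be_set * tagLengths[j] doubles. The first increment below jumps to entity i's slice
// inside tag j's block; the second advances past the remainder of that block, so the next
// iteration starts at tag j+1.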
1840 int indexInTagValues = 0;
1841 for( size_t j = 0; j < tagList.size(); j++ )
1842 {
1843 indexInTagValues += i * tagLengths[j];
1844 rval = context.MBI->tag_set_data( tagList[j], &eh, 1, &tag_storage_data[indexInTagValues] );MB_CHK_ERR( rval );
1845
1846 indexInTagValues += ( nents_to_be_set - i ) * tagLengths[j];
1847 }
1848 }
1849 }
1850 #ifdef MOAB_HAVE_MPI
1851 else
1852 {
1853
1854
1855
1856
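// Parallel path: a rendezvous through the crystal router. Each locally supplied value is routed
// to a "home" process chosen as (global id) % num_procs; each process also sends requests for
// the global ids of the entities it owns to the same home processes. Matches are made on the
// home process by walking the two sorted tuple lists, and the matched values are routed back to
// the owners, where they are finally written into the tags.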
1857 int nbLocalVals = *num_tag_storage_length / ( (int)tagNames.size() );
1858
1859
1860 TupleList TLsend;
1861 TLsend.initialize( 2, 0, 0, total_tag_len, nbLocalVals );
1862 TLsend.enableWriteAccess();
1863
1864
1865 int indexInRealLocal = 0;
1866 for( int i = 0; i < nbLocalVals; i++ )
1867 {
1868
1869 int marker = globalIds[i];
1870 int to_proc = marker % num_procs;
1871 int n = TLsend.get_n();
1872 TLsend.vi_wr[2 * n] = to_proc;
1873 TLsend.vi_wr[2 * n + 1] = marker;
1874 int indexInTagValues = 0;
1875
1876 for( size_t j = 0; j < tagList.size(); j++ )
1877 {
1878 indexInTagValues += i * tagLengths[j];
1879 for( int k = 0; k < tagLengths[j]; k++ )
1880 {
1881 TLsend.vr_wr[indexInRealLocal++] = tag_storage_data[indexInTagValues + k];
1882 }
1883 indexInTagValues += ( nbLocalVals - i ) * tagLengths[j];
1884 }
1885 TLsend.inc_n();
1886 }
1887
1888
1889
1890
1891 ( pco->proc_config().crystal_router() )->gs_transfer( 1, TLsend, 0 );
1892 TupleList TLreq;
1893 TLreq.initialize( 2, 0, 0, 0, nents_to_be_set );
1894 TLreq.enableWriteAccess();
1895 for( int i = 0; i < nents_to_be_set; i++ )
1896 {
1897
1898 int marker = gids[i];
1899 int to_proc = marker % num_procs;
1900 int n = TLreq.get_n();
1901 TLreq.vi_wr[2 * n] = to_proc;
1902 TLreq.vi_wr[2 * n + 1] = marker;
1903
1904 TLreq.inc_n();
1905 }
1906 ( pco->proc_config().crystal_router() )->gs_transfer( 1, TLreq, 0 );
1907
1908
1909
1910
1911
1912 moab::TupleList::buffer sort_buffer;
1913 sort_buffer.buffer_init( TLreq.get_n() );
1914 TLreq.sort( 1, &sort_buffer );
1915 sort_buffer.reset();
1916 sort_buffer.buffer_init( TLsend.get_n() );
1917 TLsend.sort( 1, &sort_buffer );
1918 sort_buffer.reset();
1919
1920
1921
1922
1923
1924
1925 TupleList TLBack;
1926 TLBack.initialize( 3, 0, 0, total_tag_len, 0 );
1927 TLBack.enableWriteAccess();
1928
1929 int n1 = TLreq.get_n();
1930 int n2 = TLsend.get_n();
1931
1932 int indexInTLreq = 0;
1933 int indexInTLsend = 0;
1934 if( n1 > 0 && n2 > 0 )
1935 {
1936
1937 while( indexInTLreq < n1 && indexInTLsend < n2 )
1938 {
1939 int currentValue1 = TLreq.vi_rd[2 * indexInTLreq + 1];
1940 int currentValue2 = TLsend.vi_rd[2 * indexInTLsend + 1];
1941 if( currentValue1 < currentValue2 )
1942 {
1943
1944
1945
1946 indexInTLreq++;
1947 continue;
1948 }
1949 if( currentValue1 > currentValue2 )
1950 {
1951
1952 indexInTLsend++;
1953 continue;
1954 }
1955 int size1 = 1;
1956 int size2 = 1;
1957 while( indexInTLreq + size1 < n1 && currentValue1 == TLreq.vi_rd[2 * ( indexInTLreq + size1 ) + 1] )
1958 size1++;
1959 while( indexInTLsend + size2 < n2 && currentValue2 == TLsend.vi_rd[2 * ( indexInTLsend + size2 ) + 1] )
1960 size2++;
1961
1962 for( int i1 = 0; i1 < size1; i1++ )
1963 {
1964 for( int i2 = 0; i2 < size2; i2++ )
1965 {
1966
1967 int n = TLBack.get_n();
1968 TLBack.reserve();
1969 TLBack.vi_wr[3 * n] = TLreq.vi_rd[2 * ( indexInTLreq + i1 )];
1970
1971 TLBack.vi_wr[3 * n + 1] = currentValue1;
1972 TLBack.vi_wr[3 * n + 2] = TLsend.vi_rd[2 * ( indexInTLsend + i2 )];
1973
1974 for( size_t k = 0; k < total_tag_len; k++ )
1975 {
1976 TLBack.vr_rd[total_tag_len * n + k] =
1977 TLsend.vr_rd[total_tag_len * indexInTLsend + k];
1978 }
1979 }
1980 }
1981 indexInTLreq += size1;
1982 indexInTLsend += size2;
1983 }
1984 }
1985 ( pco->proc_config().crystal_router() )->gs_transfer( 1, TLBack, 0 );
1986
1987
1988 n1 = TLBack.get_n();
1989 double* ptrVal = &TLBack.vr_rd[0];
1990 for( int i = 0; i < n1; i++ )
1991 {
1992 int gid = TLBack.vi_rd[3 * i + 1];
1993 std::map< int, EntityHandle >::iterator mapIt = eh_by_gid.find( gid );
1994 if( mapIt == eh_by_gid.end() ) continue;
1995 EntityHandle eh = mapIt->second;
1996
1997
1998 for( size_t j = 0; j < tagList.size(); j++ )
1999 {
2000 rval = context.MBI->tag_set_data( tagList[j], &eh, 1, (void*)ptrVal );MB_CHK_ERR( rval );
2001
2002 ptrVal += tagLengths[j];
2003 }
2004 }
2005 }
2006 #endif
2007 return MB_SUCCESS;
2008 }
2009 ErrCode iMOAB_GetDoubleTagStorage( iMOAB_AppID pid,
2010 const iMOAB_String tag_storage_names,
2011 int* num_tag_storage_length,
2012 int* ent_type,
2013 double* tag_storage_data )
2014 {
2015 ErrorCode rval;
2016
2017 std::string tag_names( tag_storage_names );
2018
2019 std::vector< std::string > tagNames;
2020 std::vector< Tag > tagHandles;
2021 std::string separator( ":" );
2022 split_tag_names( tag_names, separator, tagNames );
2023
2024 appData& data = context.appDatas[*pid];
2025
2026
2027 Range* ents_to_get = nullptr;
2028
2029 if( *ent_type == 0 )
2030 {
2031 ents_to_get = &data.all_verts;
2032 }
2033 else if( *ent_type == 1 )
2034 {
2035 ents_to_get = &data.primary_elems;
2036 }
else
{
return moab::MB_FAILURE;
}
2037 int nents_to_get = (int)ents_to_get->size();
2038 int position = 0;
2039 for( size_t i = 0; i < tagNames.size(); i++ )
2040 {
2041 if( data.tagMap.find( tagNames[i] ) == data.tagMap.end() )
2042 {
2043 return moab::MB_FAILURE;
2044 }
2045
2046 Tag tag = data.tagMap[tagNames[i]];
2047
2048 int tagLength = 0;
2049 rval = context.MBI->tag_get_length( tag, tagLength );MB_CHK_ERR( rval );
2050
2051 DataType dtype;
2052 rval = context.MBI->tag_get_data_type( tag, dtype );MB_CHK_ERR( rval );
2053
2054 if( dtype != MB_TYPE_DOUBLE )
2055 {
2056 return moab::MB_FAILURE;
2057 }
2058
2059 if( position + nents_to_get * tagLength > *num_tag_storage_length )
2060 return moab::MB_FAILURE;
2061
2062 rval = context.MBI->tag_get_data( tag, *ents_to_get, &tag_storage_data[position] );MB_CHK_ERR( rval );
2063 position = position + nents_to_get * tagLength;
2064 }
2065
2066 return moab::MB_SUCCESS;
2067 }
2068
2069 ErrCode iMOAB_SynchronizeTags( iMOAB_AppID pid, int* num_tag, int* tag_indices, int* ent_type )
2070 {
2071 #ifdef MOAB_HAVE_MPI
2072 appData& data = context.appDatas[*pid];
2073 Range ent_exchange;
2074 std::vector< Tag > tags;
2075
2076 for( int i = 0; i < *num_tag; i++ )
2077 {
2078 if( tag_indices[i] < 0 || tag_indices[i] >= (int)data.tagList.size() )
2079 {
2080 return moab::MB_FAILURE;
2081 }
2082
2083 tags.push_back( data.tagList[tag_indices[i]] );
2084 }
2085
2086 if( *ent_type == 0 )
2087 {
2088 ent_exchange = data.all_verts;
2089 }
2090 else if( *ent_type == 1 )
2091 {
2092 ent_exchange = data.primary_elems;
2093 }
2094 else
2095 {
2096 return moab::MB_FAILURE;
2097 }
2098
2099 ParallelComm* pco = context.appDatas[*pid].pcomm;
2100
2101 ErrorCode rval = pco->exchange_tags( tags, tags, ent_exchange );MB_CHK_ERR( rval );
2102
2103 #else
2104
2105
2106
2107 int k = *pid + *num_tag + *tag_indices + *ent_type;
2108 k++;
2109 #endif
2110
2111 return moab::MB_SUCCESS;
2112 }
2113
2114 ErrCode iMOAB_ReduceTagsMax( iMOAB_AppID pid, int* tag_index, int* ent_type )
2115 {
2116 #ifdef MOAB_HAVE_MPI
2117 appData& data = context.appDatas[*pid];
2118 Range ent_exchange;
2119
2120 if( *tag_index < 0 || *tag_index >= (int)data.tagList.size() )
2121 {
2122 return moab::MB_FAILURE;
2123 }
2124
2125 Tag tagh = data.tagList[*tag_index];
2126
2127 if( *ent_type == 0 )
2128 {
2129 ent_exchange = data.all_verts;
2130 }
2131 else if( *ent_type == 1 )
2132 {
2133 ent_exchange = data.primary_elems;
2134 }
2135 else
2136 {
2137 return moab::MB_FAILURE;
2138 }
2139
2140 ParallelComm* pco = context.appDatas[*pid].pcomm;
2141
2142 ErrorCode rval = pco->reduce_tags( tagh, MPI_MAX, ent_exchange );MB_CHK_ERR( rval );
2143
2144 #else
2145
2146
2147
2148 int k = *pid + *tag_index + *ent_type;
2149 k++;
2150 #endif
2151 return moab::MB_SUCCESS;
2152 }
2153
2154 ErrCode iMOAB_GetNeighborElements( iMOAB_AppID pid,
2155 iMOAB_LocalID* local_index,
2156 int* num_adjacent_elements,
2157 iMOAB_LocalID* adjacent_element_IDs )
2158 {
2159 ErrorCode rval;
2160
2161
2162 MeshTopoUtil mtu( context.MBI );
2163 appData& data = context.appDatas[*pid];
2164 EntityHandle eh = data.primary_elems[*local_index];
2165 Range adjs;
2166 rval = mtu.get_bridge_adjacencies( eh, data.dimension - 1, data.dimension, adjs );MB_CHK_ERR( rval );
2167
2168 if( *num_adjacent_elements < (int)adjs.size() )
2169 {
2170 return moab::MB_FAILURE;
2171 }
2172
2173 *num_adjacent_elements = (int)adjs.size();
2174
2175 for( int i = 0; i < *num_adjacent_elements; i++ )
2176 {
2177 adjacent_element_IDs[i] = data.primary_elems.index( adjs[i] );
2178 }
2179
2180 return moab::MB_SUCCESS;
2181 }
2182
2183 #if 0
2184
2185 ErrCode iMOAB_GetNeighborVertices ( iMOAB_AppID pid, iMOAB_LocalID* local_vertex_ID, int* num_adjacent_vertices, iMOAB_LocalID* adjacent_vertex_IDs )
2186 {
2187 return moab::MB_SUCCESS;
2188 }
2189
2190 #endif
2191
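// Create nverts = coords_len / dim vertices from the interleaved coordinate array and
// add them to the application file set. Fails if the application already has vertices.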
2192 ErrCode iMOAB_CreateVertices( iMOAB_AppID pid, int* coords_len, int* dim, double* coordinates )
2193 {
2194 ErrorCode rval;
2195 appData& data = context.appDatas[*pid];
2196
2197 if( !data.local_verts.empty() )
2198 {
2199 return moab::MB_FAILURE;
2200 }
2201
2202 int nverts = *coords_len / *dim;
2203
2204 rval = context.MBI->create_vertices( coordinates, nverts, data.local_verts );MB_CHK_ERR( rval );
2205
2206 rval = context.MBI->add_entities( data.file_set, data.local_verts );MB_CHK_ERR( rval );
2207
2208
2209 data.all_verts.merge( data.local_verts );
2210 return moab::MB_SUCCESS;
2211 }
2212
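// Create num_elem elements of the given MOAB entity type from 1-based local vertex
// indices in 'connectivity', add them to the file set, and place them in the material
// set identified by block_ID (creating that set if it does not exist yet).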
2213 ErrCode iMOAB_CreateElements( iMOAB_AppID pid,
2214 int* num_elem,
2215 int* type,
2216 int* num_nodes_per_element,
2217 int* connectivity,
2218 int* block_ID )
2219 {
2220
2221 appData& data = context.appDatas[*pid];
2222
2223 ReadUtilIface* read_iface;
2224 ErrorCode rval = context.MBI->query_interface( read_iface );MB_CHK_ERR( rval );
2225
2226 EntityType mbtype = (EntityType)( *type );
2227 EntityHandle actual_start_handle;
2228 EntityHandle* array = nullptr;
2229 rval = read_iface->get_element_connect( *num_elem, *num_nodes_per_element, mbtype, 1, actual_start_handle, array );MB_CHK_ERR( rval );
2230
2231
2232
2233 EntityHandle firstVertex = data.local_verts[0];
2234
2235 for( int j = 0; j < *num_elem * ( *num_nodes_per_element ); j++ )
2236 {
2237 array[j] = connectivity[j] + firstVertex - 1;
2238 }
2239
2240 Range new_elems( actual_start_handle, actual_start_handle + *num_elem - 1 );
2241
2242 rval = context.MBI->add_entities( data.file_set, new_elems );MB_CHK_ERR( rval );
2243
2244 data.primary_elems.merge( new_elems );
2245
2246
2247 rval = read_iface->update_adjacencies( actual_start_handle, *num_elem, *num_nodes_per_element, array );MB_CHK_ERR( rval );
2248
2249
2250 Range sets;
2251 int set_no = *block_ID;
2252 const void* setno_ptr = &set_no;
2253 rval = context.MBI->get_entities_by_type_and_tag( data.file_set, MBENTITYSET, &context.material_tag, &setno_ptr, 1,
2254 sets );
2255 EntityHandle block_set;
2256
2257 if( MB_FAILURE == rval || sets.empty() )
2258 {
2259
2260 rval = context.MBI->create_meshset( MESHSET_SET, block_set );MB_CHK_ERR( rval );
2261
2262 rval = context.MBI->tag_set_data( context.material_tag, &block_set, 1, &set_no );MB_CHK_ERR( rval );
2263
2264
2265 rval = context.MBI->add_entities( data.file_set, &block_set, 1 );MB_CHK_ERR( rval );
2266 }
2267 else
2268 {
2269 block_set = sets[0];
2270 }
2271
2272
2273 rval = context.MBI->add_entities( block_set, new_elems );MB_CHK_ERR( rval );
2274
2275 return moab::MB_SUCCESS;
2276 }
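/*
 * Illustrative sketch (not part of the library source): how an application might use
 * the two calls above to build a single quad. 'pid' is assumed to be a valid
 * iMOAB_AppID obtained from an earlier registration call.
 *
 *   double coords[12] = { 0., 0., 0.,  1., 0., 0.,  1., 1., 0.,  0., 1., 0. };
 *   int coords_len = 12, dim = 3;
 *   iMOAB_CreateVertices( pid, &coords_len, &dim, coords );
 *
 *   int conn[4] = { 1, 2, 3, 4 };   // 1-based local vertex indices
 *   int num_elem = 1, type = 3;     // 3 == MBQUAD
 *   int nodes_per_elem = 4, block_id = 100;
 *   iMOAB_CreateElements( pid, &num_elem, &type, &nodes_per_elem, conn, &block_id );
 */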
2277
2278 ErrCode iMOAB_SetGlobalInfo( iMOAB_AppID pid, int* num_global_verts, int* num_global_elems )
2279 {
2280 appData& data = context.appDatas[*pid];
2281 data.num_global_vertices = *num_global_verts;
2282 data.num_global_elements = *num_global_elems;
2283 return moab::MB_SUCCESS;
2284 }
2285
2286 ErrCode iMOAB_GetGlobalInfo( iMOAB_AppID pid, int* num_global_verts, int* num_global_elems )
2287 {
2288 appData& data = context.appDatas[*pid];
2289 if( nullptr != num_global_verts )
2290 {
2291 *num_global_verts = data.num_global_vertices;
2292 }
2293 if( nullptr != num_global_elems )
2294 {
2295 *num_global_elems = data.num_global_elements;
2296 }
2297
2298 return moab::MB_SUCCESS;
2299 }
2300
2301 #ifdef MOAB_HAVE_MPI
2302
2303
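// Resolve which mesh entities are shared across processes. The caller supplies one
// marker value per local vertex (e.g. a global ID); a temporary dense tag
// "__sharedmarker" carries it into ParallelComm::resolve_shared_ents. The file set is
// also stamped with a PARALLEL_PARTITION tag equal to the local rank.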
2304 ErrCode iMOAB_ResolveSharedEntities( iMOAB_AppID pid, int* num_verts, int* marker )
2305 {
2306 appData& data = context.appDatas[*pid];
2307 ParallelComm* pco = context.appDatas[*pid].pcomm;
2308 EntityHandle cset = data.file_set;
2309 int dum_id = 0;
2310 ErrorCode rval;
2311 if( data.primary_elems.empty() )
2312 {
2313
2314
2315 }
2316 else
2317 {
2318
2319
2320
2321 Tag stag;
2322 rval = context.MBI->tag_get_handle( "__sharedmarker", 1, MB_TYPE_INTEGER, stag, MB_TAG_CREAT | MB_TAG_DENSE,
2323 &dum_id );MB_CHK_ERR( rval );
2324
2325 if( *num_verts > (int)data.local_verts.size() )
2326 {
2327 return moab::MB_FAILURE;
2328 }
2329
2330 rval = context.MBI->tag_set_data( stag, data.local_verts, (void*)marker );MB_CHK_ERR( rval );
2331
2332 rval = pco->resolve_shared_ents( cset, -1, -1, &stag );MB_CHK_ERR( rval );
2333
2334 rval = context.MBI->tag_delete( stag );MB_CHK_ERR( rval );
2335 }
2336
2337 Tag part_tag;
2338 dum_id = -1;
2339 rval = context.MBI->tag_get_handle( "PARALLEL_PARTITION", 1, MB_TYPE_INTEGER, part_tag,
2340 MB_TAG_CREAT | MB_TAG_SPARSE, &dum_id );
2341
2342 if( part_tag == nullptr || ( ( rval != MB_SUCCESS ) && ( rval != MB_ALREADY_ALLOCATED ) ) )
2343 {
2344         std::cout << " can't get the PARALLEL_PARTITION tag.\n";
2345 return moab::MB_FAILURE;
2346 }
2347
2348 int rank = pco->rank();
2349 rval = context.MBI->tag_set_data( part_tag, &cset, 1, &rank );MB_CHK_ERR( rval );
2350
2351 return moab::MB_SUCCESS;
2352 }
2353
2354
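// Exchange ghost cells of dimension ghost_dim across bridge_dim bridges, adding one
// layer at a time up to num_ghost_layers (with thin-layer correction between layers),
// then refresh the cached mesh info. A non-positive layer count makes this a no-op.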
2355 ErrCode iMOAB_DetermineGhostEntities( iMOAB_AppID pid, int* ghost_dim, int* num_ghost_layers, int* bridge_dim )
2356 {
2357 ErrorCode rval;
2358
2359
2360 if( num_ghost_layers && *num_ghost_layers <= 0 )
2361 {
2362 return moab::MB_SUCCESS;
2363 }
2364
2365 appData& data = context.appDatas[*pid];
2366 ParallelComm* pco = context.appDatas[*pid].pcomm;
2367
2368
2369 constexpr int addl_ents = 0;
2370 rval = pco->exchange_ghost_cells( *ghost_dim, *bridge_dim, 1,
2371 addl_ents, true, true, &data.file_set );MB_CHK_ERR( rval );
2372 for( int i = 2; i <= *num_ghost_layers; i++ )
2373 {
2374 rval = pco->correct_thin_ghost_layers();MB_CHK_ERR( rval );
2375 rval = pco->exchange_ghost_cells( *ghost_dim, *bridge_dim, i,
2376 addl_ents, true, true, &data.file_set );MB_CHK_ERR( rval );
2377 }
2378
2379
2380 data.num_ghost_layers = *num_ghost_layers;
2381
2382
2383 return iMOAB_UpdateMeshInfo( pid );
2384 }
2385
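// Sender side of mesh migration over the joint communicator. Builds a ParCommGraph
// keyed by the receiver component id (rcompid); method 0 computes a trivial partition
// from the gathered element counts, while any other value asks
// ParCommGraph::compute_partition for the split before shipping the mesh parts.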
2386 ErrCode iMOAB_SendMesh( iMOAB_AppID pid,
2387 MPI_Comm* joint_communicator,
2388 MPI_Group* receivingGroup,
2389 int* rcompid,
2390 int* method )
2391 {
2392 assert( joint_communicator != nullptr );
2393 assert( receivingGroup != nullptr );
2394 assert( rcompid != nullptr );
2395
2396 ErrorCode rval;
2397 int ierr;
2398 appData& data = context.appDatas[*pid];
2399 ParallelComm* pco = context.appDatas[*pid].pcomm;
2400
2401 MPI_Comm global = ( data.is_fortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( joint_communicator ) )
2402 : *joint_communicator );
2403 MPI_Group recvGroup =
2404 ( data.is_fortran ? MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( receivingGroup ) ) : *receivingGroup );
2405 MPI_Comm sender = pco->comm();
2406
2407 MPI_Group senderGroup;
2408 ierr = MPI_Comm_group( sender, &senderGroup );
2409 if( ierr != 0 ) return moab::MB_FAILURE;
2410
2411
2412
2413
2414 ParCommGraph* cgraph =
2415 new ParCommGraph( global, senderGroup, recvGroup, context.appDatas[*pid].global_id, *rcompid );
2416
2417
2418 context.appDatas[*pid].pgraph[*rcompid] = cgraph;
2419
2420 int sender_rank = -1;
2421 MPI_Comm_rank( sender, &sender_rank );
2422
2423
2424
2425
2426 std::vector< int > number_elems_per_part;
2427
2428
2429 Range owned = context.appDatas[*pid].owned_elems;
2430 if( owned.size() == 0 )
2431 {
2432
2433 owned = context.appDatas[*pid].local_verts;
2434
2435 }
2436
2437 if( *method == 0 )
2438 {
2439 int local_owned_elem = (int)owned.size();
2440 int size = pco->size();
2441 int rank = pco->rank();
2442 number_elems_per_part.resize( size );
2443 number_elems_per_part[rank] = local_owned_elem;
2444 #if( MPI_VERSION >= 2 )
2445
2446 ierr = MPI_Allgather( MPI_IN_PLACE, 0, MPI_DATATYPE_NULL, &number_elems_per_part[0], 1, MPI_INT, sender );
2447 #else
2448 {
2449 std::vector< int > all_tmp( size );
2450 ierr = MPI_Allgather( &number_elems_per_part[rank], 1, MPI_INT, &all_tmp[0], 1, MPI_INT, sender );
2451 number_elems_per_part = all_tmp;
2452 }
2453 #endif
2454
2455 if( ierr != 0 )
2456 {
2457 return moab::MB_FAILURE;
2458 }
2459
2460
2461
2462 rval = cgraph->compute_trivial_partition( number_elems_per_part );MB_CHK_ERR( rval );
2463
2464 rval = cgraph->send_graph( global );MB_CHK_ERR( rval );
2465 }
2466 else
2467 {
2468
2469 rval = cgraph->compute_partition( pco, owned, *method );MB_CHK_ERR( rval );
2470
2471
2472 rval = cgraph->send_graph_partition( pco, global );MB_CHK_ERR( rval );
2473 }
2474
2475 rval = cgraph->send_mesh_parts( global, pco, owned );MB_CHK_ERR( rval );
2476
2477
2478 MPI_Group_free( &senderGroup );
2479 return moab::MB_SUCCESS;
2480 }
2481
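// Receiver side of mesh migration. Receives the communication graph and mesh parts,
// merges duplicated interface vertices by GLOBAL_ID when parts arrive from several
// senders, resolves shared entities (unless the mesh is a point cloud), and finally
// refreshes the application's mesh info.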
2482 ErrCode iMOAB_ReceiveMesh( iMOAB_AppID pid, MPI_Comm* joint_communicator, MPI_Group* sendingGroup, int* scompid )
2483 {
2484 assert( joint_communicator != nullptr );
2485 assert( sendingGroup != nullptr );
2486 assert( scompid != nullptr );
2487
2488 ErrorCode rval;
2489 appData& data = context.appDatas[*pid];
2490 ParallelComm* pco = context.appDatas[*pid].pcomm;
2491 MPI_Comm receive = pco->comm();
2492 EntityHandle local_set = data.file_set;
2493
2494 MPI_Comm global = ( data.is_fortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( joint_communicator ) )
2495 : *joint_communicator );
2496 MPI_Group sendGroup =
2497 ( data.is_fortran ? MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( sendingGroup ) ) : *sendingGroup );
2498
2499
2500
2501 MPI_Group receiverGroup;
2502 int ierr = MPI_Comm_group( receive, &receiverGroup );CHK_MPI_ERR( ierr );
2503
2504
2505 ParCommGraph* cgraph =
2506 new ParCommGraph( global, sendGroup, receiverGroup, *scompid, context.appDatas[*pid].global_id );
2507
2508
2509 context.appDatas[*pid].pgraph[*scompid] = cgraph;
2510
2511 int receiver_rank = -1;
2512 MPI_Comm_rank( receive, &receiver_rank );
2513
2514
2515
2516 std::vector< int > pack_array;
2517 rval = cgraph->receive_comm_graph( global, pco, pack_array );MB_CHK_ERR( rval );
2518
2519
2520 int current_receiver = cgraph->receiver( receiver_rank );
2521
2522 std::vector< int > senders_local;
2523 size_t n = 0;
2524
2525 while( n < pack_array.size() )
2526 {
2527 if( current_receiver == pack_array[n] )
2528 {
2529 for( int j = 0; j < pack_array[n + 1]; j++ )
2530 {
2531 senders_local.push_back( pack_array[n + 2 + j] );
2532 }
2533
2534 break;
2535 }
2536
2537 n = n + 2 + pack_array[n + 1];
2538 }
2539
2540 #ifdef VERBOSE
2541 std::cout << " receiver " << current_receiver << " at rank " << receiver_rank << " will receive from "
2542 << senders_local.size() << " tasks: ";
2543
2544 for( int k = 0; k < (int)senders_local.size(); k++ )
2545 {
2546 std::cout << " " << senders_local[k];
2547 }
2548
2549 std::cout << "\n";
2550 #endif
2551
2552 if( senders_local.empty() )
2553 {
2554 std::cout << " we do not have any senders for receiver rank " << receiver_rank << "\n";
2555 }
2556 rval = cgraph->receive_mesh( global, pco, local_set, senders_local );MB_CHK_ERR( rval );
2557
2558
2559
2560 Tag idtag;
2561 rval = context.MBI->tag_get_handle( "GLOBAL_ID", idtag );MB_CHK_ERR( rval );
2562
2563
2564 Range local_ents;
2565 rval = context.MBI->get_entities_by_handle( local_set, local_ents );MB_CHK_ERR( rval );
2566
2567
2568 if( !local_ents.all_of_type( MBVERTEX ) )
2569 {
2570 if( (int)senders_local.size() >= 2 )
2571
2572 {
2573
2574 Range local_verts = local_ents.subset_by_type( MBVERTEX );
2575 Range local_elems = subtract( local_ents, local_verts );
2576
2577
2578 rval = context.MBI->remove_entities( local_set, local_verts );MB_CHK_ERR( rval );
2579
2580 #ifdef VERBOSE
2581 std::cout << "current_receiver " << current_receiver << " local verts: " << local_verts.size() << "\n";
2582 #endif
2583 MergeMesh mm( context.MBI );
2584
2585 rval = mm.merge_using_integer_tag( local_verts, idtag );MB_CHK_ERR( rval );
2586
2587 Range new_verts;
2588 rval = context.MBI->get_connectivity( local_elems, new_verts );MB_CHK_ERR( rval );
2589
2590 #ifdef VERBOSE
2591 std::cout << "after merging: new verts: " << new_verts.size() << "\n";
2592 #endif
2593 rval = context.MBI->add_entities( local_set, new_verts );MB_CHK_ERR( rval );
2594 }
2595 }
2596 else
2597 data.point_cloud = true;
2598
2599 if( !data.point_cloud )
2600 {
2601
2602 rval = pco->resolve_shared_ents( local_set, -1, -1, &idtag );MB_CHK_ERR( rval );
2603 }
2604 else
2605 {
2606
2607         Tag densePartTag = nullptr;  // initialize so the null check below is well-defined even if the lookup fails
2608 rval = context.MBI->tag_get_handle( "partition", densePartTag );
2609 if( nullptr != densePartTag && MB_SUCCESS == rval )
2610 {
2611 Range local_verts;
2612 rval = context.MBI->get_entities_by_dimension( local_set, 0, local_verts );MB_CHK_ERR( rval );
2613 std::vector< int > vals;
2614 int rank = pco->rank();
2615 vals.resize( local_verts.size(), rank );
2616 rval = context.MBI->tag_set_data( densePartTag, local_verts, &vals[0] );MB_CHK_ERR( rval );
2617 }
2618 }
2619
2620 Tag part_tag;
2621 int dum_id = -1;
2622 rval = context.MBI->tag_get_handle( "PARALLEL_PARTITION", 1, MB_TYPE_INTEGER, part_tag,
2623 MB_TAG_CREAT | MB_TAG_SPARSE, &dum_id );
2624
2625 if( part_tag == nullptr || ( ( rval != MB_SUCCESS ) && ( rval != MB_ALREADY_ALLOCATED ) ) )
2626 {
2627         std::cout << " can't get the PARALLEL_PARTITION tag.\n";
2628 return moab::MB_FAILURE;
2629 }
2630
2631 int rank = pco->rank();
2632 rval = context.MBI->tag_set_data( part_tag, &local_set, 1, &rank );MB_CHK_ERR( rval );
2633
2634
2635 int tagtype = 0;
2636 int numco = 1;
2637 int tagindex;
2638 rval = iMOAB_DefineTagStorage( pid, "GLOBAL_ID", &tagtype, &numco, &tagindex );MB_CHK_ERR( rval );
2639
2640 rval = iMOAB_UpdateMeshInfo( pid );MB_CHK_ERR( rval );
2641
2642
2643 MPI_Group_free( &receiverGroup );
2644
2645 return moab::MB_SUCCESS;
2646 }
2647
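// Send tag values to the coupled component identified by context_id. tag_storage_name
// may list several tag names separated by ':'; all of them must already exist. When a
// TempestRemap coverage mesh is present, values are taken from the covering set
// instead of the locally owned elements.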
2648 ErrCode iMOAB_SendElementTag( iMOAB_AppID pid,
2649 const iMOAB_String tag_storage_name,
2650 MPI_Comm* joint_communicator,
2651 int* context_id )
2652 {
2653 appData& data = context.appDatas[*pid];
2654 std::map< int, ParCommGraph* >::iterator mt = data.pgraph.find( *context_id );
2655 if( mt == data.pgraph.end() )
2656 {
2657         std::cout << " no ParCommGraph for context_id: " << *context_id << "; available contexts:";
2658 for( auto mit = data.pgraph.begin(); mit != data.pgraph.end(); mit++ )
2659 std::cout << " " << mit->first;
2660 std::cout << "\n";
2661 return moab::MB_FAILURE;
2662 }
2663
2664 ParCommGraph* cgraph = mt->second;
2665 ParallelComm* pco = context.appDatas[*pid].pcomm;
2666 MPI_Comm global = ( data.is_fortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( joint_communicator ) )
2667 : *joint_communicator );
2668 Range owned = ( data.point_cloud ? data.local_verts : data.owned_elems );
2669
2670 #ifdef MOAB_HAVE_TEMPESTREMAP
2671 if( data.tempestData.remapper != nullptr )
2672 {
2673 EntityHandle cover_set = data.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh );
2674 MB_CHK_ERR( context.MBI->get_entities_by_dimension( cover_set, 2, owned ) );
2675 }
2676 #else
2677
2678
2679
2680
2681
2682 EntityHandle cover_set = cgraph->get_cover_set();
2683 if( 0 != cover_set ) MB_CHK_ERR( context.MBI->get_entities_by_dimension( cover_set, 2, owned ) );
2684 #endif
2685
2686 std::string tag_name( tag_storage_name );
2687
2688
2689
2690
2691 std::vector< std::string > tagNames;
2692 std::vector< Tag > tagHandles;
2693 std::string separator( ":" );
2694 split_tag_names( tag_name, separator, tagNames );
2695 for( size_t i = 0; i < tagNames.size(); i++ )
2696 {
2697 Tag tagHandle;
2698 ErrorCode rval = context.MBI->tag_get_handle( tagNames[i].c_str(), tagHandle );
2699 if( MB_SUCCESS != rval || nullptr == tagHandle )
2700 {
2701 MB_CHK_SET_ERR( moab::MB_FAILURE,
2702 "can't get tag handle with name: " << tagNames[i].c_str() << " at index " << i );
2703 }
2704 tagHandles.push_back( tagHandle );
2705 }
2706
2707
2708
2709 MB_CHK_ERR( cgraph->send_tag_values( global, pco, owned, tagHandles ) );
2710
2711
2712 return moab::MB_SUCCESS;
2713 }
2714
2715 ErrCode iMOAB_ReceiveElementTag( iMOAB_AppID pid,
2716 const iMOAB_String tag_storage_name,
2717 MPI_Comm* joint_communicator,
2718 int* context_id )
2719 {
2720 appData& data = context.appDatas[*pid];
2721 std::map< int, ParCommGraph* >::iterator mt = data.pgraph.find( *context_id );
2722 if( mt == data.pgraph.end() ) return moab::MB_FAILURE;
2723
2724 ParCommGraph* cgraph = mt->second;
2725 ParallelComm* pco = context.appDatas[*pid].pcomm;
2726 MPI_Comm global = ( data.is_fortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( joint_communicator ) )
2727 : *joint_communicator );
2728 Range owned = ( data.point_cloud ? data.local_verts : data.owned_elems );
2729
2730
2731
2732
2733
2734 EntityHandle cover_set = cgraph->get_cover_set();
2735 if( 0 != cover_set ) MB_CHK_ERR( context.MBI->get_entities_by_dimension( cover_set, 2, owned ) );
2736
2737
2738
2739
2740 #ifdef MOAB_HAVE_TEMPESTREMAP
2741 if( data.tempestData.remapper != nullptr )
2742 {
2743 cover_set = data.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh );
2744 MB_CHK_ERR( context.MBI->get_entities_by_dimension( cover_set, 2, owned ) );
2745 }
2746 #endif
2747
2748 std::string tag_name( tag_storage_name );
2749
2750 std::vector< std::string > tagNames;
2751 std::vector< Tag > tagHandles;
2752 std::string separator( ":" );
2753 split_tag_names( tag_name, separator, tagNames );
2754 for( size_t i = 0; i < tagNames.size(); i++ )
2755 {
2756 Tag tagHandle;
2757 ErrorCode rval = context.MBI->tag_get_handle( tagNames[i].c_str(), tagHandle );
2758 if( MB_SUCCESS != rval || nullptr == tagHandle )
2759 {
2760 MB_CHK_SET_ERR( moab::MB_FAILURE,
2761 " can't get tag handle for tag named:" << tagNames[i].c_str() << " at index " << i );
2762 }
2763 tagHandles.push_back( tagHandle );
2764 }
2765
2766 #ifdef VERBOSE
2767 std::cout << pco->rank() << ". Looking to receive data for tags: " << tag_name
2768 << " and file set = " << ( data.file_set ) << "\n";
2769 #endif
2770
2771
2772 MB_CHK_SET_ERR( cgraph->receive_tag_values( global, pco, owned, tagHandles ), "failed to receive tag values" );
2773
2774 #ifdef VERBOSE
2775 std::cout << pco->rank() << ". Looking to receive data for tags: " << tag_name << "\n";
2776 #endif
2777
2778 return moab::MB_SUCCESS;
2779 }
2780
2781 ErrCode iMOAB_FreeSenderBuffers( iMOAB_AppID pid, int* context_id )
2782 {
2783
2784
2785 appData& data = context.appDatas[*pid];
2786 std::map< int, ParCommGraph* >::iterator mt = data.pgraph.find( *context_id );
2787 if( mt == data.pgraph.end() ) return moab::MB_FAILURE;
2788
2789 mt->second->release_send_buffers();
2790 return moab::MB_SUCCESS;
2791 }
2792
2793
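// Build the communication graphs between two components that do not share a mesh.
// Each side publishes the global IDs (or GLOBAL_DOFS values for spectral meshes,
// type == 1) of its entities of interest; every ID is routed to a rendezvous task
// chosen as id % numProcs, matched there, and the pairing is sent back so each side
// can settle its ParCommGraph.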
2794 ErrCode iMOAB_ComputeCommGraph( iMOAB_AppID pid1,
2795 iMOAB_AppID pid2,
2796 MPI_Comm* joint_communicator,
2797 MPI_Group* group1,
2798 MPI_Group* group2,
2799 int* type1,
2800 int* type2,
2801 int* comp1,
2802 int* comp2 )
2803 {
2804 assert( joint_communicator );
2805 assert( group1 );
2806 assert( group2 );
2807 ErrorCode rval = MB_SUCCESS;
2808 int localRank = 0, numProcs = 1;
2809
2810 bool isFortran = false;
2811 if( *pid1 >= 0 ) isFortran = isFortran || context.appDatas[*pid1].is_fortran;
2812 if( *pid2 >= 0 ) isFortran = isFortran || context.appDatas[*pid2].is_fortran;
2813
2814 MPI_Comm global =
2815 ( isFortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( joint_communicator ) ) : *joint_communicator );
2816 MPI_Group srcGroup = ( isFortran ? MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( group1 ) ) : *group1 );
2817 MPI_Group tgtGroup = ( isFortran ? MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( group2 ) ) : *group2 );
2818
2819 MPI_Comm_rank( global, &localRank );
2820 MPI_Comm_size( global, &numProcs );
2821
2822
2823
2824
2825 ParCommGraph* cgraph = nullptr;
2826 ParCommGraph* cgraph_rev = nullptr;
2827 if( *pid1 >= 0 )
2828 {
2829 appData& data = context.appDatas[*pid1];
2830 auto mt = data.pgraph.find( *comp2 );
2831 if( mt != data.pgraph.end() ) data.pgraph.erase( mt );
2832
2833 cgraph = new ParCommGraph( global, srcGroup, tgtGroup, *comp1, *comp2 );
2834 context.appDatas[*pid1].pgraph[*comp2] = cgraph;
2835 }
2836 if( *pid2 >= 0 )
2837 {
2838 appData& data = context.appDatas[*pid2];
2839 auto mt = data.pgraph.find( *comp1 );
2840 if( mt != data.pgraph.end() ) data.pgraph.erase( mt );
2841
2842 cgraph_rev = new ParCommGraph( global, tgtGroup, srcGroup, *comp2, *comp1 );
2843 context.appDatas[*pid2].pgraph[*comp1] = cgraph_rev;
2844 }
2845
2846
2847
2848 TupleList TLcomp1;
2849 TLcomp1.initialize( 2, 0, 0, 0, 0 );
2850 TupleList TLcomp2;
2851 TLcomp2.initialize( 2, 0, 0, 0, 0 );
2852
2853
2854 TLcomp1.enableWriteAccess();
2855
2856
2857     Tag gdsTag = nullptr;  // only set when a spectral (type 1) component is involved
2858
2859 int lenTagType1 = 1;
2860 if( 1 == *type1 || 1 == *type2 )
2861 {
2862 rval = context.MBI->tag_get_handle( "GLOBAL_DOFS", gdsTag );MB_CHK_ERR( rval );
2863 rval = context.MBI->tag_get_length( gdsTag, lenTagType1 );MB_CHK_ERR( rval );
2864 }
2865 Tag gidTag = context.MBI->globalId_tag();
2866
2867 std::vector< int > valuesComp1;
2868
2869 if( *pid1 >= 0 )
2870 {
2871 appData& data1 = context.appDatas[*pid1];
2872 EntityHandle fset1 = data1.file_set;
2873
2874 #ifdef MOAB_HAVE_TEMPESTREMAP
2875 if( data1.tempestData.remapper != nullptr )
2876 fset1 = data1.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh );
2877 #endif
2878 Range ents_of_interest;
2879 if( *type1 == 1 )
2880 {
2881 assert( gdsTag );
2882 rval = context.MBI->get_entities_by_type( fset1, MBQUAD, ents_of_interest );MB_CHK_ERR( rval );
2883 valuesComp1.resize( ents_of_interest.size() * lenTagType1 );
2884 rval = context.MBI->tag_get_data( gdsTag, ents_of_interest, &valuesComp1[0] );MB_CHK_ERR( rval );
2885 }
2886 else if( *type1 == 2 )
2887 {
2888 rval = context.MBI->get_entities_by_type( fset1, MBVERTEX, ents_of_interest );MB_CHK_ERR( rval );
2889 valuesComp1.resize( ents_of_interest.size() );
2890 rval = context.MBI->tag_get_data( gidTag, ents_of_interest, &valuesComp1[0] );MB_CHK_ERR( rval );
2891 }
2892 else if( *type1 == 3 )
2893 {
2894 rval = context.MBI->get_entities_by_dimension( fset1, 2, ents_of_interest );MB_CHK_ERR( rval );
2895 valuesComp1.resize( ents_of_interest.size() );
2896 rval = context.MBI->tag_get_data( gidTag, ents_of_interest, &valuesComp1[0] );MB_CHK_ERR( rval );
2897 }
2898 else
2899 {
2900 MB_CHK_ERR( MB_FAILURE );
2901 }
2902
2903
2904 std::set< int > uniq( valuesComp1.begin(), valuesComp1.end() );
2905 TLcomp1.resize( uniq.size() );
2906 for( std::set< int >::iterator sit = uniq.begin(); sit != uniq.end(); sit++ )
2907 {
2908
2909 int marker = *sit;
2910 int to_proc = marker % numProcs;
2911 int n = TLcomp1.get_n();
2912 TLcomp1.vi_wr[2 * n] = to_proc;
2913 TLcomp1.vi_wr[2 * n + 1] = marker;
2914 TLcomp1.inc_n();
2915 }
2916 }
2917
2918 ProcConfig pc( global );
2919 pc.crystal_router()->gs_transfer( 1, TLcomp1,
2920 0 );
2921
2922 #ifdef VERBOSE
2923 std::stringstream ff1;
2924 ff1 << "TLcomp1_" << localRank << ".txt";
2925 TLcomp1.print_to_file( ff1.str().c_str() );
2926 #endif
2927 moab::TupleList::buffer sort_buffer;
2928 sort_buffer.buffer_init( TLcomp1.get_n() );
2929 TLcomp1.sort( 1, &sort_buffer );
2930 sort_buffer.reset();
2931 #ifdef VERBOSE
2932
2933 TLcomp1.print_to_file( ff1.str().c_str() );
2934 #endif
2935
2936
2937 TLcomp2.enableWriteAccess();
2938
2939 std::vector< int > valuesComp2;
2940 if( *pid2 >= 0 )
2941 {
2942 appData& data2 = context.appDatas[*pid2];
2943 EntityHandle fset2 = data2.file_set;
2944
2945 #ifdef MOAB_HAVE_TEMPESTREMAP
2946 if( data2.tempestData.remapper != nullptr )
2947 fset2 = data2.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh );
2948 #endif
2949
2950 Range ents_of_interest;
2951 if( *type2 == 1 )
2952 {
2953 assert( gdsTag );
2954 rval = context.MBI->get_entities_by_type( fset2, MBQUAD, ents_of_interest );MB_CHK_ERR( rval );
2955 valuesComp2.resize( ents_of_interest.size() * lenTagType1 );
2956 rval = context.MBI->tag_get_data( gdsTag, ents_of_interest, &valuesComp2[0] );MB_CHK_ERR( rval );
2957 }
2958 else if( *type2 == 2 )
2959 {
2960 rval = context.MBI->get_entities_by_type( fset2, MBVERTEX, ents_of_interest );MB_CHK_ERR( rval );
2961 valuesComp2.resize( ents_of_interest.size() );
2962 rval = context.MBI->tag_get_data( gidTag, ents_of_interest, &valuesComp2[0] );MB_CHK_ERR( rval );
2963 }
2964 else if( *type2 == 3 )
2965 {
2966 rval = context.MBI->get_entities_by_dimension( fset2, 2, ents_of_interest );MB_CHK_ERR( rval );
2967 valuesComp2.resize( ents_of_interest.size() );
2968 rval = context.MBI->tag_get_data( gidTag, ents_of_interest, &valuesComp2[0] );MB_CHK_ERR( rval );
2969 }
2970 else
2971 {
2972 MB_CHK_ERR( MB_FAILURE );
2973 }
2974
2975 std::set< int > uniq( valuesComp2.begin(), valuesComp2.end() );
2976 TLcomp2.resize( uniq.size() );
2977 for( std::set< int >::iterator sit = uniq.begin(); sit != uniq.end(); sit++ )
2978 {
2979
2980 int marker = *sit;
2981 int to_proc = marker % numProcs;
2982 int n = TLcomp2.get_n();
2983 TLcomp2.vi_wr[2 * n] = to_proc;
2984 TLcomp2.vi_wr[2 * n + 1] = marker;
2985 TLcomp2.inc_n();
2986 }
2987 }
2988 pc.crystal_router()->gs_transfer( 1, TLcomp2,
2989 0 );
2990
2991 #ifdef VERBOSE
2992 std::stringstream ff2;
2993 ff2 << "TLcomp2_" << localRank << ".txt";
2994 TLcomp2.print_to_file( ff2.str().c_str() );
2995 #endif
2996 sort_buffer.buffer_reserve( TLcomp2.get_n() );
2997 TLcomp2.sort( 1, &sort_buffer );
2998 sort_buffer.reset();
2999
3000 #ifdef VERBOSE
3001 TLcomp2.print_to_file( ff2.str().c_str() );
3002 #endif
3003
3004
3011
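    // Both tuple lists are now sorted by marker on the rendezvous tasks. Walk them
    // together and, for every marker present in both, emit one tuple per
    // (sender, receiver) pair: (origin proc, marker, partner proc). These tuples are
    // routed back so each component learns which remote tasks hold each of its IDs.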
3012 TupleList TLBackToComp1;
3013 TLBackToComp1.initialize( 3, 0, 0, 0, 0 );
3014 TLBackToComp1.enableWriteAccess();
3015
3016 TupleList TLBackToComp2;
3017 TLBackToComp2.initialize( 3, 0, 0, 0, 0 );
3018 TLBackToComp2.enableWriteAccess();
3019
3020 int n1 = TLcomp1.get_n();
3021 int n2 = TLcomp2.get_n();
3022
3023 int indexInTLComp1 = 0;
3024 int indexInTLComp2 = 0;
3025 if( n1 > 0 && n2 > 0 )
3026 {
3027 while( indexInTLComp1 < n1 && indexInTLComp2 < n2 )
3028 {
3029 int currentValue1 = TLcomp1.vi_rd[2 * indexInTLComp1 + 1];
3030 int currentValue2 = TLcomp2.vi_rd[2 * indexInTLComp2 + 1];
3031 if( currentValue1 < currentValue2 )
3032 {
3033
3034
3035
3036 indexInTLComp1++;
3037 continue;
3038 }
3039 if( currentValue1 > currentValue2 )
3040 {
3041
3042 indexInTLComp2++;
3043 continue;
3044 }
3045 int size1 = 1;
3046 int size2 = 1;
3047 while( indexInTLComp1 + size1 < n1 && currentValue1 == TLcomp1.vi_rd[2 * ( indexInTLComp1 + size1 ) + 1] )
3048 size1++;
3049 while( indexInTLComp2 + size2 < n2 && currentValue2 == TLcomp2.vi_rd[2 * ( indexInTLComp2 + size2 ) + 1] )
3050 size2++;
3051
3052 for( int i1 = 0; i1 < size1; i1++ )
3053 {
3054 for( int i2 = 0; i2 < size2; i2++ )
3055 {
3056
3057 int n = TLBackToComp1.get_n();
3058 TLBackToComp1.reserve();
3059 TLBackToComp1.vi_wr[3 * n] =
3060 TLcomp1.vi_rd[2 * ( indexInTLComp1 + i1 )];
3061
3062 TLBackToComp1.vi_wr[3 * n + 1] = currentValue1;
3063 TLBackToComp1.vi_wr[3 * n + 2] = TLcomp2.vi_rd[2 * ( indexInTLComp2 + i2 )];
3064 n = TLBackToComp2.get_n();
3065 TLBackToComp2.reserve();
3066 TLBackToComp2.vi_wr[3 * n] =
3067 TLcomp2.vi_rd[2 * ( indexInTLComp2 + i2 )];
3068 TLBackToComp2.vi_wr[3 * n + 1] = currentValue1;
3069 TLBackToComp2.vi_wr[3 * n + 2] = TLcomp1.vi_rd[2 * ( indexInTLComp1 + i1 )];
3070
3071 }
3072 }
3073 indexInTLComp1 += size1;
3074 indexInTLComp2 += size2;
3075 }
3076 }
3077 pc.crystal_router()->gs_transfer( 1, TLBackToComp1, 0 );
3078 pc.crystal_router()->gs_transfer( 1, TLBackToComp2, 0 );
3079
3080 if( *pid1 >= 0 )
3081 {
3082
3083
3084
3085
3086 #ifdef VERBOSE
3087 std::stringstream f1;
3088 f1 << "TLBack1_" << localRank << ".txt";
3089 TLBackToComp1.print_to_file( f1.str().c_str() );
3090 #endif
3091 sort_buffer.buffer_reserve( TLBackToComp1.get_n() );
3092 TLBackToComp1.sort( 1, &sort_buffer );
3093 sort_buffer.reset();
3094 #ifdef VERBOSE
3095 TLBackToComp1.print_to_file( f1.str().c_str() );
3096 #endif
3097
3098
3099 cgraph->settle_comm_by_ids( *comp1, TLBackToComp1, valuesComp1 );
3100 }
3101 if( *pid2 >= 0 )
3102 {
3103
3104
3105
3106
3107 #ifdef VERBOSE
3108 std::stringstream f2;
3109 f2 << "TLBack2_" << localRank << ".txt";
3110 TLBackToComp2.print_to_file( f2.str().c_str() );
3111 #endif
3112 sort_buffer.buffer_reserve( TLBackToComp2.get_n() );
3113 TLBackToComp2.sort( 2, &sort_buffer );
3114 sort_buffer.reset();
3115 #ifdef VERBOSE
3116 TLBackToComp2.print_to_file( f2.str().c_str() );
3117 #endif
3118 cgraph_rev->settle_comm_by_ids( *comp2, TLBackToComp2, valuesComp2 );
3119 }
3120 return moab::MB_SUCCESS;
3121 }
3122
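// Stitch a mesh assembled from multiple senders: remove duplicated vertices by
// GLOBAL_ID (carrying the "area"/"frac" tags through the merge when present), rebuild
// the material set contents, merge across processes with ParallelMergeMesh, and
// reassign global IDs on the merged mesh.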
3123 ErrCode iMOAB_MergeVertices( iMOAB_AppID pid )
3124 {
3125 appData& data = context.appDatas[*pid];
3126 ParallelComm* pco = context.appDatas[*pid].pcomm;
3127
3128
3129 std::vector< Tag > tagsList;
3130     Tag tag = nullptr;  // initialize so the null check below is well-defined if the lookup fails
3131 ErrorCode rval = context.MBI->tag_get_handle( "GLOBAL_ID", tag );
3132 if( !tag || rval != MB_SUCCESS ) return moab::MB_FAILURE;
3133 tagsList.push_back( tag );
3134 rval = context.MBI->tag_get_handle( "area", tag );
3135 if( tag && rval == MB_SUCCESS ) tagsList.push_back( tag );
3136 rval = context.MBI->tag_get_handle( "frac", tag );
3137 if( tag && rval == MB_SUCCESS ) tagsList.push_back( tag );
3138 double tol = 1.0e-9;
3139 rval = IntxUtils::remove_duplicate_vertices( context.MBI, data.file_set, tol, tagsList );MB_CHK_ERR( rval );
3140
3141
3142 rval = context.MBI->get_entities_by_type_and_tag( data.file_set, MBENTITYSET, &( context.material_tag ), 0, 1,
3143 data.mat_sets, Interface::UNION );MB_CHK_ERR( rval );
3144
3145 if( !data.mat_sets.empty() )
3146 {
3147 EntityHandle matSet = data.mat_sets[0];
3148 Range elems;
3149 rval = context.MBI->get_entities_by_dimension( matSet, 2, elems );MB_CHK_ERR( rval );
3150 rval = context.MBI->remove_entities( matSet, elems );MB_CHK_ERR( rval );
3151
3152 elems.clear();
3153 rval = context.MBI->get_entities_by_dimension( data.file_set, 2, elems );MB_CHK_ERR( rval );
3154 rval = context.MBI->add_entities( matSet, elems );MB_CHK_ERR( rval );
3155 }
3156 rval = iMOAB_UpdateMeshInfo( pid );MB_CHK_ERR( rval );
3157
3158 ParallelMergeMesh pmm( pco, tol );
3159 rval = pmm.merge( data.file_set,
3160 false,
3161 2 );MB_CHK_ERR( rval );
3162
3163
3164 rval = pco->assign_global_ids( data.file_set, 0 );MB_CHK_ERR( rval );
3165
3166
3167 Tag part_tag;
3168 int dum_id = -1;
3169 rval = context.MBI->tag_get_handle( "PARALLEL_PARTITION", 1, MB_TYPE_INTEGER, part_tag,
3170 MB_TAG_CREAT | MB_TAG_SPARSE, &dum_id );
3171
3172 if( part_tag == nullptr || ( ( rval != MB_SUCCESS ) && ( rval != MB_ALREADY_ALLOCATED ) ) )
3173 {
3174         std::cout << " can't get the PARALLEL_PARTITION tag.\n";
3175 return moab::MB_FAILURE;
3176 }
3177
3178 int rank = pco->rank();
3179 rval = context.MBI->tag_set_data( part_tag, &data.file_set, 1, &rank );MB_CHK_ERR( rval );
3180
3181 return moab::MB_SUCCESS;
3182 }
3183
3184 #ifdef MOAB_HAVE_TEMPESTREMAP
3185
3186
3187
3188
3189
3190
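// After an intersection has been computed, rebuild a tighter communication graph that
// sends only the source ("coverage") cells each receiver actually needs. Needed cells
// are identified through the "orig_sending_processor" tag and their parent/global IDs,
// collected per original sender, and routed back so the sender side can settle its
// send graph under the new context_id.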
3191 ErrCode iMOAB_CoverageGraph( MPI_Comm* joint_communicator,
3192 iMOAB_AppID pid_src,
3193 iMOAB_AppID pid_migr,
3194 iMOAB_AppID pid_intx,
3195 int* source_id,
3196 int* migration_id,
3197 int* context_id )
3198 {
3199
3200 std::vector< int > srcSenders, receivers;
3201 ParCommGraph* sendGraph = nullptr;
3202 bool is_fortran_context = false;
3203
3204 if( pid_src && *pid_src > 0 && context.appDatas.find( *pid_src ) == context.appDatas.end() )
3205 MB_CHK_SET_ERR( moab::MB_FAILURE, "Invalid source application ID specified: " << *pid_src );
3206 if( pid_migr && *pid_migr > 0 && context.appDatas.find( *pid_migr ) == context.appDatas.end() )
3207 MB_CHK_SET_ERR( moab::MB_FAILURE, "Invalid migration/coverage application ID specified: " << *pid_migr );
3208 if( pid_intx && *pid_intx > 0 && context.appDatas.find( *pid_intx ) == context.appDatas.end() )
3209 MB_CHK_SET_ERR( moab::MB_FAILURE, "Invalid intersection application ID specified: " << *pid_intx );
3210
3211
3212
3213 if( *pid_src >= 0 )
3214 {
3215 appData& dataSrc = context.appDatas[*pid_src];
3216 int default_context_id = *migration_id;
3217 assert( dataSrc.global_id == *source_id );
3218 is_fortran_context = dataSrc.is_fortran || is_fortran_context;
3219 if( dataSrc.pgraph.find( default_context_id ) != dataSrc.pgraph.end() )
3220 sendGraph = dataSrc.pgraph[default_context_id];
3221 else
3222 MB_CHK_SET_ERR( moab::MB_FAILURE, "Could not find source ParCommGraph with default migration context" );
3223
3224
3225 srcSenders = sendGraph->senders();
3226 receivers = sendGraph->receivers();
3227 #ifdef VERBOSE
3228 std::cout << "senders: " << srcSenders.size() << " first sender: " << srcSenders[0] << std::endl;
3229 #endif
3230 }
3231
3232 ParCommGraph* recvGraph = nullptr;
3233 if( *pid_migr >= 0 )
3234 {
3235 appData& dataMigr = context.appDatas[*pid_migr];
3236 is_fortran_context = dataMigr.is_fortran || is_fortran_context;
3237
3238 int default_context_id = *source_id;
3239 assert( dataMigr.global_id == *migration_id );
3240 if( dataMigr.pgraph.find( default_context_id ) != dataMigr.pgraph.end() )
3241 recvGraph = dataMigr.pgraph[default_context_id];
3242 else
3243 MB_CHK_SET_ERR( moab::MB_FAILURE,
3244 "Could not find coverage receiver ParCommGraph with default migration context" );
3245
3246 srcSenders = recvGraph->senders();
3247 receivers = recvGraph->receivers();
3248 #ifdef VERBOSE
3249 std::cout << "receivers: " << receivers.size() << " first receiver: " << receivers[0] << std::endl;
3250 #endif
3251 }
3252
3253 if( *pid_intx >= 0 ) is_fortran_context = context.appDatas[*pid_intx].is_fortran || is_fortran_context;
3254
3255
3256
3257
3258 TupleList TLcovIDs;
3259 TLcovIDs.initialize( 2, 0, 0, 0, 0 );
3260
3261 TLcovIDs.enableWriteAccess();
3262
3263
3264
3265 MPI_Comm global = ( is_fortran_context ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( joint_communicator ) )
3266 : *joint_communicator );
3267     int currentRankInJointComm = -1;CHK_MPI_ERR( MPI_Comm_rank( global, &currentRankInJointComm ) );
3268
3269
3270 Tag gidTag = context.MBI->globalId_tag();
3271 if( !gidTag ) return moab::MB_TAG_NOT_FOUND;
3272
3273
3274
3275     if( std::find( receivers.begin(), receivers.end(), currentRankInJointComm ) !=
3276 receivers.end() )
3277 {
3278 appData& dataIntx = context.appDatas[*pid_intx];
3279 EntityHandle intx_set = dataIntx.file_set;
3280 EntityHandle cover_set = dataIntx.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh );
3281
3282 Tag orgSendProcTag;
3283 MB_CHK_ERR( context.MBI->tag_get_handle( "orig_sending_processor", orgSendProcTag ) );
3284 if( !orgSendProcTag ) return moab::MB_TAG_NOT_FOUND;
3285
3286 Range intx_cells;
3287
3288 MB_CHK_ERR( context.MBI->get_entities_by_dimension( intx_set, 2, intx_cells ) );
3289
3290
3291
3292
3293 std::map< int, std::set< int > > idsFromProcs;
3294 if( intx_cells.size() )
3295 {
3296 Tag parentTag;
3297
3298 MB_CHK_ERR( context.MBI->tag_get_handle( "SourceParent", parentTag ) );
3299 if( !parentTag ) return moab::MB_TAG_NOT_FOUND;
3300
3301 for( auto it = intx_cells.begin(); it != intx_cells.end(); ++it )
3302 {
3303 EntityHandle intx_cell = *it;
3304
3305 int origProc;
3306 MB_CHK_ERR( context.MBI->tag_get_data( orgSendProcTag, &intx_cell, 1, &origProc ) );
3307
3308
3309
3310
3311 if( origProc < 0 ) continue;
3312
3313 int gidCell;
3314 MB_CHK_ERR( context.MBI->tag_get_data( parentTag, &intx_cell, 1, &gidCell ) );
3315 idsFromProcs[origProc].insert( gidCell );
3316
3317 }
3318 }
3319
3320
3321
3322
3323 {
3324
3325 Range cover_cells;
3326 MB_CHK_ERR( context.MBI->get_entities_by_dimension( cover_set, 2, cover_cells ) );
3327
3328
3329 Tag gidTag = context.MBI->globalId_tag();
3330
3331
3332 for( auto it = cover_cells.begin(); it != cover_cells.end(); ++it )
3333 {
3334 const EntityHandle cov_cell = *it;
3335
3336 int origProc;
3337 MB_CHK_ERR( context.MBI->tag_get_data( orgSendProcTag, &cov_cell, 1, &origProc ) );
3338
3339
3340
3341 if( origProc < 0 ) continue;
3342
3343 int gidCell;
3344 MB_CHK_ERR( context.MBI->tag_get_data( gidTag, &cov_cell, 1, &gidCell ) );
3345 assert( gidCell > 0 );
3346 idsFromProcs[origProc].insert( gidCell );
3347 }
3348 }
3349
3350 #ifdef VERBOSE
3351 std::ofstream dbfile;
3352 std::stringstream outf;
3353 outf << "idsFromProc_0" << currentRankInJointComm << ".txt";
3354 dbfile.open( outf.str().c_str() );
3355 dbfile << "Writing this to a file.\n";
3356
3357 dbfile << " map size:" << idsFromProcs.size()
3358 << std::endl;
3359
3360
3361 for( std::map< int, std::set< int > >::iterator mt = idsFromProcs.begin(); mt != idsFromProcs.end(); mt++ )
3362 {
3363 std::set< int >& setIds = mt->second;
3364 dbfile << "from id: " << mt->first << " receive " << setIds.size() << " cells \n";
3365 int counter = 0;
3366 for( std::set< int >::iterator st = setIds.begin(); st != setIds.end(); st++ )
3367 {
3368 int valueID = *st;
3369 dbfile << " " << valueID;
3370 counter++;
3371 if( counter % 10 == 0 ) dbfile << "\n";
3372 }
3373 dbfile << "\n";
3374 }
3375 dbfile.close();
3376 #endif
3377
3378 if( nullptr != recvGraph )
3379 {
3380 ParCommGraph* newRecvGraph = new ParCommGraph( *recvGraph );
3381 newRecvGraph->set_context_id( *context_id );
3382 newRecvGraph->SetReceivingAfterCoverage( idsFromProcs );
3383
3384
3385 newRecvGraph->set_cover_set( cover_set );
3386 if( context.appDatas[*pid_migr].pgraph.find( *context_id ) == context.appDatas[*pid_migr].pgraph.end() )
3387 context.appDatas[*pid_migr].pgraph[*context_id] = newRecvGraph;
3388 else
3389 MB_CHK_SET_ERR( moab::MB_FAILURE, "ParCommGraph for context_id="
3390 << *context_id << " already exists. Check the workflow" );
3391 }
3392 for( std::map< int, std::set< int > >::iterator mit = idsFromProcs.begin(); mit != idsFromProcs.end(); ++mit )
3393 {
3394 int procToSendTo = mit->first;
3395 std::set< int >& idSet = mit->second;
3396 for( std::set< int >::iterator sit = idSet.begin(); sit != idSet.end(); ++sit )
3397 {
3398 int n = TLcovIDs.get_n();
3399 TLcovIDs.reserve();
3400 TLcovIDs.vi_wr[2 * n] = procToSendTo;
3401 TLcovIDs.vi_wr[2 * n + 1] = *sit;
3402 }
3403 }
3404 }
3405
3406 ProcConfig pc( global );
3407
3408
3409 pc.crystal_router()->gs_transfer( 1, TLcovIDs, 0 );
3410
3411
3412 if( nullptr != sendGraph )
3413 {
3414 appData& dataSrc = context.appDatas[*pid_src];
3415
3416 ParCommGraph* newSendGraph = new ParCommGraph( *sendGraph );
3417 newSendGraph->set_context_id( *context_id );
3418 dataSrc.pgraph[*context_id] = newSendGraph;
3419 MB_CHK_ERR( newSendGraph->settle_send_graph( TLcovIDs ) );
3420 }
3421 return moab::MB_SUCCESS;
3422 }
3423
3424 ErrCode iMOAB_DumpCommGraph( iMOAB_AppID pid, int* context_id, int* is_sender, const iMOAB_String prefix )
3425 {
3426 assert( prefix && strlen( prefix ) );
3427
3428 ParCommGraph* cgraph = context.appDatas[*pid].pgraph[*context_id];
3429 std::string prefix_str( prefix );
3430
3431 if( nullptr != cgraph )
3432 cgraph->dump_comm_information( prefix_str, *is_sender );
3433 else
3434 {
3435 std::cout << " cannot find ParCommGraph on app with pid " << *pid << " name: " << context.appDatas[*pid].name
3436 << " context: " << *context_id << "\n";
3437 }
3438 return moab::MB_SUCCESS;
3439 }
3440
3441 #endif
3442
3443 #endif
3444
3445 #ifdef MOAB_HAVE_TEMPESTREMAP
3446
3447 #ifdef MOAB_HAVE_NETCDF
3448
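// Set the "aream" tag on the primary elements of an application from areas stored with
// a trivial row distribution: the entry for global id g lives on task (g-1)/nL (with
// nL = N/size), at local offset g-1-rank*nL. In serial the vector is indexed directly;
// in parallel each task requests its values through the crystal router.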
3449 static ErrCode set_aream_from_trivial_distribution( iMOAB_AppID pid, int N, std::vector< double >& trvArea )
3450 {
3451
3452
3453
3454
3455
3456
3457
3458
3459
3460 appData& data = context.appDatas[*pid];
3461 int size = 1, rank = 0;
3462 #ifdef MOAB_HAVE_MPI
3463 ParallelComm* pcomm = context.appDatas[*pid].pcomm;
3464 size = pcomm->size();
3465 rank = pcomm->rank();
3466 #endif
3467
3468
3469
3470
3471 Tag areaTag;
3472 MB_CHK_ERR( context.MBI->tag_get_handle( "aream", areaTag ) );
3473
3474
3475 const Range& ents_to_set = data.primary_elems;
3476 size_t nents_to_be_set = ents_to_set.size();
3477
3478 Tag gidTag = context.MBI->globalId_tag();
3479 std::vector< int > globalIds( nents_to_be_set );
3480 MB_CHK_ERR( context.MBI->tag_get_data( gidTag, ents_to_set, &globalIds[0] ) );
3481
3482 const bool serial = ( size == 1 );
3483 if( serial )
3484 {
3485
3486
3487
3488
3489 for( size_t i = 0; i < nents_to_be_set; i++ )
3490 {
3491 int gid = globalIds[i];
3492 int indexInVal =
3493 gid - 1;
3494 assert( indexInVal < N );
3495 EntityHandle eh = ents_to_set[i];
3496 MB_CHK_ERR( context.MBI->tag_set_data( areaTag, &eh, 1, &trvArea[indexInVal] ) );
3497 }
3498 }
3499 #ifdef MOAB_HAVE_MPI
3500 else
3501 {
3502 int nL = N / size;
3503
3504
3505
3506
3507
3508
3509 TupleList TLreq;
3510 TLreq.initialize( 3, 0, 0, 0, nents_to_be_set );
3511 TLreq.enableWriteAccess();
3512
3513
3514
3515 for( size_t i = 0; i < nents_to_be_set; i++ )
3516 {
3517
3518 int marker = globalIds[i];
3519 int to_proc = ( marker - 1 ) / nL;
3520
3521 if( to_proc == size ) to_proc = size - 1;
3522 int n = TLreq.get_n();
3523 TLreq.vi_wr[3 * n] = to_proc;
3524 TLreq.vi_wr[3 * n + 1] = marker;
3525 TLreq.vi_wr[3 * n + 2] = i;
3526 TLreq.inc_n();
3527 }
3528
3529
3530
3531 ( pcomm->proc_config().crystal_router() )->gs_transfer( 1, TLreq, 0 );
3532
3533 int sizeBack = TLreq.get_n();
3534
3535 TupleList TLBack;
3536 TLBack.initialize( 3, 0, 0, 1, sizeBack );
3537 TLBack.enableWriteAccess();
3538 for( int i = 0; i < sizeBack; i++ )
3539 {
3540 int from_proc = TLreq.vi_wr[3 * i];
3541 TLBack.vi_wr[3 * i] = from_proc;
3542 int marker = TLreq.vi_wr[3 * i + 1];
3543 TLBack.vi_wr[3 * i + 1] = marker;
3544 TLBack.vi_wr[3 * i + 2] = TLreq.vi_wr[3 * i + 2];
3545
3546 int index = marker - 1 - rank * nL;
3547 TLBack.vr_wr[i] = trvArea[index];
3548 TLBack.inc_n();
3549 }
3550
3551 ( pcomm->proc_config().crystal_router() )->gs_transfer( 1, TLBack, 0 );
3552
3553 int n1 = TLBack.get_n();
3554 for( int i = 0; i < n1; i++ )
3555 {
3556
3557 int origIndex = TLBack.vi_rd[3 * i + 2];
3558 EntityHandle eh = ents_to_set[origIndex];
3559 MB_CHK_ERR( context.MBI->tag_set_data( areaTag, &eh, 1, &TLBack.vr_rd[i] ) );
3560 }
3561 }
3562 #endif
3563
3564
3565 return MB_SUCCESS;
3566 }
3567
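// Read a mapping weights file in parallel and attach the resulting TempestOnlineMap to
// the intersection application under solution_weights_identifier. The coverage mesh is
// created on demand, row/column DoF IDs come from GLOBAL_DOFS (spectral, type 1) or
// GLOBAL_ID (types 2 and 3), and the per-cell areas returned by ReadParallelMap are
// redistributed onto the source and target meshes as the "aream" tag.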
3568 ErrCode iMOAB_LoadMappingWeightsFromFile(
3569 iMOAB_AppID pid_source,
3570 iMOAB_AppID pid_target,
3571 iMOAB_AppID pid_intersection,
3572 int* srctype,
3573 int* tgttype,
3574 const iMOAB_String solution_weights_identifier,
3575 const iMOAB_String remap_weights_filename )
3576 {
3577 assert( srctype && tgttype );
3578
3579
3580
3581 appData& data_source = context.appDatas[*pid_source];
3582 appData& data_target = context.appDatas[*pid_target];
3583 appData& data_intx = context.appDatas[*pid_intersection];
3584 TempestMapAppData& tdata = data_intx.tempestData;
3585
3586
3587 if( tdata.remapper == nullptr )
3588 {
3589
3590
3591
3592 MB_CHK_ERR( iMOAB_ComputeCoverageMesh( pid_source, pid_target, pid_intersection ) );
3593 }
3594
3595
3596
3597 tdata.weightMaps[std::string( solution_weights_identifier )] = new moab::TempestOnlineMap( tdata.remapper );
3598
3599
3600 moab::TempestOnlineMap* weightMap = tdata.weightMaps[std::string( solution_weights_identifier )];
3601 assert( weightMap != nullptr );
3602
3603 EntityHandle source_set = data_source.file_set;
3604 EntityHandle covering_set = tdata.remapper->GetMeshSet( Remapper::CoveringMesh );
3605 EntityHandle target_set = data_target.file_set;
3606
3607 int src_elem_dof_length = 1, tgt_elem_dof_length = 1;
3608
3609 Tag gdsTag = nullptr;
3610 if( *srctype == 1 || *tgttype == 1 )
3611 {
3612 MB_CHK_ERR( context.MBI->tag_get_handle( "GLOBAL_DOFS", gdsTag ) );
3613 assert( gdsTag );
3614 }
3615
3616
3617
3618
3619 if( *srctype == 1 )
3620 MB_CHK_ERR( context.MBI->tag_get_length( gdsTag, src_elem_dof_length ) );
3621
3622
3623 if( *tgttype == 1 )
3624 MB_CHK_ERR( context.MBI->tag_get_length( gdsTag, tgt_elem_dof_length ) );
3625
3626 Tag gidTag = context.MBI->globalId_tag();
3627 std::vector< int > srcDofValues, tgtDofValues;
3628
3629
3630
3631 Range srcc_ents_of_interest, src_ents_of_interest, tgt_ents_of_interest;
3632
3633 if( *srctype == 1 )
3634 {
3635 MB_CHK_ERR( context.MBI->tag_get_length( gdsTag, src_elem_dof_length ) );
3636 MB_CHK_ERR( context.MBI->get_entities_by_type( covering_set, MBQUAD, src_ents_of_interest ) );
3637 srcDofValues.resize( src_ents_of_interest.size() * src_elem_dof_length );
3638 MB_CHK_ERR( context.MBI->tag_get_data( gdsTag, src_ents_of_interest, &srcDofValues[0] ) );
3639 }
3640 else if( *srctype == 2 )
3641 {
3642
3643 MB_CHK_ERR( context.MBI->get_entities_by_type( covering_set, MBVERTEX, src_ents_of_interest ) );
3644 srcDofValues.resize( src_ents_of_interest.size() * src_elem_dof_length );
3645 MB_CHK_ERR( context.MBI->tag_get_data( gidTag, src_ents_of_interest, &srcDofValues[0] ) );
3646 }
3647 else if( *srctype == 3 )
3648 {
3649
3650 MB_CHK_ERR( context.MBI->get_entities_by_dimension( covering_set, 2, src_ents_of_interest ) );
3651 srcDofValues.resize( src_ents_of_interest.size() * src_elem_dof_length );
3652 MB_CHK_ERR( context.MBI->tag_get_data( gidTag, src_ents_of_interest, &srcDofValues[0] ) );
3653 }
3654 else
3655 {
3656 MB_CHK_ERR( MB_FAILURE );
3657 }
3658
3659 if( *tgttype == 1 )
3660 {
3661 MB_CHK_ERR( context.MBI->tag_get_length( gdsTag, tgt_elem_dof_length ) );
3662 MB_CHK_ERR( context.MBI->get_entities_by_type( target_set, MBQUAD, tgt_ents_of_interest ) );
3663 tgtDofValues.resize( tgt_ents_of_interest.size() * tgt_elem_dof_length );
3664 MB_CHK_ERR( context.MBI->tag_get_data( gdsTag, tgt_ents_of_interest, &tgtDofValues[0] ) );
3665 }
3666 else if( *tgttype == 2 )
3667 {
3668
3669 MB_CHK_ERR( context.MBI->get_entities_by_type( target_set, MBVERTEX, tgt_ents_of_interest ) );
3670 tgtDofValues.resize( tgt_ents_of_interest.size() * tgt_elem_dof_length );
3671 MB_CHK_ERR( context.MBI->tag_get_data( gidTag, tgt_ents_of_interest, &tgtDofValues[0] ) );
3672 }
3673 else if( *tgttype == 3 )
3674 {
3675
3676 MB_CHK_ERR( context.MBI->get_entities_by_dimension( target_set, 2, tgt_ents_of_interest ) );
3677 tgtDofValues.resize( tgt_ents_of_interest.size() * tgt_elem_dof_length );
3678 MB_CHK_ERR( context.MBI->tag_get_data( gidTag, tgt_ents_of_interest, &tgtDofValues[0] ) );
3679 }
3680 else
3681 {
3682 MB_CHK_ERR( MB_FAILURE );
3683 }
3684
3685
3686
3687 std::vector< int > sortTgtDofs( tgtDofValues.begin(), tgtDofValues.end() );
3688 std::sort( sortTgtDofs.begin(), sortTgtDofs.end() );
3689 sortTgtDofs.erase( std::unique( sortTgtDofs.begin(), sortTgtDofs.end() ), sortTgtDofs.end() );
3690
3691
3692 Tag areaTag;
3693 ErrorCode rval =
3694 context.MBI->tag_get_handle( "aream", 1, MB_TYPE_DOUBLE, areaTag, MB_TAG_DENSE | MB_TAG_EXCL | MB_TAG_CREAT );
3695 if( MB_ALREADY_ALLOCATED == rval )
3696 {
3697 #ifdef MOAB_HAVE_MPI
3698 if( 0 == data_intx.pcomm->rank() )
3699 #endif
3700             std::cout << " aream tag already defined\n";
3701 }
3702
3703 std::vector< double > trvAreaA, trvAreaB;
3704 int nA, nB;
3705 MB_CHK_SET_ERR( weightMap->ReadParallelMap( remap_weights_filename, sortTgtDofs, trvAreaA, nA, trvAreaB, nB ),
3706 "reading map from disk failed" );
3707
3708
3709
3710 tdata.pid_src = pid_source;
3711 tdata.pid_dest = pid_target;
3712
3713
3714
3715
3716
3717
3718 tdata.remapper->SetMeshSet( Remapper::SourceMesh, source_set, &srcc_ents_of_interest );
3719
3720
3721
3722 MB_CHK_SET_ERR( set_aream_from_trivial_distribution( pid_source, nA, trvAreaA ), " fail to set aream on source " );
3723 MB_CHK_SET_ERR( set_aream_from_trivial_distribution( pid_target, nB, trvAreaB ), " fail to set aream on target " );
3724
3725 tdata.remapper->SetMeshSet( Remapper::CoveringMesh, covering_set, &src_ents_of_interest );
3726 weightMap->SetSourceNDofsPerElement( src_elem_dof_length );
3727 weightMap->set_col_dc_dofs( srcDofValues );
3728
3729 tdata.remapper->SetMeshSet( Remapper::TargetMesh, target_set, &tgt_ents_of_interest );
3730 weightMap->SetDestinationNDofsPerElement( tgt_elem_dof_length );
3731 weightMap->set_row_dc_dofs( tgtDofValues );
3732
3733
3734 std::string metadataStr = std::string( remap_weights_filename ) + ";FV:1:GLOBAL_ID;FV:1:GLOBAL_ID";
3735
3736 data_intx.metadataMap[std::string( solution_weights_identifier )] = metadataStr;
3737
3738 return moab::MB_SUCCESS;
3739 }
3740
3741 ErrCode iMOAB_WriteMappingWeightsToFile(
3742 iMOAB_AppID pid_intersection,
3743 const iMOAB_String solution_weights_identifier,
3744 const iMOAB_String remap_weights_filename )
3745 {
3746 assert( solution_weights_identifier && strlen( solution_weights_identifier ) );
3747 assert( remap_weights_filename && strlen( remap_weights_filename ) );
3748
3749 ErrorCode rval;
3750
3751
3752 appData& data_intx = context.appDatas[*pid_intersection];
3753 TempestMapAppData& tdata = data_intx.tempestData;
3754
3755
3756 assert( tdata.remapper != nullptr );
3757
3758
3759 moab::TempestOnlineMap* weightMap = tdata.weightMaps[std::string( solution_weights_identifier )];
3760 assert( weightMap != nullptr );
3761
3762 std::string filename = std::string( remap_weights_filename );
3763
3764 std::string metadataStr = data_intx.metadataMap[std::string( solution_weights_identifier )];
3765 std::map< std::string, std::string > attrMap;
3766 attrMap["title"] = "MOAB-TempestRemap Online Regridding Weight Generator";
3767 attrMap["normalization"] = "ovarea";
3768 attrMap["map_aPb"] = filename;
3769
3770
3771
3772
3773
3774
3775
3776
3777
3778
3779
3780
3781
3782
3783
3784
3785
3786
3787
3788 attrMap["concave_a"] = "false";
3789 attrMap["concave_b"] = "false";
3790 attrMap["bubble"] = "true";
3791 attrMap["MOABversion"] = std::string( MOAB_VERSION );
3792
3793
3794 rval = weightMap->WriteParallelMap( filename, attrMap );MB_CHK_ERR( rval );
3795
3796 return moab::MB_SUCCESS;
3797 }
3798
3799 #ifdef MOAB_HAVE_MPI
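// Migrate a component mesh so that it matches the row/column distribution of a
// previously loaded weight map (pid2). The DoF IDs owned by the map (columns when
// direction == 1, rows when direction == 2) are matched against the component mesh
// through the same rendezvous scheme as iMOAB_ComputeCommGraph, the required
// cells/vertices are shipped to pid3, and the map's covering or target mesh set is
// repointed to the migrated result.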
3800 ErrCode iMOAB_MigrateMapMesh( iMOAB_AppID pid1,
3801 iMOAB_AppID pid2,
3802 iMOAB_AppID pid3,
3803 MPI_Comm* jointcomm,
3804 MPI_Group* groupA,
3805 MPI_Group* groupB,
3806 int* type,
3807 int* comp1,
3808 int* comp2,
3809 int* direction )
3810 {
3811 assert( jointcomm );
3812 assert( groupA );
3813 assert( groupB );
3814 bool is_fortran = false;
3815 if( *pid1 >= 0 ) is_fortran = context.appDatas[*pid1].is_fortran || is_fortran;
3816 if( *pid2 >= 0 ) is_fortran = context.appDatas[*pid2].is_fortran || is_fortran;
3817 if( *pid3 >= 0 ) is_fortran = context.appDatas[*pid3].is_fortran || is_fortran;
3818
3819 MPI_Comm joint_communicator =
3820 ( is_fortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( jointcomm ) ) : *jointcomm );
3821 MPI_Group group_first = ( is_fortran ? MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( groupA ) ) : *groupA );
3822 MPI_Group group_second = ( is_fortran ? MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( groupB ) ) : *groupB );
3823
3824 ErrorCode rval = MB_SUCCESS;
3825 int localRank = 0, numProcs = 1;
3826
3827
3828 MPI_Comm_rank( joint_communicator, &localRank );
3829 MPI_Comm_size( joint_communicator, &numProcs );
3830
3831
3832
3833
3834 ParCommGraph* cgraph = nullptr;
3835 if( *pid1 >= 0 ) cgraph = new ParCommGraph( joint_communicator, group_first, group_second, *comp1, *comp2 );
3836
3837 ParCommGraph* cgraph_rev = nullptr;
3838 if( *pid3 >= 0 ) cgraph_rev = new ParCommGraph( joint_communicator, group_second, group_first, *comp2, *comp1 );
3839
3840
3841
3842 if( *pid1 >= 0 ) context.appDatas[*pid1].pgraph[*comp2] = cgraph;
3843 if( *pid3 >= 0 ) context.appDatas[*pid3].pgraph[*comp1] = cgraph_rev;
3844
3845
3846
3847 TupleList TLcomp1;
3848 TLcomp1.initialize( 2, 0, 0, 0, 0 );
3849 TupleList TLcomp2;
3850 TLcomp2.initialize( 2, 0, 0, 0, 0 );
3851
3852
3853 TLcomp1.enableWriteAccess();
3854
3855
3856     Tag gdsTag = nullptr;  // only set when *type == 1 (spectral elements with GLOBAL_DOFS)
3857
3858
3859 int lenTagType1 = 1;
3860 if( *type == 1 )
3861 {
3862 rval = context.MBI->tag_get_handle( "GLOBAL_DOFS", gdsTag );MB_CHK_ERR( rval );
3863 rval = context.MBI->tag_get_length( gdsTag, lenTagType1 );MB_CHK_ERR( rval );
3864 }
3865 Tag tagType2 = context.MBI->globalId_tag();
3866
3867 std::vector< int > valuesComp1;
3868
3869
3870 Range ents_of_interest;
3871
3872 if( *pid1 >= 0 )
3873 {
3874 appData& data1 = context.appDatas[*pid1];
3875 EntityHandle fset1 = data1.file_set;
3876
3877 if( *type == 1 )
3878 {
3879 assert( gdsTag );
3880 rval = context.MBI->get_entities_by_type( fset1, MBQUAD, ents_of_interest );MB_CHK_ERR( rval );
3881 valuesComp1.resize( ents_of_interest.size() * lenTagType1 );
3882 rval = context.MBI->tag_get_data( gdsTag, ents_of_interest, &valuesComp1[0] );MB_CHK_ERR( rval );
3883 }
3884 else if( *type == 2 )
3885 {
3886 rval = context.MBI->get_entities_by_type( fset1, MBVERTEX, ents_of_interest );MB_CHK_ERR( rval );
3887 valuesComp1.resize( ents_of_interest.size() );
3888 rval = context.MBI->tag_get_data( tagType2, ents_of_interest, &valuesComp1[0] );MB_CHK_ERR( rval );
3889 }
3890 else if( *type == 3 )
3891 {
3892 rval = context.MBI->get_entities_by_dimension( fset1, 2, ents_of_interest );MB_CHK_ERR( rval );
3893 valuesComp1.resize( ents_of_interest.size() );
3894 rval = context.MBI->tag_get_data( tagType2, ents_of_interest, &valuesComp1[0] );MB_CHK_ERR( rval );
3895 }
3896 else
3897 {
3898 MB_CHK_ERR( MB_FAILURE );
3899 }
3900
3901
3902 std::set< int > uniq( valuesComp1.begin(), valuesComp1.end() );
3903 TLcomp1.resize( uniq.size() );
3904 for( std::set< int >::iterator sit = uniq.begin(); sit != uniq.end(); sit++ )
3905 {
3906
3907 int marker = *sit;
3908 int to_proc = marker % numProcs;
3909 int n = TLcomp1.get_n();
3910 TLcomp1.vi_wr[2 * n] = to_proc;
3911 TLcomp1.vi_wr[2 * n + 1] = marker;
3912 TLcomp1.inc_n();
3913 }
3914 }
3915
3916 ProcConfig pc( joint_communicator );
3917 pc.crystal_router()->gs_transfer( 1, TLcomp1,
3918 0 );
3919
3920 #ifdef VERBOSE
3921 std::stringstream ff1;
3922 ff1 << "TLcomp1_" << localRank << ".txt";
3923 TLcomp1.print_to_file( ff1.str().c_str() );
3924 #endif
3925 moab::TupleList::buffer sort_buffer;
3926 sort_buffer.buffer_init( TLcomp1.get_n() );
3927 TLcomp1.sort( 1, &sort_buffer );
3928 sort_buffer.reset();
3929 #ifdef VERBOSE
3930
3931 TLcomp1.print_to_file( ff1.str().c_str() );
3932 #endif
3933
3934
3935 TLcomp2.enableWriteAccess();
3936
3937 moab::TempestOnlineMap* weightMap = nullptr;
3938
3939
3940 std::vector< int > valuesComp2;
3941 if( *pid2 >= 0 )
3942 {
3943 appData& data2 = context.appDatas[*pid2];
3944 TempestMapAppData& tdata = data2.tempestData;
3945
3946 assert( tdata.weightMaps.size() == 1 );
3947
3948 weightMap = tdata.weightMaps.begin()->second;
3949
3950
3951 if( *direction == 1 )
3952 {
3953
3954
3955 rval = weightMap->fill_col_ids( valuesComp2 );MB_CHK_ERR( rval );
3956 }
3957 else if( *direction == 2 )
3958 {
3959
3960 rval = weightMap->fill_row_ids( valuesComp2 );MB_CHK_ERR( rval );
3961 }
3962
3963
3964
3965 std::set< int > uniq( valuesComp2.begin(), valuesComp2.end() );
3966 TLcomp2.resize( uniq.size() );
3967 for( std::set< int >::iterator sit = uniq.begin(); sit != uniq.end(); sit++ )
3968 {
3969
3970 int marker = *sit;
3971 int to_proc = marker % numProcs;
3972 int n = TLcomp2.get_n();
3973 TLcomp2.vi_wr[2 * n] = to_proc;
3974 TLcomp2.vi_wr[2 * n + 1] = marker;
3975 TLcomp2.inc_n();
3976 }
3977 }
3978 pc.crystal_router()->gs_transfer( 1, TLcomp2,
3979 0 );
3980
3981
3982 #ifdef VERBOSE
3983 std::stringstream ff2;
3984 ff2 << "TLcomp2_" << localRank << ".txt";
3985 TLcomp2.print_to_file( ff2.str().c_str() );
3986 #endif
3987 sort_buffer.buffer_reserve( TLcomp2.get_n() );
3988 TLcomp2.sort( 1, &sort_buffer );
3989 sort_buffer.reset();
3990
3991 #ifdef VERBOSE
3992 TLcomp2.print_to_file( ff2.str().c_str() );
3993 #endif
3994
3995
4002
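    // As in iMOAB_ComputeCommGraph: the two sorted lists are intersected on the
    // rendezvous tasks and (origin proc, marker, partner proc) tuples are sent back,
    // so the mesh owner learns which map-owning tasks need each of its entities.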
4003 TupleList TLBackToComp1;
4004 TLBackToComp1.initialize( 3, 0, 0, 0, 0 );
4005 TLBackToComp1.enableWriteAccess();
4006
4007 TupleList TLBackToComp2;
4008 TLBackToComp2.initialize( 3, 0, 0, 0, 0 );
4009 TLBackToComp2.enableWriteAccess();
4010
4011 int n1 = TLcomp1.get_n();
4012 int n2 = TLcomp2.get_n();
4013
4014 int indexInTLComp1 = 0;
4015 int indexInTLComp2 = 0;
4016 if( n1 > 0 && n2 > 0 )
4017 {
4018
4019 while( indexInTLComp1 < n1 && indexInTLComp2 < n2 )
4020 {
4021 int currentValue1 = TLcomp1.vi_rd[2 * indexInTLComp1 + 1];
4022 int currentValue2 = TLcomp2.vi_rd[2 * indexInTLComp2 + 1];
4023 if( currentValue1 < currentValue2 )
4024 {
4025
4026
4027
4028 indexInTLComp1++;
4029 continue;
4030 }
4031 if( currentValue1 > currentValue2 )
4032 {
4033
4034 indexInTLComp2++;
4035 continue;
4036 }
4037 int size1 = 1;
4038 int size2 = 1;
4039 while( indexInTLComp1 + size1 < n1 && currentValue1 == TLcomp1.vi_rd[2 * ( indexInTLComp1 + size1 ) + 1] )
4040 size1++;
4041 while( indexInTLComp2 + size2 < n2 && currentValue2 == TLcomp2.vi_rd[2 * ( indexInTLComp2 + size2 ) + 1] )
4042 size2++;
4043
4044 for( int i1 = 0; i1 < size1; i1++ )
4045 {
4046 for( int i2 = 0; i2 < size2; i2++ )
4047 {
4048
4049 int n = TLBackToComp1.get_n();
4050 TLBackToComp1.reserve();
4051 TLBackToComp1.vi_wr[3 * n] =
4052 TLcomp1.vi_rd[2 * ( indexInTLComp1 + i1 )];
4053
4054 TLBackToComp1.vi_wr[3 * n + 1] = currentValue1;
4055 TLBackToComp1.vi_wr[3 * n + 2] = TLcomp2.vi_rd[2 * ( indexInTLComp2 + i2 )];
4056 n = TLBackToComp2.get_n();
4057 TLBackToComp2.reserve();
4058 TLBackToComp2.vi_wr[3 * n] =
4059 TLcomp2.vi_rd[2 * ( indexInTLComp2 + i2 )];
4060 TLBackToComp2.vi_wr[3 * n + 1] = currentValue1;
4061 TLBackToComp2.vi_wr[3 * n + 2] = TLcomp1.vi_rd[2 * ( indexInTLComp1 + i1 )];
4062
4063 }
4064 }
4065 indexInTLComp1 += size1;
4066 indexInTLComp2 += size2;
4067 }
4068 }
4069 pc.crystal_router()->gs_transfer( 1, TLBackToComp1, 0 );
4070 pc.crystal_router()->gs_transfer( 1, TLBackToComp2, 0 );
4071
4072 TupleList TLv;
4073 TupleList TLc;
4074
4075 if( *pid1 >= 0 )
4076 {
4077
4078
4079
4080
4081 #ifdef VERBOSE
4082 std::stringstream f1;
4083 f1 << "TLBack1_" << localRank << ".txt";
4084 TLBackToComp1.print_to_file( f1.str().c_str() );
4085 #endif
4086
4087
4088 rval = cgraph->set_split_ranges( *comp1, TLBackToComp1, valuesComp1, lenTagType1, ents_of_interest, *type );MB_CHK_ERR( rval );
4089
4090
4091
4092 rval = cgraph->form_tuples_to_migrate_mesh( context.MBI, TLv, TLc, *type, lenTagType1 );MB_CHK_ERR( rval );
4093 }
4094 else
4095 {
4096 TLv.initialize( 2, 0, 0, 3, 0 );
4097 TLv.enableWriteAccess();
4098 if( *type != 2 )
4099 {
4100
            // tuple layout: 2 ints ( task, global id ), the spectral DoF ids when *type == 1,
            // a size field, and room for up to 10 vertex ids
4101             int size_tuple = 2 + ( ( *type != 1 ) ? 0 : lenTagType1 ) + 1 + 10;
4103 TLc.initialize( size_tuple, 0, 0, 0, 0 );
4104 TLc.enableWriteAccess();
4105 }
4106 }
4107 pc.crystal_router()->gs_transfer( 1, TLv, 0 );
4108 if( *type != 2 ) pc.crystal_router()->gs_transfer( 1, TLc, 0 );
4109
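    // On the receiving application ( pid3 ), rebuild the migrated mesh from the tuples and
    // register it with the existing weight map: as the covering ( source ) mesh with its column
    // DoFs when *direction == 1, or as the target mesh with its row DoFs when *direction == 2.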
4110 if( *pid3 >= 0 )
4111 {
4112 appData& data3 = context.appDatas[*pid3];
4113 EntityHandle fset3 = data3.file_set;
4114 Range primary_ents3;
4115 std::vector< int > values_entities;
4116 rval = cgraph_rev->form_mesh_from_tuples( context.MBI, TLv, TLc, *type, lenTagType1, fset3, primary_ents3,
4117 values_entities );MB_CHK_ERR( rval );
4118 iMOAB_UpdateMeshInfo( pid3 );
4119 int ndofPerEl = 1;
4120 if( 1 == *type ) ndofPerEl = (int)( sqrt( lenTagType1 ) );
4121
4122 assert( *pid2 >= 0 );
4123 appData& dataIntx = context.appDatas[*pid2];
4124 TempestMapAppData& tdata = dataIntx.tempestData;
4125
4126
4127 if( 1 == *direction )
4128 {
4129 tdata.pid_src = pid3;
4130 tdata.remapper->SetMeshSet( Remapper::CoveringMesh, fset3, &primary_ents3 );
4131 weightMap->SetSourceNDofsPerElement( ndofPerEl );
4132 weightMap->set_col_dc_dofs( values_entities );
4133 }
4134
4135 else
4136 {
4137 tdata.pid_dest = pid3;
4138 tdata.remapper->SetMeshSet( Remapper::TargetMesh, fset3, &primary_ents3 );
4139 weightMap->SetDestinationNDofsPerElement( ndofPerEl );
4140 weightMap->set_row_dc_dofs( values_entities );
4141 }
4142 }
4143
4144 return moab::MB_SUCCESS;
4145 }
4146
4147 #endif
4148
4149 #endif
4150
4151 #define USE_API
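// Helper: estimate the sphere radius of an application's mesh from the distance of its first
// local vertex to the origin; defaults to 1.0 when the mesh has no vertices.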
4152 static ErrCode ComputeSphereRadius( iMOAB_AppID pid, double* radius )
4153 {
4154 ErrorCode rval;
4155 moab::CartVect pos;
4156
4157 Range& verts = context.appDatas[*pid].all_verts;
4158 *radius = 1.0;
4159 if( verts.empty() ) return moab::MB_SUCCESS;
4160 moab::EntityHandle firstVertex = ( verts[0] );
4161
4162
4163 rval = context.MBI->get_coords( &( firstVertex ), 1, (double*)&( pos[0] ) );MB_CHK_ERR( rval );
4164
4165
4166
4167 *radius = pos.length();
4168 return moab::MB_SUCCESS;
4169 }
4170
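// Store the number of ghost layers to use on the source and target sides when the coverage
// mesh for this map is constructed.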
4171 ErrCode iMOAB_SetMapGhostLayers( iMOAB_AppID pid, int* n_src_ghost_layers, int* n_tgt_ghost_layers )
4172 {
4173 appData& data = context.appDatas[*pid];
4174
4175
4176 data.tempestData.num_src_ghost_layers = *n_src_ghost_layers;
4177 data.tempestData.num_tgt_ghost_layers = *n_tgt_ghost_layers;
4178
4179 return moab::MB_SUCCESS;
4180 }
4181
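// Prepare the remapping infrastructure for a source/target pair: update the mesh info, rescale
// both meshes to the unit sphere when their radii differ, optionally clean them up, create the
// TempestRemapper attached to the intersection application, and construct the coverage mesh
// (the source cells that can overlap locally owned target cells). Returns immediately if a
// remapper already exists for pid_intx.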
4182 ErrCode iMOAB_ComputeCoverageMesh( iMOAB_AppID pid_src, iMOAB_AppID pid_tgt, iMOAB_AppID pid_intx )
4183 {
4184
4185 constexpr bool validate = true;
4186 constexpr bool meshCleanup = true;
4187 constexpr bool gnomonic = true;
4188 constexpr double defaultradius = 1.0;
4189 constexpr double boxeps = 1.e-10;
4190
4191
4192 const double epsrel = ReferenceTolerance;
4193 double radius_source = 1.0;
4194 double radius_target = 1.0;
4195
4196
4197 ErrorCode rval;
4198 ErrCode ierr;
4199
4200
4201 appData& data_src = context.appDatas[*pid_src];
4202 appData& data_tgt = context.appDatas[*pid_tgt];
4203 appData& data_intx = context.appDatas[*pid_intx];
4204 #ifdef MOAB_HAVE_MPI
4205 ParallelComm* pco_intx = context.appDatas[*pid_intx].pcomm;
4206 #endif
4207
4208
4209 TempestMapAppData& tdata = data_intx.tempestData;
4210 if( tdata.remapper != nullptr ) return moab::MB_SUCCESS;
4211
4212 int rank = 0;
4213 #ifdef MOAB_HAVE_MPI
4214 if( pco_intx )
4215 {
4216 rank = pco_intx->rank();
4217 rval = pco_intx->check_all_shared_handles();MB_CHK_ERR( rval );
4218 }
4219 #endif
4220
4221 moab::DebugOutput outputFormatter( std::cout, rank, 0 );
4222 outputFormatter.set_prefix( "[iMOAB_ComputeCoverageMesh]: " );
4223
4224 ierr = iMOAB_UpdateMeshInfo( pid_src );MB_CHK_ERR( ierr );
4225 ierr = iMOAB_UpdateMeshInfo( pid_tgt );MB_CHK_ERR( ierr );
4226
4227
4228 rval = ComputeSphereRadius( pid_src, &radius_source );MB_CHK_ERR( rval );
4229 rval = ComputeSphereRadius( pid_tgt, &radius_target );MB_CHK_ERR( rval );
4230 #ifdef VERBOSE
4231 if( !rank )
4232 outputFormatter.printf( 0, "Radius of spheres: source = %12.14f, and target = %12.14f\n", radius_source,
4233 radius_target );
4234 #endif
4235
4236
4238 if( fabs( radius_source - radius_target ) > 1e-10 )
4239 {
4240 rval = IntxUtils::ScaleToRadius( context.MBI, data_src.file_set, defaultradius );MB_CHK_ERR( rval );
4241 rval = IntxUtils::ScaleToRadius( context.MBI, data_tgt.file_set, defaultradius );MB_CHK_ERR( rval );
4242 }
4243
4244
4245 #ifdef MOAB_HAVE_TEMPESTREMAP
4246 IntxAreaUtils areaAdaptor( IntxAreaUtils::GaussQuadrature );
4247 #else
4248 IntxAreaUtils areaAdaptor( IntxAreaUtils::lHuiller );
4249 #endif
4250
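    // optional cleanup on both meshes: enforce positive orientation, repair degenerate quads,
    // and split concave (non-convex) cells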
4251 if( meshCleanup )
4252 {
4253
4254
4255         MB_CHK_ERR( areaAdaptor.positive_orientation( context.MBI, data_src.file_set, defaultradius ) );
4257
4258
4259 MB_CHK_ERR( IntxUtils::fix_degenerate_quads( context.MBI, data_src.file_set ) );
4260
4261
4262 MB_CHK_ERR( moab::IntxUtils::enforce_convexity( context.MBI, data_src.file_set, rank ) );
4263
4264
4265
4266         MB_CHK_ERR( areaAdaptor.positive_orientation( context.MBI, data_tgt.file_set, defaultradius ) );
4268
4269
4270 MB_CHK_ERR( IntxUtils::fix_degenerate_quads( context.MBI, data_tgt.file_set ) );
4271
4272
4273 MB_CHK_ERR( moab::IntxUtils::enforce_convexity( context.MBI, data_tgt.file_set, rank ) );
4274 }
4275
4276
4277 #ifdef VERBOSE
4278 {
4279 moab::Range rintxverts, rintxelems;
4280 rval = context.MBI->get_entities_by_dimension( data_src.file_set, 0, rintxverts );MB_CHK_ERR( rval );
4281 rval = context.MBI->get_entities_by_dimension( data_src.file_set, data_src.dimension, rintxelems );MB_CHK_ERR( rval );
4282
4283 moab::Range bintxverts, bintxelems;
4284 rval = context.MBI->get_entities_by_dimension( data_tgt.file_set, 0, bintxverts );MB_CHK_ERR( rval );
4285 rval = context.MBI->get_entities_by_dimension( data_tgt.file_set, data_tgt.dimension, bintxelems );MB_CHK_ERR( rval );
4286
4287 if( !rank )
4288 {
4289 outputFormatter.printf( 0, "The source set contains %zu vertices and %zu elements\n", rintxverts.size(),
4290 rintxelems.size() );
4291 outputFormatter.printf( 0, "The target set contains %zu vertices and %zu elements\n", bintxverts.size(),
4292 bintxelems.size() );
4293 }
4294 }
4295 #endif
4296
4297
4298
4299
4300 data_intx.dimension = data_tgt.dimension;
4301
4302 tdata.pid_src = pid_src;
4303 tdata.pid_dest = pid_tgt;
4304
4305
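    // create the TempestRemapper that owns the source, target, coverage and overlap sets for
    // this intersection context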
4306 #ifdef MOAB_HAVE_MPI
4307 tdata.remapper = new moab::TempestRemapper( context.MBI, pco_intx );
4308 #else
4309 tdata.remapper = new moab::TempestRemapper( context.MBI );
4310 #endif
4311 tdata.remapper->meshValidate = validate;
4312 tdata.remapper->constructEdgeMap = true;
4313
4314
4315 tdata.remapper->initialize( false );
4316 tdata.remapper->GetMeshSet( moab::Remapper::SourceMesh ) = data_src.file_set;
4317 tdata.remapper->GetMeshSet( moab::Remapper::TargetMesh ) = data_tgt.file_set;
4318 tdata.remapper->GetMeshSet( moab::Remapper::OverlapMesh ) = data_intx.file_set;
4319
4320
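    // build the coverage set: source cells, padded with the requested number of ghost layers,
    // that can intersect the locally owned target cells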
4321 rval =
4322 tdata.remapper->ConstructCoveringSet( epsrel, 1.0, 1.0, boxeps, false, gnomonic, tdata.num_src_ghost_layers );MB_CHK_ERR( rval );
4323
4324 #ifdef MOAB_HAVE_TEMPESTREMAP
4325
4326 data_intx.secondary_file_set = tdata.remapper->GetMeshSet( moab::Remapper::CoveringMesh );
4327 #endif
4328
4329 return moab::MB_SUCCESS;
4330 }
4331
4332 ErrCode iMOAB_WriteCoverageMesh( iMOAB_AppID pid, const iMOAB_String filename, const iMOAB_String write_options )
4333 {
4334 return internal_WriteMesh( pid, filename, write_options, false );
4335 }
4336
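// Compute the overlap (intersection) mesh between the source coverage mesh and the target mesh
// on the sphere; the coverage mesh is built first if it does not exist yet. When validation is
// enabled, the total overlap area is compared against the source and target areas as a sanity
// check.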
4337 ErrCode iMOAB_ComputeMeshIntersectionOnSphere( iMOAB_AppID pid_src, iMOAB_AppID pid_tgt, iMOAB_AppID pid_intx )
4338 {
4339
4340 constexpr bool validate = true;
4341 constexpr bool use_kdtree_search = true;
4342 constexpr double defaultradius = 1.0;
4343
4344
4345 appData& data_src = context.appDatas[*pid_src];
4346 appData& data_tgt = context.appDatas[*pid_tgt];
4347 appData& data_intx = context.appDatas[*pid_intx];
4348 #ifdef MOAB_HAVE_MPI
4349 ParallelComm* pco_intx = context.appDatas[*pid_intx].pcomm;
4350 #endif
4351
4352
4353 TempestMapAppData& tdata = data_intx.tempestData;
4354
4355 if( tdata.remapper == nullptr )
4356 {
4357
4358
4359
4360 MB_CHK_ERR( iMOAB_ComputeCoverageMesh( pid_src, pid_tgt, pid_intx ) );
4361 }
4362
4363 int rank = 0;
4364 #ifdef MOAB_HAVE_MPI
4365 rank = pco_intx->rank();
4366 #endif
4367 moab::DebugOutput outputFormatter( std::cout, rank, 0 );
4368 outputFormatter.set_prefix( "[iMOAB_ComputeMeshIntersectionOnSphere]: " );
4369
4370
4371
4372 MB_CHK_ERR( tdata.remapper->ComputeOverlapMesh( use_kdtree_search, false ) );
4373
4374
4375 if( validate )
4376 {
4377
4378 IntxAreaUtils areaAdaptor( IntxAreaUtils::lHuiller );
4379 double local_areas[3] = { 0.0, 0.0, 0.0 }, global_areas[3] = { 0.0, 0.0, 0.0 };
4380 local_areas[0] = areaAdaptor.area_on_sphere( context.MBI, data_src.file_set, defaultradius );
4381 local_areas[1] = areaAdaptor.area_on_sphere( context.MBI, data_tgt.file_set, defaultradius );
4382 local_areas[2] = areaAdaptor.area_on_sphere( context.MBI, data_intx.file_set, defaultradius );
4383 #ifdef MOAB_HAVE_MPI
4384 global_areas[0] = global_areas[1] = global_areas[2] = 0.0;
4385 MPI_Reduce( &local_areas[0], &global_areas[0], 3, MPI_DOUBLE, MPI_SUM, 0, pco_intx->comm() );
4386 #else
4387 global_areas[0] = local_areas[0];
4388 global_areas[1] = local_areas[1];
4389 global_areas[2] = local_areas[2];
4390 #endif
4391
4392 if( rank == 0 )
4393 {
4394 outputFormatter.printf( 0,
4395 "initial area: source mesh = %12.14f, target mesh = %12.14f, "
4396 "overlap mesh = %12.14f\n",
4397 global_areas[0], global_areas[1], global_areas[2] );
4398 outputFormatter.printf( 0, " relative error w.r.t source = %12.14e, and target = %12.14e\n",
4399 fabs( global_areas[0] - global_areas[2] ) / global_areas[0],
4400 fabs( global_areas[1] - global_areas[2] ) / global_areas[1] );
4401 }
4402 }
4403
4404 return moab::MB_SUCCESS;
4405 }
4406
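// Set up the remapper for the point-cloud case, where at least one of the two applications is a
// point cloud and no geometric intersection is computed; the meshes and the coverage set are
// prepared so that DoF-matching projection weights can be generated afterwards.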
4407 ErrCode iMOAB_ComputePointDoFIntersection( iMOAB_AppID pid_src, iMOAB_AppID pid_tgt, iMOAB_AppID pid_intx )
4408 {
4409 ErrorCode rval;
4410 ErrCode ierr;
4411
4412 double radius_source = 1.0;
4413 double radius_target = 1.0;
4414 const double epsrel = ReferenceTolerance;
4415 const double boxeps = 1.e-8;
4416
4417
4418 appData& data_src = context.appDatas[*pid_src];
4419 appData& data_tgt = context.appDatas[*pid_tgt];
4420 appData& data_intx = context.appDatas[*pid_intx];
4421 #ifdef MOAB_HAVE_MPI
4422 ParallelComm* pco_intx = context.appDatas[*pid_intx].pcomm;
4423 #endif
4424
4425
4426 TempestMapAppData& tdata = data_intx.tempestData;
4427 if( tdata.remapper != nullptr ) return moab::MB_SUCCESS;
4428
4429 #ifdef MOAB_HAVE_MPI
4430 if( pco_intx )
4431 {
4432 rval = pco_intx->check_all_shared_handles();MB_CHK_ERR( rval );
4433 }
4434 #endif
4435
4436 ierr = iMOAB_UpdateMeshInfo( pid_src );MB_CHK_ERR( ierr );
4437 ierr = iMOAB_UpdateMeshInfo( pid_tgt );MB_CHK_ERR( ierr );
4438
4439
4440 ComputeSphereRadius( pid_src, &radius_source );
4441 ComputeSphereRadius( pid_tgt, &radius_target );
4442
4443 IntxAreaUtils areaAdaptor;
4444
4445 {
4446 moab::Range rintxverts, rintxelems;
4447 rval = context.MBI->get_entities_by_dimension( data_src.file_set, 0, rintxverts );MB_CHK_ERR( rval );
4448 rval = context.MBI->get_entities_by_dimension( data_src.file_set, 2, rintxelems );MB_CHK_ERR( rval );
4449 rval = IntxUtils::fix_degenerate_quads( context.MBI, data_src.file_set );MB_CHK_ERR( rval );
4450 rval = areaAdaptor.positive_orientation( context.MBI, data_src.file_set, radius_source );MB_CHK_ERR( rval );
4451 #ifdef VERBOSE
4452 std::cout << "The red set contains " << rintxverts.size() << " vertices and " << rintxelems.size()
4453 << " elements \n";
4454 #endif
4455
4456 moab::Range bintxverts, bintxelems;
4457 rval = context.MBI->get_entities_by_dimension( data_tgt.file_set, 0, bintxverts );MB_CHK_ERR( rval );
4458 rval = context.MBI->get_entities_by_dimension( data_tgt.file_set, 2, bintxelems );MB_CHK_ERR( rval );
4459 rval = IntxUtils::fix_degenerate_quads( context.MBI, data_tgt.file_set );MB_CHK_ERR( rval );
4460 rval = areaAdaptor.positive_orientation( context.MBI, data_tgt.file_set, radius_target );MB_CHK_ERR( rval );
4461 #ifdef VERBOSE
4462 std::cout << "The blue set contains " << bintxverts.size() << " vertices and " << bintxelems.size()
4463 << " elements \n";
4464 #endif
4465 }
4466
4467 data_intx.dimension = data_tgt.dimension;
4468
4469
4470 tdata.pid_src = pid_src;
4471 tdata.pid_dest = pid_tgt;
4472 data_intx.point_cloud = ( data_src.point_cloud || data_tgt.point_cloud );
4473 assert( data_intx.point_cloud == true );
4474
4475
4476 #ifdef MOAB_HAVE_MPI
4477 tdata.remapper = new moab::TempestRemapper( context.MBI, pco_intx );
4478 #else
4479 tdata.remapper = new moab::TempestRemapper( context.MBI );
4480 #endif
4481 tdata.remapper->meshValidate = true;
4482 tdata.remapper->constructEdgeMap = true;
4483
4484
4485 tdata.remapper->initialize( false );
4486 tdata.remapper->GetMeshSet( moab::Remapper::SourceMesh ) = data_src.file_set;
4487 tdata.remapper->GetMeshSet( moab::Remapper::TargetMesh ) = data_tgt.file_set;
4488 tdata.remapper->GetMeshSet( moab::Remapper::OverlapMesh ) = data_intx.file_set;
4489
4490
4492 if( fabs( radius_source - radius_target ) > 1e-10 )
4493 {
4494 rval = IntxUtils::ScaleToRadius( context.MBI, data_src.file_set, 1.0 );MB_CHK_ERR( rval );
4495 rval = IntxUtils::ScaleToRadius( context.MBI, data_tgt.file_set, 1.0 );MB_CHK_ERR( rval );
4496 }
4497
4498 rval = tdata.remapper->ConvertMeshToTempest( moab::Remapper::SourceMesh );MB_CHK_ERR( rval );
4499 rval = tdata.remapper->ConvertMeshToTempest( moab::Remapper::TargetMesh );MB_CHK_ERR( rval );
4500
4501
4502 rval = tdata.remapper->ConstructCoveringSet( epsrel, 1.0, 1.0, boxeps, false );MB_CHK_ERR( rval );
4503
4504 #ifdef MOAB_HAVE_MPI
4505
4508
4509
4510 #endif
4511
4512
4513
4514
4515 return moab::MB_SUCCESS;
4516 }
4517
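// Generate the projection weights for a previously computed intersection and cache them in the
// intersection application under solution_weights_identifier.
// Illustrative driver-side call sequence (variable names and argument values below are
// placeholders, not definitions from this file):
//   iMOAB_ComputeMeshIntersectionOnSphere( pidSrc, pidTgt, pidIntx );
//   iMOAB_ComputeScalarProjectionWeights( pidIntx, "map1", "fv", &order, "fv", &order, "",
//                                         &noBubble, &monotone, &volumetric, &invDist,
//                                         &noConserve, &validate, "GLOBAL_ID", "GLOBAL_ID" );
//   iMOAB_ApplyScalarProjectionWeights( pidIntx, &filterType, "map1", "srcTag", "tgtTag" );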
4518 ErrCode iMOAB_ComputeScalarProjectionWeights(
4519 iMOAB_AppID pid_intx,
4520 const iMOAB_String solution_weights_identifier,
4521 const iMOAB_String disc_method_source,
4522 int* disc_order_source,
4523 const iMOAB_String disc_method_target,
4524 int* disc_order_target,
4525 const iMOAB_String fv_method,
4526 int* fNoBubble,
4527 int* fMonotoneTypeID,
4528 int* fVolumetric,
4529 int* fInverseDistanceMap,
4530 int* fNoConservation,
4531 int* fValidate,
4532 const iMOAB_String source_solution_tag_dof_name,
4533 const iMOAB_String target_solution_tag_dof_name )
4534 {
4535 assert( disc_order_source && disc_order_target && *disc_order_source > 0 && *disc_order_target > 0 );
4536 assert( solution_weights_identifier && strlen( solution_weights_identifier ) );
4537 assert( disc_method_source && strlen( disc_method_source ) );
4538 assert( disc_method_target && strlen( disc_method_target ) );
4539 assert( source_solution_tag_dof_name && strlen( source_solution_tag_dof_name ) );
4540 assert( target_solution_tag_dof_name && strlen( target_solution_tag_dof_name ) );
4541
4542
4543 appData& data_intx = context.appDatas[*pid_intx];
4544 TempestMapAppData& tdata = data_intx.tempestData;
4545
4546
4547
4548 tdata.weightMaps[std::string( solution_weights_identifier )] = new moab::TempestOnlineMap( tdata.remapper );
4549
4550
4551 moab::TempestOnlineMap* weightMap = tdata.weightMaps[std::string( solution_weights_identifier )];
4552 assert( weightMap != nullptr );
4553
4554 GenerateOfflineMapAlgorithmOptions mapOptions;
4555 mapOptions.nPin = *disc_order_source;
4556 mapOptions.nPout = *disc_order_target;
4557 mapOptions.fSourceConcave = false;
4558 mapOptions.fTargetConcave = false;
4559
4560 mapOptions.strMethod = "";
4561 if( fv_method ) mapOptions.strMethod += std::string( fv_method ) + ";";
4562 if( fMonotoneTypeID )
4563 {
4564 switch( *fMonotoneTypeID )
4565 {
4566 case 0:
4567 mapOptions.fMonotone = false;
4568 break;
4569 case 3:
4570 mapOptions.strMethod += "mono3;";
4571 break;
4572 case 2:
4573 mapOptions.strMethod += "mono2;";
4574 break;
4575 default:
4576 mapOptions.fMonotone = true;
4577 }
4578 }
4579 else
4580 mapOptions.fMonotone = false;
4581
4582 mapOptions.fNoBubble = ( fNoBubble ? *fNoBubble : false );
4583 mapOptions.fNoConservation = ( fNoConservation ? *fNoConservation > 0 : false );
4584 mapOptions.fNoCorrectAreas = false;
4585
4586 mapOptions.fNoCheck = true;
4587 if( fVolumetric && *fVolumetric ) mapOptions.strMethod += "volumetric;";
4588 if( fInverseDistanceMap && *fInverseDistanceMap ) mapOptions.strMethod += "invdist;";
4589
4590 std::string metadataStr = mapOptions.strMethod + ";" + std::string( disc_method_source ) + ":" +
4591 std::to_string( *disc_order_source ) + ":" + std::string( source_solution_tag_dof_name ) +
4592 ";" + std::string( disc_method_target ) + ":" + std::to_string( *disc_order_target ) +
4593 ":" + std::string( target_solution_tag_dof_name );
4594
4595 data_intx.metadataMap[std::string( solution_weights_identifier )] = metadataStr;
4596
4597
4598
4599
4600 MB_CHK_ERR( weightMap->GenerateRemappingWeights(
4601 std::string( disc_method_source ),
4602 std::string( disc_method_target ),
4603 mapOptions,
4604 std::string( source_solution_tag_dof_name ),
4605 std::string( target_solution_tag_dof_name )
4606 ) );
4607
4608
4609 weightMap->PrintMapStatistics();
4610
4611 if( fValidate && *fValidate )
4612 {
4613 const double dNormalTolerance = 1.0E-8;
4614 const double dStrictTolerance = 1.0E-12;
4615 weightMap->CheckMap( true, true, ( fMonotoneTypeID && *fMonotoneTypeID ), dNormalTolerance, dStrictTolerance );
4616 }
4617
4618 return moab::MB_SUCCESS;
4619 }
4620
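// Apply a previously computed weight map to one or more tag pairs; the source and target tag
// name arguments may each contain several tag names separated by ':' and are matched pair-wise.
// An optional CAAS post-processing filter, selected through filter_type, can be used to limit
// over- and undershoots.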
4621 ErrCode iMOAB_ApplyScalarProjectionWeights(
4622 iMOAB_AppID pid_intersection,
4623 int* filter_type,
4624 const iMOAB_String solution_weights_identifier,
4625 const iMOAB_String source_solution_tag_name,
4626 const iMOAB_String target_solution_tag_name )
4627 {
4628 assert( solution_weights_identifier && strlen( solution_weights_identifier ) );
4629 assert( source_solution_tag_name && strlen( source_solution_tag_name ) );
4630 assert( target_solution_tag_name && strlen( target_solution_tag_name ) );
4631
4632 moab::ErrorCode rval;
4633
4634
4635 appData& data_intx = context.appDatas[*pid_intersection];
4636 TempestMapAppData& tdata = data_intx.tempestData;
4637
4638 if( !tdata.weightMaps.count( std::string( solution_weights_identifier ) ) )
4639 return moab::MB_INDEX_OUT_OF_RANGE;
4640 moab::TempestOnlineMap* weightMap = tdata.weightMaps[std::string( solution_weights_identifier )];
4641
4642
4643 std::vector< std::string > srcNames;
4644 std::vector< std::string > tgtNames;
4645 std::vector< Tag > srcTagHandles;
4646 std::vector< Tag > tgtTagHandles;
4647 std::string separator( ":" );
4648 std::string src_name( source_solution_tag_name );
4649 std::string tgt_name( target_solution_tag_name );
4650 split_tag_names( src_name, separator, srcNames );
4651 split_tag_names( tgt_name, separator, tgtNames );
4652 if( srcNames.size() != tgtNames.size() )
4653 {
4654         std::cout << " error: number of source tag names does not match number of target tag names\n";
4655 return moab::MB_FAILURE;
4656 }
4657
4658
4659 for( size_t i = 0; i < srcNames.size(); i++ )
4660 {
4661 Tag tagHandle;
4662
4663 rval = context.MBI->tag_get_handle( srcNames[i].c_str(), tagHandle );
4664 if( MB_SUCCESS != rval || nullptr == tagHandle )
4665 {
4666 return moab::MB_TAG_NOT_FOUND;
4667 }
4668 srcTagHandles.push_back( tagHandle );
4669
4670
4671 rval = context.MBI->tag_get_handle( tgtNames[i].c_str(), tagHandle );
4672 if( MB_SUCCESS != rval || nullptr == tagHandle )
4673 {
4674 return moab::MB_TAG_NOT_FOUND;
4675 }
4676 tgtTagHandles.push_back( tagHandle );
4677 }
4678
4679 #ifdef VERBOSE
4680
4681 moab::TempestRemapper* remapper = tdata.remapper;
4682 std::vector< double > solSTagVals;
4683 std::vector< double > solTTagVals;
4684
4685 moab::Range sents, tents;
4686 if( data_intx.point_cloud )
4687 {
4688 appData& data_src = context.appDatas[*tdata.pid_src];
4689 appData& data_tgt = context.appDatas[*tdata.pid_dest];
4690 if( data_src.point_cloud )
4691 {
4692 moab::Range& covSrcEnts = remapper->GetMeshVertices( moab::Remapper::CoveringMesh );
4693 solSTagVals.resize( covSrcEnts.size(), 0. );
4694 sents = covSrcEnts;
4695 }
4696 else
4697 {
4698 moab::Range& covSrcEnts = remapper->GetMeshEntities( moab::Remapper::CoveringMesh );
4699 solSTagVals.resize(
4700 covSrcEnts.size() * weightMap->GetSourceNDofsPerElement() * weightMap->GetSourceNDofsPerElement(), 0. );
4701 sents = covSrcEnts;
4702 }
4703 if( data_tgt.point_cloud )
4704 {
4705 moab::Range& tgtEnts = remapper->GetMeshVertices( moab::Remapper::TargetMesh );
4706 solTTagVals.resize( tgtEnts.size(), 0. );
4707 tents = tgtEnts;
4708 }
4709 else
4710 {
4711 moab::Range& tgtEnts = remapper->GetMeshEntities( moab::Remapper::TargetMesh );
4712 solTTagVals.resize( tgtEnts.size() * weightMap->GetDestinationNDofsPerElement() *
4713 weightMap->GetDestinationNDofsPerElement(),
4714 0. );
4715 tents = tgtEnts;
4716 }
4717 }
4718 else
4719 {
4720 moab::Range& covSrcEnts = remapper->GetMeshEntities( moab::Remapper::CoveringMesh );
4721 moab::Range& tgtEnts = remapper->GetMeshEntities( moab::Remapper::TargetMesh );
4722 solSTagVals.resize(
4723 covSrcEnts.size() * weightMap->GetSourceNDofsPerElement() * weightMap->GetSourceNDofsPerElement(), -1.0 );
4724 solTTagVals.resize( tgtEnts.size() * weightMap->GetDestinationNDofsPerElement() *
4725 weightMap->GetDestinationNDofsPerElement(),
4726 0. );
4727
4728 sents = covSrcEnts;
4729 tents = tgtEnts;
4730 }
4731 #endif
4732
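    // map the requested filter_type to the CAAS variants supported by TempestOnlineMap:
    // 1 = global, 2 = local, 3 = local with adjacent neighbors; anything else disables the filter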
4733 moab::TempestOnlineMap::CAASType caasType = moab::TempestOnlineMap::CAAS_NONE;
4734 if( filter_type )
4735 {
4736 switch( *filter_type )
4737 {
4738 case 1:
4739 caasType = moab::TempestOnlineMap::CAAS_GLOBAL;
4740 break;
4741 case 2:
4742 caasType = moab::TempestOnlineMap::CAAS_LOCAL;
4743 break;
4744 case 3:
4745 caasType = moab::TempestOnlineMap::CAAS_LOCAL_ADJACENT;
4746 break;
4747 default:
4748 caasType = moab::TempestOnlineMap::CAAS_NONE;
4749 }
4750 }
4751
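    // project every source tag onto its corresponding target tag with the selected map and filter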
4752 for( size_t i = 0; i < srcTagHandles.size(); i++ )
4753 {
4754
4755
4756
4757 rval = weightMap->ApplyWeights( srcTagHandles[i], tgtTagHandles[i], false, caasType );MB_CHK_ERR( rval );
4758 }
4759
4760
4761 #ifdef VERBOSE
4762 ParallelComm* pco_intx = context.appDatas[*pid_intersection].pcomm;
4763
4764 int ivar = 0;
4765 {
4766 Tag ssolnTag = srcTagHandles[ivar];
4767 std::stringstream sstr;
4768 sstr << "covsrcTagData_" << *pid_intersection << "_" << ivar << "_" << pco_intx->rank() << ".txt";
4769 std::ofstream output_file( sstr.str().c_str() );
4770 for( unsigned i = 0; i < sents.size(); ++i )
4771 {
4772 EntityHandle elem = sents[i];
4773 std::vector< double > locsolSTagVals( 16 );
4774 rval = context.MBI->tag_get_data( ssolnTag, &elem, 1, &locsolSTagVals[0] );MB_CHK_ERR( rval );
4775 output_file << "\n" << remapper->GetGlobalID( Remapper::CoveringMesh, i ) << "-- \n\t";
4776 for( unsigned j = 0; j < 16; ++j )
4777 output_file << locsolSTagVals[j] << " ";
4778 }
4779 output_file.flush();
4780 output_file.close();
4781 }
4782 {
4783 std::stringstream sstr;
4784 sstr << "outputSrcDest_" << *pid_intersection << "_" << ivar << "_" << pco_intx->rank() << ".h5m";
4785 EntityHandle sets[2] = { context.appDatas[*tdata.pid_src].file_set,
4786 context.appDatas[*tdata.pid_dest].file_set };
4787 rval = context.MBI->write_file( sstr.str().c_str(), nullptr, "", sets, 2 );MB_CHK_ERR( rval );
4788 }
4789 {
4790 std::stringstream sstr;
4791 sstr << "outputCovSrcDest_" << *pid_intersection << "_" << ivar << "_" << pco_intx->rank() << ".h5m";
4792
4793 EntityHandle covering_set = remapper->GetCoveringSet();
4794 EntityHandle sets[2] = { covering_set, context.appDatas[*tdata.pid_dest].file_set };
4795 rval = context.MBI->write_file( sstr.str().c_str(), nullptr, "", sets, 2 );MB_CHK_ERR( rval );
4796 }
4797 {
4798 std::stringstream sstr;
4799 sstr << "covMesh_" << *pid_intersection << "_" << pco_intx->rank() << ".vtk";
4800
4801 EntityHandle covering_set = remapper->GetCoveringSet();
4802 rval = context.MBI->write_file( sstr.str().c_str(), nullptr, "", &covering_set, 1 );MB_CHK_ERR( rval );
4803 }
4804 {
4805 std::stringstream sstr;
4806 sstr << "tgtMesh_" << *pid_intersection << "_" << pco_intx->rank() << ".vtk";
4807
4808 EntityHandle target_set = remapper->GetMeshSet( Remapper::TargetMesh );
4809 rval = context.MBI->write_file( sstr.str().c_str(), nullptr, "", &target_set, 1 );MB_CHK_ERR( rval );
4810 }
4811 {
4812 std::stringstream sstr;
4813 sstr << "colvector_" << *pid_intersection << "_" << ivar << "_" << pco_intx->rank() << ".txt";
4814 std::ofstream output_file( sstr.str().c_str() );
4815 for( unsigned i = 0; i < solSTagVals.size(); ++i )
4816 output_file << i << " " << weightMap->col_dofmap[i] << " " << weightMap->col_gdofmap[i] << " "
4817 << solSTagVals[i] << "\n";
4818 output_file.flush();
4819 output_file.close();
4820 }
4821 #endif
4822
4823
4824 return moab::MB_SUCCESS;
4825 }
4826
4827 #endif
4828
4829 #ifdef __cplusplus
4830 }
4831 #endif