#include "NCHelperDomain.hpp"
#include "moab/FileOptions.hpp"
#include "moab/ReadUtilIface.hpp"
#include "moab/IntxMesh/IntxUtils.hpp"
#include "AEntityFactory.hpp"
#ifdef MOAB_HAVE_MPI
#include "moab/ParallelMergeMesh.hpp"
#endif
#ifdef MOAB_HAVE_ZOLTAN
#include "moab/ZoltanPartitioner.hpp"
#include "moab/TupleList.hpp"
#endif

#include <cmath>
#include <sstream>

namespace moab
{

bool NCHelperDomain::can_read_file( ReadNC* readNC, int )
{
    std::vector< std::string >& dimNames = readNC->dimNames;
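
    // A domain file is identified by the presence of the 'ni', 'nj' and 'nv' dimensions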
    if( ( std::find( dimNames.begin(), dimNames.end(), std::string( "ni" ) ) != dimNames.end() ) &&
        ( std::find( dimNames.begin(), dimNames.end(), std::string( "nj" ) ) != dimNames.end() ) &&
        ( std::find( dimNames.begin(), dimNames.end(), std::string( "nv" ) ) != dimNames.end() ) )
    {
        return true;
    }

    return false;
}

ErrorCode NCHelperDomain::init_mesh_vals()
{
    Interface*& mbImpl = _readNC->mbImpl;
    std::vector< std::string >& dimNames = _readNC->dimNames;
    std::vector< int >& dimLens = _readNC->dimLens;
    std::map< std::string, ReadNC::VarData >& varInfo = _readNC->varInfo;
    DebugOutput& dbgOut = _readNC->dbgOut;
#ifdef MOAB_HAVE_MPI
    bool& isParallel = _readNC->isParallel;
#endif
    int& partMethod = _readNC->partMethod;
    ScdParData& parData = _readNC->parData;

    ErrorCode rval;
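
    // Look for the 'ni' dimension; it gives the global extent of the grid in the i direction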
    std::vector< std::string >::iterator vit;
    unsigned int idx;
    if( ( vit = std::find( dimNames.begin(), dimNames.end(), "ni" ) ) != dimNames.end() )
        idx = vit - dimNames.begin();
    else
    {
        MB_SET_ERR( MB_FAILURE, "Couldn't find 'ni' dimension" );
    }
    iDim = idx;
    gDims[0] = 0;
    gDims[3] = dimLens[idx];
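
    // Look for the 'nj' dimension; it gives the global extent of the grid in the j direction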
    if( ( vit = std::find( dimNames.begin(), dimNames.end(), "nj" ) ) != dimNames.end() )
        idx = vit - dimNames.begin();
    else
    {
        MB_SET_ERR( MB_FAILURE, "Couldn't find 'nj' dimension" );
    }
    jDim = idx;
    gDims[1] = 0;
    gDims[4] = dimLens[idx];
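
    // The domain grid is 2D, so the k extents are left unset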
    gDims[2] = -1;
    gDims[5] = -1;
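
    // 'nv' is the number of vertices (corners) per cell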
    if( ( vit = std::find( dimNames.begin(), dimNames.end(), "nv" ) ) != dimNames.end() )
        idx = vit - dimNames.begin();
    else
    {
        MB_SET_ERR( MB_FAILURE, "Couldn't find 'nv' dimension" );
    }
    nvDim = idx;
    nv = dimLens[idx];
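
    // Compute this rank's block of the global ni x nj cell grid when running on more than one process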
    int rank = 0, procs = 1;
#ifdef MOAB_HAVE_MPI
    if( isParallel )
    {
        ParallelComm*& myPcomm = _readNC->myPcomm;
        rank = myPcomm->proc_config().proc_rank();
        procs = myPcomm->proc_config().proc_size();
    }
#endif
    if( procs > 1 )
    {
        for( int i = 0; i < 6; i++ )
            parData.gDims[i] = gDims[i];
        parData.partMethod = partMethod;
        int pdims[3];

        locallyPeriodic[0] = locallyPeriodic[1] = locallyPeriodic[2] = 0;
        rval = ScdInterface::compute_partition( procs, rank, parData, lDims, locallyPeriodic, pdims );MB_CHK_ERR( rval );
        for( int i = 0; i < 3; i++ )
            parData.pDims[i] = pdims[i];

        dbgOut.tprintf( 1, "Partition: %dx%d (out of %dx%d)\n", lDims[3] - lDims[0], lDims[4] - lDims[1],
                        gDims[3] - gDims[0], gDims[4] - gDims[1] );
        if( 0 == rank )
            dbgOut.tprintf( 1, "Contiguous chunks of size %d bytes.\n",
                            8 * ( lDims[3] - lDims[0] ) * ( lDims[4] - lDims[1] ) );
    }
    else
    {
        for( int i = 0; i < 6; i++ )
            lDims[i] = gDims[i];
        locallyPeriodic[0] = globallyPeriodic[0];
    }
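
    // For this grid the cell (center) dimensions coincide with the local dimensions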
    lCDims[0] = lDims[0];
    lCDims[3] = lDims[3];
    lCDims[1] = lDims[1];
    lCDims[4] = lDims[4];

#if 0
    if (-1 != lDims[0])
        ilVals.resize(lDims[3] - lDims[0] + 1);
    if (-1 != lCDims[0])
        ilCVals.resize(lCDims[3] - lCDims[0] + 1);
    if (-1 != lDims[1])
        jlVals.resize(lDims[4] - lDims[1] + 1);
    if (-1 != lCDims[1])
        jlCVals.resize(lCDims[4] - lCDims[1] + 1);
    if (nTimeSteps > 0)
        tVals.resize(nTimeSteps);
#endif

    dbgOut.tprintf( 1, "I=%d-%d, J=%d-%d\n", lDims[0], lDims[3], lDims[1], lDims[4] );
    dbgOut.tprintf( 1, "%d elements, %d vertices\n", ( lDims[3] - lDims[0] ) * ( lDims[4] - lDims[1] ),
                    ( lDims[3] - lDims[0] ) * ( lDims[4] - lDims[1] ) * nv );
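
    // Classify each variable by the grid location its dimensions imply, and set its number of levels
    // if it has a level dimension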
    std::map< std::string, ReadNC::VarData >::iterator mit;
    for( mit = varInfo.begin(); mit != varInfo.end(); ++mit )
    {
        ReadNC::VarData& vd = ( *mit ).second;

        if( std::find( vd.varDims.begin(), vd.varDims.end(), tDim ) != vd.varDims.end() )
        {
            if( ( std::find( vd.varDims.begin(), vd.varDims.end(), iCDim ) != vd.varDims.end() ) &&
                ( std::find( vd.varDims.begin(), vd.varDims.end(), jCDim ) != vd.varDims.end() ) )
                vd.entLoc = ReadNC::ENTLOCFACE;
            else if( ( std::find( vd.varDims.begin(), vd.varDims.end(), jDim ) != vd.varDims.end() ) &&
                     ( std::find( vd.varDims.begin(), vd.varDims.end(), iCDim ) != vd.varDims.end() ) )
                vd.entLoc = ReadNC::ENTLOCNSEDGE;
            else if( ( std::find( vd.varDims.begin(), vd.varDims.end(), jCDim ) != vd.varDims.end() ) &&
                     ( std::find( vd.varDims.begin(), vd.varDims.end(), iDim ) != vd.varDims.end() ) )
                vd.entLoc = ReadNC::ENTLOCEWEDGE;
        }

        if( std::find( vd.varDims.begin(), vd.varDims.end(), levDim ) != vd.varDims.end() ) vd.numLev = nLevels;
    }
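
    // Store the local i/j index ranges on the file set as conventional tags (__ni_LOC_MINMAX, __nj_LOC_MINMAX)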
    std::vector< std::string > ijdimNames( 2 );
    ijdimNames[0] = "__ni";
    ijdimNames[1] = "__nj";
    std::string tag_name;
    Tag tagh;

    for( unsigned int i = 0; i != ijdimNames.size(); i++ )
    {
        std::vector< int > val( 2, 0 );
        if( ijdimNames[i] == "__ni" )
        {
            val[0] = lDims[0];
            val[1] = lDims[3];
        }
        else if( ijdimNames[i] == "__nj" )
        {
            val[0] = lDims[1];
            val[1] = lDims[4];
        }

        std::stringstream ss_tag_name;
        ss_tag_name << ijdimNames[i] << "_LOC_MINMAX";
        tag_name = ss_tag_name.str();
        rval = mbImpl->tag_get_handle( tag_name.c_str(), 2, MB_TYPE_INTEGER, tagh, MB_TAG_SPARSE | MB_TAG_CREAT );MB_CHK_SET_ERR( rval, "Trouble creating conventional tag " << tag_name );
        rval = mbImpl->tag_set_data( tagh, &_fileSet, 1, &val[0] );MB_CHK_SET_ERR( rval, "Trouble setting data to conventional tag " << tag_name );
        if( MB_SUCCESS == rval ) dbgOut.tprintf( 2, "Conventional tag %s is created.\n", tag_name.c_str() );
    }
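
    // Cell center coordinates are expected to be stored as floating-point values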
    switch( varInfo["xc"].varDataType )
    {
        case NC_FLOAT:
        case NC_DOUBLE:
            break;
        default:
            MB_SET_ERR( MB_FAILURE, "Unexpected variable data type for 'xc'" );
    }
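
    // Record on the file set that conventional tags have been created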
    Tag convTagsCreated = 0;
    int def_val = 0;
    rval = mbImpl->tag_get_handle( "__CONV_TAGS_CREATED", 1, MB_TYPE_INTEGER, convTagsCreated,
                                   MB_TAG_SPARSE | MB_TAG_CREAT, &def_val );MB_CHK_SET_ERR( rval, "Trouble getting __CONV_TAGS_CREATED tag" );
    int create_conv_tags_flag = 1;
    rval = mbImpl->tag_set_data( convTagsCreated, &_fileSet, 1, &create_conv_tags_flag );MB_CHK_SET_ERR( rval, "Trouble setting __CONV_TAGS_CREATED tag" );

    return MB_SUCCESS;
}

ErrorCode NCHelperDomain::create_mesh( Range& faces )
{
    Interface*& mbImpl = _readNC->mbImpl;
    Tag& mGlobalIdTag = _readNC->mGlobalIdTag;
    DebugOutput& dbgOut = _readNC->dbgOut;
    bool& culling = _readNC->culling;
    bool& repartition = _readNC->repartition;

    ErrorCode rval;
    int success = 0;

    int local_elems = ( lDims[4] - lDims[1] ) * ( lDims[3] - lDims[0] );
    dbgOut.tprintf( 1, "local cells: %d \n", local_elems );
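
    // Read the integer cell mask for this rank's block (dimensions ordered nj, ni)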
    std::string maskstr( "mask" );
    ReadNC::VarData& vmask = _readNC->varInfo[maskstr];

    vmask.readStarts.push_back( lDims[1] );
    vmask.readStarts.push_back( lDims[0] );
    vmask.readCounts.push_back( lDims[4] - lDims[1] );
    vmask.readCounts.push_back( lDims[3] - lDims[0] );
    std::vector< int > mask( local_elems );
    success = NCFUNCAG( _vara_int )( _fileId, vmask.varId, &vmask.readStarts[0], &vmask.readCounts[0], &mask[0] );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read int data for mask variable " );
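
    // Global cell IDs are 1-based and follow the row-major (j, i) ordering of the global grid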
    std::vector< int > gids( local_elems );
    int elem_index = 0;
    int global_row_size = gDims[3] - gDims[0];

    for( int j = lCDims[1]; j < lCDims[4]; j++ )
        for( int i = lCDims[0]; i < lCDims[3]; i++ )
        {
            gids[elem_index] = j * global_row_size + i + 1;
            elem_index++;
        }
    std::vector< NCDF_SIZE > startsv( 3 );
    startsv[0] = vmask.readStarts[0];
    startsv[1] = vmask.readStarts[1];
    startsv[2] = 0;
    std::vector< NCDF_SIZE > countsv( 3 );
    countsv[0] = vmask.readCounts[0];
    countsv[1] = vmask.readCounts[1];
    countsv[2] = nv;
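
    // Read the cell corner coordinates xv/yv; the 'nv' dimension can be either the first
    // or the last dimension of these variables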
    std::string xvstr( "xv" );
    ReadNC::VarData& var_xv = _readNC->varInfo[xvstr];
    std::vector< double > xv( local_elems * nv );

    std::vector< NCDF_SIZE > starts_vv, counts_vv;
    starts_vv.resize( 3 );
    counts_vv.resize( 3 );
    bool nv_last = true;
    if( _readNC->dimNames[var_xv.varDims[0]] == std::string( "nv" ) )
    {
        starts_vv[0] = 0;
        starts_vv[1] = startsv[0];
        starts_vv[2] = startsv[1];
        counts_vv[0] = nv;
        counts_vv[1] = countsv[0];
        counts_vv[2] = countsv[1];
        nv_last = false;
    }
    else
    {
        starts_vv = startsv;
        counts_vv = countsv;
    }
    dbgOut.tprintf( 1, " nv is the last dimension in xv array (0 or 1) %d \n", (int)nv_last );
    success = NCFUNCAG( _vara_double )( _fileId, var_xv.varId, &starts_vv[0], &counts_vv[0], &xv[0] );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read double data for xv variable " );

    std::string yvstr( "yv" );
    ReadNC::VarData& var_yv = _readNC->varInfo[yvstr];
    std::vector< double > yv( local_elems * nv );
    success = NCFUNCAG( _vara_double )( _fileId, var_yv.varId, &starts_vv[0], &counts_vv[0], &yv[0] );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read double data for yv variable " );
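
    // Read the cell center coordinates (xc, yc) and the per-cell frac and area fields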
    std::string xcstr( "xc" );
    ReadNC::VarData& var_xc = _readNC->varInfo[xcstr];
    std::vector< double > xc( local_elems );
    success = NCFUNCAG( _vara_double )( _fileId, var_xc.varId, &vmask.readStarts[0], &vmask.readCounts[0], &xc[0] );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read double data for xc variable " );

    std::string ycstr( "yc" );
    ReadNC::VarData& var_yc = _readNC->varInfo[ycstr];
    std::vector< double > yc( local_elems );
    success = NCFUNCAG( _vara_double )( _fileId, var_yc.varId, &vmask.readStarts[0], &vmask.readCounts[0], &yc[0] );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read double data for yc variable " );

    std::string fracstr( "frac" );
    ReadNC::VarData& var_frac = _readNC->varInfo[fracstr];
    std::vector< double > frac( local_elems );
    if( var_frac.varId >= 0 )
    {
        success =
            NCFUNCAG( _vara_double )( _fileId, var_frac.varId, &vmask.readStarts[0], &vmask.readCounts[0], &frac[0] );
        if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read double data for frac variable " );
    }
    std::string areastr( "area" );
    std::vector< double > area( local_elems );
    ReadNC::VarData& var_area = _readNC->varInfo[areastr];
    success = NCFUNCAG( _vara_double )( _fileId, var_area.varId, &vmask.readStarts[0], &vmask.readCounts[0], &area[0] );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read double data for area variable " );
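
    // Create dense tags that will hold the per-cell fields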
    Tag areaTag, fracTag, xcTag, ycTag;
    rval = mbImpl->tag_get_handle( "area", 1, MB_TYPE_DOUBLE, areaTag, MB_TAG_DENSE | MB_TAG_CREAT );MB_CHK_SET_ERR( rval, "Trouble creating area tag" );
    rval = mbImpl->tag_get_handle( "frac", 1, MB_TYPE_DOUBLE, fracTag, MB_TAG_DENSE | MB_TAG_CREAT );MB_CHK_SET_ERR( rval, "Trouble creating frac tag" );
    rval = mbImpl->tag_get_handle( "xc", 1, MB_TYPE_DOUBLE, xcTag, MB_TAG_DENSE | MB_TAG_CREAT );MB_CHK_SET_ERR( rval, "Trouble creating xc tag" );
    rval = mbImpl->tag_get_handle( "yc", 1, MB_TYPE_DOUBLE, ycTag, MB_TAG_DENSE | MB_TAG_CREAT );MB_CHK_SET_ERR( rval, "Trouble creating yc tag" );

    Tag maskTag;
    int def_val = 1;
    rval = mbImpl->tag_get_handle( "GRID_IMASK", 1, MB_TYPE_INTEGER, maskTag, MB_TAG_DENSE | MB_TAG_CREAT, &def_val );MB_CHK_SET_ERR( rval, "Trouble creating GRID_IMASK tag" );
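
    // Optionally rebalance the local cells across processes (Zoltan RCB) before creating mesh entities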
#ifdef MOAB_HAVE_MPI
    int procs = 1;
    bool& isParallel = _readNC->isParallel;
    ParallelComm* myPcomm = NULL;
    if( isParallel )
    {
        myPcomm = _readNC->myPcomm;
        procs = myPcomm->proc_config().proc_size();
    }

    if( procs >= 2 && repartition )
    {
        rval = redistribute_cells( myPcomm, xc, yc, xv, yv, frac, mask, area, gids, nv, nv_last );MB_CHK_SET_ERR( rval, "Failed to redistribute local cells" );
        local_elems = (int)xc.size();
        dbgOut.tprintf( 1, "local cells after repartition: %d \n", local_elems );
    }
#endif
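
    // Count cells with mask == 1; when culling, only these become mesh elements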
    int nb_with_mask1 = 0;
    for( int i = 0; i < local_elems; i++ )
        if( 1 == mask[i] ) nb_with_mask1++;
    dbgOut.tprintf( 1, "local cells with mask 1: %d \n", nb_with_mask1 );

    EntityHandle* conn_arr;
    EntityHandle vtx_handle;
    Range tmp_range;
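
    // Choose the element type from the number of corners per cell (nv == 1 keeps MBVERTEX,
    // so cells are represented by the vertices themselves)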
    EntityHandle start_cell;
    EntityType mdb_type = MBVERTEX;
    if( nv == 3 )
        mdb_type = MBTRI;
    else if( nv == 4 )
        mdb_type = MBQUAD;
    else if( nv > 4 )
        mdb_type = MBPOLYGON;

    int num_actual_cells = local_elems;
    if( culling ) num_actual_cells = nb_with_mask1;

    if( nv > 1 && num_actual_cells > 0 )
    {
        rval = _readNC->readMeshIface->get_element_connect( num_actual_cells, nv, mdb_type, 0, start_cell, conn_arr );MB_CHK_SET_ERR( rval, "Failed to create local cells" );
        tmp_range.insert( start_cell, start_cell + num_actual_cells - 1 );
    }
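
    // Deduplicate shared corners with a map from 3D point to vertex handle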
    std::map< Node3D, EntityHandle > vertex_map;

    if( num_actual_cells > 0 )
    {
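
        // First pass: convert every corner from (lon, lat) in degrees to Cartesian coordinates
        // on the unit sphere and collect the unique points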
        const double pideg = acos( -1.0 ) / 180.0;

        for( elem_index = 0; elem_index < local_elems; elem_index++ )
        {
            if( culling && 0 == mask[elem_index] ) continue;

            dbgOut.tprintf( 3, "elem index %d \n", elem_index );
            for( int k = 0; k < nv; k++ )
            {
                int index_v_arr = nv * elem_index + k;
                if( !nv_last ) index_v_arr = k * local_elems + elem_index;
                double x, y;
                if( nv > 1 )
                {
                    x = xv[index_v_arr];
                    y = yv[index_v_arr];
                    double cosphi = cos( pideg * y );
                    double zmult = sin( pideg * y );
                    double xmult = cosphi * cos( x * pideg );
                    double ymult = cosphi * sin( x * pideg );
                    Node3D pt( xmult, ymult, zmult );
                    vertex_map[pt] = 0;
                    dbgOut.tprintf( 3, " %d x=%5.1f y=%5.1f pt: %f %f %f \n", k, x, y, pt.coords[0], pt.coords[1],
                                    pt.coords[2] );
                }
                else
                {
                    x = xc[elem_index];
                    y = yc[elem_index];
                    Node3D pt( x, y, 0 );
                    vertex_map[pt] = 0;
                }
            }
        }
        int nLocalVertices = (int)vertex_map.size();
        std::vector< double* > arrays;
        EntityHandle start_vertex;
        rval = _readNC->readMeshIface->get_node_coords( 3, nLocalVertices, 0, start_vertex, arrays );MB_CHK_SET_ERR( rval, "Failed to create local vertices" );

        vtx_handle = start_vertex;
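
        // Assign a vertex handle to each unique point and fill the coordinate arrays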
        double *x = arrays[0], *y = arrays[1], *z = arrays[2];
        for( auto i = vertex_map.begin(); i != vertex_map.end(); ++i )
        {
            i->second = vtx_handle;
            ++vtx_handle;
            *x = i->first.coords[0];
            ++x;
            *y = i->first.coords[1];
            ++y;
            *z = i->first.coords[2];
            ++z;
        }

        int index = 0;
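
        // Second pass: set the connectivity of each cell and its per-cell tags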
        for( elem_index = 0; elem_index < local_elems; elem_index++ )
        {
            if( culling && 0 == mask[elem_index] ) continue;

            for( int k = 0; k < nv; k++ )
            {
                int index_v_arr = nv * elem_index + k;
                if( !nv_last ) index_v_arr = k * local_elems + elem_index;
                if( nv > 1 )
                {
                    double x = xv[index_v_arr];
                    double y = yv[index_v_arr];
                    double cosphi = cos( pideg * y );
                    double zmult = sin( pideg * y );
                    double xmult = cosphi * cos( x * pideg );
                    double ymult = cosphi * sin( x * pideg );
                    Node3D pt( xmult, ymult, zmult );
                    conn_arr[index * nv + k] = vertex_map[pt];
                }
            }
            EntityHandle cell = start_vertex + index;
            if( nv > 1 ) cell = start_cell + index;

            rval = mbImpl->tag_set_data( xcTag, &cell, 1, &xc[elem_index] );MB_CHK_SET_ERR( rval, "Failed to set xc tag" );
            rval = mbImpl->tag_set_data( ycTag, &cell, 1, &yc[elem_index] );MB_CHK_SET_ERR( rval, "Failed to set yc tag" );
            rval = mbImpl->tag_set_data( areaTag, &cell, 1, &area[elem_index] );MB_CHK_SET_ERR( rval, "Failed to set area tag" );
            rval = mbImpl->tag_set_data( fracTag, &cell, 1, &frac[elem_index] );MB_CHK_SET_ERR( rval, "Failed to set frac tag" );
            rval = mbImpl->tag_set_data( maskTag, &cell, 1, &mask[elem_index] );MB_CHK_SET_ERR( rval, "Failed to set mask tag" );

            int globalId = gids[elem_index];
            rval = mbImpl->tag_set_data( mGlobalIdTag, &cell, 1, &globalId );MB_CHK_SET_ERR( rval, "Failed to set global id tag" );
            index++;
        }

        rval = mbImpl->add_entities( _fileSet, tmp_range );MB_CHK_SET_ERR( rval, "Failed to add new cells to current file set" );
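
        // Drop padded (repeated) corner vertices from cells that have fewer than nv distinct corners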
        std::vector< Tag > tagList;
        tagList.push_back( mGlobalIdTag );
        tagList.push_back( xcTag );
        tagList.push_back( ycTag );
        tagList.push_back( areaTag );
        tagList.push_back( fracTag );
        tagList.push_back( maskTag );
        rval = IntxUtils::remove_padded_vertices( mbImpl, _fileSet, tagList );MB_CHK_SET_ERR( rval, "Failed to remove duplicate vertices" );

        rval = mbImpl->get_entities_by_dimension( _fileSet, 2, faces );MB_CHK_ERR( rval );
        Range all_verts;
        rval = mbImpl->get_connectivity( faces, all_verts );MB_CHK_ERR( rval );

        rval = mbImpl->add_entities( _fileSet, all_verts );MB_CHK_ERR( rval );
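
        // Make sure vertex-to-element adjacencies are up to date for the newly created cells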
        Core* mb = (Core*)mbImpl;
        AEntityFactory* adj_fact = mb->a_entity_factory();
        if( !adj_fact->vert_elem_adjacencies() )
            adj_fact->create_vert_elem_adjacencies();
        else
        {
            for( Range::iterator it = faces.begin(); it != faces.end(); ++it )
            {
                EntityHandle eh = *it;
                const EntityHandle* conn = NULL;
                int num_nodes = 0;
                rval = mb->get_connectivity( eh, conn, num_nodes );MB_CHK_ERR( rval );
                adj_fact->notify_create_entity( eh, conn, num_nodes );
            }
        }
    }
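
    // In parallel, merge coincident vertices along partition boundaries and assign global IDs to the vertices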
#ifdef MOAB_HAVE_MPI
    myPcomm = _readNC->myPcomm;
    if( myPcomm && procs >= 2 )
    {
        double tol = 1.e-12;
        ParallelMergeMesh pmm( myPcomm, tol );
        rval = pmm.merge( _fileSet, false, 2 );MB_CHK_SET_ERR( rval, "Failed to merge vertices in parallel" );

        rval = myPcomm->assign_global_ids( _fileSet, 0 );MB_CHK_ERR( rval );
    }
#endif

    return MB_SUCCESS;
}

#ifdef MOAB_HAVE_MPI
ErrorCode NCHelperDomain::redistribute_cells( ParallelComm* myPcomm,
                                              std::vector< double >& xc,
                                              std::vector< double >& yc,
                                              std::vector< double >& xv,
                                              std::vector< double >& yv,
                                              std::vector< double >& frac,
                                              std::vector< int >& masks,
                                              std::vector< double >& area,
                                              std::vector< int >& gids,
                                              int nv,
                                              bool nv_last )
{
#ifdef MOAB_HAVE_ZOLTAN
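
    // When culling, only cells with mask == 1 were created, so only those take part in the repartition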
    size_t num_local_cells = gids.size();
    bool& culling = _readNC->culling;
    if( culling )
    {
        num_local_cells = 0;
        for( size_t i = 0; i < masks.size(); i++ )
            if( 1 == masks[i] ) ++num_local_cells;
    }
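
    // Convert cell centers from (lon, lat) degrees to points on the unit sphere; Zoltan RCB partitions these points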
    std::vector< double > xi( num_local_cells ), yi( num_local_cells ), zi( num_local_cells );
    std::vector< int > gids2( num_local_cells );
    const double pideg = acos( -1.0 ) / 180.0;
    size_t actual_index = 0;
    for( size_t i = 0; i < xc.size(); i++ )
    {
        if( culling && 0 == masks[i] ) continue;
        double x = xc[i];
        double y = yc[i];
        double cosphi = cos( pideg * y );
        double zmult = sin( pideg * y );
        double xmult = cosphi * cos( x * pideg );
        double ymult = cosphi * sin( x * pideg );
        xi[actual_index] = xmult;
        yi[actual_index] = ymult;
        zi[actual_index] = zmult;
        gids2[actual_index] = gids[i];
        actual_index++;
    }
    Interface*& mbImpl = _readNC->mbImpl;
    ZoltanPartitioner* mbZTool = new ZoltanPartitioner( mbImpl, myPcomm, false, 0, NULL );
    std::vector< int > dest( num_local_cells );
    ErrorCode rval = mbZTool->repartition_to_procs( xi, yi, zi, gids2, "RCB", dest );MB_CHK_SET_ERR( rval, "Error in Zoltan partitioning" );
    delete mbZTool;
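
    // Pack each cell into a tuple: 3 integers (destination proc, global ID, mask) followed by
    // 2*nv + 4 reals (area, xc, yc, frac, nv corner x's, nv corner y's)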
    moab::TupleList tl;
    unsigned numr = 2 * nv + 4;
    tl.initialize( 3, 0, 0, numr, num_local_cells );
    tl.enableWriteAccess();

    int index_in_dest = 0;
    for( size_t i = 0; i < xc.size(); i++ )
    {
        if( culling && 0 == masks[i] ) continue;
        int gdof = gids2[index_in_dest];
        int to_proc = dest[index_in_dest];
        int mask = masks[i];
        int n = tl.get_n();
        tl.vi_wr[3 * n] = to_proc;
        tl.vi_wr[3 * n + 1] = gdof;
        tl.vi_wr[3 * n + 2] = mask;
        tl.vr_wr[n * numr] = area[i];
        tl.vr_wr[n * numr + 1] = xc[i];
        tl.vr_wr[n * numr + 2] = yc[i];
        tl.vr_wr[n * numr + 3] = frac[i];
        for( int k = 0; k < nv; k++ )
        {
            // corners are indexed by the original local cell index i, not by the tuple index
            int index_v_arr = nv * i + k;
            if( !nv_last ) index_v_arr = k * xc.size() + i;
            tl.vr_wr[n * numr + 4 + k] = xv[index_v_arr];
            tl.vr_wr[n * numr + 4 + nv + k] = yv[index_v_arr];
        }
        tl.inc_n();
        index_in_dest++;
    }
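
    // Route each tuple to its destination process through the crystal router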
    ( myPcomm->proc_config().crystal_router() )->gs_transfer( 1, tl, 0 );
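
    // Sort the received tuples by global ID, then unpack them back into the caller's arrays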
    moab::TupleList::buffer sort_buffer;
    int N = tl.get_n();
    sort_buffer.buffer_init( N );
    tl.sort( 1, &sort_buffer );
    xc.resize( N );
    yc.resize( N );
    xv.resize( N * nv );
    yv.resize( N * nv );
    frac.resize( N );
    masks.resize( N );
    area.resize( N );
    gids.resize( N );
    for( int n = 0; n < N; n++ )
    {
        gids[n] = tl.vi_wr[3 * n + 1];
        masks[n] = tl.vi_wr[3 * n + 2];
        area[n] = tl.vr_wr[n * numr];
        xc[n] = tl.vr_wr[n * numr + 1];
        yc[n] = tl.vr_wr[n * numr + 2];
        frac[n] = tl.vr_wr[n * numr + 3];
        for( int k = 0; k < nv; k++ )
        {
            int index_v_arr = nv * n + k;
            if( !nv_last ) index_v_arr = k * N + n;
            xv[index_v_arr] = tl.vr_wr[n * numr + 4 + k];
            yv[index_v_arr] = tl.vr_wr[n * numr + 4 + nv + k];
        }
    }

    return MB_SUCCESS;
#else
    MB_CHK_SET_ERR( MB_FAILURE, "MOAB must be configured with Zoltan to repartition domain cells" );
#endif
}

#endif

}  // namespace moab