Mesh Oriented datABase  (version 5.5.1)
An array-based unstructured mesh library
NCHelperMPAS.cpp
Go to the documentation of this file.
1 #include "NCHelperMPAS.hpp"
2 #include "moab/ReadUtilIface.hpp"
3 #include "moab/FileOptions.hpp"
5 #include "MBTagConventions.hpp"
6 
7 #ifdef MOAB_HAVE_ZOLTAN
9 #endif
10 
11 #include <cmath>
12 
13 namespace moab
14 {
15 
16 const int DEFAULT_MAX_EDGES_PER_CELL = 10;
17 
18 NCHelperMPAS::NCHelperMPAS( ReadNC* readNC, int fileId, const FileOptions& opts, EntityHandle fileSet )
19  : UcdNCHelper( readNC, fileId, opts, fileSet ), maxEdgesPerCell( DEFAULT_MAX_EDGES_PER_CELL ), numCellGroups( 0 ),
20  createGatherSet( false )
21 {
22  // Ignore variables containing topological information
23  ignoredVarNames.insert( "nEdgesOnEdge" );
24  ignoredVarNames.insert( "nEdgesOnCell" );
25  ignoredVarNames.insert( "edgesOnVertex" );
26  ignoredVarNames.insert( "cellsOnVertex" );
27  ignoredVarNames.insert( "verticesOnEdge" );
28  ignoredVarNames.insert( "edgesOnEdge" );
29  ignoredVarNames.insert( "cellsOnEdge" );
30  ignoredVarNames.insert( "verticesOnCell" );
31  ignoredVarNames.insert( "edgesOnCell" );
32  ignoredVarNames.insert( "cellsOnCell" );
33  // ignore variables containing mesh related info, that are currently saved as variable tags on
34  // file set ; if needed, these should be saved as dense tags, and read accordingly, in parallel
35  ignoredVarNames.insert( "weightsOnEdge" );
36  ignoredVarNames.insert( "angleEdge" );
37  ignoredVarNames.insert( "areaCell" );
38  ignoredVarNames.insert( "areaTriangle" );
39  ignoredVarNames.insert( "dcEdge" );
40  ignoredVarNames.insert( "dv1Edge" );
41  ignoredVarNames.insert( "dv2Edge" );
42  ignoredVarNames.insert( "fEdge" );
43  ignoredVarNames.insert( "fVertex" );
44  ignoredVarNames.insert( "h_s" );
45  ignoredVarNames.insert( "kiteAreasOnVertex" );
46  ignoredVarNames.insert( "latCell" );
47  ignoredVarNames.insert( "latEdge" );
48  ignoredVarNames.insert( "latVertex" );
49  ignoredVarNames.insert( "lonCell" );
50  ignoredVarNames.insert( "lonEdge" );
51  ignoredVarNames.insert( "lonVertex" );
52  ignoredVarNames.insert( "meshDensity" );
53  ignoredVarNames.insert( "xCell" );
54  ignoredVarNames.insert( "xEdge" );
55  ignoredVarNames.insert( "xVertex" );
56  ignoredVarNames.insert( "yCell" );
57  ignoredVarNames.insert( "yEdge" );
58  ignoredVarNames.insert( "yVertex" );
59  ignoredVarNames.insert( "zCell" );
60  ignoredVarNames.insert( "zEdge" );
61  ignoredVarNames.insert( "zVertex" );
62  // Ignore variables for index conversion
63  ignoredVarNames.insert( "indexToVertexID" );
64  ignoredVarNames.insert( "indexToEdgeID" );
65  ignoredVarNames.insert( "indexToCellID" );
66 }
67 
// NOTE(review): the line declaring this function (inner line 68) is missing from
// this extraction. From the body it is the MPAS-grid detection predicate: it
// takes the ReadNC instance and returns bool. Confirm the exact signature
// against the upstream MOAB source.
// A file is treated as an MPAS grid iff its header declares the "vertexDegree"
// dimension, which is specific to MPAS headers.
69 {
70  std::vector< std::string >& dimNames = readNC->dimNames;
71 
72  // If dimension name "vertexDegree" exists then it should be the MPAS grid
73  if( std::find( dimNames.begin(), dimNames.end(), std::string( "vertexDegree" ) ) != dimNames.end() ) return true;
74 
75  return false;
76 }
77 
// NOTE(review): the function signature (inner line 78) is missing from this
// extraction. From the body, this is the header-parsing step: it locates the
// MPAS dimensions (Time, nCells, nEdges, nVertices, nVertLevels and optional
// level dimensions), stores their indices/lengths in the helper's members,
// reads time coordinate values into tVals, and classifies every variable by
// entity location and number of levels. Returns ErrorCode (MB_SUCCESS on
// success). Confirm the exact name against the upstream MOAB source.
79 {
80  std::vector< std::string >& dimNames = _readNC->dimNames;
81  std::vector< int >& dimLens = _readNC->dimLens;
82  std::map< std::string, ReadNC::VarData >& varInfo = _readNC->varInfo;
83 
84  ErrorCode rval;
85  int idx;
86  std::vector< std::string >::iterator vit;
87 
88  // Get max edges per cell reported in the MPAS file header
89  if( ( vit = std::find( dimNames.begin(), dimNames.end(), "maxEdges" ) ) != dimNames.end() )
90  {
91  idx = vit - dimNames.begin();
92  maxEdgesPerCell = dimLens[idx];
// NOTE(review): inner lines 93 and 95 are missing from this extraction --
// presumably the guard "if( maxEdgesPerCell > DEFAULT_MAX_EDGES_PER_CELL )"
// and the start of the MB_SET_ERR call whose message continues below. Verify
// against the upstream MOAB source.
94  {
96  "maxEdgesPerCell read from the MPAS file header has exceeded " << DEFAULT_MAX_EDGES_PER_CELL );
97  }
98  }
99 
100  // Look for time dimension; both "Time" and "time" spellings are accepted
101  idx = -1;
102  if( ( vit = std::find( dimNames.begin(), dimNames.end(), "Time" ) ) != dimNames.end() )
103  idx = vit - dimNames.begin();
104  else if( ( vit = std::find( dimNames.begin(), dimNames.end(), "time" ) ) != dimNames.end() )
105  idx = vit - dimNames.begin();
106  else
107  {
108  tDim = -1;
109  }
110  if( idx >= 0 )
111  {
112  tDim = idx;
113  nTimeSteps = dimLens[idx];
114  }
115  else
116  nTimeSteps = 0;
117 
118  // Get number of cells (required; error out if absent)
119  if( ( vit = std::find( dimNames.begin(), dimNames.end(), "nCells" ) ) != dimNames.end() )
120  idx = vit - dimNames.begin();
121  else
122  {
123  MB_SET_ERR( MB_FAILURE, "Couldn't find 'nCells' dimension" );
124  }
125  cDim = idx;
126  nCells = dimLens[idx];
127 
128  // Get number of edges (required)
129  if( ( vit = std::find( dimNames.begin(), dimNames.end(), "nEdges" ) ) != dimNames.end() )
130  idx = vit - dimNames.begin();
131  else
132  {
133  MB_SET_ERR( MB_FAILURE, "Couldn't find 'nEdges' dimension" );
134  }
135  eDim = idx;
136  nEdges = dimLens[idx];
137 
138  // Get number of vertices (required)
139  vDim = -1;
140  if( ( vit = std::find( dimNames.begin(), dimNames.end(), "nVertices" ) ) != dimNames.end() )
141  idx = vit - dimNames.begin();
142  else
143  {
144  MB_SET_ERR( MB_FAILURE, "Couldn't find 'nVertices' dimension" );
145  }
146  vDim = idx;
147  nVertices = dimLens[idx];
148 
149  // Get number of vertex levels
150  if( ( vit = std::find( dimNames.begin(), dimNames.end(), "nVertLevels" ) ) != dimNames.end() )
151  idx = vit - dimNames.begin();
152  else
153  {
// NOTE(review): when nVertLevels is absent, only a warning is printed and idx
// still holds the nVertices dimension index from above, so levDim/nLevels
// below end up set from the nVertices dimension. This looks intentional for
// mesh-only files, but is worth confirming.
154  std::cerr << "Warning: dimension nVertLevels not found in header.\nThe file may contain "
155  "just the mesh"
156  << std::endl;
157  }
158  levDim = idx;
159  nLevels = dimLens[idx];
160 
161  // Dimension indices for other optional levels
162  std::vector< unsigned int > opt_lev_dims;
163 
164  // Get index of vertex levels P1
165  if( ( vit = std::find( dimNames.begin(), dimNames.end(), "nVertLevelsP1" ) ) != dimNames.end() )
166  {
167  idx = vit - dimNames.begin();
168  opt_lev_dims.push_back( idx );
169  }
170 
171  // Get index of vertex levels P2
172  if( ( vit = std::find( dimNames.begin(), dimNames.end(), "nVertLevelsP2" ) ) != dimNames.end() )
173  {
174  idx = vit - dimNames.begin();
175  opt_lev_dims.push_back( idx );
176  }
177 
178  // Get index of soil levels
179  if( ( vit = std::find( dimNames.begin(), dimNames.end(), "nSoilLevels" ) ) != dimNames.end() )
180  {
181  idx = vit - dimNames.begin();
182  opt_lev_dims.push_back( idx );
183  }
184 
185  std::map< std::string, ReadNC::VarData >::iterator vmit;
186 
187  // Store time coordinate values in tVals
188  if( nTimeSteps > 0 )
189  {
190  // Note, two possible types for xtime variable: double(Time) or char(Time, StrLen)
191  if( ( vmit = varInfo.find( "xtime" ) ) != varInfo.end() && ( *vmit ).second.varDims.size() == 1 )
192  {
193  // If xtime variable is double type, read time coordinate values to tVals
194  rval = read_coordinate( "xtime", 0, nTimeSteps - 1, tVals );MB_CHK_SET_ERR( rval, "Trouble reading 'xtime' variable" );
195  }
196  else
197  {
198  // If xtime variable does not exist, or it is string type, set dummy values to tVals
199  for( int t = 0; t < nTimeSteps; t++ )
200  tVals.push_back( (double)t );
201  }
202  }
203 
204  // For each variable, determine the entity location type and number of levels
205  for( vmit = varInfo.begin(); vmit != varInfo.end(); ++vmit )
206  {
207  ReadNC::VarData& vd = ( *vmit ).second;
208 
209  // Default entLoc is ENTLOCSET
210  if( std::find( vd.varDims.begin(), vd.varDims.end(), tDim ) != vd.varDims.end() )
211  {
// NOTE(review): inner lines 213, 215 and 217 are missing from this extraction
// -- presumably the assignments of vd.entLoc to the vertex/edge/face entity
// location constants for each matching dimension below. Verify against the
// upstream MOAB source.
212  if( std::find( vd.varDims.begin(), vd.varDims.end(), vDim ) != vd.varDims.end() )
214  else if( std::find( vd.varDims.begin(), vd.varDims.end(), eDim ) != vd.varDims.end() )
216  else if( std::find( vd.varDims.begin(), vd.varDims.end(), cDim ) != vd.varDims.end() )
218  }
219 
220  // Default numLev is 0
221  if( std::find( vd.varDims.begin(), vd.varDims.end(), levDim ) != vd.varDims.end() )
222  vd.numLev = nLevels;
223  else
224  {
225  // If nVertLevels dimension is not found, try other optional levels such as
226  // nVertLevelsP1
227  for( unsigned int i = 0; i < opt_lev_dims.size(); i++ )
228  {
229  if( std::find( vd.varDims.begin(), vd.varDims.end(), opt_lev_dims[i] ) != vd.varDims.end() )
230  {
231  vd.numLev = dimLens[opt_lev_dims[i]];
232  break;
233  }
234  }
235  }
236 
237  // Hack: ignore variables with more than 3 dimensions, e.g. tracers(Time, nCells,
238  // nVertLevels, nTracers)
239  if( vd.varDims.size() > 3 ) ignoredVarNames.insert( vd.varName );
240  }
241 
242  // Hack: create dummy variables for dimensions (like nCells) with no corresponding coordinate
243  // variables
244  rval = create_dummy_variables();MB_CHK_SET_ERR( rval, "Failed to create dummy variables" );
245 
246  return MB_SUCCESS;
247 }
248 
249 // When noMesh option is used on this read, the old ReadNC class instance for last read can get out
250 // of scope (and deleted). The old instance initialized some variables properly when the mesh was
251 // created, but they are now lost. The new instance (will not create the mesh with noMesh option)
252 // has to restore them based on the existing mesh from last read
// NOTE(review): the function signature (inner line 253) is missing from this
// extraction. Per the comment above, this restores helper state (numCellGroups,
// the localGid* ranges, and the cellHandleToGlobalID map) from a previously
// created mesh when the NOMESH read option is used; it returns ErrorCode.
// Confirm the exact name against the upstream MOAB source.
254 {
255  Interface*& mbImpl = _readNC->mbImpl;
256  Tag& mGlobalIdTag = _readNC->mGlobalIdTag;
257  bool& noMesh = _readNC->noMesh;
258 
259  if( noMesh )
260  {
261  ErrorCode rval;
262 
263  // Restore numCellGroups from the sparse tag saved on the file set
264  if( 0 == numCellGroups )
265  {
266  Tag numCellGroupsTag;
267  rval = mbImpl->tag_get_handle( "__NUM_CELL_GROUPS", 1, MB_TYPE_INTEGER, numCellGroupsTag );MB_CHK_SET_ERR( rval, "Trouble getting __NUM_CELL_GROUPS tag" );
268  if( MB_SUCCESS == rval ) rval = mbImpl->tag_get_data( numCellGroupsTag, &_fileSet, 1, &numCellGroups );MB_CHK_SET_ERR( rval, "Trouble getting data of __NUM_CELL_GROUPS tag" );
269  }
270 
271  if( localGidVerts.empty() )
272  {
273  // Get all vertices from current file set (it is the input set in no_mesh scenario)
274  Range local_verts;
275  rval = mbImpl->get_entities_by_dimension( _fileSet, 0, local_verts );MB_CHK_SET_ERR( rval, "Trouble getting local vertices in current file set" );
276 
277  if( !local_verts.empty() )
278  {
279  std::vector< int > gids( local_verts.size() );
280 
281  // !IMPORTANT : this has to be the GLOBAL_ID tag
282  rval = mbImpl->tag_get_data( mGlobalIdTag, local_verts, &gids[0] );MB_CHK_SET_ERR( rval, "Trouble getting local gid values of vertices" );
283 
284  // Restore localGidVerts
285  std::copy( gids.rbegin(), gids.rend(), range_inserter( localGidVerts ) );
// NOTE(review): inner line 286 is missing from this extraction -- presumably
// "nLocalVertices = localGidVerts.size();" to restore the local vertex count.
// Verify against the upstream MOAB source.
287  }
288  }
289 
290  if( localGidEdges.empty() )
291  {
292  // Get all edges from current file set (it is the input set in no_mesh scenario)
293  Range local_edges;
294  rval = mbImpl->get_entities_by_dimension( _fileSet, 1, local_edges );MB_CHK_SET_ERR( rval, "Trouble getting local edges in current file set" );
295 
296  if( !local_edges.empty() )
297  {
298  std::vector< int > gids( local_edges.size() );
299 
300  // !IMPORTANT : this has to be the GLOBAL_ID tag
301  rval = mbImpl->tag_get_data( mGlobalIdTag, local_edges, &gids[0] );MB_CHK_SET_ERR( rval, "Trouble getting local gid values of edges" );
302 
303  // Restore localGidEdges
304  std::copy( gids.rbegin(), gids.rend(), range_inserter( localGidEdges ) );
// NOTE(review): inner line 305 is missing -- presumably
// "nLocalEdges = localGidEdges.size();". Verify against upstream MOAB.
306  }
307  }
308 
309  if( localGidCells.empty() )
310  {
311  // Get all cells from current file set (it is the input set in no_mesh scenario)
312  Range local_cells;
313  rval = mbImpl->get_entities_by_dimension( _fileSet, 2, local_cells );MB_CHK_SET_ERR( rval, "Trouble getting local cells in current file set" );
314 
315  if( !local_cells.empty() )
316  {
317  std::vector< int > gids( local_cells.size() );
318 
319  // !IMPORTANT : this has to be the GLOBAL_ID tag
320  rval = mbImpl->tag_get_data( mGlobalIdTag, local_cells, &gids[0] );MB_CHK_SET_ERR( rval, "Trouble getting local gid values of cells" );
321 
322  // Restore localGidCells
323  std::copy( gids.rbegin(), gids.rend(), range_inserter( localGidCells ) );
// NOTE(review): inner line 324 is missing -- presumably
// "nLocalCells = localGidCells.size();". Verify against upstream MOAB.
325 
326  if( numCellGroups > 1 )
327  {
328  // Restore cellHandleToGlobalID map
// NOTE(review): inner line 329 is missing -- presumably the declaration
// "Range::iterator rit;" used by the loop below. Verify.
330  int i;
331  for( rit = local_cells.begin(), i = 0; rit != local_cells.end(); ++rit, i++ )
332  cellHandleToGlobalID[*rit] = gids[i];
333  }
334  }
335  }
336  }
337 
338  return MB_SUCCESS;
339 }
340 
// NOTE(review): the function signature (inner line 341) is missing from this
// extraction. From the body it is the mesh-creation entry point and takes a
// Range of faces as an output argument (passed to create_local_cells /
// create_padded_local_cells below); it returns ErrorCode. Confirm the exact
// signature against the upstream MOAB source.
// Overall flow: partition cells over procs (trivial partition, optionally
// redistributed), read nEdgesOnCell and verticesOnCell for the local cells,
// fix up padded connectivity, then create local vertices/edges/cells and
// optionally a gather set on the gather rank.
342 {
343  Interface*& mbImpl = _readNC->mbImpl;
344  bool& noMixedElements = _readNC->noMixedElements;
345  bool& noEdges = _readNC->noEdges;
346  DebugOutput& dbgOut = _readNC->dbgOut;
347 
348 #ifdef MOAB_HAVE_MPI
349  int rank = 0;
350  int procs = 1;
351  bool& isParallel = _readNC->isParallel;
352  ParallelComm* myPcomm = NULL;
353  if( isParallel )
354  {
355  myPcomm = _readNC->myPcomm;
356  rank = myPcomm->proc_config().proc_rank();
357  procs = myPcomm->proc_config().proc_size();
358  }
359 
360  // Need to know whether we'll be creating gather mesh
361  if( rank == _readNC->gatherSetRank ) createGatherSet = true;
362 
363  if( procs >= 2 )
364  {
365  // Shift rank to obtain a rotated trivial partition
366  int shifted_rank = rank;
367  int& trivialPartitionShift = _readNC->trivialPartitionShift;
368  if( trivialPartitionShift > 0 ) shifted_rank = ( rank + trivialPartitionShift ) % procs;
369 
370  // Compute the number of local cells on this proc
371  nLocalCells = int( std::floor( 1.0 * nCells / procs ) );
372 
373  // The starting global cell index in the MPAS file for this proc
374  int start_cell_idx = shifted_rank * nLocalCells;
375 
376  // Number of extra cells after equal split over procs
377  int iextra = nCells % procs;
378 
379  // Allocate extra cells over procs
380  if( shifted_rank < iextra ) nLocalCells++;
381  start_cell_idx += std::min( shifted_rank, iextra );
382 
383  start_cell_idx++; // 0 based -> 1 based
384 
385  // Redistribute local cells after trivial partition (e.g. apply Zoltan partition)
386  ErrorCode rval = redistribute_local_cells( start_cell_idx, myPcomm );MB_CHK_SET_ERR( rval, "Failed to redistribute local cells after trivial partition" );
387  }
388  else
389  {
// NOTE(review): inner lines 390-391 are missing from this extraction --
// presumably the serial (single-proc) initialization of nLocalCells and
// localGidCells over all cells. Verify against the upstream MOAB source.
392  }
393 #else
// NOTE(review): inner lines 394-395 are missing from this extraction --
// presumably the non-MPI equivalent of the serial initialization above.
// Verify against the upstream MOAB source.
396 #endif
397  dbgOut.tprintf( 1, " localGidCells.psize() = %d\n", (int)localGidCells.psize() );
398  dbgOut.tprintf( 1, " localGidCells.size() = %d\n", (int)localGidCells.size() );
399 
400  // Read number of edges on each local cell, to calculate actual maxEdgesPerCell
401  int nEdgesOnCellVarId;
402  int success = NCFUNC( inq_varid )( _fileId, "nEdgesOnCell", &nEdgesOnCellVarId );
403  if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of nEdgesOnCell" );
404  std::vector< int > num_edges_on_local_cells( nLocalCells );
405 #ifdef MOAB_HAVE_PNETCDF
406  size_t nb_reads = localGidCells.psize();
407  std::vector< int > requests( nb_reads );
408  std::vector< int > statuss( nb_reads );
409  size_t idxReq = 0;
410 #endif
411  size_t indexInArray = 0;
412  for( Range::pair_iterator pair_iter = localGidCells.pair_begin(); pair_iter != localGidCells.pair_end();
413  ++pair_iter )
414  {
415  EntityHandle starth = pair_iter->first;
416  EntityHandle endh = pair_iter->second;
417  NCDF_SIZE read_start = (NCDF_SIZE)( starth - 1 );
418  NCDF_SIZE read_count = (NCDF_SIZE)( endh - starth + 1 );
419 
420  // Do a partial read in each subrange
421 #ifdef MOAB_HAVE_PNETCDF
422  success = NCFUNCREQG( _vara_int )( _fileId, nEdgesOnCellVarId, &read_start, &read_count,
423  &( num_edges_on_local_cells[indexInArray] ), &requests[idxReq++] );
424 #else
425  success = NCFUNCAG( _vara_int )( _fileId, nEdgesOnCellVarId, &read_start, &read_count,
426  &( num_edges_on_local_cells[indexInArray] ) );
427 #endif
428  if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read nEdgesOnCell data in a loop" );
429 
430  // Increment the index for next subrange
431  indexInArray += ( endh - starth + 1 );
432  }
433 
434 #ifdef MOAB_HAVE_PNETCDF
435  // Wait outside the loop
436  success = NCFUNC( wait_all )( _fileId, requests.size(), &requests[0], &statuss[0] );
437  if( success ) MB_SET_ERR( MB_FAILURE, "Failed on wait_all" );
438 #endif
439 
440  // Get local maxEdgesPerCell on this proc
441  int local_max_edges_per_cell =
442  *( std::max_element( num_edges_on_local_cells.begin(), num_edges_on_local_cells.end() ) );
443  maxEdgesPerCell = local_max_edges_per_cell;
444 
445  // If parallel, do a MPI_Allreduce to get global maxEdgesPerCell across all procs
446 #ifdef MOAB_HAVE_MPI
447  if( procs >= 2 )
448  {
449  int global_max_edges_per_cell;
450  ParallelComm*& myPcomm = _readNC->myPcomm;
451  MPI_Allreduce( &local_max_edges_per_cell, &global_max_edges_per_cell, 1, MPI_INT, MPI_MAX,
452  myPcomm->proc_config().proc_comm() );
453  assert( local_max_edges_per_cell <= global_max_edges_per_cell );
454  maxEdgesPerCell = global_max_edges_per_cell;
455  if( 0 == rank ) dbgOut.tprintf( 1, " global_max_edges_per_cell = %d\n", global_max_edges_per_cell );
456  }
457 #endif
458 
459  // Read vertices on each local cell, to get localGidVerts and cell connectivity later
460  int verticesOnCellVarId;
461  success = NCFUNC( inq_varid )( _fileId, "verticesOnCell", &verticesOnCellVarId );
462  if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of verticesOnCell" );
463  std::vector< int > vertices_on_local_cells( nLocalCells * maxEdgesPerCell );
464 #ifdef MOAB_HAVE_PNETCDF
465  idxReq = 0;
466 #endif
467  indexInArray = 0;
468  for( Range::pair_iterator pair_iter = localGidCells.pair_begin(); pair_iter != localGidCells.pair_end();
469  ++pair_iter )
470  {
471  EntityHandle starth = pair_iter->first;
472  EntityHandle endh = pair_iter->second;
473  NCDF_SIZE read_starts[2] = { static_cast< NCDF_SIZE >( starth - 1 ), 0 };
474  NCDF_SIZE read_counts[2] = { static_cast< NCDF_SIZE >( endh - starth + 1 ),
475  static_cast< NCDF_SIZE >( maxEdgesPerCell ) };
476 
477  // Do a partial read in each subrange
478 #ifdef MOAB_HAVE_PNETCDF
479  success = NCFUNCREQG( _vara_int )( _fileId, verticesOnCellVarId, read_starts, read_counts,
480  &( vertices_on_local_cells[indexInArray] ), &requests[idxReq++] );
481 #else
482  success = NCFUNCAG( _vara_int )( _fileId, verticesOnCellVarId, read_starts, read_counts,
483  &( vertices_on_local_cells[indexInArray] ) );
484 #endif
485  if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read verticesOnCell data in a loop" );
486 
487  // Increment the index for next subrange
488  indexInArray += ( endh - starth + 1 ) * maxEdgesPerCell;
489  }
490 
491 #ifdef MOAB_HAVE_PNETCDF
492  // Wait outside the loop
493  success = NCFUNC( wait_all )( _fileId, requests.size(), &requests[0], &statuss[0] );
494  if( success ) MB_SET_ERR( MB_FAILURE, "Failed on wait_all" );
495 #endif
496 
497  // Correct local cell vertices array, replace the padded vertices with the last vertices
498  // in the corresponding cells; sometimes the padded vertices are 0, sometimes a large
499  // vertex id. Make sure they are consistent to our padded option
500  for( int local_cell_idx = 0; local_cell_idx < nLocalCells; local_cell_idx++ )
501  {
502  int num_edges = num_edges_on_local_cells[local_cell_idx];
503  int idx_in_local_vert_arr = local_cell_idx * maxEdgesPerCell;
504  int last_vert_idx = vertices_on_local_cells[idx_in_local_vert_arr + num_edges - 1];
505  for( int i = num_edges; i < maxEdgesPerCell; i++ )
506  vertices_on_local_cells[idx_in_local_vert_arr + i] = last_vert_idx;
507  }
508 
509  // Create local vertices
510  EntityHandle start_vertex;
511  ErrorCode rval = create_local_vertices( vertices_on_local_cells, start_vertex );MB_CHK_SET_ERR( rval, "Failed to create local vertices for MPAS mesh" );
512 
513  // Create local edges (unless NO_EDGES read option is set)
514  if( !noEdges )
515  {
516  rval = create_local_edges( start_vertex, num_edges_on_local_cells );MB_CHK_SET_ERR( rval, "Failed to create local edges for MPAS mesh" );
517  }
518 
519  // Create local cells, either unpadded or padded
520  if( noMixedElements )
521  {
522  rval = create_padded_local_cells( vertices_on_local_cells, start_vertex, faces );MB_CHK_SET_ERR( rval, "Failed to create padded local cells for MPAS mesh" );
523  }
524  else
525  {
526  rval = create_local_cells( vertices_on_local_cells, num_edges_on_local_cells, start_vertex, faces );MB_CHK_SET_ERR( rval, "Failed to create local cells for MPAS mesh" );
527  }
528 
529  // Set tag for numCellGroups so it can be restored on a later NOMESH read
530  Tag numCellGroupsTag = 0;
531  rval = mbImpl->tag_get_handle( "__NUM_CELL_GROUPS", 1, MB_TYPE_INTEGER, numCellGroupsTag,
532  MB_TAG_SPARSE | MB_TAG_CREAT );MB_CHK_SET_ERR( rval, "Trouble creating __NUM_CELL_GROUPS tag" );
533  rval = mbImpl->tag_set_data( numCellGroupsTag, &_fileSet, 1, &numCellGroups );MB_CHK_SET_ERR( rval, "Trouble setting data to __NUM_CELL_GROUPS tag" );
534 
535  if( createGatherSet )
536  {
537  EntityHandle gather_set;
538  rval = _readNC->readMeshIface->create_gather_set( gather_set );MB_CHK_SET_ERR( rval, "Failed to create gather set" );
539 
540  // Create gather set vertices
541  EntityHandle start_gather_set_vertex;
542  rval = create_gather_set_vertices( gather_set, start_gather_set_vertex );MB_CHK_SET_ERR( rval, "Failed to create gather set vertices for MPAS mesh" );
543 
544  // Create gather set edges (unless NO_EDGES read option is set)
545  if( !noEdges )
546  {
547  rval = create_gather_set_edges( gather_set, start_gather_set_vertex );MB_CHK_SET_ERR( rval, "Failed to create gather set edges for MPAS mesh" );
548  }
549 
550  // Create gather set cells, either unpadded or padded
551  if( noMixedElements )
552  {
553  rval = create_padded_gather_set_cells( gather_set, start_gather_set_vertex );MB_CHK_SET_ERR( rval, "Failed to create padded gather set cells for MPAS mesh" );
554  }
555  else
556  {
557  rval = create_gather_set_cells( gather_set, start_gather_set_vertex );MB_CHK_SET_ERR( rval, "Failed to create gather set cells for MPAS mesh" );
558  }
559  }
560 
561  return MB_SUCCESS;
562 }
563 
// Allocate tag space for the requested non-set (per-entity) variables before
// reading: collects the vertex/edge/face entities of the file set, sets up
// each variable's readStarts/readCounts (Time, entity, level) and total size,
// and obtains a tag pointer per timestep via tag_iterate where possible.
// For cell variables spread over more than one cell group, allocation is
// deferred (varDatas[t] left NULL) and handled during the actual read.
// @param vdatas     variables to allocate for (entLoc/varDims already set)
// @param tstep_nums timestep indices requested; validated against dimLens[tDim]
// @return ErrorCode from the last MOAB call (MB_SUCCESS on success)
564 ErrorCode NCHelperMPAS::read_ucd_variables_to_nonset_allocate( std::vector< ReadNC::VarData >& vdatas,
565  std::vector< int >& tstep_nums )
566 {
567  Interface*& mbImpl = _readNC->mbImpl;
568  std::vector< int >& dimLens = _readNC->dimLens;
569  bool& noEdges = _readNC->noEdges;
570  DebugOutput& dbgOut = _readNC->dbgOut;
571 
572  Range* range = NULL;
573 
574  // Get vertices
575  Range verts;
576  ErrorCode rval = mbImpl->get_entities_by_dimension( _fileSet, 0, verts );MB_CHK_SET_ERR( rval, "Trouble getting vertices in current file set" );
577  assert( "Should only have a single vertex subrange, since they were read in one shot" && verts.psize() == 1 );
578 
579  // Get edges
580  Range edges;
581  rval = mbImpl->get_entities_by_dimension( _fileSet, 1, edges );MB_CHK_SET_ERR( rval, "Trouble getting edges in current file set" );
582 
583  // Get faces
584  Range faces;
585  rval = mbImpl->get_entities_by_dimension( _fileSet, 2, faces );MB_CHK_SET_ERR( rval, "Trouble getting faces in current file set" );
586  // Note, for MPAS faces.psize() can be more than 1
587 
588 #ifdef MOAB_HAVE_MPI
589  bool& isParallel = _readNC->isParallel;
590  if( isParallel )
591  {
592  // Only read data for faces owned by this proc (ghosts are exchanged later)
593  ParallelComm*& myPcomm = _readNC->myPcomm;
594  rval = myPcomm->filter_pstatus( faces, PSTATUS_NOT_OWNED, PSTATUS_NOT, -1, &facesOwned );MB_CHK_SET_ERR( rval, "Trouble getting owned faces in current file set" );
595  }
596  else
597  facesOwned = faces; // not running in parallel, but still with MPI
598 #else
599  facesOwned = faces;
600 #endif
601 
602  for( unsigned int i = 0; i < vdatas.size(); i++ )
603  {
604  // Skip edge variables, if specified by the read options
605  if( noEdges && vdatas[i].entLoc == ReadNC::ENTLOCEDGE ) continue;
606 
607  // Support non-set variables with 3 dimensions like (Time, nCells, nVertLevels), or
608  // 2 dimensions like (Time, nCells)
609  assert( 3 == vdatas[i].varDims.size() || 2 == vdatas[i].varDims.size() );
610 
611  // For a non-set variable, time should be the first dimension
612  assert( tDim == vdatas[i].varDims[0] );
613 
614  // Set up readStarts and readCounts
615  vdatas[i].readStarts.resize( 3 );
616  vdatas[i].readCounts.resize( 3 );
617 
618  // First: Time
619  vdatas[i].readStarts[0] = 0; // This value is timestep dependent, will be set later
620  vdatas[i].readCounts[0] = 1;
621 
622  // Next: nVertices / nCells / nEdges
623  switch( vdatas[i].entLoc )
624  {
625  case ReadNC::ENTLOCVERT:
626  // Vertices
627  // Start from the first localGidVerts
628  // Actually, this will be reset later on in a loop
629  vdatas[i].readStarts[1] = localGidVerts[0] - 1;
630  vdatas[i].readCounts[1] = nLocalVertices;
631  range = &verts;
632  break;
633  case ReadNC::ENTLOCFACE:
634  // Faces
635  // Start from the first localGidCells
636  // Actually, this will be reset later on in a loop
637  vdatas[i].readStarts[1] = localGidCells[0] - 1;
638  vdatas[i].readCounts[1] = nLocalCells;
639  range = &facesOwned;
640  break;
641  case ReadNC::ENTLOCEDGE:
642  // Edges
643  // Start from the first localGidEdges
644  // Actually, this will be reset later on in a loop
645  vdatas[i].readStarts[1] = localGidEdges[0] - 1;
646  vdatas[i].readCounts[1] = nLocalEdges;
647  range = &edges;
648  break;
649  default:
650  MB_SET_ERR( MB_FAILURE, "Unexpected entity location type for variable " << vdatas[i].varName );
651  }
652 
653  // Finally: nVertLevels or other optional levels, it is possible that there is no
654  // level dimension (numLev is 0) for this non-set variable, e.g. (Time, nCells)
655  if( vdatas[i].numLev < 1 ) vdatas[i].numLev = 1;
656  vdatas[i].readStarts[2] = 0;
657  vdatas[i].readCounts[2] = vdatas[i].numLev;
658 
659  // Get variable size (product of the three read counts)
660  vdatas[i].sz = 1;
661  for( std::size_t idx = 0; idx != 3; idx++ )
662  vdatas[i].sz *= vdatas[i].readCounts[idx];
663 
664  for( unsigned int t = 0; t < tstep_nums.size(); t++ )
665  {
666  dbgOut.tprintf( 2, "Reading variable %s, time step %d\n", vdatas[i].varName.c_str(), tstep_nums[t] );
667 
668  if( tstep_nums[t] >= dimLens[tDim] )
669  {
670  MB_SET_ERR( MB_INDEX_OUT_OF_RANGE, "Wrong value for timestep number " << tstep_nums[t] );
671  }
672 
673  // Get the tag to read into
674  if( !vdatas[i].varTags[t] )
675  {
676  rval = get_tag_to_nonset( vdatas[i], tstep_nums[t], vdatas[i].varTags[t], vdatas[i].numLev );MB_CHK_SET_ERR( rval, "Trouble getting tag for variable " << vdatas[i].varName );
677  }
678 
679  // Get ptr to tag space
680  if( vdatas[i].entLoc == ReadNC::ENTLOCFACE && numCellGroups > 1 )
681  {
682  // For a cell variable that is NOT on one contiguous chunk of faces, defer its tag
683  // space allocation
684  vdatas[i].varDatas[t] = NULL;
685  }
686  else
687  {
688  assert( 1 == range->psize() );
689  void* data;
690  int count;
691  rval = mbImpl->tag_iterate( vdatas[i].varTags[t], range->begin(), range->end(), count, data );MB_CHK_SET_ERR( rval, "Failed to iterate tag for variable " << vdatas[i].varName );
692  assert( (unsigned)count == range->size() );
693  vdatas[i].varDatas[t] = data;
694  }
695  }
696  }
697 
698  return rval;
699 }
699 
700 #ifdef MOAB_HAVE_PNETCDF
// PnetCDF (non-blocking) path: read the requested non-set variables into the
// tag space allocated by read_ucd_variables_to_nonset_allocate. For every
// variable and timestep, a non-blocking read is posted per subrange of the
// matching localGid range, then all requests are completed with one wait_all.
// Only NC_FLOAT/NC_DOUBLE data are supported; floats are read as doubles.
// @param vdatas     variables to read (readStarts/readCounts already set up)
// @param tstep_nums timestep indices to read
// @return ErrorCode from the last MOAB call (MB_SUCCESS on success)
701 ErrorCode NCHelperMPAS::read_ucd_variables_to_nonset_async( std::vector< ReadNC::VarData >& vdatas,
702  std::vector< int >& tstep_nums )
703 {
704  Interface*& mbImpl = _readNC->mbImpl;
705  bool& noEdges = _readNC->noEdges;
706  DebugOutput& dbgOut = _readNC->dbgOut;
707 
708  ErrorCode rval = read_ucd_variables_to_nonset_allocate( vdatas, tstep_nums );MB_CHK_SET_ERR( rval, "Trouble allocating space to read non-set variables" );
709 
710  // Finally, read into that space
711  int success;
712  Range* pLocalGid = NULL;
713 
714  for( unsigned int i = 0; i < vdatas.size(); i++ )
715  {
716  // Skip edge variables, if specified by the read options
717  if( noEdges && vdatas[i].entLoc == ReadNC::ENTLOCEDGE ) continue;
718 
719  // Pick the global-id range matching the variable's entity location
720  switch( vdatas[i].entLoc )
721  {
722  case ReadNC::ENTLOCVERT:
723  pLocalGid = &localGidVerts;
724  break;
725  case ReadNC::ENTLOCFACE:
726  pLocalGid = &localGidCells;
727  break;
728  case ReadNC::ENTLOCEDGE:
729  pLocalGid = &localGidEdges;
730  break;
731  default:
732  MB_SET_ERR( MB_FAILURE, "Unexpected entity location type for variable " << vdatas[i].varName );
733  }
734 
735  std::size_t sz = vdatas[i].sz;
736 
737  for( unsigned int t = 0; t < tstep_nums.size(); t++ )
738  {
739  // We will synchronize all these reads with the other processors,
740  // so the wait will be inside this double loop; is it too much?
741  size_t nb_reads = pLocalGid->psize();
742  std::vector< int > requests( nb_reads ), statuss( nb_reads );
743  size_t idxReq = 0;
744 
745  // Set readStart for each timestep along time dimension
746  vdatas[i].readStarts[0] = tstep_nums[t];
747 
748  switch( vdatas[i].varDataType )
749  {
750  case NC_FLOAT:
751  case NC_DOUBLE: {
752  // Read float as double
753  std::vector< double > tmpdoubledata( sz );
754 
755  // In the case of ucd mesh, and on multiple proc,
756  // we need to read as many times as subranges we have in the
757  // localGid range;
758  // basically, we have to give a different point
759  // for data to start, for every subrange :(
760  size_t indexInDoubleArray = 0;
761  size_t ic = 0;
762  for( Range::pair_iterator pair_iter = pLocalGid->pair_begin(); pair_iter != pLocalGid->pair_end();
763  ++pair_iter, ic++ )
764  {
765  EntityHandle starth = pair_iter->first;
766  EntityHandle endh = pair_iter->second; // Inclusive
767  vdatas[i].readStarts[1] = (NCDF_SIZE)( starth - 1 );
768  vdatas[i].readCounts[1] = (NCDF_SIZE)( endh - starth + 1 );
769 
770  // Do a partial read, in each subrange
771  // Wait outside this loop
772  success =
773  NCFUNCREQG( _vara_double )( _fileId, vdatas[i].varId, &( vdatas[i].readStarts[0] ),
774  &( vdatas[i].readCounts[0] ),
775  &( tmpdoubledata[indexInDoubleArray] ), &requests[idxReq++] );
776  if( success )
777  MB_SET_ERR( MB_FAILURE,
778  "Failed to read double data in a loop for variable " << vdatas[i].varName );
779  // We need to increment the index in double array for the
780  // next subrange
781  indexInDoubleArray += ( endh - starth + 1 ) * 1 * vdatas[i].numLev;
782  }
783  assert( ic == pLocalGid->psize() );
784 
785  // Complete all outstanding non-blocking reads for this variable/timestep
786  success = NCFUNC( wait_all )( _fileId, requests.size(), &requests[0], &statuss[0] );
787  if( success ) MB_SET_ERR( MB_FAILURE, "Failed on wait_all" );
788 
789  if( vdatas[i].entLoc == ReadNC::ENTLOCFACE && numCellGroups > 1 )
790  {
791  // For a cell variable that is NOT on one contiguous chunk of faces,
792  // allocate tag space for each cell group, and utilize cellHandleToGlobalID
793  // map to read tag data
// NOTE(review): inner line 792 is missing from this extraction -- presumably
// the declaration "Range::iterator iter = facesOwned.begin();" used by the
// loop below. Verify against the upstream MOAB source.
793  while( iter != facesOwned.end() )
794  {
795  int count;
796  void* ptr;
797  rval = mbImpl->tag_iterate( vdatas[i].varTags[t], iter, facesOwned.end(), count, ptr );MB_CHK_SET_ERR( rval, "Failed to iterate tag on owned faces" );
798 
799  for( int j = 0; j < count; j++ )
800  {
801  int global_cell_idx =
802  cellHandleToGlobalID[*( iter + j )]; // Global cell index, 1 based
803  int local_cell_idx =
804  localGidCells.index( global_cell_idx ); // Local cell index, 0 based
805  assert( local_cell_idx != -1 );
806  for( int level = 0; level < vdatas[i].numLev; level++ )
807  ( (double*)ptr )[j * vdatas[i].numLev + level] =
808  tmpdoubledata[local_cell_idx * vdatas[i].numLev + level];
809  }
810 
811  iter += count;
812  }
813  }
814  else
815  {
816  // Single contiguous chunk: copy straight into the tag space
817  void* data = vdatas[i].varDatas[t];
818  for( std::size_t idx = 0; idx != tmpdoubledata.size(); idx++ )
819  ( (double*)data )[idx] = tmpdoubledata[idx];
820  }
821 
822  break;
823  }
824  default:
825  MB_SET_ERR( MB_FAILURE, "Unexpected data type for variable " << vdatas[i].varName );
826  }
827  }
828  }
829 
830  // Debug output, if requested
831  if( 1 == dbgOut.get_verbosity() )
832  {
833  dbgOut.printf( 1, "Read variables: %s", vdatas.begin()->varName.c_str() );
834  for( unsigned int i = 1; i < vdatas.size(); i++ )
835  dbgOut.printf( 1, ", %s ", vdatas[i].varName.c_str() );
836  dbgOut.tprintf( 1, "\n" );
837  }
838 
839  return rval;
840 }
840 #else
841 ErrorCode NCHelperMPAS::read_ucd_variables_to_nonset( std::vector< ReadNC::VarData >& vdatas,
842  std::vector< int >& tstep_nums )
843 {
844  Interface*& mbImpl = _readNC->mbImpl;
845  bool& noEdges = _readNC->noEdges;
846  DebugOutput& dbgOut = _readNC->dbgOut;
847 
848  ErrorCode rval = read_ucd_variables_to_nonset_allocate( vdatas, tstep_nums );MB_CHK_SET_ERR( rval, "Trouble allocating space to read non-set variables" );
849 
850  // Finally, read into that space
851  int success;
852  Range* pLocalGid = NULL;
853 
854  for( unsigned int i = 0; i < vdatas.size(); i++ )
855  {
856  // Skip edge variables, if specified by the read options
857  if( noEdges && vdatas[i].entLoc == ReadNC::ENTLOCEDGE ) continue;
858 
859  switch( vdatas[i].entLoc )
860  {
861  case ReadNC::ENTLOCVERT:
862  pLocalGid = &localGidVerts;
863  break;
864  case ReadNC::ENTLOCFACE:
865  pLocalGid = &localGidCells;
866  break;
867  case ReadNC::ENTLOCEDGE:
868  pLocalGid = &localGidEdges;
869  break;
870  default:
871  MB_SET_ERR( MB_FAILURE, "Unexpected entity location type for variable " << vdatas[i].varName );
872  }
873 
874  std::size_t sz = vdatas[i].sz;
875 
876  for( unsigned int t = 0; t < tstep_nums.size(); t++ )
877  {
878  // Set readStart for each timestep along time dimension
879  vdatas[i].readStarts[0] = tstep_nums[t];
880 
881  switch( vdatas[i].varDataType )
882  {
883  case NC_FLOAT:
884  case NC_DOUBLE: {
885  // Read float as double
886  std::vector< double > tmpdoubledata( sz );
887 
888  // In the case of ucd mesh, and on multiple proc,
889  // we need to read as many times as subranges we have in the
890  // localGid range;
891  // basically, we have to give a different point
892  // for data to start, for every subrange :(
893  size_t indexInDoubleArray = 0;
894  size_t ic = 0;
895  for( Range::pair_iterator pair_iter = pLocalGid->pair_begin(); pair_iter != pLocalGid->pair_end();
896  ++pair_iter, ic++ )
897  {
898  EntityHandle starth = pair_iter->first;
899  EntityHandle endh = pair_iter->second; // Inclusive
900  vdatas[i].readStarts[1] = (NCDF_SIZE)( starth - 1 );
901  vdatas[i].readCounts[1] = (NCDF_SIZE)( endh - starth + 1 );
902 
903  success = NCFUNCAG( _vara_double )( _fileId, vdatas[i].varId, &( vdatas[i].readStarts[0] ),
904  &( vdatas[i].readCounts[0] ),
905  &( tmpdoubledata[indexInDoubleArray] ) );
906  if( success )
907  MB_SET_ERR( MB_FAILURE,
908  "Failed to read double data in a loop for variable " << vdatas[i].varName );
909  // We need to increment the index in double array for the
910  // next subrange
911  indexInDoubleArray += ( endh - starth + 1 ) * 1 * vdatas[i].numLev;
912  }
913  assert( ic == pLocalGid->psize() );
914 
915  if( vdatas[i].entLoc == ReadNC::ENTLOCFACE && numCellGroups > 1 )
916  {
917  // For a cell variable that is NOT on one contiguous chunk of faces,
918  // allocate tag space for each cell group, and utilize cellHandleToGlobalID
919  // map to read tag data
921  while( iter != facesOwned.end() )
922  {
923  int count;
924  void* ptr;
925  rval = mbImpl->tag_iterate( vdatas[i].varTags[t], iter, facesOwned.end(), count, ptr );MB_CHK_SET_ERR( rval, "Failed to iterate tag on owned faces" );
926 
927  for( int j = 0; j < count; j++ )
928  {
929  int global_cell_idx =
930  cellHandleToGlobalID[*( iter + j )]; // Global cell index, 1 based
931  int local_cell_idx =
932  localGidCells.index( global_cell_idx ); // Local cell index, 0 based
933  assert( local_cell_idx != -1 );
934  for( int level = 0; level < vdatas[i].numLev; level++ )
935  ( (double*)ptr )[j * vdatas[i].numLev + level] =
936  tmpdoubledata[local_cell_idx * vdatas[i].numLev + level];
937  }
938 
939  iter += count;
940  }
941  }
942  else
943  {
944  void* data = vdatas[i].varDatas[t];
945  for( std::size_t idx = 0; idx != tmpdoubledata.size(); idx++ )
946  ( (double*)data )[idx] = tmpdoubledata[idx];
947  }
948 
949  break;
950  }
951  default:
952  MB_SET_ERR( MB_FAILURE, "Unexpected data type for variable " << vdatas[i].varName );
953  }
954  }
955  }
956 
957  // Debug output, if requested
958  if( 1 == dbgOut.get_verbosity() )
959  {
960  dbgOut.printf( 1, "Read variables: %s", vdatas.begin()->varName.c_str() );
961  for( unsigned int i = 1; i < vdatas.size(); i++ )
962  dbgOut.printf( 1, ", %s ", vdatas[i].varName.c_str() );
963  dbgOut.tprintf( 1, "\n" );
964  }
965 
966  return rval;
967 }
968 #endif
969 
970 #ifdef MOAB_HAVE_MPI
971 ErrorCode NCHelperMPAS::redistribute_local_cells( int start_cell_idx, ParallelComm* pco )
972 {
973  // If possible, apply Zoltan partition
974 #ifdef MOAB_HAVE_ZOLTAN
976  {
977  // Read x coordinates of cell centers
978  int xCellVarId;
979  int success = NCFUNC( inq_varid )( _fileId, "xCell", &xCellVarId );
980  if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of xCell" );
981  std::vector< double > xCell( nLocalCells );
982  NCDF_SIZE read_start = static_cast< NCDF_SIZE >( start_cell_idx - 1 );
983  NCDF_SIZE read_count = static_cast< NCDF_SIZE >( nLocalCells );
984  success = NCFUNCAG( _vara_double )( _fileId, xCellVarId, &read_start, &read_count, &xCell[0] );
985  if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read xCell data" );
986 
987  // Read y coordinates of cell centers
988  int yCellVarId;
989  success = NCFUNC( inq_varid )( _fileId, "yCell", &yCellVarId );
990  if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of yCell" );
991  std::vector< double > yCell( nLocalCells );
992  success = NCFUNCAG( _vara_double )( _fileId, yCellVarId, &read_start, &read_count, &yCell[0] );
993  if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read yCell data" );
994 
995  // Read z coordinates of cell centers
996  int zCellVarId;
997  success = NCFUNC( inq_varid )( _fileId, "zCell", &zCellVarId );
998  if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of zCell" );
999  std::vector< double > zCell( nLocalCells );
1000  success = NCFUNCAG( _vara_double )( _fileId, zCellVarId, &read_start, &read_count, &zCell[0] );
1001  if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read zCell data" );
1002 
1003  // Zoltan partition using RCB; maybe more studies would be good, as to which partition
1004  // is better
1005  Interface*& mbImpl = _readNC->mbImpl;
1006  DebugOutput& dbgOut = _readNC->dbgOut;
1007  ZoltanPartitioner* mbZTool = new ZoltanPartitioner( mbImpl, pco, false, 0, NULL );
1008  ErrorCode rval = mbZTool->repartition( xCell, yCell, zCell, start_cell_idx, "RCB", localGidCells );MB_CHK_SET_ERR( rval, "Error in Zoltan partitioning" );
1009  delete mbZTool;
1010 
1011  dbgOut.tprintf( 1, "After Zoltan partitioning, localGidCells.psize() = %d\n", (int)localGidCells.psize() );
1012  dbgOut.tprintf( 1, " localGidCells.size() = %d\n", (int)localGidCells.size() );
1013 
1014  // This is important: local cells are now redistributed, so nLocalCells might be different!
1016 
1017  return MB_SUCCESS;
1018  }
1019 #endif
1020 
1021  // By default, apply trivial partition
1022  localGidCells.insert( start_cell_idx, start_cell_idx + nLocalCells - 1 );
1023 
1024  return MB_SUCCESS;
1025 }
1026 #endif
1027 
1028 ErrorCode NCHelperMPAS::create_local_vertices( const std::vector< int >& vertices_on_local_cells,
1029  EntityHandle& start_vertex )
1030 {
1031  Interface*& mbImpl = _readNC->mbImpl;
1032  Tag& mGlobalIdTag = _readNC->mGlobalIdTag;
1033  const Tag*& mpFileIdTag = _readNC->mpFileIdTag;
1034  DebugOutput& dbgOut = _readNC->dbgOut;
1035 
1036  // Make a copy of vertices_on_local_cells for sorting (keep original one to set cell
1037  // connectivity later)
1038  std::vector< int > vertices_on_local_cells_sorted( vertices_on_local_cells );
1039  std::sort( vertices_on_local_cells_sorted.begin(), vertices_on_local_cells_sorted.end() );
1040  std::copy( vertices_on_local_cells_sorted.rbegin(), vertices_on_local_cells_sorted.rend(),
1043 
1044  dbgOut.tprintf( 1, " localGidVerts.psize() = %d\n", (int)localGidVerts.psize() );
1045  dbgOut.tprintf( 1, " localGidVerts.size() = %d\n", (int)localGidVerts.size() );
1046 
1047  // Create local vertices
1048  std::vector< double* > arrays;
1049  ErrorCode rval =
1050  _readNC->readMeshIface->get_node_coords( 3, nLocalVertices, 0, start_vertex, arrays,
1051  // Might have to create gather mesh later
1052  ( createGatherSet ? nLocalVertices + nVertices : nLocalVertices ) );MB_CHK_SET_ERR( rval, "Failed to create local vertices" );
1053 
1054  // Add local vertices to current file set
1055  Range local_verts_range( start_vertex, start_vertex + nLocalVertices - 1 );
1056  rval = _readNC->mbImpl->add_entities( _fileSet, local_verts_range );MB_CHK_SET_ERR( rval, "Failed to add local vertices to current file set" );
1057 
1058  // Get ptr to GID memory for local vertices
1059  int count = 0;
1060  void* data = NULL;
1061  rval = mbImpl->tag_iterate( mGlobalIdTag, local_verts_range.begin(), local_verts_range.end(), count, data );MB_CHK_SET_ERR( rval, "Failed to iterate global id tag on local vertices" );
1062  assert( count == nLocalVertices );
1063  int* gid_data = (int*)data;
1064  std::copy( localGidVerts.begin(), localGidVerts.end(), gid_data );
1065 
1066  // Duplicate GID data, which will be used to resolve sharing
1067  if( mpFileIdTag )
1068  {
1069  rval = mbImpl->tag_iterate( *mpFileIdTag, local_verts_range.begin(), local_verts_range.end(), count, data );MB_CHK_SET_ERR( rval, "Failed to iterate file id tag on local vertices" );
1070  assert( count == nLocalVertices );
1071  int bytes_per_tag = 4;
1072  rval = mbImpl->tag_get_bytes( *mpFileIdTag, bytes_per_tag );MB_CHK_SET_ERR( rval, "Can't get number of bytes for file id tag" );
1073  if( 4 == bytes_per_tag )
1074  {
1075  gid_data = (int*)data;
1076  std::copy( localGidVerts.begin(), localGidVerts.end(), gid_data );
1077  }
1078  else if( 8 == bytes_per_tag )
1079  { // Should be a handle tag on 64 bit machine?
1080  long* handle_tag_data = (long*)data;
1081  std::copy( localGidVerts.begin(), localGidVerts.end(), handle_tag_data );
1082  }
1083  }
1084 
1085 #ifdef MOAB_HAVE_PNETCDF
1086  size_t nb_reads = localGidVerts.psize();
1087  std::vector< int > requests( nb_reads );
1088  std::vector< int > statuss( nb_reads );
1089  size_t idxReq = 0;
1090 #endif
1091 
1092  // Read x coordinates for local vertices
1093  double* xptr = arrays[0];
1094  int xVertexVarId;
1095  int success = NCFUNC( inq_varid )( _fileId, "xVertex", &xVertexVarId );
1096  if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of xVertex" );
1097  size_t indexInArray = 0;
1098  for( Range::pair_iterator pair_iter = localGidVerts.pair_begin(); pair_iter != localGidVerts.pair_end();
1099  ++pair_iter )
1100  {
1101  EntityHandle starth = pair_iter->first;
1102  EntityHandle endh = pair_iter->second;
1103  NCDF_SIZE read_start = (NCDF_SIZE)( starth - 1 );
1104  NCDF_SIZE read_count = (NCDF_SIZE)( endh - starth + 1 );
1105 
1106  // Do a partial read in each subrange
1107 #ifdef MOAB_HAVE_PNETCDF
1108  success = NCFUNCREQG( _vara_double )( _fileId, xVertexVarId, &read_start, &read_count, &( xptr[indexInArray] ),
1109  &requests[idxReq++] );
1110 #else
1111  success = NCFUNCAG( _vara_double )( _fileId, xVertexVarId, &read_start, &read_count, &( xptr[indexInArray] ) );
1112 #endif
1113  if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read xVertex data in a loop" );
1114 
1115  // Increment the index for next subrange
1116  indexInArray += ( endh - starth + 1 );
1117  }
1118 
1119 #ifdef MOAB_HAVE_PNETCDF
1120  // Wait outside the loop
1121  success = NCFUNC( wait_all )( _fileId, requests.size(), &requests[0], &statuss[0] );
1122  if( success ) MB_SET_ERR( MB_FAILURE, "Failed on wait_all" );
1123 #endif
1124 
1125  // Read y coordinates for local vertices
1126  double* yptr = arrays[1];
1127  int yVertexVarId;
1128  success = NCFUNC( inq_varid )( _fileId, "yVertex", &yVertexVarId );
1129  if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of yVertex" );
1130 #ifdef MOAB_HAVE_PNETCDF
1131  idxReq = 0;
1132 #endif
1133  indexInArray = 0;
1134  for( Range::pair_iterator pair_iter = localGidVerts.pair_begin(); pair_iter != localGidVerts.pair_end();
1135  ++pair_iter )
1136  {
1137  EntityHandle starth = pair_iter->first;
1138  EntityHandle endh = pair_iter->second;
1139  NCDF_SIZE read_start = (NCDF_SIZE)( starth - 1 );
1140  NCDF_SIZE read_count = (NCDF_SIZE)( endh - starth + 1 );
1141 
1142  // Do a partial read in each subrange
1143 #ifdef MOAB_HAVE_PNETCDF
1144  success = NCFUNCREQG( _vara_double )( _fileId, yVertexVarId, &read_start, &read_count, &( yptr[indexInArray] ),
1145  &requests[idxReq++] );
1146 #else
1147  success = NCFUNCAG( _vara_double )( _fileId, yVertexVarId, &read_start, &read_count, &( yptr[indexInArray] ) );
1148 #endif
1149  if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read yVertex data in a loop" );
1150 
1151  // Increment the index for next subrange
1152  indexInArray += ( endh - starth + 1 );
1153  }
1154 
1155 #ifdef MOAB_HAVE_PNETCDF
1156  // Wait outside the loop
1157  success = NCFUNC( wait_all )( _fileId, requests.size(), &requests[0], &statuss[0] );
1158  if( success ) MB_SET_ERR( MB_FAILURE, "Failed on wait_all" );
1159 #endif
1160 
1161  // Read z coordinates for local vertices
1162  double* zptr = arrays[2];
1163  int zVertexVarId;
1164  success = NCFUNC( inq_varid )( _fileId, "zVertex", &zVertexVarId );
1165  if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of zVertex" );
1166 #ifdef MOAB_HAVE_PNETCDF
1167  idxReq = 0;
1168 #endif
1169  indexInArray = 0;
1170  for( Range::pair_iterator pair_iter = localGidVerts.pair_begin(); pair_iter != localGidVerts.pair_end();
1171  ++pair_iter )
1172  {
1173  EntityHandle starth = pair_iter->first;
1174  EntityHandle endh = pair_iter->second;
1175  NCDF_SIZE read_start = (NCDF_SIZE)( starth - 1 );
1176  NCDF_SIZE read_count = (NCDF_SIZE)( endh - starth + 1 );
1177 
1178  // Do a partial read in each subrange
1179 #ifdef MOAB_HAVE_PNETCDF
1180  success = NCFUNCREQG( _vara_double )( _fileId, zVertexVarId, &read_start, &read_count, &( zptr[indexInArray] ),
1181  &requests[idxReq++] );
1182 #else
1183  success = NCFUNCAG( _vara_double )( _fileId, zVertexVarId, &read_start, &read_count, &( zptr[indexInArray] ) );
1184 #endif
1185  if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read zVertex data in a loop" );
1186 
1187  // Increment the index for next subrange
1188  indexInArray += ( endh - starth + 1 );
1189  }
1190 
1191 #ifdef MOAB_HAVE_PNETCDF
1192  // Wait outside the loop
1193  success = NCFUNC( wait_all )( _fileId, requests.size(), &requests[0], &statuss[0] );
1194  if( success ) MB_SET_ERR( MB_FAILURE, "Failed on wait_all" );
1195 #endif
1196 
1197  return MB_SUCCESS;
1198 }
1199 
1201  const std::vector< int >& num_edges_on_local_cells )
1202 {
1203  Interface*& mbImpl = _readNC->mbImpl;
1204  Tag& mGlobalIdTag = _readNC->mGlobalIdTag;
1205  DebugOutput& dbgOut = _readNC->dbgOut;
1206 
1207  // Read edges on each local cell, to get localGidEdges
1208  int edgesOnCellVarId;
1209  int success = NCFUNC( inq_varid )( _fileId, "edgesOnCell", &edgesOnCellVarId );
1210  if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of edgesOnCell" );
1211 
1212  std::vector< int > edges_on_local_cells( nLocalCells * maxEdgesPerCell );
1213  dbgOut.tprintf( 1, " edges_on_local_cells.size() = %d\n", (int)edges_on_local_cells.size() );
1214 
1215 #ifdef MOAB_HAVE_PNETCDF
1216  size_t nb_reads = localGidCells.psize();
1217  std::vector< int > requests( nb_reads );
1218  std::vector< int > statuss( nb_reads );
1219  size_t idxReq = 0;
1220 #endif
1221  size_t indexInArray = 0;
1222  for( Range::pair_iterator pair_iter = localGidCells.pair_begin(); pair_iter != localGidCells.pair_end();
1223  ++pair_iter )
1224  {
1225  EntityHandle starth = pair_iter->first;
1226  EntityHandle endh = pair_iter->second;
1227  NCDF_SIZE read_starts[2] = { static_cast< NCDF_SIZE >( starth - 1 ), 0 };
1228  NCDF_SIZE read_counts[2] = { static_cast< NCDF_SIZE >( endh - starth + 1 ),
1229  static_cast< NCDF_SIZE >( maxEdgesPerCell ) };
1230 
1231  // Do a partial read in each subrange
1232 #ifdef MOAB_HAVE_PNETCDF
1233  success = NCFUNCREQG( _vara_int )( _fileId, edgesOnCellVarId, read_starts, read_counts,
1234  &( edges_on_local_cells[indexInArray] ), &requests[idxReq++] );
1235 #else
1236  success = NCFUNCAG( _vara_int )( _fileId, edgesOnCellVarId, read_starts, read_counts,
1237  &( edges_on_local_cells[indexInArray] ) );
1238 #endif
1239  if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read edgesOnCell data in a loop" );
1240 
1241  // Increment the index for next subrange
1242  indexInArray += ( endh - starth + 1 ) * maxEdgesPerCell;
1243  }
1244 
1245 #ifdef MOAB_HAVE_PNETCDF
1246  // Wait outside the loop
1247  success = NCFUNC( wait_all )( _fileId, requests.size(), &requests[0], &statuss[0] );
1248  if( success ) MB_SET_ERR( MB_FAILURE, "Failed on wait_all" );
1249 #endif
1250 
1251  // Correct local cell edges array in the same way as local cell vertices array, replace the
1252  // padded edges with the last edges in the corresponding cells
1253  for( int local_cell_idx = 0; local_cell_idx < nLocalCells; local_cell_idx++ )
1254  {
1255  int num_edges = num_edges_on_local_cells[local_cell_idx];
1256  int idx_in_local_edge_arr = local_cell_idx * maxEdgesPerCell;
1257  int last_edge_idx = edges_on_local_cells[idx_in_local_edge_arr + num_edges - 1];
1258  for( int i = num_edges; i < maxEdgesPerCell; i++ )
1259  edges_on_local_cells[idx_in_local_edge_arr + i] = last_edge_idx;
1260  }
1261 
1262  // Collect local edges
1263  std::sort( edges_on_local_cells.begin(), edges_on_local_cells.end() );
1264  std::copy( edges_on_local_cells.rbegin(), edges_on_local_cells.rend(), range_inserter( localGidEdges ) );
1266 
1267  dbgOut.tprintf( 1, " localGidEdges.psize() = %d\n", (int)localGidEdges.psize() );
1268  dbgOut.tprintf( 1, " localGidEdges.size() = %d\n", (int)localGidEdges.size() );
1269 
1270  // Create local edges
1271  EntityHandle start_edge;
1272  EntityHandle* conn_arr_edges = NULL;
1273  ErrorCode rval =
1274  _readNC->readMeshIface->get_element_connect( nLocalEdges, 2, MBEDGE, 0, start_edge, conn_arr_edges,
1275  // Might have to create gather mesh later
1276  ( createGatherSet ? nLocalEdges + nEdges : nLocalEdges ) );MB_CHK_SET_ERR( rval, "Failed to create local edges" );
1277 
1278  // Add local edges to current file set
1279  Range local_edges_range( start_edge, start_edge + nLocalEdges - 1 );
1280  rval = _readNC->mbImpl->add_entities( _fileSet, local_edges_range );MB_CHK_SET_ERR( rval, "Failed to add local edges to current file set" );
1281 
1282  // Get ptr to GID memory for edges
1283  int count = 0;
1284  void* data = NULL;
1285  rval = mbImpl->tag_iterate( mGlobalIdTag, local_edges_range.begin(), local_edges_range.end(), count, data );MB_CHK_SET_ERR( rval, "Failed to iterate global id tag on local edges" );
1286  assert( count == nLocalEdges );
1287  int* gid_data = (int*)data;
1288  std::copy( localGidEdges.begin(), localGidEdges.end(), gid_data );
1289 
1290  int verticesOnEdgeVarId;
1291 
1292  // Read vertices on each local edge, to get edge connectivity
1293  success = NCFUNC( inq_varid )( _fileId, "verticesOnEdge", &verticesOnEdgeVarId );
1294  if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of verticesOnEdge" );
1295  // Utilize the memory storage pointed by conn_arr_edges
1296  int* vertices_on_local_edges = (int*)conn_arr_edges;
1297 #ifdef MOAB_HAVE_PNETCDF
1298  nb_reads = localGidEdges.psize();
1299  requests.resize( nb_reads );
1300  statuss.resize( nb_reads );
1301  idxReq = 0;
1302 #endif
1303  indexInArray = 0;
1304  for( Range::pair_iterator pair_iter = localGidEdges.pair_begin(); pair_iter != localGidEdges.pair_end();
1305  ++pair_iter )
1306  {
1307  EntityHandle starth = pair_iter->first;
1308  EntityHandle endh = pair_iter->second;
1309  NCDF_SIZE read_starts[2] = { static_cast< NCDF_SIZE >( starth - 1 ), 0 };
1310  NCDF_SIZE read_counts[2] = { static_cast< NCDF_SIZE >( endh - starth + 1 ), 2 };
1311 
1312  // Do a partial read in each subrange
1313 #ifdef MOAB_HAVE_PNETCDF
1314  success = NCFUNCREQG( _vara_int )( _fileId, verticesOnEdgeVarId, read_starts, read_counts,
1315  &( vertices_on_local_edges[indexInArray] ), &requests[idxReq++] );
1316 #else
1317  success = NCFUNCAG( _vara_int )( _fileId, verticesOnEdgeVarId, read_starts, read_counts,
1318  &( vertices_on_local_edges[indexInArray] ) );
1319 #endif
1320  if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read verticesOnEdge data in a loop" );
1321 
1322  // Increment the index for next subrange
1323  indexInArray += ( endh - starth + 1 ) * 2;
1324  }
1325 
1326 #ifdef MOAB_HAVE_PNETCDF
1327  // Wait outside the loop
1328  success = NCFUNC( wait_all )( _fileId, requests.size(), &requests[0], &statuss[0] );
1329  if( success ) MB_SET_ERR( MB_FAILURE, "Failed on wait_all" );
1330 #endif
1331 
1332  // Populate connectivity data for local edges
1333  // Convert in-place from int (stored in the first half) to EntityHandle
1334  // Reading backward is the trick
1335  for( int edge_vert = nLocalEdges * 2 - 1; edge_vert >= 0; edge_vert-- )
1336  {
1337  int global_vert_idx = vertices_on_local_edges[edge_vert]; // Global vertex index, 1 based
1338  int local_vert_idx = localGidVerts.index( global_vert_idx ); // Local vertex index, 0 based
1339  assert( local_vert_idx != -1 );
1340  conn_arr_edges[edge_vert] = start_vertex + local_vert_idx;
1341  }
1342 
1343  return MB_SUCCESS;
1344 }
1345 
1346 ErrorCode NCHelperMPAS::create_local_cells( const std::vector< int >& vertices_on_local_cells,
1347  const std::vector< int >& num_edges_on_local_cells,
1348  EntityHandle start_vertex,
1349  Range& faces )
1350 {
1351  Interface*& mbImpl = _readNC->mbImpl;
1352  Tag& mGlobalIdTag = _readNC->mGlobalIdTag;
1353 
1354  // Divide local cells into groups based on the number of edges
1355  Range local_cells_with_n_edges[DEFAULT_MAX_EDGES_PER_CELL + 1];
1356  // Insert larger values before smaller ones to increase efficiency
1357  for( int i = nLocalCells - 1; i >= 0; i-- )
1358  {
1359  int num_edges = num_edges_on_local_cells[i];
1360  local_cells_with_n_edges[num_edges].insert( localGidCells[i] ); // Global cell index
1361  }
1362 
1363  std::vector< int > num_edges_on_cell_groups;
1364  for( int i = 3; i <= maxEdgesPerCell; i++ )
1365  {
1366  if( local_cells_with_n_edges[i].size() > 0 ) num_edges_on_cell_groups.push_back( i );
1367  }
1368  numCellGroups = num_edges_on_cell_groups.size();
1369 
1370  EntityHandle* conn_arr_local_cells_with_n_edges[DEFAULT_MAX_EDGES_PER_CELL + 1];
1371  for( int i = 0; i < numCellGroups; i++ )
1372  {
1373  int num_edges_per_cell = num_edges_on_cell_groups[i];
1374  int num_group_cells = (int)local_cells_with_n_edges[num_edges_per_cell].size();
1375 
1376  // Create local cells for each non-empty cell group
1377  EntityHandle start_element;
1379  num_group_cells, num_edges_per_cell, MBPOLYGON, 0, start_element,
1380  conn_arr_local_cells_with_n_edges[num_edges_per_cell], num_group_cells );MB_CHK_SET_ERR( rval, "Failed to create local cells" );
1381  faces.insert( start_element, start_element + num_group_cells - 1 );
1382 
1383  // Add local cells to current file set
1384  Range local_cells_range( start_element, start_element + num_group_cells - 1 );
1385  rval = _readNC->mbImpl->add_entities( _fileSet, local_cells_range );MB_CHK_SET_ERR( rval, "Failed to add local cells to current file set" );
1386 
1387  // Get ptr to gid memory for local cells
1388  int count = 0;
1389  void* data = NULL;
1390  rval = mbImpl->tag_iterate( mGlobalIdTag, local_cells_range.begin(), local_cells_range.end(), count, data );MB_CHK_SET_ERR( rval, "Failed to iterate global id tag on local cells" );
1391  assert( count == num_group_cells );
1392  int* gid_data = (int*)data;
1393  std::copy( local_cells_with_n_edges[num_edges_per_cell].begin(),
1394  local_cells_with_n_edges[num_edges_per_cell].end(), gid_data );
1395 
1396  // Set connectivity array with proper local vertices handles
1397  for( int j = 0; j < num_group_cells; j++ )
1398  {
1399  EntityHandle global_cell_idx =
1400  local_cells_with_n_edges[num_edges_per_cell][j]; // Global cell index, 1 based
1401  int local_cell_idx = localGidCells.index( global_cell_idx ); // Local cell index, 0 based
1402  assert( local_cell_idx != -1 );
1403 
1404  if( numCellGroups > 1 )
1405  {
1406  // Populate cellHandleToGlobalID map to read cell variables
1407  cellHandleToGlobalID[start_element + j] = global_cell_idx;
1408  }
1409 
1410  for( int k = 0; k < num_edges_per_cell; k++ )
1411  {
1412  EntityHandle global_vert_idx =
1413  vertices_on_local_cells[local_cell_idx * maxEdgesPerCell + k]; // Global vertex index, 1 based
1414  int local_vert_idx = localGidVerts.index( global_vert_idx ); // Local vertex index, 0 based
1415  assert( local_vert_idx != -1 );
1416  conn_arr_local_cells_with_n_edges[num_edges_per_cell][j * num_edges_per_cell + k] =
1417  start_vertex + local_vert_idx;
1418  }
1419  }
1420  }
1421 
1422  return MB_SUCCESS;
1423 }
1424 
1425 ErrorCode NCHelperMPAS::create_padded_local_cells( const std::vector< int >& vertices_on_local_cells,
1426  EntityHandle start_vertex,
1427  Range& faces )
1428 {
1429  Interface*& mbImpl = _readNC->mbImpl;
1430  Tag& mGlobalIdTag = _readNC->mGlobalIdTag;
1431 
1432  // Only one group of cells (each cell is represented by a polygon with maxEdgesPerCell edges)
1433  numCellGroups = 1;
1434 
1435  // Create cells for this cell group
1436  EntityHandle start_element;
1437  EntityHandle* conn_arr_local_cells = NULL;
1438  ErrorCode rval =
1440  conn_arr_local_cells,
1441  // Might have to create gather mesh later
1442  ( createGatherSet ? nLocalCells + nCells : nLocalCells ) );MB_CHK_SET_ERR( rval, "Failed to create local cells" );
1443  faces.insert( start_element, start_element + nLocalCells - 1 );
1444 
1445  // Add local cells to current file set
1446  Range local_cells_range( start_element, start_element + nLocalCells - 1 );
1447  rval = _readNC->mbImpl->add_entities( _fileSet, local_cells_range );MB_CHK_SET_ERR( rval, "Failed to add local cells to current file set" );
1448 
1449  // Get ptr to GID memory for local cells
1450  int count = 0;
1451  void* data = NULL;
1452  rval = mbImpl->tag_iterate( mGlobalIdTag, local_cells_range.begin(), local_cells_range.end(), count, data );MB_CHK_SET_ERR( rval, "Failed to iterate global id tag on local cells" );
1453  assert( count == nLocalCells );
1454  int* gid_data = (int*)data;
1455  std::copy( localGidCells.begin(), localGidCells.end(), gid_data );
1456 
1457  // Set connectivity array with proper local vertices handles
1458  // vertices_on_local_cells array was already corrected to have the last vertices padded
1459  // no need for extra checks considering
1460  for( int local_cell_idx = 0; local_cell_idx < nLocalCells; local_cell_idx++ )
1461  {
1462  for( int i = 0; i < maxEdgesPerCell; i++ )
1463  {
1464  EntityHandle global_vert_idx =
1465  vertices_on_local_cells[local_cell_idx * maxEdgesPerCell + i]; // Global vertex index, 1 based
1466  int local_vert_idx = localGidVerts.index( global_vert_idx ); // Local vertex index, 0 based
1467  assert( local_vert_idx != -1 );
1468  conn_arr_local_cells[local_cell_idx * maxEdgesPerCell + i] = start_vertex + local_vert_idx;
1469  }
1470  }
1471 
1472  return MB_SUCCESS;
1473 }
1474 
1476 {
1477  Interface*& mbImpl = _readNC->mbImpl;
1478  Tag& mGlobalIdTag = _readNC->mGlobalIdTag;
1479  const Tag*& mpFileIdTag = _readNC->mpFileIdTag;
1480 
1481  // Create gather set vertices
1482  std::vector< double* > arrays;
1483  // Don't need to specify allocation number here, because we know enough vertices were created
1484  // before
1485  ErrorCode rval = _readNC->readMeshIface->get_node_coords( 3, nVertices, 0, gather_set_start_vertex, arrays );MB_CHK_SET_ERR( rval, "Failed to create gather set vertices" );
1486 
1487  // Add vertices to the gather set
1488  Range gather_set_verts_range( gather_set_start_vertex, gather_set_start_vertex + nVertices - 1 );
1489  rval = mbImpl->add_entities( gather_set, gather_set_verts_range );MB_CHK_SET_ERR( rval, "Failed to add vertices to the gather set" );
1490 
1491  // Read x coordinates for gather set vertices
1492  double* xptr = arrays[0];
1493  int xVertexVarId;
1494  int success = NCFUNC( inq_varid )( _fileId, "xVertex", &xVertexVarId );
1495  if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of xVertex" );
1496  NCDF_SIZE read_start = 0;
1497  NCDF_SIZE read_count = static_cast< NCDF_SIZE >( nVertices );
1498 #ifdef MOAB_HAVE_PNETCDF
1499  // Enter independent I/O mode, since this read is only for the gather processor
1500  success = NCFUNC( begin_indep_data )( _fileId );
1501  if( success ) MB_SET_ERR( MB_FAILURE, "Failed to begin independent I/O mode" );
1502  success = NCFUNCG( _vara_double )( _fileId, xVertexVarId, &read_start, &read_count, xptr );
1503  if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read xVertex data" );
1504  success = NCFUNC( end_indep_data )( _fileId );
1505  if( success ) MB_SET_ERR( MB_FAILURE, "Failed to end independent I/O mode" );
1506 #else
1507  success = NCFUNCG( _vara_double )( _fileId, xVertexVarId, &read_start, &read_count, xptr );
1508  if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read xVertex data" );
1509 #endif
1510 
1511  // Read y coordinates for gather set vertices
1512  double* yptr = arrays[1];
1513  int yVertexVarId;
1514  success = NCFUNC( inq_varid )( _fileId, "yVertex", &yVertexVarId );
1515  if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of yVertex" );
1516 #ifdef MOAB_HAVE_PNETCDF
1517  // Enter independent I/O mode, since this read is only for the gather processor
1518  success = NCFUNC( begin_indep_data )( _fileId );
1519  if( success ) MB_SET_ERR( MB_FAILURE, "Failed to begin independent I/O mode" );
1520  success = NCFUNCG( _vara_double )( _fileId, yVertexVarId, &read_start, &read_count, yptr );
1521  if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read yVertex data" );
1522  success = NCFUNC( end_indep_data )( _fileId );
1523  if( success ) MB_SET_ERR( MB_FAILURE, "Failed to end independent I/O mode" );
1524 #else
1525  success = NCFUNCG( _vara_double )( _fileId, yVertexVarId, &read_start, &read_count, yptr );
1526  if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read yVertex data" );
1527 #endif
1528 
1529  // Read z coordinates for gather set vertices
1530  double* zptr = arrays[2];
1531  int zVertexVarId;
1532  success = NCFUNC( inq_varid )( _fileId, "zVertex", &zVertexVarId );
1533  if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of zVertex" );
1534 #ifdef MOAB_HAVE_PNETCDF
1535  // Enter independent I/O mode, since this read is only for the gather processor
1536  success = NCFUNC( begin_indep_data )( _fileId );
1537  if( success ) MB_SET_ERR( MB_FAILURE, "Failed to begin independent I/O mode" );
1538  success = NCFUNCG( _vara_double )( _fileId, zVertexVarId, &read_start, &read_count, zptr );
1539  if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read zVertex data" );
1540  success = NCFUNC( end_indep_data )( _fileId );
1541  if( success ) MB_SET_ERR( MB_FAILURE, "Failed to end independent I/O mode" );
1542 #else
1543  success = NCFUNCG( _vara_double )( _fileId, zVertexVarId, &read_start, &read_count, zptr );
1544  if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read zVertex data" );
1545 #endif
1546 
1547  // Get ptr to GID memory for gather set vertices
1548  int count = 0;
1549  void* data = NULL;
1550  rval =
1551  mbImpl->tag_iterate( mGlobalIdTag, gather_set_verts_range.begin(), gather_set_verts_range.end(), count, data );MB_CHK_SET_ERR( rval, "Failed to iterate global id tag on gather set vertices" );
1552  assert( count == nVertices );
1553  int* gid_data = (int*)data;
1554  for( int j = 1; j <= nVertices; j++ )
1555  gid_data[j - 1] = j;
1556 
1557  // Set the file id tag too, it should be bigger something not interfering with global id
1558  if( mpFileIdTag )
1559  {
1560  rval = mbImpl->tag_iterate( *mpFileIdTag, gather_set_verts_range.begin(), gather_set_verts_range.end(), count,
1561  data );MB_CHK_SET_ERR( rval, "Failed to iterate file id tag on gather set vertices" );
1562  assert( count == nVertices );
1563  int bytes_per_tag = 4;
1564  rval = mbImpl->tag_get_bytes( *mpFileIdTag, bytes_per_tag );MB_CHK_SET_ERR( rval, "Can't get number of bytes for file id tag" );
1565  if( 4 == bytes_per_tag )
1566  {
1567  gid_data = (int*)data;
1568  for( int j = 1; j <= nVertices; j++ )
1569  gid_data[j - 1] = nVertices + j; // Bigger than global id tag
1570  }
1571  else if( 8 == bytes_per_tag )
1572  { // Should be a handle tag on 64 bit machine?
1573  long* handle_tag_data = (long*)data;
1574  for( int j = 1; j <= nVertices; j++ )
1575  handle_tag_data[j - 1] = nVertices + j; // Bigger than global id tag
1576  }
1577  }
1578 
1579  return MB_SUCCESS;
1580 }
1581 
1583 {
1584  Interface*& mbImpl = _readNC->mbImpl;
1585 
1586  // Create gather set edges
1587  EntityHandle start_edge;
1588  EntityHandle* conn_arr_gather_set_edges = NULL;
1589  // Don't need to specify allocation number here, because we know enough edges were created
1590  // before
1591  ErrorCode rval =
1592  _readNC->readMeshIface->get_element_connect( nEdges, 2, MBEDGE, 0, start_edge, conn_arr_gather_set_edges );MB_CHK_SET_ERR( rval, "Failed to create gather set edges" );
1593 
1594  // Add edges to the gather set
1595  Range gather_set_edges_range( start_edge, start_edge + nEdges - 1 );
1596  rval = mbImpl->add_entities( gather_set, gather_set_edges_range );MB_CHK_SET_ERR( rval, "Failed to add edges to the gather set" );
1597 
1598  // Read vertices on each edge
1599  int verticesOnEdgeVarId;
1600  int success = NCFUNC( inq_varid )( _fileId, "verticesOnEdge", &verticesOnEdgeVarId );
1601  if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of verticesOnEdge" );
1602  // Utilize the memory storage pointed by conn_arr_gather_set_edges
1603  int* vertices_on_gather_set_edges = (int*)conn_arr_gather_set_edges;
1604  NCDF_SIZE read_starts[2] = { 0, 0 };
1605  NCDF_SIZE read_counts[2] = { static_cast< NCDF_SIZE >( nEdges ), 2 };
1606 #ifdef MOAB_HAVE_PNETCDF
1607  // Enter independent I/O mode, since this read is only for the gather processor
1608  success = NCFUNC( begin_indep_data )( _fileId );
1609  if( success ) MB_SET_ERR( MB_FAILURE, "Failed to begin independent I/O mode" );
1610  success =
1611  NCFUNCG( _vara_int )( _fileId, verticesOnEdgeVarId, read_starts, read_counts, vertices_on_gather_set_edges );
1612  if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read verticesOnEdge data" );
1613  success = NCFUNC( end_indep_data )( _fileId );
1614  if( success ) MB_SET_ERR( MB_FAILURE, "Failed to end independent I/O mode" );
1615 #else
1616  success =
1617  NCFUNCG( _vara_int )( _fileId, verticesOnEdgeVarId, read_starts, read_counts, vertices_on_gather_set_edges );
1618  if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read verticesOnEdge data" );
1619 #endif
1620 
1621  // Populate connectivity data for gather set edges
1622  // Convert in-place from int (stored in the first half) to EntityHandle
1623  // Reading backward is the trick
1624  for( int edge_vert = nEdges * 2 - 1; edge_vert >= 0; edge_vert-- )
1625  {
1626  int gather_set_vert_idx = vertices_on_gather_set_edges[edge_vert]; // Global vertex index, 1 based
1627  gather_set_vert_idx--; // 1 based -> 0 based
1628  // Connectivity array is shifted by where the gather set vertices start
1629  conn_arr_gather_set_edges[edge_vert] = gather_set_start_vertex + gather_set_vert_idx;
1630  }
1631 
1632  return MB_SUCCESS;
1633 }
1634 
// NOTE(review): the function signature (doxygen source line 1635) was stripped by the
// extraction; judging from the body this is the routine that creates the gather-set
// polygon cells grouped by their edge count (non-padded variant), reading
// "nEdgesOnCell" and "verticesOnCell" -- confirm against the original NCHelperMPAS.cpp.
1636 {
1637  Interface*& mbImpl = _readNC->mbImpl;
1638 
1639  // Read number of edges on each gather set cell
1640  int nEdgesOnCellVarId;
1641  int success = NCFUNC( inq_varid )( _fileId, "nEdgesOnCell", &nEdgesOnCellVarId );
1642  if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of nEdgesOnCell" );
1643  std::vector< int > num_edges_on_gather_set_cells( nCells );
1644  NCDF_SIZE read_start = 0;
1645  NCDF_SIZE read_count = static_cast< NCDF_SIZE >( nCells );
1646 #ifdef MOAB_HAVE_PNETCDF
1647  // Enter independent I/O mode, since this read is only for the gather processor
1648  success = NCFUNC( begin_indep_data )( _fileId );
1649  if( success ) MB_SET_ERR( MB_FAILURE, "Failed to begin independent I/O mode" );
1650  success =
1651  NCFUNCG( _vara_int )( _fileId, nEdgesOnCellVarId, &read_start, &read_count, &num_edges_on_gather_set_cells[0] );
1652  if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read nEdgesOnCell data" );
1653  success = NCFUNC( end_indep_data )( _fileId );
1654  if( success ) MB_SET_ERR( MB_FAILURE, "Failed to end independent I/O mode" );
1655 #else
1656  success =
1657  NCFUNCG( _vara_int )( _fileId, nEdgesOnCellVarId, &read_start, &read_count, &num_edges_on_gather_set_cells[0] );
1658  if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read nEdgesOnCell data" );
1659 #endif
1660 
1661  // Read vertices on each gather set cell (connectivity)
1662  int verticesOnCellVarId;
1663  success = NCFUNC( inq_varid )( _fileId, "verticesOnCell", &verticesOnCellVarId );
1664  if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of verticesOnCell" );
1665  std::vector< int > vertices_on_gather_set_cells( nCells * maxEdgesPerCell );
1666  NCDF_SIZE read_starts[2] = { 0, 0 };
1667  NCDF_SIZE read_counts[2] = { static_cast< NCDF_SIZE >( nCells ), static_cast< NCDF_SIZE >( maxEdgesPerCell ) };
1668 #ifdef MOAB_HAVE_PNETCDF
1669  // Enter independent I/O mode, since this read is only for the gather processor
1670  success = NCFUNC( begin_indep_data )( _fileId );
1671  if( success ) MB_SET_ERR( MB_FAILURE, "Failed to begin independent I/O mode" );
1672  success = NCFUNCG( _vara_int )( _fileId, verticesOnCellVarId, read_starts, read_counts,
1673  &vertices_on_gather_set_cells[0] );
1674  if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read verticesOnCell data" );
1675  success = NCFUNC( end_indep_data )( _fileId );
1676  if( success ) MB_SET_ERR( MB_FAILURE, "Failed to end independent I/O mode" );
1677 #else
1678  success = NCFUNCG( _vara_int )( _fileId, verticesOnCellVarId, read_starts, read_counts,
1679  &vertices_on_gather_set_cells[0] );
1680  if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read verticesOnCell data" );
1681 #endif
1682 
1683  // Divide gather set cells into groups based on the number of edges
1684  Range gather_set_cells_with_n_edges[DEFAULT_MAX_EDGES_PER_CELL + 1];
1685  // Insert larger values before smaller values to increase efficiency
1686  for( int i = nCells - 1; i >= 0; i-- )
1687  {
1688  int num_edges = num_edges_on_gather_set_cells[i];
1689  gather_set_cells_with_n_edges[num_edges].insert( i + 1 ); // 0 based -> 1 based
1690  }
1691 
1692  // Create gather set cells
1693  EntityHandle* conn_arr_gather_set_cells_with_n_edges[DEFAULT_MAX_EDGES_PER_CELL + 1];
1694  for( int num_edges_per_cell = 3; num_edges_per_cell <= maxEdgesPerCell; num_edges_per_cell++ )
1695  {
1696  int num_group_cells = gather_set_cells_with_n_edges[num_edges_per_cell].size();
1697  if( num_group_cells > 0 )
1698  {
1699  EntityHandle start_element;
// NOTE(review): doxygen source line 1700 was stripped by the extraction; it
// presumably held the opening of the call, i.e.
// "ErrorCode rval = _readNC->readMeshIface->get_element_connect(" -- the next
// two lines are its remaining arguments. Confirm against the original file.
1701  num_group_cells, num_edges_per_cell, MBPOLYGON, 0, start_element,
1702  conn_arr_gather_set_cells_with_n_edges[num_edges_per_cell], num_group_cells );MB_CHK_SET_ERR( rval, "Failed to create gather set cells" );
1703 
1704  // Add cells to the gather set
1705  Range gather_set_cells_range( start_element, start_element + num_group_cells - 1 );
1706  rval = mbImpl->add_entities( gather_set, gather_set_cells_range );MB_CHK_SET_ERR( rval, "Failed to add cells to the gather set" );
1707 
1708  for( int j = 0; j < num_group_cells; j++ )
1709  {
1710  int gather_set_cell_idx =
1711  gather_set_cells_with_n_edges[num_edges_per_cell][j]; // Global cell index, 1 based
1712  gather_set_cell_idx--; // 1 based -> 0 based
1713 
1714  for( int k = 0; k < num_edges_per_cell; k++ )
1715  {
1716  EntityHandle gather_set_vert_idx =
1717  vertices_on_gather_set_cells[gather_set_cell_idx * maxEdgesPerCell +
1718  k]; // Global vertex index, 1 based
1719  gather_set_vert_idx--; // 1 based -> 0 based
1720 
1721  // Connectivity array is shifted by where the gather set vertices start
1722  conn_arr_gather_set_cells_with_n_edges[num_edges_per_cell][j * num_edges_per_cell + k] =
1723  gather_set_start_vertex + gather_set_vert_idx;
1724  }
1725  }
1726  }
1727  }
1728 
1729  return MB_SUCCESS;
1730 }
1731 
// NOTE(review): the function signature (doxygen source line 1732) was stripped by the
// extraction; judging from the body this is the padded variant of the gather-set cell
// creator: every cell is created with maxEdgesPerCell vertices, and the padded slots
// are filled with the cell's last real vertex -- confirm against NCHelperMPAS.cpp.
1733 {
1734  Interface*& mbImpl = _readNC->mbImpl;
1735 
1736  // Read number of edges on each gather set cell
1737  int nEdgesOnCellVarId;
1738  int success = NCFUNC( inq_varid )( _fileId, "nEdgesOnCell", &nEdgesOnCellVarId );
1739  if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of nEdgesOnCell" );
1740  std::vector< int > num_edges_on_gather_set_cells( nCells );
1741  NCDF_SIZE read_start = 0;
1742  NCDF_SIZE read_count = static_cast< NCDF_SIZE >( nCells );
1743 #ifdef MOAB_HAVE_PNETCDF
1744  // Enter independent I/O mode, since this read is only for the gather processor
1745  success = NCFUNC( begin_indep_data )( _fileId );
1746  if( success ) MB_SET_ERR( MB_FAILURE, "Failed to begin independent I/O mode" );
1747  success =
1748  NCFUNCG( _vara_int )( _fileId, nEdgesOnCellVarId, &read_start, &read_count, &num_edges_on_gather_set_cells[0] );
1749  if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read nEdgesOnCell data" );
1750  success = NCFUNC( end_indep_data )( _fileId );
1751  if( success ) MB_SET_ERR( MB_FAILURE, "Failed to end independent I/O mode" );
1752 #else
1753  success =
1754  NCFUNCG( _vara_int )( _fileId, nEdgesOnCellVarId, &read_start, &read_count, &num_edges_on_gather_set_cells[0] );
1755  if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read nEdgesOnCell data" );
1756 #endif
1757 
1758  // Create gather set cells
1759  EntityHandle start_element;
1760  EntityHandle* conn_arr_gather_set_cells = NULL;
1761  // Don't need to specify allocation number here, because we know enough cells were created
1762  // before
// NOTE(review): doxygen source line 1763 was stripped by the extraction; it presumably
// held the opening of the call, i.e. "ErrorCode rval = _readNC->readMeshIface->
// get_element_connect( nCells, maxEdgesPerCell, MBPOLYGON, 0, start_element," -- the
// next line is its final argument. Confirm against the original file.
1764  conn_arr_gather_set_cells );MB_CHK_SET_ERR( rval, "Failed to create gather set cells" );
1765 
1766  // Add cells to the gather set
1767  Range gather_set_cells_range( start_element, start_element + nCells - 1 );
1768  rval = mbImpl->add_entities( gather_set, gather_set_cells_range );MB_CHK_SET_ERR( rval, "Failed to add cells to the gather set" );
1769 
1770  // Read vertices on each gather set cell (connectivity)
1771  int verticesOnCellVarId;
1772  success = NCFUNC( inq_varid )( _fileId, "verticesOnCell", &verticesOnCellVarId );
1773  if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of verticesOnCell" );
1774  // Utilize the memory storage pointed by conn_arr_gather_set_cells
1775  int* vertices_on_gather_set_cells = (int*)conn_arr_gather_set_cells;
1776  NCDF_SIZE read_starts[2] = { 0, 0 };
1777  NCDF_SIZE read_counts[2] = { static_cast< NCDF_SIZE >( nCells ), static_cast< NCDF_SIZE >( maxEdgesPerCell ) };
1778 #ifdef MOAB_HAVE_PNETCDF
1779  // Enter independent I/O mode, since this read is only for the gather processor
1780  success = NCFUNC( begin_indep_data )( _fileId );
1781  if( success ) MB_SET_ERR( MB_FAILURE, "Failed to begin independent I/O mode" );
1782  success =
1783  NCFUNCG( _vara_int )( _fileId, verticesOnCellVarId, read_starts, read_counts, vertices_on_gather_set_cells );
1784  if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read verticesOnCell data" );
1785  success = NCFUNC( end_indep_data )( _fileId );
1786  if( success ) MB_SET_ERR( MB_FAILURE, "Failed to end independent I/O mode" );
1787 #else
1788  success =
1789  NCFUNCG( _vara_int )( _fileId, verticesOnCellVarId, read_starts, read_counts, vertices_on_gather_set_cells );
1790  if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read verticesOnCell data" );
1791 #endif
1792 
1793  // Correct gather set cell vertices array in the same way as local cell vertices array,
1794  // replace the padded vertices with the last vertices in the corresponding cells
1795  for( int gather_set_cell_idx = 0; gather_set_cell_idx < nCells; gather_set_cell_idx++ )
1796  {
1797  int num_edges = num_edges_on_gather_set_cells[gather_set_cell_idx];
1798  int idx_in_gather_set_vert_arr = gather_set_cell_idx * maxEdgesPerCell;
1799  int last_vert_idx = vertices_on_gather_set_cells[idx_in_gather_set_vert_arr + num_edges - 1];
1800  for( int i = num_edges; i < maxEdgesPerCell; i++ )
1801  vertices_on_gather_set_cells[idx_in_gather_set_vert_arr + i] = last_vert_idx;
1802  }
1803 
1804  // Populate connectivity data for gather set cells
1805  // Convert in-place from int (stored in the first half) to EntityHandle
1806  // Reading backward is the trick
// NOTE(review): as with the edge routine, walking backward relies on
// sizeof(EntityHandle) >= sizeof(int) so the widened writes never clobber
// unread int entries -- assumption from this view; verify.
1807  for( int cell_vert = nCells * maxEdgesPerCell - 1; cell_vert >= 0; cell_vert-- )
1808  {
1809  int gather_set_vert_idx = vertices_on_gather_set_cells[cell_vert]; // Global vertex index, 1 based
1810  gather_set_vert_idx--; // 1 based -> 0 based
1811  // Connectivity array is shifted by where the gather set vertices start
1812  conn_arr_gather_set_cells[cell_vert] = gather_set_start_vertex + gather_set_vert_idx;
1813  }
1814 
1815  return MB_SUCCESS;
1816 }
1817 
1818 } // namespace moab