NCHelperGCRM.cpp
#include "NCHelperGCRM.hpp"
#include "moab/ReadUtilIface.hpp"
#include "moab/FileOptions.hpp"
#include "MBTagConventions.hpp"

#ifdef MOAB_HAVE_ZOLTAN
#include "moab/ZoltanPartitioner.hpp"
#endif

#include <algorithm> // std::find, std::sort, std::copy
#include <cassert>
#include <cmath>

namespace moab
{

// GCRM cells are either pentagons or hexagons, and pentagons are always padded to hexagons
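// (e.g. a pentagon with corners 1 2 3 4 5 is stored as 1 2 3 4 5 5, with the last
// vertex repeated to fill the fixed-width connectivity array)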
const int EDGES_PER_CELL = 6;

NCHelperGCRM::NCHelperGCRM( ReadNC* readNC, int fileId, const FileOptions& opts, EntityHandle fileSet )
    : UcdNCHelper( readNC, fileId, opts, fileSet ), createGatherSet( false )
{
    // Ignore variables containing topological information
    ignoredVarNames.insert( "grid" );
    ignoredVarNames.insert( "cell_corners" );
    ignoredVarNames.insert( "cell_edges" );
    ignoredVarNames.insert( "edge_corners" );
    ignoredVarNames.insert( "cell_neighbors" );
}

bool NCHelperGCRM::can_read_file( ReadNC* readNC )
{
    std::vector< std::string >& dimNames = readNC->dimNames;

    // If a dimension named "cells" exists, then this should be a GCRM grid
    if( std::find( dimNames.begin(), dimNames.end(), std::string( "cells" ) ) != dimNames.end() ) return true;

    return false;
}

ErrorCode NCHelperGCRM::init_mesh_vals()
{
    std::vector< std::string >& dimNames = _readNC->dimNames;
    std::vector< int >& dimLens = _readNC->dimLens;
    std::map< std::string, ReadNC::VarData >& varInfo = _readNC->varInfo;

    ErrorCode rval;
    unsigned int idx;
    std::vector< std::string >::iterator vit;

    // Look for time dimension
    if( ( vit = std::find( dimNames.begin(), dimNames.end(), "Time" ) ) != dimNames.end() )
        idx = vit - dimNames.begin();
    else if( ( vit = std::find( dimNames.begin(), dimNames.end(), "time" ) ) != dimNames.end() )
        idx = vit - dimNames.begin();
    else
    {
        MB_SET_ERR( MB_FAILURE, "Couldn't find 'Time' or 'time' dimension" );
    }
    tDim = idx;
    nTimeSteps = dimLens[idx];

    // Get number of cells
    if( ( vit = std::find( dimNames.begin(), dimNames.end(), "cells" ) ) != dimNames.end() )
        idx = vit - dimNames.begin();
    else
    {
        MB_SET_ERR( MB_FAILURE, "Couldn't find 'cells' dimension" );
    }
    cDim = idx;
    nCells = dimLens[idx];

    // Get number of edges
    if( ( vit = std::find( dimNames.begin(), dimNames.end(), "edges" ) ) != dimNames.end() )
        idx = vit - dimNames.begin();
    else
    {
        MB_SET_ERR( MB_FAILURE, "Couldn't find 'edges' dimension" );
    }
    eDim = idx;
    nEdges = dimLens[idx];

    // Get number of vertices
    vDim = -1;
    if( ( vit = std::find( dimNames.begin(), dimNames.end(), "corners" ) ) != dimNames.end() )
        idx = vit - dimNames.begin();
    else
    {
        MB_SET_ERR( MB_FAILURE, "Couldn't find 'corners' dimension" );
    }
    vDim = idx;
    nVertices = dimLens[idx];

    // Get number of layers
    if( ( vit = std::find( dimNames.begin(), dimNames.end(), "layers" ) ) != dimNames.end() )
        idx = vit - dimNames.begin();
    else
    {
        MB_SET_ERR( MB_FAILURE, "Couldn't find 'layers' dimension" );
    }
    levDim = idx;
    nLevels = dimLens[idx];

    // Dimension indices for other optional levels
    std::vector< unsigned int > opt_lev_dims;

    // Get index of interface levels
    if( ( vit = std::find( dimNames.begin(), dimNames.end(), "interfaces" ) ) != dimNames.end() )
    {
        idx = vit - dimNames.begin();
        opt_lev_dims.push_back( idx );
    }

    std::map< std::string, ReadNC::VarData >::iterator vmit;

    // Store time coordinate values in tVals
    if( nTimeSteps > 0 )
    {
        if( ( vmit = varInfo.find( "time" ) ) != varInfo.end() )
        {
            rval = read_coordinate( "time", 0, nTimeSteps - 1, tVals );MB_CHK_SET_ERR( rval, "Trouble reading 'time' variable" );
        }
        else if( ( vmit = varInfo.find( "t" ) ) != varInfo.end() )
        {
            rval = read_coordinate( "t", 0, nTimeSteps - 1, tVals );MB_CHK_SET_ERR( rval, "Trouble reading 't' variable" );
        }
        else
        {
            // If the expected time variable is not available, fill tVals with dummy time coordinate values
            for( int t = 0; t < nTimeSteps; t++ )
                tVals.push_back( (double)t );
        }
    }

    // For each variable, determine the entity location type and number of levels
    for( vmit = varInfo.begin(); vmit != varInfo.end(); ++vmit )
    {
        ReadNC::VarData& vd = ( *vmit ).second;

        // Default entLoc is ENTLOCSET
        if( std::find( vd.varDims.begin(), vd.varDims.end(), tDim ) != vd.varDims.end() )
        {
            if( std::find( vd.varDims.begin(), vd.varDims.end(), vDim ) != vd.varDims.end() )
                vd.entLoc = ReadNC::ENTLOCVERT;
            else if( std::find( vd.varDims.begin(), vd.varDims.end(), eDim ) != vd.varDims.end() )
                vd.entLoc = ReadNC::ENTLOCEDGE;
            else if( std::find( vd.varDims.begin(), vd.varDims.end(), cDim ) != vd.varDims.end() )
                vd.entLoc = ReadNC::ENTLOCFACE;
        }

        // Default numLev is 0
        if( std::find( vd.varDims.begin(), vd.varDims.end(), levDim ) != vd.varDims.end() )
            vd.numLev = nLevels;
        else
        {
            // If layers dimension is not found, try other optional levels such as interfaces
            for( unsigned int i = 0; i < opt_lev_dims.size(); i++ )
            {
                if( std::find( vd.varDims.begin(), vd.varDims.end(), opt_lev_dims[i] ) != vd.varDims.end() )
                {
                    vd.numLev = dimLens[opt_lev_dims[i]];
                    break;
                }
            }
        }
    }
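
    // For example, a variable with dimensions (time, cells, layers) is tagged
    // ENTLOCFACE with numLev == nLevels, while one with dimensions (time, edges)
    // keeps the default numLev of 0 (a single level)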

    // Hack: create dummy variables for dimensions (like cells) with no corresponding coordinate
    // variables
    rval = create_dummy_variables();MB_CHK_SET_ERR( rval, "Failed to create dummy variables" );

    return MB_SUCCESS;
}

// When the noMesh option is used on this read, the old ReadNC class instance for the last read
// can get out of scope (and deleted). The old instance initialized some variables properly when
// the mesh was created, but they are now lost. The new instance (which will not create the mesh
// with the noMesh option) has to restore them based on the existing mesh from the last read.
ErrorCode NCHelperGCRM::check_existing_mesh()
{
    Interface*& mbImpl = _readNC->mbImpl;
    Tag& mGlobalIdTag = _readNC->mGlobalIdTag;
    bool& noMesh = _readNC->noMesh;

    if( noMesh )
    {
        ErrorCode rval;

        if( localGidVerts.empty() )
        {
            // Get all vertices from current file set (it is the input set in no_mesh scenario)
            Range local_verts;
            rval = mbImpl->get_entities_by_dimension( _fileSet, 0, local_verts );MB_CHK_SET_ERR( rval, "Trouble getting local vertices in current file set" );

            if( !local_verts.empty() )
            {
                std::vector< int > gids( local_verts.size() );

                // !IMPORTANT : this has to be the GLOBAL_ID tag
                rval = mbImpl->tag_get_data( mGlobalIdTag, local_verts, &gids[0] );MB_CHK_SET_ERR( rval, "Trouble getting local gid values of vertices" );

                // Restore localGidVerts
                std::copy( gids.rbegin(), gids.rend(), range_inserter( localGidVerts ) );
                nLocalVertices = localGidVerts.size();
            }
        }

        if( localGidEdges.empty() )
        {
            // Get all edges from current file set (it is the input set in no_mesh scenario)
            Range local_edges;
            rval = mbImpl->get_entities_by_dimension( _fileSet, 1, local_edges );MB_CHK_SET_ERR( rval, "Trouble getting local edges in current file set" );

            if( !local_edges.empty() )
            {
                std::vector< int > gids( local_edges.size() );

                // !IMPORTANT : this has to be the GLOBAL_ID tag
                rval = mbImpl->tag_get_data( mGlobalIdTag, local_edges, &gids[0] );MB_CHK_SET_ERR( rval, "Trouble getting local gid values of edges" );

                // Restore localGidEdges
                std::copy( gids.rbegin(), gids.rend(), range_inserter( localGidEdges ) );
                nLocalEdges = localGidEdges.size();
            }
        }

        if( localGidCells.empty() )
        {
            // Get all cells from current file set (it is the input set in no_mesh scenario)
            Range local_cells;
            rval = mbImpl->get_entities_by_dimension( _fileSet, 2, local_cells );MB_CHK_SET_ERR( rval, "Trouble getting local cells in current file set" );

            if( !local_cells.empty() )
            {
                std::vector< int > gids( local_cells.size() );

                // !IMPORTANT : this has to be the GLOBAL_ID tag
                rval = mbImpl->tag_get_data( mGlobalIdTag, local_cells, &gids[0] );MB_CHK_SET_ERR( rval, "Trouble getting local gid values of cells" );

                // Restore localGidCells
                std::copy( gids.rbegin(), gids.rend(), range_inserter( localGidCells ) );
                nLocalCells = localGidCells.size();
            }
        }
    }

    return MB_SUCCESS;
}

ErrorCode NCHelperGCRM::create_mesh( Range& faces )
{
    bool& noEdges = _readNC->noEdges;
    DebugOutput& dbgOut = _readNC->dbgOut;

#ifdef MOAB_HAVE_MPI
    int rank = 0;
    int procs = 1;
    bool& isParallel = _readNC->isParallel;
    ParallelComm* myPcomm = NULL;
    if( isParallel )
    {
        myPcomm = _readNC->myPcomm;
        rank = myPcomm->proc_config().proc_rank();
        procs = myPcomm->proc_config().proc_size();
    }

    // Need to know whether we'll be creating gather mesh
    if( rank == _readNC->gatherSetRank ) createGatherSet = true;

    if( procs >= 2 )
    {
        // Shift rank to obtain a rotated trivial partition
        int shifted_rank = rank;
        int& trivialPartitionShift = _readNC->trivialPartitionShift;
        if( trivialPartitionShift > 0 ) shifted_rank = ( rank + trivialPartitionShift ) % procs;

        // Compute the number of local cells on this proc
        nLocalCells = int( std::floor( 1.0 * nCells / procs ) );

        // The starting global cell index in the GCRM file for this proc
        int start_cell_idx = shifted_rank * nLocalCells;

        // Number of extra cells after equal split over procs
        int iextra = nCells % procs;

        // Allocate extra cells over procs
        if( shifted_rank < iextra ) nLocalCells++;
        start_cell_idx += std::min( shifted_rank, iextra );

        start_cell_idx++; // 0 based -> 1 based
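
        // For example, with nCells = 10 and procs = 4 (no shift), ranks 0..3 get
        // 3, 3, 2 and 2 cells, starting at 1-based indices 1, 4, 7 and 9 respectively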

        // Redistribute local cells after trivial partition (e.g. apply Zoltan partition)
        ErrorCode rval = redistribute_local_cells( start_cell_idx, myPcomm );MB_CHK_SET_ERR( rval, "Failed to redistribute local cells after trivial partition" );
    }
    else
    {
        nLocalCells = nCells;
        localGidCells.insert( 1, nLocalCells );
    }
#else
    nLocalCells = nCells;
    localGidCells.insert( 1, nLocalCells );
#endif
    dbgOut.tprintf( 1, " localGidCells.psize() = %d\n", (int)localGidCells.psize() );
    dbgOut.tprintf( 1, " localGidCells.size() = %d\n", (int)localGidCells.size() );

    // Read vertices on each local cell, to get localGidVerts and cell connectivity later
    int verticesOnCellVarId;
    int success = NCFUNC( inq_varid )( _fileId, "cell_corners", &verticesOnCellVarId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of cell_corners" );
    std::vector< int > vertices_on_local_cells( nLocalCells * EDGES_PER_CELL );
    dbgOut.tprintf( 1, " nLocalCells = %d\n", (int)nLocalCells );
    dbgOut.tprintf( 1, " vertices_on_local_cells.size() = %d\n", (int)vertices_on_local_cells.size() );
#ifdef MOAB_HAVE_PNETCDF
    size_t nb_reads = localGidCells.psize();
    std::vector< int > requests( nb_reads );
    std::vector< int > statuss( nb_reads );
    size_t idxReq = 0;
#endif
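    // localGidCells stores the sorted global cell ids as a set of contiguous
    // subranges (pairs), so issue one contiguous hyperslab read per subrange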
    size_t indexInArray = 0;
    for( Range::pair_iterator pair_iter = localGidCells.pair_begin(); pair_iter != localGidCells.pair_end();
         ++pair_iter )
    {
        EntityHandle starth = pair_iter->first;
        EntityHandle endh = pair_iter->second;
        dbgOut.tprintf( 1, " cell_corners starth = %d\n", (int)starth );
        dbgOut.tprintf( 1, " cell_corners endh = %d\n", (int)endh );
        NCDF_SIZE read_starts[2] = { static_cast< NCDF_SIZE >( starth - 1 ), 0 };
        NCDF_SIZE read_counts[2] = { static_cast< NCDF_SIZE >( endh - starth + 1 ),
                                     static_cast< NCDF_SIZE >( EDGES_PER_CELL ) };

        // Do a partial read in each subrange
#ifdef MOAB_HAVE_PNETCDF
        success = NCFUNCREQG( _vara_int )( _fileId, verticesOnCellVarId, read_starts, read_counts,
                                           &( vertices_on_local_cells[indexInArray] ), &requests[idxReq++] );
#else
        success = NCFUNCAG( _vara_int )( _fileId, verticesOnCellVarId, read_starts, read_counts,
                                         &( vertices_on_local_cells[indexInArray] ) );
#endif
        if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read cell_corners data in a loop" );

        // Increment the index for next subrange
        indexInArray += ( endh - starth + 1 ) * EDGES_PER_CELL;
    }

#ifdef MOAB_HAVE_PNETCDF
    // Wait outside the loop
    success = NCFUNC( wait_all )( _fileId, requests.size(), &requests[0], &statuss[0] );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed on wait_all" );
#endif

    // Correct vertices_on_local_cells array. Pentagons as hexagons should have
    // a connectivity like 123455 and not 122345
    for( int local_cell_idx = 0; local_cell_idx < nLocalCells; local_cell_idx++ )
    {
        int* pvertex = &vertices_on_local_cells[local_cell_idx * EDGES_PER_CELL];
        for( int k = 0; k < EDGES_PER_CELL - 2; k++ )
        {
            if( *( pvertex + k ) == *( pvertex + k + 1 ) )
            {
                // Shift the connectivity
                for( int kk = k + 1; kk < EDGES_PER_CELL - 1; kk++ )
                    *( pvertex + kk ) = *( pvertex + kk + 1 );
                // No need to try next k
                break;
            }
        }
    }

    // GCRM is 0 based, convert vertex indices from 0 based to 1 based
    for( std::size_t idx = 0; idx < vertices_on_local_cells.size(); idx++ )
        vertices_on_local_cells[idx] += 1;

    // Create local vertices
    EntityHandle start_vertex;
    ErrorCode rval = create_local_vertices( vertices_on_local_cells, start_vertex );MB_CHK_SET_ERR( rval, "Failed to create local vertices for GCRM mesh" );

    // Create local edges (unless NO_EDGES read option is set)
    if( !noEdges )
    {
        rval = create_local_edges( start_vertex );MB_CHK_SET_ERR( rval, "Failed to create local edges for GCRM mesh" );
    }

    // Create local cells, padded
    rval = create_padded_local_cells( vertices_on_local_cells, start_vertex, faces );MB_CHK_SET_ERR( rval, "Failed to create padded local cells for GCRM mesh" );

    if( createGatherSet )
    {
        EntityHandle gather_set;
        rval = _readNC->readMeshIface->create_gather_set( gather_set );MB_CHK_SET_ERR( rval, "Failed to create gather set" );

        // Create gather set vertices
        EntityHandle start_gather_set_vertex;
        rval = create_gather_set_vertices( gather_set, start_gather_set_vertex );MB_CHK_SET_ERR( rval, "Failed to create gather set vertices for GCRM mesh" );

        // Create gather set edges (unless NO_EDGES read option is set)
        if( !noEdges )
        {
            rval = create_gather_set_edges( gather_set, start_gather_set_vertex );MB_CHK_SET_ERR( rval, "Failed to create gather set edges for GCRM mesh" );
        }

        // Create gather set cells with padding
        rval = create_padded_gather_set_cells( gather_set, start_gather_set_vertex );MB_CHK_SET_ERR( rval, "Failed to create padded gather set cells for GCRM mesh" );
    }

    return MB_SUCCESS;
}

ErrorCode NCHelperGCRM::read_ucd_variables_to_nonset_allocate( std::vector< ReadNC::VarData >& vdatas,
                                                               std::vector< int >& tstep_nums )
{
    Interface*& mbImpl = _readNC->mbImpl;
    std::vector< int >& dimLens = _readNC->dimLens;
    bool& noEdges = _readNC->noEdges;
    DebugOutput& dbgOut = _readNC->dbgOut;

    Range* range = NULL;

    // Get vertices
    Range verts;
    ErrorCode rval = mbImpl->get_entities_by_dimension( _fileSet, 0, verts );MB_CHK_SET_ERR( rval, "Trouble getting vertices in current file set" );
    assert( "Should only have a single vertex subrange, since they were read in one shot" && verts.psize() == 1 );

    // Get edges
    Range edges;
    rval = mbImpl->get_entities_by_dimension( _fileSet, 1, edges );MB_CHK_SET_ERR( rval, "Trouble getting edges in current file set" );

    // Get faces
    Range faces;
    rval = mbImpl->get_entities_by_dimension( _fileSet, 2, faces );MB_CHK_SET_ERR( rval, "Trouble getting faces in current file set" );
    assert( "Should only have a single face subrange, since they were read in one shot" && faces.psize() == 1 );

#ifdef MOAB_HAVE_MPI
    bool& isParallel = _readNC->isParallel;
    if( isParallel )
    {
        ParallelComm*& myPcomm = _readNC->myPcomm;
        rval = myPcomm->filter_pstatus( faces, PSTATUS_NOT_OWNED, PSTATUS_NOT, -1, &facesOwned );MB_CHK_SET_ERR( rval, "Trouble getting owned faces in current file set" );
    }
    else
        facesOwned = faces; // Not running in parallel, but still with MPI
#else
    facesOwned = faces;
#endif

    for( unsigned int i = 0; i < vdatas.size(); i++ )
    {
        // Skip edge variables, if specified by the read options
        if( noEdges && vdatas[i].entLoc == ReadNC::ENTLOCEDGE ) continue;

        // Support non-set variables with 3 dimensions like (time, cells, layers), or
        // 2 dimensions like (time, cells)
        assert( 3 == vdatas[i].varDims.size() || 2 == vdatas[i].varDims.size() );

        // For a non-set variable, time should be the first dimension
        assert( tDim == vdatas[i].varDims[0] );

        // Set up readStarts and readCounts
        vdatas[i].readStarts.resize( 3 );
        vdatas[i].readCounts.resize( 3 );

        // First: Time
        vdatas[i].readStarts[0] = 0; // This value is timestep dependent, will be set later
        vdatas[i].readCounts[0] = 1;

        // Next: nVertices / nCells / nEdges
        switch( vdatas[i].entLoc )
        {
            case ReadNC::ENTLOCVERT:
                // Vertices
                // Start from the first localGidVerts
                // Actually, this will be reset later on in a loop
                vdatas[i].readStarts[1] = localGidVerts[0] - 1;
                vdatas[i].readCounts[1] = nLocalVertices;
                range = &verts;
                break;
            case ReadNC::ENTLOCFACE:
                // Faces
                // Start from the first localGidCells
                // Actually, this will be reset later on in a loop
                vdatas[i].readStarts[1] = localGidCells[0] - 1;
                vdatas[i].readCounts[1] = nLocalCells;
                range = &facesOwned;
                break;
            case ReadNC::ENTLOCEDGE:
                // Edges
                // Start from the first localGidEdges
                // Actually, this will be reset later on in a loop
                vdatas[i].readStarts[1] = localGidEdges[0] - 1;
                vdatas[i].readCounts[1] = nLocalEdges;
                range = &edges;
                break;
            default:
                MB_SET_ERR( MB_FAILURE, "Unexpected entity location type for variable " << vdatas[i].varName );
        }

        // Finally: layers or other optional levels; it is possible that there is no
        // level dimension (numLev is 0) for this non-set variable
        if( vdatas[i].numLev < 1 ) vdatas[i].numLev = 1;
        vdatas[i].readStarts[2] = 0;
        vdatas[i].readCounts[2] = vdatas[i].numLev;

        // Get variable size
        vdatas[i].sz = 1;
        for( std::size_t idx = 0; idx != 3; idx++ )
            vdatas[i].sz *= vdatas[i].readCounts[idx];
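
        // For instance, a (time, cells, layers) variable reads
        // 1 * nLocalCells * numLev values per requested timestep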

        for( unsigned int t = 0; t < tstep_nums.size(); t++ )
        {
            dbgOut.tprintf( 2, "Reading variable %s, time step %d\n", vdatas[i].varName.c_str(), tstep_nums[t] );

            if( tstep_nums[t] >= dimLens[tDim] )
            {
                MB_SET_ERR( MB_INDEX_OUT_OF_RANGE, "Wrong value for timestep number " << tstep_nums[t] );
            }

            // Get the tag to read into
            if( !vdatas[i].varTags[t] )
            {
                rval = get_tag_to_nonset( vdatas[i], tstep_nums[t], vdatas[i].varTags[t], vdatas[i].numLev );MB_CHK_SET_ERR( rval, "Trouble getting tag for variable " << vdatas[i].varName );
            }

            // Get ptr to tag space
            assert( 1 == range->psize() );
            void* data;
            int count;
            rval = mbImpl->tag_iterate( vdatas[i].varTags[t], range->begin(), range->end(), count, data );MB_CHK_SET_ERR( rval, "Failed to iterate tag for variable " << vdatas[i].varName );
            assert( (unsigned)count == range->size() );
            vdatas[i].varDatas[t] = data;
        }
    }

    return rval;
}

#ifdef MOAB_HAVE_PNETCDF
ErrorCode NCHelperGCRM::read_ucd_variables_to_nonset_async( std::vector< ReadNC::VarData >& vdatas,
                                                            std::vector< int >& tstep_nums )
{
    bool& noEdges = _readNC->noEdges;
    DebugOutput& dbgOut = _readNC->dbgOut;

    ErrorCode rval = read_ucd_variables_to_nonset_allocate( vdatas, tstep_nums );MB_CHK_SET_ERR( rval, "Trouble allocating space to read non-set variables" );

    // Finally, read into that space
    int success;
    Range* pLocalGid = NULL;

    for( unsigned int i = 0; i < vdatas.size(); i++ )
    {
        // Skip edge variables, if specified by the read options
        if( noEdges && vdatas[i].entLoc == ReadNC::ENTLOCEDGE ) continue;

        switch( vdatas[i].entLoc )
        {
            case ReadNC::ENTLOCVERT:
                pLocalGid = &localGidVerts;
                break;
            case ReadNC::ENTLOCFACE:
                pLocalGid = &localGidCells;
                break;
            case ReadNC::ENTLOCEDGE:
                pLocalGid = &localGidEdges;
                break;
            default:
                MB_SET_ERR( MB_FAILURE, "Unexpected entity location type for variable " << vdatas[i].varName );
        }

        std::size_t sz = vdatas[i].sz;

        for( unsigned int t = 0; t < tstep_nums.size(); t++ )
        {
            // We will synchronize all these reads with the other processors,
            // so the wait will be inside this double loop; is it too much?
            size_t nb_reads = pLocalGid->psize();
            std::vector< int > requests( nb_reads ), statuss( nb_reads );
            size_t idxReq = 0;

            // Set readStart for each timestep along time dimension
            vdatas[i].readStarts[0] = tstep_nums[t];

            switch( vdatas[i].varDataType )
            {
                case NC_FLOAT:
                case NC_DOUBLE: {
                    // Read float as double
                    std::vector< double > tmpdoubledata( sz );

                    // In the case of ucd mesh, and on multiple procs, we need to read
                    // as many times as there are subranges in the localGid range;
                    // basically, we have to give a different starting point for the
                    // data, for every subrange :(
                    size_t indexInDoubleArray = 0;
                    size_t ic = 0;
                    for( Range::pair_iterator pair_iter = pLocalGid->pair_begin(); pair_iter != pLocalGid->pair_end();
                         ++pair_iter, ic++ )
                    {
                        EntityHandle starth = pair_iter->first;
                        EntityHandle endh = pair_iter->second; // Inclusive
                        vdatas[i].readStarts[1] = (NCDF_SIZE)( starth - 1 );
                        vdatas[i].readCounts[1] = (NCDF_SIZE)( endh - starth + 1 );

                        // Do a partial read, in each subrange
                        // wait outside this loop
                        success =
                            NCFUNCREQG( _vara_double )( _fileId, vdatas[i].varId, &( vdatas[i].readStarts[0] ),
                                                        &( vdatas[i].readCounts[0] ),
                                                        &( tmpdoubledata[indexInDoubleArray] ), &requests[idxReq++] );
                        if( success )
                            MB_SET_ERR( MB_FAILURE,
                                        "Failed to read double data in a loop for variable " << vdatas[i].varName );
                        // We need to increment the index in the double array for the
                        // next subrange
                        indexInDoubleArray += ( endh - starth + 1 ) * 1 * vdatas[i].numLev;
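                        // = (entities in subrange) * (1 timestep) * (numLev values per entity)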
                    }
                    assert( ic == pLocalGid->psize() );

                    success = NCFUNC( wait_all )( _fileId, requests.size(), &requests[0], &statuss[0] );
                    if( success ) MB_SET_ERR( MB_FAILURE, "Failed on wait_all" );

                    void* data = vdatas[i].varDatas[t];
                    for( std::size_t idx = 0; idx != tmpdoubledata.size(); idx++ )
                        ( (double*)data )[idx] = tmpdoubledata[idx];

                    break;
                }
                default:
                    MB_SET_ERR( MB_FAILURE, "Unexpected data type for variable " << vdatas[i].varName );
            }
        }
    }

    // Debug output, if requested
    if( 1 == dbgOut.get_verbosity() )
    {
        dbgOut.printf( 1, "Read variables: %s", vdatas.begin()->varName.c_str() );
        for( unsigned int i = 1; i < vdatas.size(); i++ )
            dbgOut.printf( 1, ", %s ", vdatas[i].varName.c_str() );
        dbgOut.tprintf( 1, "\n" );
    }

    return rval;
}
#else
ErrorCode NCHelperGCRM::read_ucd_variables_to_nonset( std::vector< ReadNC::VarData >& vdatas,
                                                      std::vector< int >& tstep_nums )
{
    bool& noEdges = _readNC->noEdges;
    DebugOutput& dbgOut = _readNC->dbgOut;

    ErrorCode rval = read_ucd_variables_to_nonset_allocate( vdatas, tstep_nums );MB_CHK_SET_ERR( rval, "Trouble allocating space to read non-set variables" );

    // Finally, read into that space
    int success;
    Range* pLocalGid = NULL;

    for( unsigned int i = 0; i < vdatas.size(); i++ )
    {
        // Skip edge variables, if specified by the read options
        if( noEdges && vdatas[i].entLoc == ReadNC::ENTLOCEDGE ) continue;

        switch( vdatas[i].entLoc )
        {
            case ReadNC::ENTLOCVERT:
                pLocalGid = &localGidVerts;
                break;
            case ReadNC::ENTLOCFACE:
                pLocalGid = &localGidCells;
                break;
            case ReadNC::ENTLOCEDGE:
                pLocalGid = &localGidEdges;
                break;
            default:
                MB_SET_ERR( MB_FAILURE, "Unexpected entity location type for variable " << vdatas[i].varName );
        }

        std::size_t sz = vdatas[i].sz;

        for( unsigned int t = 0; t < tstep_nums.size(); t++ )
        {
            // Set readStart for each timestep along time dimension
            vdatas[i].readStarts[0] = tstep_nums[t];

            switch( vdatas[i].varDataType )
            {
                case NC_FLOAT:
                case NC_DOUBLE: {
                    // Read float as double
                    std::vector< double > tmpdoubledata( sz );

                    // In the case of ucd mesh, and on multiple procs, we need to read
                    // as many times as there are subranges in the localGid range;
                    // basically, we have to give a different starting point for the
                    // data, for every subrange :(
                    size_t indexInDoubleArray = 0;
                    size_t ic = 0;
                    for( Range::pair_iterator pair_iter = pLocalGid->pair_begin(); pair_iter != pLocalGid->pair_end();
                         ++pair_iter, ic++ )
                    {
                        EntityHandle starth = pair_iter->first;
                        EntityHandle endh = pair_iter->second; // Inclusive
                        vdatas[i].readStarts[1] = (NCDF_SIZE)( starth - 1 );
                        vdatas[i].readCounts[1] = (NCDF_SIZE)( endh - starth + 1 );

                        success = NCFUNCAG( _vara_double )( _fileId, vdatas[i].varId, &( vdatas[i].readStarts[0] ),
                                                            &( vdatas[i].readCounts[0] ),
                                                            &( tmpdoubledata[indexInDoubleArray] ) );
                        if( success )
                            MB_SET_ERR( MB_FAILURE,
                                        "Failed to read double data in a loop for variable " << vdatas[i].varName );
                        // We need to increment the index in the double array for the
                        // next subrange
                        indexInDoubleArray += ( endh - starth + 1 ) * 1 * vdatas[i].numLev;
                    }
                    assert( ic == pLocalGid->psize() );

                    void* data = vdatas[i].varDatas[t];
                    for( std::size_t idx = 0; idx != tmpdoubledata.size(); idx++ )
                        ( (double*)data )[idx] = tmpdoubledata[idx];

                    break;
                }
                default:
                    MB_SET_ERR( MB_FAILURE, "Unexpected data type for variable " << vdatas[i].varName );
            }
        }
    }

    // Debug output, if requested
    if( 1 == dbgOut.get_verbosity() )
    {
        dbgOut.printf( 1, "Read variables: %s", vdatas.begin()->varName.c_str() );
        for( unsigned int i = 1; i < vdatas.size(); i++ )
            dbgOut.printf( 1, ", %s ", vdatas[i].varName.c_str() );
        dbgOut.tprintf( 1, "\n" );
    }

    return rval;
}
#endif

#ifdef MOAB_HAVE_MPI
ErrorCode NCHelperGCRM::redistribute_local_cells( int start_cell_idx, ParallelComm* pco )
{
    // If possible, apply the Zoltan partition
#ifdef MOAB_HAVE_ZOLTAN
    if( ScdParData::RCBZOLTAN == _readNC->partMethod )
    {
        // Read lat/lon coordinates of cell centers, then convert spherical to
        // Cartesian, and use them as input to the Zoltan partition
        int xCellVarId;
        int success = NCFUNC( inq_varid )( _fileId, "grid_center_lat", &xCellVarId );
        if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of grid_center_lat" );
        std::vector< double > xCell( nLocalCells );
        NCDF_SIZE read_start = static_cast< NCDF_SIZE >( start_cell_idx - 1 );
        NCDF_SIZE read_count = static_cast< NCDF_SIZE >( nLocalCells );
        success = NCFUNCAG( _vara_double )( _fileId, xCellVarId, &read_start, &read_count, &xCell[0] );
        if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read grid_center_lat data" );

        // Read y coordinates of cell centers
        int yCellVarId;
        success = NCFUNC( inq_varid )( _fileId, "grid_center_lon", &yCellVarId );
        if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of grid_center_lon" );
        std::vector< double > yCell( nLocalCells );
        success = NCFUNCAG( _vara_double )( _fileId, yCellVarId, &read_start, &read_count, &yCell[0] );
        if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read grid_center_lon data" );

        // Convert lon/lat/rad to x/y/z
        std::vector< double > zCell( nLocalCells );
        double rad = 8000.0; // This is just an approximate radius
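        // Spherical to Cartesian on a sphere of radius r:
        //   x = r cos(lat) cos(lon), y = r cos(lat) sin(lon), z = r sin(lat)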
        for( int i = 0; i < nLocalCells; i++ )
        {
            double cosphi = cos( xCell[i] );
            double zmult = sin( xCell[i] );
            double xmult = cosphi * cos( yCell[i] );
            double ymult = cosphi * sin( yCell[i] );
            xCell[i] = rad * xmult;
            yCell[i] = rad * ymult;
            zCell[i] = rad * zmult;
        }

        // Zoltan partition using RCB; maybe more studies would be good, as to which
        // partition method is better
        Interface*& mbImpl = _readNC->mbImpl;
        DebugOutput& dbgOut = _readNC->dbgOut;
        ZoltanPartitioner* mbZTool = new ZoltanPartitioner( mbImpl, pco, false, 0, NULL );
        ErrorCode rval = mbZTool->repartition( xCell, yCell, zCell, start_cell_idx, "RCB", localGidCells );MB_CHK_SET_ERR( rval, "Error in Zoltan partitioning" );
        delete mbZTool;

        dbgOut.tprintf( 1, "After Zoltan partitioning, localGidCells.psize() = %d\n", (int)localGidCells.psize() );
        dbgOut.tprintf( 1, " localGidCells.size() = %d\n", (int)localGidCells.size() );

        // This is important: local cells are now redistributed, so nLocalCells might be different!
        nLocalCells = localGidCells.size();

        return MB_SUCCESS;
    }
#endif

    // By default, apply the trivial partition
    localGidCells.insert( start_cell_idx, start_cell_idx + nLocalCells - 1 );

    return MB_SUCCESS;
}
#endif

ErrorCode NCHelperGCRM::create_local_vertices( const std::vector< int >& vertices_on_local_cells,
                                               EntityHandle& start_vertex )
{
    Interface*& mbImpl = _readNC->mbImpl;
    Tag& mGlobalIdTag = _readNC->mGlobalIdTag;
    const Tag*& mpFileIdTag = _readNC->mpFileIdTag;
    DebugOutput& dbgOut = _readNC->dbgOut;
    std::map< std::string, ReadNC::VarData >& varInfo = _readNC->varInfo;

    // Make a copy of vertices_on_local_cells for sorting (keep the original to set cell
    // connectivity later)
    std::vector< int > vertices_on_local_cells_sorted( vertices_on_local_cells );
    std::sort( vertices_on_local_cells_sorted.begin(), vertices_on_local_cells_sorted.end() );
    std::copy( vertices_on_local_cells_sorted.rbegin(), vertices_on_local_cells_sorted.rend(),
               range_inserter( localGidVerts ) );
    nLocalVertices = localGidVerts.size();

    dbgOut.tprintf( 1, " localGidVerts.psize() = %d\n", (int)localGidVerts.psize() );
    dbgOut.tprintf( 1, " localGidVerts.size() = %d\n", (int)localGidVerts.size() );

    // Create local vertices
    std::vector< double* > arrays;
    ErrorCode rval =
        _readNC->readMeshIface->get_node_coords( 3, nLocalVertices, 0, start_vertex, arrays,
                                                 // Might have to create gather mesh later
                                                 ( createGatherSet ? nLocalVertices + nVertices : nLocalVertices ) );MB_CHK_SET_ERR( rval, "Failed to create local vertices" );

    // Add local vertices to current file set
    Range local_verts_range( start_vertex, start_vertex + nLocalVertices - 1 );
    rval = _readNC->mbImpl->add_entities( _fileSet, local_verts_range );MB_CHK_SET_ERR( rval, "Failed to add local vertices to current file set" );

    // Get ptr to GID memory for local vertices
    int count = 0;
    void* data = NULL;
    rval = mbImpl->tag_iterate( mGlobalIdTag, local_verts_range.begin(), local_verts_range.end(), count, data );MB_CHK_SET_ERR( rval, "Failed to iterate global id tag on local vertices" );
    assert( count == nLocalVertices );
    int* gid_data = (int*)data;
    std::copy( localGidVerts.begin(), localGidVerts.end(), gid_data );

    // Duplicate GID data, which will be used to resolve sharing
    if( mpFileIdTag )
    {
        rval = mbImpl->tag_iterate( *mpFileIdTag, local_verts_range.begin(), local_verts_range.end(), count, data );MB_CHK_SET_ERR( rval, "Failed to iterate file id tag on local vertices" );
        assert( count == nLocalVertices );
        int bytes_per_tag = 4;
        rval = mbImpl->tag_get_bytes( *mpFileIdTag, bytes_per_tag );MB_CHK_SET_ERR( rval, "Can't get number of bytes for file id tag" );
        if( 4 == bytes_per_tag )
        {
            gid_data = (int*)data;
            std::copy( localGidVerts.begin(), localGidVerts.end(), gid_data );
        }
        else if( 8 == bytes_per_tag )
        { // Should be a handle tag on a 64-bit machine?
            long* handle_tag_data = (long*)data;
            std::copy( localGidVerts.begin(), localGidVerts.end(), handle_tag_data );
        }
    }

#ifdef MOAB_HAVE_PNETCDF
    size_t nb_reads = localGidVerts.psize();
    std::vector< int > requests( nb_reads );
    std::vector< int > statuss( nb_reads );
    size_t idxReq = 0;
#endif

    // Store lev values in levVals
    std::map< std::string, ReadNC::VarData >::iterator vmit;
    if( ( vmit = varInfo.find( "layers" ) ) != varInfo.end() && ( *vmit ).second.varDims.size() == 1 )
    {
        rval = read_coordinate( "layers", 0, nLevels - 1, levVals );MB_CHK_SET_ERR( rval, "Trouble reading 'layers' variable" );
    }
    else if( ( vmit = varInfo.find( "interfaces" ) ) != varInfo.end() && ( *vmit ).second.varDims.size() == 1 )
    {
        rval = read_coordinate( "interfaces", 0, nLevels - 1, levVals );MB_CHK_SET_ERR( rval, "Trouble reading 'interfaces' variable" );
    }
    else
    {
        MB_SET_ERR( MB_FAILURE, "Couldn't find 'layers' or 'interfaces' variable" );
    }

    // Decide whether down is positive
    char posval[10] = { 0 };
    int success = NCFUNC( get_att_text )( _fileId, ( *vmit ).second.varId, "positive", posval );
    if( 0 == success && !strncmp( posval, "down", 4 ) )
    {
        for( std::vector< double >::iterator dvit = levVals.begin(); dvit != levVals.end(); ++dvit )
            ( *dvit ) *= -1.0;
    }

    // Read x coordinates for local vertices
    double* xptr = arrays[0];
    int xVertexVarId;
    success = NCFUNC( inq_varid )( _fileId, "grid_corner_lon", &xVertexVarId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of grid_corner_lon" );
    size_t indexInArray = 0;
    for( Range::pair_iterator pair_iter = localGidVerts.pair_begin(); pair_iter != localGidVerts.pair_end();
         ++pair_iter )
    {
        EntityHandle starth = pair_iter->first;
        EntityHandle endh = pair_iter->second;
        NCDF_SIZE read_start = (NCDF_SIZE)( starth - 1 );
        NCDF_SIZE read_count = (NCDF_SIZE)( endh - starth + 1 );

        // Do a partial read in each subrange
#ifdef MOAB_HAVE_PNETCDF
        success = NCFUNCREQG( _vara_double )( _fileId, xVertexVarId, &read_start, &read_count, &( xptr[indexInArray] ),
                                              &requests[idxReq++] );
#else
        success = NCFUNCAG( _vara_double )( _fileId, xVertexVarId, &read_start, &read_count, &( xptr[indexInArray] ) );
#endif
        if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read grid_corner_lon data in a loop" );

        // Increment the index for next subrange
        indexInArray += ( endh - starth + 1 );
    }

#ifdef MOAB_HAVE_PNETCDF
    // Wait outside the loop
    success = NCFUNC( wait_all )( _fileId, requests.size(), &requests[0], &statuss[0] );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed on wait_all" );
#endif

    // Read y coordinates for local vertices
    double* yptr = arrays[1];
    int yVertexVarId;
    success = NCFUNC( inq_varid )( _fileId, "grid_corner_lat", &yVertexVarId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of grid_corner_lat" );
#ifdef MOAB_HAVE_PNETCDF
    idxReq = 0;
#endif
    indexInArray = 0;
    for( Range::pair_iterator pair_iter = localGidVerts.pair_begin(); pair_iter != localGidVerts.pair_end();
         ++pair_iter )
    {
        EntityHandle starth = pair_iter->first;
        EntityHandle endh = pair_iter->second;
        NCDF_SIZE read_start = (NCDF_SIZE)( starth - 1 );
        NCDF_SIZE read_count = (NCDF_SIZE)( endh - starth + 1 );

        // Do a partial read in each subrange
#ifdef MOAB_HAVE_PNETCDF
        success = NCFUNCREQG( _vara_double )( _fileId, yVertexVarId, &read_start, &read_count, &( yptr[indexInArray] ),
                                              &requests[idxReq++] );
#else
        success = NCFUNCAG( _vara_double )( _fileId, yVertexVarId, &read_start, &read_count, &( yptr[indexInArray] ) );
#endif
        if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read grid_corner_lat data in a loop" );

        // Increment the index for next subrange
        indexInArray += ( endh - starth + 1 );
    }

#ifdef MOAB_HAVE_PNETCDF
    // Wait outside the loop
    success = NCFUNC( wait_all )( _fileId, requests.size(), &requests[0], &statuss[0] );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed on wait_all" );
#endif

    // Convert lon/lat/rad to x/y/z
    double* zptr = arrays[2];
    double rad = 8000.0 + levVals[0];
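    // (nominal sphere radius offset by the first level value)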
    for( int i = 0; i < nLocalVertices; i++ )
    {
        double cosphi = cos( yptr[i] );
        double zmult = sin( yptr[i] );
        double xmult = cosphi * cos( xptr[i] );
        double ymult = cosphi * sin( xptr[i] );
        xptr[i] = rad * xmult;
        yptr[i] = rad * ymult;
        zptr[i] = rad * zmult;
    }

    return MB_SUCCESS;
}

ErrorCode NCHelperGCRM::create_local_edges( EntityHandle start_vertex )
{
    Interface*& mbImpl = _readNC->mbImpl;
    Tag& mGlobalIdTag = _readNC->mGlobalIdTag;
    DebugOutput& dbgOut = _readNC->dbgOut;

    // Read edges on each local cell, to get localGidEdges
    int edgesOnCellVarId;
    int success = NCFUNC( inq_varid )( _fileId, "cell_edges", &edgesOnCellVarId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of cell_edges" );

    std::vector< int > edges_on_local_cells( nLocalCells * EDGES_PER_CELL );
    dbgOut.tprintf( 1, " edges_on_local_cells.size() = %d\n", (int)edges_on_local_cells.size() );

#ifdef MOAB_HAVE_PNETCDF
    size_t nb_reads = localGidCells.psize();
    std::vector< int > requests( nb_reads );
    std::vector< int > statuss( nb_reads );
    size_t idxReq = 0;
#endif
    size_t indexInArray = 0;
    for( Range::pair_iterator pair_iter = localGidCells.pair_begin(); pair_iter != localGidCells.pair_end();
         ++pair_iter )
    {
        EntityHandle starth = pair_iter->first;
        EntityHandle endh = pair_iter->second;
        dbgOut.tprintf( 1, " starth = %d\n", (int)starth );
        dbgOut.tprintf( 1, " endh = %d\n", (int)endh );
        NCDF_SIZE read_starts[2] = { static_cast< NCDF_SIZE >( starth - 1 ), 0 };
        NCDF_SIZE read_counts[2] = { static_cast< NCDF_SIZE >( endh - starth + 1 ),
                                     static_cast< NCDF_SIZE >( EDGES_PER_CELL ) };

        // Do a partial read in each subrange
#ifdef MOAB_HAVE_PNETCDF
        success = NCFUNCREQG( _vara_int )( _fileId, edgesOnCellVarId, read_starts, read_counts,
                                           &( edges_on_local_cells[indexInArray] ), &requests[idxReq++] );
#else
        success = NCFUNCAG( _vara_int )( _fileId, edgesOnCellVarId, read_starts, read_counts,
                                         &( edges_on_local_cells[indexInArray] ) );
#endif
        if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read cell_edges data in a loop" );

        // Increment the index for next subrange
        indexInArray += ( endh - starth + 1 ) * EDGES_PER_CELL;
    }

#ifdef MOAB_HAVE_PNETCDF
    // Wait outside the loop
    success = NCFUNC( wait_all )( _fileId, requests.size(), &requests[0], &statuss[0] );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed on wait_all" );
#endif

    // GCRM is 0 based, convert edge indices from 0 based to 1 based
    for( std::size_t idx = 0; idx < edges_on_local_cells.size(); idx++ )
        edges_on_local_cells[idx] += 1;

    // Collect local edges
    std::sort( edges_on_local_cells.begin(), edges_on_local_cells.end() );
    std::copy( edges_on_local_cells.rbegin(), edges_on_local_cells.rend(), range_inserter( localGidEdges ) );
    nLocalEdges = localGidEdges.size();

    dbgOut.tprintf( 1, " localGidEdges.psize() = %d\n", (int)localGidEdges.psize() );
    dbgOut.tprintf( 1, " localGidEdges.size() = %d\n", (int)localGidEdges.size() );

    // Create local edges
    EntityHandle start_edge;
    EntityHandle* conn_arr_edges = NULL;
    ErrorCode rval =
        _readNC->readMeshIface->get_element_connect( nLocalEdges, 2, MBEDGE, 0, start_edge, conn_arr_edges,
                                                     // Might have to create gather mesh later
                                                     ( createGatherSet ? nLocalEdges + nEdges : nLocalEdges ) );MB_CHK_SET_ERR( rval, "Failed to create local edges" );

    // Add local edges to current file set
    Range local_edges_range( start_edge, start_edge + nLocalEdges - 1 );
    rval = _readNC->mbImpl->add_entities( _fileSet, local_edges_range );MB_CHK_SET_ERR( rval, "Failed to add local edges to current file set" );

    // Get ptr to GID memory for edges
    int count = 0;
    void* data = NULL;
    rval = mbImpl->tag_iterate( mGlobalIdTag, local_edges_range.begin(), local_edges_range.end(), count, data );MB_CHK_SET_ERR( rval, "Failed to iterate global id tag on local edges" );
    assert( count == nLocalEdges );
    int* gid_data = (int*)data;
    std::copy( localGidEdges.begin(), localGidEdges.end(), gid_data );

    int verticesOnEdgeVarId;

    // Read vertices on each local edge, to get edge connectivity
    success = NCFUNC( inq_varid )( _fileId, "edge_corners", &verticesOnEdgeVarId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of edge_corners" );
    // Utilize the memory storage pointed to by conn_arr_edges
    int* vertices_on_local_edges = (int*)conn_arr_edges;
#ifdef MOAB_HAVE_PNETCDF
    nb_reads = localGidEdges.psize();
    requests.resize( nb_reads );
    statuss.resize( nb_reads );
    idxReq = 0;
#endif
    indexInArray = 0;
    for( Range::pair_iterator pair_iter = localGidEdges.pair_begin(); pair_iter != localGidEdges.pair_end();
         ++pair_iter )
    {
        EntityHandle starth = pair_iter->first;
        EntityHandle endh = pair_iter->second;
        NCDF_SIZE read_starts[2] = { static_cast< NCDF_SIZE >( starth - 1 ), 0 };
        NCDF_SIZE read_counts[2] = { static_cast< NCDF_SIZE >( endh - starth + 1 ), 2 };

        // Do a partial read in each subrange
#ifdef MOAB_HAVE_PNETCDF
        success = NCFUNCREQG( _vara_int )( _fileId, verticesOnEdgeVarId, read_starts, read_counts,
                                           &( vertices_on_local_edges[indexInArray] ), &requests[idxReq++] );
#else
        success = NCFUNCAG( _vara_int )( _fileId, verticesOnEdgeVarId, read_starts, read_counts,
                                         &( vertices_on_local_edges[indexInArray] ) );
#endif
        if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read edge_corners data in a loop" );

        // Increment the index for next subrange
        indexInArray += ( endh - starth + 1 ) * 2;
    }

#ifdef MOAB_HAVE_PNETCDF
    // Wait outside the loop
    success = NCFUNC( wait_all )( _fileId, requests.size(), &requests[0], &statuss[0] );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed on wait_all" );
#endif

    // Populate connectivity data for local edges
    // Convert in-place from int (stored in the first half) to EntityHandle
    // Reading backward is the trick
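    // (EntityHandle is wider than int, so converting from the last element backward
    // never overwrites an int entry that has not yet been read)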
    for( int edge_vert = nLocalEdges * 2 - 1; edge_vert >= 0; edge_vert-- )
    {
        // Note, indices stored in vertices_on_local_edges are 0 based
        int global_vert_idx = vertices_on_local_edges[edge_vert] + 1; // Global vertex index, 1 based
        int local_vert_idx = localGidVerts.index( global_vert_idx );  // Local vertex index, 0 based
        assert( local_vert_idx != -1 );
        conn_arr_edges[edge_vert] = start_vertex + local_vert_idx;
    }

    return MB_SUCCESS;
}

ErrorCode NCHelperGCRM::create_padded_local_cells( const std::vector< int >& vertices_on_local_cells,
                                                   EntityHandle start_vertex,
                                                   Range& faces )
{
    Interface*& mbImpl = _readNC->mbImpl;
    Tag& mGlobalIdTag = _readNC->mGlobalIdTag;

    // Create cells
    EntityHandle start_element;
    EntityHandle* conn_arr_local_cells = NULL;
    ErrorCode rval =
        _readNC->readMeshIface->get_element_connect( nLocalCells, EDGES_PER_CELL, MBPOLYGON, 0, start_element,
                                                     conn_arr_local_cells,
                                                     // Might have to create gather mesh later
                                                     ( createGatherSet ? nLocalCells + nCells : nLocalCells ) );MB_CHK_SET_ERR( rval, "Failed to create local cells" );
    faces.insert( start_element, start_element + nLocalCells - 1 );

    // Add local cells to current file set
    Range local_cells_range( start_element, start_element + nLocalCells - 1 );
    rval = _readNC->mbImpl->add_entities( _fileSet, local_cells_range );MB_CHK_SET_ERR( rval, "Failed to add local cells to current file set" );

    // Get ptr to GID memory for local cells
    int count = 0;
    void* data = NULL;
    rval = mbImpl->tag_iterate( mGlobalIdTag, local_cells_range.begin(), local_cells_range.end(), count, data );MB_CHK_SET_ERR( rval, "Failed to iterate global id tag on local cells" );
    assert( count == nLocalCells );
    int* gid_data = (int*)data;
    std::copy( localGidCells.begin(), localGidCells.end(), gid_data );

    // Set connectivity array with proper local vertex handles.
    // The vertices_on_local_cells array was already corrected to have
    // the last vertices repeated for pentagons, e.g. 122345 => 123455
    for( int local_cell_idx = 0; local_cell_idx < nLocalCells; local_cell_idx++ )
    {
        for( int i = 0; i < EDGES_PER_CELL; i++ )
        {
            // Note, indices stored in vertices_on_local_cells are 1 based
            EntityHandle global_vert_idx =
                vertices_on_local_cells[local_cell_idx * EDGES_PER_CELL + i]; // Global vertex index, 1 based
            int local_vert_idx = localGidVerts.index( global_vert_idx );     // Local vertex index, 0 based
            assert( local_vert_idx != -1 );
            conn_arr_local_cells[local_cell_idx * EDGES_PER_CELL + i] = start_vertex + local_vert_idx;
        }
    }

    return MB_SUCCESS;
}

ErrorCode NCHelperGCRM::create_gather_set_vertices( EntityHandle gather_set, EntityHandle& gather_set_start_vertex )
{
    Interface*& mbImpl = _readNC->mbImpl;
    Tag& mGlobalIdTag = _readNC->mGlobalIdTag;
    const Tag*& mpFileIdTag = _readNC->mpFileIdTag;

    // Create gather set vertices
    std::vector< double* > arrays;
    // Don't need to specify the allocation number here, because we know enough vertices were
    // created before
    ErrorCode rval = _readNC->readMeshIface->get_node_coords( 3, nVertices, 0, gather_set_start_vertex, arrays );MB_CHK_SET_ERR( rval, "Failed to create gather set vertices" );

    // Add vertices to the gather set
    Range gather_set_verts_range( gather_set_start_vertex, gather_set_start_vertex + nVertices - 1 );
    rval = mbImpl->add_entities( gather_set, gather_set_verts_range );MB_CHK_SET_ERR( rval, "Failed to add vertices to the gather set" );

    // Read x coordinates for gather set vertices
    double* xptr = arrays[0];
    int xVertexVarId;
    int success = NCFUNC( inq_varid )( _fileId, "grid_corner_lon", &xVertexVarId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of grid_corner_lon" );
    NCDF_SIZE read_start = 0;
    NCDF_SIZE read_count = static_cast< NCDF_SIZE >( nVertices );
#ifdef MOAB_HAVE_PNETCDF
    // Enter independent I/O mode, since this read is only for the gather processor
    success = NCFUNC( begin_indep_data )( _fileId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to begin independent I/O mode" );
    success = NCFUNCG( _vara_double )( _fileId, xVertexVarId, &read_start, &read_count, xptr );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read grid_corner_lon data" );
    success = NCFUNC( end_indep_data )( _fileId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to end independent I/O mode" );
#else
    success = NCFUNCG( _vara_double )( _fileId, xVertexVarId, &read_start, &read_count, xptr );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read grid_corner_lon data" );
#endif

    // Read y coordinates for gather set vertices
    double* yptr = arrays[1];
    int yVertexVarId;
    success = NCFUNC( inq_varid )( _fileId, "grid_corner_lat", &yVertexVarId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of grid_corner_lat" );
#ifdef MOAB_HAVE_PNETCDF
    // Enter independent I/O mode, since this read is only for the gather processor
    success = NCFUNC( begin_indep_data )( _fileId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to begin independent I/O mode" );
    success = NCFUNCG( _vara_double )( _fileId, yVertexVarId, &read_start, &read_count, yptr );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read grid_corner_lat data" );
    success = NCFUNC( end_indep_data )( _fileId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to end independent I/O mode" );
#else
    success = NCFUNCG( _vara_double )( _fileId, yVertexVarId, &read_start, &read_count, yptr );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read grid_corner_lat data" );
#endif

    // Convert lon/lat/rad to x/y/z
    double* zptr = arrays[2];
    double rad = 8000.0 + levVals[0];
    for( int i = 0; i < nVertices; i++ )
    {
        double cosphi = cos( yptr[i] );
        double zmult = sin( yptr[i] );
        double xmult = cosphi * cos( xptr[i] );
        double ymult = cosphi * sin( xptr[i] );
        xptr[i] = rad * xmult;
        yptr[i] = rad * ymult;
        zptr[i] = rad * zmult;
    }

    // Get ptr to GID memory for gather set vertices
    int count = 0;
    void* data = NULL;
    rval =
        mbImpl->tag_iterate( mGlobalIdTag, gather_set_verts_range.begin(), gather_set_verts_range.end(), count, data );MB_CHK_SET_ERR( rval, "Failed to iterate global id tag on gather set vertices" );
    assert( count == nVertices );
    int* gid_data = (int*)data;
    for( int j = 1; j <= nVertices; j++ )
        gid_data[j - 1] = j;

    // Set the file id tag too; use values bigger than the global ids, so they do not
    // interfere with them
    if( mpFileIdTag )
    {
        rval = mbImpl->tag_iterate( *mpFileIdTag, gather_set_verts_range.begin(), gather_set_verts_range.end(), count,
                                    data );MB_CHK_SET_ERR( rval, "Failed to iterate file id tag on gather set vertices" );
        assert( count == nVertices );
        int bytes_per_tag = 4;
        rval = mbImpl->tag_get_bytes( *mpFileIdTag, bytes_per_tag );MB_CHK_SET_ERR( rval, "Can't get number of bytes for file id tag" );
        if( 4 == bytes_per_tag )
        {
            gid_data = (int*)data;
            for( int j = 1; j <= nVertices; j++ )
                gid_data[j - 1] = nVertices + j; // Bigger than global id tag
        }
        else if( 8 == bytes_per_tag )
        { // Should be a handle tag on a 64-bit machine?
            long* handle_tag_data = (long*)data;
            for( int j = 1; j <= nVertices; j++ )
                handle_tag_data[j - 1] = nVertices + j; // Bigger than global id tag
        }
    }

    return MB_SUCCESS;
}

ErrorCode NCHelperGCRM::create_gather_set_edges( EntityHandle gather_set, EntityHandle gather_set_start_vertex )
{
    Interface*& mbImpl = _readNC->mbImpl;

    // Create gather set edges
    EntityHandle start_edge;
    EntityHandle* conn_arr_gather_set_edges = NULL;
    // Don't need to specify the allocation number here, because we know enough edges were
    // created before
    ErrorCode rval =
        _readNC->readMeshIface->get_element_connect( nEdges, 2, MBEDGE, 0, start_edge, conn_arr_gather_set_edges );MB_CHK_SET_ERR( rval, "Failed to create gather set edges" );

    // Add edges to the gather set
    Range gather_set_edges_range( start_edge, start_edge + nEdges - 1 );
    rval = mbImpl->add_entities( gather_set, gather_set_edges_range );MB_CHK_SET_ERR( rval, "Failed to add edges to the gather set" );

    // Read vertices on each edge
    int verticesOnEdgeVarId;
    int success = NCFUNC( inq_varid )( _fileId, "edge_corners", &verticesOnEdgeVarId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of edge_corners" );
    // Utilize the memory storage pointed to by conn_arr_gather_set_edges
    int* vertices_on_gather_set_edges = (int*)conn_arr_gather_set_edges;
    NCDF_SIZE read_starts[2] = { 0, 0 };
    NCDF_SIZE read_counts[2] = { static_cast< NCDF_SIZE >( nEdges ), 2 };
#ifdef MOAB_HAVE_PNETCDF
    // Enter independent I/O mode, since this read is only for the gather processor
    success = NCFUNC( begin_indep_data )( _fileId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to begin independent I/O mode" );
    success =
        NCFUNCG( _vara_int )( _fileId, verticesOnEdgeVarId, read_starts, read_counts, vertices_on_gather_set_edges );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read edge_corners data" );
    success = NCFUNC( end_indep_data )( _fileId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to end independent I/O mode" );
#else
    success =
        NCFUNCG( _vara_int )( _fileId, verticesOnEdgeVarId, read_starts, read_counts, vertices_on_gather_set_edges );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read edge_corners data" );
#endif

    // Populate connectivity data for gather set edges
    // Convert in-place from int (stored in the first half) to EntityHandle
    // Reading backward is the trick
    for( int edge_vert = nEdges * 2 - 1; edge_vert >= 0; edge_vert-- )
    {
        // Note, indices stored in vertices_on_gather_set_edges are 0 based
        int gather_set_vert_idx = vertices_on_gather_set_edges[edge_vert]; // Global vertex index, 0 based
        // Connectivity array is shifted by where the gather set vertices start
        conn_arr_gather_set_edges[edge_vert] = gather_set_start_vertex + gather_set_vert_idx;
    }

    return MB_SUCCESS;
}

ErrorCode NCHelperGCRM::create_padded_gather_set_cells( EntityHandle gather_set, EntityHandle gather_set_start_vertex )
{
    Interface*& mbImpl = _readNC->mbImpl;

    // Create gather set cells
    EntityHandle start_element;
    EntityHandle* conn_arr_gather_set_cells = NULL;
    // Don't need to specify the allocation number here, because we know enough cells were
    // created before
    ErrorCode rval = _readNC->readMeshIface->get_element_connect( nCells, EDGES_PER_CELL, MBPOLYGON, 0, start_element,
                                                                  conn_arr_gather_set_cells );MB_CHK_SET_ERR( rval, "Failed to create gather set cells" );

    // Add cells to the gather set
    Range gather_set_cells_range( start_element, start_element + nCells - 1 );
    rval = mbImpl->add_entities( gather_set, gather_set_cells_range );MB_CHK_SET_ERR( rval, "Failed to add cells to the gather set" );

    // Read vertices on each gather set cell (connectivity)
    int verticesOnCellVarId;
    int success = NCFUNC( inq_varid )( _fileId, "cell_corners", &verticesOnCellVarId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of cell_corners" );
    // Utilize the memory storage pointed to by conn_arr_gather_set_cells
    int* vertices_on_gather_set_cells = (int*)conn_arr_gather_set_cells;
    NCDF_SIZE read_starts[2] = { 0, 0 };
    NCDF_SIZE read_counts[2] = { static_cast< NCDF_SIZE >( nCells ), static_cast< NCDF_SIZE >( EDGES_PER_CELL ) };
#ifdef MOAB_HAVE_PNETCDF
    // Enter independent I/O mode, since this read is only for the gather processor
    success = NCFUNC( begin_indep_data )( _fileId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to begin independent I/O mode" );
    success =
        NCFUNCG( _vara_int )( _fileId, verticesOnCellVarId, read_starts, read_counts, vertices_on_gather_set_cells );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read cell_corners data" );
    success = NCFUNC( end_indep_data )( _fileId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to end independent I/O mode" );
#else
    success =
        NCFUNCG( _vara_int )( _fileId, verticesOnCellVarId, read_starts, read_counts, vertices_on_gather_set_cells );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read cell_corners data" );
#endif

    // Correct the gather set cell vertices array in the same way as the local cell vertices array:
    // pentagons as hexagons should have a connectivity like 123455 and not 122345
    for( int gather_set_cell_idx = 0; gather_set_cell_idx < nCells; gather_set_cell_idx++ )
    {
        int* pvertex = vertices_on_gather_set_cells + gather_set_cell_idx * EDGES_PER_CELL;
        for( int k = 0; k < EDGES_PER_CELL - 2; k++ )
        {
            if( *( pvertex + k ) == *( pvertex + k + 1 ) )
            {
                // Shift the connectivity
                for( int kk = k + 1; kk < EDGES_PER_CELL - 1; kk++ )
                    *( pvertex + kk ) = *( pvertex + kk + 1 );
                // No need to try next k
                break;
            }
        }
    }

    // Populate connectivity data for gather set cells
    // Convert in-place from int (stored in the first half) to EntityHandle
    // Reading backward is the trick
    for( int cell_vert = nCells * EDGES_PER_CELL - 1; cell_vert >= 0; cell_vert-- )
    {
        // Note, indices stored in vertices_on_gather_set_cells are 0 based
        int gather_set_vert_idx = vertices_on_gather_set_cells[cell_vert]; // Global vertex index, 0 based
        // Connectivity array is shifted by where the gather set vertices start
        conn_arr_gather_set_cells[cell_vert] = gather_set_start_vertex + gather_set_vert_idx;
    }

    return MB_SUCCESS;
}

} // namespace moab