Mesh Oriented datABase  (version 5.5.1)
An array-based unstructured mesh library
NCHelperScrip.cpp
/*
 * NCHelperScrip.cpp
 */

#include "NCHelperScrip.hpp"
#include "moab/ReadUtilIface.hpp"
#include "AEntityFactory.hpp"

#ifdef MOAB_HAVE_MPI
#include "moab/ParallelMergeMesh.hpp"  // ParallelMergeMesh is used below for the parallel vertex merge
#endif
#ifdef MOAB_HAVE_ZOLTAN
#include "moab/ZoltanPartitioner.hpp"  // ZoltanPartitioner is used below to redistribute local cells
#endif

namespace moab
{

bool NCHelperScrip::can_read_file( ReadNC* readNC, int /*fileId*/ )
{
    std::vector< std::string >& dimNames = readNC->dimNames;

    // If the dimensions "grid_size" AND "grid_corners" AND "grid_rank" all exist, this should be a SCRIP grid
    if( ( std::find( dimNames.begin(), dimNames.end(), std::string( "grid_size" ) ) != dimNames.end() ) &&
        ( std::find( dimNames.begin(), dimNames.end(), std::string( "grid_corners" ) ) != dimNames.end() ) &&
        ( std::find( dimNames.begin(), dimNames.end(), std::string( "grid_rank" ) ) != dimNames.end() ) )
    {
        return true;
    }

    return false;
}
ErrorCode NCHelperScrip::init_mesh_vals()
{
    Interface*& mbImpl                   = _readNC->mbImpl;
    std::vector< std::string >& dimNames = _readNC->dimNames;
    std::vector< int >& dimLens          = _readNC->dimLens;

    unsigned int idx;
    std::vector< std::string >::iterator vit;

    // get grid_size
    if( ( vit = std::find( dimNames.begin(), dimNames.end(), "grid_size" ) ) != dimNames.end() )
    {
        idx       = vit - dimNames.begin();
        grid_size = dimLens[idx];
    }

    // get grid_corners
    if( ( vit = std::find( dimNames.begin(), dimNames.end(), "grid_corners" ) ) != dimNames.end() )
    {
        idx          = vit - dimNames.begin();
        grid_corners = dimLens[idx];
    }

    // get grid_rank
    if( ( vit = std::find( dimNames.begin(), dimNames.end(), "grid_rank" ) ) != dimNames.end() )
    {
        idx       = vit - dimNames.begin();
        grid_rank = dimLens[idx];
    }

    // we do not need the conventional tags
    Tag convTagsCreated = 0;
    int def_val         = 0;
    ErrorCode rval      = mbImpl->tag_get_handle( "__CONV_TAGS_CREATED", 1, MB_TYPE_INTEGER, convTagsCreated,
                                                  MB_TAG_SPARSE | MB_TAG_CREAT, &def_val );MB_CHK_SET_ERR( rval, "Trouble getting __CONV_TAGS_CREATED tag" );
    int create_conv_tags_flag = 1;
    rval = mbImpl->tag_set_data( convTagsCreated, &_fileSet, 1, &create_conv_tags_flag );MB_CHK_SET_ERR( rval, "Trouble setting __CONV_TAGS_CREATED tag" );

    // decide now the units, by looking at grid_center_lon
    int xCellVarId;
    int success = NCFUNC( inq_varid )( _fileId, "grid_center_lon", &xCellVarId );
    if( success ) MB_CHK_SET_ERR( MB_FAILURE, "Trouble getting grid_center_lon" );
    std::map< std::string, ReadNC::VarData >& varInfo = _readNC->varInfo;
    auto vmit = varInfo.find( "grid_center_lon" );
    if( varInfo.end() == vmit ) MB_SET_ERR( MB_FAILURE, "Couldn't find variable grid_center_lon" );
    ReadNC::VarData& glData = vmit->second;
    auto attIt              = glData.varAtts.find( "units" );
    if( attIt != glData.varAtts.end() )
    {
        unsigned int sz = attIt->second.attLen;
        std::string att_data;
        att_data.resize( sz + 1 );
        att_data[sz] = '\000';
        success =
            NCFUNC( get_att_text )( _fileId, attIt->second.attVarId, attIt->second.attName.c_str(), &att_data[0] );
        if( 0 == success && att_data.find( "radians" ) != std::string::npos ) degrees = false;
    }

    return MB_SUCCESS;
}
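
// create_mesh reads the corner coordinates (grid_corner_lon/lat) and the optional
// grid_imask variable for the locally owned cells, creates the corresponding MOAB
// vertices and cells, and, in parallel, merges coincident vertices along partition
// boundaries.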
ErrorCode NCHelperScrip::create_mesh( Range& faces )
{
    Interface*& mbImpl  = _readNC->mbImpl;
    DebugOutput& dbgOut = _readNC->dbgOut;
    Tag& mGlobalIdTag   = _readNC->mGlobalIdTag;
    ErrorCode rval;

#ifdef MOAB_HAVE_MPI
    int rank              = 0;
    int procs             = 1;
    bool& isParallel      = _readNC->isParallel;
    ParallelComm* myPcomm = NULL;
    if( isParallel )
    {
        myPcomm = _readNC->myPcomm;
        rank    = myPcomm->proc_config().proc_rank();
        procs   = myPcomm->proc_config().proc_size();
    }

    if( procs >= 2 )
    {
        // Shift rank to obtain a rotated trivial partition
        int shifted_rank           = rank;
        int& trivialPartitionShift = _readNC->trivialPartitionShift;
        if( trivialPartitionShift > 0 ) shifted_rank = ( rank + trivialPartitionShift ) % procs;

        // Compute the number of local cells on this proc
        nLocalCells = int( std::floor( 1.0 * grid_size / procs ) );

        // The starting global cell index in the SCRIP file for this proc
        int start_cell_idx = shifted_rank * nLocalCells;

        // Number of extra cells after equal split over procs
        int iextra = grid_size % procs;

        // Allocate extra cells over procs
        if( shifted_rank < iextra ) nLocalCells++;
        start_cell_idx += std::min( shifted_rank, iextra );

        start_cell_idx++;  // 0 based -> 1 based

        // Redistribute local cells after trivial partition (e.g. apply Zoltan partition)
        ErrorCode rval = redistribute_local_cells( start_cell_idx, myPcomm );MB_CHK_SET_ERR( rval, "Failed to redistribute local cells after trivial partition" );
    }
    else
    {
        // single reader: all cells are local, with 1-based global ids
        nLocalCells = grid_size;
        localGidCells.insert( 1, nLocalCells );
    }
#else
    // serial build: all cells are local, with 1-based global ids
    nLocalCells = grid_size;
    localGidCells.insert( 1, nLocalCells );
#endif
    dbgOut.tprintf( 1, " localGidCells.psize() = %d\n", (int)localGidCells.psize() );
    dbgOut.tprintf( 1, " localGidCells.size() = %d\n", (int)localGidCells.size() );
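    // localGidCells now holds the 1-based global ids of the cells this task reads and owns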

    // double grid_corner_lat(grid_size, grid_corners) ;
    // double grid_corner_lon(grid_size, grid_corners) ;
    int xvId, yvId;
    int success = NCFUNC( inq_varid )( _fileId, "grid_corner_lon", &xvId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of grid_corner_lon" );
    success = NCFUNC( inq_varid )( _fileId, "grid_corner_lat", &yvId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of grid_corner_lat" );

    // important upgrade: read masks if they exist, and save them as tags
    int gmId      = -1;
    int sizeMasks = 0;
#ifdef MOAB_HAVE_PNETCDF
    int factorRequests = 2;  // we would read in general only 2 variables, xv and yv
#endif
    success     = NCFUNC( inq_varid )( _fileId, "grid_imask", &gmId );
    Tag maskTag = 0;  // not sure yet if we have the masks or not
    if( success )
    {
        gmId = -1;  // we do not have masks
    }
    else
    {
        sizeMasks = nLocalCells;
#ifdef MOAB_HAVE_PNETCDF
        factorRequests = 3;  // we also need to read masks distributed
#endif
        // create the maskTag GRID_IMASK, with default value of 1
        int def_val = 1;
        rval =
            mbImpl->tag_get_handle( "GRID_IMASK", 1, MB_TYPE_INTEGER, maskTag, MB_TAG_DENSE | MB_TAG_CREAT, &def_val );MB_CHK_SET_ERR( rval, "Trouble creating GRID_IMASK tag" );
    }

    std::vector< double > xv( nLocalCells * grid_corners );
    std::vector< double > yv( nLocalCells * grid_corners );
    std::vector< int > masks( sizeMasks );
#ifdef MOAB_HAVE_PNETCDF
    size_t nb_reads = localGidCells.psize();
    std::vector< int > requests( nb_reads * factorRequests );
    std::vector< int > statuss( nb_reads * factorRequests );
    size_t idxReq = 0;
#endif
    size_t indexInArray     = 0;
    size_t indexInMaskArray = 0;
    for( Range::pair_iterator pair_iter = localGidCells.pair_begin(); pair_iter != localGidCells.pair_end();
         ++pair_iter )
    {
        EntityHandle starth = pair_iter->first;
        EntityHandle endh   = pair_iter->second;
        NCDF_SIZE read_starts[2] = { static_cast< NCDF_SIZE >( starth - 1 ), 0 };
        NCDF_SIZE read_counts[2] = { static_cast< NCDF_SIZE >( endh - starth + 1 ),
                                     static_cast< NCDF_SIZE >( grid_corners ) };

        // Do a partial read in each subrange
#ifdef MOAB_HAVE_PNETCDF
        success = NCFUNCREQG( _vara_double )( _fileId, xvId, read_starts, read_counts, &( xv[indexInArray] ),
                                              &requests[idxReq++] );
#else
        success = NCFUNCAG( _vara_double )( _fileId, xvId, read_starts, read_counts, &( xv[indexInArray] ) );
#endif
        if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read grid_corner_lon data in a loop" );

        // Do a partial read in each subrange
#ifdef MOAB_HAVE_PNETCDF
        success = NCFUNCREQG( _vara_double )( _fileId, yvId, read_starts, read_counts, &( yv[indexInArray] ),
                                              &requests[idxReq++] );
#else
        success = NCFUNCAG( _vara_double )( _fileId, yvId, read_starts, read_counts, &( yv[indexInArray] ) );
#endif
        if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read grid_corner_lat data in a loop" );
        // Increment the index for next subrange
        indexInArray += ( endh - starth + 1 ) * grid_corners;

        if( gmId >= 0 )  // it means we need to read masks too, distributed:
        {
            NCDF_SIZE read_st = static_cast< NCDF_SIZE >( starth - 1 );
            NCDF_SIZE read_ct = static_cast< NCDF_SIZE >( endh - starth + 1 );
            // Do a partial read in each subrange, for mask variable:
#ifdef MOAB_HAVE_PNETCDF
            success = NCFUNCREQG( _vara_int )( _fileId, gmId, &read_st, &read_ct, &( masks[indexInMaskArray] ),
                                               &requests[idxReq++] );
#else
            success = NCFUNCAG( _vara_int )( _fileId, gmId, &read_st, &read_ct, &( masks[indexInMaskArray] ) );
#endif
            if( success ) MB_SET_ERR( MB_FAILURE, "Failed on mask read" );
            indexInMaskArray += endh - starth + 1;
        }
    }

#ifdef MOAB_HAVE_PNETCDF
    // Wait outside the loop
    success = NCFUNC( wait_all )( _fileId, requests.size(), &requests[0], &statuss[0] );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed on wait_all" );
#endif
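
    // At this point xv and yv (and masks, if grid_imask is present) hold the corner data
    // for all locally owned cells, in the order given by localGidCells.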

    // int grid_dims(grid_rank)
    {
        int gdId;
        int success = NCFUNC( inq_varid )( _fileId, "grid_dims", &gdId );
        if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of grid_dims" );

        // If rectilinear attribute present, mark it
        std::vector< int > vecDimSizes( 3, 0 );
        Tag rectilinearTag;
        // Tag data contains: guessed mesh type, mesh size 1, mesh size 2
        // Example: CS(0)/ICO(1)/ICOD(2), num_elements, num_nodes
        //        : RLL(3), num_lat, num_lon

        vecDimSizes[0] = ( grid_rank == 2 ? 1 /* moab::TempestRemapper::RLL */ : 0 /* moab::TempestRemapper::CS */ );
        vecDimSizes[1] = grid_size;  // number of elements
        vecDimSizes[2] = grid_size;  // number of elements
        rval = mbImpl->tag_get_handle( "ClimateMetadata", 3, MB_TYPE_INTEGER, rectilinearTag,
                                       MB_TAG_SPARSE | MB_TAG_CREAT, vecDimSizes.data() );
        if( MB_ALREADY_ALLOCATED != rval && MB_SUCCESS != rval )
            MB_CHK_SET_ERR( rval, "can't create rectilinear sizes tag" );

        if( grid_rank == 2 )
        {
            NCDF_SIZE read_starts[1] = { static_cast< NCDF_SIZE >( 0 ) };
            NCDF_SIZE read_counts[1] = { static_cast< NCDF_SIZE >( grid_rank ) };

            // Do a partial read of grid_dims
#ifdef MOAB_HAVE_PNETCDF
            std::vector< int > requeststatus( 2 );
            success = NCFUNCREQG( _vara_int )( _fileId, gdId, read_starts, read_counts, vecDimSizes.data() + 1,
                                               &requeststatus[0] );
            if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read grid_dims data" );

            // Wait for the nonblocking request to complete
            success = NCFUNC( wait_all )( _fileId, 1, &requeststatus[0], &requeststatus[1] );
            if( success ) MB_SET_ERR( MB_FAILURE, "Failed on wait_all" );
#else
            success = NCFUNCAG( _vara_int )( _fileId, gdId, read_starts, read_counts, vecDimSizes.data() + 1 );
            if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read grid_dims data" );
#endif
        }

        rval = mbImpl->tag_set_data( rectilinearTag, &_fileSet, 1, vecDimSizes.data() );
    }

    // so we read xv, yv for all corners in the local mesh, and masks if they exist

    // Create vertices; first identify different ones, with a tolerance
    std::map< Node3D, EntityHandle > vertex_map;

    // Set vertex coordinates
    // will read all xv, yv, but use only those with correct mask on

    int elem_index = 0;   // local index in netcdf arrays
    double pideg   = 1.;  // radians
    if( degrees ) pideg = acos( -1.0 ) / 180.0;

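    // Each corner (lon, lat) is mapped to a point on the unit sphere,
    //   x = cos(lat) * cos(lon), y = cos(lat) * sin(lon), z = sin(lat),
    // so corners that coincide within the Node3D tolerance collapse to a single map entry.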
    for( ; elem_index < nLocalCells; elem_index++ )
    {
        // collect the corner points of this cell; coincident corners collapse to a single entry
        for( int k = 0; k < grid_corners; k++ )
        {
            int index_v_arr = grid_corners * elem_index + k;
            double x, y;
            x             = xv[index_v_arr];
            y             = yv[index_v_arr];
            double cosphi = cos( pideg * y );
            double zmult  = sin( pideg * y );
            double xmult  = cosphi * cos( x * pideg );
            double ymult  = cosphi * sin( x * pideg );
            Node3D pt( xmult, ymult, zmult );
            vertex_map[pt] = 0;
        }
    }
    int nLocalVertices = (int)vertex_map.size();
    std::vector< double* > arrays;
    EntityHandle start_vertex, vtx_handle;
    rval = _readNC->readMeshIface->get_node_coords( 3, nLocalVertices, 0, start_vertex, arrays );MB_CHK_SET_ERR( rval, "Failed to create local vertices" );

    vtx_handle = start_vertex;
    // Copy vertex coordinates into entity sequence coordinate arrays
    // and copy handle into vertex_map.
    double *x = arrays[0], *y = arrays[1], *z = arrays[2];
    for( auto i = vertex_map.begin(); i != vertex_map.end(); ++i )
    {
        i->second = vtx_handle;
        ++vtx_handle;
        *x = i->first.coords[0];
        ++x;
        *y = i->first.coords[1];
        ++y;
        *z = i->first.coords[2];
        ++z;
    }
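
    // vertex_map now maps each distinct corner point to the handle of the vertex created for it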

    EntityHandle start_cell;
    int nv              = grid_corners;
    EntityType mdb_type = MBVERTEX;
    if( nv == 3 )
        mdb_type = MBTRI;
    else if( nv == 4 )
        mdb_type = MBQUAD;
    else if( nv > 4 )
        mdb_type = MBPOLYGON;
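
    // All cells are created with grid_corners vertices; cells that actually have fewer
    // corners repeat a corner, and those padded (repeated) vertices are cleaned up later
    // by IntxUtils::remove_padded_vertices.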

    Range tmp_range;
    EntityHandle* conn_arr;

    rval = _readNC->readMeshIface->get_element_connect( nLocalCells, nv, mdb_type, 0, start_cell, conn_arr );MB_CHK_SET_ERR( rval, "Failed to create local cells" );
    tmp_range.insert( start_cell, start_cell + nLocalCells - 1 );

    elem_index = 0;

    for( ; elem_index < nLocalCells; elem_index++ )
    {
        for( int k = 0; k < nv; k++ )
        {
            int index_v_arr = nv * elem_index + k;
            if( nv > 1 )
            {
                double x      = xv[index_v_arr];
                double y      = yv[index_v_arr];
                double cosphi = cos( pideg * y );
                double zmult  = sin( pideg * y );
                double xmult  = cosphi * cos( x * pideg );
                double ymult  = cosphi * sin( x * pideg );
                Node3D pt( xmult, ymult, zmult );
                conn_arr[elem_index * nv + k] = vertex_map[pt];
            }
        }
        EntityHandle cell = start_cell + elem_index;
        // set other tags, like xc, yc, frac, area
        /*rval = mbImpl->tag_set_data( xcTag, &cell, 1, &xc[elem_index] );MB_CHK_SET_ERR( rval, "Failed to set xc tag" );
        rval = mbImpl->tag_set_data( ycTag, &cell, 1, &yc[elem_index] );MB_CHK_SET_ERR( rval, "Failed to set yc tag" );
        rval = mbImpl->tag_set_data( areaTag, &cell, 1, &area[elem_index] );MB_CHK_SET_ERR( rval, "Failed to set area tag" );
        rval = mbImpl->tag_set_data( fracTag, &cell, 1, &frac[elem_index] );MB_CHK_SET_ERR( rval, "Failed to set frac tag" );
        */
        // set the global id too:
        int globalId = localGidCells[elem_index];

        rval = mbImpl->tag_set_data( mGlobalIdTag, &cell, 1, &globalId );MB_CHK_SET_ERR( rval, "Failed to set global id tag" );
        if( gmId >= 0 )
        {
            int localMask = masks[elem_index];
            rval = mbImpl->tag_set_data( maskTag, &cell, 1, &localMask );MB_CHK_SET_ERR( rval, "Failed to set mask tag" );
        }
    }

    rval = mbImpl->add_entities( _fileSet, tmp_range );MB_CHK_SET_ERR( rval, "Failed to add new cells to current file set" );

    // modify local file set, to merge coincident vertices, and to correct repeated vertices in elements
    std::vector< Tag > tagList;
    tagList.push_back( mGlobalIdTag );
    if( gmId >= 0 ) tagList.push_back( maskTag );
    rval = IntxUtils::remove_padded_vertices( mbImpl, _fileSet, tagList );MB_CHK_SET_ERR( rval, "Failed to remove duplicate vertices" );

    rval = mbImpl->get_entities_by_dimension( _fileSet, 2, faces );MB_CHK_ERR( rval );
    Range all_verts;
    rval = mbImpl->get_connectivity( faces, all_verts );MB_CHK_ERR( rval );
    rval = mbImpl->add_entities( _fileSet, all_verts );MB_CHK_ERR( rval );
    // need to add adjacencies; TODO: fix this for all nc readers
    // copy this logic from migrate mesh in par comm graph
    Core* mb                 = (Core*)mbImpl;
    AEntityFactory* adj_fact = mb->a_entity_factory();
    if( !adj_fact->vert_elem_adjacencies() )
        adj_fact->create_vert_elem_adjacencies();
    else
    {
        for( Range::iterator it = faces.begin(); it != faces.end(); ++it )
        {
            EntityHandle eh          = *it;
            const EntityHandle* conn = NULL;
            int num_nodes            = 0;
            rval = mb->get_connectivity( eh, conn, num_nodes );MB_CHK_ERR( rval );
            adj_fact->notify_create_entity( eh, conn, num_nodes );
        }
    }

#ifdef MOAB_HAVE_MPI
    if( myPcomm )
    {
        double tol = 1.e-12;  // this is the same as static tolerance in NCHelper
        ParallelMergeMesh pmm( myPcomm, tol );
        rval = pmm.merge( _fileSet,
                          /* do not do local merge*/ false,
                          /* 2d cells*/ 2 );MB_CHK_SET_ERR( rval, "Failed to merge vertices in parallel" );

        // assign global ids only for vertices, cells have them fine
        rval = myPcomm->assign_global_ids( _fileSet, /*dim*/ 0 );MB_CHK_ERR( rval );
        // remove all sets, edges and vertices from the file set
        Range edges, vertices;
        rval = mbImpl->get_entities_by_dimension( _fileSet, 1, edges, /*recursive*/ true );MB_CHK_ERR( rval );
        rval = mbImpl->get_entities_by_dimension( _fileSet, 0, vertices, /*recursive*/ true );MB_CHK_ERR( rval );
        rval = mbImpl->remove_entities( _fileSet, edges );MB_CHK_ERR( rval );
        rval = mbImpl->remove_entities( _fileSet, vertices );MB_CHK_ERR( rval );

        Range intfSets = myPcomm->interface_sets();
        // empty intf sets
        rval = mbImpl->clear_meshset( intfSets );MB_CHK_ERR( rval );
        // delete the sets without shame :)
        // sets.merge(intfSets);
        // rval = myPcomm->delete_entities(sets);MB_CHK_ERR( rval ); // will also clean shared ents !
        rval = myPcomm->delete_entities( edges );MB_CHK_ERR( rval );  // will also clean shared ents !
    }
#else
    rval = mbImpl->remove_entities( _fileSet, all_verts );MB_CHK_ERR( rval );
#endif

    return MB_SUCCESS;
}

#ifdef MOAB_HAVE_MPI
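// redistribute_local_cells: starting from the trivial contiguous split
// [start_cell_idx, start_cell_idx + nLocalCells), optionally repartition the locally
// owned cell ids with Zoltan RCB, using the cell centers mapped to the unit sphere;
// the resulting global ids are stored in localGidCells.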
ErrorCode NCHelperScrip::redistribute_local_cells( int start_cell_idx, ParallelComm* pco )
{
    // If possible, apply Zoltan partition
#ifdef MOAB_HAVE_ZOLTAN
    // apply the Zoltan-based redistribution only when it was requested through the partition method option
    if( ScdParData::RCBZOLTAN == _readNC->partMethod )
    {
        // Read grid_center_lon coordinates of cell centers
        int xCellVarId;
        int success = NCFUNC( inq_varid )( _fileId, "grid_center_lon", &xCellVarId );
        if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of grid_center_lon" );
        std::vector< double > xc( nLocalCells );
        NCDF_SIZE read_start = static_cast< NCDF_SIZE >( start_cell_idx - 1 );
        NCDF_SIZE read_count = static_cast< NCDF_SIZE >( nLocalCells );
        success = NCFUNCAG( _vara_double )( _fileId, xCellVarId, &read_start, &read_count, &xc[0] );
        if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read grid_center_lon data" );

        // Read grid_center_lat coordinates of cell centers
        int yCellVarId;
        success = NCFUNC( inq_varid )( _fileId, "grid_center_lat", &yCellVarId );
        if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of grid_center_lat" );
        std::vector< double > yc( nLocalCells );
        success = NCFUNCAG( _vara_double )( _fileId, yCellVarId, &read_start, &read_count, &yc[0] );
        if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read grid_center_lat data" );

        // Zoltan partition using RCB; more study would be useful to decide which partition method is best
        Interface*& mbImpl  = _readNC->mbImpl;
        DebugOutput& dbgOut = _readNC->dbgOut;
        ZoltanPartitioner* mbZTool = new ZoltanPartitioner( mbImpl, pco, false, 0, NULL );
        std::vector< double > xCell( nLocalCells );
        std::vector< double > yCell( nLocalCells );
        std::vector< double > zCell( nLocalCells );
        double pideg = 1.;  // radians
        if( degrees ) pideg = acos( -1.0 ) / 180.0;
        double x, y, cosphi;
        for( int i = 0; i < nLocalCells; i++ )
        {
            x        = xc[i];
            y        = yc[i];
            cosphi   = cos( pideg * y );
            zCell[i] = sin( pideg * y );
            xCell[i] = cosphi * cos( x * pideg );
            yCell[i] = cosphi * sin( x * pideg );
        }
        ErrorCode rval = mbZTool->repartition( xCell, yCell, zCell, start_cell_idx, "RCB", localGidCells );MB_CHK_SET_ERR( rval, "Error in Zoltan partitioning" );
        delete mbZTool;

        dbgOut.tprintf( 1, "After Zoltan partitioning, localGidCells.psize() = %d\n", (int)localGidCells.psize() );
        dbgOut.tprintf( 1, " localGidCells.size() = %d\n", (int)localGidCells.size() );

        // This is important: local cells are now redistributed, so nLocalCells might be different!
        nLocalCells = (int)localGidCells.size();

        return MB_SUCCESS;
    }
#endif

    // By default, apply trivial partition
    localGidCells.insert( start_cell_idx, start_cell_idx + nLocalCells - 1 );

    return MB_SUCCESS;
}
#endif

} /* namespace moab */
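
A minimal usage sketch (assuming a MOAB build with NetCDF support): a SCRIP grid file is loaded through the generic Interface::load_file entry point, which dispatches to this helper once can_read_file() finds the grid_size, grid_corners and grid_rank dimensions. The file name ocean.scrip.nc below is a placeholder.

#include <iostream>
#include "moab/Core.hpp"

int main()
{
    moab::Core mb;
    moab::EntityHandle fileSet;
    moab::ErrorCode rval = mb.create_meshset( moab::MESHSET_SET, fileSet );
    if( moab::MB_SUCCESS != rval ) return 1;

    // Empty options select the default serial read path; parallel reads would pass
    // PARALLEL=... options instead.
    rval = mb.load_file( "ocean.scrip.nc", &fileSet );
    if( moab::MB_SUCCESS != rval ) return 1;

    moab::Range cells;
    rval = mb.get_entities_by_dimension( fileSet, 2, cells );
    if( moab::MB_SUCCESS != rval ) return 1;
    std::cout << "Read " << cells.size() << " cells" << std::endl;
    return 0;
}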