Mesh Oriented datABase (version 5.5.0)
An array-based unstructured mesh library
NCHelperScrip.cpp
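NCHelperScrip is the helper used by MOAB's netCDF reader (ReadNC) for SCRIP-format grid files. As a minimal usage sketch (not part of the listing below), such a grid can be loaded serially through the generic MOAB file interface, which dispatches to this helper when the SCRIP dimensions are detected; the file name "grid.scrip.nc" is a placeholder and error handling is abbreviated:

#include <iostream>
#include "moab/Core.hpp"
#include "moab/Range.hpp"

int main()
{
    moab::Core mb;
    moab::EntityHandle fileSet;
    moab::ErrorCode rval = mb.create_meshset( moab::MESHSET_SET, fileSet );
    if( moab::MB_SUCCESS != rval ) return 1;

    // "grid.scrip.nc" is a placeholder path to a SCRIP-format netCDF file
    rval = mb.load_file( "grid.scrip.nc", &fileSet );
    if( moab::MB_SUCCESS != rval ) return 1;

    moab::Range cells;
    mb.get_entities_by_dimension( fileSet, 2, cells );
    std::cout << "Read " << cells.size() << " cells" << std::endl;
    return 0;
}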
1 /*
2  * NCHelperScrip.cpp
3  */
4 
5 #include "NCHelperScrip.hpp"
6 #include "moab/ReadUtilIface.hpp"
7 #include "AEntityFactory.hpp"
9 #ifdef MOAB_HAVE_MPI
10 #include "moab/ParallelMergeMesh.hpp"
11 #endif
12 #ifdef MOAB_HAVE_ZOLTAN
13 #include "moab/ZoltanPartitioner.hpp"
14 #endif
15 
16 namespace moab
17 {
18 
19 bool NCHelperScrip::can_read_file( ReadNC* readNC, int /*fileId*/ )
20 {
21  std::vector< std::string >& dimNames = readNC->dimNames;
22 
23  // If the dimensions "grid_size", "grid_corners", and "grid_rank" all exist, this should be a SCRIP grid
24  if( ( std::find( dimNames.begin(), dimNames.end(), std::string( "grid_size" ) ) != dimNames.end() ) &&
25  ( std::find( dimNames.begin(), dimNames.end(), std::string( "grid_corners" ) ) != dimNames.end() ) &&
26  ( std::find( dimNames.begin(), dimNames.end(), std::string( "grid_rank" ) ) != dimNames.end() ) )
27  {
28 
29  return true;
30  }
31 
32  return false;
33 }
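/*
 * Illustrative sketch (not part of the original file): the same SCRIP detection rule
 * expressed directly against the netCDF C API, independent of ReadNC. Assumes the
 * netCDF C library is available; "path" is whatever file is being probed.
 *
 *   #include <netcdf.h>
 *
 *   bool looks_like_scrip( const char* path )
 *   {
 *       int ncid, dimid;
 *       if( nc_open( path, NC_NOWRITE, &ncid ) != NC_NOERR ) return false;
 *       // SCRIP files declare all three of these dimensions
 *       bool ok = ( nc_inq_dimid( ncid, "grid_size", &dimid ) == NC_NOERR ) &&
 *                 ( nc_inq_dimid( ncid, "grid_corners", &dimid ) == NC_NOERR ) &&
 *                 ( nc_inq_dimid( ncid, "grid_rank", &dimid ) == NC_NOERR );
 *       nc_close( ncid );
 *       return ok;
 *   }
 */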
34 ErrorCode NCHelperScrip::init_mesh_vals()
35 {
36  Interface*& mbImpl = _readNC->mbImpl;
37  std::vector< std::string >& dimNames = _readNC->dimNames;
38  std::vector< int >& dimLens = _readNC->dimLens;
39 
40  unsigned int idx;
41  std::vector< std::string >::iterator vit;
42 
43  // get grid_size
44  if( ( vit = std::find( dimNames.begin(), dimNames.end(), "grid_size" ) ) != dimNames.end() )
45  {
46  idx = vit - dimNames.begin();
47  grid_size = dimLens[idx];
48  }
49 
50  // get grid_corners
51  if( ( vit = std::find( dimNames.begin(), dimNames.end(), "grid_corners" ) ) != dimNames.end() )
52  {
53  idx = vit - dimNames.begin();
54  grid_corners = dimLens[idx];
55  }
56 
57  // get grid_rank
58  if( ( vit = std::find( dimNames.begin(), dimNames.end(), "grid_rank" ) ) != dimNames.end() )
59  {
60  idx = vit - dimNames.begin();
61  grid_rank = dimLens[idx];
62  }
63 
64  // do not need conventional tags
65  Tag convTagsCreated = 0;
66  int def_val = 0;
67  ErrorCode rval = mbImpl->tag_get_handle( "__CONV_TAGS_CREATED", 1, MB_TYPE_INTEGER, convTagsCreated,
68  MB_TAG_SPARSE | MB_TAG_CREAT, &def_val );MB_CHK_SET_ERR( rval, "Trouble getting __CONV_TAGS_CREATED tag" );
69  int create_conv_tags_flag = 1;
70  rval = mbImpl->tag_set_data( convTagsCreated, &_fileSet, 1, &create_conv_tags_flag );MB_CHK_SET_ERR( rval, "Trouble setting __CONV_TAGS_CREATED tag" );
71 
72  // decide now the units, by looking at grid_center_lon
73  int xCellVarId;
74  int success = NCFUNC( inq_varid )( _fileId, "grid_center_lon", &xCellVarId );
75  if( success ) MB_CHK_SET_ERR( MB_FAILURE, "Trouble getting grid_center_lon" );
76  std::map< std::string, ReadNC::VarData >& varInfo = _readNC->varInfo;
77  auto vmit = varInfo.find( "grid_center_lon" );
78  if( varInfo.end() == vmit )
79  MB_SET_ERR( MB_FAILURE, "Couldn't find variable "
80  << "grid_center_lon" );
81  ReadNC::VarData& glData = vmit->second;
82  auto attIt = glData.varAtts.find( "units" );
83  if( attIt != glData.varAtts.end() )
84  {
85  unsigned int sz = attIt->second.attLen;
86  std::string att_data;
87  att_data.resize( sz + 1 );
88  att_data[sz] = '\000';
89  success =
90  NCFUNC( get_att_text )( _fileId, attIt->second.attVarId, attIt->second.attName.c_str(), &att_data[0] );
91  if( 0 == success && att_data.find( "radians" ) != std::string::npos ) degrees = false;
92  }
93 
94  return MB_SUCCESS;
95 }
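/*
 * Illustrative sketch (not part of the original file): the lon/lat to unit-sphere
 * conversion used by create_mesh() and redistribute_local_cells() below, with the
 * degrees/radians choice made by init_mesh_vals() above. The names here are hypothetical.
 *
 *   #include <cmath>
 *
 *   struct XYZ { double x, y, z; };
 *
 *   inline XYZ lonlat_to_xyz( double lon, double lat, bool in_degrees )
 *   {
 *       const double pideg = in_degrees ? std::acos( -1.0 ) / 180.0 : 1.0;
 *       const double cosphi = std::cos( pideg * lat );
 *       return { cosphi * std::cos( pideg * lon ),   // x
 *                cosphi * std::sin( pideg * lon ),   // y
 *                std::sin( pideg * lat ) };          // z
 *   }
 */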
96 ErrorCode NCHelperScrip::create_mesh( Range& faces )
97 {
98  Interface*& mbImpl = _readNC->mbImpl;
99  DebugOutput& dbgOut = _readNC->dbgOut;
100  Tag& mGlobalIdTag = _readNC->mGlobalIdTag;
101  ErrorCode rval;
102 
103 #ifdef MOAB_HAVE_MPI
104  int rank = 0;
105  int procs = 1;
106  bool& isParallel = _readNC->isParallel;
107  ParallelComm* myPcomm = NULL;
108  if( isParallel )
109  {
110  myPcomm = _readNC->myPcomm;
111  rank = myPcomm->proc_config().proc_rank();
112  procs = myPcomm->proc_config().proc_size();
113  }
114 
115  if( procs >= 2 )
116  {
117  // Shift rank to obtain a rotated trivial partition
118  int shifted_rank = rank;
119  int& trivialPartitionShift = _readNC->trivialPartitionShift;
120  if( trivialPartitionShift > 0 ) shifted_rank = ( rank + trivialPartitionShift ) % procs;
121 
122  // Compute the number of local cells on this proc
123  nLocalCells = int( std::floor( 1.0 * grid_size / procs ) );
124 
125  // The starting global cell index in the SCRIP file for this proc
126  int start_cell_idx = shifted_rank * nLocalCells;
127 
128  // Number of extra cells after equal split over procs
129  int iextra = grid_size % procs;
130 
131  // Allocate extra cells over procs
132  if( shifted_rank < iextra ) nLocalCells++;
133  start_cell_idx += std::min( shifted_rank, iextra );
134 
135  start_cell_idx++; // 0 based -> 1 based
136 
137  // Redistribute local cells after trivial partition (e.g. apply Zoltan partition)
138  ErrorCode rval = redistribute_local_cells( start_cell_idx, myPcomm );MB_CHK_SET_ERR( rval, "Failed to redistribute local cells after trivial partition" );
139  }
140  else
141  {
142  nLocalCells = grid_size;
143  localGidCells.insert( 1, nLocalCells );
144  }
145 #else
146  nLocalCells = grid_size;
147  localGidCells.insert( 1, nLocalCells );
148 #endif
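/*
 * Illustrative note (not part of the original file): the trivial partition above gives each
 * (optionally shifted) rank floor(grid_size/procs) cells, with the first grid_size % procs
 * ranks taking one extra cell; start indices are 1-based. A small standalone sketch of the
 * same arithmetic, with hypothetical names:
 *
 *   void trivial_partition( int grid_size, int procs, int rank, int& start, int& count )
 *   {
 *       count = grid_size / procs;
 *       const int extra = grid_size % procs;
 *       start = rank * count + std::min( rank, extra ) + 1; // 1-based start index
 *       if( rank < extra ) count++;
 *   }
 *
 * For example, grid_size = 10 and procs = 4 yields the 1-based ranges
 * [1,3], [4,6], [7,8], [9,10] for ranks 0..3.
 */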
149  dbgOut.tprintf( 1, " localGidCells.psize() = %d\n", (int)localGidCells.psize() );
150  dbgOut.tprintf( 1, " localGidCells.size() = %d\n", (int)localGidCells.size() );
151 
152  // double grid_corner_lat(grid_size, grid_corners) ;
153  // double grid_corner_lon(grid_size, grid_corners) ;
154  int xvId, yvId;
155  int success = NCFUNC( inq_varid )( _fileId, "grid_corner_lon", &xvId );
156  if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of grid_corner_lon" );
157  success = NCFUNC( inq_varid )( _fileId, "grid_corner_lat", &yvId );
158  if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of grid_corner_lat" );
159 
160  // important upgrade: read masks if they exist, and save them as tags
161  int gmId = -1;
162  int sizeMasks = 0;
163 #ifdef MOAB_HAVE_PNETCDF
164  int factorRequests = 2; // we would read in general only 2 variables, xv and yv
165 #endif
166  success = NCFUNC( inq_varid )( _fileId, "grid_imask", &gmId );
167  Tag maskTag = 0; // not sure yet if we have the masks or not
168  if( success )
169  {
170  gmId = -1; // we do not have masks
171  }
172  else
173  {
174  sizeMasks = nLocalCells;
175 #ifdef MOAB_HAVE_PNETCDF
176  factorRequests = 3; // we also need to read masks distributed
177 #endif
178  // create the maskTag GRID_IMASK, with default value of 1
179  int def_val = 1;
180  rval =
181  mbImpl->tag_get_handle( "GRID_IMASK", 1, MB_TYPE_INTEGER, maskTag, MB_TAG_DENSE | MB_TAG_CREAT, &def_val );MB_CHK_SET_ERR( rval, "Trouble creating GRID_IMASK tag" );
182  }
183 
184  std::vector< double > xv( nLocalCells * grid_corners );
185  std::vector< double > yv( nLocalCells * grid_corners );
186  std::vector< int > masks( sizeMasks );
187 #ifdef MOAB_HAVE_PNETCDF
188  size_t nb_reads = localGidCells.psize();
189  std::vector< int > requests( nb_reads * factorRequests );
190  std::vector< int > statuss( nb_reads * factorRequests );
191  size_t idxReq = 0;
192 #endif
193  size_t indexInArray = 0;
194  size_t indexInMaskArray = 0;
195  for( Range::pair_iterator pair_iter = localGidCells.pair_begin(); pair_iter != localGidCells.pair_end();
196  ++pair_iter )
197  {
198  EntityHandle starth = pair_iter->first;
199  EntityHandle endh = pair_iter->second;
200  NCDF_SIZE read_starts[2] = { static_cast< NCDF_SIZE >( starth - 1 ), 0 };
201  NCDF_SIZE read_counts[2] = { static_cast< NCDF_SIZE >( endh - starth + 1 ),
202  static_cast< NCDF_SIZE >( grid_corners ) };
203 
204  // Do a partial read in each subrange
205 #ifdef MOAB_HAVE_PNETCDF
206  success = NCFUNCREQG( _vara_double )( _fileId, xvId, read_starts, read_counts, &( xv[indexInArray] ),
207  &requests[idxReq++] );
208 #else
209  success = NCFUNCAG( _vara_double )( _fileId, xvId, read_starts, read_counts, &( xv[indexInArray] ) );
210 #endif
211  if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read grid_corner_lon data in a loop" );
212 
213  // Do a partial read in each subrange
214 #ifdef MOAB_HAVE_PNETCDF
215  success = NCFUNCREQG( _vara_double )( _fileId, yvId, read_starts, read_counts, &( yv[indexInArray] ),
216  &requests[idxReq++] );
217 #else
218  success = NCFUNCAG( _vara_double )( _fileId, yvId, read_starts, read_counts, &( yv[indexInArray] ) );
219 #endif
220  if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read grid_corner_lat data in a loop" );
221  // Increment the index for next subrange
222  indexInArray += ( endh - starth + 1 ) * grid_corners;
223 
224  if( gmId >= 0 ) // it means we need to read masks too, distributed:
225  {
226  NCDF_SIZE read_st = static_cast< NCDF_SIZE >( starth - 1 );
227  NCDF_SIZE read_ct = static_cast< NCDF_SIZE >( endh - starth + 1 );
228  // Do a partial read in each subrange, for mask variable:
229 #ifdef MOAB_HAVE_PNETCDF
230  success = NCFUNCREQG( _vara_int )( _fileId, gmId, &read_st, &read_ct, &( masks[indexInMaskArray] ),
231  &requests[idxReq++] );
232 #else
233  success = NCFUNCAG( _vara_int )( _fileId, gmId, &read_st, &read_ct, &( masks[indexInMaskArray] ) );
234 #endif
235  if( success ) MB_SET_ERR( MB_FAILURE, "Failed on mask read " );
236  indexInMaskArray += endh - starth + 1;
237  }
238  }
239 
240 #ifdef MOAB_HAVE_PNETCDF
241  // Wait outside the loop
242  success = NCFUNC( wait_all )( _fileId, requests.size(), &requests[0], &statuss[0] );
243  if( success ) MB_SET_ERR( MB_FAILURE, "Failed on wait_all" );
244 #endif
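 // Note (not part of the original file): with PnetCDF, each subrange read above only posts a
 // non-blocking request, so the single wait_all here completes all queued xv/yv/mask reads at
 // once instead of synchronizing once per subrange.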
245 
246  // int grid_dims(grid_rank)
247  {
248  int gdId;
249  int success = NCFUNC( inq_varid )( _fileId, "grid_dims", &gdId );
250  if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of grid_dims" );
251 
252  // If rectilinear attribute present, mark it
253  std::vector< int > vecDimSizes( 3, 0 );
254  Tag rectilinearTag;
255  // Tag data contains: guessed mesh type, mesh size1, mesh size 2
256  // Example: CS(0)/ICO(1)/ICOD(2), num_elements, num_nodes
257  // : RLL(3), num_lat, num_lon
258 
259  // guess the mesh type: grid_rank == 2 suggests a rectilinear (RLL) grid, otherwise CS
260 
261  vecDimSizes[0] = ( grid_rank == 2 ? 1 /* moab::TempestRemapper::RLL */ : 0 /* moab::TempestRemapper::CS */ );
262  vecDimSizes[1] = grid_size; // number of elements
263  vecDimSizes[2] = grid_size; // number of elements
264  rval = mbImpl->tag_get_handle( "ClimateMetadata", 3, MB_TYPE_INTEGER, rectilinearTag,
265  MB_TAG_SPARSE | MB_TAG_CREAT, vecDimSizes.data() );
266  if( MB_ALREADY_ALLOCATED != rval && MB_SUCCESS != rval )
267  MB_CHK_SET_ERR( rval, "can't create rectilinear sizes tag" );
268 
269  if( grid_rank == 2 )
270  {
271  NCDF_SIZE read_starts[1] = { static_cast< NCDF_SIZE >( 0 ) };
272  NCDF_SIZE read_counts[1] = { static_cast< NCDF_SIZE >( grid_rank ) };
273 
274  // Do a partial read in each subrange
275 #ifdef MOAB_HAVE_PNETCDF
276  std::vector< int > requeststatus( 2 );
277  success = NCFUNCREQG( _vara_int )( _fileId, gdId, read_starts, read_counts, vecDimSizes.data() + 1,
278  &requeststatus[0] );
279  if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read grid_dims data" );
280 
281  // Wait outside the loop
282  success = NCFUNC( wait_all )( _fileId, 1, &requeststatus[0], &requeststatus[1] );
283  if( success ) MB_SET_ERR( MB_FAILURE, "Failed on wait_all" );
284 #else
285  success = NCFUNCAG( _vara_int )( _fileId, gdId, read_starts, read_counts, vecDimSizes.data() + 1 );
286  if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read grid_dims data" );
287 #endif
288  }
289 
290  rval = mbImpl->tag_set_data( rectilinearTag, &_fileSet, 1, vecDimSizes.data() );
291  }
292 
293  // so we read xv, yv for all corners in the local mesh, and masks if they exist
294 
295  // Create vertices; first identify different ones, with a tolerance
296  std::map< Node3D, EntityHandle > vertex_map;
297 
298  // Set vertex coordinates
299  // will read all xv, yv, but use only those with correct mask on
300 
301  int elem_index = 0; // local index in netcdf arrays
302  double pideg = 1.; // conversion factor; 1.0 when coordinates are already in radians
303  if( degrees ) pideg = acos( -1.0 ) / 180.0;
304 
305  for( ; elem_index < nLocalCells; elem_index++ )
306  {
307  // set area and fraction on those elements too
308  for( int k = 0; k < grid_corners; k++ )
309  {
310  int index_v_arr = grid_corners * elem_index + k;
311  double x, y;
312  x = xv[index_v_arr];
313  y = yv[index_v_arr];
314  double cosphi = cos( pideg * y );
315  double zmult = sin( pideg * y );
316  double xmult = cosphi * cos( x * pideg );
317  double ymult = cosphi * sin( x * pideg );
318  Node3D pt( xmult, ymult, zmult );
319  vertex_map[pt] = 0;
320  }
321  }
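 // Note (not part of the original file): this first pass only collects the distinct unit-sphere
 // corner points (coincident corners map to the same Node3D key), so the map size is the number
 // of unique local vertices; the second pass below assigns one vertex handle per key in map
 // order and fills the coordinate arrays.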
322  int nLocalVertices = (int)vertex_map.size();
323  std::vector< double* > arrays;
324  EntityHandle start_vertex, vtx_handle;
325  rval = _readNC->readMeshIface->get_node_coords( 3, nLocalVertices, 0, start_vertex, arrays );MB_CHK_SET_ERR( rval, "Failed to create local vertices" );
326 
327  vtx_handle = start_vertex;
328  // Copy vertex coordinates into entity sequence coordinate arrays
329  // and copy handle into vertex_map.
330  double *x = arrays[0], *y = arrays[1], *z = arrays[2];
331  for( auto i = vertex_map.begin(); i != vertex_map.end(); ++i )
332  {
333  i->second = vtx_handle;
334  ++vtx_handle;
335  *x = i->first.coords[0];
336  ++x;
337  *y = i->first.coords[1];
338  ++y;
339  *z = i->first.coords[2];
340  ++z;
341  }
342 
343  EntityHandle start_cell;
344  int nv = grid_corners;
345  EntityType mdb_type = MBVERTEX;
346  if( nv == 3 )
347  mdb_type = MBTRI;
348  else if( nv == 4 )
349  mdb_type = MBQUAD;
350  else if( nv > 4 ) // arbitrary polygons
351  mdb_type = MBPOLYGON;
352 
353  Range tmp_range;
354  EntityHandle* conn_arr;
355 
356  rval = _readNC->readMeshIface->get_element_connect( nLocalCells, nv, mdb_type, 0, start_cell, conn_arr );MB_CHK_SET_ERR( rval, "Failed to create local cells" );
357  tmp_range.insert( start_cell, start_cell + nLocalCells - 1 );
358 
359  elem_index = 0;
360 
361  for( ; elem_index < nLocalCells; elem_index++ )
362  {
363  for( int k = 0; k < nv; k++ )
364  {
365  int index_v_arr = nv * elem_index + k;
366  if( nv > 1 )
367  {
368  double x = xv[index_v_arr];
369  double y = yv[index_v_arr];
370  double cosphi = cos( pideg * y );
371  double zmult = sin( pideg * y );
372  double xmult = cosphi * cos( x * pideg );
373  double ymult = cosphi * sin( x * pideg );
374  Node3D pt( xmult, ymult, zmult );
375  conn_arr[elem_index * nv + k] = vertex_map[pt];
376  }
377  }
378  EntityHandle cell = start_cell + elem_index;
379  // set other tags, like xc, yc, frac, area
380  /*rval = mbImpl->tag_set_data( xcTag, &cell, 1, &xc[elem_index] );MB_CHK_SET_ERR( rval, "Failed to set xc tag" );
381  rval = mbImpl->tag_set_data( ycTag, &cell, 1, &yc[elem_index] );MB_CHK_SET_ERR( rval, "Failed to set yc tag" );
382  rval = mbImpl->tag_set_data( areaTag, &cell, 1, &area[elem_index] );MB_CHK_SET_ERR( rval, "Failed to set area tag" );
383  rval = mbImpl->tag_set_data( fracTag, &cell, 1, &frac[elem_index] );MB_CHK_SET_ERR( rval, "Failed to set frac tag" );
384 */
385  // set the global id too:
386  int globalId = localGidCells[elem_index];
387 
388  rval = mbImpl->tag_set_data( mGlobalIdTag, &cell, 1, &globalId );MB_CHK_SET_ERR( rval, "Failed to set global id tag" );
389  if( gmId >= 0 )
390  {
391  int localMask = masks[elem_index];
392  rval = mbImpl->tag_set_data( maskTag, &cell, 1, &localMask );MB_CHK_SET_ERR( rval, "Failed to set mask tag" );
393  }
394  }
395 
396  rval = mbImpl->add_entities( _fileSet, tmp_range );MB_CHK_SET_ERR( rval, "Failed to add new cells to current file set" );
397 
398  // modify local file set, to merge coincident vertices, and to correct repeated vertices in elements
399  std::vector< Tag > tagList;
400  tagList.push_back( mGlobalIdTag );
401  if( gmId >= 0 ) tagList.push_back( maskTag );
402  rval = IntxUtils::remove_padded_vertices( mbImpl, _fileSet, tagList );MB_CHK_SET_ERR( rval, "Failed to remove duplicate vertices" );
403 
404  rval = mbImpl->get_entities_by_dimension( _fileSet, 2, faces );MB_CHK_ERR( rval );
405  Range all_verts;
406  rval = mbImpl->get_connectivity( faces, all_verts );MB_CHK_ERR( rval );
407  rval = mbImpl->add_entities( _fileSet, all_verts );MB_CHK_ERR( rval );
408  // need to add adjacencies; TODO: fix this for all nc readers
409  // copy this logic from migrate mesh in par comm graph
410  Core* mb = (Core*)mbImpl;
411  AEntityFactory* adj_fact = mb->a_entity_factory();
412  if( !adj_fact->vert_elem_adjacencies() )
413  adj_fact->create_vert_elem_adjacencies();
414  else
415  {
416  for( Range::iterator it = faces.begin(); it != faces.end(); ++it )
417  {
418  EntityHandle eh = *it;
419  const EntityHandle* conn = NULL;
420  int num_nodes = 0;
421  rval = mb->get_connectivity( eh, conn, num_nodes );MB_CHK_ERR( rval );
422  adj_fact->notify_create_entity( eh, conn, num_nodes );
423  }
424  }
425 
426 #ifdef MOAB_HAVE_MPI
427  if( myPcomm )
428  {
429  double tol = 1.e-12; // this is the same as static tolerance in NCHelper
430  ParallelMergeMesh pmm( myPcomm, tol );
431  rval = pmm.merge( _fileSet,
432  /* do not do local merge*/ false,
433  /* 2d cells*/ 2 );MB_CHK_SET_ERR( rval, "Failed to merge vertices in parallel" );
434 
435  // assign global ids only for vertices, cells have them fine
436  rval = myPcomm->assign_global_ids( _fileSet, /*dim*/ 0 );MB_CHK_ERR( rval );
437  // remove edges and vertices from the file set
438  Range edges, vertices;
439  rval = mbImpl->get_entities_by_dimension( _fileSet, 1, edges, /*recursive*/ true );MB_CHK_ERR( rval );
440  rval = mbImpl->get_entities_by_dimension( _fileSet, 0, vertices, /*recursive*/ true );MB_CHK_ERR( rval );
441  rval = mbImpl->remove_entities( _fileSet, edges );MB_CHK_ERR( rval );
442  rval = mbImpl->remove_entities( _fileSet, vertices );MB_CHK_ERR( rval );
443 
444  Range intfSets = myPcomm->interface_sets();
445  // empty intf sets
446  rval = mbImpl->clear_meshset( intfSets );MB_CHK_ERR( rval );
447  // delete the sets without shame :)
448  //sets.merge(intfSets);
449  //rval = myPcomm->delete_entities(sets);MB_CHK_ERR( rval ); // will also clean shared ents !
450  rval = myPcomm->delete_entities( edges );MB_CHK_ERR( rval ); // will also clean shared ents !
451  }
452 #else
453  rval = mbImpl->remove_entities( _fileSet, all_verts );MB_CHK_ERR( rval );
454 #endif
455 
456  return MB_SUCCESS;
457 }
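/*
 * Illustrative sketch (not part of the original file): after a file is read through this
 * helper, the cell mask written above can be retrieved from the GRID_IMASK dense tag
 * (present only if the file provided grid_imask). "mb" and "fileSet" are assumed to be the
 * Interface and file set used for the read.
 *
 *   moab::Range cells;
 *   mb.get_entities_by_dimension( fileSet, 2, cells );
 *   moab::Tag maskTag;
 *   moab::ErrorCode rval = mb.tag_get_handle( "GRID_IMASK", 1, moab::MB_TYPE_INTEGER, maskTag );
 *   if( moab::MB_SUCCESS == rval )
 *   {
 *       std::vector< int > mask( cells.size() );
 *       rval = mb.tag_get_data( maskTag, cells, mask.data() );
 *       // mask[i] == 0 marks an inactive (masked) cell in the SCRIP grid
 *   }
 */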
458 
459 #ifdef MOAB_HAVE_MPI
460 ErrorCode NCHelperScrip::redistribute_local_cells( int start_cell_idx, ParallelComm* pco )
461 {
462  // If possible, apply Zoltan partition
463 #ifdef MOAB_HAVE_ZOLTAN
464  if( ScdParData::RCBZOLTAN == _readNC->partMethod )
465 {
466  // Read grid_center_lon coordinates of cell centers
467  int xCellVarId;
468  int success = NCFUNC( inq_varid )( _fileId, "grid_center_lon", &xCellVarId );
469  if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of grid_center_lon" );
470  std::vector< double > xc( nLocalCells );
471  NCDF_SIZE read_start = static_cast< NCDF_SIZE >( start_cell_idx - 1 );
472  NCDF_SIZE read_count = static_cast< NCDF_SIZE >( nLocalCells );
473  success = NCFUNCAG( _vara_double )( _fileId, xCellVarId, &read_start, &read_count, &xc[0] );
474  if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read grid_center_lon data" );
475 
476  // Read grid_center_lat coordinates of cell centers
477  int yCellVarId;
478  success = NCFUNC( inq_varid )( _fileId, "grid_center_lat", &yCellVarId );
479  if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of grid_center_lat" );
480  std::vector< double > yc( nLocalCells );
481  success = NCFUNCAG( _vara_double )( _fileId, yCellVarId, &read_start, &read_count, &yc[0] );
482  if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read grid_center_lat data" );
483 
484  // Zoltan partition using RCB; further study may be needed to determine which
485  // partition method works best
486  Interface*& mbImpl = _readNC->mbImpl;
487  DebugOutput& dbgOut = _readNC->dbgOut;
488  ZoltanPartitioner* mbZTool = new ZoltanPartitioner( mbImpl, pco, false, 0, NULL );
489  std::vector< double > xCell( nLocalCells );
490  std::vector< double > yCell( nLocalCells );
491  std::vector< double > zCell( nLocalCells );
492  double pideg = 1.; // conversion factor; 1.0 when coordinates are already in radians
493  if( degrees ) pideg = acos( -1.0 ) / 180.0;
494  double x, y, cosphi;
495  for( int i = 0; i < nLocalCells; i++ )
496  {
497  x = xc[i];
498  y = yc[i];
499  cosphi = cos( pideg * y );
500  zCell[i] = sin( pideg * y );
501  xCell[i] = cosphi * cos( x * pideg );
502  yCell[i] = cosphi * sin( x * pideg );
503  }
504  ErrorCode rval = mbZTool->repartition( xCell, yCell, zCell, start_cell_idx, "RCB", localGidCells );MB_CHK_SET_ERR( rval, "Error in Zoltan partitioning" );
505  delete mbZTool;
506 
507  dbgOut.tprintf( 1, "After Zoltan partitioning, localGidCells.psize() = %d\n", (int)localGidCells.psize() );
508  dbgOut.tprintf( 1, " localGidCells.size() = %d\n", (int)localGidCells.size() );
509 
510  // This is important: local cells are now redistributed, so nLocalCells might be different!
511  nLocalCells = localGidCells.size();
512 
513  return MB_SUCCESS;
514  }
515 #endif
516 
517  // By default, apply trivial partition
518  localGidCells.insert( start_cell_idx, start_cell_idx + nLocalCells - 1 );
519 
520  return MB_SUCCESS;
521 }
522 #endif
523 
524 } /* namespace moab */