Mesh Oriented datABase  (version 5.5.1)
An array-based unstructured mesh library
NCHelperDomain.cpp
#include "NCHelperDomain.hpp"
#include "moab/FileOptions.hpp"
#include "moab/ReadUtilIface.hpp"
#include "moab/IntxMesh/IntxUtils.hpp"  // IntxUtils::remove_padded_vertices
#include "AEntityFactory.hpp"
#ifdef MOAB_HAVE_MPI
#include "moab/ParallelMergeMesh.hpp"
#endif
#ifdef MOAB_HAVE_ZOLTAN
#include "moab/ZoltanPartitioner.hpp"
#include "moab/TupleList.hpp"
#endif

#include <algorithm>  // std::find
#include <cmath>
#include <sstream>

namespace moab
{

bool NCHelperDomain::can_read_file( ReadNC* readNC, int fileId )
{
    std::vector< std::string >& dimNames = readNC->dimNames;

    // If the dimensions "ni", "nj" and "nv" all exist, this should be a domain grid.
    // The "n" dimension (which would be ni*nj) is not required, since some files do not have it.
    if( ( std::find( dimNames.begin(), dimNames.end(), std::string( "ni" ) ) != dimNames.end() ) &&
        ( std::find( dimNames.begin(), dimNames.end(), std::string( "nj" ) ) != dimNames.end() ) &&
        ( std::find( dimNames.begin(), dimNames.end(), std::string( "nv" ) ) != dimNames.end() ) )
    {
        // We no longer test whether it is an actual CAM file
        return true;
    }

    return false;
}

ErrorCode NCHelperDomain::init_mesh_vals()
{
    Interface*& mbImpl = _readNC->mbImpl;
    std::vector< std::string >& dimNames = _readNC->dimNames;
    std::vector< int >& dimLens = _readNC->dimLens;
    std::map< std::string, ReadNC::VarData >& varInfo = _readNC->varInfo;
    DebugOutput& dbgOut = _readNC->dbgOut;
#ifdef MOAB_HAVE_MPI
    bool& isParallel = _readNC->isParallel;
#endif
    int& partMethod = _readNC->partMethod;
    ScdParData& parData = _readNC->parData;

    ErrorCode rval;

    // Look for the names of the i/j dimensions
    // First i
    std::vector< std::string >::iterator vit;
    unsigned int idx;
    if( ( vit = std::find( dimNames.begin(), dimNames.end(), "ni" ) ) != dimNames.end() )
        idx = vit - dimNames.begin();
    else
    {
        MB_SET_ERR( MB_FAILURE, "Couldn't find 'ni' dimension" );
    }
    iDim = idx;
    gDims[0] = 0;
    gDims[3] = dimLens[idx];

    // Then j
    if( ( vit = std::find( dimNames.begin(), dimNames.end(), "nj" ) ) != dimNames.end() )
        idx = vit - dimNames.begin();
    else
    {
        MB_SET_ERR( MB_FAILURE, "Couldn't find 'nj' dimension" );
    }
    jDim = idx;
    gDims[1] = 0;
    gDims[4] = dimLens[idx];  // No pole points need to be added here

    // Do not use gcdims? Or use only gcdims?

    // Try a truly 2D mesh
    gDims[2] = -1;
    gDims[5] = -1;
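    // gDims holds the global index box as [imin, jmin, kmin, imax, jmax, kmax]; lDims will hold
    // the corresponding local box after partitioning, so extents below are computed as, e.g.,
    // gDims[3] - gDims[0].  The -1 entries in the k slots mark a purely 2D grid.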

    // Get number of vertices per cell
    if( ( vit = std::find( dimNames.begin(), dimNames.end(), "nv" ) ) != dimNames.end() )
        idx = vit - dimNames.begin();
    else
    {
        MB_SET_ERR( MB_FAILURE, "Couldn't find 'nv' dimension" );
    }
    nvDim = idx;
    nv = dimLens[idx];

    // Parse options to get subset
    int rank = 0, procs = 1;
#ifdef MOAB_HAVE_MPI
    if( isParallel )
    {
        ParallelComm*& myPcomm = _readNC->myPcomm;
        rank = myPcomm->proc_config().proc_rank();
        procs = myPcomm->proc_config().proc_size();
    }
#endif
    if( procs > 1 )
    {
        for( int i = 0; i < 6; i++ )
            parData.gDims[i] = gDims[i];
        parData.partMethod = partMethod;
        int pdims[3];

        rval = ScdInterface::compute_partition( procs, rank, parData, lDims, locallyPeriodic, pdims );MB_CHK_ERR( rval );
        for( int i = 0; i < 3; i++ )
            parData.pDims[i] = pdims[i];

        dbgOut.tprintf( 1, "Partition: %dx%d (out of %dx%d)\n", lDims[3] - lDims[0], lDims[4] - lDims[1],
                        gDims[3] - gDims[0], gDims[4] - gDims[1] );
        if( 0 == rank )
            dbgOut.tprintf( 1, "Contiguous chunks of size %d bytes.\n",
                            8 * ( lDims[3] - lDims[0] ) * ( lDims[4] - lDims[1] ) );
    }
    else
    {
        for( int i = 0; i < 6; i++ )
            lDims[i] = gDims[i];
    }

    // Set the local cell (center) index box; the actual coordinate values are read in create_mesh()
    lCDims[0] = lDims[0];
    lCDims[3] = lDims[3];

    // Always non-periodic in j
    lCDims[1] = lDims[1];
    lCDims[4] = lDims[4];
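    // Note: in the domain grid, ni and nj count cells; each cell carries its own nv corner
    // coordinates (the xv/yv variables read in create_mesh), so the cell box coincides with lDims.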

#if 0
    // Resize vectors to store values later
    if (-1 != lDims[0])
        ilVals.resize(lDims[3] - lDims[0] + 1);
    if (-1 != lCDims[0])
        ilCVals.resize(lCDims[3] - lCDims[0] + 1);
    if (-1 != lDims[1])
        jlVals.resize(lDims[4] - lDims[1] + 1);
    if (-1 != lCDims[1])
        jlCVals.resize(lCDims[4] - lCDims[1] + 1);
    if (nTimeSteps > 0)
        tVals.resize(nTimeSteps);
#endif

    dbgOut.tprintf( 1, "I=%d-%d, J=%d-%d\n", lDims[0], lDims[3], lDims[1], lDims[4] );
    dbgOut.tprintf( 1, "%d elements, %d vertices\n", ( lDims[3] - lDims[0] ) * ( lDims[4] - lDims[1] ),
                    ( lDims[3] - lDims[0] ) * ( lDims[4] - lDims[1] ) * nv );

    // For each variable, determine the entity location type and number of levels
    std::map< std::string, ReadNC::VarData >::iterator mit;
    for( mit = varInfo.begin(); mit != varInfo.end(); ++mit )
    {
        ReadNC::VarData& vd = ( *mit ).second;

        // Default entLoc is ENTLOCSET
        if( std::find( vd.varDims.begin(), vd.varDims.end(), tDim ) != vd.varDims.end() )
        {
            if( ( std::find( vd.varDims.begin(), vd.varDims.end(), iCDim ) != vd.varDims.end() ) &&
                ( std::find( vd.varDims.begin(), vd.varDims.end(), jCDim ) != vd.varDims.end() ) )
                vd.entLoc = ReadNC::ENTLOCFACE;
            else if( ( std::find( vd.varDims.begin(), vd.varDims.end(), jDim ) != vd.varDims.end() ) &&
                     ( std::find( vd.varDims.begin(), vd.varDims.end(), iCDim ) != vd.varDims.end() ) )
                vd.entLoc = ReadNC::ENTLOCNSEDGE;
            else if( ( std::find( vd.varDims.begin(), vd.varDims.end(), jCDim ) != vd.varDims.end() ) &&
                     ( std::find( vd.varDims.begin(), vd.varDims.end(), iDim ) != vd.varDims.end() ) )
                vd.entLoc = ReadNC::ENTLOCEWEDGE;
        }

        // Default numLev is 0
        if( std::find( vd.varDims.begin(), vd.varDims.end(), levDim ) != vd.varDims.end() ) vd.numLev = nLevels;
    }

    std::vector< std::string > ijdimNames( 2 );
    ijdimNames[0] = "__ni";
    ijdimNames[1] = "__nj";
    std::string tag_name;
    Tag tagh;

    // __<dim_name>_LOC_MINMAX (for the __ni and __nj dimensions)
    for( unsigned int i = 0; i != ijdimNames.size(); i++ )
    {
        std::vector< int > val( 2, 0 );
        if( ijdimNames[i] == "__ni" )
        {
            val[0] = lDims[0];
            val[1] = lDims[3];
        }
        else if( ijdimNames[i] == "__nj" )
        {
            val[0] = lDims[1];
            val[1] = lDims[4];
        }

        std::stringstream ss_tag_name;
        ss_tag_name << ijdimNames[i] << "_LOC_MINMAX";
        tag_name = ss_tag_name.str();
        rval = mbImpl->tag_get_handle( tag_name.c_str(), 2, MB_TYPE_INTEGER, tagh, MB_TAG_SPARSE | MB_TAG_CREAT );MB_CHK_SET_ERR( rval, "Trouble creating conventional tag " << tag_name );
        rval = mbImpl->tag_set_data( tagh, &_fileSet, 1, &val[0] );MB_CHK_SET_ERR( rval, "Trouble setting data to conventional tag " << tag_name );
        if( MB_SUCCESS == rval ) dbgOut.tprintf( 2, "Conventional tag %s is created.\n", tag_name.c_str() );
    }
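    // Each __ni/__nj_LOC_MINMAX tag stores, on the file set, the [min, max] local index range
    // owned by this task in the corresponding direction.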

    // Check the coordinate variable data type; assume all coordinate variables have the same
    // type as xc (expected float or double)
    switch( varInfo["xc"].varDataType )
    {
        case NC_FLOAT:
        case NC_DOUBLE:
            break;
        default:
            MB_SET_ERR( MB_FAILURE, "Unexpected variable data type for 'xc'" );
    }

    // Mark the file set so that conventional tags are not created again later
    Tag convTagsCreated = 0;
    int def_val = 0;
    rval = mbImpl->tag_get_handle( "__CONV_TAGS_CREATED", 1, MB_TYPE_INTEGER, convTagsCreated,
                                   MB_TAG_SPARSE | MB_TAG_CREAT, &def_val );MB_CHK_SET_ERR( rval, "Trouble getting __CONV_TAGS_CREATED tag" );
    int create_conv_tags_flag = 1;
    rval = mbImpl->tag_set_data( convTagsCreated, &_fileSet, 1, &create_conv_tags_flag );MB_CHK_SET_ERR( rval, "Trouble setting __CONV_TAGS_CREATED tag" );

    return MB_SUCCESS;
}

ErrorCode NCHelperDomain::create_mesh( Range& faces )
{
    Interface*& mbImpl = _readNC->mbImpl;
    // std::string& fileName = _readNC->fileName;
    Tag& mGlobalIdTag = _readNC->mGlobalIdTag;
    // const Tag*& mpFileIdTag = _readNC->mpFileIdTag;
    DebugOutput& dbgOut = _readNC->dbgOut;

    bool& culling = _readNC->culling;
    bool& repartition = _readNC->repartition;

    ErrorCode rval;
    int success = 0;

    int local_elems = ( lDims[4] - lDims[1] ) * ( lDims[3] - lDims[0] );
    dbgOut.tprintf( 1, "local cells: %d \n", local_elems );

    // Count how many cells have mask 1 here;
    // basically, read the mask variable on the local elements
    std::string maskstr( "mask" );
    ReadNC::VarData& vmask = _readNC->varInfo[maskstr];

    // mask is (nj, ni)
    vmask.readStarts.push_back( lDims[1] );
    vmask.readStarts.push_back( lDims[0] );
    vmask.readCounts.push_back( lDims[4] - lDims[1] );
    vmask.readCounts.push_back( lDims[3] - lDims[0] );
    std::vector< int > mask( local_elems );
    success = NCFUNCAG( _vara_int )( _fileId, vmask.varId, &vmask.readStarts[0], &vmask.readCounts[0], &mask[0] );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read int data for mask variable" );

    std::vector< int > gids( local_elems );
    int elem_index = 0;
    int global_row_size = gDims[3] - gDims[0];  // size along the first (i) dimension of the global grid
    // Create global ids for all local cells, including those with mask 0 (they may be culled later)
    for( int j = lCDims[1]; j < lCDims[4]; j++ )
        for( int i = lCDims[0]; i < lCDims[3]; i++ )
        {
            gids[elem_index] = j * global_row_size + i + 1;
            elem_index++;
        }
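    // Global ids are 1-based and row-major over the global ni x nj cell grid:
    // gid = j * ni + i + 1, with (i, j) the global cell indices.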
    std::vector< NCDF_SIZE > startsv( 3 );
    startsv[0] = vmask.readStarts[0];
    startsv[1] = vmask.readStarts[1];
    startsv[2] = 0;
    std::vector< NCDF_SIZE > countsv( 3 );
    countsv[0] = vmask.readCounts[0];
    countsv[1] = vmask.readCounts[1];
    countsv[2] = nv;  // number of vertices per element

    // Read xv and yv coords for vertices, and create elements
    std::string xvstr( "xv" );
    ReadNC::VarData& var_xv = _readNC->varInfo[xvstr];
    std::vector< double > xv( local_elems * nv );

    std::vector< NCDF_SIZE > starts_vv, counts_vv;
    starts_vv.resize( 3 );
    counts_vv.resize( 3 );
    bool nv_last = true;
    if( _readNC->dimNames[var_xv.varDims[0]] == std::string( "nv" ) )  // nv is the first dimension
    {
        starts_vv[0] = 0;
        starts_vv[1] = startsv[0];
        starts_vv[2] = startsv[1];
        counts_vv[0] = nv;
        counts_vv[1] = countsv[0];
        counts_vv[2] = countsv[1];
        nv_last = false;
    }
    else
    {
        starts_vv = startsv;
        counts_vv = countsv;
    }
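    // The corner arrays may be laid out either as xv(nj, ni, nv) ("nv last", the default
    // assumption) or as xv(nv, nj, ni); the starts/counts are permuted accordingly, and
    // nv_last records which layout is in effect for later indexing.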
    dbgOut.tprintf( 1, " nv is the last dimension in xv array (0 or 1) %d \n", (int)nv_last );
    success = NCFUNCAG( _vara_double )( _fileId, var_xv.varId, &starts_vv[0], &counts_vv[0], &xv[0] );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read double data for xv variable" );

    std::string yvstr( "yv" );
    ReadNC::VarData& var_yv = _readNC->varInfo[yvstr];
    std::vector< double > yv( local_elems * nv );
    success = NCFUNCAG( _vara_double )( _fileId, var_yv.varId, &starts_vv[0], &counts_vv[0], &yv[0] );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read double data for yv variable" );

    // Read the other cell variables: xc, yc, frac, area
    std::string xcstr( "xc" );
    ReadNC::VarData& var_xc = _readNC->varInfo[xcstr];
    std::vector< double > xc( local_elems );
    success = NCFUNCAG( _vara_double )( _fileId, var_xc.varId, &vmask.readStarts[0], &vmask.readCounts[0], &xc[0] );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read double data for xc variable" );

    std::string ycstr( "yc" );
    ReadNC::VarData& var_yc = _readNC->varInfo[ycstr];
    std::vector< double > yc( local_elems );
    success = NCFUNCAG( _vara_double )( _fileId, var_yc.varId, &vmask.readStarts[0], &vmask.readCounts[0], &yc[0] );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read double data for yc variable" );

    std::string fracstr( "frac" );
    ReadNC::VarData& var_frac = _readNC->varInfo[fracstr];
    std::vector< double > frac( local_elems );
    if( var_frac.varId >= 0 )
    {
        success =
            NCFUNCAG( _vara_double )( _fileId, var_frac.varId, &vmask.readStarts[0], &vmask.readCounts[0], &frac[0] );
        if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read double data for frac variable" );
    }
    std::string areastr( "area" );
    std::vector< double > area( local_elems );
    ReadNC::VarData& var_area = _readNC->varInfo[areastr];
    success = NCFUNCAG( _vara_double )( _fileId, var_area.varId, &vmask.readStarts[0], &vmask.readCounts[0], &area[0] );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read double data for area variable" );
    // Create dense tags for the cell variables
    Tag areaTag, fracTag, xcTag, ycTag;
    rval = mbImpl->tag_get_handle( "area", 1, MB_TYPE_DOUBLE, areaTag, MB_TAG_DENSE | MB_TAG_CREAT );MB_CHK_SET_ERR( rval, "Trouble creating area tag" );
    rval = mbImpl->tag_get_handle( "frac", 1, MB_TYPE_DOUBLE, fracTag, MB_TAG_DENSE | MB_TAG_CREAT );MB_CHK_SET_ERR( rval, "Trouble creating frac tag" );
    rval = mbImpl->tag_get_handle( "xc", 1, MB_TYPE_DOUBLE, xcTag, MB_TAG_DENSE | MB_TAG_CREAT );MB_CHK_SET_ERR( rval, "Trouble creating xc tag" );
    rval = mbImpl->tag_get_handle( "yc", 1, MB_TYPE_DOUBLE, ycTag, MB_TAG_DENSE | MB_TAG_CREAT );MB_CHK_SET_ERR( rval, "Trouble creating yc tag" );

    // Create the mask tag GRID_IMASK (the same name the SCRIP helper uses for its mask),
    // with a default value of 1
    Tag maskTag;
    int def_val = 1;
    rval = mbImpl->tag_get_handle( "GRID_IMASK", 1, MB_TYPE_INTEGER, maskTag, MB_TAG_DENSE | MB_TAG_CREAT, &def_val );MB_CHK_SET_ERR( rval, "Trouble creating GRID_IMASK tag" );

    // Repartition the cells with Zoltan: project the 2D xc/yc cell centers onto the unit sphere,
    // compute a new partition, and send each cell, with its vertices, tags and global id, to the
    // task that will own it
#ifdef MOAB_HAVE_MPI
    int rank = 0;
    int procs = 1;
    bool& isParallel = _readNC->isParallel;
    ParallelComm* myPcomm = NULL;
    if( isParallel )
    {
        myPcomm = _readNC->myPcomm;
        rank = myPcomm->proc_config().proc_rank();
        procs = myPcomm->proc_config().proc_size();
    }

    if( procs >= 2 && repartition )
    {
        // Redistribute local cells after the trivial partition (e.g. apply a Zoltan partition)
        rval = redistribute_cells( myPcomm, xc, yc, xv, yv, frac, mask, area, gids, nv, nv_last );MB_CHK_SET_ERR( rval, "Failed to redistribute local cells" );
        local_elems = (int)xc.size();
        dbgOut.tprintf( 1, "local cells after repartition: %d \n", local_elems );
    }

#endif

    int nb_with_mask1 = 0;
    for( int i = 0; i < local_elems; i++ )
        if( 1 == mask[i] ) nb_with_mask1++;
    dbgOut.tprintf( 1, "local cells with mask 1: %d \n", nb_with_mask1 );

    EntityHandle* conn_arr;
    EntityHandle vtx_handle;
    Range tmp_range;

    // Connectivity will be written into conn_arr, allocated by get_element_connect below

    EntityHandle start_cell;
    EntityType mdb_type = MBVERTEX;
    if( nv == 3 )
        mdb_type = MBTRI;
    else if( nv == 4 )
        mdb_type = MBQUAD;
    else if( nv > 4 )
        mdb_type = MBPOLYGON;
    // for nv = 1 the cells stay vertices (MBVERTEX)

    int num_actual_cells = local_elems;
    if( culling ) num_actual_cells = nb_with_mask1;

    if( nv > 1 && num_actual_cells > 0 )
    {
        rval = _readNC->readMeshIface->get_element_connect( num_actual_cells, nv, mdb_type, 0, start_cell, conn_arr );MB_CHK_SET_ERR( rval, "Failed to create local cells" );
        tmp_range.insert( start_cell, start_cell + num_actual_cells - 1 );
    }

    // Create vertices; first identify distinct ones, within a tolerance
    std::map< Node3D, EntityHandle > vertex_map;
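    // The map keyed on Node3D collapses (nearly) coincident corner points, so a corner shared by
    // neighboring cells becomes a single MOAB vertex; the mapped value will later hold the handle.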

    if( num_actual_cells > 0 )
    {
        // Set vertex coordinates
        // All xv, yv values were read, but only those with the right mask are used

        // Factor to convert degrees to radians
        const double pideg = acos( -1.0 ) / 180.0;
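        // xv/yv store longitude/latitude in degrees; each corner is converted to Cartesian
        // coordinates on the unit sphere: x = cos(lat)cos(lon), y = cos(lat)sin(lon), z = sin(lat)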

        for( elem_index = 0; elem_index < local_elems; elem_index++ )
        {
            if( culling && 0 == mask[elem_index] )
                continue;  // masked-out cell: it gets no MOAB entity, so skip its corners
            dbgOut.tprintf( 3, "elem index %d \n", elem_index );
            for( int k = 0; k < nv; k++ )
            {
                int index_v_arr = nv * elem_index + k;
                if( !nv_last ) index_v_arr = k * local_elems + elem_index;
                double x, y;
                if( nv > 1 )
                {
                    x = xv[index_v_arr];
                    y = yv[index_v_arr];
                    double cosphi = cos( pideg * y );
                    double zmult = sin( pideg * y );
                    double xmult = cosphi * cos( x * pideg );
                    double ymult = cosphi * sin( x * pideg );
                    Node3D pt( xmult, ymult, zmult );
                    vertex_map[pt] = 0;
                    dbgOut.tprintf( 3, " %d x=%5.1f y=%5.1f pt: %f %f %f \n", k, x, y, pt.coords[0], pt.coords[1],
                                    pt.coords[2] );
                }
                else
                {
                    x = xc[elem_index];
                    y = yc[elem_index];
                    Node3D pt( x, y, 0 );
                    vertex_map[pt] = 0;
                }
            }
        }
        int nLocalVertices = (int)vertex_map.size();
        std::vector< double* > arrays;
        EntityHandle start_vertex;
        rval = _readNC->readMeshIface->get_node_coords( 3, nLocalVertices, 0, start_vertex, arrays );MB_CHK_SET_ERR( rval, "Failed to create local vertices" );

        vtx_handle = start_vertex;
        // Copy vertex coordinates into entity sequence coordinate arrays
        // and copy handle into vertex_map.
        double *x = arrays[0], *y = arrays[1], *z = arrays[2];
        for( auto i = vertex_map.begin(); i != vertex_map.end(); ++i )
        {
            i->second = vtx_handle;
            ++vtx_handle;
            *x = i->first.coords[0];
            ++x;
            *y = i->first.coords[1];
            ++y;
            *z = i->first.coords[2];
            ++z;
        }

        // int nj = gDims[4]-gDims[1]; // is it about 1 in irregular cases

        // int local_row_size = lCDims[3] - lCDims[0];
        int index = 0;  // index in the MOAB cell sequence; advances only for cells that are kept

        // Second pass: set the cell connectivity and the per-cell tags
        for( elem_index = 0; elem_index < local_elems; elem_index++ )
        {
            if( culling && 0 == mask[elem_index] )
                continue;  // masked-out cell: no MOAB entity was created for it
            for( int k = 0; k < nv; k++ )
            {
                int index_v_arr = nv * elem_index + k;
                if( !nv_last ) index_v_arr = k * local_elems + elem_index;
                if( nv > 1 )
                {
                    double x = xv[index_v_arr];
                    double y = yv[index_v_arr];
                    double cosphi = cos( pideg * y );
                    double zmult = sin( pideg * y );
                    double xmult = cosphi * cos( x * pideg );
                    double ymult = cosphi * sin( x * pideg );
                    Node3D pt( xmult, ymult, zmult );
                    conn_arr[index * nv + k] = vertex_map[pt];
                }
            }
            EntityHandle cell = start_vertex + index;
            if( nv > 1 ) cell = start_cell + index;
            // Set the other cell tags: xc, yc, area, frac, mask
            rval = mbImpl->tag_set_data( xcTag, &cell, 1, &xc[elem_index] );MB_CHK_SET_ERR( rval, "Failed to set xc tag" );
            rval = mbImpl->tag_set_data( ycTag, &cell, 1, &yc[elem_index] );MB_CHK_SET_ERR( rval, "Failed to set yc tag" );
            rval = mbImpl->tag_set_data( areaTag, &cell, 1, &area[elem_index] );MB_CHK_SET_ERR( rval, "Failed to set area tag" );
            rval = mbImpl->tag_set_data( fracTag, &cell, 1, &frac[elem_index] );MB_CHK_SET_ERR( rval, "Failed to set frac tag" );
            rval = mbImpl->tag_set_data( maskTag, &cell, 1, &mask[elem_index] );MB_CHK_SET_ERR( rval, "Failed to set mask tag" );

            // Set the global id too
            int globalId = gids[elem_index];
            rval = mbImpl->tag_set_data( mGlobalIdTag, &cell, 1, &globalId );MB_CHK_SET_ERR( rval, "Failed to set global id tag" );
            index++;
        }

        rval = mbImpl->add_entities( _fileSet, tmp_range );MB_CHK_SET_ERR( rval, "Failed to add new cells to current file set" );

        // Modify the local file set: merge coincident vertices and correct repeated vertices in elements
        std::vector< Tag > tagList;
        tagList.push_back( mGlobalIdTag );
        tagList.push_back( xcTag );
        tagList.push_back( ycTag );
        tagList.push_back( areaTag );
        tagList.push_back( fracTag );
        tagList.push_back( maskTag );  // not sure this is needed though? on cells or on vertices?
        rval = IntxUtils::remove_padded_vertices( mbImpl, _fileSet, tagList );MB_CHK_SET_ERR( rval, "Failed to remove duplicate vertices" );

        rval = mbImpl->get_entities_by_dimension( _fileSet, 2, faces );MB_CHK_ERR( rval );
        Range all_verts;
        rval = mbImpl->get_connectivity( faces, all_verts );MB_CHK_ERR( rval );
        // printf(" range vert size :%ld \n", all_verts.size());
        rval = mbImpl->add_entities( _fileSet, all_verts );MB_CHK_ERR( rval );

        // Need to add adjacencies; TODO: fix this for all NC readers
        // (logic copied from migrate mesh in ParCommGraph)
        Core* mb = (Core*)mbImpl;
        AEntityFactory* adj_fact = mb->a_entity_factory();
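        // Make sure vertex-to-element adjacencies cover the newly created faces: either build
        // them from scratch, or notify the factory about each new face if they already exist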
        if( !adj_fact->vert_elem_adjacencies() )
            adj_fact->create_vert_elem_adjacencies();
        else
        {
            for( Range::iterator it = faces.begin(); it != faces.end(); ++it )
            {
                EntityHandle eh = *it;
                const EntityHandle* conn = NULL;
                int num_nodes = 0;
                rval = mb->get_connectivity( eh, conn, num_nodes );MB_CHK_ERR( rval );
                adj_fact->notify_create_entity( eh, conn, num_nodes );
            }
        }
    }

#ifdef MOAB_HAVE_MPI
    myPcomm = _readNC->myPcomm;  // we will have to set the global id on vertices in any case
    if( myPcomm && procs >= 2 )
    {
        double tol = 1.e-12;  // this is the same as the static tolerance in NCHelper
        ParallelMergeMesh pmm( myPcomm, tol );
        rval = pmm.merge( _fileSet,
                          /* do not do local merge*/ false,
                          /* 2d cells*/ 2 );MB_CHK_SET_ERR( rval, "Failed to merge vertices in parallel" );
        // Assign global ids only for vertices; cells already have them
        rval = myPcomm->assign_global_ids( _fileSet, /*dim*/ 0 );MB_CHK_ERR( rval );
    }
#endif

    return MB_SUCCESS;
}

#ifdef MOAB_HAVE_MPI
ErrorCode NCHelperDomain::redistribute_cells( ParallelComm* myPcomm,
                                              std::vector< double >& xc,    // center x
                                              std::vector< double >& yc,    // center y
                                              std::vector< double >& xv,    // vertex coords
                                              std::vector< double >& yv,
                                              std::vector< double >& frac,  // fractions
                                              std::vector< int >& masks,    // mask
                                              std::vector< double >& area,  // area
                                              std::vector< int >& gids,     // global ids
                                              int nv,                       // number of vertices per cell
                                              bool nv_last )                // layout of xv, yv: nv last or first
{

#ifdef MOAB_HAVE_ZOLTAN
    // Use Zoltan (RCB) on the cell centers, projected from lon/lat degrees onto the unit sphere
    int num_local_cells = (int)gids.size();
    std::vector< double > xi( num_local_cells ), yi( num_local_cells ), zi( num_local_cells );
    const double pideg = acos( -1.0 ) / 180.0;
    for( size_t i = 0; i < xc.size(); i++ )
    {
        double x = xc[i];
        double y = yc[i];
        double cosphi = cos( pideg * y );
        double zmult = sin( pideg * y );
        double xmult = cosphi * cos( x * pideg );
        double ymult = cosphi * sin( x * pideg );
        xi[i] = xmult;
        yi[i] = ymult;
        zi[i] = zmult;
    }
    Interface*& mbImpl = _readNC->mbImpl;
    ZoltanPartitioner* mbZTool = new ZoltanPartitioner( mbImpl, myPcomm, false, 0, NULL );
    std::vector< int > dest( num_local_cells );
    ErrorCode rval = mbZTool->repartition_to_procs( xi, yi, zi, gids, "RCB", dest );MB_CHK_SET_ERR( rval, "Error in Zoltan partitioning" );
    delete mbZTool;
    // Now use the crystal router to send the cell data to the destination tasks
    moab::TupleList tl;
    unsigned numr = 2 * nv + 4;  // doubles per cell: area, center lon, center lat, frac, then the xv and yv corners
    tl.initialize( 3, 0, 0, numr, num_local_cells );  // ints per cell: destination proc, global id (dof), mask
    tl.enableWriteAccess();
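    // Each tuple thus carries one cell: 3 integers (destination rank, global id, mask value) and
    // numr = 2*nv + 4 doubles (area, xc, yc, frac, the nv xv corners, the nv yv corners), matching
    // the packing order used below and the unpacking order after the transfer.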
    // populate the tuple list
    for( unsigned i = 0; i < num_local_cells; i++ )
    {
        int gdof = gids[i];
        int to_proc = dest[i];
        int mask = masks[i];
        int n = tl.get_n();
        tl.vi_wr[3 * n] = to_proc;
        tl.vi_wr[3 * n + 1] = gdof;
        tl.vi_wr[3 * n + 2] = mask;
        tl.vr_wr[n * numr] = area[i];
        tl.vr_wr[n * numr + 1] = xc[i];
        tl.vr_wr[n * numr + 2] = yc[i];
        tl.vr_wr[n * numr + 3] = frac[i];
        for( int k = 0; k < nv; k++ )
        {
            int index_v_arr = nv * i + k;
            if( !nv_last ) index_v_arr = k * num_local_cells + n;
            tl.vr_wr[n * numr + 4 + k] = xv[index_v_arr];
            tl.vr_wr[n * numr + 4 + nv + k] = yv[index_v_arr];
        }
        tl.inc_n();
    }

    // Now do the heavy communication
    ( myPcomm->proc_config().crystal_router() )->gs_transfer( 1, tl, 0 );
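    // gs_transfer routes each tuple to the rank stored in its first integer field (the destination
    // proc), so after the call tl holds exactly the cells assigned to this task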

    // After communication each processor holds the tuples sent to it;
    // rearrange the vectors by global id
    moab::TupleList::buffer sort_buffer;
    int N = tl.get_n();
    sort_buffer.buffer_init( N );
    tl.sort( 1, &sort_buffer );  // 1 is the index of the global id field
    xc.resize( N );
    yc.resize( N );
    xv.resize( N * nv );
    yv.resize( N * nv );
    frac.resize( N );
    masks.resize( N );
    area.resize( N );
    gids.resize( N );
    for( int n = 0; n < N; n++ )
    {
        gids[n] = tl.vi_wr[3 * n + 1];
        masks[n] = tl.vi_wr[3 * n + 2];
        area[n] = tl.vr_wr[n * numr];
        xc[n] = tl.vr_wr[n * numr + 1];
        yc[n] = tl.vr_wr[n * numr + 2];
        frac[n] = tl.vr_wr[n * numr + 3];
        for( int k = 0; k < nv; k++ )
        {
            int index_v_arr = nv * n + k;
            if( !nv_last ) index_v_arr = k * N + n;
            xv[index_v_arr] = tl.vr_wr[n * numr + 4 + k];
            yv[index_v_arr] = tl.vr_wr[n * numr + 4 + nv + k];
        }
    }

    return MB_SUCCESS;
#else
    MB_CHK_SET_ERR( MB_FAILURE, "MOAB needs to be configured with Zoltan to repartition domain cells" );
#endif
}

#endif

}  // namespace moab
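
// Example (illustrative sketch, not part of this file): a domain file is normally read through
// Interface::load_file, which dispatches to ReadNC and, for files with ni/nj/nv dimensions, to
// this helper; afterwards the 2D cells in the file set carry the xc/yc/area/frac/GRID_IMASK tags
// created above. The file name below is hypothetical.
//
//   moab::Core mb;
//   moab::EntityHandle fileSet;
//   mb.create_meshset( moab::MESHSET_SET, fileSet );
//   if( moab::MB_SUCCESS == mb.load_file( "domain.nc", &fileSet ) )
//   {
//       moab::Range cells;
//       mb.get_entities_by_dimension( fileSet, 2, cells );
//       moab::Tag maskTag;
//       mb.tag_get_handle( "GRID_IMASK", 1, moab::MB_TYPE_INTEGER, maskTag );
//       std::vector< int > mask( cells.size() );
//       if( !cells.empty() ) mb.tag_get_data( maskTag, cells, &mask[0] );
//   }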