Mesh Oriented datABase (version 5.5.1)
An array-based unstructured mesh library
quality.cpp
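// Mesh quality reporting tool: reads a mesh file, evaluates every applicable
// Verdict quality measure on each cell, face, and edge, and prints the global
// minimum and maximum of each measure per entity type. With a second argument
// (serial runs only), the per-entity values are also written to a CSV file.
// Example invocation (the executable name depends on the build setup):
//     mpiexec -np 4 ./quality partitioned_mesh.h5m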
#include <iostream>
#include <cstdlib>
#include <vector>
#include <map>
#include <string>
#include <cstdio>
#include <iomanip>
#include <fstream>
#include "moab/MOABConfig.h"
#ifdef MOAB_HAVE_MPI
#include "moab_mpi.h"
#include "moab/ParallelComm.hpp"
#endif
#if !defined( _MSC_VER ) && !defined( __MINGW32__ )
#include <termios.h>
#include <sys/ioctl.h>
#endif
#include <cmath>
#include <cassert>
#include <cfloat>

#include "moab/Core.hpp"
#include "moab/Range.hpp"
#include "MBTagConventions.hpp"
#include "moab/Interface.hpp"
#include "moab/VerdictWrapper.hpp"

using namespace moab;

static void print_usage( const char* name, std::ostream& stream )
{
    stream << "Usage: " << name << " <input_file> [<output_file>]" << std::endl;
}

int main( int argc, char* argv[] )
{
    int proc_id = 0, size = 1;
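    // In MPI builds, initialize MPI and find this rank's id and the number of
    // ranks; in serial builds proc_id stays 0 and size stays 1.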
#ifdef MOAB_HAVE_MPI
    MPI_Init( &argc, &argv );
    MPI_Comm_rank( MPI_COMM_WORLD, &proc_id );
    MPI_Comm_size( MPI_COMM_WORLD, &size );
#endif
    if( argc < 2 && 0 == proc_id )
    {
        print_usage( argv[0], std::cerr );
#ifdef MOAB_HAVE_MPI
        MPI_Finalize();
#endif
        return 1;
    }
    Core mb;
    std::string read_options;
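    // In parallel, each rank reads only its own part of the mesh (READ_PART),
    // assigns parts by the PARALLEL_PARTITION tag, and resolves entities that
    // are shared across rank boundaries.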
    if( size > 1 ) read_options = "PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION;PARALLEL_RESOLVE_SHARED_ENTS";

    if( size > 1 && argc > 2 )
    {
        std::cerr << " cannot use the verbose CSV output option in parallel \n";
#ifdef MOAB_HAVE_MPI
        MPI_Finalize();
#endif
        return 1;
    }

    char* out_file = NULL;
    std::ofstream ofile;

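    // An optional second argument names a CSV file that will receive the full
    // per-entity table of quality values (verbose mode, serial runs only).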
    if( argc == 3 )
    {
        std::cout << " writing verbose output to a CSV file " << argv[2] << "\n";
        out_file = argv[2];
        ofile.open( out_file );
    }
    if( MB_SUCCESS != mb.load_file( argv[1], 0, read_options.c_str() ) )
    {
        fprintf( stderr, "Error reading file: %s\n", argv[1] );
#ifdef MOAB_HAVE_MPI
        MPI_Finalize();
#endif
        return 1;
    }

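    // VerdictWrapper evaluates the Verdict suite of element quality measures
    // (edge ratio, skew, jacobian, ...) on individual MOAB entities.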
    VerdictWrapper vw( &mb );
    vw.set_size( 1. );  // for relative size measures; maybe it should be user input
    Range entities;
    ErrorCode rval = mb.get_entities_by_handle( 0, entities );  // all entities in the root set
    if( MB_SUCCESS != rval )
    {
        fprintf( stderr, "Error getting entities from file %s\n", argv[1] );
#ifdef MOAB_HAVE_MPI
        MPI_Finalize();
#endif
        return 1;
    }
    // get all faces and edges adjacent to the cells, forcing their creation
    // if they do not exist yet (third argument of get_adjacencies)
    Range allfaces, alledges;
    Range cells = entities.subset_by_dimension( 3 );
    rval = mb.get_adjacencies( cells, 2, true, allfaces, Interface::UNION );
    if( MB_SUCCESS != rval )
    {
        fprintf( stderr, "Error getting all faces\n" );
#ifdef MOAB_HAVE_MPI
        MPI_Finalize();
#endif
        return 1;
    }

    rval = mb.get_adjacencies( allfaces, 1, true, alledges, Interface::UNION );
    if( MB_SUCCESS != rval )
    {
        fprintf( stderr, "Error getting all edges\n" );
#ifdef MOAB_HAVE_MPI
        MPI_Finalize();
#endif
        return 1;
    }
    entities.merge( allfaces );
    entities.merge( alledges );
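    // Walk the entity types from sets down to edges; types with no defined
    // quality measures, such as entity sets, are skipped.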
    for( EntityType et = MBENTITYSET; et >= MBEDGE; et-- )
    {
        int num_qualities = vw.num_qualities( et );
        if( !num_qualities ) continue;
        Range owned = entities.subset_by_type( et );
        std::map< QualityType, double > qualities, minq, maxq;
        int ne_local  = (int)owned.size();
        int ne_global = ne_local;

#ifdef MOAB_HAVE_MPI
        int mpi_err;
        if( size > 1 )
        {
            // keep only the entities owned by this rank, so shared entities
            // are not processed more than once
            ParallelComm* pcomm = ParallelComm::get_pcomm( &mb, 0 );
            Range current = owned;
            owned.clear();
            rval = pcomm->filter_pstatus( current, PSTATUS_NOT_OWNED, PSTATUS_NOT, -1, &owned );
            if( rval != MB_SUCCESS )
            {
                MPI_Finalize();
                return 1;
            }
            ne_local = (int)owned.size();
            mpi_err  = MPI_Reduce( &ne_local, &ne_global, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD );
            if( mpi_err )
            {
                MPI_Finalize();
                return 1;
            }
        }
#endif
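        // ne_global counts entities of this type across all ranks; a rank that
        // owns none of them still participates in the reductions below. The
        // first owned entity seeds the min/max maps, the rest update them.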
        if( ne_global > 0 )
        {
            if( ne_local > 0 )
            {
                Range::iterator it = owned.begin();
                rval = vw.all_quality_measures( *it, qualities );
                if( MB_SUCCESS != rval )
                {
                    fprintf( stderr, "Error getting quality for entity type %d with id %ld \n", et,
                             (long)mb.id_from_handle( *it ) );
#ifdef MOAB_HAVE_MPI
                    MPI_Finalize();
#endif
                    return 1;
                }
                if( ofile.is_open() )
                {
                    // write the header for this entity type, then the first row
                    // of values, separated by commas
                    ofile << " There are " << ne_local << " entities of type " << vw.entity_type_name( et )
                          << " with " << qualities.size() << " qualities:\n"
                          << " Entity id ";
                    for( std::map< QualityType, double >::iterator qit = qualities.begin();
                         qit != qualities.end(); ++qit )
                    {
                        ofile << ", " << vw.quality_name( qit->first );
                    }
                    ofile << "\n";
                    ofile << mb.id_from_handle( *it );
                    for( std::map< QualityType, double >::iterator qit = qualities.begin();
                         qit != qualities.end(); ++qit )
                    {
                        ofile << ", " << qit->second;
                    }
                    ofile << "\n";
                }
                minq = qualities;
                maxq = qualities;
                ++it;
                for( ; it != owned.end(); ++it )
                {
                    rval = vw.all_quality_measures( *it, qualities );
                    if( MB_SUCCESS != rval )
                    {
                        fprintf( stderr, "Error getting quality for entity type %d with id %ld \n", et,
                                 (long)mb.id_from_handle( *it ) );
#ifdef MOAB_HAVE_MPI
                        MPI_Finalize();
#endif
                        return 1;
                    }
                    if( ofile.is_open() )
                    {
                        ofile << mb.id_from_handle( *it );
                        for( std::map< QualityType, double >::iterator qit = qualities.begin();
                             qit != qualities.end(); ++qit )
                        {
                            ofile << ", " << qit->second;
                        }
                        ofile << "\n";
                    }
                    // the three maps have identical key sets, so they can be
                    // walked in lockstep
                    std::map< QualityType, double >::iterator minit = minq.begin();
                    std::map< QualityType, double >::iterator maxit = maxq.begin();
                    for( std::map< QualityType, double >::iterator mit = qualities.begin();
                         mit != qualities.end(); ++mit, ++minit, ++maxit )
                    {
                        if( mit->second > maxit->second ) maxit->second = mit->second;
                        if( mit->second < minit->second ) minit->second = mit->second;
                    }
                }
            }
            if( 0 == proc_id )
            {
                std::cout << " \n\n " << ne_global << " entities of type " << vw.entity_type_name( et ) << "\n";
                std::cout << std::setw( 30 ) << "Quality Name" << std::setw( 15 ) << " MIN" << std::setw( 15 )
                          << " MAX"
                          << "\n";
            }

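            // MB_EDGE_RATIO is the first value of the QualityType enum; step
            // through the enum in order, skipping measures that do not apply
            // to this entity type, until num_qualities measures are printed.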
            QualityType quality_type = MB_EDGE_RATIO;
            for( int i = 0; i < num_qualities; i++, quality_type = (QualityType)( quality_type + 1 ) )
            {
                while( !( vw.possible_quality( et, quality_type ) ) && quality_type < MB_QUALITY_COUNT )
                    quality_type = (QualityType)( quality_type + 1 );  // will get them in order
                const char* name_q = vw.quality_name( quality_type );
                double local_min, global_min;
                double local_max, global_max;
                if( ne_local > 0 )
                {
                    local_min = minq[quality_type];
                    local_max = maxq[quality_type];
                }
                else
                {
                    local_min = 1.e38;   // so this task has no entities of this type
                    local_max = -1.e38;  // it can get here only in parallel
                }
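                // combine the per-rank extrema: reduce to rank 0 under MPI,
                // or just copy in a serial build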
#ifdef MOAB_HAVE_MPI
                mpi_err = MPI_Reduce( &local_min, &global_min, 1, MPI_DOUBLE, MPI_MIN, 0, MPI_COMM_WORLD );
                if( mpi_err )
                {
                    MPI_Finalize();
                    return 1;
                }
                mpi_err = MPI_Reduce( &local_max, &global_max, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD );
                if( mpi_err )
                {
                    MPI_Finalize();
                    return 1;
                }
#else
                global_min = local_min;
                global_max = local_max;
#endif
                if( 0 == proc_id )
                {
                    std::cout << std::setw( 30 ) << name_q << std::setw( 15 ) << global_min << std::setw( 15 )
                              << global_max << "\n";
                }
            }
        }
    }  // end loop over entity types
    if( ofile.is_open() )
    {
        ofile.close();
    }
#ifdef MOAB_HAVE_MPI
    MPI_Finalize();
#endif
    return 0;
}