1
33
#include "moab/Core.hpp"
#include "moab/ParallelComm.hpp"
#include "MBParallelConventions.h"
#ifdef MOAB_HAVE_MBCOUPLER
#include "mbcoupler/Coupler.hpp"
#include "mbcoupler/ElemUtil.hpp"
#else
#error Requires MOAB to be built with MBCoupler
#endif
#include "moab/MeshGeneration.hpp"
#include "moab/ProgOptions.hpp"

#include <cmath>
#include <iostream>
#include <string>
#include <vector>
45
46 using namespace moab;
47 using std::string;
48
// Analytic scalar field used to seed the source mesh tag and to verify
// the interpolated values on the target mesh:
//   f(x, y, z) = sin(pi*x) * cos(pi*y) * sin(pi*z)
double physField( double x, double y, double z )
{
    return sin( M_PI * x ) * cos( M_PI * y ) * sin( M_PI * z );
}
56
57 int main( int argc, char* argv[] )
58 {
59 int proc_id = 0, size = 1;
60
61 MPI_Init( &argc, &argv );
62 MPI_Comm_rank( MPI_COMM_WORLD, &proc_id );
63 MPI_Comm_size( MPI_COMM_WORLD, &size );
64
65 Core mcore;
66 Interface* mb = &mcore;
67 EntityHandle fileset1, fileset2;
68 MeshGeneration::BrickOpts opts;
69
70 opts.A = opts.B = opts.C = 1;
71 opts.M = opts.N = opts.K = 1;
72 opts.blockSize = 4;
73 opts.xsize = opts.ysize = opts.zsize = 1.;
74 opts.ui = CartVect( 1., 0, 0. );
75 opts.uj = CartVect( 0., 1., 0. );
76 opts.uk = CartVect( 0., 0., 1. );
77 opts.newMergeMethod = opts.quadratic = opts.keep_skins = opts.tetra = false;
78 opts.adjEnts = opts.parmerge = false;
79 opts.GL = 0;
80
81 ProgOptions popts;
82
83 popts.addOpt< int >( string( "blockSize,b" ), string( "Block size of mesh (default=4)" ), &opts.blockSize );
84 popts.addOpt< int >( string( "xproc,M" ), string( "Number of processors in x dir (default=1)" ), &opts.M );
85 popts.addOpt< int >( string( "yproc,N" ), string( "Number of processors in y dir (default=1)" ), &opts.N );
86 popts.addOpt< int >( string( "zproc,K" ), string( "Number of processors in z dir (default=1)" ), &opts.K );
87
88 popts.addOpt< int >( string( "xblocks,A" ), string( "Number of blocks on a task in x dir (default=2)" ), &opts.A );
89 popts.addOpt< int >( string( "yblocks,B" ), string( "Number of blocks on a task in y dir (default=2)" ), &opts.B );
90 popts.addOpt< int >( string( "zblocks,C" ), string( "Number of blocks on a task in x dir (default=2)" ), &opts.C );
91
92 popts.addOpt< double >( string( "xsize,x" ), string( "Total size in x direction (default=1.)" ), &opts.xsize );
93 popts.addOpt< double >( string( "ysize,y" ), string( "Total size in y direction (default=1.)" ), &opts.ysize );
94 popts.addOpt< double >( string( "zsize,z" ), string( "Total size in z direction (default=1.)" ), &opts.zsize );
95
96 popts.addOpt< void >( "newMerge,w", "use new merging method", &opts.newMergeMethod );
97
98 popts.addOpt< void >( "quadratic,q", "use hex 27 elements", &opts.quadratic );
99
100 popts.addOpt< void >( "keep_skins,k", "keep skins with shared entities", &opts.keep_skins );
101
102 popts.addOpt< void >( "tetrahedrons,t", "generate tetrahedrons", &opts.tetra );
103
104 popts.addOpt< void >( "faces_edges,f", "create all faces and edges", &opts.adjEnts );
105
106 popts.addOpt< int >( string( "ghost_layers,g" ), string( "Number of ghost layers (default=0)" ), &opts.GL );
107
108 popts.addOpt< void >( "parallel_merge,p", "use parallel mesh merge, not vertex ID based merge", &opts.parmerge );
109
110 Coupler::Method method = Coupler::LINEAR_FE;
111
112 double toler = 1.e-6;
113 popts.addOpt< double >( string( "eps,e" ), string( "tolerance for coupling, used in locating points" ), &toler );
114
115 bool writeMeshes = false;
116 popts.addOpt< void >( "print,p", "write meshes", &writeMeshes );
117
118 popts.parseCommandLine( argc, argv );
119
120 double start_time = MPI_Wtime();
121
122 ErrorCode rval = mb->create_meshset( MESHSET_SET, fileset1 );MB_CHK_ERR( rval );
123 rval = mb->create_meshset( MESHSET_SET, fileset2 );MB_CHK_ERR( rval );
124
125 ParallelComm* pc1 = new ParallelComm( mb, MPI_COMM_WORLD );
126 MeshGeneration* mgen1 = new MeshGeneration( mb, pc1, fileset1 );
127
128 rval = mgen1->BrickInstance( opts );MB_CHK_ERR( rval );
129
130 double instance_time = MPI_Wtime();
131 double current = instance_time;
132 if( !proc_id ) std::cout << " instantiate first mesh " << instance_time - start_time << "\n";
133
134 std::string interpTag( "interp_tag" );
135 Tag tag;
136 rval = mb->tag_get_handle( interpTag.c_str(), 1, MB_TYPE_DOUBLE, tag, MB_TAG_CREAT | MB_TAG_DENSE );MB_CHK_ERR( rval );
137
138 Range src_elems;
139 rval = pc1->get_part_entities( src_elems, 3 );MB_CHK_ERR( rval );
140 Range src_verts;
141 rval = mb->get_connectivity( src_elems, src_verts );MB_CHK_ERR( rval );
142 for( Range::iterator vit = src_verts.begin(); vit != src_verts.end(); ++vit )
143 {
144 EntityHandle vert = *vit;
145
146 double vertPos[3];
147 mb->get_coords( &vert, 1, vertPos );
148
149 double fieldValue = physField( vertPos[0], vertPos[1], vertPos[2] );
150
151 rval = mb->tag_set_data( tag, &vert, 1, &fieldValue );MB_CHK_ERR( rval );
152 }
153
154 double setTag_time = MPI_Wtime();
155 if( !proc_id ) std::cout << " set tag " << setTag_time - current;
156 current = instance_time;
157
158 int tmp1 = opts.K;
159 opts.K = opts.M;
160 opts.M = tmp1;
161 opts.tetra = !opts.tetra;
162 opts.blockSize++;
163
164 ParallelComm* pc2 = new ParallelComm( mb, MPI_COMM_WORLD );
165 MeshGeneration* mgen2 = new MeshGeneration( mb, pc2, fileset2 );
166
167 rval = mgen2->BrickInstance( opts );MB_CHK_ERR( rval );
168
169 double instance_second = MPI_Wtime();
170 if( !proc_id ) std::cout << " instance second mesh" << instance_second - current << "\n";
171 current = instance_second;
172
173
174 if( writeMeshes )
175 {
176 rval = mb->write_file( "mesh1.h5m", 0, ";;PARALLEL=WRITE_PART;CPUTIME;PARALLEL_COMM=0;", &fileset1, 1 );MB_CHK_SET_ERR( rval, "Can't write in parallel mesh 1" );
177 rval = mb->write_file( "mesh2.h5m", 0, ";;PARALLEL=WRITE_PART;CPUTIME;PARALLEL_COMM=1;", &fileset2, 1 );MB_CHK_SET_ERR( rval, "Can't write in parallel mesh 1" );
178 double write_files = MPI_Wtime();
179 if( !proc_id ) std::cout << " write files " << write_files - current << "\n";
180 current = write_files;
181 }
182
183
184 Coupler mbc( mb, pc1, src_elems, 0 );
185
186 double instancecoupler = MPI_Wtime();
187 if( !proc_id ) std::cout << " instance coupler " << instancecoupler - current << "\n";
188 current = instancecoupler;
189
190
191
192
193 std::vector< double > vpos;
194 int numPointsOfInterest = 0;
195
196 Range targ_elems;
197 Range targ_verts;
198
199
200 rval = pc2->get_part_entities( targ_elems, 3 );MB_CHK_ERR( rval );
201
202 rval = mb->get_adjacencies( targ_elems, 0, false, targ_verts, Interface::UNION );MB_CHK_ERR( rval );
203 Range tmp_verts;
204
205 rval = pc2->get_pstatus_entities( 0, PSTATUS_NOT_OWNED, tmp_verts );MB_CHK_ERR( rval );
206 targ_verts = subtract( targ_verts, tmp_verts );
207
208 numPointsOfInterest = (int)targ_verts.size();
209 vpos.resize( 3 * targ_verts.size() );
210 rval = mb->get_coords( targ_verts, &vpos[0] );MB_CHK_ERR( rval );
211
212
213 rval = mbc.locate_points( &vpos[0], numPointsOfInterest, 0, toler );MB_CHK_ERR( rval );
214
215 double locatetime = MPI_Wtime();
216 if( !proc_id ) std::cout << " locate points: " << locatetime - current << "\n";
217 current = locatetime;
218
219
220 std::vector< double > field( numPointsOfInterest );
221
222 rval = mbc.interpolate( method, interpTag, &field[0] );MB_CHK_ERR( rval );
223
224
225 double err_max = 0;
226 for( int i = 0; i < numPointsOfInterest; i++ )
227 {
228 double trval = physField( vpos[3 * i], vpos[3 * i + 1], vpos[3 * i + 2] );
229 double err2 = fabs( trval - field[i] );
230 if( err2 > err_max ) err_max = err2;
231 }
232
233 double interpolateTime = MPI_Wtime();
234 if( !proc_id ) std::cout << " interpolate points: " << interpolateTime - current << "\n";
235 current = interpolateTime;
236
237 double gerr;
238 MPI_Allreduce( &err_max, &gerr, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD );
239 if( 0 == proc_id ) std::cout << "max err " << gerr << "\n";
240
241 delete mgen1;
242 delete mgen2;
243
244 MPI_Finalize();
245
246 return 0;
247 }