Mesh Oriented datABase  (version 5.5.1)
An array-based unstructured mesh library
PushParMeshIntoMoabF90.F90
Go to the documentation of this file.
! PushParMeshIntoMoabF90: push parallel mesh into moab, F90 version
!
! This program shows how to push a mesh into MOAB in parallel from Fortran90, with sufficient
! information to resolve boundary sharing and exchange a layer of ghost information.
! To successfully link this example, you need to specify FCFLAGS that include:
! a) -DMOAB_HAVE_MPI, and
! b) flags required to link Fortran90 MPI programs with the C++ compiler; these flags
! can often be found on your system by inspecting the output of 'mpif90 -show'
! For example, using gcc, the link line looks like:
! make MOAB_DIR=<moab install dir> FCFLAGS="-DMOAB_HAVE_MPI -I/usr/lib/openmpi/include -pthread -I/usr/lib/openmpi/lib -L/usr/lib/openmpi/lib -lmpi_f90 -lmpi_f77 -lmpi -lopen-rte -lopen-pal -ldl -Wl,--export-dynamic -lnsl -lutil -lm -ldl" PushParMeshIntoMoabF90
!
! Usage: PushParMeshIntoMoab

! Abort the program on a nonzero iMesh/MPI return code.
! NOTE: the C preprocessor is case-sensitive, so every invocation must spell
! ERROR exactly as defined here.
#define ERROR(rval) if (0 .ne. rval) call exit(1)

program pushparmeshintomoab

  use iso_c_binding
  implicit none

#include "moab/MOABConfig.h"
#ifdef MOAB_HAVE_MPI
# include "mpif.h"
# include "iMeshP_f.h"
#else
# include "iMesh_f.h"
#endif

  ! declarations
  ! imesh is the instance handle
  imesh_instance imesh
  ! NUMV, NUME, NVPERE are hardwired here; these are for the whole mesh,
  ! the local (per-rank) mesh is determined later
  integer numv, nume, nvpere
  parameter(numv = 8)   ! # vertices in whole mesh
  parameter(nume = 6)   ! # elements in whole mesh
  parameter(nvpere = 4) ! # vertices per element
  ! ents, verts will be arrays storing vertex/entity handles
  ibase_entityhandle, pointer :: ents(:), verts(:)
  ibase_entitysethandle root_set
  TYPE(c_ptr) :: vertsptr, entsptr
  ! storage for vertex positions, element connectivity indices, global vertex ids
  real*8 coords(0:3*numv-1)
  integer iconn(0:4*nume-1), gids(0:numv-1)
  !
  ! local variables
  integer lgids(0:numv-1), lconn(0:4*nume-1)
  real*8 lcoords(0:3*numv-1)
  integer lnumv, lvids(0:numv-1), gvids(0:numv-1)
  integer lvpe, ltp ! lvpe = # vertices per entity, ltp = element type
  integer ic, ie, iv, istart, iend, ierr, indv, lnume, rank, sz

#ifdef MOAB_HAVE_MPI
  ! local variables for parallel runs
  imeshp_partitionhandle imeshp
#endif

  ! vertex positions, cube; side 2
  ! (first index varying fastest)
  data coords / &
       -1., -1., -1., 1., -1., -1., 1., 1., -1., -1., 1., -1., &
       -1., -1., 1., 1., -1., 1., 1., 1., 1., -1., 1., 1. /

  ! quad index numbering, each quad ccw, sides then bottom then top
  data iconn / &
       0, 1, 5, 4, &
       1, 2, 6, 5, &
       2, 3, 7, 6, &
       3, 0, 4, 7, &
       0, 3, 2, 1, &
       4, 5, 6, 7 /

  data lvpe /4/ ! quads in this example
  data ltp / imesh_quadrilateral / ! from iBase_f.h

  ! initialize global vertex ids (1-based)
  do iv = 0, numv-1
     lgids(iv) = iv+1
  end do

#ifdef MOAB_HAVE_MPI
  ! init the parallel partition
  call mpi_init(ierr)
  call mpi_comm_size(mpi_comm_world, sz, ierr)
  call mpi_comm_rank(mpi_comm_world, rank, ierr)
  ERROR(ierr)
  ! compute starting/ending element numbers; the last rank absorbs the remainder
  lnume = nume / sz
  istart = rank * lnume
  iend = istart + lnume - 1
  if (rank .eq. sz-1) then
     iend = nume-1
     lnume = iend - istart + 1
  endif
  ! for my elements, figure out which vertices I use and accumulate local indices and coords
  ! lvids stores the local 0-based index for each vertex; -1 means vertex i isn't used locally
  ! also build up connectivity indices for local elements, in lconn
  do iv = 0, numv-1
     lvids(iv) = -1
  end do
  lnumv = -1
  do ie = istart, iend
     do iv = 0, lvpe-1
        indv = iconn(lvpe*ie + iv)
        if (lvids(indv) .eq. -1) then
           lnumv = lnumv + 1 ! increment local # verts
           do ic = 0, 2      ! cache local coords
              lcoords(3*lnumv+ic) = coords(3*indv+ic)
           end do
           lvids(indv) = lnumv
           gvids(lnumv) = 1+indv ! global id = 1-based whole-mesh index
        end if
        lconn(lvpe*(ie-istart)+iv) = lvids(indv)
     end do ! do iv
  end do ! do ie

  ! lnumv held the highest local index; convert to a count
  lnumv = lnumv + 1

  ! now create the mesh; this also initializes parallel sharing and ghost exchange
  imesh = 0
  imeshp = 0
  call create_mesh(imesh, imeshp, mpi_comm_world, lnumv, lnume, gvids, lvpe, ltp, lcoords, lconn, &
       vertsptr, entsptr, ierr)
  ERROR(ierr)
  call c_f_pointer(vertsptr, verts, [lnumv])
  call c_f_pointer(entsptr, ents, [lnume])

  ! get/report number of vertices, elements
  call imesh_getrootset(%VAL(imesh), root_set, ierr)
  ERROR(ierr)
  iv = 0
  ie = 0
  call imeshp_getnumoftypeall(%VAL(imesh), %VAL(imeshp), %VAL(root_set), %VAL(ibase_vertex), iv, ierr)
  ERROR(ierr)
  call imeshp_getnumoftypeall(%VAL(imesh), %VAL(imeshp), %VAL(root_set), %VAL(ibase_face), ie, ierr)
  ERROR(ierr)
  if (rank .eq. 0) then
     write(0,*) "Number of vertices = ", iv
     write(0,*) "Number of entities = ", ie
  endif

  ! from here, can use verts and ents as (1-based) arrays of entity handles for input to other iMesh functions

  call mpi_finalize(ierr)
#else
  write(0, *) "compile with MPI for better experience\n"
#endif
  stop
end program pushparmeshintomoab
150 
#ifdef MOAB_HAVE_MPI
subroutine create_mesh( &
     ! interfaces
     imesh, imeshp, &
     ! input
     comm, numv, nume, vgids, nvpe, tp, posn, iconn, &
     ! output
     vertsPtr, entsPtr, ierr)
  !
  ! create a mesh with numv vertices and nume elements, with elements of type tp
  ! vertices have positions in posn (3 coordinates each, interleaved xyzxyz...), indexed from 0
  ! elements have nvpe vertices per entity, with connectivity indices stored in iconn, referencing
  ! vertices using 0-based indices; vertex and entity handles are output in arrays passed in
  !
  ! if imesh/imeshp are 0, imesh/imeshp are initialized in this subroutine
  !
  ! on return, ierr is the iMesh/iMeshP status of the last failed call (0 on success);
  ! most failures abort via the ERROR macro instead of returning
  !

  use iso_c_binding
  implicit none

# include "iMeshP_f.h"
# include "mpif.h"

  ! subroutine arguments
  imesh_instance imesh
  TYPE(c_ptr) :: vertsptr, entsptr
  integer numv, nume, nvpe, vgids(0:*), iconn(0:*), ierr, tp
  real*8 posn(0:*)
#ifdef MOAB_HAVE_MPI
  imeshp_partitionhandle imeshp
  integer comm
#endif

  ! local variables
  integer comm_sz, comm_rank, numa, numo, iv, ie
  TYPE(c_ptr) :: statsptr
  integer, allocatable, target :: stats(:)
  ibase_taghandle tagh
  integer i
  ibase_entityhandle, pointer :: verts(:), ents(:)
  ibase_entityhandle, allocatable :: conn(:)
  ibase_entitysethandle root_set
  ibase_entitysethandle file_set
#ifdef MOAB_HAVE_MPI
  ibase_handle_t mpi_comm_c
  TYPE(c_ptr) :: partsptr
  imeshp_parthandle, pointer :: parts(:)
  imeshp_parthandle part
  integer partsa, partso
  character (len=10) filename
#endif

  ! create the Mesh instance if the caller did not pass one in
  if (imesh .eq. 0) then
     call imesh_newmesh("MOAB", imesh, ierr)
  end if

#ifdef MOAB_HAVE_MPI
  if (imeshp .eq. 0) then
     ! no partition yet: wrap the communicator, create a partition and one local part
     call imeshp_getcommunicator(%VAL(imesh), mpi_comm_world, mpi_comm_c, ierr)
     ERROR(ierr)
     call imeshp_createpartitionall(%VAL(imesh), %VAL(mpi_comm_c), imeshp, ierr)
     ERROR(ierr)
     call imeshp_createpart(%VAL(imesh), %VAL(imeshp), part, ierr)
     ERROR(ierr)
  else
     ! partition exists: reuse the first local part
     partsa = 0
     call imeshp_getlocalparts(%VAL(imesh), %VAL(imeshp), partsptr, partsa, partso, ierr)
     ERROR(ierr)
     call c_f_pointer(partsptr, parts, [partso])
     part = parts(1)
  end if
  call mpi_comm_rank(comm, comm_rank, ierr)
  ERROR(ierr)
  call mpi_comm_size(comm, comm_sz, ierr)
  ERROR(ierr)
#endif

  ! create the vertices, all in one call
  numa = 0
  call imesh_createvtxarr(%VAL(imesh), %VAL(numv), %VAL(ibase_interleaved), posn, %VAL(3*numv), &
       vertsptr, numa, numo, ierr)
  ERROR(ierr)

  ! fill in the connectivity array, based on indexing from iconn
  ! (iconn is 0-based, Fortran pointer array verts is 1-based)
  allocate (conn(0:nvpe*nume-1))
  call c_f_pointer(vertsptr, verts, [numv])
  do i = 0, nvpe*nume-1
     conn(i) = verts(1+iconn(i))
  end do
  ! create the elements
  numa = 0
  allocate(stats(0:nume-1))
  statsptr = c_loc(stats(0))
  call imesh_createentarr(%VAL(imesh), %VAL(tp), conn, %VAL(nvpe*nume), &
       entsptr, numa, numo, statsptr, numa, numo, ierr)
  ERROR(ierr)
  deallocate(stats)
  deallocate(conn)

#ifdef MOAB_HAVE_MPI
  ! take care of parallel stuff

  ! add entities to part, using iMesh
  call c_f_pointer(entsptr, ents, [numo])
  call imesh_addentarrtoset(%VAL(imesh), ents, %VAL(numo), %VAL(part), ierr)
  ERROR(ierr)
  ! set global ids on vertices, needed for sharing between procs
  call imesh_gettaghandle(%VAL(imesh), "GLOBAL_ID", tagh, ierr, %VAL(9))
  if (ibase_success .ne. ierr) then
     ! didn't get handle, need to create the tag
     call imesh_createtag(%VAL(imesh), "GLOBAL_ID", %VAL(ibase_integer), tagh, ierr)
     ERROR(ierr)
  end if
  call imesh_setintarrdata(%VAL(imesh), verts, %VAL(numv), %VAL(tagh), vgids, %VAL(numv), ierr)
  ERROR(ierr)
  ! now resolve shared verts and exchange ghost cells
  call imeshp_syncmeshall(%VAL(imesh), %VAL(imeshp), ierr)
  ERROR(ierr)

  ! 1 layer of face ghosts, bridged by vertices (dim 2 entities, bridge dim 0)
  call imeshp_createghostentsall(%VAL(imesh), %VAL(imeshp), %VAL(2), %VAL(1), %VAL(1), %VAL(0), ierr)
  ERROR(ierr)

#endif

  return
end subroutine create_mesh
#endif