Mesh Oriented datABase (version 5.5.1)
An array-based unstructured mesh library
MigrateMesh.F90
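
! Example: migrate a mesh between two overlapping groups of MPI tasks with iMOAB.
! The mesh file is read in parallel on a "sender" group (component 1), sent through
! iMOAB's send/receive mesh calls to a "receiver" group (component 2), written back
! to disk by the receivers, and all iMOAB applications and MPI objects are then freed.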
program migratemesh
  implicit none

#include "moab/MOABConfig.h"
#ifdef MOAB_HAVE_MPI
#include "mpif.h"
#else
#error "enable parallel build"
#endif

!#define NONOVERLAP

  ! init the parallel partition
  integer ierr, sz, rank, i
  integer newcomm
  integer gcomm, comm1, comm2
  integer pid1, pid2                     ! this is for physics ids
  integer compid1, compid2               ! component ids are unique over all pes, and established in advance
  integer nghlay                         ! number of ghost layers for loading
  integer grouptasks(9)                  ! run on at most 9 processes
  integer startg1, startg2, endg1, endg2 ! start and end for group tasks, for creation
  integer sizeg1, sizeg2                 ! size of the group that gets created
  character*10 appname
  character*132 readopts
  character*132 filename
  character*132 outfile
  character*132 wopts
  integer allgroup, group1, group2       ! corresponding to MPI_Group in C
  integer tagcomm1, tagcomm2
  integer imoab_initializefortran, imoab_registerfortranapplication
  integer imoab_loadmesh, imoab_sendmesh, imoab_receivemesh, imoab_writemesh
  integer imoab_freesenderbuffers
  integer imoab_deregisterapplication, imoab_finalize
  integer repart_scheme, context_id

  call mpi_init(ierr)
  call mpi_comm_dup(mpi_comm_world, gcomm, ierr)
  call mpi_comm_size(gcomm, sz, ierr)
  call mpi_comm_rank(gcomm, rank, ierr)
  call errorout(ierr, 'cannot get rank')
  if (rank .eq. 0) print *, "size:", sz
  if ( (0 .eq. rank) .and. (sz > 9) ) then
    print *, "size is ", sz, ". run on at most 9 tasks"
    call exit(1)
  endif
  ! create 2 overlapping groups, for generality
  ! create communicators for each group;
  ! one group will represent the sender, the other group the receiver
  ! group 1 covers the upper half of the tasks and group 2 the first three quarters:
  ! tasks in the first half are only in group 2, tasks in the last quarter are only in
  ! group 1, and the quarter in between belongs to both groups
  ! the mesh will be read on the sender comm and sent to the receiver comm

  ! create new MPI groups for processors sz/2, sz/2+1, ..., sz-1 (group 1)
  ! and 0, 1, ..., 3*sz/4-1 (group 2)

  call mpi_comm_group(gcomm, allgroup, ierr)
  call errorout(ierr, 'cannot get world group')
  ! first group, sz/2 to sz-1
  startg1 = sz/2
  endg1 = sz - 1
  sizeg1 = endg1 - startg1 + 1
  ! used for new API in iMOAB, for tag migrate, release buffers
  context_id = -1

  do i = 1, sizeg1
    grouptasks(i) = startg1 + i - 1
  end do

  call mpi_group_incl(allgroup, sizeg1, grouptasks, group1, ierr)
  call errorout(ierr, 'cannot create group 1')

  ! second group, 0, 1, ..., 3*sz/4-1
  startg2 = 0
  endg2 = 3*sz/4 - 1
  if (endg2 < 0) endg2 = 0 ! so it will work even for 1 task
#ifdef NONOVERLAP
  endg2 = startg1 - 1
#endif
  sizeg2 = endg2 - startg2 + 1
  do i = 1, sizeg2
    grouptasks(i) = startg2 + i - 1
  enddo

  call mpi_group_incl(allgroup, sizeg2, grouptasks, group2, ierr)
  call errorout(ierr, 'cannot create group 2')

  if (0 .eq. rank) then
    print *, "group 1 tasks: ", (i, i=startg1, endg1)
    print *, "group 2 tasks: ", (i, i=startg2, endg2)
  endif
  ! now create both communicators
  ! on tasks that are not in a group, the communicator created for it will be MPI_COMM_NULL
  tagcomm1 = 1
  call mpi_comm_create_group(gcomm, group1, tagcomm1, comm1, ierr)
  call errorout(ierr, 'cannot create communicator 1')

  tagcomm2 = 2
  call mpi_comm_create_group(gcomm, group2, tagcomm2, comm2, ierr)
  call errorout(ierr, 'cannot create communicator 2')

  ierr = imoab_initializefortran()
  call errorout(ierr, 'did not initialize fortran')

  repart_scheme = 0 ! this is for trivial partitioning
#ifdef MOAB_HAVE_ZOLTAN
  repart_scheme = 1 ! use the graph partitioner in that case
#endif
  ! give some dummy values to component ids, just to differentiate between them
  ! the par comm graph is unique between components
  compid1 = 4
  compid2 = 7
  if (rank == 0) print *, "initialize iMOAB fortran applications"

  if (comm1 /= mpi_comm_null) then
    appname = 'phis1'//char(0)
    ierr = imoab_registerfortranapplication(trim(appname), comm1, compid1, pid1)
    print *, ' register ', appname, " on rank ", rank, " pid1 ", pid1
  endif
  if (comm2 /= mpi_comm_null) then
    appname = 'phis2'//char(0)
    ierr = imoab_registerfortranapplication(trim(appname), comm2, compid2, pid2)
    print *, ' register ', appname, " on rank ", rank, " pid2 ", pid2
  endif


  if (comm1 /= mpi_comm_null) then
    filename = 'spherecube.h5m'//char(0)
    readopts = 'PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION;PARALLEL_RESOLVE_SHARED_ENTS'//char(0)
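    ! standard MOAB parallel read options:
    !   PARALLEL=READ_PART           - each task reads only its own parts of the file
    !   PARTITION=PARALLEL_PARTITION - parts are identified by the PARALLEL_PARTITION tag
    !   PARALLEL_RESOLVE_SHARED_ENTS - resolve entities shared between adjacent parts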
    if (rank .eq. sz-2) print *, "loading ", trim(filename), " with options ", trim(readopts)
    nghlay = 0

    ierr = imoab_loadmesh(pid1, trim(filename), trim(readopts), nghlay)
    if (rank .eq. sz-1) print *, "loaded in parallel ", trim(filename), " error: ", ierr
    ierr = imoab_sendmesh(pid1, gcomm, group2, compid2, repart_scheme) ! send to component 2
    call errorout(ierr, 'cannot send elements')
  endif

  if (comm2 /= mpi_comm_null) then
    ierr = imoab_receivemesh(pid2, gcomm, group1, compid1) ! receive from component 1
    call errorout(ierr, 'cannot receive elements')
  endif
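  ! the two calls above are paired: tasks in group 1 (component compid1) send the mesh
  ! over the joint communicator gcomm to the tasks of group 2 (component compid2), and
  ! tasks that belong to both groups take part in both sides of the exchange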

  ! we can now free the sender buffers
  if (comm1 /= mpi_comm_null) then
    ierr = imoab_freesenderbuffers(pid1, context_id)
  endif
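  ! synchronize all tasks on the joint communicator before the receivers write the mesh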
  call mpi_barrier(gcomm, ierr)
  call errorout(ierr, 'cannot stop at barrier')

  if (comm2 /= mpi_comm_null) then
    outfile = 'receivedMesh.h5m'//char(0)
    wopts = 'PARALLEL=WRITE_PART;'//char(0)
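    ! PARALLEL=WRITE_PART is MOAB's parallel write option: each receiver task writes its
    ! own part of the migrated mesh into the single h5m output file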
    ! write out the mesh file to disk
    ierr = imoab_writemesh(pid2, trim(outfile), trim(wopts))
    call errorout(ierr, 'cannot write received mesh')
  endif

  if (comm2 /= mpi_comm_null) then
    ierr = imoab_deregisterapplication(pid2)
    call errorout(ierr, 'cannot deregister app 2 receiver')
  endif
  if (comm1 /= mpi_comm_null) then
    ierr = imoab_deregisterapplication(pid1)
    call errorout(ierr, 'cannot deregister app 1 sender')
  endif

  ierr = imoab_finalize()
  call errorout(ierr, 'did not finalize iMOAB')

  if (mpi_comm_null /= comm1) call mpi_comm_free(comm1, ierr)
  call errorout(ierr, 'did not free comm1')

  if (mpi_comm_null /= comm2) call mpi_comm_free(comm2, ierr)
  call errorout(ierr, 'did not free comm2')

  call mpi_group_free(allgroup, ierr)
  call mpi_group_free(group1, ierr)
  call mpi_group_free(group2, ierr)
  call mpi_comm_free(gcomm, ierr)

  call mpi_finalize(ierr)
  call errorout(ierr, 'did not finalize MPI')

contains

  subroutine errorout(ierr, message)
    integer ierr
    character*(*) message
    if (ierr .ne. 0) then
      print *, message
      call exit(1)
    end if
  end subroutine errorout

end program migratemesh