! MigrateMesh: read a mesh in parallel on one group of MPI tasks and
! migrate it, through iMOAB, to a second (possibly overlapping) group of tasks.
program migratemesh
implicit none
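! A typical run (executable name and task count are illustrative), with
! spherecube.h5m present in the working directory:
!   mpiexec -np 8 ./MigrateMesh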

#include "moab/MOABConfig.h"
#ifdef MOAB_HAVE_MPI
# include "mpif.h"
#else
# error "enable parallel build"
#endif

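! MPI bookkeeping, group and communicator handles, and the integer iMOAB interface functions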
integer ierr, sz, rank, i
integer newcomm
integer gcomm, comm1, comm2
integer pid1, pid2        ! iMOAB application handles
integer compid1, compid2  ! component ids used to pair sender and receiver

integer nghlay            ! number of ghost layers requested at load time
integer grouptasks(9)     ! rank lists for the groups; run on at most 9 tasks
integer startg1, startg2, endg1, endg2
integer sizeg1, sizeg2
character*10 appname
character*132 readopts
character*132 filename
character*132 outfile
character*132 wopts
integer allgroup, group1, group2
integer tagcomm1, tagcomm2
integer imoab_initializefortran, imoab_registerfortranapplication
integer imoab_loadmesh, imoab_sendmesh, imoab_receivemesh, imoab_writemesh
integer imoab_freesenderbuffers
integer imoab_deregisterapplication, imoab_finalize
integer repart_scheme, context_id
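! start MPI and work on a duplicate of MPI_COMM_WORLD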
call mpi_init(ierr)
call mpi_comm_dup(mpi_comm_world, gcomm, ierr)
call mpi_comm_size(gcomm, sz, ierr)
call mpi_comm_rank(gcomm, rank, ierr)
call errorout(ierr, 'cannot get rank')
if (rank .eq. 0) print *, "size:", sz
if ((0 .eq. rank) .and. (sz > 9)) then
   print *, "size is ", sz, "; run on at most 9 tasks"
   call exit(1)
endif
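! split the tasks into two groups that may overlap:
!   group 1 (sender)  : ranks sz/2 .. sz-1
!   group 2 (receiver): ranks 0 .. 3*sz/4-1 (or 0 .. sz/2-1 when NONOVERLAP is defined)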
call mpi_comm_group(gcomm, allgroup, ierr)
call errorout(ierr, 'cannot get world group')

! group 1: the second half of the tasks
startg1 = sz/2
endg1 = sz-1
sizeg1 = endg1 - startg1 + 1

! context id used later when freeing the sender buffers
context_id = -1

do i=1, sizeg1
   grouptasks(i) = startg1+i-1
end do

call mpi_group_incl(allgroup, sizeg1, grouptasks, group1, ierr)
call errorout(ierr, 'cannot create group 1')

! group 2: the first 3*sz/4 tasks (first half only when NONOVERLAP is defined)
startg2 = 0
endg2 = 3*sz/4 - 1
if (endg2 < 0) endg2 = 0
#ifdef NONOVERLAP
endg2 = startg1 - 1
#endif
sizeg2 = endg2 - startg2 + 1
do i=1, sizeg2
   grouptasks(i) = startg2+i-1
enddo

call mpi_group_incl(allgroup, sizeg2, grouptasks, group2, ierr)
call errorout(ierr, 'cannot create group 2')

if (0 .eq. rank) then
   print *, "group 1 tasks: ", (i, i=startg1, endg1)
   print *, "group 2 tasks: ", (i, i=startg2, endg2)
endif
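! create one communicator per group; ranks outside a group get MPI_COMM_NULL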

tagcomm1 = 1
call mpi_comm_create_group(gcomm, group1, tagcomm1, comm1, ierr)
call errorout(ierr, 'cannot create communicator 1')

tagcomm2 = 2
call mpi_comm_create_group(gcomm, group2, tagcomm2, comm2, ierr)
call errorout(ierr, 'cannot create communicator 2')
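! initialize the iMOAB library (Fortran interface)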

ierr = imoab_initializefortran()
call errorout(ierr, 'did not initialize fortran')

! repartition during migration with Zoltan when available; otherwise
! use the trivial scheme
repart_scheme = 0
#ifdef MOAB_HAVE_ZOLTAN
repart_scheme = 1
#endif

! component ids used to pair the sender and receiver applications
compid1 = 4
compid2 = 7
if (rank == 0) print *, "initialize iMOAB fortran applications"
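! register one application per group, on the tasks that own the
! corresponding communicator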

if (comm1 /= mpi_comm_null) then
   appname = 'phis1'//char(0)
   ierr = imoab_registerfortranapplication(trim(appname), comm1, compid1, pid1)
   print *, ' register ', appname, " on rank ", rank, " pid1 ", pid1
endif
if (comm2 /= mpi_comm_null) then
   appname = 'phis2'//char(0)
   ierr = imoab_registerfortranapplication(trim(appname), comm2, compid2, pid2)
   print *, ' register ', appname, " on rank ", rank, " pid2 ", pid2
endif
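! the sender group reads the mesh in parallel, then ships it to the
! receiver group, partitioned according to repart_scheme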

if (comm1 /= mpi_comm_null) then
   filename = 'spherecube.h5m'//char(0)
   readopts = 'PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION;PARALLEL_RESOLVE_SHARED_ENTS'//char(0)
   if (rank .eq. sz-2) print *, "loading ", trim(filename), " with options ", trim(readopts)
   nghlay = 0 ! no ghost layers

   ierr = imoab_loadmesh(pid1, trim(filename), trim(readopts), nghlay)
   if (rank .eq. sz-1) print *, "loaded in parallel ", trim(filename), " error: ", ierr
   ierr = imoab_sendmesh(pid1, gcomm, group2, compid2, repart_scheme)
   call errorout(ierr, 'cannot send elements')
endif
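! the receiver group gets its share of the migrated mesh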

if (comm2 /= mpi_comm_null) then
   ierr = imoab_receivemesh(pid2, gcomm, group1, compid1)
   call errorout(ierr, 'cannot receive elements')
endif
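! the sender can now free the buffers it used for the migration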

if (comm1 /= mpi_comm_null) then
   ierr = imoab_freesenderbuffers(pid1, context_id)
endif
call mpi_barrier(gcomm, ierr)
call errorout(ierr, 'cannot stop at barrier')
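! write the received mesh in parallel from the receiver group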

if (comm2 /= mpi_comm_null) then
   outfile = 'receivedMesh.h5m'//char(0)
   wopts = 'PARALLEL=WRITE_PART;'//char(0)

   ierr = imoab_writemesh(pid2, trim(outfile), trim(wopts))
   call errorout(ierr, 'cannot write received mesh')
endif
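! cleanup: deregister the applications, finalize iMOAB, free the MPI
! groups and communicators, and finalize MPI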

if (comm2 /= mpi_comm_null) then
   ierr = imoab_deregisterapplication(pid2)
   call errorout(ierr, 'cannot deregister app 2 receiver')
endif
if (comm1 /= mpi_comm_null) then
   ierr = imoab_deregisterapplication(pid1)
   call errorout(ierr, 'cannot deregister app 1 sender')
endif

ierr = imoab_finalize()
call errorout(ierr, 'did not finalize iMOAB')

if (mpi_comm_null /= comm1) then
   call mpi_comm_free(comm1, ierr)
   call errorout(ierr, 'did not free comm1')
endif

if (mpi_comm_null /= comm2) then
   call mpi_comm_free(comm2, ierr)
   call errorout(ierr, 'did not free comm2')
endif

call mpi_group_free(allgroup, ierr)
call mpi_group_free(group1, ierr)
call mpi_group_free(group2, ierr)
call mpi_comm_free(gcomm, ierr)

call mpi_finalize(ierr)
call errorout(ierr, 'did not finalize MPI')

contains
! print a message and abort the run when ierr is nonzero
SUBROUTINE errorout(ierr, message)
integer ierr
character*(*) message
if (ierr .ne. 0) then
   print *, message
   call exit(1)
end if
end

end program migratemesh