MOAB: Mesh Oriented datABase  (version 5.5.0)
imoab_coupler_fortran.F90
! This program shows how to do an imoab_coupler type test in Fortran 90.
! The atm and ocean files are read from the repo, the meshes are migrated to coupler PEs,
! the intersection (intx) is carried out, then weight generation, tag migration and projection;
! finally the projected tag is migrated back to the ocean PEs and compared against baseline1.txt.
! Do not change the source/target files or the layout; the test only exercises the iMOAB
! calls, which need to give the same results as the equivalent C++ test (imoab_coupler),
! between the atm SE mesh and the ocn FV mesh.
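!
! A typical invocation, assuming the executable is named imoab_coupler_fortran and that
! 4 MPI ranks are available (the executable name and rank count are illustrative only):
!    mpiexec -np 4 ./imoab_coupler_fortran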

subroutine errorout(ierr, message)
   integer ierr
   character*(*) message
   if (ierr .ne. 0) then
      print *, message
      call exit(1)
   end if
   return
end
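!
! Note: each iMOAB call below returns an integer status, and each MPI call returns one
! through its last argument; a nonzero value passed to errorout aborts the run with the
! supplied message.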
!
#include "moab/MOABConfig.h"

#ifndef MOAB_MESH_DIR
#error Specify MOAB_MESH_DIR path
#endif

program imoab_coupler_fortran

   use iso_c_binding
   use imoab

#include "mpif.h"
#include "moab/MOABConfig.h"
   !implicit none
   integer :: m ! for number of arguments; if less than 1, exit
   integer :: global_comm
   integer :: rankinglobalcomm, numprocesses
   integer :: ierr
   integer :: my_id, num_procs
   integer :: jgroup ! group for global comm
   character(:), allocatable :: atmfilename
   character(:), allocatable :: ocnfilename
   character(:), allocatable :: baselinefilename
   character(:), allocatable :: readopts, filewriteoptions
   character :: appname*128
   character(:), allocatable :: weights_identifier1
   character(:), allocatable :: disc_methods1, disc_methods2, dof_tag_names1, dof_tag_names2
   integer :: disc_orders1, disc_orders2
   character(:), allocatable :: atmocn_map_file_name, intx_from_file_identifier

   ! all groups and comms are on the same number of processes, for simplicity
   integer :: atmgroup, atmcomm, ocngroup, ocncomm, cplgroup, cplcomm
   integer :: atmcoucomm, ocncoucomm

   integer :: cmpatm, cplatm, cmpocn, cplocn, atmocnid ! global IDs
   integer :: cmpatmpid, cplatmpid ! iMOAB app ids
   integer :: cmpocnpid, cplocnpid ! iMOAB app ids
   integer :: cplatmocnpid ! intx pid
   integer :: nghlay, partscheme, context_id
   integer :: fnobubble, fmonotonetypeid, fvolumetric, fnoconserve, fvalidate, finversedistancemap

   integer, dimension(2) :: tagindex
   integer, dimension(2) :: tagtypes ! { DENSE_DOUBLE, DENSE_DOUBLE }
   integer :: atmcompndofs ! = disc_orders1 * disc_orders1
   integer :: ocncompndofs ! = 1 (FV)
   character(:), allocatable :: bottomfields, bottomprojectedfields
   integer, dimension(3) :: nverts, nelem, nblocks, nsbc, ndbc
   double precision, allocatable :: vals(:) ! to set the double values to 0
   integer :: i ! for loops
   integer :: storleng, eetype ! for tags defs
   character(:), allocatable :: concat_fieldname, concat_fieldnamet, outputfileocn
   integer :: tagindexin2 ! not really needed
   integer :: dummycpl, dummyrc, dummytype
   real*8 :: boxeps
   integer :: gnomonic

   cmpatm = 5
   cplatm = 6
   cmpocn = 17
   cplocn = 18
   atmocnid = 618

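   ! These integers are the external (component) ids under which the five iMOAB
   ! applications are registered below: the atm and ocn components, their copies on the
   ! coupler PEs, and the atm-ocn intersection application. The same ids are also used
   ! later as "context" values in the send/receive and free-buffer calls.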
   call mpi_init(ierr)
   call errorout(ierr, 'mpi_init')
   call mpi_comm_rank(mpi_comm_world, my_id, ierr)
   call errorout(ierr, 'fail to get MPI rank')

   call mpi_comm_size(mpi_comm_world, num_procs, ierr)
   call errorout(ierr, 'fail to get MPI size')
   call mpi_comm_dup(mpi_comm_world, global_comm, ierr)
   call errorout(ierr, 'fail to get global comm duplicate')

   call mpi_comm_group(global_comm, jgroup, ierr) ! all processes in jgroup
   call errorout(ierr, 'fail to get joint group')
   ! readopts( "PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION;PARALLEL_RESOLVE_SHARED_ENTS" )
   ! readoptsLnd( "PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION" )
   atmfilename = &
      MOAB_MESH_DIR &
      //'unittest/wholeATM_T.h5m'//c_null_char
   ocnfilename = &
      MOAB_MESH_DIR &
      //'unittest/recMeshOcn.h5m'//c_null_char
   baselinefilename = &
      MOAB_MESH_DIR &
      //'unittest/baseline1.txt'//c_null_char
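   ! MOAB_MESH_DIR is a compile-time macro (checked by the #ifndef guard above) that
   ! expands to the path of the MOAB mesh file directory, so the three file names above
   ! all point into that directory.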

   ! all comms span the whole world, for simplicity
   atmcomm = mpi_comm_null
   ocncomm = mpi_comm_null
   cplcomm = mpi_comm_null
   atmcoucomm = mpi_comm_null
   ocncoucomm = mpi_comm_null
   call mpi_comm_dup(global_comm, atmcomm, ierr)
   call mpi_comm_dup(global_comm, ocncomm, ierr)
   call mpi_comm_dup(global_comm, cplcomm, ierr)
   call mpi_comm_dup(global_comm, atmcoucomm, ierr)
   call mpi_comm_dup(global_comm, ocncoucomm, ierr)

   ! all groups of interest are easy breezy
   call mpi_comm_group(atmcomm, atmgroup, ierr)
   call mpi_comm_group(ocncomm, ocngroup, ierr)
   call mpi_comm_group(cplcomm, cplgroup, ierr)

   readopts = 'PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION;PARALLEL_RESOLVE_SHARED_ENTS'//c_null_char
   nghlay = 0 ! no ghost layers
#ifdef MOAB_HAVE_ZOLTAN
   partscheme = 2 ! RCB with Zoltan
#else
   partscheme = 0 ! trivial partitioner
#endif

   if (my_id .eq. 0) then
      print *, ' number of tasks: ', num_procs
      print *, ' Atm file: ', atmfilename
      print *, ' Ocn file: ', ocnfilename
      print *, ' baseline file: ', baselinefilename
      print *, ' using partitioner: ', partscheme
   end if

   ierr = imoab_initialize()
   appname = 'ATM'//c_null_char
   ierr = imoab_registerapplication(appname, atmcomm, cmpatm, cmpatmpid)
   appname = 'ATMX'//c_null_char
   ierr = imoab_registerapplication(appname, cplcomm, cplatm, cplatmpid)
   appname = 'OCN'//c_null_char
   ierr = imoab_registerapplication(appname, ocncomm, cmpocn, cmpocnpid)
   appname = 'OCNX'//c_null_char
   ierr = imoab_registerapplication(appname, cplcomm, cplocn, cplocnpid)

   appname = 'ATMOCN'//c_null_char
   ierr = imoab_registerapplication(appname, cplcomm, atmocnid, cplatmocnpid)

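   ! Five iMOAB applications are now registered: ATM and OCN on their component
   ! communicators, ATMX and OCNX as their copies on the coupler communicator, and
   ! ATMOCN on the coupler communicator to hold the intersection/remapping data.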
   ! read atm and migrate
   if (atmcomm .NE. mpi_comm_null) then
      ierr = imoab_loadmesh(cmpatmpid, atmfilename, readopts, nghlay)
      call errorout(ierr, 'fail to load atm')
      ierr = imoab_sendmesh(cmpatmpid, atmcoucomm, cplgroup, cplatm, partscheme)
      call errorout(ierr, 'fail to send atm')
   end if
   if (cplcomm .NE. mpi_comm_null) then
      ierr = imoab_receivemesh(cplatmpid, atmcoucomm, atmgroup, cmpatm)
      call errorout(ierr, 'fail to receive atm')
   end if

   ! we can now free the sender buffers
   if (atmcomm .NE. mpi_comm_null) then
      context_id = cplatm
      ierr = imoab_freesenderbuffers(cmpatmpid, context_id)
      call errorout(ierr, 'fail to free atm buffers')
   end if

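   ! Mesh migration always follows the same pattern: the component loads its mesh and
   ! sends it over the joint communicator, the coupler side receives it, and the sender
   ! then frees its send buffers using the receiver id as context. The ocean mesh below
   ! is migrated in exactly the same way.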
   ! read ocn and migrate
   if (ocncomm .NE. mpi_comm_null) then
      ierr = imoab_loadmesh(cmpocnpid, ocnfilename, readopts, nghlay)
      call errorout(ierr, 'fail to load ocn')
      ierr = imoab_sendmesh(cmpocnpid, ocncoucomm, cplgroup, cplocn, partscheme)
      call errorout(ierr, 'fail to send ocn')
   end if
   if (cplcomm .NE. mpi_comm_null) then
      ierr = imoab_receivemesh(cplocnpid, ocncoucomm, ocngroup, cmpocn)
      call errorout(ierr, 'fail to receive ocn')
   end if

   ! we can now free the sender buffers
   if (ocncomm .NE. mpi_comm_null) then
      context_id = cplocn
      ierr = imoab_freesenderbuffers(cmpocnpid, context_id)
      call errorout(ierr, 'fail to free ocn buffers')
   end if

   if (cplcomm .NE. mpi_comm_null) then
      ierr = imoab_computemeshintersectiononsphere(cplatmpid, cplocnpid, cplatmocnpid)
      ! the coverage mesh was computed here, for cplAtmPID (atm on coupler pes);
      ! basically, atm was redistributed according to the target (ocean) partition, to
      ! "cover" the ocean partitions; one could check here that the intx is valid and
      ! write an h5m intx file
      call errorout(ierr, 'cannot compute intersection')
   end if

   if (atmcoucomm .NE. mpi_comm_null) then
      ! the new graph will be for sending data from the atm comp to the coverage mesh;
      ! it involves the initial atm app, cmpAtmPID, and the atm mesh migrated to the coupler pes, cplAtmPID.
      ! the results are in cplAtmOcnPID, the intx mesh; the remapper also has some info about the coverage mesh.
      ! after this, the sending of tags from atm pes to coupler pes will use the new par comm
      ! graph, which has more precise info about what to send for the ocean cover; every time, we
      ! will use the element global id, which should uniquely identify the element
      ierr = imoab_coveragegraph(atmcoucomm, cmpatmpid, cplatmpid, cplatmocnpid, &
                                 cmpatm, cplatm, cplocn) ! this happens over the joint communicator
      call errorout(ierr, 'cannot recompute direct coverage graph for ocean')
   end if

   weights_identifier1 = 'scalar'//c_null_char
   disc_methods1 = 'cgll'//c_null_char
   disc_methods2 = 'fv'//c_null_char
   disc_orders1 = 4
   disc_orders2 = 1
   dof_tag_names1 = 'GLOBAL_DOFS'//c_null_char
   dof_tag_names2 = 'GLOBAL_ID'//c_null_char
   ! fNoBubble = 1, fMonotoneTypeID = 0, fVolumetric = 0, fNoConserve = 0, fValidate = 0, fInverseDistanceMap = 0
   fnobubble = 1
   fmonotonetypeid = 0
   fvolumetric = 0
   fnoconserve = 0
   fvalidate = 0
   finversedistancemap = 0

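   ! The source discretization is the spectral-element atm mesh (cgll, order 4, so 16 dofs
   ! per element, identified by the GLOBAL_DOFS tag), and the target is the finite-volume
   ! ocean mesh (fv, order 1, identified by GLOBAL_ID); the f* flags above select the map
   ! generation options.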
   if (cplcomm .NE. mpi_comm_null) then

      ierr = imoab_computescalarprojectionweights( &
         cplatmocnpid, weights_identifier1, disc_methods1, disc_orders1, &
         disc_methods2, disc_orders2, ""//c_null_char, fnobubble, fmonotonetypeid, fvolumetric, &
         finversedistancemap, fnoconserve, &
         fvalidate, dof_tag_names1, dof_tag_names2)
      call errorout(ierr, 'cannot compute scalar projection weights')

      ierr = imoab_computescalarprojectionweights( &
         cplatmocnpid, "bilinear"//c_null_char, "fv"//c_null_char, 1, &
         "fv"//c_null_char, 1, "bilin"//c_null_char, fnobubble, fmonotonetypeid, fvolumetric, &
         finversedistancemap, fnoconserve, &
         fvalidate, "GLOBAL_ID"//c_null_char, "GLOBAL_ID"//c_null_char)
      call errorout(ierr, 'cannot compute bilinear projection weights')

#ifdef MOAB_HAVE_NETCDF
      atmocn_map_file_name = 'atm_ocn_map_f.nc'//c_null_char
      ierr = imoab_writemappingweightstofile(cplatmocnpid, weights_identifier1, atmocn_map_file_name)
      call errorout(ierr, 'failed to write map file to disk')
      intx_from_file_identifier = 'map-from-file'//c_null_char
      dummycpl = -1
      dummyrc = -1
      dummytype = 0
      ierr = imoab_loadmappingweightsfromfile(cplatmocnpid, dummycpl, dummyrc, dummytype, &
                                              intx_from_file_identifier, atmocn_map_file_name)
      call errorout(ierr, 'failed to load map file from disk')
#endif
   end if

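   ! Writing the map to atm_ocn_map_f.nc and reading it back under a new identifier
   ! ('map-from-file') exercises the file-based path for the mapping weights; this block
   ! is only compiled when MOAB was built with NetCDF support. The dummycpl, dummyrc and
   ! dummytype variables are simple placeholder arguments for this test.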
   ! tag types and sizes for the fields to be projected
   tagtypes(1) = 1 ! somehow, DENSE_DOUBLE gives 0, while it should be 1; maybe use moab::DENSE_DOUBLE ?
   tagtypes(2) = 1 ! DENSE_DOUBLE
   atmcompndofs = disc_orders1*disc_orders1
   ocncompndofs = 1 ! FV

   bottomfields = 'a2oTbot:a2oUbot:a2oVbot'//c_null_char
   bottomprojectedfields = 'a2oTbot_proj:a2oUbot_proj:a2oVbot_proj'//c_null_char

   if (cplcomm .NE. mpi_comm_null) then
      ierr = imoab_definetagstorage(cplatmpid, bottomfields, tagtypes(1), atmcompndofs, tagindex(1))
      call errorout(ierr, 'failed to define the field tags a2oTbot:a2oUbot:a2oVbot')
      ierr = imoab_definetagstorage(cplocnpid, bottomprojectedfields, tagtypes(2), ocncompndofs, tagindex(2))
      call errorout(ierr, 'failed to define the field tags a2oTbot_proj:a2oUbot_proj:a2oVbot_proj')
   end if

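   ! The source fields live on the atm copy on the coupler (16 values per spectral
   ! element), and the projected fields on the ocean copy on the coupler (1 value per
   ! finite-volume cell); the tag names are colon-separated lists, so three tags are
   ! created on each side.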
   ! make the tag 0, to check we are actually sending needed data
   if (cplatmpid .ge. 0) then

      ! Each process in the communicator will have access to a local mesh instance, which
      ! will contain the original cells in the local partition and ghost entities. The
      ! number of vertices, primary cells, visible blocks, and sideset and nodeset boundary
      ! conditions are returned in 3-element arrays, for local, ghost and total numbers.

      ierr = imoab_getmeshinfo(cplatmpid, nverts, nelem, nblocks, nsbc, ndbc)
      call errorout(ierr, 'failed to get num primary elems')
      storleng = nelem(3)*atmcompndofs*3 ! 3 tags
      allocate (vals(storleng))
      eetype = 1 ! double type

      ! set the tag values to 0.0
      vals = 0.0
      ierr = imoab_setdoubletagstorage(cplatmpid, bottomfields, storleng, eetype, vals)
      call errorout(ierr, 'cannot make tag null')

   end if

   ! Define the field variables to project
   concat_fieldname = 'a2oTbot:a2oUbot:a2oVbot'//c_null_char
   concat_fieldnamet = 'a2oTbot_proj:a2oUbot_proj:a2oVbot_proj'//c_null_char

   if (atmcomm .NE. mpi_comm_null) then

      ! As always, use nonblocking sends
      ! this is for projection to ocean:
      ierr = imoab_sendelementtag(cmpatmpid, concat_fieldname, atmcoucomm, cplocn)
      call errorout(ierr, 'cannot send tag values')

   end if

   if (cplcomm .NE. mpi_comm_null) then
      ! receive on atm on coupler pes, that was redistributed according to coverage
      ierr = imoab_receiveelementtag(cplatmpid, concat_fieldname, atmcoucomm, cplocn)
      call errorout(ierr, 'cannot receive tag values')
   end if

   ! we can now free the sender buffers
   if (atmcomm .NE. mpi_comm_null) then

      ierr = imoab_freesenderbuffers(cmpatmpid, cplocn) ! context is for ocean
      call errorout(ierr, 'cannot free buffers used to resend atm tag towards the coverage mesh')

   end if
   if (cplcomm .ne. mpi_comm_null) then

      outputfileocn = "AtmOnCplF.h5m"//c_null_char
      filewriteoptions = 'PARALLEL=WRITE_PART'//c_null_char
      ierr = imoab_writemesh(cplatmpid, outputfileocn, filewriteoptions)
      call errorout(ierr, 'could not write AtmOnCplF.h5m to disk')

   end if
   if (cplcomm .ne. mpi_comm_null) then

      ! We have the remapping weights now. Let us apply the weights onto the tag we defined
      ! on the source mesh and get the projection on the target mesh
      ierr = imoab_applyscalarprojectionweights(cplatmocnpid, weights_identifier1, concat_fieldname, &
                                                concat_fieldnamet)
      call errorout(ierr, 'failed to apply the projection weights')

      outputfileocn = "OcnOnCplF.h5m"//c_null_char
      filewriteoptions = 'PARALLEL=WRITE_PART'//c_null_char
      ierr = imoab_writemesh(cplocnpid, outputfileocn, filewriteoptions)
      call errorout(ierr, 'could not write OcnOnCplF.h5m to disk')

   end if
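   ! At this point the atm fields have been projected onto the ocean mesh on the coupler
   ! PEs; the two h5m files written above can be inspected to check the source and the
   ! projected fields. What remains is to send the projected tags back to the ocean
   ! component PEs.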
   ! send the projected tag back to ocean pes, with send/receive tag
   ! first make sure the tags are defined, otherwise they cannot be received
   if (ocncomm .ne. mpi_comm_null) then

      ierr = imoab_definetagstorage(cmpocnpid, bottomprojectedfields, tagtypes(2), ocncompndofs, tagindexin2)
      call errorout(ierr, 'failed to define the field tag for receiving back the tag a2oTbot_proj, on ocn pes')

   end if

   ! send the tag to the ocean pes, from the ocean mesh on the coupler pes,
   ! over the joint communicator ocncoucomm
   ! as always, use nonblocking sends
   ! the context is the ocean component id, cmpocn (the original graph)
   if (cplcomm .ne. mpi_comm_null) then
      context_id = cmpocn
      ierr = imoab_sendelementtag(cplocnpid, concat_fieldnamet, ocncoucomm, context_id)
      call errorout(ierr, 'cannot send tag values back to ocean pes')
   end if

   if (ocncomm .ne. mpi_comm_null) then
      context_id = cplocn
      ierr = imoab_receiveelementtag(cmpocnpid, concat_fieldnamet, ocncoucomm, context_id)
      call errorout(ierr, 'cannot receive tag values from ocean mesh on coupler pes')
   end if

   if (cplcomm .ne. mpi_comm_null) then
      context_id = cmpocn
      ierr = imoab_freesenderbuffers(cplocnpid, context_id)
      call errorout(ierr, 'cannot free sender buffers on coupler')
   end if

   if (ocncomm .ne. mpi_comm_null) then

      outputfileocn = "OcnWithProjF.h5m"//c_null_char
      filewriteoptions = 'PARALLEL=WRITE_PART'//c_null_char
      if (my_id .eq. 0) then
         print *, ' Writing ocean mesh file with projected solution to disk: ', outputfileocn
      end if
      ierr = imoab_writemesh(cmpocnpid, outputfileocn, filewriteoptions)
      call errorout(ierr, 'could not write OcnWithProjF.h5m to disk')

   end if

   ! free up resources
   ierr = imoab_deregisterapplication(cplatmocnpid)
   call errorout(ierr, 'could not de-register the atm-ocn intersection app')

   ierr = imoab_deregisterapplication(cplocnpid)
   call errorout(ierr, 'could not de-register the OCN app on the coupler')
   ierr = imoab_deregisterapplication(cmpocnpid)
   call errorout(ierr, 'could not de-register the OCN component')
   ierr = imoab_deregisterapplication(cplatmpid)
   call errorout(ierr, 'could not de-register the ATM app on the coupler')
   ierr = imoab_deregisterapplication(cmpatmpid)
   call errorout(ierr, 'could not de-register the ATM component')

   ! Free all MPI data structures
   call mpi_comm_free(atmcomm, ierr)
   call mpi_group_free(atmgroup, ierr)
   call mpi_comm_free(ocncomm, ierr)
   call mpi_group_free(ocngroup, ierr)
   call mpi_comm_free(cplcomm, ierr)
   call mpi_group_free(cplgroup, ierr)

   call mpi_comm_free(atmcoucomm, ierr)
   call mpi_comm_free(ocncoucomm, ierr)
   call mpi_comm_free(global_comm, ierr)
   call mpi_group_free(jgroup, ierr)
   call mpi_finalize(ierr)

end program imoab_coupler_fortran