Actual source code: sfwindow.c

petsc-3.11.4 2019-09-28
#include <petsc/private/sfimpl.h>

typedef struct _n_PetscSFDataLink *PetscSFDataLink;
typedef struct _n_PetscSFWinLink  *PetscSFWinLink;

typedef struct {
  PetscSFWindowSyncType sync; /* FENCE, LOCK, or ACTIVE synchronization */
  PetscSFDataLink       link; /* List of MPI data types and windows, lazily constructed for each data type */
  PetscSFWinLink        wins; /* List of active windows */
} PetscSF_Window;

struct _n_PetscSFDataLink {
  MPI_Datatype    unit;
  MPI_Datatype    *mine;
  MPI_Datatype    *remote;
  PetscSFDataLink next;
};

struct _n_PetscSFWinLink {
  PetscBool      inuse;
  size_t         bytes;
  const void     *rkey,*lkey;
  MPI_Win        win;
  PetscBool      epoch;
  PetscSFWinLink next;
};

const char *const PetscSFWindowSyncTypes[] = {"FENCE","LOCK","ACTIVE","PetscSFWindowSyncType","PETSCSF_WINDOW_SYNC_",0};

/* Built-in MPI_Ops act elementwise inside MPI_Accumulate(). PETSc's MPIU_ ops can be used with composite types
   in collectives (e.g. MPIU_Allreduce) but not in one-sided operations, so translate them to their built-in
   counterparts here. */
static PetscErrorCode PetscSFWindowOpTranslate(MPI_Op *op)
{
  if (*op == MPIU_SUM) *op = MPI_SUM;
  else if (*op == MPIU_MAX) *op = MPI_MAX;
  else if (*op == MPIU_MIN) *op = MPI_MIN;
  return(0);
}
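
/* A minimal sketch (not part of the original file) of why this translation is
   needed: MPI_Accumulate() accepts only built-in MPI_Ops, while callers may
   pass PETSc's MPIU_ variants.  Variable names below are illustrative only. */
#if 0
MPI_Op op = MPIU_SUM;          /* op as received from, e.g., PetscSFReduceBegin() */
PetscSFWindowOpTranslate(&op); /* now a built-in op such as MPI_SUM */
MPI_Accumulate(leafbuf,1,minetype,rank,0,1,remotetype,op,win);
#endif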

/*@C
   PetscSFWindowGetDataTypes - gets composite local and remote data types for each rank

   Not Collective

   Input Arguments:
+  sf - star forest
-  unit - data type for each node

   Output Arguments:
+  localtypes - types describing part of local leaf buffer referencing each remote rank
-  remotetypes - types describing part of remote root buffer referenced for each remote rank

   Level: developer

.seealso: PetscSFSetGraph(), PetscSFView()
@*/
static PetscErrorCode PetscSFWindowGetDataTypes(PetscSF sf,MPI_Datatype unit,const MPI_Datatype **localtypes,const MPI_Datatype **remotetypes)
{
  PetscSF_Window    *w = (PetscSF_Window*)sf->data;
  PetscSFDataLink   link;
  PetscInt          i,nranks;
  const PetscInt    *roffset,*rmine,*rremote;
  const PetscMPIInt *ranks;

  /* Look for types in cache */
  for (link=w->link; link; link=link->next) {
    PetscBool match;
    MPIPetsc_Type_compare(unit,link->unit,&match);
    if (match) {
      *localtypes  = link->mine;
      *remotetypes = link->remote;
      return(0);
    }
  }

  /* Create new composite types for each send rank */
  PetscSFGetRanks(sf,&nranks,&ranks,&roffset,&rmine,&rremote);
  PetscNew(&link);
  MPI_Type_dup(unit,&link->unit);
  PetscMalloc2(nranks,&link->mine,nranks,&link->remote);
  for (i=0; i<nranks; i++) {
    PETSC_UNUSED PetscInt rcount = roffset[i+1] - roffset[i];
    PetscMPIInt           *rmine,*rremote;
#if !defined(PETSC_USE_64BIT_INDICES)
    rmine   = sf->rmine + sf->roffset[i];
    rremote = sf->rremote + sf->roffset[i];
#else
    PetscInt j;
    PetscMalloc2(rcount,&rmine,rcount,&rremote);
    for (j=0; j<rcount; j++) {
      PetscMPIIntCast(sf->rmine[sf->roffset[i]+j],rmine+j);
      PetscMPIIntCast(sf->rremote[sf->roffset[i]+j],rremote+j);
    }
#endif
    MPI_Type_create_indexed_block(rcount,1,rmine,link->unit,&link->mine[i]);
    MPI_Type_create_indexed_block(rcount,1,rremote,link->unit,&link->remote[i]);
#if defined(PETSC_USE_64BIT_INDICES)
    PetscFree2(rmine,rremote);
#endif
    MPI_Type_commit(&link->mine[i]);
    MPI_Type_commit(&link->remote[i]);
  }
  link->next = w->link;
  w->link    = link;

  *localtypes  = link->mine;
  *remotetypes = link->remote;
  return(0);
}
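
/* Sketch (not part of the original file): each cached pair of types selects,
   for one remote rank, the participating leaf entries (mine) and root entries
   (remote).  With hypothetical index arrays the construction looks like: */
#if 0
PetscMPIInt  mineidx[]   = {2,5,7}; /* leaf entries on this process */
PetscMPIInt  remoteidx[] = {0,1,4}; /* matching root entries on the remote rank */
MPI_Datatype minetype,remotetype;
MPI_Type_create_indexed_block(3,1,mineidx,MPI_DOUBLE,&minetype);
MPI_Type_create_indexed_block(3,1,remoteidx,MPI_DOUBLE,&remotetype);
MPI_Type_commit(&minetype);
MPI_Type_commit(&remotetype);
/* a single MPI_Get() with these types then moves all three entries at once */
#endif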

/*@C
   PetscSFWindowSetSyncType - sets the synchronization type for PetscSF communication

   Logically Collective

   Input Arguments:
+  sf - star forest for communication
-  sync - synchronization type

   Options Database Key:
.  -sf_window_sync <sync> - sets the synchronization type FENCE, LOCK, or ACTIVE (see PetscSFWindowSyncType)

   Level: advanced

.seealso: PetscSFSetFromOptions(), PetscSFWindowGetSyncType()
@*/
PetscErrorCode PetscSFWindowSetSyncType(PetscSF sf,PetscSFWindowSyncType sync)
{
  PetscUseMethod(sf,"PetscSFWindowSetSyncType_C",(PetscSF,PetscSFWindowSyncType),(sf,sync));
  return(0);
}

static PetscErrorCode PetscSFWindowSetSyncType_Window(PetscSF sf,PetscSFWindowSyncType sync)
{
  PetscSF_Window *w = (PetscSF_Window*)sf->data;

  w->sync = sync;
  return(0);
}
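
/* Usage sketch (not part of the original file): select LOCK synchronization on
   an existing star forest of type PETSCSFWINDOW, either programmatically or
   from the options database. */
#if 0
PetscSFWindowSetSyncType(sf,PETSCSF_WINDOW_SYNC_LOCK);
/* equivalently, on the command line:  -sf_window_sync lock  */
#endif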

/*@C
   PetscSFWindowGetSyncType - gets the synchronization type for PetscSF communication

   Logically Collective

   Input Argument:
.  sf - star forest for communication

   Output Argument:
.  sync - synchronization type

   Level: advanced

.seealso: PetscSFSetFromOptions(), PetscSFWindowSetSyncType()
@*/
PetscErrorCode PetscSFWindowGetSyncType(PetscSF sf,PetscSFWindowSyncType *sync)
{
  PetscUseMethod(sf,"PetscSFWindowGetSyncType_C",(PetscSF,PetscSFWindowSyncType*),(sf,sync));
  return(0);
}

static PetscErrorCode PetscSFWindowGetSyncType_Window(PetscSF sf,PetscSFWindowSyncType *sync)
{
  PetscSF_Window *w = (PetscSF_Window*)sf->data;

  *sync = w->sync;
  return(0);
}

/*@C
   PetscSFGetWindow - Get a window for use with a given data type

   Collective on PetscSF

   Input Arguments:
+  sf - star forest
.  unit - data type
.  rkey - root data, used both as the window memory and as a key identifying the window
.  lkey - leaf data, used as a key identifying the window
.  epoch - PETSC_TRUE to acquire the window and start an epoch, PETSC_FALSE to just acquire the window
.  fenceassert - assert parameter for call to MPI_Win_fence(), if PETSCSF_WINDOW_SYNC_FENCE
.  postassert - assert parameter for call to MPI_Win_post(), if PETSCSF_WINDOW_SYNC_ACTIVE
-  startassert - assert parameter for call to MPI_Win_start(), if PETSCSF_WINDOW_SYNC_ACTIVE

   Output Argument:
.  win - window

   Level: developer

   Developer Notes:
   This currently always creates a new window. This is more synchronous than necessary. An alternative is to try to
   reuse an existing window created with the same array. Another alternative is to maintain a cache of windows and reuse
   whichever one is available, by copying the array into it if necessary.

.seealso: PetscSFGetRanks(), PetscSFWindowGetDataTypes()
@*/
static PetscErrorCode PetscSFGetWindow(PetscSF sf,MPI_Datatype unit,const void *rkey,const void *lkey,PetscBool epoch,PetscMPIInt fenceassert,PetscMPIInt postassert,PetscMPIInt startassert,MPI_Win *win)
{
  PetscSF_Window *w = (PetscSF_Window*)sf->data;
  MPI_Aint       lb,lb_true,bytes,bytes_true;
  PetscSFWinLink link;

  MPI_Type_get_extent(unit,&lb,&bytes);
  MPI_Type_get_true_extent(unit,&lb_true,&bytes_true);
  if (lb != 0 || lb_true != 0) SETERRQ(PetscObjectComm((PetscObject)sf),PETSC_ERR_SUP,"No support for unit type with nonzero lower bound, write petsc-maint@mcs.anl.gov if you want this feature");
  if (bytes != bytes_true) SETERRQ(PetscObjectComm((PetscObject)sf),PETSC_ERR_SUP,"No support for unit type with modified extent, write petsc-maint@mcs.anl.gov if you want this feature");

  if (rkey || lkey) {
    for (link=w->wins; link; link=link->next) {
      if (rkey == link->rkey && lkey == link->lkey) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"No support for overlapped PetscSF communications with the same SF, rootdata and leafdata. Undo the overlap to avoid this error.");
    }
  }

  PetscNew(&link);

  link->bytes = bytes;
  link->rkey  = rkey;
  link->lkey  = lkey;

  MPI_Win_create((void*)rkey,(MPI_Aint)bytes*sf->nroots,(PetscMPIInt)bytes,MPI_INFO_NULL,PetscObjectComm((PetscObject)sf),&link->win);

  link->epoch = epoch;
  link->next  = w->wins;
  link->inuse = PETSC_TRUE;
  w->wins     = link;
  *win        = link->win;

  if (epoch) {
    switch (w->sync) {
    case PETSCSF_WINDOW_SYNC_FENCE:
      MPI_Win_fence(fenceassert,*win);
      break;
    case PETSCSF_WINDOW_SYNC_LOCK: /* Handled outside */
      break;
    case PETSCSF_WINDOW_SYNC_ACTIVE: {
      MPI_Group ingroup,outgroup;
      PetscSFGetGroups(sf,&ingroup,&outgroup);
      MPI_Win_post(ingroup,postassert,*win);
      MPI_Win_start(outgroup,startassert,*win);
    } break;
    default: SETERRQ(PetscObjectComm((PetscObject)sf),PETSC_ERR_PLIB,"Unknown synchronization type");
    }
  }
  return(0);
}
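
/* Sketch (not part of the original file): how the communication routines below
   use this call.  The assert flags are plain MPI_Win_fence()/post/start
   assertions; MPI_MODE_NOPRECEDE promises no RMA calls precede the opening
   fence, and MPI_MODE_NOPUT promises the local window is not written. */
#if 0
MPI_Win win;
PetscSFGetWindow(sf,unit,rootdata,leafdata,PETSC_TRUE,
                 MPI_MODE_NOPUT|MPI_MODE_NOPRECEDE, /* fenceassert */
                 MPI_MODE_NOPUT,                    /* postassert  */
                 0,                                 /* startassert */
                 &win);
#endif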

/*@C
   PetscSFFindWindow - Finds a window that is already in use

   Not Collective

   Input Arguments:
+  sf - star forest
.  unit - data type
.  rkey - root data with which the window is associated
-  lkey - leaf data with which the window is associated

   Output Argument:
.  win - window

   Level: developer

.seealso: PetscSFGetWindow(), PetscSFRestoreWindow()
@*/
static PetscErrorCode PetscSFFindWindow(PetscSF sf,MPI_Datatype unit,const void *rkey,const void *lkey,MPI_Win *win)
{
  PetscSF_Window *w = (PetscSF_Window*)sf->data;
  PetscSFWinLink link;

  *win = MPI_WIN_NULL;
  for (link=w->wins; link; link=link->next) {
    if (rkey == link->rkey && lkey == link->lkey) {
      *win = link->win;
      return(0);
    }
  }
  SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Requested window not in use");
  return(0);
}

/*@C
   PetscSFRestoreWindow - Restores a window obtained with PetscSFGetWindow()

   Collective

   Input Arguments:
+  sf - star forest
.  unit - data type
.  rkey - root data associated with the window
.  lkey - leaf data associated with the window
.  epoch - close an epoch, must match argument to PetscSFGetWindow()
.  fenceassert - assert parameter for the call to MPI_Win_fence(), if PETSCSF_WINDOW_SYNC_FENCE
-  win - window

   Level: developer

.seealso: PetscSFFindWindow()
@*/
static PetscErrorCode PetscSFRestoreWindow(PetscSF sf,MPI_Datatype unit,const void *rkey,const void *lkey,PetscBool epoch,PetscMPIInt fenceassert,MPI_Win *win)
{
  PetscSF_Window *w = (PetscSF_Window*)sf->data;
  PetscSFWinLink *p,link;

  for (p=&w->wins; *p; p=&(*p)->next) {
    link = *p;
    if (*win == link->win) {
      if (rkey != link->rkey || lkey != link->lkey) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Matched window, but not keys");
      if (epoch != link->epoch) {
        if (epoch) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"No epoch to end");
        else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Restoring window without ending epoch");
      }
      *p = link->next;
      goto found;
    }
  }
  SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Requested window not in use");

found:
  if (epoch) {
    switch (w->sync) {
    case PETSCSF_WINDOW_SYNC_FENCE:
      MPI_Win_fence(fenceassert,*win);
      break;
    case PETSCSF_WINDOW_SYNC_LOCK:
      break;                    /* handled outside */
    case PETSCSF_WINDOW_SYNC_ACTIVE: {
      MPI_Win_complete(*win);
      MPI_Win_wait(*win);
    } break;
    default: SETERRQ(PetscObjectComm((PetscObject)sf),PETSC_ERR_PLIB,"Unknown synchronization type");
    }
  }

  MPI_Win_free(&link->win);
  PetscFree(link);
  *win = MPI_WIN_NULL;
  return(0);
}
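
/* Sketch (not part of the original file): a split Begin/End pair acquires the
   window in Begin and recovers it by its (rootdata,leafdata) keys in End: */
#if 0
/* in XxxBegin_Window: */
PetscSFGetWindow(sf,unit,rootdata,leafdata,PETSC_TRUE,fenceassert,0,0,&win);
/* ... one-sided MPI_Get()/MPI_Accumulate() calls against win ... */
/* in XxxEnd_Window: */
PetscSFFindWindow(sf,unit,rootdata,leafdata,&win);
PetscSFRestoreWindow(sf,unit,rootdata,leafdata,PETSC_TRUE,fenceassert,&win);
#endif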

static PetscErrorCode PetscSFSetUp_Window(PetscSF sf)
{
  PetscSF_Window *w = (PetscSF_Window*)sf->data;
  MPI_Group      ingroup,outgroup;

  PetscSFSetUpRanks(sf,MPI_GROUP_EMPTY);
  switch (w->sync) {
  case PETSCSF_WINDOW_SYNC_ACTIVE:
    PetscSFGetGroups(sf,&ingroup,&outgroup); /* intentional fall-through */
  default:
    break;
  }
  return(0);
}

static PetscErrorCode PetscSFSetFromOptions_Window(PetscOptionItems *PetscOptionsObject,PetscSF sf)
{
  PetscSF_Window *w = (PetscSF_Window*)sf->data;

  PetscOptionsHead(PetscOptionsObject,"PetscSF Window options");
  PetscOptionsEnum("-sf_window_sync","synchronization type to use for PetscSF Window communication","PetscSFWindowSetSyncType",PetscSFWindowSyncTypes,(PetscEnum)w->sync,(PetscEnum*)&w->sync,NULL);
  PetscOptionsTail();
  return(0);
}

static PetscErrorCode PetscSFReset_Window(PetscSF sf)
{
  PetscSF_Window  *w = (PetscSF_Window*)sf->data;
  PetscSFDataLink link,next;
  PetscSFWinLink  wlink,wnext;
  PetscInt        i;

  for (link=w->link; link; link=next) {
    next = link->next;
    MPI_Type_free(&link->unit);
    for (i=0; i<sf->nranks; i++) {
      MPI_Type_free(&link->mine[i]);
      MPI_Type_free(&link->remote[i]);
    }
    PetscFree2(link->mine,link->remote);
    PetscFree(link);
  }
  w->link = NULL;
  for (wlink=w->wins; wlink; wlink=wnext) {
    wnext = wlink->next;
    if (wlink->inuse) SETERRQ1(PetscObjectComm((PetscObject)sf),PETSC_ERR_ARG_WRONGSTATE,"Window still in use with address %p",(void*)wlink->rkey);
    MPI_Win_free(&wlink->win);
    PetscFree(wlink);
  }
  w->wins = NULL;
  return(0);
}

static PetscErrorCode PetscSFDestroy_Window(PetscSF sf)
{
  PetscSFReset_Window(sf);
  PetscFree(sf->data);
  PetscObjectComposeFunction((PetscObject)sf,"PetscSFWindowSetSyncType_C",NULL);
  PetscObjectComposeFunction((PetscObject)sf,"PetscSFWindowGetSyncType_C",NULL);
  return(0);
}

static PetscErrorCode PetscSFView_Window(PetscSF sf,PetscViewer viewer)
{
  PetscSF_Window *w = (PetscSF_Window*)sf->data;
  PetscBool      iascii;

  PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);
  if (iascii) {
    PetscViewerASCIIPrintf(viewer,"  synchronization=%s sort=%s\n",PetscSFWindowSyncTypes[w->sync],sf->rankorder ? "rank-order" : "unordered");
  }
  return(0);
}

static PetscErrorCode PetscSFDuplicate_Window(PetscSF sf,PetscSFDuplicateOption opt,PetscSF newsf)
{
  PetscSF_Window        *w = (PetscSF_Window*)sf->data;
  PetscSFWindowSyncType synctype;

  synctype = w->sync;
  if (!sf->setupcalled) {
    /* HACK: Must use FENCE or LOCK when called from PetscSFGetGroups() because ACTIVE here would cause recursion. */
    synctype = PETSCSF_WINDOW_SYNC_LOCK;
  }
  PetscSFWindowSetSyncType(newsf,synctype);
  return(0);
}

static PetscErrorCode PetscSFBcastBegin_Window(PetscSF sf,MPI_Datatype unit,const void *rootdata,void *leafdata)
{
  PetscSF_Window     *w = (PetscSF_Window*)sf->data;
  PetscInt           i,nranks;
  const PetscMPIInt  *ranks;
  const MPI_Datatype *mine,*remote;
  MPI_Win            win;

  PetscSFGetRanks(sf,&nranks,&ranks,NULL,NULL,NULL);
  PetscSFWindowGetDataTypes(sf,unit,&mine,&remote);
  PetscSFGetWindow(sf,unit,rootdata,leafdata,PETSC_TRUE,MPI_MODE_NOPUT|MPI_MODE_NOPRECEDE,MPI_MODE_NOPUT,0,&win);
  for (i=0; i<nranks; i++) {
    if (w->sync == PETSCSF_WINDOW_SYNC_LOCK) {MPI_Win_lock(MPI_LOCK_SHARED,ranks[i],MPI_MODE_NOCHECK,win);}
    MPI_Get(leafdata,1,mine[i],ranks[i],0,1,remote[i],win);
    if (w->sync == PETSCSF_WINDOW_SYNC_LOCK) {MPI_Win_unlock(ranks[i],win);}
  }
  return(0);
}
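
/* Usage sketch (not part of the original file): broadcast root values to the
   leaves through the public interface; rootdata and leafdata are assumed to be
   sized according to the graph set with PetscSFSetGraph(). */
#if 0
PetscSFBcastBegin(sf,MPIU_SCALAR,rootdata,leafdata);
/* local work may be overlapped here, although with LOCK synchronization the
   transfer above already completes inside Begin */
PetscSFBcastEnd(sf,MPIU_SCALAR,rootdata,leafdata);
#endif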

PetscErrorCode PetscSFBcastEnd_Window(PetscSF sf,MPI_Datatype unit,const void *rootdata,void *leafdata)
{
  MPI_Win win;

  PetscSFFindWindow(sf,unit,rootdata,leafdata,&win);
  PetscSFRestoreWindow(sf,unit,rootdata,leafdata,PETSC_TRUE,MPI_MODE_NOSTORE|MPI_MODE_NOSUCCEED,&win);
  return(0);
}

static PetscErrorCode PetscSFBcastAndOpBegin_Window(PetscSF sf,MPI_Datatype unit,const void *rootdata,void *leafdata,MPI_Op op)
{
  if (op == MPI_REPLACE) { PetscSFBcastBegin_Window(sf,unit,rootdata,leafdata); }
  else SETERRQ(PetscObjectComm((PetscObject)sf),PETSC_ERR_SUP,"PetscSFBcastAndOpBegin_Window with reduction op has not been implemented");
  return(0);
}

PetscErrorCode PetscSFBcastAndOpEnd_Window(PetscSF sf,MPI_Datatype unit,const void *rootdata,void *leafdata,MPI_Op op)
{
  if (op == MPI_REPLACE) { PetscSFBcastEnd_Window(sf,unit,rootdata,leafdata); }
  else SETERRQ(PetscObjectComm((PetscObject)sf),PETSC_ERR_SUP,"PetscSFBcastAndOpEnd_Window with reduction op has not been implemented");
  return(0);
}

PetscErrorCode PetscSFReduceBegin_Window(PetscSF sf,MPI_Datatype unit,const void *leafdata,void *rootdata,MPI_Op op)
{
  PetscSF_Window     *w = (PetscSF_Window*)sf->data;
  PetscInt           i,nranks;
  const PetscMPIInt  *ranks;
  const MPI_Datatype *mine,*remote;
  MPI_Win            win;

  PetscSFGetRanks(sf,&nranks,&ranks,NULL,NULL,NULL);
  PetscSFWindowGetDataTypes(sf,unit,&mine,&remote);
  PetscSFWindowOpTranslate(&op);
  PetscSFGetWindow(sf,unit,rootdata,leafdata,PETSC_TRUE,MPI_MODE_NOPRECEDE,0,0,&win);
  for (i=0; i<nranks; i++) {
    if (w->sync == PETSCSF_WINDOW_SYNC_LOCK) {MPI_Win_lock(MPI_LOCK_SHARED,ranks[i],MPI_MODE_NOCHECK,win);}
    MPI_Accumulate((void*)leafdata,1,mine[i],ranks[i],0,1,remote[i],op,win);
    if (w->sync == PETSCSF_WINDOW_SYNC_LOCK) {MPI_Win_unlock(ranks[i],win);}
  }
  return(0);
}

static PetscErrorCode PetscSFReduceEnd_Window(PetscSF sf,MPI_Datatype unit,const void *leafdata,void *rootdata,MPI_Op op)
{
  PetscSF_Window *w = (PetscSF_Window*)sf->data;
  MPI_Win        win;

  if (!w->wins) return(0);
  PetscSFFindWindow(sf,unit,rootdata,leafdata,&win);
  MPI_Win_fence(MPI_MODE_NOSUCCEED,win);
  PetscSFRestoreWindow(sf,unit,rootdata,leafdata,PETSC_TRUE,MPI_MODE_NOSUCCEED,&win);
  return(0);
}
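
/* Usage sketch (not part of the original file): accumulate leaf contributions
   into the roots; the MPIU_ op is translated to its built-in counterpart
   before being handed to MPI_Accumulate(). */
#if 0
PetscSFReduceBegin(sf,MPIU_SCALAR,leafdata,rootdata,MPIU_SUM);
PetscSFReduceEnd(sf,MPIU_SCALAR,leafdata,rootdata,MPIU_SUM);
#endif
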
static PetscErrorCode PetscSFFetchAndOpBegin_Window(PetscSF sf,MPI_Datatype unit,void *rootdata,const void *leafdata,void *leafupdate,MPI_Op op)
{
  PetscInt           i,nranks;
  const PetscMPIInt  *ranks;
  const MPI_Datatype *mine,*remote;
  MPI_Win            win;

  PetscSFGetRanks(sf,&nranks,&ranks,NULL,NULL,NULL);
  PetscSFWindowGetDataTypes(sf,unit,&mine,&remote);
  PetscSFWindowOpTranslate(&op);
  PetscSFGetWindow(sf,unit,rootdata,leafdata,PETSC_FALSE,0,0,0,&win);
  for (i=0; i<nranks; i++) {
    MPI_Win_lock(MPI_LOCK_EXCLUSIVE,ranks[i],0,win);
    MPI_Get(leafupdate,1,mine[i],ranks[i],0,1,remote[i],win);
    MPI_Accumulate((void*)leafdata,1,mine[i],ranks[i],0,1,remote[i],op,win);
    MPI_Win_unlock(ranks[i],win);
  }
  return(0);
}

static PetscErrorCode PetscSFFetchAndOpEnd_Window(PetscSF sf,MPI_Datatype unit,void *rootdata,const void *leafdata,void *leafupdate,MPI_Op op)
{
  MPI_Win win;

  PetscSFFindWindow(sf,unit,rootdata,leafdata,&win);
  /* Nothing to do currently because MPI_LOCK_EXCLUSIVE is used in PetscSFFetchAndOpBegin(), rendering this implementation synchronous. */
  PetscSFRestoreWindow(sf,unit,rootdata,leafdata,PETSC_FALSE,0,&win);
  return(0);
}
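
/* Usage sketch (not part of the original file): fetch the current root values
   into leafupdate while combining the leaf values into the roots. */
#if 0
PetscSFFetchAndOpBegin(sf,MPIU_INT,rootdata,leafdata,leafupdate,MPI_SUM);
PetscSFFetchAndOpEnd(sf,MPIU_INT,rootdata,leafdata,leafupdate,MPI_SUM);
#endif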

PETSC_EXTERN PetscErrorCode PetscSFCreate_Window(PetscSF sf)
{
  PetscSF_Window *w;

  sf->ops->SetUp           = PetscSFSetUp_Window;
  sf->ops->SetFromOptions  = PetscSFSetFromOptions_Window;
  sf->ops->Reset           = PetscSFReset_Window;
  sf->ops->Destroy         = PetscSFDestroy_Window;
  sf->ops->View            = PetscSFView_Window;
  sf->ops->Duplicate       = PetscSFDuplicate_Window;
  sf->ops->BcastBegin      = PetscSFBcastBegin_Window;
  sf->ops->BcastEnd        = PetscSFBcastEnd_Window;
  sf->ops->BcastAndOpBegin = PetscSFBcastAndOpBegin_Window;
  sf->ops->BcastAndOpEnd   = PetscSFBcastAndOpEnd_Window;
  sf->ops->ReduceBegin     = PetscSFReduceBegin_Window;
  sf->ops->ReduceEnd       = PetscSFReduceEnd_Window;
  sf->ops->FetchAndOpBegin = PetscSFFetchAndOpBegin_Window;
  sf->ops->FetchAndOpEnd   = PetscSFFetchAndOpEnd_Window;

  PetscNewLog(sf,&w);
  sf->data = (void*)w;
  w->sync  = PETSCSF_WINDOW_SYNC_FENCE;

  PetscObjectComposeFunction((PetscObject)sf,"PetscSFWindowSetSyncType_C",PetscSFWindowSetSyncType_Window);
  PetscObjectComposeFunction((PetscObject)sf,"PetscSFWindowGetSyncType_C",PetscSFWindowGetSyncType_Window);

#if defined(OMPI_MAJOR_VERSION) && (OMPI_MAJOR_VERSION < 1 || (OMPI_MAJOR_VERSION == 1 && OMPI_MINOR_VERSION <= 6))
  {
    PetscBool ackbug = PETSC_FALSE;
    PetscOptionsGetBool(NULL,NULL,"-acknowledge_ompi_onesided_bug",&ackbug,NULL);
    if (ackbug) {
      PetscInfo(sf,"Acknowledged Open MPI bug, proceeding anyway. Expect memory corruption.\n");
    } else SETERRQ(PetscObjectComm((PetscObject)sf),PETSC_ERR_LIB,"Open MPI is known to be buggy (https://svn.open-mpi.org/trac/ompi/ticket/1905 and 2656), use -acknowledge_ompi_onesided_bug to proceed");
  }
#endif
  return(0);
}
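
/* Usage sketch (not part of the original file): create a star forest backed by
   this one-sided implementation.  The two-leaf graph below is hypothetical. */
#if 0
PetscSF     sf;
PetscSFNode remote[2];
remote[0].rank = 0; remote[0].index = 0; /* leaf 0 references root 0 on rank 0 */
remote[1].rank = 0; remote[1].index = 1; /* leaf 1 references root 1 on rank 0 */
PetscSFCreate(PETSC_COMM_WORLD,&sf);
PetscSFSetType(sf,PETSCSFWINDOW);
PetscSFSetGraph(sf,2,2,NULL,PETSC_COPY_VALUES,remote,PETSC_COPY_VALUES);
PetscSFSetFromOptions(sf); /* picks up -sf_window_sync */
PetscSFSetUp(sf);
/* ... PetscSFBcastBegin/End(), PetscSFReduceBegin/End() ... */
PetscSFDestroy(&sf);
#endif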