Actual source code: sfwindow.c
petsc-3.13.6 2020-09-29
1: #include <petsc/private/sfimpl.h>
3: typedef struct _n_PetscSFDataLink *PetscSFDataLink;
4: typedef struct _n_PetscSFWinLink *PetscSFWinLink;
6: typedef struct {
7: PetscSFWindowSyncType sync; /* FENCE, LOCK, or ACTIVE synchronization */
8: PetscSFDataLink link; /* List of MPI data types, lazily constructed for each data type */
9: PetscSFWinLink wins; /* List of active windows */
10: PetscSFWindowFlavorType flavor; /* Current PETSCSF_WINDOW_FLAVOR_ */
11: PetscSF dynsf;
12: MPI_Info info;
13: } PetscSF_Window;
15: struct _n_PetscSFDataLink {
16: MPI_Datatype unit;
17: MPI_Datatype *mine;
18: MPI_Datatype *remote;
19: PetscSFDataLink next;
20: };
22: struct _n_PetscSFWinLink {
23: PetscBool inuse;
24: size_t bytes;
25: void *addr;
26: void *paddr;
27: MPI_Win win;
28: MPI_Request *reqs;
29: PetscSFWindowFlavorType flavor;
30: MPI_Aint *dyn_target_addr;
31: PetscBool epoch;
32: PetscSFWinLink next;
33: };
35: const char *const PetscSFWindowSyncTypes[] = {"FENCE","LOCK","ACTIVE","PetscSFWindowSyncType","PETSCSF_WINDOW_SYNC_",0};
36: const char *const PetscSFWindowFlavorTypes[] = {"CREATE","DYNAMIC","ALLOCATE","SHARED","PetscSFWindowFlavorType","PETSCSF_WINDOW_FLAVOR_",0};
38: /* Built-in MPI_Ops act elementwise inside MPI_Accumulate, but cannot be used with composite types inside collectives (MPIU_Allreduce) */
39: static PetscErrorCode PetscSFWindowOpTranslate(MPI_Op *op)
40: {
42: if (*op == MPIU_SUM) *op = MPI_SUM;
43: else if (*op == MPIU_MAX) *op = MPI_MAX;
44: else if (*op == MPIU_MIN) *op = MPI_MIN;
45: return(0);
46: }
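/* A sketch of the intended use of the translation above: MPIU_SUM may be a
   user-defined MPI_Op (e.g. for complex PetscScalar), and the MPI standard only
   permits built-in operations in MPI_Accumulate, so the op is mapped to its
   built-in counterpart before issuing RMA calls (minetype/remotetype/win are
   illustrative names):

     MPI_Op op = MPIU_SUM;
     PetscSFWindowOpTranslate(&op);  /* op is now MPI_SUM where a built-in exists */
     MPI_Accumulate(leafdata,1,minetype,rank,0,1,remotetype,op,win);
*/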
48: /*@C
49: PetscSFWindowGetDataTypes - gets composite local and remote data types for each rank
51: Not Collective
53: Input Arguments:
54: + sf - star forest
55: - unit - data type for each node
57: Output Arguments:
58: + localtypes - types describing part of local leaf buffer referencing each remote rank
59: - remotetypes - types describing part of remote root buffer referenced for each remote rank
61: Level: developer
63: .seealso: PetscSFSetGraph(), PetscSFView()
64: @*/
65: static PetscErrorCode PetscSFWindowGetDataTypes(PetscSF sf,MPI_Datatype unit,const MPI_Datatype **localtypes,const MPI_Datatype **remotetypes)
66: {
67: PetscSF_Window *w = (PetscSF_Window*)sf->data;
68: PetscErrorCode ierr;
69: PetscSFDataLink link;
70: PetscInt i,nranks;
71: const PetscInt *roffset,*rmine,*rremote;
72: const PetscMPIInt *ranks;
75: /* Look for types in cache */
76: for (link=w->link; link; link=link->next) {
77: PetscBool match;
78: MPIPetsc_Type_compare(unit,link->unit,&match);
79: if (match) {
80: *localtypes = link->mine;
81: *remotetypes = link->remote;
82: return(0);
83: }
84: }
86: /* Create new composite types for each send rank */
87: PetscSFGetRootRanks(sf,&nranks,&ranks,&roffset,&rmine,&rremote);
88: PetscNew(&link);
89: MPI_Type_dup(unit,&link->unit);
90: PetscMalloc2(nranks,&link->mine,nranks,&link->remote);
91: for (i=0; i<nranks; i++) {
92: PetscInt rcount = roffset[i+1] - roffset[i];
93: PetscMPIInt *rmine,*rremote;
94: #if !defined(PETSC_USE_64BIT_INDICES)
95: rmine = sf->rmine + sf->roffset[i];
96: rremote = sf->rremote + sf->roffset[i];
97: #else
98: PetscInt j;
99: PetscMalloc2(rcount,&rmine,rcount,&rremote);
100: for (j=0; j<rcount; j++) {
101: PetscMPIIntCast(sf->rmine[sf->roffset[i]+j],rmine+j);
102: PetscMPIIntCast(sf->rremote[sf->roffset[i]+j],rremote+j);
103: }
104: #endif
106: MPI_Type_create_indexed_block(rcount,1,rmine,link->unit,&link->mine[i]);
107: MPI_Type_create_indexed_block(rcount,1,rremote,link->unit,&link->remote[i]);
108: #if defined(PETSC_USE_64BIT_INDICES)
109: PetscFree2(rmine,rremote);
110: #endif
111: MPI_Type_commit(&link->mine[i]);
112: MPI_Type_commit(&link->remote[i]);
113: }
114: link->next = w->link;
115: w->link = link;
117: *localtypes = link->mine;
118: *remotetypes = link->remote;
119: return(0);
120: }
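/* A sketch of how the composite types built above are consumed (ranks[i], win,
   and leafdata are illustrative): one MPI_Get per remote rank moves all of that
   rank's entries in a single call, with mine[i] scattering into the local leaf
   buffer while remote[i] gathers from the root buffer on rank ranks[i]:

     MPI_Get(leafdata,1,mine[i],ranks[i],0,1,remote[i],win);
*/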
122: /*@C
123: PetscSFWindowSetFlavorType - Set flavor type for MPI_Win creation
125: Logically Collective
127: Input Arguments:
128: + sf - star forest for communication
129: - flavor - flavor type
131: Options Database Key:
132: . -sf_window_flavor <flavor> - sets the flavor type CREATE, DYNAMIC, ALLOCATE or SHARED (see PetscSFWindowFlavorType)
134: Level: advanced
136: Notes: Window reuse follows these rules:
138: PETSCSF_WINDOW_FLAVOR_CREATE: creates a new window every time, uses MPI_Win_create
140: PETSCSF_WINDOW_FLAVOR_DYNAMIC: uses MPI_Win_create_dynamic/MPI_Win_attach and tries to reuse windows by comparing the root array. Intended to be used on repeated applications of the same SF, e.g.
141: for i=1 to K
142: PetscSFOperationBegin(rootdata1,leafdata_whatever);
143: PetscSFOperationEnd(rootdata1,leafdata_whatever);
144: ...
145: PetscSFOperationBegin(rootdataN,leafdata_whatever);
146: PetscSFOperationEnd(rootdataN,leafdata_whatever);
147: endfor
148: The following pattern will instead raise an error
149: PetscSFOperationBegin(rootdata1,leafdata_whatever);
150: PetscSFOperationEnd(rootdata1,leafdata_whatever);
151: PetscSFOperationBegin(rank ? rootdata1 : rootdata2,leafdata_whatever);
152: PetscSFOperationEnd(rank ? rootdata1 : rootdata2,leafdata_whatever);
154: PETSCSF_WINDOW_FLAVOR_ALLOCATE: uses MPI_Win_allocate, reuses any pre-existing window that fits the data and is not in use
156: PETSCSF_WINDOW_FLAVOR_SHARED: uses MPI_Win_allocate_shared, with the same reuse policy as PETSCSF_WINDOW_FLAVOR_ALLOCATE
158: .seealso: PetscSFSetFromOptions(), PetscSFWindowGetFlavorType()
159: @*/
160: PetscErrorCode PetscSFWindowSetFlavorType(PetscSF sf,PetscSFWindowFlavorType flavor)
161: {
167: PetscTryMethod(sf,"PetscSFWindowSetFlavorType_C",(PetscSF,PetscSFWindowFlavorType),(sf,flavor));
168: return(0);
169: }
171: static PetscErrorCode PetscSFWindowSetFlavorType_Window(PetscSF sf,PetscSFWindowFlavorType flavor)
172: {
173: PetscSF_Window *w = (PetscSF_Window*)sf->data;
176: w->flavor = flavor;
177: return(0);
178: }
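/* A minimal usage sketch (sf is assumed to be a PetscSF of type PETSCSFWINDOW):

     PetscSFWindowSetFlavorType(sf,PETSCSF_WINDOW_FLAVOR_DYNAMIC);

   or, equivalently, via the options database: -sf_window_flavor dynamic */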
180: /*@C
181: PetscSFWindowGetFlavorType - Get flavor type for PetscSF communication
183: Logically Collective
185: Input Argument:
186: . sf - star forest for communication
188: Output Argument:
189: . flavor - flavor type
191: Level: advanced
193: .seealso: PetscSFSetFromOptions(), PetscSFWindowSetFlavorType()
194: @*/
195: PetscErrorCode PetscSFWindowGetFlavorType(PetscSF sf,PetscSFWindowFlavorType *flavor)
196: {
202: PetscUseMethod(sf,"PetscSFWindowGetFlavorType_C",(PetscSF,PetscSFWindowFlavorType*),(sf,flavor));
203: return(0);
204: }
206: static PetscErrorCode PetscSFWindowGetFlavorType_Window(PetscSF sf,PetscSFWindowFlavorType *flavor)
207: {
208: PetscSF_Window *w = (PetscSF_Window*)sf->data;
211: *flavor = w->flavor;
212: return(0);
213: }
215: /*@C
216: PetscSFWindowSetSyncType - Set synchronization type for PetscSF communication
218: Logically Collective
220: Input Arguments:
221: + sf - star forest for communication
222: - sync - synchronization type
224: Options Database Key:
225: . -sf_window_sync <sync> - sets the synchronization type FENCE, LOCK, or ACTIVE (see PetscSFWindowSyncType)
227: Level: advanced
229: .seealso: PetscSFSetFromOptions(), PetscSFWindowGetSyncType()
230: @*/
231: PetscErrorCode PetscSFWindowSetSyncType(PetscSF sf,PetscSFWindowSyncType sync)
232: {
238: PetscTryMethod(sf,"PetscSFWindowSetSyncType_C",(PetscSF,PetscSFWindowSyncType),(sf,sync));
239: return(0);
240: }
242: static PetscErrorCode PetscSFWindowSetSyncType_Window(PetscSF sf,PetscSFWindowSyncType sync)
243: {
244: PetscSF_Window *w = (PetscSF_Window*)sf->data;
247: w->sync = sync;
248: return(0);
249: }
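/* A minimal usage sketch (sf is assumed to be a PetscSF of type PETSCSFWINDOW):

     PetscSFWindowSetSyncType(sf,PETSCSF_WINDOW_SYNC_LOCK);

   or, equivalently, via the options database: -sf_window_sync lock */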
251: /*@C
252: PetscSFWindowGetSyncType - Get synchronization type for PetscSF communication
254: Logically Collective
256: Input Argument:
257: . sf - star forest for communication
259: Output Argument:
260: . sync - synchronization type
262: Level: advanced
264: .seealso: PetscSFSetFromOptions(), PetscSFWindowSetSyncType()
265: @*/
266: PetscErrorCode PetscSFWindowGetSyncType(PetscSF sf,PetscSFWindowSyncType *sync)
267: {
273: PetscUseMethod(sf,"PetscSFWindowGetSyncType_C",(PetscSF,PetscSFWindowSyncType*),(sf,sync));
274: return(0);
275: }
277: static PetscErrorCode PetscSFWindowGetSyncType_Window(PetscSF sf,PetscSFWindowSyncType *sync)
278: {
279: PetscSF_Window *w = (PetscSF_Window*)sf->data;
282: *sync = w->sync;
283: return(0);
284: }
286: /*@C
287: PetscSFWindowSetInfo - Set the MPI_Info handle that will be used for subsequent windows allocation
289: Logically Collective
291: Input Arguments:
292: + sf - star forest for communication
293: - info - MPI_Info handle
295: Level: advanced
297: Notes: the info handle is duplicated with a call to MPI_Info_dup unless info = MPI_INFO_NULL.
299: .seealso: PetscSFSetFromOptions(), PetscSFWindowGetInfo()
300: @*/
301: PetscErrorCode PetscSFWindowSetInfo(PetscSF sf,MPI_Info info)
302: {
307: PetscTryMethod(sf,"PetscSFWindowSetInfo_C",(PetscSF,MPI_Info),(sf,info));
308: return(0);
309: }
311: static PetscErrorCode PetscSFWindowSetInfo_Window(PetscSF sf,MPI_Info info)
312: {
313: PetscSF_Window *w = (PetscSF_Window*)sf->data;
317: if (w->info != MPI_INFO_NULL) {
318: MPI_Info_free(&w->info);
319: }
320: if (info != MPI_INFO_NULL) {
321: MPI_Info_dup(info,&w->info);
322: }
323: return(0);
324: }
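/* A minimal usage sketch: hints set here are passed to subsequent window
   creations, e.g. declaring that passive-target locks will not be used
   (only appropriate with FENCE or ACTIVE synchronization):

     MPI_Info info;
     MPI_Info_create(&info);
     MPI_Info_set(info,"no_locks","true");
     PetscSFWindowSetInfo(sf,info);
     MPI_Info_free(&info);   /* safe: the SF keeps its own duplicate */
*/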
326: /*@C
327: PetscSFWindowGetInfo - Get the MPI_Info handle used for windows allocation
329: Logically Collective
331: Input Argument:
332: . sf - star forest for communication
334: Output Argument:
335: . info - MPI_Info handle
337: Level: advanced
339: Notes: If PetscSFWindowSetInfo() has not been called, this returns MPI_INFO_NULL
341: .seealso: PetscSFSetFromOptions(), PetscSFWindowSetInfo()
342: @*/
343: PetscErrorCode PetscSFWindowGetInfo(PetscSF sf,MPI_Info *info)
344: {
350: PetscUseMethod(sf,"PetscSFWindowGetInfo_C",(PetscSF,MPI_Info*),(sf,info));
351: return(0);
352: }
354: static PetscErrorCode PetscSFWindowGetInfo_Window(PetscSF sf,MPI_Info *info)
355: {
356: PetscSF_Window *w = (PetscSF_Window*)sf->data;
359: *info = w->info;
360: return(0);
361: }
363: /*
364: PetscSFGetWindow - Get a window for use with a given data type
366: Collective on PetscSF
368: Input Arguments:
369: + sf - star forest
370: . unit - data type
371: . array - array to be sent
372: . sync - type of synchronization PetscSFWindowSyncType
373: . epoch - PETSC_TRUE to acquire the window and start an epoch, PETSC_FALSE to just acquire the window
374: . fenceassert - assert parameter for call to MPI_Win_fence(), if sync == PETSCSF_WINDOW_SYNC_FENCE
375: . postassert - assert parameter for call to MPI_Win_post(), if sync == PETSCSF_WINDOW_SYNC_ACTIVE
376: - startassert - assert parameter for call to MPI_Win_start(), if sync == PETSCSF_WINDOW_SYNC_ACTIVE
378: Output Arguments:
379: + target_disp - target_disp argument for RMA calls (significant for PETSCSF_WINDOW_FLAVOR_DYNAMIC only)
380: . reqs - array of requests (significant for sync == PETSCSF_WINDOW_SYNC_LOCK only)
381: - win - window
383: Level: developer
384: .seealso: PetscSFGetRootRanks(), PetscSFWindowGetDataTypes()
385: */
386: static PetscErrorCode PetscSFGetWindow(PetscSF sf,MPI_Datatype unit,void *array,PetscSFWindowSyncType sync,PetscBool epoch,PetscMPIInt fenceassert,PetscMPIInt postassert,PetscMPIInt startassert,const MPI_Aint **target_disp, MPI_Request **reqs, MPI_Win *win)
387: {
388: PetscSF_Window *w = (PetscSF_Window*)sf->data;
389: PetscErrorCode ierr;
390: MPI_Aint lb,lb_true,bytes,bytes_true;
391: PetscSFWinLink link;
392: MPI_Aint winaddr;
393: PetscInt nranks;
394: PetscBool reuse = PETSC_FALSE, update = PETSC_FALSE;
395: #if defined(PETSC_USE_DEBUG)
396: PetscBool dummy[2];
397: #endif
398: MPI_Aint wsize;
401: MPI_Type_get_extent(unit,&lb,&bytes);
402: MPI_Type_get_true_extent(unit,&lb_true,&bytes_true);
403: if (lb != 0 || lb_true != 0) SETERRQ(PetscObjectComm((PetscObject)sf),PETSC_ERR_SUP,"No support for unit type with nonzero lower bound, write petsc-maint@mcs.anl.gov if you want this feature");
404: if (bytes != bytes_true) SETERRQ(PetscObjectComm((PetscObject)sf),PETSC_ERR_SUP,"No support for unit type with modified extent, write petsc-maint@mcs.anl.gov if you want this feature");
405: if (w->flavor != PETSCSF_WINDOW_FLAVOR_CREATE) reuse = PETSC_TRUE;
406: for (link=w->wins; reuse && link; link=link->next) {
407: PetscBool winok = PETSC_FALSE;
408: if (w->flavor != link->flavor) continue;
409: switch (w->flavor) {
410: case PETSCSF_WINDOW_FLAVOR_DYNAMIC: /* check available matching array, error if in use (we additionally check that the matching condition is the same across processes) */
411: if (array == link->addr) {
412: #if defined(PETSC_USE_DEBUG)
413: dummy[0] = PETSC_TRUE;
414: dummy[1] = PETSC_TRUE;
415: MPI_Allreduce(MPI_IN_PLACE,dummy ,1,MPIU_BOOL,MPI_LAND,PetscObjectComm((PetscObject)sf));
416: MPI_Allreduce(MPI_IN_PLACE,dummy+1,1,MPIU_BOOL,MPI_LOR ,PetscObjectComm((PetscObject)sf));
417: if (dummy[0] != dummy[1]) SETERRQ(PetscObjectComm((PetscObject)sf),PETSC_ERR_SUP,"PETSCSF_WINDOW_FLAVOR_DYNAMIC requires root pointers to be consistently used across the comm. Use PETSCSF_WINDOW_FLAVOR_CREATE or PETSCSF_WINDOW_FLAVOR_ALLOCATE instead");
418: #endif
419: if (link->inuse) SETERRQ(PetscObjectComm((PetscObject)sf),PETSC_ERR_PLIB,"Window in use");
420: if (epoch && link->epoch) SETERRQ(PetscObjectComm((PetscObject)sf),PETSC_ERR_PLIB,"Window epoch not finished");
421: winok = PETSC_TRUE;
422: link->paddr = array;
423: #if defined(PETSC_USE_DEBUG)
424: } else {
425: dummy[0] = PETSC_FALSE;
426: dummy[1] = PETSC_FALSE;
427: MPI_Allreduce(MPI_IN_PLACE,dummy ,1,MPIU_BOOL,MPI_LAND,PetscObjectComm((PetscObject)sf));
428: MPI_Allreduce(MPI_IN_PLACE,dummy+1,1,MPIU_BOOL,MPI_LOR ,PetscObjectComm((PetscObject)sf));
429: if (dummy[0] != dummy[1]) SETERRQ(PetscObjectComm((PetscObject)sf),PETSC_ERR_SUP,"PETSCSF_WINDOW_FLAVOR_DYNAMIC requires root pointers to be consistently used across the comm. Use PETSCSF_WINDOW_FLAVOR_CREATE or PETSCSF_WINDOW_FLAVOR_ALLOCATE instead");
430: #endif
431: }
432: break;
433: case PETSCSF_WINDOW_FLAVOR_ALLOCATE: /* check available by matching size, allocate if in use */
434: case PETSCSF_WINDOW_FLAVOR_SHARED:
435: if (!link->inuse && bytes == (MPI_Aint)link->bytes) {
436: update = PETSC_TRUE;
437: link->paddr = array;
438: winok = PETSC_TRUE;
439: }
440: break;
441: default: SETERRQ1(PetscObjectComm((PetscObject)sf),PETSC_ERR_SUP,"No support for flavor %s",PetscSFWindowFlavorTypes[w->flavor]);
442: }
443: if (winok) {
444: *win = link->win;
445: PetscInfo3(sf,"Reusing window %d of flavor %d for comm %d\n",link->win,link->flavor,PetscObjectComm((PetscObject)sf));
446: goto found;
447: }
448: }
450: wsize = (MPI_Aint)bytes*sf->nroots;
451: PetscNew(&link);
452: link->bytes = bytes;
453: link->next = w->wins;
454: link->flavor = w->flavor;
455: link->dyn_target_addr = NULL;
456: link->reqs = NULL;
457: w->wins = link;
458: if (sync == PETSCSF_WINDOW_SYNC_LOCK) {
459: PetscInt i;
461: PetscMalloc1(sf->nranks,&link->reqs);
462: for (i = 0; i < sf->nranks; i++) link->reqs[i] = MPI_REQUEST_NULL;
463: }
464: switch (w->flavor) {
465: case PETSCSF_WINDOW_FLAVOR_CREATE:
466: MPI_Win_create(array,wsize,(PetscMPIInt)bytes,w->info,PetscObjectComm((PetscObject)sf),&link->win);
467: link->addr = array;
468: link->paddr = array;
469: break;
470: case PETSCSF_WINDOW_FLAVOR_DYNAMIC:
471: MPI_Win_create_dynamic(w->info,PetscObjectComm((PetscObject)sf),&link->win);
472: #if defined(PETSC_HAVE_OMPI_MAJOR_VERSION) /* some OpenMPI versions do not support MPI_Win_attach(win,NULL,0); */
473: MPI_Win_attach(link->win,wsize ? array : &ierr,wsize);
474: #else
475: MPI_Win_attach(link->win,array,wsize);
476: #endif
477: link->addr = array;
478: link->paddr = array;
479: if (!w->dynsf) SETERRQ(PetscObjectComm((PetscObject)sf),PETSC_ERR_ORDER,"Must call PetscSFSetUp()");
480: PetscSFSetUp(w->dynsf);
481: PetscSFGetRootRanks(w->dynsf,&nranks,NULL,NULL,NULL,NULL);
482: PetscMalloc1(nranks,&link->dyn_target_addr);
483: MPI_Get_address(array,&winaddr);
484: PetscSFBcastBegin(w->dynsf,MPI_AINT,&winaddr,link->dyn_target_addr);
485: PetscSFBcastEnd(w->dynsf,MPI_AINT,&winaddr,link->dyn_target_addr);
486: break;
487: case PETSCSF_WINDOW_FLAVOR_ALLOCATE:
488: MPI_Win_allocate(wsize,(PetscMPIInt)bytes,w->info,PetscObjectComm((PetscObject)sf),&link->addr,&link->win);
489: update = PETSC_TRUE;
490: link->paddr = array;
491: break;
492: #if defined(PETSC_HAVE_MPI_PROCESS_SHARED_MEMORY)
493: case PETSCSF_WINDOW_FLAVOR_SHARED:
494: MPI_Win_allocate_shared(wsize,(PetscMPIInt)bytes,w->info,PetscObjectComm((PetscObject)sf),&link->addr,&link->win);
495: update = PETSC_TRUE;
496: link->paddr = array;
497: break;
498: #endif
499: default: SETERRQ1(PetscObjectComm((PetscObject)sf),PETSC_ERR_SUP,"No support for flavor %s",PetscSFWindowFlavorTypes[w->flavor]);
500: }
501: PetscInfo3(sf,"New window %d of flavor %d for comm %d\n",link->win,link->flavor,PetscObjectComm((PetscObject)sf));
502: *win = link->win;
504: found:
506: if (target_disp) *target_disp = link->dyn_target_addr;
507: if (reqs) *reqs = link->reqs;
508: if (update) { /* locks are needed for the "separate" memory model only, the fence guarantees memory synchronization */
509: PetscMPIInt rank;
511: MPI_Comm_rank(PetscObjectComm((PetscObject)sf),&rank);
512: if (sync == PETSCSF_WINDOW_SYNC_LOCK) { MPI_Win_lock(MPI_LOCK_EXCLUSIVE,rank,MPI_MODE_NOCHECK,*win); }
513: PetscMemcpy(link->addr,array,sf->nroots*bytes);
514: if (sync == PETSCSF_WINDOW_SYNC_LOCK) {
515: MPI_Win_unlock(rank,*win);
516: MPI_Win_fence(0,*win);
517: }
518: }
519: link->inuse = PETSC_TRUE;
520: link->epoch = epoch;
521: if (epoch) {
522: switch (sync) {
523: case PETSCSF_WINDOW_SYNC_FENCE:
524: MPI_Win_fence(fenceassert,*win);
525: break;
526: case PETSCSF_WINDOW_SYNC_LOCK: /* Handled outside */
527: break;
528: case PETSCSF_WINDOW_SYNC_ACTIVE: {
529: MPI_Group ingroup,outgroup;
530: PetscMPIInt isize,osize;
532: /* OpenMPI 4.0.2 with btl=vader does not like calling
533: - MPI_Win_complete when ogroup is empty
534: - MPI_Win_wait when igroup is empty
535: So, we do not even issue the corresponding start and post calls
536: The MPI standard (Sec. 11.5.2 of MPI 3.1) only requires that
537: start(outgroup) has a matching post(ingroup)
538: and this is guaranteed by PetscSF
539: */
540: PetscSFGetGroups(sf,&ingroup,&outgroup);
541: MPI_Group_size(ingroup,&isize);
542: MPI_Group_size(outgroup,&osize);
543: if (isize) { MPI_Win_post(ingroup,postassert,*win); }
544: if (osize) { MPI_Win_start(outgroup,startassert,*win); }
545: } break;
546: default: SETERRQ(PetscObjectComm((PetscObject)sf),PETSC_ERR_PLIB,"Unknown synchronization type");
547: }
548: }
549: return(0);
550: }
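/* A sketch of the epoch pairing implied above (FENCE synchronization): the
   MPI_Win_fence(fenceassert,*win) issued here opens an epoch that the matching
   fence in PetscSFRestoreWindow() closes:

     PetscSFGetWindow(...,&win);     // issues MPI_Win_fence(MPI_MODE_NOPRECEDE,win)
     // MPI_Get/MPI_Accumulate RMA operations inside the epoch
     PetscSFRestoreWindow(...,&win); // issues MPI_Win_fence(MPI_MODE_NOSUCCEED,win)

   For ACTIVE synchronization, the MPI_Win_post/MPI_Win_start calls made here
   pair with MPI_Win_wait/MPI_Win_complete at restore time. */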
552: /*
553: PetscSFFindWindow - Finds a window that is already in use
555: Not Collective
557: Input Arguments:
558: + sf - star forest
559: . unit - data type
560: - array - array with which the window is associated
562: Output Arguments:
563: + win - window
564: - reqs - outstanding requests associated to the window
566: Level: developer
568: .seealso: PetscSFGetWindow(), PetscSFRestoreWindow()
569: */
570: static PetscErrorCode PetscSFFindWindow(PetscSF sf,MPI_Datatype unit,const void *array,MPI_Win *win,MPI_Request **reqs)
571: {
572: PetscSF_Window *w = (PetscSF_Window*)sf->data;
573: PetscSFWinLink link;
577: *win = MPI_WIN_NULL;
578: for (link=w->wins; link; link=link->next) {
579: if (array == link->paddr) {
580: PetscInfo3(sf,"Window %d of flavor %d for comm %d\n",link->win,link->flavor,PetscObjectComm((PetscObject)sf));
581: *win = link->win;
582: *reqs = link->reqs;
583: return(0);
584: }
585: }
586: SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Requested window not in use");
587: return(0);
588: }
590: /*
591: PetscSFRestoreWindow - Restores a window obtained with PetscSFGetWindow()
593: Collective
595: Input Arguments:
596: + sf - star forest
597: . unit - data type
598: . array - array associated with window
599: . sync - type of synchronization PetscSFWindowSyncType
600: . epoch - close an epoch, must match argument to PetscSFGetWindow()
601: . update - if we have to update the local window array
602: - win - window
604: Level: developer
606: .seealso: PetscSFFindWindow()
607: */
608: static PetscErrorCode PetscSFRestoreWindow(PetscSF sf,MPI_Datatype unit,void *array,PetscSFWindowSyncType sync,PetscBool epoch,PetscMPIInt fenceassert,PetscBool update,MPI_Win *win)
609: {
610: PetscSF_Window *w = (PetscSF_Window*)sf->data;
611: PetscErrorCode ierr;
612: PetscSFWinLink *p,link;
613: PetscBool reuse = PETSC_FALSE;
614: PetscSFWindowFlavorType flavor;
615: void* laddr;
616: size_t bytes;
619: for (p=&w->wins; *p; p=&(*p)->next) {
620: link = *p;
621: if (*win == link->win) {
622: if (array != link->paddr) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Matched window, but not array");
623: if (epoch != link->epoch) {
624: if (epoch) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"No epoch to end");
625: else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Restoring window without ending epoch");
626: }
627: laddr = link->addr;
628: flavor = link->flavor;
629: bytes = link->bytes;
630: if (flavor != PETSCSF_WINDOW_FLAVOR_CREATE) reuse = PETSC_TRUE;
631: else { *p = link->next; update = PETSC_FALSE; } /* remove from list */
632: goto found;
633: }
634: }
635: SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Requested window not in use");
637: found:
638: PetscInfo3(sf,"Window %d of flavor %d for comm %d\n",link->win,link->flavor,PetscObjectComm((PetscObject)sf));
639: if (epoch) {
640: switch (sync) {
641: case PETSCSF_WINDOW_SYNC_FENCE:
642: MPI_Win_fence(fenceassert,*win);
643: break;
644: case PETSCSF_WINDOW_SYNC_LOCK: /* Handled outside */
645: break;
646: case PETSCSF_WINDOW_SYNC_ACTIVE: {
647: MPI_Group ingroup,outgroup;
648: PetscMPIInt isize,osize;
650: /* OpenMPI 4.0.2 with btl=vader does not like calling
651: - MPI_Win_complete when ogroup is empty
652: - MPI_Win_wait when igroup is empty
653: The MPI standard (Sec. 11.5.2 of MPI 3.1) only requires that
654: - each process that issues a call to MPI_Win_start also issues a call to MPI_Win_complete
655: - each process that issues a call to MPI_Win_post also issues a call to MPI_Win_wait
656: */
657: PetscSFGetGroups(sf,&ingroup,&outgroup);
658: MPI_Group_size(ingroup,&isize);
659: MPI_Group_size(outgroup,&osize);
660: if (osize) { MPI_Win_complete(*win); }
661: if (isize) { MPI_Win_wait(*win); }
662: } break;
663: default: SETERRQ(PetscObjectComm((PetscObject)sf),PETSC_ERR_PLIB,"Unknown synchronization type");
664: }
665: }
666: if (update) {
667: if (sync == PETSCSF_WINDOW_SYNC_LOCK) {
668: MPI_Win_fence(MPI_MODE_NOPUT|MPI_MODE_NOSUCCEED,*win);
669: }
670: PetscMemcpy(array,laddr,sf->nroots*bytes);
671: }
672: link->epoch = PETSC_FALSE;
673: link->inuse = PETSC_FALSE;
674: link->paddr = NULL;
675: if (!reuse) {
676: PetscFree(link->dyn_target_addr);
677: PetscFree(link->reqs);
678: MPI_Win_free(&link->win);
679: PetscFree(link);
680: *win = MPI_WIN_NULL;
681: }
682: return(0);
683: }
685: static PetscErrorCode PetscSFSetUp_Window(PetscSF sf)
686: {
687: PetscSF_Window *w = (PetscSF_Window*)sf->data;
689: MPI_Group ingroup,outgroup;
692: PetscSFSetUpRanks(sf,MPI_GROUP_EMPTY);
693: if (!w->dynsf) {
694: PetscInt i;
695: PetscSFNode *remotes;
697: PetscMalloc1(sf->nranks,&remotes);
698: for (i=0;i<sf->nranks;i++) {
699: remotes[i].rank = sf->ranks[i];
700: remotes[i].index = 0;
701: }
702: PetscSFDuplicate(sf,PETSCSF_DUPLICATE_RANKS,&w->dynsf);
703: PetscSFWindowSetFlavorType(w->dynsf,PETSCSF_WINDOW_FLAVOR_CREATE); /* break recursion */
704: PetscSFSetGraph(w->dynsf,1,sf->nranks,NULL,PETSC_OWN_POINTER,remotes,PETSC_OWN_POINTER);
705: PetscLogObjectParent((PetscObject)sf,(PetscObject)w->dynsf);
706: }
707: switch (w->sync) {
708: case PETSCSF_WINDOW_SYNC_ACTIVE:
709: PetscSFGetGroups(sf,&ingroup,&outgroup);
710: default:
711: break;
712: }
713: return(0);
714: }
716: static PetscErrorCode PetscSFSetFromOptions_Window(PetscOptionItems *PetscOptionsObject,PetscSF sf)
717: {
718: PetscSF_Window *w = (PetscSF_Window*)sf->data;
719: PetscErrorCode ierr;
720: PetscSFWindowFlavorType flavor = w->flavor;
723: PetscOptionsHead(PetscOptionsObject,"PetscSF Window options");
724: PetscOptionsEnum("-sf_window_sync","synchronization type to use for PetscSF Window communication","PetscSFWindowSetSyncType",PetscSFWindowSyncTypes,(PetscEnum)w->sync,(PetscEnum*)&w->sync,NULL);
725: PetscOptionsEnum("-sf_window_flavor","flavor to use for PetscSF Window creation","PetscSFWindowSetFlavorType",PetscSFWindowFlavorTypes,(PetscEnum)flavor,(PetscEnum*)&flavor,NULL);
726: PetscSFWindowSetFlavorType(sf,flavor);
727: PetscOptionsTail();
728: return(0);
729: }
731: static PetscErrorCode PetscSFReset_Window(PetscSF sf)
732: {
733: PetscSF_Window *w = (PetscSF_Window*)sf->data;
734: PetscErrorCode ierr;
735: PetscSFDataLink link,next;
736: PetscSFWinLink wlink,wnext;
737: PetscInt i;
740: for (link=w->link; link; link=next) {
741: next = link->next;
742: MPI_Type_free(&link->unit);
743: for (i=0; i<sf->nranks; i++) {
744: MPI_Type_free(&link->mine[i]);
745: MPI_Type_free(&link->remote[i]);
746: }
747: PetscFree2(link->mine,link->remote);
748: PetscFree(link);
749: }
750: w->link = NULL;
751: for (wlink=w->wins; wlink; wlink=wnext) {
752: wnext = wlink->next;
753: if (wlink->inuse) SETERRQ1(PetscObjectComm((PetscObject)sf),PETSC_ERR_ARG_WRONGSTATE,"Window still in use with address %p",(void*)wlink->addr);
754: PetscFree(wlink->dyn_target_addr);
755: PetscFree(wlink->reqs);
756: MPI_Win_free(&wlink->win);
757: PetscFree(wlink);
758: }
759: w->wins = NULL;
760: PetscSFDestroy(&w->dynsf);
761: if (w->info != MPI_INFO_NULL) {
762: MPI_Info_free(&w->info);
763: }
764: return(0);
765: }
767: static PetscErrorCode PetscSFDestroy_Window(PetscSF sf)
768: {
772: PetscSFReset_Window(sf);
773: PetscFree(sf->data);
774: PetscObjectComposeFunction((PetscObject)sf,"PetscSFWindowSetSyncType_C",NULL);
775: PetscObjectComposeFunction((PetscObject)sf,"PetscSFWindowGetSyncType_C",NULL);
776: PetscObjectComposeFunction((PetscObject)sf,"PetscSFWindowSetFlavorType_C",NULL);
777: PetscObjectComposeFunction((PetscObject)sf,"PetscSFWindowGetFlavorType_C",NULL);
778: PetscObjectComposeFunction((PetscObject)sf,"PetscSFWindowSetInfo_C",NULL);
779: PetscObjectComposeFunction((PetscObject)sf,"PetscSFWindowGetInfo_C",NULL);
780: return(0);
781: }
783: static PetscErrorCode PetscSFView_Window(PetscSF sf,PetscViewer viewer)
784: {
785: PetscSF_Window *w = (PetscSF_Window*)sf->data;
786: PetscErrorCode ierr;
787: PetscBool iascii;
788: PetscViewerFormat format;
791: PetscViewerGetFormat(viewer,&format);
792: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);
793: if (iascii) {
794: PetscViewerASCIIPrintf(viewer," current flavor=%s synchronization=%s sort=%s\n",PetscSFWindowFlavorTypes[w->flavor],PetscSFWindowSyncTypes[w->sync],sf->rankorder ? "rank-order" : "unordered");
795: if (format == PETSC_VIEWER_ASCII_INFO_DETAIL) {
796: if (w->info != MPI_INFO_NULL) {
797: PetscMPIInt k,nkeys;
798: char key[MPI_MAX_INFO_KEY], value[MPI_MAX_INFO_VAL];
800: MPI_Info_get_nkeys(w->info,&nkeys);
801: PetscViewerASCIIPrintf(viewer," current info with %d keys. Ordered key-value pairs follow:\n",nkeys);
802: for (k = 0; k < nkeys; k++) {
803: PetscMPIInt flag;
805: MPI_Info_get_nthkey(w->info,k,key);
806: MPI_Info_get(w->info,key,MPI_MAX_INFO_VAL,value,&flag);
807: if (!flag) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing key %s",key);
808: PetscViewerASCIIPrintf(viewer," %s = %s\n",key,value);
809: }
810: } else {
811: PetscViewerASCIIPrintf(viewer," current info=MPI_INFO_NULL\n");
812: }
813: }
814: }
815: return(0);
816: }
818: static PetscErrorCode PetscSFDuplicate_Window(PetscSF sf,PetscSFDuplicateOption opt,PetscSF newsf)
819: {
820: PetscSF_Window *w = (PetscSF_Window*)sf->data;
821: PetscErrorCode ierr;
822: PetscSFWindowSyncType synctype;
825: synctype = w->sync;
826: /* HACK: Must use FENCE or LOCK when called from PetscSFGetGroups() because ACTIVE here would cause recursion. */
827: if (!sf->setupcalled) synctype = PETSCSF_WINDOW_SYNC_LOCK;
828: PetscSFWindowSetSyncType(newsf,synctype);
829: PetscSFWindowSetFlavorType(newsf,w->flavor);
830: PetscSFWindowSetInfo(newsf,w->info);
831: return(0);
832: }
834: static PetscErrorCode PetscSFBcastAndOpBegin_Window(PetscSF sf,MPI_Datatype unit,PetscMemType rootmtype,const void *rootdata,PetscMemType leafmtype,void *leafdata,MPI_Op op)
835: {
836: PetscSF_Window *w = (PetscSF_Window*)sf->data;
837: PetscErrorCode ierr;
838: PetscInt i,nranks;
839: const PetscMPIInt *ranks;
840: const MPI_Aint *target_disp;
841: const MPI_Datatype *mine,*remote;
842: MPI_Request *reqs;
843: MPI_Win win;
846: if (op != MPI_REPLACE && op != MPIU_REPLACE) SETERRQ(PetscObjectComm((PetscObject)sf), PETSC_ERR_SUP, "PetscSFBcastAndOpBegin_Window with op!=MPI_REPLACE has not been implemented");
847: PetscSFGetRootRanks(sf,&nranks,&ranks,NULL,NULL,NULL);
848: PetscSFWindowGetDataTypes(sf,unit,&mine,&remote);
849: PetscSFGetWindow(sf,unit,(void*)rootdata,w->sync,PETSC_TRUE,MPI_MODE_NOPUT|MPI_MODE_NOPRECEDE,MPI_MODE_NOPUT,0,&target_disp,&reqs,&win);
850: for (i=0; i<nranks; i++) {
851: MPI_Aint tdp = target_disp ? target_disp[i] : 0;
853: if (w->sync == PETSCSF_WINDOW_SYNC_LOCK) {
854: MPI_Win_lock(MPI_LOCK_SHARED,ranks[i],MPI_MODE_NOCHECK,win);
855: #if defined(PETSC_HAVE_MPI_RGET)
856: MPI_Rget(leafdata,1,mine[i],ranks[i],tdp,1,remote[i],win,&reqs[i]);
857: #else
858: MPI_Get(leafdata,1,mine[i],ranks[i],tdp,1,remote[i],win);
859: #endif
860: } else {
861: MPI_Get(leafdata,1,mine[i],ranks[i],tdp,1,remote[i],win);
862: }
863: }
864: return(0);
865: }
867: PetscErrorCode PetscSFBcastAndOpEnd_Window(PetscSF sf,MPI_Datatype unit,const void *rootdata,void *leafdata,MPI_Op op)
868: {
869: PetscSF_Window *w = (PetscSF_Window*)sf->data;
871: MPI_Win win;
872: MPI_Request *reqs = NULL;
875: PetscSFFindWindow(sf,unit,rootdata,&win,&reqs);
876: if (reqs) { MPI_Waitall(sf->nranks,reqs,MPI_STATUSES_IGNORE); }
877: if (w->sync == PETSCSF_WINDOW_SYNC_LOCK) {
878: PetscInt i,nranks;
879: const PetscMPIInt *ranks;
881: PetscSFGetRootRanks(sf,&nranks,&ranks,NULL,NULL,NULL);
882: for (i=0; i<nranks; i++) {
883: MPI_Win_unlock(ranks[i],win);
884: }
885: }
886: PetscSFRestoreWindow(sf,unit,(void*)rootdata,w->sync,PETSC_TRUE,MPI_MODE_NOSTORE|MPI_MODE_NOSUCCEED,PETSC_FALSE,&win);
887: return(0);
888: }
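/* A usage sketch for the broadcast path above (sf, rootdata, and leafdata are
   assumed to be set up by the caller; only MPI_REPLACE is supported as the op):

     PetscSFBcastBegin(sf,MPIU_SCALAR,rootdata,leafdata);
     // ... independent work can overlap the one-sided communication ...
     PetscSFBcastEnd(sf,MPIU_SCALAR,rootdata,leafdata);
*/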
890: PetscErrorCode PetscSFReduceBegin_Window(PetscSF sf,MPI_Datatype unit,PetscMemType leafmtype,const void *leafdata,PetscMemType rootmtype,void *rootdata,MPI_Op op)
891: {
892: PetscSF_Window *w = (PetscSF_Window*)sf->data;
893: PetscErrorCode ierr;
894: PetscInt i,nranks;
895: const PetscMPIInt *ranks;
896: const MPI_Aint *target_disp;
897: const MPI_Datatype *mine,*remote;
898: MPI_Win win;
901: PetscSFGetRootRanks(sf,&nranks,&ranks,NULL,NULL,NULL);
902: PetscSFWindowGetDataTypes(sf,unit,&mine,&remote);
903: PetscSFWindowOpTranslate(&op);
904: PetscSFGetWindow(sf,unit,rootdata,w->sync,PETSC_TRUE,MPI_MODE_NOPRECEDE,0,0,&target_disp,NULL,&win);
905: for (i=0; i<nranks; i++) {
906: MPI_Aint tdp = target_disp ? target_disp[i] : 0;
908: if (w->sync == PETSCSF_WINDOW_SYNC_LOCK) {MPI_Win_lock(MPI_LOCK_SHARED,ranks[i],MPI_MODE_NOCHECK,win);}
909: ierr = MPI_Accumulate((void*)leafdata,1,mine[i],ranks[i],tdp,1,remote[i],op,win);
910: if (ierr) { /* intercept the MPI error since the combination of unit and op is not supported */
911: PetscMPIInt len;
912: char errstring[MPI_MAX_ERROR_STRING];
914: MPI_Error_string(ierr,errstring,&len);
915: SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Internal error in MPI: %s",errstring);
916: }
917: if (w->sync == PETSCSF_WINDOW_SYNC_LOCK) {MPI_Win_unlock(ranks[i],win);}
918: }
919: return(0);
920: }
922: static PetscErrorCode PetscSFReduceEnd_Window(PetscSF sf,MPI_Datatype unit,const void *leafdata,void *rootdata,MPI_Op op)
923: {
924: PetscSF_Window *w = (PetscSF_Window*)sf->data;
926: MPI_Win win;
927: MPI_Request *reqs = NULL;
930: PetscSFFindWindow(sf,unit,rootdata,&win,&reqs);
931: if (reqs) { MPI_Waitall(sf->nranks,reqs,MPI_STATUSES_IGNORE); }
932: PetscSFRestoreWindow(sf,unit,rootdata,w->sync,PETSC_TRUE,MPI_MODE_NOSUCCEED,PETSC_TRUE,&win);
933: return(0);
934: }
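/* A usage sketch for the reduction path above (leaf values are accumulated
   into root locations; the op is translated to a built-in by
   PetscSFWindowOpTranslate() before the RMA calls):

     PetscSFReduceBegin(sf,MPIU_SCALAR,leafdata,rootdata,MPIU_SUM);
     PetscSFReduceEnd(sf,MPIU_SCALAR,leafdata,rootdata,MPIU_SUM);
*/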
936: static PetscErrorCode PetscSFFetchAndOpBegin_Window(PetscSF sf,MPI_Datatype unit,PetscMemType rootmtype,void *rootdata,PetscMemType leafmtype,const void *leafdata,void *leafupdate,MPI_Op op)
937: {
938: PetscErrorCode ierr;
939: PetscInt i,nranks;
940: const PetscMPIInt *ranks;
941: const MPI_Datatype *mine,*remote;
942: const MPI_Aint *target_disp;
943: MPI_Win win;
944: PetscSF_Window *w = (PetscSF_Window*)sf->data;
945: #if !defined(PETSC_HAVE_MPI_GET_ACCUMULATE)
946: PetscSFWindowFlavorType oldf;
947: #endif
950: PetscSFGetRootRanks(sf,&nranks,&ranks,NULL,NULL,NULL);
951: PetscSFWindowGetDataTypes(sf,unit,&mine,&remote);
952: PetscSFWindowOpTranslate(&op);
953: #if !defined(PETSC_HAVE_MPI_GET_ACCUMULATE)
954: /* FetchAndOp without MPI_Get_Accumulate requires locking.
955: we create a new window every time to not interfere with user-defined MPI_Info which may have used "no_locks"="true" */
956: oldf = w->flavor;
957: w->flavor = PETSCSF_WINDOW_FLAVOR_CREATE;
958: PetscSFGetWindow(sf,unit,rootdata,PETSCSF_WINDOW_SYNC_LOCK,PETSC_FALSE,0,0,0,&target_disp,NULL,&win);
959: #else
960: PetscSFGetWindow(sf,unit,rootdata,w->sync,PETSC_TRUE,MPI_MODE_NOPRECEDE,0,0,&target_disp,NULL,&win);
961: #endif
962: for (i=0; i<nranks; i++) {
963: MPI_Aint tdp = target_disp ? target_disp[i] : 0;
965: #if !defined(PETSC_HAVE_MPI_GET_ACCUMULATE)
966: MPI_Win_lock(MPI_LOCK_EXCLUSIVE,ranks[i],0,win);
967: MPI_Get(leafupdate,1,mine[i],ranks[i],tdp,1,remote[i],win);
968: ierr = MPI_Accumulate((void*)leafdata,1,mine[i],ranks[i],tdp,1,remote[i],op,win);
969: if (ierr) { /* intercept the MPI error since the combination of unit and op is not supported */
970: PetscMPIInt len;
971: char errstring[MPI_MAX_ERROR_STRING];
973: MPI_Error_string(ierr,errstring,&len);
974: SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Internal error in MPI: %s",errstring);
975: }
976: MPI_Win_unlock(ranks[i],win);
977: #else
978: if (w->sync == PETSCSF_WINDOW_SYNC_LOCK) { MPI_Win_lock(MPI_LOCK_SHARED,ranks[i],0,win); }
979: ierr = MPI_Get_accumulate((void*)leafdata,1,mine[i],leafupdate,1,mine[i],ranks[i],tdp,1,remote[i],op,win);
980: if (ierr) { /* intercept the MPI error since the combination of unit and op is not supported */
981: PetscMPIInt len;
982: char errstring[MPI_MAX_ERROR_STRING];
984: MPI_Error_string(ierr,errstring,&len);
985: SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Internal error in MPI: %s",errstring);
986: }
987: if (w->sync == PETSCSF_WINDOW_SYNC_LOCK) { MPI_Win_unlock(ranks[i],win); }
988: #endif
989: }
990: #if !defined(PETSC_HAVE_MPI_GET_ACCUMULATE)
991: w->flavor = oldf;
992: #endif
993: return(0);
994: }
996: static PetscErrorCode PetscSFFetchAndOpEnd_Window(PetscSF sf,MPI_Datatype unit,void *rootdata,const void *leafdata,void *leafupdate,MPI_Op op)
997: {
999: MPI_Win win;
1000: #if defined(PETSC_HAVE_MPI_GET_ACCUMULATE)
1001: PetscSF_Window *w = (PetscSF_Window*)sf->data;
1002: #endif
1003: MPI_Request *reqs = NULL;
1006: PetscSFFindWindow(sf,unit,rootdata,&win,&reqs);
1007: if (reqs) { MPI_Waitall(sf->nranks,reqs,MPI_STATUSES_IGNORE); }
1008: #if defined(PETSC_HAVE_MPI_GET_ACCUMULATE)
1009: PetscSFRestoreWindow(sf,unit,rootdata,w->sync,PETSC_TRUE,MPI_MODE_NOSUCCEED,PETSC_TRUE,&win);
1010: #else
1011: PetscSFRestoreWindow(sf,unit,rootdata,PETSCSF_WINDOW_SYNC_LOCK,PETSC_FALSE,0,PETSC_TRUE,&win);
1012: #endif
1013: return(0);
1014: }
1016: PETSC_INTERN PetscErrorCode PetscSFCreate_Window(PetscSF sf)
1017: {
1018: PetscSF_Window *w = (PetscSF_Window*)sf->data;
1022: sf->ops->SetUp = PetscSFSetUp_Window;
1023: sf->ops->SetFromOptions = PetscSFSetFromOptions_Window;
1024: sf->ops->Reset = PetscSFReset_Window;
1025: sf->ops->Destroy = PetscSFDestroy_Window;
1026: sf->ops->View = PetscSFView_Window;
1027: sf->ops->Duplicate = PetscSFDuplicate_Window;
1028: sf->ops->BcastAndOpBegin = PetscSFBcastAndOpBegin_Window;
1029: sf->ops->BcastAndOpEnd = PetscSFBcastAndOpEnd_Window;
1030: sf->ops->ReduceBegin = PetscSFReduceBegin_Window;
1031: sf->ops->ReduceEnd = PetscSFReduceEnd_Window;
1032: sf->ops->FetchAndOpBegin = PetscSFFetchAndOpBegin_Window;
1033: sf->ops->FetchAndOpEnd = PetscSFFetchAndOpEnd_Window;
1035: PetscNewLog(sf,&w);
1036: sf->data = (void*)w;
1037: w->sync = PETSCSF_WINDOW_SYNC_FENCE;
1038: w->flavor = PETSCSF_WINDOW_FLAVOR_CREATE;
1039: w->info = MPI_INFO_NULL;
1041: PetscObjectComposeFunction((PetscObject)sf,"PetscSFWindowSetSyncType_C",PetscSFWindowSetSyncType_Window);
1042: PetscObjectComposeFunction((PetscObject)sf,"PetscSFWindowGetSyncType_C",PetscSFWindowGetSyncType_Window);
1043: PetscObjectComposeFunction((PetscObject)sf,"PetscSFWindowSetFlavorType_C",PetscSFWindowSetFlavorType_Window);
1044: PetscObjectComposeFunction((PetscObject)sf,"PetscSFWindowGetFlavorType_C",PetscSFWindowGetFlavorType_Window);
1045: PetscObjectComposeFunction((PetscObject)sf,"PetscSFWindowSetInfo_C",PetscSFWindowSetInfo_Window);
1046: PetscObjectComposeFunction((PetscObject)sf,"PetscSFWindowGetInfo_C",PetscSFWindowGetInfo_Window);
1048: #if defined(OMPI_MAJOR_VERSION) && (OMPI_MAJOR_VERSION < 1 || (OMPI_MAJOR_VERSION == 1 && OMPI_MINOR_VERSION <= 6))
1049: {
1050: PetscBool ackbug = PETSC_FALSE;
1051: PetscOptionsGetBool(NULL,NULL,"-acknowledge_ompi_onesided_bug",&ackbug,NULL);
1052: if (ackbug) {
1053: PetscInfo(sf,"Acknowledged Open MPI bug, proceeding anyway. Expect memory corruption.\n");
1054: } else SETERRQ(PetscObjectComm((PetscObject)sf),PETSC_ERR_LIB,"Open MPI is known to be buggy (https://svn.open-mpi.org/trac/ompi/ticket/1905 and 2656), use -acknowledge_ompi_onesided_bug to proceed");
1055: }
1056: #endif
1057: return(0);
1058: }
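/* A minimal sketch of selecting and configuring this implementation at
   runtime (error checking omitted):

     PetscSF sf;
     PetscSFCreate(PETSC_COMM_WORLD,&sf);
     PetscSFSetType(sf,PETSCSFWINDOW);
     PetscSFSetFromOptions(sf);   // honors -sf_window_sync and -sf_window_flavor
     // ... PetscSFSetGraph(), communication calls ...
     PetscSFDestroy(&sf);
*/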