Actual source code: mpitr.c (petsc-3.12.5, 2020-03-29)
/*
   Code for tracing mistakes in MPI usage. For example, sends that are never received,
   nonblocking messages that are not correctly waited for, etc.
*/
#include <petscsys.h>

#if defined(PETSC_USE_LOG) && !defined(PETSC_HAVE_MPIUNI)
/*@C
   PetscMPIDump - Dumps a listing of incomplete MPI operations, such as sends that
   have never been received, etc.

   Collective on PETSC_COMM_WORLD

   Input Parameter:
.  fd - file pointer. If fd is NULL, stdout is assumed.

   Options Database Key:
.  -mpidump - Dumps MPI incompleteness during call to PetscFinalize()

   Level: developer

.seealso: PetscMallocDump()
@*/
PetscErrorCode PetscMPIDump(FILE *fd)
{
  PetscErrorCode ierr;
  PetscMPIInt    rank;
  double         tsends,trecvs,work;
  int            err;

  PetscFunctionBegin;
  ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);CHKERRQ(ierr);
  if (!fd) fd = PETSC_STDOUT;

  /* Did we wait on all the non-blocking sends and receives? */
  ierr = PetscSequentialPhaseBegin(PETSC_COMM_WORLD,1);CHKERRQ(ierr);
  if (petsc_irecv_ct + petsc_isend_ct != petsc_sum_of_waits_ct) {
    ierr = PetscFPrintf(PETSC_COMM_SELF,fd,"[%d]You have not waited on all non-blocking sends and receives\n",rank);CHKERRQ(ierr);
    ierr = PetscFPrintf(PETSC_COMM_SELF,fd,"[%d]Number non-blocking sends %g receives %g number of waits %g\n",rank,petsc_isend_ct,petsc_irecv_ct,petsc_sum_of_waits_ct);CHKERRQ(ierr);
    err  = fflush(fd);
    if (err) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SYS,"fflush() failed on file");
  }
  ierr = PetscSequentialPhaseEnd(PETSC_COMM_WORLD,1);CHKERRQ(ierr);

  /* Did we receive all the messages that we sent? */
  work = petsc_irecv_ct + petsc_recv_ct;
  ierr = MPI_Reduce(&work,&trecvs,1,MPI_DOUBLE,MPI_SUM,0,PETSC_COMM_WORLD);CHKERRQ(ierr);
  work = petsc_isend_ct + petsc_send_ct;
  ierr = MPI_Reduce(&work,&tsends,1,MPI_DOUBLE,MPI_SUM,0,PETSC_COMM_WORLD);CHKERRQ(ierr);
  if (!rank && tsends != trecvs) {
    ierr = PetscFPrintf(PETSC_COMM_SELF,fd,"Total number sends %g not equal receives %g\n",tsends,trecvs);CHKERRQ(ierr);
    err  = fflush(fd);
    if (err) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SYS,"fflush() failed on file");
  }
  PetscFunctionReturn(0);
}
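
/*
   Example (a minimal sketch of a driver program, not part of this file): the report can be
   requested explicitly by calling PetscMPIDump(), or obtained at shutdown by running with
   the -mpidump option, which makes PetscFinalize() produce the same dump.

     #include <petscsys.h>

     int main(int argc,char **argv)
     {
       PetscErrorCode ierr;

       ierr = PetscInitialize(&argc,&argv,NULL,NULL);if (ierr) return ierr;
       // ... application MPI traffic ...
       ierr = PetscMPIDump(NULL);CHKERRQ(ierr);   // NULL writes the report to stdout
       ierr = PetscFinalize();
       return ierr;
     }

   Running as "mpiexec -n 2 ./example -mpidump" (the program name is illustrative) triggers the
   same report from within PetscFinalize().
*/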
#else

PetscErrorCode PetscMPIDump(FILE *fd)
{
  PetscFunctionBegin;
  PetscFunctionReturn(0);
}

#endif

#if defined(PETSC_HAVE_MPI_PROCESS_SHARED_MEMORY)
/*
   The Open MPI implementation of MPI_Win_allocate_shared() does not provide __float128 alignment,
   so we provide a utility that ensures alignment up to the size of the data item.
*/
PetscErrorCode MPIU_Win_allocate_shared(MPI_Aint sz,PetscMPIInt szind,MPI_Info info,MPI_Comm comm,void *ptr,MPI_Win *win)
{
  PetscErrorCode ierr;
  float          *tmp;

  PetscFunctionBegin;
  /* over-allocate by 16 bytes so the returned address can be shifted forward to a multiple of szind */
  ierr = MPI_Win_allocate_shared(16+sz,szind,info,comm,&tmp,win);CHKERRQ(ierr);
  tmp += ((size_t)tmp) % szind ? szind/4 - ((((size_t)tmp) % szind)/4) : 0;
  *(void**)ptr = (void*)tmp;
  PetscFunctionReturn(0);
}
PETSC_EXTERN PetscErrorCode MPIU_Win_shared_query(MPI_Win win,PetscMPIInt rank,MPI_Aint *sz,PetscMPIInt *szind,void *ptr)
{
  PetscErrorCode ierr;
  float          *tmp;

  PetscFunctionBegin;
  ierr = MPI_Win_shared_query(win,rank,sz,szind,&tmp);CHKERRQ(ierr);
  if (*szind <= 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"szind %d must be positive\n",*szind);
  /* apply the same alignment shift that MPIU_Win_allocate_shared() applied to the base pointer */
  tmp += ((size_t)tmp) % *szind ? *szind/4 - ((((size_t)tmp) % *szind)/4) : 0;
  *(void**)ptr = (void*)tmp;
  PetscFunctionReturn(0);
}
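
/*
   Example (a minimal sketch, not part of this file): allocate one PetscScalar per process in a
   shared-memory window through the aligning wrapper, then look up the segment owned by rank 0.
   The communicator scomm and the variable names are illustrative; scomm is assumed to come from
   MPI_Comm_split_type() with MPI_COMM_TYPE_SHARED so that all of its ranks share memory.

     MPI_Comm       scomm;
     MPI_Win        win;
     MPI_Aint       sz;
     PetscMPIInt    disp;
     PetscScalar    *mine,*first;
     PetscErrorCode ierr;

     ierr = MPI_Comm_split_type(PETSC_COMM_WORLD,MPI_COMM_TYPE_SHARED,0,MPI_INFO_NULL,&scomm);CHKERRQ(ierr);
     ierr = MPIU_Win_allocate_shared((MPI_Aint)sizeof(PetscScalar),(PetscMPIInt)sizeof(PetscScalar),MPI_INFO_NULL,scomm,&mine,&win);CHKERRQ(ierr);
     ierr = MPIU_Win_shared_query(win,0,&sz,&disp,&first);CHKERRQ(ierr);  // aligned pointer into rank 0's segment
     // ... read/write through mine and first, synchronizing with MPI_Win_fence() or a barrier ...
     ierr = MPI_Win_free(&win);CHKERRQ(ierr);
     ierr = MPI_Comm_free(&scomm);CHKERRQ(ierr);
*/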
#endif