Actual source code: mpiu.c
petsc-3.14.6 2021-03-30
#include <petscsys.h>
#include <petsc/private/petscimpl.h>

/*
   Note that a tag of 0 is fine because comm is a private communicator
   generated below just for these routines.
*/
PETSC_INTERN PetscErrorCode PetscSequentialPhaseBegin_Private(MPI_Comm comm,int ng)
{
  PetscMPIInt rank,size,tag = 0;
  MPI_Status  status;

  MPI_Comm_size(comm,&size);
  if (size == 1) return(0);
  MPI_Comm_rank(comm,&rank);
  /* Wait for a zero-length message from the previous process before entering the phase */
  if (rank) {
    MPI_Recv(NULL,0,MPI_INT,rank-1,tag,comm,&status);
  }
  /* Send to the next process in the group unless we are the last process */
  if ((rank % ng) < ng - 1 && rank != size - 1) {
    MPI_Send(NULL,0,MPI_INT,rank + 1,tag,comm);
  }
  return(0);
}
PETSC_INTERN PetscErrorCode PetscSequentialPhaseEnd_Private(MPI_Comm comm,int ng)
{
  PetscMPIInt rank,size,tag = 0;
  MPI_Status  status;

  MPI_Comm_rank(comm,&rank);
  MPI_Comm_size(comm,&size);
  if (size == 1) return(0);

  /* The last process in each group (and the last process overall) sends to the
     first process of the next group; the final send wraps around to rank 0 */
  if ((rank % ng) == ng - 1 || rank == size - 1) {
    MPI_Send(NULL,0,MPI_INT,(rank + 1) % size,tag,comm);
  }
  /* Rank 0 waits for the wrap-around message from the last process, closing the ring */
  if (!rank) {
    MPI_Recv(NULL,0,MPI_INT,size-1,tag,comm,&status);
  }
  return(0);
}
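/*
   Illustrative trace (not part of the library source): with ng = 1 and four
   processes, the two routines above pass a zero-length message (a "token")
   around a ring.  In Begin, every rank except 0 blocks in MPI_Recv waiting for
   its predecessor; in End, each rank forwards the token:

       rank 0: runs its phase immediately, End sends the token to rank 1
       rank 1: Begin receives the token, runs its phase, End sends to rank 2
       rank 2: Begin receives the token, runs its phase, End sends to rank 3
       rank 3: Begin receives the token, runs its phase, End sends to rank 0
       rank 0: End receives the wrap-around token and returns

   With ng > 1, the first ng ranks run concurrently and the last rank of each
   group releases the first rank of the next group.
*/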
/* ---------------------------------------------------------------------*/
/*
   The variable Petsc_Seq_keyval is used to indicate an MPI attribute that
   is attached to a communicator that manages the sequential phase code below.
*/
PetscMPIInt Petsc_Seq_keyval = MPI_KEYVAL_INVALID;
/*@
   PetscSequentialPhaseBegin - Begins a sequential section of code.

   Collective

   Input Parameters:
+  comm - Communicator to sequentialize
-  ng   - Number of processes per group; this many processes are allowed to execute
          at the same time (usually 1)

   Level: intermediate

   Notes:
   PetscSequentialPhaseBegin() and PetscSequentialPhaseEnd() provide a
   way to force a section of code to be executed by the processes in
   rank order.  Typically, this is done with
.vb
      PetscSequentialPhaseBegin(comm, 1);
      <code to be executed sequentially>
      PetscSequentialPhaseEnd(comm, 1);
.ve

   Often, the sequential code contains output statements (e.g., printf) to
   be executed.  Note that you may need to flush the I/O buffers before
   calling PetscSequentialPhaseEnd().  Also, note that some systems do not
   propagate I/O to the controlling terminal in any particular order
   (in other words, even if you flush the output, you may not get the data
   in the order that you want).

.seealso: PetscSequentialPhaseEnd()
@*/
PetscErrorCode PetscSequentialPhaseBegin(MPI_Comm comm,int ng)
{
  PetscMPIInt size;
  MPI_Comm    local_comm,*addr_local_comm;

  PetscSysInitializePackage();
  MPI_Comm_size(comm,&size);
  if (size == 1) return(0);

  /* Get the private communicator for the sequential operations */
  if (Petsc_Seq_keyval == MPI_KEYVAL_INVALID) {
    MPI_Comm_create_keyval(MPI_COMM_NULL_COPY_FN,MPI_COMM_NULL_DELETE_FN,&Petsc_Seq_keyval,NULL);
  }

  MPI_Comm_dup(comm,&local_comm);
  PetscMalloc1(1,&addr_local_comm);

  *addr_local_comm = local_comm;

  /* Stash the duplicated communicator on comm so PetscSequentialPhaseEnd() can find it */
  MPI_Comm_set_attr(comm,Petsc_Seq_keyval,(void*)addr_local_comm);
  PetscSequentialPhaseBegin_Private(local_comm,ng);
  return(0);
}
/*@
   PetscSequentialPhaseEnd - Ends a sequential section of code.

   Collective

   Input Parameters:
+  comm - Communicator to sequentialize
-  ng   - Number of processes per group; this many processes are allowed to execute
          at the same time (usually 1)

   Level: intermediate

   Notes:
   See PetscSequentialPhaseBegin() for more details.

.seealso: PetscSequentialPhaseBegin()
@*/
PetscErrorCode PetscSequentialPhaseEnd(MPI_Comm comm,int ng)
{
  PetscMPIInt size,flag;
  MPI_Comm    local_comm,*addr_local_comm;

  MPI_Comm_size(comm,&size);
  if (size == 1) return(0);

  /* Retrieve the private communicator stashed by PetscSequentialPhaseBegin() */
  MPI_Comm_get_attr(comm,Petsc_Seq_keyval,(void**)&addr_local_comm,&flag);
  if (!flag) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Wrong MPI communicator; must pass in one used with PetscSequentialPhaseBegin()");
  local_comm = *addr_local_comm;

  PetscSequentialPhaseEnd_Private(local_comm,ng);

  PetscFree(addr_local_comm);
  MPI_Comm_free(&local_comm);
  MPI_Comm_delete_attr(comm,Petsc_Seq_keyval);
  return(0);
}
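/*
   Illustrative sketch (not part of the library source): a typical use of
   PetscSequentialPhaseBegin()/PetscSequentialPhaseEnd() to print per-rank
   diagnostics in rank order, as described in the Notes above.  The function
   name and variables below are hypothetical; only the two phase routines and
   standard MPI/C calls are assumed.

       PetscErrorCode PrintOwnershipInOrder(MPI_Comm comm, PetscInt nlocal)
       {
         PetscMPIInt rank;

         MPI_Comm_rank(comm, &rank);
         PetscSequentialPhaseBegin(comm, 1);   // one rank executes the block at a time
         printf("[%d] owns %d local entries\n", (int)rank, (int)nlocal);
         fflush(stdout);                       // flush before releasing the next rank
         PetscSequentialPhaseEnd(comm, 1);
         return 0;
       }
*/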
/*@C
   PetscGlobalMinMaxInt - Get the global min/max from local min/max input

   Collective

   Input Parameters:
+  comm      - The MPI communicator to reduce over
-  minMaxVal - An array with the local min and max

   Output Parameter:
.  minMaxValGlobal - An array with the global min and max

   Level: beginner

.seealso: PetscSplitOwnership()
@*/
PetscErrorCode PetscGlobalMinMaxInt(MPI_Comm comm, PetscInt minMaxVal[2], PetscInt minMaxValGlobal[2])
{
  PetscInt sendbuf[2];

  /* Negate the local max so a single MPI_MIN reduction yields both the global
     min (slot 0) and, after negating again, the global max (slot 1); a local
     send buffer is used so the caller's input array is left unchanged */
  sendbuf[0] = minMaxVal[0];
  sendbuf[1] = -minMaxVal[1];
  MPI_Allreduce(sendbuf, minMaxValGlobal, 2, MPIU_INT, MPI_MIN, comm);
  minMaxValGlobal[1] = -minMaxValGlobal[1];
  return(0);
}
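/*
   Illustrative sketch (not part of the library source): using
   PetscGlobalMinMaxInt() to find the global range of the locally owned
   indices.  The variable names are hypothetical; PETSC_COMM_WORLD is the
   usual world communicator.

       PetscInt local[2], global[2];

       local[0] = lowestLocalIndex;    // local minimum
       local[1] = highestLocalIndex;   // local maximum
       PetscGlobalMinMaxInt(PETSC_COMM_WORLD, local, global);
       // global[0] now holds the smallest local[0] over all ranks,
       // global[1] the largest local[1] over all ranks
*/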
/*@C
   PetscGlobalMinMaxReal - Get the global min/max from local min/max input

   Collective

   Input Parameters:
+  comm      - The MPI communicator to reduce over
-  minMaxVal - An array with the local min and max

   Output Parameter:
.  minMaxValGlobal - An array with the global min and max

   Level: beginner

.seealso: PetscSplitOwnership()
@*/
PetscErrorCode PetscGlobalMinMaxReal(MPI_Comm comm, PetscReal minMaxVal[2], PetscReal minMaxValGlobal[2])
{
  PetscReal sendbuf[2];

  /* Same negate-and-min trick as PetscGlobalMinMaxInt(), using a local send
     buffer so the caller's input array is not modified */
  sendbuf[0] = minMaxVal[0];
  sendbuf[1] = -minMaxVal[1];
  MPI_Allreduce(sendbuf, minMaxValGlobal, 2, MPIU_REAL, MPI_MIN, comm);
  minMaxValGlobal[1] = -minMaxValGlobal[1];
  return(0);
}