Actual source code: mpiu.c
petsc-3.10.5 2019-03-28
#include <petscsys.h>
#include <petsc/private/petscimpl.h>
/*
   Note that a tag of 0 is OK because comm is a private communicator
   generated below just for these routines.
*/
PETSC_INTERN PetscErrorCode PetscSequentialPhaseBegin_Private(MPI_Comm comm,int ng)
{
  PetscMPIInt rank,size,tag = 0;
  MPI_Status  status;

  MPI_Comm_size(comm,&size);
  if (size == 1) return(0);
  MPI_Comm_rank(comm,&rank);

  /* Block until the previous process hands over the token */
  if (rank) {
    MPI_Recv(0,0,MPI_INT,rank-1,tag,comm,&status);
  }
  /* Send to the next process in the group unless we are the last process */
  if ((rank % ng) < ng - 1 && rank != size - 1) {
    MPI_Send(0,0,MPI_INT,rank + 1,tag,comm);
  }
  return(0);
}
PETSC_INTERN PetscErrorCode PetscSequentialPhaseEnd_Private(MPI_Comm comm,int ng)
{
  PetscMPIInt rank,size,tag = 0;
  MPI_Status  status;

  MPI_Comm_rank(comm,&rank);
  MPI_Comm_size(comm,&size);
  if (size == 1) return(0);

  /* Send to the first process in the next group */
  if ((rank % ng) == ng - 1 || rank == size - 1) {
    MPI_Send(0,0,MPI_INT,(rank + 1) % size,tag,comm);
  }
  /* Process 0 absorbs the final token so the phase ends cleanly */
  if (!rank) {
    MPI_Recv(0,0,MPI_INT,size-1,tag,comm,&status);
  }
  return(0);
}
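
/*
   A minimal standalone sketch (not part of PETSc) of the ng = 1 token-passing pattern that
   the two private routines above implement: each rank blocks until its predecessor releases
   it, runs its sequential section, then releases its successor; the last rank hands the
   token back to rank 0 so the pattern can be repeated.
*/
#if 0   /* illustrative only; a second main() would not belong in this file */
#include <mpi.h>
#include <stdio.h>

int main(int argc,char **argv)
{
  int rank,size,tag = 0;

  MPI_Init(&argc,&argv);
  MPI_Comm_rank(MPI_COMM_WORLD,&rank);
  MPI_Comm_size(MPI_COMM_WORLD,&size);

  /* "Begin": every rank except 0 waits for the token from the previous rank */
  if (rank) MPI_Recv(NULL,0,MPI_INT,rank-1,tag,MPI_COMM_WORLD,MPI_STATUS_IGNORE);

  printf("rank %d of %d running its sequential section\n",rank,size);
  fflush(stdout);

  /* "End": release the next rank; the last rank wraps the token back to rank 0 */
  if (size > 1) {
    MPI_Send(NULL,0,MPI_INT,(rank+1)%size,tag,MPI_COMM_WORLD);
    if (!rank) MPI_Recv(NULL,0,MPI_INT,size-1,tag,MPI_COMM_WORLD,MPI_STATUS_IGNORE);
  }

  MPI_Finalize();
  return 0;
}
#endif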
/* ---------------------------------------------------------------------*/
/*
   The variable Petsc_Seq_keyval is used to indicate an MPI attribute that
   is attached to a communicator that manages the sequential phase code below.
*/
PetscMPIInt Petsc_Seq_keyval = MPI_KEYVAL_INVALID;
/*@
   PetscSequentialPhaseBegin - Begins a sequential section of code.

   Collective on MPI_Comm

   Input Parameters:
+  comm - Communicator to sequentialize
-  ng   - Number of processes in each group; this many processes are allowed to execute
          at the same time (usually 1)

   Level: intermediate

   Notes:
   PetscSequentialPhaseBegin() and PetscSequentialPhaseEnd() provide a
   way to force a section of code to be executed by the processes in
   rank order.  Typically, this is done with
.vb
   PetscSequentialPhaseBegin(comm, 1);
   <code to be executed sequentially>
   PetscSequentialPhaseEnd(comm, 1);
.ve

   Often, the sequential code contains output statements (e.g., printf) to
   be executed.  Note that you may need to flush the I/O buffers before
   calling PetscSequentialPhaseEnd().  Also note that some systems do not
   propagate I/O in any order to the controlling terminal (in other words,
   even if you flush the output, you may not get the data in the order
   that you want).
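
   For example, a typical sketch (hypothetical output; assumes the standard C I/O from <stdio.h>) is
.vb
   PetscMPIInt rank;
   MPI_Comm_rank(PETSC_COMM_WORLD, &rank);
   PetscSequentialPhaseBegin(PETSC_COMM_WORLD, 1);
   printf("rank %d reporting\n", rank);
   fflush(stdout);
   PetscSequentialPhaseEnd(PETSC_COMM_WORLD, 1);
.ve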

.seealso: PetscSequentialPhaseEnd()

   Concepts: sequential stage

@*/
PetscErrorCode PetscSequentialPhaseBegin(MPI_Comm comm,int ng)
{
  PetscMPIInt size;
  MPI_Comm    local_comm,*addr_local_comm;

  PetscSysInitializePackage();
  MPI_Comm_size(comm,&size);
  if (size == 1) return(0);

  /* Get the private communicator for the sequential operations */
  if (Petsc_Seq_keyval == MPI_KEYVAL_INVALID) {
    MPI_Comm_create_keyval(MPI_COMM_NULL_COPY_FN,MPI_COMM_NULL_DELETE_FN,&Petsc_Seq_keyval,0);
  }

  MPI_Comm_dup(comm,&local_comm);
  PetscMalloc1(1,&addr_local_comm);

  *addr_local_comm = local_comm;

  /* Cache the duplicated communicator on comm so PetscSequentialPhaseEnd() can retrieve it */
  MPI_Comm_set_attr(comm,Petsc_Seq_keyval,(void*)addr_local_comm);
  PetscSequentialPhaseBegin_Private(local_comm,ng);
  return(0);
}
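
/*
   A minimal sketch (not part of PETSc; hypothetical names) of the MPI attribute-caching
   pattern used above and undone in PetscSequentialPhaseEnd(): create a keyval once, attach
   a heap-allocated pointer to the communicator, and look it up later through the same keyval.
*/
#if 0
#include <mpi.h>
#include <stdlib.h>

static int demo_keyval = MPI_KEYVAL_INVALID;

static void demo_cache(MPI_Comm comm,MPI_Comm inner)
{
  MPI_Comm *addr;

  if (demo_keyval == MPI_KEYVAL_INVALID) {
    MPI_Comm_create_keyval(MPI_COMM_NULL_COPY_FN,MPI_COMM_NULL_DELETE_FN,&demo_keyval,NULL);
  }
  addr  = (MPI_Comm*)malloc(sizeof(MPI_Comm));
  *addr = inner;
  MPI_Comm_set_attr(comm,demo_keyval,addr);
}

static MPI_Comm demo_lookup(MPI_Comm comm)
{
  MPI_Comm *addr;
  int      flag;

  MPI_Comm_get_attr(comm,demo_keyval,&addr,&flag);
  return flag ? *addr : MPI_COMM_NULL;
}
#endif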
/*@
   PetscSequentialPhaseEnd - Ends a sequential section of code.

   Collective on MPI_Comm

   Input Parameters:
+  comm - Communicator to sequentialize
-  ng   - Number of processes in each group; this many processes are allowed to execute
          at the same time (usually 1)

   Level: intermediate

   Notes:
   See PetscSequentialPhaseBegin() for more details.

.seealso: PetscSequentialPhaseBegin()

   Concepts: sequential stage

@*/
PetscErrorCode PetscSequentialPhaseEnd(MPI_Comm comm,int ng)
{
  PetscMPIInt size,flag;
  MPI_Comm    local_comm,*addr_local_comm;

  MPI_Comm_size(comm,&size);
  if (size == 1) return(0);

  /* Retrieve the private communicator cached by PetscSequentialPhaseBegin() */
  MPI_Comm_get_attr(comm,Petsc_Seq_keyval,(void**)&addr_local_comm,&flag);
  if (!flag) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Wrong MPI communicator; must pass in one used with PetscSequentialPhaseBegin()");
  local_comm = *addr_local_comm;

  PetscSequentialPhaseEnd_Private(local_comm,ng);

  /* Release the cached communicator and remove the attribute */
  PetscFree(addr_local_comm);
  MPI_Comm_free(&local_comm);
  MPI_Comm_delete_attr(comm,Petsc_Seq_keyval);
  return(0);
}
/*@C
  PetscGlobalMinMaxInt - Get the global min/max from local min/max input

  Collective on comm

  Input Parameter:
. minMaxVal - An array with the local min and max

  Output Parameter:
. minMaxValGlobal - An array with the global min and max

  Level: beginner

.keywords: minimum, maximum
.seealso: PetscSplitOwnership()
@*/
PetscErrorCode PetscGlobalMinMaxInt(MPI_Comm comm, PetscInt minMaxVal[2], PetscInt minMaxValGlobal[2])
{
  /* Negate the local max so a single MPI_MIN reduction yields both the global min and the
     (negated) global max; note that the caller's minMaxVal[1] is left negated */
  minMaxVal[1] = -minMaxVal[1];
  MPI_Allreduce(minMaxVal, minMaxValGlobal, 2, MPIU_INT, MPI_MIN, comm);
  minMaxValGlobal[1] = -minMaxValGlobal[1];
  return(0);
}
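
/*
   A minimal usage sketch (not part of PETSc; hypothetical example values): each rank supplies
   its local [min,max] pair and every rank gets back the global pair from one reduction.
*/
#if 0
static PetscErrorCode DemoGlobalMinMaxInt(MPI_Comm comm)
{
  PetscMPIInt rank,size;
  PetscInt    localMinMax[2],globalMinMax[2];

  MPI_Comm_rank(comm,&rank);
  MPI_Comm_size(comm,&size);
  localMinMax[0] = rank;        /* this rank's local minimum */
  localMinMax[1] = rank + 10;   /* this rank's local maximum */
  PetscGlobalMinMaxInt(comm,localMinMax,globalMinMax);
  /* on every rank: globalMinMax[0] == 0 and globalMinMax[1] == size + 9 */
  return(0);
}
#endif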
/*@C
  PetscGlobalMinMaxReal - Get the global min/max from local min/max input

  Collective on comm

  Input Parameter:
. minMaxVal - An array with the local min and max

  Output Parameter:
. minMaxValGlobal - An array with the global min and max

  Level: beginner

.keywords: minimum, maximum
.seealso: PetscSplitOwnership()
@*/
PetscErrorCode PetscGlobalMinMaxReal(MPI_Comm comm, PetscReal minMaxVal[2], PetscReal minMaxValGlobal[2])
{
  /* Same negation trick as PetscGlobalMinMaxInt(): one MPI_MIN reduction gives both min and max */
  minMaxVal[1] = -minMaxVal[1];
  MPI_Allreduce(minMaxVal, minMaxValGlobal, 2, MPIU_REAL, MPI_MIN, comm);
  minMaxValGlobal[1] = -minMaxValGlobal[1];
  return(0);
}