Actual source code: mpiu.c

#include <petscsys.h>
#include <petsc/private/petscimpl.h>

/*
   Note that tag of 0 is ok because comm is a private communicator
   generated below just for these routines.
*/
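
/*
   The two _Private routines below implement a simple token relay.  In
   PetscSequentialPhaseBegin_Private() every rank except rank 0 blocks on a
   zero-length receive from the previous rank; ranks that are not the last
   member of their group of ng forward the token right away, so up to ng
   ranks run concurrently.  In PetscSequentialPhaseEnd_Private() the last
   rank of each group passes the token to the first rank of the next group,
   and rank 0 absorbs the token coming back from the last rank.  For example,
   with ng = 1 and 4 ranks the token travels 0 -> 1 -> 2 -> 3 -> 0, so the
   protected code runs in strict rank order.
*/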
PETSC_INTERN PetscErrorCode PetscSequentialPhaseBegin_Private(MPI_Comm comm,int ng)
{
  PetscMPIInt rank,size,tag = 0;
  MPI_Status  status;

  MPI_Comm_size(comm,&size);
  if (size == 1) return 0;
  MPI_Comm_rank(comm,&rank);
  if (rank) {
    MPI_Recv(NULL,0,MPI_INT,rank-1,tag,comm,&status);
  }
  /* Send to the next process in the group unless we are the last process */
  if ((rank % ng) < ng - 1 && rank != size - 1) {
    MPI_Send(NULL,0,MPI_INT,rank + 1,tag,comm);
  }
  return 0;
}
PETSC_INTERN PetscErrorCode PetscSequentialPhaseEnd_Private(MPI_Comm comm,int ng)
{
  PetscMPIInt rank,size,tag = 0;
  MPI_Status  status;

  MPI_Comm_rank(comm,&rank);
  MPI_Comm_size(comm,&size);
  if (size == 1) return 0;

  /* Send to the first process in the next group */
  if ((rank % ng) == ng - 1 || rank == size - 1) {
    MPI_Send(NULL,0,MPI_INT,(rank + 1) % size,tag,comm);
  }
  if (rank == 0) {
    MPI_Recv(NULL,0,MPI_INT,size-1,tag,comm,&status);
  }
  return 0;
}
/* ---------------------------------------------------------------------*/
/*
   The variable Petsc_Seq_keyval is used to indicate an MPI attribute that
   is attached to a communicator that manages the sequential phase code below.
*/
PetscMPIInt Petsc_Seq_keyval = MPI_KEYVAL_INVALID;
/*@
   PetscSequentialPhaseBegin - Begins a sequential section of code.

   Collective

   Input Parameters:
+  comm - Communicator to sequentialize.
-  ng   - Number in processor group. This many processes are allowed to execute
          at the same time (usually 1)

   Level: intermediate

   Notes:
   PetscSequentialPhaseBegin() and PetscSequentialPhaseEnd() provide a
   way to force a section of code to be executed by the processes in
   rank order. Typically, this is done with
.vb
   PetscSequentialPhaseBegin(comm, 1);
   <code to be executed sequentially>
   PetscSequentialPhaseEnd(comm, 1);
.ve

   Often, the sequential code contains output statements (e.g., printf) to
   be executed. Note that you may need to flush the I/O buffers before
   calling PetscSequentialPhaseEnd(). Also, note that some systems do
   not propagate I/O in any order to the controlling terminal (in other words,
   even if you flush the output, you may not get the data in the order
   that you want).
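
   For example, rank-ordered output with an explicit flush might look like the
   following sketch (it assumes rank holds the result of MPI_Comm_rank() on comm,
   and the printed data is purely illustrative):
.vb
   PetscSequentialPhaseBegin(comm, 1);
   printf("[%d] local part of the data ...\n", rank);
   fflush(stdout);
   PetscSequentialPhaseEnd(comm, 1);
.ve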

.seealso: PetscSequentialPhaseEnd()
@*/
PetscErrorCode PetscSequentialPhaseBegin(MPI_Comm comm,int ng)
{
  PetscMPIInt size;
  MPI_Comm    local_comm,*addr_local_comm;

  PetscSysInitializePackage();
  MPI_Comm_size(comm,&size);
  if (size == 1) return 0;

  /* Get the private communicator for the sequential operations */
  if (Petsc_Seq_keyval == MPI_KEYVAL_INVALID) {
    MPI_Comm_create_keyval(MPI_COMM_NULL_COPY_FN,MPI_COMM_NULL_DELETE_FN,&Petsc_Seq_keyval,NULL);
  }

  MPI_Comm_dup(comm,&local_comm);
  PetscMalloc1(1,&addr_local_comm);
  *addr_local_comm = local_comm;

  MPI_Comm_set_attr(comm,Petsc_Seq_keyval,(void*)addr_local_comm);
  PetscSequentialPhaseBegin_Private(local_comm,ng);
  return 0;
}
/*@
   PetscSequentialPhaseEnd - Ends a sequential section of code.

   Collective

   Input Parameters:
+  comm - Communicator to sequentialize.
-  ng   - Number in processor group. This many processes are allowed to execute
          at the same time (usually 1)

   Level: intermediate

   Notes:
   See PetscSequentialPhaseBegin() for more details.

.seealso: PetscSequentialPhaseBegin()
@*/
PetscErrorCode PetscSequentialPhaseEnd(MPI_Comm comm,int ng)
{
  PetscMPIInt size,flag;
  MPI_Comm    local_comm,*addr_local_comm;

  MPI_Comm_size(comm,&size);
  if (size == 1) return 0;

  MPI_Comm_get_attr(comm,Petsc_Seq_keyval,(void**)&addr_local_comm,&flag);
  if (!flag) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Wrong MPI communicator; must pass in one used with PetscSequentialPhaseBegin()");
  local_comm = *addr_local_comm;

  PetscSequentialPhaseEnd_Private(local_comm,ng);

  PetscFree(addr_local_comm);
  MPI_Comm_free(&local_comm);
  MPI_Comm_delete_attr(comm,Petsc_Seq_keyval);
  return 0;
}
/*@C
  PetscGlobalMinMaxInt - Get the global min/max from local min/max input

  Collective

  Input Parameters:
+ comm      - The MPI communicator to reduce over
- minMaxVal - An array with the local min and max

  Output Parameter:
. minMaxValGlobal - An array with the global min and max

  Level: beginner
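
  Notes:
  A typical call looks like the following sketch (localMin and localMax stand for
  values already computed by the caller, and the communicator is illustrative):
.vb
  PetscInt lminmax[2], gminmax[2];

  lminmax[0] = localMin;
  lminmax[1] = localMax;
  PetscGlobalMinMaxInt(PETSC_COMM_WORLD, lminmax, gminmax);
  /* gminmax[0] is the global min, gminmax[1] the global max */
.ve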

.seealso: PetscSplitOwnership()
@*/
PetscErrorCode PetscGlobalMinMaxInt(MPI_Comm comm, const PetscInt minMaxVal[2], PetscInt minMaxValGlobal[2])
{
  PetscInt sendbuf[3],recvbuf[3];

  /* The global min is obtained as -max(-x).  Since -PETSC_MIN_INT overflows back to
     PETSC_MIN_INT, sendbuf[2] records whether any rank holds PETSC_MIN_INT so the
     result can be forced to PETSC_MIN_INT in that case. */
  sendbuf[0] = -minMaxVal[0];
  sendbuf[1] = minMaxVal[1];
  sendbuf[2] = (minMaxVal[0] == PETSC_MIN_INT) ? 1 : 0;
  MPI_Allreduce(sendbuf, recvbuf, 3, MPIU_INT, MPI_MAX, comm);
  minMaxValGlobal[0] = recvbuf[2] ? PETSC_MIN_INT : -recvbuf[0];
  minMaxValGlobal[1] = recvbuf[1];
  return 0;
}
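
/*
   Illustration of the PETSC_MIN_INT handling in PetscGlobalMinMaxInt() above
   (a hypothetical two-rank run): rank 0 holds {PETSC_MIN_INT, 5} and rank 1
   holds {-3, 7}.  Negating the minima gives sendbuf[0] = PETSC_MIN_INT on
   rank 0 (the negation wraps) and 3 on rank 1, with sendbuf[2] = 1 on rank 0.
   The MPI_MAX reduction yields recvbuf = {3, 7, 1}, and because recvbuf[2] is
   set the result is correctly reported as {PETSC_MIN_INT, 7} rather than {-3, 7}.
*/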
/*@C
  PetscGlobalMinMaxReal - Get the global min/max from local min/max input

  Collective

  Input Parameters:
+ comm      - The MPI communicator to reduce over
- minMaxVal - An array with the local min and max

  Output Parameter:
. minMaxValGlobal - An array with the global min and max

  Level: beginner

.seealso: PetscSplitOwnership()
@*/
PetscErrorCode PetscGlobalMinMaxReal(MPI_Comm comm, const PetscReal minMaxVal[2], PetscReal minMaxValGlobal[2])
{
  PetscReal sendbuf[2];

  /* Compute the global min as -max(-x); unlike the integer case there is no overflow concern */
  sendbuf[0] = -minMaxVal[0];
  sendbuf[1] = minMaxVal[1];
  MPIU_Allreduce(sendbuf,minMaxValGlobal,2,MPIU_REAL,MPIU_MAX,comm);
  minMaxValGlobal[0] = -minMaxValGlobal[0];
  return 0;
}