/*
     Some PETSc utilities
*/
#include <petsc-private/petscimpl.h>             /*I    "petscsys.h"   I*/
#include <petsc-private/threadcommimpl.h>
/* ---------------------------------------------------------------- */
/*
  A simple way to manage tags inside a communicator.

  It uses the attributes to determine if a new communicator
  is needed and to store the available tags.
*/
19: /*@C
20: PetscObjectGetNewTag - Gets a unique new tag from a PETSc object. All
21: processors that share the object MUST call this routine EXACTLY the same
22: number of times. This tag should only be used with the current objects
23: communicator; do NOT use it with any other MPI communicator.
25: Collective on PetscObject 27: Input Parameter:
28: . obj - the PETSc object; this must be cast with a (PetscObject), for example,
29: PetscObjectGetNewTag((PetscObject)mat,&tag);
31: Output Parameter:
32: . tag - the new tag
34: Level: developer
36: Concepts: tag^getting
37: Concepts: message tag^getting
38: Concepts: MPI message tag^getting
40: .seealso: PetscCommGetNewTag()
41: @*/
42: PetscErrorCodePetscObjectGetNewTag(PetscObject obj,PetscMPIInt *tag) 43: {
47: PetscCommGetNewTag(obj->comm,tag);
48: return(0);
49: }
53: /*@
54: PetscCommGetNewTag - Gets a unique new tag from a PETSc communicator. All
55: processors that share the communicator MUST call this routine EXACTLY the same
56: number of times. This tag should only be used with the current objects
57: communicator; do NOT use it with any other MPI communicator.
59: Collective on comm
61: Input Parameter:
62: . comm - the MPI communicator
64: Output Parameter:
65: . tag - the new tag
67: Level: developer
69: Concepts: tag^getting
70: Concepts: message tag^getting
71: Concepts: MPI message tag^getting
73: .seealso: PetscObjectGetNewTag(), PetscCommDuplicate()
74: @*/
75: PetscErrorCodePetscCommGetNewTag(MPI_Comm comm,PetscMPIInt *tag) 76: {
77: PetscErrorCode ierr;
78: PetscCommCounter *counter;
79: PetscMPIInt *maxval,flg;
84: MPI_Attr_get(comm,Petsc_Counter_keyval,&counter,&flg);
85: if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_CORRUPT,"Bad MPI communicator supplied; must be a PETSc communicator");
87: if (counter->tag < 1) {
88: PetscInfo1(0,"Out of tags for object, starting to recycle. Comm reference count %d\n",counter->refcount);
89: MPI_Attr_get(MPI_COMM_WORLD,MPI_TAG_UB,&maxval,&flg);
90: if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"MPI error: MPI_Attr_get() is not returning a MPI_TAG_UB");
91: counter->tag = *maxval - 128; /* hope that any still active tags were issued right at the beginning of the run */
92: }
94: *tag = counter->tag--;
95: #if defined(PETSC_USE_DEBUG)
96: /*
97: Hanging here means that some processes have called PetscCommGetNewTag() and others have not.
98: */
99: MPI_Barrier(comm);
100: #endif
101: return(0);
102: }
106: /*@C
107: PetscCommDuplicate - Duplicates the communicator only if it is not already a PETSc communicator.
109: Collective on MPI_Comm111: Input Parameters:
112: . comm_in - Input communicator
114: Output Parameters:
115: + comm_out - Output communicator. May be comm_in.
116: - first_tag - Tag available that has not already been used with this communicator (you may
117: pass in NULL if you do not need a tag)
119: PETSc communicators are just regular MPI communicators that keep track of which
120: tags have been used to prevent tag conflict. If you pass a non-PETSc communicator into
121: a PETSc creation routine it will attach a private communicator for use in the objects communications.
122: The internal MPI_Comm is used to perform all the MPI calls for PETSc, the outer MPI_Comm is a user
123: level MPI_Comm that may be performing communication for the user or other library and so IS NOT used by PETSc.
125: Level: developer
127: Concepts: communicator^duplicate
129: .seealso: PetscObjectGetNewTag(), PetscCommGetNewTag(), PetscCommDestroy()
130: @*/
131: PetscErrorCodePetscCommDuplicate(MPI_Comm comm_in,MPI_Comm *comm_out,PetscMPIInt *first_tag)132: {
133: PetscErrorCode ierr;
134: PetscCommCounter *counter;
135: PetscMPIInt *maxval,flg;
136: PetscInt trank;
137: PetscThreadComm tcomm;
140: MPI_Attr_get(comm_in,Petsc_Counter_keyval,&counter,&flg);
142: if (!flg) { /* this is NOT a PETSc comm */
143: union {MPI_Comm comm; void *ptr;} ucomm;
144: /* check if this communicator has a PETSc communicator imbedded in it */
145: MPI_Attr_get(comm_in,Petsc_InnerComm_keyval,&ucomm,&flg);
146: if (!flg) {
147: /* This communicator is not yet known to this system, so we duplicate it and make an internal communicator */
148: MPI_Comm_dup(comm_in,comm_out);
149: MPI_Attr_get(MPI_COMM_WORLD,MPI_TAG_UB,&maxval,&flg);
150: if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"MPI error: MPI_Attr_get() is not returning a MPI_TAG_UB");
151: PetscMalloc(sizeof(PetscCommCounter),&counter);
153: counter->tag = *maxval;
154: counter->refcount = 0;
155: counter->namecount = 0;
157: MPI_Attr_put(*comm_out,Petsc_Counter_keyval,counter);
158: PetscInfo3(0,"Duplicating a communicator %ld %ld max tags = %d\n",(long)comm_in,(long)*comm_out,*maxval);
160: /* save PETSc communicator inside user communicator, so we can get it next time */
161: ucomm.comm = *comm_out; /* ONLY the comm part of the union is significant. */
162: MPI_Attr_put(comm_in,Petsc_InnerComm_keyval,ucomm.ptr);
163: ucomm.comm = comm_in;
164: MPI_Attr_put(*comm_out,Petsc_OuterComm_keyval,ucomm.ptr);
165: } else {
166: *comm_out = ucomm.comm;
167: /* pull out the inner MPI_Comm and hand it back to the caller */
168: MPI_Attr_get(*comm_out,Petsc_Counter_keyval,&counter,&flg);
169: if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Inner PETSc communicator does not have its tag/name counter attribute set");
170: PetscInfo2(0,"Using internal PETSc communicator %ld %ld\n",(long)comm_in,(long)*comm_out);
171: }
172: } else *comm_out = comm_in;
174: #if defined(PETSC_USE_DEBUG)
175: /*
176: Hanging here means that some processes have called PetscCommDuplicate() and others have not.
177: This likley means that a subset of processes in a MPI_Comm have attempted to create a PetscObject!
178: ALL processes that share a communicator MUST shared objects created from that communicator.
179: */
180: MPI_Barrier(comm_in);
181: #endif
183: if (counter->tag < 1) {
184: PetscInfo1(0,"Out of tags for object, starting to recycle. Comm reference count %d\n",counter->refcount);
185: MPI_Attr_get(MPI_COMM_WORLD,MPI_TAG_UB,&maxval,&flg);
186: if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"MPI error: MPI_Attr_get() is not returning a MPI_TAG_UB");
187: counter->tag = *maxval - 128; /* hope that any still active tags were issued right at the beginning of the run */
188: }
190: if (first_tag) *first_tag = counter->tag--;
192: MPI_Attr_get(*comm_out,Petsc_ThreadComm_keyval,(PetscThreadComm*)&tcomm,&flg);
193: if (!flg) {
194: /* Threadcomm does not exist on this communicator, get the global threadcomm and attach it to this communicator */
195: PetscCommGetThreadComm(*comm_out,&tcomm);
196: PetscThreadCommAttach(*comm_out,tcomm);
197: }
198: /* Only the main thread updates counter->refcount */
199: PetscThreadCommGetRank(tcomm,&trank);
200: if (!trank) counter->refcount++; /* number of references to this comm */
201: return(0);
202: }
206: /*@C
207: PetscCommDestroy - Frees communicator. Use in conjunction with PetscCommDuplicate().
209: Collective on MPI_Comm211: Input Parameter:
212: . comm - the communicator to free
214: Level: developer
216: Concepts: communicator^destroy
218: .seealso: PetscCommDuplicate()
219: @*/
220: PetscErrorCodePetscCommDestroy(MPI_Comm *comm)221: {
222: PetscErrorCode ierr;
223: PetscCommCounter *counter;
224: PetscMPIInt flg;
225: MPI_Comm icomm = *comm,ocomm;
226: PetscThreadComm tcomm;
227: union {MPI_Comm comm; void *ptr;} ucomm;
230: if (*comm == MPI_COMM_NULL) return(0);
231: MPI_Attr_get(icomm,Petsc_Counter_keyval,&counter,&flg);
232: if (!flg) { /* not a PETSc comm, check if it has an inner comm */
233: MPI_Attr_get(icomm,Petsc_InnerComm_keyval,&ucomm,&flg);
234: if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_CORRUPT,"MPI_Comm does not have tag/name counter nor does it have inner MPI_Comm");
235: icomm = ucomm.comm;
236: MPI_Attr_get(icomm,Petsc_Counter_keyval,&counter,&flg);
237: if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_CORRUPT,"Inner MPI_Comm does not have expected tag/name counter, problem with corrupted memory");
238: }
240: /* Only the main thread updates counter->refcount */
241: MPI_Attr_get(icomm,Petsc_ThreadComm_keyval,(PetscThreadComm*)&tcomm,&flg);
242: if (flg) {
243: PetscInt trank;
244: PetscThreadCommGetRank(tcomm,&trank);
245: /* Only thread rank 0 updates the counter */
246: if (!trank) counter->refcount--;
247: } else counter->refcount--;
249: if (!counter->refcount) {
250: /* if MPI_Comm has outer comm then remove reference to inner MPI_Comm from outer MPI_Comm */
251: MPI_Attr_get(icomm,Petsc_OuterComm_keyval,&ucomm,&flg);
252: if (flg) {
253: ocomm = ucomm.comm;
254: MPI_Attr_get(ocomm,Petsc_InnerComm_keyval,&ucomm,&flg);
255: if (flg) {
256: MPI_Attr_delete(ocomm,Petsc_InnerComm_keyval);
257: } else SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_CORRUPT,"Outer MPI_Comm %ld does not have expected reference to inner comm %d, problem with corrupted memory",(long int)ocomm,(long int)icomm);
258: }
260: PetscInfo1(0,"Deleting PETSc MPI_Comm %ld\n",(long)icomm);
261: MPI_Comm_free(&icomm);
262: }
263: *comm = MPI_COMM_NULL;
264: return(0);
265: }
267: #undef __FUNCT__269: /*@C
270: PetscObjectsGetGlobalNumbering - computes a global numbering
271: of PetscObjects living on subcommunicators of a given communicator.
272: This results in a deadlock-free ordering of the subcommunicators
273: and, hence, the objects.
276: Collective on comm.
278: Input Parameters:
279: + comm - MPI_Comm280: . len - length of objlist
281: - objlist - a list of PETSc objects living on subcommunicators of comm
282: (subcommunicator ordering is assumed to be deadlock-free)
284: Output Parameters:
285: + count - number of globally-distinct subcommunicators on objlist
286: . numbering - global numbers of objlist entries (allocated by user)
289: Level: developer
291: Concepts: MPI subcomm^numbering
293: @*/
294: PetscErrorCodePetscObjectsGetGlobalNumbering(MPI_Comm comm, PetscInt len, PetscObject *objlist, PetscInt *count, PetscInt *numbering)295: {
297: PetscInt i, roots, offset;
298: PetscMPIInt size, rank;
304: MPI_Comm_size(comm, &size);
305: MPI_Comm_rank(comm, &rank);
306: roots = 0;
307: for (i = 0; i < len; ++i) {
308: PetscMPIInt srank;
309: MPI_Comm_rank(objlist[i]->comm, &srank);
310: /* Am I the root of the i-th subcomm? */
311: if (!srank) ++roots;
312: }
313: /* Obtain the sum of all roots -- the global number of distinct subcomms. */
314: MPI_Allreduce(&roots,count,1,MPIU_INT,MPI_SUM,comm);
315: /* Now introduce a global numbering for subcomms, initially known only by subcomm roots. */
316: /*
317: At the subcomm roots number the subcomms in the subcomm-root local manner,
318: and make it global by calculating the shift.
319: */
320: MPI_Scan(&roots,&offset,1,MPIU_INT,MPI_SUM,comm);
321: offset -= roots;
322: /* Now we are ready to broadcast global subcomm numbers within each subcomm.*/
323: /*
324: This is where the assumption of a deadlock-free ordering of the subcomms is assumed:
325: broadcast is collective on the subcomm.
326: */
327: roots = 0;
328: for (i = 0; i < len; ++i) {
329: PetscMPIInt srank;
330: numbering[i] = offset + roots; /* only meaningful if !srank. */
332: MPI_Comm_rank(objlist[i]->comm, &srank);
333: MPI_Bcast(numbering+i,1,MPIU_INT,0,objlist[i]->comm);
334: if (!srank) ++roots;
335: }
336: return(0);
337: }