/*
      Some PETSc utilities
*/
#include <petsc/private/petscimpl.h>             /*I    "petscsys.h"   I*/

/* ---------------------------------------------------------------- */
/*
   A simple way to manage tags inside a communicator.

   It uses the attributes to determine if a new communicator
   is needed and to store the available tags.
*/
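
/*
   Illustration only (not part of the library): the routines below rely on the standard
   MPI attribute-caching mechanism.  A minimal sketch with a hypothetical keyval "my_keyval"
   and payload pointer "payload" (PETSc's own keyvals, such as Petsc_Counter_keyval, are
   created during PetscInitialize()):

     static int  my_keyval = MPI_KEYVAL_INVALID;
     void        *payload,*value;
     PetscMPIInt flg;

     MPI_Keyval_create(MPI_NULL_COPY_FN,MPI_NULL_DELETE_FN,&my_keyval,(void*)0);
     MPI_Attr_put(comm,my_keyval,payload);          attach the payload to the communicator
     MPI_Attr_get(comm,my_keyval,&value,&flg);      flg is 0 if nothing was ever attached
*/
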
/*@C
    PetscObjectGetNewTag - Gets a unique new tag from a PETSc object. All
    processors that share the object MUST call this routine EXACTLY the same
    number of times.  This tag should only be used with the current object's
    communicator; do NOT use it with any other MPI communicator.

    Collective on PetscObject

    Input Parameter:
.   obj - the PETSc object; this must be cast with a (PetscObject), for example,
          PetscObjectGetNewTag((PetscObject)mat,&tag);

    Output Parameter:
.   tag - the new tag

    Level: developer

    Concepts: tag^getting
    Concepts: message tag^getting
    Concepts: MPI message tag^getting

.seealso: PetscCommGetNewTag()
@*/
PetscErrorCode PetscObjectGetNewTag(PetscObject obj,PetscMPIInt *tag)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = PetscCommGetNewTag(obj->comm,tag);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
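
/*
   Usage sketch (illustrative only, not part of the library): a tag obtained from an object
   is intended for point-to-point messages on that object's own communicator.  The names
   "mat", "buf" and "dest" below are assumptions for the example.

     PetscErrorCode ierr;
     PetscMPIInt    tag,dest = 0;
     MPI_Comm       comm;
     PetscScalar    buf = 1.0;

     ierr = PetscObjectGetNewTag((PetscObject)mat,&tag);CHKERRQ(ierr);
     ierr = PetscObjectGetComm((PetscObject)mat,&comm);CHKERRQ(ierr);
     ierr = MPI_Send(&buf,1,MPIU_SCALAR,dest,tag,comm);CHKERRQ(ierr);
*/
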
/*@
    PetscCommGetNewTag - Gets a unique new tag from a PETSc communicator. All
    processors that share the communicator MUST call this routine EXACTLY the same
    number of times.  This tag should only be used with this communicator;
    do NOT use it with any other MPI communicator.

    Collective on comm

    Input Parameter:
.   comm - the MPI communicator

    Output Parameter:
.   tag - the new tag

    Level: developer

    Concepts: tag^getting
    Concepts: message tag^getting
    Concepts: MPI message tag^getting

.seealso: PetscObjectGetNewTag(), PetscCommDuplicate()
@*/
PetscErrorCode PetscCommGetNewTag(MPI_Comm comm,PetscMPIInt *tag)
{
  PetscErrorCode   ierr;
  PetscCommCounter *counter;
  PetscMPIInt      *maxval,flg;

  PetscFunctionBegin;
  ierr = MPI_Attr_get(comm,Petsc_Counter_keyval,&counter,&flg);CHKERRQ(ierr);
  if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_CORRUPT,"Bad MPI communicator supplied; must be a PETSc communicator");

  if (counter->tag < 1) {
    ierr = PetscInfo1(0,"Out of tags for object, starting to recycle. Comm reference count %d\n",counter->refcount);CHKERRQ(ierr);
    ierr = MPI_Attr_get(MPI_COMM_WORLD,MPI_TAG_UB,&maxval,&flg);CHKERRQ(ierr);
    if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"MPI error: MPI_Attr_get() is not returning a MPI_TAG_UB");
    counter->tag = *maxval - 128; /* hope that any still active tags were issued right at the beginning of the run */
  }

  *tag = counter->tag--;
#if defined(PETSC_USE_DEBUG)
  /*
     Hanging here means that some processes have called PetscCommGetNewTag() and others have not.
  */
  ierr = MPI_Barrier(comm);CHKERRQ(ierr);
#endif
  PetscFunctionReturn(0);
}
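
/*
   Usage sketch (illustrative only): comm must already be a PETSc communicator, e.g. the inner
   communicator returned by PetscCommDuplicate().  Tags are handed out in decreasing order and
   the call is collective over comm.

     PetscErrorCode ierr;
     PetscMPIInt    tag1,tag2;

     ierr = PetscCommGetNewTag(comm,&tag1);CHKERRQ(ierr);
     ierr = PetscCommGetNewTag(comm,&tag2);CHKERRQ(ierr);    tag2 == tag1 - 1
*/
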
/*@C
   PetscCommDuplicate - Duplicates the communicator only if it is not already a PETSc communicator.

   Collective on MPI_Comm

   Input Parameter:
.  comm_in - Input communicator

   Output Parameters:
+  comm_out - Output communicator.  May be comm_in.
-  first_tag - Tag available that has not already been used with this communicator (you may
               pass in NULL if you do not need a tag)

   PETSc communicators are just regular MPI communicators that keep track of which
   tags have been used to prevent tag conflicts. If you pass a non-PETSc communicator into
   a PETSc creation routine it will attach a private communicator for use in the object's communications.
   The internal MPI_Comm is used to perform all the MPI calls for PETSc; the outer MPI_Comm is a user-level
   MPI_Comm that may be performing communication for the user or another library and so IS NOT used by PETSc.

   Level: developer

   Concepts: communicator^duplicate

.seealso: PetscObjectGetNewTag(), PetscCommGetNewTag(), PetscCommDestroy()
@*/
PetscErrorCode PetscCommDuplicate(MPI_Comm comm_in,MPI_Comm *comm_out,PetscMPIInt *first_tag)
{
  PetscErrorCode   ierr;
  PetscCommCounter *counter;
  PetscMPIInt      *maxval,flg;

  PetscFunctionBegin;
  ierr = PetscSpinlockLock(&PetscCommSpinLock);CHKERRQ(ierr);
  ierr = MPI_Attr_get(comm_in,Petsc_Counter_keyval,&counter,&flg);CHKERRQ(ierr);

  if (!flg) {  /* this is NOT a PETSc comm */
    union {MPI_Comm comm; void *ptr;} ucomm;
    /* check if this communicator has a PETSc communicator embedded in it */
    ierr = MPI_Attr_get(comm_in,Petsc_InnerComm_keyval,&ucomm,&flg);CHKERRQ(ierr);
    if (!flg) {
      /* This communicator is not yet known to this system, so we duplicate it and make an internal communicator */
      ierr = MPI_Comm_dup(comm_in,comm_out);CHKERRQ(ierr);
      ierr = MPI_Attr_get(MPI_COMM_WORLD,MPI_TAG_UB,&maxval,&flg);CHKERRQ(ierr);
      if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"MPI error: MPI_Attr_get() is not returning a MPI_TAG_UB");
      ierr = PetscNew(&counter);CHKERRQ(ierr);

      counter->tag       = *maxval;
      counter->refcount  = 0;
      counter->namecount = 0;

      ierr = MPI_Attr_put(*comm_out,Petsc_Counter_keyval,counter);CHKERRQ(ierr);
      ierr = PetscInfo3(0,"Duplicating a communicator %ld %ld max tags = %d\n",(long)comm_in,(long)*comm_out,*maxval);CHKERRQ(ierr);

      /* save PETSc communicator inside user communicator, so we can get it next time */
      ucomm.comm = *comm_out;   /* ONLY the comm part of the union is significant. */
      ierr       = MPI_Attr_put(comm_in,Petsc_InnerComm_keyval,ucomm.ptr);CHKERRQ(ierr);
      ucomm.comm = comm_in;
      ierr       = MPI_Attr_put(*comm_out,Petsc_OuterComm_keyval,ucomm.ptr);CHKERRQ(ierr);
    } else {
      *comm_out = ucomm.comm;
      /* pull out the inner MPI_Comm and hand it back to the caller */
      ierr = MPI_Attr_get(*comm_out,Petsc_Counter_keyval,&counter,&flg);CHKERRQ(ierr);
      if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Inner PETSc communicator does not have its tag/name counter attribute set");
      ierr = PetscInfo2(0,"Using internal PETSc communicator %ld %ld\n",(long)comm_in,(long)*comm_out);CHKERRQ(ierr);
    }
  } else *comm_out = comm_in;

#if defined(PETSC_USE_DEBUG)
  /*
     Hanging here means that some processes have called PetscCommDuplicate() and others have not.
     This likely means that a subset of processes in a MPI_Comm have attempted to create a PetscObject!
     ALL processes that share a communicator MUST share objects created from that communicator.
  */
  ierr = MPI_Barrier(comm_in);CHKERRQ(ierr);
#endif

  if (counter->tag < 1) {
    ierr = PetscInfo1(0,"Out of tags for object, starting to recycle. Comm reference count %d\n",counter->refcount);CHKERRQ(ierr);
    ierr = MPI_Attr_get(MPI_COMM_WORLD,MPI_TAG_UB,&maxval,&flg);CHKERRQ(ierr);
    if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"MPI error: MPI_Attr_get() is not returning a MPI_TAG_UB");
    counter->tag = *maxval - 128; /* hope that any still active tags were issued right at the beginning of the run */
  }

  if (first_tag) *first_tag = counter->tag--;

  counter->refcount++; /* number of references to this comm */
  ierr = PetscSpinlockUnlock(&PetscCommSpinLock);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
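
/*
   Usage sketch (illustrative only): roughly what a PETSc object-creation routine does with a
   communicator supplied by the user.

     PetscErrorCode ierr;
     MPI_Comm       user_comm = PETSC_COMM_WORLD,petsc_comm;
     PetscMPIInt    tag;

     ierr = PetscCommDuplicate(user_comm,&petsc_comm,&tag);CHKERRQ(ierr);
       ... the object stores petsc_comm and performs all of its MPI calls on it,
           starting with the tag handed back in "tag" ...
     ierr = PetscCommDestroy(&petsc_comm);CHKERRQ(ierr);

   A second PetscCommDuplicate() on user_comm finds the attached inner communicator, returns
   the same petsc_comm, and simply increments its reference count.
*/
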
/*@C
   PetscCommDestroy - Frees communicator.  Use in conjunction with PetscCommDuplicate().

   Collective on MPI_Comm

   Input Parameter:
.  comm - the communicator to free

   Level: developer

   Concepts: communicator^destroy

.seealso: PetscCommDuplicate()
@*/
PetscErrorCode PetscCommDestroy(MPI_Comm *comm)
{
  PetscErrorCode   ierr;
  PetscCommCounter *counter;
  PetscMPIInt      flg;
  MPI_Comm         icomm = *comm,ocomm;
  union {MPI_Comm comm; void *ptr;} ucomm;

  PetscFunctionBegin;
  if (*comm == MPI_COMM_NULL) PetscFunctionReturn(0);
  ierr = PetscSpinlockLock(&PetscCommSpinLock);CHKERRQ(ierr);
  ierr = MPI_Attr_get(icomm,Petsc_Counter_keyval,&counter,&flg);CHKERRQ(ierr);
  if (!flg) { /* not a PETSc comm, check if it has an inner comm */
    ierr = MPI_Attr_get(icomm,Petsc_InnerComm_keyval,&ucomm,&flg);CHKERRQ(ierr);
    if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_CORRUPT,"MPI_Comm does not have tag/name counter nor does it have inner MPI_Comm");
    icomm = ucomm.comm;
    ierr  = MPI_Attr_get(icomm,Petsc_Counter_keyval,&counter,&flg);CHKERRQ(ierr);
    if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_CORRUPT,"Inner MPI_Comm does not have expected tag/name counter, problem with corrupted memory");
  }

  counter->refcount--;

  if (!counter->refcount) {
    /* if MPI_Comm has outer comm then remove reference to inner MPI_Comm from outer MPI_Comm */
    ierr = MPI_Attr_get(icomm,Petsc_OuterComm_keyval,&ucomm,&flg);CHKERRQ(ierr);
    if (flg) {
      ocomm = ucomm.comm;
      ierr  = MPI_Attr_get(ocomm,Petsc_InnerComm_keyval,&ucomm,&flg);CHKERRQ(ierr);
      if (flg) {
        ierr = MPI_Attr_delete(ocomm,Petsc_InnerComm_keyval);CHKERRQ(ierr);
      } else SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_CORRUPT,"Outer MPI_Comm %ld does not have expected reference to inner comm %ld, problem with corrupted memory",(long int)ocomm,(long int)icomm);
    }

    ierr = PetscInfo1(0,"Deleting PETSc MPI_Comm %ld\n",(long)icomm);CHKERRQ(ierr);
    ierr = MPI_Comm_free(&icomm);CHKERRQ(ierr);
  }
  *comm = MPI_COMM_NULL;
  ierr  = PetscSpinlockUnlock(&PetscCommSpinLock);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
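
/*
   Note (illustrative sketch): PetscCommDestroy() accepts either the outer user communicator or
   the inner PETSc communicator; in the former case it follows the Petsc_InnerComm_keyval
   attribute to locate the inner communicator whose reference count it decrements.

     PetscErrorCode ierr;
     MPI_Comm       a,b;

     ierr = PetscCommDuplicate(PETSC_COMM_WORLD,&a,NULL);CHKERRQ(ierr);    reference count 1
     ierr = PetscCommDuplicate(PETSC_COMM_WORLD,&b,NULL);CHKERRQ(ierr);    same comm, count 2
     ierr = PetscCommDestroy(&a);CHKERRQ(ierr);                            count 1
     ierr = PetscCommDestroy(&b);CHKERRQ(ierr);                            count 0, MPI_Comm_free()
*/
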
/*@C
   PetscObjectsListGetGlobalNumbering - computes a global numbering
   of PetscObjects living on subcommunicators of a given communicator.

   Collective on comm

   Input Parameters:
+  comm    - the MPI_Comm
.  len     - local length of objlist
-  objlist - a list of PETSc objects living on subcomms of comm and containing this comm rank
             (subcomm ordering is assumed to be deadlock-free)

   Output Parameters:
+  count     - global number of distinct subcommunicators on objlist (may be > len)
-  numbering - global numbers of objlist entries (allocated by user)

   Level: developer

   Concepts: MPI subcomm^numbering

@*/
PetscErrorCode PetscObjectsListGetGlobalNumbering(MPI_Comm comm, PetscInt len, PetscObject *objlist, PetscInt *count, PetscInt *numbering)
{
  PetscErrorCode ierr;
  PetscInt       i, roots, offset;
  PetscMPIInt    size, rank;

  PetscFunctionBegin;
  if (!count && !numbering) PetscFunctionReturn(0);

  ierr  = MPI_Comm_size(comm, &size);CHKERRQ(ierr);
  ierr  = MPI_Comm_rank(comm, &rank);CHKERRQ(ierr);
  roots = 0;
  for (i = 0; i < len; ++i) {
    PetscMPIInt srank;
    ierr = MPI_Comm_rank(objlist[i]->comm, &srank);CHKERRQ(ierr);
    /* Am I the root of the i-th subcomm? */
    if (!srank) ++roots;
  }
  if (count) {
    /* Obtain the sum of all roots -- the global number of distinct subcomms. */
    ierr = MPIU_Allreduce(&roots,count,1,MPIU_INT,MPI_SUM,comm);CHKERRQ(ierr);
  }
  if (numbering) {
    /* Introduce a global numbering for subcomms, initially known only by subcomm roots. */
    /*
      At each subcomm root number all of the subcomms it owns locally
      and make it global by calculating the shift among all of the roots.
      The roots are ordered using the comm ordering.
    */
    ierr    = MPI_Scan(&roots,&offset,1,MPIU_INT,MPI_SUM,comm);CHKERRQ(ierr);
    offset -= roots;
    /* Now we are ready to broadcast global subcomm numbers within each subcomm. */
    /*
      This is where the assumption of a deadlock-free ordering of the subcomms is used:
      the broadcast is collective on the subcomm.
    */
    roots = 0;
    for (i = 0; i < len; ++i) {
      PetscMPIInt srank;
      numbering[i] = offset + roots; /* only meaningful if !srank. */

      ierr = MPI_Comm_rank(objlist[i]->comm, &srank);CHKERRQ(ierr);
      ierr = MPI_Bcast(numbering+i,1,MPIU_INT,0,objlist[i]->comm);CHKERRQ(ierr);
      if (!srank) ++roots;
    }
  }
  PetscFunctionReturn(0);
}
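
/*
   Usage sketch (illustrative only; "vecA" and "vecB" are assumed objects living on
   subcommunicators of PETSC_COMM_WORLD):

     PetscErrorCode ierr;
     PetscObject    objlist[2];
     PetscInt       count,numbering[2];

     objlist[0] = (PetscObject)vecA;
     objlist[1] = (PetscObject)vecB;
     ierr = PetscObjectsListGetGlobalNumbering(PETSC_COMM_WORLD,2,objlist,&count,numbering);CHKERRQ(ierr);

   Afterwards count is the global number of distinct subcommunicators and numbering[i] is the
   global number of the subcommunicator on which objlist[i] lives.
*/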