Actual source code: tagm.c
petsc-3.12.5 2020-03-29
#include <petsc/private/petscimpl.h>
/* ---------------------------------------------------------------- */
/*
   A simple way to manage tags inside a communicator.

   It uses MPI attributes to determine whether a new communicator
   is needed and to store the available tags.
*/
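
/*
   Illustration: the keyvals used below (Petsc_Counter_keyval, Petsc_InnerComm_keyval,
   Petsc_OuterComm_keyval) are assumed to be created elsewhere, during PetscInitialize(),
   with standard MPI attribute caching, roughly

      MPI_Comm_create_keyval(MPI_COMM_NULL_COPY_FN,Petsc_DelCounter,&Petsc_Counter_keyval,(void*)0);

   with Petsc_DelCounter a delete callback that frees the counter when the communicator
   carrying it is destroyed. MPI_Comm_set_attr()/MPI_Comm_get_attr() then attach and look up
   the PetscCommCounter that holds the next available tag and the reference count.
*/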
/*@C
    PetscObjectGetNewTag - Gets a unique new tag from a PETSc object. All
    processors that share the object MUST call this routine EXACTLY the same
    number of times. This tag should only be used with the current object's
    communicator; do NOT use it with any other MPI communicator.

    Collective on PetscObject

    Input Parameter:
.   obj - the PETSc object; this must be cast with a (PetscObject), for example,
          PetscObjectGetNewTag((PetscObject)mat,&tag);

    Output Parameter:
.   tag - the new tag

    Level: developer

.seealso: PetscCommGetNewTag()
@*/
PetscErrorCode PetscObjectGetNewTag(PetscObject obj,PetscMPIInt *tag)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = PetscCommGetNewTag(obj->comm,tag);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
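
/*
   Usage sketch (A, buf, n, and dest are placeholders for an existing Mat and message data):

      PetscMPIInt tag;
      MPI_Comm    comm;
      ierr = PetscObjectGetNewTag((PetscObject)A,&tag);CHKERRQ(ierr);
      ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
      ierr = MPI_Send(buf,n,MPIU_SCALAR,dest,tag,comm);CHKERRQ(ierr);

   The tag is valid only on comm, the communicator of the object it was taken from.
*/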
/*@
    PetscCommGetNewTag - Gets a unique new tag from a PETSc communicator. All
    processors that share the communicator MUST call this routine EXACTLY the same
    number of times. This tag should only be used with this communicator; do NOT
    use it with any other MPI communicator.

    Collective

    Input Parameter:
.   comm - the MPI communicator

    Output Parameter:
.   tag - the new tag

    Level: developer

.seealso: PetscObjectGetNewTag(), PetscCommDuplicate()
@*/
PetscErrorCode PetscCommGetNewTag(MPI_Comm comm,PetscMPIInt *tag)
{
  PetscErrorCode   ierr;
  PetscCommCounter *counter;
  PetscMPIInt      *maxval,flg;

  PetscFunctionBegin;
  ierr = MPI_Comm_get_attr(comm,Petsc_Counter_keyval,&counter,&flg);CHKERRQ(ierr);
  if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_CORRUPT,"Bad MPI communicator supplied; must be a PETSc communicator");

  if (counter->tag < 1) {
    ierr = PetscInfo1(0,"Out of tags for object, starting to recycle. Comm reference count %d\n",counter->refcount);CHKERRQ(ierr);
    ierr = MPI_Comm_get_attr(MPI_COMM_WORLD,MPI_TAG_UB,&maxval,&flg);CHKERRQ(ierr);
    if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"MPI error: MPI_Comm_get_attr() is not returning a MPI_TAG_UB");
    counter->tag = *maxval - 128; /* hope that any still active tags were issued right at the beginning of the run */
  }

  *tag = counter->tag--;

#if defined(PETSC_USE_DEBUG)
  /*
     Hanging here means that some processes have called PetscCommGetNewTag() and others have not.
  */
  ierr = MPI_Barrier(comm);CHKERRQ(ierr);
#endif
  PetscFunctionReturn(0);
}
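
/*
   Usage sketch: comm must already carry the PETSc counter attribute, for example because it
   was obtained with PetscCommDuplicate() or is the communicator of an existing PETSc object.

      MPI_Comm    pcomm;
      PetscMPIInt tag1,tag2;
      ierr = PetscCommDuplicate(PETSC_COMM_WORLD,&pcomm,&tag1);CHKERRQ(ierr);
      ierr = PetscCommGetNewTag(pcomm,&tag2);CHKERRQ(ierr);
      ierr = PetscCommDestroy(&pcomm);CHKERRQ(ierr);

   Here tag2 is a second tag, distinct from tag1. Every rank of pcomm must make the same
   sequence of PetscCommGetNewTag() calls, otherwise the tag counters (and the debug-mode
   barrier above) get out of sync.
*/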
/*@C
   PetscCommDuplicate - Duplicates the communicator only if it is not already a PETSc communicator.

   Collective

   Input Parameter:
.  comm_in - Input communicator

   Output Parameters:
+  comm_out - Output communicator. May be comm_in.
-  first_tag - Tag available that has not already been used with this communicator (you may
               pass in NULL if you do not need a tag)

   PETSc communicators are just regular MPI communicators that keep track of which
   tags have been used to prevent tag conflicts. If you pass a non-PETSc communicator into
   a PETSc creation routine it will attach a private communicator for use in the object's
   communications. The internal MPI_Comm is used to perform all the MPI calls for PETSc;
   the outer MPI_Comm is a user-level MPI_Comm that may be performing communication for the
   user or another library and so IS NOT used by PETSc.

   Level: developer

.seealso: PetscObjectGetNewTag(), PetscCommGetNewTag(), PetscCommDestroy()
@*/
PetscErrorCode PetscCommDuplicate(MPI_Comm comm_in,MPI_Comm *comm_out,PetscMPIInt *first_tag)
{
  PetscErrorCode   ierr;
  PetscCommCounter *counter;
  PetscMPIInt      *maxval,flg;

  PetscFunctionBegin;
  ierr = PetscSpinlockLock(&PetscCommSpinLock);CHKERRQ(ierr);
  ierr = MPI_Comm_get_attr(comm_in,Petsc_Counter_keyval,&counter,&flg);CHKERRQ(ierr);

  if (!flg) { /* this is NOT a PETSc comm */
    /* MPI attributes are stored as void*; MPI_Comm need not be a pointer type, so a union is
       used to move the communicator handle in and out of the attribute value without casts */
    union {MPI_Comm comm; void *ptr;} ucomm;
    /* check if this communicator has a PETSc communicator embedded in it */
    ierr = MPI_Comm_get_attr(comm_in,Petsc_InnerComm_keyval,&ucomm,&flg);CHKERRQ(ierr);
    if (!flg) {
      /* This communicator is not yet known to this system, so we duplicate it and make an internal communicator */
      ierr = MPI_Comm_dup(comm_in,comm_out);CHKERRQ(ierr);
      ierr = MPI_Comm_get_attr(MPI_COMM_WORLD,MPI_TAG_UB,&maxval,&flg);CHKERRQ(ierr);
      if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"MPI error: MPI_Comm_get_attr() is not returning a MPI_TAG_UB");
      ierr = PetscNew(&counter);CHKERRQ(ierr);

      counter->tag       = *maxval;
      counter->refcount  = 0;
      counter->namecount = 0;

      ierr = MPI_Comm_set_attr(*comm_out,Petsc_Counter_keyval,counter);CHKERRQ(ierr);
      ierr = PetscInfo3(0,"Duplicating a communicator %ld %ld max tags = %d\n",(long)comm_in,(long)*comm_out,*maxval);CHKERRQ(ierr);

      /* save PETSc communicator inside user communicator, so we can get it next time */
      ucomm.comm = *comm_out; /* ONLY the comm part of the union is significant. */
      ierr = MPI_Comm_set_attr(comm_in,Petsc_InnerComm_keyval,ucomm.ptr);CHKERRQ(ierr);
      ucomm.comm = comm_in;
      ierr = MPI_Comm_set_attr(*comm_out,Petsc_OuterComm_keyval,ucomm.ptr);CHKERRQ(ierr);
    } else {
      *comm_out = ucomm.comm;
      /* pull out the inner MPI_Comm and hand it back to the caller */
      ierr = MPI_Comm_get_attr(*comm_out,Petsc_Counter_keyval,&counter,&flg);CHKERRQ(ierr);
      if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Inner PETSc communicator does not have its tag/name counter attribute set");
      ierr = PetscInfo2(0,"Using internal PETSc communicator %ld %ld\n",(long)comm_in,(long)*comm_out);CHKERRQ(ierr);
    }
  } else *comm_out = comm_in;

#if defined(PETSC_USE_DEBUG)
  /*
     Hanging here means that some processes have called PetscCommDuplicate() and others have not.
     This likely means that a subset of processes in an MPI_Comm have attempted to create a PetscObject!
     ALL processes that share a communicator MUST share objects created from that communicator.
  */
  ierr = MPI_Barrier(comm_in);CHKERRQ(ierr);
#endif

  if (counter->tag < 1) {
    ierr = PetscInfo1(0,"Out of tags for object, starting to recycle. Comm reference count %d\n",counter->refcount);CHKERRQ(ierr);
    ierr = MPI_Comm_get_attr(MPI_COMM_WORLD,MPI_TAG_UB,&maxval,&flg);CHKERRQ(ierr);
    if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"MPI error: MPI_Comm_get_attr() is not returning a MPI_TAG_UB");
    counter->tag = *maxval - 128; /* hope that any still active tags were issued right at the beginning of the run */
  }

  if (first_tag) *first_tag = counter->tag--;

  counter->refcount++; /* number of references to this comm */
  ierr = PetscSpinlockUnlock(&PetscCommSpinLock);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
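
/*
   Illustration of the attribute wiring (sketch): duplicating the same user communicator twice
   reuses the inner communicator found through Petsc_InnerComm_keyval instead of calling
   MPI_Comm_dup() a second time, and simply hands out the next tag.

      MPI_Comm    c1,c2;
      PetscMPIInt t1,t2;
      ierr = PetscCommDuplicate(PETSC_COMM_WORLD,&c1,&t1);CHKERRQ(ierr);
      ierr = PetscCommDuplicate(PETSC_COMM_WORLD,&c2,&t2);CHKERRQ(ierr);

   Afterwards c1 == c2, t2 == t1 - 1, and the counter's reference count is 2; each call must
   eventually be matched by a PetscCommDestroy().
*/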
/*@C
   PetscCommDestroy - Frees a communicator. Use in conjunction with PetscCommDuplicate().

   Collective

   Input Parameter:
.  comm - the communicator to free

   Level: developer

.seealso: PetscCommDuplicate()
@*/
PetscErrorCode PetscCommDestroy(MPI_Comm *comm)
{
  PetscErrorCode   ierr;
  PetscCommCounter *counter;
  PetscMPIInt      flg;
  MPI_Comm         icomm = *comm,ocomm;
  union {MPI_Comm comm; void *ptr;} ucomm;

  PetscFunctionBegin;
  if (*comm == MPI_COMM_NULL) PetscFunctionReturn(0);
  ierr = PetscSpinlockLock(&PetscCommSpinLock);CHKERRQ(ierr);
  ierr = MPI_Comm_get_attr(icomm,Petsc_Counter_keyval,&counter,&flg);CHKERRQ(ierr);
  if (!flg) { /* not a PETSc comm, check if it has an inner comm */
    ierr = MPI_Comm_get_attr(icomm,Petsc_InnerComm_keyval,&ucomm,&flg);CHKERRQ(ierr);
    if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_CORRUPT,"MPI_Comm does not have tag/name counter nor does it have inner MPI_Comm");
    icomm = ucomm.comm;
    ierr  = MPI_Comm_get_attr(icomm,Petsc_Counter_keyval,&counter,&flg);CHKERRQ(ierr);
    if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_CORRUPT,"Inner MPI_Comm does not have expected tag/name counter, problem with corrupted memory");
  }

  counter->refcount--;

  if (!counter->refcount) {
    /* if MPI_Comm has outer comm then remove reference to inner MPI_Comm from outer MPI_Comm */
    ierr = MPI_Comm_get_attr(icomm,Petsc_OuterComm_keyval,&ucomm,&flg);CHKERRQ(ierr);
    if (flg) {
      ocomm = ucomm.comm;
      ierr  = MPI_Comm_get_attr(ocomm,Petsc_InnerComm_keyval,&ucomm,&flg);CHKERRQ(ierr);
      if (flg) {
        ierr = MPI_Comm_delete_attr(ocomm,Petsc_InnerComm_keyval);CHKERRQ(ierr);
      } else SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_CORRUPT,"Outer MPI_Comm %ld does not have expected reference to inner comm %ld, problem with corrupted memory",(long int)ocomm,(long int)icomm);
    }

    ierr = PetscInfo1(0,"Deleting PETSc MPI_Comm %ld\n",(long)icomm);CHKERRQ(ierr);
    ierr = MPI_Comm_free(&icomm);CHKERRQ(ierr);
  }
  *comm = MPI_COMM_NULL;
  ierr  = PetscSpinlockUnlock(&PetscCommSpinLock);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
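
/*
   Sketch of the reference-count semantics: the inner MPI_Comm is freed only when the last
   reference goes away, and the caller's handle is always overwritten with MPI_COMM_NULL.
   The routine accepts either the inner (PETSc) communicator or the outer user communicator;
   in the latter case the inner one is located through Petsc_InnerComm_keyval above.

      MPI_Comm    a,b;
      PetscMPIInt t;
      ierr = PetscCommDuplicate(PETSC_COMM_WORLD,&a,&t);CHKERRQ(ierr);
      ierr = PetscCommDuplicate(PETSC_COMM_WORLD,&b,&t);CHKERRQ(ierr);
      ierr = PetscCommDestroy(&a);CHKERRQ(ierr);
      ierr = PetscCommDestroy(&b);CHKERRQ(ierr);

   The first PetscCommDestroy() only decrements the reference count; the second one deletes
   the Petsc_InnerComm_keyval attribute on the outer communicator and calls MPI_Comm_free()
   on the inner one.
*/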
/*@C
   PetscObjectsListGetGlobalNumbering - computes a global numbering
   of PetscObjects living on subcommunicators of a given communicator.

   Collective

   Input Parameters:
+  comm - the MPI_Comm
.  len - local length of objlist
-  objlist - a list of PETSc objects living on subcomms of comm and containing this comm's rank
             (subcomm ordering is assumed to be deadlock-free)

   Output Parameters:
+  count - global number of distinct subcommunicators on objlist (may be > len)
-  numbering - global numbers of objlist entries (allocated by the user)

   Level: developer

@*/
PetscErrorCode PetscObjectsListGetGlobalNumbering(MPI_Comm comm, PetscInt len, PetscObject *objlist, PetscInt *count, PetscInt *numbering)
{
  PetscErrorCode ierr;
  PetscInt       i, roots, offset;
  PetscMPIInt    size, rank;

  PetscFunctionBegin;
  if (!count && !numbering) PetscFunctionReturn(0);

  ierr  = MPI_Comm_size(comm, &size);CHKERRQ(ierr);
  ierr  = MPI_Comm_rank(comm, &rank);CHKERRQ(ierr);
  roots = 0;
  for (i = 0; i < len; ++i) {
    PetscMPIInt srank;
    ierr = MPI_Comm_rank(objlist[i]->comm, &srank);CHKERRQ(ierr);
    /* Am I the root of the i-th subcomm? */
    if (!srank) ++roots;
  }
  if (count) {
    /* Obtain the sum of all roots -- the global number of distinct subcomms. */
    ierr = MPIU_Allreduce(&roots,count,1,MPIU_INT,MPI_SUM,comm);CHKERRQ(ierr);
  }
  if (numbering) {
    /* Introduce a global numbering for subcomms, initially known only by subcomm roots. */
    /*
      Each subcomm root numbers the subcomms it roots locally; the numbering is made global
      by computing each root's shift with a prefix sum over all of the roots.
      The roots are ordered using the comm ordering.
    */
    ierr    = MPI_Scan(&roots,&offset,1,MPIU_INT,MPI_SUM,comm);CHKERRQ(ierr);
    offset -= roots;
    /*
      Now we are ready to broadcast global subcomm numbers within each subcomm.
      This is where the deadlock-free ordering of the subcomms is assumed:
      the broadcast is collective on each subcomm.
    */
    roots = 0;
    for (i = 0; i < len; ++i) {
      PetscMPIInt srank;
      numbering[i] = offset + roots; /* only meaningful if !srank. */

      ierr = MPI_Comm_rank(objlist[i]->comm, &srank);CHKERRQ(ierr);
      ierr = MPI_Bcast(numbering+i,1,MPIU_INT,0,objlist[i]->comm);CHKERRQ(ierr);
      if (!srank) ++roots;
    }
  }
  PetscFunctionReturn(0);
}
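
/*
   Usage sketch (Vec is used purely for illustration and would require petscvec.h): number two
   objects, one living on the full communicator and one on a per-rank subcommunicator.

      Vec         vglobal,vlocal;
      PetscObject objs[2];
      PetscInt    count,numbering[2];
      ierr = VecCreateMPI(PETSC_COMM_WORLD,1,PETSC_DECIDE,&vglobal);CHKERRQ(ierr);
      ierr = VecCreateSeq(PETSC_COMM_SELF,1,&vlocal);CHKERRQ(ierr);
      objs[0] = (PetscObject)vglobal;
      objs[1] = (PetscObject)vlocal;
      ierr = PetscObjectsListGetGlobalNumbering(PETSC_COMM_WORLD,2,objs,&count,numbering);CHKERRQ(ierr);

   followed by the usual VecDestroy() calls. On P ranks this gives count == P+1: one
   subcommunicator shared by everyone plus one self-communicator per rank. numbering[0] is
   the same on every rank, while numbering[1] differs from rank to rank.
*/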