/* Actual source code: tagm.c */
1: #include <petsc/private/petscimpl.h>
2: /* ---------------------------------------------------------------- */
3: /*
4: A simple way to manage tags inside a communicator.
6: It uses the attributes to determine if a new communicator
7: is needed and to store the available tags.
9: */
11: /*@C
12: PetscObjectGetNewTag - Gets a unique new tag from a PETSc object. All
13: processors that share the object MUST call this routine EXACTLY the same
14: number of times. This tag should only be used with the current objects
15: communicator; do NOT use it with any other MPI communicator.
17: Collective on PetscObject
19: Input Parameter:
20: . obj - the PETSc object; this must be cast with a (PetscObject), for example,
21: PetscObjectGetNewTag((PetscObject)mat,&tag);
23: Output Parameter:
24: . tag - the new tag
26: Level: developer
28: .seealso: PetscCommGetNewTag()
29: @*/
30: PetscErrorCode PetscObjectGetNewTag(PetscObject obj,PetscMPIInt *tag)
31: {
35: PetscCommGetNewTag(obj->comm,tag);
36: return(0);
37: }
39: /*@
40: PetscCommGetNewTag - Gets a unique new tag from a PETSc communicator. All
41: processors that share the communicator MUST call this routine EXACTLY the same
42: number of times. This tag should only be used with the current objects
43: communicator; do NOT use it with any other MPI communicator.
45: Collective
47: Input Parameter:
48: . comm - the MPI communicator
50: Output Parameter:
51: . tag - the new tag
53: Level: developer
55: .seealso: PetscObjectGetNewTag(), PetscCommDuplicate()
56: @*/
57: PetscErrorCode PetscCommGetNewTag(MPI_Comm comm,PetscMPIInt *tag)
58: {
59: PetscErrorCode ierr;
60: PetscCommCounter *counter;
61: PetscMPIInt *maxval,flg;
66: MPI_Comm_get_attr(comm,Petsc_Counter_keyval,&counter,&flg);
67: if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_CORRUPT,"Bad MPI communicator supplied; must be a PETSc communicator");
69: if (counter->tag < 1) {
70: PetscInfo1(NULL,"Out of tags for object, starting to recycle. Comm reference count %d\n",counter->refcount);
71: MPI_Comm_get_attr(MPI_COMM_WORLD,MPI_TAG_UB,&maxval,&flg);
72: if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"MPI error: MPI_Comm_get_attr() is not returning a MPI_TAG_UB");
73: counter->tag = *maxval - 128; /* hope that any still active tags were issued right at the beginning of the run */
74: }
76: *tag = counter->tag--;
77: if (PetscDefined(USE_DEBUG)) {
78: /*
79: Hanging here means that some processes have called PetscCommGetNewTag() and others have not.
80: */
81: MPI_Barrier(comm);
82: }
83: return(0);
84: }
86: /*@C
87: PetscCommDuplicate - Duplicates the communicator only if it is not already a PETSc communicator.
89: Collective
91: Input Parameter:
92: . comm_in - Input communicator
94: Output Parameters:
95: + comm_out - Output communicator. May be comm_in.
96: - first_tag - Tag available that has not already been used with this communicator (you may
97: pass in NULL if you do not need a tag)
99: PETSc communicators are just regular MPI communicators that keep track of which
100: tags have been used to prevent tag conflict. If you pass a non-PETSc communicator into
101: a PETSc creation routine it will attach a private communicator for use in the objects communications.
102: The internal MPI_Comm is used to perform all the MPI calls for PETSc, the outer MPI_Comm is a user
103: level MPI_Comm that may be performing communication for the user or other library and so IS NOT used by PETSc.
105: Level: developer
107: .seealso: PetscObjectGetNewTag(), PetscCommGetNewTag(), PetscCommDestroy()
108: @*/
109: PetscErrorCode PetscCommDuplicate(MPI_Comm comm_in,MPI_Comm *comm_out,PetscMPIInt *first_tag)
110: {
111: PetscErrorCode ierr;
112: PetscCommCounter *counter;
113: PetscMPIInt *maxval,flg;
116: PetscSpinlockLock(&PetscCommSpinLock);
117: MPI_Comm_get_attr(comm_in,Petsc_Counter_keyval,&counter,&flg);
119: if (!flg) { /* this is NOT a PETSc comm */
120: union {MPI_Comm comm; void *ptr;} ucomm;
121: /* check if this communicator has a PETSc communicator imbedded in it */
122: MPI_Comm_get_attr(comm_in,Petsc_InnerComm_keyval,&ucomm,&flg);
123: if (!flg) {
124: /* This communicator is not yet known to this system, so we duplicate it and make an internal communicator */
125: MPI_Comm_dup(comm_in,comm_out);
126: MPI_Comm_get_attr(MPI_COMM_WORLD,MPI_TAG_UB,&maxval,&flg);
127: if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"MPI error: MPI_Comm_get_attr() is not returning a MPI_TAG_UB");
128: PetscNew(&counter); /* all fields of counter are zero'ed */
129: counter->tag = *maxval;
130: MPI_Comm_set_attr(*comm_out,Petsc_Counter_keyval,counter);
131: PetscInfo3(NULL,"Duplicating a communicator %ld %ld max tags = %d\n",(long)comm_in,(long)*comm_out,*maxval);
133: /* save PETSc communicator inside user communicator, so we can get it next time */
134: ucomm.comm = *comm_out; /* ONLY the comm part of the union is significant. */
135: MPI_Comm_set_attr(comm_in,Petsc_InnerComm_keyval,ucomm.ptr);
136: ucomm.comm = comm_in;
137: MPI_Comm_set_attr(*comm_out,Petsc_OuterComm_keyval,ucomm.ptr);
138: } else {
139: *comm_out = ucomm.comm;
140: /* pull out the inner MPI_Comm and hand it back to the caller */
141: MPI_Comm_get_attr(*comm_out,Petsc_Counter_keyval,&counter,&flg);
142: if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Inner PETSc communicator does not have its tag/name counter attribute set");
143: PetscInfo2(NULL,"Using internal PETSc communicator %ld %ld\n",(long)comm_in,(long)*comm_out);
144: }
145: } else *comm_out = comm_in;
147: if (PetscDefined(USE_DEBUG)) {
148: /*
149: Hanging here means that some processes have called PetscCommDuplicate() and others have not.
150: This likley means that a subset of processes in a MPI_Comm have attempted to create a PetscObject!
151: ALL processes that share a communicator MUST shared objects created from that communicator.
152: */
153: MPI_Barrier(comm_in);
154: }
156: if (counter->tag < 1) {
157: PetscInfo1(NULL,"Out of tags for object, starting to recycle. Comm reference count %d\n",counter->refcount);
158: MPI_Comm_get_attr(MPI_COMM_WORLD,MPI_TAG_UB,&maxval,&flg);
159: if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"MPI error: MPI_Comm_get_attr() is not returning a MPI_TAG_UB");
160: counter->tag = *maxval - 128; /* hope that any still active tags were issued right at the beginning of the run */
161: }
163: if (first_tag) *first_tag = counter->tag--;
165: counter->refcount++; /* number of references to this comm */
166: PetscSpinlockUnlock(&PetscCommSpinLock);
167: return(0);
168: }
170: /*@C
171: PetscCommDestroy - Frees communicator. Use in conjunction with PetscCommDuplicate().
173: Collective
175: Input Parameter:
176: . comm - the communicator to free
178: Level: developer
180: .seealso: PetscCommDuplicate()
181: @*/
182: PetscErrorCode PetscCommDestroy(MPI_Comm *comm)
183: {
184: PetscErrorCode ierr;
185: PetscCommCounter *counter;
186: PetscMPIInt flg;
187: MPI_Comm icomm = *comm,ocomm;
188: union {MPI_Comm comm; void *ptr;} ucomm;
191: if (*comm == MPI_COMM_NULL) return(0);
192: PetscSpinlockLock(&PetscCommSpinLock);
193: MPI_Comm_get_attr(icomm,Petsc_Counter_keyval,&counter,&flg);
194: if (!flg) { /* not a PETSc comm, check if it has an inner comm */
195: MPI_Comm_get_attr(icomm,Petsc_InnerComm_keyval,&ucomm,&flg);
196: if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_CORRUPT,"MPI_Comm does not have tag/name counter nor does it have inner MPI_Comm");
197: icomm = ucomm.comm;
198: MPI_Comm_get_attr(icomm,Petsc_Counter_keyval,&counter,&flg);
199: if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_CORRUPT,"Inner MPI_Comm does not have expected tag/name counter, problem with corrupted memory");
200: }
202: counter->refcount--;
204: if (!counter->refcount) {
205: /* if MPI_Comm has outer comm then remove reference to inner MPI_Comm from outer MPI_Comm */
206: MPI_Comm_get_attr(icomm,Petsc_OuterComm_keyval,&ucomm,&flg);
207: if (flg) {
208: ocomm = ucomm.comm;
209: MPI_Comm_get_attr(ocomm,Petsc_InnerComm_keyval,&ucomm,&flg);
210: if (flg) {
211: MPI_Comm_delete_attr(ocomm,Petsc_InnerComm_keyval);
212: } else SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_CORRUPT,"Outer MPI_Comm %ld does not have expected reference to inner comm %d, problem with corrupted memory",(long int)ocomm,(long int)icomm);
213: }
215: PetscInfo1(NULL,"Deleting PETSc MPI_Comm %ld\n",(long)icomm);
216: MPI_Comm_free(&icomm);
217: }
218: *comm = MPI_COMM_NULL;
219: PetscSpinlockUnlock(&PetscCommSpinLock);
220: return(0);
221: }
223: /*@C
224: PetscObjectsListGetGlobalNumbering - computes a global numbering
225: of PetscObjects living on subcommunicators of a given communicator.
227: Collective.
229: Input Parameters:
230: + comm - MPI_Comm
231: . len - local length of objlist
232: - objlist - a list of PETSc objects living on subcomms of comm and containing this comm rank
233: (subcomm ordering is assumed to be deadlock-free)
235: Output Parameters:
236: + count - global number of distinct subcommunicators on objlist (may be > len)
237: - numbering - global numbers of objlist entries (allocated by user)
239: Level: developer
241: @*/
242: PetscErrorCode PetscObjectsListGetGlobalNumbering(MPI_Comm comm, PetscInt len, PetscObject *objlist, PetscInt *count, PetscInt *numbering)
243: {
245: PetscInt i, roots, offset;
246: PetscMPIInt size, rank;
250: if (!count && !numbering) return(0);
252: MPI_Comm_size(comm, &size);
253: MPI_Comm_rank(comm, &rank);
254: roots = 0;
255: for (i = 0; i < len; ++i) {
256: PetscMPIInt srank;
257: MPI_Comm_rank(objlist[i]->comm, &srank);
258: /* Am I the root of the i-th subcomm? */
259: if (!srank) ++roots;
260: }
261: if (count) {
262: /* Obtain the sum of all roots -- the global number of distinct subcomms. */
263: MPIU_Allreduce(&roots,count,1,MPIU_INT,MPI_SUM,comm);
264: }
265: if (numbering) {
266: /* Introduce a global numbering for subcomms, initially known only by subcomm roots. */
267: /*
268: At each subcomm root number all of the subcomms it owns locally
269: and make it global by calculating the shift among all of the roots.
270: The roots are ordered using the comm ordering.
271: */
272: MPI_Scan(&roots,&offset,1,MPIU_INT,MPI_SUM,comm);
273: offset -= roots;
274: /* Now we are ready to broadcast global subcomm numbers within each subcomm.*/
275: /*
276: This is where the assumption of a deadlock-free ordering of the subcomms is assumed:
277: broadcast is collective on the subcomm.
278: */
279: roots = 0;
280: for (i = 0; i < len; ++i) {
281: PetscMPIInt srank;
282: numbering[i] = offset + roots; /* only meaningful if !srank. */
284: MPI_Comm_rank(objlist[i]->comm, &srank);
285: MPI_Bcast(numbering+i,1,MPIU_INT,0,objlist[i]->comm);
286: if (!srank) ++roots;
287: }
288: }
289: return(0);
290: }