Actual source code: tagm.c

petsc-3.6.4 2016-04-12
/*
      Some PETSc utilities
*/
#include <petsc/private/petscimpl.h>             /*I    "petscsys.h"   I*/
/* ---------------------------------------------------------------- */
/*
   A simple way to manage tags inside a communicator.

   It uses MPI attributes to determine whether a new communicator
   is needed and to store the available tags.

*/


/*@C
    PetscObjectGetNewTag - Gets a unique new tag from a PETSc object. All
    processors that share the object MUST call this routine EXACTLY the same
    number of times.  This tag should only be used with the current object's
    communicator; do NOT use it with any other MPI communicator.

    Collective on PetscObject

    Input Parameter:
.   obj - the PETSc object; this must be cast with a (PetscObject), for example,
         PetscObjectGetNewTag((PetscObject)mat,&tag);

    Output Parameter:
.   tag - the new tag

    Level: developer

    Concepts: tag^getting
    Concepts: message tag^getting
    Concepts: MPI message tag^getting

.seealso: PetscCommGetNewTag()
@*/
PetscErrorCode  PetscObjectGetNewTag(PetscObject obj,PetscMPIInt *tag)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = PetscCommGetNewTag(obj->comm,tag);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
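/*
   Illustrative sketch (not part of the original tagm.c): a routine that receives an
   already-created PETSc object can request a private tag and use it for point-to-point
   communication on that object's communicator.  The partner rank and the payload below
   are hypothetical placeholders.
*/
static PetscErrorCode ExampleObjectTagUsage_Private(PetscObject obj,PetscMPIInt partner)
{
  PetscErrorCode ierr;
  PetscMPIInt    tag;
  PetscInt       sdata = 42,rdata;
  MPI_Status     status;

  PetscFunctionBegin;
  /* every process of obj->comm must make this call the same number of times */
  ierr = PetscObjectGetNewTag(obj,&tag);CHKERRQ(ierr);
  /* the tag will not collide with any other tag handed out on this communicator */
  ierr = MPI_Sendrecv(&sdata,1,MPIU_INT,partner,tag,&rdata,1,MPIU_INT,partner,tag,obj->comm,&status);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}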

/*@
    PetscCommGetNewTag - Gets a unique new tag from a PETSc communicator. All
    processors that share the communicator MUST call this routine EXACTLY the same
    number of times.  This tag should only be used with the communicator from
    which it was obtained; do NOT use it with any other MPI communicator.

    Collective on comm

    Input Parameter:
.   comm - the MPI communicator

    Output Parameter:
.   tag - the new tag

    Level: developer

    Concepts: tag^getting
    Concepts: message tag^getting
    Concepts: MPI message tag^getting

.seealso: PetscObjectGetNewTag(), PetscCommDuplicate()
@*/
PetscErrorCode  PetscCommGetNewTag(MPI_Comm comm,PetscMPIInt *tag)
{
  PetscErrorCode   ierr;
  PetscCommCounter *counter;
  PetscMPIInt      *maxval,flg;

  PetscFunctionBegin;
  ierr = MPI_Attr_get(comm,Petsc_Counter_keyval,&counter,&flg);CHKERRQ(ierr);
  if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_CORRUPT,"Bad MPI communicator supplied; must be a PETSc communicator");

  if (counter->tag < 1) {
    ierr = PetscInfo1(0,"Out of tags for object, starting to recycle. Comm reference count %d\n",counter->refcount);CHKERRQ(ierr);
    ierr = MPI_Attr_get(MPI_COMM_WORLD,MPI_TAG_UB,&maxval,&flg);CHKERRQ(ierr);
    if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"MPI error: MPI_Attr_get() is not returning a MPI_TAG_UB");
    counter->tag = *maxval - 128; /* hope that any still active tags were issued right at the beginning of the run */
  }

  *tag = counter->tag--;
#if defined(PETSC_USE_DEBUG)
  /*
     Hanging here means that some processes have called PetscCommGetNewTag() and others have not.
  */
  ierr = MPI_Barrier(comm);CHKERRQ(ierr);
#endif
  PetscFunctionReturn(0);
}
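/*
   Illustrative sketch (not part of the original tagm.c): because the communicator stored
   in any PETSc object has already been processed by PetscCommDuplicate(), additional tags
   can be drawn from it directly, e.g. one tag per independent message stream.  All ranks
   must request the tags in the same order and the same number of times.
*/
static PetscErrorCode ExampleTwoTagStreams_Private(PetscObject obj)
{
  PetscErrorCode ierr;
  PetscMPIInt    reqtag,reptag;

  PetscFunctionBegin;
  ierr = PetscCommGetNewTag(obj->comm,&reqtag);CHKERRQ(ierr); /* tag for "request" messages */
  ierr = PetscCommGetNewTag(obj->comm,&reptag);CHKERRQ(ierr); /* tag for "reply" messages   */
  /* ... post sends and receives on obj->comm using reqtag and reptag ... */
  PetscFunctionReturn(0);
}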

/*@C
  PetscCommDuplicate - Duplicates the communicator only if it is not already a PETSc communicator.

  Collective on MPI_Comm

  Input Parameter:
. comm_in - Input communicator

  Output Parameters:
+ comm_out - Output communicator.  May be comm_in.
- first_tag - Tag available that has not already been used with this communicator (you may
              pass in NULL if you do not need a tag)

  PETSc communicators are just regular MPI communicators that keep track of which
  tags have been used to prevent tag conflicts. If you pass a non-PETSc communicator into
  a PETSc creation routine, it will attach a private communicator for use in the object's communications.
  The internal MPI_Comm is used to perform all the MPI calls for PETSc; the outer MPI_Comm is a user-level
  MPI_Comm that may be performing communication for the user or another library and so IS NOT used by PETSc.

  Level: developer

  Concepts: communicator^duplicate

.seealso: PetscObjectGetNewTag(), PetscCommGetNewTag(), PetscCommDestroy()
@*/
PetscErrorCode  PetscCommDuplicate(MPI_Comm comm_in,MPI_Comm *comm_out,PetscMPIInt *first_tag)
{
  PetscErrorCode   ierr;
  PetscCommCounter *counter;
  PetscMPIInt      *maxval,flg;

  PetscFunctionBegin;
  ierr = MPI_Attr_get(comm_in,Petsc_Counter_keyval,&counter,&flg);CHKERRQ(ierr);

  if (!flg) {  /* this is NOT a PETSc comm */
    union {MPI_Comm comm; void *ptr;} ucomm;
    /* check if this communicator has a PETSc communicator embedded in it */
    ierr = MPI_Attr_get(comm_in,Petsc_InnerComm_keyval,&ucomm,&flg);CHKERRQ(ierr);
    if (!flg) {
      /* This communicator is not yet known to this system, so we duplicate it and make an internal communicator */
      ierr = MPI_Comm_dup(comm_in,comm_out);CHKERRQ(ierr);
      ierr = MPI_Attr_get(MPI_COMM_WORLD,MPI_TAG_UB,&maxval,&flg);CHKERRQ(ierr);
      if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"MPI error: MPI_Attr_get() is not returning a MPI_TAG_UB");
      ierr = PetscNew(&counter);CHKERRQ(ierr);

      counter->tag       = *maxval;
      counter->refcount  = 0;
      counter->namecount = 0;

      ierr = MPI_Attr_put(*comm_out,Petsc_Counter_keyval,counter);CHKERRQ(ierr);
      ierr = PetscInfo3(0,"Duplicating a communicator %ld %ld max tags = %d\n",(long)comm_in,(long)*comm_out,*maxval);CHKERRQ(ierr);

      /* save PETSc communicator inside user communicator, so we can get it next time */
      ucomm.comm = *comm_out;   /* ONLY the comm part of the union is significant. */
      ierr = MPI_Attr_put(comm_in,Petsc_InnerComm_keyval,ucomm.ptr);CHKERRQ(ierr);
      ucomm.comm = comm_in;
      ierr = MPI_Attr_put(*comm_out,Petsc_OuterComm_keyval,ucomm.ptr);CHKERRQ(ierr);
    } else {
      *comm_out = ucomm.comm;
      /* pull out the inner MPI_Comm and hand it back to the caller */
      ierr = MPI_Attr_get(*comm_out,Petsc_Counter_keyval,&counter,&flg);CHKERRQ(ierr);
      if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Inner PETSc communicator does not have its tag/name counter attribute set");
      ierr = PetscInfo2(0,"Using internal PETSc communicator %ld %ld\n",(long)comm_in,(long)*comm_out);CHKERRQ(ierr);
    }
  } else *comm_out = comm_in;

#if defined(PETSC_USE_DEBUG)
  /*
     Hanging here means that some processes have called PetscCommDuplicate() and others have not.
     This likely means that a subset of processes in a MPI_Comm have attempted to create a PetscObject!
     ALL processes that share a communicator MUST share objects created from that communicator.
  */
  ierr = MPI_Barrier(comm_in);CHKERRQ(ierr);
#endif

  if (counter->tag < 1) {
    ierr = PetscInfo1(0,"Out of tags for object, starting to recycle. Comm reference count %d\n",counter->refcount);CHKERRQ(ierr);
    ierr = MPI_Attr_get(MPI_COMM_WORLD,MPI_TAG_UB,&maxval,&flg);CHKERRQ(ierr);
    if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"MPI error: MPI_Attr_get() is not returning a MPI_TAG_UB");
    counter->tag = *maxval - 128; /* hope that any still active tags were issued right at the beginning of the run */
  }

  if (first_tag) *first_tag = counter->tag--;

  counter->refcount++; /* number of references to this comm */
  PetscFunctionReturn(0);
}
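/*
   Illustrative sketch (not part of the original tagm.c): duplicating a raw user communicator
   attaches (or reuses) the hidden inner PETSc communicator.  A second call on the same input
   therefore returns the identical MPI_Comm, bumps the reference count, and hands out a fresh
   tag; matching PetscCommDestroy() calls release the references, and the inner communicator
   is freed only when the last reference is gone.
*/
static PetscErrorCode ExampleCommDuplicate_Private(MPI_Comm usercomm)
{
  PetscErrorCode ierr;
  MPI_Comm       pcomm1,pcomm2;
  PetscMPIInt    tag1,tag2;

  PetscFunctionBegin;
  ierr = PetscCommDuplicate(usercomm,&pcomm1,&tag1);CHKERRQ(ierr); /* may create the inner comm   */
  ierr = PetscCommDuplicate(usercomm,&pcomm2,&tag2);CHKERRQ(ierr); /* reuses it: pcomm2 == pcomm1 */
  /* ... communicate on pcomm1/pcomm2 using the private tags tag1 and tag2 ... */
  ierr = PetscCommDestroy(&pcomm2);CHKERRQ(ierr);
  ierr = PetscCommDestroy(&pcomm1);CHKERRQ(ierr); /* last reference: the inner comm is freed */
  PetscFunctionReturn(0);
}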

/*@C
   PetscCommDestroy - Frees communicator.  Use in conjunction with PetscCommDuplicate().

   Collective on MPI_Comm

   Input Parameter:
.  comm - the communicator to free

   Level: developer

   Concepts: communicator^destroy

.seealso:   PetscCommDuplicate()
@*/
PetscErrorCode  PetscCommDestroy(MPI_Comm *comm)
{
  PetscErrorCode   ierr;
  PetscCommCounter *counter;
  PetscMPIInt      flg;
  MPI_Comm         icomm = *comm,ocomm;
  union {MPI_Comm comm; void *ptr;} ucomm;

  PetscFunctionBegin;
  if (*comm == MPI_COMM_NULL) PetscFunctionReturn(0);
  ierr = MPI_Attr_get(icomm,Petsc_Counter_keyval,&counter,&flg);CHKERRQ(ierr);
  if (!flg) { /* not a PETSc comm, check if it has an inner comm */
    ierr = MPI_Attr_get(icomm,Petsc_InnerComm_keyval,&ucomm,&flg);CHKERRQ(ierr);
    if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_CORRUPT,"MPI_Comm does not have tag/name counter nor does it have inner MPI_Comm");
    icomm = ucomm.comm;
    ierr = MPI_Attr_get(icomm,Petsc_Counter_keyval,&counter,&flg);CHKERRQ(ierr);
    if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_CORRUPT,"Inner MPI_Comm does not have expected tag/name counter, problem with corrupted memory");
  }

  counter->refcount--;

  if (!counter->refcount) {
    /* if MPI_Comm has outer comm then remove reference to inner MPI_Comm from outer MPI_Comm */
    ierr = MPI_Attr_get(icomm,Petsc_OuterComm_keyval,&ucomm,&flg);CHKERRQ(ierr);
    if (flg) {
      ocomm = ucomm.comm;
      ierr = MPI_Attr_get(ocomm,Petsc_InnerComm_keyval,&ucomm,&flg);CHKERRQ(ierr);
      if (flg) {
        ierr = MPI_Attr_delete(ocomm,Petsc_InnerComm_keyval);CHKERRQ(ierr);
      } else SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_CORRUPT,"Outer MPI_Comm %ld does not have expected reference to inner comm %ld, problem with corrupted memory",(long int)ocomm,(long int)icomm);
    }

    ierr = PetscInfo1(0,"Deleting PETSc MPI_Comm %ld\n",(long)icomm);CHKERRQ(ierr);
    ierr = MPI_Comm_free(&icomm);CHKERRQ(ierr);
  }
  *comm = MPI_COMM_NULL;
  PetscFunctionReturn(0);
}

#undef  __FUNCT__
#define __FUNCT__ "PetscObjectsListGetGlobalNumbering"
/*@C
    PetscObjectsListGetGlobalNumbering - computes a global numbering
    of PetscObjects living on subcommunicators of a given communicator.

    Collective on comm

    Input Parameters:
+   comm    - MPI_Comm
.   len     - local length of objlist
-   objlist - a list of PETSc objects living on subcomms of comm and containing this comm rank
              (subcomm ordering is assumed to be deadlock-free)

    Output Parameters:
+   count      - global number of distinct subcommunicators on objlist (may be > len)
-   numbering  - global numbers of objlist entries (allocated by user)

    Level: developer

    Concepts: MPI subcomm^numbering

@*/
PetscErrorCode  PetscObjectsListGetGlobalNumbering(MPI_Comm comm, PetscInt len, PetscObject *objlist, PetscInt *count, PetscInt *numbering)
{
  PetscErrorCode ierr;
  PetscInt       i, roots, offset;
  PetscMPIInt    size, rank;

  PetscFunctionBegin;
  if (!count && !numbering) PetscFunctionReturn(0);

  ierr  = MPI_Comm_size(comm, &size);CHKERRQ(ierr);
  ierr  = MPI_Comm_rank(comm, &rank);CHKERRQ(ierr);
  roots = 0;
  for (i = 0; i < len; ++i) {
    PetscMPIInt srank;
    ierr = MPI_Comm_rank(objlist[i]->comm, &srank);CHKERRQ(ierr);
    /* Am I the root of the i-th subcomm? */
    if (!srank) ++roots;
  }
  if (count) {
    /* Obtain the sum of all roots -- the global number of distinct subcomms. */
    ierr = MPI_Allreduce(&roots,count,1,MPIU_INT,MPI_SUM,comm);CHKERRQ(ierr);
  }
  if (numbering) {
    /* Introduce a global numbering for subcomms, initially known only by subcomm roots. */
    /*
      At each subcomm root number all of the subcomms it owns locally
      and make it global by calculating the shift among all of the roots.
      The roots are ordered using the comm ordering.
    */
    ierr    = MPI_Scan(&roots,&offset,1,MPIU_INT,MPI_SUM,comm);CHKERRQ(ierr);
    offset -= roots;
    /* Now we are ready to broadcast global subcomm numbers within each subcomm. */
    /*
      This is where the deadlock-free ordering of the subcomms is relied upon:
      the broadcast is collective on the subcomm.
    */
    roots = 0;
    for (i = 0; i < len; ++i) {
      PetscMPIInt srank;
      numbering[i] = offset + roots; /* only meaningful at the subcomm root (srank == 0); correct values are broadcast below */

      ierr = MPI_Comm_rank(objlist[i]->comm, &srank);CHKERRQ(ierr);
      ierr = MPI_Bcast(numbering+i,1,MPIU_INT,0,objlist[i]->comm);CHKERRQ(ierr);
      if (!srank) ++roots;
    }
  }
  PetscFunctionReturn(0);
}
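/*
   Illustrative sketch (not part of the original tagm.c): given objects that live on
   (possibly different) subcommunicators of comm and were created elsewhere, obtain the
   number of distinct subcommunicators and a global numbering for the local entries.
   The subcomm ordering in objs is assumed to be deadlock-free, as required above.
*/
static PetscErrorCode ExampleGlobalNumbering_Private(MPI_Comm comm,PetscInt n,PetscObject *objs)
{
  PetscErrorCode ierr;
  PetscInt       count,*numbering;

  PetscFunctionBegin;
  ierr = PetscMalloc1(n,&numbering);CHKERRQ(ierr);
  ierr = PetscObjectsListGetGlobalNumbering(comm,n,objs,&count,numbering);CHKERRQ(ierr);
  ierr = PetscPrintf(comm,"%D distinct subcommunicators\n",count);CHKERRQ(ierr);
  ierr = PetscFree(numbering);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}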