Actual source code: tagm.c

petsc-3.4.0 2013-05-13
/*
      Some PETSc utilities
*/
#include <petsc-private/petscimpl.h>             /*I    "petscsys.h"   I*/
#include <petsc-private/threadcommimpl.h>
/* ---------------------------------------------------------------- */
/*
   A simple way to manage tags inside a communicator.

   It uses MPI attributes to determine if a new communicator
      is needed and to store the available tags.

*/
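
/*
   For reference, the counter attached to each PETSc communicator is a small struct along
   the following lines (a sketch only; the real definition lives in PETSc's private headers,
   and the exact field types may differ):

     typedef struct {
       PetscMPIInt tag;         next free tag value; new tags are handed out counting down
       PetscInt    refcount;    number of references; the inner comm is freed when this reaches zero
       PetscInt    namecount;   used to generate unique default names for objects on this comm
     } PetscCommCounter;
*/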


/*@C
    PetscObjectGetNewTag - Gets a unique new tag from a PETSc object. All
    processors that share the object MUST call this routine EXACTLY the same
    number of times.  This tag should only be used with the current object's
    communicator; do NOT use it with any other MPI communicator.

    Collective on PetscObject

    Input Parameter:
.   obj - the PETSc object; this must be cast with a (PetscObject), for example,
         PetscObjectGetNewTag((PetscObject)mat,&tag);

    Output Parameter:
.   tag - the new tag

    Level: developer

    Concepts: tag^getting
    Concepts: message tag^getting
    Concepts: MPI message tag^getting

.seealso: PetscCommGetNewTag()
@*/
PetscErrorCode  PetscObjectGetNewTag(PetscObject obj,PetscMPIInt *tag)
{

  PetscCommGetNewTag(obj->comm,tag);
  return(0);
}
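
/*
   A minimal usage sketch (illustrative, not part of this file): getting a fresh tag from an
   existing PETSc object, here assumed to be a Mat named mat, and using it for a message on
   that object's communicator; buf, n and dest stand in for the message buffer, count and
   destination rank.

     MPI_Comm       comm;
     PetscMPIInt    tag;
     PetscErrorCode ierr;

     ierr = PetscObjectGetComm((PetscObject)mat,&comm);CHKERRQ(ierr);
     ierr = PetscObjectGetNewTag((PetscObject)mat,&tag);CHKERRQ(ierr);
     ierr = MPI_Send(buf,n,MPI_DOUBLE,dest,tag,comm);CHKERRQ(ierr);
*/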

/*@
    PetscCommGetNewTag - Gets a unique new tag from a PETSc communicator. All
    processors that share the communicator MUST call this routine EXACTLY the same
    number of times.  This tag should only be used with this communicator; do NOT
    use it with any other MPI communicator.

    Collective on comm

    Input Parameter:
.   comm - the MPI communicator

    Output Parameter:
.   tag - the new tag

    Level: developer

    Concepts: tag^getting
    Concepts: message tag^getting
    Concepts: MPI message tag^getting

.seealso: PetscObjectGetNewTag(), PetscCommDuplicate()
@*/
PetscErrorCode  PetscCommGetNewTag(MPI_Comm comm,PetscMPIInt *tag)
{
  PetscErrorCode   ierr;
  PetscCommCounter *counter;
  PetscMPIInt      *maxval,flg;


  MPI_Attr_get(comm,Petsc_Counter_keyval,&counter,&flg);
  if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_CORRUPT,"Bad MPI communicator supplied; must be a PETSc communicator");

  if (counter->tag < 1) {
    PetscInfo1(0,"Out of tags for object, starting to recycle. Comm reference count %d\n",counter->refcount);
    MPI_Attr_get(MPI_COMM_WORLD,MPI_TAG_UB,&maxval,&flg);
    if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"MPI error: MPI_Attr_get() is not returning an MPI_TAG_UB");
    counter->tag = *maxval - 128; /* hope that any still active tags were issued right at the beginning of the run */
  }

  *tag = counter->tag--;
#if defined(PETSC_USE_DEBUG)
  /*
     Hanging here means that some processes have called PetscCommGetNewTag() and others have not.
  */
  MPI_Barrier(comm);
#endif
  return(0);
}
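
/*
   A small usage sketch (assumptions: comm was obtained with PetscCommDuplicate(), rank is
   this process's rank in comm, and buf and n are user data). Note that every rank calls
   PetscCommGetNewTag() exactly once, as the manual page above requires.

     PetscMPIInt    tag;
     PetscErrorCode ierr;

     ierr = PetscCommGetNewTag(comm,&tag);CHKERRQ(ierr);
     if (rank == 0)      { ierr = MPI_Send(buf,n,MPI_DOUBLE,1,tag,comm);CHKERRQ(ierr); }
     else if (rank == 1) { ierr = MPI_Recv(buf,n,MPI_DOUBLE,0,tag,comm,MPI_STATUS_IGNORE);CHKERRQ(ierr); }
*/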

/*@C
  PetscCommDuplicate - Duplicates the communicator only if it is not already a PETSc communicator.

  Collective on MPI_Comm

  Input Parameter:
. comm_in - Input communicator

  Output Parameters:
+ comm_out - Output communicator.  May be comm_in.
- first_tag - Tag available that has not already been used with this communicator (you may
              pass in NULL if you do not need a tag)

  PETSc communicators are just regular MPI communicators that keep track of which
  tags have been used to prevent tag conflicts. If you pass a non-PETSc communicator into
  a PETSc creation routine it will attach a private communicator for use in the object's communications.
  The internal MPI_Comm is used to perform all the MPI calls for PETSc; the outer MPI_Comm is a user
  level MPI_Comm that may be performing communication for the user or another library and so IS NOT used by PETSc.

  Level: developer

  Concepts: communicator^duplicate

.seealso: PetscObjectGetNewTag(), PetscCommGetNewTag(), PetscCommDestroy()
@*/
PetscErrorCode  PetscCommDuplicate(MPI_Comm comm_in,MPI_Comm *comm_out,PetscMPIInt *first_tag)
{
  PetscErrorCode   ierr;
  PetscCommCounter *counter;
  PetscMPIInt      *maxval,flg;
  PetscInt         trank;
  PetscThreadComm  tcomm;

  MPI_Attr_get(comm_in,Petsc_Counter_keyval,&counter,&flg);

  if (!flg) {  /* this is NOT a PETSc comm */
    void *ptr;
    /* check if this communicator has a PETSc communicator embedded in it */
    MPI_Attr_get(comm_in,Petsc_InnerComm_keyval,&ptr,&flg);
    if (!flg) {
      /* This communicator is not yet known to this system, so we duplicate it and make an internal communicator */
      MPI_Comm_dup(comm_in,comm_out);
      MPI_Attr_get(MPI_COMM_WORLD,MPI_TAG_UB,&maxval,&flg);
      if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"MPI error: MPI_Attr_get() is not returning an MPI_TAG_UB");
      PetscMalloc(sizeof(PetscCommCounter),&counter);

      counter->tag       = *maxval;
      counter->refcount  = 0;
      counter->namecount = 0;

      MPI_Attr_put(*comm_out,Petsc_Counter_keyval,counter);
      PetscInfo3(0,"Duplicating a communicator %ld %ld max tags = %d\n",(long)comm_in,(long)*comm_out,*maxval);

      /* save PETSc communicator inside user communicator, so we can get it next time */
      /*  Use PetscMemcpy() because casting from pointer to integer of different size is not allowed with some compilers  */
      PetscMemcpy(&ptr,comm_out,sizeof(MPI_Comm));
      MPI_Attr_put(comm_in,Petsc_InnerComm_keyval,ptr);
      /*  Use PetscMemcpy() because casting from pointer to integer of different size is not allowed with some compilers  */
      PetscMemcpy(&ptr,&comm_in,sizeof(MPI_Comm));
      MPI_Attr_put(*comm_out,Petsc_OuterComm_keyval,ptr);
    } else {
      /* pull out the inner MPI_Comm and hand it back to the caller */
      /*  Use PetscMemcpy() because casting from pointer to integer of different size is not allowed with some compilers  */
      PetscMemcpy(comm_out,&ptr,sizeof(MPI_Comm));
      MPI_Attr_get(*comm_out,Petsc_Counter_keyval,&counter,&flg);
      if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Inner PETSc communicator does not have its tag/name counter attribute set");
      PetscInfo2(0,"Using internal PETSc communicator %ld %ld\n",(long)comm_in,(long)*comm_out);
    }
  } else *comm_out = comm_in;

#if defined(PETSC_USE_DEBUG)
  /*
     Hanging here means that some processes have called PetscCommDuplicate() and others have not.
     This likely means that a subset of processes in an MPI_Comm have attempted to create a PetscObject!
     ALL processes that share a communicator MUST share objects created from that communicator.
  */
  MPI_Barrier(comm_in);
#endif

  if (counter->tag < 1) {
    PetscInfo1(0,"Out of tags for object, starting to recycle. Comm reference count %d\n",counter->refcount);
    MPI_Attr_get(MPI_COMM_WORLD,MPI_TAG_UB,&maxval,&flg);
    if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"MPI error: MPI_Attr_get() is not returning an MPI_TAG_UB");
    counter->tag = *maxval - 128; /* hope that any still active tags were issued right at the beginning of the run */
  }

  if (first_tag) *first_tag = counter->tag--;

  MPI_Attr_get(*comm_out,Petsc_ThreadComm_keyval,(PetscThreadComm*)&tcomm,&flg);
  if (!flg) {
    /* Threadcomm does not exist on this communicator, get the global threadcomm and attach it to this communicator */
    PetscCommGetThreadComm(*comm_out,&tcomm);
    PetscThreadCommAttach(*comm_out,tcomm);
  }
  /* Only the main thread updates counter->refcount */
  PetscThreadCommGetRank(tcomm,&trank);
  if (!trank) counter->refcount++; /* number of references to this comm */
  return(0);
}
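
/*
   A sketch of the usual creation-time pattern (illustrative only; user_comm and the
   surrounding routine are placeholders): a PETSc class duplicates the user's communicator
   once, keeps the inner comm and first tag for its own MPI traffic, and releases it later
   with PetscCommDestroy().

     MPI_Comm       pcomm;
     PetscMPIInt    tag;
     PetscErrorCode ierr;

     ierr = PetscCommDuplicate(user_comm,&pcomm,&tag);CHKERRQ(ierr);
     ... all of the object's internal MPI calls use pcomm and tags drawn from it ...
     ierr = PetscCommDestroy(&pcomm);CHKERRQ(ierr);
*/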

/*@C
   PetscCommDestroy - Frees a communicator.  Use in conjunction with PetscCommDuplicate().

   Collective on MPI_Comm

   Input Parameter:
.  comm - the communicator to free

   Level: developer

   Concepts: communicator^destroy

.seealso:   PetscCommDuplicate()
@*/
PetscErrorCode  PetscCommDestroy(MPI_Comm *comm)
{
  PetscErrorCode   ierr;
  PetscCommCounter *counter;
  PetscMPIInt      flg;
  MPI_Comm         icomm = *comm,ocomm;
  void             *ptr;
  PetscThreadComm  tcomm;

  if (*comm == MPI_COMM_NULL) return(0);
  MPI_Attr_get(icomm,Petsc_Counter_keyval,&counter,&flg);
  if (!flg) { /* not a PETSc comm, check if it has an inner comm */
    MPI_Attr_get(icomm,Petsc_InnerComm_keyval,&ptr,&flg);
    if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_CORRUPT,"MPI_Comm does not have tag/name counter nor does it have an inner MPI_Comm");
    /*  Use PetscMemcpy() because casting from pointer to integer of different size is not allowed with some compilers  */
    PetscMemcpy(&icomm,&ptr,sizeof(MPI_Comm));
    MPI_Attr_get(icomm,Petsc_Counter_keyval,&counter,&flg);
    if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_CORRUPT,"Inner MPI_Comm does not have expected tag/name counter, problem with corrupted memory");
  }

  /* Only the main thread updates counter->refcount */
  MPI_Attr_get(icomm,Petsc_ThreadComm_keyval,(PetscThreadComm*)&tcomm,&flg);
  if (flg) {
    PetscInt trank;
    PetscThreadCommGetRank(tcomm,&trank);
    /* Only thread rank 0 updates the counter */
    if (!trank) counter->refcount--;
  } else counter->refcount--;

  if (!counter->refcount) {
    /* if MPI_Comm has outer comm then remove reference to inner MPI_Comm from outer MPI_Comm */
    MPI_Attr_get(icomm,Petsc_OuterComm_keyval,&ptr,&flg);
    if (flg) {
      /*  Use PetscMemcpy() because casting from pointer to integer of different size is not allowed with some compilers  */
      PetscMemcpy(&ocomm,&ptr,sizeof(MPI_Comm));
      MPI_Attr_get(ocomm,Petsc_InnerComm_keyval,&ptr,&flg);
      if (flg) {
        MPI_Attr_delete(ocomm,Petsc_InnerComm_keyval);
      } else SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_CORRUPT,"Outer MPI_Comm %ld does not have expected reference to inner comm %ld, problem with corrupted memory",(long int)ocomm,(long int)icomm);
    }

    PetscInfo1(0,"Deleting PETSc MPI_Comm %ld\n",(long)icomm);
    MPI_Comm_free(&icomm);
  }
  *comm = MPI_COMM_NULL;
  return(0);
}
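
/*
   A sketch of the reference counting above (illustrative only; user_comm is a placeholder
   for any MPI communicator): duplicating the same communicator twice hands back the same
   inner comm, so the inner MPI_Comm is actually freed only when the matching number of
   PetscCommDestroy() calls brings the reference count back to zero.

     MPI_Comm       a,b;
     PetscMPIInt    tag1,tag2;
     PetscErrorCode ierr;

     ierr = PetscCommDuplicate(user_comm,&a,&tag1);CHKERRQ(ierr);
     ierr = PetscCommDuplicate(user_comm,&b,&tag2);CHKERRQ(ierr);    a == b, but tag2 differs from tag1
     ierr = PetscCommDestroy(&a);CHKERRQ(ierr);                      inner comm still alive, a set to MPI_COMM_NULL
     ierr = PetscCommDestroy(&b);CHKERRQ(ierr);                      last reference gone, inner comm freed
*/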

#undef  __FUNCT__
#define __FUNCT__ "PetscObjectsGetGlobalNumbering"
/*@C
    PetscObjectsGetGlobalNumbering - computes a global numbering
    of PetscObjects living on subcommunicators of a given communicator.
    This results in a deadlock-free ordering of the subcommunicators
    and, hence, the objects.


    Collective on comm.

    Input Parameters:
+   comm    - MPI_Comm
.   len     - length of objlist
-   objlist - a list of PETSc objects living on subcommunicators of comm
                (subcommunicator ordering is assumed to be deadlock-free)

    Output Parameters:
+   count      - number of globally-distinct subcommunicators on objlist
-   numbering  - global numbers of objlist entries (allocated by user)


    Level: developer

    Concepts: MPI subcomm^numbering

@*/
PetscErrorCode  PetscObjectsGetGlobalNumbering(MPI_Comm comm, PetscInt len, PetscObject *objlist, PetscInt *count, PetscInt *numbering)
{
  PetscInt       i, roots, offset;
  PetscMPIInt    size, rank;

  MPI_Comm_size(comm, &size);
  MPI_Comm_rank(comm, &rank);
  roots = 0;
  for (i = 0; i < len; ++i) {
    PetscMPIInt srank;
    MPI_Comm_rank(objlist[i]->comm, &srank);
    /* Am I the root of the i-th subcomm? */
    if (!srank) ++roots;
  }
  /* Obtain the sum of all roots -- the global number of distinct subcomms. */
  MPI_Allreduce(&roots,count,1,MPIU_INT,MPI_SUM,comm);
  /* Now introduce a global numbering for subcomms, initially known only by subcomm roots. */
  /*
   At the subcomm roots, number the subcomms in a root-local manner,
   and make the numbering global by adding each root's offset computed with a prefix sum (scan).
   */
  MPI_Scan(&roots,&offset,1,MPIU_INT,MPI_SUM,comm);
  offset -= roots;
  /* Now we are ready to broadcast global subcomm numbers within each subcomm. */
  /*
     This is where the deadlock-free ordering of the subcomms is relied upon:
     the broadcast is collective on each subcomm.
   */
  roots = 0;
  for (i = 0; i < len; ++i) {
    PetscMPIInt srank;
    numbering[i] = offset + roots; /* only meaningful if !srank. */

    MPI_Comm_rank(objlist[i]->comm, &srank);
    MPI_Bcast(numbering+i,1,MPIU_INT,0,objlist[i]->comm);
    if (!srank) ++roots;
  }
  return(0);
}
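
/*
   A minimal usage sketch (illustrative only; A and B are assumed to be existing PETSc
   objects, e.g. a Vec and a Mat, possibly living on different subcommunicators of
   PETSC_COMM_WORLD): after the call, count holds the number of globally-distinct
   subcommunicators and numbering[i] is the global number of objlist[i]'s subcomm.

     PetscObject    objlist[2];
     PetscInt       count,numbering[2];
     PetscErrorCode ierr;

     objlist[0] = (PetscObject)A;
     objlist[1] = (PetscObject)B;
     ierr = PetscObjectsGetGlobalNumbering(PETSC_COMM_WORLD,2,objlist,&count,numbering);CHKERRQ(ierr);
*/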