Actual source code: tagm.c

petsc-3.9.4 2018-09-11

  2: /*
  3:       Some PETSc utilities
  4: */
  5:  #include <petsc/private/petscimpl.h>
  6: /* ---------------------------------------------------------------- */
  7: /*
  8:    A simple way to manage tags inside a communicator.

 10:    It uses the attributes to determine if a new communicator
 11:       is needed and to store the available tags.

 13: */
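The attribute caching referred to above is plain MPI: a keyval is created once with MPI_Comm_create_keyval() and a small counter structure is attached to the communicator with MPI_Comm_set_attr(). The following stand-alone sketch of that pattern uses hypothetical names (TagCounter, AttachTagCounter) and is not part of tagm.c; it only illustrates the mechanism the routines below rely on.

#include <mpi.h>
#include <stdlib.h>

/* Hypothetical sketch of the MPI attribute-caching pattern used in this file */
typedef struct {int tag; int refcount;} TagCounter;
static int Counter_keyval = MPI_KEYVAL_INVALID;

static int AttachTagCounter(MPI_Comm comm)
{
  TagCounter *c;
  void       *maxval;
  int        flg;

  if (Counter_keyval == MPI_KEYVAL_INVALID) {
    /* create the key once; a delete callback could free the counter when the comm is freed */
    MPI_Comm_create_keyval(MPI_COMM_NULL_COPY_FN,MPI_COMM_NULL_DELETE_FN,&Counter_keyval,NULL);
  }
  MPI_Comm_get_attr(MPI_COMM_WORLD,MPI_TAG_UB,&maxval,&flg);
  c           = (TagCounter*)malloc(sizeof(TagCounter));
  c->tag      = flg ? *(int*)maxval : 32767;  /* MPI guarantees usable tags up to at least 32767 */
  c->refcount = 0;
  return MPI_Comm_set_attr(comm,Counter_keyval,c);
}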


 16: /*@C
 17:     PetscObjectGetNewTag - Gets a unique new tag from a PETSc object. All
 18:     processors that share the object MUST call this routine EXACTLY the same
 19:     number of times.  This tag should only be used with the current object's
 20:     communicator; do NOT use it with any other MPI communicator.

 22:     Collective on PetscObject

 24:     Input Parameter:
 25: .   obj - the PETSc object; this must be cast with a (PetscObject), for example,
 26:          PetscObjectGetNewTag((PetscObject)mat,&tag);

 28:     Output Parameter:
 29: .   tag - the new tag

 31:     Level: developer

 33:     Concepts: tag^getting
 34:     Concepts: message tag^getting
 35:     Concepts: MPI message tag^getting

 37: .seealso: PetscCommGetNewTag()
 38: @*/
 39: PetscErrorCode  PetscObjectGetNewTag(PetscObject obj,PetscMPIInt *tag)
 40: {
 41:   PetscErrorCode ierr;

 43:   PetscFunctionBegin;
 44:   ierr = PetscCommGetNewTag(obj->comm,tag);CHKERRQ(ierr);
 45:   PetscFunctionReturn(0);
 46: }
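For instance, library code might request a tag for communication tied to a matrix. This is an illustrative sketch only; the Mat A is assumed to exist already.

  /* Sketch (assumes an existing Mat A): get a tag valid only on A's communicator */
  PetscMPIInt    tag;
  PetscErrorCode ierr;
  ierr = PetscObjectGetNewTag((PetscObject)A,&tag);CHKERRQ(ierr);
  /* use tag only with MPI calls on PetscObjectComm((PetscObject)A) */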

 48: /*@
 49:     PetscCommGetNewTag - Gets a unique new tag from a PETSc communicator. All
 50:     processors that share the communicator MUST call this routine EXACTLY the same
 51:     number of times.  This tag should only be used with the given communicator;
 52:     do NOT use it with any other MPI communicator.

 54:     Collective on comm

 56:     Input Parameter:
 57: .   comm - the MPI communicator

 59:     Output Parameter:
 60: .   tag - the new tag

 62:     Level: developer

 64:     Concepts: tag^getting
 65:     Concepts: message tag^getting
 66:     Concepts: MPI message tag^getting

 68: .seealso: PetscObjectGetNewTag(), PetscCommDuplicate()
 69: @*/
 70: PetscErrorCode  PetscCommGetNewTag(MPI_Comm comm,PetscMPIInt *tag)
 71: {
 72:   PetscErrorCode   ierr;
 73:   PetscCommCounter *counter;
 74:   PetscMPIInt      *maxval,flg;


 76:   PetscFunctionBegin;
 79:   ierr = MPI_Comm_get_attr(comm,Petsc_Counter_keyval,&counter,&flg);CHKERRQ(ierr);
 80:   if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_CORRUPT,"Bad MPI communicator supplied; must be a PETSc communicator");

 82:   if (counter->tag < 1) {
 83:     ierr = PetscInfo1(0,"Out of tags for object, starting to recycle. Comm reference count %d\n",counter->refcount);CHKERRQ(ierr);
 84:     ierr = MPI_Comm_get_attr(MPI_COMM_WORLD,MPI_TAG_UB,&maxval,&flg);CHKERRQ(ierr);
 85:     if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"MPI error: MPI_Comm_get_attr() is not returning a MPI_TAG_UB");
 86:     counter->tag = *maxval - 128; /* hope that any still active tags were issued right at the beginning of the run */
 87:   }

 89:   *tag = counter->tag--;
 90: #if defined(PETSC_USE_DEBUG)
 91:   /*
 92:      Hanging here means that some processes have called PetscCommGetNewTag() and others have not.
 93:   */
 94:   ierr = MPI_Barrier(comm);CHKERRQ(ierr);
 95: #endif
 96:   PetscFunctionReturn(0);
 97: }
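Because the counter lives in a communicator attribute, only communicators that went through PetscCommDuplicate() (or the communicator stored inside a PetscObject) can hand out tags. A hedged usage sketch:

  /* Sketch: obtain a PETSc communicator, pull two tags from it, release it */
  MPI_Comm       pcomm;
  PetscMPIInt    tag1,tag2;
  PetscErrorCode ierr;

  ierr = PetscCommDuplicate(PETSC_COMM_WORLD,&pcomm,NULL);CHKERRQ(ierr);
  ierr = PetscCommGetNewTag(pcomm,&tag1);CHKERRQ(ierr);
  ierr = PetscCommGetNewTag(pcomm,&tag2);CHKERRQ(ierr);   /* tag2 == tag1 - 1 */
  ierr = PetscCommDestroy(&pcomm);CHKERRQ(ierr);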

 99: /*@C
100:   PetscCommDuplicate - Duplicates the communicator only if it is not already a PETSc communicator.

102:   Collective on MPI_Comm

104:   Input Parameters:
105: . comm_in - Input communicator

107:   Output Parameters:
108: + comm_out - Output communicator.  May be comm_in.
109: - first_tag - Tag available that has not already been used with this communicator (you may
110:               pass in NULL if you do not need a tag)

112:   PETSc communicators are just regular MPI communicators that keep track of which
113:   tags have been used to prevent tag conflicts. If you pass a non-PETSc communicator into
114:   a PETSc creation routine, it will attach a private communicator for use in the object's communications.
115:   The internal MPI_Comm is used to perform all the MPI calls for PETSc; the outer MPI_Comm is a user
116:   level MPI_Comm that may be performing communication for the user or another library and so IS NOT used by PETSc.

118:   Level: developer

120:   Concepts: communicator^duplicate

122: .seealso: PetscObjectGetNewTag(), PetscCommGetNewTag(), PetscCommDestroy()
123: @*/
124: PetscErrorCode  PetscCommDuplicate(MPI_Comm comm_in,MPI_Comm *comm_out,PetscMPIInt *first_tag)
125: {
126:   PetscErrorCode   ierr;
127:   PetscCommCounter *counter;
128:   PetscMPIInt      *maxval,flg;

130:   PetscFunctionBegin;
131:   ierr = PetscSpinlockLock(&PetscCommSpinLock);CHKERRQ(ierr);
132:   ierr = MPI_Comm_get_attr(comm_in,Petsc_Counter_keyval,&counter,&flg);CHKERRQ(ierr);

134:   if (!flg) {  /* this is NOT a PETSc comm */
135:     union {MPI_Comm comm; void *ptr;} ucomm;
136:     /* check if this communicator has a PETSc communicator embedded in it */
137:     ierr = MPI_Comm_get_attr(comm_in,Petsc_InnerComm_keyval,&ucomm,&flg);CHKERRQ(ierr);
138:     if (!flg) {
139:       /* This communicator is not yet known to this system, so we duplicate it and make an internal communicator */
140:       ierr = MPI_Comm_dup(comm_in,comm_out);CHKERRQ(ierr);
141:       ierr = MPI_Comm_get_attr(MPI_COMM_WORLD,MPI_TAG_UB,&maxval,&flg);CHKERRQ(ierr);
142:       if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"MPI error: MPI_Comm_get_attr() is not returning a MPI_TAG_UB");
143:       ierr = PetscNew(&counter);CHKERRQ(ierr);

145:       counter->tag       = *maxval;
146:       counter->refcount  = 0;
147:       counter->namecount = 0;

149:       ierr = MPI_Comm_set_attr(*comm_out,Petsc_Counter_keyval,counter);CHKERRQ(ierr);
150:       ierr = PetscInfo3(0,"Duplicating a communicator %ld %ld max tags = %d\n",(long)comm_in,(long)*comm_out,*maxval);CHKERRQ(ierr);

152:       /* save PETSc communicator inside user communicator, so we can get it next time */
153:       ucomm.comm = *comm_out;   /* ONLY the comm part of the union is significant. */
154:       ierr = MPI_Comm_set_attr(comm_in,Petsc_InnerComm_keyval,ucomm.ptr);CHKERRQ(ierr);
155:       ucomm.comm = comm_in;
156:       ierr = MPI_Comm_set_attr(*comm_out,Petsc_OuterComm_keyval,ucomm.ptr);CHKERRQ(ierr);
157:     } else {
158:       *comm_out = ucomm.comm;
159:       /* pull out the inner MPI_Comm and hand it back to the caller */
160:       ierr = MPI_Comm_get_attr(*comm_out,Petsc_Counter_keyval,&counter,&flg);CHKERRQ(ierr);
161:       if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Inner PETSc communicator does not have its tag/name counter attribute set");
162:       ierr = PetscInfo2(0,"Using internal PETSc communicator %ld %ld\n",(long)comm_in,(long)*comm_out);CHKERRQ(ierr);
163:     }
164:   } else *comm_out = comm_in;

166: #if defined(PETSC_USE_DEBUG)
167:   /*
168:      Hanging here means that some processes have called PetscCommDuplicate() and others have not.
169:      This likely means that a subset of processes in an MPI_Comm have attempted to create a PetscObject!
170:      ALL processes that share a communicator MUST share objects created from that communicator.
171:   */
172:   ierr = MPI_Barrier(comm_in);CHKERRQ(ierr);
173: #endif

175:   if (counter->tag < 1) {
176:     ierr = PetscInfo1(0,"Out of tags for object, starting to recycle. Comm reference count %d\n",counter->refcount);CHKERRQ(ierr);
177:     ierr = MPI_Comm_get_attr(MPI_COMM_WORLD,MPI_TAG_UB,&maxval,&flg);CHKERRQ(ierr);
178:     if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"MPI error: MPI_Comm_get_attr() is not returning a MPI_TAG_UB");
179:     counter->tag = *maxval - 128; /* hope that any still active tags were issued right at the beginning of the run */
180:   }

182:   if (first_tag) *first_tag = counter->tag--;

184:   counter->refcount++; /* number of references to this comm */
185:   ierr = PetscSpinlockUnlock(&PetscCommSpinLock);CHKERRQ(ierr);
186:   PetscFunctionReturn(0);
187: }
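The attribute lookup makes the routine effectively idempotent: duplicating the same user communicator twice returns the same inner communicator and only bumps the reference count. A sketch of that behaviour:

  /* Sketch: repeated duplication reuses the cached inner communicator */
  MPI_Comm       c1,c2;
  PetscMPIInt    t1,t2;
  PetscErrorCode ierr;

  ierr = PetscCommDuplicate(MPI_COMM_WORLD,&c1,&t1);CHKERRQ(ierr);
  ierr = PetscCommDuplicate(MPI_COMM_WORLD,&c2,&t2);CHKERRQ(ierr); /* c2 == c1, t2 == t1 - 1 */
  ierr = PetscCommDestroy(&c2);CHKERRQ(ierr);
  ierr = PetscCommDestroy(&c1);CHKERRQ(ierr);  /* reference count reaches zero, the inner comm is freed */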

189: /*@C
190:    PetscCommDestroy - Frees a communicator.  Use in conjunction with PetscCommDuplicate().

192:    Collective on MPI_Comm

194:    Input Parameter:
195: .  comm - the communicator to free

197:    Level: developer

199:    Concepts: communicator^destroy

201: .seealso:   PetscCommDuplicate()
202: @*/
203: PetscErrorCode  PetscCommDestroy(MPI_Comm *comm)
204: {
205:   PetscErrorCode   ierr;
206:   PetscCommCounter *counter;
207:   PetscMPIInt      flg;
208:   MPI_Comm         icomm = *comm,ocomm;
209:   union {MPI_Comm comm; void *ptr;} ucomm;

211:   PetscFunctionBegin;
212:   if (*comm == MPI_COMM_NULL) PetscFunctionReturn(0);
213:   ierr = PetscSpinlockLock(&PetscCommSpinLock);CHKERRQ(ierr);
214:   ierr = MPI_Comm_get_attr(icomm,Petsc_Counter_keyval,&counter,&flg);CHKERRQ(ierr);
215:   if (!flg) { /* not a PETSc comm, check if it has an inner comm */
216:     ierr = MPI_Comm_get_attr(icomm,Petsc_InnerComm_keyval,&ucomm,&flg);CHKERRQ(ierr);
217:     if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_CORRUPT,"MPI_Comm does not have tag/name counter nor does it have inner MPI_Comm");
218:     icomm = ucomm.comm;
219:     ierr = MPI_Comm_get_attr(icomm,Petsc_Counter_keyval,&counter,&flg);CHKERRQ(ierr);
220:     if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_CORRUPT,"Inner MPI_Comm does not have expected tag/name counter, problem with corrupted memory");
221:   }

223:   counter->refcount--;

225:   if (!counter->refcount) {
226:     /* if MPI_Comm has outer comm then remove reference to inner MPI_Comm from outer MPI_Comm */
227:     ierr = MPI_Comm_get_attr(icomm,Petsc_OuterComm_keyval,&ucomm,&flg);CHKERRQ(ierr);
228:     if (flg) {
229:       ocomm = ucomm.comm;
230:       ierr = MPI_Comm_get_attr(ocomm,Petsc_InnerComm_keyval,&ucomm,&flg);CHKERRQ(ierr);
231:       if (flg) {
232:         ierr = MPI_Comm_delete_attr(ocomm,Petsc_InnerComm_keyval);CHKERRQ(ierr);
233:       } else SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_CORRUPT,"Outer MPI_Comm %ld does not have expected reference to inner comm %ld, problem with corrupted memory",(long int)ocomm,(long int)icomm);
234:     }

236:     ierr = PetscInfo1(0,"Deleting PETSc MPI_Comm %ld\n",(long)icomm);CHKERRQ(ierr);
237:     ierr = MPI_Comm_free(&icomm);CHKERRQ(ierr);
238:   }
239:   *comm = MPI_COMM_NULL;
240:   ierr = PetscSpinlockUnlock(&PetscCommSpinLock);CHKERRQ(ierr);
241:   PetscFunctionReturn(0);
242: }

244: /*@C
245:     PetscObjectsListGetGlobalNumbering - computes a global numbering
246:     of PetscObjects living on subcommunicators of a given communicator.


249:     Collective on comm.

251:     Input Parameters:
252: +   comm    - MPI_Comm
253: .   len     - local length of objlist
254: -   objlist - a list of PETSc objects living on subcomms of comm that contain this rank of comm
255:               (subcomm ordering is assumed to be deadlock-free)

257:     Output Parameters:
258: +   count      - global number of distinct subcommunicators on objlist (may be > len)
259: -   numbering  - global numbers of objlist entries (allocated by user)


262:     Level: developer

264:     Concepts: MPI subcomm^numbering

266: @*/
267: PetscErrorCode  PetscObjectsListGetGlobalNumbering(MPI_Comm comm, PetscInt len, PetscObject *objlist, PetscInt *count, PetscInt *numbering)
268: {
269:   PetscErrorCode ierr;
270:   PetscInt       i, roots, offset;
271:   PetscMPIInt    size, rank;

273:   PetscFunctionBegin;
275:   if (!count && !numbering) PetscFunctionReturn(0);

277:   ierr = MPI_Comm_size(comm, &size);CHKERRQ(ierr);
278:   ierr = MPI_Comm_rank(comm, &rank);CHKERRQ(ierr);
279:   roots = 0;
280:   for (i = 0; i < len; ++i) {
281:     PetscMPIInt srank;
282:     ierr = MPI_Comm_rank(objlist[i]->comm, &srank);CHKERRQ(ierr);
283:     /* Am I the root of the i-th subcomm? */
284:     if (!srank) ++roots;
285:   }
286:   if (count) {
287:     /* Obtain the sum of all roots -- the global number of distinct subcomms. */
288:     ierr = MPIU_Allreduce(&roots,count,1,MPIU_INT,MPI_SUM,comm);CHKERRQ(ierr);
289:   }
290:   if (numbering){
291:     /* Introduce a global numbering for subcomms, initially known only by subcomm roots. */
292:     /*
293:       At each subcomm root number all of the subcomms it owns locally
294:       and make it global by calculating the shift among all of the roots.
295:       The roots are ordered using the comm ordering.
296:     */
297:     ierr = MPI_Scan(&roots,&offset,1,MPIU_INT,MPI_SUM,comm);CHKERRQ(ierr);
298:     offset -= roots;
299:     /* Now we are ready to broadcast global subcomm numbers within each subcomm.*/
300:     /*
301:       This is where the assumption of a deadlock-free ordering of the subcomms is assumed:
302:       broadcast is collective on the subcomm.
303:     */
304:     roots = 0;
305:     for (i = 0; i < len; ++i) {
306:       PetscMPIInt srank;
307:       numbering[i] = offset + roots; /* only meaningful if !srank. */

309:       ierr = MPI_Comm_rank(objlist[i]->comm, &srank);CHKERRQ(ierr);
310:       ierr = MPI_Bcast(numbering+i,1,MPIU_INT,0,objlist[i]->comm);CHKERRQ(ierr);
311:       if (!srank) ++roots;
312:     }
313:   }
314:   PetscFunctionReturn(0);
315: }
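As a hedged illustration of the manual page above, suppose every rank created one Vec on PETSC_COMM_SELF (hypothetically v_self) and all ranks share one Vec living on comm (hypothetically v_all), created in the same order on every rank. Then:

  /* Sketch with hypothetical v_self (on PETSC_COMM_SELF) and v_all (on comm) */
  PetscObject    objs[2];
  PetscInt       count,numbering[2];
  PetscErrorCode ierr;

  objs[0] = (PetscObject)v_self;
  objs[1] = (PetscObject)v_all;
  ierr = PetscObjectsListGetGlobalNumbering(comm,2,objs,&count,numbering);CHKERRQ(ierr);
  /* count == size+1: one self-comm per rank plus the shared comm; numbering[] holds each object's global id */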

317: struct _n_PetscCommShared {
318:   PetscMPIInt *ranks;    /* global ranks of each rank in this shared memory comm */
319:   PetscMPIInt size;
320:   MPI_Comm    comm,scomm;
321: };

325: /*
 326:    Private routine to delete the internal shared-memory communicator when a communicator is freed.

 328:    This is called by MPI, not by users. It is invoked by MPI_Comm_free() when the communicator that has this data as an attribute is freed.

330:    Note: this is declared extern "C" because it is passed to MPI_Comm_create_keyval()

332: */
333: PETSC_EXTERN PetscMPIInt MPIAPI Petsc_DelShared(MPI_Comm comm,PetscMPIInt keyval,void *val,void *extra_state)
334: {
335:   PetscErrorCode  ierr;
336:   PetscCommShared scomm = (PetscCommShared)val;

338:   PetscFunctionBegin;
339:   ierr = PetscInfo1(0,"Deleting shared subcommunicator in a MPI_Comm %ld\n",(long)comm);if (ierr) PetscFunctionReturn((PetscMPIInt)ierr);
340:   ierr = MPI_Comm_free(&scomm->scomm);if (ierr) PetscFunctionReturn((PetscMPIInt)ierr);
341:   ierr = PetscFree(scomm->ranks);if (ierr) PetscFunctionReturn((PetscMPIInt)ierr);
342:   ierr = PetscFree(val);if (ierr) PetscFunctionReturn((PetscMPIInt)ierr);
343:   PetscFunctionReturn(MPI_SUCCESS);
344: }

346: #undef  __FUNCT__
348: /*@C
349:     PetscCommSharedGet - Given a PETSc communicator returns a communicator of all ranks that share common memory


352:     Collective on comm.

354:     Input Parameter:
355: .   comm    - MPI_Comm

357:     Output Parameter:
358: .   scomm - the shared memory communicator object

360:     Level: developer

362:     Notes: This should be called only with a PetscCommDuplicate() communicator

364:            When used with MPICH, MPICH must be configured with --download-mpich-device=ch3:nemesis

366:     Concepts: MPI subcomm^numbering

368: @*/
369: PetscErrorCode  PetscCommSharedGet(MPI_Comm comm,PetscCommShared *scomm)
370: {
371: #ifdef PETSC_HAVE_MPI_SHARED_COMM
372:   PetscErrorCode   ierr;
373:   MPI_Group        group,sgroup;
374:   PetscMPIInt      *sranks,i,flg;
375:   PetscCommCounter *counter;

377:   PetscFunctionBegin;
378:   ierr = MPI_Comm_get_attr(comm,Petsc_Counter_keyval,&counter,&flg);CHKERRQ(ierr);
379:   if (!flg) SETERRQ(comm,PETSC_ERR_ARG_CORRUPT,"Bad MPI communicator supplied; must be a PETSc communicator");

381:   ierr = MPI_Comm_get_attr(comm,Petsc_Shared_keyval,scomm,&flg);CHKERRQ(ierr);
382:   if (flg) PetscFunctionReturn(0);

384:   ierr = PetscNew(scomm);CHKERRQ(ierr);
385:   (*scomm)->comm = comm;

387:   ierr = MPI_Comm_split_type(comm, MPI_COMM_TYPE_SHARED,0, MPI_INFO_NULL,&(*scomm)->scomm);CHKERRQ(ierr);

389:   ierr = MPI_Comm_size((*scomm)->scomm,&(*scomm)->size);CHKERRQ(ierr);
390:   ierr = MPI_Comm_group(comm, &group);CHKERRQ(ierr);
391:   ierr = MPI_Comm_group((*scomm)->scomm, &sgroup);CHKERRQ(ierr);
392:   ierr = PetscMalloc1((*scomm)->size,&sranks);CHKERRQ(ierr);
393:   ierr = PetscMalloc1((*scomm)->size,&(*scomm)->ranks);CHKERRQ(ierr);
394:   for (i=0; i<(*scomm)->size; i++) sranks[i] = i;
395:   ierr = MPI_Group_translate_ranks(sgroup, (*scomm)->size, sranks, group, (*scomm)->ranks);CHKERRQ(ierr);
396:   ierr = PetscFree(sranks);CHKERRQ(ierr);
397:   ierr = MPI_Group_free(&group);CHKERRQ(ierr);
398:   ierr = MPI_Group_free(&sgroup);CHKERRQ(ierr);

400:   for (i=0; i<(*scomm)->size; i++) {
401:     ierr = PetscInfo2(NULL,"Shared memory rank %d global rank %d\n",i,(*scomm)->ranks[i]);CHKERRQ(ierr);
402:   }
403:   ierr = MPI_Comm_set_attr(comm,Petsc_Shared_keyval,*scomm);CHKERRQ(ierr);
404:   PetscFunctionReturn(0);
405: #else
406:   SETERRQ(comm, PETSC_ERR_SUP, "Shared communicators need MPI-3 package support.\nPlease upgrade your MPI or reconfigure with --download-mpich.");
407: #endif
408: }
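A hedged usage sketch (pcomm is assumed to be a PETSc communicator obtained from PetscCommDuplicate()); because the result is cached as an attribute, repeated calls are cheap:

  /* Sketch: obtain the shared-memory description of a PETSc communicator */
  PetscCommShared scomm;
  PetscErrorCode  ierr;

  ierr = PetscCommSharedGet(pcomm,&scomm);CHKERRQ(ierr);
  /* scomm now records, for every process on this node, its rank in pcomm */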

410: #undef  __FUNCT__
412: /*@C
413:     PetscCommSharedGlobalToLocal - Given a global rank returns the local rank in the shared communicator


416:     Collective on comm.

418:     Input Parameters:
419: +   scomm - the shared memory communicator object
420: -   grank - the global rank

422:     Output Parameter:
423: .   lrank - the local rank, or -1 if it does not exist

425:     Level: developer

427:     Notes:
428:            When used with MPICH, MPICH must be configured with --download-mpich-device=ch3:nemesis

430:     Developer Notes: Assumes scomm->ranks[] is sorted

432:     It may be better to rewrite this to map multiple global ranks to local in the same function call

434:     Concepts: MPI subcomm^numbering

436: @*/
437: PetscErrorCode  PetscCommSharedGlobalToLocal(PetscCommShared scomm,PetscMPIInt grank,PetscMPIInt *lrank)
438: {
439:   PetscMPIInt    low,high,t,i;
440:   PetscBool      flg = PETSC_FALSE;
441:   PetscErrorCode ierr;

443:   PetscFunctionBegin;
444:   *lrank = -1;
445:   if (grank < scomm->ranks[0]) PetscFunctionReturn(0);
446:   if (grank > scomm->ranks[scomm->size-1]) PetscFunctionReturn(0);
447:   ierr = PetscOptionsGetBool(NULL,NULL,"-noshared",&flg,NULL);CHKERRQ(ierr);
448:   if (flg) PetscFunctionReturn(0);
449:   low  = 0;
450:   high = scomm->size;
451:   while (high-low > 5) {
452:     t = (low+high)/2;
453:     if (scomm->ranks[t] > grank) high = t;
454:     else low = t;
455:   }
456:   for (i=low; i<high; i++) {
457:     if (scomm->ranks[i] > grank) PetscFunctionReturn(0);
458:     if (scomm->ranks[i] == grank) {
460:       *lrank = i;
462:       PetscFunctionReturn(0);
463:     }
464:   }
465:   PetscFunctionReturn(0);
466: }
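A sketch of the lookup, assuming scomm was obtained by PetscCommSharedGet() on a PETSc duplicate of PETSC_COMM_WORLD so that grank refers to the same communicator:

  /* Sketch: the calling process always maps to a valid local rank (unless -noshared is set) */
  PetscMPIInt    grank,lrank;
  PetscErrorCode ierr;

  ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&grank);CHKERRQ(ierr);
  ierr = PetscCommSharedGlobalToLocal(scomm,grank,&lrank);CHKERRQ(ierr);
  /* lrank >= 0 here; for a rank on another node lrank would remain -1 */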

468: #undef  __FUNCT__
470: /*@C
471:     PetscCommSharedGetComm - Returns the MPI communicator that represents all processes with common shared memory


474:     Collective on comm.

476:     Input Parameter:
477: .   scomm - PetscCommShared object obtained with PetscCommSharedGet()

479:     Output Parameter:
480: .   comm - the MPI communicator

482:     Level: developer

484: @*/
485: PetscErrorCode  PetscCommSharedGetComm(PetscCommShared scomm,MPI_Comm *comm)
486: {
487:   PetscFunctionBegin;
488:   *comm = scomm->scomm;
489:   PetscFunctionReturn(0);
490: }