Actual source code: tagm.c

petsc-3.3-p7 2013-05-11
  2: /*
  3:       Some PETSc utilities
  4: */
  5: #include <petscsys.h>             /*I    "petscsys.h"   I*/
  6: #if defined(PETSC_HAVE_STDLIB_H)
  7: #include <stdlib.h>
  8: #endif

 10: #include <petsc-private/threadcommimpl.h>
 11: /* ---------------------------------------------------------------- */
 12: /*
 13:    A simple way to manage tags inside a communicator.

 15:    It uses MPI attributes to determine whether a new communicator
 16:       is needed and to store the available tags.

 18: */
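/*
   Editor's sketch (not part of tagm.c): the tag management below rests on the
   MPI-1 attribute-caching interface.  The disabled fragment shows that raw
   mechanism with a hypothetical keyval and helper (Example_keyval,
   ExampleCachePayload); the keyvals actually used in this file
   (Petsc_Counter_keyval, Petsc_InnerComm_keyval, Petsc_OuterComm_keyval) are
   created elsewhere in PETSc during initialization.
*/
#if 0  /* illustrative only; excluded from the build */
static int Example_keyval = MPI_KEYVAL_INVALID;   /* hypothetical keyval */

static int ExampleCachePayload(MPI_Comm comm,void *payload)
{
  void *existing;
  int  flag;

  if (Example_keyval == MPI_KEYVAL_INVALID) {
    /* create the keyval once; the copy/delete callbacks are no-ops in this sketch */
    MPI_Keyval_create(MPI_NULL_COPY_FN,MPI_NULL_DELETE_FN,&Example_keyval,(void*)0);
  }
  /* is something already cached on this communicator? */
  MPI_Attr_get(comm,Example_keyval,&existing,&flag);
  if (!flag) MPI_Attr_put(comm,Example_keyval,payload);   /* no: attach the payload */
  return 0;
}
#endif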


 23: /*@C
 24:     PetscObjectGetNewTag - Gets a unique new tag from a PETSc object. All 
 25:     processors that share the object MUST call this routine EXACTLY the same
 26:     number of times.  This tag should only be used with the current object's
 27:     communicator; do NOT use it with any other MPI communicator.

 29:     Collective on PetscObject

 31:     Input Parameter:
 32: .   obj - the PETSc object; this must be cast with a (PetscObject), for example, 
 33:          PetscObjectGetNewTag((PetscObject)mat,&tag);

 35:     Output Parameter:
 36: .   tag - the new tag

 38:     Level: developer

 40:     Concepts: tag^getting
 41:     Concepts: message tag^getting
 42:     Concepts: MPI message tag^getting

 44: .seealso: PetscCommGetNewTag()
 45: @*/
 46: PetscErrorCode  PetscObjectGetNewTag(PetscObject obj,PetscMPIInt *tag)
 47: {

 51:   PetscCommGetNewTag(obj->comm,tag);
 52:   return(0);
 53: }
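/*
   Editor's sketch (not part of tagm.c): typical use of PetscObjectGetNewTag().
   A PetscContainer stands in for any PETSc object; the helper name
   ExampleObjectTag() is hypothetical.  The tag is valid only on that object's
   own communicator, obtained here with PetscObjectGetComm().
*/
#if 0  /* illustrative only; excluded from the build */
static PetscErrorCode ExampleObjectTag(void)
{
  PetscErrorCode ierr;
  PetscContainer c;
  PetscMPIInt    tag,rank,size,buf = 0;
  MPI_Comm       comm;

  ierr = PetscContainerCreate(PETSC_COMM_WORLD,&c);CHKERRQ(ierr);
  ierr = PetscObjectGetNewTag((PetscObject)c,&tag);CHKERRQ(ierr);  /* collective on the object */
  ierr = PetscObjectGetComm((PetscObject)c,&comm);CHKERRQ(ierr);   /* use the tag ONLY on this comm */
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  if (size > 1) {   /* toy point-to-point exchange between ranks 0 and 1 */
    if (!rank) {
      buf  = 42;
      ierr = MPI_Send(&buf,1,MPI_INT,1,tag,comm);CHKERRQ(ierr);
    } else if (rank == 1) {
      MPI_Status status;
      ierr = MPI_Recv(&buf,1,MPI_INT,0,tag,comm,&status);CHKERRQ(ierr);
    }
  }
  ierr = PetscContainerDestroy(&c);CHKERRQ(ierr);
  return 0;
}
#endif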

 57: /*@
 58:     PetscCommGetNewTag - Gets a unique new tag from a PETSc communicator. All 
 59:     processors that share the communicator MUST call this routine EXACTLY the same
 60:     number of times.  This tag should only be used with this communicator;
 61:     do NOT use it with any other MPI communicator.

 63:     Collective on comm

 65:     Input Parameter:
 66: .   comm - the MPI communicator

 68:     Output Parameter:
 69: .   tag - the new tag

 71:     Level: developer

 73:     Concepts: tag^getting
 74:     Concepts: message tag^getting
 75:     Concepts: MPI message tag^getting

 77: .seealso: PetscObjectGetNewTag(), PetscCommDuplicate()
 78: @*/
 79: PetscErrorCode  PetscCommGetNewTag(MPI_Comm comm,PetscMPIInt *tag)
 80: {
 81:   PetscErrorCode   ierr;
 82:   PetscCommCounter *counter;
 83:   PetscMPIInt      *maxval,flg;


 88:   MPI_Attr_get(comm,Petsc_Counter_keyval,&counter,&flg);
 89:   if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_CORRUPT,"Bad MPI communicator supplied; must be a PETSc communicator");

 91:   if (counter->tag < 1) {
 92:     PetscInfo1(0,"Out of tags for object, starting to recycle. Comm reference count %d\n",counter->refcount);
 93:     MPI_Attr_get(MPI_COMM_WORLD,MPI_TAG_UB,&maxval,&flg);
 94:     if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"MPI error: MPI_Attr_get() is not returning a MPI_TAG_UB");
 95:     counter->tag = *maxval - 128; /* hope that any still active tags were issued right at the beginning of the run */
 96:   }

 98:   *tag = counter->tag--;
 99: #if defined(PETSC_USE_DEBUG)
100:   /* 
101:      Hanging here means that some processes have called PetscCommGetNewTag() and others have not.
102:   */
103:   MPI_Barrier(comm);
104: #endif
105:   return(0);
106: }
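/*
   Editor's sketch (not part of tagm.c): PetscCommGetNewTag() requires a PETSc
   communicator, i.e. one carrying Petsc_Counter_keyval; the usual way to obtain
   one from an arbitrary user communicator is PetscCommDuplicate(), defined below.
   The helper name ExampleCommTags() is hypothetical, and all processes of the
   communicator must make the same sequence of calls.
*/
#if 0  /* illustrative only; excluded from the build */
static PetscErrorCode ExampleCommTags(MPI_Comm usercomm)
{
  PetscErrorCode ierr;
  MPI_Comm       pcomm;
  PetscMPIInt    tag1,tag2;

  ierr = PetscCommDuplicate(usercomm,&pcomm,PETSC_NULL);CHKERRQ(ierr); /* get the PETSc comm, no tag needed yet */
  ierr = PetscCommGetNewTag(pcomm,&tag1);CHKERRQ(ierr);  /* tags are handed out counting down */
  ierr = PetscCommGetNewTag(pcomm,&tag2);CHKERRQ(ierr);  /* tag2 == tag1 - 1 here, unless recycling started */
  ierr = PetscCommDestroy(&pcomm);CHKERRQ(ierr);         /* drop the reference taken by PetscCommDuplicate() */
  return 0;
}
#endif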

110: /*@C
111:   PetscCommDuplicate - Duplicates the communicator only if it is not already a PETSc communicator.

113:   Collective on MPI_Comm

115:   Input Parameters:
116: . comm_in - Input communicator

118:   Output Parameters:
119: + comm_out - Output communicator.  May be comm_in.
120: - first_tag - Tag available that has not already been used with this communicator (you may
121:               pass in PETSC_NULL if you do not need a tag)

123:   PETSc communicators are just regular MPI communicators that keep track of which
124:   tags have been used, to prevent tag conflicts. If you pass a non-PETSc communicator into
125:   a PETSc creation routine, it will attach a private communicator for use in the object's communications.
126:   The internal MPI_Comm is used to perform all the MPI calls for PETSc, while the outer MPI_Comm is a
127:   user-level MPI_Comm that may be performing communication for the user or another library and so IS NOT used by PETSc.

129:   Level: developer

131:   Concepts: communicator^duplicate

133: .seealso: PetscObjectGetNewTag(), PetscCommGetNewTag(), PetscCommDestroy()
134: @*/
135: PetscErrorCode  PetscCommDuplicate(MPI_Comm comm_in,MPI_Comm *comm_out,PetscMPIInt* first_tag)
136: {
137:   PetscErrorCode   ierr;
138:   PetscCommCounter *counter;
139:   PetscMPIInt      *maxval,flg;
140: #if defined(PETSC_THREADCOMM_ACTIVE)
141:   PetscThreadComm  tcomm;
142: #endif

145:   MPI_Attr_get(comm_in,Petsc_Counter_keyval,&counter,&flg);

147:   if (!flg) {  /* this is NOT a PETSc comm */
148:     void *ptr;
149:     /* check if this communicator has a PETSc communicator embedded in it */
150:     MPI_Attr_get(comm_in,Petsc_InnerComm_keyval,&ptr,&flg);
151:     if (!flg) {
152:       /* This communicator is not yet known to this system, so we duplicate it and make an internal communicator */
153:       MPI_Comm_dup(comm_in,comm_out);
154:       MPI_Attr_get(MPI_COMM_WORLD,MPI_TAG_UB,&maxval,&flg);
155:       if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"MPI error: MPI_Attr_get() is not returning a MPI_TAG_UB");
156:       PetscMalloc(sizeof(PetscCommCounter),&counter);
157:       counter->tag       = *maxval;
158:       counter->refcount  = 0;
159:       counter->namecount = 0;
160:       MPI_Attr_put(*comm_out,Petsc_Counter_keyval,counter);
161:       PetscInfo3(0,"Duplicating a communicator %ld %ld max tags = %d\n",(long)comm_in,(long)*comm_out,*maxval);

163:       /* save PETSc communicator inside user communicator, so we can get it next time */
164:       /*  Use PetscMemcpy() because casting from pointer to integer of different size is not allowed with some compilers  */
165:       PetscMemcpy(&ptr,comm_out,sizeof(MPI_Comm));
166:       MPI_Attr_put(comm_in,Petsc_InnerComm_keyval,ptr);
167:       /*  Use PetscMemcpy() because casting from pointer to integer of different size is not allowed with some compilers  */
168:       PetscMemcpy(&ptr,&comm_in,sizeof(MPI_Comm));
169:       MPI_Attr_put(*comm_out,Petsc_OuterComm_keyval,ptr);
170:     } else {
171:       /* pull out the inner MPI_Comm and hand it back to the caller */
172:       /*  Use PetscMemcpy() because casting from pointer to integer of different size is not allowed with some compilers  */
173:       PetscMemcpy(comm_out,&ptr,sizeof(MPI_Comm));
174:       MPI_Attr_get(*comm_out,Petsc_Counter_keyval,&counter,&flg);
175:       if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Inner PETSc communicator does not have its tag/name counter attribute set");
176:       PetscInfo2(0,"Using internal PETSc communicator %ld %ld\n",(long)comm_in,(long)*comm_out);
177:     }
178:   } else {
179:     *comm_out = comm_in;
180:   }

182: #if defined(PETSC_USE_DEBUG)
183:   /* 
184:      Hanging here means that some processes have called PetscCommDuplicate() and others have not.
185:      This likely means that a subset of processes in an MPI_Comm have attempted to create a PetscObject!
186:      ALL processes that share a communicator MUST share objects created from that communicator.
187:   */
188:   MPI_Barrier(comm_in);
189: #endif

191:   if (counter->tag < 1) {
192:     PetscInfo1(0,"Out of tags for object, starting to recycle. Comm reference count %d\n",counter->refcount);
193:     MPI_Attr_get(MPI_COMM_WORLD,MPI_TAG_UB,&maxval,&flg);
194:     if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"MPI error: MPI_Attr_get() is not returning a MPI_TAG_UB");
195:     counter->tag = *maxval - 128; /* hope that any still active tags were issued right at the beginning of the run */
196:   }

198:   if (first_tag) {
199:     *first_tag = counter->tag--;
200:   }

202: #if defined(PETSC_THREADCOMM_ACTIVE)
203:   /* Only the main thread updates counter->refcount */
204:   MPI_Attr_get(*comm_out,Petsc_ThreadComm_keyval,(PetscThreadComm*)&tcomm,&flg);
205:   if (flg) {
206:     PetscInt trank;
207:     trank = PetscThreadCommGetRank(tcomm);
208:     if (!trank) counter->refcount++; /* number of references to this comm */
209:   } else counter->refcount++;
210: #else
211:   counter->refcount++;
212: #endif

214:   return(0);
215: }
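/*
   Editor's sketch (not part of tagm.c): two PetscCommDuplicate() calls on the
   same user communicator return the same inner MPI_Comm (only the first call
   performs MPI_Comm_dup()) but different first tags, and each call must be
   matched by a PetscCommDestroy().  The helper name ExampleDuplicateTwice()
   is hypothetical.
*/
#if 0  /* illustrative only; excluded from the build */
static PetscErrorCode ExampleDuplicateTwice(MPI_Comm usercomm)
{
  PetscErrorCode ierr;
  MPI_Comm       a,b;
  PetscMPIInt    tagA,tagB;

  ierr = PetscCommDuplicate(usercomm,&a,&tagA);CHKERRQ(ierr);  /* duplicates and attaches the counter */
  ierr = PetscCommDuplicate(usercomm,&b,&tagB);CHKERRQ(ierr);  /* finds the embedded inner comm */
  /* a == b as MPI handles; tagB != tagA because both calls decremented the shared counter */
  ierr = PetscCommDestroy(&b);CHKERRQ(ierr);
  ierr = PetscCommDestroy(&a);CHKERRQ(ierr);
  return 0;
}
#endif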

219: /*@C
220:    PetscCommDestroy - Frees communicator.  Use in conjunction with PetscCommDuplicate().

222:    Collective on MPI_Comm

224:    Input Parameter:
225: .  comm - the communicator to free

227:    Level: developer

229:    Concepts: communicator^destroy

231: .seealso:   PetscCommDuplicate()
232: @*/
233: PetscErrorCode  PetscCommDestroy(MPI_Comm *comm)
234: {
235:   PetscErrorCode   ierr;
236:   PetscCommCounter *counter;
237:   PetscMPIInt      flg;
238:   MPI_Comm         icomm = *comm,ocomm;
239:   void             *ptr;
240: #if defined(PETSC_THREADCOMM_ACTIVE)
241:   PetscThreadComm  tcomm;
242: #endif

245:   if (*comm == MPI_COMM_NULL) return(0);
246:   MPI_Attr_get(icomm,Petsc_Counter_keyval,&counter,&flg);
247:   if (!flg) { /* not a PETSc comm, check if it has an inner comm */
248:     MPI_Attr_get(icomm,Petsc_InnerComm_keyval,&ptr,&flg);
249:     if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_CORRUPT,"MPI_Comm does not have tag/name counter nor does it have inner MPI_Comm");
250:     /*  Use PetscMemcpy() because casting from pointer to integer of different size is not allowed with some compilers  */
251:     PetscMemcpy(&icomm,&ptr,sizeof(MPI_Comm));
252:     MPI_Attr_get(icomm,Petsc_Counter_keyval,&counter,&flg);
253:     if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_CORRUPT,"Inner MPI_Comm does not have expected tag/name counter, problem with corrupted memory");
254:   }

256: #if defined(PETSC_THREADCOMM_ACTIVE)
257:   /* Only the main thread updates counter->refcount */
258:   MPI_Attr_get(icomm,Petsc_ThreadComm_keyval,(PetscThreadComm*)&tcomm,&flg);
259:   if(flg) {
260:     PetscInt trank;
261:     trank = PetscThreadCommGetRank(tcomm);
262:     /* Only thread rank 0 updates the counter */
263:     if(!trank) counter->refcount--;
264:   } else counter->refcount--;
265: #else
266:   counter->refcount--;
267: #endif

269:   if (!counter->refcount) {
270:     /* if MPI_Comm has outer comm then remove reference to inner MPI_Comm from outer MPI_Comm */
271:     MPI_Attr_get(icomm,Petsc_OuterComm_keyval,&ptr,&flg);
272:     if (flg) {
273:       /*  Use PetscMemcpy() because casting from pointer to integer of different size is not allowed with some compilers  */
274:       PetscMemcpy(&ocomm,&ptr,sizeof(MPI_Comm));
275:       MPI_Attr_get(ocomm,Petsc_InnerComm_keyval,&ptr,&flg);
276:       if (flg) {
277:         MPI_Attr_delete(ocomm,Petsc_InnerComm_keyval);
278:       } else SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_CORRUPT,"Outer MPI_Comm %ld does not have expected reference to inner comm %ld, problem with corrupted memory",(long int)ocomm,(long int)icomm);
279:     }

281:     PetscInfo1(0,"Deleting PETSc MPI_Comm %ld\n",(long)icomm);
282:     MPI_Comm_free(&icomm);
283:   }
284:   *comm = MPI_COMM_NULL;
285:   return(0);
286: }
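/*
   Editor's sketch (not part of tagm.c): the usual pairing of PetscCommDuplicate()
   and PetscCommDestroy().  The last destroy that brings the reference count to
   zero frees the inner MPI_Comm, and the caller's handle is reset to
   MPI_COMM_NULL.  The helper name ExampleDuplicateDestroy() is hypothetical.
*/
#if 0  /* illustrative only; excluded from the build */
static PetscErrorCode ExampleDuplicateDestroy(void)
{
  PetscErrorCode ierr;
  MPI_Comm       pcomm;
  PetscMPIInt    tag;

  ierr = PetscCommDuplicate(PETSC_COMM_WORLD,&pcomm,&tag);CHKERRQ(ierr);
  /* ... use pcomm and tag for library-internal messages ... */
  ierr = PetscCommDestroy(&pcomm);CHKERRQ(ierr);  /* pcomm is MPI_COMM_NULL afterwards */
  return 0;
}
#endif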

288: #undef  __FUNCT__
289: #define __FUNCT__ "PetscObjectsGetGlobalNumbering"
290: /*@C
291:     PetscObjectsGetGlobalNumbering - computes a global numbering
292:     of PetscObjects living on subcommunicators of a given communicator.
293:     This results in a deadlock-free ordering of the subcommunicators
294:     and, hence, the objects.


297:     Collective on comm.

299:     Input Parameters:
300: +   comm    - MPI_Comm
301: .   len     - length of objlist
302: -   objlist - a list of PETSc objects living on subcommunicators of comm
303:                 (subcommunicator ordering is assumed to be deadlock-free)

305:     Output Parameters:
306: +   count      - number of globally-distinct subcommunicators on objlist
307: -   numbering  - global numbers of objlist entries (allocated by user)


310:     Level: developer

312:     Concepts: MPI subcomm^numbering

314: @*/
315: PetscErrorCode  PetscObjectsGetGlobalNumbering(MPI_Comm comm, PetscInt len, PetscObject *objlist, PetscInt *count, PetscInt *numbering)
316: {
318:   PetscInt i, roots, offset;
319:   PetscMPIInt size, rank;
324:   MPI_Comm_size(comm, &size);
325:   MPI_Comm_rank(comm, &rank);
326:   roots = 0;
327:   for(i = 0; i < len; ++i) {
328:     PetscMPIInt srank;
329:     MPI_Comm_rank(objlist[i]->comm, &srank);
330:     /* Am I the root of the i-th subcomm? */
331:     if(!srank) ++roots;
332:   }
333:   /* Obtain the sum of all roots -- the global number of distinct subcomms. */
334:   MPI_Allreduce((void*)&roots,(void*)count,1,MPIU_INT,MPI_SUM,comm);
335:   /* Now introduce a global numbering for subcomms, initially known only by subcomm roots. */
336:   /* 
337:    At the subcomm roots, number the subcomms locally, then make the numbering
338:    global by computing each root's shift (an exclusive prefix sum over the root counts).
339:    */
340:   MPI_Scan((PetscMPIInt*)&roots,(PetscMPIInt*)&offset,1,MPIU_INT,MPI_SUM,comm);
341:   offset -= roots;
342:   /* Now we are ready to broadcast global subcomm numbers within each subcomm.*/
343:   /* 
344:      This is where the assumption of a deadlock-free ordering of the subcomms is used:
345:      the broadcast is collective on each subcomm.
346:    */
347:   roots = 0;
348:   for(i = 0; i < len; ++i) {
349:     PetscMPIInt srank;
350:     numbering[i] = offset + roots; /* only meaningful if !srank. */
351:     MPI_Comm_rank(objlist[i]->comm, &srank);
352:     MPI_Bcast(numbering+i,1,MPIU_INT,0,objlist[i]->comm);
353:     if(!srank) ++roots;
354:   }

356:   return(0);
357: }
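/*
   Editor's sketch (not part of tagm.c): a minimal use of
   PetscObjectsGetGlobalNumbering().  Each process creates one object on
   PETSC_COMM_SELF, a subcommunicator of PETSC_COMM_WORLD, so every process is
   the root of its own subcomm: count comes back as the number of processes and
   numbering[0] as this subcomm's global id (here, its rank-ordered position).
   The helper name ExampleGlobalNumbering() is hypothetical.
*/
#if 0  /* illustrative only; excluded from the build */
static PetscErrorCode ExampleGlobalNumbering(void)
{
  PetscErrorCode ierr;
  PetscContainer c;
  PetscObject    obj;
  PetscInt       count,numbering[1];

  ierr = PetscContainerCreate(PETSC_COMM_SELF,&c);CHKERRQ(ierr);
  obj  = (PetscObject)c;
  ierr = PetscObjectsGetGlobalNumbering(PETSC_COMM_WORLD,1,&obj,&count,numbering);CHKERRQ(ierr);
  /* count == number of processes in PETSC_COMM_WORLD; numbering[0] identifies this subcomm */
  ierr = PetscContainerDestroy(&c);CHKERRQ(ierr);
  return 0;
}
#endif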