Actual source code: garbage.c

#include <petsc/private/garbagecollector.h>

/* Fetches the garbage hash map from a communicator, creating it if absent */
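/* Note: `PetscGarbage` (declared in petsc/private/garbagecollector.h) is a
   union of a void pointer (->ptr) and a `PetscHMapObj` (->map), which is why
   the same value can be handed to the MPI attribute calls and the hash map
   API below */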
static PetscErrorCode GarbageGetHMap_Private(MPI_Comm comm, PetscGarbage *garbage)
{
  PetscMPIInt  flag;
  PetscHMapObj garbage_map;

  PetscFunctionBegin;
  PetscCallMPI(MPI_Comm_get_attr(comm, Petsc_Garbage_HMap_keyval, garbage, &flag));
  if (!flag) {
    /* No garbage hash map attached to the communicator yet, so create one */
    PetscCall(PetscHMapObjCreate(&garbage_map));
    garbage->map = garbage_map;
    PetscCallMPI(MPI_Comm_set_attr(comm, Petsc_Garbage_HMap_keyval, garbage->ptr));
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@C
  PetscObjectDelayedDestroy - Adds an object to a data structure for
  later destruction.

  Not Collective

  Input Parameter:
. obj - object to be destroyed

  Level: developer

  Notes:
  Analogue to `PetscObjectDestroy()` for use in managed languages.

  A PETSc object is given a creation index at initialisation based on
  the communicator it was created on and the order in which it is
  created. When this function is passed a PETSc object, a pointer to
  the object is stashed on a garbage dictionary (`PetscHMapObj`) which is
  keyed by its creation index.

  Objects stashed on this garbage dictionary can later be destroyed
  with a call to `PetscGarbageCleanup()`.

  This function is intended for use with managed languages such as
  Python or Julia, which may not destroy objects in a deterministic
  order.

  Serial objects (those whose communicator has size 1) are destroyed
  eagerly, since no deadlock can occur.
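
  Example Usage:
  A minimal sketch of the intended pattern; in practice this call is issued
  by managed-language bindings (for example petsc4py) rather than written by
  hand:
.vb
  Vec x;

  PetscCall(VecCreateMPI(PETSC_COMM_WORLD, PETSC_DECIDE, 100, &x));
  /* ... use x ... */
  PetscCall(PetscObjectDelayedDestroy((PetscObject *)&x)); /* stashes x and sets it to NULL */
  /* ... later, collectively on all ranks of the communicator ... */
  PetscCall(PetscGarbageCleanup(PETSC_COMM_WORLD));
.ve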

.seealso: `PetscGarbageCleanup()`, `PetscObjectDestroy()`
@*/
PetscErrorCode PetscObjectDelayedDestroy(PetscObject *obj)
{
  MPI_Comm     comm;
  PetscMPIInt  size;
  PetscInt     count;
  PetscGarbage garbage;

  PetscFunctionBegin;
  PetscAssertPointer(obj, 1);
  /* Don't stash NULL pointers */
  if (*obj != NULL) {
    /* Use the object's non-cyclic reference count if it provides one */
    if (!(*obj)->non_cyclic_references) {
      count = --(*obj)->refct;
    } else {
      PetscCall((*obj)->non_cyclic_references(*obj, &count));
      --count;
      --(*obj)->refct;
    }
    /* Only stash if the (non-cyclic) reference count hits 0 */
    if (count == 0) {
      (*obj)->refct = 1;
      PetscCall(PetscObjectGetComm(*obj, &comm));
      PetscCallMPI(MPI_Comm_size(comm, &size));
      /* Eagerly destroy serial objects */
      if (size == 1) {
        PetscCall(PetscObjectDestroy(obj));
      } else {
        PetscCall(GarbageGetHMap_Private(comm, &garbage));
        PetscCall(PetscHMapObjSet(garbage.map, (*obj)->cidx, *obj));
      }
    }
  }
  *obj = NULL;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Computes the intersection of two sorted arrays, seta and setb, of lengths
   lena and lenb respectively, returning the result in seta and lena.
   This is an O(n) operation */
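/* Worked example (illustrative): seta = {1,3,5,8} with *lena = 4 and
   setb = {3,5,9} with lenb = 3 give seta = {3,5,...} and *lena = 2 */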
static PetscErrorCode GarbageKeySortedIntersect_Private(PetscInt64 seta[], PetscInt *lena, PetscInt64 setb[], PetscInt lenb)
{
  /* The arrays seta and setb MUST be sorted! */
  PetscInt ii, jj = 0, counter = 0;

  PetscFunctionBegin;
  if (PetscDefined(USE_DEBUG)) {
    PetscBool sorted = PETSC_FALSE;
    /* In debug mode check whether the arrays are sorted */
    PetscCall(PetscSortedInt64(*lena, seta, &sorted));
    PetscCheck(sorted, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "Provided array in argument 1 is not sorted");
    PetscCall(PetscSortedInt64(lenb, setb, &sorted));
    PetscCheck(sorted, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "Provided array in argument 3 is not sorted");
  }
  for (ii = 0; ii < *lena; ii++) {
    while (jj < lenb && seta[ii] > setb[jj]) jj++;
    if (jj >= lenb) break;
    if (seta[ii] == setb[jj]) {
      seta[counter] = seta[ii];
      counter++;
    }
  }

  *lena = counter;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* User function for the MPI set-intersection reduction operator; entry 0 of
   each buffer holds the number of valid keys and the sorted keys follow it */
void PetscGarbageKeySortedIntersect(void *inset, void *inoutset, PetscMPIInt *length, MPI_Datatype *dtype)
{
  PetscInt64 *seta, *setb;

  seta = (PetscInt64 *)inoutset;
  setb = (PetscInt64 *)inset;

  PetscCallAbort(PETSC_COMM_SELF, GarbageKeySortedIntersect_Private(&seta[1], (PetscInt *)&seta[0], &setb[1], (PetscInt)setb[0]));
}
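/* For reference, `Petsc_Garbage_SetIntersectOp` is created during PETSc
   initialisation by a call resembling the sketch below (see the PETSc setup
   code for the actual call site); commute = 1 is safe because set
   intersection is commutative:

     PetscCallMPI(MPI_Op_create(PetscGarbageKeySortedIntersect, 1, &Petsc_Garbage_SetIntersectOp));
*/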

/* Performs a collective allreduce intersection of one array per rank */
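/* Worked example (illustrative): with two ranks holding the key sets
   {1,2,4} and {2,3,4}, every rank returns set = {2,4} and *entries = 2 */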
PetscErrorCode GarbageKeyAllReduceIntersect_Private(MPI_Comm comm, PetscInt64 *set, PetscInt *entries)
{
  PetscInt     ii, max_entries;
  PetscInt64  *sendset, *recvset;
  MPI_Datatype keyset_type;
  PetscMPIInt  imax_entries;

  PetscFunctionBegin;
  /* Sort keys first for use with `GarbageKeySortedIntersect_Private()` */
  PetscCall(PetscSortInt64(*entries, set));

  /* Get the maximum size of all key sets */
  PetscCallMPI(MPIU_Allreduce(entries, &max_entries, 1, MPIU_INT, MPI_MAX, comm));
  PetscCall(PetscMalloc1(max_entries + 1, &sendset));
  PetscCall(PetscMalloc1(max_entries + 1, &recvset));
  sendset[0] = (PetscInt64)*entries;
  for (ii = 1; ii < *entries + 1; ii++) sendset[ii] = set[ii - 1];

  /* Create a custom data type to hold the set */
  PetscCall(PetscMPIIntCast(max_entries, &imax_entries));
  PetscCallMPI(MPI_Type_contiguous(imax_entries + 1, MPIU_INT64, &keyset_type));
  /* PetscCallMPI(MPI_Type_set_name(keyset_type,"PETSc garbage key set type")); */
  PetscCallMPI(MPI_Type_commit(&keyset_type));

  /* Perform custom intersect reduce operation over sets */
  PetscCallMPI(MPIU_Allreduce(sendset, recvset, 1, keyset_type, Petsc_Garbage_SetIntersectOp, comm));

  PetscCallMPI(MPI_Type_free(&keyset_type));

  *entries = (PetscInt)recvset[0];
  for (ii = 0; ii < *entries; ii++) set[ii] = recvset[ii + 1];

  PetscCall(PetscFree(sendset));
  PetscCall(PetscFree(recvset));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@C
  PetscGarbageCleanup - Destroys objects placed in the garbage by
  `PetscObjectDelayedDestroy()`.

  Collective

  Input Parameter:
. comm - MPI communicator over which to perform collective cleanup

  Level: developer

  Notes:
  Implements a collective garbage collection.
  A per-MPI-communicator garbage dictionary is created to store
  references to objects destroyed using `PetscObjectDelayedDestroy()`.
  Objects that appear in this dictionary on all MPI processes can be destroyed
  by calling `PetscGarbageCleanup()`.

  This is done as follows\:
  1.  Keys of the garbage dictionary, which correspond to the creation
  indices of the objects stashed, are sorted.
  2.  A collective intersection of dictionary keys is performed by all
  ranks in the communicator.
  3.  The intersection is broadcast back to all ranks in the
  communicator.
  4.  The objects on the dictionary are collectively destroyed in
  creation index order using a call to `PetscObjectDestroy()`.

  This function is intended for use with managed languages such as
  Python or Julia, which may not destroy objects in a deterministic
  order.
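
  Example Usage:
  A minimal sketch; in practice managed-language bindings (for example
  petsc4py) call this routine, typically when their own garbage collector
  runs:
.vb
  /* Collective over comm; ranks may hold different stashed objects and only
     those stashed on every rank are destroyed */
  PetscCall(PetscGarbageCleanup(PETSC_COMM_WORLD));
.ve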

.seealso: `PetscObjectDelayedDestroy()`
@*/
PetscErrorCode PetscGarbageCleanup(MPI_Comm comm)
{
  PetscInt     ii, entries, offset;
  PetscInt64  *keys;
  PetscObject  obj;
  PetscGarbage garbage;

  PetscFunctionBegin;
  /* Duplicate comm to prevent it being cleaned up by PetscObjectDestroy() */
  PetscCall(PetscCommDuplicate(comm, &comm, NULL));

  /* Grab garbage from comm and remove it;
     this avoids calling PetscCommDestroy() and endlessly recursing */
  PetscCall(GarbageGetHMap_Private(comm, &garbage));
  PetscCallMPI(MPI_Comm_delete_attr(comm, Petsc_Garbage_HMap_keyval));

  /* Get keys from garbage hash map */
  PetscCall(PetscHMapObjGetSize(garbage.map, &entries));
  PetscCall(PetscMalloc1(entries, &keys));
  offset = 0;
  PetscCall(PetscHMapObjGetKeys(garbage.map, &offset, keys));

  /* Gather and intersect */
  PetscCall(GarbageKeyAllReduceIntersect_Private(comm, keys, &entries));

  /* Collectively destroy objects that appear in garbage in
     creation index order */
  for (ii = 0; ii < entries; ii++) {
    PetscCall(PetscHMapObjGet(garbage.map, keys[ii], &obj));
    PetscCall(PetscObjectDestroy(&obj));
    PetscCall(PetscFree(obj));
    PetscCall(PetscHMapObjDel(garbage.map, keys[ii]));
  }
  PetscCall(PetscFree(keys));

  /* Put garbage back */
  PetscCallMPI(MPI_Comm_set_attr(comm, Petsc_Garbage_HMap_keyval, garbage.ptr));
  PetscCall(PetscCommDestroy(&comm));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Utility function for printing the contents of the garbage on a given comm;
   the viewer argument is currently unused and output goes to PETSC_STDOUT */
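/* A minimal illustrative call (the viewer argument may be NULL since it is
   currently ignored):

     PetscCall(PetscGarbageView(PETSC_COMM_WORLD, NULL));
*/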
PetscErrorCode PetscGarbageView(MPI_Comm comm, PetscViewer viewer)
{
  char         text[64];
  PetscInt     ii, entries, offset;
  PetscInt64  *keys;
  PetscObject  obj;
  PetscGarbage garbage;
  PetscMPIInt  rank;

  PetscFunctionBegin;
  PetscCall(PetscPrintf(comm, "PETSc garbage on "));
  if (comm == PETSC_COMM_WORLD) {
    PetscCall(PetscPrintf(comm, "PETSC_COMM_WORLD\n"));
  } else if (comm == PETSC_COMM_SELF) {
    PetscCall(PetscPrintf(comm, "PETSC_COMM_SELF\n"));
  } else {
    PetscCall(PetscPrintf(comm, "UNKNOWN_COMM\n"));
  }
  PetscCall(PetscCommDuplicate(comm, &comm, NULL));
  PetscCall(GarbageGetHMap_Private(comm, &garbage));

  /* Get keys from garbage hash map */
  PetscCall(PetscHMapObjGetSize(garbage.map, &entries));
  PetscCall(PetscMalloc1(entries, &keys));
  offset = 0;
  PetscCall(PetscHMapObjGetKeys(garbage.map, &offset, keys));

  /* Pretty print entries in a table */
  PetscCallMPI(MPI_Comm_rank(comm, &rank));
  PetscCall(PetscSynchronizedPrintf(comm, "Rank %i:: ", rank));
  PetscCall(PetscFormatConvert("Total entries: %" PetscInt_FMT "\n", text));
  PetscCall(PetscSynchronizedPrintf(comm, text, entries));
  if (entries) {
    PetscCall(PetscSynchronizedPrintf(comm, "| Key   | Type                   | Name                             | Object ID |\n"));
    PetscCall(PetscSynchronizedPrintf(comm, "|-------|------------------------|----------------------------------|-----------|\n"));
  }
  for (ii = 0; ii < entries; ii++) {
    PetscCall(PetscHMapObjGet(garbage.map, keys[ii], &obj));
    PetscCall(PetscFormatConvert("| %5" PetscInt64_FMT " | %-22s | %-32s | %6" PetscInt_FMT "    |\n", text));
    PetscCall(PetscSynchronizedPrintf(comm, text, keys[ii], obj->class_name, obj->description, obj->id));
  }
  PetscCall(PetscSynchronizedFlush(comm, PETSC_STDOUT));

  PetscCall(PetscFree(keys));
  PetscCall(PetscCommDestroy(&comm));
  PetscFunctionReturn(PETSC_SUCCESS);
}