Actual source code: pmetis.c
petsc-3.11.4 2019-09-28

#include <../src/mat/impls/adj/mpi/mpiadj.h>

/*
   Currently using ParMetis-4.0.2
*/
#include <parmetis.h>

/*
      The first 5 elements of this structure are the input control array to Metis
*/
typedef struct {
  PetscInt  cuts;        /* number of cuts made (output) */
  PetscInt  foldfactor;
  PetscInt  parallel;    /* use parallel partitioner for coarse problem */
  PetscInt  indexing;    /* 0 indicates C indexing, 1 Fortran */
  PetscInt  printout;    /* indicates if one wishes Metis to print info */
  PetscBool repartition;
} MatPartitioning_Parmetis;

/* Convert a ParMETIS/METIS return code into a PETSc error */
#define CHKERRQPARMETIS(n,func) \
  if (n == METIS_ERROR_INPUT) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"ParMETIS error due to wrong inputs and/or options for %s",func); \
  else if (n == METIS_ERROR_MEMORY) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"ParMETIS error due to insufficient memory in %s",func); \
  else if (n == METIS_ERROR) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"ParMETIS general error in %s",func);

/* Call a ParMETIS routine with PETSc stack tracing and turn a nonzero return code into a PETSc error;
   requires an int variable named "status" in the calling scope */
#define PetscStackCallParmetis(func,args) do {PetscStackPush(#func);status = func args;PetscStackPop; CHKERRQPARMETIS(status,#func);} while (0)

static PetscErrorCode MatPartitioningApply_Parmetis_Private(MatPartitioning part, PetscBool useND, PetscBool isImprove, IS *partitioning)
{
  MatPartitioning_Parmetis *pmetis = (MatPartitioning_Parmetis*)part->data;
  PetscErrorCode           ierr;
  PetscInt                 *locals = NULL;
  Mat                      mat     = part->adj,amat,pmat;
  PetscBool                flg;
  PetscInt                 bs      = 1;

  PetscObjectTypeCompare((PetscObject)mat,MATMPIADJ,&flg);
  if (flg) {
    amat = mat;
    PetscObjectReference((PetscObject)amat);
  } else {
    /* bs indicates if the converted matrix is "reduced" from the original and hence the
       resulting partition results need to be stretched to match the original matrix */
    MatConvert(mat,MATMPIADJ,MAT_INITIAL_MATRIX,&amat);
    if (amat->rmap->n > 0) bs = mat->rmap->n/amat->rmap->n;
  }
  MatMPIAdjCreateNonemptySubcommMat(amat,&pmat);
  MPI_Barrier(PetscObjectComm((PetscObject)part));

  if (pmat) {
    MPI_Comm   pcomm,comm;
    Mat_MPIAdj *adj     = (Mat_MPIAdj*)pmat->data;
    PetscInt   *vtxdist = pmat->rmap->range;
    PetscInt   *xadj    = adj->i;
    PetscInt   *adjncy  = adj->j;
    PetscInt   *NDorder = NULL;
    PetscInt   itmp     = 0,wgtflag=0, numflag=0, ncon=1, nparts=part->n, options[24], i, j;
    real_t     *tpwgts,*ubvec,itr=0.1;
    int        status;

    PetscObjectGetComm((PetscObject)pmat,&pcomm);
#if defined(PETSC_USE_DEBUG)
    /* check that matrix has no diagonal entries */
    {
      PetscInt rstart;
      MatGetOwnershipRange(pmat,&rstart,NULL);
      for (i=0; i<pmat->rmap->n; i++) {
        for (j=xadj[i]; j<xadj[i+1]; j++) {
          if (adjncy[j] == i+rstart) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Row %d has diagonal entry; Parmetis forbids diagonal entry",i+rstart);
        }
      }
    }
#endif

    PetscMalloc1(pmat->rmap->n,&locals);

    if (isImprove) {
      /* Start from the partition supplied by the caller */
      PetscInt       i;
      const PetscInt *part_indices;

      ISGetIndices(*partitioning,&part_indices);
      for (i=0; i<pmat->rmap->n; i++) {
        locals[i] = part_indices[i*bs];
      }
      ISRestoreIndices(*partitioning,&part_indices);
      ISDestroy(partitioning);
    }

    /* wgtflag tells ParMETIS which weights are supplied: 1 = edge weights only, 2 = vertex weights only, 3 = both */
    if (adj->values && !part->vertex_weights) wgtflag = 1;
    if (part->vertex_weights && !adj->values) wgtflag = 2;
    if (part->vertex_weights && adj->values)  wgtflag = 3;

    if (PetscLogPrintInfo) {itmp = pmetis->printout; pmetis->printout = 127;}
    PetscMalloc1(ncon*nparts,&tpwgts);
    for (i=0; i<ncon; i++) {
      for (j=0; j<nparts; j++) {
        if (part->part_weights) {
          tpwgts[i*nparts+j] = part->part_weights[i*nparts+j];
        } else {
          tpwgts[i*nparts+j] = 1./nparts;
        }
      }
    }
    PetscMalloc1(ncon,&ubvec);
    for (i=0; i<ncon; i++) {
      ubvec[i] = 1.05;
    }
    /* This sets the defaults */
    options[0] = 0;
    for (i=1; i<24; i++) {
      options[i] = -1;
    }
    /* Duplicate the communicator to be sure that ParMETIS attribute caching does not interfere with PETSc. */
    MPI_Comm_dup(pcomm,&comm);
    if (useND) {
      PetscInt    *sizes, *seps, log2size, subd, *level;
      PetscMPIInt size;
      idx_t       mtype = PARMETIS_MTYPE_GLOBAL, rtype = PARMETIS_SRTYPE_2PHASE, p_nseps = 1, s_nseps = 1;
      real_t      ubfrac = 1.05;

      /* Compute a nested dissection ordering, then map the separator information back to a subdomain number for each local row */
      MPI_Comm_size(comm,&size);
      PetscMalloc1(pmat->rmap->n,&NDorder);
      PetscMalloc3(2*size,&sizes,4*size,&seps,size,&level);
      PetscStackCallParmetis(ParMETIS_V32_NodeND,((idx_t*)vtxdist,(idx_t*)xadj,(idx_t*)adjncy,(idx_t*)part->vertex_weights,(idx_t*)&numflag,&mtype,&rtype,&p_nseps,&s_nseps,&ubfrac,NULL/* seed */,NULL/* dbglvl */,(idx_t*)NDorder,(idx_t*)sizes,&comm));
      log2size = PetscLog2Real(size);
      subd     = PetscPowInt(2,log2size);
      MatPartitioningSizesToSep_Private(subd,sizes,seps,level);
      for (i=0; i<pmat->rmap->n; i++) {
        PetscInt loc;

        PetscFindInt(NDorder[i],2*subd,seps,&loc);
        if (loc < 0) {
          loc = -(loc+1);
          if (loc%2) { /* part of subdomain */
            locals[i] = loc/2;
          } else {
            PetscFindInt(NDorder[i],2*(subd-1),seps+2*subd,&loc);
            loc       = loc < 0 ? -(loc+1)/2 : loc/2;
            locals[i] = level[loc];
          }
        } else locals[i] = loc/2;
      }
      PetscFree3(sizes,seps,level);
    } else {
      if (pmetis->repartition) {
        PetscStackCallParmetis(ParMETIS_V3_AdaptiveRepart,((idx_t*)vtxdist,(idx_t*)xadj,(idx_t*)adjncy,(idx_t*)part->vertex_weights,(idx_t*)part->vertex_weights,(idx_t*)adj->values,(idx_t*)&wgtflag,(idx_t*)&numflag,(idx_t*)&ncon,(idx_t*)&nparts,tpwgts,ubvec,&itr,(idx_t*)options,(idx_t*)&pmetis->cuts,(idx_t*)locals,&comm));
      } else if (isImprove) {
        PetscStackCallParmetis(ParMETIS_V3_RefineKway,((idx_t*)vtxdist,(idx_t*)xadj,(idx_t*)adjncy,(idx_t*)part->vertex_weights,(idx_t*)adj->values,(idx_t*)&wgtflag,(idx_t*)&numflag,(idx_t*)&ncon,(idx_t*)&nparts,tpwgts,ubvec,(idx_t*)options,(idx_t*)&pmetis->cuts,(idx_t*)locals,&comm));
      } else {
        PetscStackCallParmetis(ParMETIS_V3_PartKway,((idx_t*)vtxdist,(idx_t*)xadj,(idx_t*)adjncy,(idx_t*)part->vertex_weights,(idx_t*)adj->values,(idx_t*)&wgtflag,(idx_t*)&numflag,(idx_t*)&ncon,(idx_t*)&nparts,tpwgts,ubvec,(idx_t*)options,(idx_t*)&pmetis->cuts,(idx_t*)locals,&comm));
      }
    }
    MPI_Comm_free(&comm);

    PetscFree(tpwgts);
    PetscFree(ubvec);
    if (PetscLogPrintInfo) pmetis->printout = itmp;

    /* If the adjacency matrix was "reduced" (bs > 1), replicate each result bs times to cover the original rows */
    if (bs > 1) {
      PetscInt i,j,*newlocals;
      PetscMalloc1(bs*pmat->rmap->n,&newlocals);
      for (i=0; i<pmat->rmap->n; i++) {
        for (j=0; j<bs; j++) {
          newlocals[bs*i + j] = locals[i];
        }
      }
      PetscFree(locals);
      ISCreateGeneral(PetscObjectComm((PetscObject)part),bs*pmat->rmap->n,newlocals,PETSC_OWN_POINTER,partitioning);
    } else {
      ISCreateGeneral(PetscObjectComm((PetscObject)part),pmat->rmap->n,locals,PETSC_OWN_POINTER,partitioning);
    }
    if (useND) {
      /* Attach the nested dissection ordering to the partitioning IS */
      IS ndis;

      if (bs > 1) {
        ISCreateBlock(PetscObjectComm((PetscObject)part),bs,pmat->rmap->n,NDorder,PETSC_OWN_POINTER,&ndis);
      } else {
        ISCreateGeneral(PetscObjectComm((PetscObject)part),pmat->rmap->n,NDorder,PETSC_OWN_POINTER,&ndis);
      }
      ISSetPermutation(ndis);
      PetscObjectCompose((PetscObject)(*partitioning),"_petsc_matpartitioning_ndorder",(PetscObject)ndis);
      ISDestroy(&ndis);
    }
  } else {
    /* This process has no rows in the nonempty-subcommunicator matrix, so it contributes an empty partitioning */
    ISCreateGeneral(PetscObjectComm((PetscObject)part),0,NULL,PETSC_COPY_VALUES,partitioning);
    if (useND) {
      IS ndis;

      if (bs > 1) {
        ISCreateBlock(PetscObjectComm((PetscObject)part),bs,0,NULL,PETSC_COPY_VALUES,&ndis);
      } else {
        ISCreateGeneral(PetscObjectComm((PetscObject)part),0,NULL,PETSC_COPY_VALUES,&ndis);
      }
      ISSetPermutation(ndis);
      PetscObjectCompose((PetscObject)(*partitioning),"_petsc_matpartitioning_ndorder",(PetscObject)ndis);
      ISDestroy(&ndis);
    }
  }
  MatDestroy(&pmat);
  MatDestroy(&amat);
  return(0);
}

/*
   Uses the ParMETIS parallel matrix partitioner to compute a nested dissection ordering of the matrix in parallel
*/
static PetscErrorCode MatPartitioningApplyND_Parmetis(MatPartitioning part, IS *partitioning)
{
  MatPartitioningApply_Parmetis_Private(part, PETSC_TRUE, PETSC_FALSE, partitioning);
  return(0);
}

/*
   Uses the ParMETIS parallel matrix partitioner to partition the matrix in parallel
*/
static PetscErrorCode MatPartitioningApply_Parmetis(MatPartitioning part, IS *partitioning)
{
  MatPartitioningApply_Parmetis_Private(part, PETSC_FALSE, PETSC_FALSE, partitioning);
  return(0);
}

/*
   Uses ParMETIS to improve the quality of an existing partition
*/
static PetscErrorCode MatPartitioningImprove_Parmetis(MatPartitioning part, IS *partitioning)
{
  MatPartitioningApply_Parmetis_Private(part, PETSC_FALSE, PETSC_TRUE, partitioning);
  return(0);
}

PetscErrorCode MatPartitioningView_Parmetis(MatPartitioning part,PetscViewer viewer)
{
  MatPartitioning_Parmetis *pmetis = (MatPartitioning_Parmetis*)part->data;
  PetscErrorCode           ierr;
  int                      rank;
  PetscBool                iascii;

  MPI_Comm_rank(PetscObjectComm((PetscObject)part),&rank);
  PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);
  if (iascii) {
    if (pmetis->parallel == 2) {
      PetscViewerASCIIPrintf(viewer," Using parallel coarse grid partitioner\n");
    } else {
      PetscViewerASCIIPrintf(viewer," Using sequential coarse grid partitioner\n");
    }
    PetscViewerASCIIPrintf(viewer," Using %d fold factor\n",pmetis->foldfactor);
    PetscViewerASCIIPushSynchronized(viewer);
    PetscViewerASCIISynchronizedPrintf(viewer," [%d]Number of cuts found %d\n",rank,pmetis->cuts);
    PetscViewerFlush(viewer);
    PetscViewerASCIIPopSynchronized(viewer);
  }
  return(0);
}

/*@
   MatPartitioningParmetisSetCoarseSequential - Use the sequential code to
   do the partitioning of the coarse grid.

   Logically Collective on MatPartitioning

   Input Parameter:
.  part - the partitioning context
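
   Example usage (a minimal sketch; error checking omitted, and "part" is assumed to already be a partitioning context with its adjacency matrix set):
$     MatPartitioningSetType(part,MATPARTITIONINGPARMETIS);
$     MatPartitioningParmetisSetCoarseSequential(part);
$     MatPartitioningApply(part,&is);
   The same behavior can be requested with the option -mat_partitioning_parmetis_coarse_sequential.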

   Level: advanced

@*/
PetscErrorCode MatPartitioningParmetisSetCoarseSequential(MatPartitioning part)
{
  MatPartitioning_Parmetis *pmetis = (MatPartitioning_Parmetis*)part->data;

  pmetis->parallel = 1;
  return(0);
}

/*@
   MatPartitioningParmetisSetRepartition - Repartition the
   current mesh to rebalance the computation.

   Logically Collective on MatPartitioning

   Input Parameter:
.  part - the partitioning context
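
   Example usage (a minimal sketch; error checking omitted, and "part" is assumed to already be a ParMETIS partitioning context with its adjacency matrix set):
$     MatPartitioningParmetisSetRepartition(part);
$     MatPartitioningApply(part,&is);
   The same behavior can be requested with the option -mat_partitioning_parmetis_repartition.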

   Level: advanced

@*/
PetscErrorCode MatPartitioningParmetisSetRepartition(MatPartitioning part)
{
  MatPartitioning_Parmetis *pmetis = (MatPartitioning_Parmetis*)part->data;

  pmetis->repartition = PETSC_TRUE;
  return(0);
}

/*@
   MatPartitioningParmetisGetEdgeCut - Returns the number of edge cuts in the vertex partition.

   Input Parameter:
.  part - the partitioning context

   Output Parameter:
.  cut - the edge cut
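
   Example usage (a minimal sketch; error checking omitted):
$     PetscInt cut;
$     MatPartitioningApply(part,&is);
$     MatPartitioningParmetisGetEdgeCut(part,&cut);
$     PetscPrintf(PETSC_COMM_WORLD,"ParMETIS edge cut %D\n",cut);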

   Level: advanced

@*/
PetscErrorCode MatPartitioningParmetisGetEdgeCut(MatPartitioning part, PetscInt *cut)
{
  MatPartitioning_Parmetis *pmetis = (MatPartitioning_Parmetis*) part->data;

  *cut = pmetis->cuts;
  return(0);
}

PetscErrorCode MatPartitioningSetFromOptions_Parmetis(PetscOptionItems *PetscOptionsObject,MatPartitioning part)
{
  PetscBool flag = PETSC_FALSE;

  PetscOptionsHead(PetscOptionsObject,"Set ParMeTiS partitioning options");
  PetscOptionsBool("-mat_partitioning_parmetis_coarse_sequential","Use sequential coarse partitioner","MatPartitioningParmetisSetCoarseSequential",flag,&flag,NULL);
  if (flag) {
    MatPartitioningParmetisSetCoarseSequential(part);
  }
  flag = PETSC_FALSE; /* reset so the first option's value is not used as the default for the second */
  PetscOptionsBool("-mat_partitioning_parmetis_repartition","Repartition current mesh to rebalance computation","MatPartitioningParmetisSetRepartition",flag,&flag,NULL);
  if (flag) {
    MatPartitioningParmetisSetRepartition(part);
  }
  PetscOptionsTail();
  return(0);
}
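
/* Example command-line usage (a sketch; "ex" is a hypothetical program that calls MatPartitioningSetFromOptions()):
     mpiexec -n 4 ./ex -mat_partitioning_type parmetis -mat_partitioning_parmetis_coarse_sequential
*/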

PetscErrorCode MatPartitioningDestroy_Parmetis(MatPartitioning part)
{
  MatPartitioning_Parmetis *pmetis = (MatPartitioning_Parmetis*)part->data;
  PetscErrorCode           ierr;

  PetscFree(pmetis);
  return(0);
}

/*MC
   MATPARTITIONINGPARMETIS - Creates a partitioning context via the external package ParMETIS.

   Collective on MPI_Comm

   Input Parameter:
.  part - the partitioning context

   Options Database Keys:
+  -mat_partitioning_parmetis_coarse_sequential - use the sequential ParMETIS coarse partitioner
-  -mat_partitioning_parmetis_repartition - repartition the current mesh to rebalance the computation

   Level: beginner

   Notes:
    See http://www-users.cs.umn.edu/~karypis/metis/
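
   Example usage (a minimal sketch; error checking omitted, and A is assumed to be a parallel adjacency (MATMPIADJ) matrix or a matrix that can be converted to one):
$     MatPartitioning part;
$     IS              is;
$     MatPartitioningCreate(PETSC_COMM_WORLD,&part);
$     MatPartitioningSetAdjacency(part,A);
$     MatPartitioningSetType(part,MATPARTITIONINGPARMETIS);
$     MatPartitioningSetFromOptions(part);
$     MatPartitioningApply(part,&is);
$     ISDestroy(&is);
$     MatPartitioningDestroy(&part);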

.keywords: Partitioning, create, context

.seealso: MatPartitioningSetType(), MatPartitioningType

M*/

PETSC_EXTERN PetscErrorCode MatPartitioningCreate_Parmetis(MatPartitioning part)
{
  PetscErrorCode           ierr;
  MatPartitioning_Parmetis *pmetis;

  PetscNewLog(part,&pmetis);
  part->data = (void*)pmetis;

  pmetis->cuts        = 0;   /* output variable */
  pmetis->foldfactor  = 150; /* folding factor */
  pmetis->parallel    = 2;   /* use parallel partitioner for coarse grid */
  pmetis->indexing    = 0;   /* index numbering starts from 0 */
  pmetis->printout    = 0;   /* print no output while running */
  pmetis->repartition = PETSC_FALSE;

  part->ops->apply          = MatPartitioningApply_Parmetis;
  part->ops->applynd        = MatPartitioningApplyND_Parmetis;
  part->ops->improve        = MatPartitioningImprove_Parmetis;
  part->ops->view           = MatPartitioningView_Parmetis;
  part->ops->destroy        = MatPartitioningDestroy_Parmetis;
  part->ops->setfromoptions = MatPartitioningSetFromOptions_Parmetis;
  return(0);
}

/*@
     MatMeshToVertexGraph - This routine does not exist because ParMETIS does not provide the functionality. It was intended to use the ParMETIS package to
                       convert a Mat that represents a mesh to a Mat that represents the graph of the coupling
                       between vertices of the cells and is suitable for partitioning with the MatPartitioning object. Use this to partition
                       vertices of a mesh. More likely you should use MatMeshToCellGraph()

   Collective on Mat

   Input Parameters:
+     mesh - the graph that represents the mesh
-     ncommonnodes - mesh elements that share this number of common nodes are considered neighbors, use 2 for triangles and
                     quadrilaterals, 3 for tetrahedra and 4 for hexahedra

   Output Parameter:
.     dual - the dual graph

   Notes:
     Currently requires ParMetis to be installed and uses ParMETIS_V3_Mesh2Dual()

     The columns of each row of the Mat mesh are the global vertex numbers of the vertices of that row's cell. The number of rows in mesh is
     the number of cells, the number of columns is the number of vertices.

   Level: advanced

.seealso: MatMeshToCellGraph(), MatCreateMPIAdj(), MatPartitioningCreate()

@*/
PetscErrorCode MatMeshToVertexGraph(Mat mesh,PetscInt ncommonnodes,Mat *dual)
{
  SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"ParMETIS does not provide this functionality");
  return(0);
}

/*@
     MatMeshToCellGraph - Uses the ParMETIS package to convert a Mat that represents a mesh to a Mat that represents the graph of the coupling
                       between cells (the "dual" graph) and is suitable for partitioning with the MatPartitioning object. Use this to partition
                       cells of a mesh.

   Collective on Mat

   Input Parameters:
+     mesh - the graph that represents the mesh
-     ncommonnodes - mesh elements that share this number of common nodes are considered neighbors, use 2 for triangles and
                     quadrilaterals, 3 for tetrahedra and 4 for hexahedra

   Output Parameter:
.     dual - the dual graph

   Notes:
     Currently requires ParMetis to be installed and uses ParMETIS_V3_Mesh2Dual()

$     Each row of the mesh object represents a single cell in the mesh. For triangles it has 3 entries, quadrilaterals 4 entries,
$     tetrahedra 4 entries and hexahedra 8 entries. You can mix triangles and quadrilaterals in the same mesh, but cannot
$     mix tetrahedra and hexahedra.
$     The columns of each row of the Mat mesh are the global vertex numbers of the vertices of that row's cell.
$     The number of rows in mesh is the number of cells, the number of columns is the number of vertices.
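
   Example usage (a minimal sketch; error checking omitted, and ia, ja, nlocalcells, nglobalvertices are hypothetical placeholders for the caller's mesh data in MPIAdj (CSR-like) form):
$     Mat             mesh,dual;
$     MatPartitioning part;
$     IS              is;
$     MatCreateMPIAdj(PETSC_COMM_WORLD,nlocalcells,nglobalvertices,ia,ja,NULL,&mesh);
$     MatMeshToCellGraph(mesh,2,&dual);
$     MatPartitioningCreate(PETSC_COMM_WORLD,&part);
$     MatPartitioningSetAdjacency(part,dual);
$     MatPartitioningSetFromOptions(part);
$     MatPartitioningApply(part,&is);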

   Level: advanced

.seealso: MatMeshToVertexGraph(), MatCreateMPIAdj(), MatPartitioningCreate()

@*/
PetscErrorCode MatMeshToCellGraph(Mat mesh,PetscInt ncommonnodes,Mat *dual)
{
  PetscInt   *newxadj,*newadjncy;
  PetscInt   numflag=0;
  Mat_MPIAdj *adj = (Mat_MPIAdj*)mesh->data,*newadj;
  PetscBool  flg;
  int        status;
  MPI_Comm   comm;

  PetscObjectTypeCompare((PetscObject)mesh,MATMPIADJ,&flg);
  if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Must use MPIAdj matrix type");

  PetscObjectGetComm((PetscObject)mesh,&comm);
  PetscStackCallParmetis(ParMETIS_V3_Mesh2Dual,((idx_t*)mesh->rmap->range,(idx_t*)adj->i,(idx_t*)adj->j,(idx_t*)&numflag,(idx_t*)&ncommonnodes,(idx_t**)&newxadj,(idx_t**)&newadjncy,&comm));
  MatCreateMPIAdj(PetscObjectComm((PetscObject)mesh),mesh->rmap->n,mesh->rmap->N,newxadj,newadjncy,NULL,dual);
  newadj = (Mat_MPIAdj*)(*dual)->data;

  newadj->freeaijwithfree = PETSC_TRUE; /* signal the matrix should be freed with system free since space was allocated by ParMETIS */
  return(0);
}