/*  pmetis.c  --  petsc-3.8.4, 2018-03-24  */

#include <../src/mat/impls/adj/mpi/mpiadj.h>

/*
   Currently using ParMetis-4.0.2
*/
#include <parmetis.h>

/*
   The first 5 elements of this structure are the input control array to Metis
*/
typedef struct {
  PetscInt  cuts;        /* number of cuts made (output) */
  PetscInt  foldfactor;
  PetscInt  parallel;    /* use parallel partitioner for coarse problem */
  PetscInt  indexing;    /* 0 indicates C indexing, 1 Fortran */
  PetscInt  printout;    /* indicates if one wishes Metis to print info */
  PetscBool repartition;
} MatPartitioning_Parmetis;

#define CHKERRQPARMETIS(n,func) \
  if (n == METIS_ERROR_INPUT) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"ParMETIS error due to wrong inputs and/or options for %s",func); \
  else if (n == METIS_ERROR_MEMORY) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"ParMETIS error due to insufficient memory in %s",func); \
  else if (n == METIS_ERROR) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"ParMETIS general error in %s",func); \

#define PetscStackCallParmetis(func,args) do {PetscStackPush(#func);status = func args;PetscStackPop; CHKERRQPARMETIS(status,#func);} while (0)
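
/*
   Illustrative note (not part of the original source): PetscStackCallParmetis() requires an
   "int status;" declaration in the calling scope.  A call such as

     PetscStackCallParmetis(ParMETIS_V3_Mesh2Dual,(arg1,arg2,...));

   expands to

     do {
       PetscStackPush("ParMETIS_V3_Mesh2Dual");
       status = ParMETIS_V3_Mesh2Dual (arg1,arg2,...);
       PetscStackPop;
       CHKERRQPARMETIS(status,"ParMETIS_V3_Mesh2Dual");
     } while (0);

   so the ParMETIS return code is checked and converted into a PETSc error.  See the
   ParMETIS_V3_PartKway and ParMETIS_V3_AdaptiveRepart calls below for real argument lists.
*/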
/*
   Uses the ParMETIS parallel matrix partitioner to partition the matrix in parallel
*/
static PetscErrorCode MatPartitioningApply_Parmetis(MatPartitioning part,IS *partitioning)
{
  MatPartitioning_Parmetis *pmetis = (MatPartitioning_Parmetis*)part->data;
  PetscErrorCode           ierr;
  PetscInt                 *locals = NULL;
  Mat                      mat     = part->adj,amat,pmat;
  PetscBool                flg;
  PetscInt                 bs = 1;

  PetscObjectTypeCompare((PetscObject)mat,MATMPIADJ,&flg);
  if (flg) {
    amat = mat;
    PetscObjectReference((PetscObject)amat);
  } else {
    /* bs indicates if the converted matrix is "reduced" from the original and hence the
       resulting partition results need to be stretched to match the original matrix */
    MatConvert(mat,MATMPIADJ,MAT_INITIAL_MATRIX,&amat);
    if (amat->rmap->n > 0) bs = mat->rmap->n/amat->rmap->n;
  }
  MatMPIAdjCreateNonemptySubcommMat(amat,&pmat);
  MPI_Barrier(PetscObjectComm((PetscObject)part));
  if (pmat) {
    MPI_Comm   pcomm,comm;
    Mat_MPIAdj *adj     = (Mat_MPIAdj*)pmat->data;
    PetscInt   *vtxdist = pmat->rmap->range;
    PetscInt   *xadj    = adj->i;
    PetscInt   *adjncy  = adj->j;
    PetscInt   itmp     = 0,wgtflag=0, numflag=0, ncon=1, nparts=part->n, options[24], i, j;
    real_t     *tpwgts,*ubvec,itr=0.1;
    int        status;

    PetscObjectGetComm((PetscObject)pmat,&pcomm);
#if defined(PETSC_USE_DEBUG)
    /* check that matrix has no diagonal entries */
    {
      PetscInt rstart;
      MatGetOwnershipRange(pmat,&rstart,NULL);
      for (i=0; i<pmat->rmap->n; i++) {
        for (j=xadj[i]; j<xadj[i+1]; j++) {
          if (adjncy[j] == i+rstart) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Row %d has diagonal entry; Parmetis forbids diagonal entry",i+rstart);
        }
      }
    }
#endif
    PetscMalloc1(amat->rmap->n,&locals);

    if (PetscLogPrintInfo) {itmp = pmetis->printout; pmetis->printout = 127;}
    PetscMalloc1(ncon*nparts,&tpwgts);
    for (i=0; i<ncon; i++) {
      for (j=0; j<nparts; j++) {
        if (part->part_weights) {
          tpwgts[i*nparts+j] = part->part_weights[i*nparts+j];
        } else {
          tpwgts[i*nparts+j] = 1./nparts;
        }
      }
    }
    PetscMalloc1(ncon,&ubvec);
    for (i=0; i<ncon; i++) {
      ubvec[i] = 1.05;
    }
    /* This sets the defaults */
    options[0] = 0;
    for (i=1; i<24; i++) {
      options[i] = -1;
    }
    /* Duplicate the communicator to be sure that ParMETIS attribute caching does not interfere with PETSc. */
    MPI_Comm_dup(pcomm,&comm);
    if (pmetis->repartition) {
      PetscStackCallParmetis(ParMETIS_V3_AdaptiveRepart,((idx_t*)vtxdist,(idx_t*)xadj,(idx_t*)adjncy,(idx_t*)part->vertex_weights,(idx_t*)part->vertex_weights,(idx_t*)adj->values,(idx_t*)&wgtflag,(idx_t*)&numflag,(idx_t*)&ncon,(idx_t*)&nparts,tpwgts,ubvec,&itr,(idx_t*)options,(idx_t*)&pmetis->cuts,(idx_t*)locals,&comm));
    } else {
      PetscStackCallParmetis(ParMETIS_V3_PartKway,((idx_t*)vtxdist,(idx_t*)xadj,(idx_t*)adjncy,(idx_t*)part->vertex_weights,(idx_t*)adj->values,(idx_t*)&wgtflag,(idx_t*)&numflag,(idx_t*)&ncon,(idx_t*)&nparts,tpwgts,ubvec,(idx_t*)options,(idx_t*)&pmetis->cuts,(idx_t*)locals,&comm));
    }
    MPI_Comm_free(&comm);

    PetscFree(tpwgts);
    PetscFree(ubvec);
    if (PetscLogPrintInfo) pmetis->printout = itmp;
    if (bs > 1) {
      PetscInt i,j,*newlocals;
      PetscMalloc1(bs*amat->rmap->n,&newlocals);
      for (i=0; i<amat->rmap->n; i++) {
        for (j=0; j<bs; j++) {
          newlocals[bs*i + j] = locals[i];
        }
      }
      PetscFree(locals);
      ISCreateGeneral(PetscObjectComm((PetscObject)part),bs*amat->rmap->n,newlocals,PETSC_OWN_POINTER,partitioning);
    } else {
      ISCreateGeneral(PetscObjectComm((PetscObject)part),amat->rmap->n,locals,PETSC_OWN_POINTER,partitioning);
    }
  } else {
    ISCreateGeneral(PetscObjectComm((PetscObject)part),0,NULL,PETSC_COPY_VALUES,partitioning);
  }
  MatDestroy(&pmat);
  MatDestroy(&amat);
  return(0);
}
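
/*
   Illustrative sketch (not part of the original source): the matrix handed to the routine
   above through part->adj must either already be of type MATMPIADJ or be convertible to it,
   and (in debug mode) must not contain diagonal entries.  A minimal way to build such an
   adjacency matrix for a 4-vertex path graph 0-1-2-3 with MatCreateMPIAdj(), using
   hypothetical CSR data on a single MPI process, is:

     PetscInt *ia,*ja;
     Mat      adj;
     PetscMalloc1(5,&ia);
     PetscMalloc1(6,&ja);
     ia[0]=0; ia[1]=1; ia[2]=3; ia[3]=5; ia[4]=6;            row pointers
     ja[0]=1; ja[1]=0; ja[2]=2; ja[3]=1; ja[4]=3; ja[5]=2;   neighbor lists, no self edges
     MatCreateMPIAdj(PETSC_COMM_WORLD,4,4,ia,ja,NULL,&adj);  adj takes ownership of ia/ja

   The resulting adj can then be passed to MatPartitioningSetAdjacency() before
   MatPartitioningApply() dispatches to MatPartitioningApply_Parmetis().
*/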
PetscErrorCode MatPartitioningView_Parmetis(MatPartitioning part,PetscViewer viewer)
{
  MatPartitioning_Parmetis *pmetis = (MatPartitioning_Parmetis*)part->data;
  PetscErrorCode           ierr;
  int                      rank;
  PetscBool                iascii;

  MPI_Comm_rank(PetscObjectComm((PetscObject)part),&rank);
  PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);
  if (iascii) {
    if (pmetis->parallel == 2) {
      PetscViewerASCIIPrintf(viewer," Using parallel coarse grid partitioner\n");
    } else {
      PetscViewerASCIIPrintf(viewer," Using sequential coarse grid partitioner\n");
    }
    PetscViewerASCIIPrintf(viewer," Using %d fold factor\n",pmetis->foldfactor);
    PetscViewerASCIIPushSynchronized(viewer);
    PetscViewerASCIISynchronizedPrintf(viewer," [%d]Number of cuts found %d\n",rank,pmetis->cuts);
    PetscViewerFlush(viewer);
    PetscViewerASCIIPopSynchronized(viewer);
  }
  return(0);
}
/*@
   MatPartitioningParmetisSetCoarseSequential - Use the sequential code to
      do the partitioning of the coarse grid.

   Logically Collective on MatPartitioning

   Input Parameter:
.  part - the partitioning context

   Level: advanced

@*/
PetscErrorCode MatPartitioningParmetisSetCoarseSequential(MatPartitioning part)
{
  MatPartitioning_Parmetis *pmetis = (MatPartitioning_Parmetis*)part->data;

  pmetis->parallel = 1;
  return(0);
}
/*@
   MatPartitioningParmetisSetRepartition - Repartition the
      current mesh to rebalance the computation.

   Logically Collective on MatPartitioning

   Input Parameter:
.  part - the partitioning context

   Level: advanced

@*/
PetscErrorCode MatPartitioningParmetisSetRepartition(MatPartitioning part)
{
  MatPartitioning_Parmetis *pmetis = (MatPartitioning_Parmetis*)part->data;

  pmetis->repartition = PETSC_TRUE;
  return(0);
}
/*@
   MatPartitioningParmetisGetEdgeCut - Returns the number of edge cuts in the vertex partition.

   Input Parameter:
.  part - the partitioning context

   Output Parameter:
.  cut - the edge cut

   Level: advanced

@*/
PetscErrorCode MatPartitioningParmetisGetEdgeCut(MatPartitioning part, PetscInt *cut)
{
  MatPartitioning_Parmetis *pmetis = (MatPartitioning_Parmetis*) part->data;

  *cut = pmetis->cuts;
  return(0);
}
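
/*
   Illustrative usage sketch (not part of the original source): querying the edge cut after a
   partitioning has been applied to a ParMETIS context "part":

     IS       is;
     PetscInt cut;
     MatPartitioningApply(part,&is);
     MatPartitioningParmetisGetEdgeCut(part,&cut);
     PetscPrintf(PETSC_COMM_WORLD,"ParMETIS edge cut: %D\n",cut);
     ISDestroy(&is);
*/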
PetscErrorCode MatPartitioningSetFromOptions_Parmetis(PetscOptionItems *PetscOptionsObject,MatPartitioning part)
{
  PetscBool flag = PETSC_FALSE;

  PetscOptionsHead(PetscOptionsObject,"Set ParMeTiS partitioning options");
  PetscOptionsBool("-mat_partitioning_parmetis_coarse_sequential","Use sequential coarse partitioner","MatPartitioningParmetisSetCoarseSequential",flag,&flag,NULL);
  if (flag) {
    MatPartitioningParmetisSetCoarseSequential(part);
  }
  PetscOptionsBool("-mat_partitioning_parmetis_repartition","","MatPartitioningParmetisSetRepartition",flag,&flag,NULL);
  if (flag) {
    MatPartitioningParmetisSetRepartition(part);
  }
  PetscOptionsTail();
  return(0);
}
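
/*
   Illustrative sketch (not part of the original source): when the application calls
   MatPartitioningSetFromOptions() on the context, the two options registered above can be
   selected from the command line of a hypothetical program, e.g.

     ./myprog -mat_partitioning_type parmetis \
              -mat_partitioning_parmetis_coarse_sequential \
              -mat_partitioning_parmetis_repartition

   which invokes MatPartitioningParmetisSetCoarseSequential() and
   MatPartitioningParmetisSetRepartition() through the handler above.
*/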
PetscErrorCode MatPartitioningDestroy_Parmetis(MatPartitioning part)
{
  MatPartitioning_Parmetis *pmetis = (MatPartitioning_Parmetis*)part->data;
  PetscErrorCode           ierr;

  PetscFree(pmetis);
  return(0);
}
/*MC
   MATPARTITIONINGPARMETIS - Creates a partitioning context via the external package PARMETIS.

   Collective on MPI_Comm

   Input Parameter:
.  part - the partitioning context

   Options Database Keys:
+  -mat_partitioning_parmetis_coarse_sequential - use sequential PARMETIS coarse partitioner

   Level: beginner

   Notes: See http://www-users.cs.umn.edu/~karypis/metis/

.keywords: Partitioning, create, context

.seealso: MatPartitioningSetType(), MatPartitioningType

M*/
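
/*
   Illustrative usage sketch (not part of the original source), assuming "adj" is an existing
   parallel adjacency matrix (e.g. of type MATMPIADJ) describing the graph to be partitioned:

     MatPartitioning part;
     IS              is;
     MatPartitioningCreate(PETSC_COMM_WORLD,&part);
     MatPartitioningSetAdjacency(part,adj);
     MatPartitioningSetType(part,MATPARTITIONINGPARMETIS);
     MatPartitioningSetFromOptions(part);
     MatPartitioningApply(part,&is);          dispatches to MatPartitioningApply_Parmetis() above
     ISDestroy(&is);
     MatPartitioningDestroy(&part);
*/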
PETSC_EXTERN PetscErrorCode MatPartitioningCreate_Parmetis(MatPartitioning part)
{
  PetscErrorCode           ierr;
  MatPartitioning_Parmetis *pmetis;

  PetscNewLog(part,&pmetis);
  part->data = (void*)pmetis;

  pmetis->cuts        = 0;   /* output variable */
  pmetis->foldfactor  = 150; /* folding factor */
  pmetis->parallel    = 2;   /* use parallel partitioner for coarse grid */
  pmetis->indexing    = 0;   /* index numbering starts from 0 */
  pmetis->printout    = 0;   /* print no output while running */
  pmetis->repartition = PETSC_FALSE;

  part->ops->apply          = MatPartitioningApply_Parmetis;
  part->ops->view           = MatPartitioningView_Parmetis;
  part->ops->destroy        = MatPartitioningDestroy_Parmetis;
  part->ops->setfromoptions = MatPartitioningSetFromOptions_Parmetis;
  return(0);
}
/*@
   MatMeshToVertexGraph - This routine is not implemented because ParMETIS does not provide the needed functionality.
      It was intended to use the ParMETIS package to convert a Mat that represents a mesh to a Mat that represents the
      graph of the coupling between vertices of the cells, suitable for partitioning with the MatPartitioning object.
      Use it to partition vertices of a mesh. More likely you should use MatMeshToCellGraph()

   Collective on Mat

   Input Parameters:
+  mesh - the graph that represents the mesh
-  ncommonnodes - mesh elements that share this number of common nodes are considered neighbors, use 2 for triangles and
                  quadrilaterals, 3 for tetrahedra and 4 for hexahedra

   Output Parameter:
.  dual - the dual graph

   Notes:
   Currently requires ParMetis to be installed and uses ParMETIS_V3_Mesh2Dual()

   The columns of each row of the Mat mesh are the global vertex numbers of the vertices of that row's cell. The number of rows in mesh is
   the number of cells, the number of columns is the number of vertices.

   Level: advanced

.seealso: MatMeshToCellGraph(), MatCreateMPIAdj(), MatPartitioningCreate()

@*/
PetscErrorCode MatMeshToVertexGraph(Mat mesh,PetscInt ncommonnodes,Mat *dual)
{
  SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"ParMETIS does not provide this functionality");
  return(0);
}
/*@
   MatMeshToCellGraph - Uses the ParMETIS package to convert a Mat that represents a mesh to a Mat that represents the graph of the coupling
      between cells (the "dual" graph) and is suitable for partitioning with the MatPartitioning object. Use this to partition
      cells of a mesh.

   Collective on Mat

   Input Parameters:
+  mesh - the graph that represents the mesh
-  ncommonnodes - mesh elements that share this number of common nodes are considered neighbors, use 2 for triangles and
                  quadrilaterals, 3 for tetrahedra and 4 for hexahedra

   Output Parameter:
.  dual - the dual graph

   Notes:
   Currently requires ParMetis to be installed and uses ParMETIS_V3_Mesh2Dual()

$     Each row of the mesh object represents a single cell in the mesh. For triangles it has 3 entries, quadrilaterals 4 entries,
$         tetrahedra 4 entries and hexahedra 8 entries. You can mix triangles and quadrilaterals in the same mesh, but cannot
$         mix tetrahedra and hexahedra
$     The columns of each row of the Mat mesh are the global vertex numbers of the vertices of that row's cell.
$     The number of rows in mesh is the number of cells, the number of columns is the number of vertices.

   Level: advanced

.seealso: MatMeshToVertexGraph(), MatCreateMPIAdj(), MatPartitioningCreate()

@*/
PetscErrorCode MatMeshToCellGraph(Mat mesh,PetscInt ncommonnodes,Mat *dual)
{
  PetscInt   *newxadj,*newadjncy;
  PetscInt   numflag=0;
  Mat_MPIAdj *adj = (Mat_MPIAdj*)mesh->data,*newadj;
  PetscBool  flg;
  int        status;
  MPI_Comm   comm;

  PetscObjectTypeCompare((PetscObject)mesh,MATMPIADJ,&flg);
  if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Must use MPIAdj matrix type");

  PetscObjectGetComm((PetscObject)mesh,&comm);
  PetscStackCallParmetis(ParMETIS_V3_Mesh2Dual,((idx_t*)mesh->rmap->range,(idx_t*)adj->i,(idx_t*)adj->j,(idx_t*)&numflag,(idx_t*)&ncommonnodes,(idx_t**)&newxadj,(idx_t**)&newadjncy,&comm));
  MatCreateMPIAdj(PetscObjectComm((PetscObject)mesh),mesh->rmap->n,mesh->rmap->N,newxadj,newadjncy,NULL,dual);
  newadj = (Mat_MPIAdj*)(*dual)->data;

  newadj->freeaijwithfree = PETSC_TRUE; /* signal the matrix should be freed with system free since space was allocated by ParMETIS */
  return(0);
}
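
/*
   Illustrative usage sketch (not part of the original source): building the dual graph of a
   tiny two-triangle mesh on a single process and preparing it for partitioning.  Each row of
   "mesh" lists the 3 global vertex numbers of one triangle; the CSR data below are
   hypothetical.

     PetscInt *ia,*ja;
     Mat      mesh,dual;
     PetscMalloc1(3,&ia);
     PetscMalloc1(6,&ja);
     ia[0]=0; ia[1]=3; ia[2]=6;                 two cells, 3 vertices each
     ja[0]=0; ja[1]=1; ja[2]=2;                 triangle 0: vertices 0,1,2
     ja[3]=1; ja[4]=2; ja[5]=3;                 triangle 1: vertices 1,2,3
     MatCreateMPIAdj(PETSC_COMM_WORLD,2,4,ia,ja,NULL,&mesh);
     MatMeshToCellGraph(mesh,2,&dual);          cells sharing 2 nodes become neighbors
     (partition "dual" with a MatPartitioning object, then destroy it)
     MatDestroy(&dual);
     MatDestroy(&mesh);
*/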