Actual source code: pmetis.c
petsc-3.5.4 2015-05-23
#include <../src/mat/impls/adj/mpi/mpiadj.h>       /*I "petscmat.h" I*/

/*
   Currently using ParMetis-4.0.2
*/

#include <parmetis.h>

/*
      The first 5 elements of this structure are the input control array to Metis
*/
typedef struct {
  PetscInt cuts;         /* number of cuts made (output) */
  PetscInt foldfactor;   /* folding factor */
  PetscInt parallel;     /* use parallel partitioner for coarse problem */
  PetscInt indexing;     /* 0 indicates C indexing, 1 Fortran */
  PetscInt printout;     /* indicates if one wishes Metis to print info */
} MatPartitioning_Parmetis;
#define CHKERRQPARMETIS(n,func)                                             \
  if (n == METIS_ERROR_INPUT) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"ParMETIS error due to wrong inputs and/or options for %s",func); \
  else if (n == METIS_ERROR_MEMORY) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"ParMETIS error due to insufficient memory in %s",func); \
  else if (n == METIS_ERROR) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"ParMETIS general error in %s",func);

#define PetscStackCallParmetis(func,args) do {PetscStackPush(#func);status = func args;PetscStackPop; CHKERRQPARMETIS(status,#func);} while (0)
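/*
   Editorial note (not part of the original pmetis.c): every ParMETIS entry point in
   this file is invoked through PetscStackCallParmetis() so that the call is recorded
   on the PETSc stack and its METIS_* return code is converted into a PETSc error by
   CHKERRQPARMETIS().  The calling scope must declare an "int status;".  Schematically
   (SomeParmetisRoutine and its arguments are placeholders):

       int status;
       PetscStackCallParmetis(SomeParmetisRoutine,(arg1,arg2,&comm));

   See the ParMETIS_V3_PartKway() and ParMETIS_V3_Mesh2Dual() calls below for real uses.
*/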
/*
   Uses the ParMETIS parallel matrix partitioner to partition the matrix in parallel
*/
static PetscErrorCode MatPartitioningApply_Parmetis(MatPartitioning part,IS *partitioning)
{
  MatPartitioning_Parmetis *pmetis = (MatPartitioning_Parmetis*)part->data;
  PetscErrorCode           ierr;
  PetscInt                 *locals = NULL;
  Mat                      mat     = part->adj,amat,pmat;
  PetscBool                flg;
  PetscInt                 bs      = 1;

  PetscFunctionBegin;
  ierr = PetscObjectTypeCompare((PetscObject)mat,MATMPIADJ,&flg);CHKERRQ(ierr);
  if (flg) {
    amat = mat;
    ierr = PetscObjectReference((PetscObject)amat);CHKERRQ(ierr);
  } else {
    /* bs indicates if the converted matrix is "reduced" from the original and hence the
       resulting partition results need to be stretched to match the original matrix */
    ierr = MatConvert(mat,MATMPIADJ,MAT_INITIAL_MATRIX,&amat);CHKERRQ(ierr);
    if (amat->rmap->n > 0) bs = mat->rmap->n/amat->rmap->n;
  }
  ierr = MatMPIAdjCreateNonemptySubcommMat(amat,&pmat);CHKERRQ(ierr);
  ierr = MPI_Barrier(PetscObjectComm((PetscObject)part));CHKERRQ(ierr);
  if (pmat) {
    MPI_Comm   pcomm,comm;
    Mat_MPIAdj *adj     = (Mat_MPIAdj*)pmat->data;
    PetscInt   *vtxdist = pmat->rmap->range;
    PetscInt   *xadj    = adj->i;
    PetscInt   *adjncy  = adj->j;
    PetscInt   itmp     = 0,wgtflag=0, numflag=0, ncon=1, nparts=part->n, options[24], i, j;
    real_t     *tpwgts,*ubvec;
    int        status;

    ierr = PetscObjectGetComm((PetscObject)pmat,&pcomm);CHKERRQ(ierr);
#if defined(PETSC_USE_DEBUG)
    /* check that matrix has no diagonal entries */
    {
      PetscInt rstart;
      ierr = MatGetOwnershipRange(pmat,&rstart,NULL);CHKERRQ(ierr);
      for (i=0; i<pmat->rmap->n; i++) {
        for (j=xadj[i]; j<xadj[i+1]; j++) {
          if (adjncy[j] == i+rstart) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Row %d has diagonal entry; Parmetis forbids diagonal entry",i+rstart);
        }
      }
    }
#endif
    ierr = PetscMalloc1(amat->rmap->n,&locals);CHKERRQ(ierr);

    if (PetscLogPrintInfo) {itmp = pmetis->printout; pmetis->printout = 127;}
    ierr = PetscMalloc1(ncon*nparts,&tpwgts);CHKERRQ(ierr);
    for (i=0; i<ncon; i++) {
      for (j=0; j<nparts; j++) {
        if (part->part_weights) {
          tpwgts[i*nparts+j] = part->part_weights[i*nparts+j];
        } else {
          tpwgts[i*nparts+j] = 1./nparts;
        }
      }
    }
    ierr = PetscMalloc1(ncon,&ubvec);CHKERRQ(ierr);
    for (i=0; i<ncon; i++) {
      ubvec[i] = 1.05;
    }
    /* This sets the defaults */
    options[0] = 0;
    for (i=1; i<24; i++) {
      options[i] = -1;
    }
    /* Duplicate the communicator to be sure that ParMETIS attribute caching does not interfere with PETSc. */
    ierr = MPI_Comm_dup(pcomm,&comm);CHKERRQ(ierr);
    PetscStackCallParmetis(ParMETIS_V3_PartKway,((idx_t*)vtxdist,(idx_t*)xadj,(idx_t*)adjncy,(idx_t*)part->vertex_weights,(idx_t*)adj->values,(idx_t*)&wgtflag,(idx_t*)&numflag,(idx_t*)&ncon,(idx_t*)&nparts,tpwgts,ubvec,(idx_t*)options,(idx_t*)&pmetis->cuts,(idx_t*)locals,&comm));
    ierr = MPI_Comm_free(&comm);CHKERRQ(ierr);

    ierr = PetscFree(tpwgts);CHKERRQ(ierr);
    ierr = PetscFree(ubvec);CHKERRQ(ierr);
    if (PetscLogPrintInfo) pmetis->printout = itmp;
  }
  if (bs > 1) {
    PetscInt i,j,*newlocals;
    ierr = PetscMalloc1(bs*amat->rmap->n,&newlocals);CHKERRQ(ierr);
    for (i=0; i<amat->rmap->n; i++) {
      for (j=0; j<bs; j++) {
        newlocals[bs*i + j] = locals[i];
      }
    }
    ierr = PetscFree(locals);CHKERRQ(ierr);
    ierr = ISCreateGeneral(PetscObjectComm((PetscObject)part),bs*amat->rmap->n,newlocals,PETSC_OWN_POINTER,partitioning);CHKERRQ(ierr);
  } else {
    ierr = ISCreateGeneral(PetscObjectComm((PetscObject)part),amat->rmap->n,locals,PETSC_OWN_POINTER,partitioning);CHKERRQ(ierr);
  }

  ierr = MatDestroy(&pmat);CHKERRQ(ierr);
  ierr = MatDestroy(&amat);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
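/*
   Editorial usage sketch (not part of the original pmetis.c): the IS produced by the
   apply routine above maps each local row of the adjacency matrix to the part (rank)
   it was assigned to.  The helper below exists only for illustration; "adj" is assumed
   to be an existing MATMPIADJ matrix and "nparts" the desired number of parts.
*/
static PetscErrorCode ExampleConsumePartitioning(Mat adj,PetscInt nparts)
{
  PetscErrorCode  ierr;
  MatPartitioning part;
  IS              is,isnumb;
  PetscInt        *counts;

  PetscFunctionBegin;
  ierr = MatPartitioningCreate(PetscObjectComm((PetscObject)adj),&part);CHKERRQ(ierr);
  ierr = MatPartitioningSetAdjacency(part,adj);CHKERRQ(ierr);
  ierr = MatPartitioningSetType(part,MATPARTITIONINGPARMETIS);CHKERRQ(ierr);
  ierr = MatPartitioningSetNParts(part,nparts);CHKERRQ(ierr);
  ierr = MatPartitioningApply(part,&is);CHKERRQ(ierr);          /* dispatches to MatPartitioningApply_Parmetis() */
  ierr = PetscMalloc1(nparts,&counts);CHKERRQ(ierr);
  ierr = ISPartitioningCount(is,nparts,counts);CHKERRQ(ierr);   /* number of rows assigned to each part */
  ierr = ISPartitioningToNumbering(is,&isnumb);CHKERRQ(ierr);   /* contiguous new global numbering */
  ierr = PetscFree(counts);CHKERRQ(ierr);
  ierr = ISDestroy(&isnumb);CHKERRQ(ierr);
  ierr = ISDestroy(&is);CHKERRQ(ierr);
  ierr = MatPartitioningDestroy(&part);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}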
PetscErrorCode MatPartitioningView_Parmetis(MatPartitioning part,PetscViewer viewer)
{
  MatPartitioning_Parmetis *pmetis = (MatPartitioning_Parmetis*)part->data;
  PetscErrorCode           ierr;
  int                      rank;
  PetscBool                iascii;

  PetscFunctionBegin;
  ierr = MPI_Comm_rank(PetscObjectComm((PetscObject)part),&rank);CHKERRQ(ierr);
  ierr = PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);CHKERRQ(ierr);
  if (iascii) {
    if (pmetis->parallel == 2) {
      ierr = PetscViewerASCIIPrintf(viewer," Using parallel coarse grid partitioner\n");CHKERRQ(ierr);
    } else {
      ierr = PetscViewerASCIIPrintf(viewer," Using sequential coarse grid partitioner\n");CHKERRQ(ierr);
    }
    ierr = PetscViewerASCIIPrintf(viewer," Using %d fold factor\n",pmetis->foldfactor);CHKERRQ(ierr);
    ierr = PetscViewerASCIISynchronizedAllow(viewer,PETSC_TRUE);CHKERRQ(ierr);
    ierr = PetscViewerASCIISynchronizedPrintf(viewer," [%d]Number of cuts found %d\n",rank,pmetis->cuts);CHKERRQ(ierr);
    ierr = PetscViewerFlush(viewer);CHKERRQ(ierr);
    ierr = PetscViewerASCIISynchronizedAllow(viewer,PETSC_FALSE);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
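/*
   Editorial usage sketch (not part of the original pmetis.c): this routine is normally
   reached through the generic interface, e.g.

       MatPartitioningView(part,PETSC_VIEWER_STDOUT_WORLD);
*/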
/*@
   MatPartitioningParmetisSetCoarseSequential - Use the sequential code to
   do the partitioning of the coarse grid.

   Logically Collective on MatPartitioning

   Input Parameter:
.  part - the partitioning context

   Level: advanced

@*/
PetscErrorCode MatPartitioningParmetisSetCoarseSequential(MatPartitioning part)
{
  MatPartitioning_Parmetis *pmetis = (MatPartitioning_Parmetis*)part->data;

  PetscFunctionBegin;
  pmetis->parallel = 1;
  PetscFunctionReturn(0);
}
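/*
   Editorial usage sketch (not part of the original pmetis.c): called after the
   partitioning type has been set to parmetis, e.g.

       MatPartitioningSetType(part,MATPARTITIONINGPARMETIS);
       MatPartitioningParmetisSetCoarseSequential(part);
*/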
/*@
   MatPartitioningParmetisGetEdgeCut - Returns the number of edge cuts in the vertex partition.

   Input Parameter:
.  part - the partitioning context

   Output Parameter:
.  cut - the edge cut

   Level: advanced

@*/
PetscErrorCode MatPartitioningParmetisGetEdgeCut(MatPartitioning part, PetscInt *cut)
{
  MatPartitioning_Parmetis *pmetis = (MatPartitioning_Parmetis*)part->data;

  PetscFunctionBegin;
  *cut = pmetis->cuts;
  PetscFunctionReturn(0);
}
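/*
   Editorial usage sketch (not part of the original pmetis.c): the cut count is filled
   in by ParMETIS during MatPartitioningApply(), so query it afterwards, e.g.

       PetscInt cut;
       MatPartitioningApply(part,&is);
       MatPartitioningParmetisGetEdgeCut(part,&cut);
*/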
PetscErrorCode MatPartitioningSetFromOptions_Parmetis(MatPartitioning part)
{
  PetscErrorCode ierr;
  PetscBool      flag = PETSC_FALSE;

  PetscFunctionBegin;
  ierr = PetscOptionsHead("Set ParMeTiS partitioning options");CHKERRQ(ierr);
  ierr = PetscOptionsBool("-mat_partitioning_parmetis_coarse_sequential","Use sequential coarse partitioner","MatPartitioningParmetisSetCoarseSequential",flag,&flag,NULL);CHKERRQ(ierr);
  if (flag) {
    ierr = MatPartitioningParmetisSetCoarseSequential(part);CHKERRQ(ierr);
  }
  ierr = PetscOptionsTail();CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
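/*
   Editorial usage sketch (not part of the original pmetis.c): the same switch can be
   selected at run time once the application calls MatPartitioningSetFromOptions()
   ("./app" is a placeholder executable name):

       ./app -mat_partitioning_type parmetis -mat_partitioning_parmetis_coarse_sequential
*/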
PetscErrorCode MatPartitioningDestroy_Parmetis(MatPartitioning part)
{
  MatPartitioning_Parmetis *pmetis = (MatPartitioning_Parmetis*)part->data;
  PetscErrorCode           ierr;

  PetscFunctionBegin;
  ierr = PetscFree(pmetis);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
/*MC
   MATPARTITIONINGPARMETIS - Creates a partitioning context via the external package PARMETIS.

   Collective on MPI_Comm

   Input Parameter:
.  part - the partitioning context

   Options Database Keys:
.  -mat_partitioning_parmetis_coarse_sequential - use sequential PARMETIS coarse partitioner

   Level: beginner

   Notes: See http://www-users.cs.umn.edu/~karypis/metis/

.keywords: Partitioning, create, context

.seealso: MatPartitioningSetType(), MatPartitioningType

M*/
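/*
   Editorial usage sketch (not part of the original pmetis.c): this partitioner is
   selected either in code or from the options database, e.g.

       MatPartitioningSetType(part,MATPARTITIONINGPARMETIS);

   or, when MatPartitioningSetFromOptions() is used, with the run-time option

       -mat_partitioning_type parmetis
*/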
PETSC_EXTERN PetscErrorCode MatPartitioningCreate_Parmetis(MatPartitioning part)
{
  PetscErrorCode           ierr;
  MatPartitioning_Parmetis *pmetis;

  PetscFunctionBegin;
  ierr       = PetscNewLog(part,&pmetis);CHKERRQ(ierr);
  part->data = (void*)pmetis;

  pmetis->cuts       = 0;   /* output variable */
  pmetis->foldfactor = 150; /* folding factor */
  pmetis->parallel   = 2;   /* use parallel partitioner for coarse grid */
  pmetis->indexing   = 0;   /* index numbering starts from 0 */
  pmetis->printout   = 0;   /* print no output while running */

  part->ops->apply          = MatPartitioningApply_Parmetis;
  part->ops->view           = MatPartitioningView_Parmetis;
  part->ops->destroy        = MatPartitioningDestroy_Parmetis;
  part->ops->setfromoptions = MatPartitioningSetFromOptions_Parmetis;
  PetscFunctionReturn(0);
}
/*@
     MatMeshToVertexGraph - This routine is not implemented because ParMETIS does not provide the needed functionality.  It would use the ParMETIS package to
                    convert a Mat that represents a mesh to a Mat that represents the graph of the coupling
                    between vertices of the cells and is suitable for partitioning with the MatPartitioning object. Use this to partition
                    vertices of a mesh. More likely you should use MatMeshToCellGraph()

   Collective on Mat

   Input Parameters:
+     mesh - the graph that represents the mesh
-     ncommonnodes - mesh elements that share this number of common nodes are considered neighbors, use 2 for triangles and
                     quadrilaterals, 3 for tetrahedra and 4 for hexahedra

   Output Parameter:
.     dual - the dual graph

   Notes:
     Currently requires ParMetis to be installed and uses ParMETIS_V3_Mesh2Dual()

     The columns of each row of the Mat mesh are the global vertex numbers of the vertices of that row's cell. The number of rows in mesh is
     the number of cells, and the number of columns is the number of vertices.

   Level: advanced

.seealso: MatMeshToCellGraph(), MatCreateMPIAdj(), MatPartitioningCreate()

@*/
PetscErrorCode MatMeshToVertexGraph(Mat mesh,PetscInt ncommonnodes,Mat *dual)
{
  PetscFunctionBegin;
  SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"ParMETIS does not provide this functionality");
  PetscFunctionReturn(0);
}
/*@
     MatMeshToCellGraph - Uses the ParMETIS package to convert a Mat that represents a mesh to a Mat that represents the graph of the coupling
                    between cells (the "dual" graph) and is suitable for partitioning with the MatPartitioning object. Use this to partition
                    cells of a mesh.

   Collective on Mat

   Input Parameters:
+     mesh - the graph that represents the mesh
-     ncommonnodes - mesh elements that share this number of common nodes are considered neighbors, use 2 for triangles and
                     quadrilaterals, 3 for tetrahedra and 4 for hexahedra

   Output Parameter:
.     dual - the dual graph

   Notes:
     Currently requires ParMetis to be installed and uses ParMETIS_V3_Mesh2Dual()

     The columns of each row of the Mat mesh are the global vertex numbers of the vertices of that row's cell. The number of rows in mesh is
     the number of cells, and the number of columns is the number of vertices.

   Level: advanced

.seealso: MatMeshToVertexGraph(), MatCreateMPIAdj(), MatPartitioningCreate()

@*/
PetscErrorCode MatMeshToCellGraph(Mat mesh,PetscInt ncommonnodes,Mat *dual)
{
  PetscErrorCode ierr;
  PetscInt       *newxadj,*newadjncy;
  PetscInt       numflag=0;
  Mat_MPIAdj     *adj = (Mat_MPIAdj*)mesh->data,*newadj;
  PetscBool      flg;
  int            status;
  MPI_Comm       comm;

  PetscFunctionBegin;
  ierr = PetscObjectTypeCompare((PetscObject)mesh,MATMPIADJ,&flg);CHKERRQ(ierr);
  if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Must use MPIAdj matrix type");

  ierr = PetscObjectGetComm((PetscObject)mesh,&comm);CHKERRQ(ierr);
  PetscStackCallParmetis(ParMETIS_V3_Mesh2Dual,((idx_t*)mesh->rmap->range,(idx_t*)adj->i,(idx_t*)adj->j,(idx_t*)&numflag,(idx_t*)&ncommonnodes,(idx_t**)&newxadj,(idx_t**)&newadjncy,&comm));
  ierr   = MatCreateMPIAdj(PetscObjectComm((PetscObject)mesh),mesh->rmap->n,mesh->rmap->N,newxadj,newadjncy,NULL,dual);CHKERRQ(ierr);
  newadj = (Mat_MPIAdj*)(*dual)->data;

  newadj->freeaijwithfree = PETSC_TRUE; /* signal the matrix should be freed with system free since space was allocated by ParMETIS */
  PetscFunctionReturn(0);
}
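/*
   Editorial usage sketch (not part of the original pmetis.c): builds a two-triangle
   mesh (vertices 0-3, cells {0,1,2} and {1,2,3}) as a MATMPIADJ matrix and asks for
   its dual (cell-to-cell) graph.  Intended to be run on a single process and meant
   only to illustrate the calling sequence of MatCreateMPIAdj() and MatMeshToCellGraph().
*/
static PetscErrorCode ExampleMeshToCellGraph(MPI_Comm comm,Mat *dual)
{
  PetscErrorCode ierr;
  Mat            mesh;
  PetscInt       *ia,*ja;

  PetscFunctionBegin;
  ierr  = PetscMalloc1(3,&ia);CHKERRQ(ierr);   /* 2 cells + 1 */
  ierr  = PetscMalloc1(6,&ja);CHKERRQ(ierr);   /* 3 vertices per cell */
  ia[0] = 0; ia[1] = 3; ia[2] = 6;
  ja[0] = 0; ja[1] = 1; ja[2] = 2;             /* cell 0 */
  ja[3] = 1; ja[4] = 2; ja[5] = 3;             /* cell 1 */
  ierr  = MatCreateMPIAdj(comm,2,4,ia,ja,NULL,&mesh);CHKERRQ(ierr); /* mesh Mat takes ownership of ia,ja */
  ierr  = MatMeshToCellGraph(mesh,2,dual);CHKERRQ(ierr);            /* triangles sharing 2 nodes are neighbors */
  ierr  = MatDestroy(&mesh);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}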