Actual source code: mpiadj.c
2: /*
3: Defines the basic matrix operations for the ADJ adjacency list matrix data-structure.
4: */
5: #include <../src/mat/impls/adj/mpi/mpiadj.h>
6: #include <petscsf.h>
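/*
   Orientation note (added, not from the original file): a MATMPIADJ matrix stores its
   local rows in compressed sparse row form.  a->i[] gives the start of each row in
   a->j[], a->j[] holds the column indices of each row (sorted, with no diagonal
   entries), and a->values[] holds optional integer edge weights.  For example, the rows

       row 0: columns 1 3
       row 1: columns 0 2

   are stored as i = {0,2,4} and j = {1,3,0,2}.
*/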
8: /*
9:  * This helper builds the sub-matrix data arrays and is used by both MatCreateSubMatrix (parallel sub-matrix) and MatCreateSubMatrices (sequential sub-matrices).
10:  */
11: static PetscErrorCode MatCreateSubMatrix_MPIAdj_data(Mat adj,IS irows, IS icols, PetscInt **sadj_xadj,PetscInt **sadj_adjncy,PetscInt **sadj_values)
12: {
13: PetscInt nlrows_is,icols_n,i,j,nroots,nleaves,rlocalindex,*ncols_send,*ncols_recv;
14: PetscInt nlrows_mat,*adjncy_recv,Ncols_recv,Ncols_send,*xadj_recv,*values_recv;
15: PetscInt *ncols_recv_offsets,loc,rnclos,*sadjncy,*sxadj,*svalues;
16: const PetscInt *irows_indices,*icols_indices,*xadj, *adjncy;
17: PetscMPIInt owner;
18: Mat_MPIAdj *a = (Mat_MPIAdj*)adj->data;
19: PetscLayout rmap;
20: MPI_Comm comm;
21: PetscSF sf;
22: PetscSFNode *iremote;
23: PetscBool done;
24: PetscErrorCode ierr;
27: PetscObjectGetComm((PetscObject)adj,&comm);
28: MatGetLayouts(adj,&rmap,NULL);
29: ISGetLocalSize(irows,&nlrows_is);
30: ISGetIndices(irows,&irows_indices);
31: PetscMalloc1(nlrows_is,&iremote);
32: /* construct the SF graph */
33: nleaves = nlrows_is;
34: for (i=0; i<nlrows_is; i++) {
35: owner = -1;
36: rlocalindex = -1;
37: PetscLayoutFindOwnerIndex(rmap,irows_indices[i],&owner,&rlocalindex);
38: iremote[i].rank = owner;
39: iremote[i].index = rlocalindex;
40: }
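/* Fetch, from the owner of each requested row, the number of nonzeros in that row and
   the row's starting offset in the owner's adjncy[] array */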
41: MatGetRowIJ(adj,0,PETSC_FALSE,PETSC_FALSE,&nlrows_mat,&xadj,&adjncy,&done);
42: PetscCalloc4(nlrows_mat,&ncols_send,nlrows_is,&xadj_recv,nlrows_is+1,&ncols_recv_offsets,nlrows_is,&ncols_recv);
43: nroots = nlrows_mat;
44: for (i=0; i<nlrows_mat; i++) {
45: ncols_send[i] = xadj[i+1]-xadj[i];
46: }
47: PetscSFCreate(comm,&sf);
48: PetscSFSetGraph(sf,nroots,nleaves,NULL,PETSC_OWN_POINTER,iremote,PETSC_OWN_POINTER);
49: PetscSFSetType(sf,PETSCSFBASIC);
50: PetscSFSetFromOptions(sf);
51: PetscSFBcastBegin(sf,MPIU_INT,ncols_send,ncols_recv,MPI_REPLACE);
52: PetscSFBcastEnd(sf,MPIU_INT,ncols_send,ncols_recv,MPI_REPLACE);
53: PetscSFBcastBegin(sf,MPIU_INT,xadj,xadj_recv,MPI_REPLACE);
54: PetscSFBcastEnd(sf,MPIU_INT,xadj,xadj_recv,MPI_REPLACE);
55: PetscSFDestroy(&sf);
56: Ncols_recv =0;
57: for (i=0; i<nlrows_is; i++) {
58: Ncols_recv += ncols_recv[i];
59: ncols_recv_offsets[i+1] = ncols_recv[i]+ncols_recv_offsets[i];
60: }
61: Ncols_send = 0;
62: for (i=0; i<nlrows_mat; i++) {
63: Ncols_send += ncols_send[i];
64: }
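/* Build a second star forest whose leaves are the individual nonzeros of the requested
   rows, so the column indices (and optional edge weights) can be fetched from their owners */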
65: PetscCalloc1(Ncols_recv,&iremote);
66: PetscCalloc1(Ncols_recv,&adjncy_recv);
67: nleaves = Ncols_recv;
68: Ncols_recv = 0;
69: for (i=0; i<nlrows_is; i++) {
70: PetscLayoutFindOwner(rmap,irows_indices[i],&owner);
71: for (j=0; j<ncols_recv[i]; j++) {
72: iremote[Ncols_recv].rank = owner;
73: iremote[Ncols_recv++].index = xadj_recv[i]+j;
74: }
75: }
76: ISRestoreIndices(irows,&irows_indices);
77: /* allocate a receive buffer for the edge weights when they are in use */
78: if (a->useedgeweights) {PetscCalloc1(Ncols_recv,&values_recv);}
79: nroots = Ncols_send;
80: PetscSFCreate(comm,&sf);
81: PetscSFSetGraph(sf,nroots,nleaves,NULL,PETSC_OWN_POINTER,iremote,PETSC_OWN_POINTER);
82: PetscSFSetType(sf,PETSCSFBASIC);
83: PetscSFSetFromOptions(sf);
84: PetscSFBcastBegin(sf,MPIU_INT,adjncy,adjncy_recv,MPI_REPLACE);
85: PetscSFBcastEnd(sf,MPIU_INT,adjncy,adjncy_recv,MPI_REPLACE);
86: if (a->useedgeweights) {
87: PetscSFBcastBegin(sf,MPIU_INT,a->values,values_recv,MPI_REPLACE);
88: PetscSFBcastEnd(sf,MPIU_INT,a->values,values_recv,MPI_REPLACE);
89: }
90: PetscSFDestroy(&sf);
91: MatRestoreRowIJ(adj,0,PETSC_FALSE,PETSC_FALSE,&nlrows_mat,&xadj,&adjncy,&done);
92: ISGetLocalSize(icols,&icols_n);
93: ISGetIndices(icols,&icols_indices);
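/* Keep only the received columns that appear in icols: mark dropped entries with -1,
   reduce the per-row counts accordingly, and count the surviving entries */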
94: rnclos = 0;
95: for (i=0; i<nlrows_is; i++) {
96: for (j=ncols_recv_offsets[i]; j<ncols_recv_offsets[i+1]; j++) {
97: PetscFindInt(adjncy_recv[j], icols_n, icols_indices, &loc);
98: if (loc<0) {
99: adjncy_recv[j] = -1;
100: if (a->useedgeweights) values_recv[j] = -1;
101: ncols_recv[i]--;
102: } else {
103: rnclos++;
104: }
105: }
106: }
107: ISRestoreIndices(icols,&icols_indices);
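/* Compress the surviving entries into the output CSR arrays sxadj and sadjncy
   (and svalues when edge weights are in use) */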
108: PetscCalloc1(rnclos,&sadjncy);
109: if (a->useedgeweights) {PetscCalloc1(rnclos,&svalues);}
110: PetscCalloc1(nlrows_is+1,&sxadj);
111: rnclos = 0;
112: for (i=0; i<nlrows_is; i++) {
113: for (j=ncols_recv_offsets[i]; j<ncols_recv_offsets[i+1]; j++) {
114: if (adjncy_recv[j]<0) continue;
115: sadjncy[rnclos] = adjncy_recv[j];
116: if (a->useedgeweights) svalues[rnclos] = values_recv[j];
117: rnclos++;
118: }
119: }
120: for (i=0; i<nlrows_is; i++) {
121: sxadj[i+1] = sxadj[i]+ncols_recv[i];
122: }
123: if (sadj_xadj) { *sadj_xadj = sxadj;} else { PetscFree(sxadj);}
124: if (sadj_adjncy) { *sadj_adjncy = sadjncy;} else { PetscFree(sadjncy);}
125: if (sadj_values) {
126: if (a->useedgeweights) *sadj_values = svalues; else *sadj_values=NULL;
127: } else {
128: if (a->useedgeweights) {PetscFree(svalues);}
129: }
130: PetscFree4(ncols_send,xadj_recv,ncols_recv_offsets,ncols_recv);
131: PetscFree(adjncy_recv);
132: if (a->useedgeweights) {PetscFree(values_recv);}
133: return(0);
134: }
136: static PetscErrorCode MatCreateSubMatrices_MPIAdj_Private(Mat mat,PetscInt n,const IS irow[],const IS icol[],PetscBool subcomm,MatReuse scall,Mat *submat[])
137: {
138: PetscInt i,irow_n,icol_n,*sxadj,*sadjncy,*svalues;
139: PetscInt *indices,nindx,j,k,loc;
140: PetscMPIInt issame;
141: const PetscInt *irow_indices,*icol_indices;
142: MPI_Comm scomm_row,scomm_col,scomm_mat;
143: PetscErrorCode ierr;
146: nindx = 0;
147: /*
148:  * Estimate the maximum workspace needed for the merged row and column indices
149:  */
150: for (i=0; i<n; i++) {
151: ISGetLocalSize(irow[i],&irow_n);
152: ISGetLocalSize(icol[i],&icol_n);
153: nindx = nindx>(irow_n+icol_n)? nindx:(irow_n+icol_n);
154: }
155: PetscMalloc1(nindx,&indices);
156: /* construct a submat */
157: for (i=0; i<n; i++) {
158: if (subcomm) {
159: PetscObjectGetComm((PetscObject)irow[i],&scomm_row);
160: PetscObjectGetComm((PetscObject)icol[i],&scomm_col);
161: MPI_Comm_compare(scomm_row,scomm_col,&issame);
162: if (issame != MPI_IDENT) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"row index set must have the same comm as the col index set\n");
163: MPI_Comm_compare(scomm_row,PETSC_COMM_SELF,&issame);
164: if (issame == MPI_IDENT) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"cannot use PETSC_COMM_SELF as comm when extracting a parallel submatrix\n");
165: } else {
166: scomm_row = PETSC_COMM_SELF;
167: }
168: /*get sub-matrix data*/
169: sxadj=NULL; sadjncy=NULL; svalues=NULL;
170: MatCreateSubMatrix_MPIAdj_data(mat,irow[i],icol[i],&sxadj,&sadjncy,&svalues);
171: ISGetLocalSize(irow[i],&irow_n);
172: ISGetLocalSize(icol[i],&icol_n);
173: ISGetIndices(irow[i],&irow_indices);
174: PetscArraycpy(indices,irow_indices,irow_n);
175: ISRestoreIndices(irow[i],&irow_indices);
176: ISGetIndices(icol[i],&icol_indices);
177: PetscArraycpy(indices+irow_n,icol_indices,icol_n);
178: ISRestoreIndices(icol[i],&icol_indices);
179: nindx = irow_n+icol_n;
180: PetscSortRemoveDupsInt(&nindx,indices);
181: /* renumber columns */
182: for (j=0; j<irow_n; j++) {
183: for (k=sxadj[j]; k<sxadj[j+1]; k++) {
184: PetscFindInt(sadjncy[k],nindx,indices,&loc);
185: if (loc<0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"cannot find column %D",sadjncy[k]);
186: sadjncy[k] = loc;
187: }
188: }
189: if (scall==MAT_INITIAL_MATRIX) {
190: MatCreateMPIAdj(scomm_row,irow_n,icol_n,sxadj,sadjncy,svalues,submat[i]);
191: } else {
192: Mat sadj = *(submat[i]);
193: Mat_MPIAdj *sa = (Mat_MPIAdj*)((sadj)->data);
194: PetscObjectGetComm((PetscObject)sadj,&scomm_mat);
195: MPI_Comm_compare(scomm_row,scomm_mat,&issame);
196: if (issame != MPI_IDENT) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"submatrix must have the same comm as the row index set\n");
197: PetscArraycpy(sa->i,sxadj,irow_n+1);
198: PetscArraycpy(sa->j,sadjncy,sxadj[irow_n]);
199: if (svalues) {PetscArraycpy(sa->values,svalues,sxadj[irow_n]);}
200: PetscFree(sxadj);
201: PetscFree(sadjncy);
202: if (svalues) {PetscFree(svalues);}
203: }
204: }
205: PetscFree(indices);
206: return(0);
207: }
209: static PetscErrorCode MatCreateSubMatricesMPI_MPIAdj(Mat mat,PetscInt n, const IS irow[],const IS icol[],MatReuse scall,Mat *submat[])
210: {
211: PetscErrorCode ierr;
212: /* get sub-matrices across a sub-communicator */
214: MatCreateSubMatrices_MPIAdj_Private(mat,n,irow,icol,PETSC_TRUE,scall,submat);
215: return(0);
216: }
218: static PetscErrorCode MatCreateSubMatrices_MPIAdj(Mat mat,PetscInt n,const IS irow[],const IS icol[],MatReuse scall,Mat *submat[])
219: {
220: PetscErrorCode ierr;
223: /* get sub-matrices based on PETSC_COMM_SELF */
224: MatCreateSubMatrices_MPIAdj_Private(mat,n,irow,icol,PETSC_FALSE,scall,submat);
225: return(0);
226: }
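/*
   Illustrative usage sketch (added, not from the original file): extracting one sequential
   sub-graph per process through the generic MatCreateSubMatrices() interface, which
   dispatches to the routine above for MATMPIADJ matrices.  The index sets rows and cols
   are assumed to have been created beforehand (e.g. with ISCreateGeneral) with sorted
   column indices.

     IS  rows,cols;
     Mat *submats;

     MatCreateSubMatrices(mat,1,&rows,&cols,MAT_INITIAL_MATRIX,&submats);
     ... submats[0] is a sequential MATMPIADJ matrix on PETSC_COMM_SELF ...
     MatDestroySubMatrices(1,&submats);
*/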
228: static PetscErrorCode MatView_MPIAdj_ASCII(Mat A,PetscViewer viewer)
229: {
230: Mat_MPIAdj *a = (Mat_MPIAdj*)A->data;
231: PetscErrorCode ierr;
232: PetscInt i,j,m = A->rmap->n;
233: const char *name;
234: PetscViewerFormat format;
237: PetscObjectGetName((PetscObject)A,&name);
238: PetscViewerGetFormat(viewer,&format);
239: if (format == PETSC_VIEWER_ASCII_INFO) {
240: return(0);
241: } else if (format == PETSC_VIEWER_ASCII_MATLAB) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"MATLAB format not supported");
242: else {
243: PetscViewerASCIIUseTabs(viewer,PETSC_FALSE);
244: PetscViewerASCIIPushSynchronized(viewer);
245: for (i=0; i<m; i++) {
246: PetscViewerASCIISynchronizedPrintf(viewer,"row %D:",i+A->rmap->rstart);
247: for (j=a->i[i]; j<a->i[i+1]; j++) {
248: if (a->values) {
249: PetscViewerASCIISynchronizedPrintf(viewer," (%D, %D) ",a->j[j], a->values[j]);
250: } else {
251: PetscViewerASCIISynchronizedPrintf(viewer," %D ",a->j[j]);
252: }
253: }
254: PetscViewerASCIISynchronizedPrintf(viewer,"\n");
255: }
256: PetscViewerASCIIUseTabs(viewer,PETSC_TRUE);
257: PetscViewerFlush(viewer);
258: PetscViewerASCIIPopSynchronized(viewer);
259: }
260: return(0);
261: }
263: static PetscErrorCode MatView_MPIAdj(Mat A,PetscViewer viewer)
264: {
266: PetscBool iascii;
269: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);
270: if (iascii) {
271: MatView_MPIAdj_ASCII(A,viewer);
272: }
273: return(0);
274: }
276: static PetscErrorCode MatDestroy_MPIAdj(Mat mat)
277: {
278: Mat_MPIAdj *a = (Mat_MPIAdj*)mat->data;
282: #if defined(PETSC_USE_LOG)
283: PetscLogObjectState((PetscObject)mat,"Rows=%D, Cols=%D, NZ=%D",mat->rmap->n,mat->cmap->n,a->nz);
284: #endif
285: PetscFree(a->diag);
286: if (a->freeaij) {
287: if (a->freeaijwithfree) {
288: if (a->i) free(a->i);
289: if (a->j) free(a->j);
290: } else {
291: PetscFree(a->i);
292: PetscFree(a->j);
293: PetscFree(a->values);
294: }
295: }
296: PetscFree(a->rowvalues);
297: PetscFree(mat->data);
298: PetscObjectChangeTypeName((PetscObject)mat,NULL);
299: PetscObjectComposeFunction((PetscObject)mat,"MatMPIAdjSetPreallocation_C",NULL);
300: PetscObjectComposeFunction((PetscObject)mat,"MatMPIAdjCreateNonemptySubcommMat_C",NULL);
301: return(0);
302: }
304: static PetscErrorCode MatSetOption_MPIAdj(Mat A,MatOption op,PetscBool flg)
305: {
306: Mat_MPIAdj *a = (Mat_MPIAdj*)A->data;
310: switch (op) {
311: case MAT_SYMMETRIC:
312: case MAT_STRUCTURALLY_SYMMETRIC:
313: case MAT_HERMITIAN:
314: a->symmetric = flg;
315: break;
316: case MAT_SYMMETRY_ETERNAL:
317: break;
318: default:
319: PetscInfo1(A,"Option %s ignored\n",MatOptions[op]);
320: break;
321: }
322: return(0);
323: }
325: static PetscErrorCode MatGetRow_MPIAdj(Mat A,PetscInt row,PetscInt *nz,PetscInt **idx,PetscScalar **v)
326: {
327: Mat_MPIAdj *a = (Mat_MPIAdj*)A->data;
331: row -= A->rmap->rstart;
332: if (row < 0 || row >= A->rmap->n) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Row out of range");
333: *nz = a->i[row+1] - a->i[row];
334: if (v) {
335: PetscInt j;
336: if (a->rowvalues_alloc < *nz) {
337: PetscFree(a->rowvalues);
338: a->rowvalues_alloc = PetscMax(a->rowvalues_alloc*2, *nz);
339: PetscMalloc1(a->rowvalues_alloc,&a->rowvalues);
340: }
341: for (j=0; j<*nz; j++) {
342: a->rowvalues[j] = a->values ? a->values[a->i[row]+j]:1.0;
343: }
344: *v = (*nz) ? a->rowvalues : NULL;
345: }
346: if (idx) *idx = (*nz) ? a->j + a->i[row] : NULL;
347: return(0);
348: }
350: static PetscErrorCode MatRestoreRow_MPIAdj(Mat A,PetscInt row,PetscInt *nz,PetscInt **idx,PetscScalar **v)
351: {
353: return(0);
354: }
356: static PetscErrorCode MatEqual_MPIAdj(Mat A,Mat B,PetscBool * flg)
357: {
358: Mat_MPIAdj *a = (Mat_MPIAdj*)A->data,*b = (Mat_MPIAdj*)B->data;
360: PetscBool flag;
363: /* the matrix dimensions and numbers of nonzeros must match */
364: flag = (PetscBool)((A->rmap->n == B->rmap->n) && (a->nz == b->nz));
368: /* the row pointers a->i must be the same */
369: if (flag) {PetscArraycmp(a->i,b->i,A->rmap->n+1,&flag);}
371: /* the column indices a->j must be the same */
372: if (flag) {PetscArraycmp(a->j,b->j,a->nz,&flag);}
374: MPIU_Allreduce(&flag,flg,1,MPIU_BOOL,MPI_LAND,PetscObjectComm((PetscObject)A));
375: return(0);
376: }
378: static PetscErrorCode MatGetRowIJ_MPIAdj(Mat A,PetscInt oshift,PetscBool symmetric,PetscBool blockcompressed,PetscInt *m,const PetscInt *inia[],const PetscInt *inja[],PetscBool *done)
379: {
380: PetscInt i;
381: Mat_MPIAdj *a = (Mat_MPIAdj*)A->data;
382: PetscInt **ia = (PetscInt**)inia,**ja = (PetscInt**)inja;
385: *m = A->rmap->n;
386: *ia = a->i;
387: *ja = a->j;
388: *done = PETSC_TRUE;
389: if (oshift) {
390: for (i=0; i<(*ia)[*m]; i++) {
391: (*ja)[i]++;
392: }
393: for (i=0; i<=(*m); i++) (*ia)[i]++;
394: }
395: return(0);
396: }
398: static PetscErrorCode MatRestoreRowIJ_MPIAdj(Mat A,PetscInt oshift,PetscBool symmetric,PetscBool blockcompressed,PetscInt *m,const PetscInt *inia[],const PetscInt *inja[],PetscBool *done)
399: {
400: PetscInt i;
401: Mat_MPIAdj *a = (Mat_MPIAdj*)A->data;
402: PetscInt **ia = (PetscInt**)inia,**ja = (PetscInt**)inja;
405: if (ia && a->i != *ia) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"ia passed back is not one obtained with MatGetRowIJ()");
406: if (ja && a->j != *ja) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"ja passed back is not one obtained with MatGetRowIJ()");
407: if (oshift) {
408: if (!ia) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"If oshift is nonzero you must pass in the inia[] argument");
409: if (!ja) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"If oshift is nonzero you must pass in the inja[] argument");
410: for (i=0; i<=(*m); i++) (*ia)[i]--;
411: for (i=0; i<(*ia)[*m]; i++) {
412: (*ja)[i]--;
413: }
414: }
415: return(0);
416: }
418: PetscErrorCode MatConvertFrom_MPIAdj(Mat A,MatType type,MatReuse reuse,Mat *newmat)
419: {
420: Mat B;
421: PetscErrorCode ierr;
422: PetscInt i,m,N,nzeros = 0,*ia,*ja,len,rstart,cnt,j,*a;
423: const PetscInt *rj;
424: const PetscScalar *ra;
425: MPI_Comm comm;
428: MatGetSize(A,NULL,&N);
429: MatGetLocalSize(A,&m,NULL);
430: MatGetOwnershipRange(A,&rstart,NULL);
432: /* count the number of nonzeros per row */
433: for (i=0; i<m; i++) {
434: MatGetRow(A,i+rstart,&len,&rj,NULL);
435: for (j=0; j<len; j++) {
436: if (rj[j] == i+rstart) {len--; break;} /* don't count diagonal */
437: }
438: nzeros += len;
439: MatRestoreRow(A,i+rstart,&len,&rj,NULL);
440: }
442: /* malloc space for nonzeros */
443: PetscMalloc1(nzeros+1,&a);
444: PetscMalloc1(N+1,&ia);
445: PetscMalloc1(nzeros+1,&ja);
447: nzeros = 0;
448: ia[0] = 0;
449: for (i=0; i<m; i++) {
450: MatGetRow(A,i+rstart,&len,&rj,&ra);
451: cnt = 0;
452: for (j=0; j<len; j++) {
453: if (rj[j] != i+rstart) { /* if not diagonal */
454: a[nzeros+cnt] = (PetscInt) PetscAbsScalar(ra[j]);
455: ja[nzeros+cnt++] = rj[j];
456: }
457: }
458: MatRestoreRow(A,i+rstart,&len,&rj,&ra);
459: nzeros += cnt;
460: ia[i+1] = nzeros;
461: }
463: PetscObjectGetComm((PetscObject)A,&comm);
464: MatCreate(comm,&B);
465: MatSetSizes(B,m,PETSC_DETERMINE,PETSC_DETERMINE,N);
466: MatSetType(B,type);
467: MatMPIAdjSetPreallocation(B,ia,ja,a);
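/* B takes ownership of ia, ja, and a; they are freed when B is destroyed */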
469: if (reuse == MAT_INPLACE_MATRIX) {
470: MatHeaderReplace(A,&B);
471: } else {
472: *newmat = B;
473: }
474: return(0);
475: }
477: /* -------------------------------------------------------------------*/
478: static struct _MatOps MatOps_Values = {NULL,
479: MatGetRow_MPIAdj,
480: MatRestoreRow_MPIAdj,
481: NULL,
482: /* 4*/ NULL,
483: NULL,
484: NULL,
485: NULL,
486: NULL,
487: NULL,
488: /*10*/ NULL,
489: NULL,
490: NULL,
491: NULL,
492: NULL,
493: /*15*/ NULL,
494: MatEqual_MPIAdj,
495: NULL,
496: NULL,
497: NULL,
498: /*20*/ NULL,
499: NULL,
500: MatSetOption_MPIAdj,
501: NULL,
502: /*24*/ NULL,
503: NULL,
504: NULL,
505: NULL,
506: NULL,
507: /*29*/ NULL,
508: NULL,
509: NULL,
510: NULL,
511: NULL,
512: /*34*/ NULL,
513: NULL,
514: NULL,
515: NULL,
516: NULL,
517: /*39*/ NULL,
518: MatCreateSubMatrices_MPIAdj,
519: NULL,
520: NULL,
521: NULL,
522: /*44*/ NULL,
523: NULL,
524: MatShift_Basic,
525: NULL,
526: NULL,
527: /*49*/ NULL,
528: MatGetRowIJ_MPIAdj,
529: MatRestoreRowIJ_MPIAdj,
530: NULL,
531: NULL,
532: /*54*/ NULL,
533: NULL,
534: NULL,
535: NULL,
536: NULL,
537: /*59*/ NULL,
538: MatDestroy_MPIAdj,
539: MatView_MPIAdj,
540: MatConvertFrom_MPIAdj,
541: NULL,
542: /*64*/ NULL,
543: NULL,
544: NULL,
545: NULL,
546: NULL,
547: /*69*/ NULL,
548: NULL,
549: NULL,
550: NULL,
551: NULL,
552: /*74*/ NULL,
553: NULL,
554: NULL,
555: NULL,
556: NULL,
557: /*79*/ NULL,
558: NULL,
559: NULL,
560: NULL,
561: NULL,
562: /*84*/ NULL,
563: NULL,
564: NULL,
565: NULL,
566: NULL,
567: /*89*/ NULL,
568: NULL,
569: NULL,
570: NULL,
571: NULL,
572: /*94*/ NULL,
573: NULL,
574: NULL,
575: NULL,
576: NULL,
577: /*99*/ NULL,
578: NULL,
579: NULL,
580: NULL,
581: NULL,
582: /*104*/ NULL,
583: NULL,
584: NULL,
585: NULL,
586: NULL,
587: /*109*/ NULL,
588: NULL,
589: NULL,
590: NULL,
591: NULL,
592: /*114*/ NULL,
593: NULL,
594: NULL,
595: NULL,
596: NULL,
597: /*119*/ NULL,
598: NULL,
599: NULL,
600: NULL,
601: NULL,
602: /*124*/ NULL,
603: NULL,
604: NULL,
605: NULL,
606: MatCreateSubMatricesMPI_MPIAdj,
607: /*129*/ NULL,
608: NULL,
609: NULL,
610: NULL,
611: NULL,
612: /*134*/ NULL,
613: NULL,
614: NULL,
615: NULL,
616: NULL,
617: /*139*/ NULL,
618: NULL,
619: NULL
620: };
622: static PetscErrorCode MatMPIAdjSetPreallocation_MPIAdj(Mat B,PetscInt *i,PetscInt *j,PetscInt *values)
623: {
624: Mat_MPIAdj *b = (Mat_MPIAdj*)B->data;
625: PetscBool useedgeweights;
629: PetscLayoutSetUp(B->rmap);
630: PetscLayoutSetUp(B->cmap);
631: if (values) useedgeweights = PETSC_TRUE; else useedgeweights = PETSC_FALSE;
632: /* Make sure every process knows whether edge weights are being used */
633: MPIU_Allreduce((int*)&useedgeweights,(int*)&b->useedgeweights,1,MPI_INT,MPI_MAX,PetscObjectComm((PetscObject)B));
635: if (PetscDefined(USE_DEBUG)) {
636: PetscInt ii;
638: if (i[0] != 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"First i[] index must be zero, instead it is %D\n",i[0]);
639: for (ii=1; ii<B->rmap->n; ii++) {
640: if (i[ii] < 0 || i[ii] < i[ii-1]) SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"i[%D]=%D index is out of range: i[%D]=%D",ii,i[ii],ii-1,i[ii-1]);
641: }
642: for (ii=0; ii<i[B->rmap->n]; ii++) {
643: if (j[ii] < 0 || j[ii] >= B->cmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Column index j[%D]=%D is out of range\n",ii,j[ii]);
644: }
645: }
646: B->preallocated = PETSC_TRUE;
648: b->j = j;
649: b->i = i;
650: b->values = values;
652: b->nz = i[B->rmap->n];
653: b->diag = NULL;
654: b->symmetric = PETSC_FALSE;
655: b->freeaij = PETSC_TRUE;
657: MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);
658: MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);
659: return(0);
660: }
662: static PetscErrorCode MatMPIAdjCreateNonemptySubcommMat_MPIAdj(Mat A,Mat *B)
663: {
664: Mat_MPIAdj *a = (Mat_MPIAdj*)A->data;
666: const PetscInt *ranges;
667: MPI_Comm acomm,bcomm;
668: MPI_Group agroup,bgroup;
669: PetscMPIInt i,rank,size,nranks,*ranks;
672: *B = NULL;
673: PetscObjectGetComm((PetscObject)A,&acomm);
674: MPI_Comm_size(acomm,&size);
675: MPI_Comm_rank(acomm,&rank);
676: MatGetOwnershipRanges(A,&ranges);
677: for (i=0,nranks=0; i<size; i++) {
678: if (ranges[i+1] - ranges[i] > 0) nranks++;
679: }
680: if (nranks == size) { /* All ranks have a positive number of rows, so we do not need to create a subcommunicator */
681: PetscObjectReference((PetscObject)A);
682: *B = A;
683: return(0);
684: }
686: PetscMalloc1(nranks,&ranks);
687: for (i=0,nranks=0; i<size; i++) {
688: if (ranges[i+1] - ranges[i] > 0) ranks[nranks++] = i;
689: }
690: MPI_Comm_group(acomm,&agroup);
691: MPI_Group_incl(agroup,nranks,ranks,&bgroup);
692: PetscFree(ranks);
693: MPI_Comm_create(acomm,bgroup,&bcomm);
694: MPI_Group_free(&agroup);
695: MPI_Group_free(&bgroup);
696: if (bcomm != MPI_COMM_NULL) {
697: PetscInt m,N;
698: Mat_MPIAdj *b;
699: MatGetLocalSize(A,&m,NULL);
700: MatGetSize(A,NULL,&N);
701: MatCreateMPIAdj(bcomm,m,N,a->i,a->j,a->values,B);
702: b = (Mat_MPIAdj*)(*B)->data;
703: b->freeaij = PETSC_FALSE;
704: MPI_Comm_free(&bcomm);
705: }
706: return(0);
707: }
709: PetscErrorCode MatMPIAdjToSeq_MPIAdj(Mat A,Mat *B)
710: {
712: PetscInt M,N,*II,*J,NZ,nz,m,nzstart,i;
713: PetscInt *Values = NULL;
714: Mat_MPIAdj *adj = (Mat_MPIAdj*)A->data;
715: PetscMPIInt mnz,mm,*allnz,*allm,size,*dispnz,*dispm;
718: MPI_Comm_size(PetscObjectComm((PetscObject)A),&size);
719: MatGetSize(A,&M,&N);
720: MatGetLocalSize(A,&m,NULL);
721: nz = adj->nz;
722: if (adj->i[m] != nz) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"nz %D does not match i[m] %D",nz,adj->i[m]);
723: MPI_Allreduce(&nz,&NZ,1,MPIU_INT,MPI_SUM,PetscObjectComm((PetscObject)A));
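/* Gather every process's column indices (and optional edge weights), then the shifted row
   offsets, so that each process ends up with the complete adjacency structure */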
725: PetscMPIIntCast(nz,&mnz);
726: PetscMalloc2(size,&allnz,size,&dispnz);
727: MPI_Allgather(&mnz,1,MPI_INT,allnz,1,MPI_INT,PetscObjectComm((PetscObject)A));
728: dispnz[0] = 0; for (i=1; i<size; i++) dispnz[i] = dispnz[i-1]+ allnz[i-1];
729: if (adj->values) {
730: PetscMalloc1(NZ,&Values);
731: MPI_Allgatherv(adj->values,mnz,MPIU_INT,Values,allnz,dispnz,MPIU_INT,PetscObjectComm((PetscObject)A));
732: }
733: PetscMalloc1(NZ,&J);
734: MPI_Allgatherv(adj->j,mnz,MPIU_INT,J,allnz,dispnz,MPIU_INT,PetscObjectComm((PetscObject)A));
735: PetscFree2(allnz,dispnz);
736: MPI_Scan(&nz,&nzstart,1,MPIU_INT,MPI_SUM,PetscObjectComm((PetscObject)A));
737: nzstart -= nz;
738: /* shift the i[] values so they will be correct after being received */
739: for (i=0; i<m; i++) adj->i[i] += nzstart;
740: PetscMalloc1(M+1,&II);
741: PetscMPIIntCast(m,&mm);
742: PetscMalloc2(size,&allm,size,&dispm);
743: MPI_Allgather(&mm,1,MPI_INT,allm,1,MPI_INT,PetscObjectComm((PetscObject)A));
744: dispm[0] = 0; for (i=1; i<size; i++) dispm[i] = dispm[i-1]+ allm[i-1];
745: MPI_Allgatherv(adj->i,mm,MPIU_INT,II,allm,dispm,MPIU_INT,PetscObjectComm((PetscObject)A));
746: PetscFree2(allm,dispm);
747: II[M] = NZ;
748: /* shift the i[] values back */
749: for (i=0; i<m; i++) adj->i[i] -= nzstart;
750: MatCreateMPIAdj(PETSC_COMM_SELF,M,N,II,J,Values,B);
751: return(0);
752: }
754: /*@
755: MatMPIAdjCreateNonemptySubcommMat - creates the same MPIAdj matrix on a subcommunicator containing only the processes that own a positive number of rows
757: Collective
759: Input Parameter:
760: . A - original MPIAdj matrix
762: Output Parameter:
763: . B - matrix on subcommunicator, NULL on ranks that owned zero rows of A
765: Level: developer
767: Note:
768: This function is mostly useful for internal use by mesh partitioning packages that require that every process owns at least one row.
770: The matrix B should be destroyed with MatDestroy(). The arrays are not copied, so B should be destroyed before A is destroyed.
772: .seealso: MatCreateMPIAdj()
773: @*/
774: PetscErrorCode MatMPIAdjCreateNonemptySubcommMat(Mat A,Mat *B)
775: {
780: PetscUseMethod(A,"MatMPIAdjCreateNonemptySubcommMat_C",(Mat,Mat*),(A,B));
781: return(0);
782: }
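/*
   Illustrative usage sketch (added, not from the original file): dropping empty processes
   before handing the graph to a partitioner that requires every rank to own at least one row.

     Mat B;

     MatMPIAdjCreateNonemptySubcommMat(A,&B);
     if (B) {                       B is NULL on ranks that owned no rows of A
       ... partition B on the smaller communicator ...
       MatDestroy(&B);              destroy B before A, since they share the i/j arrays
     }
*/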
784: /*MC
785: MATMPIADJ - MATMPIADJ = "mpiadj" - A matrix type to be used for distributed adjacency matrices,
786: intended for use in constructing orderings and partitionings.
788: Level: beginner
790: .seealso: MatCreateMPIAdj()
791: M*/
793: PETSC_EXTERN PetscErrorCode MatCreate_MPIAdj(Mat B)
794: {
795: Mat_MPIAdj *b;
799: PetscNewLog(B,&b);
800: B->data = (void*)b;
801: PetscMemcpy(B->ops,&MatOps_Values,sizeof(struct _MatOps));
802: B->assembled = PETSC_FALSE;
804: PetscObjectComposeFunction((PetscObject)B,"MatMPIAdjSetPreallocation_C",MatMPIAdjSetPreallocation_MPIAdj);
805: PetscObjectComposeFunction((PetscObject)B,"MatMPIAdjCreateNonemptySubcommMat_C",MatMPIAdjCreateNonemptySubcommMat_MPIAdj);
806: PetscObjectComposeFunction((PetscObject)B,"MatMPIAdjToSeq_C",MatMPIAdjToSeq_MPIAdj);
807: PetscObjectChangeTypeName((PetscObject)B,MATMPIADJ);
808: return(0);
809: }
811: /*@C
812: MatMPIAdjToSeq - Converts a parallel MPIAdj matrix to a complete MPIAdj matrix on each process (needed by sequential preconditioners)
814: Logically Collective
816: Input Parameter:
817: . A - the matrix
819: Output Parameter:
820: . B - the same matrix on all processes
822: Level: intermediate
824: .seealso: MatCreate(), MatCreateMPIAdj(), MatSetValues()
825: @*/
826: PetscErrorCode MatMPIAdjToSeq(Mat A,Mat *B)
827: {
831: PetscUseMethod(A,"MatMPIAdjToSeq_C",(Mat,Mat*),(A,B));
832: return(0);
833: }
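/*
   Illustrative usage sketch (added, not from the original file): gathering the complete
   adjacency graph onto every process, e.g. before calling a sequential partitioner.

     Mat seqadj;

     MatMPIAdjToSeq(A,&seqadj);     A is a parallel MATMPIADJ matrix
     ... use seqadj, which holds all rows of the graph on each process ...
     MatDestroy(&seqadj);
*/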
835: /*@C
836: MatMPIAdjSetPreallocation - Sets the arrays used for storing the matrix entries
838: Logically Collective
840: Input Parameters:
841: + B - the matrix
842: . i - the indices into j for the start of each row
843: . j - the column indices for each row (sorted for each row).
844: The indices in i and j start with zero (NOT with one).
845: - values - [optional] edge weights
847: Level: intermediate
849: .seealso: MatCreate(), MatCreateMPIAdj(), MatSetValues()
850: @*/
851: PetscErrorCode MatMPIAdjSetPreallocation(Mat B,PetscInt *i,PetscInt *j,PetscInt *values)
852: {
856: PetscTryMethod(B,"MatMPIAdjSetPreallocation_C",(Mat,PetscInt*,PetscInt*,PetscInt*),(B,i,j,values));
857: return(0);
858: }
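/*
   Illustrative sketch (added, not from the original file): the two-step creation path,
   equivalent to MatCreateMPIAdj().  Here comm, m, N and the PetscMalloc()-allocated
   arrays i and j are assumed to be set up already.

     Mat B;

     MatCreate(comm,&B);
     MatSetSizes(B,m,PETSC_DETERMINE,PETSC_DETERMINE,N);
     MatSetType(B,MATMPIADJ);
     MatMPIAdjSetPreallocation(B,i,j,NULL);
*/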
860: /*@C
861: MatCreateMPIAdj - Creates a sparse matrix representing an adjacency list.
862: The matrix does not have numerical values associated with it, but is
863: intended for ordering (to reduce bandwidth etc) and partitioning.
865: Collective
867: Input Parameters:
868: + comm - MPI communicator
869: . m - number of local rows
870: . N - number of global columns
871: . i - the indices into j for the start of each row
872: . j - the column indices for each row (sorted for each row).
873: The indices in i and j start with zero (NOT with one).
874: - values -[optional] edge weights
876: Output Parameter:
877: . A - the matrix
879: Level: intermediate
881: Notes:
882: This matrix object does not support most matrix operations, including
883: MatSetValues().
884: You must NOT free the i, j, and values arrays yourself. PETSc will free them
885: when the matrix is destroyed; you must allocate them with PetscMalloc(). If you
886: call from Fortran you need not create the arrays with PetscMalloc().
887: The i and j arrays should not include the matrix diagonal entries.
889: If you already have a matrix, you can create its adjacency matrix by a call
890: to MatConvert, specifying a type of MATMPIADJ.
892: Possible values for MatSetOption() - MAT_STRUCTURALLY_SYMMETRIC
894: .seealso: MatCreate(), MatConvert(), MatGetOrdering()
895: @*/
896: PetscErrorCode MatCreateMPIAdj(MPI_Comm comm,PetscInt m,PetscInt N,PetscInt *i,PetscInt *j,PetscInt *values,Mat *A)
897: {
901: MatCreate(comm,A);
902: MatSetSizes(*A,m,PETSC_DETERMINE,PETSC_DETERMINE,N);
903: MatSetType(*A,MATMPIADJ);
904: MatMPIAdjSetPreallocation(*A,i,j,values);
905: return(0);
906: }
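/*
   Illustrative usage sketch (added, not from the original file): building a 3-vertex ring
   graph as a MATMPIADJ matrix on a single process.  The i/j arrays must be allocated with
   PetscMalloc() because the matrix takes ownership of them.

     Mat      adj;
     PetscInt *ii,*jj;

     PetscMalloc1(4,&ii);
     PetscMalloc1(6,&jj);
     ii[0] = 0; ii[1] = 2; ii[2] = 4; ii[3] = 6;           three rows, two neighbors each
     jj[0] = 1; jj[1] = 2;                                  row 0 is connected to 1 and 2
     jj[2] = 0; jj[3] = 2;                                  row 1 is connected to 0 and 2
     jj[4] = 0; jj[5] = 1;                                  row 2 is connected to 0 and 1
     MatCreateMPIAdj(PETSC_COMM_SELF,3,3,ii,jj,NULL,&adj);  NULL: no edge weights
     MatView(adj,PETSC_VIEWER_STDOUT_SELF);
     MatDestroy(&adj);                                      also frees ii and jj
*/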