Actual source code: mpiadj.c
petsc-3.9.4 2018-09-11
/*
    Defines the basic matrix operations for the ADJ adjacency list matrix data-structure.
*/
#include <../src/mat/impls/adj/mpi/mpiadj.h>
#include <petscsf.h>

/*
 * The interface should be easy to use for both MatCreateSubMatrix (parallel sub-matrix) and MatCreateSubMatrices (sequential sub-matrices)
 */
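/*
 * Sketch of the algorithm (descriptive comment added here, not in the original source):
 * the routine gathers the rows listed in irows onto the calling process with two
 * PetscSF broadcast rounds. The first round, rooted at the owning processes, ships
 * the per-row lengths (ncols) and row offsets (xadj); the second round ships the
 * column indices (adjncy) and, when present, the edge weights. Columns not listed
 * in icols are then marked with -1 and compacted out of the final CSR arrays
 * (sxadj, sadjncy, svalues).
 */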
static PetscErrorCode MatCreateSubMatrix_MPIAdj_data(Mat adj,IS irows,IS icols,PetscInt **sadj_xadj,PetscInt **sadj_adjncy,PetscInt **sadj_values)
{
  PetscInt       nlrows_is,icols_n,i,j,nroots,nleaves,owner,rlocalindex,*ncols_send,*ncols_recv;
  PetscInt       nlrows_mat,*adjncy_recv,Ncols_recv,Ncols_send,*xadj_recv,*values_recv;
  PetscInt       *ncols_recv_offsets,loc,rnclos,*sadjncy,*sxadj,*svalues,isvalue;
  const PetscInt *irows_indices,*icols_indices,*xadj,*adjncy;
  Mat_MPIAdj     *a = (Mat_MPIAdj*)adj->data;
  PetscLayout    rmap;
  MPI_Comm       comm;
  PetscSF        sf;
  PetscSFNode    *iremote;
  PetscBool      done;
  PetscErrorCode ierr;

  PetscObjectGetComm((PetscObject)adj,&comm);
  MatGetLayouts(adj,&rmap,NULL);
  ISGetLocalSize(irows,&nlrows_is);
  ISGetIndices(irows,&irows_indices);
  PetscCalloc1(nlrows_is,&iremote);
  /* construct the SF graph */
  nleaves = nlrows_is;
  for (i=0; i<nlrows_is; i++) {
    owner       = -1;
    rlocalindex = -1;
    PetscLayoutFindOwnerIndex(rmap,irows_indices[i],&owner,&rlocalindex);
    iremote[i].rank  = owner;
    iremote[i].index = rlocalindex;
  }
  MatGetRowIJ(adj,0,PETSC_FALSE,PETSC_FALSE,&nlrows_mat,&xadj,&adjncy,&done);
  PetscCalloc4(nlrows_mat,&ncols_send,nlrows_is,&xadj_recv,nlrows_is+1,&ncols_recv_offsets,nlrows_is,&ncols_recv);
  nroots = nlrows_mat;
  for (i=0; i<nlrows_mat; i++) {
    ncols_send[i] = xadj[i+1]-xadj[i];
  }
  PetscSFCreate(comm,&sf);
  PetscSFSetGraph(sf,nroots,nleaves,NULL,PETSC_OWN_POINTER,iremote,PETSC_OWN_POINTER);
  PetscSFSetType(sf,PETSCSFBASIC);
  PetscSFSetFromOptions(sf);
  PetscSFBcastBegin(sf,MPIU_INT,ncols_send,ncols_recv);
  PetscSFBcastEnd(sf,MPIU_INT,ncols_send,ncols_recv);
  PetscSFBcastBegin(sf,MPIU_INT,xadj,xadj_recv);
  PetscSFBcastEnd(sf,MPIU_INT,xadj,xadj_recv);
  PetscSFDestroy(&sf);
  Ncols_recv = 0;
  for (i=0; i<nlrows_is; i++) {
    Ncols_recv             += ncols_recv[i];
    ncols_recv_offsets[i+1] = ncols_recv[i]+ncols_recv_offsets[i];
  }
  Ncols_send = 0;
  for (i=0; i<nlrows_mat; i++) {
    Ncols_send += ncols_send[i];
  }
  PetscCalloc1(Ncols_recv,&iremote);
  PetscCalloc1(Ncols_recv,&adjncy_recv);
  nleaves    = Ncols_recv;
  Ncols_recv = 0;
  for (i=0; i<nlrows_is; i++) {
    PetscLayoutFindOwner(rmap,irows_indices[i],&owner);
    for (j=0; j<ncols_recv[i]; j++) {
      iremote[Ncols_recv].rank    = owner;
      iremote[Ncols_recv++].index = xadj_recv[i]+j;
    }
  }
  ISRestoreIndices(irows,&irows_indices);
  /* determine whether we also need to send the edge weights */
  if (a->values) {isvalue=1;} else {isvalue=0;}
  /* this would involve a global communication */
  /* MPIU_Allreduce(&isvalue,&isvalue,1,MPIU_INT,MPI_SUM,comm); */
  if (isvalue) {PetscCalloc1(Ncols_recv,&values_recv);}
  nroots = Ncols_send;
  PetscSFCreate(comm,&sf);
  PetscSFSetGraph(sf,nroots,nleaves,NULL,PETSC_OWN_POINTER,iremote,PETSC_OWN_POINTER);
  PetscSFSetType(sf,PETSCSFBASIC);
  PetscSFSetFromOptions(sf);
  PetscSFBcastBegin(sf,MPIU_INT,adjncy,adjncy_recv);
  PetscSFBcastEnd(sf,MPIU_INT,adjncy,adjncy_recv);
  if (isvalue) {
    PetscSFBcastBegin(sf,MPIU_INT,a->values,values_recv);
    PetscSFBcastEnd(sf,MPIU_INT,a->values,values_recv);
  }
  PetscSFDestroy(&sf);
  MatRestoreRowIJ(adj,0,PETSC_FALSE,PETSC_FALSE,&nlrows_mat,&xadj,&adjncy,&done);
  ISGetLocalSize(icols,&icols_n);
  ISGetIndices(icols,&icols_indices);
  rnclos = 0;
  for (i=0; i<nlrows_is; i++) {
    for (j=ncols_recv_offsets[i]; j<ncols_recv_offsets[i+1]; j++) {
      PetscFindInt(adjncy_recv[j],icols_n,icols_indices,&loc);
      if (loc<0) {
        adjncy_recv[j] = -1;
        if (isvalue) values_recv[j] = -1;
        ncols_recv[i]--;
      } else {
        rnclos++;
      }
    }
  }
  ISRestoreIndices(icols,&icols_indices);
  PetscCalloc1(rnclos,&sadjncy);
  if (isvalue) {PetscCalloc1(rnclos,&svalues);}
  PetscCalloc1(nlrows_is+1,&sxadj);
  rnclos = 0;
  for (i=0; i<nlrows_is; i++) {
    for (j=ncols_recv_offsets[i]; j<ncols_recv_offsets[i+1]; j++) {
      if (adjncy_recv[j]<0) continue;
      sadjncy[rnclos] = adjncy_recv[j];
      if (isvalue) svalues[rnclos] = values_recv[j];
      rnclos++;
    }
  }
  for (i=0; i<nlrows_is; i++) {
    sxadj[i+1] = sxadj[i]+ncols_recv[i];
  }
  if (sadj_xadj)   {*sadj_xadj = sxadj;}     else {PetscFree(sxadj);}
  if (sadj_adjncy) {*sadj_adjncy = sadjncy;} else {PetscFree(sadjncy);}
  if (sadj_values) {
    if (isvalue) *sadj_values = svalues; else *sadj_values = 0;
  } else {
    if (isvalue) {PetscFree(svalues);}
  }
  PetscFree4(ncols_send,xadj_recv,ncols_recv_offsets,ncols_recv);
  PetscFree(adjncy_recv);
  if (isvalue) {PetscFree(values_recv);}
  return(0);
}
static PetscErrorCode MatCreateSubMatrices_MPIAdj_Private(Mat mat,PetscInt n,const IS irow[],const IS icol[],PetscBool subcomm,MatReuse scall,Mat *submat[])
{
  PetscInt       i,irow_n,icol_n,*sxadj,*sadjncy,*svalues;
  PetscInt       *indices,nindx,j,k,loc;
  PetscMPIInt    issame;
  const PetscInt *irow_indices,*icol_indices;
  MPI_Comm       scomm_row,scomm_col,scomm_mat;
  PetscErrorCode ierr;

  nindx = 0;
  /*
   * Estimate a maximum number for allocating memory
   */
  for (i=0; i<n; i++) {
    ISGetLocalSize(irow[i],&irow_n);
    ISGetLocalSize(icol[i],&icol_n);
    nindx = nindx>(irow_n+icol_n)? nindx:(irow_n+icol_n);
  }
  PetscCalloc1(nindx,&indices);
  /* construct a submatrix */
  for (i=0; i<n; i++) {
    if (subcomm) {
      PetscObjectGetComm((PetscObject)irow[i],&scomm_row);
      PetscObjectGetComm((PetscObject)icol[i],&scomm_col);
      MPI_Comm_compare(scomm_row,scomm_col,&issame);
      if (issame != MPI_IDENT) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Row index set must have the same comm as the col index set");
      MPI_Comm_compare(scomm_row,PETSC_COMM_SELF,&issame);
      if (issame == MPI_IDENT) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Cannot use PETSC_COMM_SELF as the comm when extracting a parallel submatrix");
    } else {
      scomm_row = PETSC_COMM_SELF;
    }
    /* get sub-matrix data */
    sxadj = 0; sadjncy = 0; svalues = 0;
    MatCreateSubMatrix_MPIAdj_data(mat,irow[i],icol[i],&sxadj,&sadjncy,&svalues);
    ISGetLocalSize(irow[i],&irow_n);
    ISGetLocalSize(icol[i],&icol_n);
    ISGetIndices(irow[i],&irow_indices);
    PetscMemcpy(indices,irow_indices,sizeof(PetscInt)*irow_n);
    ISRestoreIndices(irow[i],&irow_indices);
    ISGetIndices(icol[i],&icol_indices);
    PetscMemcpy(indices+irow_n,icol_indices,sizeof(PetscInt)*icol_n);
    ISRestoreIndices(icol[i],&icol_indices);
    nindx = irow_n+icol_n;
    PetscSortRemoveDupsInt(&nindx,indices);
    /* renumber columns */
    for (j=0; j<irow_n; j++) {
      for (k=sxadj[j]; k<sxadj[j+1]; k++) {
        PetscFindInt(sadjncy[k],nindx,indices,&loc);
#if defined(PETSC_USE_DEBUG)
        if (loc<0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Cannot find col %D",sadjncy[k]);
#endif
        sadjncy[k] = loc;
      }
    }
    if (scall==MAT_INITIAL_MATRIX) {
      MatCreateMPIAdj(scomm_row,irow_n,icol_n,sxadj,sadjncy,svalues,submat[i]);
    } else {
      Mat        sadj = *(submat[i]);
      Mat_MPIAdj *sa  = (Mat_MPIAdj*)((sadj)->data);
      PetscObjectGetComm((PetscObject)sadj,&scomm_mat);
      MPI_Comm_compare(scomm_row,scomm_mat,&issame);
      if (issame != MPI_IDENT) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Submatrix must have the same comm as the row index set");
      PetscMemcpy(sa->i,sxadj,sizeof(PetscInt)*(irow_n+1));
      PetscMemcpy(sa->j,sadjncy,sizeof(PetscInt)*sxadj[irow_n]);
      if (svalues) {PetscMemcpy(sa->values,svalues,sizeof(PetscInt)*sxadj[irow_n]);}
      PetscFree(sxadj);
      PetscFree(sadjncy);
      if (svalues) {PetscFree(svalues);}
    }
  }
  PetscFree(indices);
  return(0);
}
static PetscErrorCode MatCreateSubMatricesMPI_MPIAdj(Mat mat,PetscInt n,const IS irow[],const IS icol[],MatReuse scall,Mat *submat[])
{
  PetscErrorCode ierr;

  /* get sub-matrices across a sub-communicator */
  MatCreateSubMatrices_MPIAdj_Private(mat,n,irow,icol,PETSC_TRUE,scall,submat);
  return(0);
}

static PetscErrorCode MatCreateSubMatrices_MPIAdj(Mat mat,PetscInt n,const IS irow[],const IS icol[],MatReuse scall,Mat *submat[])
{
  PetscErrorCode ierr;

  /* get sub-matrices based on PETSC_COMM_SELF */
  MatCreateSubMatrices_MPIAdj_Private(mat,n,irow,icol,PETSC_FALSE,scall,submat);
  return(0);
}
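/*
   Illustrative usage sketch (added comment, not part of the original source):
   extracting one sequential sub-graph through the public MatCreateSubMatrices()
   interface, which dispatches to MatCreateSubMatrices_MPIAdj above. The index
   sets rows and cols are assumed to have been created beforehand, e.g. with
   ISCreateGeneral() on PETSC_COMM_SELF.

     Mat *subs;
     ierr = MatCreateSubMatrices(A,1,&rows,&cols,MAT_INITIAL_MATRIX,&subs);CHKERRQ(ierr);
     ...
     ierr = MatDestroySubMatrices(1,&subs);CHKERRQ(ierr);
*/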
static PetscErrorCode MatView_MPIAdj_ASCII(Mat A,PetscViewer viewer)
{
  Mat_MPIAdj        *a = (Mat_MPIAdj*)A->data;
  PetscErrorCode    ierr;
  PetscInt          i,j,m = A->rmap->n;
  const char        *name;
  PetscViewerFormat format;

  PetscObjectGetName((PetscObject)A,&name);
  PetscViewerGetFormat(viewer,&format);
  if (format == PETSC_VIEWER_ASCII_INFO) {
    return(0);
  } else if (format == PETSC_VIEWER_ASCII_MATLAB) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"MATLAB format not supported");
  else {
    PetscViewerASCIIUseTabs(viewer,PETSC_FALSE);
    PetscViewerASCIIPushSynchronized(viewer);
    for (i=0; i<m; i++) {
      PetscViewerASCIISynchronizedPrintf(viewer,"row %D:",i+A->rmap->rstart);
      for (j=a->i[i]; j<a->i[i+1]; j++) {
        PetscViewerASCIISynchronizedPrintf(viewer," %D ",a->j[j]);
      }
      PetscViewerASCIISynchronizedPrintf(viewer,"\n");
    }
    PetscViewerASCIIUseTabs(viewer,PETSC_TRUE);
    PetscViewerFlush(viewer);
    PetscViewerASCIIPopSynchronized(viewer);
  }
  return(0);
}
static PetscErrorCode MatView_MPIAdj(Mat A,PetscViewer viewer)
{
  PetscBool iascii;

  PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);
  if (iascii) {
    MatView_MPIAdj_ASCII(A,viewer);
  }
  return(0);
}
static PetscErrorCode MatDestroy_MPIAdj(Mat mat)
{
  Mat_MPIAdj *a = (Mat_MPIAdj*)mat->data;

#if defined(PETSC_USE_LOG)
  PetscLogObjectState((PetscObject)mat,"Rows=%D, Cols=%D, NZ=%D",mat->rmap->n,mat->cmap->n,a->nz);
#endif
  PetscFree(a->diag);
  if (a->freeaij) {
    if (a->freeaijwithfree) {
      if (a->i) free(a->i);
      if (a->j) free(a->j);
    } else {
      PetscFree(a->i);
      PetscFree(a->j);
      PetscFree(a->values);
    }
  }
  PetscFree(a->rowvalues);
  PetscFree(mat->data);
  PetscObjectChangeTypeName((PetscObject)mat,0);
  PetscObjectComposeFunction((PetscObject)mat,"MatMPIAdjSetPreallocation_C",NULL);
  PetscObjectComposeFunction((PetscObject)mat,"MatMPIAdjCreateNonemptySubcommMat_C",NULL);
  return(0);
}
static PetscErrorCode MatSetOption_MPIAdj(Mat A,MatOption op,PetscBool flg)
{
  Mat_MPIAdj *a = (Mat_MPIAdj*)A->data;

  switch (op) {
  case MAT_SYMMETRIC:
  case MAT_STRUCTURALLY_SYMMETRIC:
  case MAT_HERMITIAN:
    a->symmetric = flg;
    break;
  case MAT_SYMMETRY_ETERNAL:
    break;
  default:
    PetscInfo1(A,"Option %s ignored\n",MatOptions[op]);
    break;
  }
  return(0);
}
static PetscErrorCode MatGetRow_MPIAdj(Mat A,PetscInt row,PetscInt *nz,PetscInt **idx,PetscScalar **v)
{
  Mat_MPIAdj *a = (Mat_MPIAdj*)A->data;

  row -= A->rmap->rstart;
  if (row < 0 || row >= A->rmap->n) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Row out of range");
  *nz = a->i[row+1] - a->i[row];
  if (v) {
    PetscInt j;
    if (a->rowvalues_alloc < *nz) {
      PetscFree(a->rowvalues);
      a->rowvalues_alloc = PetscMax(a->rowvalues_alloc*2,*nz);
      PetscMalloc1(a->rowvalues_alloc,&a->rowvalues);
    }
    for (j=0; j<*nz; j++) {
      a->rowvalues[j] = a->values ? a->values[a->i[row]+j] : 1.0;
    }
    *v = (*nz) ? a->rowvalues : NULL;
  }
  if (idx) *idx = (*nz) ? a->j + a->i[row] : NULL;
  return(0);
}
static PetscErrorCode MatRestoreRow_MPIAdj(Mat A,PetscInt row,PetscInt *nz,PetscInt **idx,PetscScalar **v)
{
  return(0);
}
static PetscErrorCode MatEqual_MPIAdj(Mat A,Mat B,PetscBool *flg)
{
  Mat_MPIAdj *a = (Mat_MPIAdj*)A->data,*b = (Mat_MPIAdj*)B->data;
  PetscBool  flag;

  /* if the matrix dimensions or the numbers of nonzeros differ, the matrices are not equal */
  if ((A->rmap->n != B->rmap->n) || (a->nz != b->nz)) {
    flag = PETSC_FALSE;
  } else {
    /* otherwise compare the row offsets a->i, then the column indices a->j */
    PetscMemcmp(a->i,b->i,(A->rmap->n+1)*sizeof(PetscInt),&flag);
    if (flag) {PetscMemcmp(a->j,b->j,(a->nz)*sizeof(PetscInt),&flag);}
  }
  MPIU_Allreduce(&flag,flg,1,MPIU_BOOL,MPI_LAND,PetscObjectComm((PetscObject)A));
  return(0);
}
static PetscErrorCode MatGetRowIJ_MPIAdj(Mat A,PetscInt oshift,PetscBool symmetric,PetscBool blockcompressed,PetscInt *m,const PetscInt *inia[],const PetscInt *inja[],PetscBool *done)
{
  PetscInt   i;
  Mat_MPIAdj *a = (Mat_MPIAdj*)A->data;
  PetscInt   **ia = (PetscInt**)inia,**ja = (PetscInt**)inja;

  *m    = A->rmap->n;
  *ia   = a->i;
  *ja   = a->j;
  *done = PETSC_TRUE;
  if (oshift) {
    for (i=0; i<(*ia)[*m]; i++) {
      (*ja)[i]++;
    }
    for (i=0; i<=(*m); i++) (*ia)[i]++;
  }
  return(0);
}
static PetscErrorCode MatRestoreRowIJ_MPIAdj(Mat A,PetscInt oshift,PetscBool symmetric,PetscBool blockcompressed,PetscInt *m,const PetscInt *inia[],const PetscInt *inja[],PetscBool *done)
{
  PetscInt   i;
  Mat_MPIAdj *a = (Mat_MPIAdj*)A->data;
  PetscInt   **ia = (PetscInt**)inia,**ja = (PetscInt**)inja;

  if (ia && a->i != *ia) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"ia passed back is not one obtained with MatGetRowIJ()");
  if (ja && a->j != *ja) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"ja passed back is not one obtained with MatGetRowIJ()");
  if (oshift) {
    if (!ia) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"If oshift is set you must pass in the inia[] argument");
    if (!ja) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"If oshift is set you must pass in the inja[] argument");
    for (i=0; i<=(*m); i++) (*ia)[i]--;
    for (i=0; i<(*ia)[*m]; i++) {
      (*ja)[i]--;
    }
  }
  return(0);
}
PetscErrorCode MatConvertFrom_MPIAdj(Mat A,MatType type,MatReuse reuse,Mat *newmat)
{
  Mat               B;
  PetscErrorCode    ierr;
  PetscInt          i,m,N,nzeros = 0,*ia,*ja,len,rstart,cnt,j,*a;
  const PetscInt    *rj;
  const PetscScalar *ra;
  MPI_Comm          comm;

  MatGetSize(A,NULL,&N);
  MatGetLocalSize(A,&m,NULL);
  MatGetOwnershipRange(A,&rstart,NULL);

  /* count the number of nonzeros per row */
  for (i=0; i<m; i++) {
    MatGetRow(A,i+rstart,&len,&rj,NULL);
    for (j=0; j<len; j++) {
      if (rj[j] == i+rstart) {len--; break;} /* don't count diagonal */
    }
    nzeros += len;
    MatRestoreRow(A,i+rstart,&len,&rj,NULL);
  }

  /* malloc space for nonzeros */
  PetscMalloc1(nzeros+1,&a);
  PetscMalloc1(N+1,&ia);
  PetscMalloc1(nzeros+1,&ja);

  nzeros = 0;
  ia[0]  = 0;
  for (i=0; i<m; i++) {
    MatGetRow(A,i+rstart,&len,&rj,&ra);
    cnt = 0;
    for (j=0; j<len; j++) {
      if (rj[j] != i+rstart) { /* if not diagonal */
        a[nzeros+cnt]    = (PetscInt) PetscAbsScalar(ra[j]);
        ja[nzeros+cnt++] = rj[j];
      }
    }
    MatRestoreRow(A,i+rstart,&len,&rj,&ra);
    nzeros += cnt;
    ia[i+1] = nzeros;
  }

  PetscObjectGetComm((PetscObject)A,&comm);
  MatCreate(comm,&B);
  MatSetSizes(B,m,PETSC_DETERMINE,PETSC_DETERMINE,N);
  MatSetType(B,type);
  MatMPIAdjSetPreallocation(B,ia,ja,a);

  if (reuse == MAT_INPLACE_MATRIX) {
    MatHeaderReplace(A,&B);
  } else {
    *newmat = B;
  }
  return(0);
}
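/*
   Illustrative usage sketch (added comment, not part of the original source):
   MatConvertFrom_MPIAdj is what makes the following MatConvert() call work for
   an already-assembled matrix A of another type (e.g. MATMPIAIJ); the result
   drops the diagonal entries and stores |a_ij| as integer edge weights.

     Mat adj;
     ierr = MatConvert(A,MATMPIADJ,MAT_INITIAL_MATRIX,&adj);CHKERRQ(ierr);
*/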
/* -------------------------------------------------------------------*/
static struct _MatOps MatOps_Values = {0,
                                       MatGetRow_MPIAdj,
                                       MatRestoreRow_MPIAdj,
                                       0,
                                /* 4*/ 0,
                                       0,
                                       0,
                                       0,
                                       0,
                                       0,
                                /*10*/ 0,
                                       0,
                                       0,
                                       0,
                                       0,
                                /*15*/ 0,
                                       MatEqual_MPIAdj,
                                       0,
                                       0,
                                       0,
                                /*20*/ 0,
                                       0,
                                       MatSetOption_MPIAdj,
                                       0,
                                /*24*/ 0,
                                       0,
                                       0,
                                       0,
                                       0,
                                /*29*/ 0,
                                       0,
                                       0,
                                       0,
                                       0,
                                /*34*/ 0,
                                       0,
                                       0,
                                       0,
                                       0,
                                /*39*/ 0,
                                       MatCreateSubMatrices_MPIAdj,
                                       0,
                                       0,
                                       0,
                                /*44*/ 0,
                                       0,
                                       MatShift_Basic,
                                       0,
                                       0,
                                /*49*/ 0,
                                       MatGetRowIJ_MPIAdj,
                                       MatRestoreRowIJ_MPIAdj,
                                       0,
                                       0,
                                /*54*/ 0,
                                       0,
                                       0,
                                       0,
                                       0,
                                /*59*/ 0,
                                       MatDestroy_MPIAdj,
                                       MatView_MPIAdj,
                                       MatConvertFrom_MPIAdj,
                                       0,
                                /*64*/ 0,
                                       0,
                                       0,
                                       0,
                                       0,
                                /*69*/ 0,
                                       0,
                                       0,
                                       0,
                                       0,
                                /*74*/ 0,
                                       0,
                                       0,
                                       0,
                                       0,
                                /*79*/ 0,
                                       0,
                                       0,
                                       0,
                                       0,
                                /*84*/ 0,
                                       0,
                                       0,
                                       0,
                                       0,
                                /*89*/ 0,
                                       0,
                                       0,
                                       0,
                                       0,
                                /*94*/ 0,
                                       0,
                                       0,
                                       0,
                                       0,
                                /*99*/ 0,
                                       0,
                                       0,
                                       0,
                                       0,
                               /*104*/ 0,
                                       0,
                                       0,
                                       0,
                                       0,
                               /*109*/ 0,
                                       0,
                                       0,
                                       0,
                                       0,
                               /*114*/ 0,
                                       0,
                                       0,
                                       0,
                                       0,
                               /*119*/ 0,
                                       0,
                                       0,
                                       0,
                                       0,
                               /*124*/ 0,
                                       0,
                                       0,
                                       0,
                                       MatCreateSubMatricesMPI_MPIAdj,
                               /*129*/ 0,
                                       0,
                                       0,
                                       0,
                                       0,
                               /*134*/ 0,
                                       0,
                                       0,
                                       0,
                                       0,
                               /*139*/ 0,
                                       0,
                                       0
};
static PetscErrorCode MatMPIAdjSetPreallocation_MPIAdj(Mat B,PetscInt *i,PetscInt *j,PetscInt *values)
{
  Mat_MPIAdj *b = (Mat_MPIAdj*)B->data;
#if defined(PETSC_USE_DEBUG)
  PetscInt ii;
#endif

  PetscLayoutSetUp(B->rmap);
  PetscLayoutSetUp(B->cmap);

#if defined(PETSC_USE_DEBUG)
  if (i[0] != 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"First i[] index must be zero, instead it is %D\n",i[0]);
  for (ii=1; ii<B->rmap->n; ii++) {
    if (i[ii] < 0 || i[ii] < i[ii-1]) SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"i[%D]=%D index is out of range: i[%D]=%D",ii,i[ii],ii-1,i[ii-1]);
  }
  for (ii=0; ii<i[B->rmap->n]; ii++) {
    if (j[ii] < 0 || j[ii] >= B->cmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Column index j[%D]=%D out of range",ii,j[ii]);
  }
#endif
  B->preallocated = PETSC_TRUE;

  b->j      = j;
  b->i      = i;
  b->values = values;

  b->nz        = i[B->rmap->n];
  b->diag      = 0;
  b->symmetric = PETSC_FALSE;
  b->freeaij   = PETSC_TRUE;

  MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);
  MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);
  return(0);
}
static PetscErrorCode MatMPIAdjCreateNonemptySubcommMat_MPIAdj(Mat A,Mat *B)
{
  Mat_MPIAdj     *a = (Mat_MPIAdj*)A->data;
  const PetscInt *ranges;
  MPI_Comm       acomm,bcomm;
  MPI_Group      agroup,bgroup;
  PetscMPIInt    i,rank,size,nranks,*ranks;

  *B = NULL;
  PetscObjectGetComm((PetscObject)A,&acomm);
  MPI_Comm_size(acomm,&size);
  MPI_Comm_rank(acomm,&rank);
  MatGetOwnershipRanges(A,&ranges);
  for (i=0,nranks=0; i<size; i++) {
    if (ranges[i+1] - ranges[i] > 0) nranks++;
  }
  if (nranks == size) { /* all ranks have a positive number of rows, so we do not need to create a subcomm */
    PetscObjectReference((PetscObject)A);
    *B = A;
    return(0);
  }

  PetscMalloc1(nranks,&ranks);
  for (i=0,nranks=0; i<size; i++) {
    if (ranges[i+1] - ranges[i] > 0) ranks[nranks++] = i;
  }
  MPI_Comm_group(acomm,&agroup);
  MPI_Group_incl(agroup,nranks,ranks,&bgroup);
  PetscFree(ranks);
  MPI_Comm_create(acomm,bgroup,&bcomm);
  MPI_Group_free(&agroup);
  MPI_Group_free(&bgroup);
  if (bcomm != MPI_COMM_NULL) {
    PetscInt   m,N;
    Mat_MPIAdj *b;
    MatGetLocalSize(A,&m,NULL);
    MatGetSize(A,NULL,&N);
    MatCreateMPIAdj(bcomm,m,N,a->i,a->j,a->values,B);
    b          = (Mat_MPIAdj*)(*B)->data;
    b->freeaij = PETSC_FALSE;
    MPI_Comm_free(&bcomm);
  }
  return(0);
}
PetscErrorCode MatMPIAdjToSeq_MPIAdj(Mat A,Mat *B)
{
  PetscInt    M,N,*II,*J,NZ,nz,m,nzstart,i;
  PetscInt    *Values = NULL;
  Mat_MPIAdj  *adj = (Mat_MPIAdj*)A->data;
  PetscMPIInt mnz,mm,*allnz,*allm,size,*dispnz,*dispm;

  MPI_Comm_size(PetscObjectComm((PetscObject)A),&size);
  MatGetSize(A,&M,&N);
  MatGetLocalSize(A,&m,NULL);
  nz = adj->nz;
  if (adj->i[m] != nz) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"nz %D does not match i[m] %D",nz,adj->i[m]);
  MPI_Allreduce(&nz,&NZ,1,MPIU_INT,MPI_SUM,PetscObjectComm((PetscObject)A));

  PetscMPIIntCast(nz,&mnz);
  PetscCalloc2(size,&allnz,size,&dispnz);
  MPI_Allgather(&mnz,1,MPI_INT,allnz,1,MPI_INT,PetscObjectComm((PetscObject)A));
  dispnz[0] = 0; for (i=1; i<size; i++) dispnz[i] = dispnz[i-1]+allnz[i-1];
  if (adj->values) {
    PetscMalloc1(NZ,&Values);
    MPI_Allgatherv(adj->values,mnz,MPIU_INT,Values,allnz,dispnz,MPIU_INT,PetscObjectComm((PetscObject)A));
  }
  PetscMalloc1(NZ,&J);
  MPI_Allgatherv(adj->j,mnz,MPIU_INT,J,allnz,dispnz,MPIU_INT,PetscObjectComm((PetscObject)A));
  PetscFree2(allnz,dispnz);
  MPI_Scan(&nz,&nzstart,1,MPIU_INT,MPI_SUM,PetscObjectComm((PetscObject)A));
  nzstart -= nz;
  /* shift the i[] values so they will be correct after being received */
  for (i=0; i<m; i++) adj->i[i] += nzstart;
  PetscMalloc1(M+1,&II);
  PetscMPIIntCast(m,&mm);
  PetscMalloc2(size,&allm,size,&dispm);
  MPI_Allgather(&mm,1,MPI_INT,allm,1,MPI_INT,PetscObjectComm((PetscObject)A));
  dispm[0] = 0; for (i=1; i<size; i++) dispm[i] = dispm[i-1]+allm[i-1];
  MPI_Allgatherv(adj->i,mm,MPIU_INT,II,allm,dispm,MPIU_INT,PetscObjectComm((PetscObject)A));
  PetscFree2(allm,dispm);
  II[M] = NZ;
  /* shift the i[] values back */
  for (i=0; i<m; i++) adj->i[i] -= nzstart;
  MatCreateMPIAdj(PETSC_COMM_SELF,M,N,II,J,Values,B);
  return(0);
}
/*@
   MatMPIAdjCreateNonemptySubcommMat - create the same MPIAdj matrix on a subcommunicator containing only processes owning a positive number of rows

   Collective

   Input Arguments:
.  A - original MPIAdj matrix

   Output Arguments:
.  B - matrix on subcommunicator, NULL on ranks that owned zero rows of A

   Level: developer

   Note:
   This function is mostly useful for internal use by mesh partitioning packages that require that every process owns at least one row.

   The matrix B should be destroyed with MatDestroy(). The arrays are not copied, so B should be destroyed before A is destroyed.

.seealso: MatCreateMPIAdj()
@*/
PetscErrorCode MatMPIAdjCreateNonemptySubcommMat(Mat A,Mat *B)
{
  PetscUseMethod(A,"MatMPIAdjCreateNonemptySubcommMat_C",(Mat,Mat*),(A,B));
  return(0);
}
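/*
   Illustrative usage sketch (added comment, not part of the original source):
   a partitioner that requires every process to own at least one row can do

     Mat B;
     ierr = MatMPIAdjCreateNonemptySubcommMat(A,&B);CHKERRQ(ierr);
     if (B) {
       ... partition B ...
       ierr = MatDestroy(&B);CHKERRQ(ierr);
     }

   where B must be destroyed before A because the CSR arrays are shared, not copied.
*/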
/*MC
   MATMPIADJ - MATMPIADJ = "mpiadj" - A matrix type to be used for distributed adjacency matrices,
   intended for use constructing orderings and partitionings.

   Level: beginner

.seealso: MatCreateMPIAdj
M*/

PETSC_EXTERN PetscErrorCode MatCreate_MPIAdj(Mat B)
{
  Mat_MPIAdj *b;

  PetscNewLog(B,&b);
  B->data      = (void*)b;
  PetscMemcpy(B->ops,&MatOps_Values,sizeof(struct _MatOps));
  B->assembled = PETSC_FALSE;

  PetscObjectComposeFunction((PetscObject)B,"MatMPIAdjSetPreallocation_C",MatMPIAdjSetPreallocation_MPIAdj);
  PetscObjectComposeFunction((PetscObject)B,"MatMPIAdjCreateNonemptySubcommMat_C",MatMPIAdjCreateNonemptySubcommMat_MPIAdj);
  PetscObjectComposeFunction((PetscObject)B,"MatMPIAdjToSeq_C",MatMPIAdjToSeq_MPIAdj);
  PetscObjectChangeTypeName((PetscObject)B,MATMPIADJ);
  return(0);
}
/*@C
   MatMPIAdjToSeq - Converts a parallel MPIAdj matrix to a complete MPIAdj on each process (needed by sequential preconditioners)

   Logically Collective on MPI_Comm

   Input Parameter:
.  A - the matrix

   Output Parameter:
.  B - the same matrix on all processes

   Level: intermediate

.seealso: MatCreate(), MatCreateMPIAdj(), MatSetValues()
@*/
PetscErrorCode MatMPIAdjToSeq(Mat A,Mat *B)
{
  PetscUseMethod(A,"MatMPIAdjToSeq_C",(Mat,Mat*),(A,B));
  return(0);
}
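/*
   Illustrative usage sketch (added comment, not part of the original source):
   gather the whole distributed graph onto every process:

     Mat seqadj;
     ierr = MatMPIAdjToSeq(A,&seqadj);CHKERRQ(ierr);
     ...
     ierr = MatDestroy(&seqadj);CHKERRQ(ierr);
*/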
/*@C
   MatMPIAdjSetPreallocation - Sets the arrays used for storing the matrix entries

   Logically Collective on MPI_Comm

   Input Parameters:
+  B - the matrix
.  i - the indices into j for the start of each row
.  j - the column indices for each row (sorted for each row).
       The indices in i and j start with zero (NOT with one).
-  values - [optional] edge weights

   Level: intermediate

.seealso: MatCreate(), MatCreateMPIAdj(), MatSetValues()
@*/
PetscErrorCode MatMPIAdjSetPreallocation(Mat B,PetscInt *i,PetscInt *j,PetscInt *values)
{
  PetscTryMethod(B,"MatMPIAdjSetPreallocation_C",(Mat,PetscInt*,PetscInt*,PetscInt*),(B,i,j,values));
  return(0);
}
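/*
   Illustrative usage sketch (added comment, not part of the original source):
   the preallocation path when the type is set explicitly, with comm, m, N, i
   and j as described in the manpage above; equivalent to calling the
   MatCreateMPIAdj() convenience routine below.

     Mat B;
     ierr = MatCreate(comm,&B);CHKERRQ(ierr);
     ierr = MatSetSizes(B,m,PETSC_DETERMINE,PETSC_DETERMINE,N);CHKERRQ(ierr);
     ierr = MatSetType(B,MATMPIADJ);CHKERRQ(ierr);
     ierr = MatMPIAdjSetPreallocation(B,i,j,NULL);CHKERRQ(ierr);
*/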
/*@C
   MatCreateMPIAdj - Creates a sparse matrix representing an adjacency list.
   The matrix does not have numerical values associated with it, but is
   intended for ordering (to reduce bandwidth etc) and partitioning.

   Collective on MPI_Comm

   Input Parameters:
+  comm - MPI communicator
.  m - number of local rows
.  N - number of global columns
.  i - the indices into j for the start of each row
.  j - the column indices for each row (sorted for each row).
       The indices in i and j start with zero (NOT with one).
-  values - [optional] edge weights

   Output Parameter:
.  A - the matrix

   Level: intermediate

   Notes: This matrix object does not support most matrix operations, including
   MatSetValues().
   You must NOT free the i, j, and values arrays yourself. PETSc will free them
   when the matrix is destroyed; you must allocate them with PetscMalloc(). If you
   call from Fortran you need not create the arrays with PetscMalloc().
   The arrays should not include the matrix diagonal entries.

   If you already have a matrix, you can create its adjacency matrix by a call
   to MatConvert, specifying a type of MATMPIADJ.

   Possible values for MatSetOption() - MAT_STRUCTURALLY_SYMMETRIC

.seealso: MatCreate(), MatConvert(), MatGetOrdering()
@*/
PetscErrorCode MatCreateMPIAdj(MPI_Comm comm,PetscInt m,PetscInt N,PetscInt *i,PetscInt *j,PetscInt *values,Mat *A)
{
  MatCreate(comm,A);
  MatSetSizes(*A,m,PETSC_DETERMINE,PETSC_DETERMINE,N);
  MatSetType(*A,MATMPIADJ);
  MatMPIAdjSetPreallocation(*A,i,j,values);
  return(0);
}